Mesh: Update addons for auto smooth removal #104609

Merged
Hans Goudey merged 10 commits from HooglyBoogly/blender-addons:refactor-mesh-corner-normals-lazy into main 2023-10-20 16:53:48 +02:00
56 changed files with 1584 additions and 1001 deletions
Showing only changes of commit 8cd49b012f

View File

@ -6,7 +6,7 @@ bl_info = {
"name": "Grease Pencil Tools",
"description": "Extra tools for Grease Pencil",
"author": "Samuel Bernou, Antonio Vazquez, Daniel Martinez Lara, Matias Mendiola",
"version": (1, 8, 1),
"version": (1, 8, 2),
"blender": (3, 0, 0),
"location": "Sidebar > Grease Pencil > Grease Pencil Tools",
"warning": "",

View File

@ -49,10 +49,10 @@ def get_reduced_area_coord(context):
## minus tool leftbar + sidebar right
regs = context.area.regions
toolbar = regs[2]
sidebar = regs[3]
header = regs[0]
tool_header = regs[1]
toolbar = next((r for r in regs if r.type == 'TOOLS'), None)
sidebar = next((r for r in regs if r.type == 'UI'), None)
header = next((r for r in regs if r.type == 'HEADER'), None)
tool_header = next((r for r in regs if r.type == 'TOOL_HEADER'), None)
up_margin = down_margin = 0
if tool_header.alignment == 'TOP':
up_margin += tool_header.height
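
The hunk above replaces positional indexing of `context.area.regions` with lookups by region `type`, since the order of regions in an area is not guaranteed. A minimal standalone sketch of the same lookup pattern (assuming a 3D View area exists in the current screen):

```python
import bpy

def get_region(area, region_type):
    # Return the first region of the requested type, or None if the area has none.
    return next((r for r in area.regions if r.type == region_type), None)

view3d = next(a for a in bpy.context.screen.areas if a.type == 'VIEW_3D')
print(get_region(view3d, 'TOOLS'), get_region(view3d, 'UI'))
```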

View File

@ -5,8 +5,8 @@
bl_info = {
"name": "Import Images as Planes",
"author": "Florian Meyer (tstscr), mont29, matali, Ted Schundler (SpkyElctrc), mrbimax",
"version": (3, 5, 0),
"blender": (2, 91, 0),
"version": (3, 5, 1),
"blender": (4, 0, 0),
"location": "File > Import > Images as Planes or Add > Image > Images as Planes",
"description": "Imports images and creates planes with the appropriate aspect ratio. "
"The images are mapped to the planes.",
@ -25,7 +25,10 @@ from math import pi
import bpy
from bpy.types import Operator
from bpy.app.translations import pgettext_tip as tip_
from bpy.app.translations import (
pgettext_tip as tip_,
contexts as i18n_contexts
)
from mathutils import Vector
from bpy.props import (
@ -151,6 +154,9 @@ def load_images(filenames, directory, force_reload=False, frame_start=1, find_se
file_iter = zip(filenames, repeat(1), repeat(1))
for filename, offset, frames in file_iter:
if not os.path.isfile(bpy.path.abspath(os.path.join(directory, filename))):
continue
image = load_image(filename, directory, check_existing=True, force_reload=force_reload)
# Size is unavailable for sequences, so we grab it early
@ -320,8 +326,8 @@ def get_shadeless_node(dest_node_tree):
output_node = node_tree.nodes.new('NodeGroupOutput')
input_node = node_tree.nodes.new('NodeGroupInput')
node_tree.outputs.new('NodeSocketShader', 'Shader')
node_tree.inputs.new('NodeSocketColor', 'Color')
node_tree.interface.new_socket('Shader', in_out='OUTPUT', socket_type='NodeSocketShader')
node_tree.interface.new_socket('Color', in_out='INPUT', socket_type='NodeSocketColor')
# This could be faster as a transparent shader, but then no ambient occlusion
diffuse_shader = node_tree.nodes.new('ShaderNodeBsdfDiffuse')
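
This hunk ports the shadeless node group to the Blender 4.0 node-group interface API, where input and output sockets are declared through `node_tree.interface` instead of the removed `inputs`/`outputs` collections. A short sketch of the new call pattern, using a throwaway group name for illustration:

```python
import bpy

group = bpy.data.node_groups.new("Shadeless Sketch", 'ShaderNodeTree')
# Blender 4.0+: sockets live on the tree's interface, declared with in_out and socket_type.
group.interface.new_socket("Shader", in_out='OUTPUT', socket_type='NodeSocketShader')
group.interface.new_socket("Color", in_out='INPUT', socket_type='NodeSocketColor')
```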
@ -731,7 +737,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
('HASHED', "Hashed","Use noise to dither the binary visibility (works well with multi-samples)"),
('OPAQUE', "Opaque","Render surface without transparency"),
)
blend_method: EnumProperty(name="Blend Mode", items=BLEND_METHODS, default='BLEND', description="Blend Mode for Transparent Faces")
blend_method: EnumProperty(
name="Blend Mode", items=BLEND_METHODS, default='BLEND',
description="Blend Mode for Transparent Faces", translation_context=i18n_contexts.id_material)
SHADOW_METHODS = (
('CLIP', "Clip","Use the alpha threshold to clip the visibility (binary visibility)"),
@ -739,7 +747,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
('OPAQUE',"Opaque","Material will cast shadows without transparency"),
('NONE',"None","Material will cast no shadow"),
)
shadow_method: EnumProperty(name="Shadow Mode", items=SHADOW_METHODS, default='CLIP', description="Shadow mapping method")
shadow_method: EnumProperty(
name="Shadow Mode", items=SHADOW_METHODS, default='CLIP',
description="Shadow mapping method", translation_context=i18n_contexts.id_material)
use_backface_culling: BoolProperty(
name="Backface Culling", default=False,
@ -923,11 +933,11 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
if context.active_object and context.active_object.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
self.import_images(context)
ret_code = self.import_images(context)
context.preferences.edit.use_enter_edit_mode = editmode
return {'FINISHED'}
return ret_code
def import_images(self, context):
@ -939,6 +949,10 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
find_sequences=self.image_sequence
))
if not images:
self.report({'WARNING'}, "Please select at least an image.")
return {'CANCELLED'}
# Create individual planes
planes = [self.single_image_spec_to_plane(context, img_spec) for img_spec in images]
@ -962,6 +976,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
# all done!
self.report({'INFO'}, tip_("Added {} Image Plane(s)").format(len(planes)))
return {'FINISHED'}
# operate on a single image
def single_image_spec_to_plane(self, context, img_spec):
@ -1079,7 +1094,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
if self.shader in {'PRINCIPLED', 'SHADELESS'}:
node_tree.links.new(core_shader.inputs[0], tex_image.outputs['Color'])
elif self.shader == 'EMISSION':
node_tree.links.new(core_shader.inputs['Emission'], tex_image.outputs['Color'])
node_tree.links.new(core_shader.inputs['Emission Color'], tex_image.outputs['Color'])
if self.use_transparency:
if self.shader in {'PRINCIPLED', 'EMISSION'}:
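
The socket rename above follows the Blender 4.0 Principled BSDF, where the old `'Emission'` color input became `'Emission Color'`. A hedged sketch of linking a texture to it, with a hypothetical material name:

```python
import bpy

mat = bpy.data.materials.new("Image Plane Sketch")
mat.use_nodes = True
node_tree = mat.node_tree
bsdf = node_tree.nodes["Principled BSDF"]
tex_image = node_tree.nodes.new('ShaderNodeTexImage')
# 4.0 renamed the emission color input, so link by the new socket name.
node_tree.links.new(bsdf.inputs['Emission Color'], tex_image.outputs['Color'])
```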

View File

@ -5,7 +5,7 @@
bl_info = {
"name": "UV Layout",
"author": "Campbell Barton, Matt Ebb",
"version": (1, 1, 6),
"version": (1, 2, 0),
"blender": (3, 0, 0),
"location": "UV Editor > UV > Export UV Layout",
"description": "Export the UV layout as a 2D graphic",
@ -30,6 +30,8 @@ if "bpy" in locals():
import os
import bpy
from bpy.app.translations import contexts as i18n_contexts
from bpy.props import (
StringProperty,
BoolProperty,
@ -54,10 +56,24 @@ class ExportUVLayout(bpy.types.Operator):
description="Export all UVs in this mesh (not just visible ones)",
default=False,
)
export_tiles: EnumProperty(
name="Export Tiles",
items=(
('NONE', "None",
"Export only UVs in the [0, 1] range"),
('UDIM', "UDIM",
"Export tiles in the UDIM numbering scheme: 1001 + u-tile + 10*v-tile"),
('UV', "UVTILE",
"Export tiles in the UVTILE numbering scheme: u(u-tile + 1)_v(v-tile + 1)"),
),
description="Choose whether to export only the [0, 1 range], or all UV tiles",
default='NONE',
)
modified: BoolProperty(
name="Modified",
description="Exports UVs from the modified mesh",
default=False,
translation_context=i18n_contexts.id_mesh,
)
mode: EnumProperty(
items=(
@ -73,6 +89,7 @@ class ExportUVLayout(bpy.types.Operator):
default='PNG',
)
size: IntVectorProperty(
name="Size",
size=2,
default=(1024, 1024),
min=8, max=32768,
@ -123,9 +140,6 @@ class ExportUVLayout(bpy.types.Operator):
if is_editmode:
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
filepath = self.filepath
filepath = bpy.path.ensure_ext(filepath, "." + self.mode.lower())
meshes = list(self.iter_meshes_to_export(context))
polygon_data = list(self.iter_polygon_data_to_draw(context, meshes))
different_colors = set(color for _, color in polygon_data)
@ -135,8 +149,35 @@ class ExportUVLayout(bpy.types.Operator):
obj_eval = obj.evaluated_get(depsgraph)
obj_eval.to_mesh_clear()
tiles = self.tiles_to_export(polygon_data)
export = self.get_exporter()
export(filepath, polygon_data, different_colors, self.size[0], self.size[1], self.opacity)
dirname, filename = os.path.split(self.filepath)
# Strip UDIM or UV numbering, and extension
import re
name_regex = r"^(.*?)"
udim_regex = r"(?:\.[0-9]{4})?"
uv_regex = r"(?:\.u[0-9]+_v[0-9]+)?"
ext_regex = r"(?:\.png|\.eps|\.svg)?$"
if self.export_tiles == 'NONE':
match = re.match(name_regex + ext_regex, filename)
elif self.export_tiles == 'UDIM':
match = re.match(name_regex + udim_regex + ext_regex, filename)
elif self.export_tiles == 'UV':
match = re.match(name_regex + uv_regex + ext_regex, filename)
if match:
filename = match.groups()[0]
for tile in sorted(tiles):
filepath = os.path.join(dirname, filename)
if self.export_tiles == 'UDIM':
filepath += f".{1001 + tile[0] + tile[1] * 10:04}"
elif self.export_tiles == 'UV':
filepath += f".u{tile[0] + 1}_v{tile[1] + 1}"
filepath = bpy.path.ensure_ext(filepath, "." + self.mode.lower())
export(filepath, tile, polygon_data, different_colors,
self.size[0], self.size[1], self.opacity)
if is_editmode:
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
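
For reference, the per-tile file names built in the loop above follow the two numbering schemes named in the `export_tiles` property. A small sketch of both schemes, assuming a base name of `layout` and PNG output:

```python
def tile_filename(base, tile, scheme):
    u, v = tile
    if scheme == 'UDIM':
        return f"{base}.{1001 + u + v * 10:04}.png"
    if scheme == 'UV':
        return f"{base}.u{u + 1}_v{v + 1}.png"
    return f"{base}.png"

print(tile_filename("layout", (0, 0), 'UDIM'))  # layout.1001.png
print(tile_filename("layout", (3, 1), 'UDIM'))  # layout.1014.png
print(tile_filename("layout", (3, 1), 'UV'))    # layout.u4_v2.png
```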
@ -161,6 +202,30 @@ class ExportUVLayout(bpy.types.Operator):
continue
yield obj
def tiles_to_export(self, polygon_data):
"""Get a set of tiles containing UVs.
This assumes there is no UV edge crossing an otherwise empty tile.
"""
if self.export_tiles == 'NONE':
return {(0, 0)}
from math import floor
tiles = set()
for poly in polygon_data:
for uv in poly[0]:
# Ignore UVs at corners - precisely touching the right or upper edge
# of a tile should not load its right/upper neighbor as well.
# From intern/cycles/scene/attribute.cpp
u, v = uv[0], uv[1]
x, y = floor(u), floor(v)
if x > 0 and u < x + 1e-6:
x -= 1
if y > 0 and v < y + 1e-6:
y -= 1
if x >= 0 and y >= 0:
tiles.add((x, y))
return tiles
@staticmethod
def currently_image_image_editor(context):
return isinstance(context.space_data, bpy.types.SpaceImageEditor)
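
The corner handling in `tiles_to_export` above nudges UVs sitting exactly on a tile's right or upper edge back into the tile they close, so a UV island that fills tile (0, 0) does not also register tiles (1, 0) and (0, 1). A standalone sketch of that flooring rule:

```python
from math import floor

def uv_to_tile(u, v, eps=1e-6):
    x, y = floor(u), floor(v)
    # A UV exactly on the right/upper edge belongs to the tile it closes, not the next one.
    if x > 0 and u < x + eps:
        x -= 1
    if y > 0 and v < y + eps:
        y -= 1
    return x, y

print(uv_to_tile(1.0, 1.0))   # (0, 0), not (1, 1)
print(uv_to_tile(1.5, 0.25))  # (1, 0)
```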

View File

@ -5,19 +5,19 @@
import bpy
def export(filepath, face_data, colors, width, height, opacity):
def export(filepath, tile, face_data, colors, width, height, opacity):
with open(filepath, 'w', encoding='utf-8') as file:
for text in get_file_parts(face_data, colors, width, height, opacity):
for text in get_file_parts(tile, face_data, colors, width, height, opacity):
file.write(text)
def get_file_parts(face_data, colors, width, height, opacity):
def get_file_parts(tile, face_data, colors, width, height, opacity):
yield from header(width, height)
if opacity > 0.0:
name_by_color = {}
yield from prepare_colors(colors, name_by_color)
yield from draw_colored_polygons(face_data, name_by_color, width, height)
yield from draw_lines(face_data, width, height)
yield from draw_colored_polygons(tile, face_data, name_by_color, width, height)
yield from draw_lines(tile, face_data, width, height)
yield from footer()
@ -53,24 +53,24 @@ def prepare_colors(colors, out_name_by_color):
yield "} def\n"
def draw_colored_polygons(face_data, name_by_color, width, height):
def draw_colored_polygons(tile, face_data, name_by_color, width, height):
for uvs, color in face_data:
yield from draw_polygon_path(uvs, width, height)
yield from draw_polygon_path(tile, uvs, width, height)
yield "closepath\n"
yield "%s\n" % name_by_color[color]
def draw_lines(face_data, width, height):
def draw_lines(tile, face_data, width, height):
for uvs, _ in face_data:
yield from draw_polygon_path(uvs, width, height)
yield from draw_polygon_path(tile, uvs, width, height)
yield "closepath\n"
yield "stroke\n"
def draw_polygon_path(uvs, width, height):
def draw_polygon_path(tile, uvs, width, height):
yield "newpath\n"
for j, uv in enumerate(uvs):
uv_scale = (uv[0] * width, uv[1] * height)
uv_scale = ((uv[0] - tile[0]) * width, (uv[1] - tile[1]) * height)
if j == 0:
yield "%.5f %.5f moveto\n" % uv_scale
else:

View File

@ -15,14 +15,14 @@ except ImportError:
oiio = None
def export(filepath, face_data, colors, width, height, opacity):
def export(filepath, tile, face_data, colors, width, height, opacity):
offscreen = gpu.types.GPUOffScreen(width, height)
offscreen.bind()
try:
fb = gpu.state.active_framebuffer_get()
fb.clear(color=(0.0, 0.0, 0.0, 0.0))
draw_image(face_data, opacity)
draw_image(tile, face_data, opacity)
pixel_data = fb.read_color(0, 0, width, height, 4, 0, 'UBYTE')
pixel_data.dimensions = width * height * 4
@ -32,11 +32,11 @@ def export(filepath, face_data, colors, width, height, opacity):
offscreen.free()
def draw_image(face_data, opacity):
def draw_image(tile, face_data, opacity):
gpu.state.blend_set('ALPHA')
with gpu.matrix.push_pop():
gpu.matrix.load_matrix(get_normalize_uvs_matrix())
gpu.matrix.load_matrix(get_normalize_uvs_matrix(tile))
gpu.matrix.load_projection_matrix(Matrix.Identity(4))
draw_background_colors(face_data, opacity)
@ -45,11 +45,11 @@ def draw_image(face_data, opacity):
gpu.state.blend_set('NONE')
def get_normalize_uvs_matrix():
def get_normalize_uvs_matrix(tile):
'''matrix maps x and y coordinates from [0, 1] to [-1, 1]'''
matrix = Matrix.Identity(4)
matrix.col[3][0] = -1
matrix.col[3][1] = -1
matrix.col[3][0] = -1 - (tile[0]) * 2
matrix.col[3][1] = -1 - (tile[1]) * 2
matrix[0][0] = 2
matrix[1][1] = 2
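
With the tile offset added, the matrix now maps UVs in `[tile, tile + 1]` to the `[-1, 1]` clip-space range used by the offscreen draw. A quick check of that mapping (runs inside Blender, or anywhere the standalone `mathutils` module is installed):

```python
from mathutils import Matrix, Vector

def get_normalize_uvs_matrix(tile):
    matrix = Matrix.Identity(4)
    matrix.col[3][0] = -1 - tile[0] * 2
    matrix.col[3][1] = -1 - tile[1] * 2
    matrix[0][0] = 2
    matrix[1][1] = 2
    return matrix

m = get_normalize_uvs_matrix((1, 0))
print(m @ Vector((1.0, 0.0, 0.0, 1.0)))  # (-1, -1, 0, 1): tile origin -> bottom-left
print(m @ Vector((2.0, 1.0, 0.0, 1.0)))  # (1, 1, 0, 1): far tile corner -> top-right
```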

View File

@ -7,15 +7,15 @@ from os.path import basename
from xml.sax.saxutils import escape
def export(filepath, face_data, colors, width, height, opacity):
def export(filepath, tile, face_data, colors, width, height, opacity):
with open(filepath, 'w', encoding='utf-8') as file:
for text in get_file_parts(face_data, colors, width, height, opacity):
for text in get_file_parts(tile, face_data, colors, width, height, opacity):
file.write(text)
def get_file_parts(face_data, colors, width, height, opacity):
def get_file_parts(tile, face_data, colors, width, height, opacity):
yield from header(width, height)
yield from draw_polygons(face_data, width, height, opacity)
yield from draw_polygons(tile, face_data, width, height, opacity)
yield from footer()
@ -29,7 +29,7 @@ def header(width, height):
yield f'<desc>{escape(desc)}</desc>\n'
def draw_polygons(face_data, width, height, opacity):
def draw_polygons(tile, face_data, width, height, opacity):
for uvs, color in face_data:
fill = f'fill="{get_color_string(color)}"'
@ -39,7 +39,7 @@ def draw_polygons(face_data, width, height, opacity):
yield ' points="'
for uv in uvs:
x, y = uv[0], 1.0 - uv[1]
x, y = uv[0] - tile[0], 1.0 - uv[1] + tile[1]
yield f'{x*width:.3f},{y*height:.3f} '
yield '" />\n'

View File

@ -1599,17 +1599,6 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
mesh_version.add_variable("mesh", _3ds_uint(3))
object_info.add_subchunk(mesh_version)
# Add MASTERSCALE element
mscale = _3ds_chunk(MASTERSCALE)
mscale.add_variable("scale", _3ds_float(1.0))
object_info.add_subchunk(mscale)
# Add 3D cursor location
if use_cursor:
cursor_chunk = _3ds_chunk(O_CONSTS)
cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location))
object_info.add_subchunk(cursor_chunk)
# Init main keyframe data chunk
if use_keyframes:
revision = 0x0005
@ -1618,92 +1607,6 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
curtime = scene.frame_current
kfdata = make_kfdata(revision, start, stop, curtime)
# Add AMBIENT color
if world is not None and 'WORLD' in object_filter:
ambient_chunk = _3ds_chunk(AMBIENTLIGHT)
ambient_light = _3ds_chunk(RGB)
ambient_light.add_variable("ambient", _3ds_float_color(world.color))
ambient_chunk.add_subchunk(ambient_light)
object_info.add_subchunk(ambient_chunk)
# Add BACKGROUND and BITMAP
if world.use_nodes:
bgtype = 'BACKGROUND'
ntree = world.node_tree.links
background_color_chunk = _3ds_chunk(RGB)
background_chunk = _3ds_chunk(SOLIDBACKGND)
background_flag = _3ds_chunk(USE_SOLIDBGND)
bgmixer = 'BACKGROUND', 'MIX', 'MIX_RGB'
bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD'
bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT'
bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color)
bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype)
bg_image = next((lk.from_node.image for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False)
gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False)
background_color_chunk.add_variable("color", _3ds_float_color(bg_color))
background_chunk.add_subchunk(background_color_chunk)
if bg_image:
background_image = _3ds_chunk(BITMAP)
background_flag = _3ds_chunk(USE_BITMAP)
background_image.add_variable("image", _3ds_string(sane_name(bg_image.name)))
object_info.add_subchunk(background_image)
object_info.add_subchunk(background_chunk)
# Add VGRADIENT chunk
if gradient and len(gradient) >= 3:
gradient_chunk = _3ds_chunk(VGRADIENT)
background_flag = _3ds_chunk(USE_VGRADIENT)
gradient_chunk.add_variable("midpoint", _3ds_float(gradient[1].position))
gradient_topcolor_chunk = _3ds_chunk(RGB)
gradient_topcolor_chunk.add_variable("color", _3ds_float_color(gradient[2].color[:3]))
gradient_chunk.add_subchunk(gradient_topcolor_chunk)
gradient_midcolor_chunk = _3ds_chunk(RGB)
gradient_midcolor_chunk.add_variable("color", _3ds_float_color(gradient[1].color[:3]))
gradient_chunk.add_subchunk(gradient_midcolor_chunk)
gradient_lowcolor_chunk = _3ds_chunk(RGB)
gradient_lowcolor_chunk.add_variable("color", _3ds_float_color(gradient[0].color[:3]))
gradient_chunk.add_subchunk(gradient_lowcolor_chunk)
object_info.add_subchunk(gradient_chunk)
object_info.add_subchunk(background_flag)
# Add FOG
fognode = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_ABSORPTION' and lk.to_socket.node.type in bgshade), False)
if fognode:
fog_chunk = _3ds_chunk(FOG)
fog_color_chunk = _3ds_chunk(RGB)
use_fog_flag = _3ds_chunk(USE_FOG)
fog_density = fognode.inputs['Density'].default_value * 100
fog_color_chunk.add_variable("color", _3ds_float_color(fognode.inputs[0].default_value[:3]))
fog_chunk.add_variable("nearplane", _3ds_float(world.mist_settings.start))
fog_chunk.add_variable("nearfog", _3ds_float(fog_density * 0.5))
fog_chunk.add_variable("farplane", _3ds_float(world.mist_settings.depth))
fog_chunk.add_variable("farfog", _3ds_float(fog_density + fog_density * 0.5))
fog_chunk.add_subchunk(fog_color_chunk)
object_info.add_subchunk(fog_chunk)
# Add LAYER FOG
foglayer = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_SCATTER' and lk.to_socket.node.type in bgshade), False)
if foglayer:
layerfog_flag = 0
if world.mist_settings.falloff == 'QUADRATIC':
layerfog_flag |= 0x1
if world.mist_settings.falloff == 'INVERSE_QUADRATIC':
layerfog_flag |= 0x2
layerfog_chunk = _3ds_chunk(LAYER_FOG)
layerfog_color_chunk = _3ds_chunk(RGB)
use_fog_flag = _3ds_chunk(USE_LAYER_FOG)
layerfog_color_chunk.add_variable("color", _3ds_float_color(foglayer.inputs[0].default_value[:3]))
layerfog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start))
layerfog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.height))
layerfog_chunk.add_variable("density", _3ds_float(foglayer.inputs[1].default_value))
layerfog_chunk.add_variable("flags", _3ds_uint(layerfog_flag))
layerfog_chunk.add_subchunk(layerfog_color_chunk)
object_info.add_subchunk(layerfog_chunk)
if fognode or foglayer and layer.use_pass_mist:
object_info.add_subchunk(use_fog_flag)
if use_keyframes and world.animation_data or world.node_tree.animation_data:
kfdata.add_subchunk(make_ambient_node(world))
# Make a list of all materials used in the selected meshes (use dictionary, each material is added once)
materialDict = {}
mesh_objects = []
@ -1772,10 +1675,107 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
f.material_index = 0
# Make material chunks for all materials used in the meshes
# Make MATERIAL chunks for all materials used in the meshes
for ma_image in materialDict.values():
object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1]))
# Add MASTERSCALE element
mscale = _3ds_chunk(MASTERSCALE)
mscale.add_variable("scale", _3ds_float(1.0))
object_info.add_subchunk(mscale)
# Add 3D cursor location
if use_cursor:
cursor_chunk = _3ds_chunk(O_CONSTS)
cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location))
object_info.add_subchunk(cursor_chunk)
# Add AMBIENT color
if world is not None and 'WORLD' in object_filter:
ambient_chunk = _3ds_chunk(AMBIENTLIGHT)
ambient_light = _3ds_chunk(RGB)
ambient_light.add_variable("ambient", _3ds_float_color(world.color))
ambient_chunk.add_subchunk(ambient_light)
object_info.add_subchunk(ambient_chunk)
# Add BACKGROUND and BITMAP
if world.use_nodes:
bgtype = 'BACKGROUND'
ntree = world.node_tree.links
background_color_chunk = _3ds_chunk(RGB)
background_chunk = _3ds_chunk(SOLIDBACKGND)
background_flag = _3ds_chunk(USE_SOLIDBGND)
bgmixer = 'BACKGROUND', 'MIX', 'MIX_RGB'
bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD'
bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT'
bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color)
bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype)
bg_image = next((lk.from_node.image for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False)
gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False)
background_color_chunk.add_variable("color", _3ds_float_color(bg_color))
background_chunk.add_subchunk(background_color_chunk)
if bg_image and bg_image is not None:
background_image = _3ds_chunk(BITMAP)
background_flag = _3ds_chunk(USE_BITMAP)
background_image.add_variable("image", _3ds_string(sane_name(bg_image.name)))
object_info.add_subchunk(background_image)
object_info.add_subchunk(background_chunk)
# Add VGRADIENT chunk
if gradient and len(gradient) >= 3:
gradient_chunk = _3ds_chunk(VGRADIENT)
background_flag = _3ds_chunk(USE_VGRADIENT)
gradient_chunk.add_variable("midpoint", _3ds_float(gradient[1].position))
gradient_topcolor_chunk = _3ds_chunk(RGB)
gradient_topcolor_chunk.add_variable("color", _3ds_float_color(gradient[2].color[:3]))
gradient_chunk.add_subchunk(gradient_topcolor_chunk)
gradient_midcolor_chunk = _3ds_chunk(RGB)
gradient_midcolor_chunk.add_variable("color", _3ds_float_color(gradient[1].color[:3]))
gradient_chunk.add_subchunk(gradient_midcolor_chunk)
gradient_lowcolor_chunk = _3ds_chunk(RGB)
gradient_lowcolor_chunk.add_variable("color", _3ds_float_color(gradient[0].color[:3]))
gradient_chunk.add_subchunk(gradient_lowcolor_chunk)
object_info.add_subchunk(gradient_chunk)
object_info.add_subchunk(background_flag)
# Add FOG
fognode = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_ABSORPTION' and lk.to_socket.node.type in bgshade), False)
if fognode:
fog_chunk = _3ds_chunk(FOG)
fog_color_chunk = _3ds_chunk(RGB)
use_fog_flag = _3ds_chunk(USE_FOG)
fog_density = fognode.inputs['Density'].default_value * 100
fog_color_chunk.add_variable("color", _3ds_float_color(fognode.inputs[0].default_value[:3]))
fog_chunk.add_variable("nearplane", _3ds_float(world.mist_settings.start))
fog_chunk.add_variable("nearfog", _3ds_float(fog_density * 0.5))
fog_chunk.add_variable("farplane", _3ds_float(world.mist_settings.depth))
fog_chunk.add_variable("farfog", _3ds_float(fog_density + fog_density * 0.5))
fog_chunk.add_subchunk(fog_color_chunk)
object_info.add_subchunk(fog_chunk)
# Add LAYER FOG
foglayer = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_SCATTER' and lk.to_socket.node.type in bgshade), False)
if foglayer:
layerfog_flag = 0
if world.mist_settings.falloff == 'QUADRATIC':
layerfog_flag |= 0x1
if world.mist_settings.falloff == 'INVERSE_QUADRATIC':
layerfog_flag |= 0x2
layerfog_chunk = _3ds_chunk(LAYER_FOG)
layerfog_color_chunk = _3ds_chunk(RGB)
use_fog_flag = _3ds_chunk(USE_LAYER_FOG)
layerfog_color_chunk.add_variable("color", _3ds_float_color(foglayer.inputs[0].default_value[:3]))
layerfog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start))
layerfog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.height))
layerfog_chunk.add_variable("density", _3ds_float(foglayer.inputs[1].default_value))
layerfog_chunk.add_variable("flags", _3ds_uint(layerfog_flag))
layerfog_chunk.add_subchunk(layerfog_color_chunk)
object_info.add_subchunk(layerfog_chunk)
if fognode or foglayer and layer.use_pass_mist:
object_info.add_subchunk(use_fog_flag)
if use_keyframes and world.animation_data or (world.node_tree and world.node_tree.animation_data):
kfdata.add_subchunk(make_ambient_node(world))
# Collect translation for transformation matrix
translation = {}
rotation = {}
@ -1929,11 +1929,10 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
if ob.parent is None or (ob.parent.name not in object_id):
obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
else: # Get the parent ID from the object_id dict
if ob.parent is not None and (ob.parent.name in object_id):
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
object_chunk.add_subchunk(obj_hierarchy_chunk)
# Add light object and hierarchy chunks to object info
@ -1967,11 +1966,10 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
if ob.parent is None or (ob.parent.name not in object_id):
obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
else: # Get the parent ID from the object_id dict
if ob.parent is not None and (ob.parent.name in object_id):
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
object_chunk.add_subchunk(obj_hierarchy_chunk)
# Add light object and hierarchy chunks to object info
@ -2012,4 +2010,4 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
# Debugging only: dump the chunk hierarchy
# primary.dump()
return {'FINISHED'}
return {'FINISHED'}

View File

@ -244,7 +244,7 @@ def skip_to_end(file, skip_chunk):
# MATERIALS #
#############
def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tintcolor, mapto):
def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tint1, tint2, mapto):
shader = contextWrapper.node_principled_bsdf
nodetree = contextWrapper.material.node_tree
shader.location = (-300, 0)
@ -256,26 +256,31 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
mixer.label = "Mixer"
mixer.inputs[0].default_value = pct / 100
mixer.inputs[1].default_value = (
tintcolor[:3] + [1] if tintcolor else
shader.inputs['Base Color'].default_value[:]
)
tint1[:3] + [1] if tint1 else shader.inputs['Base Color'].default_value[:])
contextWrapper._grid_to_location(1, 2, dst_node=mixer, ref_node=shader)
img_wrap = contextWrapper.base_color_texture
links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
links.new(mixer.outputs['Color'], shader.inputs['Base Color'])
if tint2 is not None:
img_wrap.colorspace_name = 'Non-Color'
mixer.inputs[2].default_value = tint2[:3] + [1]
links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[0])
else:
links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
elif mapto == 'ROUGHNESS':
img_wrap = contextWrapper.roughness_texture
elif mapto == 'METALLIC':
shader.location = (300,300)
img_wrap = contextWrapper.metallic_texture
elif mapto == 'SPECULARITY':
shader.location = (300,0)
img_wrap = contextWrapper.specular_tint_texture
elif mapto == 'ALPHA':
shader.location = (0, -300)
shader.location = (-300,0)
img_wrap = contextWrapper.alpha_texture
elif mapto == 'METALLIC':
shader.location = (300, 300)
img_wrap = contextWrapper.metallic_texture
elif mapto == 'ROUGHNESS':
shader.location = (300, 0)
img_wrap = contextWrapper.roughness_texture
img_wrap.use_alpha = False
links.new(img_wrap.node_image.outputs['Color'], img_wrap.socket_dst)
elif mapto == 'EMISSION':
shader.location = (-300, -600)
shader.location = (0,-900)
img_wrap = contextWrapper.emission_color_texture
elif mapto == 'NORMAL':
shader.location = (300, 300)
@ -310,10 +315,12 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
img_wrap.extension = 'CLIP'
if alpha == 'alpha':
own_node = img_wrap.node_image
contextWrapper.material.blend_method = 'HASHED'
links.new(own_node.outputs['Alpha'], img_wrap.socket_dst)
for link in links:
if link.from_node.type == 'TEX_IMAGE' and link.to_node.type == 'MIX_RGB':
tex = link.from_node.image.name
own_node = img_wrap.node_image
own_map = img_wrap.node_mapping
if tex == image.name:
links.new(link.from_node.outputs['Alpha'], img_wrap.socket_dst)
@ -323,9 +330,6 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
if imgs.name[-3:].isdigit():
if not imgs.users:
bpy.data.images.remove(imgs)
else:
links.new(img_wrap.node_image.outputs['Alpha'], img_wrap.socket_dst)
contextWrapper.material.blend_method = 'HASHED'
shader.location = (300, 300)
contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)
@ -518,7 +522,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
def read_texture(new_chunk, temp_chunk, name, mapto):
uscale, vscale, uoffset, voffset, angle = 1.0, 1.0, 0.0, 0.0, 0.0
contextWrapper.use_nodes = True
tintcolor = None
tint1 = tint2 = None
extend = 'wrap'
alpha = False
pct = 70
@ -542,14 +546,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
img = load_image(texture_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True)
temp_chunk.bytes_read += read_str_len # plus one for the null character that gets removed
elif temp_chunk.ID == MAT_MAP_USCALE:
uscale = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_VSCALE:
vscale = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_UOFFSET:
uoffset = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_VOFFSET:
voffset = read_float(temp_chunk)
elif temp_chunk.ID == MAT_BUMP_PERCENT:
contextWrapper.normalmap_strength = (float(read_short(temp_chunk) / 100))
elif temp_chunk.ID == MAT_MAP_TILING:
"""Control bit flags, where 0x1 activates decaling, 0x2 activates mirror,
@ -578,11 +576,20 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
if tiling & 0x200:
tint = 'RGBtint'
elif temp_chunk.ID == MAT_MAP_USCALE:
uscale = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_VSCALE:
vscale = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_UOFFSET:
uoffset = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_VOFFSET:
voffset = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_ANG:
angle = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_COL1:
tintcolor = read_byte_color(temp_chunk)
tint1 = read_byte_color(temp_chunk)
elif temp_chunk.ID == MAT_MAP_COL2:
tint2 = read_byte_color(temp_chunk)
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
@ -590,7 +597,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
# add the map to the material in the right channel
if img:
add_texture_to_material(img, contextWrapper, pct, extend, alpha, (uscale, vscale, 1),
(uoffset, voffset, 0), angle, tintcolor, mapto)
(uoffset, voffset, 0), angle, tint1, tint2, mapto)
def apply_constrain(vec):
convector = mathutils.Vector.Fill(3, (CONSTRAIN * 0.1))
@ -641,8 +648,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
temp_data = file.read(SZ_U_INT * 2)
track_chunk.bytes_read += SZ_U_INT * 2
nkeys = read_long(track_chunk)
if nkeys == 0:
keyframe_data[0] = default_data
for i in range(nkeys):
nframe = read_long(track_chunk)
nflags = read_short(track_chunk)
@ -657,8 +662,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
temp_data = file.read(SZ_U_SHORT * 5)
track_chunk.bytes_read += SZ_U_SHORT * 5
nkeys = read_long(track_chunk)
if nkeys == 0:
keyframe_angle[0] = default_value
for i in range(nkeys):
nframe = read_long(track_chunk)
nflags = read_short(track_chunk)
@ -815,7 +818,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
if contextWorld is None:
path, filename = os.path.split(file.name)
realname, ext = os.path.splitext(filename)
newWorld = bpy.data.worlds.new("Fog: " + realname)
contextWorld = bpy.data.worlds.new("Fog: " + realname)
context.scene.world = contextWorld
contextWorld.use_nodes = True
links = contextWorld.node_tree.links
@ -1332,7 +1335,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'AMBIENT': # Ambient
keyframe_data = {}
default_data = child.color[:]
keyframe_data[0] = child.color[:]
child.color = read_track_data(new_chunk)[0]
ambinode.inputs[0].default_value[:3] = child.color
ambilite.outputs[0].default_value[:3] = child.color
@ -1346,7 +1349,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'LIGHT': # Color
keyframe_data = {}
default_data = child.data.color[:]
keyframe_data[0] = child.data.color[:]
child.data.color = read_track_data(new_chunk)[0]
for keydata in keyframe_data.items():
child.data.color = keydata[1]
@ -1355,7 +1358,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'OBJECT': # Translation
keyframe_data = {}
default_data = child.location[:]
keyframe_data[0] = child.location[:]
child.location = read_track_data(new_chunk)[0]
if child.type in {'LIGHT', 'CAMERA'}:
trackposition[0] = child.location
@ -1384,6 +1387,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'TARGET': # Target position
keyframe_data = {}
location = child.location
keyframe_data[0] = trackposition[0]
target = mathutils.Vector(read_track_data(new_chunk)[0])
direction = calc_target(location, target)
child.rotation_euler.x = direction[0]
@ -1407,12 +1411,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and tracktype == 'OBJECT': # Rotation
keyframe_rotation = {}
keyframe_rotation[0] = child.rotation_axis_angle[:]
tflags = read_short(new_chunk)
temp_data = file.read(SZ_U_INT * 2)
new_chunk.bytes_read += SZ_U_INT * 2
nkeys = read_long(new_chunk)
if nkeys == 0:
keyframe_rotation[0] = child.rotation_axis_angle[:]
if tflags & 0x8: # Flag 0x8 locks X axis
child.lock_rotation[0] = True
if tflags & 0x10: # Flag 0x10 locks Y axis
@ -1445,7 +1448,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and tracktype == 'OBJECT': # Scale
keyframe_data = {}
default_data = child.scale[:]
keyframe_data[0] = child.scale[:]
child.scale = read_track_data(new_chunk)[0]
if contextTrack_flag & 0x8: # Flag 0x8 locks X axis
child.lock_scale[0] = True
@ -1465,7 +1468,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracktype == 'OBJECT': # Roll angle
keyframe_angle = {}
default_value = child.rotation_euler.y
keyframe_angle[0] = child.rotation_euler.y
child.rotation_euler.y = read_track_angle(new_chunk)[0]
for keydata in keyframe_angle.items():
child.rotation_euler.y = keydata[1]
@ -1475,7 +1478,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == FOV_TRACK_TAG and tracking == 'CAMERA': # Field of view
keyframe_angle = {}
default_value = child.data.angle
keyframe_angle[0] = child.data.angle
child.data.angle = read_track_angle(new_chunk)[0]
for keydata in keyframe_angle.items():
child.data.lens = (child.data.sensor_width / 2) / math.tan(keydata[1] / 2)
@ -1484,7 +1487,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == HOTSPOT_TRACK_TAG and tracking == 'LIGHT' and spotting == 'SPOT': # Hotspot
keyframe_angle = {}
cone_angle = math.degrees(child.data.spot_size)
default_value = cone_angle-(child.data.spot_blend * math.floor(cone_angle))
keyframe_angle[0] = cone_angle-(child.data.spot_blend * math.floor(cone_angle))
hot_spot = math.degrees(read_track_angle(new_chunk)[0])
child.data.spot_blend = 1.0 - (hot_spot / cone_angle)
for keydata in keyframe_angle.items():
@ -1493,7 +1496,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == FALLOFF_TRACK_TAG and tracking == 'LIGHT' and spotting == 'SPOT': # Falloff
keyframe_angle = {}
default_value = math.degrees(child.data.spot_size)
keyframe_angle[0] = math.degrees(child.data.spot_size)
child.data.spot_size = read_track_angle(new_chunk)[0]
for keydata in keyframe_angle.items():
child.data.spot_size = keydata[1]
@ -1732,4 +1735,4 @@ def load(operator, context, filepath="", constrain_size=0.0, use_scene_unit=Fals
IMAGE_SEARCH=use_image_search, FILTER=object_filter, WORLD_MATRIX=use_world_matrix, KEYFRAME=use_keyframes,
APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix, CURSOR=use_cursor, PIVOT=use_center_pivot,)
return {'FINISHED'}
return {'FINISHED'}

View File

@ -5,7 +5,7 @@
bl_info = {
"name": "FBX format",
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
"version": (5, 8, 3),
"version": (5, 8, 8),
"blender": (3, 6, 0),
"location": "File > Import-Export",
"description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",

View File

@ -250,9 +250,8 @@ class FBXElem:
for elem in self.elems:
offset = elem._calc_offsets(offset, (elem is elem_last))
offset += _BLOCK_SENTINEL_LENGTH
elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
if not is_last:
offset += _BLOCK_SENTINEL_LENGTH
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
offset += _BLOCK_SENTINEL_LENGTH
return offset
@ -282,9 +281,8 @@ class FBXElem:
assert(elem.id != b'')
elem._write(write, tell, (elem is elem_last))
write(_BLOCK_SENTINEL_DATA)
elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
if not is_last:
write(_BLOCK_SENTINEL_DATA)
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
write(_BLOCK_SENTINEL_DATA)
def _write_timedate_hack(elem_root):

View File

@ -1807,18 +1807,16 @@ def fbx_data_armature_elements(root, arm_obj, scene_data):
elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION)
elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0) # Only vague idea what it is...
# Pre-process vertex weights (also to check vertices assigned to more than four bones).
# Pre-process vertex weights so that the vertices only need to be iterated once.
ob = ob_obj.bdata
bo_vg_idx = {bo_obj.bdata.name: ob.vertex_groups[bo_obj.bdata.name].index
for bo_obj in clusters.keys() if bo_obj.bdata.name in ob.vertex_groups}
valid_idxs = set(bo_vg_idx.values())
vgroups = {vg.index: {} for vg in ob.vertex_groups}
verts_vgroups = (sorted(((vg.group, vg.weight) for vg in v.groups if vg.weight and vg.group in valid_idxs),
key=lambda e: e[1], reverse=True)
for v in me.vertices)
for idx, vgs in enumerate(verts_vgroups):
for vg_idx, w in vgs:
vgroups[vg_idx][idx] = w
for idx, v in enumerate(me.vertices):
for vg in v.groups:
if (w := vg.weight) and (vg_idx := vg.group) in valid_idxs:
vgroups[vg_idx][idx] = w
for bo_obj, clstr_key in clusters.items():
bo = bo_obj.bdata

View File

@ -1318,42 +1318,154 @@ class AnimationCurveNodeWrapper:
min_reldiff_fac = fac * 1.0e-3 # min relative value evolution: 0.1% of current 'order of magnitude'.
min_absdiff_fac = 0.1 # A tenth of reldiff...
are_keyed = []
for values, frame_write_mask in zip(self._frame_values_array, self._frame_write_mask_array):
# Initialise to no frames written.
frame_write_mask[:] = False
# Initialise to no values enabled for writing.
self._frame_write_mask_array[:] = False
# Create views of the 'previous' and 'current' mask and values. The memoryview, .data, of each array is used
# for its iteration and indexing performance compared to the array.
key = values[1:].data
p_key = values[:-1].data
key_write = frame_write_mask[1:].data
p_key_write = frame_write_mask[:-1].data
# Values are enabled for writing if they differ enough from either of their adjacent values or if they differ
# enough from the closest previous value that is enabled due to either of these conditions.
for sampled_values, enabled_mask in zip(self._frame_values_array, self._frame_write_mask_array):
# Create overlapping views of the 'previous' (all but the last) and 'current' (all but the first)
# `sampled_values` and `enabled_mask`.
# Calculate absolute values from `sampled_values` so that the 'previous' and 'current' absolute arrays can
# be views into the same array instead of separately calculated arrays.
abs_sampled_values = np.abs(sampled_values)
# 'previous' views.
p_val_view = sampled_values[:-1]
p_abs_val_view = abs_sampled_values[:-1]
p_enabled_mask_view = enabled_mask[:-1]
# 'current' views.
c_val_view = sampled_values[1:]
c_abs_val_view = abs_sampled_values[1:]
c_enabled_mask_view = enabled_mask[1:]
p_keyedval = values[0]
is_keyed = False
for idx, (val, p_val) in enumerate(zip(key, p_key)):
if val == p_val:
# Never write keyframe when value is exactly the same as prev one!
continue
# This is contracted form of relative + absolute-near-zero difference:
# absdiff = abs(a - b)
# if absdiff < min_reldiff_fac * min_absdiff_fac:
# return False
# return (absdiff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac
# Note that we ignore the '/ 2' part here, since it's not much significant for us.
if abs(val - p_val) > (min_reldiff_fac * max(abs(val) + abs(p_val), min_absdiff_fac)):
# If enough difference from previous sampled value, key this value *and* the previous one!
key_write[idx] = True
p_key_write[idx] = True
p_keyedval = val
is_keyed = True
elif abs(val - p_keyedval) > (min_reldiff_fac * max((abs(val) + abs(p_keyedval)), min_absdiff_fac)):
# Else, if enough difference from previous keyed value, key this value only!
key_write[idx] = True
p_keyedval = val
is_keyed = True
are_keyed.append(is_keyed)
# If enough difference from previous sampled value, enable the current value *and* the previous one!
# The difference check is symmetrical, so this will compare each value to both of its adjacent values.
# Unless it is forcefully enabled later, this is the only way that the first value can be enabled.
# This is a contracted form of relative + absolute-near-zero difference:
# def is_different(a, b):
# abs_diff = abs(a - b)
# if abs_diff < min_reldiff_fac * min_absdiff_fac:
# return False
# return (abs_diff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac
# Note that we ignore the '/ 2' part here, since it's not much significant for us.
# Contracted form using only builtin Python functions:
# return abs(a - b) > (min_reldiff_fac * max(abs(a) + abs(b), min_absdiff_fac))
abs_diff = np.abs(c_val_view - p_val_view)
different_if_greater_than = min_reldiff_fac * np.maximum(c_abs_val_view + p_abs_val_view, min_absdiff_fac)
enough_diff_p_val_mask = abs_diff > different_if_greater_than
# Enable both the current values *and* the previous values where `enough_diff_p_val_mask` is True. Some
# values may get set to True twice because the views overlap, but this is not a problem.
p_enabled_mask_view[enough_diff_p_val_mask] = True
c_enabled_mask_view[enough_diff_p_val_mask] = True
# Else, if enough difference from previous enabled value, enable the current value only!
# For each 'current' value, get the index of the nearest previous enabled value in `sampled_values` (or
# itself if the value is enabled).
# Start with an array that is the index of the 'current' value in `sampled_values`. The 'current' values are
# all but the first value, so the indices will be from 1 to `len(sampled_values)` exclusive.
# Let len(sampled_values) == 9:
# [1, 2, 3, 4, 5, 6, 7, 8]
p_enabled_idx_in_sampled_values = np.arange(1, len(sampled_values))
# Replace the indices of all disabled values with 0 in preparation of filling them in with the index of the
# nearest previous enabled value. We choose to replace with 0 so that if there is no nearest previous
# enabled value, we instead default to `sampled_values[0]`.
c_val_disabled_mask = ~c_enabled_mask_view
# Let `c_val_disabled_mask` be:
# [F, F, T, F, F, T, T, T]
# Set indices to 0 where `c_val_disabled_mask` is True:
# [1, 2, 3, 4, 5, 6, 7, 8]
# v v v v
# [1, 2, 0, 4, 5, 0, 0, 0]
p_enabled_idx_in_sampled_values[c_val_disabled_mask] = 0
# Accumulative maximum travels across the array from left to right, filling in the zeroed indices with the
# maximum value so far, which will be the closest previous enabled index because the non-zero indices are
# strictly increasing.
# [1, 2, 0, 4, 5, 0, 0, 0]
# v v v v
# [1, 2, 2, 4, 5, 5, 5, 5]
p_enabled_idx_in_sampled_values = np.maximum.accumulate(p_enabled_idx_in_sampled_values)
# Only disabled values need to be checked against their nearest previous enabled values.
# We can additionally ignore all values which equal their immediately previous value because those values
# will never be enabled if they were not enabled by the earlier difference check against immediately
# previous values.
p_enabled_diff_to_check_mask = np.logical_and(c_val_disabled_mask, p_val_view != c_val_view)
# Convert from a mask to indices because we need the indices later and because the array of indices will
# usually be smaller than the mask array making it faster to index other arrays with.
p_enabled_diff_to_check_idx = np.flatnonzero(p_enabled_diff_to_check_mask)
# `p_enabled_idx_in_sampled_values` from earlier:
# [1, 2, 2, 4, 5, 5, 5, 5]
# `p_enabled_diff_to_check_mask` assuming no values equal their immediately previous value:
# [F, F, T, F, F, T, T, T]
# `p_enabled_diff_to_check_idx`:
# [ 2, 5, 6, 7]
# `p_enabled_idx_in_sampled_values_to_check`:
# [ 2, 5, 5, 5]
p_enabled_idx_in_sampled_values_to_check = p_enabled_idx_in_sampled_values[p_enabled_diff_to_check_idx]
# Get the 'current' disabled values that need to be checked.
c_val_to_check = c_val_view[p_enabled_diff_to_check_idx]
c_abs_val_to_check = c_abs_val_view[p_enabled_diff_to_check_idx]
# Get the nearest previous enabled value for each value to be checked.
nearest_p_enabled_val = sampled_values[p_enabled_idx_in_sampled_values_to_check]
abs_nearest_p_enabled_val = np.abs(nearest_p_enabled_val)
# Check the relative + absolute-near-zero difference again, but against the nearest previous enabled value
# this time.
abs_diff = np.abs(c_val_to_check - nearest_p_enabled_val)
different_if_greater_than = (min_reldiff_fac
* np.maximum(c_abs_val_to_check + abs_nearest_p_enabled_val, min_absdiff_fac))
enough_diff_p_enabled_val_mask = abs_diff > different_if_greater_than
# If there are any that are different enough from the previous enabled value, then we have to check them all
# iteratively because enabling a new value can change the nearest previous enabled value of some elements,
# which changes their relative + absolute-near-zero difference:
# `p_enabled_diff_to_check_idx`:
# [2, 5, 6, 7]
# `p_enabled_idx_in_sampled_values_to_check`:
# [2, 5, 5, 5]
# Let `enough_diff_p_enabled_val_mask` be:
# [F, F, T, T]
# The first index that is newly enabled is 6:
# [2, 5,>6<,5]
# But 6 > 5, so the next value's nearest previous enabled index is also affected:
# [2, 5, 6,>6<]
# We had calculated a newly enabled index of 7 too, but that was calculated against the old nearest previous
# enabled index of 5, which has now been updated to 6, so whether 7 is enabled or not needs to be
# recalculated:
# [F, F, T, ?]
if np.any(enough_diff_p_enabled_val_mask):
# Accessing .data, the memoryview of the array, iteratively or by individual index is faster than doing
# the same with the array itself.
zipped = zip(p_enabled_diff_to_check_idx.data,
c_val_to_check.data,
c_abs_val_to_check.data,
p_enabled_idx_in_sampled_values_to_check.data,
enough_diff_p_enabled_val_mask.data)
# While iterating, we could set updated values into `enough_diff_p_enabled_val_mask` as we go and then
# update `enabled_mask` in bulk after the iteration, but if we're going to update an array while
# iterating, we may as well update `enabled_mask` directly instead and skip the bulk update.
# Additionally, the number of `True` writes to `enabled_mask` is usually much less than the number of
# updates that would be required to `enough_diff_p_enabled_val_mask`.
c_enabled_mask_view_mv = c_enabled_mask_view.data
# While iterating, keep track of the most recent newly enabled index, so we can tell when we need to
# recalculate whether the current value needs to be enabled.
new_p_enabled_idx = -1
# Keep track of its value too for performance.
new_p_enabled_val = -1
new_abs_p_enabled_val = -1
for cur_idx, c_val, c_abs_val, old_p_enabled_idx, enough_diff in zipped:
if new_p_enabled_idx > old_p_enabled_idx:
# The nearest previous enabled value is newly enabled and was not included when
# `enough_diff_p_enabled_val_mask` was calculated, so whether the current value is different
# enough needs to be recalculated using the newly enabled value.
# Check if the relative + absolute-near-zero difference is enough to enable this value.
enough_diff = (abs(c_val - new_p_enabled_val)
> (min_reldiff_fac * max(c_abs_val + new_abs_p_enabled_val, min_absdiff_fac)))
if enough_diff:
# The current value needs to be enabled.
c_enabled_mask_view_mv[cur_idx] = True
# Update the index and values for this newly enabled value.
new_p_enabled_idx = cur_idx
new_p_enabled_val = c_val
new_abs_p_enabled_val = c_abs_val
# If we write nothing (action doing nothing) and are in 'force_keep' mode, we key everything! :P
# See T41766.
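
The running-maximum trick used above to find each sampled value's nearest previous enabled index can be seen in isolation in this small NumPy sketch (the mask is made up for illustration):

```python
import numpy as np

enabled = np.array([False, True, True, False, False, True, False, False, False])
# Indices of the 'current' values (1..n-1), zeroed where the value is disabled.
idx = np.arange(1, len(enabled))
idx[~enabled[1:]] = 0
# The accumulated maximum fills each zero with the closest previous enabled index.
print(np.maximum.accumulate(idx))  # [1 2 2 2 5 5 5 5]
```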
@ -1362,7 +1474,9 @@ class AnimationCurveNodeWrapper:
# one key in this case.
# See T41719, T41605, T41254...
if self.force_keying or (force_keep and not self):
are_keyed[:] = [True] * len(are_keyed)
are_keyed = [True] * len(self._frame_write_mask_array)
else:
are_keyed = np.any(self._frame_write_mask_array, axis=1)
# If we did key something, ensure first and last sampled values are keyed as well.
if self.force_startend_keying:

View File

@ -2780,7 +2780,9 @@ class FbxImportHelperNode:
for i, w in combined_weights.items():
indices.append(i)
if len(w) > 1:
weights.append(sum(w) / len(w))
# Add ignored child weights to the current bone's weight.
# XXX - Weights that sum to more than 1.0 get clamped to 1.0 when set in the vertex group.
weights.append(sum(w))
else:
weights.append(w[0])
@ -3464,31 +3466,56 @@ def load(operator, context, filepath="",
def _():
fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxShape'))
# - FBX | - Blender equivalent
# Mesh | `Mesh`
# BlendShape | `Key`
# BlendShapeChannel | `ShapeKey`, but without its `.data`.
# Shape | `ShapeKey.data`, but also includes normals and the values are relative to the base Mesh
# | instead of being absolute. The data is sparse, so each Shape has an "Indexes" array too.
# | FBX 2020 introduced 'Modern Style' Shapes that also support tangents, binormals, vertex
# | colors and UVs, and can be absolute values instead of relative, but 'Modern Style' Shapes
# | are not currently supported.
#
# The FBX connections between Shapes and Meshes form multiple many-many relationships:
# Mesh >-< BlendShape >-< BlendShapeChannel >-< Shape
# In practice, the relationships are almost never many-many and are more typically 1-many or 1-1:
# Mesh --- BlendShape:
# usually 1-1 and the FBX SDK might enforce that each BlendShape is connected to at most one Mesh.
# BlendShape --< BlendShapeChannel:
# usually 1-many.
# BlendShapeChannel --- or uncommonly --< Shape:
# usually 1-1, but 1-many is a documented feature.
def connections_gen(c_src_uuid, fbx_id, fbx_type):
"""Helper to reduce duplicate code"""
# Rarely, an imported FBX file will have duplicate connections. For Shape Key related connections, FBX
# appears to ignore the duplicates, or overwrite the existing duplicates such that the end result is the
# same as ignoring them, so keep a set of the seen connections and ignore any duplicates.
seen_connections = set()
for c_dst_uuid, ctype in fbx_connection_map.get(c_src_uuid, ()):
if ctype.props[0] != b'OO':
# 'Object-Object' connections only.
continue
fbx_data, bl_data = fbx_table_nodes.get(c_dst_uuid, (None, None))
if fbx_data is None or fbx_data.id != fbx_id or fbx_data.props[2] != fbx_type:
# Either `c_dst_uuid` doesn't exist, or it has a different id or type.
continue
connection_key = (c_src_uuid, c_dst_uuid)
if connection_key in seen_connections:
# The connection is a duplicate, skip it.
continue
seen_connections.add(connection_key)
yield c_dst_uuid, fbx_data, bl_data
mesh_to_shapes = {}
for s_uuid, s_item in fbx_table_nodes.items():
fbx_sdata, bl_sdata = s_item = fbx_table_nodes.get(s_uuid, (None, None))
for s_uuid, (fbx_sdata, _bl_sdata) in fbx_table_nodes.items():
if fbx_sdata is None or fbx_sdata.id != b'Geometry' or fbx_sdata.props[2] != b'Shape':
continue
# shape -> blendshapechannel -> blendshape -> mesh.
for bc_uuid, bc_ctype in fbx_connection_map.get(s_uuid, ()):
if bc_ctype.props[0] != b'OO':
continue
fbx_bcdata, _bl_bcdata = fbx_table_nodes.get(bc_uuid, (None, None))
if fbx_bcdata is None or fbx_bcdata.id != b'Deformer' or fbx_bcdata.props[2] != b'BlendShapeChannel':
continue
for bs_uuid, bs_ctype in fbx_connection_map.get(bc_uuid, ()):
if bs_ctype.props[0] != b'OO':
continue
fbx_bsdata, _bl_bsdata = fbx_table_nodes.get(bs_uuid, (None, None))
if fbx_bsdata is None or fbx_bsdata.id != b'Deformer' or fbx_bsdata.props[2] != b'BlendShape':
continue
for m_uuid, m_ctype in fbx_connection_map.get(bs_uuid, ()):
if m_ctype.props[0] != b'OO':
continue
fbx_mdata, bl_mdata = fbx_table_nodes.get(m_uuid, (None, None))
if fbx_mdata is None or fbx_mdata.id != b'Geometry' or fbx_mdata.props[2] != b'Mesh':
continue
for bc_uuid, fbx_bcdata, _bl_bcdata in connections_gen(s_uuid, b'Deformer', b'BlendShapeChannel'):
for bs_uuid, _fbx_bsdata, _bl_bsdata in connections_gen(bc_uuid, b'Deformer', b'BlendShape'):
for m_uuid, _fbx_mdata, bl_mdata in connections_gen(bs_uuid, b'Geometry', b'Mesh'):
# Blenmeshes are assumed already created at that time!
assert(isinstance(bl_mdata, bpy.types.Mesh))
# Group shapes by mesh so that each mesh only needs to be processed once for all of its shape

View File

@ -5,7 +5,7 @@
bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
"version": (4, 0, 32),
"version": (4, 1, 12),
'blender': (4, 0, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',
@ -144,13 +144,10 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
'Most efficient and portable, but more difficult to edit later'),
('GLTF_SEPARATE', 'glTF Separate (.gltf + .bin + textures)',
'Exports multiple files, with separate JSON, binary and texture data. '
'Easiest to edit later'),
('GLTF_EMBEDDED', 'glTF Embedded (.gltf)',
'Exports a single file, with all data packed in JSON. '
'Less efficient than binary, but easier to edit later')),
'Easiest to edit later')),
description=(
'Output format and embedding options. Binary is most efficient, '
'but JSON (embedded or separate) may be easier to edit later'
'Output format. Binary is most efficient, '
'but JSON may be easier to edit later'
),
default='GLB', #Warning => If you change the default, need to change the default filter too
update=on_export_format_changed,
@ -174,13 +171,13 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_image_format: EnumProperty(
name='Images',
items=(('AUTO', 'Automatic',
'Save PNGs as PNGs, JPEGs as JPEGs, WEBPs as WEBPs. '
'If neither one, use PNG'),
'Save PNGs as PNGs, JPEGs as JPEGs, WebPs as WebPs. '
'For other formats, use PNG'),
('JPEG', 'JPEG Format (.jpg)',
'Save images as JPEGs. (Images that need alpha are saved as PNGs though.) '
'Be aware of a possible loss in quality'),
('WEBP', 'Webp Format',
'Save images as WEBPs as main image (no fallback)'),
('WEBP', 'WebP Format',
'Save images as WebPs as main image (no fallback)'),
('NONE', 'None',
'Don\'t export images'),
),
@ -192,18 +189,18 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
)
export_image_add_webp: BoolProperty(
name='Create Webp',
name='Create WebP',
description=(
"Creates webp textures for every textures. "
"For already webp textures, nothing happen"
"Creates WebP textures for every texture. "
"For already WebP textures, nothing happens"
),
default=False
)
export_image_webp_fallback: BoolProperty(
name='Webp fallback',
name='WebP fallback',
description=(
"For all webp textures, create a PNG fallback texture."
"For all WebP textures, create a PNG fallback texture"
),
default=False
)
@ -476,6 +473,21 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
default=False
)
export_hierarchy_flatten_objs: BoolProperty(
name='Flatten Object Hierarchy',
description='Flatten Object Hierarchy. Useful in case of non-decomposable transformation matrix',
default=False
)
export_armature_object_remove: BoolProperty(
name='Remove Armature Object',
description=(
'Remove Armature object if possible. '
'If Armature has multiple root bones, object will not be removed'
),
default=False
)
export_optimize_animation_size: BoolProperty(
name='Optimize Animation Size',
description=(
@ -641,7 +653,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_try_sparse_sk: BoolProperty(
name='Use Sparse Accessor if better',
description='Try using Sparce Accessor if it save space',
description='Try using Sparse Accessor if it saves space',
default=True
)
@ -653,9 +665,9 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_gpu_instances: BoolProperty(
name='GPU Instances',
description='Export using EXT_mesh_gpu_instancing.'
'Limited to children of a same Empty. '
'multiple Materials might be omitted',
description='Export using EXT_mesh_gpu_instancing. '
'Limited to children of a given Empty. '
'Multiple materials might be omitted',
default=False
)
@ -821,6 +833,8 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_settings['gltf_animations'] = self.export_animations
export_settings['gltf_def_bones'] = self.export_def_bones
export_settings['gltf_flatten_bones_hierarchy'] = self.export_hierarchy_flatten_bones
export_settings['gltf_flatten_obj_hierarchy'] = self.export_hierarchy_flatten_objs
export_settings['gltf_armature_object_remove'] = self.export_armature_object_remove
if self.export_animations:
export_settings['gltf_frame_range'] = self.export_frame_range
export_settings['gltf_force_sampling'] = self.export_force_sampling
@ -1054,6 +1068,7 @@ class GLTF_PT_export_data_scene(bpy.types.Panel):
sfile = context.space_data
operator = sfile.active_operator
layout.prop(operator, 'export_gpu_instances')
layout.prop(operator, 'export_hierarchy_flatten_objs')
class GLTF_PT_export_data_mesh(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
@ -1279,6 +1294,8 @@ class GLTF_PT_export_data_armature(bpy.types.Panel):
if operator.export_force_sampling is False and operator.export_def_bones is True:
layout.label(text="Export only deformation bones is not possible when not sampling animation")
row = layout.row()
row.prop(operator, 'export_armature_object_remove')
row = layout.row()
row.prop(operator, 'export_hierarchy_flatten_bones')
class GLTF_PT_export_data_compression(bpy.types.Panel):
@ -1648,7 +1665,7 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
items=(
("BLENDER", "Blender (best for import/export round trip)",
"Good for re-importing glTFs exported from Blender, "
"and re-exporting glTFs to glTFs after Blender editing"
"and re-exporting glTFs to glTFs after Blender editing. "
"Bone tips are placed on their local +Y axis (in glTF space)"),
("TEMPERANCE", "Temperance (average)",
"Decent all-around strategy. "
@ -1674,10 +1691,10 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
)
import_webp_texture: BoolProperty(
name='Import Webp textures',
name='Import WebP textures',
description=(
"If a texture exists in webp format,"
"loads the webp texture instead of the fallback png/jpg one"
"If a texture exists in WebP format, "
"loads the WebP texture instead of the fallback PNG/JPEG one"
),
default=False,
)

View File

@ -8,6 +8,10 @@ import bpy
def get_gltf_node_old_name():
return "glTF Settings"
# Old group name
def get_gltf_old_group_node_name():
return "glTF Metallic Roughness"
def get_gltf_node_name():
return "glTF Material Output"

View File

@ -34,7 +34,12 @@ def gather_actions_animations(export_settings):
# Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None:
continue
if export_settings["gltf_armature_object_remove"] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue
animations_, merged_tracks = gather_action_animations(obj_uuid, merged_tracks, len(animations), export_settings)
animations += animations_
@ -63,7 +68,12 @@ def prepare_actions_range(export_settings):
# Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None:
continue
if export_settings["gltf_armature_object_remove"] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue
if obj_uuid not in export_settings['ranges']:
export_settings['ranges'][obj_uuid] = {}
@ -168,7 +178,12 @@ def prepare_actions_range(export_settings):
# Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None:
continue
if export_settings['gltf_armature_object_remove'] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue
blender_actions = __get_blender_actions(obj_uuid, export_settings)
for blender_action, track, type_ in blender_actions:

View File

@ -35,7 +35,12 @@ def gather_scene_animations(export_settings):
# Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None:
continue
if export_settings['gltf_armature_object_remove'] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue
blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

View File

@ -22,7 +22,12 @@ def gather_tracks_animations(export_settings):
# Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None:
continue
if export_settings['gltf_armature_object_remove'] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue
animations_, merged_tracks = gather_track_animations(obj_uuid, merged_tracks, len(animations), export_settings)
animations += animations_

View File

@ -119,6 +119,10 @@ def get_cache_data(path: str,
# Bone has a parent, but in export, after filter, is at root of armature
matrix = blender_bone.matrix.copy()
# Because there is no armature object, we need to apply the TRS of armature to the root bone
if export_settings['gltf_armature_object_remove'] is True:
matrix = matrix @ blender_obj.matrix_world
if blender_obj.animation_data and blender_obj.animation_data.action \
and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:
if blender_bone.name not in data[obj_uuid][blender_obj.animation_data.action.name]['bone'].keys():

View File

@ -86,11 +86,8 @@ def __create_buffer(exporter, export_settings):
if export_settings['gltf_format'] == 'GLB':
buffer = exporter.finalize_buffer(export_settings['gltf_filedirectory'], is_glb=True)
else:
if export_settings['gltf_format'] == 'GLTF_EMBEDDED':
exporter.finalize_buffer(export_settings['gltf_filedirectory'])
else:
exporter.finalize_buffer(export_settings['gltf_filedirectory'],
export_settings['gltf_binaryfilename'])
exporter.finalize_buffer(export_settings['gltf_filedirectory'],
export_settings['gltf_binaryfilename'])
return buffer

View File

@ -9,6 +9,7 @@ from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras
from .gltf2_blender_gather_cache import cached
from . import gltf2_blender_gather_nodes
from . import gltf2_blender_gather_joints
from . import gltf2_blender_gather_tree
from .animation.sampled.object.gltf2_blender_gather_object_keyframes import get_cache_data
from .animation.gltf2_blender_gather_animations import gather_animations
@ -52,6 +53,8 @@ def __gather_scene(blender_scene, export_settings):
vtree = gltf2_blender_gather_tree.VExportTree(export_settings)
vtree.construct(blender_scene)
vtree.search_missing_armature() # In case armatures are not parented correctly
vtree.bake_armature_bone_list() # Used in case we remove the armature
vtree.check_if_we_can_remove_armature() # Check if we can remove the armatures objects
export_user_extensions('vtree_before_filter_hook', export_settings, vtree)
@ -59,6 +62,8 @@ def __gather_scene(blender_scene, export_settings):
vtree.filter()
if export_settings['gltf_flatten_bones_hierarchy'] is True:
vtree.break_bone_hierarchy()
if export_settings['gltf_flatten_obj_hierarchy'] is True:
vtree.break_obj_hierarchy()
vtree.variants_reset_to_original()
@ -66,11 +71,41 @@ def __gather_scene(blender_scene, export_settings):
export_settings['vtree'] = vtree
for r in [vtree.nodes[r] for r in vtree.roots]:
node = gltf2_blender_gather_nodes.gather_node(
r, export_settings)
if node is not None:
scene.nodes.append(node)
# If we don't remove armature object, we can't have bones directly at root of scene
# So looping only on root nodes, as they are all nodes, not bones
if export_settings['gltf_armature_object_remove'] is False:
for r in [vtree.nodes[r] for r in vtree.roots]:
node = gltf2_blender_gather_nodes.gather_node(
r, export_settings)
if node is not None:
scene.nodes.append(node)
else:
# If we remove armature objects, we can have bone at root of scene
armature_root_joints = {}
for r in [vtree.nodes[r] for r in vtree.roots]:
# Classic Object/node case
if r.blender_type != gltf2_blender_gather_tree.VExportNode.BONE:
node = gltf2_blender_gather_nodes.gather_node(
r, export_settings)
if node is not None:
scene.nodes.append(node)
else:
# We can have bones at root of scene because we removed the armature object
# and the armature was at root of scene
node = gltf2_blender_gather_joints.gather_joint_vnode(
r.uuid, export_settings)
if node is not None:
scene.nodes.append(node)
if r.armature not in armature_root_joints.keys():
armature_root_joints[r.armature] = []
armature_root_joints[r.armature].append(node)
# Manage objects parented to bones, now we go through all root objects
for k, v in armature_root_joints.items():
gltf2_blender_gather_nodes.get_objects_parented_to_bones(k, v, export_settings)
vtree.add_neutral_bones()

View File

@ -48,11 +48,15 @@ def gather_joint_vnode(vnode, export_settings):
:return: a glTF2 node (acting as a joint)
"""
vtree = export_settings['vtree']
blender_object = vtree.nodes[vnode].blender_object
blender_bone = vtree.nodes[vnode].blender_bone
mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world
if export_settings['gltf_armature_object_remove'] is True:
if vtree.nodes[vnode].parent_uuid is not None:
mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world
else:
mat = vtree.nodes[vnode].matrix_world
else:
mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world
trans, rot, sca = mat.decompose()

View File

@ -21,6 +21,7 @@ from . import gltf2_blender_gather_lights
from .gltf2_blender_gather_tree import VExportNode
def gather_node(vnode, export_settings):
blender_object = vnode.blender_object
skin = gather_skin(vnode.uuid, export_settings)
@ -29,7 +30,7 @@ def gather_node(vnode, export_settings):
node = gltf2_io.Node(
camera=__gather_camera(blender_object, export_settings),
children=__gather_children(vnode, blender_object, export_settings),
children=__gather_children(vnode, export_settings),
extensions=__gather_extensions(blender_object, export_settings),
extras=__gather_extras(blender_object, export_settings),
matrix=__gather_matrix(blender_object, export_settings),
@ -61,78 +62,103 @@ def __gather_camera(blender_object, export_settings):
return gltf2_blender_gather_cameras.gather_camera(blender_object.data, export_settings)
def __gather_children(vnode, blender_object, export_settings):
def __gather_children(vnode, export_settings):
children = []
vtree = export_settings['vtree']
# Standard Children / Collection
for c in [vtree.nodes[c] for c in vnode.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE]:
node = gather_node(c, export_settings)
if node is not None:
children.append(node)
armature_object_uuid = None
# Standard Children / Collection
if export_settings['gltf_armature_object_remove'] is False:
for c in [vtree.nodes[c] for c in vnode.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE]:
node = gather_node(c, export_settings)
if node is not None:
children.append(node)
else:
root_joints = []
for c in [vtree.nodes[c] for c in vnode.children]:
if c.blender_type != gltf2_blender_gather_tree.VExportNode.BONE:
node = gather_node(c, export_settings)
if node is not None:
children.append(node)
else:
# We come here because the armature was removed, and a bone can be a child of any object
joint = gltf2_blender_gather_joints.gather_joint_vnode(c.uuid, export_settings)
children.append(joint)
armature_object_uuid = c.armature
root_joints.append(joint)
# Now that we have all bone children (that are root joints), we can get objects parented to bones
# Armature --> Retrieve Blender bones
# This can't happen if we remove the Armature Object
if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE:
armature_object_uuid = vnode.uuid
root_joints = []
all_armature_children = vnode.children
root_bones_uuid = [c for c in all_armature_children if export_settings['vtree'].nodes[c].blender_type == VExportNode.BONE]
root_bones_uuid = export_settings['vtree'].get_root_bones_uuid(vnode.uuid)
for bone_uuid in root_bones_uuid:
joint = gltf2_blender_gather_joints.gather_joint_vnode(bone_uuid, export_settings)
children.append(joint)
root_joints.append(joint)
if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE \
or armature_object_uuid is not None:
# Object parented to bones
direct_bone_children = []
for n in [vtree.nodes[i] for i in vtree.get_all_bones(vnode.uuid)]:
direct_bone_children.extend([c for c in n.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE])
def find_parent_joint(joints, name):
for joint in joints:
if joint.name == name:
return joint
parent_joint = find_parent_joint(joint.children, name)
if parent_joint:
return parent_joint
return None
for child in direct_bone_children: # List of object that are parented to bones
# find parent joint
parent_joint = find_parent_joint(root_joints, vtree.nodes[child].blender_object.parent_bone)
if not parent_joint:
continue
child_node = gather_node(vtree.nodes[child], export_settings)
if child_node is None:
continue
blender_bone = blender_object.pose.bones[parent_joint.name]
mat = vtree.nodes[vtree.nodes[child].parent_bone_uuid].matrix_world.inverted_safe() @ vtree.nodes[child].matrix_world
loc, rot_quat, scale = mat.decompose()
trans = __convert_swizzle_location(loc, export_settings)
rot = __convert_swizzle_rotation(rot_quat, export_settings)
sca = __convert_swizzle_scale(scale, export_settings)
translation, rotation, scale = (None, None, None)
if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0:
translation = [trans[0], trans[1], trans[2]]
if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0:
rotation = [rot[1], rot[2], rot[3], rot[0]]
if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0:
scale = [sca[0], sca[1], sca[2]]
child_node.translation = translation
child_node.rotation = rotation
child_node.scale = scale
parent_joint.children.append(child_node)
get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings)
return children
def get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings):
vtree = export_settings['vtree']
direct_bone_children = []
for n in [vtree.nodes[i] for i in vtree.get_all_bones(armature_object_uuid)]:
direct_bone_children.extend([c for c in n.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE])
for child in direct_bone_children: # List of objects that are parented to bones
# find parent joint
parent_joint = __find_parent_joint(root_joints, vtree.nodes[child].blender_object.parent_bone)
if not parent_joint:
continue
child_node = gather_node(vtree.nodes[child], export_settings)
if child_node is None:
continue
mat = vtree.nodes[vtree.nodes[child].parent_bone_uuid].matrix_world.inverted_safe() @ vtree.nodes[child].matrix_world
loc, rot_quat, scale = mat.decompose()
trans = __convert_swizzle_location(loc, export_settings)
rot = __convert_swizzle_rotation(rot_quat, export_settings)
sca = __convert_swizzle_scale(scale, export_settings)
translation, rotation, scale = (None, None, None)
if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0:
translation = [trans[0], trans[1], trans[2]]
if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0:
rotation = [rot[1], rot[2], rot[3], rot[0]]
if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0:
scale = [sca[0], sca[1], sca[2]]
child_node.translation = translation
child_node.rotation = rotation
child_node.scale = scale
parent_joint.children.append(child_node)
def __find_parent_joint(joints, name):
for joint in joints:
if joint.name == name:
return joint
parent_joint = __find_parent_joint(joint.children, name)
if parent_joint:
return parent_joint
return None
def __gather_extensions(blender_object, export_settings):
extensions = {}

View File

@ -7,16 +7,27 @@ from ...io.com import gltf2_io
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ...io.com.gltf2_io_constants import TextureFilter, TextureWrap
from .gltf2_blender_gather_cache import cached
from .gltf2_blender_get import (
previous_node,
previous_socket,
get_const_from_socket,
)
from .material.gltf2_blender_search_node_tree import previous_node, previous_socket, get_const_from_socket, NodeSocket
@cached
def gather_sampler(blender_shader_node: bpy.types.Node, export_settings):
wrap_s, wrap_t = __gather_wrap(blender_shader_node, export_settings)
def gather_sampler(blender_shader_node: bpy.types.Node, group_path_str, export_settings):
# reconstruct group_path from group_path_str
sep_item = "##~~gltf-sep~~##"
sep_inside_item = "##~~gltf-inside-sep~~##"
group_path = []
tab = group_path_str.split(sep_item)
if len(tab) > 0:
group_path.append(bpy.data.materials[tab[0]])
for idx, i in enumerate(tab[1:]):
subtab = i.split(sep_inside_item)
if idx == 0:
group_path.append(bpy.data.materials[tab[0]].node_tree.nodes[subtab[1]])
else:
group_path.append(bpy.data.node_groups[subtab[0]].nodes[subtab[1]])
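
Because gather_sampler is @cached, the group path arrives flattened into a hashable string, and the sentinel separators above are used to rebuild the Blender datablocks. A minimal illustration of the expected layout, with made-up material and node names (the real string is assembled by the caller, not shown in this diff): the first entry is the material name, and every further entry is '<tree name><sep_inside_item><node name>'.

# Illustration only, hypothetical names, no bpy required:
sep_item = "##~~gltf-sep~~##"
sep_inside_item = "##~~gltf-inside-sep~~##"
group_path_str = sep_item.join([
    "MyMaterial",                                        # resolved via bpy.data.materials
    "MyMaterial" + sep_inside_item + "Group",            # node inside the material's own tree
    "MyNodeGroup" + sep_inside_item + "Image Texture",   # node inside a nested node group
])
print(group_path_str.split(sep_item))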
wrap_s, wrap_t = __gather_wrap(blender_shader_node, group_path, export_settings)
sampler = gltf2_io.Sampler(
extensions=__gather_extensions(blender_shader_node, export_settings),
@ -80,7 +91,7 @@ def __gather_name(blender_shader_node, export_settings):
return None
def __gather_wrap(blender_shader_node, export_settings):
def __gather_wrap(blender_shader_node, group_path, export_settings):
# First gather from the Texture node
if blender_shader_node.extension == 'EXTEND':
wrap_s = TextureWrap.ClampToEdge
@ -98,7 +109,7 @@ def __gather_wrap(blender_shader_node, export_settings):
# But still works for old files
# Still needed for heterogeneous sampler, like MIRROR x REPEAT, for example
# Take manual wrapping into account
result = detect_manual_uv_wrapping(blender_shader_node)
result = detect_manual_uv_wrapping(blender_shader_node, group_path)
if result:
if result['wrap_s'] is not None: wrap_s = result['wrap_s']
if result['wrap_t'] is not None: wrap_t = result['wrap_t']
@ -110,7 +121,7 @@ def __gather_wrap(blender_shader_node, export_settings):
return wrap_s, wrap_t
def detect_manual_uv_wrapping(blender_shader_node):
def detect_manual_uv_wrapping(blender_shader_node, group_path):
# Detects UV wrapping done using math nodes. This is for emulating wrap
# modes Blender doesn't support. It looks like
#
@ -124,38 +135,38 @@ def detect_manual_uv_wrapping(blender_shader_node):
# mode in each direction (or None), and next_socket.
result = {}
comb = previous_node(blender_shader_node.inputs['Vector'])
if comb is None or comb.type != 'COMBXYZ': return None
comb = previous_node(NodeSocket(blender_shader_node.inputs['Vector'], group_path))
if comb.node is None or comb.node.type != 'COMBXYZ': return None
for soc in ['X', 'Y']:
node = previous_node(comb.inputs[soc])
if node is None: return None
node = previous_node(NodeSocket(comb.node.inputs[soc], comb.group_path))
if node.node is None: return None
if node.type == 'SEPXYZ':
if node.node.type == 'SEPXYZ':
# Passed through without change
wrap = None
prev_socket = previous_socket(comb.inputs[soc])
elif node.type == 'MATH':
prev_socket = previous_socket(NodeSocket(comb.node.inputs[soc], comb.group_path))
elif node.node.type == 'MATH':
# Math node applies a manual wrap
if (node.operation == 'PINGPONG' and
get_const_from_socket(node.inputs[1], kind='VALUE') == 1.0): # scale = 1
if (node.node.operation == 'PINGPONG' and
get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind='VALUE') == 1.0): # scale = 1
wrap = TextureWrap.MirroredRepeat
elif (node.operation == 'WRAP' and
get_const_from_socket(node.inputs[1], kind='VALUE') == 0.0 and # min = 0
get_const_from_socket(node.inputs[2], kind='VALUE') == 1.0): # max = 1
elif (node.node.operation == 'WRAP' and
get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind='VALUE') == 0.0 and # min = 0
get_const_from_socket(NodeSocket(node.node.inputs[2], node.group_path), kind='VALUE') == 1.0): # max = 1
wrap = TextureWrap.Repeat
else:
return None
prev_socket = previous_socket(node.inputs[0])
prev_socket = previous_socket(NodeSocket(node.node.inputs[0], node.group_path))
else:
return None
if prev_socket is None: return None
prev_node = prev_socket.node
if prev_socket.socket is None: return None
prev_node = prev_socket.socket.node
if prev_node.type != 'SEPXYZ': return None
# Make sure X goes to X, etc.
if prev_socket.name != soc: return None
if prev_socket.socket.name != soc: return None
# Make sure both attach to the same SeparateXYZ node
if soc == 'X':
sep = prev_node
@ -164,5 +175,5 @@ def detect_manual_uv_wrapping(blender_shader_node):
result['wrap_s' if soc == 'X' else 'wrap_t'] = wrap
result['next_socket'] = sep.inputs[0]
result['next_socket'] = NodeSocket(sep.inputs[0], prev_socket.group_path)
return result
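
Throughout this file (and the material hunks below), sockets are no longer passed around as bare bpy.types.NodeSocket values but wrapped together with the chain of group nodes that leads to them, accessed as .socket and .group_path. The wrapper is defined in material/gltf2_blender_search_node_tree.py, which is not part of this excerpt; conceptually it is just a small record, roughly:

# Rough sketch only; the actual definition lives in gltf2_blender_search_node_tree.py.
from collections import namedtuple
NodeSocket = namedtuple('NodeSocket', ['socket', 'group_path'])
# e.g. NodeSocket(tex_node.inputs['Vector'], [material, group_node])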

View File

@ -13,6 +13,7 @@ from ...io.com import gltf2_io_constants
from ...io.exp import gltf2_io_binary_data
from ..com.gltf2_blender_default import BLENDER_GLTF_SPECIAL_COLLECTION
from . import gltf2_blender_gather_accessors
from .gltf2_blender_gather_joints import gather_joint_vnode
class VExportNode:
@ -76,7 +77,7 @@ class VExportNode:
def recursive_display(self, tree, mode):
if mode == "simple":
for c in self.children:
print(self.blender_object.name, "/", self.blender_bone.name if self.blender_bone else "", "-->", tree.nodes[c].blender_object.name, "/", tree.nodes[c].blender_bone.name if tree.nodes[c].blender_bone else "" )
print(tree.nodes[c].uuid, self.blender_object.name, "/", self.blender_bone.name if self.blender_bone else "", "-->", tree.nodes[c].blender_object.name, "/", tree.nodes[c].blender_bone.name if tree.nodes[c].blender_bone else "" )
tree.nodes[c].recursive_display(tree, mode)
class VExportTree:
@ -278,23 +279,40 @@ class VExportTree:
def get_all_objects(self):
return [n.uuid for n in self.nodes.values() if n.blender_type != VExportNode.BONE]
def get_all_bones(self, uuid): #For armatue Only
if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
def recursive_get_all_bones(uuid):
total = []
if self.nodes[uuid].blender_type == VExportNode.BONE:
total.append(uuid)
for child_uuid in self.nodes[uuid].children:
total.extend(recursive_get_all_bones(child_uuid))
def get_all_bones(self, uuid): #For armature only
if not hasattr(self.nodes[uuid], "all_bones"):
if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
def recursive_get_all_bones(uuid):
total = []
if self.nodes[uuid].blender_type == VExportNode.BONE:
total.append(uuid)
for child_uuid in self.nodes[uuid].children:
total.extend(recursive_get_all_bones(child_uuid))
return total
return total
tot = []
for c_uuid in self.nodes[uuid].children:
tot.extend(recursive_get_all_bones(c_uuid))
return tot
tot = []
for c_uuid in self.nodes[uuid].children:
tot.extend(recursive_get_all_bones(c_uuid))
self.nodes[uuid].all_bones = tot
return tot # Not really needed to return, we are just baking it before export really starts
else:
self.nodes[uuid].all_bones = []
return []
else:
return []
return self.nodes[uuid].all_bones
def get_root_bones_uuid(self, uuid): #For armature only
if not hasattr(self.nodes[uuid], "root_bones_uuid"):
if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
all_armature_children = self.nodes[uuid].children
self.nodes[uuid].root_bones_uuid = [c for c in all_armature_children if self.nodes[c].blender_type == VExportNode.BONE]
return self.nodes[uuid].root_bones_uuid # Not really needed to return, we are just baking it before export really starts
else:
self.nodes[uuid].root_bones_uuid = []
return []
else:
return self.nodes[uuid].root_bones_uuid
def get_all_node_of_type(self, node_type):
return [n.uuid for n in self.nodes.values() if n.blender_type == node_type]
@ -302,10 +320,9 @@ class VExportTree:
def display(self, mode):
if mode == "simple":
for n in self.roots:
print("Root", self.nodes[n].blender_object.name, "/", self.nodes[n].blender_bone.name if self.nodes[n].blender_bone else "" )
print(self.nodes[n].uuid, "Root", self.nodes[n].blender_object.name, "/", self.nodes[n].blender_bone.name if self.nodes[n].blender_bone else "" )
self.nodes[n].recursive_display(self, mode)
def filter_tag(self):
roots = self.roots.copy()
for r in roots:
@ -322,7 +339,6 @@ class VExportTree:
self.filter_perform()
self.remove_filtered_nodes()
def recursive_filter_tag(self, uuid, parent_keep_tag):
# parent_keep_tag is for collection instance
# some properties (selection, visibility, renderability)
@ -442,10 +458,20 @@ class VExportTree:
bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].objects:
return False
if self.export_settings['gltf_armature_object_remove'] is True:
# If we remove the Armature object
if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
self.nodes[uuid].arma_exported = True
return False
return True
def remove_filtered_nodes(self):
self.nodes = {k:n for (k, n) in self.nodes.items() if n.keep_tag is True}
if self.export_settings['gltf_armature_object_remove'] is True:
# If we remove the Armature object
self.nodes = {k:n for (k, n) in self.nodes.items() if n.keep_tag is True or (n.keep_tag is False and n.blender_type == VExportNode.ARMATURE)}
else:
self.nodes = {k:n for (k, n) in self.nodes.items() if n.keep_tag is True}
def search_missing_armature(self):
for n in [n for n in self.nodes.values() if hasattr(n, "armature_needed") is True]:
@ -454,6 +480,14 @@ class VExportTree:
n.armature = candidates[0].uuid
del n.armature_needed
def bake_armature_bone_list(self):
# Used to store data in armature vnode
# If armature is removed from export
# Data are still available, even if armature is not exported (so bones are re-parented)
for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]:
self.get_all_bones(n.uuid)
self.get_root_bones_uuid(n.uuid)
def add_neutral_bones(self):
added_armatures = []
for n in [n for n in self.nodes.values() if \
@ -521,6 +555,9 @@ class VExportTree:
from .gltf2_blender_gather_skins import gather_skin
skins = []
for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]:
if self.export_settings['gltf_armature_object_remove'] is True:
if hasattr(n, "arma_exported") is False:
continue
if len([m for m in self.nodes.values() if m.keep_tag is True and m.blender_type == VExportNode.OBJECT and m.armature == n.uuid]) == 0:
skin = gather_skin(n.uuid, self.export_settings)
skins.append(skin)
@ -552,3 +589,25 @@ class VExportTree:
self.nodes[self.nodes[bone].parent_uuid].children.remove(bone)
self.nodes[bone].parent_uuid = arma
self.nodes[arma].children.append(bone)
def break_obj_hierarchy(self):
# Can be useful when the matrix is not decomposable
# TODO: if we get real collections one day, we probably need to adapt this code
for obj in self.get_all_objects():
if self.nodes[obj].armature is not None and self.nodes[obj].parent_uuid == self.nodes[obj].armature:
continue # Keep skinned meshes as children of armature
if self.nodes[obj].parent_uuid is not None:
self.nodes[self.nodes[obj].parent_uuid].children.remove(obj)
self.nodes[obj].parent_uuid = None
self.roots.append(obj)
def check_if_we_can_remove_armature(self):
# If the user requested to remove the armature, we need to check if it is possible
# It is impossible to remove it if the armature has multiple root bones (glTF validator error)
# Currently, we manage it at export level, not at each armature level
for arma_uuid in [n for n in self.nodes.keys() if self.nodes[n].blender_type == VExportNode.ARMATURE]:
if len(self.get_root_bones_uuid(arma_uuid)) > 1:
# We can't remove armature
self.export_settings['gltf_armature_object_remove'] = False
print("WARNING: We can't remove armature object because some armatures have multiple root bones.")
break

View File

@ -3,12 +3,6 @@
# SPDX-License-Identifier: Apache-2.0
import bpy
from mathutils import Vector, Matrix
from ...blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf
from ...io.com import gltf2_io_debug
from ..com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name
from .material import gltf2_blender_search_node_tree
def get_animation_target(action_group: bpy.types.ActionGroup):
return action_group.channels[0].data_path.split('.')[-1]
@ -31,292 +25,3 @@ def get_object_from_datapath(blender_object, data_path: str):
# path_attr = data_path
return prop
def get_node_socket(blender_material, type, name):
"""
For a given material input name, retrieve the corresponding node tree socket for a given node type.
:param blender_material: a blender material for which to get the socket
:return: a blender NodeSocket for a given type
"""
nodes = [n for n in blender_material.node_tree.nodes if isinstance(n, type) and not n.mute]
nodes = [node for node in nodes if check_if_is_linked_to_active_output(node.outputs[0])]
inputs = sum([[input for input in node.inputs if input.name == name] for node in nodes], [])
if inputs:
return inputs[0]
return None
def get_socket(blender_material: bpy.types.Material, name: str, volume=False):
"""
For a given material input name, retrieve the corresponding node tree socket.
:param blender_material: a blender material for which to get the socket
:param name: the name of the socket
:return: a blender NodeSocket
"""
if blender_material.node_tree and blender_material.use_nodes:
#i = [input for input in blender_material.node_tree.inputs]
#o = [output for output in blender_material.node_tree.outputs]
if name == "Emissive":
# Check for a dedicated Emission node first, it must supersede the newer built-in one
# because the newer one is always present in all Principled BSDF materials.
emissive_socket = get_node_socket(blender_material, bpy.types.ShaderNodeEmission, "Color")
if emissive_socket:
return emissive_socket
# If a dedicated Emission node was not found, fall back to the Principled BSDF Emission Color socket.
name = "Emission Color"
type = bpy.types.ShaderNodeBsdfPrincipled
elif name == "Background":
type = bpy.types.ShaderNodeBackground
name = "Color"
else:
if volume is False:
type = bpy.types.ShaderNodeBsdfPrincipled
else:
type = bpy.types.ShaderNodeVolumeAbsorption
return get_node_socket(blender_material, type, name)
return None
def get_socket_old(blender_material: bpy.types.Material, name: str):
"""
For a given material input name, retrieve the corresponding node tree socket in the special glTF node group.
:param blender_material: a blender material for which to get the socket
:param name: the name of the socket
:return: a blender NodeSocket
"""
gltf_node_group_names = [get_gltf_node_name().lower(), get_gltf_node_old_name().lower()]
if blender_material.node_tree and blender_material.use_nodes:
# Some weird node groups with missing datablock can have no node_tree, so checking n.node_tree (See #1797)
nodes = [n for n in blender_material.node_tree.nodes if \
isinstance(n, bpy.types.ShaderNodeGroup) and \
n.node_tree is not None and
(n.node_tree.name.startswith('glTF Metallic Roughness') or n.node_tree.name.lower() in gltf_node_group_names)]
inputs = sum([[input for input in node.inputs if input.name == name] for node in nodes], [])
if inputs:
return inputs[0]
return None
def check_if_is_linked_to_active_output(shader_socket):
for link in shader_socket.links:
if isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial) and link.to_node.is_active_output is True:
return True
if len(link.to_node.outputs) > 0: # ignore non active output, not having output sockets
ret = check_if_is_linked_to_active_output(link.to_node.outputs[0]) # recursive until find an output material node
if ret is True:
return True
return False
def find_shader_image_from_shader_socket(shader_socket, max_hops=10):
"""Find any ShaderNodeTexImage in the path from the socket."""
if shader_socket is None:
return None
if max_hops <= 0:
return None
for link in shader_socket.links:
if isinstance(link.from_node, bpy.types.ShaderNodeTexImage):
return link.from_node
for socket in link.from_node.inputs.values():
image = find_shader_image_from_shader_socket(shader_socket=socket, max_hops=max_hops - 1)
if image is not None:
return image
return None
def get_texture_transform_from_mapping_node(mapping_node):
if mapping_node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]:
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform because it had type " +
mapping_node.vector_type + "; recommend using POINT instead"
)
return None
rotation_0, rotation_1 = mapping_node.inputs['Rotation'].default_value[0], mapping_node.inputs['Rotation'].default_value[1]
if rotation_0 or rotation_1:
# TODO: can we handle this?
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform because it had non-zero "
"rotations in the X/Y direction; only a Z rotation can be exported!"
)
return None
mapping_transform = {}
mapping_transform["offset"] = [mapping_node.inputs['Location'].default_value[0], mapping_node.inputs['Location'].default_value[1]]
mapping_transform["rotation"] = mapping_node.inputs['Rotation'].default_value[2]
mapping_transform["scale"] = [mapping_node.inputs['Scale'].default_value[0], mapping_node.inputs['Scale'].default_value[1]]
if mapping_node.vector_type == "TEXTURE":
# This means use the inverse of the TRS transform.
def inverted(mapping_transform):
offset = mapping_transform["offset"]
rotation = mapping_transform["rotation"]
scale = mapping_transform["scale"]
# Inverse of a TRS is not always a TRS. This function will be right
# at least when the following don't occur.
if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5:
return None
if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5:
return None
new_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector((-offset[0], -offset[1], 1))
new_offset[0] /= scale[0]; new_offset[1] /= scale[1]
return {
"offset": new_offset[0:2],
"rotation": -rotation,
"scale": [1/scale[0], 1/scale[1]],
}
mapping_transform = inverted(mapping_transform)
if mapping_transform is None:
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform with type TEXTURE because "
"we couldn't convert it to TRS; recommend using POINT instead"
)
return None
elif mapping_node.vector_type == "VECTOR":
# Vectors don't get translated
mapping_transform["offset"] = [0, 0]
texture_transform = texture_transform_blender_to_gltf(mapping_transform)
if all([component == 0 for component in texture_transform["offset"]]):
del(texture_transform["offset"])
if all([component == 1 for component in texture_transform["scale"]]):
del(texture_transform["scale"])
if texture_transform["rotation"] == 0:
del(texture_transform["rotation"])
if len(texture_transform) == 0:
return None
return texture_transform
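
The inverted() helper above relies on the fact that the inverse of T(o) R(r) S(s) is S(1/s) R(-r) T(-o), which can be rewritten as a TRS of its own only when the rotation is zero or the scale is uniform (so that scaling and rotation commute); in that case the new offset is R(-r) applied to -o, divided by the scale, exactly as computed above. A quick sanity check of that identity with mathutils, using assumed uniform-scale values:

# Assumed example values; uniform scale, so the inverse is again a TRS.
from math import radians
from mathutils import Matrix, Vector

rot = radians(30.0)
off = Vector((0.2, -0.1, 0.0))
s = 0.5

trs = Matrix.Translation(off) @ Matrix.Rotation(rot, 4, 'Z') @ Matrix.Scale(s, 4)

new_off = Matrix.Rotation(-rot, 3, 'Z') @ Vector((-off[0], -off[1], 1.0))
new_off = Vector((new_off[0] / s, new_off[1] / s, 0.0))
inv = Matrix.Translation(new_off) @ Matrix.Rotation(-rot, 4, 'Z') @ Matrix.Scale(1.0 / s, 4)

err = max(abs(v) for row in (trs @ inv - Matrix.Identity(4)) for v in row)
print(err)  # ~0: trs @ inv is the identity, so the rebuilt TRS really is the inverse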
def get_node(data_path):
"""Return Blender node on a given Blender data path."""
if data_path is None:
return None
index = data_path.find("[\"")
if (index == -1):
return None
node_name = data_path[(index + 2):]
index = node_name.find("\"")
if (index == -1):
return None
return node_name[:(index)]
def get_factor_from_socket(socket, kind):
"""
For baseColorFactor, metallicFactor, etc.
Get a constant value from a socket, or a constant value
from a MULTIPLY node just before the socket.
kind is either 'RGB' or 'VALUE'.
"""
fac = get_const_from_socket(socket, kind)
if fac is not None:
return fac
node = previous_node(socket)
if node is not None:
x1, x2 = None, None
if kind == 'RGB':
if node.type == 'MIX' and node.data_type == "RGBA" and node.blend_type == 'MULTIPLY':
# TODO: handle factor in inputs[0]?
x1 = get_const_from_socket(node.inputs[6], kind)
x2 = get_const_from_socket(node.inputs[7], kind)
if kind == 'VALUE':
if node.type == 'MATH' and node.operation == 'MULTIPLY':
x1 = get_const_from_socket(node.inputs[0], kind)
x2 = get_const_from_socket(node.inputs[1], kind)
if x1 is not None and x2 is None: return x1
if x2 is not None and x1 is None: return x2
return None
def get_const_from_default_value_socket(socket, kind):
if kind == 'RGB':
if socket.type != 'RGBA': return None
return list(socket.default_value)[:3]
if kind == 'VALUE':
if socket.type != 'VALUE': return None
return socket.default_value
return None
def get_const_from_socket(socket, kind):
if not socket.is_linked:
if kind == 'RGB':
if socket.type != 'RGBA': return None
return list(socket.default_value)[:3]
if kind == 'VALUE':
if socket.type != 'VALUE': return None
return socket.default_value
# Handle connection to a constant RGB/Value node
prev_node = previous_node(socket)
if prev_node is not None:
if kind == 'RGB' and prev_node.type == 'RGB':
return list(prev_node.outputs[0].default_value)[:3]
if kind == 'VALUE' and prev_node.type == 'VALUE':
return prev_node.outputs[0].default_value
return None
def previous_socket(socket):
while True:
if not socket.is_linked:
return None
from_socket = socket.links[0].from_socket
# Skip over reroute nodes
if from_socket.node.type == 'REROUTE':
socket = from_socket.node.inputs[0]
continue
return from_socket
def previous_node(socket):
prev_socket = previous_socket(socket)
if prev_socket is not None:
return prev_socket.node
return None
def get_tex_from_socket(socket):
result = gltf2_blender_search_node_tree.from_socket(
socket,
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeTexImage))
if not result:
return None
if result[0].shader_node.image is None:
return None
return result[0]
def has_image_node_from_socket(socket):
return get_tex_from_socket(socket) is not None
def image_tex_is_valid_from_socket(socket):
res = get_tex_from_socket(socket)
return res is not None and res.shader_node.image is not None and res.shader_node.image.channels != 0

View File

@ -117,7 +117,7 @@ class GlTF2Exporter:
f.write(self.__buffer.to_bytes())
uri = buffer_name
else:
uri = self.__buffer.to_embed_string()
pass # This is no more possible, we don't export embedded buffers
buffer = gltf2_io.Buffer(
byte_length=self.__buffer.byte_length,
@ -320,6 +320,20 @@ class GlTF2Exporter:
len_ = len([i for i in self.nodes_idx_to_remove if i < skin.skeleton])
skin.skeleton = skin.skeleton - len_
# Remove animation channels that were targeting a node that will be removed
new_animation_list = []
for animation in self.__gltf.animations:
new_channel_list = []
for channel in animation.channels:
if channel.target.node not in self.nodes_idx_to_remove:
new_channel_list.append(channel)
animation.channels = new_channel_list
if len(animation.channels) > 0:
new_animation_list.append(animation)
self.__gltf.animations = new_animation_list
#TODO: remove unused animation accessors?
# And now really remove nodes
self.__gltf.nodes = [node for idx, node in enumerate(self.__gltf.nodes) if idx not in self.nodes_idx_to_remove]

View File

@ -4,8 +4,8 @@
import bpy
from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import has_image_node_from_socket, get_socket, get_factor_from_socket
def export_clearcoat(blender_material, export_settings):
clearcoat_enabled = False
@ -15,15 +15,15 @@ def export_clearcoat(blender_material, export_settings):
clearcoat_extension = {}
clearcoat_roughness_slots = ()
clearcoat_socket = gltf2_blender_get.get_socket(blender_material, 'Coat Weight')
clearcoat_roughness_socket = gltf2_blender_get.get_socket(blender_material, 'Coat Roughness')
clearcoat_normal_socket = gltf2_blender_get.get_socket(blender_material, 'Coat Normal')
clearcoat_socket = get_socket(blender_material, 'Coat Weight')
clearcoat_roughness_socket = get_socket(blender_material, 'Coat Roughness')
clearcoat_normal_socket = get_socket(blender_material, 'Coat Normal')
if isinstance(clearcoat_socket, bpy.types.NodeSocket) and not clearcoat_socket.is_linked:
clearcoat_extension['clearcoatFactor'] = clearcoat_socket.default_value
if isinstance(clearcoat_socket.socket, bpy.types.NodeSocket) and not clearcoat_socket.socket.is_linked:
clearcoat_extension['clearcoatFactor'] = clearcoat_socket.socket.default_value
clearcoat_enabled = clearcoat_extension['clearcoatFactor'] > 0
elif gltf2_blender_get.has_image_node_from_socket(clearcoat_socket):
fac = gltf2_blender_get.get_factor_from_socket(clearcoat_socket, kind='VALUE')
elif has_image_node_from_socket(clearcoat_socket, export_settings):
fac = get_factor_from_socket(clearcoat_socket, kind='VALUE')
# default value in glTF is 0.0, but if there is a texture without factor, use 1
clearcoat_extension['clearcoatFactor'] = fac if fac != None else 1.0
has_clearcoat_texture = True
@ -32,10 +32,10 @@ def export_clearcoat(blender_material, export_settings):
if not clearcoat_enabled:
return None, {}
if isinstance(clearcoat_roughness_socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.is_linked:
clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.default_value
elif gltf2_blender_get.has_image_node_from_socket(clearcoat_roughness_socket):
fac = gltf2_blender_get.get_factor_from_socket(clearcoat_roughness_socket, kind='VALUE')
if isinstance(clearcoat_roughness_socket.socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.socket.is_linked:
clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.socket.default_value
elif has_image_node_from_socket(clearcoat_roughness_socket, export_settings):
fac = get_factor_from_socket(clearcoat_roughness_socket, kind='VALUE')
# default value in glTF is 0.0, but if there is a texture without factor, use 1
clearcoat_extension['clearcoatRoughnessFactor'] = fac if fac != None else 1.0
has_clearcoat_roughness_texture = True
@ -71,7 +71,7 @@ def export_clearcoat(blender_material, export_settings):
clearcoat_extension['clearcoatRoughnessTexture'] = clearcoat_roughness_texture
uvmap_infos.update({'clearcoatRoughnessTexture': uvmap_info})
if gltf2_blender_get.has_image_node_from_socket(clearcoat_normal_socket):
if has_image_node_from_socket(clearcoat_normal_socket, export_settings):
clearcoat_normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class(
clearcoat_normal_socket,
(clearcoat_normal_socket,),

View File

@ -4,20 +4,26 @@
import bpy
from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import \
get_const_from_default_value_socket, \
get_socket, \
get_factor_from_socket, \
get_const_from_socket, \
NodeSocket, \
get_socket_from_gltf_material_node
def export_emission_factor(blender_material, export_settings):
emissive_socket = gltf2_blender_get.get_socket(blender_material, "Emissive")
if emissive_socket is None:
emissive_socket = gltf2_blender_get.get_socket_old(blender_material, "EmissiveFactor")
if isinstance(emissive_socket, bpy.types.NodeSocket):
emissive_socket = get_socket(blender_material, "Emissive")
if emissive_socket.socket is None:
emissive_socket = get_socket_from_gltf_material_node(blender_material, "EmissiveFactor")
if isinstance(emissive_socket.socket, bpy.types.NodeSocket):
if export_settings['gltf_image_format'] != "NONE":
factor = gltf2_blender_get.get_factor_from_socket(emissive_socket, kind='RGB')
factor = get_factor_from_socket(emissive_socket, kind='RGB')
else:
factor = gltf2_blender_get.get_const_from_default_value_socket(emissive_socket, kind='RGB')
factor = get_const_from_default_value_socket(emissive_socket, kind='RGB')
if factor is None and emissive_socket.is_linked:
if factor is None and emissive_socket.socket.is_linked:
# In glTF, the default emissiveFactor is all zeros, so if an emission texture is connected,
# we have to manually set it to all ones.
factor = [1.0, 1.0, 1.0]
@ -26,12 +32,12 @@ def export_emission_factor(blender_material, export_settings):
# Handle Emission Strength
strength_socket = None
if emissive_socket.node.type == 'EMISSION':
strength_socket = emissive_socket.node.inputs['Strength']
elif 'Emission Strength' in emissive_socket.node.inputs:
strength_socket = emissive_socket.node.inputs['Emission Strength']
if emissive_socket.socket.node.type == 'EMISSION':
strength_socket = emissive_socket.socket.node.inputs['Strength']
elif 'Emission Strength' in emissive_socket.socket.node.inputs:
strength_socket = emissive_socket.socket.node.inputs['Emission Strength']
strength = (
gltf2_blender_get.get_const_from_socket(strength_socket, kind='VALUE')
get_const_from_socket(NodeSocket(strength_socket, emissive_socket.group_path), kind='VALUE')
if strength_socket is not None
else None
)
@ -49,9 +55,9 @@ def export_emission_factor(blender_material, export_settings):
return None
def export_emission_texture(blender_material, export_settings):
emissive = gltf2_blender_get.get_socket(blender_material, "Emissive")
if emissive is None:
emissive = gltf2_blender_get.get_socket_old(blender_material, "Emissive")
emissive = get_socket(blender_material, "Emissive")
if emissive.socket is None:
emissive = get_socket_from_gltf_material_node(blender_material, "Emissive")
emissive_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(emissive, (emissive,), (), export_settings)
return emissive_texture, {'emissiveTexture': uvmap_info}

View File

@ -4,20 +4,20 @@
from .....io.com.gltf2_io_extensions import Extension
from .....io.com.gltf2_io_constants import GLTF_IOR
from ....exp import gltf2_blender_get
from ..gltf2_blender_search_node_tree import get_socket
def export_ior(blender_material, extensions, export_settings):
ior_socket = gltf2_blender_get.get_socket(blender_material, 'IOR')
ior_socket = get_socket(blender_material, 'IOR')
if not ior_socket:
if not ior_socket.socket:
return None
# We don't manage case where socket is linked, always check default value
if ior_socket.is_linked:
if ior_socket.socket.is_linked:
# TODOExt: add warning?
return None
if ior_socket.default_value == GLTF_IOR:
if ior_socket.socket.default_value == GLTF_IOR:
return None
# Export only if the following extensions are exported:
@ -31,6 +31,6 @@ def export_ior(blender_material, extensions, export_settings):
return None
ior_extension = {}
ior_extension['ior'] = ior_socket.default_value
ior_extension['ior'] = ior_socket.socket.default_value
return Extension('KHR_materials_ior', ior_extension, False)

View File

@ -4,47 +4,48 @@
import bpy
from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_socket, \
get_factor_from_socket
def export_sheen(blender_material, export_settings):
sheen_extension = {}
sheenTint_socket = gltf2_blender_get.get_socket(blender_material, "Sheen Tint")
sheenRoughness_socket = gltf2_blender_get.get_socket(blender_material, "Sheen Roughness")
sheen_socket = gltf2_blender_get.get_socket(blender_material, "Sheen Weight")
sheenTint_socket = get_socket(blender_material, "Sheen Tint")
sheenRoughness_socket = get_socket(blender_material, "Sheen Roughness")
sheen_socket = get_socket(blender_material, "Sheen Weight")
if sheenTint_socket is None or sheenRoughness_socket is None or sheen_socket is None:
if sheenTint_socket.socket is None or sheenRoughness_socket.socket is None or sheen_socket.socket is None:
return None, {}
if sheen_socket.is_linked is False and sheen_socket.default_value == 0.0:
if sheen_socket.socket.is_linked is False and sheen_socket.socket.default_value == 0.0:
return None, {}
uvmap_infos = {}
#TODOExt : What to do if sheen_socket is linked? or is not between 0 and 1?
sheenTint_non_linked = isinstance(sheenTint_socket, bpy.types.NodeSocket) and not sheenTint_socket.is_linked
sheenRoughness_non_linked = isinstance(sheenRoughness_socket, bpy.types.NodeSocket) and not sheenRoughness_socket.is_linked
sheenTint_non_linked = isinstance(sheenTint_socket.socket, bpy.types.NodeSocket) and not sheenTint_socket.socket.is_linked
sheenRoughness_non_linked = isinstance(sheenRoughness_socket.socket, bpy.types.NodeSocket) and not sheenRoughness_socket.socket.is_linked
use_actives_uvmaps = []
if sheenTint_non_linked is True:
color = sheenTint_socket.default_value[:3]
color = sheenTint_socket.socket.default_value[:3]
if color != (0.0, 0.0, 0.0):
sheen_extension['sheenColorFactor'] = color
else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(sheenTint_socket, kind='RGB')
fac = get_factor_from_socket(sheenTint_socket, kind='RGB')
if fac is None:
fac = [1.0, 1.0, 1.0] # Default is 0.0/0.0/0.0, so we need to set it to 1 if no factor
if fac is not None and fac != [0.0, 0.0, 0.0]:
sheen_extension['sheenColorFactor'] = fac
# Texture
if gltf2_blender_get.has_image_node_from_socket(sheenTint_socket):
if has_image_node_from_socket(sheenTint_socket, export_settings):
original_sheenColor_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
sheenTint_socket,
(sheenTint_socket,),
@ -55,19 +56,19 @@ def export_sheen(blender_material, export_settings):
uvmap_infos.update({'sheenColorTexture': uvmap_info})
if sheenRoughness_non_linked is True:
fac = sheenRoughness_socket.default_value
fac = sheenRoughness_socket.socket.default_value
if fac != 0.0:
sheen_extension['sheenRoughnessFactor'] = fac
else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(sheenRoughness_socket, kind='VALUE')
fac = get_factor_from_socket(sheenRoughness_socket, kind='VALUE')
if fac is None:
fac = 1.0 # Default is 0.0 so we need to set it to 1.0 if no factor
if fac is not None and fac != 0.0:
sheen_extension['sheenRoughnessFactor'] = fac
# Texture
if gltf2_blender_get.has_image_node_from_socket(sheenRoughness_socket):
if has_image_node_from_socket(sheenRoughness_socket, export_settings):
original_sheenRoughness_texture, uvmap_info , _ = gltf2_blender_gather_texture_info.gather_texture_info(
sheenRoughness_socket,
(sheenRoughness_socket,),

View File

@ -4,68 +4,96 @@
import bpy
from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material.gltf2_blender_gather_texture_info import gather_texture_info
from ..gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_socket, \
get_factor_from_socket
def export_specular(blender_material, export_settings):
specular_extension = {}
extensions_needed = False
specular_socket = gltf2_blender_get.get_socket(blender_material, 'Specular IOR Level')
speculartint_socket = gltf2_blender_get.get_socket(blender_material, 'Specular Tint')
specular_socket = get_socket(blender_material, 'Specular IOR Level')
speculartint_socket = get_socket(blender_material, 'Specular Tint')
if specular_socket is None or speculartint_socket is None:
if specular_socket.socket is None or speculartint_socket.socket is None:
return None, {}
uvmap_infos = {}
specular_non_linked = isinstance(specular_socket, bpy.types.NodeSocket) and not specular_socket.is_linked
specularcolor_non_linked = isinstance(speculartint_socket, bpy.types.NodeSocket) and not speculartint_socket.is_linked
specular_non_linked = isinstance(specular_socket.socket, bpy.types.NodeSocket) and not specular_socket.socket.is_linked
specularcolor_non_linked = isinstance(speculartint_socket.socket, bpy.types.NodeSocket) and not speculartint_socket.socket.is_linked
if specular_non_linked is True:
fac = specular_socket.default_value
if fac != 1.0:
fac = specular_socket.socket.default_value
fac = fac * 2.0
if fac < 1.0:
specular_extension['specularFactor'] = fac
if fac == 0.0:
return None, {}
extensions_needed = True
elif fac > 1.0:
# glTF specularFactor should be <= 1.0, so we multiply specularColorFactor by the factor and keep specularFactor at 1.0 (the default value)
extensions_needed = True
else:
pass # If fac == 1.0, no need to export specularFactor, the default value is 1.0
else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(specular_socket, kind='VALUE')
fac = get_factor_from_socket(specular_socket, kind='VALUE')
if fac is not None and fac != 1.0:
specular_extension['specularFactor'] = fac
if fac == 0.0:
return None, {}
fac = fac * 2.0 if fac is not None else None
if fac is not None and fac < 1.0:
specular_extension['specularFactor'] = fac
extensions_needed = True
elif fac is not None and fac > 1.0:
# glTF specularFactor should be <= 1.0, so we multiply specularColorFactor by the factor and keep specularFactor at 1.0 (the default value)
extensions_needed = True
# Texture
if gltf2_blender_get.has_image_node_from_socket(specular_socket):
original_specular_texture, uvmap_info, _ = gather_texture_info(
if has_image_node_from_socket(specular_socket, export_settings):
specular_texture, uvmap_info, _ = gather_texture_info(
specular_socket,
(specular_socket,),
(),
export_settings,
)
specular_extension['specularTexture'] = original_specular_texture
specular_extension['specularTexture'] = specular_texture
uvmap_infos.update({'specularTexture': uvmap_info})
extensions_needed = True
if specularcolor_non_linked is True:
color = speculartint_socket.default_value[:3]
color = speculartint_socket.socket.default_value[:3]
if fac is not None and fac > 1.0:
color = (color[0] * fac, color[1] * fac, color[2] * fac)
specular_extension['specularColorFactor'] = color if color != (1.0, 1.0, 1.0) else None
if color != (1.0, 1.0, 1.0):
specular_extension['specularColorFactor'] = color
extensions_needed = True
else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(speculartint_socket, kind='RGB')
if fac is not None and fac != (1.0, 1.0, 1.0):
specular_extension['specularColorFactor'] = fac
fac_color = get_factor_from_socket(speculartint_socket, kind='RGB')
if fac_color is not None and fac is not None and fac > 1.0:
fac_color = (fac_color[0] * fac, fac_color[1] * fac, fac_color[2] * fac)
elif fac_color is None and fac is not None and fac > 1.0:
fac_color = (fac, fac, fac)
specular_extension['specularColorFactor'] = fac_color if fac_color != (1.0, 1.0, 1.0) else None
if fac_color != (1.0, 1.0, 1.0):
extensions_needed = True
# Texture
if gltf2_blender_get.has_image_node_from_socket(speculartint_socket):
original_specularcolor_texture, uvmap_info, _ = gather_texture_info(
if has_image_node_from_socket(speculartint_socket, export_settings):
specularcolor_texture, uvmap_info, _ = gather_texture_info(
speculartint_socket,
(speculartint_socket,),
(),
export_settings,
)
specular_extension['specularColorTexture'] = original_specularcolor_texture
specular_extension['specularColorTexture'] = specularcolor_texture
uvmap_infos.update({'specularColorTexture': uvmap_info})
extensions_needed = True
if extensions_needed is False:
return None, {}
return Extension('KHR_materials_specular', specular_extension, False), uvmap_infos
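
The doubling above means Blender's Specular IOR Level of 0.5 corresponds to glTF's default specularFactor of 1.0; lower values export a specularFactor, while higher values are folded into specularColorFactor instead (since specularFactor must stay <= 1.0). A small worked example tracing the branches with made-up input values:

# Hypothetical values, just to trace the branches of the exporter logic above.
for blender_specular in (0.25, 0.5, 0.75):
    fac = blender_specular * 2.0
    if fac < 1.0:
        print(blender_specular, "-> specularFactor =", fac)
    elif fac > 1.0:
        print(blender_specular, "-> scale specularColorFactor by", fac)
    else:
        print(blender_specular, "-> matches the glTF default, nothing written")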

View File

@ -4,8 +4,11 @@
import bpy
from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_socket, \
get_factor_from_socket
def export_transmission(blender_material, export_settings):
transmission_enabled = False
@ -14,13 +17,13 @@ def export_transmission(blender_material, export_settings):
transmission_extension = {}
transmission_slots = ()
transmission_socket = gltf2_blender_get.get_socket(blender_material, 'Transmission Weight')
transmission_socket = get_socket(blender_material, 'Transmission Weight')
if isinstance(transmission_socket, bpy.types.NodeSocket) and not transmission_socket.is_linked:
transmission_extension['transmissionFactor'] = transmission_socket.default_value
if isinstance(transmission_socket.socket, bpy.types.NodeSocket) and not transmission_socket.socket.is_linked:
transmission_extension['transmissionFactor'] = transmission_socket.socket.default_value
transmission_enabled = transmission_extension['transmissionFactor'] > 0
elif gltf2_blender_get.has_image_node_from_socket(transmission_socket):
fac = gltf2_blender_get.get_factor_from_socket(transmission_socket, kind='VALUE')
elif has_image_node_from_socket(transmission_socket, export_settings):
fac = get_factor_from_socket(transmission_socket, kind='VALUE')
transmission_extension['transmissionFactor'] = fac if fac is not None else 1.0
has_transmission_texture = True
transmission_enabled = True

View File

@ -4,8 +4,13 @@
import bpy
from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_const_from_default_value_socket, \
get_socket_from_gltf_material_node, \
get_socket, \
get_factor_from_socket
def export_volume(blender_material, export_settings):
@ -13,10 +18,10 @@ def export_volume(blender_material, export_settings):
# If no transmission --> No volume
transmission_enabled = False
transmission_socket = gltf2_blender_get.get_socket(blender_material, 'Transmission Weight')
if isinstance(transmission_socket, bpy.types.NodeSocket) and not transmission_socket.is_linked:
transmission_enabled = transmission_socket.default_value > 0
elif gltf2_blender_get.has_image_node_from_socket(transmission_socket):
transmission_socket = get_socket(blender_material, 'Transmission Weight')
if isinstance(transmission_socket.socket, bpy.types.NodeSocket) and not transmission_socket.socket.is_linked:
transmission_enabled = transmission_socket.socket.default_value > 0
elif has_image_node_from_socket(transmission_socket, export_settings):
transmission_enabled = True
if transmission_enabled is False:
@ -27,43 +32,43 @@ def export_volume(blender_material, export_settings):
thickness_slots = ()
uvmap_info = {}
thicknesss_socket = gltf2_blender_get.get_socket_old(blender_material, 'Thickness')
if thicknesss_socket is None:
thickness_socket = get_socket_from_gltf_material_node(blender_material, 'Thickness')
if thickness_socket.socket is None:
# If no thickness (here because there is no glTF Material Output node), no volume extension export
return None, {}
density_socket = gltf2_blender_get.get_socket(blender_material, 'Density', volume=True)
attenuation_color_socket = gltf2_blender_get.get_socket(blender_material, 'Color', volume=True)
density_socket = get_socket(blender_material, 'Density', volume=True)
attenuation_color_socket = get_socket(blender_material, 'Color', volume=True)
# Even if density or attenuation are not set, we export volume extension
if isinstance(attenuation_color_socket, bpy.types.NodeSocket):
rgb = gltf2_blender_get.get_const_from_default_value_socket(attenuation_color_socket, kind='RGB')
if isinstance(attenuation_color_socket.socket, bpy.types.NodeSocket):
rgb = get_const_from_default_value_socket(attenuation_color_socket, kind='RGB')
volume_extension['attenuationColor'] = rgb
if isinstance(density_socket, bpy.types.NodeSocket):
density = gltf2_blender_get.get_const_from_default_value_socket(density_socket, kind='VALUE')
if isinstance(density_socket.socket, bpy.types.NodeSocket):
density = get_const_from_default_value_socket(density_socket, kind='VALUE')
volume_extension['attenuationDistance'] = 1.0 / density if density != 0 else None # infinity (Using None as glTF default)
if isinstance(thicknesss_socket, bpy.types.NodeSocket) and not thicknesss_socket.is_linked:
val = thicknesss_socket.default_value
if isinstance(thickness_socket.socket, bpy.types.NodeSocket) and not thickness_socket.socket.is_linked:
val = thickness_socket.socket.default_value
if val == 0.0:
# If no thickness, no volume extension export
return None, {}
volume_extension['thicknessFactor'] = val
elif gltf2_blender_get.has_image_node_from_socket(thicknesss_socket):
fac = gltf2_blender_get.get_factor_from_socket(thicknesss_socket, kind='VALUE')
elif has_image_node_from_socket(thickness_socket, export_settings):
fac = get_factor_from_socket(thickness_socket, kind='VALUE')
# default value in glTF is 0.0, but if there is a texture without factor, use 1
volume_extension['thicknessFactor'] = fac if fac != None else 1.0
has_thickness_texture = True
# Pack thickness channel (R).
# Pack thickness channel (G).
if has_thickness_texture:
thickness_slots = (thicknesss_socket,)
thickness_slots = (thickness_socket,)
if len(thickness_slots) > 0:
combined_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
thicknesss_socket,
thickness_socket,
thickness_slots,
(),
export_settings,
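As a rough sketch of the constant-value mapping in this hunk (function name hypothetical): the volume Density socket drives KHR_materials_volume's attenuationDistance as its reciprocal, a constant Thickness of 0.0 suppresses the extension entirely, and a thickness texture without an explicit factor falls back to a thicknessFactor of 1.0.

    def volume_extension_sketch(density, thickness, has_thickness_texture=False):
        # Illustrative only; assumes transmission is already enabled on the material.
        ext = {'attenuationDistance': 1.0 / density if density != 0 else None}  # None == infinity
        if has_thickness_texture:
            ext['thicknessFactor'] = 1.0   # glTF default is 0.0, so force 1.0 for textured thickness
        else:
            if thickness == 0.0:
                return None                # no volume extension at all
            ext['thicknessFactor'] = thickness
        return ext

    # volume_extension_sketch(4.0, 0.1) -> {'attenuationDistance': 0.25, 'thicknessFactor': 0.1}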

View File

@ -147,7 +147,6 @@ class ExportImage:
# Unhappy path = we need to create the image self.fills describes or self.stores describes
if self.numpy_calc is None:
print(">2")
return self.__encode_unhappy(export_settings), None
else:
pixels, width, height, factor = self.numpy_calc(self.stored)

View File

@ -13,7 +13,7 @@ from ....io.com import gltf2_io_debug
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ..gltf2_blender_gather_cache import cached
from .extensions.gltf2_blender_image import Channel, ExportImage, FillImage
from ..gltf2_blender_get import get_tex_from_socket
from .gltf2_blender_search_node_tree import get_texture_node_from_socket, NodeSocket
@cached
def gather_image(
@ -59,7 +59,7 @@ def gather_image(
export_user_extensions('gather_image_hook', export_settings, image, blender_shader_sockets)
# We also return image_data, as it can be used to generate same file with another extension for webp management
# We also return image_data, as it can be used to generate same file with another extension for WebP management
return image, image_data, factor
def __gather_original_uri(original_uri, export_settings):
@ -114,11 +114,11 @@ def __gather_extras(sockets, export_settings):
def __gather_mime_type(sockets, export_image, export_settings):
# force png or webp if Alpha contained so we can export alpha
for socket in sockets:
if socket.name == "Alpha":
if socket.socket.name == "Alpha":
if export_settings["gltf_image_format"] == "WEBP":
return "image/webp"
else:
# If we keep image as is (no channel composition), we need to keep original format (for webp)
# If we keep image as is (no channel composition), we need to keep original format (for WebP)
image = export_image.blender_image()
if image is not None and __is_blender_image_a_webp(image):
return "image/webp"
@ -191,7 +191,7 @@ def __get_image_data(sockets, default_sockets, export_settings) -> ExportImage:
# For shared resources, such as images, we just store the portion of data that is needed in the glTF property
# in a helper class. During generation of the glTF in the exporter these will then be combined to actual binary
# resources.
results = [get_tex_from_socket(socket) for socket in sockets]
results = [get_texture_node_from_socket(socket, export_settings) for socket in sockets]
# Check if we need a simple mapping or more complex calculation
# There is currently no complex calculation for any textures
@ -222,7 +222,7 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
else:
# rudimentarily try to follow the node tree to find the correct image data.
src_chan = Channel.R
src_chan = None
for elem in result.path:
if isinstance(elem.from_node, bpy.types.ShaderNodeSeparateColor):
src_chan = {
@ -233,26 +233,55 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
if elem.from_socket.name == 'Alpha':
src_chan = Channel.A
if src_chan is None:
# No SeparateColor node found, so take the channel the glTF specification expects,
# so that the export is correct if the user plugs the texture directly into the socket
if socket.socket.name == 'Metallic':
src_chan = Channel.B
elif socket.socket.name == 'Roughness':
src_chan = Channel.G
elif socket.socket.name == 'Occlusion':
src_chan = Channel.R
elif socket.socket.name == 'Alpha':
src_chan = Channel.A
elif socket.socket.name == 'Coat Weight':
src_chan = Channel.R
elif socket.socket.name == 'Coat Roughness':
src_chan = Channel.G
elif socket.socket.name == 'Thickness': # For KHR_materials_volume
src_chan = Channel.G
if src_chan is None:
# Seems we can't find the channel
# We are in a case where the user plugged a texture into a Color socket, but we may have used the alpha channel
if socket.socket.name in ["Alpha", "Specular IOR Level", "Sheen Roughness"]:
src_chan = Channel.A
if src_chan is None:
# We definitely can't find the channel, so keep the first channel even if this is wrong
src_chan = Channel.R
dst_chan = None
# some sockets need channel rewriting (gltf pbr defines fixed channels for some attributes)
if socket.name == 'Metallic':
if socket.socket.name == 'Metallic':
dst_chan = Channel.B
elif socket.name == 'Roughness':
elif socket.socket.name == 'Roughness':
dst_chan = Channel.G
elif socket.name == 'Occlusion':
elif socket.socket.name == 'Occlusion':
dst_chan = Channel.R
elif socket.name == 'Alpha':
elif socket.socket.name == 'Alpha':
dst_chan = Channel.A
elif socket.name == 'Coat Weight':
elif socket.socket.name == 'Coat Weight':
dst_chan = Channel.R
elif socket.name == 'Coat Roughness':
elif socket.socket.name == 'Coat Roughness':
dst_chan = Channel.G
elif socket.name == 'Thickness': # For KHR_materials_volume
elif socket.socket.name == 'Thickness': # For KHR_materials_volume
dst_chan = Channel.G
elif socket.name == "Specular IOR Level": # For KHR_material_specular
elif socket.socket.name == "Specular IOR Level": # For KHR_material_specular
dst_chan = Channel.A
elif socket.name == "Sheen Roughness": # For KHR_materials_sheen
elif socket.socket.name == "Sheen Roughness": # For KHR_materials_sheen
dst_chan = Channel.A
if dst_chan is not None:
@ -260,12 +289,12 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
# Since metal/roughness are always used together, make sure
# the other channel is filled.
if socket.name == 'Metallic' and not composed_image.is_filled(Channel.G):
if socket.socket.name == 'Metallic' and not composed_image.is_filled(Channel.G):
if default_roughness is not None:
composed_image.fill_with(Channel.G, default_roughness)
else:
composed_image.fill_white(Channel.G)
elif socket.name == 'Roughness' and not composed_image.is_filled(Channel.B):
elif socket.socket.name == 'Roughness' and not composed_image.is_filled(Channel.B):
if default_metallic is not None:
composed_image.fill_with(Channel.B, default_metallic)
else:
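The fixed glTF channel assignments used in this file, both for the source fallback when no Separate Color node is found and for the destination packing, can be summarized as a small lookup table (a reference sketch, not code from the addon). When only one of Metallic/Roughness is textured, the hunk above fills the missing channel from the constant default or with white.

    from enum import IntEnum

    class Channel(IntEnum):   # stand-in for the addon's Channel enum
        R = 0
        G = 1
        B = 2
        A = 3

    GLTF_CHANNEL_BY_SOCKET = {
        'Metallic': Channel.B,
        'Roughness': Channel.G,
        'Occlusion': Channel.R,
        'Alpha': Channel.A,
        'Coat Weight': Channel.R,
        'Coat Roughness': Channel.G,
        'Thickness': Channel.G,            # KHR_materials_volume
        'Specular IOR Level': Channel.A,   # KHR_materials_specular
        'Sheen Roughness': Channel.A,      # KHR_materials_sheen
    }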

View File

@ -10,7 +10,6 @@ from ....io.com.gltf2_io_extensions import Extension
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....io.com.gltf2_io_debug import print_console
from ...com.gltf2_blender_extras import generate_extras
from ...exp import gltf2_blender_get
from ..gltf2_blender_gather_cache import cached, cached_by_key
from . import gltf2_blender_gather_materials_unlit
from . import gltf2_blender_gather_texture_info
@ -23,6 +22,11 @@ from .extensions.gltf2_blender_gather_materials_specular import export_specular
from .extensions.gltf2_blender_gather_materials_transmission import export_transmission
from .extensions.gltf2_blender_gather_materials_clearcoat import export_clearcoat
from .extensions.gltf2_blender_gather_materials_ior import export_ior
from .gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_socket_from_gltf_material_node, \
get_socket, \
get_node_socket
@cached
def get_material_cache_key(blender_material, export_settings):
@ -90,7 +94,7 @@ def gather_material(blender_material, export_settings):
# If emissive is set, from an emissive node (not PBR)
# We need to set manually default values for
# pbr_metallic_roughness.baseColor
if material.emissive_factor is not None and gltf2_blender_get.get_node_socket(blender_material, bpy.types.ShaderNodeBsdfPrincipled, "Base Color") is None:
if material.emissive_factor is not None and get_node_socket(blender_material, bpy.types.ShaderNodeBsdfPrincipled, "Base Color").socket is None:
material.pbr_metallic_roughness = gltf2_blender_gather_materials_pbr_metallic_roughness.get_default_pbr_for_emissive_node()
export_user_extensions('gather_material_hook', export_settings, material, blender_material)
@ -143,12 +147,6 @@ def __gather_double_sided(blender_material, extensions, export_settings):
if not blender_material.use_backface_culling:
return True
old_double_sided_socket = gltf2_blender_get.get_socket_old(blender_material, "DoubleSided")
if old_double_sided_socket is not None and\
not old_double_sided_socket.is_linked and\
old_double_sided_socket.default_value > 0.5:
return True
return None
@ -222,9 +220,7 @@ def __gather_name(blender_material, export_settings):
def __gather_normal_texture(blender_material, export_settings):
normal = gltf2_blender_get.get_socket(blender_material, "Normal")
if normal is None:
normal = gltf2_blender_get.get_socket_old(blender_material, "Normal")
normal = get_socket(blender_material, "Normal")
normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class(
normal,
(normal,),
@ -236,35 +232,37 @@ def __gather_orm_texture(blender_material, export_settings):
# Check for the presence of Occlusion, Roughness, Metallic sharing a single image.
# If not fully shared, return None, so the images will be cached and processed separately.
occlusion = gltf2_blender_get.get_socket(blender_material, "Occlusion")
if occlusion is None or not gltf2_blender_get.has_image_node_from_socket(occlusion):
occlusion = gltf2_blender_get.get_socket_old(blender_material, "Occlusion")
if occlusion is None or not gltf2_blender_get.has_image_node_from_socket(occlusion):
occlusion = get_socket(blender_material, "Occlusion")
if occlusion.socket is None or not has_image_node_from_socket(occlusion, export_settings):
occlusion = get_socket_from_gltf_material_node(blender_material, "Occlusion")
if occlusion.socket is None or not has_image_node_from_socket(occlusion, export_settings):
return None, None
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic")
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness")
metallic_socket = get_socket(blender_material, "Metallic")
roughness_socket = get_socket(blender_material, "Roughness")
hasMetal = metallic_socket is not None and gltf2_blender_get.has_image_node_from_socket(metallic_socket)
hasRough = roughness_socket is not None and gltf2_blender_get.has_image_node_from_socket(roughness_socket)
hasMetal = metallic_socket.socket is not None and has_image_node_from_socket(metallic_socket, export_settings)
hasRough = roughness_socket.socket is not None and has_image_node_from_socket(roughness_socket, export_settings)
default_sockets = ()
# Warning: for default sockets, do not use the NodeSocket wrapper, because it would break the cache;
# use the Blender socket object directly
if not hasMetal and not hasRough:
metallic_roughness = gltf2_blender_get.get_socket_old(blender_material, "MetallicRoughness")
if metallic_roughness is None or not gltf2_blender_get.has_image_node_from_socket(metallic_roughness):
metallic_roughness = get_socket_from_gltf_material_node(blender_material, "MetallicRoughness")
if metallic_roughness.socket is None or not has_image_node_from_socket(metallic_roughness, export_settings):
return None, default_sockets
result = (occlusion, metallic_roughness)
elif not hasMetal:
result = (occlusion, roughness_socket)
default_sockets = (metallic_socket,)
default_sockets = (metallic_socket.socket,)
elif not hasRough:
result = (occlusion, metallic_socket)
default_sockets = (roughness_socket,)
default_sockets = (roughness_socket.socket,)
else:
result = (occlusion, roughness_socket, metallic_socket)
default_sockets = ()
if not gltf2_blender_gather_texture_info.check_same_size_images(result):
if not gltf2_blender_gather_texture_info.check_same_size_images(result, export_settings):
print_console("INFO",
"Occlusion and metal-roughness texture will be exported separately "
"(use same-sized images if you want them combined)")
@ -278,9 +276,9 @@ def __gather_orm_texture(blender_material, export_settings):
return result, default_sockets
def __gather_occlusion_texture(blender_material, orm_texture, default_sockets, export_settings):
occlusion = gltf2_blender_get.get_socket(blender_material, "Occlusion")
if occlusion is None:
occlusion = gltf2_blender_get.get_socket_old(blender_material, "Occlusion")
occlusion = get_socket(blender_material, "Occlusion")
if occlusion.socket is None:
occlusion = get_socket_from_gltf_material_node(blender_material, "Occlusion")
occlusion_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_occlusion_texture_info_class(
occlusion,
orm_texture or (occlusion,),

View File

@ -4,13 +4,18 @@
import bpy
from ....io.com import gltf2_io
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ...exp import gltf2_blender_get
from ..gltf2_blender_gather_cache import cached
from ..gltf2_blender_get import image_tex_is_valid_from_socket
from .gltf2_blender_search_node_tree import get_vertex_color_info
from .gltf2_blender_gather_texture_info import gather_texture_info
from .gltf2_blender_search_node_tree import \
get_socket_from_gltf_material_node, \
has_image_node_from_socket, \
get_const_from_default_value_socket, \
get_socket, \
get_factor_from_socket
@cached
def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings):
@ -49,23 +54,23 @@ def __gather_base_color_factor(blender_material, export_settings):
rgb, alpha = None, None
alpha_socket = gltf2_blender_get.get_socket(blender_material, "Alpha")
if isinstance(alpha_socket, bpy.types.NodeSocket):
alpha_socket = get_socket(blender_material, "Alpha")
if isinstance(alpha_socket.socket, bpy.types.NodeSocket):
if export_settings['gltf_image_format'] != "NONE":
alpha = gltf2_blender_get.get_factor_from_socket(alpha_socket, kind='VALUE')
alpha = get_factor_from_socket(alpha_socket, kind='VALUE')
else:
alpha = gltf2_blender_get.get_const_from_default_value_socket(alpha_socket, kind='VALUE')
alpha = get_const_from_default_value_socket(alpha_socket, kind='VALUE')
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Base Color")
base_color_socket = get_socket(blender_material, "Base Color")
if base_color_socket.socket is None:
base_color_socket = get_socket(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_old(blender_material, "BaseColorFactor")
if isinstance(base_color_socket, bpy.types.NodeSocket):
base_color_socket = get_socket_from_gltf_material_node(blender_material, "BaseColorFactor")
if isinstance(base_color_socket.socket, bpy.types.NodeSocket):
if export_settings['gltf_image_format'] != "NONE":
rgb = gltf2_blender_get.get_factor_from_socket(base_color_socket, kind='RGB')
rgb = get_factor_from_socket(base_color_socket, kind='RGB')
else:
rgb = gltf2_blender_get.get_const_from_default_value_socket(base_color_socket, kind='RGB')
rgb = get_const_from_default_value_socket(base_color_socket, kind='RGB')
if rgb is None: rgb = [1.0, 1.0, 1.0]
if alpha is None: alpha = 1.0
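The two constants gathered above end up as the RGBA baseColorFactor. A tiny sketch of that combination step (defaults taken from the code above; treating the all-ones result as "omit the factor" is an assumption based on the glTF defaults):

    def combine_base_color_factor(rgb, alpha):
        if rgb is None:
            rgb = [1.0, 1.0, 1.0]
        if alpha is None:
            alpha = 1.0
        rgba = [*rgb, alpha]
        return None if rgba == [1.0, 1.0, 1.0, 1.0] else rgba   # None == keep the glTF default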
@ -80,18 +85,18 @@ def __gather_base_color_factor(blender_material, export_settings):
def __gather_base_color_texture(blender_material, export_settings):
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Base Color")
base_color_socket = get_socket(blender_material, "Base Color")
if base_color_socket.socket is None:
base_color_socket = get_socket(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_old(blender_material, "BaseColor")
base_color_socket = get_socket_from_gltf_material_node(blender_material, "BaseColor")
alpha_socket = gltf2_blender_get.get_socket(blender_material, "Alpha")
alpha_socket = get_socket(blender_material, "Alpha")
# keep sockets that have some texture : color and/or alpha
inputs = tuple(
socket for socket in [base_color_socket, alpha_socket]
if socket is not None and image_tex_is_valid_from_socket(socket)
if socket.socket is not None and has_image_node_from_socket(socket, export_settings)
)
if not inputs:
return None, {}, {"uv_info": {}, "vc_info": {}}, None
@ -113,34 +118,35 @@ def __gather_metallic_factor(blender_material, export_settings):
if not blender_material.use_nodes:
return blender_material.metallic
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic")
metallic_socket = get_socket(blender_material, "Metallic")
if metallic_socket is None:
metallic_socket = gltf2_blender_get.get_socket_old(blender_material, "MetallicFactor")
if isinstance(metallic_socket, bpy.types.NodeSocket):
fac = gltf2_blender_get.get_factor_from_socket(metallic_socket, kind='VALUE')
metallic_socket = get_socket_from_gltf_material_node(blender_material, "MetallicFactor")
if isinstance(metallic_socket.socket, bpy.types.NodeSocket):
fac = get_factor_from_socket(metallic_socket, kind='VALUE')
return fac if fac != 1 else None
return None
def __gather_metallic_roughness_texture(blender_material, orm_texture, export_settings):
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic")
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness")
metallic_socket = get_socket(blender_material, "Metallic")
roughness_socket = get_socket(blender_material, "Roughness")
hasMetal = metallic_socket is not None and image_tex_is_valid_from_socket(metallic_socket)
hasRough = roughness_socket is not None and image_tex_is_valid_from_socket(roughness_socket)
hasMetal = metallic_socket.socket is not None and has_image_node_from_socket(metallic_socket, export_settings)
hasRough = roughness_socket.socket is not None and has_image_node_from_socket(roughness_socket, export_settings)
default_sockets = ()
# Warning: for default sockets, do not use the NodeSocket wrapper, because it would break the cache;
# use the Blender socket object directly
if not hasMetal and not hasRough:
metallic_roughness = gltf2_blender_get.get_socket_old(blender_material, "MetallicRoughness")
if metallic_roughness is None or not image_tex_is_valid_from_socket(metallic_roughness):
metallic_roughness = get_socket_from_gltf_material_node(blender_material, "MetallicRoughness")
if metallic_roughness is None or not has_image_node_from_socket(metallic_roughness, export_settings):
return None, {}, None
texture_input = (metallic_roughness,)
elif not hasMetal:
texture_input = (roughness_socket,)
default_sockets = (metallic_socket,)
default_sockets = (metallic_socket.socket,)
elif not hasRough:
texture_input = (metallic_socket,)
default_sockets = (roughness_socket,)
default_sockets = (roughness_socket.socket,)
else:
texture_input = (metallic_socket, roughness_socket)
default_sockets = ()
@ -158,11 +164,11 @@ def __gather_roughness_factor(blender_material, export_settings):
if not blender_material.use_nodes:
return blender_material.roughness
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness")
roughness_socket = get_socket(blender_material, "Roughness")
if roughness_socket is None:
roughness_socket = gltf2_blender_get.get_socket_old(blender_material, "RoughnessFactor")
if isinstance(roughness_socket, bpy.types.NodeSocket):
fac = gltf2_blender_get.get_factor_from_socket(roughness_socket, kind='VALUE')
roughness_socket = get_socket_from_gltf_material_node(blender_material, "RoughnessFactor")
if isinstance(roughness_socket.socket, bpy.types.NodeSocket):
fac = get_factor_from_socket(roughness_socket, kind='VALUE')
return fac if fac != 1 else None
return None

View File

@ -2,10 +2,14 @@
#
# SPDX-License-Identifier: Apache-2.0
from ....io.com.gltf2_io_extensions import Extension
from ...exp import gltf2_blender_get
from . import gltf2_blender_gather_texture_info
from .gltf2_blender_search_node_tree import get_vertex_color_info
from .gltf2_blender_search_node_tree import \
get_socket, \
NodeSocket, \
previous_socket, \
previous_node, \
get_factor_from_socket
def detect_shadeless_material(blender_material, export_settings):
"""Detect if this material is "shadeless" ie. should be exported
@ -15,8 +19,8 @@ def detect_shadeless_material(blender_material, export_settings):
if not blender_material.use_nodes: return None
# Old Background node detection (unlikely to happen)
bg_socket = gltf2_blender_get.get_socket(blender_material, "Background")
if bg_socket is not None:
bg_socket = get_socket(blender_material, "Background")
if bg_socket.socket is not None:
return {'rgb_socket': bg_socket}
# Look for
@ -27,6 +31,7 @@ def detect_shadeless_material(blender_material, export_settings):
info = {}
#TODOSNode this can be a function call
for node in blender_material.node_tree.nodes:
if node.type == 'OUTPUT_MATERIAL' and node.is_active_output:
socket = node.inputs[0]
@ -34,6 +39,8 @@ def detect_shadeless_material(blender_material, export_settings):
else:
return None
socket = NodeSocket(socket, [blender_material])
# Be careful not to misidentify a lightpath trick as mix-alpha.
result = __detect_lightpath_trick(socket)
if result is not None:
@ -49,10 +56,10 @@ def detect_shadeless_material(blender_material, export_settings):
socket = result['next_socket']
# Check if a color socket, or connected to a color socket
if socket.type != 'RGBA':
from_socket = gltf2_blender_get.previous_socket(socket)
if from_socket is None: return None
if from_socket.type != 'RGBA': return None
if socket.socket.type != 'RGBA':
from_socket = previous_socket(socket)
if from_socket.socket is None: return None
if from_socket.socket.type != 'RGBA': return None
info['rgb_socket'] = socket
return info
@ -68,13 +75,13 @@ def __detect_mix_alpha(socket):
#
# Returns None if not detected. Otherwise, a dict containing alpha_socket
# and next_socket.
prev = gltf2_blender_get.previous_node(socket)
if prev is None or prev.type != 'MIX_SHADER': return None
in1 = gltf2_blender_get.previous_node(prev.inputs[1])
if in1 is None or in1.type != 'BSDF_TRANSPARENT': return None
prev = previous_node(socket)
if prev.node is None or prev.node.type != 'MIX_SHADER': return None
in1 = previous_node(NodeSocket(prev.node.inputs[1], prev.group_path))
if in1.node is None or in1.node.type != 'BSDF_TRANSPARENT': return None
return {
'alpha_socket': prev.inputs[0],
'next_socket': prev.inputs[2],
'alpha_socket': NodeSocket(prev.node.inputs[0], prev.group_path),
'next_socket': NodeSocket(prev.node.inputs[2], prev.group_path),
}
@ -90,17 +97,17 @@ def __detect_lightpath_trick(socket):
# The Emission node can be omitted.
# Returns None if not detected. Otherwise, a dict containing
# next_socket.
prev = gltf2_blender_get.previous_node(socket)
if prev is None or prev.type != 'MIX_SHADER': return None
in0 = gltf2_blender_get.previous_socket(prev.inputs[0])
if in0 is None or in0.node.type != 'LIGHT_PATH': return None
if in0.name != 'Is Camera Ray': return None
next_socket = prev.inputs[2]
prev = previous_node(socket)
if prev.node is None or prev.node.type != 'MIX_SHADER': return None
in0 = previous_socket(NodeSocket(prev.node.inputs[0], prev.group_path))
if in0.socket is None or in0.socket.node.type != 'LIGHT_PATH': return None
if in0.socket.name != 'Is Camera Ray': return None
next_socket = NodeSocket(prev.node.inputs[2], prev.group_path)
# Detect emission
prev = gltf2_blender_get.previous_node(next_socket)
if prev is not None and prev.type == 'EMISSION':
next_socket = prev.inputs[0]
prev = previous_node(next_socket)
if prev.node is not None and prev.node.type == 'EMISSION':
next_socket = NodeSocket(prev.node.inputs[0], prev.group_path)
return {'next_socket': next_socket}
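For orientation, the "lightpath trick" that __detect_lightpath_trick looks for is the classic shadeless setup: a Light Path node's Is Camera Ray output drives a Mix Shader, whose second shader slot carries the flat color (optionally through an Emission node). A minimal sketch that builds such a material, assuming the standard bpy node identifiers:

    import bpy

    def make_lightpath_trick_material(name="ShadelessExample", color=(0.8, 0.2, 0.2, 1.0)):
        mat = bpy.data.materials.new(name)
        mat.use_nodes = True
        nt = mat.node_tree
        out = next(n for n in nt.nodes if n.type == 'OUTPUT_MATERIAL')

        light_path = nt.nodes.new('ShaderNodeLightPath')
        mix = nt.nodes.new('ShaderNodeMixShader')
        emission = nt.nodes.new('ShaderNodeEmission')
        emission.inputs['Color'].default_value = color

        # Is Camera Ray -> Mix factor, Emission -> second shader slot, Mix -> Material Output
        nt.links.new(light_path.outputs['Is Camera Ray'], mix.inputs['Fac'])
        nt.links.new(emission.outputs['Emission'], mix.inputs[2])
        nt.links.new(mix.outputs['Shader'], out.inputs['Surface'])
        return mat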
@ -109,9 +116,9 @@ def gather_base_color_factor(info, export_settings):
rgb, alpha = None, None
if 'rgb_socket' in info:
rgb = gltf2_blender_get.get_factor_from_socket(info['rgb_socket'], kind='RGB')
rgb = get_factor_from_socket(info['rgb_socket'], kind='RGB')
if 'alpha_socket' in info:
alpha = gltf2_blender_get.get_factor_from_socket(info['alpha_socket'], kind='VALUE')
alpha = get_factor_from_socket(info['alpha_socket'], kind='VALUE')
if rgb is None: rgb = [1.0, 1.0, 1.0]
if alpha is None: alpha = 1.0
@ -122,8 +129,8 @@ def gather_base_color_factor(info, export_settings):
def gather_base_color_texture(info, export_settings):
sockets = (info.get('rgb_socket'), info.get('alpha_socket'))
sockets = tuple(s for s in sockets if s is not None)
sockets = (info.get('rgb_socket', NodeSocket(None, None)), info.get('alpha_socket', NodeSocket(None, None)))
sockets = tuple(s for s in sockets if s.socket is not None)
if sockets:
# NOTE: separate RGB and Alpha textures will not get combined
# because gather_image determines how to pack images based on the

View File

@ -4,21 +4,23 @@
import typing
import bpy
from ....io.com import gltf2_io_debug
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....io.com.gltf2_io_extensions import Extension
from ....io.exp.gltf2_io_image_data import ImageData
from ....io.exp.gltf2_io_binary_data import BinaryData
from ....io.com import gltf2_io_debug
from ....io.com import gltf2_io
from ..gltf2_blender_gather_cache import cached
from ..gltf2_blender_gather_sampler import gather_sampler
from ..gltf2_blender_get import get_tex_from_socket
from ..gltf2_blender_gather_cache import cached
from .gltf2_blender_search_node_tree import get_texture_node_from_socket, NodeSocket
from . import gltf2_blender_gather_image
@cached
def gather_texture(
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
default_sockets: typing.Tuple[bpy.types.NodeSocket],
default_sockets,
export_settings):
"""
Gather texture sampling information and image channels from a blender shader texture attached to a shader socket.
@ -70,7 +72,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
ext_webp = {}
# If user want to keep original textures, and these textures are webp, we need to remove source from
# If user want to keep original textures, and these textures are WebP, we need to remove source from
# gltf2_io.Texture, and populate extension
if export_settings['gltf_keep_original_textures'] is True \
and source is not None \
@ -79,19 +81,19 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
remove_source = True
required = True
# If user want to export in webp format (so without fallback in png/jpg)
# If user want to export in WebP format (so without fallback in png/jpg)
if export_settings['gltf_image_format'] == "WEBP":
# We create all image without fallback
ext_webp["source"] = source
remove_source = True
required = True
# If user doesn't want to export in webp format, but want webp too. Texture is not webp
# If user doesn't want to export in WebP format, but want WebP too. Texture is not WebP
if export_settings['gltf_image_format'] != "WEBP" \
and export_settings['gltf_add_webp'] \
and source is not None \
and source.mime_type != "image/webp":
# We need here to create some webp textures
# We need here to create some WebP textures
new_mime_type = "image/webp"
new_data, _ = image_data.encode(new_mime_type, export_settings)
@ -116,7 +118,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
ext_webp["source"] = webp_image
# If user doesn't want to export in webp format, but want webp too. Texture is webp
# If user doesn't want to export in WebP format, but want WebP too. Texture is WebP
if export_settings['gltf_image_format'] != "WEBP" \
and source is not None \
and source.mime_type == "image/webp":
@ -127,7 +129,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
remove_source = True
required = True
# If user doesn't want to export in webp format, but want webp too as fallback. Texture is webp
# If user doesn't want to export in webp format, but want WebP too as fallback. Texture is WebP
if export_settings['gltf_image_format'] != "WEBP" \
and webp_image is not None \
and export_settings['gltf_webp_fallback'] is True:
@ -164,14 +166,33 @@ def __gather_name(blender_shader_sockets, export_settings):
def __gather_sampler(blender_shader_sockets, export_settings):
shader_nodes = [get_tex_from_socket(socket) for socket in blender_shader_sockets]
shader_nodes = [get_texture_node_from_socket(socket, export_settings) for socket in blender_shader_sockets]
if len(shader_nodes) > 1:
gltf2_io_debug.print_console("WARNING",
"More than one shader node tex image used for a texture. "
"The resulting glTF sampler will behave like the first shader node tex image.")
first_valid_shader_node = next(filter(lambda x: x is not None, shader_nodes)).shader_node
first_valid_shader_node = next(filter(lambda x: x is not None, shader_nodes))
# group_path can't be a list, so transform it to str
sep_item = "##~~gltf-sep~~##"
sep_inside_item = "##~~gltf-inside-sep~~##"
group_path_str = ""
if len(first_valid_shader_node.group_path) > 0:
group_path_str += first_valid_shader_node.group_path[0].name
if len(first_valid_shader_node.group_path) > 1:
for idx, i in enumerate(first_valid_shader_node.group_path[1:]):
group_path_str += sep_item
if idx == 0:
group_path_str += first_valid_shader_node.group_path[0].name
else:
group_path_str += i.id_data.name
group_path_str += sep_inside_item
group_path_str += i.name
return gather_sampler(
first_valid_shader_node,
first_valid_shader_node.shader_node,
group_path_str,
export_settings)
@ -209,7 +230,7 @@ def __gather_source(blender_shader_sockets, default_sockets, export_settings):
png_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings)
# We inverted the png & webp image, to have the png as main source
# We inverted the png & WebP image, to have the png as main source
return png_image, source, image_data, factor
return source, None, image_data, factor
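Because the sampler cache key cannot hold a list of node-group objects, __gather_sampler flattens group_path into a string using the two separators above. A small sketch of that serialization (illustrative, mirroring the loop shown in the hunk):

    def serialize_group_path(group_path,
                             sep_item="##~~gltf-sep~~##",
                             sep_inside="##~~gltf-inside-sep~~##"):
        # group_path[0] is the material; later entries are the group nodes that were entered.
        if not group_path:
            return ""
        out = group_path[0].name
        for idx, node in enumerate(group_path[1:]):
            owner = group_path[0].name if idx == 0 else node.id_data.name
            out += sep_item + owner + sep_inside + node.name
        return out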

View File

@ -7,12 +7,17 @@ import typing
from ....io.com import gltf2_io
from ....io.com.gltf2_io_extensions import Extension
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ...exp import gltf2_blender_get
from ..gltf2_blender_get import previous_node, get_tex_from_socket
from ..gltf2_blender_gather_sampler import detect_manual_uv_wrapping
from ..gltf2_blender_gather_cache import cached
from . import gltf2_blender_gather_texture
from . import gltf2_blender_search_node_tree
from .gltf2_blender_search_node_tree import \
get_texture_node_from_socket, \
from_socket, \
FilterByType, \
previous_node, \
get_const_from_socket, \
NodeSocket, \
get_texture_transform_from_mapping_node
# blender_shader_sockets determine the texture and primary_socket determines
# the textransform and UVMap. Ex: when combining an ORM texture, for
@ -37,7 +42,7 @@ def gather_material_occlusion_texture_info_class(primary_socket, blender_shader_
def __gather_texture_info_helper(
primary_socket: bpy.types.NodeSocket,
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
default_sockets: typing.Tuple[bpy.types.NodeSocket],
default_sockets,
kind: str,
filter_type: str,
export_settings):
@ -77,7 +82,7 @@ def __gather_texture_info_helper(
def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings):
if primary_socket is None:
return False
if get_tex_from_socket(primary_socket) is None:
if get_texture_node_from_socket(primary_socket, export_settings) is None:
return False
if not blender_shader_sockets:
return False
@ -85,12 +90,12 @@ def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, e
return False
if filter_type == "ALL":
# Check that all sockets link to texture
if any([get_tex_from_socket(socket) is None for socket in blender_shader_sockets]):
if any([get_texture_node_from_socket(socket, export_settings) is None for socket in blender_shader_sockets]):
# sockets do not lead to a texture --> discard
return False
elif filter_type == "ANY":
# Check that at least one socket link to texture
if all([get_tex_from_socket(socket) is None for socket in blender_shader_sockets]):
if all([get_texture_node_from_socket(socket, export_settings) is None for socket in blender_shader_sockets]):
return False
elif filter_type == "NONE":
# No check
@ -112,9 +117,9 @@ def __gather_extras(blender_shader_sockets, export_settings):
# MaterialNormalTextureInfo only
def __gather_normal_scale(primary_socket, export_settings):
result = gltf2_blender_search_node_tree.from_socket(
result = from_socket(
primary_socket,
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeNormalMap))
FilterByType(bpy.types.ShaderNodeNormalMap))
if not result:
return None
strengthInput = result[0].shader_node.inputs['Strength']
@ -127,11 +132,11 @@ def __gather_normal_scale(primary_socket, export_settings):
def __gather_occlusion_strength(primary_socket, export_settings):
# Look for a MixRGB node that mixes with pure white in front of
# primary_socket. The mix factor gives the occlusion strength.
node = gltf2_blender_get.previous_node(primary_socket)
if node and node.type == 'MIX' and node.blend_type == 'MIX':
fac = gltf2_blender_get.get_const_from_socket(node.inputs['Factor'], kind='VALUE')
col1 = gltf2_blender_get.get_const_from_socket(node.inputs[6], kind='RGB')
col2 = gltf2_blender_get.get_const_from_socket(node.inputs[7], kind='RGB')
node = previous_node(primary_socket)
if node and node.node.type == 'MIX' and node.node.blend_type == 'MIX':
fac = get_const_from_socket(NodeSocket(node.node.inputs['Factor'], node.group_path), kind='VALUE')
col1 = get_const_from_socket(NodeSocket(node.node.inputs[6], node.group_path), kind='RGB')
col2 = get_const_from_socket(NodeSocket(node.node.inputs[7], node.group_path), kind='RGB')
if fac is not None:
if col1 == [1.0, 1.0, 1.0] and col2 is None:
return fac
@ -153,31 +158,32 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
#
# The [UV Wrapping] is for wrap modes like MIRROR that use nodes,
# [Mapping] is for KHR_texture_transform, and [UV Map] is for texCoord.
blender_shader_node = get_tex_from_socket(primary_socket).shader_node
result_tex = get_texture_node_from_socket(primary_socket, export_settings)
blender_shader_node = result_tex.shader_node
# Skip over UV wrapping stuff (it goes in the sampler)
result = detect_manual_uv_wrapping(blender_shader_node)
result = detect_manual_uv_wrapping(blender_shader_node, result_tex.group_path)
if result:
node = previous_node(result['next_socket'])
else:
node = previous_node(blender_shader_node.inputs['Vector'])
node = previous_node(NodeSocket(blender_shader_node.inputs['Vector'], result_tex.group_path))
texture_transform = None
if node and node.type == 'MAPPING':
texture_transform = gltf2_blender_get.get_texture_transform_from_mapping_node(node)
node = previous_node(node.inputs['Vector'])
if node.node and node.node.type == 'MAPPING':
texture_transform = get_texture_transform_from_mapping_node(node)
node = previous_node(NodeSocket(node.node.inputs['Vector'], node.group_path))
uvmap_info = {}
if node and node.type == 'UVMAP' and node.uv_map:
if node.node and node.node.type == 'UVMAP' and node.node.uv_map:
uvmap_info['type'] = "Fixed"
uvmap_info['value'] = node.uv_map
uvmap_info['value'] = node.node.uv_map
elif node and node.type == 'ATTRIBUTE' \
and node.attribute_type == "GEOMETRY" \
and node.attribute_name:
elif node and node.node and node.node.type == 'ATTRIBUTE' \
and node.node.attribute_type == "GEOMETRY" \
and node.node.attribute_name:
uvmap_info['type'] = 'Attribute'
uvmap_info['value'] = node.attribute_name
uvmap_info['value'] = node.node.attribute_name
else:
uvmap_info['type'] = 'Active'
@ -187,6 +193,7 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
def check_same_size_images(
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
export_settings
) -> bool:
"""Check that all sockets leads to images of the same size."""
if not blender_shader_sockets or not all(blender_shader_sockets):
@ -194,7 +201,7 @@ def check_same_size_images(
sizes = set()
for socket in blender_shader_sockets:
tex = get_tex_from_socket(socket)
tex = get_texture_node_from_socket(socket, export_settings)
if tex is None:
return False
size = tex.shader_node.image.size

View File

@ -7,6 +7,11 @@
#
import bpy
from mathutils import Vector, Matrix
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from ...com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name, get_gltf_old_group_node_name
from ....blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf
from io_scene_gltf2.io.com import gltf2_io_debug
import typing
@ -48,13 +53,14 @@ class FilterByType(Filter):
class NodeTreeSearchResult:
def __init__(self, shader_node: bpy.types.Node, path: typing.List[bpy.types.NodeLink]):
def __init__(self, shader_node: bpy.types.Node, path: typing.List[bpy.types.NodeLink], group_path: typing.List[bpy.types.Node]):
self.shader_node = shader_node
self.path = path
self.group_path = group_path
# TODO: cache these searches
def from_socket(start_socket: bpy.types.NodeSocket,
def from_socket(start_socket: NodeTreeSearchResult,
shader_node_filter: typing.Union[Filter, typing.Callable]) -> typing.List[NodeTreeSearchResult]:
"""
Find shader nodes where the filter expression is true.
@ -66,18 +72,39 @@ def from_socket(start_socket: bpy.types.NodeSocket,
# hide implementation (especially the search path)
def __search_from_socket(start_socket: bpy.types.NodeSocket,
shader_node_filter: typing.Union[Filter, typing.Callable],
search_path: typing.List[bpy.types.NodeLink]) -> typing.List[NodeTreeSearchResult]:
search_path: typing.List[bpy.types.NodeLink],
group_path: typing.List[bpy.types.Node]) -> typing.List[NodeTreeSearchResult]:
results = []
for link in start_socket.links:
# follow the link to a shader node
linked_node = link.from_node
if linked_node.type == "GROUP":
group_output_node = [node for node in linked_node.node_tree.nodes if node.type == "GROUP_OUTPUT"][0]
socket = [sock for sock in group_output_node.inputs if sock.name == link.from_socket.name][0]
group_path.append(linked_node)
linked_results = __search_from_socket(socket, shader_node_filter, search_path + [link], group_path.copy())
if linked_results:
# add the link to the current path
search_path.append(link)
results += linked_results
continue
if linked_node.type == "GROUP_INPUT":
socket = [sock for sock in group_path[-1].inputs if sock.name == link.from_socket.name][0]
linked_results = __search_from_socket(socket, shader_node_filter, search_path + [link], group_path[:-1])
if linked_results:
# add the link to the current path
search_path.append(link)
results += linked_results
continue
# check if the node matches the filter
if shader_node_filter(linked_node):
results.append(NodeTreeSearchResult(linked_node, search_path + [link]))
results.append(NodeTreeSearchResult(linked_node, search_path + [link], group_path))
# traverse into inputs of the node
for input_socket in linked_node.inputs:
linked_results = __search_from_socket(input_socket, shader_node_filter, search_path + [link])
linked_results = __search_from_socket(input_socket, shader_node_filter, search_path + [link], group_path.copy())
if linked_results:
# add the link to the current path
search_path.append(link)
@ -85,10 +112,330 @@ def from_socket(start_socket: bpy.types.NodeSocket,
return results
if start_socket is None:
if start_socket.socket is None:
return []
return __search_from_socket(start_socket, shader_node_filter, [])
return __search_from_socket(start_socket.socket, shader_node_filter, [], start_socket.group_path)
@cached
def get_texture_node_from_socket(socket, export_settings):
result = from_socket(
socket,
FilterByType(bpy.types.ShaderNodeTexImage))
if not result:
return None
if result[0].shader_node.image is None:
return None
return result[0]
def has_image_node_from_socket(socket, export_settings):
result = get_texture_node_from_socket(socket, export_settings)
return result is not None
# return the default value of a socket, even if this socket is linked
def get_const_from_default_value_socket(socket, kind):
if kind == 'RGB':
if socket.socket.type != 'RGBA': return None
return list(socket.socket.default_value)[:3]
if kind == 'VALUE':
if socket.socket.type != 'VALUE': return None
return socket.socket.default_value
return None
#TODOSNode : @cached? If yes, we need to use the id of the node tree, as this is probably not fully hashable
# For now, not caching it. If we encounter performance issue, we will see later
def get_material_nodes(node_tree: bpy.types.NodeTree, group_path, type):
"""
For a given tree, recursively return all nodes including node groups.
"""
nodes = []
for node in [n for n in node_tree.nodes if isinstance(n, type) and not n.mute]:
nodes.append((node, group_path.copy()))
# Some weird node groups with a missing datablock can have no node_tree, so check n.node_tree (see #1797)
for node in [n for n in node_tree.nodes if n.type == "GROUP" and n.node_tree is not None and not n.mute and n.node_tree.name != get_gltf_old_group_node_name()]: # Do not enter the old glTF node group
new_group_path = group_path.copy()
new_group_path.append(node)
nodes.extend(get_material_nodes(node.node_tree, new_group_path, type))
return nodes
def get_socket_from_gltf_material_node(blender_material: bpy.types.Material, name: str):
"""
For a given material input name, retrieve the corresponding node tree socket in the special glTF node group.
:param blender_material: a blender material for which to get the socket
:param name: the name of the socket
:return: a blender NodeSocket
"""
gltf_node_group_names = [get_gltf_node_name().lower(), get_gltf_node_old_name().lower()]
if blender_material.node_tree and blender_material.use_nodes:
nodes = get_material_nodes(blender_material.node_tree, [blender_material], bpy.types.ShaderNodeGroup)
# Some weird node groups with a missing datablock can have no node_tree, so check n.node_tree (see #1797)
nodes = [n for n in nodes if n[0].node_tree is not None and ( n[0].node_tree.name.lower().startswith(get_gltf_old_group_node_name()) or n[0].node_tree.name.lower() in gltf_node_group_names)]
inputs = sum([[(input, node[1]) for input in node[0].inputs if input.name == name] for node in nodes], [])
if inputs:
return NodeSocket(inputs[0][0], inputs[0][1])
return NodeSocket(None, None)
class NodeSocket:
def __init__(self, socket, group_path):
self.socket = socket
self.group_path = group_path
class ShNode:
def __init__(self, node, group_path):
self.node = node
self.group_path = group_path
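# Editor's note (illustrative usage, not part of the module): most exporter helpers in this patch
# now return these lightweight wrappers instead of bare bpy sockets/nodes, so callers unwrap them
# explicitly and can tell which node groups were traversed to reach a socket. A minimal sketch,
# assuming a Principled-based material `mat`:
#
#     base_color = get_socket(mat, "Base Color")        # -> NodeSocket
#     if base_color.socket is not None and not base_color.socket.is_linked:
#         rgba = list(base_color.socket.default_value)
#     groups_entered = [n.name for n in base_color.group_path[1:]]   # group_path[0] is the material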
def get_node_socket(blender_material, type, name):
"""
For a given material input name, retrieve the corresponding node tree socket for a given node type.
:param blender_material: a blender material for which to get the socket
:return: a blender NodeSocket for a given type
"""
nodes = get_material_nodes(blender_material.node_tree, [blender_material], type)
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
nodes = [node for node in nodes if check_if_is_linked_to_active_output(node[0].outputs[0], node[1])]
inputs = sum([[(input, node[1]) for input in node[0].inputs if input.name == name] for node in nodes], [])
if inputs:
return NodeSocket(inputs[0][0], inputs[0][1])
return NodeSocket(None, None)
def get_socket(blender_material: bpy.types.Material, name: str, volume=False):
"""
For a given material input name, retrieve the corresponding node tree socket.
:param blender_material: a blender material for which to get the socket
:param name: the name of the socket
:return: a blender NodeSocket
"""
if blender_material.node_tree and blender_material.use_nodes:
#i = [input for input in blender_material.node_tree.inputs]
#o = [output for output in blender_material.node_tree.outputs]
if name == "Emissive":
# Check for a dedicated Emission node first, it must supersede the newer built-in one
# because the newer one is always present in all Principled BSDF materials.
emissive_socket = get_node_socket(blender_material, bpy.types.ShaderNodeEmission, "Color")
if emissive_socket.socket is not None:
return emissive_socket
# If a dedicated Emission node was not found, fall back to the Principled BSDF Emission socket.
name = "Emission Color"
type = bpy.types.ShaderNodeBsdfPrincipled
elif name == "Background":
type = bpy.types.ShaderNodeBackground
name = "Color"
else:
if volume is False:
type = bpy.types.ShaderNodeBsdfPrincipled
else:
type = bpy.types.ShaderNodeVolumeAbsorption
return get_node_socket(blender_material, type, name)
return NodeSocket(None, None)
def get_factor_from_socket(socket, kind):
"""
For baseColorFactor, metallicFactor, etc.
Get a constant value from a socket, or a constant value
from a MULTIPLY node just before the socket.
kind is either 'RGB' or 'VALUE'.
"""
fac = get_const_from_socket(socket, kind)
if fac is not None:
return fac
node = previous_node(socket)
if node.node is not None:
x1, x2 = None, None
if kind == 'RGB':
if node.node.type == 'MIX' and node.node.data_type == "RGBA" and node.node.blend_type == 'MULTIPLY':
# TODO: handle factor in inputs[0]?
x1 = get_const_from_socket(NodeSocket(node.node.inputs[6], node.group_path), kind)
x2 = get_const_from_socket(NodeSocket(node.node.inputs[7], node.group_path), kind)
if kind == 'VALUE':
if node.node.type == 'MATH' and node.node.operation == 'MULTIPLY':
x1 = get_const_from_socket(NodeSocket(node.node.inputs[0], node.group_path), kind)
x2 = get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind)
if x1 is not None and x2 is None: return x1
if x2 is not None and x1 is None: return x2
return None
def get_const_from_socket(socket, kind):
if not socket.socket.is_linked:
if kind == 'RGB':
if socket.socket.type != 'RGBA': return None
return list(socket.socket.default_value)[:3]
if kind == 'VALUE':
if socket.socket.type != 'VALUE': return None
return socket.socket.default_value
# Handle connection to a constant RGB/Value node
prev_node = previous_node(socket)
if prev_node.node is not None:
if kind == 'RGB' and prev_node.node.type == 'RGB':
return list(prev_node.node.outputs[0].default_value)[:3]
if kind == 'VALUE' and prev_node.node.type == 'VALUE':
return prev_node.node.outputs[0].default_value
return None
def previous_socket(socket: NodeSocket):
soc = socket.socket
group_path = socket.group_path.copy()
while True:
if not soc.is_linked:
return NodeSocket(None, None)
from_socket = soc.links[0].from_socket
# If we are entering a node group (from outputs)
if from_socket.node.type == "GROUP":
socket_name = from_socket.name
sockets = [n for n in from_socket.node.node_tree.nodes if n.type == "GROUP_OUTPUT"][0].inputs
socket = [s for s in sockets if s.name == socket_name][0]
group_path.append(from_socket.node)
soc = socket
continue
# If we are exiting a node group (from inputs)
if from_socket.node.type == "GROUP_INPUT":
socket_name = from_socket.name
sockets = group_path[-1].inputs
socket = [s for s in sockets if s.name == socket_name][0]
group_path = group_path[:-1]
soc = socket
continue
# Skip over reroute nodes
if from_socket.node.type == 'REROUTE':
soc = from_socket.node.inputs[0]
continue
return NodeSocket(from_socket, group_path)
def previous_node(socket: NodeSocket):
prev_socket = previous_socket(socket)
if prev_socket.socket is not None:
return ShNode(prev_socket.socket.node, prev_socket.group_path)
return ShNode(None, None)
def get_texture_transform_from_mapping_node(mapping_node):
if mapping_node.node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]:
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform because it had type " +
mapping_node.node.vector_type + "; recommend using POINT instead"
)
return None
rotation_0, rotation_1 = mapping_node.node.inputs['Rotation'].default_value[0], mapping_node.node.inputs['Rotation'].default_value[1]
if rotation_0 or rotation_1:
# TODO: can we handle this?
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform because it had non-zero "
"rotations in the X/Y direction; only a Z rotation can be exported!"
)
return None
mapping_transform = {}
mapping_transform["offset"] = [mapping_node.node.inputs['Location'].default_value[0], mapping_node.node.inputs['Location'].default_value[1]]
mapping_transform["rotation"] = mapping_node.node.inputs['Rotation'].default_value[2]
mapping_transform["scale"] = [mapping_node.node.inputs['Scale'].default_value[0], mapping_node.node.inputs['Scale'].default_value[1]]
if mapping_node.node.vector_type == "TEXTURE":
# This means use the inverse of the TRS transform.
def inverted(mapping_transform):
offset = mapping_transform["offset"]
rotation = mapping_transform["rotation"]
scale = mapping_transform["scale"]
# Inverse of a TRS is not always a TRS. This function will be right
# at least when the following don't occur.
if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5:
return None
if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5:
return None
new_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector((-offset[0], -offset[1], 1))
new_offset[0] /= scale[0]; new_offset[1] /= scale[1]
return {
"offset": new_offset[0:2],
"rotation": -rotation,
"scale": [1/scale[0], 1/scale[1]],
}
mapping_transform = inverted(mapping_transform)
if mapping_transform is None:
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform with type TEXTURE because "
"we couldn't convert it to TRS; recommend using POINT instead"
)
return None
elif mapping_node.node.vector_type == "VECTOR":
# Vectors don't get translated
mapping_transform["offset"] = [0, 0]
texture_transform = texture_transform_blender_to_gltf(mapping_transform)
if all([component == 0 for component in texture_transform["offset"]]):
del(texture_transform["offset"])
if all([component == 1 for component in texture_transform["scale"]]):
del(texture_transform["scale"])
if texture_transform["rotation"] == 0:
del(texture_transform["rotation"])
if len(texture_transform) == 0:
return None
return texture_transform
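# Editor's note (worked example, not part of the module): for a Mapping node in TEXTURE mode the
# exported KHR_texture_transform is the inverse TRS computed by inverted() above. With zero rotation
# the inverse offset is simply -offset / scale and the inverse scale is 1 / scale, e.g.
#     offset=(0.2, 0.0), rotation=0.0, scale=(2.0, 2.0)  ->  offset=(-0.1, 0.0), scale=(0.5, 0.5)
# before texture_transform_blender_to_gltf() converts it and the trivial components are dropped.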
def check_if_is_linked_to_active_output(shader_socket, group_path):
for link in shader_socket.links:
# If we are entering a node group
if link.to_node.type == "GROUP":
socket_name = link.to_socket.name
sockets = [n for n in link.to_node.node_tree.nodes if n.type == "GROUP_INPUT"][0].outputs
socket = [s for s in sockets if s.name == socket_name][0]
group_path.append(link.to_node)
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
ret = check_if_is_linked_to_active_output(socket, group_path) # recursive until find an output material node
if ret is True:
return True
continue
# If we are exiting a node group
if link.to_node.type == "GROUP_OUTPUT":
socket_name = link.to_socket.name
sockets = group_path[-1].outputs
socket = [s for s in sockets if s.name == socket_name][0]
group_path = group_path[:-1]
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
ret = check_if_is_linked_to_active_output(socket, group_path) # recursive until find an output material node
if ret is True:
return True
continue
if isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial) and link.to_node.is_active_output is True:
return True
if len(link.to_node.outputs) > 0: # ignore non active output, not having output sockets
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
ret = check_if_is_linked_to_active_output(link.to_node.outputs[0], group_path) # recursive until find an output material node
if ret is True:
return True
return False
def get_vertex_color_info(primary_socket, sockets, export_settings):
return {"color": None, "alpha": None} #TODO, placeholder for now

View File

@ -39,7 +39,7 @@ def specular(mh, location_specular,
x_specularcolor, y_specularcolor = location_specular_tint
if tex_specular_info is None:
specular_socket.default_value = specular_factor
specular_socket.default_value = specular_factor / 2.0
else:
# Mix specular factor
if specular_factor != 1.0:
@ -51,7 +51,7 @@ def specular(mh, location_specular,
mh.node_tree.links.new(specular_socket, node.outputs[0])
# Inputs
specular_socket = node.inputs[0]
node.inputs[1].default_value = specular_factor
node.inputs[1].default_value = specular_factor / 2.0
x_specular -= 200
texture(

View File

@ -135,6 +135,7 @@ class BlenderNode():
bpy.data.collections.new(BLENDER_GLTF_SPECIAL_COLLECTION)
bpy.data.scenes[gltf.blender_scene].collection.children.link(bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION])
bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_viewport = True
bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_render = True
# Create an icosphere, and assign it to the collection
bpy.ops.mesh.primitive_ico_sphere_add(radius=1, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
@ -187,7 +188,10 @@ class BlenderNode():
arma_mat = vnode.editbone_arma_mat
editbone.head = arma_mat @ Vector((0, 0, 0))
editbone.tail = arma_mat @ Vector((0, 1, 0))
editbone.length = vnode.bone_length
if gltf.import_settings['bone_heuristic'] == "BLENDER":
editbone.length = vnode.bone_length / max(blender_arma.scale)
else:
editbone.length = vnode.bone_length
editbone.align_roll(arma_mat @ Vector((0, 0, 1)) - editbone.head)
if isinstance(id, int):
@ -225,7 +229,8 @@ class BlenderNode():
if gltf.import_settings['bone_heuristic'] == "BLENDER":
pose_bone.custom_shape = bpy.data.objects[gltf.bone_shape]
pose_bone.custom_shape_scale_xyz = Vector([0.1, 0.1, 0.1])
armature_max_dim = max([blender_arma.dimensions[0] / blender_arma.scale[0], blender_arma.dimensions[1] / blender_arma.scale[1], blender_arma.dimensions[2] / blender_arma.scale[2]])
pose_bone.custom_shape_scale_xyz = Vector([armature_max_dim * 0.2] * 3)
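The bone-length and custom-shape tweaks above both compensate for object-level scaling on the armature; a rough sketch of the arithmetic with assumed example values:

arma_scale = (100.0, 100.0, 100.0)       # assumed object scale on the armature
bone_length_world = 1.0                  # length the bone should appear to have
editbone_length = bone_length_world / max(arma_scale)   # 0.01 in armature space

arma_dimensions = (200.0, 50.0, 50.0)    # assumed world-space dimensions
armature_max_dim = max(d / s for d, s in zip(arma_dimensions, arma_scale))
custom_shape_scale = armature_max_dim * 0.2   # keeps bone widgets proportional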
@staticmethod
def create_mesh_object(gltf, vnode):

View File

@ -50,9 +50,6 @@ def pbr_metallic_roughness(mh: MaterialHelper):
# This value may be overridden later if IOR extension is set on file
pbr_node.inputs['IOR'].default_value = GLTF_IOR
pbr_node.inputs['Specular IOR Level'].default_value = 0.0 # Will be overridden by KHR_materials_specular if set
pbr_node.inputs['Specular Tint'].default_value = [0.0]*3 + [1.0] # Will be overridden by KHR_materials_specular if set
if mh.pymat.occlusion_texture is not None:
if mh.settings_node is None:
mh.settings_node = make_settings_node(mh)

View File

@ -41,7 +41,7 @@ def texture(
if forced_image is None:
if mh.gltf.import_settings['import_webp_texture'] is True:
# Get the webp image if there is one
# Get the WebP image if there is one
if pytexture.extensions \
and 'EXT_texture_webp' in pytexture.extensions \
and pytexture.extensions['EXT_texture_webp']['source'] is not None:

View File

@ -47,8 +47,5 @@ class Buffer:
def to_bytes(self):
return self.__data
def to_embed_string(self):
return 'data:application/octet-stream;base64,' + base64.b64encode(self.__data).decode('ascii')
def clear(self):
self.__data = b""

View File

@ -539,7 +539,7 @@ class NWPreviewNode(Operator, NWBase):
if not viewer_socket:
# create viewer socket
viewer_socket = node.node_tree.interface.new_socket(viewer_socket_name, in_out={'OUTPUT'}, socket_type=socket_type)
viewer_socket = node.node_tree.interface.new_socket(viewer_socket_name, in_out='OUTPUT', socket_type=socket_type)
viewer_socket.NWViewerSocket = True
return viewer_socket
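The corrected call matches the Blender 4.0 node tree interface API, where in_out is a plain string rather than a set; a minimal sketch (the group and socket names are illustrative):

tree = bpy.data.node_groups["MyGroup"]   # assumed node group name
viewer = tree.interface.new_socket("Viewer", in_out='OUTPUT',
                                   socket_type='NodeSocketColor')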

View File

@ -89,10 +89,10 @@ class ActionSlot(PropertyGroup, ActionSlotBase):
target_space: EnumProperty(
name="Transform Space",
items=[("WORLD", "World Space", "World Space"),
("POSE", "Pose Space", "Pose Space"),
("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent"),
("LOCAL", "Local Space", "Local Space")],
items=[("WORLD", "World Space", "World Space", 0),
# ("POSE", "Pose Space", "Pose Space", 1),
# ("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent", 2),
("LOCAL", "Local Space", "Local Space", 3)],
default="LOCAL"
)
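Pinning explicit integer values lets the POSE and LOCAL_WITH_PARENT entries be disabled without shifting the numbers stored for the remaining items; a brief sketch of the pattern (assumed to be the motivation here):

from bpy.props import EnumProperty

# "LOCAL" keeps value 3, so data saved with the full enum still resolves to it.
target_space: EnumProperty(
    name="Transform Space",
    items=[("WORLD", "World Space", "World Space", 0),
           ("LOCAL", "Local Space", "Local Space", 3)],
    default="LOCAL",
)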

View File

@ -153,9 +153,10 @@ def pVisRotExec(bone, active, context):
def pVisScaExec(bone, active, context):
obj_bone = bone.id_data
bone.scale = getmat(bone, active, context,
not obj_bone.data.bones[bone.name].use_inherit_scale)\
.to_scale()
bone.scale = getmat(
bone, active, context,
obj_bone.data.bones[bone.name].inherit_scale not in {'NONE', 'NONE_LEGACY'}
).to_scale()
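The boolean use_inherit_scale was replaced by the inherit_scale enum; every mode other than the two "NONE" variants still inherits some scale, which is what the rewritten call tests. A hedged sketch of that check in isolation (the mode names are assumed from recent Blender versions):

bone_data = obj_bone.data.bones[bone.name]
# 'FULL', 'FIX_SHEAR', 'ALIGNED', 'AVERAGE' inherit scale; 'NONE' and 'NONE_LEGACY' do not.
inherits_scale = bone_data.inherit_scale not in {'NONE', 'NONE_LEGACY'}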
def pDrwExec(bone, active, context):

View File

@ -156,12 +156,12 @@ def draw_callback_view():
if data_euler or data_quat:
cursor = bpy.context.scene.cursor.location.copy()
derived_matrices = []
for key, quat in data_quat.values():
derived_matrices = dict()
for key, quat in data_quat.items():
matrix = quat.to_matrix().to_4x4()
matrix.translation = cursor
derived_matrices[key] = matrix
for key, eul in data_euler.values():
for key, eul in data_euler.items():
matrix = eul.to_matrix().to_4x4()
matrix.translation = cursor
derived_matrices[key] = matrix
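The old loops unpacked key/value pairs from .values(), which fails at runtime; .items() yields the pairs the body expects, and derived_matrices must be a dict for the keyed assignment. A tiny illustration:

from mathutils import Quaternion

data_quat = {"Cube": Quaternion((1.0, 0.0, 0.0, 0.0))}   # assumed sample data
derived_matrices = {}
for key, quat in data_quat.items():   # .values() would raise when unpacking
    derived_matrices[key] = quat.to_matrix().to_4x4()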

View File

@ -99,9 +99,6 @@ class VIEW3D_MT_Pose(Menu):
layout.operator("pose.quaternions_flip")
layout.operator_context = 'INVOKE_AREA'
layout.separator()
layout.operator("armature.armature_layers", text="Change Armature Layers...")
layout.operator("pose.bone_layers", text="Change Bone Layers...")
layout.separator()
layout.menu("VIEW3D_MT_pose_showhide")
layout.menu("VIEW3D_MT_bone_options_toggle", text="Bone Settings")

View File

@ -6,6 +6,7 @@
import bpy
from bpy.props import FloatProperty, FloatVectorProperty
from bpy.app.translations import pgettext_iface as iface_
import gpu
from gpu_extras.batch import batch_for_shader
from mathutils import Vector
@ -248,8 +249,8 @@ class SUNPOS_OT_ShowHdr(bpy.types.Operator):
self.initial_azimuth = context.scene.sun_pos_properties.hdr_azimuth
context.workspace.status_text_set(
"Enter/LMB: confirm, Esc/RMB: cancel,"
" MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure")
iface_("Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, "
"mouse wheel: zoom, Ctrl + mouse wheel: set exposure"))
self._handle = bpy.types.SpaceView3D.draw_handler_add(
draw_callback_px, (self, context), 'WINDOW', 'POST_PIXEL'

View File

@ -416,6 +416,19 @@ translations_tuple = (
("fr_FR", "Année",
(False, ())),
),
(("*", "Unknown projection"),
(("scripts/addons/sun_position/hdr.py:181",),
()),
("fr_FR", "Projection inconnue",
(False, ())),
),
(("*", "Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure"),
(("scripts/addons/sun_position/hdr.py:252",),
()),
("fr_FR", "Entrée/ClicG : Confirmer, Échap/ClicD : Annuler, ClicM : défiler, "
"molette : zoom, Ctrl + molette : exposition",
(False, ())),
),
(("*", "Could not find 3D View"),
(("scripts/addons/sun_position/hdr.py:263",),
()),
@ -428,12 +441,6 @@ translations_tuple = (
("fr_FR", "Veuillez utiliser un nœud de texture denvironnement",
(False, ())),
),
(("*", "Unknown projection"),
(("scripts/addons/sun_position/hdr.py:181",),
()),
("fr_FR", "Projection inconnue",
(False, ())),
),
(("*", "Show options and info:"),
(("scripts/addons/sun_position/properties.py:297",),
()),

View File

@ -344,7 +344,9 @@ class UI_OT_i18n_addon_translation_export(Operator):
if not lng.use:
print("Skipping {} language ({}).".format(lng.name, lng.uid))
continue
uid = utils_i18n.find_best_isocode_matches(lng.uid, trans.trans.keys())
translation_keys = {k for k in trans.trans.keys()
if k != self.settings.PARSER_TEMPLATE_ID}
uid = utils_i18n.find_best_isocode_matches(lng.uid, translation_keys)
if uid:
uids.append(uid[0])
@ -357,8 +359,8 @@ class UI_OT_i18n_addon_translation_export(Operator):
if not os.path.isfile(path):
continue
msgs = utils_i18n.I18nMessages(kind='PO', src=path, settings=self.settings)
msgs.update(trans.msgs[self.settings.PARSER_TEMPLATE_ID])
trans.msgs[uid] = msgs
msgs.update(trans.trans[self.settings.PARSER_TEMPLATE_ID])
trans.trans[uid] = msgs
trans.write(kind='PO', langs=set(uids))