Mesh: Update addons for auto smooth removal #104609

Merged
Hans Goudey merged 10 commits from HooglyBoogly/blender-addons:refactor-mesh-corner-normals-lazy into main 2023-10-20 16:53:48 +02:00
56 changed files with 1584 additions and 1001 deletions
Showing only changes of commit 8cd49b012f

View File

@@ -6,7 +6,7 @@ bl_info = {
"name": "Grease Pencil Tools",
"description": "Extra tools for Grease Pencil",
"author": "Samuel Bernou, Antonio Vazquez, Daniel Martinez Lara, Matias Mendiola",
"version": (1, 8, 1),
"version": (1, 8, 2),
"blender": (3, 0, 0),
"location": "Sidebar > Grease Pencil > Grease Pencil Tools",
"warning": "",

View File

@@ -49,10 +49,10 @@ def get_reduced_area_coord(context):
## minus tool leftbar + sidebar right
regs = context.area.regions
toolbar = regs[2]
sidebar = regs[3]
header = regs[0]
tool_header = regs[1]
toolbar = next((r for r in regs if r.type == 'TOOLS'), None)
sidebar = next((r for r in regs if r.type == 'UI'), None)
header = next((r for r in regs if r.type == 'HEADER'), None)
tool_header = next((r for r in regs if r.type == 'TOOL_HEADER'), None)
up_margin = down_margin = 0
if tool_header.alignment == 'TOP':
up_margin += tool_header.height
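Note (not part of the diff): the point of this change is to look regions up by their type rather than by their position in area.regions, which is not guaranteed. A minimal sketch of the same pattern, with a hypothetical helper name:

    # Illustrative only: return the first region of the requested type, or None.
    def find_region(area, region_type):
        return next((r for r in area.regions if r.type == region_type), None)

    # e.g. sidebar = find_region(context.area, 'UI')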

View File

@@ -5,8 +5,8 @@
bl_info = {
"name": "Import Images as Planes",
"author": "Florian Meyer (tstscr), mont29, matali, Ted Schundler (SpkyElctrc), mrbimax",
"version": (3, 5, 0),
"blender": (2, 91, 0),
"version": (3, 5, 1),
"blender": (4, 0, 0),
"location": "File > Import > Images as Planes or Add > Image > Images as Planes",
"description": "Imports images and creates planes with the appropriate aspect ratio. "
"The images are mapped to the planes.",
@@ -25,7 +25,10 @@ from math import pi
import bpy
from bpy.types import Operator
from bpy.app.translations import pgettext_tip as tip_
from bpy.app.translations import (
pgettext_tip as tip_,
contexts as i18n_contexts
)
from mathutils import Vector
from bpy.props import (
@@ -151,6 +154,9 @@ def load_images(filenames, directory, force_reload=False, frame_start=1, find_se
file_iter = zip(filenames, repeat(1), repeat(1))
for filename, offset, frames in file_iter:
if not os.path.isfile(bpy.path.abspath(os.path.join(directory, filename))):
continue
image = load_image(filename, directory, check_existing=True, force_reload=force_reload)
# Size is unavailable for sequences, so we grab it early
@@ -320,8 +326,8 @@ def get_shadeless_node(dest_node_tree):
output_node = node_tree.nodes.new('NodeGroupOutput')
input_node = node_tree.nodes.new('NodeGroupInput')
node_tree.outputs.new('NodeSocketShader', 'Shader')
node_tree.inputs.new('NodeSocketColor', 'Color')
node_tree.interface.new_socket('Shader', in_out='OUTPUT', socket_type='NodeSocketShader')
node_tree.interface.new_socket('Color', in_out='INPUT', socket_type='NodeSocketColor')
# This could be faster as a transparent shader, but then no ambient occlusion
diffuse_shader = node_tree.nodes.new('ShaderNodeBsdfDiffuse')
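Side note (assumption, not from this diff): node_tree.inputs/outputs were replaced by the interface API in Blender 4.0, which is what the change above adopts. A minimal sketch of building a comparable node group with the new calls (group name is made up):

    import bpy

    group = bpy.data.node_groups.new("DemoShadeless", 'ShaderNodeTree')
    group.interface.new_socket('Shader', in_out='OUTPUT', socket_type='NodeSocketShader')
    group.interface.new_socket('Color', in_out='INPUT', socket_type='NodeSocketColor')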
@@ -731,7 +737,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
('HASHED', "Hashed","Use noise to dither the binary visibility (works well with multi-samples)"),
('OPAQUE', "Opaque","Render surface without transparency"),
)
blend_method: EnumProperty(name="Blend Mode", items=BLEND_METHODS, default='BLEND', description="Blend Mode for Transparent Faces")
blend_method: EnumProperty(
name="Blend Mode", items=BLEND_METHODS, default='BLEND',
description="Blend Mode for Transparent Faces", translation_context=i18n_contexts.id_material)
SHADOW_METHODS = (
('CLIP', "Clip","Use the alpha threshold to clip the visibility (binary visibility)"),
@@ -739,7 +747,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
('OPAQUE',"Opaque","Material will cast shadows without transparency"),
('NONE',"None","Material will cast no shadow"),
)
shadow_method: EnumProperty(name="Shadow Mode", items=SHADOW_METHODS, default='CLIP', description="Shadow mapping method")
shadow_method: EnumProperty(
name="Shadow Mode", items=SHADOW_METHODS, default='CLIP',
description="Shadow mapping method", translation_context=i18n_contexts.id_material)
use_backface_culling: BoolProperty(
name="Backface Culling", default=False,
@@ -923,11 +933,11 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
if context.active_object and context.active_object.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
self.import_images(context)
ret_code = self.import_images(context)
context.preferences.edit.use_enter_edit_mode = editmode
return {'FINISHED'}
return ret_code
def import_images(self, context):
@@ -939,6 +949,10 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
find_sequences=self.image_sequence
))
if not images:
self.report({'WARNING'}, "Please select at least an image.")
return {'CANCELLED'}
# Create individual planes
planes = [self.single_image_spec_to_plane(context, img_spec) for img_spec in images]
@@ -962,6 +976,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
# all done!
self.report({'INFO'}, tip_("Added {} Image Plane(s)").format(len(planes)))
return {'FINISHED'}
# operate on a single image
def single_image_spec_to_plane(self, context, img_spec):
@@ -1079,7 +1094,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
if self.shader in {'PRINCIPLED', 'SHADELESS'}:
node_tree.links.new(core_shader.inputs[0], tex_image.outputs['Color'])
elif self.shader == 'EMISSION':
node_tree.links.new(core_shader.inputs['Emission'], tex_image.outputs['Color'])
node_tree.links.new(core_shader.inputs['Emission Color'], tex_image.outputs['Color'])
if self.use_transparency:
if self.shader in {'PRINCIPLED', 'EMISSION'}:
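For reference, the Principled BSDF exposes its emission input as 'Emission Color' in Blender 4.0, which is what the rename above targets. A small hypothetical sketch of wiring a texture into it (material and node names are the defaults Blender creates, not part of this diff):

    import bpy

    mat = bpy.data.materials.new("EmissionDemo")  # hypothetical material
    mat.use_nodes = True
    nodes, links = mat.node_tree.nodes, mat.node_tree.links
    tex = nodes.new('ShaderNodeTexImage')
    links.new(nodes["Principled BSDF"].inputs['Emission Color'], tex.outputs['Color'])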

View File

@@ -5,7 +5,7 @@
bl_info = {
"name": "UV Layout",
"author": "Campbell Barton, Matt Ebb",
"version": (1, 1, 6),
"version": (1, 2, 0),
"blender": (3, 0, 0),
"location": "UV Editor > UV > Export UV Layout",
"description": "Export the UV layout as a 2D graphic",
@@ -30,6 +30,8 @@ if "bpy" in locals():
import os
import bpy
from bpy.app.translations import contexts as i18n_contexts
from bpy.props import (
StringProperty,
BoolProperty,
@@ -54,10 +56,24 @@ class ExportUVLayout(bpy.types.Operator):
description="Export all UVs in this mesh (not just visible ones)",
default=False,
)
export_tiles: EnumProperty(
name="Export Tiles",
items=(
('NONE', "None",
"Export only UVs in the [0, 1] range"),
('UDIM', "UDIM",
"Export tiles in the UDIM numbering scheme: 1001 + u-tile + 10*v-tile"),
('UV', "UVTILE",
"Export tiles in the UVTILE numbering scheme: u(u-tile + 1)_v(v-tile + 1)"),
),
description="Choose whether to export only the [0, 1 range], or all UV tiles",
default='NONE',
)
modified: BoolProperty(
name="Modified",
description="Exports UVs from the modified mesh",
default=False,
translation_context=i18n_contexts.id_mesh,
)
mode: EnumProperty(
items=(
@@ -73,6 +89,7 @@ class ExportUVLayout(bpy.types.Operator):
default='PNG',
)
size: IntVectorProperty(
name="Size",
size=2,
default=(1024, 1024),
min=8, max=32768,
@@ -123,9 +140,6 @@ class ExportUVLayout(bpy.types.Operator):
if is_editmode:
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
filepath = self.filepath
filepath = bpy.path.ensure_ext(filepath, "." + self.mode.lower())
meshes = list(self.iter_meshes_to_export(context))
polygon_data = list(self.iter_polygon_data_to_draw(context, meshes))
different_colors = set(color for _, color in polygon_data)
@@ -135,8 +149,35 @@ class ExportUVLayout(bpy.types.Operator):
obj_eval = obj.evaluated_get(depsgraph)
obj_eval.to_mesh_clear()
tiles = self.tiles_to_export(polygon_data)
export = self.get_exporter()
export(filepath, polygon_data, different_colors, self.size[0], self.size[1], self.opacity)
dirname, filename = os.path.split(self.filepath)
# Strip UDIM or UV numbering, and extension
import re
name_regex = r"^(.*?)"
udim_regex = r"(?:\.[0-9]{4})?"
uv_regex = r"(?:\.u[0-9]+_v[0-9]+)?"
ext_regex = r"(?:\.png|\.eps|\.svg)?$"
if self.export_tiles == 'NONE':
match = re.match(name_regex + ext_regex, filename)
elif self.export_tiles == 'UDIM':
match = re.match(name_regex + udim_regex + ext_regex, filename)
elif self.export_tiles == 'UV':
match = re.match(name_regex + uv_regex + ext_regex, filename)
if match:
filename = match.groups()[0]
for tile in sorted(tiles):
filepath = os.path.join(dirname, filename)
if self.export_tiles == 'UDIM':
filepath += f".{1001 + tile[0] + tile[1] * 10:04}"
elif self.export_tiles == 'UV':
filepath += f".u{tile[0] + 1}_v{tile[1] + 1}"
filepath = bpy.path.ensure_ext(filepath, "." + self.mode.lower())
export(filepath, tile, polygon_data, different_colors,
self.size[0], self.size[1], self.opacity)
if is_editmode:
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
@@ -161,6 +202,30 @@ class ExportUVLayout(bpy.types.Operator):
continue
yield obj
def tiles_to_export(self, polygon_data):
"""Get a set of tiles containing UVs.
This assumes there is no UV edge crossing an otherwise empty tile.
"""
if self.export_tiles == 'NONE':
return {(0, 0)}
from math import floor
tiles = set()
for poly in polygon_data:
for uv in poly[0]:
# Ignore UVs at corners - precisely touching the right or upper edge
# of a tile should not load its right/upper neighbor as well.
# From intern/cycles/scene/attribute.cpp
u, v = uv[0], uv[1]
x, y = floor(u), floor(v)
if x > 0 and u < x + 1e-6:
x -= 1
if y > 0 and v < y + 1e-6:
y -= 1
if x >= 0 and y >= 0:
tiles.add((x, y))
return tiles
@staticmethod
def currently_image_image_editor(context):
return isinstance(context.space_data, bpy.types.SpaceImageEditor)
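As a side note, the UDIM number encoded in the export loop above is 1001 + u-tile + 10 * v-tile, and the UVTILE suffix is u(u+1)_v(v+1). A standalone sketch of the naming (helper name is hypothetical, plain Python):

    def tile_suffix(tile, scheme='UDIM'):
        u, v = tile
        if scheme == 'UDIM':
            return f".{1001 + u + v * 10:04}"   # (0, 0) -> .1001, (1, 2) -> .1022
        return f".u{u + 1}_v{v + 1}"            # UVTILE: (0, 0) -> .u1_v1

    print(tile_suffix((1, 2)))        # .1022
    print(tile_suffix((0, 0), 'UV'))  # .u1_v1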

View File

@@ -5,19 +5,19 @@
import bpy
def export(filepath, face_data, colors, width, height, opacity):
def export(filepath, tile, face_data, colors, width, height, opacity):
with open(filepath, 'w', encoding='utf-8') as file:
for text in get_file_parts(face_data, colors, width, height, opacity):
for text in get_file_parts(tile, face_data, colors, width, height, opacity):
file.write(text)
def get_file_parts(face_data, colors, width, height, opacity):
def get_file_parts(tile, face_data, colors, width, height, opacity):
yield from header(width, height)
if opacity > 0.0:
name_by_color = {}
yield from prepare_colors(colors, name_by_color)
yield from draw_colored_polygons(face_data, name_by_color, width, height)
yield from draw_lines(face_data, width, height)
yield from draw_colored_polygons(tile, face_data, name_by_color, width, height)
yield from draw_lines(tile, face_data, width, height)
yield from footer()
@@ -53,24 +53,24 @@ def prepare_colors(colors, out_name_by_color):
yield "} def\n"
def draw_colored_polygons(face_data, name_by_color, width, height):
def draw_colored_polygons(tile, face_data, name_by_color, width, height):
for uvs, color in face_data:
yield from draw_polygon_path(uvs, width, height)
yield from draw_polygon_path(tile, uvs, width, height)
yield "closepath\n"
yield "%s\n" % name_by_color[color]
def draw_lines(face_data, width, height):
def draw_lines(tile, face_data, width, height):
for uvs, _ in face_data:
yield from draw_polygon_path(uvs, width, height)
yield from draw_polygon_path(tile, uvs, width, height)
yield "closepath\n"
yield "stroke\n"
def draw_polygon_path(uvs, width, height):
def draw_polygon_path(tile, uvs, width, height):
yield "newpath\n"
for j, uv in enumerate(uvs):
uv_scale = (uv[0] * width, uv[1] * height)
uv_scale = ((uv[0] - tile[0]) * width, (uv[1] - tile[1]) * height)
if j == 0:
yield "%.5f %.5f moveto\n" % uv_scale
else:

View File

@@ -15,14 +15,14 @@ except ImportError:
oiio = None
def export(filepath, face_data, colors, width, height, opacity):
def export(filepath, tile, face_data, colors, width, height, opacity):
offscreen = gpu.types.GPUOffScreen(width, height)
offscreen.bind()
try:
fb = gpu.state.active_framebuffer_get()
fb.clear(color=(0.0, 0.0, 0.0, 0.0))
draw_image(face_data, opacity)
draw_image(tile, face_data, opacity)
pixel_data = fb.read_color(0, 0, width, height, 4, 0, 'UBYTE')
pixel_data.dimensions = width * height * 4
@@ -32,11 +32,11 @@ def export(filepath, face_data, colors, width, height, opacity):
offscreen.free()
def draw_image(face_data, opacity):
def draw_image(tile, face_data, opacity):
gpu.state.blend_set('ALPHA')
with gpu.matrix.push_pop():
gpu.matrix.load_matrix(get_normalize_uvs_matrix())
gpu.matrix.load_matrix(get_normalize_uvs_matrix(tile))
gpu.matrix.load_projection_matrix(Matrix.Identity(4))
draw_background_colors(face_data, opacity)
@@ -45,11 +45,11 @@ def draw_image(face_data, opacity):
gpu.state.blend_set('NONE')
def get_normalize_uvs_matrix():
def get_normalize_uvs_matrix(tile):
'''matrix maps x and y coordinates from [0, 1] to [-1, 1]'''
matrix = Matrix.Identity(4)
matrix.col[3][0] = -1
matrix.col[3][1] = -1
matrix.col[3][0] = -1 - (tile[0]) * 2
matrix.col[3][1] = -1 - (tile[1]) * 2
matrix[0][0] = 2
matrix[1][1] = 2
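The new translation generalizes the original [0, 1] to [-1, 1] mapping to [tile, tile + 1] to [-1, 1], i.e. x = 2*u - 2*tile_u - 1. A quick standalone check of that arithmetic (plain Python, no Blender needed; helper name is made up):

    def map_u(u, tile_u):
        # scale 2, translation -1 - 2*tile_u, as in the matrix above
        return 2 * u - (1 + 2 * tile_u)

    assert map_u(1.0, 1) == -1.0  # left edge of tile column 1
    assert map_u(2.0, 1) == 1.0   # right edge of tile column 1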

View File

@@ -7,15 +7,15 @@ from os.path import basename
from xml.sax.saxutils import escape
def export(filepath, face_data, colors, width, height, opacity):
def export(filepath, tile, face_data, colors, width, height, opacity):
with open(filepath, 'w', encoding='utf-8') as file:
for text in get_file_parts(face_data, colors, width, height, opacity):
for text in get_file_parts(tile, face_data, colors, width, height, opacity):
file.write(text)
def get_file_parts(face_data, colors, width, height, opacity):
def get_file_parts(tile, face_data, colors, width, height, opacity):
yield from header(width, height)
yield from draw_polygons(face_data, width, height, opacity)
yield from draw_polygons(tile, face_data, width, height, opacity)
yield from footer()
@@ -29,7 +29,7 @@ def header(width, height):
yield f'<desc>{escape(desc)}</desc>\n'
def draw_polygons(face_data, width, height, opacity):
def draw_polygons(tile, face_data, width, height, opacity):
for uvs, color in face_data:
fill = f'fill="{get_color_string(color)}"'
@@ -39,7 +39,7 @@ def draw_polygons(face_data, width, height, opacity):
yield ' points="'
for uv in uvs:
x, y = uv[0], 1.0 - uv[1]
x, y = uv[0] - tile[0], 1.0 - uv[1] + tile[1]
yield f'{x*width:.3f},{y*height:.3f} '
yield '" />\n'

View File

@@ -1599,17 +1599,6 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
mesh_version.add_variable("mesh", _3ds_uint(3))
object_info.add_subchunk(mesh_version)
# Add MASTERSCALE element
mscale = _3ds_chunk(MASTERSCALE)
mscale.add_variable("scale", _3ds_float(1.0))
object_info.add_subchunk(mscale)
# Add 3D cursor location
if use_cursor:
cursor_chunk = _3ds_chunk(O_CONSTS)
cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location))
object_info.add_subchunk(cursor_chunk)
# Init main keyframe data chunk
if use_keyframes:
revision = 0x0005
@@ -1618,92 +1607,6 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
curtime = scene.frame_current
kfdata = make_kfdata(revision, start, stop, curtime)
# Add AMBIENT color
if world is not None and 'WORLD' in object_filter:
ambient_chunk = _3ds_chunk(AMBIENTLIGHT)
ambient_light = _3ds_chunk(RGB)
ambient_light.add_variable("ambient", _3ds_float_color(world.color))
ambient_chunk.add_subchunk(ambient_light)
object_info.add_subchunk(ambient_chunk)
# Add BACKGROUND and BITMAP
if world.use_nodes:
bgtype = 'BACKGROUND'
ntree = world.node_tree.links
background_color_chunk = _3ds_chunk(RGB)
background_chunk = _3ds_chunk(SOLIDBACKGND)
background_flag = _3ds_chunk(USE_SOLIDBGND)
bgmixer = 'BACKGROUND', 'MIX', 'MIX_RGB'
bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD'
bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT'
bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color)
bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype)
bg_image = next((lk.from_node.image for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False)
gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False)
background_color_chunk.add_variable("color", _3ds_float_color(bg_color))
background_chunk.add_subchunk(background_color_chunk)
if bg_image:
background_image = _3ds_chunk(BITMAP)
background_flag = _3ds_chunk(USE_BITMAP)
background_image.add_variable("image", _3ds_string(sane_name(bg_image.name)))
object_info.add_subchunk(background_image)
object_info.add_subchunk(background_chunk)
# Add VGRADIENT chunk
if gradient and len(gradient) >= 3:
gradient_chunk = _3ds_chunk(VGRADIENT)
background_flag = _3ds_chunk(USE_VGRADIENT)
gradient_chunk.add_variable("midpoint", _3ds_float(gradient[1].position))
gradient_topcolor_chunk = _3ds_chunk(RGB)
gradient_topcolor_chunk.add_variable("color", _3ds_float_color(gradient[2].color[:3]))
gradient_chunk.add_subchunk(gradient_topcolor_chunk)
gradient_midcolor_chunk = _3ds_chunk(RGB)
gradient_midcolor_chunk.add_variable("color", _3ds_float_color(gradient[1].color[:3]))
gradient_chunk.add_subchunk(gradient_midcolor_chunk)
gradient_lowcolor_chunk = _3ds_chunk(RGB)
gradient_lowcolor_chunk.add_variable("color", _3ds_float_color(gradient[0].color[:3]))
gradient_chunk.add_subchunk(gradient_lowcolor_chunk)
object_info.add_subchunk(gradient_chunk)
object_info.add_subchunk(background_flag)
# Add FOG
fognode = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_ABSORPTION' and lk.to_socket.node.type in bgshade), False)
if fognode:
fog_chunk = _3ds_chunk(FOG)
fog_color_chunk = _3ds_chunk(RGB)
use_fog_flag = _3ds_chunk(USE_FOG)
fog_density = fognode.inputs['Density'].default_value * 100
fog_color_chunk.add_variable("color", _3ds_float_color(fognode.inputs[0].default_value[:3]))
fog_chunk.add_variable("nearplane", _3ds_float(world.mist_settings.start))
fog_chunk.add_variable("nearfog", _3ds_float(fog_density * 0.5))
fog_chunk.add_variable("farplane", _3ds_float(world.mist_settings.depth))
fog_chunk.add_variable("farfog", _3ds_float(fog_density + fog_density * 0.5))
fog_chunk.add_subchunk(fog_color_chunk)
object_info.add_subchunk(fog_chunk)
# Add LAYER FOG
foglayer = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_SCATTER' and lk.to_socket.node.type in bgshade), False)
if foglayer:
layerfog_flag = 0
if world.mist_settings.falloff == 'QUADRATIC':
layerfog_flag |= 0x1
if world.mist_settings.falloff == 'INVERSE_QUADRATIC':
layerfog_flag |= 0x2
layerfog_chunk = _3ds_chunk(LAYER_FOG)
layerfog_color_chunk = _3ds_chunk(RGB)
use_fog_flag = _3ds_chunk(USE_LAYER_FOG)
layerfog_color_chunk.add_variable("color", _3ds_float_color(foglayer.inputs[0].default_value[:3]))
layerfog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start))
layerfog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.height))
layerfog_chunk.add_variable("density", _3ds_float(foglayer.inputs[1].default_value))
layerfog_chunk.add_variable("flags", _3ds_uint(layerfog_flag))
layerfog_chunk.add_subchunk(layerfog_color_chunk)
object_info.add_subchunk(layerfog_chunk)
if fognode or foglayer and layer.use_pass_mist:
object_info.add_subchunk(use_fog_flag)
if use_keyframes and world.animation_data or world.node_tree.animation_data:
kfdata.add_subchunk(make_ambient_node(world))
# Make a list of all materials used in the selected meshes (use dictionary, each material is added once)
materialDict = {}
mesh_objects = []
@@ -1772,10 +1675,107 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
f.material_index = 0
# Make material chunks for all materials used in the meshes
# Make MATERIAL chunks for all materials used in the meshes
for ma_image in materialDict.values():
object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1]))
# Add MASTERSCALE element
mscale = _3ds_chunk(MASTERSCALE)
mscale.add_variable("scale", _3ds_float(1.0))
object_info.add_subchunk(mscale)
# Add 3D cursor location
if use_cursor:
cursor_chunk = _3ds_chunk(O_CONSTS)
cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location))
object_info.add_subchunk(cursor_chunk)
# Add AMBIENT color
if world is not None and 'WORLD' in object_filter:
ambient_chunk = _3ds_chunk(AMBIENTLIGHT)
ambient_light = _3ds_chunk(RGB)
ambient_light.add_variable("ambient", _3ds_float_color(world.color))
ambient_chunk.add_subchunk(ambient_light)
object_info.add_subchunk(ambient_chunk)
# Add BACKGROUND and BITMAP
if world.use_nodes:
bgtype = 'BACKGROUND'
ntree = world.node_tree.links
background_color_chunk = _3ds_chunk(RGB)
background_chunk = _3ds_chunk(SOLIDBACKGND)
background_flag = _3ds_chunk(USE_SOLIDBGND)
bgmixer = 'BACKGROUND', 'MIX', 'MIX_RGB'
bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD'
bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT'
bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color)
bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype)
bg_image = next((lk.from_node.image for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False)
gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False)
background_color_chunk.add_variable("color", _3ds_float_color(bg_color))
background_chunk.add_subchunk(background_color_chunk)
if bg_image and bg_image is not None:
background_image = _3ds_chunk(BITMAP)
background_flag = _3ds_chunk(USE_BITMAP)
background_image.add_variable("image", _3ds_string(sane_name(bg_image.name)))
object_info.add_subchunk(background_image)
object_info.add_subchunk(background_chunk)
# Add VGRADIENT chunk
if gradient and len(gradient) >= 3:
gradient_chunk = _3ds_chunk(VGRADIENT)
background_flag = _3ds_chunk(USE_VGRADIENT)
gradient_chunk.add_variable("midpoint", _3ds_float(gradient[1].position))
gradient_topcolor_chunk = _3ds_chunk(RGB)
gradient_topcolor_chunk.add_variable("color", _3ds_float_color(gradient[2].color[:3]))
gradient_chunk.add_subchunk(gradient_topcolor_chunk)
gradient_midcolor_chunk = _3ds_chunk(RGB)
gradient_midcolor_chunk.add_variable("color", _3ds_float_color(gradient[1].color[:3]))
gradient_chunk.add_subchunk(gradient_midcolor_chunk)
gradient_lowcolor_chunk = _3ds_chunk(RGB)
gradient_lowcolor_chunk.add_variable("color", _3ds_float_color(gradient[0].color[:3]))
gradient_chunk.add_subchunk(gradient_lowcolor_chunk)
object_info.add_subchunk(gradient_chunk)
object_info.add_subchunk(background_flag)
# Add FOG
fognode = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_ABSORPTION' and lk.to_socket.node.type in bgshade), False)
if fognode:
fog_chunk = _3ds_chunk(FOG)
fog_color_chunk = _3ds_chunk(RGB)
use_fog_flag = _3ds_chunk(USE_FOG)
fog_density = fognode.inputs['Density'].default_value * 100
fog_color_chunk.add_variable("color", _3ds_float_color(fognode.inputs[0].default_value[:3]))
fog_chunk.add_variable("nearplane", _3ds_float(world.mist_settings.start))
fog_chunk.add_variable("nearfog", _3ds_float(fog_density * 0.5))
fog_chunk.add_variable("farplane", _3ds_float(world.mist_settings.depth))
fog_chunk.add_variable("farfog", _3ds_float(fog_density + fog_density * 0.5))
fog_chunk.add_subchunk(fog_color_chunk)
object_info.add_subchunk(fog_chunk)
# Add LAYER FOG
foglayer = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_SCATTER' and lk.to_socket.node.type in bgshade), False)
if foglayer:
layerfog_flag = 0
if world.mist_settings.falloff == 'QUADRATIC':
layerfog_flag |= 0x1
if world.mist_settings.falloff == 'INVERSE_QUADRATIC':
layerfog_flag |= 0x2
layerfog_chunk = _3ds_chunk(LAYER_FOG)
layerfog_color_chunk = _3ds_chunk(RGB)
use_fog_flag = _3ds_chunk(USE_LAYER_FOG)
layerfog_color_chunk.add_variable("color", _3ds_float_color(foglayer.inputs[0].default_value[:3]))
layerfog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start))
layerfog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.height))
layerfog_chunk.add_variable("density", _3ds_float(foglayer.inputs[1].default_value))
layerfog_chunk.add_variable("flags", _3ds_uint(layerfog_flag))
layerfog_chunk.add_subchunk(layerfog_color_chunk)
object_info.add_subchunk(layerfog_chunk)
if fognode or foglayer and layer.use_pass_mist:
object_info.add_subchunk(use_fog_flag)
if use_keyframes and world.animation_data or (world.node_tree and world.node_tree.animation_data):
kfdata.add_subchunk(make_ambient_node(world))
# Collect translation for transformation matrix
translation = {}
rotation = {}
@@ -1929,9 +1929,8 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
if ob.parent is None or (ob.parent.name not in object_id):
obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
else:
if ob.parent is not None and (ob.parent.name in object_id):
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
# Get the parent ID from the object_id dict
obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
object_chunk.add_subchunk(obj_hierarchy_chunk)
@@ -1967,9 +1966,8 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
if ob.parent is None or (ob.parent.name not in object_id):
obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
else:
if ob.parent is not None and (ob.parent.name in object_id):
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
# Get the parent ID from the object_id dict
obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
object_chunk.add_subchunk(obj_hierarchy_chunk)

View File

@@ -244,7 +244,7 @@ def skip_to_end(file, skip_chunk):
# MATERIALS #
#############
def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tintcolor, mapto):
def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tint1, tint2, mapto):
shader = contextWrapper.node_principled_bsdf
nodetree = contextWrapper.material.node_tree
shader.location = (-300, 0)
@@ -256,26 +256,31 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
mixer.label = "Mixer"
mixer.inputs[0].default_value = pct / 100
mixer.inputs[1].default_value = (
tintcolor[:3] + [1] if tintcolor else
shader.inputs['Base Color'].default_value[:]
)
tint1[:3] + [1] if tint1 else shader.inputs['Base Color'].default_value[:])
contextWrapper._grid_to_location(1, 2, dst_node=mixer, ref_node=shader)
img_wrap = contextWrapper.base_color_texture
links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
links.new(mixer.outputs['Color'], shader.inputs['Base Color'])
elif mapto == 'SPECULARITY':
img_wrap = contextWrapper.specular_tint_texture
elif mapto == 'ALPHA':
shader.location = (0, -300)
img_wrap = contextWrapper.alpha_texture
if tint2 is not None:
img_wrap.colorspace_name = 'Non-Color'
mixer.inputs[2].default_value = tint2[:3] + [1]
links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[0])
else:
links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
elif mapto == 'ROUGHNESS':
img_wrap = contextWrapper.roughness_texture
elif mapto == 'METALLIC':
shader.location = (300,300)
img_wrap = contextWrapper.metallic_texture
elif mapto == 'ROUGHNESS':
elif mapto == 'SPECULARITY':
shader.location = (300,0)
img_wrap = contextWrapper.roughness_texture
img_wrap = contextWrapper.specular_tint_texture
elif mapto == 'ALPHA':
shader.location = (-300,0)
img_wrap = contextWrapper.alpha_texture
img_wrap.use_alpha = False
links.new(img_wrap.node_image.outputs['Color'], img_wrap.socket_dst)
elif mapto == 'EMISSION':
shader.location = (-300, -600)
shader.location = (0,-900)
img_wrap = contextWrapper.emission_color_texture
elif mapto == 'NORMAL':
shader.location = (300, 300)
@@ -310,10 +315,12 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
img_wrap.extension = 'CLIP'
if alpha == 'alpha':
own_node = img_wrap.node_image
contextWrapper.material.blend_method = 'HASHED'
links.new(own_node.outputs['Alpha'], img_wrap.socket_dst)
for link in links:
if link.from_node.type == 'TEX_IMAGE' and link.to_node.type == 'MIX_RGB':
tex = link.from_node.image.name
own_node = img_wrap.node_image
own_map = img_wrap.node_mapping
if tex == image.name:
links.new(link.from_node.outputs['Alpha'], img_wrap.socket_dst)
@@ -323,9 +330,6 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
if imgs.name[-3:].isdigit():
if not imgs.users:
bpy.data.images.remove(imgs)
else:
links.new(img_wrap.node_image.outputs['Alpha'], img_wrap.socket_dst)
contextWrapper.material.blend_method = 'HASHED'
shader.location = (300, 300)
contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)
@@ -518,7 +522,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
def read_texture(new_chunk, temp_chunk, name, mapto):
uscale, vscale, uoffset, voffset, angle = 1.0, 1.0, 0.0, 0.0, 0.0
contextWrapper.use_nodes = True
tintcolor = None
tint1 = tint2 = None
extend = 'wrap'
alpha = False
pct = 70
@@ -542,14 +546,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
img = load_image(texture_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True)
temp_chunk.bytes_read += read_str_len # plus one for the null character that gets removed
elif temp_chunk.ID == MAT_MAP_USCALE:
uscale = read_float(temp_chunk)
elif temp_chunk.ID == MAT_BUMP_PERCENT:
contextWrapper.normalmap_strength = (float(read_short(temp_chunk) / 100))
elif temp_chunk.ID == MAT_MAP_VSCALE:
vscale = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_UOFFSET:
uoffset = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_VOFFSET:
voffset = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_TILING:
"""Control bit flags, where 0x1 activates decaling, 0x2 activates mirror,
@@ -578,11 +576,20 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
if tiling & 0x200:
tint = 'RGBtint'
elif temp_chunk.ID == MAT_MAP_USCALE:
uscale = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_VSCALE:
vscale = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_UOFFSET:
uoffset = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_VOFFSET:
voffset = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_ANG:
angle = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_COL1:
tintcolor = read_byte_color(temp_chunk)
tint1 = read_byte_color(temp_chunk)
elif temp_chunk.ID == MAT_MAP_COL2:
tint2 = read_byte_color(temp_chunk)
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
@@ -590,7 +597,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
# add the map to the material in the right channel
if img:
add_texture_to_material(img, contextWrapper, pct, extend, alpha, (uscale, vscale, 1),
(uoffset, voffset, 0), angle, tintcolor, mapto)
(uoffset, voffset, 0), angle, tint1, tint2, mapto)
def apply_constrain(vec):
convector = mathutils.Vector.Fill(3, (CONSTRAIN * 0.1))
@@ -641,8 +648,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
temp_data = file.read(SZ_U_INT * 2)
track_chunk.bytes_read += SZ_U_INT * 2
nkeys = read_long(track_chunk)
if nkeys == 0:
keyframe_data[0] = default_data
for i in range(nkeys):
nframe = read_long(track_chunk)
nflags = read_short(track_chunk)
@@ -657,8 +662,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
temp_data = file.read(SZ_U_SHORT * 5)
track_chunk.bytes_read += SZ_U_SHORT * 5
nkeys = read_long(track_chunk)
if nkeys == 0:
keyframe_angle[0] = default_value
for i in range(nkeys):
nframe = read_long(track_chunk)
nflags = read_short(track_chunk)
@@ -815,7 +818,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
if contextWorld is None:
path, filename = os.path.split(file.name)
realname, ext = os.path.splitext(filename)
newWorld = bpy.data.worlds.new("Fog: " + realname)
contextWorld = bpy.data.worlds.new("Fog: " + realname)
context.scene.world = contextWorld
contextWorld.use_nodes = True
links = contextWorld.node_tree.links
@@ -1332,7 +1335,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'AMBIENT': # Ambient
keyframe_data = {}
default_data = child.color[:]
keyframe_data[0] = child.color[:]
child.color = read_track_data(new_chunk)[0]
ambinode.inputs[0].default_value[:3] = child.color
ambilite.outputs[0].default_value[:3] = child.color
@@ -1346,7 +1349,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'LIGHT': # Color
keyframe_data = {}
default_data = child.data.color[:]
keyframe_data[0] = child.data.color[:]
child.data.color = read_track_data(new_chunk)[0]
for keydata in keyframe_data.items():
child.data.color = keydata[1]
@@ -1355,7 +1358,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'OBJECT': # Translation
keyframe_data = {}
default_data = child.location[:]
keyframe_data[0] = child.location[:]
child.location = read_track_data(new_chunk)[0]
if child.type in {'LIGHT', 'CAMERA'}:
trackposition[0] = child.location
@@ -1384,6 +1387,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'TARGET': # Target position
keyframe_data = {}
location = child.location
keyframe_data[0] = trackposition[0]
target = mathutils.Vector(read_track_data(new_chunk)[0])
direction = calc_target(location, target)
child.rotation_euler.x = direction[0]
@@ -1407,12 +1411,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and tracktype == 'OBJECT': # Rotation
keyframe_rotation = {}
keyframe_rotation[0] = child.rotation_axis_angle[:]
tflags = read_short(new_chunk)
temp_data = file.read(SZ_U_INT * 2)
new_chunk.bytes_read += SZ_U_INT * 2
nkeys = read_long(new_chunk)
if nkeys == 0:
keyframe_rotation[0] = child.rotation_axis_angle[:]
if tflags & 0x8: # Flag 0x8 locks X axis
child.lock_rotation[0] = True
if tflags & 0x10: # Flag 0x10 locks Y axis
@@ -1445,7 +1448,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and tracktype == 'OBJECT': # Scale
keyframe_data = {}
default_data = child.scale[:]
keyframe_data[0] = child.scale[:]
child.scale = read_track_data(new_chunk)[0]
if contextTrack_flag & 0x8: # Flag 0x8 locks X axis
child.lock_scale[0] = True
@@ -1465,7 +1468,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracktype == 'OBJECT': # Roll angle
keyframe_angle = {}
default_value = child.rotation_euler.y
keyframe_angle[0] = child.rotation_euler.y
child.rotation_euler.y = read_track_angle(new_chunk)[0]
for keydata in keyframe_angle.items():
child.rotation_euler.y = keydata[1]
@@ -1475,7 +1478,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == FOV_TRACK_TAG and tracking == 'CAMERA': # Field of view
keyframe_angle = {}
default_value = child.data.angle
keyframe_angle[0] = child.data.angle
child.data.angle = read_track_angle(new_chunk)[0]
for keydata in keyframe_angle.items():
child.data.lens = (child.data.sensor_width / 2) / math.tan(keydata[1] / 2)
@@ -1484,7 +1487,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == HOTSPOT_TRACK_TAG and tracking == 'LIGHT' and spotting == 'SPOT': # Hotspot
keyframe_angle = {}
cone_angle = math.degrees(child.data.spot_size)
default_value = cone_angle-(child.data.spot_blend * math.floor(cone_angle))
keyframe_angle[0] = cone_angle-(child.data.spot_blend * math.floor(cone_angle))
hot_spot = math.degrees(read_track_angle(new_chunk)[0])
child.data.spot_blend = 1.0 - (hot_spot / cone_angle)
for keydata in keyframe_angle.items():
@@ -1493,7 +1496,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == FALLOFF_TRACK_TAG and tracking == 'LIGHT' and spotting == 'SPOT': # Falloff
keyframe_angle = {}
default_value = math.degrees(child.data.spot_size)
keyframe_angle[0] = math.degrees(child.data.spot_size)
child.data.spot_size = read_track_angle(new_chunk)[0]
for keydata in keyframe_angle.items():
child.data.spot_size = keydata[1]

View File

@@ -5,7 +5,7 @@
bl_info = {
"name": "FBX format",
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
"version": (5, 8, 3),
"version": (5, 8, 8),
"blender": (3, 6, 0),
"location": "File > Import-Export",
"description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",

View File

@@ -250,8 +250,7 @@ class FBXElem:
for elem in self.elems:
offset = elem._calc_offsets(offset, (elem is elem_last))
offset += _BLOCK_SENTINEL_LENGTH
elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
if not is_last:
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
offset += _BLOCK_SENTINEL_LENGTH
return offset
@@ -282,8 +281,7 @@
assert(elem.id != b'')
elem._write(write, tell, (elem is elem_last))
write(_BLOCK_SENTINEL_DATA)
elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
if not is_last:
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
write(_BLOCK_SENTINEL_DATA)

View File

@@ -1807,17 +1807,15 @@ def fbx_data_armature_elements(root, arm_obj, scene_data):
elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION)
elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0) # Only vague idea what it is...
# Pre-process vertex weights (also to check vertices assigned to more than four bones).
# Pre-process vertex weights so that the vertices only need to be iterated once.
ob = ob_obj.bdata
bo_vg_idx = {bo_obj.bdata.name: ob.vertex_groups[bo_obj.bdata.name].index
for bo_obj in clusters.keys() if bo_obj.bdata.name in ob.vertex_groups}
valid_idxs = set(bo_vg_idx.values())
vgroups = {vg.index: {} for vg in ob.vertex_groups}
verts_vgroups = (sorted(((vg.group, vg.weight) for vg in v.groups if vg.weight and vg.group in valid_idxs),
key=lambda e: e[1], reverse=True)
for v in me.vertices)
for idx, vgs in enumerate(verts_vgroups):
for vg_idx, w in vgs:
for idx, v in enumerate(me.vertices):
for vg in v.groups:
if (w := vg.weight) and (vg_idx := vg.group) in valid_idxs:
vgroups[vg_idx][idx] = w
for bo_obj, clstr_key in clusters.items():
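The rewritten pre-pass gathers weights per vertex group in a single iteration over the vertices. A plain-Python sketch of the same pattern with made-up data (no Blender required):

    # vertices[i] maps vertex-group index -> weight for vertex i
    vertices = [{0: 0.8, 2: 0.2}, {1: 0.0, 2: 1.0}]
    valid_idxs = {0, 2}
    vgroups = {0: {}, 1: {}, 2: {}}
    for idx, groups in enumerate(vertices):
        for vg_idx, w in groups.items():
            if w and vg_idx in valid_idxs:
                vgroups[vg_idx][idx] = w
    assert vgroups == {0: {0: 0.8}, 1: {}, 2: {0: 0.2, 1: 1.0}}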

View File

@@ -1318,42 +1318,154 @@ class AnimationCurveNodeWrapper:
min_reldiff_fac = fac * 1.0e-3 # min relative value evolution: 0.1% of current 'order of magnitude'.
min_absdiff_fac = 0.1 # A tenth of reldiff...
are_keyed = []
for values, frame_write_mask in zip(self._frame_values_array, self._frame_write_mask_array):
# Initialise to no frames written.
frame_write_mask[:] = False
# Create views of the 'previous' and 'current' mask and values. The memoryview, .data, of each array is used
# for its iteration and indexing performance compared to the array.
key = values[1:].data
p_key = values[:-1].data
key_write = frame_write_mask[1:].data
p_key_write = frame_write_mask[:-1].data
p_keyedval = values[0]
is_keyed = False
for idx, (val, p_val) in enumerate(zip(key, p_key)):
if val == p_val:
# Never write keyframe when value is exactly the same as prev one!
continue
# This is contracted form of relative + absolute-near-zero difference:
# absdiff = abs(a - b)
# if absdiff < min_reldiff_fac * min_absdiff_fac:
# return False
# return (absdiff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac
# Note that we ignore the '/ 2' part here, since it's not much significant for us.
if abs(val - p_val) > (min_reldiff_fac * max(abs(val) + abs(p_val), min_absdiff_fac)):
# If enough difference from previous sampled value, key this value *and* the previous one!
key_write[idx] = True
p_key_write[idx] = True
p_keyedval = val
is_keyed = True
elif abs(val - p_keyedval) > (min_reldiff_fac * max((abs(val) + abs(p_keyedval)), min_absdiff_fac)):
# Else, if enough difference from previous keyed value, key this value only!
key_write[idx] = True
p_keyedval = val
is_keyed = True
are_keyed.append(is_keyed)
# Initialise to no values enabled for writing.
self._frame_write_mask_array[:] = False
# Values are enabled for writing if they differ enough from either of their adjacent values or if they differ
# enough from the closest previous value that is enabled due to either of these conditions.
for sampled_values, enabled_mask in zip(self._frame_values_array, self._frame_write_mask_array):
# Create overlapping views of the 'previous' (all but the last) and 'current' (all but the first)
# `sampled_values` and `enabled_mask`.
# Calculate absolute values from `sampled_values` so that the 'previous' and 'current' absolute arrays can
# be views into the same array instead of separately calculated arrays.
abs_sampled_values = np.abs(sampled_values)
# 'previous' views.
p_val_view = sampled_values[:-1]
p_abs_val_view = abs_sampled_values[:-1]
p_enabled_mask_view = enabled_mask[:-1]
# 'current' views.
c_val_view = sampled_values[1:]
c_abs_val_view = abs_sampled_values[1:]
c_enabled_mask_view = enabled_mask[1:]
# If enough difference from previous sampled value, enable the current value *and* the previous one!
# The difference check is symmetrical, so this will compare each value to both of its adjacent values.
# Unless it is forcefully enabled later, this is the only way that the first value can be enabled.
# This is a contracted form of relative + absolute-near-zero difference:
# def is_different(a, b):
# abs_diff = abs(a - b)
# if abs_diff < min_reldiff_fac * min_absdiff_fac:
# return False
# return (abs_diff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac
# Note that we ignore the '/ 2' part here, since it's not much significant for us.
# Contracted form using only builtin Python functions:
# return abs(a - b) > (min_reldiff_fac * max(abs(a) + abs(b), min_absdiff_fac))
abs_diff = np.abs(c_val_view - p_val_view)
different_if_greater_than = min_reldiff_fac * np.maximum(c_abs_val_view + p_abs_val_view, min_absdiff_fac)
enough_diff_p_val_mask = abs_diff > different_if_greater_than
# Enable both the current values *and* the previous values where `enough_diff_p_val_mask` is True. Some
# values may get set to True twice because the views overlap, but this is not a problem.
p_enabled_mask_view[enough_diff_p_val_mask] = True
c_enabled_mask_view[enough_diff_p_val_mask] = True
# Else, if enough difference from previous enabled value, enable the current value only!
# For each 'current' value, get the index of the nearest previous enabled value in `sampled_values` (or
# itself if the value is enabled).
# Start with an array that is the index of the 'current' value in `sampled_values`. The 'current' values are
# all but the first value, so the indices will be from 1 to `len(sampled_values)` exclusive.
# Let len(sampled_values) == 9:
# [1, 2, 3, 4, 5, 6, 7, 8]
p_enabled_idx_in_sampled_values = np.arange(1, len(sampled_values))
# Replace the indices of all disabled values with 0 in preparation of filling them in with the index of the
# nearest previous enabled value. We choose to replace with 0 so that if there is no nearest previous
# enabled value, we instead default to `sampled_values[0]`.
c_val_disabled_mask = ~c_enabled_mask_view
# Let `c_val_disabled_mask` be:
# [F, F, T, F, F, T, T, T]
# Set indices to 0 where `c_val_disabled_mask` is True:
# [1, 2, 3, 4, 5, 6, 7, 8]
# v v v v
# [1, 2, 0, 4, 5, 0, 0, 0]
p_enabled_idx_in_sampled_values[c_val_disabled_mask] = 0
# Accumulative maximum travels across the array from left to right, filling in the zeroed indices with the
# maximum value so far, which will be the closest previous enabled index because the non-zero indices are
# strictly increasing.
# [1, 2, 0, 4, 5, 0, 0, 0]
# v v v v
# [1, 2, 2, 4, 5, 5, 5, 5]
p_enabled_idx_in_sampled_values = np.maximum.accumulate(p_enabled_idx_in_sampled_values)
# Only disabled values need to be checked against their nearest previous enabled values.
# We can additionally ignore all values which equal their immediately previous value because those values
# will never be enabled if they were not enabled by the earlier difference check against immediately
# previous values.
p_enabled_diff_to_check_mask = np.logical_and(c_val_disabled_mask, p_val_view != c_val_view)
# Convert from a mask to indices because we need the indices later and because the array of indices will
# usually be smaller than the mask array making it faster to index other arrays with.
p_enabled_diff_to_check_idx = np.flatnonzero(p_enabled_diff_to_check_mask)
# `p_enabled_idx_in_sampled_values` from earlier:
# [1, 2, 2, 4, 5, 5, 5, 5]
# `p_enabled_diff_to_check_mask` assuming no values equal their immediately previous value:
# [F, F, T, F, F, T, T, T]
# `p_enabled_diff_to_check_idx`:
# [ 2, 5, 6, 7]
# `p_enabled_idx_in_sampled_values_to_check`:
# [ 2, 5, 5, 5]
p_enabled_idx_in_sampled_values_to_check = p_enabled_idx_in_sampled_values[p_enabled_diff_to_check_idx]
# Get the 'current' disabled values that need to be checked.
c_val_to_check = c_val_view[p_enabled_diff_to_check_idx]
c_abs_val_to_check = c_abs_val_view[p_enabled_diff_to_check_idx]
# Get the nearest previous enabled value for each value to be checked.
nearest_p_enabled_val = sampled_values[p_enabled_idx_in_sampled_values_to_check]
abs_nearest_p_enabled_val = np.abs(nearest_p_enabled_val)
# Check the relative + absolute-near-zero difference again, but against the nearest previous enabled value
# this time.
abs_diff = np.abs(c_val_to_check - nearest_p_enabled_val)
different_if_greater_than = (min_reldiff_fac
* np.maximum(c_abs_val_to_check + abs_nearest_p_enabled_val, min_absdiff_fac))
enough_diff_p_enabled_val_mask = abs_diff > different_if_greater_than
# If there are any that are different enough from the previous enabled value, then we have to check them all
# iteratively because enabling a new value can change the nearest previous enabled value of some elements,
# which changes their relative + absolute-near-zero difference:
# `p_enabled_diff_to_check_idx`:
# [2, 5, 6, 7]
# `p_enabled_idx_in_sampled_values_to_check`:
# [2, 5, 5, 5]
# Let `enough_diff_p_enabled_val_mask` be:
# [F, F, T, T]
# The first index that is newly enabled is 6:
# [2, 5,>6<,5]
# But 6 > 5, so the next value's nearest previous enabled index is also affected:
# [2, 5, 6,>6<]
# We had calculated a newly enabled index of 7 too, but that was calculated against the old nearest previous
# enabled index of 5, which has now been updated to 6, so whether 7 is enabled or not needs to be
# recalculated:
# [F, F, T, ?]
if np.any(enough_diff_p_enabled_val_mask):
# Accessing .data, the memoryview of the array, iteratively or by individual index is faster than doing
# the same with the array itself.
zipped = zip(p_enabled_diff_to_check_idx.data,
c_val_to_check.data,
c_abs_val_to_check.data,
p_enabled_idx_in_sampled_values_to_check.data,
enough_diff_p_enabled_val_mask.data)
# While iterating, we could set updated values into `enough_diff_p_enabled_val_mask` as we go and then
# update `enabled_mask` in bulk after the iteration, but if we're going to update an array while
# iterating, we may as well update `enabled_mask` directly instead and skip the bulk update.
# Additionally, the number of `True` writes to `enabled_mask` is usually much less than the number of
# updates that would be required to `enough_diff_p_enabled_val_mask`.
c_enabled_mask_view_mv = c_enabled_mask_view.data
# While iterating, keep track of the most recent newly enabled index, so we can tell when we need to
# recalculate whether the current value needs to be enabled.
new_p_enabled_idx = -1
# Keep track of its value too for performance.
new_p_enabled_val = -1
new_abs_p_enabled_val = -1
for cur_idx, c_val, c_abs_val, old_p_enabled_idx, enough_diff in zipped:
if new_p_enabled_idx > old_p_enabled_idx:
# The nearest previous enabled value is newly enabled and was not included when
# `enough_diff_p_enabled_val_mask` was calculated, so whether the current value is different
# enough needs to be recalculated using the newly enabled value.
# Check if the relative + absolute-near-zero difference is enough to enable this value.
enough_diff = (abs(c_val - new_p_enabled_val)
> (min_reldiff_fac * max(c_abs_val + new_abs_p_enabled_val, min_absdiff_fac)))
if enough_diff:
# The current value needs to be enabled.
c_enabled_mask_view_mv[cur_idx] = True
# Update the index and values for this newly enabled value.
new_p_enabled_idx = cur_idx
new_p_enabled_val = c_val
new_abs_p_enabled_val = c_abs_val
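Note: the accumulative-maximum trick described above is the heart of this vectorised pass, so here is a minimal, self-contained NumPy sketch of the same idea on made-up sample data (array names are illustrative, not the exporter's): it first enables values that differ enough from their immediate neighbour, then uses np.maximum.accumulate to find, for every 'current' value, the index of the nearest previous enabled sample.

import numpy as np

# Hypothetical sampled curve values (not taken from the exporter).
sampled_values = np.array([0.0, 0.0, 0.05, 0.05, 0.05, 1.0, 1.0, 1.0, 1.0])
min_reldiff_fac = 1.0e-3
min_absdiff_fac = 1.0e-6

enabled = np.zeros(len(sampled_values), dtype=bool)
prev_vals, cur_vals = sampled_values[:-1], sampled_values[1:]

# Symmetric relative + absolute-near-zero difference against the immediate neighbour.
abs_diff = np.abs(cur_vals - prev_vals)
threshold = min_reldiff_fac * np.maximum(np.abs(cur_vals) + np.abs(prev_vals), min_absdiff_fac)
big_diff = abs_diff > threshold
enabled[1:][big_diff] = True   # enable the 'current' values...
enabled[:-1][big_diff] = True  # ...and their 'previous' neighbours.

# For each 'current' value, index of the nearest previous enabled sample (0 as fallback).
nearest_prev_enabled = np.arange(1, len(sampled_values))
nearest_prev_enabled[~enabled[1:]] = 0
nearest_prev_enabled = np.maximum.accumulate(nearest_prev_enabled)

print(enabled)
print(nearest_prev_enabled)  # [1 2 2 4 5 5 5 5], matching the walkthrough above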
# If we write nothing (action doing nothing) and are in 'force_keep' mode, we key everything! :P # If we write nothing (action doing nothing) and are in 'force_keep' mode, we key everything! :P
# See T41766. # See T41766.
@ -1362,7 +1474,9 @@ class AnimationCurveNodeWrapper:
# one key in this case. # one key in this case.
# See T41719, T41605, T41254... # See T41719, T41605, T41254...
if self.force_keying or (force_keep and not self): if self.force_keying or (force_keep and not self):
are_keyed[:] = [True] * len(are_keyed) are_keyed = [True] * len(self._frame_write_mask_array)
else:
are_keyed = np.any(self._frame_write_mask_array, axis=1)
# If we did key something, ensure first and last sampled values are keyed as well. # If we did key something, ensure first and last sampled values are keyed as well.
if self.force_startend_keying: if self.force_startend_keying:

View File

@ -2780,7 +2780,9 @@ class FbxImportHelperNode:
for i, w in combined_weights.items(): for i, w in combined_weights.items():
indices.append(i) indices.append(i)
if len(w) > 1: if len(w) > 1:
weights.append(sum(w) / len(w)) # Add ignored child weights to the current bone's weight.
# XXX - Weights that sum to more than 1.0 get clamped to 1.0 when set in the vertex group.
weights.append(sum(w))
else: else:
weights.append(w[0]) weights.append(w[0])
@ -3464,31 +3466,56 @@ def load(operator, context, filepath="",
def _(): def _():
fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxShape')) fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxShape'))
# - FBX | - Blender equivalent
# Mesh | `Mesh`
# BlendShape | `Key`
# BlendShapeChannel | `ShapeKey`, but without its `.data`.
# Shape | `ShapeKey.data`, but also includes normals and the values are relative to the base Mesh
# | instead of being absolute. The data is sparse, so each Shape has an "Indexes" array too.
# | FBX 2020 introduced 'Modern Style' Shapes that also support tangents, binormals, vertex
# | colors and UVs, and can be absolute values instead of relative, but 'Modern Style' Shapes
# | are not currently supported.
#
# The FBX connections between Shapes and Meshes form multiple many-many relationships:
# Mesh >-< BlendShape >-< BlendShapeChannel >-< Shape
# In practice, the relationships are almost never many-many and are more typically 1-many or 1-1:
# Mesh --- BlendShape:
# usually 1-1 and the FBX SDK might enforce that each BlendShape is connected to at most one Mesh.
# BlendShape --< BlendShapeChannel:
# usually 1-many.
# BlendShapeChannel --- or uncommonly --< Shape:
# usually 1-1, but 1-many is a documented feature.
def connections_gen(c_src_uuid, fbx_id, fbx_type):
"""Helper to reduce duplicate code"""
# Rarely, an imported FBX file will have duplicate connections. For Shape Key related connections, FBX
# appears to ignore the duplicates, or overwrite the existing duplicates such that the end result is the
# same as ignoring them, so keep a set of the seen connections and ignore any duplicates.
seen_connections = set()
for c_dst_uuid, ctype in fbx_connection_map.get(c_src_uuid, ()):
if ctype.props[0] != b'OO':
# 'Object-Object' connections only.
continue
fbx_data, bl_data = fbx_table_nodes.get(c_dst_uuid, (None, None))
if fbx_data is None or fbx_data.id != fbx_id or fbx_data.props[2] != fbx_type:
# Either `c_dst_uuid` doesn't exist, or it has a different id or type.
continue
connection_key = (c_src_uuid, c_dst_uuid)
if connection_key in seen_connections:
# The connection is a duplicate, skip it.
continue
seen_connections.add(connection_key)
yield c_dst_uuid, fbx_data, bl_data
mesh_to_shapes = {} mesh_to_shapes = {}
for s_uuid, s_item in fbx_table_nodes.items(): for s_uuid, (fbx_sdata, _bl_sdata) in fbx_table_nodes.items():
fbx_sdata, bl_sdata = s_item = fbx_table_nodes.get(s_uuid, (None, None))
if fbx_sdata is None or fbx_sdata.id != b'Geometry' or fbx_sdata.props[2] != b'Shape': if fbx_sdata is None or fbx_sdata.id != b'Geometry' or fbx_sdata.props[2] != b'Shape':
continue continue
# shape -> blendshapechannel -> blendshape -> mesh. # shape -> blendshapechannel -> blendshape -> mesh.
for bc_uuid, bc_ctype in fbx_connection_map.get(s_uuid, ()): for bc_uuid, fbx_bcdata, _bl_bcdata in connections_gen(s_uuid, b'Deformer', b'BlendShapeChannel'):
if bc_ctype.props[0] != b'OO': for bs_uuid, _fbx_bsdata, _bl_bsdata in connections_gen(bc_uuid, b'Deformer', b'BlendShape'):
continue for m_uuid, _fbx_mdata, bl_mdata in connections_gen(bs_uuid, b'Geometry', b'Mesh'):
fbx_bcdata, _bl_bcdata = fbx_table_nodes.get(bc_uuid, (None, None))
if fbx_bcdata is None or fbx_bcdata.id != b'Deformer' or fbx_bcdata.props[2] != b'BlendShapeChannel':
continue
for bs_uuid, bs_ctype in fbx_connection_map.get(bc_uuid, ()):
if bs_ctype.props[0] != b'OO':
continue
fbx_bsdata, _bl_bsdata = fbx_table_nodes.get(bs_uuid, (None, None))
if fbx_bsdata is None or fbx_bsdata.id != b'Deformer' or fbx_bsdata.props[2] != b'BlendShape':
continue
for m_uuid, m_ctype in fbx_connection_map.get(bs_uuid, ()):
if m_ctype.props[0] != b'OO':
continue
fbx_mdata, bl_mdata = fbx_table_nodes.get(m_uuid, (None, None))
if fbx_mdata is None or fbx_mdata.id != b'Geometry' or fbx_mdata.props[2] != b'Mesh':
continue
# Blenmeshes are assumed already created at that time! # Blenmeshes are assumed already created at that time!
assert(isinstance(bl_mdata, bpy.types.Mesh)) assert(isinstance(bl_mdata, bpy.types.Mesh))
# Group shapes by mesh so that each mesh only needs to be processed once for all of its shape # Group shapes by mesh so that each mesh only needs to be processed once for all of its shape

View File

@ -5,7 +5,7 @@
bl_info = { bl_info = {
'name': 'glTF 2.0 format', 'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors', 'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
"version": (4, 0, 32), "version": (4, 1, 12),
'blender': (4, 0, 0), 'blender': (4, 0, 0),
'location': 'File > Import-Export', 'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0', 'description': 'Import-Export as glTF 2.0',
@ -144,13 +144,10 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
'Most efficient and portable, but more difficult to edit later'), 'Most efficient and portable, but more difficult to edit later'),
('GLTF_SEPARATE', 'glTF Separate (.gltf + .bin + textures)', ('GLTF_SEPARATE', 'glTF Separate (.gltf + .bin + textures)',
'Exports multiple files, with separate JSON, binary and texture data. ' 'Exports multiple files, with separate JSON, binary and texture data. '
'Easiest to edit later'), 'Easiest to edit later')),
('GLTF_EMBEDDED', 'glTF Embedded (.gltf)',
'Exports a single file, with all data packed in JSON. '
'Less efficient than binary, but easier to edit later')),
description=( description=(
'Output format and embedding options. Binary is most efficient, ' 'Output format. Binary is most efficient, '
'but JSON (embedded or separate) may be easier to edit later' 'but JSON may be easier to edit later'
), ),
default='GLB', #Warning => If you change the default, need to change the default filter too default='GLB', #Warning => If you change the default, need to change the default filter too
update=on_export_format_changed, update=on_export_format_changed,
@ -174,13 +171,13 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_image_format: EnumProperty( export_image_format: EnumProperty(
name='Images', name='Images',
items=(('AUTO', 'Automatic', items=(('AUTO', 'Automatic',
'Save PNGs as PNGs, JPEGs as JPEGs, WEBPs as WEBPs. ' 'Save PNGs as PNGs, JPEGs as JPEGs, WebPs as WebPs. '
'If neither one, use PNG'), 'For other formats, use PNG'),
('JPEG', 'JPEG Format (.jpg)', ('JPEG', 'JPEG Format (.jpg)',
'Save images as JPEGs. (Images that need alpha are saved as PNGs though.) ' 'Save images as JPEGs. (Images that need alpha are saved as PNGs though.) '
'Be aware of a possible loss in quality'), 'Be aware of a possible loss in quality'),
('WEBP', 'Webp Format', ('WEBP', 'WebP Format',
'Save images as WEBPs as main image (no fallback)'), 'Save images as WebPs as main image (no fallback)'),
('NONE', 'None', ('NONE', 'None',
'Don\'t export images'), 'Don\'t export images'),
), ),
@ -192,18 +189,18 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
) )
export_image_add_webp: BoolProperty( export_image_add_webp: BoolProperty(
name='Create Webp', name='Create WebP',
description=( description=(
"Creates webp textures for every textures. " "Creates WebP textures for every texture. "
"For already webp textures, nothing happen" "For already WebP textures, nothing happens"
), ),
default=False default=False
) )
export_image_webp_fallback: BoolProperty( export_image_webp_fallback: BoolProperty(
name='Webp fallback', name='WebP fallback',
description=( description=(
"For all webp textures, create a PNG fallback texture." "For all WebP textures, create a PNG fallback texture"
), ),
default=False default=False
) )
@ -476,6 +473,21 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
default=False default=False
) )
export_hierarchy_flatten_objs: BoolProperty(
name='Flatten Object Hierarchy',
description='Flatten Object Hierarchy. Useful in case of non-decomposable transformation matrix',
default=False
)
export_armature_object_remove: BoolProperty(
name='Remove Armature Object',
description=(
'Remove Armature object if possible. '
'If the Armature has multiple root bones, the object will not be removed'
),
default=False
)
export_optimize_animation_size: BoolProperty( export_optimize_animation_size: BoolProperty(
name='Optimize Animation Size', name='Optimize Animation Size',
description=( description=(
@ -641,7 +653,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_try_sparse_sk: BoolProperty( export_try_sparse_sk: BoolProperty(
name='Use Sparse Accessor if better', name='Use Sparse Accessor if better',
description='Try using Sparce Accessor if it save space', description='Try using Sparse Accessor if it saves space',
default=True default=True
) )
@ -654,8 +666,8 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_gpu_instances: BoolProperty( export_gpu_instances: BoolProperty(
name='GPU Instances', name='GPU Instances',
description='Export using EXT_mesh_gpu_instancing. ' description='Export using EXT_mesh_gpu_instancing. '
'Limited to children of a same Empty. ' 'Limited to children of a given Empty. '
'multiple Materials might be omitted', 'Multiple materials might be omitted',
default=False default=False
) )
@ -821,6 +833,8 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_settings['gltf_animations'] = self.export_animations export_settings['gltf_animations'] = self.export_animations
export_settings['gltf_def_bones'] = self.export_def_bones export_settings['gltf_def_bones'] = self.export_def_bones
export_settings['gltf_flatten_bones_hierarchy'] = self.export_hierarchy_flatten_bones export_settings['gltf_flatten_bones_hierarchy'] = self.export_hierarchy_flatten_bones
export_settings['gltf_flatten_obj_hierarchy'] = self.export_hierarchy_flatten_objs
export_settings['gltf_armature_object_remove'] = self.export_armature_object_remove
if self.export_animations: if self.export_animations:
export_settings['gltf_frame_range'] = self.export_frame_range export_settings['gltf_frame_range'] = self.export_frame_range
export_settings['gltf_force_sampling'] = self.export_force_sampling export_settings['gltf_force_sampling'] = self.export_force_sampling
@ -1054,6 +1068,7 @@ class GLTF_PT_export_data_scene(bpy.types.Panel):
sfile = context.space_data sfile = context.space_data
operator = sfile.active_operator operator = sfile.active_operator
layout.prop(operator, 'export_gpu_instances') layout.prop(operator, 'export_gpu_instances')
layout.prop(operator, 'export_hierarchy_flatten_objs')
class GLTF_PT_export_data_mesh(bpy.types.Panel): class GLTF_PT_export_data_mesh(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER' bl_space_type = 'FILE_BROWSER'
@ -1279,6 +1294,8 @@ class GLTF_PT_export_data_armature(bpy.types.Panel):
if operator.export_force_sampling is False and operator.export_def_bones is True: if operator.export_force_sampling is False and operator.export_def_bones is True:
layout.label(text="Export only deformation bones is not possible when not sampling animation") layout.label(text="Export only deformation bones is not possible when not sampling animation")
row = layout.row() row = layout.row()
row.prop(operator, 'export_armature_object_remove')
row = layout.row()
row.prop(operator, 'export_hierarchy_flatten_bones') row.prop(operator, 'export_hierarchy_flatten_bones')
class GLTF_PT_export_data_compression(bpy.types.Panel): class GLTF_PT_export_data_compression(bpy.types.Panel):
@ -1648,7 +1665,7 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
items=( items=(
("BLENDER", "Blender (best for import/export round trip)", ("BLENDER", "Blender (best for import/export round trip)",
"Good for re-importing glTFs exported from Blender, " "Good for re-importing glTFs exported from Blender, "
"and re-exporting glTFs to glTFs after Blender editing" "and re-exporting glTFs to glTFs after Blender editing. "
"Bone tips are placed on their local +Y axis (in glTF space)"), "Bone tips are placed on their local +Y axis (in glTF space)"),
("TEMPERANCE", "Temperance (average)", ("TEMPERANCE", "Temperance (average)",
"Decent all-around strategy. " "Decent all-around strategy. "
@ -1674,10 +1691,10 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
) )
import_webp_texture: BoolProperty( import_webp_texture: BoolProperty(
name='Import Webp textures', name='Import WebP textures',
description=( description=(
"If a texture exists in webp format," "If a texture exists in WebP format, "
"loads the webp texture instead of the fallback png/jpg one" "loads the WebP texture instead of the fallback PNG/JPEG one"
), ),
default=False, default=False,
) )

View File

@ -8,6 +8,10 @@ import bpy
def get_gltf_node_old_name(): def get_gltf_node_old_name():
return "glTF Settings" return "glTF Settings"
# Old group name
def get_gltf_old_group_node_name():
return "glTF Metallic Roughness"
def get_gltf_node_name(): def get_gltf_node_name():
return "glTF Material Output" return "glTF Material Output"

View File

@ -34,6 +34,11 @@ def gather_actions_animations(export_settings):
# Do not manage not exported objects # Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None: if vtree.nodes[obj_uuid].node is None:
if export_settings["gltf_armature_object_remove"] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue continue
animations_, merged_tracks = gather_action_animations(obj_uuid, merged_tracks, len(animations), export_settings) animations_, merged_tracks = gather_action_animations(obj_uuid, merged_tracks, len(animations), export_settings)
@ -63,6 +68,11 @@ def prepare_actions_range(export_settings):
# Do not manage not exported objects # Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None: if vtree.nodes[obj_uuid].node is None:
if export_settings["gltf_armature_object_remove"] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue continue
if obj_uuid not in export_settings['ranges']: if obj_uuid not in export_settings['ranges']:
@ -168,6 +178,11 @@ def prepare_actions_range(export_settings):
# Do not manage not exported objects # Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None: if vtree.nodes[obj_uuid].node is None:
if export_settings['gltf_armature_object_remove'] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue continue
blender_actions = __get_blender_actions(obj_uuid, export_settings) blender_actions = __get_blender_actions(obj_uuid, export_settings)

View File

@ -35,6 +35,11 @@ def gather_scene_animations(export_settings):
# Do not manage not exported objects # Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None: if vtree.nodes[obj_uuid].node is None:
if export_settings['gltf_armature_object_remove'] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue continue
blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

View File

@ -22,6 +22,11 @@ def gather_tracks_animations(export_settings):
# Do not manage not exported objects # Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None: if vtree.nodes[obj_uuid].node is None:
if export_settings['gltf_armature_object_remove'] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue continue
animations_, merged_tracks = gather_track_animations(obj_uuid, merged_tracks, len(animations), export_settings) animations_, merged_tracks = gather_track_animations(obj_uuid, merged_tracks, len(animations), export_settings)

View File

@ -119,6 +119,10 @@ def get_cache_data(path: str,
# Bone has a parent, but in export, after filter, is at root of armature # Bone has a parent, but in export, after filter, is at root of armature
matrix = blender_bone.matrix.copy() matrix = blender_bone.matrix.copy()
# Because there is no armature object, we need to apply the TRS of armature to the root bone
if export_settings['gltf_armature_object_remove'] is True:
matrix = matrix @ blender_obj.matrix_world
if blender_obj.animation_data and blender_obj.animation_data.action \ if blender_obj.animation_data and blender_obj.animation_data.action \
and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:
if blender_bone.name not in data[obj_uuid][blender_obj.animation_data.action.name]['bone'].keys(): if blender_bone.name not in data[obj_uuid][blender_obj.animation_data.action.name]['bone'].keys():

View File

@ -85,9 +85,6 @@ def __create_buffer(exporter, export_settings):
buffer = bytes() buffer = bytes()
if export_settings['gltf_format'] == 'GLB': if export_settings['gltf_format'] == 'GLB':
buffer = exporter.finalize_buffer(export_settings['gltf_filedirectory'], is_glb=True) buffer = exporter.finalize_buffer(export_settings['gltf_filedirectory'], is_glb=True)
else:
if export_settings['gltf_format'] == 'GLTF_EMBEDDED':
exporter.finalize_buffer(export_settings['gltf_filedirectory'])
else: else:
exporter.finalize_buffer(export_settings['gltf_filedirectory'], exporter.finalize_buffer(export_settings['gltf_filedirectory'],
export_settings['gltf_binaryfilename']) export_settings['gltf_binaryfilename'])

View File

@ -9,6 +9,7 @@ from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras from ..com.gltf2_blender_extras import generate_extras
from .gltf2_blender_gather_cache import cached from .gltf2_blender_gather_cache import cached
from . import gltf2_blender_gather_nodes from . import gltf2_blender_gather_nodes
from . import gltf2_blender_gather_joints
from . import gltf2_blender_gather_tree from . import gltf2_blender_gather_tree
from .animation.sampled.object.gltf2_blender_gather_object_keyframes import get_cache_data from .animation.sampled.object.gltf2_blender_gather_object_keyframes import get_cache_data
from .animation.gltf2_blender_gather_animations import gather_animations from .animation.gltf2_blender_gather_animations import gather_animations
@ -52,6 +53,8 @@ def __gather_scene(blender_scene, export_settings):
vtree = gltf2_blender_gather_tree.VExportTree(export_settings) vtree = gltf2_blender_gather_tree.VExportTree(export_settings)
vtree.construct(blender_scene) vtree.construct(blender_scene)
vtree.search_missing_armature() # In case armature are no parented correctly vtree.search_missing_armature() # In case armature are no parented correctly
vtree.bake_armature_bone_list() # Used in case we remove the armature
vtree.check_if_we_can_remove_armature() # Check if we can remove the armatures objects
export_user_extensions('vtree_before_filter_hook', export_settings, vtree) export_user_extensions('vtree_before_filter_hook', export_settings, vtree)
@ -59,6 +62,8 @@ def __gather_scene(blender_scene, export_settings):
vtree.filter() vtree.filter()
if export_settings['gltf_flatten_bones_hierarchy'] is True: if export_settings['gltf_flatten_bones_hierarchy'] is True:
vtree.break_bone_hierarchy() vtree.break_bone_hierarchy()
if export_settings['gltf_flatten_obj_hierarchy'] is True:
vtree.break_obj_hierarchy()
vtree.variants_reset_to_original() vtree.variants_reset_to_original()
@ -66,11 +71,41 @@ def __gather_scene(blender_scene, export_settings):
export_settings['vtree'] = vtree export_settings['vtree'] = vtree
# If we don't remove armature object, we can't have bones directly at root of scene
# So looping only on root nodes, as they are all nodes, not bones
if export_settings['gltf_armature_object_remove'] is False:
for r in [vtree.nodes[r] for r in vtree.roots]: for r in [vtree.nodes[r] for r in vtree.roots]:
node = gltf2_blender_gather_nodes.gather_node( node = gltf2_blender_gather_nodes.gather_node(
r, export_settings) r, export_settings)
if node is not None: if node is not None:
scene.nodes.append(node) scene.nodes.append(node)
else:
# If we remove armature objects, we can have bones at the root of the scene
armature_root_joints = {}
for r in [vtree.nodes[r] for r in vtree.roots]:
# Classic Object/node case
if r.blender_type != gltf2_blender_gather_tree.VExportNode.BONE:
node = gltf2_blender_gather_nodes.gather_node(
r, export_settings)
if node is not None:
scene.nodes.append(node)
else:
# We can have bones at the root of the scene because we removed the armature object
# and the armature was at root of scene
node = gltf2_blender_gather_joints.gather_joint_vnode(
r.uuid, export_settings)
if node is not None:
scene.nodes.append(node)
if r.armature not in armature_root_joints.keys():
armature_root_joints[r.armature] = []
armature_root_joints[r.armature].append(node)
# Manage objects parented to bones, now we go through all root objects
for k, v in armature_root_joints.items():
gltf2_blender_gather_nodes.get_objects_parented_to_bones(k, v, export_settings)
vtree.add_neutral_bones() vtree.add_neutral_bones()

View File

@ -48,10 +48,14 @@ def gather_joint_vnode(vnode, export_settings):
:return: a glTF2 node (acting as a joint) :return: a glTF2 node (acting as a joint)
""" """
vtree = export_settings['vtree'] vtree = export_settings['vtree']
blender_object = vtree.nodes[vnode].blender_object
blender_bone = vtree.nodes[vnode].blender_bone blender_bone = vtree.nodes[vnode].blender_bone
if export_settings['gltf_armature_object_remove'] is True:
if vtree.nodes[vnode].parent_uuid is not None:
mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world
else:
mat = vtree.nodes[vnode].matrix_world
else:
mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world
trans, rot, sca = mat.decompose() trans, rot, sca = mat.decompose()
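Note: the parent-relative matrix computed above is simply `parent_world.inverted_safe() @ child_world`; a tiny mathutils sketch with invented transforms (not the exporter's data) shows the pattern.

from math import radians
from mathutils import Matrix, Vector

# Hypothetical world matrices for a parent node and one of its children.
parent_world = Matrix.Translation(Vector((1.0, 2.0, 0.0))) @ Matrix.Rotation(radians(90.0), 4, 'Z')
child_world = parent_world @ Matrix.Translation(Vector((0.0, 3.0, 0.0)))

# Parent-relative (local) matrix, decomposed into the TRS written to the glTF node.
local = parent_world.inverted_safe() @ child_world
trans, rot, sca = local.decompose()
print(trans)  # translation of (0, 3, 0) relative to the parent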

View File

@ -21,6 +21,7 @@ from . import gltf2_blender_gather_lights
from .gltf2_blender_gather_tree import VExportNode from .gltf2_blender_gather_tree import VExportNode
def gather_node(vnode, export_settings): def gather_node(vnode, export_settings):
blender_object = vnode.blender_object blender_object = vnode.blender_object
skin = gather_skin(vnode.uuid, export_settings) skin = gather_skin(vnode.uuid, export_settings)
@ -29,7 +30,7 @@ def gather_node(vnode, export_settings):
node = gltf2_io.Node( node = gltf2_io.Node(
camera=__gather_camera(blender_object, export_settings), camera=__gather_camera(blender_object, export_settings),
children=__gather_children(vnode, blender_object, export_settings), children=__gather_children(vnode, export_settings),
extensions=__gather_extensions(blender_object, export_settings), extensions=__gather_extensions(blender_object, export_settings),
extras=__gather_extras(blender_object, export_settings), extras=__gather_extras(blender_object, export_settings),
matrix=__gather_matrix(blender_object, export_settings), matrix=__gather_matrix(blender_object, export_settings),
@ -61,53 +62,70 @@ def __gather_camera(blender_object, export_settings):
return gltf2_blender_gather_cameras.gather_camera(blender_object.data, export_settings) return gltf2_blender_gather_cameras.gather_camera(blender_object.data, export_settings)
def __gather_children(vnode, blender_object, export_settings): def __gather_children(vnode, export_settings):
children = [] children = []
vtree = export_settings['vtree'] vtree = export_settings['vtree']
armature_object_uuid = None
# Standard Children / Collection # Standard Children / Collection
if export_settings['gltf_armature_object_remove'] is False:
for c in [vtree.nodes[c] for c in vnode.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE]: for c in [vtree.nodes[c] for c in vnode.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE]:
node = gather_node(c, export_settings) node = gather_node(c, export_settings)
if node is not None: if node is not None:
children.append(node) children.append(node)
else:
root_joints = []
for c in [vtree.nodes[c] for c in vnode.children]:
if c.blender_type != gltf2_blender_gather_tree.VExportNode.BONE:
node = gather_node(c, export_settings)
if node is not None:
children.append(node)
else:
# We come here because the armature was removed, and a bone can be a child of any object
joint = gltf2_blender_gather_joints.gather_joint_vnode(c.uuid, export_settings)
children.append(joint)
armature_object_uuid = c.armature
root_joints.append(joint)
# Now that we have all bone children (that are root joints), we can get the objects parented to bones
# Armature --> Retrieve Blender bones # Armature --> Retrieve Blender bones
# This can't happen if we remove the Armature Object
if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE: if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE:
armature_object_uuid = vnode.uuid
root_joints = [] root_joints = []
root_bones_uuid = export_settings['vtree'].get_root_bones_uuid(vnode.uuid)
all_armature_children = vnode.children
root_bones_uuid = [c for c in all_armature_children if export_settings['vtree'].nodes[c].blender_type == VExportNode.BONE]
for bone_uuid in root_bones_uuid: for bone_uuid in root_bones_uuid:
joint = gltf2_blender_gather_joints.gather_joint_vnode(bone_uuid, export_settings) joint = gltf2_blender_gather_joints.gather_joint_vnode(bone_uuid, export_settings)
children.append(joint) children.append(joint)
root_joints.append(joint) root_joints.append(joint)
if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE \
or armature_object_uuid is not None:
# Object parented to bones # Object parented to bones
get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings)
return children
def get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings):
vtree = export_settings['vtree']
direct_bone_children = [] direct_bone_children = []
for n in [vtree.nodes[i] for i in vtree.get_all_bones(vnode.uuid)]: for n in [vtree.nodes[i] for i in vtree.get_all_bones(armature_object_uuid)]:
direct_bone_children.extend([c for c in n.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE]) direct_bone_children.extend([c for c in n.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE])
def find_parent_joint(joints, name):
for joint in joints:
if joint.name == name:
return joint
parent_joint = find_parent_joint(joint.children, name)
if parent_joint:
return parent_joint
return None
for child in direct_bone_children: # List of object that are parented to bones for child in direct_bone_children: # List of object that are parented to bones
# find parent joint # find parent joint
parent_joint = find_parent_joint(root_joints, vtree.nodes[child].blender_object.parent_bone) parent_joint = __find_parent_joint(root_joints, vtree.nodes[child].blender_object.parent_bone)
if not parent_joint: if not parent_joint:
continue continue
child_node = gather_node(vtree.nodes[child], export_settings) child_node = gather_node(vtree.nodes[child], export_settings)
if child_node is None: if child_node is None:
continue continue
blender_bone = blender_object.pose.bones[parent_joint.name]
mat = vtree.nodes[vtree.nodes[child].parent_bone_uuid].matrix_world.inverted_safe() @ vtree.nodes[child].matrix_world mat = vtree.nodes[vtree.nodes[child].parent_bone_uuid].matrix_world.inverted_safe() @ vtree.nodes[child].matrix_world
loc, rot_quat, scale = mat.decompose() loc, rot_quat, scale = mat.decompose()
@ -131,7 +149,15 @@ def __gather_children(vnode, blender_object, export_settings):
parent_joint.children.append(child_node) parent_joint.children.append(child_node)
return children
def __find_parent_joint(joints, name):
for joint in joints:
if joint.name == name:
return joint
parent_joint = __find_parent_joint(joint.children, name)
if parent_joint:
return parent_joint
return None
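Note: __find_parent_joint is a plain depth-first search by name over the already-gathered joint hierarchy; a stand-alone sketch with a minimal stand-in class (not the real glTF node type) illustrates the recursion.

from dataclasses import dataclass, field

@dataclass
class Joint:
    name: str
    children: list = field(default_factory=list)

def find_joint(joints, name):
    # Depth-first search by name, mirroring the recursive lookup above.
    for joint in joints:
        if joint.name == name:
            return joint
        found = find_joint(joint.children, name)
        if found:
            return found
    return None

root = Joint("hips", [Joint("spine", [Joint("hand.L")])])
print(find_joint([root], "hand.L").name)  # hand.L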
def __gather_extensions(blender_object, export_settings): def __gather_extensions(blender_object, export_settings):

View File

@ -7,16 +7,27 @@ from ...io.com import gltf2_io
from ...io.exp.gltf2_io_user_extensions import export_user_extensions from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ...io.com.gltf2_io_constants import TextureFilter, TextureWrap from ...io.com.gltf2_io_constants import TextureFilter, TextureWrap
from .gltf2_blender_gather_cache import cached from .gltf2_blender_gather_cache import cached
from .gltf2_blender_get import ( from .material.gltf2_blender_search_node_tree import previous_node, previous_socket, get_const_from_socket, NodeSocket
previous_node,
previous_socket,
get_const_from_socket,
)
@cached @cached
def gather_sampler(blender_shader_node: bpy.types.Node, export_settings): def gather_sampler(blender_shader_node: bpy.types.Node, group_path_str, export_settings):
wrap_s, wrap_t = __gather_wrap(blender_shader_node, export_settings) # reconstruct group_path from group_path_str
sep_item = "##~~gltf-sep~~##"
sep_inside_item = "##~~gltf-inside-sep~~##"
group_path = []
tab = group_path_str.split(sep_item)
if len(tab) > 0:
group_path.append(bpy.data.materials[tab[0]])
for idx, i in enumerate(tab[1:]):
subtab = i.split(sep_inside_item)
if idx == 0:
group_path.append(bpy.data.materials[tab[0]].node_tree.nodes[subtab[1]])
else:
group_path.append(bpy.data.node_groups[subtab[0]].nodes[subtab[1]])
wrap_s, wrap_t = __gather_wrap(blender_shader_node, group_path, export_settings)
sampler = gltf2_io.Sampler( sampler = gltf2_io.Sampler(
extensions=__gather_extensions(blender_shader_node, export_settings), extensions=__gather_extensions(blender_shader_node, export_settings),
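Note: the separator-based string above simply encodes the material name followed by one "tree name / node name" pair per nested group. A quick sketch with an invented path (pure string handling only; the invented material and group names are placeholders, and the real code then resolves each part through bpy.data as shown above).

SEP_ITEM = "##~~gltf-sep~~##"
SEP_INSIDE_ITEM = "##~~gltf-inside-sep~~##"

# Hypothetical encoded group path: material name, then one (tree name, node name) pair per nested group.
group_path_str = (
    "MyMaterial"
    + SEP_ITEM + "MyMaterial" + SEP_INSIDE_ITEM + "Group"
    + SEP_ITEM + "MyNodeGroup" + SEP_INSIDE_ITEM + "Image Texture"
)

parts = group_path_str.split(SEP_ITEM)
material_name = parts[0]
nested = [tuple(p.split(SEP_INSIDE_ITEM)) for p in parts[1:]]
print(material_name)  # MyMaterial
print(nested)         # [('MyMaterial', 'Group'), ('MyNodeGroup', 'Image Texture')]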
@ -80,7 +91,7 @@ def __gather_name(blender_shader_node, export_settings):
return None return None
def __gather_wrap(blender_shader_node, export_settings): def __gather_wrap(blender_shader_node, group_path, export_settings):
# First gather from the Texture node # First gather from the Texture node
if blender_shader_node.extension == 'EXTEND': if blender_shader_node.extension == 'EXTEND':
wrap_s = TextureWrap.ClampToEdge wrap_s = TextureWrap.ClampToEdge
@ -98,7 +109,7 @@ def __gather_wrap(blender_shader_node, export_settings):
# But still works for old files # But still works for old files
# Still needed for heterogen heterogeneous sampler, like MIRROR x REPEAT, for example # Still needed for heterogen heterogeneous sampler, like MIRROR x REPEAT, for example
# Take manual wrapping into account # Take manual wrapping into account
result = detect_manual_uv_wrapping(blender_shader_node) result = detect_manual_uv_wrapping(blender_shader_node, group_path)
if result: if result:
if result['wrap_s'] is not None: wrap_s = result['wrap_s'] if result['wrap_s'] is not None: wrap_s = result['wrap_s']
if result['wrap_t'] is not None: wrap_t = result['wrap_t'] if result['wrap_t'] is not None: wrap_t = result['wrap_t']
@ -110,7 +121,7 @@ def __gather_wrap(blender_shader_node, export_settings):
return wrap_s, wrap_t return wrap_s, wrap_t
def detect_manual_uv_wrapping(blender_shader_node): def detect_manual_uv_wrapping(blender_shader_node, group_path):
# Detects UV wrapping done using math nodes. This is for emulating wrap # Detects UV wrapping done using math nodes. This is for emulating wrap
# modes Blender doesn't support. It looks like # modes Blender doesn't support. It looks like
# #
@ -124,38 +135,38 @@ def detect_manual_uv_wrapping(blender_shader_node):
# mode in each direction (or None), and next_socket. # mode in each direction (or None), and next_socket.
result = {} result = {}
comb = previous_node(blender_shader_node.inputs['Vector']) comb = previous_node(NodeSocket(blender_shader_node.inputs['Vector'], group_path))
if comb is None or comb.type != 'COMBXYZ': return None if comb.node is None or comb.node.type != 'COMBXYZ': return None
for soc in ['X', 'Y']: for soc in ['X', 'Y']:
node = previous_node(comb.inputs[soc]) node = previous_node(NodeSocket(comb.node.inputs[soc], comb.group_path))
if node is None: return None if node.node is None: return None
if node.type == 'SEPXYZ': if node.node.type == 'SEPXYZ':
# Passed through without change # Passed through without change
wrap = None wrap = None
prev_socket = previous_socket(comb.inputs[soc]) prev_socket = previous_socket(NodeSocket(comb.node.inputs[soc], comb.group_path))
elif node.type == 'MATH': elif node.node.type == 'MATH':
# Math node applies a manual wrap # Math node applies a manual wrap
if (node.operation == 'PINGPONG' and if (node.node.operation == 'PINGPONG' and
get_const_from_socket(node.inputs[1], kind='VALUE') == 1.0): # scale = 1 get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind='VALUE') == 1.0): # scale = 1
wrap = TextureWrap.MirroredRepeat wrap = TextureWrap.MirroredRepeat
elif (node.operation == 'WRAP' and elif (node.node.operation == 'WRAP' and
get_const_from_socket(node.inputs[1], kind='VALUE') == 0.0 and # min = 0 get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind='VALUE') == 0.0 and # min = 0
get_const_from_socket(node.inputs[2], kind='VALUE') == 1.0): # max = 1 get_const_from_socket(NodeSocket(node.node.inputs[2], node.group_path), kind='VALUE') == 1.0): # max = 1
wrap = TextureWrap.Repeat wrap = TextureWrap.Repeat
else: else:
return None return None
prev_socket = previous_socket(node.inputs[0]) prev_socket = previous_socket(NodeSocket(node.node.inputs[0], node.group_path))
else: else:
return None return None
if prev_socket is None: return None if prev_socket.socket is None: return None
prev_node = prev_socket.node prev_node = prev_socket.socket.node
if prev_node.type != 'SEPXYZ': return None if prev_node.type != 'SEPXYZ': return None
# Make sure X goes to X, etc. # Make sure X goes to X, etc.
if prev_socket.name != soc: return None if prev_socket.socket.name != soc: return None
# Make sure both attach to the same SeparateXYZ node # Make sure both attach to the same SeparateXYZ node
if soc == 'X': if soc == 'X':
sep = prev_node sep = prev_node
@ -164,5 +175,5 @@ def detect_manual_uv_wrapping(blender_shader_node):
result['wrap_s' if soc == 'X' else 'wrap_t'] = wrap result['wrap_s' if soc == 'X' else 'wrap_t'] = wrap
result['next_socket'] = sep.inputs[0] result['next_socket'] = NodeSocket(sep.inputs[0], prev_socket.group_path)
return result return result

View File

@ -13,6 +13,7 @@ from ...io.com import gltf2_io_constants
from ...io.exp import gltf2_io_binary_data from ...io.exp import gltf2_io_binary_data
from ..com.gltf2_blender_default import BLENDER_GLTF_SPECIAL_COLLECTION from ..com.gltf2_blender_default import BLENDER_GLTF_SPECIAL_COLLECTION
from . import gltf2_blender_gather_accessors from . import gltf2_blender_gather_accessors
from .gltf2_blender_gather_joints import gather_joint_vnode
class VExportNode: class VExportNode:
@ -76,7 +77,7 @@ class VExportNode:
def recursive_display(self, tree, mode): def recursive_display(self, tree, mode):
if mode == "simple": if mode == "simple":
for c in self.children: for c in self.children:
print(self.blender_object.name, "/", self.blender_bone.name if self.blender_bone else "", "-->", tree.nodes[c].blender_object.name, "/", tree.nodes[c].blender_bone.name if tree.nodes[c].blender_bone else "" ) print(tree.nodes[c].uuid, self.blender_object.name, "/", self.blender_bone.name if self.blender_bone else "", "-->", tree.nodes[c].blender_object.name, "/", tree.nodes[c].blender_bone.name if tree.nodes[c].blender_bone else "" )
tree.nodes[c].recursive_display(tree, mode) tree.nodes[c].recursive_display(tree, mode)
class VExportTree: class VExportTree:
@ -278,7 +279,8 @@ class VExportTree:
def get_all_objects(self): def get_all_objects(self):
return [n.uuid for n in self.nodes.values() if n.blender_type != VExportNode.BONE] return [n.uuid for n in self.nodes.values() if n.blender_type != VExportNode.BONE]
def get_all_bones(self, uuid): #For armatue Only def get_all_bones(self, uuid): #For armature only
if not hasattr(self.nodes[uuid], "all_bones"):
if self.nodes[uuid].blender_type == VExportNode.ARMATURE: if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
def recursive_get_all_bones(uuid): def recursive_get_all_bones(uuid):
total = [] total = []
@ -292,9 +294,25 @@ class VExportTree:
tot = [] tot = []
for c_uuid in self.nodes[uuid].children: for c_uuid in self.nodes[uuid].children:
tot.extend(recursive_get_all_bones(c_uuid)) tot.extend(recursive_get_all_bones(c_uuid))
return tot self.nodes[uuid].all_bones = tot
return tot # Not really needed to return, we are just baking it before export really starts
else: else:
self.nodes[uuid].all_bones = []
return [] return []
else:
return self.nodes[uuid].all_bones
def get_root_bones_uuid(self, uuid): #For armature only
if not hasattr(self.nodes[uuid], "root_bones_uuid"):
if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
all_armature_children = self.nodes[uuid].children
self.nodes[uuid].root_bones_uuid = [c for c in all_armature_children if self.nodes[c].blender_type == VExportNode.BONE]
return self.nodes[uuid].root_bones_uuid # Not really needed to return, we are just baking it before export really starts
else:
self.nodes[uuid].root_bones_uuid = []
return []
else:
return self.nodes[uuid].root_bones_uuid
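Note: both helpers above follow the same lazy-caching pattern: compute the list once on first access and stash it on the vnode via an attribute, so later lookups are free. A generic stand-alone sketch of that pattern (illustrative class, not the exporter's VExportNode).

class Node:
    def __init__(self, children=()):
        self.children = list(children)

def get_all_descendants(node):
    # Compute once, then reuse the attribute baked onto the instance.
    if not hasattr(node, "all_descendants"):
        result = []
        for child in node.children:
            result.append(child)
            result.extend(get_all_descendants(child))
        node.all_descendants = result
    return node.all_descendants

leaf = Node()
root = Node([Node([leaf]), Node()])
print(len(get_all_descendants(root)))                          # 3
print(get_all_descendants(root) is get_all_descendants(root))  # True, cached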
def get_all_node_of_type(self, node_type): def get_all_node_of_type(self, node_type):
return [n.uuid for n in self.nodes.values() if n.blender_type == node_type] return [n.uuid for n in self.nodes.values() if n.blender_type == node_type]
@ -302,10 +320,9 @@ class VExportTree:
def display(self, mode): def display(self, mode):
if mode == "simple": if mode == "simple":
for n in self.roots: for n in self.roots:
print("Root", self.nodes[n].blender_object.name, "/", self.nodes[n].blender_bone.name if self.nodes[n].blender_bone else "" ) print(self.nodes[n].uuid, "Root", self.nodes[n].blender_object.name, "/", self.nodes[n].blender_bone.name if self.nodes[n].blender_bone else "" )
self.nodes[n].recursive_display(self, mode) self.nodes[n].recursive_display(self, mode)
def filter_tag(self): def filter_tag(self):
roots = self.roots.copy() roots = self.roots.copy()
for r in roots: for r in roots:
@ -322,7 +339,6 @@ class VExportTree:
self.filter_perform() self.filter_perform()
self.remove_filtered_nodes() self.remove_filtered_nodes()
def recursive_filter_tag(self, uuid, parent_keep_tag): def recursive_filter_tag(self, uuid, parent_keep_tag):
# parent_keep_tag is for collection instance # parent_keep_tag is for collection instance
# some properties (selection, visibility, renderability) # some properties (selection, visibility, renderability)
@ -442,9 +458,19 @@ class VExportTree:
bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].objects: bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].objects:
return False return False
if self.export_settings['gltf_armature_object_remove'] is True:
# If we remove the Armature object
if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
self.nodes[uuid].arma_exported = True
return False
return True return True
def remove_filtered_nodes(self): def remove_filtered_nodes(self):
if self.export_settings['gltf_armature_object_remove'] is True:
# If we remove the Armature object
self.nodes = {k:n for (k, n) in self.nodes.items() if n.keep_tag is True or (n.keep_tag is False and n.blender_type == VExportNode.ARMATURE)}
else:
self.nodes = {k:n for (k, n) in self.nodes.items() if n.keep_tag is True} self.nodes = {k:n for (k, n) in self.nodes.items() if n.keep_tag is True}
def search_missing_armature(self): def search_missing_armature(self):
@ -454,6 +480,14 @@ class VExportTree:
n.armature = candidates[0].uuid n.armature = candidates[0].uuid
del n.armature_needed del n.armature_needed
def bake_armature_bone_list(self):
# Used to store data in armature vnode
# If armature is removed from export
# Data are still available, even if armature is not exported (so bones are re-parented)
for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]:
self.get_all_bones(n.uuid)
self.get_root_bones_uuid(n.uuid)
def add_neutral_bones(self): def add_neutral_bones(self):
added_armatures = [] added_armatures = []
for n in [n for n in self.nodes.values() if \ for n in [n for n in self.nodes.values() if \
@ -521,6 +555,9 @@ class VExportTree:
from .gltf2_blender_gather_skins import gather_skin from .gltf2_blender_gather_skins import gather_skin
skins = [] skins = []
for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]: for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]:
if self.export_settings['gltf_armature_object_remove'] is True:
if hasattr(n, "arma_exported") is False:
continue
if len([m for m in self.nodes.values() if m.keep_tag is True and m.blender_type == VExportNode.OBJECT and m.armature == n.uuid]) == 0: if len([m for m in self.nodes.values() if m.keep_tag is True and m.blender_type == VExportNode.OBJECT and m.armature == n.uuid]) == 0:
skin = gather_skin(n.uuid, self.export_settings) skin = gather_skin(n.uuid, self.export_settings)
skins.append(skin) skins.append(skin)
@ -552,3 +589,25 @@ class VExportTree:
self.nodes[self.nodes[bone].parent_uuid].children.remove(bone) self.nodes[self.nodes[bone].parent_uuid].children.remove(bone)
self.nodes[bone].parent_uuid = arma self.nodes[bone].parent_uuid = arma
self.nodes[arma].children.append(bone) self.nodes[arma].children.append(bone)
def break_obj_hierarchy(self):
# Can be useful when the matrix is not decomposable
# TODO: if we get real collections one day, we will probably need to adapt this code
for obj in self.get_all_objects():
if self.nodes[obj].armature is not None and self.nodes[obj].parent_uuid == self.nodes[obj].armature:
continue # Keep skinned meshes as children of the armature
if self.nodes[obj].parent_uuid is not None:
self.nodes[self.nodes[obj].parent_uuid].children.remove(obj)
self.nodes[obj].parent_uuid = None
self.roots.append(obj)
def check_if_we_can_remove_armature(self):
# If the user requested to remove the armature, we need to check if that is possible
# It is impossible to remove it if the armature has multiple root bones (glTF validator error)
# Currently, we manage it at export level, not at each armature level
for arma_uuid in [n for n in self.nodes.keys() if self.nodes[n].blender_type == VExportNode.ARMATURE]:
if len(self.get_root_bones_uuid(arma_uuid)) > 1:
# We can't remove armature
self.export_settings['gltf_armature_object_remove'] = False
print("WARNING: We can't remove armature object because some armatures have multiple root bones.")
break

View File

@ -3,12 +3,6 @@
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
import bpy import bpy
from mathutils import Vector, Matrix
from ...blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf
from ...io.com import gltf2_io_debug
from ..com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name
from .material import gltf2_blender_search_node_tree
def get_animation_target(action_group: bpy.types.ActionGroup): def get_animation_target(action_group: bpy.types.ActionGroup):
return action_group.channels[0].data_path.split('.')[-1] return action_group.channels[0].data_path.split('.')[-1]
@ -31,292 +25,3 @@ def get_object_from_datapath(blender_object, data_path: str):
# path_attr = data_path # path_attr = data_path
return prop return prop
def get_node_socket(blender_material, type, name):
"""
For a given material input name, retrieve the corresponding node tree socket for a given node type.
:param blender_material: a blender material for which to get the socket
:return: a blender NodeSocket for a given type
"""
nodes = [n for n in blender_material.node_tree.nodes if isinstance(n, type) and not n.mute]
nodes = [node for node in nodes if check_if_is_linked_to_active_output(node.outputs[0])]
inputs = sum([[input for input in node.inputs if input.name == name] for node in nodes], [])
if inputs:
return inputs[0]
return None
def get_socket(blender_material: bpy.types.Material, name: str, volume=False):
"""
For a given material input name, retrieve the corresponding node tree socket.
:param blender_material: a blender material for which to get the socket
:param name: the name of the socket
:return: a blender NodeSocket
"""
if blender_material.node_tree and blender_material.use_nodes:
#i = [input for input in blender_material.node_tree.inputs]
#o = [output for output in blender_material.node_tree.outputs]
if name == "Emissive":
# Check for a dedicated Emission node first, it must supersede the newer built-in one
# because the newer one is always present in all Principled BSDF materials.
emissive_socket = get_node_socket(blender_material, bpy.types.ShaderNodeEmission, "Color")
if emissive_socket:
return emissive_socket
# If a dedicated Emission node was not found, fall back to the Principled BSDF Emission Color socket.
name = "Emission Color"
type = bpy.types.ShaderNodeBsdfPrincipled
elif name == "Background":
type = bpy.types.ShaderNodeBackground
name = "Color"
else:
if volume is False:
type = bpy.types.ShaderNodeBsdfPrincipled
else:
type = bpy.types.ShaderNodeVolumeAbsorption
return get_node_socket(blender_material, type, name)
return None
def get_socket_old(blender_material: bpy.types.Material, name: str):
"""
For a given material input name, retrieve the corresponding node tree socket in the special glTF node group.
:param blender_material: a blender material for which to get the socket
:param name: the name of the socket
:return: a blender NodeSocket
"""
gltf_node_group_names = [get_gltf_node_name().lower(), get_gltf_node_old_name().lower()]
if blender_material.node_tree and blender_material.use_nodes:
# Some weird node groups with missing datablock can have no node_tree, so checking n.node_tree (See #1797)
nodes = [n for n in blender_material.node_tree.nodes if \
isinstance(n, bpy.types.ShaderNodeGroup) and \
n.node_tree is not None and
(n.node_tree.name.startswith('glTF Metallic Roughness') or n.node_tree.name.lower() in gltf_node_group_names)]
inputs = sum([[input for input in node.inputs if input.name == name] for node in nodes], [])
if inputs:
return inputs[0]
return None
def check_if_is_linked_to_active_output(shader_socket):
for link in shader_socket.links:
if isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial) and link.to_node.is_active_output is True:
return True
if len(link.to_node.outputs) > 0: # ignore non active output, not having output sockets
ret = check_if_is_linked_to_active_output(link.to_node.outputs[0]) # recursive until find an output material node
if ret is True:
return True
return False
def find_shader_image_from_shader_socket(shader_socket, max_hops=10):
"""Find any ShaderNodeTexImage in the path from the socket."""
if shader_socket is None:
return None
if max_hops <= 0:
return None
for link in shader_socket.links:
if isinstance(link.from_node, bpy.types.ShaderNodeTexImage):
return link.from_node
for socket in link.from_node.inputs.values():
image = find_shader_image_from_shader_socket(shader_socket=socket, max_hops=max_hops - 1)
if image is not None:
return image
return None
def get_texture_transform_from_mapping_node(mapping_node):
if mapping_node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]:
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform because it had type " +
mapping_node.vector_type + "; recommend using POINT instead"
)
return None
rotation_0, rotation_1 = mapping_node.inputs['Rotation'].default_value[0], mapping_node.inputs['Rotation'].default_value[1]
if rotation_0 or rotation_1:
# TODO: can we handle this?
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform because it had non-zero "
"rotations in the X/Y direction; only a Z rotation can be exported!"
)
return None
mapping_transform = {}
mapping_transform["offset"] = [mapping_node.inputs['Location'].default_value[0], mapping_node.inputs['Location'].default_value[1]]
mapping_transform["rotation"] = mapping_node.inputs['Rotation'].default_value[2]
mapping_transform["scale"] = [mapping_node.inputs['Scale'].default_value[0], mapping_node.inputs['Scale'].default_value[1]]
if mapping_node.vector_type == "TEXTURE":
# This means use the inverse of the TRS transform.
def inverted(mapping_transform):
offset = mapping_transform["offset"]
rotation = mapping_transform["rotation"]
scale = mapping_transform["scale"]
# Inverse of a TRS is not always a TRS. This function will be right
# at least when the following don't occur.
if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5:
return None
if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5:
return None
new_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector((-offset[0], -offset[1], 1))
new_offset[0] /= scale[0]; new_offset[1] /= scale[1]
return {
"offset": new_offset[0:2],
"rotation": -rotation,
"scale": [1/scale[0], 1/scale[1]],
}
mapping_transform = inverted(mapping_transform)
if mapping_transform is None:
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform with type TEXTURE because "
"we couldn't convert it to TRS; recommend using POINT instead"
)
return None
elif mapping_node.vector_type == "VECTOR":
# Vectors don't get translated
mapping_transform["offset"] = [0, 0]
texture_transform = texture_transform_blender_to_gltf(mapping_transform)
if all([component == 0 for component in texture_transform["offset"]]):
del(texture_transform["offset"])
if all([component == 1 for component in texture_transform["scale"]]):
del(texture_transform["scale"])
if texture_transform["rotation"] == 0:
del(texture_transform["rotation"])
if len(texture_transform) == 0:
return None
return texture_transform
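Note: the inverted() helper above relies on the inverse of a 2D translate-rotate-scale being another TRS when the rotation is (near) zero or the scale is uniform; a small mathutils sketch with made-up values can sanity-check the closed form it uses (offset' = (1/s) * R(-r) * (-o), rotation' = -r, scale' = 1/s).

from math import radians
from mathutils import Matrix, Vector

# Made-up texture transform with uniform scale, so the inverse is again a TRS.
offset = Vector((0.25, -0.1))
rotation = radians(30.0)
scale = Vector((2.0, 2.0))

def trs_matrix(offset, rotation, scale):
    # 3x3 matrix acting on homogeneous 2D UVs: translate @ rotate @ scale.
    t = Matrix.Identity(3)
    t[0][2], t[1][2] = offset[0], offset[1]
    r = Matrix.Rotation(rotation, 3, 'Z')
    s = Matrix.Identity(3)
    s[0][0], s[1][1] = scale[0], scale[1]
    return t @ r @ s

# Same closed form as the code above.
inv_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector((-offset[0], -offset[1], 1.0))
inv_offset = Vector((inv_offset[0] / scale[0], inv_offset[1] / scale[1]))
inv = trs_matrix(inv_offset, -rotation, Vector((1.0 / scale[0], 1.0 / scale[1])))

print(trs_matrix(offset, rotation, scale) @ inv)  # ~identity matrix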
def get_node(data_path):
"""Return Blender node on a given Blender data path."""
if data_path is None:
return None
index = data_path.find("[\"")
if (index == -1):
return None
node_name = data_path[(index + 2):]
index = node_name.find("\"")
if (index == -1):
return None
return node_name[:(index)]
def get_factor_from_socket(socket, kind):
"""
For baseColorFactor, metallicFactor, etc.
Get a constant value from a socket, or a constant value
from a MULTIPLY node just before the socket.
kind is either 'RGB' or 'VALUE'.
"""
fac = get_const_from_socket(socket, kind)
if fac is not None:
return fac
node = previous_node(socket)
if node is not None:
x1, x2 = None, None
if kind == 'RGB':
if node.type == 'MIX' and node.data_type == "RGBA" and node.blend_type == 'MULTIPLY':
# TODO: handle factor in inputs[0]?
x1 = get_const_from_socket(node.inputs[6], kind)
x2 = get_const_from_socket(node.inputs[7], kind)
if kind == 'VALUE':
if node.type == 'MATH' and node.operation == 'MULTIPLY':
x1 = get_const_from_socket(node.inputs[0], kind)
x2 = get_const_from_socket(node.inputs[1], kind)
if x1 is not None and x2 is None: return x1
if x2 is not None and x1 is None: return x2
return None
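

# A minimal usage sketch for get_factor_from_socket(), assuming it runs inside
# Blender's Python where bpy is available; the material name and the 0.5
# constant are hypothetical illustration values.
def _example_metallic_factor():
    import bpy

    mat = bpy.data.materials.new("ExampleMaterial")
    mat.use_nodes = True
    tree = mat.node_tree
    principled = next(n for n in tree.nodes if n.type == 'BSDF_PRINCIPLED')

    # Multiply a texture by a constant 0.5 just before the Metallic input.
    tex = tree.nodes.new('ShaderNodeTexImage')
    math_node = tree.nodes.new('ShaderNodeMath')
    math_node.operation = 'MULTIPLY'
    math_node.inputs[0].default_value = 0.5
    tree.links.new(tex.outputs['Color'], math_node.inputs[1])
    tree.links.new(math_node.outputs['Value'], principled.inputs['Metallic'])

    # The constant side of the MULTIPLY node is returned as the factor (0.5).
    return get_factor_from_socket(principled.inputs['Metallic'], kind='VALUE')
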
def get_const_from_default_value_socket(socket, kind):
if kind == 'RGB':
if socket.type != 'RGBA': return None
return list(socket.default_value)[:3]
if kind == 'VALUE':
if socket.type != 'VALUE': return None
return socket.default_value
return None


def get_const_from_socket(socket, kind):
if not socket.is_linked:
if kind == 'RGB':
if socket.type != 'RGBA': return None
return list(socket.default_value)[:3]
if kind == 'VALUE':
if socket.type != 'VALUE': return None
return socket.default_value
# Handle connection to a constant RGB/Value node
prev_node = previous_node(socket)
if prev_node is not None:
if kind == 'RGB' and prev_node.type == 'RGB':
return list(prev_node.outputs[0].default_value)[:3]
if kind == 'VALUE' and prev_node.type == 'VALUE':
return prev_node.outputs[0].default_value
return None


def previous_socket(socket):
while True:
if not socket.is_linked:
return None
from_socket = socket.links[0].from_socket
# Skip over reroute nodes
if from_socket.node.type == 'REROUTE':
socket = from_socket.node.inputs[0]
continue
return from_socket


def previous_node(socket):
prev_socket = previous_socket(socket)
if prev_socket is not None:
return prev_socket.node
return None
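

# A minimal sketch showing that previous_node() and get_const_from_socket()
# see through reroute nodes, assuming Blender's Python; the material name is
# illustrative and the returned colour is whatever the RGB node holds.
def _example_skip_reroute():
    import bpy

    mat = bpy.data.materials.new("RerouteExample")
    mat.use_nodes = True
    tree = mat.node_tree
    principled = next(n for n in tree.nodes if n.type == 'BSDF_PRINCIPLED')

    rgb = tree.nodes.new('ShaderNodeRGB')
    reroute = tree.nodes.new('NodeReroute')
    tree.links.new(rgb.outputs['Color'], reroute.inputs[0])
    tree.links.new(reroute.outputs[0], principled.inputs['Base Color'])

    assert previous_node(principled.inputs['Base Color']) == rgb
    # Returns the RGB node's colour as [r, g, b].
    return get_const_from_socket(principled.inputs['Base Color'], kind='RGB')
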
def get_tex_from_socket(socket):
result = gltf2_blender_search_node_tree.from_socket(
socket,
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeTexImage))
if not result:
return None
if result[0].shader_node.image is None:
return None
return result[0]


def has_image_node_from_socket(socket):
return get_tex_from_socket(socket) is not None


def image_tex_is_valid_from_socket(socket):
res = get_tex_from_socket(socket)
return res is not None and res.shader_node.image is not None and res.shader_node.image.channels != 0

View File

@ -117,7 +117,7 @@ class GlTF2Exporter:
f.write(self.__buffer.to_bytes()) f.write(self.__buffer.to_bytes())
uri = buffer_name uri = buffer_name
else: else:
uri = self.__buffer.to_embed_string() pass # This is no longer possible, we don't export embedded buffers
buffer = gltf2_io.Buffer( buffer = gltf2_io.Buffer(
byte_length=self.__buffer.byte_length, byte_length=self.__buffer.byte_length,
@ -320,6 +320,20 @@ class GlTF2Exporter:
len_ = len([i for i in self.nodes_idx_to_remove if i < skin.skeleton]) len_ = len([i for i in self.nodes_idx_to_remove if i < skin.skeleton])
skin.skeleton = skin.skeleton - len_ skin.skeleton = skin.skeleton - len_
# Remove animation channels that were targeting a node that will be removed
new_animation_list = []
for animation in self.__gltf.animations:
new_channel_list = []
for channel in animation.channels:
if channel.target.node not in self.nodes_idx_to_remove:
new_channel_list.append(channel)
animation.channels = new_channel_list
if len(animation.channels) > 0:
new_animation_list.append(animation)
self.__gltf.animations = new_animation_list
#TODO: remove unused animation accessors?
# And now really remove nodes # And now really remove nodes
self.__gltf.nodes = [node for idx, node in enumerate(self.__gltf.nodes) if idx not in self.nodes_idx_to_remove] self.__gltf.nodes = [node for idx, node in enumerate(self.__gltf.nodes) if idx not in self.nodes_idx_to_remove]

View File

@ -4,8 +4,8 @@
import bpy import bpy
from .....io.com.gltf2_io_extensions import Extension from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import has_image_node_from_socket, get_socket, get_factor_from_socket
def export_clearcoat(blender_material, export_settings): def export_clearcoat(blender_material, export_settings):
clearcoat_enabled = False clearcoat_enabled = False
@ -15,15 +15,15 @@ def export_clearcoat(blender_material, export_settings):
clearcoat_extension = {} clearcoat_extension = {}
clearcoat_roughness_slots = () clearcoat_roughness_slots = ()
clearcoat_socket = gltf2_blender_get.get_socket(blender_material, 'Coat Weight') clearcoat_socket = get_socket(blender_material, 'Coat Weight')
clearcoat_roughness_socket = gltf2_blender_get.get_socket(blender_material, 'Coat Roughness') clearcoat_roughness_socket = get_socket(blender_material, 'Coat Roughness')
clearcoat_normal_socket = gltf2_blender_get.get_socket(blender_material, 'Coat Normal') clearcoat_normal_socket = get_socket(blender_material, 'Coat Normal')
if isinstance(clearcoat_socket, bpy.types.NodeSocket) and not clearcoat_socket.is_linked: if isinstance(clearcoat_socket.socket, bpy.types.NodeSocket) and not clearcoat_socket.socket.is_linked:
clearcoat_extension['clearcoatFactor'] = clearcoat_socket.default_value clearcoat_extension['clearcoatFactor'] = clearcoat_socket.socket.default_value
clearcoat_enabled = clearcoat_extension['clearcoatFactor'] > 0 clearcoat_enabled = clearcoat_extension['clearcoatFactor'] > 0
elif gltf2_blender_get.has_image_node_from_socket(clearcoat_socket): elif has_image_node_from_socket(clearcoat_socket, export_settings):
fac = gltf2_blender_get.get_factor_from_socket(clearcoat_socket, kind='VALUE') fac = get_factor_from_socket(clearcoat_socket, kind='VALUE')
# default value in glTF is 0.0, but if there is a texture without factor, use 1 # default value in glTF is 0.0, but if there is a texture without factor, use 1
clearcoat_extension['clearcoatFactor'] = fac if fac != None else 1.0 clearcoat_extension['clearcoatFactor'] = fac if fac != None else 1.0
has_clearcoat_texture = True has_clearcoat_texture = True
@ -32,10 +32,10 @@ def export_clearcoat(blender_material, export_settings):
if not clearcoat_enabled: if not clearcoat_enabled:
return None, {} return None, {}
if isinstance(clearcoat_roughness_socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.is_linked: if isinstance(clearcoat_roughness_socket.socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.socket.is_linked:
clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.default_value clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.socket.default_value
elif gltf2_blender_get.has_image_node_from_socket(clearcoat_roughness_socket): elif has_image_node_from_socket(clearcoat_roughness_socket, export_settings):
fac = gltf2_blender_get.get_factor_from_socket(clearcoat_roughness_socket, kind='VALUE') fac = get_factor_from_socket(clearcoat_roughness_socket, kind='VALUE')
# default value in glTF is 0.0, but if there is a texture without factor, use 1 # default value in glTF is 0.0, but if there is a texture without factor, use 1
clearcoat_extension['clearcoatRoughnessFactor'] = fac if fac != None else 1.0 clearcoat_extension['clearcoatRoughnessFactor'] = fac if fac != None else 1.0
has_clearcoat_roughness_texture = True has_clearcoat_roughness_texture = True
@ -71,7 +71,7 @@ def export_clearcoat(blender_material, export_settings):
clearcoat_extension['clearcoatRoughnessTexture'] = clearcoat_roughness_texture clearcoat_extension['clearcoatRoughnessTexture'] = clearcoat_roughness_texture
uvmap_infos.update({'clearcoatRoughnessTexture': uvmap_info}) uvmap_infos.update({'clearcoatRoughnessTexture': uvmap_info})
if gltf2_blender_get.has_image_node_from_socket(clearcoat_normal_socket): if has_image_node_from_socket(clearcoat_normal_socket, export_settings):
clearcoat_normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class( clearcoat_normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class(
clearcoat_normal_socket, clearcoat_normal_socket,
(clearcoat_normal_socket,), (clearcoat_normal_socket,),

View File

@ -4,20 +4,26 @@
import bpy import bpy
from .....io.com.gltf2_io_extensions import Extension from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import \
get_const_from_default_value_socket, \
get_socket, \
get_factor_from_socket, \
get_const_from_socket, \
NodeSocket, \
get_socket_from_gltf_material_node
def export_emission_factor(blender_material, export_settings): def export_emission_factor(blender_material, export_settings):
emissive_socket = gltf2_blender_get.get_socket(blender_material, "Emissive") emissive_socket = get_socket(blender_material, "Emissive")
if emissive_socket is None: if emissive_socket.socket is None:
emissive_socket = gltf2_blender_get.get_socket_old(blender_material, "EmissiveFactor") emissive_socket = get_socket_from_gltf_material_node(blender_material, "EmissiveFactor")
if isinstance(emissive_socket, bpy.types.NodeSocket): if isinstance(emissive_socket.socket, bpy.types.NodeSocket):
if export_settings['gltf_image_format'] != "NONE": if export_settings['gltf_image_format'] != "NONE":
factor = gltf2_blender_get.get_factor_from_socket(emissive_socket, kind='RGB') factor = get_factor_from_socket(emissive_socket, kind='RGB')
else: else:
factor = gltf2_blender_get.get_const_from_default_value_socket(emissive_socket, kind='RGB') factor = get_const_from_default_value_socket(emissive_socket, kind='RGB')
if factor is None and emissive_socket.is_linked: if factor is None and emissive_socket.socket.is_linked:
# In glTF, the default emissiveFactor is all zeros, so if an emission texture is connected, # In glTF, the default emissiveFactor is all zeros, so if an emission texture is connected,
# we have to manually set it to all ones. # we have to manually set it to all ones.
factor = [1.0, 1.0, 1.0] factor = [1.0, 1.0, 1.0]
@ -26,12 +32,12 @@ def export_emission_factor(blender_material, export_settings):
# Handle Emission Strength # Handle Emission Strength
strength_socket = None strength_socket = None
if emissive_socket.node.type == 'EMISSION': if emissive_socket.socket.node.type == 'EMISSION':
strength_socket = emissive_socket.node.inputs['Strength'] strength_socket = emissive_socket.socket.node.inputs['Strength']
elif 'Emission Strength' in emissive_socket.node.inputs: elif 'Emission Strength' in emissive_socket.socket.node.inputs:
strength_socket = emissive_socket.node.inputs['Emission Strength'] strength_socket = emissive_socket.socket.node.inputs['Emission Strength']
strength = ( strength = (
gltf2_blender_get.get_const_from_socket(strength_socket, kind='VALUE') get_const_from_socket(NodeSocket(strength_socket, emissive_socket.group_path), kind='VALUE')
if strength_socket is not None if strength_socket is not None
else None else None
) )
@ -49,9 +55,9 @@ def export_emission_factor(blender_material, export_settings):
return None return None
def export_emission_texture(blender_material, export_settings): def export_emission_texture(blender_material, export_settings):
emissive = gltf2_blender_get.get_socket(blender_material, "Emissive") emissive = get_socket(blender_material, "Emissive")
if emissive is None: if emissive.socket is None:
emissive = gltf2_blender_get.get_socket_old(blender_material, "Emissive") emissive = get_socket_from_gltf_material_node(blender_material, "Emissive")
emissive_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(emissive, (emissive,), (), export_settings) emissive_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(emissive, (emissive,), (), export_settings)
return emissive_texture, {'emissiveTexture': uvmap_info} return emissive_texture, {'emissiveTexture': uvmap_info}

View File

@ -4,20 +4,20 @@
from .....io.com.gltf2_io_extensions import Extension from .....io.com.gltf2_io_extensions import Extension
from .....io.com.gltf2_io_constants import GLTF_IOR from .....io.com.gltf2_io_constants import GLTF_IOR
from ....exp import gltf2_blender_get from ..gltf2_blender_search_node_tree import get_socket
def export_ior(blender_material, extensions, export_settings): def export_ior(blender_material, extensions, export_settings):
ior_socket = gltf2_blender_get.get_socket(blender_material, 'IOR') ior_socket = get_socket(blender_material, 'IOR')
if not ior_socket: if not ior_socket.socket:
return None return None
# We don't manage case where socket is linked, always check default value # We don't manage case where socket is linked, always check default value
if ior_socket.is_linked: if ior_socket.socket.is_linked:
# TODOExt: add warning? # TODOExt: add warning?
return None return None
if ior_socket.default_value == GLTF_IOR: if ior_socket.socket.default_value == GLTF_IOR:
return None return None
# Export only if the following extensions are exported: # Export only if the following extensions are exported:
@ -31,6 +31,6 @@ def export_ior(blender_material, extensions, export_settings):
return None return None
ior_extension = {} ior_extension = {}
ior_extension['ior'] = ior_socket.default_value ior_extension['ior'] = ior_socket.socket.default_value
return Extension('KHR_materials_ior', ior_extension, False) return Extension('KHR_materials_ior', ior_extension, False)

View File

@ -4,47 +4,48 @@
import bpy import bpy
from .....io.com.gltf2_io_extensions import Extension from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_socket, \
get_factor_from_socket
def export_sheen(blender_material, export_settings): def export_sheen(blender_material, export_settings):
sheen_extension = {} sheen_extension = {}
sheenTint_socket = gltf2_blender_get.get_socket(blender_material, "Sheen Tint") sheenTint_socket = get_socket(blender_material, "Sheen Tint")
sheenRoughness_socket = gltf2_blender_get.get_socket(blender_material, "Sheen Roughness") sheenRoughness_socket = get_socket(blender_material, "Sheen Roughness")
sheen_socket = gltf2_blender_get.get_socket(blender_material, "Sheen Weight") sheen_socket = get_socket(blender_material, "Sheen Weight")
if sheenTint_socket is None or sheenRoughness_socket is None or sheen_socket is None: if sheenTint_socket.socket is None or sheenRoughness_socket.socket is None or sheen_socket.socket is None:
return None, {} return None, {}
if sheen_socket.is_linked is False and sheen_socket.default_value == 0.0: if sheen_socket.socket.is_linked is False and sheen_socket.socket.default_value == 0.0:
return None, {} return None, {}
uvmap_infos = {} uvmap_infos = {}
#TODOExt : What to do if sheen_socket is linked? or is not between 0 and 1? #TODOExt : What to do if sheen_socket is linked? or is not between 0 and 1?
sheenTint_non_linked = isinstance(sheenTint_socket, bpy.types.NodeSocket) and not sheenTint_socket.is_linked sheenTint_non_linked = isinstance(sheenTint_socket.socket, bpy.types.NodeSocket) and not sheenTint_socket.socket.is_linked
sheenRoughness_non_linked = isinstance(sheenRoughness_socket, bpy.types.NodeSocket) and not sheenRoughness_socket.is_linked sheenRoughness_non_linked = isinstance(sheenRoughness_socket.socket, bpy.types.NodeSocket) and not sheenRoughness_socket.socket.is_linked
use_actives_uvmaps = []
if sheenTint_non_linked is True: if sheenTint_non_linked is True:
color = sheenTint_socket.default_value[:3] color = sheenTint_socket.socket.default_value[:3]
if color != (0.0, 0.0, 0.0): if color != (0.0, 0.0, 0.0):
sheen_extension['sheenColorFactor'] = color sheen_extension['sheenColorFactor'] = color
else: else:
# Factor # Factor
fac = gltf2_blender_get.get_factor_from_socket(sheenTint_socket, kind='RGB') fac = get_factor_from_socket(sheenTint_socket, kind='RGB')
if fac is None: if fac is None:
fac = [1.0, 1.0, 1.0] # Default is 0.0/0.0/0.0, so we need to set it to 1 if no factor fac = [1.0, 1.0, 1.0] # Default is 0.0/0.0/0.0, so we need to set it to 1 if no factor
if fac is not None and fac != [0.0, 0.0, 0.0]: if fac is not None and fac != [0.0, 0.0, 0.0]:
sheen_extension['sheenColorFactor'] = fac sheen_extension['sheenColorFactor'] = fac
# Texture # Texture
if gltf2_blender_get.has_image_node_from_socket(sheenTint_socket): if has_image_node_from_socket(sheenTint_socket, export_settings):
original_sheenColor_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( original_sheenColor_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
sheenTint_socket, sheenTint_socket,
(sheenTint_socket,), (sheenTint_socket,),
@ -55,19 +56,19 @@ def export_sheen(blender_material, export_settings):
uvmap_infos.update({'sheenColorTexture': uvmap_info}) uvmap_infos.update({'sheenColorTexture': uvmap_info})
if sheenRoughness_non_linked is True: if sheenRoughness_non_linked is True:
fac = sheenRoughness_socket.default_value fac = sheenRoughness_socket.socket.default_value
if fac != 0.0: if fac != 0.0:
sheen_extension['sheenRoughnessFactor'] = fac sheen_extension['sheenRoughnessFactor'] = fac
else: else:
# Factor # Factor
fac = gltf2_blender_get.get_factor_from_socket(sheenRoughness_socket, kind='VALUE') fac = get_factor_from_socket(sheenRoughness_socket, kind='VALUE')
if fac is None: if fac is None:
fac = 1.0 # Default is 0.0 so we need to set it to 1.0 if no factor fac = 1.0 # Default is 0.0 so we need to set it to 1.0 if no factor
if fac is not None and fac != 0.0: if fac is not None and fac != 0.0:
sheen_extension['sheenRoughnessFactor'] = fac sheen_extension['sheenRoughnessFactor'] = fac
# Texture # Texture
if gltf2_blender_get.has_image_node_from_socket(sheenRoughness_socket): if has_image_node_from_socket(sheenRoughness_socket, export_settings):
original_sheenRoughness_texture, uvmap_info , _ = gltf2_blender_gather_texture_info.gather_texture_info( original_sheenRoughness_texture, uvmap_info , _ = gltf2_blender_gather_texture_info.gather_texture_info(
sheenRoughness_socket, sheenRoughness_socket,
(sheenRoughness_socket,), (sheenRoughness_socket,),

View File

@ -4,68 +4,96 @@
import bpy import bpy
from .....io.com.gltf2_io_extensions import Extension from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material.gltf2_blender_gather_texture_info import gather_texture_info from ...material.gltf2_blender_gather_texture_info import gather_texture_info
from ..gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_socket, \
get_factor_from_socket
def export_specular(blender_material, export_settings): def export_specular(blender_material, export_settings):
specular_extension = {} specular_extension = {}
extensions_needed = False
specular_socket = gltf2_blender_get.get_socket(blender_material, 'Specular IOR Level') specular_socket = get_socket(blender_material, 'Specular IOR Level')
speculartint_socket = gltf2_blender_get.get_socket(blender_material, 'Specular Tint') speculartint_socket = get_socket(blender_material, 'Specular Tint')
if specular_socket is None or speculartint_socket is None: if specular_socket.socket is None or speculartint_socket.socket is None:
return None, {} return None, {}
uvmap_infos = {} uvmap_infos = {}
specular_non_linked = isinstance(specular_socket, bpy.types.NodeSocket) and not specular_socket.is_linked specular_non_linked = isinstance(specular_socket.socket, bpy.types.NodeSocket) and not specular_socket.socket.is_linked
specularcolor_non_linked = isinstance(speculartint_socket, bpy.types.NodeSocket) and not speculartint_socket.is_linked specularcolor_non_linked = isinstance(speculartint_socket.socket, bpy.types.NodeSocket) and not speculartint_socket.socket.is_linked
if specular_non_linked is True: if specular_non_linked is True:
fac = specular_socket.default_value fac = specular_socket.socket.default_value
if fac != 1.0: fac = fac * 2.0
if fac < 1.0:
specular_extension['specularFactor'] = fac specular_extension['specularFactor'] = fac
if fac == 0.0: extensions_needed = True
return None, {} elif fac > 1.0:
# glTF specularFactor should be <= 1.0, so we will multiply ColorFactor by specularFactor, and set SpecularFactor to 1.0 (default value)
extensions_needed = True
else:
pass # If fac == 1.0, no need to export specularFactor, the default value is 1.0
else: else:
# Factor # Factor
fac = gltf2_blender_get.get_factor_from_socket(specular_socket, kind='VALUE') fac = get_factor_from_socket(specular_socket, kind='VALUE')
if fac is not None and fac != 1.0: if fac is not None and fac != 1.0:
fac = fac * 2.0 if fac is not None else None
if fac is not None and fac < 1.0:
specular_extension['specularFactor'] = fac specular_extension['specularFactor'] = fac
extensions_needed = True
if fac == 0.0: elif fac is not None and fac > 1.0:
return None, {} # glTF specularFactor should be <= 1.0, so we will multiply ColorFactor by specularFactor, and set SpecularFactor to 1.0 (default value)
extensions_needed = True
# Texture # Texture
if gltf2_blender_get.has_image_node_from_socket(specular_socket): if has_image_node_from_socket(specular_socket, export_settings):
original_specular_texture, uvmap_info, _ = gather_texture_info( specular_texture, uvmap_info, _ = gather_texture_info(
specular_socket, specular_socket,
(specular_socket,), (specular_socket,),
(), (),
export_settings, export_settings,
) )
specular_extension['specularTexture'] = original_specular_texture specular_extension['specularTexture'] = specular_texture
uvmap_infos.update({'specularTexture': uvmap_info}) uvmap_infos.update({'specularTexture': uvmap_info})
extensions_needed = True
if specularcolor_non_linked is True: if specularcolor_non_linked is True:
color = speculartint_socket.default_value[:3] color = speculartint_socket.socket.default_value[:3]
if fac is not None and fac > 1.0:
color = (color[0] * fac, color[1] * fac, color[2] * fac)
specular_extension['specularColorFactor'] = color if color != (1.0, 1.0, 1.0) else None
if color != (1.0, 1.0, 1.0): if color != (1.0, 1.0, 1.0):
specular_extension['specularColorFactor'] = color extensions_needed = True
else: else:
# Factor # Factor
fac = gltf2_blender_get.get_factor_from_socket(speculartint_socket, kind='RGB') fac_color = get_factor_from_socket(speculartint_socket, kind='RGB')
if fac is not None and fac != (1.0, 1.0, 1.0): if fac_color is not None and fac is not None and fac > 1.0:
specular_extension['specularColorFactor'] = fac fac_color = (fac_color[0] * fac, fac_color[1] * fac, fac_color[2] * fac)
elif fac_color is None and fac is not None and fac > 1.0:
fac_color = (fac, fac, fac)
specular_extension['specularColorFactor'] = fac_color if fac_color != (1.0, 1.0, 1.0) else None
if fac_color != (1.0, 1.0, 1.0):
extensions_needed = True
# Texture # Texture
if gltf2_blender_get.has_image_node_from_socket(speculartint_socket): if has_image_node_from_socket(speculartint_socket, export_settings):
original_specularcolor_texture, uvmap_info, _ = gather_texture_info( specularcolor_texture, uvmap_info, _ = gather_texture_info(
speculartint_socket, speculartint_socket,
(speculartint_socket,), (speculartint_socket,),
(), (),
export_settings, export_settings,
) )
specular_extension['specularColorTexture'] = original_specularcolor_texture specular_extension['specularColorTexture'] = specularcolor_texture
uvmap_infos.update({'specularColorTexture': uvmap_info}) uvmap_infos.update({'specularColorTexture': uvmap_info})
extensions_needed = True
if extensions_needed is False:
return None, {}
return Extension('KHR_materials_specular', specular_extension, False), uvmap_infos return Extension('KHR_materials_specular', specular_extension, False), uvmap_infos

View File

@ -4,8 +4,11 @@
import bpy import bpy
from .....io.com.gltf2_io_extensions import Extension from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_socket, \
get_factor_from_socket
def export_transmission(blender_material, export_settings): def export_transmission(blender_material, export_settings):
transmission_enabled = False transmission_enabled = False
@ -14,13 +17,13 @@ def export_transmission(blender_material, export_settings):
transmission_extension = {} transmission_extension = {}
transmission_slots = () transmission_slots = ()
transmission_socket = gltf2_blender_get.get_socket(blender_material, 'Transmission Weight') transmission_socket = get_socket(blender_material, 'Transmission Weight')
if isinstance(transmission_socket, bpy.types.NodeSocket) and not transmission_socket.is_linked: if isinstance(transmission_socket.socket, bpy.types.NodeSocket) and not transmission_socket.socket.is_linked:
transmission_extension['transmissionFactor'] = transmission_socket.default_value transmission_extension['transmissionFactor'] = transmission_socket.socket.default_value
transmission_enabled = transmission_extension['transmissionFactor'] > 0 transmission_enabled = transmission_extension['transmissionFactor'] > 0
elif gltf2_blender_get.has_image_node_from_socket(transmission_socket): elif has_image_node_from_socket(transmission_socket, export_settings):
fac = gltf2_blender_get.get_factor_from_socket(transmission_socket, kind='VALUE') fac = get_factor_from_socket(transmission_socket, kind='VALUE')
transmission_extension['transmissionFactor'] = fac if fac is not None else 1.0 transmission_extension['transmissionFactor'] = fac if fac is not None else 1.0
has_transmission_texture = True has_transmission_texture = True
transmission_enabled = True transmission_enabled = True

View File

@ -4,8 +4,13 @@
import bpy import bpy
from .....io.com.gltf2_io_extensions import Extension from .....io.com.gltf2_io_extensions import Extension
from ....exp import gltf2_blender_get
from ...material import gltf2_blender_gather_texture_info from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_const_from_default_value_socket, \
get_socket_from_gltf_material_node, \
get_socket, \
get_factor_from_socket
def export_volume(blender_material, export_settings): def export_volume(blender_material, export_settings):
@ -13,10 +18,10 @@ def export_volume(blender_material, export_settings):
# If no transmission --> No volume # If no transmission --> No volume
transmission_enabled = False transmission_enabled = False
transmission_socket = gltf2_blender_get.get_socket(blender_material, 'Transmission Weight') transmission_socket = get_socket(blender_material, 'Transmission Weight')
if isinstance(transmission_socket, bpy.types.NodeSocket) and not transmission_socket.is_linked: if isinstance(transmission_socket.socket, bpy.types.NodeSocket) and not transmission_socket.socket.is_linked:
transmission_enabled = transmission_socket.default_value > 0 transmission_enabled = transmission_socket.socket.default_value > 0
elif gltf2_blender_get.has_image_node_from_socket(transmission_socket): elif has_image_node_from_socket(transmission_socket, export_settings):
transmission_enabled = True transmission_enabled = True
if transmission_enabled is False: if transmission_enabled is False:
@ -27,43 +32,43 @@ def export_volume(blender_material, export_settings):
thickness_slots = () thickness_slots = ()
uvmap_info = {} uvmap_info = {}
thicknesss_socket = gltf2_blender_get.get_socket_old(blender_material, 'Thickness') thickness_socket = get_socket_from_gltf_material_node(blender_material, 'Thickness')
if thicknesss_socket is None: if thickness_socket.socket is None:
# If no thickness (here because there is no glTF Material Output node), no volume extension export # If no thickness (here because there is no glTF Material Output node), no volume extension export
return None, {} return None, {}
density_socket = gltf2_blender_get.get_socket(blender_material, 'Density', volume=True) density_socket = get_socket(blender_material, 'Density', volume=True)
attenuation_color_socket = gltf2_blender_get.get_socket(blender_material, 'Color', volume=True) attenuation_color_socket = get_socket(blender_material, 'Color', volume=True)
# Even if density or attenuation are not set, we export volume extension # Even if density or attenuation are not set, we export volume extension
if isinstance(attenuation_color_socket, bpy.types.NodeSocket): if isinstance(attenuation_color_socket.socket, bpy.types.NodeSocket):
rgb = gltf2_blender_get.get_const_from_default_value_socket(attenuation_color_socket, kind='RGB') rgb = get_const_from_default_value_socket(attenuation_color_socket, kind='RGB')
volume_extension['attenuationColor'] = rgb volume_extension['attenuationColor'] = rgb
if isinstance(density_socket, bpy.types.NodeSocket): if isinstance(density_socket.socket, bpy.types.NodeSocket):
density = gltf2_blender_get.get_const_from_default_value_socket(density_socket, kind='VALUE') density = get_const_from_default_value_socket(density_socket, kind='VALUE')
volume_extension['attenuationDistance'] = 1.0 / density if density != 0 else None # infinity (Using None as glTF default) volume_extension['attenuationDistance'] = 1.0 / density if density != 0 else None # infinity (Using None as glTF default)
if isinstance(thicknesss_socket, bpy.types.NodeSocket) and not thicknesss_socket.is_linked: if isinstance(thickness_socket.socket, bpy.types.NodeSocket) and not thickness_socket.socket.is_linked:
val = thicknesss_socket.default_value val = thickness_socket.socket.default_value
if val == 0.0: if val == 0.0:
# If no thickness, no volume extension export # If no thickness, no volume extension export
return None, {} return None, {}
volume_extension['thicknessFactor'] = val volume_extension['thicknessFactor'] = val
elif gltf2_blender_get.has_image_node_from_socket(thicknesss_socket): elif has_image_node_from_socket(thickness_socket, export_settings):
fac = gltf2_blender_get.get_factor_from_socket(thicknesss_socket, kind='VALUE') fac = get_factor_from_socket(thickness_socket, kind='VALUE')
# default value in glTF is 0.0, but if there is a texture without factor, use 1 # default value in glTF is 0.0, but if there is a texture without factor, use 1
volume_extension['thicknessFactor'] = fac if fac != None else 1.0 volume_extension['thicknessFactor'] = fac if fac != None else 1.0
has_thickness_texture = True has_thickness_texture = True
# Pack thickness channel (R). # Pack thickness channel (G).
if has_thickness_texture: if has_thickness_texture:
thickness_slots = (thicknesss_socket,) thickness_slots = (thickness_socket,)
if len(thickness_slots) > 0: if len(thickness_slots) > 0:
combined_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( combined_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
thicknesss_socket, thickness_socket,
thickness_slots, thickness_slots,
(), (),
export_settings, export_settings,

View File

@ -147,7 +147,6 @@ class ExportImage:
# Unhappy path = we need to create the image self.fills describes or self.stores describes # Unhappy path = we need to create the image self.fills describes or self.stores describes
if self.numpy_calc is None: if self.numpy_calc is None:
print(">2")
return self.__encode_unhappy(export_settings), None return self.__encode_unhappy(export_settings), None
else: else:
pixels, width, height, factor = self.numpy_calc(self.stored) pixels, width, height, factor = self.numpy_calc(self.stored)

View File

@ -13,7 +13,7 @@ from ....io.com import gltf2_io_debug
from ....io.exp.gltf2_io_user_extensions import export_user_extensions from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ..gltf2_blender_gather_cache import cached from ..gltf2_blender_gather_cache import cached
from .extensions.gltf2_blender_image import Channel, ExportImage, FillImage from .extensions.gltf2_blender_image import Channel, ExportImage, FillImage
from ..gltf2_blender_get import get_tex_from_socket from .gltf2_blender_search_node_tree import get_texture_node_from_socket, NodeSocket
@cached @cached
def gather_image( def gather_image(
@ -59,7 +59,7 @@ def gather_image(
export_user_extensions('gather_image_hook', export_settings, image, blender_shader_sockets) export_user_extensions('gather_image_hook', export_settings, image, blender_shader_sockets)
# We also return image_data, as it can be used to generate same file with another extension for webp management # We also return image_data, as it can be used to generate same file with another extension for WebP management
return image, image_data, factor return image, image_data, factor
def __gather_original_uri(original_uri, export_settings): def __gather_original_uri(original_uri, export_settings):
@ -114,11 +114,11 @@ def __gather_extras(sockets, export_settings):
def __gather_mime_type(sockets, export_image, export_settings): def __gather_mime_type(sockets, export_image, export_settings):
# force png or webp if Alpha contained so we can export alpha # force png or webp if Alpha contained so we can export alpha
for socket in sockets: for socket in sockets:
if socket.name == "Alpha": if socket.socket.name == "Alpha":
if export_settings["gltf_image_format"] == "WEBP": if export_settings["gltf_image_format"] == "WEBP":
return "image/webp" return "image/webp"
else: else:
# If we keep image as is (no channel composition), we need to keep original format (for webp) # If we keep image as is (no channel composition), we need to keep original format (for WebP)
image = export_image.blender_image() image = export_image.blender_image()
if image is not None and __is_blender_image_a_webp(image): if image is not None and __is_blender_image_a_webp(image):
return "image/webp" return "image/webp"
@ -191,7 +191,7 @@ def __get_image_data(sockets, default_sockets, export_settings) -> ExportImage:
# For shared resources, such as images, we just store the portion of data that is needed in the glTF property # For shared resources, such as images, we just store the portion of data that is needed in the glTF property
# in a helper class. During generation of the glTF in the exporter these will then be combined to actual binary # in a helper class. During generation of the glTF in the exporter these will then be combined to actual binary
# resources. # resources.
results = [get_tex_from_socket(socket) for socket in sockets] results = [get_texture_node_from_socket(socket, export_settings) for socket in sockets]
# Check if we need a simple mapping or more complex calculation # Check if we need a simple mapping or more complex calculation
# There is currently no complex calculation for any textures # There is currently no complex calculation for any textures
@ -222,7 +222,7 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
else: else:
# rudimentarily try follow the node tree to find the correct image data. # rudimentarily try follow the node tree to find the correct image data.
src_chan = Channel.R src_chan = None
for elem in result.path: for elem in result.path:
if isinstance(elem.from_node, bpy.types.ShaderNodeSeparateColor): if isinstance(elem.from_node, bpy.types.ShaderNodeSeparateColor):
src_chan = { src_chan = {
@ -233,26 +233,55 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
if elem.from_socket.name == 'Alpha': if elem.from_socket.name == 'Alpha':
src_chan = Channel.A src_chan = Channel.A
if src_chan is None:
# No SeparateColor node found, so take the specification channel that is needed
# So export is correct if the user plugs the texture directly into the socket
if socket.socket.name == 'Metallic':
src_chan = Channel.B
elif socket.socket.name == 'Roughness':
src_chan = Channel.G
elif socket.socket.name == 'Occlusion':
src_chan = Channel.R
elif socket.socket.name == 'Alpha':
src_chan = Channel.A
elif socket.socket.name == 'Coat Weight':
src_chan = Channel.R
elif socket.socket.name == 'Coat Roughness':
src_chan = Channel.G
elif socket.socket.name == 'Thickness': # For KHR_materials_volume
src_chan = Channel.G
if src_chan is None:
# Seems we can't find the channel
# We are in a case where user plugged a texture in a Color socket, but we may have used the alpha one
if socket.socket.name in ["Alpha", "Specular IOR Level", "Sheen Roughness"]:
src_chan = Channel.A
if src_chan is None:
# We definitely can't find the channel, so keep the first channel even if this is wrong
src_chan = Channel.R
dst_chan = None dst_chan = None
# some sockets need channel rewriting (gltf pbr defines fixed channels for some attributes) # some sockets need channel rewriting (gltf pbr defines fixed channels for some attributes)
if socket.name == 'Metallic': if socket.socket.name == 'Metallic':
dst_chan = Channel.B dst_chan = Channel.B
elif socket.name == 'Roughness': elif socket.socket.name == 'Roughness':
dst_chan = Channel.G dst_chan = Channel.G
elif socket.name == 'Occlusion': elif socket.socket.name == 'Occlusion':
dst_chan = Channel.R dst_chan = Channel.R
elif socket.name == 'Alpha': elif socket.socket.name == 'Alpha':
dst_chan = Channel.A dst_chan = Channel.A
elif socket.name == 'Coat Weight': elif socket.socket.name == 'Coat Weight':
dst_chan = Channel.R dst_chan = Channel.R
elif socket.name == 'Coat Roughness': elif socket.socket.name == 'Coat Roughness':
dst_chan = Channel.G dst_chan = Channel.G
elif socket.name == 'Thickness': # For KHR_materials_volume elif socket.socket.name == 'Thickness': # For KHR_materials_volume
dst_chan = Channel.G dst_chan = Channel.G
elif socket.name == "Specular IOR Level": # For KHR_material_specular elif socket.socket.name == "Specular IOR Level": # For KHR_material_specular
dst_chan = Channel.A dst_chan = Channel.A
elif socket.name == "Sheen Roughness": # For KHR_materials_sheen elif socket.socket.name == "Sheen Roughness": # For KHR_materials_sheen
dst_chan = Channel.A dst_chan = Channel.A
if dst_chan is not None: if dst_chan is not None:
@ -260,12 +289,12 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
# Since metal/roughness are always used together, make sure # Since metal/roughness are always used together, make sure
# the other channel is filled. # the other channel is filled.
if socket.name == 'Metallic' and not composed_image.is_filled(Channel.G): if socket.socket.name == 'Metallic' and not composed_image.is_filled(Channel.G):
if default_roughness is not None: if default_roughness is not None:
composed_image.fill_with(Channel.G, default_roughness) composed_image.fill_with(Channel.G, default_roughness)
else: else:
composed_image.fill_white(Channel.G) composed_image.fill_white(Channel.G)
elif socket.name == 'Roughness' and not composed_image.is_filled(Channel.B): elif socket.socket.name == 'Roughness' and not composed_image.is_filled(Channel.B):
if default_metallic is not None: if default_metallic is not None:
composed_image.fill_with(Channel.B, default_metallic) composed_image.fill_with(Channel.B, default_metallic)
else: else:

View File

@ -10,7 +10,6 @@ from ....io.com.gltf2_io_extensions import Extension
from ....io.exp.gltf2_io_user_extensions import export_user_extensions from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....io.com.gltf2_io_debug import print_console from ....io.com.gltf2_io_debug import print_console
from ...com.gltf2_blender_extras import generate_extras from ...com.gltf2_blender_extras import generate_extras
from ...exp import gltf2_blender_get
from ..gltf2_blender_gather_cache import cached, cached_by_key from ..gltf2_blender_gather_cache import cached, cached_by_key
from . import gltf2_blender_gather_materials_unlit from . import gltf2_blender_gather_materials_unlit
from . import gltf2_blender_gather_texture_info from . import gltf2_blender_gather_texture_info
@ -23,6 +22,11 @@ from .extensions.gltf2_blender_gather_materials_specular import export_specular
from .extensions.gltf2_blender_gather_materials_transmission import export_transmission from .extensions.gltf2_blender_gather_materials_transmission import export_transmission
from .extensions.gltf2_blender_gather_materials_clearcoat import export_clearcoat from .extensions.gltf2_blender_gather_materials_clearcoat import export_clearcoat
from .extensions.gltf2_blender_gather_materials_ior import export_ior from .extensions.gltf2_blender_gather_materials_ior import export_ior
from .gltf2_blender_search_node_tree import \
has_image_node_from_socket, \
get_socket_from_gltf_material_node, \
get_socket, \
get_node_socket
@cached @cached
def get_material_cache_key(blender_material, export_settings): def get_material_cache_key(blender_material, export_settings):
@ -90,7 +94,7 @@ def gather_material(blender_material, export_settings):
# If emissive is set, from an emissive node (not PBR) # If emissive is set, from an emissive node (not PBR)
# We need to set manually default values for # We need to set manually default values for
# pbr_metallic_roughness.baseColor # pbr_metallic_roughness.baseColor
if material.emissive_factor is not None and gltf2_blender_get.get_node_socket(blender_material, bpy.types.ShaderNodeBsdfPrincipled, "Base Color") is None: if material.emissive_factor is not None and get_node_socket(blender_material, bpy.types.ShaderNodeBsdfPrincipled, "Base Color").socket is None:
material.pbr_metallic_roughness = gltf2_blender_gather_materials_pbr_metallic_roughness.get_default_pbr_for_emissive_node() material.pbr_metallic_roughness = gltf2_blender_gather_materials_pbr_metallic_roughness.get_default_pbr_for_emissive_node()
export_user_extensions('gather_material_hook', export_settings, material, blender_material) export_user_extensions('gather_material_hook', export_settings, material, blender_material)
@ -143,12 +147,6 @@ def __gather_double_sided(blender_material, extensions, export_settings):
if not blender_material.use_backface_culling: if not blender_material.use_backface_culling:
return True return True
old_double_sided_socket = gltf2_blender_get.get_socket_old(blender_material, "DoubleSided")
if old_double_sided_socket is not None and\
not old_double_sided_socket.is_linked and\
old_double_sided_socket.default_value > 0.5:
return True
return None return None
@ -222,9 +220,7 @@ def __gather_name(blender_material, export_settings):
def __gather_normal_texture(blender_material, export_settings): def __gather_normal_texture(blender_material, export_settings):
normal = gltf2_blender_get.get_socket(blender_material, "Normal") normal = get_socket(blender_material, "Normal")
if normal is None:
normal = gltf2_blender_get.get_socket_old(blender_material, "Normal")
normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class( normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class(
normal, normal,
(normal,), (normal,),
@ -236,35 +232,37 @@ def __gather_orm_texture(blender_material, export_settings):
# Check for the presence of Occlusion, Roughness, Metallic sharing a single image. # Check for the presence of Occlusion, Roughness, Metallic sharing a single image.
# If not fully shared, return None, so the images will be cached and processed separately. # If not fully shared, return None, so the images will be cached and processed separately.
occlusion = gltf2_blender_get.get_socket(blender_material, "Occlusion") occlusion = get_socket(blender_material, "Occlusion")
if occlusion is None or not gltf2_blender_get.has_image_node_from_socket(occlusion): if occlusion.socket is None or not has_image_node_from_socket(occlusion, export_settings):
occlusion = gltf2_blender_get.get_socket_old(blender_material, "Occlusion") occlusion = get_socket_from_gltf_material_node(blender_material, "Occlusion")
if occlusion is None or not gltf2_blender_get.has_image_node_from_socket(occlusion): if occlusion.socket is None or not has_image_node_from_socket(occlusion, export_settings):
return None, None return None, None
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic") metallic_socket = get_socket(blender_material, "Metallic")
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness") roughness_socket = get_socket(blender_material, "Roughness")
hasMetal = metallic_socket is not None and gltf2_blender_get.has_image_node_from_socket(metallic_socket) hasMetal = metallic_socket.socket is not None and has_image_node_from_socket(metallic_socket, export_settings)
hasRough = roughness_socket is not None and gltf2_blender_get.has_image_node_from_socket(roughness_socket) hasRough = roughness_socket.socket is not None and has_image_node_from_socket(roughness_socket, export_settings)
default_sockets = () default_sockets = ()
# Warning: for default socket, do not use NodeSocket object, because it will break cache
# Using directly the Blender socket object
if not hasMetal and not hasRough: if not hasMetal and not hasRough:
metallic_roughness = gltf2_blender_get.get_socket_old(blender_material, "MetallicRoughness") metallic_roughness = get_socket_from_gltf_material_node(blender_material, "MetallicRoughness")
if metallic_roughness is None or not gltf2_blender_get.has_image_node_from_socket(metallic_roughness): if metallic_roughness.socket is None or not has_image_node_from_socket(metallic_roughness, export_settings):
return None, default_sockets return None, default_sockets
result = (occlusion, metallic_roughness) result = (occlusion, metallic_roughness)
elif not hasMetal: elif not hasMetal:
result = (occlusion, roughness_socket) result = (occlusion, roughness_socket)
default_sockets = (metallic_socket,) default_sockets = (metallic_socket.socket,)
elif not hasRough: elif not hasRough:
result = (occlusion, metallic_socket) result = (occlusion, metallic_socket)
default_sockets = (roughness_socket,) default_sockets = (roughness_socket.socket,)
else: else:
result = (occlusion, roughness_socket, metallic_socket) result = (occlusion, roughness_socket, metallic_socket)
default_sockets = () default_sockets = ()
if not gltf2_blender_gather_texture_info.check_same_size_images(result): if not gltf2_blender_gather_texture_info.check_same_size_images(result, export_settings):
print_console("INFO", print_console("INFO",
"Occlusion and metal-roughness texture will be exported separately " "Occlusion and metal-roughness texture will be exported separately "
"(use same-sized images if you want them combined)") "(use same-sized images if you want them combined)")
@ -278,9 +276,9 @@ def __gather_orm_texture(blender_material, export_settings):
return result, default_sockets return result, default_sockets
def __gather_occlusion_texture(blender_material, orm_texture, default_sockets, export_settings): def __gather_occlusion_texture(blender_material, orm_texture, default_sockets, export_settings):
occlusion = gltf2_blender_get.get_socket(blender_material, "Occlusion") occlusion = get_socket(blender_material, "Occlusion")
if occlusion is None: if occlusion.socket is None:
occlusion = gltf2_blender_get.get_socket_old(blender_material, "Occlusion") occlusion = get_socket_from_gltf_material_node(blender_material, "Occlusion")
occlusion_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_occlusion_texture_info_class( occlusion_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_occlusion_texture_info_class(
occlusion, occlusion,
orm_texture or (occlusion,), orm_texture or (occlusion,),

View File

@ -4,13 +4,18 @@
import bpy import bpy
from ....io.com import gltf2_io from ....io.com import gltf2_io
from ....io.exp.gltf2_io_user_extensions import export_user_extensions from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ...exp import gltf2_blender_get
from ..gltf2_blender_gather_cache import cached from ..gltf2_blender_gather_cache import cached
from ..gltf2_blender_get import image_tex_is_valid_from_socket
from .gltf2_blender_search_node_tree import get_vertex_color_info from .gltf2_blender_search_node_tree import get_vertex_color_info
from .gltf2_blender_gather_texture_info import gather_texture_info from .gltf2_blender_gather_texture_info import gather_texture_info
from .gltf2_blender_search_node_tree import \
get_socket_from_gltf_material_node, \
has_image_node_from_socket, \
get_const_from_default_value_socket, \
get_socket, \
get_factor_from_socket
@cached @cached
def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings): def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings):
@ -49,23 +54,23 @@ def __gather_base_color_factor(blender_material, export_settings):
rgb, alpha = None, None rgb, alpha = None, None
alpha_socket = gltf2_blender_get.get_socket(blender_material, "Alpha") alpha_socket = get_socket(blender_material, "Alpha")
if isinstance(alpha_socket, bpy.types.NodeSocket): if isinstance(alpha_socket.socket, bpy.types.NodeSocket):
if export_settings['gltf_image_format'] != "NONE": if export_settings['gltf_image_format'] != "NONE":
alpha = gltf2_blender_get.get_factor_from_socket(alpha_socket, kind='VALUE') alpha = get_factor_from_socket(alpha_socket, kind='VALUE')
else: else:
alpha = gltf2_blender_get.get_const_from_default_value_socket(alpha_socket, kind='VALUE') alpha = get_const_from_default_value_socket(alpha_socket, kind='VALUE')
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Base Color") base_color_socket = get_socket(blender_material, "Base Color")
if base_color_socket.socket is None:
base_color_socket = get_socket(blender_material, "BaseColor")
if base_color_socket is None: if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket(blender_material, "BaseColor") base_color_socket = get_socket_from_gltf_material_node(blender_material, "BaseColorFactor")
if base_color_socket is None: if isinstance(base_color_socket.socket, bpy.types.NodeSocket):
base_color_socket = gltf2_blender_get.get_socket_old(blender_material, "BaseColorFactor")
if isinstance(base_color_socket, bpy.types.NodeSocket):
if export_settings['gltf_image_format'] != "NONE": if export_settings['gltf_image_format'] != "NONE":
rgb = gltf2_blender_get.get_factor_from_socket(base_color_socket, kind='RGB') rgb = get_factor_from_socket(base_color_socket, kind='RGB')
else: else:
rgb = gltf2_blender_get.get_const_from_default_value_socket(base_color_socket, kind='RGB') rgb = get_const_from_default_value_socket(base_color_socket, kind='RGB')
if rgb is None: rgb = [1.0, 1.0, 1.0] if rgb is None: rgb = [1.0, 1.0, 1.0]
if alpha is None: alpha = 1.0 if alpha is None: alpha = 1.0
@ -80,18 +85,18 @@ def __gather_base_color_factor(blender_material, export_settings):
def __gather_base_color_texture(blender_material, export_settings): def __gather_base_color_texture(blender_material, export_settings):
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Base Color") base_color_socket = get_socket(blender_material, "Base Color")
if base_color_socket.socket is None:
base_color_socket = get_socket(blender_material, "BaseColor")
if base_color_socket is None: if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket(blender_material, "BaseColor") base_color_socket = get_socket_from_gltf_material_node(blender_material, "BaseColor")
if base_color_socket is None:
base_color_socket = gltf2_blender_get.get_socket_old(blender_material, "BaseColor")
alpha_socket = gltf2_blender_get.get_socket(blender_material, "Alpha") alpha_socket = get_socket(blender_material, "Alpha")
# keep sockets that have some texture : color and/or alpha # keep sockets that have some texture : color and/or alpha
inputs = tuple( inputs = tuple(
socket for socket in [base_color_socket, alpha_socket] socket for socket in [base_color_socket, alpha_socket]
if socket is not None and image_tex_is_valid_from_socket(socket) if socket.socket is not None and has_image_node_from_socket(socket, export_settings)
) )
if not inputs: if not inputs:
return None, {}, {"uv_info": {}, "vc_info": {}}, None return None, {}, {"uv_info": {}, "vc_info": {}}, None
@ -113,34 +118,35 @@ def __gather_metallic_factor(blender_material, export_settings):
if not blender_material.use_nodes: if not blender_material.use_nodes:
return blender_material.metallic return blender_material.metallic
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic") metallic_socket = get_socket(blender_material, "Metallic")
if metallic_socket is None: if metallic_socket is None:
metallic_socket = gltf2_blender_get.get_socket_old(blender_material, "MetallicFactor") metallic_socket = get_socket_from_gltf_material_node(blender_material, "MetallicFactor")
if isinstance(metallic_socket, bpy.types.NodeSocket): if isinstance(metallic_socket.socket, bpy.types.NodeSocket):
fac = gltf2_blender_get.get_factor_from_socket(metallic_socket, kind='VALUE') fac = get_factor_from_socket(metallic_socket, kind='VALUE')
return fac if fac != 1 else None return fac if fac != 1 else None
return None return None
def __gather_metallic_roughness_texture(blender_material, orm_texture, export_settings): def __gather_metallic_roughness_texture(blender_material, orm_texture, export_settings):
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic") metallic_socket = get_socket(blender_material, "Metallic")
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness") roughness_socket = get_socket(blender_material, "Roughness")
hasMetal = metallic_socket is not None and image_tex_is_valid_from_socket(metallic_socket) hasMetal = metallic_socket.socket is not None and has_image_node_from_socket(metallic_socket, export_settings)
hasRough = roughness_socket is not None and image_tex_is_valid_from_socket(roughness_socket) hasRough = roughness_socket.socket is not None and has_image_node_from_socket(roughness_socket, export_settings)
default_sockets = () default_sockets = ()
# Warning: for default socket, do not use NodeSocket object, because it will break cache
# Using directly the Blender socket object
if not hasMetal and not hasRough: if not hasMetal and not hasRough:
metallic_roughness = gltf2_blender_get.get_socket_old(blender_material, "MetallicRoughness") metallic_roughness = get_socket_from_gltf_material_node(blender_material, "MetallicRoughness")
if metallic_roughness is None or not image_tex_is_valid_from_socket(metallic_roughness): if metallic_roughness is None or not has_image_node_from_socket(metallic_roughness, export_settings):
return None, {}, None return None, {}, None
texture_input = (metallic_roughness,)
elif not hasMetal: elif not hasMetal:
texture_input = (roughness_socket,) texture_input = (roughness_socket,)
default_sockets = (metallic_socket,) default_sockets = (metallic_socket.socket,)
elif not hasRough: elif not hasRough:
texture_input = (metallic_socket,) texture_input = (metallic_socket,)
default_sockets = (roughness_socket,) default_sockets = (roughness_socket.socket,)
else: else:
texture_input = (metallic_socket, roughness_socket) texture_input = (metallic_socket, roughness_socket)
default_sockets = () default_sockets = ()
@ -158,11 +164,11 @@ def __gather_roughness_factor(blender_material, export_settings):
if not blender_material.use_nodes: if not blender_material.use_nodes:
return blender_material.roughness return blender_material.roughness
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness") roughness_socket = get_socket(blender_material, "Roughness")
if roughness_socket is None: if roughness_socket is None:
roughness_socket = gltf2_blender_get.get_socket_old(blender_material, "RoughnessFactor") roughness_socket = get_socket_from_gltf_material_node(blender_material, "RoughnessFactor")
if isinstance(roughness_socket, bpy.types.NodeSocket): if isinstance(roughness_socket.socket, bpy.types.NodeSocket):
fac = gltf2_blender_get.get_factor_from_socket(roughness_socket, kind='VALUE') fac = get_factor_from_socket(roughness_socket, kind='VALUE')
return fac if fac != 1 else None return fac if fac != 1 else None
return None return None

View File

@ -2,10 +2,14 @@
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
from ....io.com.gltf2_io_extensions import Extension
from ...exp import gltf2_blender_get
from . import gltf2_blender_gather_texture_info from . import gltf2_blender_gather_texture_info
from .gltf2_blender_search_node_tree import get_vertex_color_info from .gltf2_blender_search_node_tree import get_vertex_color_info
from .gltf2_blender_search_node_tree import \
get_socket, \
NodeSocket, \
previous_socket, \
previous_node, \
get_factor_from_socket
def detect_shadeless_material(blender_material, export_settings): def detect_shadeless_material(blender_material, export_settings):
"""Detect if this material is "shadeless" ie. should be exported """Detect if this material is "shadeless" ie. should be exported
@ -15,8 +19,8 @@ def detect_shadeless_material(blender_material, export_settings):
if not blender_material.use_nodes: return None if not blender_material.use_nodes: return None
# Old Background node detection (unlikely to happen) # Old Background node detection (unlikely to happen)
bg_socket = gltf2_blender_get.get_socket(blender_material, "Background") bg_socket = get_socket(blender_material, "Background")
if bg_socket is not None: if bg_socket.socket is not None:
return {'rgb_socket': bg_socket} return {'rgb_socket': bg_socket}
# Look for # Look for
@ -27,6 +31,7 @@ def detect_shadeless_material(blender_material, export_settings):
info = {} info = {}
#TODOSNode this can be a function call
for node in blender_material.node_tree.nodes: for node in blender_material.node_tree.nodes:
if node.type == 'OUTPUT_MATERIAL' and node.is_active_output: if node.type == 'OUTPUT_MATERIAL' and node.is_active_output:
socket = node.inputs[0] socket = node.inputs[0]
@ -34,6 +39,8 @@ def detect_shadeless_material(blender_material, export_settings):
else: else:
return None return None
socket = NodeSocket(socket, [blender_material])
# Be careful not to misidentify a lightpath trick as mix-alpha. # Be careful not to misidentify a lightpath trick as mix-alpha.
result = __detect_lightpath_trick(socket) result = __detect_lightpath_trick(socket)
if result is not None: if result is not None:
@ -49,10 +56,10 @@ def detect_shadeless_material(blender_material, export_settings):
socket = result['next_socket'] socket = result['next_socket']
# Check if a color socket, or connected to a color socket # Check if a color socket, or connected to a color socket
if socket.type != 'RGBA': if socket.socket.type != 'RGBA':
from_socket = gltf2_blender_get.previous_socket(socket) from_socket = previous_socket(socket)
if from_socket is None: return None if from_socket.socket is None: return None
if from_socket.type != 'RGBA': return None if from_socket.socket.type != 'RGBA': return None
info['rgb_socket'] = socket info['rgb_socket'] = socket
return info return info
@ -68,13 +75,13 @@ def __detect_mix_alpha(socket):
# #
# Returns None if not detected. Otherwise, a dict containing alpha_socket # Returns None if not detected. Otherwise, a dict containing alpha_socket
# and next_socket. # and next_socket.
prev = gltf2_blender_get.previous_node(socket) prev = previous_node(socket)
if prev is None or prev.type != 'MIX_SHADER': return None if prev.node is None or prev.node.type != 'MIX_SHADER': return None
in1 = gltf2_blender_get.previous_node(prev.inputs[1]) in1 = previous_node(NodeSocket(prev.node.inputs[1], prev.group_path))
if in1 is None or in1.type != 'BSDF_TRANSPARENT': return None if in1.node is None or in1.node.type != 'BSDF_TRANSPARENT': return None
return { return {
'alpha_socket': prev.inputs[0], 'alpha_socket': NodeSocket(prev.node.inputs[0], prev.group_path),
'next_socket': prev.inputs[2], 'next_socket': NodeSocket(prev.node.inputs[2], prev.group_path),
} }
@ -90,17 +97,17 @@ def __detect_lightpath_trick(socket):
# The Emission node can be omitted. # The Emission node can be omitted.
# Returns None if not detected. Otherwise, a dict containing # Returns None if not detected. Otherwise, a dict containing
# next_socket. # next_socket.
prev = gltf2_blender_get.previous_node(socket) prev = previous_node(socket)
if prev is None or prev.type != 'MIX_SHADER': return None if prev.node is None or prev.node.type != 'MIX_SHADER': return None
in0 = gltf2_blender_get.previous_socket(prev.inputs[0]) in0 = previous_socket(NodeSocket(prev.node.inputs[0], prev.group_path))
if in0 is None or in0.node.type != 'LIGHT_PATH': return None if in0.socket is None or in0.socket.node.type != 'LIGHT_PATH': return None
if in0.name != 'Is Camera Ray': return None if in0.socket.name != 'Is Camera Ray': return None
next_socket = prev.inputs[2] next_socket = NodeSocket(prev.node.inputs[2], prev.group_path)
# Detect emission # Detect emission
prev = gltf2_blender_get.previous_node(next_socket) prev = previous_node(next_socket)
if prev is not None and prev.type == 'EMISSION': if prev.node is not None and prev.node.type == 'EMISSION':
next_socket = prev.inputs[0] next_socket = NodeSocket(prev.node.inputs[0], prev.group_path)
return {'next_socket': next_socket} return {'next_socket': next_socket}
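For context, a minimal sketch of the node arrangement __detect_lightpath_trick looks for, built with Blender's standard shader-node API (the material name is a placeholder, and the Emission node is optional as noted above): a Light Path node's "Is Camera Ray" output drives a Mix Shader between a Transparent BSDF and an Emission shader.

    import bpy

    # Build the "shadeless via light path" setup that the detector above recognizes.
    mat = bpy.data.materials.new("ShadelessExample")  # placeholder name
    mat.use_nodes = True
    nt = mat.node_tree
    nt.nodes.clear()

    out = nt.nodes.new('ShaderNodeOutputMaterial')
    mix = nt.nodes.new('ShaderNodeMixShader')
    light_path = nt.nodes.new('ShaderNodeLightPath')
    transparent = nt.nodes.new('ShaderNodeBsdfTransparent')
    emission = nt.nodes.new('ShaderNodeEmission')

    nt.links.new(light_path.outputs['Is Camera Ray'], mix.inputs['Fac'])  # mix factor
    nt.links.new(transparent.outputs['BSDF'], mix.inputs[1])              # non-camera rays
    nt.links.new(emission.outputs['Emission'], mix.inputs[2])             # camera rays
    nt.links.new(mix.outputs['Shader'], out.inputs['Surface'])

With such a material, detect_shadeless_material should end up returning the Emission node's Color input (wrapped in a NodeSocket) as the rgb_socket, since the trailing Emission node is skipped over at the end of __detect_lightpath_trick.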
@ -109,9 +116,9 @@ def gather_base_color_factor(info, export_settings):
rgb, alpha = None, None rgb, alpha = None, None
if 'rgb_socket' in info: if 'rgb_socket' in info:
rgb = gltf2_blender_get.get_factor_from_socket(info['rgb_socket'], kind='RGB') rgb = get_factor_from_socket(info['rgb_socket'], kind='RGB')
if 'alpha_socket' in info: if 'alpha_socket' in info:
alpha = gltf2_blender_get.get_factor_from_socket(info['alpha_socket'], kind='VALUE') alpha = get_factor_from_socket(info['alpha_socket'], kind='VALUE')
if rgb is None: rgb = [1.0, 1.0, 1.0] if rgb is None: rgb = [1.0, 1.0, 1.0]
if alpha is None: alpha = 1.0 if alpha is None: alpha = 1.0
@ -122,8 +129,8 @@ def gather_base_color_factor(info, export_settings):
def gather_base_color_texture(info, export_settings): def gather_base_color_texture(info, export_settings):
sockets = (info.get('rgb_socket'), info.get('alpha_socket')) sockets = (info.get('rgb_socket', NodeSocket(None, None)), info.get('alpha_socket', NodeSocket(None, None)))
sockets = tuple(s for s in sockets if s is not None) sockets = tuple(s for s in sockets if s.socket is not None)
if sockets: if sockets:
# NOTE: separate RGB and Alpha textures will not get combined # NOTE: separate RGB and Alpha textures will not get combined
# because gather_image determines how to pack images based on the # because gather_image determines how to pack images based on the

View File

@ -4,21 +4,23 @@
import typing import typing
import bpy import bpy
from ....io.com import gltf2_io_debug
from ....io.exp.gltf2_io_user_extensions import export_user_extensions from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....io.com.gltf2_io_extensions import Extension from ....io.com.gltf2_io_extensions import Extension
from ....io.exp.gltf2_io_image_data import ImageData from ....io.exp.gltf2_io_image_data import ImageData
from ....io.exp.gltf2_io_binary_data import BinaryData from ....io.exp.gltf2_io_binary_data import BinaryData
from ....io.com import gltf2_io_debug
from ....io.com import gltf2_io from ....io.com import gltf2_io
from ..gltf2_blender_gather_cache import cached
from ..gltf2_blender_gather_sampler import gather_sampler from ..gltf2_blender_gather_sampler import gather_sampler
from ..gltf2_blender_get import get_tex_from_socket from ..gltf2_blender_gather_cache import cached
from .gltf2_blender_search_node_tree import get_texture_node_from_socket, NodeSocket
from . import gltf2_blender_gather_image from . import gltf2_blender_gather_image
@cached @cached
def gather_texture( def gather_texture(
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
default_sockets: typing.Tuple[bpy.types.NodeSocket], default_sockets,
export_settings): export_settings):
""" """
Gather texture sampling information and image channels from a blender shader texture attached to a shader socket. Gather texture sampling information and image channels from a blender shader texture attached to a shader socket.
@ -70,7 +72,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
ext_webp = {} ext_webp = {}
# If user want to keep original textures, and these textures are webp, we need to remove source from # If user want to keep original textures, and these textures are WebP, we need to remove source from
# gltf2_io.Texture, and populate extension # gltf2_io.Texture, and populate extension
if export_settings['gltf_keep_original_textures'] is True \ if export_settings['gltf_keep_original_textures'] is True \
and source is not None \ and source is not None \
@ -79,19 +81,19 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
remove_source = True remove_source = True
required = True required = True
# If user want to export in webp format (so without fallback in png/jpg) # If user want to export in WebP format (so without fallback in png/jpg)
if export_settings['gltf_image_format'] == "WEBP": if export_settings['gltf_image_format'] == "WEBP":
# We create all image without fallback # We create all image without fallback
ext_webp["source"] = source ext_webp["source"] = source
remove_source = True remove_source = True
required = True required = True
# If user doesn't want to export in webp format, but want webp too. Texture is not webp # If user doesn't want to export in WebP format, but want WebP too. Texture is not WebP
if export_settings['gltf_image_format'] != "WEBP" \ if export_settings['gltf_image_format'] != "WEBP" \
and export_settings['gltf_add_webp'] \ and export_settings['gltf_add_webp'] \
and source is not None \ and source is not None \
and source.mime_type != "image/webp": and source.mime_type != "image/webp":
# We need here to create some webp textures # We need here to create some WebP textures
new_mime_type = "image/webp" new_mime_type = "image/webp"
new_data, _ = image_data.encode(new_mime_type, export_settings) new_data, _ = image_data.encode(new_mime_type, export_settings)
@ -116,7 +118,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
ext_webp["source"] = webp_image ext_webp["source"] = webp_image
# If user doesn't want to export in webp format, but want webp too. Texture is webp # If user doesn't want to export in WebP format, but want WebP too. Texture is WebP
if export_settings['gltf_image_format'] != "WEBP" \ if export_settings['gltf_image_format'] != "WEBP" \
and source is not None \ and source is not None \
and source.mime_type == "image/webp": and source.mime_type == "image/webp":
@ -127,7 +129,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
remove_source = True remove_source = True
required = True required = True
# If user doesn't want to export in webp format, but want webp too as fallback. Texture is webp # If user doesn't want to export in webp format, but want WebP too as fallback. Texture is WebP
if export_settings['gltf_image_format'] != "WEBP" \ if export_settings['gltf_image_format'] != "WEBP" \
and webp_image is not None \ and webp_image is not None \
and export_settings['gltf_webp_fallback'] is True: and export_settings['gltf_webp_fallback'] is True:
@ -164,14 +166,33 @@ def __gather_name(blender_shader_sockets, export_settings):
def __gather_sampler(blender_shader_sockets, export_settings): def __gather_sampler(blender_shader_sockets, export_settings):
shader_nodes = [get_tex_from_socket(socket) for socket in blender_shader_sockets] shader_nodes = [get_texture_node_from_socket(socket, export_settings) for socket in blender_shader_sockets]
if len(shader_nodes) > 1: if len(shader_nodes) > 1:
gltf2_io_debug.print_console("WARNING", gltf2_io_debug.print_console("WARNING",
"More than one shader node tex image used for a texture. " "More than one shader node tex image used for a texture. "
"The resulting glTF sampler will behave like the first shader node tex image.") "The resulting glTF sampler will behave like the first shader node tex image.")
first_valid_shader_node = next(filter(lambda x: x is not None, shader_nodes)).shader_node first_valid_shader_node = next(filter(lambda x: x is not None, shader_nodes))
# group_path can't be a list, so transform it to str
sep_item = "##~~gltf-sep~~##"
sep_inside_item = "##~~gltf-inside-sep~~##"
group_path_str = ""
if len(first_valid_shader_node.group_path) > 0:
group_path_str += first_valid_shader_node.group_path[0].name
if len(first_valid_shader_node.group_path) > 1:
for idx, i in enumerate(first_valid_shader_node.group_path[1:]):
group_path_str += sep_item
if idx == 0:
group_path_str += first_valid_shader_node.group_path[0].name
else:
group_path_str += i.id_data.name
group_path_str += sep_inside_item
group_path_str += i.name
return gather_sampler( return gather_sampler(
first_valid_shader_node, first_valid_shader_node.shader_node,
group_path_str,
export_settings) export_settings)
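The group path is flattened into a single string with ad-hoc separators here, presumably because the list itself would not survive the hashing done by the cached gather functions. Purely as an illustration (the decode helper below is not part of the add-on), this is how the encoding above could be split back into (node-tree name, node name) pairs:

    SEP_ITEM = "##~~gltf-sep~~##"
    SEP_INSIDE_ITEM = "##~~gltf-inside-sep~~##"

    def decode_group_path_str(group_path_str):
        """Split the flattened group path back into (tree name, node name) entries."""
        if not group_path_str:
            return []
        items = group_path_str.split(SEP_ITEM)
        decoded = [(None, items[0])]  # the first entry is the material name on its own
        for item in items[1:]:
            tree_name, _, node_name = item.partition(SEP_INSIDE_ITEM)
            decoded.append((tree_name, node_name))
        return decoded

    print(decode_group_path_str(
        "MyMaterial##~~gltf-sep~~##MyMaterial##~~gltf-inside-sep~~##Group"))
    # [(None, 'MyMaterial'), ('MyMaterial', 'Group')]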
@ -209,7 +230,7 @@ def __gather_source(blender_shader_sockets, default_sockets, export_settings):
png_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings) png_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings)
# We inverted the png & webp image, to have the png as main source # We inverted the png & WebP image, to have the png as main source
return png_image, source, image_data, factor return png_image, source, image_data, factor
return source, None, image_data, factor return source, None, image_data, factor

View File

@ -7,12 +7,17 @@ import typing
from ....io.com import gltf2_io from ....io.com import gltf2_io
from ....io.com.gltf2_io_extensions import Extension from ....io.com.gltf2_io_extensions import Extension
from ....io.exp.gltf2_io_user_extensions import export_user_extensions from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ...exp import gltf2_blender_get
from ..gltf2_blender_get import previous_node, get_tex_from_socket
from ..gltf2_blender_gather_sampler import detect_manual_uv_wrapping from ..gltf2_blender_gather_sampler import detect_manual_uv_wrapping
from ..gltf2_blender_gather_cache import cached from ..gltf2_blender_gather_cache import cached
from . import gltf2_blender_gather_texture from . import gltf2_blender_gather_texture
from . import gltf2_blender_search_node_tree from .gltf2_blender_search_node_tree import \
get_texture_node_from_socket, \
from_socket, \
FilterByType, \
previous_node, \
get_const_from_socket, \
NodeSocket, \
get_texture_transform_from_mapping_node
# blender_shader_sockets determine the texture and primary_socket determines # blender_shader_sockets determine the texture and primary_socket determines
# the textransform and UVMap. Ex: when combining an ORM texture, for # the textransform and UVMap. Ex: when combining an ORM texture, for
@ -37,7 +42,7 @@ def gather_material_occlusion_texture_info_class(primary_socket, blender_shader_
def __gather_texture_info_helper( def __gather_texture_info_helper(
primary_socket: bpy.types.NodeSocket, primary_socket: bpy.types.NodeSocket,
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
default_sockets: typing.Tuple[bpy.types.NodeSocket], default_sockets,
kind: str, kind: str,
filter_type: str, filter_type: str,
export_settings): export_settings):
@ -77,7 +82,7 @@ def __gather_texture_info_helper(
def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings): def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings):
if primary_socket is None: if primary_socket is None:
return False return False
if get_tex_from_socket(primary_socket) is None: if get_texture_node_from_socket(primary_socket, export_settings) is None:
return False return False
if not blender_shader_sockets: if not blender_shader_sockets:
return False return False
@ -85,12 +90,12 @@ def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, e
return False return False
if filter_type == "ALL": if filter_type == "ALL":
# Check that all sockets link to texture # Check that all sockets link to texture
if any([get_tex_from_socket(socket) is None for socket in blender_shader_sockets]): if any([get_texture_node_from_socket(socket, export_settings) is None for socket in blender_shader_sockets]):
# sockets do not lead to a texture --> discard # sockets do not lead to a texture --> discard
return False return False
elif filter_type == "ANY": elif filter_type == "ANY":
# Check that at least one socket link to texture # Check that at least one socket link to texture
if all([get_tex_from_socket(socket) is None for socket in blender_shader_sockets]): if all([get_texture_node_from_socket(socket, export_settings) is None for socket in blender_shader_sockets]):
return False return False
elif filter_type == "NONE": elif filter_type == "NONE":
# No check # No check
@ -112,9 +117,9 @@ def __gather_extras(blender_shader_sockets, export_settings):
# MaterialNormalTextureInfo only # MaterialNormalTextureInfo only
def __gather_normal_scale(primary_socket, export_settings): def __gather_normal_scale(primary_socket, export_settings):
result = gltf2_blender_search_node_tree.from_socket( result = from_socket(
primary_socket, primary_socket,
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeNormalMap)) FilterByType(bpy.types.ShaderNodeNormalMap))
if not result: if not result:
return None return None
strengthInput = result[0].shader_node.inputs['Strength'] strengthInput = result[0].shader_node.inputs['Strength']
@ -127,11 +132,11 @@ def __gather_normal_scale(primary_socket, export_settings):
def __gather_occlusion_strength(primary_socket, export_settings): def __gather_occlusion_strength(primary_socket, export_settings):
# Look for a MixRGB node that mixes with pure white in front of # Look for a MixRGB node that mixes with pure white in front of
# primary_socket. The mix factor gives the occlusion strength. # primary_socket. The mix factor gives the occlusion strength.
node = gltf2_blender_get.previous_node(primary_socket) node = previous_node(primary_socket)
if node and node.type == 'MIX' and node.blend_type == 'MIX': if node.node and node.node.type == 'MIX' and node.node.blend_type == 'MIX':
fac = gltf2_blender_get.get_const_from_socket(node.inputs['Factor'], kind='VALUE') fac = get_const_from_socket(NodeSocket(node.node.inputs['Factor'], node.group_path), kind='VALUE')
col1 = gltf2_blender_get.get_const_from_socket(node.inputs[6], kind='RGB') col1 = get_const_from_socket(NodeSocket(node.node.inputs[6], node.group_path), kind='RGB')
col2 = gltf2_blender_get.get_const_from_socket(node.inputs[7], kind='RGB') col2 = get_const_from_socket(NodeSocket(node.node.inputs[7], node.group_path), kind='RGB')
if fac is not None: if fac is not None:
if col1 == [1.0, 1.0, 1.0] and col2 is None: if col1 == [1.0, 1.0, 1.0] and col2 is None:
return fac return fac
@ -153,31 +158,32 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
# #
# The [UV Wrapping] is for wrap modes like MIRROR that use nodes, # The [UV Wrapping] is for wrap modes like MIRROR that use nodes,
# [Mapping] is for KHR_texture_transform, and [UV Map] is for texCoord. # [Mapping] is for KHR_texture_transform, and [UV Map] is for texCoord.
blender_shader_node = get_tex_from_socket(primary_socket).shader_node result_tex = get_texture_node_from_socket(primary_socket, export_settings)
blender_shader_node = result_tex.shader_node
# Skip over UV wrapping stuff (it goes in the sampler) # Skip over UV wrapping stuff (it goes in the sampler)
result = detect_manual_uv_wrapping(blender_shader_node) result = detect_manual_uv_wrapping(blender_shader_node, result_tex.group_path)
if result: if result:
node = previous_node(result['next_socket']) node = previous_node(result['next_socket'])
else: else:
node = previous_node(blender_shader_node.inputs['Vector']) node = previous_node(NodeSocket(blender_shader_node.inputs['Vector'], result_tex.group_path))
texture_transform = None texture_transform = None
if node and node.type == 'MAPPING': if node.node and node.node.type == 'MAPPING':
texture_transform = gltf2_blender_get.get_texture_transform_from_mapping_node(node) texture_transform = get_texture_transform_from_mapping_node(node)
node = previous_node(node.inputs['Vector']) node = previous_node(NodeSocket(node.node.inputs['Vector'], node.group_path))
uvmap_info = {} uvmap_info = {}
if node and node.type == 'UVMAP' and node.uv_map: if node.node and node.node.type == 'UVMAP' and node.node.uv_map:
uvmap_info['type'] = "Fixed" uvmap_info['type'] = "Fixed"
uvmap_info['value'] = node.uv_map uvmap_info['value'] = node.node.uv_map
elif node and node.type == 'ATTRIBUTE' \ elif node and node.node and node.node.type == 'ATTRIBUTE' \
and node.attribute_type == "GEOMETRY" \ and node.node.attribute_type == "GEOMETRY" \
and node.attribute_name: and node.node.attribute_name:
uvmap_info['type'] = 'Attribute' uvmap_info['type'] = 'Attribute'
uvmap_info['value'] = node.attribute_name uvmap_info['value'] = node.node.attribute_name
else: else:
uvmap_info['type'] = 'Active' uvmap_info['type'] = 'Active'
@ -187,6 +193,7 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
def check_same_size_images( def check_same_size_images(
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
export_settings
) -> bool: ) -> bool:
"""Check that all sockets leads to images of the same size.""" """Check that all sockets leads to images of the same size."""
if not blender_shader_sockets or not all(blender_shader_sockets): if not blender_shader_sockets or not all(blender_shader_sockets):
@ -194,7 +201,7 @@ def check_same_size_images(
sizes = set() sizes = set()
for socket in blender_shader_sockets: for socket in blender_shader_sockets:
tex = get_tex_from_socket(socket) tex = get_texture_node_from_socket(socket, export_settings)
if tex is None: if tex is None:
return False return False
size = tex.shader_node.image.size size = tex.shader_node.image.size

View File

@ -7,6 +7,11 @@
# #
import bpy import bpy
from mathutils import Vector, Matrix
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from ...com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name, get_gltf_old_group_node_name
from ....blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf
from io_scene_gltf2.io.com import gltf2_io_debug
import typing import typing
@ -48,13 +53,14 @@ class FilterByType(Filter):
class NodeTreeSearchResult: class NodeTreeSearchResult:
def __init__(self, shader_node: bpy.types.Node, path: typing.List[bpy.types.NodeLink]): def __init__(self, shader_node: bpy.types.Node, path: typing.List[bpy.types.NodeLink], group_path: typing.List[bpy.types.Node]):
self.shader_node = shader_node self.shader_node = shader_node
self.path = path self.path = path
self.group_path = group_path
# TODO: cache these searches # TODO: cache these searches
def from_socket(start_socket: bpy.types.NodeSocket, def from_socket(start_socket: NodeTreeSearchResult,
shader_node_filter: typing.Union[Filter, typing.Callable]) -> typing.List[NodeTreeSearchResult]: shader_node_filter: typing.Union[Filter, typing.Callable]) -> typing.List[NodeTreeSearchResult]:
""" """
Find shader nodes where the filter expression is true. Find shader nodes where the filter expression is true.
@ -66,18 +72,39 @@ def from_socket(start_socket: bpy.types.NodeSocket,
# hide implementation (especially the search path) # hide implementation (especially the search path)
def __search_from_socket(start_socket: bpy.types.NodeSocket, def __search_from_socket(start_socket: bpy.types.NodeSocket,
shader_node_filter: typing.Union[Filter, typing.Callable], shader_node_filter: typing.Union[Filter, typing.Callable],
search_path: typing.List[bpy.types.NodeLink]) -> typing.List[NodeTreeSearchResult]: search_path: typing.List[bpy.types.NodeLink],
group_path: typing.List[bpy.types.Node]) -> typing.List[NodeTreeSearchResult]:
results = [] results = []
for link in start_socket.links: for link in start_socket.links:
# follow the link to a shader node # follow the link to a shader node
linked_node = link.from_node linked_node = link.from_node
if linked_node.type == "GROUP":
group_output_node = [node for node in linked_node.node_tree.nodes if node.type == "GROUP_OUTPUT"][0]
socket = [sock for sock in group_output_node.inputs if sock.name == link.from_socket.name][0]
group_path.append(linked_node)
linked_results = __search_from_socket(socket, shader_node_filter, search_path + [link], group_path.copy())
if linked_results:
# add the link to the current path
search_path.append(link)
results += linked_results
continue
if linked_node.type == "GROUP_INPUT":
socket = [sock for sock in group_path[-1].inputs if sock.name == link.from_socket.name][0]
linked_results = __search_from_socket(socket, shader_node_filter, search_path + [link], group_path[:-1])
if linked_results:
# add the link to the current path
search_path.append(link)
results += linked_results
continue
# check if the node matches the filter # check if the node matches the filter
if shader_node_filter(linked_node): if shader_node_filter(linked_node):
results.append(NodeTreeSearchResult(linked_node, search_path + [link])) results.append(NodeTreeSearchResult(linked_node, search_path + [link], group_path))
# traverse into inputs of the node # traverse into inputs of the node
for input_socket in linked_node.inputs: for input_socket in linked_node.inputs:
linked_results = __search_from_socket(input_socket, shader_node_filter, search_path + [link]) linked_results = __search_from_socket(input_socket, shader_node_filter, search_path + [link], group_path.copy())
if linked_results: if linked_results:
# add the link to the current path # add the link to the current path
search_path.append(link) search_path.append(link)
@ -85,10 +112,330 @@ def from_socket(start_socket: bpy.types.NodeSocket,
return results return results
if start_socket is None: if start_socket.socket is None:
return [] return []
return __search_from_socket(start_socket, shader_node_filter, []) return __search_from_socket(start_socket.socket, shader_node_filter, [], start_socket.group_path)
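A small usage sketch for from_socket with the new NodeSocket-based behaviour, assuming a node-based material named "Material" whose Principled BSDF Base Color is fed (possibly through node groups) by an Image Texture; NodeSocket, FilterByType and from_socket are the names defined in this module. It shows how group_path in each NodeTreeSearchResult lists the material first and then every group node crossed on the way:

    import bpy

    mat = bpy.data.materials["Material"]  # assumed to exist and use nodes
    principled = next(n for n in mat.node_tree.nodes if n.type == 'BSDF_PRINCIPLED')
    start = NodeSocket(principled.inputs['Base Color'], [mat])

    results = from_socket(start, FilterByType(bpy.types.ShaderNodeTexImage))
    for res in results:
        image_name = res.shader_node.image.name if res.shader_node.image else "<no image>"
        groups = [n.name for n in res.group_path[1:]]  # entry 0 is the material itself
        print(image_name, "reached via groups:", groups)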
@cached
def get_texture_node_from_socket(socket, export_settings):
result = from_socket(
socket,
FilterByType(bpy.types.ShaderNodeTexImage))
if not result:
return None
if result[0].shader_node.image is None:
return None
return result[0]
def has_image_node_from_socket(socket, export_settings):
result = get_texture_node_from_socket(socket, export_settings)
return result is not None
# return the default value of a socket, even if this socket is linked
def get_const_from_default_value_socket(socket, kind):
if kind == 'RGB':
if socket.socket.type != 'RGBA': return None
return list(socket.socket.default_value)[:3]
if kind == 'VALUE':
if socket.socket.type != 'VALUE': return None
return socket.socket.default_value
return None
#TODOSNode : @cached? If yes, we need to use the id of the node tree, as it is probably not fully hashable
# For now, not caching it. If we encounter performance issues, we will revisit this later
def get_material_nodes(node_tree: bpy.types.NodeTree, group_path, type):
"""
For a given tree, recursively return all nodes including node groups.
"""
nodes = []
for node in [n for n in node_tree.nodes if isinstance(n, type) and not n.mute]:
nodes.append((node, group_path.copy()))
# Some weird node groups with a missing datablock can have no node_tree, so check n.node_tree (See #1797)
for node in [n for n in node_tree.nodes if n.type == "GROUP" and n.node_tree is not None and not n.mute and n.node_tree.name != get_gltf_old_group_node_name()]: # Do not enter the old glTF node group
new_group_path = group_path.copy()
new_group_path.append(node)
nodes.extend(get_material_nodes(node.node_tree, new_group_path, type))
return nodes
def get_socket_from_gltf_material_node(blender_material: bpy.types.Material, name: str):
"""
For a given material input name, retrieve the corresponding node tree socket in the special glTF node group.
:param blender_material: a blender material for which to get the socket
:param name: the name of the socket
:return: a blender NodeSocket
"""
gltf_node_group_names = [get_gltf_node_name().lower(), get_gltf_node_old_name().lower()]
if blender_material.node_tree and blender_material.use_nodes:
nodes = get_material_nodes(blender_material.node_tree, [blender_material], bpy.types.ShaderNodeGroup)
# Some weird node groups with a missing datablock can have no node_tree, so check n.node_tree (See #1797)
nodes = [n for n in nodes if n[0].node_tree is not None and ( n[0].node_tree.name.lower().startswith(get_gltf_old_group_node_name()) or n[0].node_tree.name.lower() in gltf_node_group_names)]
inputs = sum([[(input, node[1]) for input in node[0].inputs if input.name == name] for node in nodes], [])
if inputs:
return NodeSocket(inputs[0][0], inputs[0][1])
return NodeSocket(None, None)
class NodeSocket:
def __init__(self, socket, group_path):
self.socket = socket
self.group_path = group_path
class ShNode:
def __init__(self, node, group_path):
self.node = node
self.group_path = group_path
def get_node_socket(blender_material, type, name):
"""
For a given material input name, retrieve the corresponding node tree socket for a given node type.
:param blender_material: a blender material for which to get the socket
:return: a blender NodeSocket for a given type
"""
nodes = get_material_nodes(blender_material.node_tree, [blender_material], type)
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
nodes = [node for node in nodes if check_if_is_linked_to_active_output(node[0].outputs[0], node[1])]
inputs = sum([[(input, node[1]) for input in node[0].inputs if input.name == name] for node in nodes], [])
if inputs:
return NodeSocket(inputs[0][0], inputs[0][1])
return NodeSocket(None, None)
def get_socket(blender_material: bpy.types.Material, name: str, volume=False):
"""
For a given material input name, retrieve the corresponding node tree socket.
:param blender_material: a blender material for which to get the socket
:param name: the name of the socket
:return: a blender NodeSocket
"""
if blender_material.node_tree and blender_material.use_nodes:
#i = [input for input in blender_material.node_tree.inputs]
#o = [output for output in blender_material.node_tree.outputs]
if name == "Emissive":
# Check for a dedicated Emission node first, it must supersede the newer built-in one
# because the newer one is always present in all Principled BSDF materials.
emissive_socket = get_node_socket(blender_material, bpy.types.ShaderNodeEmission, "Color")
if emissive_socket.socket is not None:
return emissive_socket
# If a dedicated Emission node was not found, fall back to the Principled BSDF Emission socket.
name = "Emission Color"
type = bpy.types.ShaderNodeBsdfPrincipled
elif name == "Background":
type = bpy.types.ShaderNodeBackground
name = "Color"
else:
if volume is False:
type = bpy.types.ShaderNodeBsdfPrincipled
else:
type = bpy.types.ShaderNodeVolumeAbsorption
return get_node_socket(blender_material, type, name)
return NodeSocket(None, None)
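Because get_socket and its siblings return NodeSocket(None, None) rather than None when nothing is found, callers throughout these files test .socket instead of the wrapper itself. A short sketch of that convention, assuming a node-based material named "Material" and using the names defined in this module:

    import bpy

    mat = bpy.data.materials["Material"]        # assumed node-based material

    metallic = get_socket(mat, "Metallic")
    assert metallic is not None                 # always true: the wrapper itself is never None
    if metallic.socket is not None:             # the check the exporter actually relies on
        print("Metallic socket:", metallic.socket.name,
              "group path:", [g.name for g in metallic.group_path])
    else:
        print("No Principled 'Metallic' input reachable from the active output")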
def get_factor_from_socket(socket, kind):
"""
For baseColorFactor, metallicFactor, etc.
Get a constant value from a socket, or a constant value
from a MULTIPLY node just before the socket.
kind is either 'RGB' or 'VALUE'.
"""
fac = get_const_from_socket(socket, kind)
if fac is not None:
return fac
node = previous_node(socket)
if node.node is not None:
x1, x2 = None, None
if kind == 'RGB':
if node.node.type == 'MIX' and node.node.data_type == "RGBA" and node.node.blend_type == 'MULTIPLY':
# TODO: handle factor in inputs[0]?
x1 = get_const_from_socket(NodeSocket(node.node.inputs[6], node.group_path), kind)
x2 = get_const_from_socket(NodeSocket(node.node.inputs[7], node.group_path), kind)
if kind == 'VALUE':
if node.node.type == 'MATH' and node.node.operation == 'MULTIPLY':
x1 = get_const_from_socket(NodeSocket(node.node.inputs[0], node.group_path), kind)
x2 = get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind)
if x1 is not None and x2 is None: return x1
if x2 is not None and x1 is None: return x2
return None
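As an illustration of the MULTIPLY pattern this helper handles, the sketch below (placeholder material, standard Blender node API) wires an Image Texture through a Math node set to MULTIPLY with a constant of 0.5 into the Principled Roughness input; get_factor_from_socket(get_socket(mat, "Roughness"), kind='VALUE') would then be expected to return 0.5, while the texture itself is gathered separately.

    import bpy

    mat = bpy.data.materials.new("FactorExample")    # placeholder name
    mat.use_nodes = True
    nt = mat.node_tree
    principled = next(n for n in nt.nodes if n.type == 'BSDF_PRINCIPLED')

    tex = nt.nodes.new('ShaderNodeTexImage')
    mult = nt.nodes.new('ShaderNodeMath')
    mult.operation = 'MULTIPLY'
    mult.inputs[1].default_value = 0.5               # the constant factor picked up above

    nt.links.new(tex.outputs['Color'], mult.inputs[0])
    nt.links.new(mult.outputs['Value'], principled.inputs['Roughness'])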
def get_const_from_socket(socket, kind):
if not socket.socket.is_linked:
if kind == 'RGB':
if socket.socket.type != 'RGBA': return None
return list(socket.socket.default_value)[:3]
if kind == 'VALUE':
if socket.socket.type != 'VALUE': return None
return socket.socket.default_value
# Handle connection to a constant RGB/Value node
prev_node = previous_node(socket)
if prev_node.node is not None:
if kind == 'RGB' and prev_node.node.type == 'RGB':
return list(prev_node.node.outputs[0].default_value)[:3]
if kind == 'VALUE' and prev_node.node.type == 'VALUE':
return prev_node.node.outputs[0].default_value
return None
def previous_socket(socket: NodeSocket):
soc = socket.socket
group_path = socket.group_path.copy()
while True:
if not soc.is_linked:
return NodeSocket(None, None)
from_socket = soc.links[0].from_socket
# If we are entering a node group (from outputs)
if from_socket.node.type == "GROUP":
socket_name = from_socket.name
sockets = [n for n in from_socket.node.node_tree.nodes if n.type == "GROUP_OUTPUT"][0].inputs
socket = [s for s in sockets if s.name == socket_name][0]
group_path.append(from_socket.node)
soc = socket
continue
# If we are exiting a node group (from inputs)
if from_socket.node.type == "GROUP_INPUT":
socket_name = from_socket.name
sockets = group_path[-1].inputs
socket = [s for s in sockets if s.name == socket_name][0]
group_path = group_path[:-1]
soc = socket
continue
# Skip over reroute nodes
if from_socket.node.type == 'REROUTE':
soc = from_socket.node.inputs[0]
continue
return NodeSocket(from_socket, group_path)
def previous_node(socket: NodeSocket):
prev_socket = previous_socket(socket)
if prev_socket.socket is not None:
return ShNode(prev_socket.socket.node, prev_socket.group_path)
return ShNode(None, None)
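A sketch of walking upstream with these helpers: starting from any wrapped socket, previous_node keeps returning ShNode(None, None) once there is nothing left to follow, with reroutes and group boundaries handled for the caller. The helper below is illustrative only and uses the names defined in this module.

    def print_upstream_chain(socket):
        """Print the chain of node types feeding `socket` (a NodeSocket)."""
        current = previous_node(socket)
        while current.node is not None:
            print(current.node.type, "inside groups:", [g.name for g in current.group_path[1:]])
            if not current.node.inputs:
                break
            # Arbitrarily keep following the node's first input.
            current = previous_node(NodeSocket(current.node.inputs[0], current.group_path))

    # Example (assumed material): follow the chain feeding the Principled Base Color.
    # print_upstream_chain(get_socket(bpy.data.materials["Material"], "Base Color"))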
def get_texture_transform_from_mapping_node(mapping_node):
if mapping_node.node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]:
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform because it had type " +
mapping_node.node.vector_type + "; recommend using POINT instead"
)
return None
rotation_0, rotation_1 = mapping_node.node.inputs['Rotation'].default_value[0], mapping_node.node.inputs['Rotation'].default_value[1]
if rotation_0 or rotation_1:
# TODO: can we handle this?
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform because it had non-zero "
"rotations in the X/Y direction; only a Z rotation can be exported!"
)
return None
mapping_transform = {}
mapping_transform["offset"] = [mapping_node.node.inputs['Location'].default_value[0], mapping_node.node.inputs['Location'].default_value[1]]
mapping_transform["rotation"] = mapping_node.node.inputs['Rotation'].default_value[2]
mapping_transform["scale"] = [mapping_node.node.inputs['Scale'].default_value[0], mapping_node.node.inputs['Scale'].default_value[1]]
if mapping_node.node.vector_type == "TEXTURE":
# This means use the inverse of the TRS transform.
def inverted(mapping_transform):
offset = mapping_transform["offset"]
rotation = mapping_transform["rotation"]
scale = mapping_transform["scale"]
# The inverse of a TRS is not always a TRS. This function is correct
# at least when the following cases don't occur.
if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5:
return None
if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5:
return None
new_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector((-offset[0], -offset[1], 1))
new_offset[0] /= scale[0]; new_offset[1] /= scale[1]
return {
"offset": new_offset[0:2],
"rotation": -rotation,
"scale": [1/scale[0], 1/scale[1]],
}
mapping_transform = inverted(mapping_transform)
if mapping_transform is None:
gltf2_io_debug.print_console("WARNING",
"Skipping exporting texture transform with type TEXTURE because "
"we couldn't convert it to TRS; recommend using POINT instead"
)
return None
elif mapping_node.node.vector_type == "VECTOR":
# Vectors don't get translated
mapping_transform["offset"] = [0, 0]
texture_transform = texture_transform_blender_to_gltf(mapping_transform)
if all([component == 0 for component in texture_transform["offset"]]):
del(texture_transform["offset"])
if all([component == 1 for component in texture_transform["scale"]]):
del(texture_transform["scale"])
if texture_transform["rotation"] == 0:
del(texture_transform["rotation"])
if len(texture_transform) == 0:
return None
return texture_transform
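A usage sketch for the conversion above, assuming a material that feeds an Image Texture's Vector input from a Mapping node of type POINT (the material and node lookup are placeholders); the function expects the node wrapped in ShNode so that mappings inside node groups keep their group_path:

    import bpy
    from math import radians

    mat = bpy.data.materials["Material"]          # assumed to exist and use nodes
    mapping = next(n for n in mat.node_tree.nodes if n.type == 'MAPPING')

    mapping.vector_type = 'POINT'
    mapping.inputs['Location'].default_value = (0.25, 0.5, 0.0)
    mapping.inputs['Rotation'].default_value = (0.0, 0.0, radians(90))
    mapping.inputs['Scale'].default_value = (2.0, 2.0, 1.0)

    transform = get_texture_transform_from_mapping_node(ShNode(mapping, [mat]))
    # With these non-default values, the dict should contain "offset", "rotation"
    # and "scale", already converted to KHR_texture_transform conventions.
    print(transform)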
def check_if_is_linked_to_active_output(shader_socket, group_path):
for link in shader_socket.links:
# If we are entering a node group
if link.to_node.type == "GROUP":
socket_name = link.to_socket.name
sockets = [n for n in link.to_node.node_tree.nodes if n.type == "GROUP_INPUT"][0].outputs
socket = [s for s in sockets if s.name == socket_name][0]
group_path.append(link.to_node)
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
ret = check_if_is_linked_to_active_output(socket, group_path) # recursive until find an output material node
if ret is True:
return True
continue
# If we are exiting a node group
if link.to_node.type == "GROUP_OUTPUT":
socket_name = link.to_socket.name
sockets = group_path[-1].outputs
socket = [s for s in sockets if s.name == socket_name][0]
group_path = group_path[:-1]
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
ret = check_if_is_linked_to_active_output(socket, group_path) # recursive until find an output material node
if ret is True:
return True
continue
if isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial) and link.to_node.is_active_output is True:
return True
if len(link.to_node.outputs) > 0: # ignore non active output, not having output sockets
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
ret = check_if_is_linked_to_active_output(link.to_node.outputs[0], group_path) # recursive until find an output material node
if ret is True:
return True
return False
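This walk is what get_node_socket relies on to ignore nodes that do not actually contribute to the active Material Output. A quick sketch of calling it directly, assuming a material with an Image Texture node sitting at the top level of its tree (not inside a group, so an initial group_path of [mat] is sufficient):

    import bpy

    mat = bpy.data.materials["Material"]            # assumed node-based material
    tex = next(n for n in mat.node_tree.nodes if n.type == 'TEX_IMAGE')

    # Only outputs[0] (Color) is considered, matching the TODO comments above.
    reaches_output = check_if_is_linked_to_active_output(tex.outputs[0], [mat])
    print("texture contributes to the active output:", reaches_output)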
def get_vertex_color_info(primary_socket, sockets, export_settings): def get_vertex_color_info(primary_socket, sockets, export_settings):
return {"color": None, "alpha": None} #TODO, placeholder for now return {"color": None, "alpha": None} #TODO, placeholder for now

View File

@ -39,7 +39,7 @@ def specular(mh, location_specular,
x_specularcolor, y_specularcolor = location_specular_tint x_specularcolor, y_specularcolor = location_specular_tint
if tex_specular_info is None: if tex_specular_info is None:
specular_socket.default_value = specular_factor specular_socket.default_value = specular_factor / 2.0
else: else:
# Mix specular factor # Mix specular factor
if specular_factor != 1.0: if specular_factor != 1.0:
@ -51,7 +51,7 @@ def specular(mh, location_specular,
mh.node_tree.links.new(specular_socket, node.outputs[0]) mh.node_tree.links.new(specular_socket, node.outputs[0])
# Inputs # Inputs
specular_socket = node.inputs[0] specular_socket = node.inputs[0]
node.inputs[1].default_value = specular_factor node.inputs[1].default_value = specular_factor / 2.0
x_specular -= 200 x_specular -= 200
texture( texture(
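The halving here appears to account for the scale of the Principled BSDF's "Specular IOR Level" input in Blender 4.0, whose default dielectric value is 0.5 while glTF's KHR_materials_specular specularFactor defaults to 1.0; in other words the importer seems to apply the mapping sketched below (an assumption drawn from this hunk, not an official formula).

    # glTF specularFactor 1.0  ->  Blender "Specular IOR Level" 0.5
    def gltf_specular_to_blender(specular_factor):
        return specular_factor / 2.0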

View File

@ -135,6 +135,7 @@ class BlenderNode():
bpy.data.collections.new(BLENDER_GLTF_SPECIAL_COLLECTION) bpy.data.collections.new(BLENDER_GLTF_SPECIAL_COLLECTION)
bpy.data.scenes[gltf.blender_scene].collection.children.link(bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION]) bpy.data.scenes[gltf.blender_scene].collection.children.link(bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION])
bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_viewport = True bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_viewport = True
bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_render = True
# Create an icosphere, and assign it to the collection # Create an icosphere, and assign it to the collection
bpy.ops.mesh.primitive_ico_sphere_add(radius=1, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1)) bpy.ops.mesh.primitive_ico_sphere_add(radius=1, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
@ -187,6 +188,9 @@ class BlenderNode():
arma_mat = vnode.editbone_arma_mat arma_mat = vnode.editbone_arma_mat
editbone.head = arma_mat @ Vector((0, 0, 0)) editbone.head = arma_mat @ Vector((0, 0, 0))
editbone.tail = arma_mat @ Vector((0, 1, 0)) editbone.tail = arma_mat @ Vector((0, 1, 0))
if gltf.import_settings['bone_heuristic'] == "BLENDER":
editbone.length = vnode.bone_length / max(blender_arma.scale)
else:
editbone.length = vnode.bone_length editbone.length = vnode.bone_length
editbone.align_roll(arma_mat @ Vector((0, 0, 1)) - editbone.head) editbone.align_roll(arma_mat @ Vector((0, 0, 1)) - editbone.head)
@ -225,7 +229,8 @@ class BlenderNode():
if gltf.import_settings['bone_heuristic'] == "BLENDER": if gltf.import_settings['bone_heuristic'] == "BLENDER":
pose_bone.custom_shape = bpy.data.objects[gltf.bone_shape] pose_bone.custom_shape = bpy.data.objects[gltf.bone_shape]
pose_bone.custom_shape_scale_xyz = Vector([0.1, 0.1, 0.1]) armature_max_dim = max([blender_arma.dimensions[0] / blender_arma.scale[0], blender_arma.dimensions[1] / blender_arma.scale[1], blender_arma.dimensions[2] / blender_arma.scale[2]])
pose_bone.custom_shape_scale_xyz = Vector([armature_max_dim * 0.2] * 3)
@staticmethod @staticmethod
def create_mesh_object(gltf, vnode): def create_mesh_object(gltf, vnode):

View File

@ -50,9 +50,6 @@ def pbr_metallic_roughness(mh: MaterialHelper):
# This value may be overridden later if IOR extension is set on file # This value may be overridden later if IOR extension is set on file
pbr_node.inputs['IOR'].default_value = GLTF_IOR pbr_node.inputs['IOR'].default_value = GLTF_IOR
pbr_node.inputs['Specular IOR Level'].default_value = 0.0 # Will be overridden by KHR_materials_specular if set
pbr_node.inputs['Specular Tint'].default_value = [0.0]*3 + [1.0] # Will be overridden by KHR_materials_specular if set
if mh.pymat.occlusion_texture is not None: if mh.pymat.occlusion_texture is not None:
if mh.settings_node is None: if mh.settings_node is None:
mh.settings_node = make_settings_node(mh) mh.settings_node = make_settings_node(mh)

View File

@ -41,7 +41,7 @@ def texture(
if forced_image is None: if forced_image is None:
if mh.gltf.import_settings['import_webp_texture'] is True: if mh.gltf.import_settings['import_webp_texture'] is True:
# Get the webp image if there is one # Get the WebP image if there is one
if pytexture.extensions \ if pytexture.extensions \
and 'EXT_texture_webp' in pytexture.extensions \ and 'EXT_texture_webp' in pytexture.extensions \
and pytexture.extensions['EXT_texture_webp']['source'] is not None: and pytexture.extensions['EXT_texture_webp']['source'] is not None:

View File

@ -47,8 +47,5 @@ class Buffer:
def to_bytes(self): def to_bytes(self):
return self.__data return self.__data
def to_embed_string(self):
return 'data:application/octet-stream;base64,' + base64.b64encode(self.__data).decode('ascii')
def clear(self): def clear(self):
self.__data = b"" self.__data = b""

View File

@ -539,7 +539,7 @@ class NWPreviewNode(Operator, NWBase):
if not viewer_socket: if not viewer_socket:
# create viewer socket # create viewer socket
viewer_socket = node.node_tree.interface.new_socket(viewer_socket_name, in_out={'OUTPUT'}, socket_type=socket_type) viewer_socket = node.node_tree.interface.new_socket(viewer_socket_name, in_out='OUTPUT', socket_type=socket_type)
viewer_socket.NWViewerSocket = True viewer_socket.NWViewerSocket = True
return viewer_socket return viewer_socket

View File

@ -89,10 +89,10 @@ class ActionSlot(PropertyGroup, ActionSlotBase):
target_space: EnumProperty( target_space: EnumProperty(
name="Transform Space", name="Transform Space",
items=[("WORLD", "World Space", "World Space"), items=[("WORLD", "World Space", "World Space", 0),
("POSE", "Pose Space", "Pose Space"), # ("POSE", "Pose Space", "Pose Space", 1),
("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent"), # ("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent", 2),
("LOCAL", "Local Space", "Local Space")], ("LOCAL", "Local Space", "Local Space", 3)],
default="LOCAL" default="LOCAL"
) )
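Keeping explicit integer values while commenting items out is presumably what keeps previously saved settings valid: the remaining items retain their stored numbers instead of being renumbered when the list shrinks. A minimal standalone sketch of the same pattern (class and property names are placeholders):

    import bpy
    from bpy.props import EnumProperty
    from bpy.types import PropertyGroup

    class ExampleSlot(PropertyGroup):
        # Explicit numbers pin each item's stored value, so removing the middle
        # entries does not shift "LOCAL" away from 3 in existing files.
        target_space: EnumProperty(
            name="Transform Space",
            items=[("WORLD", "World Space", "World Space", 0),
                   ("LOCAL", "Local Space", "Local Space", 3)],
            default="LOCAL",
        )

    bpy.utils.register_class(ExampleSlot)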

View File

@ -153,9 +153,10 @@ def pVisRotExec(bone, active, context):
def pVisScaExec(bone, active, context): def pVisScaExec(bone, active, context):
obj_bone = bone.id_data obj_bone = bone.id_data
bone.scale = getmat(bone, active, context, bone.scale = getmat(
not obj_bone.data.bones[bone.name].use_inherit_scale)\ bone, active, context,
.to_scale() obj_bone.data.bones[bone.name].inherit_scale not in {'NONE', 'NONE_LEGACY'}
).to_scale()
def pDrwExec(bone, active, context): def pDrwExec(bone, active, context):

View File

@ -156,12 +156,12 @@ def draw_callback_view():
if data_euler or data_quat: if data_euler or data_quat:
cursor = bpy.context.scene.cursor.location.copy() cursor = bpy.context.scene.cursor.location.copy()
derived_matrices = [] derived_matrices = dict()
for key, quat in data_quat.values(): for key, quat in data_quat.items():
matrix = quat.to_matrix().to_4x4() matrix = quat.to_matrix().to_4x4()
matrix.translation = cursor matrix.translation = cursor
derived_matrices[key] = matrix derived_matrices[key] = matrix
for key, eul in data_euler.values(): for key, eul in data_euler.items():
matrix = eul.to_matrix().to_4x4() matrix = eul.to_matrix().to_4x4()
matrix.translation = cursor matrix.translation = cursor
derived_matrices[key] = matrix derived_matrices[key] = matrix

View File

@ -99,9 +99,6 @@ class VIEW3D_MT_Pose(Menu):
layout.operator("pose.quaternions_flip") layout.operator("pose.quaternions_flip")
layout.operator_context = 'INVOKE_AREA' layout.operator_context = 'INVOKE_AREA'
layout.separator() layout.separator()
layout.operator("armature.armature_layers", text="Change Armature Layers...")
layout.operator("pose.bone_layers", text="Change Bone Layers...")
layout.separator()
layout.menu("VIEW3D_MT_pose_showhide") layout.menu("VIEW3D_MT_pose_showhide")
layout.menu("VIEW3D_MT_bone_options_toggle", text="Bone Settings") layout.menu("VIEW3D_MT_bone_options_toggle", text="Bone Settings")

View File

@ -6,6 +6,7 @@
import bpy import bpy
from bpy.props import FloatProperty, FloatVectorProperty from bpy.props import FloatProperty, FloatVectorProperty
from bpy.app.translations import pgettext_iface as iface_
import gpu import gpu
from gpu_extras.batch import batch_for_shader from gpu_extras.batch import batch_for_shader
from mathutils import Vector from mathutils import Vector
@ -248,8 +249,8 @@ class SUNPOS_OT_ShowHdr(bpy.types.Operator):
self.initial_azimuth = context.scene.sun_pos_properties.hdr_azimuth self.initial_azimuth = context.scene.sun_pos_properties.hdr_azimuth
context.workspace.status_text_set( context.workspace.status_text_set(
"Enter/LMB: confirm, Esc/RMB: cancel," iface_("Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, "
" MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure") "mouse wheel: zoom, Ctrl + mouse wheel: set exposure"))
self._handle = bpy.types.SpaceView3D.draw_handler_add( self._handle = bpy.types.SpaceView3D.draw_handler_add(
draw_callback_px, (self, context), 'WINDOW', 'POST_PIXEL' draw_callback_px, (self, context), 'WINDOW', 'POST_PIXEL'

View File

@ -416,6 +416,19 @@ translations_tuple = (
("fr_FR", "Année", ("fr_FR", "Année",
(False, ())), (False, ())),
), ),
(("*", "Unknown projection"),
(("scripts/addons/sun_position/hdr.py:181",),
()),
("fr_FR", "Projection inconnue",
(False, ())),
),
(("*", "Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure"),
(("scripts/addons/sun_position/hdr.py:252",),
()),
("fr_FR", "Entrée/ClicG : Confirmer, Échap/ClicD : Annuler, ClicM : défiler, "
"molette : zoom, Ctrl + molette : exposition",
(False, ())),
),
(("*", "Could not find 3D View"), (("*", "Could not find 3D View"),
(("scripts/addons/sun_position/hdr.py:263",), (("scripts/addons/sun_position/hdr.py:263",),
()), ()),
@ -428,12 +441,6 @@ translations_tuple = (
("fr_FR", "Veuillez utiliser un nœud de texture denvironnement", ("fr_FR", "Veuillez utiliser un nœud de texture denvironnement",
(False, ())), (False, ())),
), ),
(("*", "Unknown projection"),
(("scripts/addons/sun_position/hdr.py:181",),
()),
("fr_FR", "Projection inconnue",
(False, ())),
),
(("*", "Show options and info:"), (("*", "Show options and info:"),
(("scripts/addons/sun_position/properties.py:297",), (("scripts/addons/sun_position/properties.py:297",),
()), ()),

View File

@ -344,7 +344,9 @@ class UI_OT_i18n_addon_translation_export(Operator):
if not lng.use: if not lng.use:
print("Skipping {} language ({}).".format(lng.name, lng.uid)) print("Skipping {} language ({}).".format(lng.name, lng.uid))
continue continue
uid = utils_i18n.find_best_isocode_matches(lng.uid, trans.trans.keys()) translation_keys = {k for k in trans.trans.keys()
if k != self.settings.PARSER_TEMPLATE_ID}
uid = utils_i18n.find_best_isocode_matches(lng.uid, translation_keys)
if uid: if uid:
uids.append(uid[0]) uids.append(uid[0])
@ -357,8 +359,8 @@ class UI_OT_i18n_addon_translation_export(Operator):
if not os.path.isfile(path): if not os.path.isfile(path):
continue continue
msgs = utils_i18n.I18nMessages(kind='PO', src=path, settings=self.settings) msgs = utils_i18n.I18nMessages(kind='PO', src=path, settings=self.settings)
msgs.update(trans.msgs[self.settings.PARSER_TEMPLATE_ID]) msgs.update(trans.trans[self.settings.PARSER_TEMPLATE_ID])
trans.msgs[uid] = msgs trans.trans[uid] = msgs
trans.write(kind='PO', langs=set(uids)) trans.write(kind='PO', langs=set(uids))