New Addon: Import Autodesk .max #105013

Closed
Sebastian Sille wants to merge 136 commits from (deleted):nrgsille-import_max into main

71 changed files with 2277 additions and 965 deletions
Showing only changes of commit cfff820943

View File

@ -5,8 +5,8 @@
bl_info = { bl_info = {
"name": "AnimAll", "name": "AnimAll",
"author": "Daniel Salazar (ZanQdo), Damien Picard (pioverfour)", "author": "Daniel Salazar (ZanQdo), Damien Picard (pioverfour)",
"version": (0, 9, 6), "version": (0, 10, 0),
"blender": (3, 3, 0), "blender": (4, 0, 0),
"location": "3D View > Toolbox > Animation tab > AnimAll", "location": "3D View > Toolbox > Animation tab > AnimAll",
"description": "Allows animation of mesh, lattice, curve and surface data", "description": "Allows animation of mesh, lattice, curve and surface data",
"warning": "", "warning": "",
@ -17,11 +17,11 @@ bl_info = {
import bpy import bpy
from bpy.types import (Operator, Panel, AddonPreferences) from bpy.types import (Operator, Panel, AddonPreferences)
from bpy.props import (BoolProperty, StringProperty) from bpy.props import (BoolProperty, StringProperty)
from bpy.app.handlers import persistent
from bpy.app.translations import (pgettext_iface as iface_, from bpy.app.translations import (pgettext_iface as iface_,
pgettext_data as data_) pgettext_data as data_)
from . import translations from . import translations
import re
# Property Definitions # Property Definitions
class AnimallProperties(bpy.types.PropertyGroup): class AnimallProperties(bpy.types.PropertyGroup):
@ -49,10 +49,12 @@ class AnimallProperties(bpy.types.PropertyGroup):
name="Vertex Bevel", name="Vertex Bevel",
description="Insert keyframes on vertex bevel weight", description="Insert keyframes on vertex bevel weight",
default=False) default=False)
# key_vertex_crease: BoolProperty(
# name="Vertex Crease", key_vertex_crease: BoolProperty(
# description="Insert keyframes on vertex crease weight", name="Vertex Crease",
# default=False) description="Insert keyframes on vertex crease weight",
default=False)
key_vertex_group: BoolProperty( key_vertex_group: BoolProperty(
name="Vertex Group", name="Vertex Group",
description="Insert keyframes on active vertex group values", description="Insert keyframes on active vertex group values",
@ -67,8 +69,8 @@ class AnimallProperties(bpy.types.PropertyGroup):
description="Insert keyframes on edge creases", description="Insert keyframes on edge creases",
default=False) default=False)
key_attribute: BoolProperty( key_active_attribute: BoolProperty(
name="Attribute", name="Active Attribute",
description="Insert keyframes on active attribute values", description="Insert keyframes on active attribute values",
default=False) default=False)
key_uvs: BoolProperty( key_uvs: BoolProperty(
@ -115,6 +117,55 @@ def delete_key(data, key):
pass pass
def get_attribute(data, name, type=None, domain=None):
if name in data.attributes:
return data.attributes[name]
if type is not None and domain is not None:
return data.attributes.new(name, type, domain)
def get_attribute_paths(data, attribute, key_selected):
# Cannot animate string attributes?
if attribute.data_type == 'STRING':
return ()
if attribute.data_type in {'FLOAT', 'INT', 'BOOLEAN', 'INT8'}:
attribute_key = "value"
elif attribute.data_type in {'FLOAT_COLOR', 'BYTE_COLOR'}:
attribute_key = "color"
elif attribute.data_type in {'FLOAT_VECTOR', 'FLOAT2'}:
attribute_key = "vector"
if attribute.domain == 'POINT':
group = data_("Vertex %s")
elif attribute.domain == 'EDGE':
group = data_("Edge %s")
elif attribute.domain == 'FACE':
group = data_("Face %s")
elif attribute.domain == 'CORNER':
group = data_("Loop %s")
for e_i, _attribute_data in enumerate(attribute.data):
if (not key_selected
or attribute.domain == 'POINT' and data.vertices[e_i].select
or attribute.domain == 'EDGE' and data.edges[e_i].select
or attribute.domain == 'FACE' and data.polygons[e_i].select
or attribute.domain == 'CORNER' and is_selected_vert_loop(data, e_i)):
yield (f'attributes["{attribute.name}"].data[{e_i}].{attribute_key}', group % e_i)
def insert_attribute_key(data, attribute, key_selected):
for path, group in get_attribute_paths(data, attribute, key_selected):
if path:
insert_key(data, path, group=group)
def delete_attribute_key(data, attribute, key_selected):
for path, group in get_attribute_paths(data, attribute, key_selected):
if path:
delete_key(data, path)
def is_selected_vert_loop(data, loop_i): def is_selected_vert_loop(data, loop_i):
"""Get selection status of vertex corresponding to a loop""" """Get selection status of vertex corresponding to a loop"""
vertex_index = data.loops[loop_i].vertex_index vertex_index = data.loops[loop_i].vertex_index
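Note (not part of the commit): a minimal sketch of what the new helpers resolve to, assuming Blender 4.0+ and a placeholder mesh object named "Cube". get_attribute_paths() yields RNA paths on the mesh datablock itself, so keyframes end up on paths like attributes["crease_vert"].data[3].value, which insert_key() hands to keyframe_insert():

import bpy

mesh = bpy.data.objects["Cube"].data  # placeholder object name

# Ensure the new-style vertex crease attribute exists (FLOAT on the POINT domain),
# which is what get_attribute(data, "crease_vert", 'FLOAT', 'POINT') does.
attribute = mesh.attributes.get("crease_vert") or mesh.attributes.new("crease_vert", 'FLOAT', 'POINT')

# Key every vertex (key_selected=False in the generator keys all elements).
for index in range(len(attribute.data)):
    path = f'attributes["crease_vert"].data[{index}].value'
    mesh.keyframe_insert(path, group="Vertex %s" % index)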
@ -126,7 +177,7 @@ def is_selected_vert_loop(data, loop_i):
class VIEW3D_PT_animall(Panel): class VIEW3D_PT_animall(Panel):
bl_space_type = 'VIEW_3D' bl_space_type = 'VIEW_3D'
bl_region_type = 'UI' bl_region_type = 'UI'
bl_category = "Animate" bl_category = "Animation"
bl_label = '' bl_label = ''
@classmethod @classmethod
@ -137,7 +188,7 @@ class VIEW3D_PT_animall(Panel):
layout = self.layout layout = self.layout
row = layout.row() row = layout.row()
row.label (text = 'AnimAll', icon = 'ARMATURE_DATA') row.label(text='AnimAll', icon='ARMATURE_DATA')
def draw(self, context): def draw(self, context):
obj = context.active_object obj = context.active_object
@ -161,6 +212,7 @@ class VIEW3D_PT_animall(Panel):
col = layout.column(heading="Points", align=True) col = layout.column(heading="Points", align=True)
col.prop(animall_properties, "key_point_location") col.prop(animall_properties, "key_point_location")
col.prop(animall_properties, "key_vertex_bevel", text="Bevel") col.prop(animall_properties, "key_vertex_bevel", text="Bevel")
col.prop(animall_properties, "key_vertex_crease", text="Crease")
col.prop(animall_properties, "key_vertex_group") col.prop(animall_properties, "key_vertex_group")
col = layout.column(heading="Edges", align=True) col = layout.column(heading="Edges", align=True)
@ -171,7 +223,7 @@ class VIEW3D_PT_animall(Panel):
col.prop(animall_properties, "key_material_index") col.prop(animall_properties, "key_material_index")
col = layout.column(heading="Others", align=True) col = layout.column(heading="Others", align=True)
col.prop(animall_properties, "key_attribute") col.prop(animall_properties, "key_active_attribute")
col.prop(animall_properties, "key_uvs") col.prop(animall_properties, "key_uvs")
col.prop(animall_properties, "key_shape_key") col.prop(animall_properties, "key_shape_key")
@ -179,10 +231,10 @@ class VIEW3D_PT_animall(Panel):
if (obj.data.animation_data is not None if (obj.data.animation_data is not None
and obj.data.animation_data.action is not None): and obj.data.animation_data.action is not None):
for fcurve in context.active_object.data.animation_data.action.fcurves: for fcurve in context.active_object.data.animation_data.action.fcurves:
if fcurve.data_path.startswith("vertex_colors"): if bpy.ops.anim.update_attribute_animation_animall.poll():
col = layout.column(align=True) col = layout.column(align=True)
col.label(text="Object includes old-style vertex colors. Consider updating them.", icon="ERROR") col.label(text="Object includes old-style attributes. Consider updating them.", icon="ERROR")
col.operator("anim.update_vertex_color_animation_animall", icon="FILE_REFRESH") col.operator("anim.update_attribute_animation_animall", icon="FILE_REFRESH")
break break
elif obj.type in {'CURVE', 'SURFACE'}: elif obj.type in {'CURVE', 'SURFACE'}:
@ -315,13 +367,12 @@ class ANIM_OT_insert_keyframe_animall(Operator):
insert_key(vert, 'co', group=data_("Vertex %s") % v_i) insert_key(vert, 'co', group=data_("Vertex %s") % v_i)
if animall_properties.key_vertex_bevel: if animall_properties.key_vertex_bevel:
for v_i, vert in enumerate(data.vertices): attribute = get_attribute(data, "bevel_weight_vert", 'FLOAT', 'POINT')
if not animall_properties.key_selected or vert.select: insert_attribute_key(data, attribute, animall_properties.key_selected)
insert_key(vert, 'bevel_weight', group=data_("Vertex %s") % v_i)
# if animall_properties.key_vertex_crease: if animall_properties.key_vertex_crease:
# for v_i, vert in enumerate(data.vertices): attribute = get_attribute(data, "crease_vert", 'FLOAT', 'POINT')
# if not animall_properties.key_selected or vert.select: insert_attribute_key(data, attribute, animall_properties.key_selected)
# insert_key(vert, 'crease', group=data_("Vertex %s") % v_i)
if animall_properties.key_vertex_group: if animall_properties.key_vertex_group:
for v_i, vert in enumerate(data.vertices): for v_i, vert in enumerate(data.vertices):
@ -330,55 +381,31 @@ class ANIM_OT_insert_keyframe_animall(Operator):
insert_key(group, 'weight', group=data_("Vertex %s") % v_i) insert_key(group, 'weight', group=data_("Vertex %s") % v_i)
if animall_properties.key_edge_bevel: if animall_properties.key_edge_bevel:
for e_i, edge in enumerate(data.edges): attribute = get_attribute(data, "bevel_weight_edge", 'FLOAT', 'EDGE')
if not animall_properties.key_selected or edge.select: insert_attribute_key(data, attribute, animall_properties.key_selected)
insert_key(edge, 'bevel_weight', group=data_("Edge %s") % e_i)
if animall_properties.key_edge_crease: if animall_properties.key_edge_crease:
for e_i, edge in enumerate(data.edges): attribute = get_attribute(data, "crease_edge", 'FLOAT', 'EDGE')
if not animall_properties.key_selected or edge.select: insert_attribute_key(data, attribute, animall_properties.key_selected)
insert_key(edge, 'crease', group=data_("Edge %s") % e_i)
if animall_properties.key_material_index: if animall_properties.key_material_index:
for p_i, polygon in enumerate(data.polygons): for p_i, polygon in enumerate(data.polygons):
if not animall_properties.key_selected or polygon.select: if not animall_properties.key_selected or polygon.select:
insert_key(polygon, 'material_index', group=data_("Face %s") % p_i) insert_key(polygon, 'material_index', group=data_("Face %s") % p_i)
if animall_properties.key_attribute: if animall_properties.key_active_attribute:
if data.attributes.active is not None: if data.attributes.active is not None:
attribute = data.attributes.active for path, group in get_attribute_paths(
if attribute.data_type != 'STRING': data, data.attributes.active,
# Cannot animate string attributes? animall_properties.key_selected):
if attribute.data_type in {'FLOAT', 'INT', 'BOOLEAN', 'INT8'}: if path:
attribute_key = "value" insert_key(data, path, group=group)
elif attribute.data_type in {'FLOAT_COLOR', 'BYTE_COLOR'}:
attribute_key = "color"
elif attribute.data_type in {'FLOAT_VECTOR', 'FLOAT2'}:
attribute_key = "vector"
if attribute.domain == 'POINT':
group = data_("Vertex %s")
elif attribute.domain == 'EDGE':
group = data_("Edge %s")
elif attribute.domain == 'FACE':
group = data_("Face %s")
elif attribute.domain == 'CORNER':
group = data_("Loop %s")
for e_i, _attribute_data in enumerate(attribute.data):
if (not animall_properties.key_selected
or attribute.domain == 'POINT' and data.vertices[e_i].select
or attribute.domain == 'EDGE' and data.edges[e_i].select
or attribute.domain == 'FACE' and data.polygons[e_i].select
or attribute.domain == 'CORNER' and is_selected_vert_loop(data, e_i)):
insert_key(data, f'attributes["{attribute.name}"].data[{e_i}].{attribute_key}',
group=group % e_i)
if animall_properties.key_uvs: if animall_properties.key_uvs:
if data.uv_layers.active is not None: if data.uv_layers.active is not None:
for uv_i, uv in enumerate(data.uv_layers.active.data): for uv_i, uv in enumerate(data.uv_layers.active.data):
if not animall_properties.key_selected or uv.select: if not animall_properties.key_selected or uv.select:
insert_key(uv, 'uv', group=data_("UV layer %s") % uv_i) insert_key(uv, 'uv', group=data_("UV Layer %s") % uv_i)
if animall_properties.key_shape_key: if animall_properties.key_shape_key:
if obj.active_shape_key_index > 0: if obj.active_shape_key_index > 0:
@ -402,9 +429,15 @@ class ANIM_OT_insert_keyframe_animall(Operator):
if obj.active_shape_key_index > 0: if obj.active_shape_key_index > 0:
CV = obj.active_shape_key.data[global_spline_index] CV = obj.active_shape_key.data[global_spline_index]
insert_key(CV, 'co', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i)) insert_key(CV, 'co', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
insert_key(CV, 'handle_left', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i)) insert_key(
insert_key(CV, 'handle_right', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i)) CV, 'handle_left', group=data_("%s Spline %s CV %s") %
insert_key(CV, 'radius', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i)) (sk_name, s_i, v_i))
insert_key(
CV, 'handle_right', group=data_("%s Spline %s CV %s") %
(sk_name, s_i, v_i))
insert_key(
CV, 'radius', group=data_("%s Spline %s CV %s") %
(sk_name, s_i, v_i))
insert_key(CV, 'tilt', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i)) insert_key(CV, 'tilt', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
global_spline_index += 1 global_spline_index += 1
@ -414,7 +447,8 @@ class ANIM_OT_insert_keyframe_animall(Operator):
if obj.active_shape_key_index > 0: if obj.active_shape_key_index > 0:
CV = obj.active_shape_key.data[global_spline_index] CV = obj.active_shape_key.data[global_spline_index]
insert_key(CV, 'co', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i)) insert_key(CV, 'co', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
insert_key(CV, 'radius', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i)) insert_key(
CV, 'radius', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
insert_key(CV, 'tilt', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i)) insert_key(CV, 'tilt', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
global_spline_index += 1 global_spline_index += 1
@ -443,15 +477,22 @@ class ANIM_OT_delete_keyframe_animall(Operator):
for obj in objects: for obj in objects:
data = obj.data data = obj.data
if obj.type == 'MESH': if obj.type == 'MESH':
bpy.ops.object.mode_set(mode='OBJECT')
if animall_properties.key_point_location: if animall_properties.key_point_location:
for vert in data.vertices: for vert in data.vertices:
if not animall_properties.key_selected or vert.select: if not animall_properties.key_selected or vert.select:
delete_key(vert, 'co') delete_key(vert, 'co')
if animall_properties.key_vertex_bevel: if animall_properties.key_vertex_bevel:
for vert in data.vertices: attribute = get_attribute(data, "bevel_weight_vert", 'FLOAT', 'POINT')
if not animall_properties.key_selected or vert.select: if attribute is not None:
delete_key(vert, 'bevel_weight') delete_attribute_key(data, attribute, animall_properties.key_selected)
if animall_properties.key_vertex_crease:
attribute = get_attribute(data, "crease_vert", 'FLOAT', 'POINT')
if attribute is not None:
delete_attribute_key(data, attribute, animall_properties.key_selected)
if animall_properties.key_vertex_group: if animall_properties.key_vertex_group:
for vert in data.vertices: for vert in data.vertices:
@ -459,20 +500,20 @@ class ANIM_OT_delete_keyframe_animall(Operator):
for group in vert.groups: for group in vert.groups:
delete_key(group, 'weight') delete_key(group, 'weight')
# if animall_properties.key_vertex_crease:
# for vert in data.vertices:
# if not animall_properties.key_selected or vert.select:
# delete_key(vert, 'crease')
if animall_properties.key_edge_bevel: if animall_properties.key_edge_bevel:
for edge in data.edges: attribute = get_attribute(data, "bevel_weight_edge", 'FLOAT', 'EDGE')
if not animall_properties.key_selected or edge.select: if attribute is not None:
delete_key(edge, 'bevel_weight') delete_attribute_key(data, attribute, animall_properties.key_selected)
if animall_properties.key_edge_crease: if animall_properties.key_edge_crease:
for edge in data.edges: attribute = get_attribute(data, "crease_edge", 'FLOAT', 'EDGE')
if not animall_properties.key_selected or vert.select: if attribute is not None:
delete_key(edge, 'crease') delete_attribute_key(data, attribute, animall_properties.key_selected)
if animall_properties.key_material_index:
for p_i, polygon in enumerate(data.polygons):
if not animall_properties.key_selected or polygon.select:
delete_key(polygon, 'material_index')
if animall_properties.key_shape_key: if animall_properties.key_shape_key:
if obj.active_shape_key: if obj.active_shape_key:
@ -486,25 +527,15 @@ class ANIM_OT_delete_keyframe_animall(Operator):
if not animall_properties.key_selected or uv.select: if not animall_properties.key_selected or uv.select:
delete_key(uv, 'uv') delete_key(uv, 'uv')
if animall_properties.key_attribute: if animall_properties.key_active_attribute:
if data.attributes.active is not None: if data.attributes.active is not None:
attribute = data.attributes.active for path, _group in get_attribute_paths(
if attribute.data_type != 'STRING': data, data.attributes.active,
# Cannot animate string attributes? animall_properties.key_selected):
if attribute.data_type in {'FLOAT', 'INT', 'BOOLEAN', 'INT8'}: if path:
attribute_key = "value" delete_key(data, path)
elif attribute.data_type in {'FLOAT_COLOR', 'BYTE_COLOR'}:
attribute_key = "color"
elif attribute.data_type in {'FLOAT_VECTOR', 'FLOAT2'}:
attribute_key = "vector"
for e_i, _attribute_data in enumerate(attribute.data): bpy.ops.object.mode_set(mode=mode)
if (not animall_properties.key_selected
or attribute.domain == 'POINT' and data.vertices[e_i].select
or attribute.domain == 'EDGE' and data.edges[e_i].select
or attribute.domain == 'FACE' and data.polygons[e_i].select
or attribute.domain == 'CORNER' and is_selected_vert_loop(data, e_i)):
delete_key(data, f'attributes["{attribute.name}"].data[{e_i}].{attribute_key}')
elif obj.type == 'LATTICE': elif obj.type == 'LATTICE':
if animall_properties.key_shape_key: if animall_properties.key_shape_key:
@ -588,12 +619,20 @@ class ANIM_OT_clear_animation_animall(Operator):
return {'FINISHED'} return {'FINISHED'}
class ANIM_OT_update_vertex_color_animation_animall(Operator): class ANIM_OT_update_attribute_animation_animall(Operator):
bl_label = "Update Vertex Color Animation" bl_label = "Update Attribute Animation"
bl_idname = "anim.update_vertex_color_animation_animall" bl_idname = "anim.update_attribute_animation_animall"
bl_description = "Update old vertex color channel formats from pre-3.3 versions" bl_description = "Update attributes from the old format"
bl_options = {'REGISTER', 'UNDO'} bl_options = {'REGISTER', 'UNDO'}
path_re = re.compile(r"^vertex_colors|(vertices|edges)\[([0-9]+)\]\.(bevel_weight|crease)")
attribute_map = {
("vertices", "bevel_weight"): ("bevel_weight_vert", "FLOAT", "POINT"),
("edges", "bevel_weight"): ("bevel_weight_edge", "FLOAT", "POINT"),
("vertices", "crease"): ("crease_vert", "FLOAT", "EDGE"),
("edges", "crease"): ("crease_edge", "FLOAT", "EDGE"),
}
@classmethod @classmethod
def poll(self, context): def poll(self, context):
if (context.active_object is None if (context.active_object is None
@ -602,21 +641,30 @@ class ANIM_OT_update_vertex_color_animation_animall(Operator):
or context.active_object.data.animation_data.action is None): or context.active_object.data.animation_data.action is None):
return False return False
for fcurve in context.active_object.data.animation_data.action.fcurves: for fcurve in context.active_object.data.animation_data.action.fcurves:
if fcurve.data_path.startswith("vertex_colors"): if self.path_re.match(fcurve.data_path):
return True return True
def execute(self, context): def execute(self, context):
for fcurve in context.active_object.data.animation_data.action.fcurves: for fcurve in context.active_object.data.animation_data.action.fcurves:
if fcurve.data_path.startswith("vertex_colors"): if fcurve.data_path.startswith("vertex_colors"):
# Update pre-3.3 vertex colors
fcurve.data_path = fcurve.data_path.replace("vertex_colors", "attributes") fcurve.data_path = fcurve.data_path.replace("vertex_colors", "attributes")
else:
# Update pre-4.0 attributes
match = self.path_re.match(fcurve.data_path)
if match is None:
continue
domain, index, src_attribute = match.groups()
attribute, type, domain = self.attribute_map[(domain, src_attribute)]
get_attribute(context.active_object.data, attribute, type, domain)
fcurve.data_path = f'attributes["{attribute}"].data[{index}].value'
return {'FINISHED'} return {'FINISHED'}
# Add-ons Preferences Update Panel # Add-ons Preferences Update Panel
# Define Panel classes for updating # Define Panel classes for updating
panels = [ panels = [VIEW3D_PT_animall]
VIEW3D_PT_animall
]
def update_panel(self, context): def update_panel(self, context):
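For reference, a sketch of the data-path migration that the regex and attribute_map above implement (only the pattern is quoted from the operator; the sample F-Curve path is made up):

import re

path_re = re.compile(r"^vertex_colors|(vertices|edges)\[([0-9]+)\]\.(bevel_weight|crease)")

old_path = 'edges[12].crease'                 # hypothetical pre-4.0 F-Curve data path
domain, index, src_attribute = path_re.match(old_path).groups()
print(domain, index, src_attribute)           # edges 12 crease

# attribute_map[("edges", "crease")] names the replacement attribute, so the
# F-Curve is repointed to the generic-attribute path:
new_path = f'attributes["crease_edge"].data[{index}].value'
print(new_path)                               # attributes["crease_edge"].data[12].value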
@ -643,7 +691,7 @@ class AnimallAddonPreferences(AddonPreferences):
category: StringProperty( category: StringProperty(
name="Tab Category", name="Tab Category",
description="Choose a name for the category of the panel", description="Choose a name for the category of the panel",
default="Animate", default="Animation",
update=update_panel update=update_panel
) )
@ -658,7 +706,7 @@ class AnimallAddonPreferences(AddonPreferences):
register_classes, unregister_classes = bpy.utils.register_classes_factory( register_classes, unregister_classes = bpy.utils.register_classes_factory(
(AnimallProperties, VIEW3D_PT_animall, ANIM_OT_insert_keyframe_animall, (AnimallProperties, VIEW3D_PT_animall, ANIM_OT_insert_keyframe_animall,
ANIM_OT_delete_keyframe_animall, ANIM_OT_clear_animation_animall, ANIM_OT_delete_keyframe_animall, ANIM_OT_clear_animation_animall,
ANIM_OT_update_vertex_color_animation_animall, AnimallAddonPreferences)) ANIM_OT_update_attribute_animation_animall, AnimallAddonPreferences))
def register(): def register():
register_classes() register_classes()

View File

@ -12,10 +12,10 @@
translations_tuple = ( translations_tuple = (
(("*", ""), (("*", ""),
((), ()), ((), ()),
("fr_FR", "Project-Id-Version: AnimAll 0.9.6 (0)\n", ("fr_FR", "Project-Id-Version: AnimAll 0.10.0 (0)\n",
(False, (False,
("Blender's translation file (po format).", ("Blender's translation file (po format).",
"Copyright (C) 2022 The Blender Foundation.", "Copyright (C) 2022-2023 The Blender Foundation.",
"This file is distributed under the same license as the Blender package.", "This file is distributed under the same license as the Blender package.",
"Damien Picard <dam.pic@free.fr>, 2022."))), "Damien Picard <dam.pic@free.fr>, 2022."))),
), ),
@ -59,7 +59,7 @@ translations_tuple = (
(("Operator", "Delete Key"), (("Operator", "Delete Key"),
(("bpy.types.ANIM_OT_delete_keyframe_animall",), (("bpy.types.ANIM_OT_delete_keyframe_animall",),
()), ()),
("fr_FR", "Supprimer image clé", ("fr_FR", "Supprimer limage clé",
(False, ())), (False, ())),
), ),
(("*", "Delete a Keyframe"), (("*", "Delete a Keyframe"),
@ -68,16 +68,16 @@ translations_tuple = (
("fr_FR", "Supprimer une image clé", ("fr_FR", "Supprimer une image clé",
(False, ())), (False, ())),
), ),
(("Operator", "Update Vertex Color Animation"), (("Operator", "Update Attribute Animation"),
(("bpy.types.ANIM_OT_update_vertex_color_animation_animall",), (("bpy.types.ANIM_OT_update_attribute_animation_animall",),
()), ()),
("fr_FR", "Mettre à jour lanimation des couleurs de sommets", ("fr_FR", "Mettre à jour lanimation des attributs",
(False, ())), (False, ())),
), ),
(("*", "Update old vertex color channel formats from pre-3.3 versions"), (("*", "Update attributes from the old format"),
(("bpy.types.ANIM_OT_update_vertex_color_animation_animall",), (("bpy.types.ANIM_OT_update_attribute_animation_animall",),
()), ()),
("fr_FR", "Mettre à jour les formats des canaux depuis les versions antérieures à la 3.3", ("fr_FR", "Mettre à jour les attributs depuis lancien format",
(False, ())), (False, ())),
), ),
(("*", "Animate"), (("*", "Animate"),
@ -87,7 +87,7 @@ translations_tuple = (
(False, ())), (False, ())),
), ),
(("*", "Insert keyframes on active attribute values"), (("*", "Insert keyframes on active attribute values"),
(("bpy.types.AnimallProperties.key_attribute",), (("bpy.types.AnimallProperties.key_active_attribute",),
()), ()),
("fr_FR", "Insérer des clés sur lattribut actif", ("fr_FR", "Insérer des clés sur lattribut actif",
(False, ())), (False, ())),
@ -98,6 +98,12 @@ translations_tuple = (
("fr_FR", "Insérer des clés sur les poids de biseau darête", ("fr_FR", "Insérer des clés sur les poids de biseau darête",
(False, ())), (False, ())),
), ),
(("*", "Edge Crease"),
(("bpy.types.AnimallProperties.key_edge_crease",),
()),
("fr_FR", "Plis darêtes",
(False, ())),
),
(("*", "Insert keyframes on edge creases"), (("*", "Insert keyframes on edge creases"),
(("bpy.types.AnimallProperties.key_edge_crease",), (("bpy.types.AnimallProperties.key_edge_crease",),
()), ()),
@ -158,6 +164,12 @@ translations_tuple = (
("fr_FR", "Insérer des clés sur les poids de biseau des sommets", ("fr_FR", "Insérer des clés sur les poids de biseau des sommets",
(False, ())), (False, ())),
), ),
(("*", "Insert keyframes on vertex crease weight"),
(("bpy.types.AnimallProperties.key_vertex_crease",),
()),
("fr_FR", "Insérer des clés sur les plis de sommets",
(False, ())),
),
(("*", "Insert keyframes on active vertex group values"), (("*", "Insert keyframes on active vertex group values"),
(("bpy.types.AnimallProperties.key_vertex_group",), (("bpy.types.AnimallProperties.key_vertex_group",),
()), ()),
@ -165,190 +177,187 @@ translations_tuple = (
(False, ())), (False, ())),
), ),
(("*", "AnimAll"), (("*", "AnimAll"),
(("scripts/addons/animation_animall/__init__.py:138", (("Add-on AnimAll info: name",),
"Add-on AnimAll info: name"),
()), ()),
("fr_FR", "AnimAll", ("fr_FR", "AnimAll",
(False, ())), (False, ())),
), ),
(("*", "Key:"), (("*", "Key:"),
(("scripts/addons/animation_animall/__init__.py:146",), (("scripts/addons/animation_animall/__init__.py:200",),
()), ()),
("fr_FR", "Insérer:", ("fr_FR", "Insérer:",
(False, ())), (False, ())),
), ),
(("*", "Tab Category:"), (("*", "Tab Category:"),
(("scripts/addons/animation_animall/__init__.py:653",), (("scripts/addons/animation_animall/__init__.py:704",),
()), ()),
("fr_FR", "Catégorie donglet :", ("fr_FR", "Catégorie donglet :",
(False, ())), (False, ())),
), ),
(("*", "Points"), (("*", "Points"),
(("scripts/addons/animation_animall/__init__.py:152", (("scripts/addons/animation_animall/__init__.py:206",
"scripts/addons/animation_animall/__init__.py:159", "scripts/addons/animation_animall/__init__.py:213",
"scripts/addons/animation_animall/__init__.py:188"), "scripts/addons/animation_animall/__init__.py:243"),
()), ()),
("fr_FR", "Points", ("fr_FR", "Points",
(False, ())), (False, ())),
), ),
(("*", "Others"), (("*", "Others"),
(("scripts/addons/animation_animall/__init__.py:155", (("scripts/addons/animation_animall/__init__.py:209",
"scripts/addons/animation_animall/__init__.py:171", "scripts/addons/animation_animall/__init__.py:226",
"scripts/addons/animation_animall/__init__.py:196"), "scripts/addons/animation_animall/__init__.py:251"),
()), ()),
("fr_FR", "Autres", ("fr_FR", "Autres",
(False, ())), (False, ())),
), ),
(("*", "Bevel"), (("*", "Bevel"),
(("scripts/addons/animation_animall/__init__.py:161", (("scripts/addons/animation_animall/__init__.py:215",
"scripts/addons/animation_animall/__init__.py:165"), "scripts/addons/animation_animall/__init__.py:220"),
()), ()),
("fr_FR", "Biseau", ("fr_FR", "Biseau",
(False, ())), (False, ())),
), ),
(("*", "Edges"), (("*", "Edges"),
(("scripts/addons/animation_animall/__init__.py:164",), (("scripts/addons/animation_animall/__init__.py:219",),
()), ()),
("fr_FR", "Arêtes", ("fr_FR", "Arêtes",
(False, ())), (False, ())),
), ),
(("*", "Crease"), (("*", "Crease"),
(("scripts/addons/animation_animall/__init__.py:166",), (("scripts/addons/animation_animall/__init__.py:216",
"scripts/addons/animation_animall/__init__.py:221",),
()), ()),
("fr_FR", "Plis", ("fr_FR", "Plis",
(False, ())), (False, ())),
), ),
(("*", "Faces"), (("*", "Faces"),
(("scripts/addons/animation_animall/__init__.py:168",), (("scripts/addons/animation_animall/__init__.py:223",),
()), ()),
("fr_FR", "Faces", ("fr_FR", "Faces",
(False, ())), (False, ())),
), ),
(("*", "\"Location\" and \"Shape Key\" are redundant?"), (("*", "\"Location\" and \"Shape Key\" are redundant?"),
(("scripts/addons/animation_animall/__init__.py:218",), (("scripts/addons/animation_animall/__init__.py:273",),
()), ()),
("fr_FR", "\"Position\" et \"Clé de forme\" sont redondants?", ("fr_FR", "\"Position\" et \"Clé de forme\" sont redondants?",
(False, ())), (False, ())),
), ),
(("*", "Splines"), (("*", "Splines"),
(("scripts/addons/animation_animall/__init__.py:193",), (("scripts/addons/animation_animall/__init__.py:248",),
()), ()),
("fr_FR", "Splines", ("fr_FR", "Splines",
(False, ())), (False, ())),
), ),
(("*", "Maybe set \"%s\" to 1.0?"), (("*", "Maybe set \"%s\" to 1.0?"),
(("scripts/addons/animation_animall/__init__.py:209", (("scripts/addons/animation_animall/__init__.py:264"),
"scripts/addons/animation_animall/__init__.py:209"),
()), ()),
("fr_FR", "Essayez de mettre « %s » à 1.0?", ("fr_FR", "Essayez de mettre « %s » à 1.0?",
(False, ())), (False, ())),
), ),
(("*", "Cannot key on Basis Shape"), (("*", "Cannot key on Basis Shape"),
(("scripts/addons/animation_animall/__init__.py:212",), (("scripts/addons/animation_animall/__init__.py:267",),
()), ()),
("fr_FR", "Impossible dajouter une clé sur la forme de base", ("fr_FR", "Impossible dajouter une clé sur la forme de base",
(False, ())), (False, ())),
), ),
(("*", "No active Shape Key"), (("*", "No active Shape Key"),
(("scripts/addons/animation_animall/__init__.py:215",), (("scripts/addons/animation_animall/__init__.py:270",),
()), ()),
("fr_FR", "Pas de clé de forme active", ("fr_FR", "Pas de clé de forme active",
(False, ())), (False, ())),
), ),
(("*", "Clear Animation could not be performed"), (("*", "Clear Animation could not be performed"),
(("scripts/addons/animation_animall/__init__.py:581",), (("scripts/addons/animation_animall/__init__.py:615",),
()), ()),
("fr_FR", "La suppression de lanimation na pas pu aboutir", ("fr_FR", "La suppression de lanimation na pas pu aboutir",
(False, ())), (False, ())),
), ),
(("*", "Object includes old-style vertex colors. Consider updating them."), (("*", "Object includes old-style attributes. Consider updating them."),
(("scripts/addons/animation_animall/__init__.py:182",), (("scripts/addons/animation_animall/__init__.py:237",),
()), ()),
("fr_FR", "Lobjet contient des couleurs de sommets à lancien format. Veuillez les mettre à jour", ("fr_FR", "Lobjet contient des attributs à lancien format. Veuillez les mettre à jour.",
(False, ())), (False, ())),
), ),
(("*", "Vertex %s"), (("*", "Vertex %s"),
(("scripts/addons/animation_animall/__init__.py:358", (("scripts/addons/animation_animall/__init__.py:141",
"scripts/addons/animation_animall/__init__.py:313", "scripts/addons/animation_animall/__init__.py:368",
"scripts/addons/animation_animall/__init__.py:318", "scripts/addons/animation_animall/__init__.py:382",
"scripts/addons/animation_animall/__init__.py:328"), "scripts/addons/animation_animall/__init__.py:416"),
()), ()),
("fr_FR", "Sommet %s", ("fr_FR", "Sommet %s",
(False, ())), (False, ())),
), ),
(("*", "Edge %s"), (("*", "Edge %s"),
(("scripts/addons/animation_animall/__init__.py:360", (("scripts/addons/animation_animall/__init__.py:143"),
"scripts/addons/animation_animall/__init__.py:333",
"scripts/addons/animation_animall/__init__.py:338"),
()), ()),
("fr_FR", "Arête %s", ("fr_FR", "Arête %s",
(False, ())), (False, ())),
), ),
(("*", "Point %s"), (("*", "Point %s"),
(("scripts/addons/animation_animall/__init__.py:265",), (("scripts/addons/animation_animall/__init__.py:320",),
()), ()),
("fr_FR", "Point %s", ("fr_FR", "Point %s",
(False, ())), (False, ())),
), ),
(("*", "Spline %s"), (("*", "Spline %s"),
(("scripts/addons/animation_animall/__init__.py:273",), (("scripts/addons/animation_animall/__init__.py:328",),
()), ()),
("fr_FR", "Spline %s", ("fr_FR", "Spline %s",
(False, ())), (False, ())),
), ),
(("*", "Face %s"), (("*", "Face %s"),
(("scripts/addons/animation_animall/__init__.py:343", (("scripts/addons/animation_animall/__init__.py:145",
"scripts/addons/animation_animall/__init__.py:362"), "scripts/addons/animation_animall/__init__.py:395"),
()), ()),
("fr_FR", "Face %s", ("fr_FR", "Face %s",
(False, ())), (False, ())),
), ),
(("*", "%s Point %s"), (("*", "%s Point %s"),
(("scripts/addons/animation_animall/__init__.py:260",), (("scripts/addons/animation_animall/__init__.py:315",),
()), ()),
("fr_FR", "%s Point %s", ("fr_FR", "%s Point %s",
(False, ())), (False, ())),
), ),
(("*", "Loop %s"), (("*", "Loop %s"),
(("scripts/addons/animation_animall/__init__.py:364",), (("scripts/addons/animation_animall/__init__.py:147",),
()), ()),
("fr_FR", "Boucle %s", ("fr_FR", "Boucle %s",
(False, ())), (False, ())),
), ),
(("*", "UV layer %s"), (("*", "UV Layer %s"),
(("scripts/addons/animation_animall/__init__.py:379",), (("scripts/addons/animation_animall/__init__.py:409",),
()), ()),
("fr_FR", "Calque UV %s", ("fr_FR", "Calque UV %s",
(False, ())), (False, ())),
), ),
(("*", "%s Vertex %s"), (("*", "%s Vertex %s"),
(("scripts/addons/animation_animall/__init__.py:386",), (("scripts/addons/animation_animall/__init__.py:416",),
()), ()),
("fr_FR", "%s Sommet %s", ("fr_FR", "%s Sommet %s",
(False, ())), (False, ())),
), ),
(("*", "Spline %s CV %s"), (("*", "Spline %s CV %s"),
(("scripts/addons/animation_animall/__init__.py:283", (("scripts/addons/animation_animall/__init__.py:338",
"scripts/addons/animation_animall/__init__.py:284", "scripts/addons/animation_animall/__init__.py:339",
"scripts/addons/animation_animall/__init__.py:285", "scripts/addons/animation_animall/__init__.py:340",
"scripts/addons/animation_animall/__init__.py:288", "scripts/addons/animation_animall/__init__.py:343",
"scripts/addons/animation_animall/__init__.py:291", "scripts/addons/animation_animall/__init__.py:346",
"scripts/addons/animation_animall/__init__.py:297", "scripts/addons/animation_animall/__init__.py:352",
"scripts/addons/animation_animall/__init__.py:300", "scripts/addons/animation_animall/__init__.py:355",
"scripts/addons/animation_animall/__init__.py:303"), "scripts/addons/animation_animall/__init__.py:358"),
()), ()),
("fr_FR", "Spline %s Point %s", ("fr_FR", "Spline %s Point %s",
(False, ())), (False, ())),
), ),
(("*", "%s Spline %s CV %s"), (("*", "%s Spline %s CV %s"),
(("scripts/addons/animation_animall/__init__.py:402", (("scripts/addons/animation_animall/__init__.py:432",
"scripts/addons/animation_animall/__init__.py:403", "scripts/addons/animation_animall/__init__.py:434",
"scripts/addons/animation_animall/__init__.py:404", "scripts/addons/animation_animall/__init__.py:437",
"scripts/addons/animation_animall/__init__.py:405", "scripts/addons/animation_animall/__init__.py:440",
"scripts/addons/animation_animall/__init__.py:406", "scripts/addons/animation_animall/__init__.py:442",
"scripts/addons/animation_animall/__init__.py:414", "scripts/addons/animation_animall/__init__.py:450",
"scripts/addons/animation_animall/__init__.py:415", "scripts/addons/animation_animall/__init__.py:452",
"scripts/addons/animation_animall/__init__.py:416"), "scripts/addons/animation_animall/__init__.py:453"),
()), ()),
("fr_FR", "%s Spline %s Point %s", ("fr_FR", "%s Spline %s Point %s",
(False, ())), (False, ())),

View File

@ -460,7 +460,6 @@ def create_glass_material(matname, replace, rv=0.333, gv=0.342, bv=0.9):
node = nodes.new('ShaderNodeBsdfGlossy') node = nodes.new('ShaderNodeBsdfGlossy')
node.name = 'Glossy_0' node.name = 'Glossy_0'
node.distribution = 'SHARP'
node.location = 250, 100 node.location = 250, 100
node = nodes.new('ShaderNodeBsdfTransparent') node = nodes.new('ShaderNodeBsdfTransparent')

View File

@ -4,7 +4,7 @@
import bpy import bpy
import gpu import gpu
from mathutils import Vector, Matrix from mathutils import Matrix
from mathutils.geometry import tessellate_polygon from mathutils.geometry import tessellate_polygon
from gpu_extras.batch import batch_for_shader from gpu_extras.batch import batch_for_shader
@ -48,8 +48,8 @@ def draw_image(tile, face_data, opacity):
def get_normalize_uvs_matrix(tile): def get_normalize_uvs_matrix(tile):
'''matrix maps x and y coordinates from [0, 1] to [-1, 1]''' '''matrix maps x and y coordinates from [0, 1] to [-1, 1]'''
matrix = Matrix.Identity(4) matrix = Matrix.Identity(4)
matrix.col[3][0] = -1 - (tile[0]) * 2 matrix.col[3][0] = -1 - (tile[0] * 2)
matrix.col[3][1] = -1 - (tile[1]) * 2 matrix.col[3][1] = -1 - (tile[1] * 2)
matrix[0][0] = 2 matrix[0][0] = 2
matrix[1][1] = 2 matrix[1][1] = 2
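A quick numeric check of the adjusted matrix (plain mathutils, tile (1, 0) chosen arbitrarily): U coordinates inside tile column 1, i.e. in [1, 2], should land in clip space [-1, 1].

from mathutils import Matrix, Vector

tile = (1, 0)
matrix = Matrix.Identity(4)
matrix.col[3][0] = -1 - (tile[0] * 2)   # U translation: -3 for tile column 1
matrix.col[3][1] = -1 - (tile[1] * 2)   # V translation: -1 for tile row 0
matrix[0][0] = 2                        # scale each unit tile to a width of 2
matrix[1][1] = 2

print(matrix @ Vector((1.0, 0.0, 0.0, 1.0)))  # (-1.0, -1.0, 0.0, 1.0)
print(matrix @ Vector((2.0, 1.0, 0.0, 1.0)))  # ( 1.0,  1.0, 0.0, 1.0)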

View File

@ -628,8 +628,11 @@ def make_material_texture_chunk(chunk_id, texslots, pct):
mat_sub_mapflags.add_variable("mapflags", _3ds_ushort(mapflags)) mat_sub_mapflags.add_variable("mapflags", _3ds_ushort(mapflags))
mat_sub.add_subchunk(mat_sub_mapflags) mat_sub.add_subchunk(mat_sub_mapflags)
mat_sub_texblur = _3ds_chunk(MAT_MAP_TEXBLUR) # Based on observation this is usually 1.0 texblur = 0.0
mat_sub_texblur.add_variable("maptexblur", _3ds_float(1.0)) mat_sub_texblur = _3ds_chunk(MAT_MAP_TEXBLUR)
if texslot.socket_dst.identifier in {'Base Color', 'Specular Tint'}:
texblur = texslot.node_dst.inputs['Sheen Weight'].default_value
mat_sub_texblur.add_variable("maptexblur", _3ds_float(round(texblur, 6)))
mat_sub.add_subchunk(mat_sub_texblur) mat_sub.add_subchunk(mat_sub_texblur)
mat_sub_uscale = _3ds_chunk(MAT_MAP_USCALE) mat_sub_uscale = _3ds_chunk(MAT_MAP_USCALE)
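As far as this hunk shows, the texture-blur factor now round-trips through the Principled BSDF "Sheen Weight" socket instead of a hard-coded 1.0; a small sketch of that assumption in isolation (material name and value are made up):

import bpy

mat = bpy.data.materials.new("texblur_demo")
mat.use_nodes = True
principled = mat.node_tree.nodes["Principled BSDF"]

# Authoring side: the exporter samples this socket for Base Color / Specular Tint maps...
principled.inputs['Sheen Weight'].default_value = 0.25
texblur = round(principled.inputs['Sheen Weight'].default_value, 6)
print(texblur)  # 0.25 -> written as the MAT_MAP_TEXBLUR (0xA353) float

# ...and the importer writes a MAT_MAP_TEXBLUR value read for COLOR/SPECULARITY maps back here.
principled.inputs['Sheen Weight'].default_value = texblur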
@ -1306,9 +1309,9 @@ def make_object_node(ob, translation, rotation, scale, name_id):
obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0x0040)) obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0x0040))
# Flag 0x01 display path 0x02 use autosmooth 0x04 object frozen 0x10 motion blur 0x20 material morph 0x40 mesh morph # Flag 0x01 display path 0x02 use autosmooth 0x04 object frozen 0x10 motion blur 0x20 material morph 0x40 mesh morph
if ob.type == 'MESH' and 'Smooth by Angle' in ob.modifiers: if ob.type == 'MESH' and 'Smooth by Angle' in ob.modifiers:
ob_node_header_chunk.add_variable("flags2", _3ds_ushort(0x02)) obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0x02))
else: else:
ob_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0))
obj_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT)) obj_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
''' '''

View File

@ -90,6 +90,7 @@ MAT_SHIN_MAP = 0xA33C # This is a header for a new roughness map
MAT_SELFI_MAP = 0xA33D # This is a header for a new emission map MAT_SELFI_MAP = 0xA33D # This is a header for a new emission map
MAT_MAP_FILEPATH = 0xA300 # This holds the file name of the texture MAT_MAP_FILEPATH = 0xA300 # This holds the file name of the texture
MAT_MAP_TILING = 0xA351 # 2nd bit (from LSB) is mirror UV flag MAT_MAP_TILING = 0xA351 # 2nd bit (from LSB) is mirror UV flag
MAT_MAP_TEXBLUR = 0xA353 # Texture blurring factor (float 0-1)
MAT_MAP_USCALE = 0xA354 # U axis scaling MAT_MAP_USCALE = 0xA354 # U axis scaling
MAT_MAP_VSCALE = 0xA356 # V axis scaling MAT_MAP_VSCALE = 0xA356 # V axis scaling
MAT_MAP_UOFFSET = 0xA358 # U axis offset MAT_MAP_UOFFSET = 0xA358 # U axis offset
@ -426,8 +427,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
# in rare cases no materials defined. # in rare cases no materials defined.
bmesh.materials.append(bmat) # can be None bmesh.materials.append(bmat) # can be None
if bmesh.polygons:
for fidx in faces: for fidx in faces:
bmesh.polygons[fidx].material_index = mat_idx bmesh.polygons[fidx].material_index = mat_idx
else:
print("\tError: Mesh has no faces!")
if uv_faces: if uv_faces:
uvl = bmesh.uv_layers.active.data[:] uvl = bmesh.uv_layers.active.data[:]
@ -559,6 +563,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif temp_chunk.ID == MAT_BUMP_PERCENT: elif temp_chunk.ID == MAT_BUMP_PERCENT:
contextWrapper.normalmap_strength = (float(read_short(temp_chunk) / 100)) contextWrapper.normalmap_strength = (float(read_short(temp_chunk) / 100))
elif mapto in {'COLOR', 'SPECULARITY'} and temp_chunk.ID == MAT_MAP_TEXBLUR:
contextWrapper.node_principled_bsdf.inputs['Sheen Weight'].default_value = float(read_float(temp_chunk))
elif temp_chunk.ID == MAT_MAP_TILING: elif temp_chunk.ID == MAT_MAP_TILING:
"""Control bit flags, where 0x1 activates decaling, 0x2 activates mirror, """Control bit flags, where 0x1 activates decaling, 0x2 activates mirror,
@ -645,7 +651,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
hyp = math.sqrt(pow(plane.x,2) + pow(plane.y,2)) hyp = math.sqrt(pow(plane.x,2) + pow(plane.y,2))
dia = math.sqrt(pow(hyp,2) + pow(plane.z,2)) dia = math.sqrt(pow(hyp,2) + pow(plane.z,2))
yaw = math.atan2(math.copysign(hyp, sign_xy), axis_xy) yaw = math.atan2(math.copysign(hyp, sign_xy), axis_xy)
bow = math.acos(hyp / dia) bow = math.acos(hyp / dia) if dia != 0 else 0
turn = angle - yaw if check_sign else angle + yaw turn = angle - yaw if check_sign else angle + yaw
tilt = angle - bow if loca.z > target.z else angle + bow tilt = angle - bow if loca.z > target.z else angle + bow
pan = yaw if check_axes else turn pan = yaw if check_axes else turn
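A minimal sketch of the guarded pitch computation above (pitch_toward is a hypothetical helper distilled from the hunk; plane is the vector from the light or camera to its target): when the object sits exactly on its target, dia is 0 and the old acos(hyp / dia) raised ZeroDivisionError.

import math
from mathutils import Vector

def pitch_toward(loca, target):
    plane = target - loca
    hyp = math.sqrt(pow(plane.x, 2) + pow(plane.y, 2))   # horizontal distance
    dia = math.sqrt(pow(hyp, 2) + pow(plane.z, 2))       # full distance
    return math.acos(hyp / dia) if dia != 0 else 0       # elevation angle ("bow")

print(pitch_toward(Vector((0, 0, 0)), Vector((1, 0, 1))))  # ~0.7854 (45 degrees)
print(pitch_toward(Vector((1, 2, 3)), Vector((1, 2, 3))))  # 0 instead of a crash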
@ -1002,7 +1008,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_XPFALL: elif new_chunk.ID == MAT_XPFALL:
read_chunk(file, temp_chunk) read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT: if temp_chunk.ID == PCT_SHORT:
contextTransmission = float(read_short(temp_chunk) / 100) contextTransmission = float(abs(read_short(temp_chunk) / 100))
else: else:
skip_to_end(file, temp_chunk) skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read new_chunk.bytes_read += temp_chunk.bytes_read
@ -1165,6 +1171,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
context.view_layer.active_layer_collection.collection.objects.link(contextLamp) context.view_layer.active_layer_collection.collection.objects.link(contextLamp)
imported_objects.append(contextLamp) imported_objects.append(contextLamp)
object_dictionary[contextObName] = contextLamp object_dictionary[contextObName] = contextLamp
contextLamp.data.use_shadow = False
contextLamp.location = read_float_array(new_chunk) # Position contextLamp.location = read_float_array(new_chunk) # Position
contextMatrix = None # Reset matrix contextMatrix = None # Reset matrix
elif CreateLightObject and new_chunk.ID == COLOR_F: # Color elif CreateLightObject and new_chunk.ID == COLOR_F: # Color
@ -1181,7 +1188,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
# If spotlight chunk # If spotlight chunk
elif CreateLightObject and new_chunk.ID == LIGHT_SPOTLIGHT: # Spotlight elif CreateLightObject and new_chunk.ID == LIGHT_SPOTLIGHT: # Spotlight
contextLamp.data.type = 'SPOT' contextLamp.data.type = 'SPOT'
contextLamp.data.use_shadow = False
spot = mathutils.Vector(read_float_array(new_chunk)) # Spot location spot = mathutils.Vector(read_float_array(new_chunk)) # Spot location
aim = calc_target(contextLamp.location, spot) # Target aim = calc_target(contextLamp.location, spot) # Target
contextLamp.rotation_euler.x = aim[0] contextLamp.rotation_euler.x = aim[0]
@ -1622,6 +1628,7 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True,
# here we go! # here we go!
read_chunk(file, current_chunk) read_chunk(file, current_chunk)
if current_chunk.ID != PRIMARY: if current_chunk.ID != PRIMARY:
context.window.cursor_set('DEFAULT')
print("\tFatal Error: Not a valid 3ds file: %r" % filepath) print("\tFatal Error: Not a valid 3ds file: %r" % filepath)
file.close() file.close()
return return

View File

@ -5,7 +5,7 @@
bl_info = { bl_info = {
"name": "FBX format", "name": "FBX format",
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem", "author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
"version": (5, 8, 11), "version": (5, 10, 1),
"blender": (4, 1, 0), "blender": (4, 1, 0),
"location": "File > Import-Export", "location": "File > Import-Export",
"description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions", "description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",

View File

@ -1155,51 +1155,69 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
# Loop normals. # Loop normals.
tspacenumber = 0 tspacenumber = 0
if write_normals: if write_normals:
# NOTE: this is not supported by importer currently. # NOTE: ByVertice-IndexToDirect is not supported by the importer currently.
# XXX Official docs says normals should use IndexToDirect, # XXX Official docs says normals should use IndexToDirect,
# but this does not seem well supported by apps currently... # but this does not seem well supported by apps currently...
ln_bl_dtype = np.single normal_bl_dtype = np.single
ln_fbx_dtype = np.float64 normal_fbx_dtype = np.float64
t_ln = np.empty(len(me.loops) * 3, dtype=ln_bl_dtype) match me.normals_domain:
me.loops.foreach_get("normal", t_ln) case 'POINT':
t_ln = nors_transformed(t_ln, geom_mat_no, ln_fbx_dtype) # All faces are smooth shaded, so we can get normals from the vertices.
normal_source = me.vertex_normals
normal_mapping = b"ByVertice"
case 'FACE':
# Either all faces or all edges are sharp, so we can get normals from the faces.
normal_source = me.polygon_normals
normal_mapping = b"ByPolygon"
case 'CORNER':
# We have a mix of sharp/smooth edges/faces or custom split normals, so need to get normals from
# corners.
normal_source = me.corner_normals
normal_mapping = b"ByPolygonVertex"
case _:
# Unreachable
raise AssertionError("Unexpected normals domain '%s'" % me.normals_domain)
# Each normal has 3 components, so the length is multiplied by 3.
t_normal = np.empty(len(normal_source) * 3, dtype=normal_bl_dtype)
normal_source.foreach_get("vector", t_normal)
t_normal = nors_transformed(t_normal, geom_mat_no, normal_fbx_dtype)
if 0: if 0:
lnidx_fbx_dtype = np.int32 normal_idx_fbx_dtype = np.int32
lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0) lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION) elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION)
elem_data_single_string(lay_nor, b"Name", b"") elem_data_single_string(lay_nor, b"Name", b"")
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex") elem_data_single_string(lay_nor, b"MappingInformationType", normal_mapping)
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"IndexToDirect") elem_data_single_string(lay_nor, b"ReferenceInformationType", b"IndexToDirect")
# Tuple of unique sorted normals and then the index in the unique sorted normals of each normal in t_ln. # Tuple of unique sorted normals and then the index in the unique sorted normals of each normal in t_normal.
# Since we don't care about how the normals are sorted, only that they're unique, we can use the fast unique # Since we don't care about how the normals are sorted, only that they're unique, we can use the fast unique
# helper function. # helper function.
t_ln, t_lnidx = fast_first_axis_unique(t_ln.reshape(-1, 3), return_inverse=True) t_normal, t_normal_idx = fast_first_axis_unique(t_normal.reshape(-1, 3), return_inverse=True)
# Convert to the type for fbx # Convert to the type for fbx
t_lnidx = astype_view_signedness(t_lnidx, lnidx_fbx_dtype) t_normal_idx = astype_view_signedness(t_normal_idx, normal_idx_fbx_dtype)
elem_data_single_float64_array(lay_nor, b"Normals", t_ln) elem_data_single_float64_array(lay_nor, b"Normals", t_normal)
# Normal weights, no idea what it is. # Normal weights, no idea what it is.
# t_lnw = np.zeros(len(t_ln), dtype=np.float64) # t_normal_w = np.zeros(len(t_normal), dtype=np.float64)
# elem_data_single_float64_array(lay_nor, b"NormalsW", t_lnw) # elem_data_single_float64_array(lay_nor, b"NormalsW", t_normal_w)
elem_data_single_int32_array(lay_nor, b"NormalsIndex", t_lnidx) elem_data_single_int32_array(lay_nor, b"NormalsIndex", t_normal_idx)
del t_lnidx del t_normal_idx
# del t_lnw # del t_normal_w
else: else:
lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0) lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION) elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION)
elem_data_single_string(lay_nor, b"Name", b"") elem_data_single_string(lay_nor, b"Name", b"")
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex") elem_data_single_string(lay_nor, b"MappingInformationType", normal_mapping)
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct") elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Normals", t_ln) elem_data_single_float64_array(lay_nor, b"Normals", t_normal)
# Normal weights, no idea what it is. # Normal weights, no idea what it is.
# t_ln = np.zeros(len(me.loops), dtype=np.float64) # t_normal = np.zeros(len(me.loops), dtype=np.float64)
# elem_data_single_float64_array(lay_nor, b"NormalsW", t_ln) # elem_data_single_float64_array(lay_nor, b"NormalsW", t_normal)
del t_ln del t_normal
# tspace # tspace
if scene_data.settings.use_tspace: if scene_data.settings.use_tspace:
@ -1218,7 +1236,7 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
else: else:
del t_lt del t_lt
num_loops = len(me.loops) num_loops = len(me.loops)
t_ln = np.empty(num_loops * 3, dtype=ln_bl_dtype) t_ln = np.empty(num_loops * 3, dtype=normal_bl_dtype)
# t_lnw = np.zeros(len(me.loops), dtype=np.float64) # t_lnw = np.zeros(len(me.loops), dtype=np.float64)
uv_names = [uvlayer.name for uvlayer in me.uv_layers] uv_names = [uvlayer.name for uvlayer in me.uv_layers]
# Annoying, `me.calc_tangent` errors in case there is no geometry... # Annoying, `me.calc_tangent` errors in case there is no geometry...
@ -1236,7 +1254,7 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex") elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct") elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Binormals", elem_data_single_float64_array(lay_nor, b"Binormals",
nors_transformed(t_ln, geom_mat_no, ln_fbx_dtype)) nors_transformed(t_ln, geom_mat_no, normal_fbx_dtype))
# Binormal weights, no idea what it is. # Binormal weights, no idea what it is.
# elem_data_single_float64_array(lay_nor, b"BinormalsW", t_lnw) # elem_data_single_float64_array(lay_nor, b"BinormalsW", t_lnw)
@ -1249,7 +1267,7 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex") elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct") elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Tangents", elem_data_single_float64_array(lay_nor, b"Tangents",
nors_transformed(t_ln, geom_mat_no, ln_fbx_dtype)) nors_transformed(t_ln, geom_mat_no, normal_fbx_dtype))
# Tangent weights, no idea what it is. # Tangent weights, no idea what it is.
# elem_data_single_float64_array(lay_nor, b"TangentsW", t_lnw) # elem_data_single_float64_array(lay_nor, b"TangentsW", t_lnw)

View File

@ -629,7 +629,7 @@ def _transformation_curves_gen(item, values_arrays, channel_keys):
# Create matrices/euler from the initial transformation values of this item. # Create matrices/euler from the initial transformation values of this item.
# These variables will be updated in-place as we iterate through each frame. # These variables will be updated in-place as we iterate through each frame.
lcl_translation_mat = Matrix.Translation(transform_data.loc) lcl_translation_mat = Matrix.Translation(transform_data.loc)
lcl_rotation_eul = Euler(transform_data.rot, transform_data.rot_ord) lcl_rotation_eul = Euler(convert_deg_to_rad_iter(transform_data.rot), transform_data.rot_ord)
lcl_scaling_mat = Matrix() lcl_scaling_mat = Matrix()
lcl_scaling_mat[0][0], lcl_scaling_mat[1][1], lcl_scaling_mat[2][2] = transform_data.sca lcl_scaling_mat[0][0], lcl_scaling_mat[1][1], lcl_scaling_mat[2][2] = transform_data.sca
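The changed line matters because FBX stores Lcl Rotation in degrees while mathutils.Euler expects radians; a rough equivalent of what convert_deg_to_rad_iter now feeds into Euler() (sample values made up):

import math
from mathutils import Euler

fbx_lcl_rotation = (0.0, 90.0, 45.0)   # degrees, as read from the FBX file
rot_ord = 'XYZ'

lcl_rotation_eul = Euler(tuple(math.radians(v) for v in fbx_lcl_rotation), rot_ord)
print(lcl_rotation_eul)   # x=0.0, y~1.5708, z~0.7854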
@ -1928,7 +1928,11 @@ def blen_read_shapes(fbx_tmpl, fbx_data, objects, me, scene):
# will be clamped, and we'll print a warning message to the console. # will be clamped, and we'll print a warning message to the console.
shape_key_values_in_range = True shape_key_values_in_range = True
bc_uuid_to_keyblocks = {} bc_uuid_to_keyblocks = {}
for bc_uuid, fbx_sdata, fbx_bcdata in fbx_data: for bc_uuid, fbx_sdata, fbx_bcdata, shapes_assigned_to_channel in fbx_data:
num_shapes_assigned_to_channel = len(shapes_assigned_to_channel)
if num_shapes_assigned_to_channel > 1:
# Relevant design task: #104698
raise RuntimeError("FBX in-between Shapes are not currently supported") # See bug report #84111
elem_name_utf8 = elem_name_ensure_class(fbx_sdata, b'Geometry') elem_name_utf8 = elem_name_ensure_class(fbx_sdata, b'Geometry')
indices = elem_prop_first(elem_find_first(fbx_sdata, b'Indexes')) indices = elem_prop_first(elem_find_first(fbx_sdata, b'Indexes'))
dvcos = elem_prop_first(elem_find_first(fbx_sdata, b'Vertices')) dvcos = elem_prop_first(elem_find_first(fbx_sdata, b'Vertices'))
@ -1943,22 +1947,44 @@ def blen_read_shapes(fbx_tmpl, fbx_data, objects, me, scene):
dvcos = dvcos[:-remainder]
dvcos = dvcos.reshape(-1, 3)
+# There must be the same number of indices as vertex coordinate differences.
+assert(len(indices) == len(dvcos))
# We completely ignore normals here!
weight = elem_prop_first(elem_find_first(fbx_bcdata, b'DeformPercent'), default=100.0) / 100.0
-vgweights = elem_prop_first(elem_find_first(fbx_bcdata, b'FullWeights'))
-vgweights = parray_as_ndarray(vgweights) if vgweights else np.empty(0, dtype=data_types.ARRAY_FLOAT64)
-# Not doing the division in-place in-case it's possible for FBX shape keys to be used by more than one mesh.
-vgweights = vgweights / 100.0
-create_vg = (vgweights != 1.0).any()
-# Special case, in case all weights are the same, FullWeight can have only one element - *sigh!*
-nbr_indices = len(indices)
-if len(vgweights) == 1 and nbr_indices > 1:
-    vgweights = np.full_like(indices, vgweights[0], dtype=vgweights.dtype)
-assert(len(vgweights) == nbr_indices == len(dvcos))
+# The FullWeights array stores the deformation percentages of the BlendShapeChannel that fully activate each
+# Shape assigned to the BlendShapeChannel. Blender also uses this array to store Vertex Group weights, but this
+# is not part of the FBX standard.
+full_weights = elem_prop_first(elem_find_first(fbx_bcdata, b'FullWeights'))
+full_weights = parray_as_ndarray(full_weights) if full_weights else np.empty(0, dtype=data_types.ARRAY_FLOAT64)
+# Special case for Blender exported Shape Keys with a Vertex Group assigned. The Vertex Group weights are stored
+# in the FullWeights array.
+# XXX - It's possible, though very rare, to get a false positive here and create a Vertex Group when we
+#       shouldn't. This should only be possible when there are extraneous FullWeights or when there is a single
+#       FullWeight and its value is not 100.0.
+if (
+        # Blender exported Shape Keys only ever export as 1 Shape per BlendShapeChannel.
+        num_shapes_assigned_to_channel == 1
+        # There should be one vertex weight for each vertex moved by the Shape.
+        and len(full_weights) == len(indices)
+        # Skip creating a Vertex Group when all the weights are 100.0 because such a Vertex Group has no effect.
+        # This also avoids creating a Vertex Group for imported Shapes that only move a single vertex because
+        # their BlendShapeChannel's singular FullWeight is expected to always be 100.0.
+        and not np.all(full_weights == 100.0)
+        # Blender vertex weights are always within the [0.0, 1.0] range (scaled to [0.0, 100.0] when saving to
+        # FBX). This can eliminate imported BlendShapeChannels from Unreal that have extraneous FullWeights
+        # because the extraneous values are usually negative.
+        and np.all((full_weights >= 0.0) & (full_weights <= 100.0))
+):
+    # Not doing the division in-place because it's technically possible for FBX BlendShapeChannels to be used by
+    # more than one FBX BlendShape, though this shouldn't be the case for Blender exported Shape Keys.
+    vgweights = full_weights / 100.0
+else:
+    vgweights = None
+    # There must be a FullWeight for each Shape. Any extra FullWeights are ignored.
+    assert(len(full_weights) >= num_shapes_assigned_to_channel)
# To add shape keys to the mesh, an Object using the mesh is needed.
if me.shape_keys is None:
@ -1977,7 +2003,7 @@ def blen_read_shapes(fbx_tmpl, fbx_data, objects, me, scene):
kb.value = weight
# Add vgroup if necessary.
-if create_vg:
+if vgweights is not None:
# VertexGroup.add only allows sequences of int indices, but iterating the indices array directly would
# produce numpy scalars of types such as np.int32. The underlying memoryview of the indices array, however,
# does produce standard Python ints when iterated, so pass indices.data to add_vgroup_to_objects instead of
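For readability, the new vertex-group condition can be restated in isolation. This is a minimal sketch with a hypothetical helper name; `full_weights` and `indices` are assumed to be the NumPy arrays parsed above and `num_shapes_assigned_to_channel` the per-channel Shape count:

import numpy as np

def should_create_vertex_group(full_weights, indices, num_shapes_assigned_to_channel):
    # Treat FullWeights as Blender Vertex Group weights only when a single Shape
    # uses the channel, there is one weight per moved vertex, the weights are not
    # all 100.0, and every weight lies within the [0.0, 100.0] range.
    return (
        num_shapes_assigned_to_channel == 1
        and len(full_weights) == len(indices)
        and not np.all(full_weights == 100.0)
        and np.all((full_weights >= 0.0) & (full_weights <= 100.0))
    )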
@ -3508,6 +3534,11 @@ def load(operator, context, filepath="",
seen_connections.add(connection_key)
yield c_dst_uuid, fbx_data, bl_data
+# XXX - Multiple Shapes can be assigned to a single BlendShapeChannel to create a progressive blend between the
+#       base mesh and the assigned Shapes, with the percentage at which each Shape is fully blended being stored
+#       in the BlendShapeChannel's FullWeights array. This is also known as 'in-between shapes'.
+#       We don't have any support for in-between shapes currently.
+blend_shape_channel_to_shapes = {}
mesh_to_shapes = {}
for s_uuid, (fbx_sdata, _bl_sdata) in fbx_table_nodes.items():
if fbx_sdata is None or fbx_sdata.id != b'Geometry' or fbx_sdata.props[2] != b'Shape':
@ -3515,6 +3546,9 @@ def load(operator, context, filepath="",
# shape -> blendshapechannel -> blendshape -> mesh.
for bc_uuid, fbx_bcdata, _bl_bcdata in connections_gen(s_uuid, b'Deformer', b'BlendShapeChannel'):
+# Track the Shapes connected to each BlendShapeChannel.
+shapes_assigned_to_channel = blend_shape_channel_to_shapes.setdefault(bc_uuid, [])
+shapes_assigned_to_channel.append(s_uuid)
for bs_uuid, _fbx_bsdata, _bl_bsdata in connections_gen(bc_uuid, b'Deformer', b'BlendShape'):
for m_uuid, _fbx_mdata, bl_mdata in connections_gen(bs_uuid, b'Geometry', b'Mesh'):
# Blenmeshes are assumed already created at that time!
@ -3534,7 +3568,10 @@ def load(operator, context, filepath="",
mesh_to_shapes[bl_mdata] = (objects, shapes_list)
else:
shapes_list = mesh_to_shapes[bl_mdata][1]
-shapes_list.append((bc_uuid, fbx_sdata, fbx_bcdata))
+# Only the number of shapes assigned to each BlendShapeChannel needs to be passed through to
+# `blen_read_shapes`, but that number isn't known until all the connections have been
+# iterated, so pass the `shapes_assigned_to_channel` list instead.
+shapes_list.append((bc_uuid, fbx_sdata, fbx_bcdata, shapes_assigned_to_channel))
# BlendShape deformers are only here to connect BlendShapeChannels to meshes, nothing else to do.
# Iterate through each mesh and create its shape keys
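The list stored per channel is only turned into a count later, inside blen_read_shapes; a minimal sketch of that consumption (the exact unpacking is an assumption, only the appended tuple shape is shown in this diff):

def read_channel_shape_counts(shapes_list):
    # Hypothetical illustration: the per-channel Shape count is derived from the stored list,
    # which is only complete once every connection has been visited.
    for bc_uuid, fbx_sdata, fbx_bcdata, shapes_assigned_to_channel in shapes_list:
        yield bc_uuid, len(shapes_assigned_to_channel)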

View File

@ -5,7 +5,7 @@
bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
-"version": (4, 1, 17),
+"version": (4, 1, 33),
'blender': (4, 1, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',
@ -103,6 +103,24 @@ def on_export_format_changed(self, context):
# Force update of file list, because update the filter does not update the real file list # Force update of file list, because update the filter does not update the real file list
bpy.ops.file.refresh() bpy.ops.file.refresh()
def on_export_action_filter_changed(self, context):
if self.export_action_filter is True:
bpy.types.Scene.gltf_action_filter = bpy.props.CollectionProperty(type=GLTF2_filter_action)
bpy.types.Scene.gltf_action_filter_active = bpy.props.IntProperty()
for action in bpy.data.actions:
if id(action) not in [id(item.action) for item in bpy.data.scenes[0].gltf_action_filter]:
item = bpy.data.scenes[0].gltf_action_filter.add()
item.keep = True
item.action = action
else:
bpy.data.scenes[0].gltf_action_filter.clear()
del bpy.types.Scene.gltf_action_filter
del bpy.types.Scene.gltf_action_filter_active
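Once export_action_filter is enabled, the filter entries live on the first scene (the property itself is registered on bpy.types.Scene). A short sketch of reading back which actions are kept, using only the names introduced above:

import bpy

scene = bpy.data.scenes[0]
if hasattr(scene, "gltf_action_filter"):
    # Each entry pairs an Action with a per-action "keep" flag.
    kept_actions = [item.action for item in scene.gltf_action_filter if item.keep]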
class ConvertGLTF2_Base: class ConvertGLTF2_Base:
"""Base class containing options that should be exposed during both import and export.""" """Base class containing options that should be exposed during both import and export."""
@ -251,6 +269,12 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
default=True default=True
) )
export_gn_mesh: BoolProperty(
name='Geometry Nodes Instances (Experimental)',
description='Export Geometry nodes instance meshes',
default=False
)
export_draco_mesh_compression_enable: BoolProperty( export_draco_mesh_compression_enable: BoolProperty(
name='Draco mesh compression', name='Draco mesh compression',
description='Compress mesh using Draco', description='Compress mesh using Draco',
@ -323,6 +347,16 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
default='EXPORT' default='EXPORT'
) )
export_unused_images: BoolProperty(
name='Unused images',
description='Export images not assigned to any material',
default=False)
export_unused_textures: BoolProperty(
name='Unused textures',
description='Export image texture nodes not assigned to any material',
default=False)
export_colors: BoolProperty( export_colors: BoolProperty(
name='dummy', name='dummy',
description='Keep for compatibility only', description='Keep for compatibility only',
@ -412,6 +446,12 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
default=False default=False
) )
export_shared_accessors: BoolProperty(
name='Shared Accessors',
description='Export Primitives using shared accessors for attributes',
default=False
)
export_animations: BoolProperty( export_animations: BoolProperty(
name='Animations', name='Animations',
description='Exports active actions and NLA tracks as glTF animations', description='Exports active actions and NLA tracks as glTF animations',
@ -671,6 +711,13 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
default=False default=False
) )
export_action_filter: BoolProperty(
name='Filter Actions',
description='Filter Actions to be exported',
default=False,
update=on_export_action_filter_changed,
)
# This parameter is only here for backward compatibility, as this option is removed in 3.6 # This parameter is only here for backward compatibility, as this option is removed in 3.6
# This option does nothing, and is not displayed in UI # This option does nothing, and is not displayed in UI
# What you are looking for is probably "export_animation_mode" # What you are looking for is probably "export_animation_mode"
@ -698,6 +745,12 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
description='Store glTF export settings in the Blender project', description='Store glTF export settings in the Blender project',
default=False) default=False)
export_hierarchy_full_collections: BoolProperty(
name='Full Collection hierarchy',
description='Export full hierarchy, including inbetween collection',
default=False
)
# Custom scene property for saving settings # Custom scene property for saving settings
scene_key = "glTF2ExportSettings" scene_key = "glTF2ExportSettings"
@ -775,6 +828,11 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
# All custom export settings are stored in this container. # All custom export settings are stored in this container.
export_settings = {} export_settings = {}
export_settings['exported_images'] = {}
export_settings['exported_texture_nodes'] = []
export_settings['additional_texture_export'] = []
export_settings['additional_texture_export_current_idx'] = 0
export_settings['timestamp'] = datetime.datetime.now() export_settings['timestamp'] = datetime.datetime.now()
export_settings['gltf_export_id'] = self.gltf_export_id export_settings['gltf_export_id'] = self.gltf_export_id
export_settings['gltf_filepath'] = self.filepath export_settings['gltf_filepath'] = self.filepath
@ -790,7 +848,6 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_settings['gltf_add_webp'] = self.export_image_add_webp export_settings['gltf_add_webp'] = self.export_image_add_webp
export_settings['gltf_webp_fallback'] = self.export_image_webp_fallback export_settings['gltf_webp_fallback'] = self.export_image_webp_fallback
export_settings['gltf_image_quality'] = self.export_image_quality
-export_settings['gltf_image_quality'] = self.export_jpeg_quality #For back compatibility
export_settings['gltf_copyright'] = self.export_copyright export_settings['gltf_copyright'] = self.export_copyright
export_settings['gltf_texcoords'] = self.export_texcoords export_settings['gltf_texcoords'] = self.export_texcoords
export_settings['gltf_normals'] = self.export_normals export_settings['gltf_normals'] = self.export_normals
@ -809,10 +866,15 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
else: else:
export_settings['gltf_draco_mesh_compression'] = False export_settings['gltf_draco_mesh_compression'] = False
export_settings['gltf_gn_mesh'] = self.export_gn_mesh
export_settings['gltf_materials'] = self.export_materials export_settings['gltf_materials'] = self.export_materials
export_settings['gltf_attributes'] = self.export_attributes export_settings['gltf_attributes'] = self.export_attributes
export_settings['gltf_cameras'] = self.export_cameras export_settings['gltf_cameras'] = self.export_cameras
export_settings['gltf_unused_textures'] = self.export_unused_textures
export_settings['gltf_unused_images'] = self.export_unused_images
export_settings['gltf_visible'] = self.use_visible export_settings['gltf_visible'] = self.use_visible
export_settings['gltf_renderable'] = self.use_renderable export_settings['gltf_renderable'] = self.use_renderable
@ -828,6 +890,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_settings['gltf_extras'] = self.export_extras export_settings['gltf_extras'] = self.export_extras
export_settings['gltf_yup'] = self.export_yup export_settings['gltf_yup'] = self.export_yup
export_settings['gltf_apply'] = self.export_apply export_settings['gltf_apply'] = self.export_apply
export_settings['gltf_shared_accessors'] = self.export_shared_accessors
export_settings['gltf_current_frame'] = self.export_current_frame export_settings['gltf_current_frame'] = self.export_current_frame
export_settings['gltf_animations'] = self.export_animations export_settings['gltf_animations'] = self.export_animations
export_settings['gltf_def_bones'] = self.export_def_bones export_settings['gltf_def_bones'] = self.export_def_bones
@ -897,6 +960,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
if not self.export_try_sparse_sk: if not self.export_try_sparse_sk:
export_settings['gltf_try_omit_sparse_sk'] = False export_settings['gltf_try_omit_sparse_sk'] = False
export_settings['gltf_hierarchy_full_collections'] = self.export_hierarchy_full_collections
export_settings['gltf_binary'] = bytearray() export_settings['gltf_binary'] = bytearray()
export_settings['gltf_binaryfilename'] = ( export_settings['gltf_binaryfilename'] = (
@ -1066,8 +1130,10 @@ class GLTF_PT_export_data_scene(bpy.types.Panel):
sfile = context.space_data sfile = context.space_data
operator = sfile.active_operator operator = sfile.active_operator
layout.prop(operator, 'export_gn_mesh')
layout.prop(operator, 'export_gpu_instances') layout.prop(operator, 'export_gpu_instances')
layout.prop(operator, 'export_hierarchy_flatten_objs') layout.prop(operator, 'export_hierarchy_flatten_objs')
layout.prop(operator, 'export_hierarchy_full_collections')
class GLTF_PT_export_data_mesh(bpy.types.Panel): class GLTF_PT_export_data_mesh(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER' bl_space_type = 'FILE_BROWSER'
@ -1102,6 +1168,9 @@ class GLTF_PT_export_data_mesh(bpy.types.Panel):
col.prop(operator, 'use_mesh_edges') col.prop(operator, 'use_mesh_edges')
col.prop(operator, 'use_mesh_vertices') col.prop(operator, 'use_mesh_vertices')
col = layout.column()
col.prop(operator, 'export_shared_accessors')
class GLTF_PT_export_data_material(bpy.types.Panel): class GLTF_PT_export_data_material(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER' bl_space_type = 'FILE_BROWSER'
@ -1137,6 +1206,34 @@ class GLTF_PT_export_data_material(bpy.types.Panel):
col.active = operator.export_image_format != "WEBP" col.active = operator.export_image_format != "WEBP"
col.prop(operator, "export_image_webp_fallback") col.prop(operator, "export_image_webp_fallback")
class GLTF_PT_export_unsed_tex_image(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Unused Textures & Images"
bl_parent_id = "GLTF_PT_export_data_material"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "EXPORT_SCENE_OT_gltf"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
sfile = context.space_data
operator = sfile.active_operator
row = layout.row()
row.prop(operator, 'export_unused_images')
row = layout.row()
row.prop(operator, 'export_unused_textures')
class GLTF_PT_export_data_lighting(bpy.types.Panel): class GLTF_PT_export_data_lighting(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER' bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS' bl_region_type = 'TOOL_PROPS'
@ -1799,6 +1896,10 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
self.loglevel = logging.NOTSET self.loglevel = logging.NOTSET
class GLTF2_filter_action(bpy.types.PropertyGroup):
keep : bpy.props.BoolProperty(name="Keep Animation")
action: bpy.props.PointerProperty(type=bpy.types.Action)
def gltf_variant_ui_update(self, context): def gltf_variant_ui_update(self, context):
from .blender.com.gltf2_blender_ui import variant_register, variant_unregister from .blender.com.gltf2_blender_ui import variant_register, variant_unregister
if self.KHR_materials_variants_ui is True: if self.KHR_materials_variants_ui is True:
@ -1855,6 +1956,7 @@ classes = (
GLTF_PT_export_data_scene, GLTF_PT_export_data_scene,
GLTF_PT_export_data_mesh, GLTF_PT_export_data_mesh,
GLTF_PT_export_data_material, GLTF_PT_export_data_material,
GLTF_PT_export_unsed_tex_image,
GLTF_PT_export_data_shapekeys, GLTF_PT_export_data_shapekeys,
GLTF_PT_export_data_sk_optimize, GLTF_PT_export_data_sk_optimize,
GLTF_PT_export_data_armature, GLTF_PT_export_data_armature,
@ -1871,6 +1973,7 @@ classes = (
GLTF_PT_export_user_extensions, GLTF_PT_export_user_extensions,
ImportGLTF2, ImportGLTF2,
GLTF_PT_import_user_extensions, GLTF_PT_import_user_extensions,
GLTF2_filter_action,
GLTF_AddonPreferences GLTF_AddonPreferences
) )

View File

@ -121,21 +121,21 @@ def get_attribute_type(component_type, data_type):
return {
gltf2_io_constants.ComponentType.Float: "FLOAT",
gltf2_io_constants.ComponentType.UnsignedByte: "INT" # What is the best for compatibility?
-}[component_type]
+}.get(component_type, None)
elif gltf2_io_constants.DataType.num_elements(data_type) == 2:
return {
gltf2_io_constants.ComponentType.Float: "FLOAT2"
-}[component_type]
+}.get(component_type, None)
elif gltf2_io_constants.DataType.num_elements(data_type) == 3:
return {
gltf2_io_constants.ComponentType.Float: "FLOAT_VECTOR"
-}[component_type]
+}.get(component_type, None)
elif gltf2_io_constants.DataType.num_elements(data_type) == 4:
return {
gltf2_io_constants.ComponentType.Float: "FLOAT_COLOR",
gltf2_io_constants.ComponentType.UnsignedShort: "BYTE_COLOR",
gltf2_io_constants.ComponentType.UnsignedByte: "BYTE_COLOR" # What is the best for compatibility?
-}[component_type]
+}.get(component_type, None)
else:
pass
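The switch from dict indexing to .get(component_type, None) turns an unsupported component type from a KeyError into a None return the caller can skip. A tiny generic illustration with plain dicts (values are glTF component-type codes, used here only as an example):

mapping = {5126: "FLOAT"}   # glTF component type -> Blender attribute type
try:
    mapping[5123]            # old behaviour: an unmapped type raises KeyError
except KeyError:
    pass
assert mapping.get(5123, None) is None   # new behaviour: None, so the attribute can be skipped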
@ -145,3 +145,11 @@ def get_gltf_interpolation(interpolation):
"LINEAR": "LINEAR", "LINEAR": "LINEAR",
"CONSTANT": "STEP" "CONSTANT": "STEP"
}.get(interpolation, "LINEAR") }.get(interpolation, "LINEAR")
def get_anisotropy_rotation_gltf_to_blender(rotation):
    # glTF rotation is in radians, Blender uses the 0 to 1 range
    return rotation / (2 * np.pi)

def get_anisotropy_rotation_blender_to_gltf(rotation):
    # glTF rotation is in radians, Blender uses the 0 to 1 range
    return rotation * (2 * np.pi)
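A quick round-trip check of the two conversions defined just above (pi radians in glTF corresponds to 0.5 in Blender):

import numpy as np

blender_rotation = get_anisotropy_rotation_gltf_to_blender(np.pi)          # 0.5
gltf_rotation = get_anisotropy_rotation_blender_to_gltf(blender_rotation)  # back to pi
assert abs(gltf_rotation - np.pi) < 1e-9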

View File

@ -7,3 +7,9 @@ BLENDER_SPECULAR = 0.5
BLENDER_SPECULAR_TINT = 0.0 BLENDER_SPECULAR_TINT = 0.0
BLENDER_GLTF_SPECIAL_COLLECTION = "glTF_not_exported" BLENDER_GLTF_SPECIAL_COLLECTION = "glTF_not_exported"
LIGHTS = {
"POINT": "point",
"SUN": "directional",
"SPOT": "spot"
}
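A minimal usage sketch of the new LIGHTS mapping, translating a Blender light type to its glTF punctual-light name (the example value is illustrative):

blender_light_type = "SPOT"
gltf_light_type = LIGHTS.get(blender_light_type)  # "spot"; None for unsupported types such as "AREA"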

View File

@ -518,11 +518,87 @@ class SCENE_PT_gltf2_animation(bpy.types.Panel):
class GLTF2_weight(bpy.types.PropertyGroup): class GLTF2_weight(bpy.types.PropertyGroup):
val : bpy.props.FloatProperty(name="weight") val : bpy.props.FloatProperty(name="weight")
################################### Filtering animation ####################
class SCENE_OT_gltf2_action_filter_refresh(bpy.types.Operator):
"""Refresh list of actions"""
bl_idname = "scene.gltf2_action_filter_refresh"
bl_label = "Refresh action list"
bl_options = {'REGISTER'}
@classmethod
def poll(self, context):
return True
def execute(self, context):
for action in bpy.data.actions:
if id(action) in [id(i.action) for i in bpy.data.scenes[0].gltf_action_filter]:
continue
item = bpy.data.scenes[0].gltf_action_filter.add()
item.action = action
item.keep = True
return {'FINISHED'}
class SCENE_UL_gltf2_filter_action(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
action = item.action
layout.context_pointer_set("id", action)
if self.layout_type in {'DEFAULT', 'COMPACT'}:
layout.prop(item.action, "name", text="", emboss=False)
layout.prop(item, "keep", text="", emboss=True)
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
class SCENE_PT_gltf2_action_filter(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Action Filter"
bl_parent_id = "GLTF_PT_export_animation"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(self, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.export_animation_mode in ["ACTIONS", "ACTIVE_ACTIONS"]
def draw_header(self, context):
sfile = context.space_data
operator = sfile.active_operator
self.layout.prop(operator, "export_action_filter", text="")
def draw(self, context):
layout = self.layout
row = layout.row()
sfile = context.space_data
operator = sfile.active_operator
if operator.export_action_filter is False:
return
layout.active = operator.export_animations and operator.export_action_filter
if len(bpy.data.actions) > 0:
row.template_list("SCENE_UL_gltf2_filter_action", "", bpy.data.scenes[0], "gltf_action_filter", bpy.data.scenes[0], "gltf_action_filter_active")
col = row.column()
row = col.column(align=True)
row.operator("scene.gltf2_action_filter_refresh", icon="FILE_REFRESH", text="")
else:
row.label(text="No Actions in .blend file")
############################################################################### ###############################################################################
def register(): def register():
bpy.utils.register_class(NODE_OT_GLTF_SETTINGS) bpy.utils.register_class(NODE_OT_GLTF_SETTINGS)
bpy.types.NODE_MT_category_shader_output.append(add_gltf_settings_to_menu) bpy.types.NODE_MT_category_shader_output.append(add_gltf_settings_to_menu)
bpy.utils.register_class(SCENE_OT_gltf2_action_filter_refresh)
bpy.utils.register_class(SCENE_UL_gltf2_filter_action)
bpy.utils.register_class(SCENE_PT_gltf2_action_filter)
def variant_register(): def variant_register():
bpy.utils.register_class(SCENE_OT_gltf2_display_variant) bpy.utils.register_class(SCENE_OT_gltf2_display_variant)
@ -550,6 +626,9 @@ def variant_register():
def unregister(): def unregister():
bpy.utils.unregister_class(NODE_OT_GLTF_SETTINGS) bpy.utils.unregister_class(NODE_OT_GLTF_SETTINGS)
bpy.utils.unregister_class(SCENE_PT_gltf2_action_filter)
bpy.utils.unregister_class(SCENE_UL_gltf2_filter_action)
bpy.utils.unregister_class(SCENE_OT_gltf2_action_filter_refresh)
def variant_unregister(): def variant_unregister():
bpy.utils.unregister_class(SCENE_OT_gltf2_variant_add) bpy.utils.unregister_class(SCENE_OT_gltf2_variant_add)

View File

@ -13,8 +13,8 @@ from ...com.gltf2_blender_extras import generate_extras
from ..gltf2_blender_gather_cache import cached from ..gltf2_blender_gather_cache import cached
from ..gltf2_blender_gather_tree import VExportNode from ..gltf2_blender_gather_tree import VExportNode
from .fcurves.gltf2_blender_gather_fcurves_animation import gather_animation_fcurves
-from .sampled.armature.gltf2_blender_gather_armature_action_sampled import gather_action_armature_sampled
-from .sampled.armature.gltf2_blender_gather_armature_channels import gather_sampled_bone_channel
+from .sampled.armature.armature_action_sampled import gather_action_armature_sampled
+from .sampled.armature.armature_channels import gather_sampled_bone_channel
from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled
from .sampled.shapekeys.gltf2_blender_gather_sk_action_sampled import gather_action_sk_sampled
from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels, gather_sampled_object_channel
@ -41,6 +41,9 @@ def gather_actions_animations(export_settings):
else: else:
continue continue
if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.COLLECTION:
continue
animations_, merged_tracks = gather_action_animations(obj_uuid, merged_tracks, len(animations), export_settings) animations_, merged_tracks = gather_action_animations(obj_uuid, merged_tracks, len(animations), export_settings)
animations += animations_ animations += animations_
@ -66,6 +69,9 @@ def prepare_actions_range(export_settings):
vtree = export_settings['vtree'] vtree = export_settings['vtree']
for obj_uuid in vtree.get_all_objects(): for obj_uuid in vtree.get_all_objects():
if vtree.nodes[obj_uuid].blender_type == VExportNode.COLLECTION:
continue
# Do not manage not exported objects # Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None: if vtree.nodes[obj_uuid].node is None:
if export_settings["gltf_armature_object_remove"] is True: if export_settings["gltf_armature_object_remove"] is True:
@ -223,14 +229,14 @@ def gather_action_animations( obj_uuid: int,
current_action = None current_action = None
current_sk_action = None current_sk_action = None
current_world_matrix = None current_world_matrix = None
-if blender_object.animation_data and blender_object.animation_data.action:
+if blender_object and blender_object.animation_data and blender_object.animation_data.action:
# There is an active action. Storing it, to be able to restore after switching all actions during export # There is an active action. Storing it, to be able to restore after switching all actions during export
current_action = blender_object.animation_data.action current_action = blender_object.animation_data.action
elif len(blender_actions) != 0 and blender_object.animation_data is not None and blender_object.animation_data.action is None: elif len(blender_actions) != 0 and blender_object.animation_data is not None and blender_object.animation_data.action is None:
# No current action set, storing world matrix of object # No current action set, storing world matrix of object
current_world_matrix = blender_object.matrix_world.copy() current_world_matrix = blender_object.matrix_world.copy()
if blender_object.type == "MESH" \ if blender_object and blender_object.type == "MESH" \
and blender_object.data is not None \ and blender_object.data is not None \
and blender_object.data.shape_keys is not None \ and blender_object.data.shape_keys is not None \
and blender_object.data.shape_keys.animation_data is not None \ and blender_object.data.shape_keys.animation_data is not None \
@ -239,7 +245,7 @@ def gather_action_animations( obj_uuid: int,
# Remove any solo (starred) NLA track. Restored after export # Remove any solo (starred) NLA track. Restored after export
solo_track = None solo_track = None
-if blender_object.animation_data:
+if blender_object and blender_object.animation_data:
for track in blender_object.animation_data.nla_tracks: for track in blender_object.animation_data.nla_tracks:
if track.is_solo: if track.is_solo:
solo_track = track solo_track = track
@ -247,11 +253,11 @@ def gather_action_animations( obj_uuid: int,
break break
# Remove any tweak mode. Restore after export # Remove any tweak mode. Restore after export
-if blender_object.animation_data:
+if blender_object and blender_object.animation_data:
restore_tweak_mode = blender_object.animation_data.use_tweak_mode restore_tweak_mode = blender_object.animation_data.use_tweak_mode
# Remove use of NLA. Restore after export # Remove use of NLA. Restore after export
-if blender_object.animation_data:
+if blender_object and blender_object.animation_data:
current_use_nla = blender_object.animation_data.use_nla current_use_nla = blender_object.animation_data.use_nla
blender_object.animation_data.use_nla = False blender_object.animation_data.use_nla = False
@ -400,7 +406,7 @@ def gather_action_animations( obj_uuid: int,
# Restore action status # Restore action status
# TODO: do this in a finally # TODO: do this in a finally
-if blender_object.animation_data:
+if blender_object and blender_object.animation_data:
if blender_object.animation_data.action is not None: if blender_object.animation_data.action is not None:
if current_action is None: if current_action is None:
# remove last exported action # remove last exported action
@ -415,14 +421,14 @@ def gather_action_animations( obj_uuid: int,
blender_object.animation_data.use_tweak_mode = restore_tweak_mode blender_object.animation_data.use_tweak_mode = restore_tweak_mode
blender_object.animation_data.use_nla = current_use_nla blender_object.animation_data.use_nla = current_use_nla
if blender_object.type == "MESH" \ if blender_object and blender_object.type == "MESH" \
and blender_object.data is not None \ and blender_object.data is not None \
and blender_object.data.shape_keys is not None \ and blender_object.data.shape_keys is not None \
and blender_object.data.shape_keys.animation_data is not None: and blender_object.data.shape_keys.animation_data is not None:
reset_sk_data(blender_object, blender_actions, export_settings) reset_sk_data(blender_object, blender_actions, export_settings)
blender_object.data.shape_keys.animation_data.action = current_sk_action blender_object.data.shape_keys.animation_data.action = current_sk_action
-if current_world_matrix is not None:
+if blender_object and current_world_matrix is not None:
blender_object.matrix_world = current_world_matrix blender_object.matrix_world = current_world_matrix
export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, True) export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, True)
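The repeated "blender_object and ..." guards exist because, with Geometry Nodes instances and collection nodes, blender_object can now be None. A small hypothetical helper capturing the same check (not part of the diff, shown only to summarize the pattern):

def has_animation_data(blender_object):
    # GN instances and collection export nodes have no backing Blender object.
    return blender_object is not None and blender_object.animation_data is not None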
@ -441,9 +447,15 @@ def __get_blender_actions(obj_uuid: str,
export_user_extensions('pre_gather_actions_hook', export_settings, blender_object) export_user_extensions('pre_gather_actions_hook', export_settings, blender_object)
-if blender_object.animation_data is not None:
+if blender_object and blender_object.animation_data is not None:
# Collect active action. # Collect active action.
if blender_object.animation_data.action is not None: if blender_object.animation_data.action is not None:
# Check the action is not in list of actions to ignore
if hasattr(bpy.data.scenes[0], "gltf_action_filter") \
and id(blender_object.animation_data.action) in [id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
pass # We ignore this action
else:
blender_actions.append(blender_object.animation_data.action) blender_actions.append(blender_object.animation_data.action)
blender_tracks[blender_object.animation_data.action.name] = None blender_tracks[blender_object.animation_data.action.name] = None
action_on_type[blender_object.animation_data.action.name] = "OBJECT" action_on_type[blender_object.animation_data.action.name] = "OBJECT"
@ -457,17 +469,29 @@ def __get_blender_actions(obj_uuid: str,
if track.strips is None or len(non_muted_strips) != 1: if track.strips is None or len(non_muted_strips) != 1:
continue continue
for strip in non_muted_strips: for strip in non_muted_strips:
# Check the action is not in list of actions to ignore
if hasattr(bpy.data.scenes[0], "gltf_action_filter") \
and id(strip.action) in [id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
continue # We ignore this action
blender_actions.append(strip.action) blender_actions.append(strip.action)
blender_tracks[strip.action.name] = track.name # Always set after possible active action -> None will be overwrite blender_tracks[strip.action.name] = track.name # Always set after possible active action -> None will be overwrite
action_on_type[strip.action.name] = "OBJECT" action_on_type[strip.action.name] = "OBJECT"
# For caching, actions linked to SK must be after actions about TRS # For caching, actions linked to SK must be after actions about TRS
-if export_settings['gltf_morph_anim'] and blender_object.type == "MESH" \
+if export_settings['gltf_morph_anim'] and blender_object and blender_object.type == "MESH" \
and blender_object.data is not None \ and blender_object.data is not None \
and blender_object.data.shape_keys is not None \ and blender_object.data.shape_keys is not None \
and blender_object.data.shape_keys.animation_data is not None: and blender_object.data.shape_keys.animation_data is not None:
if blender_object.data.shape_keys.animation_data.action is not None: if blender_object.data.shape_keys.animation_data.action is not None:
# Check the action is not in list of actions to ignore
if hasattr(bpy.data.scenes[0], "gltf_action_filter") \
and id(blender_object.data.shape_keys.animation_data.action) in [id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
pass # We ignore this action
else:
blender_actions.append(blender_object.data.shape_keys.animation_data.action) blender_actions.append(blender_object.data.shape_keys.animation_data.action)
blender_tracks[blender_object.data.shape_keys.animation_data.action.name] = None blender_tracks[blender_object.data.shape_keys.animation_data.action.name] = None
action_on_type[blender_object.data.shape_keys.animation_data.action.name] = "SHAPEKEY" action_on_type[blender_object.data.shape_keys.animation_data.action.name] = "SHAPEKEY"
@ -480,6 +504,11 @@ def __get_blender_actions(obj_uuid: str,
if track.strips is None or len(non_muted_strips) != 1: if track.strips is None or len(non_muted_strips) != 1:
continue continue
for strip in non_muted_strips: for strip in non_muted_strips:
# Check the action is not in list of actions to ignore
if hasattr(bpy.data.scenes[0], "gltf_action_filter") \
and id(strip.action) in [id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
continue # We ignore this action
blender_actions.append(strip.action) blender_actions.append(strip.action)
blender_tracks[strip.action.name] = track.name # Always set after possible active action -> None will be overwrite blender_tracks[strip.action.name] = track.name # Always set after possible active action -> None will be overwrite
action_on_type[strip.action.name] = "SHAPEKEY" action_on_type[strip.action.name] = "SHAPEKEY"
@ -488,7 +517,7 @@ def __get_blender_actions(obj_uuid: str,
# But only if armature has already some animation_data # But only if armature has already some animation_data
# If not, we says that this armature is never animated, so don't add these additional actions # If not, we says that this armature is never animated, so don't add these additional actions
if export_settings['gltf_export_anim_single_armature'] is True: if export_settings['gltf_export_anim_single_armature'] is True:
if blender_object.type == "ARMATURE" and blender_object.animation_data is not None: if blender_object and blender_object.type == "ARMATURE" and blender_object.animation_data is not None:
if len(export_settings['vtree'].get_all_node_of_type(VExportNode.ARMATURE)) == 1: if len(export_settings['vtree'].get_all_node_of_type(VExportNode.ARMATURE)) == 1:
# Keep all actions on objects (no Shapekey animation) # Keep all actions on objects (no Shapekey animation)
for act in [a for a in bpy.data.actions if a.id_root == "OBJECT"]: for act in [a for a in bpy.data.actions if a.id_root == "OBJECT"]:
@ -499,6 +528,12 @@ def __get_blender_actions(obj_uuid: str,
# Check if this action is already taken into account # Check if this action is already taken into account
if act.name in blender_tracks.keys(): if act.name in blender_tracks.keys():
continue continue
# Check the action is not in list of actions to ignore
if hasattr(bpy.data.scenes[0], "gltf_action_filter") \
and id(act) in [id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]:
continue # We ignore this action
blender_actions.append(act) blender_actions.append(act)
blender_tracks[act.name] = None blender_tracks[act.name] = None
action_on_type[act.name] = "OBJECT" action_on_type[act.name] = "OBJECT"
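The action-filter check is repeated for the active action, NLA strips, and shape-key actions; it could equally be expressed once as a helper. A sketch under the assumption that the gltf_action_filter property only exists when the user enabled the filter (hypothetical function, not part of the diff):

import bpy

def action_is_filtered_out(action):
    scene = bpy.data.scenes[0]
    if not hasattr(scene, "gltf_action_filter"):
        return False
    # Same identity-based comparison as used in the diff above.
    return id(action) in [id(item.action) for item in scene.gltf_action_filter if item.keep is False]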

View File

@ -10,7 +10,7 @@ from ....io.com import gltf2_io
from ....io.exp.gltf2_io_user_extensions import export_user_extensions from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....io.com.gltf2_io_debug import print_console from ....io.com.gltf2_io_debug import print_console
from ..gltf2_blender_gather_tree import VExportNode from ..gltf2_blender_gather_tree import VExportNode
-from .sampled.armature.gltf2_blender_gather_armature_action_sampled import gather_action_armature_sampled
+from .sampled.armature.armature_action_sampled import gather_action_armature_sampled
from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled
from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel
from .gltf2_blender_gather_drivers import get_sk_drivers from .gltf2_blender_gather_drivers import get_sk_drivers
@ -174,7 +174,7 @@ def bake_animation(obj_uuid: str, animation_key: str, export_settings, mode=None
# If force sampling is OFF, can lead to inconsistent export anyway # If force sampling is OFF, can lead to inconsistent export anyway
if (export_settings['gltf_bake_animation'] is True \ if (export_settings['gltf_bake_animation'] is True \
or export_settings['gltf_animation_mode'] == "NLA_TRACKS") \ or export_settings['gltf_animation_mode'] == "NLA_TRACKS") \
and blender_object.type != "ARMATURE" and export_settings['gltf_force_sampling'] is True: and blender_object and blender_object.type != "ARMATURE" and export_settings['gltf_force_sampling'] is True:
animation = None animation = None
# We also have to check if this is a skinned mesh, because we don't have to force animation baking on this case # We also have to check if this is a skinned mesh, because we don't have to force animation baking on this case
# (skinned meshes TRS must be ignored, says glTF specification) # (skinned meshes TRS must be ignored, says glTF specification)
@ -186,6 +186,7 @@ def bake_animation(obj_uuid: str, animation_key: str, export_settings, mode=None
# Need to bake sk only if not linked to a driver sk by parent armature # Need to bake sk only if not linked to a driver sk by parent armature
# In case of NLA track export, no baking of SK # In case of NLA track export, no baking of SK
if export_settings['gltf_morph_anim'] \ if export_settings['gltf_morph_anim'] \
and blender_object \
and blender_object.type == "MESH" \ and blender_object.type == "MESH" \
and blender_object.data is not None \ and blender_object.data is not None \
and blender_object.data.shape_keys is not None: and blender_object.data.shape_keys is not None:
@ -220,6 +221,7 @@ def bake_animation(obj_uuid: str, animation_key: str, export_settings, mode=None
elif (export_settings['gltf_bake_animation'] is True \ elif (export_settings['gltf_bake_animation'] is True \
or export_settings['gltf_animation_mode'] == "NLA_TRACKS") \ or export_settings['gltf_animation_mode'] == "NLA_TRACKS") \
and blender_object \
and blender_object.type == "ARMATURE" \ and blender_object.type == "ARMATURE" \
and mode is None or mode == "OBJECT": and mode is None or mode == "OBJECT":
# We need to bake all bones. Because some bone can have some constraints linking to # We need to bake all bones. Because some bone can have some constraints linking to

View File

@ -7,15 +7,16 @@ from ....io.com import gltf2_io
from ...com.gltf2_blender_extras import generate_extras from ...com.gltf2_blender_extras import generate_extras
from ..gltf2_blender_gather_tree import VExportNode from ..gltf2_blender_gather_tree import VExportNode
from .gltf2_blender_gather_drivers import get_sk_drivers from .gltf2_blender_gather_drivers import get_sk_drivers
-from .sampled.armature.gltf2_blender_gather_armature_channels import gather_armature_sampled_channels
+from .sampled.armature.armature_channels import gather_armature_sampled_channels
from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels
from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sk_sampled_channels from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sk_sampled_channels
from .gltf2_blender_gather_animation_utils import link_samplers, add_slide_data from .gltf2_blender_gather_animation_utils import link_samplers, add_slide_data
def gather_scene_animations(export_settings): def gather_scene_animations(export_settings):
-# if there is no animation in file => no need to bake
-if len(bpy.data.actions) == 0:
+# if there is no animation in file => no need to bake. Except if we are trying to bake GN instances
+if len(bpy.data.actions) == 0 and export_settings['gltf_gn_mesh'] is False:
+    #TODO : get a better filter by checking we really have some GN instances...
return []
total_channels = [] total_channels = []
@ -42,11 +43,14 @@ def gather_scene_animations(export_settings):
else: else:
continue continue
-blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
+if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.COLLECTION:
+    continue
+blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object # blender_object can be None for GN instances
export_settings['ranges'][obj_uuid] = {}
export_settings['ranges'][obj_uuid][obj_uuid] = {'start': start_frame, 'end': end_frame}
-if blender_object.type == "ARMATURE":
+if blender_object and blender_object.type == "ARMATURE":
# Manage sk drivers # Manage sk drivers
obj_drivers = get_sk_drivers(obj_uuid, export_settings) obj_drivers = get_sk_drivers(obj_uuid, export_settings)
for obj_dr in obj_drivers: for obj_dr in obj_drivers:
@ -61,7 +65,7 @@ def gather_scene_animations(export_settings):
# Perform baking animation export # Perform baking animation export
if blender_object.type != "ARMATURE": if blender_object and blender_object.type != "ARMATURE":
# We have to check if this is a skinned mesh, because we don't have to force animation baking on this case # We have to check if this is a skinned mesh, because we don't have to force animation baking on this case
if export_settings['vtree'].nodes[obj_uuid].skin is None: if export_settings['vtree'].nodes[obj_uuid].skin is None:
channels = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) channels = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings)
@ -83,6 +87,12 @@ def gather_scene_animations(export_settings):
channels = gather_sk_sampled_channels(obj_uuid, obj_uuid, export_settings) channels = gather_sk_sampled_channels(obj_uuid, obj_uuid, export_settings)
if channels is not None: if channels is not None:
total_channels.extend(channels) total_channels.extend(channels)
elif blender_object is None:
# This is GN instances
# Currently, not checking if this instance is skinned.... #TODO
channels = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings)
if channels is not None:
total_channels.extend(channels)
else: else:
channels = gather_armature_sampled_channels(obj_uuid, obj_uuid, export_settings) channels = gather_armature_sampled_channels(obj_uuid, obj_uuid, export_settings)
if channels is not None: if channels is not None:
@ -94,7 +104,7 @@ def gather_scene_animations(export_settings):
channels=total_channels, channels=total_channels,
extensions=None, extensions=None,
extras=__gather_extras(blender_object, export_settings), extras=__gather_extras(blender_object, export_settings),
-name=blender_object.name,
+name=blender_object.name if blender_object else "GN Instance",
samplers=[] samplers=[]
) )
link_samplers(animation, export_settings) link_samplers(animation, export_settings)

View File

@ -29,6 +29,9 @@ def gather_tracks_animations(export_settings):
else: else:
continue continue
if export_settings['vtree'].nodes[obj_uuid].blender_type == VExportNode.COLLECTION:
continue
animations_, merged_tracks = gather_track_animations(obj_uuid, merged_tracks, len(animations), export_settings) animations_, merged_tracks = gather_track_animations(obj_uuid, merged_tracks, len(animations), export_settings)
animations += animations_ animations += animations_

View File

@ -8,7 +8,7 @@ from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......io.com.gltf2_io_debug import print_console from ......io.com.gltf2_io_debug import print_console
from ......io.com import gltf2_io from ......io.com import gltf2_io
from .....com.gltf2_blender_extras import generate_extras from .....com.gltf2_blender_extras import generate_extras
-from .gltf2_blender_gather_armature_channels import gather_armature_sampled_channels
+from .armature_channels import gather_armature_sampled_channels

View File

@ -6,15 +6,15 @@ import bpy
import typing import typing
from ......io.com import gltf2_io from ......io.com import gltf2_io
from ......io.exp.gltf2_io_user_extensions import export_user_extensions from ......io.exp.gltf2_io_user_extensions import export_user_extensions
-from ......blender.com.gltf2_blender_conversion import get_gltf_interpolation
+from .....com.gltf2_blender_conversion import get_gltf_interpolation
from .....com.gltf2_blender_conversion import get_target, get_channel_from_target from .....com.gltf2_blender_conversion import get_target, get_channel_from_target
from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups
from ...fcurves.gltf2_blender_gather_fcurves_channels import needs_baking from ...fcurves.gltf2_blender_gather_fcurves_channels import needs_baking
from ...gltf2_blender_gather_drivers import get_sk_drivers from ...gltf2_blender_gather_drivers import get_sk_drivers
from ..object.gltf2_blender_gather_object_channels import gather_sampled_object_channel from ..object.gltf2_blender_gather_object_channels import gather_sampled_object_channel
from ..shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel from ..shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel
-from .gltf2_blender_gather_armature_channel_target import gather_armature_sampled_channel_target
-from .gltf2_blender_gather_armature_sampler import gather_bone_sampled_animation_sampler
+from .armature_channel_target import gather_armature_sampled_channel_target
+from .armature_sampler import gather_bone_sampled_animation_sampler
def gather_armature_sampled_channels(armature_uuid, blender_action_name, export_settings) -> typing.List[gltf2_io.AnimationChannel]: def gather_armature_sampled_channels(armature_uuid, blender_action_name, export_settings) -> typing.List[gltf2_io.AnimationChannel]:
channels = [] channels = []

View File

@ -13,7 +13,7 @@ from .....com import gltf2_blender_math
from ....gltf2_blender_gather_accessors import gather_accessor from ....gltf2_blender_gather_accessors import gather_accessor
from ....gltf2_blender_gather_cache import cached from ....gltf2_blender_gather_cache import cached
from ....gltf2_blender_gather_tree import VExportNode from ....gltf2_blender_gather_tree import VExportNode
-from .gltf2_blender_gather_armature_keyframes import gather_bone_sampled_keyframes
+from .armature_keyframes import gather_bone_sampled_keyframes
@cached @cached
def gather_bone_sampled_animation_sampler( def gather_bone_sampled_animation_sampler(

View File

@ -35,12 +35,18 @@ def get_cache_data(path: str,
if export_settings['gltf_animation_mode'] in "NLA_TRACKS": if export_settings['gltf_animation_mode'] in "NLA_TRACKS":
obj_uuids = [blender_obj_uuid] obj_uuids = [blender_obj_uuid]
depsgraph = bpy.context.evaluated_depsgraph_get()
frame = min_ frame = min_
while frame <= max_: while frame <= max_:
bpy.context.scene.frame_set(int(frame)) bpy.context.scene.frame_set(int(frame))
current_instance = {} # For GN instances, we are going to track instances by their order in instance iterator
for obj_uuid in obj_uuids: for obj_uuid in obj_uuids:
blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object
if blender_obj is None: #GN instance
if export_settings['vtree'].nodes[obj_uuid].parent_uuid not in current_instance.keys():
current_instance[export_settings['vtree'].nodes[obj_uuid].parent_uuid] = 0
# TODO: we may want to avoid looping on all objects, but an accurate filter must be found # TODO: we may want to avoid looping on all objects, but an accurate filter must be found
@ -49,7 +55,10 @@ def get_cache_data(path: str,
parent_mat = mathutils.Matrix.Identity(4).freeze() parent_mat = mathutils.Matrix.Identity(4).freeze()
else: else:
if export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type not in [VExportNode.BONE]: if export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type not in [VExportNode.BONE]:
if export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type != VExportNode.COLLECTION:
parent_mat = export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_object.matrix_world parent_mat = export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_object.matrix_world
else:
parent_mat = export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].matrix_world
else: else:
# Object animated is parented to a bone # Object animated is parented to a bone
blender_bone = export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_bone_uuid].blender_bone blender_bone = export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_bone_uuid].blender_bone
@ -60,15 +69,31 @@ def get_cache_data(path: str,
parent_mat = armature_object.matrix_world @ blender_bone.matrix @ axis_basis_change parent_mat = armature_object.matrix_world @ blender_bone.matrix @ axis_basis_change
#For object inside collection (at root), matrix world is already expressed regarding collection parent #For object inside collection (at root), matrix world is already expressed regarding collection parent
-if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None and export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.COLLECTION:
+if export_settings['vtree'].nodes[obj_uuid].parent_uuid is not None and export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_type == VExportNode.INST_COLLECTION:
parent_mat = mathutils.Matrix.Identity(4).freeze() parent_mat = mathutils.Matrix.Identity(4).freeze()
if blender_obj:
if export_settings['vtree'].nodes[obj_uuid].blender_type != VExportNode.COLLECTION:
mat = parent_mat.inverted_safe() @ blender_obj.matrix_world mat = parent_mat.inverted_safe() @ blender_obj.matrix_world
else:
mat = parent_mat.inverted_safe()
else:
eval = export_settings['vtree'].nodes[export_settings['vtree'].nodes[obj_uuid].parent_uuid].blender_object.evaluated_get(depsgraph)
cpt_inst = 0
for inst in depsgraph.object_instances: # use only as iterator
if inst.parent == eval:
if current_instance[export_settings['vtree'].nodes[obj_uuid].parent_uuid] == cpt_inst:
mat = inst.matrix_world.copy()
current_instance[export_settings['vtree'].nodes[obj_uuid].parent_uuid] += 1
break
cpt_inst += 1
if obj_uuid not in data.keys(): if obj_uuid not in data.keys():
data[obj_uuid] = {} data[obj_uuid] = {}
-if blender_obj.animation_data and blender_obj.animation_data.action \
+if export_settings['vtree'].nodes[obj_uuid].blender_type != VExportNode.COLLECTION:
+    if blender_obj and blender_obj.animation_data and blender_obj.animation_data.action \
and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:
if blender_obj.animation_data.action.name not in data[obj_uuid].keys(): if blender_obj.animation_data.action.name not in data[obj_uuid].keys():
data[obj_uuid][blender_obj.animation_data.action.name] = {} data[obj_uuid][blender_obj.animation_data.action.name] = {}
@ -89,9 +114,15 @@ def get_cache_data(path: str,
data[obj_uuid][obj_uuid]['matrix'] = {} data[obj_uuid][obj_uuid]['matrix'] = {}
data[obj_uuid][obj_uuid]['matrix'][None] = {} data[obj_uuid][obj_uuid]['matrix'][None] = {}
data[obj_uuid][obj_uuid]['matrix'][None][frame] = mat data[obj_uuid][obj_uuid]['matrix'][None][frame] = mat
else:
if obj_uuid not in data[obj_uuid].keys():
data[obj_uuid][obj_uuid] = {}
data[obj_uuid][obj_uuid]['matrix'] = {}
data[obj_uuid][obj_uuid]['matrix'][None] = {}
data[obj_uuid][obj_uuid]['matrix'][None][frame] = mat
# Store data for all bones, if object is an armature # Store data for all bones, if object is an armature
if blender_obj.type == "ARMATURE": if blender_obj and blender_obj.type == "ARMATURE":
bones = export_settings['vtree'].get_all_bones(obj_uuid) bones = export_settings['vtree'].get_all_bones(obj_uuid)
if blender_obj.animation_data and blender_obj.animation_data.action \ if blender_obj.animation_data and blender_obj.animation_data.action \
and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:
@ -140,10 +171,18 @@ def get_cache_data(path: str,
data[obj_uuid][obj_uuid]['bone'][blender_bone.name] = {} data[obj_uuid][obj_uuid]['bone'][blender_bone.name] = {}
data[obj_uuid][obj_uuid]['bone'][blender_bone.name][frame] = matrix data[obj_uuid][obj_uuid]['bone'][blender_bone.name][frame] = matrix
elif blender_obj is None: # GN instances
# case of baking object, for GN instances
# There is no animation, so use uuid of object as key
if obj_uuid not in data[obj_uuid].keys():
data[obj_uuid][obj_uuid] = {}
data[obj_uuid][obj_uuid]['matrix'] = {}
data[obj_uuid][obj_uuid]['matrix'][None] = {}
data[obj_uuid][obj_uuid]['matrix'][None][frame] = mat
# Check SK animation here, as we are caching data # Check SK animation here, as we are caching data
# This will avoid to have to do it again when exporting SK animation # This will avoid to have to do it again when exporting SK animation
-if export_settings['gltf_morph_anim'] and blender_obj.type == "MESH" \
+if export_settings['gltf_morph_anim'] and blender_obj and blender_obj.type == "MESH" \
and blender_obj.data is not None \ and blender_obj.data is not None \
and blender_obj.data.shape_keys is not None \ and blender_obj.data.shape_keys is not None \
and blender_obj.data.shape_keys.animation_data is not None \ and blender_obj.data.shape_keys.animation_data is not None \
@ -156,7 +195,7 @@ def get_cache_data(path: str,
data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name]['sk'][None] = {} data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name]['sk'][None] = {}
data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name]['sk'][None][frame] = [k.value for k in get_sk_exported(blender_obj.data.shape_keys.key_blocks)] data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name]['sk'][None][frame] = [k.value for k in get_sk_exported(blender_obj.data.shape_keys.key_blocks)]
-elif export_settings['gltf_morph_anim'] and blender_obj.type == "MESH" \
+elif export_settings['gltf_morph_anim'] and blender_obj and blender_obj.type == "MESH" \
and blender_obj.data is not None \ and blender_obj.data is not None \
and blender_obj.data.shape_keys is not None \ and blender_obj.data.shape_keys is not None \
and blender_obj.data.shape_keys.animation_data is not None \ and blender_obj.data.shape_keys.animation_data is not None \
@ -173,7 +212,7 @@ def get_cache_data(path: str,
-elif export_settings['gltf_morph_anim'] and blender_obj.type == "MESH" \
+elif export_settings['gltf_morph_anim'] and blender_obj and blender_obj.type == "MESH" \
and blender_obj.data is not None \ and blender_obj.data is not None \
and blender_obj.data.shape_keys is not None: and blender_obj.data.shape_keys is not None:
if obj_uuid not in data[obj_uuid].keys(): if obj_uuid not in data[obj_uuid].keys():
@ -187,7 +226,7 @@ def get_cache_data(path: str,
# caching driver sk meshes # caching driver sk meshes
# This will avoid to have to do it again when exporting SK animation # This will avoid to have to do it again when exporting SK animation
if blender_obj.type == "ARMATURE": if blender_obj and blender_obj.type == "ARMATURE":
sk_drivers = get_sk_drivers(obj_uuid, export_settings) sk_drivers = get_sk_drivers(obj_uuid, export_settings)
for dr_obj in sk_drivers: for dr_obj in sk_drivers:
driver_object = export_settings['vtree'].nodes[dr_obj].blender_object driver_object = export_settings['vtree'].nodes[dr_obj].blender_object
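The GN-instance matrices are recovered by walking depsgraph.object_instances in order and counting instances per evaluated parent, as in the hunk above. A condensed sketch of that lookup under assumed names (parent_object and wanted_index are hypothetical placeholders):

import bpy

parent_object = bpy.context.object       # hypothetical: the object whose modifier generates the instances
wanted_index = 0                          # hypothetical: which instance matrix to fetch

depsgraph = bpy.context.evaluated_depsgraph_get()
eval_parent = parent_object.evaluated_get(depsgraph)

mat = None
cpt_inst = 0
for inst in depsgraph.object_instances:   # used only as an iterator
    if inst.parent == eval_parent:
        if cpt_inst == wanted_index:
            mat = inst.matrix_world.copy()
            break
        cpt_inst += 1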

View File

@ -15,6 +15,7 @@ from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com import gltf2_blender_json from ..com import gltf2_blender_json
from . import gltf2_blender_gather from . import gltf2_blender_gather
from .gltf2_blender_gltf2_exporter import GlTF2Exporter from .gltf2_blender_gltf2_exporter import GlTF2Exporter
from .gltf2_blender_gltf2_exporter import fix_json
def save(context, export_settings): def save(context, export_settings):
@ -58,7 +59,17 @@ def __export(export_settings):
exporter.traverse_extensions() exporter.traverse_extensions()
# now that addons possibly add some fields in json, we can fix it if needed # now that addons possibly add some fields in json, we can fix it if needed
json = __fix_json(exporter.glTF.to_dict()) json = fix_json(exporter.glTF.to_dict())
# Convert additional data if needed
if export_settings['gltf_unused_textures'] is True:
additional_json_textures = fix_json([i.to_dict() for i in exporter.additional_data.additional_textures])
# Now that we have the final json, we can add the additional data
if len(additional_json_textures) > 0:
if json.get('extras') is None:
json['extras'] = {}
json['extras']['additionalTextures'] = additional_json_textures
return json, buffer return json, buffer
@ -79,6 +90,8 @@ def __gather_gltf(exporter, export_settings):
for animation in animations: for animation in animations:
exporter.add_animation(animation) exporter.add_animation(animation)
exporter.traverse_unused_skins(unused_skins) exporter.traverse_unused_skins(unused_skins)
exporter.traverse_additional_textures()
exporter.traverse_additional_images()
def __create_buffer(exporter, export_settings): def __create_buffer(exporter, export_settings):
@ -92,43 +105,6 @@ def __create_buffer(exporter, export_settings):
return buffer return buffer
def __fix_json(obj):
# TODO: move to custom JSON encoder
fixed = obj
if isinstance(obj, dict):
fixed = {}
for key, value in obj.items():
if key == 'extras' and value is not None:
fixed[key] = value
continue
if not __should_include_json_value(key, value):
continue
fixed[key] = __fix_json(value)
elif isinstance(obj, list):
fixed = []
for value in obj:
fixed.append(__fix_json(value))
elif isinstance(obj, float):
# force floats to int, if they are integers (prevent INTEGER_WRITTEN_AS_FLOAT validator warnings)
if int(obj) == obj:
return int(obj)
return fixed
def __should_include_json_value(key, value):
allowed_empty_collections = ["KHR_materials_unlit"]
if value is None:
return False
elif __is_empty_collection(value) and key not in allowed_empty_collections:
return False
return True
def __is_empty_collection(value):
return (isinstance(value, dict) or isinstance(value, list)) and len(value) == 0
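The helpers removed here now live in gltf2_blender_gltf2_exporter as fix_json; judging from the deleted code, the behaviour is unchanged. A compact, self-contained sketch of those rules for reference (not the shipped implementation):

    # Minimal stand-in mirroring the removed helpers, for illustration only.
    def fix_json_sketch(obj):
        if isinstance(obj, dict):
            out = {}
            for key, value in obj.items():
                if key == 'extras' and value is not None:
                    out[key] = value  # extras are passed through untouched
                    continue
                if value is None:
                    continue  # None values are omitted
                if isinstance(value, (dict, list)) and len(value) == 0 and key != "KHR_materials_unlit":
                    continue  # empty collections are omitted, except allowed ones
                out[key] = fix_json_sketch(value)
            return out
        if isinstance(obj, list):
            return [fix_json_sketch(v) for v in obj]
        if isinstance(obj, float) and int(obj) == obj:
            return int(obj)  # avoid INTEGER_WRITTEN_AS_FLOAT validator warnings
        return obj

    print(fix_json_sketch({"alpha": None, "scale": 2.0, "samplers": [],
                           "extensions": {"KHR_materials_unlit": {}}}))
    # -> {'scale': 2, 'extensions': {'KHR_materials_unlit': {}}}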
def __write_file(json, buffer, export_settings): def __write_file(json, buffer, export_settings):
try: try:
gltf2_io_export.save_gltf( gltf2_io_export.save_gltf(

View File

@ -53,13 +53,16 @@ def __gather_scene(blender_scene, export_settings):
vtree = gltf2_blender_gather_tree.VExportTree(export_settings) vtree = gltf2_blender_gather_tree.VExportTree(export_settings)
vtree.construct(blender_scene) vtree.construct(blender_scene)
vtree.search_missing_armature() # In case armatures are not parented correctly vtree.search_missing_armature() # In case armatures are not parented correctly
vtree.bake_armature_bone_list() # Used in case we remove the armature if export_settings['gltf_armature_object_remove'] is True:
vtree.check_if_we_can_remove_armature() # Check if we can remove the armatures objects vtree.check_if_we_can_remove_armature() # Check if we can remove the armatures objects
export_user_extensions('vtree_before_filter_hook', export_settings, vtree) export_user_extensions('vtree_before_filter_hook', export_settings, vtree)
# Now, we can filter tree if needed # Now, we can filter tree if needed
vtree.filter() vtree.filter()
vtree.bake_armature_bone_list() # Used in case we remove the armature. Doing it after filter, as filter can remove some bones
if export_settings['gltf_flatten_bones_hierarchy'] is True: if export_settings['gltf_flatten_bones_hierarchy'] is True:
vtree.break_bone_hierarchy() vtree.break_bone_hierarchy()
if export_settings['gltf_flatten_obj_hierarchy'] is True: if export_settings['gltf_flatten_obj_hierarchy'] is True:

View File

@ -9,6 +9,7 @@ from ...io.com import gltf2_io_lights_punctual
from ...io.com import gltf2_io_debug from ...io.com import gltf2_io_debug
from ..com.gltf2_blender_extras import generate_extras from ..com.gltf2_blender_extras import generate_extras
from ..com.gltf2_blender_conversion import PBR_WATTS_TO_LUMENS from ..com.gltf2_blender_conversion import PBR_WATTS_TO_LUMENS
from ..com.gltf2_blender_default import LIGHTS
from .gltf2_blender_gather_cache import cached from .gltf2_blender_gather_cache import cached
from . import gltf2_blender_gather_light_spots from . import gltf2_blender_gather_light_spots
from .material import gltf2_blender_search_node_tree from .material import gltf2_blender_search_node_tree
@ -96,11 +97,7 @@ def __gather_spot(blender_lamp, export_settings) -> Optional[gltf2_io_lights_pun
def __gather_type(blender_lamp, _) -> str: def __gather_type(blender_lamp, _) -> str:
return { return LIGHTS[blender_lamp.type]
"POINT": "point",
"SUN": "directional",
"SPOT": "spot"
}[blender_lamp.type]
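The inline mapping removed above is replaced by the shared LIGHTS constant imported at the top of the file. Its exact contents are not part of this diff; presumably it mirrors the removed dict, roughly:

    # Assumed shape of LIGHTS in ..com.gltf2_blender_default (not shown in this diff);
    # the removed dict suggests at least these entries:
    LIGHTS = {
        "POINT": "point",
        "SUN": "directional",
        "SPOT": "spot",
    }

    # __gather_type then reduces to a dictionary lookup, e.g.:
    print(LIGHTS["SUN"])  # directional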
def __gather_range(blender_lamp, export_settings) -> Optional[float]: def __gather_range(blender_lamp, export_settings) -> Optional[float]:

View File

@ -11,6 +11,7 @@ from ...io.com import gltf2_io
from ...io.com import gltf2_io_extensions from ...io.com import gltf2_io_extensions
from ...io.exp.gltf2_io_user_extensions import export_user_extensions from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras from ..com.gltf2_blender_extras import generate_extras
from ..com.gltf2_blender_default import LIGHTS
from ..com import gltf2_blender_math from ..com import gltf2_blender_math
from . import gltf2_blender_gather_tree from . import gltf2_blender_gather_tree
from . import gltf2_blender_gather_skins from . import gltf2_blender_gather_skins
@ -29,9 +30,9 @@ def gather_node(vnode, export_settings):
vnode.skin = skin vnode.skin = skin
node = gltf2_io.Node( node = gltf2_io.Node(
camera=__gather_camera(blender_object, export_settings), camera=__gather_camera(vnode, export_settings),
children=__gather_children(vnode, export_settings), children=__gather_children(vnode, export_settings),
extensions=__gather_extensions(blender_object, export_settings), extensions=__gather_extensions(vnode, export_settings),
extras=__gather_extras(blender_object, export_settings), extras=__gather_extras(blender_object, export_settings),
matrix=__gather_matrix(blender_object, export_settings), matrix=__gather_matrix(blender_object, export_settings),
mesh=__gather_mesh(vnode, blender_object, export_settings), mesh=__gather_mesh(vnode, blender_object, export_settings),
@ -55,11 +56,15 @@ def gather_node(vnode, export_settings):
return node return node
def __gather_camera(blender_object, export_settings): def __gather_camera(vnode, export_settings):
if blender_object.type != 'CAMERA': if not vnode.blender_object:
return
if vnode.blender_type == VExportNode.COLLECTION:
return None
if vnode.blender_object.type != 'CAMERA':
return None return None
return gltf2_blender_gather_cameras.gather_camera(blender_object.data, export_settings) return gltf2_blender_gather_cameras.gather_camera(vnode.blender_object.data, export_settings)
def __gather_children(vnode, export_settings): def __gather_children(vnode, export_settings):
@ -160,11 +165,18 @@ def __find_parent_joint(joints, name):
return None return None
def __gather_extensions(blender_object, export_settings): def __gather_extensions(vnode, export_settings):
blender_object = vnode.blender_object
extensions = {} extensions = {}
if export_settings["gltf_lights"] and (blender_object.type == "LAMP" or blender_object.type == "LIGHT"): blender_lamp = None
if export_settings["gltf_lights"] and vnode.blender_type == VExportNode.INSTANCE:
if vnode.data.type in LIGHTS:
blender_lamp = vnode.data
elif export_settings["gltf_lights"] and blender_object is not None and (blender_object.type == "LAMP" or blender_object.type == "LIGHT"):
blender_lamp = blender_object.data blender_lamp = blender_object.data
if blender_lamp is not None:
light = gltf2_blender_gather_lights.gather_lights_punctual( light = gltf2_blender_gather_lights.gather_lights_punctual(
blender_lamp, blender_lamp,
export_settings export_settings
@ -195,18 +207,33 @@ def __gather_matrix(blender_object, export_settings):
# return blender_object.matrix_local # return blender_object.matrix_local
return [] return []
def __gather_mesh(vnode, blender_object, export_settings): def __gather_mesh(vnode, blender_object, export_settings):
if blender_object.type in ['CURVE', 'SURFACE', 'FONT']: if vnode.blender_type == VExportNode.COLLECTION:
return None
if blender_object and blender_object.type in ['CURVE', 'SURFACE', 'FONT']:
return __gather_mesh_from_nonmesh(blender_object, export_settings) return __gather_mesh_from_nonmesh(blender_object, export_settings)
if blender_object is None and type(vnode.data).__name__ not in ["Mesh"]:
return None #TODO
if blender_object is None:
# GN instance
blender_mesh = vnode.data
# Keep materials from the tmp mesh, but if no material, keep from object
materials = tuple(mat for mat in blender_mesh.materials)
if len(materials) == 1 and materials[0] is None:
materials = tuple(ms.material for ms in vnode.original_object.material_slots)
if blender_object.type != "MESH": uuid_for_skined_data = None
modifiers = None
if blender_mesh is None:
return None return None
else:
if blender_object.type != "MESH":
return None
# For duplis instancer, when show is off -> export as empty # For duplis instancer, when show is off -> export as empty
if vnode.force_as_empty is True: if vnode.force_as_empty is True:
return None return None
# Be sure that object is valid (no NaN for example) # Be sure that object is valid (no NaN for example)
res = blender_object.data.validate() res = blender_object.data.validate()
if res is True: if res is True:
@ -243,19 +270,20 @@ def __gather_mesh(vnode, blender_object, export_settings):
for idx, show_viewport in armature_modifiers.items(): for idx, show_viewport in armature_modifiers.items():
blender_object.modifiers[idx].show_viewport = show_viewport blender_object.modifiers[idx].show_viewport = show_viewport
# Keep materials from the newly created tmp mesh # Keep materials from the newly created tmp mesh, but if no materials, keep from object
materials = tuple(mat for mat in blender_mesh.materials) materials = tuple(mat for mat in blender_mesh.materials)
if len(materials) == 1 and materials[0] is None: if len(materials) == 1 and materials[0] is None:
materials = tuple(ms.material for ms in blender_object.material_slots) materials = tuple(ms.material for ms in blender_object.material_slots)
else: else:
blender_mesh = blender_object.data blender_mesh = blender_object.data
# If no skins are exported, there is no need for vertex groups, as passing them would create a cache miss
if not export_settings['gltf_skins']: if not export_settings['gltf_skins']:
modifiers = None modifiers = None
else: else:
# Check if there is an armature modifier # Check if there is an armature modifier
if len([mod for mod in blender_object.modifiers if mod.type == "ARMATURE"]) == 0: if len([mod for mod in blender_object.modifiers if mod.type == "ARMATURE"]) == 0:
modifiers = None modifiers = None
# Keep materials from object, as no modifiers are applied, so no risk that # Keep materials from object, as no modifiers are applied, so no risk that
# modifiers changed them # modifiers changed them
materials = tuple(ms.material for ms in blender_object.material_slots) materials = tuple(ms.material for ms in blender_object.material_slots)
@ -271,7 +299,7 @@ def __gather_mesh(vnode, blender_object, export_settings):
result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh, result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh,
uuid_for_skined_data, uuid_for_skined_data,
blender_object.vertex_groups, blender_object.vertex_groups if blender_object else None,
modifiers, modifiers,
materials, materials,
None, None,
@ -329,10 +357,12 @@ def __gather_mesh_from_nonmesh(blender_object, export_settings):
def __gather_name(blender_object, export_settings): def __gather_name(blender_object, export_settings):
new_name = blender_object.name if blender_object else "GN Instance"
class GltfHookName: class GltfHookName:
def __init__(self, name): def __init__(self, name):
self.name = name self.name = name
gltf_hook_name = GltfHookName(blender_object.name) gltf_hook_name = GltfHookName(new_name)
export_user_extensions('gather_node_name_hook', export_settings, gltf_hook_name, blender_object) export_user_extensions('gather_node_name_hook', export_settings, gltf_hook_name, blender_object)
return gltf_hook_name.name return gltf_hook_name.name
@ -360,7 +390,7 @@ def __gather_trans_rot_scale(vnode, export_settings):
rot = __convert_swizzle_rotation(rot, export_settings) rot = __convert_swizzle_rotation(rot, export_settings)
sca = __convert_swizzle_scale(sca, export_settings) sca = __convert_swizzle_scale(sca, export_settings)
if vnode.blender_object.instance_type == 'COLLECTION' and vnode.blender_object.instance_collection: if vnode.blender_object and vnode.blender_type != VExportNode.COLLECTION and vnode.blender_object.instance_type == 'COLLECTION' and vnode.blender_object.instance_collection:
offset = -__convert_swizzle_location( offset = -__convert_swizzle_location(
vnode.blender_object.instance_collection.instance_offset, export_settings) vnode.blender_object.instance_collection.instance_offset, export_settings)
@ -388,8 +418,12 @@ def __gather_trans_rot_scale(vnode, export_settings):
return translation, rotation, scale return translation, rotation, scale
def gather_skin(vnode, export_settings): def gather_skin(vnode, export_settings):
if export_settings['vtree'].nodes[vnode].blender_type == VExportNode.COLLECTION:
return None
blender_object = export_settings['vtree'].nodes[vnode].blender_object blender_object = export_settings['vtree'].nodes[vnode].blender_object
modifiers = {m.type: m for m in blender_object.modifiers} modifiers = {m.type: m for m in blender_object.modifiers} if blender_object else {}
if "ARMATURE" not in modifiers or modifiers["ARMATURE"].object is None: if "ARMATURE" not in modifiers or modifiers["ARMATURE"].object is None:
return None return None

View File

@ -54,18 +54,22 @@ def gather_primitives(
""" """
primitives = [] primitives = []
blender_primitives = __gather_cache_primitives(materials, blender_mesh, uuid_for_skined_data, blender_primitives, addional_materials_udim = __gather_cache_primitives(materials, blender_mesh, uuid_for_skined_data,
vertex_groups, modifiers, export_settings) vertex_groups, modifiers, export_settings)
for internal_primitive in blender_primitives: for internal_primitive, udim_material in zip(blender_primitives, addional_materials_udim):
if udim_material is None:  # classic case, not a UDIM material
# We already call this function, in order to retrieve uvmap info, if any # We already call this function, in order to retrieve uvmap info, if any
# So here, only the cache will be used # So here, only the cache will be used
base_material, material_info = get_base_material(internal_primitive['material'], materials, export_settings) base_material, material_info = get_base_material(internal_primitive['material'], materials, export_settings)
# Now, we can retrieve the real material, by checking attributes and active maps # Now, we can retrieve the real material, by checking attributes and active maps
blender_mat = get_material_from_idx(internal_primitive['material'], materials, export_settings) blender_mat = get_material_from_idx(internal_primitive['material'], materials, export_settings)
material = get_final_material(blender_mesh, blender_mat, internal_primitive['uvmap_attributes_index'], base_material, material_info["uv_info"], export_settings) material = get_final_material(blender_mesh, blender_mat, internal_primitive['uvmap_attributes_index'], base_material, material_info["uv_info"], export_settings)
else:
# UDIM case
base_material, material_info, unique_material_id = udim_material
material = get_final_material(blender_mesh, unique_material_id, internal_primitive['uvmap_attributes_index'], base_material, material_info["uv_info"], export_settings)
primitive = gltf2_io.MeshPrimitive( primitive = gltf2_io.MeshPrimitive(
attributes=internal_primitive['attributes'], attributes=internal_primitive['attributes'],
@ -117,9 +121,45 @@ def __gather_cache_primitives(
""" """
primitives = [] primitives = []
blender_primitives = gltf2_blender_gather_primitives_extract.extract_primitives( blender_primitives, additional_materials_udim, shared_attributes = gltf2_blender_gather_primitives_extract.extract_primitives(
materials, blender_mesh, uuid_for_skined_data, vertex_groups, modifiers, export_settings) materials, blender_mesh, uuid_for_skined_data, vertex_groups, modifiers, export_settings)
if shared_attributes is not None:
if len(blender_primitives) > 0:
shared = {}
shared["attributes"] = shared_attributes
attributes = __gather_attributes(shared, blender_mesh, modifiers, export_settings)
targets = __gather_targets(shared, blender_mesh, modifiers, export_settings)
for internal_primitive in blender_primitives:
if internal_primitive.get('mode') is None:
primitive = {
"attributes": attributes,
"indices": __gather_indices(internal_primitive, blender_mesh, modifiers, export_settings),
"mode": internal_primitive.get('mode'),
"material": internal_primitive.get('material'),
"targets": targets,
"uvmap_attributes_index": internal_primitive.get('uvmap_attributes_index')
}
else:
# Edges & points, no shared attributes
primitive = {
"attributes": __gather_attributes(internal_primitive, blender_mesh, modifiers, export_settings),
"indices": __gather_indices(internal_primitive, blender_mesh, modifiers, export_settings),
"mode": internal_primitive.get('mode'),
"material": internal_primitive.get('material'),
"targets": __gather_targets(internal_primitive, blender_mesh, modifiers, export_settings),
"uvmap_attributes_index": internal_primitive.get('uvmap_attributes_index')
}
primitives.append(primitive)
else:
for internal_primitive in blender_primitives: for internal_primitive in blender_primitives:
primitive = { primitive = {
"attributes": __gather_attributes(internal_primitive, blender_mesh, modifiers, export_settings), "attributes": __gather_attributes(internal_primitive, blender_mesh, modifiers, export_settings),
@ -127,11 +167,11 @@ def __gather_cache_primitives(
"mode": internal_primitive.get('mode'), "mode": internal_primitive.get('mode'),
"material": internal_primitive.get('material'), "material": internal_primitive.get('material'),
"targets": __gather_targets(internal_primitive, blender_mesh, modifiers, export_settings), "targets": __gather_targets(internal_primitive, blender_mesh, modifiers, export_settings),
"uvmap_attributes_index": internal_primitive["uvmap_attributes_index"], #This will not be on final glTF dict "uvmap_attributes_index": internal_primitive.get('uvmap_attributes_index')
} }
primitives.append(primitive) primitives.append(primitive)
return primitives return primitives, additional_materials_udim
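For clarity, the two return values are parallel lists: one entry per primitive, None on the classic path and a (base_material, material_info, unique_material_id) tuple for a UDIM tile. A small standalone sketch of how gather_primitives consumes that pairing (values hypothetical):

    # One entry per primitive: None for the classic path, or a
    # (base_material, material_info, unique_material_id) tuple for a UDIM tile.
    blender_primitives = [{"material": 0}, {"material": 1}]
    additional_materials_udim = [None, ("base_material", {"uv_info": {}}, 42)]  # hypothetical values

    for internal_primitive, udim_material in zip(blender_primitives, additional_materials_udim):
        if udim_material is None:
            print("classic primitive, material index", internal_primitive["material"])
        else:
            base_material, material_info, unique_material_id = udim_material
            print("UDIM primitive, unique material id", unique_material_id)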
def __gather_indices(blender_primitive, blender_mesh, modifiers, export_settings): def __gather_indices(blender_primitive, blender_mesh, modifiers, export_settings):
indices = blender_primitive.get('indices') indices = blender_primitive.get('indices')

View File

@ -3,6 +3,7 @@
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
import numpy as np import numpy as np
from copy import deepcopy
from mathutils import Vector from mathutils import Vector
from ...blender.com.gltf2_blender_data_path import get_sk_exported from ...blender.com.gltf2_blender_data_path import get_sk_exported
from ...io.com.gltf2_io_debug import print_console from ...io.com.gltf2_io_debug import print_console
@ -10,7 +11,8 @@ from ...io.com.gltf2_io_constants import ROUNDING_DIGIT
from ...io.exp.gltf2_io_user_extensions import export_user_extensions from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ...io.com import gltf2_io_constants from ...io.com import gltf2_io_constants
from ..com import gltf2_blender_conversion from ..com import gltf2_blender_conversion
from .material.gltf2_blender_gather_materials import get_base_material, get_material_from_idx from .material.gltf2_blender_gather_materials import get_base_material, get_material_from_idx, get_active_uvmap_index, get_new_material_texture_shared
from .material.gltf2_blender_gather_texture_info import gather_udim_texture_info
from . import gltf2_blender_gather_skins from . import gltf2_blender_gather_skins
@ -25,7 +27,10 @@ def extract_primitives(materials, blender_mesh, uuid_for_skined_data, blender_ve
primitive_creator.populate_dots_data() primitive_creator.populate_dots_data()
primitive_creator.primitive_split() primitive_creator.primitive_split()
primitive_creator.manage_material_info() # UVMap & Vertex Color primitive_creator.manage_material_info() # UVMap & Vertex Color
return primitive_creator.primitive_creation() if export_settings['gltf_shared_accessors'] is False:
return primitive_creator.primitive_creation_not_shared(), primitive_creator.additional_materials, None
else:
return primitive_creator.primitive_creation_shared()
class PrimitiveCreator: class PrimitiveCreator:
def __init__(self, materials, blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings): def __init__(self, materials, blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings):
@ -379,10 +384,15 @@ class PrimitiveCreator:
# No choice : We need to retrieve materials here. Anyway, this will be baked, and next call will be quick # No choice : We need to retrieve materials here. Anyway, this will be baked, and next call will be quick
# We also need to shuffle Vertex Color data if needed # We also need to shuffle Vertex Color data if needed
new_prim_indices = {}
self.additional_materials = [] # In case of UDIM
self.uvmap_attribute_list = [] # Initialize here, in case we don't have any triangle primitive
materials_use_vc = None materials_use_vc = None
warning_already_displayed = False warning_already_displayed = False
for material_idx in self.prim_indices.keys(): for material_idx in self.prim_indices.keys():
_, material_info = get_base_material(material_idx, self.materials, self.export_settings) base_material, material_info = get_base_material(material_idx, self.materials, self.export_settings)
# UVMaps # UVMaps
self.uvmap_attribute_list = list(set([i['value'] for i in material_info["uv_info"].values() if 'type' in i.keys() and i['type'] == "Attribute" ])) self.uvmap_attribute_list = list(set([i['value'] for i in material_info["uv_info"].values() if 'type' in i.keys() and i['type'] == "Attribute" ]))
@ -424,12 +434,12 @@ class PrimitiveCreator:
# The simplest test is when no vertex colors are used # The simplest test is when no vertex colors are used
if material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is None: if material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is None:
# Nothing to do # Nothing to do
continue pass
if material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is not None: elif material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is not None:
print_console('WARNING', 'We are not managing this case (Vertex Color alpha without color)') print_console('WARNING', 'We are not managing this case (Vertex Color alpha without color)')
continue
else:
vc_color_name = None vc_color_name = None
vc_alpha_name = None vc_alpha_name = None
if material_info['vc_info']['color_type'] == "name": if material_info['vc_info']['color_type'] == "name":
@ -455,7 +465,7 @@ class PrimitiveCreator:
print_console('WARNING', 'glTF specification does not allow this case (multiple materials with different Vertex Color)') print_console('WARNING', 'glTF specification does not allow this case (multiple materials with different Vertex Color)')
warning_already_displayed = True warning_already_displayed = True
materials_use_vc = vc_key materials_use_vc = vc_key
continue
elif materials_use_vc is None: elif materials_use_vc is None:
materials_use_vc = vc_key materials_use_vc = vc_key
@ -468,7 +478,216 @@ class PrimitiveCreator:
else: else:
pass # Using the same Vertex Color pass # Using the same Vertex Color
def primitive_creation(self): ##### UDIM #####
if len(material_info['udim_info'].keys()) == 0:
new_prim_indices[material_idx] = self.prim_indices[material_idx]
self.additional_materials.append(None)
continue
# We have some UDIM for some texture of this material
# We need to split the mesh into multiple primitives
# We only manage the case where all textures use the same UVMap
# And where UDIMs have exactly the same number of tiles (TODO: check?)
# So, retrieve all uvmaps used by this material
all_uvmaps = {}
for tex in material_info['udim_info'].keys():
if material_info['uv_info'][tex]['type'] == "Active":
index_uvmap = get_active_uvmap_index(self.blender_mesh)
uvmap_name = "TEXCOORD_" + str(index_uvmap)
elif material_info['uv_info'][tex]['type'] == "Fixed":
index_uvmap = self.blender_mesh.uv_layers.find(material_info['uv_info'][tex]['value'])
if index_uvmap < 0:
# Using active index
index_uvmap = get_active_uvmap_index(self.blender_mesh)
uvmap_name = "TEXCOORD_" + str(index_uvmap)
else: #Attribute
uvmap_name = material_info['uv_info'][tex]['value']
all_uvmaps[tex] = uvmap_name
if len(set(all_uvmaps.values())) > 1:
print_console('WARNING', 'We are not managing this case (multiple UVMap for UDIM)')
new_prim_indices[material_idx] = self.prim_indices[material_idx]
self.additional_materials.append(None)
continue
print_console('INFO', 'Splitting UDIM tiles into different primitives/materials')
# Retrieve UDIM images
tex = list(material_info['udim_info'].keys())[0]
image = material_info['udim_info'][tex]['image']
new_material_index = len(self.prim_indices.keys())
# Get UVMap used for UDIM
uvmap_name = all_uvmaps[list(all_uvmaps.keys())[0]]
# Retrieve tiles number
tiles = [t.number for t in image.tiles]
u_tiles = max([int(str(t)[3:]) for t in tiles])
v_tiles = max([int(str(t)[2:3]) for t in tiles]) + 1
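The two lines above derive the tile grid size from the UDIM numbering convention (tile number = 1001 + u + 10*v); worked through for a hypothetical 2x2 tile set:

    # Worked example of the parsing above for tiles 1001, 1002, 1011, 1012 (a 2x2 block):
    tiles = [1001, 1002, 1011, 1012]
    u_tiles = max([int(str(t)[3:]) for t in tiles])       # last digit  -> 2 columns
    v_tiles = max([int(str(t)[2:3]) for t in tiles]) + 1  # third digit -> 2 rows
    print(u_tiles, v_tiles)  # 2 2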
# We are now going to split the mesh into multiple primitives, based on tiles
# We need to create a new primitive for each tile
for u in range(u_tiles):
for v in range(v_tiles):
if u != u_tiles - 1 and v != v_tiles - 1:
indices = np.where((self.dots[uvmap_name + '0'] >= u) & (self.dots[uvmap_name + '0'] < (u + 1)) & (self.dots[uvmap_name + '1'] <= (1-v) ) & (self.dots[uvmap_name + '1'] > 1-(v + 1)))[0]
elif u == u_tiles - 1 and v != v_tiles - 1:
indices = np.where((self.dots[uvmap_name + '0'] >= u) & (self.dots[uvmap_name + '0'] <= (u + 1)) & (self.dots[uvmap_name + '1'] <= (1-v) ) & (self.dots[uvmap_name + '1'] > 1-(v + 1)))[0]
elif u != u_tiles -1 and v == v_tiles - 1:
indices = np.where((self.dots[uvmap_name + '0'] >= u) & (self.dots[uvmap_name + '0'] < (u + 1)) & (self.dots[uvmap_name + '1'] <= (1-v) ) & (self.dots[uvmap_name + '1'] >= 1-(v + 1)))[0]
else:
indices = np.where((self.dots[uvmap_name + '0'] >= u) & (self.dots[uvmap_name + '0'] <= (u + 1)) & (self.dots[uvmap_name + '1'] <= (1-v) ) & (self.dots[uvmap_name + '1'] >= 1-(v + 1)))[0]
# Reset UVMap to 0-1: go back to Blender UVMap => slide to 0-1 => convert back to glTF UVMap
self.dots[uvmap_name + '1'][indices] -= 1
self.dots[uvmap_name + '1'][indices] *= -1
self.dots[uvmap_name + '0'][indices] -= u
self.dots[uvmap_name + '1'][indices] -= v
self.dots[uvmap_name + '1'][indices] *= -1
self.dots[uvmap_name + '1'][indices] += 1
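Worked through on one hypothetical point in tile (u=1, v=1) with Blender UV (1.25, 1.75), i.e. a stored glTF V of 1 - 1.75 = -0.75, the six assignments above bring both coordinates back into the 0-1 range:

    # Hypothetical point in tile (u=1, v=1): Blender UV (1.25, 1.75), stored glTF V = -0.75
    u, v = 1, 1
    gu, gv = 1.25, -0.75
    gv -= 1; gv *= -1   # back to Blender V: 1.75
    gu -= u             # slide U into 0-1: 0.25
    gv -= v             # slide V into 0-1: 0.75
    gv *= -1; gv += 1   # back to glTF V: 0.25
    print(gu, gv)  # 0.25 0.25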
# Now, get every triangle, and check that it belongs to this tile
# Assume that checking only the first vertex of each triangle is enough (=> no handling of triangles spanning multiple tiles)
new_triangle_indices = []
for idx, i in enumerate(self.prim_indices[material_idx]):
if idx % 3 == 0 and i in indices:
new_triangle_indices.append(self.prim_indices[material_idx][idx])
new_triangle_indices.append(self.prim_indices[material_idx][idx+1])
new_triangle_indices.append(self.prim_indices[material_idx][idx+2])
new_prim_indices[new_material_index] = np.array(new_triangle_indices, dtype=np.uint32)
new_material_index += 1
# Now we have to create a new material for this tile
# This will be the existing material, but with new textures
# We need to duplicate the material, and add these new textures
new_material = deepcopy(base_material)
get_new_material_texture_shared(base_material, new_material)
for tex in material_info['udim_info'].keys():
new_tex = gather_udim_texture_info(
material_info['udim_info'][tex]['sockets'][0],
material_info['udim_info'][tex]['sockets'],
{
'tile': "10" + str(v) + str(u+1),
'image': material_info['udim_info'][tex]['image']
},
tex,
self.export_settings)
if tex == "baseColorTexture":
new_material.pbr_metallic_roughness.base_color_texture = new_tex
elif tex == "normalTexture":
new_material.normal_texture = new_tex
elif tex == "emissiveTexture":
new_material.emissive_texture = new_tex
elif tex == "metallicRoughnessTexture":
new_material.pbr_metallic_roughness.metallic_roughness_texture = new_tex
elif tex == "occlusionTexture":
new_material.occlusion_texture = new_tex
elif tex == "clearcoatTexture":
new_material.extensions["KHR_materials_clearcoat"].extension['clearcoatTexture'] = new_tex
elif tex == "clearcoatRoughnessTexture":
new_material.extensions["KHR_materials_clearcoat"].extension['clearcoatRoughnessTexture'] = new_tex
elif tex == "clearcoatNormalTexture":
new_material.extensions["KHR_materials_clearcoat"].extension['clearcoatNormalTexture'] = new_tex
elif tex == "sheenColorTexture":
new_material.extensions["KHR_materials_sheen"].extension['sheenColorTexture'] = new_tex
elif tex == "sheenRoughnessTexture":
new_material.extensions["KHR_materials_sheen"].extension['sheenRoughnessTexture'] = new_tex
elif tex == "transmissionTexture":
new_material.extensions["KHR_materials_transmission"].extension['transmissionTexture'] = new_tex
elif tex == "thicknessTexture":
new_material.extensions["KHR_materials_volume"].extension['thicknessTexture'] = new_tex
elif tex == "specularTexture":
new_material.extensions["KHR_materials_specular"].extension['specularTexture'] = new_tex
elif tex == "specularColorTexture":
new_material.extensions["KHR_materials_specular"].extension['specularColorTexture'] = new_tex
elif tex == "anisotropyTexture":
new_material.extensions["KHR_materials_anisotropy"].extension['anisotropyTexture'] = new_tex
else:
print_console('WARNING', 'We are not managing this case yet (UDIM for {})'.format(tex))
self.additional_materials.append((new_material, material_info, int(str(id(base_material)) + str(u) + str(v))))
self.prim_indices = new_prim_indices
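To summarize the UDIM branch above: each tile becomes its own primitive with a duplicated material whose textures point at a single tile, identified by the string built as "10" + str(v) + str(u + 1). A quick check that this reproduces the usual UDIM tile numbers:

    # Tile identifiers produced by the expression used above, for a few (u, v) pairs:
    for u, v in [(0, 0), (1, 0), (0, 1), (1, 1)]:
        print(u, v, "->", "10" + str(v) + str(u + 1))
    # 0 0 -> 1001, 1 0 -> 1002, 0 1 -> 1011, 1 1 -> 1012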
def primitive_creation_shared(self):
primitives = []
self.dots, shared_dot_indices = np.unique(self.dots, return_inverse=True)
self.blender_idxs = self.dots['vertex_index']
self.attributes = {}
next_texcoor_idx = self.tex_coord_max
uvmap_attributes_index = {}
for attr in self.uvmap_attribute_list:
res = np.empty((len(self.dots), 2), dtype=gltf2_blender_conversion.get_numpy_type('FLOAT2'))
for i in range(2):
res[:, i] = self.dots[attr + str(i)]
self.attributes["TEXCOORD_" + str(next_texcoor_idx)] = {}
self.attributes["TEXCOORD_" + str(next_texcoor_idx)]["data"] = res
self.attributes["TEXCOORD_" + str(next_texcoor_idx)]["component_type"] = gltf2_io_constants.ComponentType.Float
self.attributes["TEXCOORD_" + str(next_texcoor_idx)]["data_type"] = gltf2_io_constants.DataType.Vec2
uvmap_attributes_index[attr] = next_texcoor_idx
next_texcoor_idx += 1
for attr in self.blender_attributes:
if 'set' in attr:
attr['set'](attr)
else:
self.__set_regular_attribute(self.dots, attr)
if self.skin:
joints = [[] for _ in range(self.num_joint_sets)]
weights = [[] for _ in range(self.num_joint_sets)]
for vi in self.blender_idxs:
bones = self.vert_bones[vi]
for j in range(0, 4 * self.num_joint_sets):
if j < len(bones):
joint, weight = bones[j]
else:
joint, weight = 0, 0.0
joints[j//4].append(joint)
weights[j//4].append(weight)
for i, (js, ws) in enumerate(zip(joints, weights)):
self.attributes['JOINTS_%d' % i] = js
self.attributes['WEIGHTS_%d' % i] = ws
for material_idx, dot_indices in self.prim_indices.items():
indices = shared_dot_indices[dot_indices]
if len(indices) == 0:
continue
primitives.append({
# No attributes here, as they are shared across all primitives
'indices': indices,
'material': material_idx,
'uvmap_attributes_index': uvmap_attributes_index
})
# Manage edges & points primitives.
# One for edges, one for points
# No material for them, so only one primitive for each
has_triangle_primitive = len(primitives) != 0
primitives.extend(self.primitive_creation_edges_and_points())
print_console('INFO', 'Primitives created: %d' % len(primitives))
return primitives, [None]*len(primitives), self.attributes if has_triangle_primitive else None
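With shared accessors enabled, the attributes are built once and every triangle primitive only carries its index buffer; roughly, the return value looks like this for a hypothetical two-primitive mesh:

    # Rough shape of the shared-accessor result (values hypothetical):
    shared_attributes = {
        "POSITION": {"data": None},  # one array in practice, reused by every triangle primitive
        "NORMAL": {"data": None},
    }
    primitives = [
        {"indices": [0, 1, 2], "material": 0, "uvmap_attributes_index": {}},
        {"indices": [2, 1, 3], "material": 1, "uvmap_attributes_index": {}},
    ]
    # Returned as: primitives, [None] * len(primitives), shared_attributes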
def primitive_creation_not_shared(self):
primitives = [] primitives = []
for material_idx, dot_indices in self.prim_indices.items(): for material_idx, dot_indices in self.prim_indices.items():
@ -490,7 +709,7 @@ class PrimitiveCreator:
if 'set' in attr: if 'set' in attr:
attr['set'](attr) attr['set'](attr)
else: # Regular case else: # Regular case
self.__set_regular_attribute(attr) self.__set_regular_attribute(self.prim_dots, attr)
next_texcoor_idx = self.tex_coord_max next_texcoor_idx = self.tex_coord_max
uvmap_attributes_index = {} uvmap_attributes_index = {}
@ -506,6 +725,7 @@ class PrimitiveCreator:
uvmap_attributes_index[attr] = next_texcoor_idx uvmap_attributes_index[attr] = next_texcoor_idx
next_texcoor_idx += 1 next_texcoor_idx += 1
if self.skin: if self.skin:
joints = [[] for _ in range(self.num_joint_sets)] joints = [[] for _ in range(self.num_joint_sets)]
weights = [[] for _ in range(self.num_joint_sets)] weights = [[] for _ in range(self.num_joint_sets)]
@ -531,6 +751,18 @@ class PrimitiveCreator:
'uvmap_attributes_index': uvmap_attributes_index 'uvmap_attributes_index': uvmap_attributes_index
}) })
# Manage edges & points primitives.
# One for edges, one for points
# No material for them, so only one primitive for each
primitives.extend(self.primitive_creation_edges_and_points())
print_console('INFO', 'Primitives created: %d' % len(primitives))
return primitives
def primitive_creation_edges_and_points(self):
primitives_edges_points = []
if self.export_settings['gltf_loose_edges']: if self.export_settings['gltf_loose_edges']:
if self.blender_idxs_edges.shape[0] > 0: if self.blender_idxs_edges.shape[0] > 0:
@ -539,21 +771,21 @@ class PrimitiveCreator:
dots_edges, indices = np.unique(self.dots_edges, return_inverse=True) dots_edges, indices = np.unique(self.dots_edges, return_inverse=True)
self.blender_idxs = np.unique(self.blender_idxs_edges) self.blender_idxs = np.unique(self.blender_idxs_edges)
self.attributes = {} self.attributes_edges_points = {}
for attr in self.blender_attributes: for attr in self.blender_attributes:
if attr['blender_domain'] != 'POINT': if attr['blender_domain'] != 'POINT':
continue continue
if 'set' in attr: if 'set' in attr:
attr['set'](attr) attr['set'](attr, edges_points=True)
else: else:
res = np.empty((len(dots_edges), attr['len']), dtype=attr['type']) res = np.empty((len(dots_edges), attr['len']), dtype=attr['type'])
for i in range(attr['len']): for i in range(attr['len']):
res[:, i] = dots_edges[attr['gltf_attribute_name'] + str(i)] res[:, i] = dots_edges[attr['gltf_attribute_name'] + str(i)]
self.attributes[attr['gltf_attribute_name']] = {} self.attributes_edges_points[attr['gltf_attribute_name']] = {}
self.attributes[attr['gltf_attribute_name']]["data"] = res self.attributes_edges_points[attr['gltf_attribute_name']]["data"] = res
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_blender_conversion.get_component_type(attr['blender_data_type']) self.attributes_edges_points[attr['gltf_attribute_name']]["component_type"] = gltf2_blender_conversion.get_component_type(attr['blender_data_type'])
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_blender_conversion.get_data_type(attr['blender_data_type']) self.attributes_edges_points[attr['gltf_attribute_name']]["data_type"] = gltf2_blender_conversion.get_data_type(attr['blender_data_type'])
if self.skin: if self.skin:
@ -571,37 +803,38 @@ class PrimitiveCreator:
weights[j//4].append(weight) weights[j//4].append(weight)
for i, (js, ws) in enumerate(zip(joints, weights)): for i, (js, ws) in enumerate(zip(joints, weights)):
self.attributes['JOINTS_%d' % i] = js self.attributes_edges_points['JOINTS_%d' % i] = js
self.attributes['WEIGHTS_%d' % i] = ws self.attributes_edges_points['WEIGHTS_%d' % i] = ws
primitives.append({ primitives_edges_points.append({
'attributes': self.attributes, 'attributes': self.attributes_edges_points,
'indices': indices, 'indices': indices,
'mode': 1, # LINES 'mode': 1, # LINES
'material': 0, 'material': 0,
'uvmap_attributes_index': {} 'uvmap_attributes_index': {}
}) })
self.additional_materials.append(None)
if self.export_settings['gltf_loose_points']: if self.export_settings['gltf_loose_points']:
if self.blender_idxs_points.shape[0] > 0: if self.blender_idxs_points.shape[0] > 0:
self.blender_idxs = self.blender_idxs_points self.blender_idxs = self.blender_idxs_points
self.attributes = {} self.attributes_edges_points = {}
for attr in self.blender_attributes: for attr in self.blender_attributes:
if attr['blender_domain'] != 'POINT': if attr['blender_domain'] != 'POINT':
continue continue
if 'set' in attr: if 'set' in attr:
attr['set'](attr) attr['set'](attr, edges_points=True)
else: else:
res = np.empty((len(self.blender_idxs), attr['len']), dtype=attr['type']) res = np.empty((len(self.blender_idxs), attr['len']), dtype=attr['type'])
for i in range(attr['len']): for i in range(attr['len']):
res[:, i] = self.dots_points[attr['gltf_attribute_name'] + str(i)] res[:, i] = self.dots_points[attr['gltf_attribute_name'] + str(i)]
self.attributes[attr['gltf_attribute_name']] = {} self.attributes_edges_points[attr['gltf_attribute_name']] = {}
self.attributes[attr['gltf_attribute_name']]["data"] = res self.attributes_edges_points[attr['gltf_attribute_name']]["data"] = res
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_blender_conversion.get_component_type(attr['blender_data_type']) self.attributes_edges_points[attr['gltf_attribute_name']]["component_type"] = gltf2_blender_conversion.get_component_type(attr['blender_data_type'])
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_blender_conversion.get_data_type(attr['blender_data_type']) self.attributes_edges_points[attr['gltf_attribute_name']]["data_type"] = gltf2_blender_conversion.get_data_type(attr['blender_data_type'])
if self.skin: if self.skin:
@ -619,19 +852,18 @@ class PrimitiveCreator:
weights[j//4].append(weight) weights[j//4].append(weight)
for i, (js, ws) in enumerate(zip(joints, weights)): for i, (js, ws) in enumerate(zip(joints, weights)):
self.attributes['JOINTS_%d' % i] = js self.attributes_edges_points['JOINTS_%d' % i] = js
self.attributes['WEIGHTS_%d' % i] = ws self.attributes_edges_points['WEIGHTS_%d' % i] = ws
primitives.append({ primitives_edges_points.append({
'attributes': self.attributes, 'attributes': self.attributes_edges_points,
'mode': 0, # POINTS 'mode': 0, # POINTS
'material': 0, 'material': 0,
'uvmap_attributes_index': {} 'uvmap_attributes_index': {}
}) })
self.additional_materials.append(None)
print_console('INFO', 'Primitives created: %d' % len(primitives)) return primitives_edges_points
return primitives
################################## Get ################################################## ################################## Get ##################################################
@ -661,6 +893,8 @@ class PrimitiveCreator:
# glTF stores deltas in morph targets # glTF stores deltas in morph targets
for vs in self.morph_locs: for vs in self.morph_locs:
vs -= self.locs vs -= self.locs
# Some invalid meshes can have NaN values in shape keys, so replace them with 0 to avoid a crash
np.nan_to_num(vs, copy=False)
if self.export_settings['gltf_yup']: if self.export_settings['gltf_yup']:
PrimitiveCreator.zup2yup(self.locs) PrimitiveCreator.zup2yup(self.locs)
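The NaN guard added above can be checked in isolation; np.nan_to_num with copy=False rewrites the array in place:

    import numpy as np

    # Morph deltas with a NaN (e.g. from an invalid shape key) are zeroed in place.
    vs = np.array([[0.0, 1.0, np.nan], [0.5, 0.5, 0.5]], dtype=np.float32)
    np.nan_to_num(vs, copy=False)
    print(vs)  # [[0.  1.  0. ] [0.5 0.5 0.5]]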
@ -1016,36 +1250,50 @@ class PrimitiveCreator:
##################################### Set ################################### ##################################### Set ###################################
def set_function(self): def set_function(self):
def setting_function(attr): def setting_function(attr, edges_points=False):
if attr['gltf_attribute_name'] == "POSITION": if attr['gltf_attribute_name'] == "POSITION":
self.__set_positions_attribute(attr) self.__set_positions_attribute(attr, edges_points=edges_points)
elif attr['gltf_attribute_name'].startswith("MORPH_POSITION_"): elif attr['gltf_attribute_name'].startswith("MORPH_POSITION_"):
self.__set_morph_locs_attribute(attr) self.__set_morph_locs_attribute(attr, edges_points=edges_points)
elif attr['gltf_attribute_name'].startswith("MORPH_TANGENT_"): elif attr['gltf_attribute_name'].startswith("MORPH_TANGENT_"):
self.__set_morph_tangent_attribute(attr) self.__set_morph_tangent_attribute(attr, edges_points=edges_points)
return setting_function return setting_function
def __set_positions_attribute(self, attr): def __set_positions_attribute(self, attr, edges_points=False):
if edges_points is False:
self.attributes[attr['gltf_attribute_name']] = {} self.attributes[attr['gltf_attribute_name']] = {}
self.attributes[attr['gltf_attribute_name']]["data"] = self.locs[self.blender_idxs] self.attributes[attr['gltf_attribute_name']]["data"] = self.locs[self.blender_idxs]
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec3 self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec3
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float
else:
self.attributes_edges_points[attr['gltf_attribute_name']] = {}
self.attributes_edges_points[attr['gltf_attribute_name']]["data"] = self.locs[self.blender_idxs]
self.attributes_edges_points[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec3
self.attributes_edges_points[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float
def __set_morph_locs_attribute(self, attr): def __set_morph_locs_attribute(self, attr, edges_points=False):
if edges_points is False:
self.attributes[attr['gltf_attribute_name']] = {} self.attributes[attr['gltf_attribute_name']] = {}
self.attributes[attr['gltf_attribute_name']]["data"] = self.morph_locs[attr['blender_attribute_index']][self.blender_idxs] self.attributes[attr['gltf_attribute_name']]["data"] = self.morph_locs[attr['blender_attribute_index']][self.blender_idxs]
else:
self.attributes_edges_points[attr['gltf_attribute_name']] = {}
self.attributes_edges_points[attr['gltf_attribute_name']]["data"] = self.morph_locs[attr['blender_attribute_index']][self.blender_idxs]
def __set_morph_tangent_attribute(self, attr): def __set_morph_tangent_attribute(self, attr, edges_points=False):
# Morph tangents come after these 3 others, so they are already calculated # Morph tangents come after these 3 others, so they are already calculated
self.normals = self.attributes[attr['gltf_attribute_name_normal']]["data"] self.normals = self.attributes[attr['gltf_attribute_name_normal']]["data"]
self.morph_normals = self.attributes[attr['gltf_attribute_name_morph_normal']]["data"] self.morph_normals = self.attributes[attr['gltf_attribute_name_morph_normal']]["data"]
self.tangents = self.attributes[attr['gltf_attribute_name_tangent']]["data"] self.tangents = self.attributes[attr['gltf_attribute_name_tangent']]["data"]
self.__calc_morph_tangents() self.__calc_morph_tangents()
if edges_points is False:
self.attributes[attr['gltf_attribute_name']] = {} self.attributes[attr['gltf_attribute_name']] = {}
self.attributes[attr['gltf_attribute_name']]["data"] = self.morph_tangents self.attributes[attr['gltf_attribute_name']]["data"] = self.morph_tangents
else:
self.attributes_edges_points[attr['gltf_attribute_name']] = {}
self.attributes_edges_points[attr['gltf_attribute_name']]["data"] = self.morph_tangents
def __calc_morph_tangents(self): def __calc_morph_tangents(self):
# TODO: check if this works # TODO: check if this works
@ -1062,10 +1310,10 @@ class PrimitiveCreator:
t_morph.rotate(rotation) t_morph.rotate(rotation)
self.morph_tangents[i] = t_morph - t # back to delta self.morph_tangents[i] = t_morph - t # back to delta
def __set_regular_attribute(self, attr): def __set_regular_attribute(self, dots, attr):
res = np.empty((len(self.prim_dots), attr['len']), dtype=attr['type']) res = np.empty((len(dots), attr['len']), dtype=attr['type'])
for i in range(attr['len']): for i in range(attr['len']):
res[:, i] = self.prim_dots[attr['gltf_attribute_name'] + str(i)] res[:, i] = dots[attr['gltf_attribute_name'] + str(i)]
self.attributes[attr['gltf_attribute_name']] = {} self.attributes[attr['gltf_attribute_name']] = {}
self.attributes[attr['gltf_attribute_name']]["data"] = res self.attributes[attr['gltf_attribute_name']]["data"] = res
if attr['gltf_attribute_name'] == "NORMAL": if attr['gltf_attribute_name'] == "NORMAL":

View File

@ -113,8 +113,6 @@ def __gather_inverse_bind_matrices(armature_uuid, export_settings):
def __gather_joints(armature_uuid, export_settings): def __gather_joints(armature_uuid, export_settings):
blender_armature_object = export_settings['vtree'].nodes[armature_uuid].blender_object
all_armature_children = export_settings['vtree'].nodes[armature_uuid].children all_armature_children = export_settings['vtree'].nodes[armature_uuid].children
root_bones_uuid = [c for c in all_armature_children if export_settings['vtree'].nodes[c].blender_type == VExportNode.BONE] root_bones_uuid = [c for c in all_armature_children if export_settings['vtree'].nodes[c].blender_type == VExportNode.BONE]

View File

@ -23,6 +23,12 @@ class VExportNode:
LIGHT = 4 LIGHT = 4
CAMERA = 5 CAMERA = 5
COLLECTION = 6 COLLECTION = 6
INSTANCE = 7 # For instances of GN
INSTANCIER = 8
NOT_INSTANCIER = 9
INST_COLLECTION = 7
# Parent type, to be set on child regarding its parent # Parent type, to be set on child regarding its parent
NO_PARENT = 54 NO_PARENT = 54
@ -67,6 +73,12 @@ class VExportNode:
# glTF # glTF
self.node = None self.node = None
# For mesh instance data of GN instances
self.data = None
self.materials = None
self.is_instancier = VExportNode.NOT_INSTANCIER
def add_child(self, uuid): def add_child(self, uuid):
self.children.append(uuid) self.children.append(uuid)
@ -77,7 +89,7 @@ class VExportNode:
def recursive_display(self, tree, mode): def recursive_display(self, tree, mode):
if mode == "simple": if mode == "simple":
for c in self.children: for c in self.children:
print(tree.nodes[c].uuid, self.blender_object.name, "/", self.blender_bone.name if self.blender_bone else "", "-->", tree.nodes[c].blender_object.name, "/", tree.nodes[c].blender_bone.name if tree.nodes[c].blender_bone else "" ) print(tree.nodes[c].uuid, self.blender_object.name if self.blender_object is not None else "GN" + self.data.name, "/", self.blender_bone.name if self.blender_bone else "", "-->", tree.nodes[c].blender_object.name if tree.nodes[c].blender_object else "GN" + tree.nodes[c].data.name, "/", tree.nodes[c].blender_bone.name if tree.nodes[c].blender_bone else "" )
tree.nodes[c].recursive_display(tree, mode) tree.nodes[c].recursive_display(tree, mode)
class VExportTree: class VExportTree:
@ -106,35 +118,46 @@ class VExportTree:
# Gather parent/children information once, as calling bobj.children is # Gather parent/children information once, as calling bobj.children is
# very expensive operation : takes O(len(bpy.data.objects)) time. # very expensive operation : takes O(len(bpy.data.objects)) time.
# TODO : In case of full collection export, we should add children / collection in the same way
blender_children = dict() blender_children = dict()
for bobj in bpy.data.objects: for bobj in bpy.data.objects:
bparent = bobj.parent bparent = bobj.parent
blender_children.setdefault(bobj, []) blender_children.setdefault(bobj, [])
blender_children.setdefault(bparent, []).append(bobj) blender_children.setdefault(bparent, []).append(bobj)
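The loop above builds the parent-to-children index once, so the traversal below never has to call bobj.children repeatedly; a standalone sketch with stand-in objects (names hypothetical):

    # Equivalent of the loop above on plain objects: index children by parent once.
    class FakeObj:
        def __init__(self, name, parent=None):
            self.name, self.parent = name, parent

    root = FakeObj("Root")
    child_a, child_b = FakeObj("A", root), FakeObj("B", root)

    blender_children = {}
    for bobj in (root, child_a, child_b):
        blender_children.setdefault(bobj, [])
        blender_children.setdefault(bobj.parent, []).append(bobj)

    print([c.name for c in blender_children[root]])  # ['A', 'B']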
if self.export_settings['gltf_hierarchy_full_collections'] is False:
scene_eval = blender_scene.evaluated_get(depsgraph=depsgraph) scene_eval = blender_scene.evaluated_get(depsgraph=depsgraph)
for blender_object in [obj.original for obj in scene_eval.objects if obj.parent is None]: for blender_object in [obj.original for obj in scene_eval.objects if obj.parent is None]:
self.recursive_node_traverse(blender_object, None, None, Matrix.Identity(4), False, blender_children) self.recursive_node_traverse(blender_object, None, None, Matrix.Identity(4), False, blender_children)
else:
self.recursive_node_traverse(blender_scene.collection, None, None, Matrix.Identity(4), False, blender_children, is_collection=True)
def recursive_node_traverse(self, blender_object, blender_bone, parent_uuid, parent_coll_matrix_world, delta, blender_children, armature_uuid=None, dupli_world_matrix=None, is_children_in_collection=False): def recursive_node_traverse(self, blender_object, blender_bone, parent_uuid, parent_coll_matrix_world, delta, blender_children, armature_uuid=None, dupli_world_matrix=None, data=None, original_object=None, is_collection=False, is_children_in_collection=False):
node = VExportNode() node = VExportNode()
node.uuid = str(uuid.uuid4()) node.uuid = str(uuid.uuid4())
node.parent_uuid = parent_uuid node.parent_uuid = parent_uuid
node.set_blender_data(blender_object, blender_bone) node.set_blender_data(blender_object, blender_bone)
if blender_object is None:
node.data = data
node.original_object = original_object
# add to parent if needed # add to parent if needed
if parent_uuid is not None: if parent_uuid is not None:
self.add_children(parent_uuid, node.uuid) self.add_children(parent_uuid, node.uuid)
if self.nodes[parent_uuid].blender_type == VExportNode.COLLECTION: if self.nodes[parent_uuid].blender_type == VExportNode.INST_COLLECTION or original_object is not None:
self.nodes[parent_uuid].children_type[node.uuid] = VExportNode.CHILDREN_IS_IN_COLLECTION if is_children_in_collection is True else VExportNode.CHILDREN_REAL self.nodes[parent_uuid].children_type[node.uuid] = VExportNode.CHILDREN_IS_IN_COLLECTION if is_children_in_collection is True else VExportNode.CHILDREN_REAL
else: else:
self.roots.append(node.uuid) self.roots.append(node.uuid)
# Set blender type # Set blender type
if blender_bone is not None: if blender_object is None: #GN instance
node.blender_type = VExportNode.INSTANCE
elif blender_bone is not None:
node.blender_type = VExportNode.BONE node.blender_type = VExportNode.BONE
self.nodes[armature_uuid].bones[blender_bone.name] = node.uuid self.nodes[armature_uuid].bones[blender_bone.name] = node.uuid
node.use_deform = blender_bone.id_data.data.bones[blender_bone.name].use_deform node.use_deform = blender_bone.id_data.data.bones[blender_bone.name].use_deform
elif is_collection is True:
node.blender_type = VExportNode.COLLECTION
elif blender_object.type == "ARMATURE": elif blender_object.type == "ARMATURE":
node.blender_type = VExportNode.ARMATURE node.blender_type = VExportNode.ARMATURE
elif blender_object.type == "CAMERA": elif blender_object.type == "CAMERA":
@ -142,7 +165,7 @@ class VExportTree:
elif blender_object.type == "LIGHT": elif blender_object.type == "LIGHT":
node.blender_type = VExportNode.LIGHT node.blender_type = VExportNode.LIGHT
elif blender_object.instance_type == "COLLECTION": elif blender_object.instance_type == "COLLECTION":
node.blender_type = VExportNode.COLLECTION node.blender_type = VExportNode.INST_COLLECTION
else: else:
node.blender_type = VExportNode.OBJECT node.blender_type = VExportNode.OBJECT
@ -183,9 +206,12 @@ class VExportTree:
# Store World Matrix for objects # Store World Matrix for objects
if dupli_world_matrix is not None: if dupli_world_matrix is not None:
node.matrix_world = dupli_world_matrix node.matrix_world = dupli_world_matrix
elif node.blender_type in [VExportNode.OBJECT, VExportNode.COLLECTION, VExportNode.ARMATURE, VExportNode.CAMERA, VExportNode.LIGHT]: elif node.blender_type in [VExportNode.OBJECT, VExportNode.COLLECTION, VExportNode.INST_COLLECTION, VExportNode.ARMATURE, VExportNode.CAMERA, VExportNode.LIGHT]:
# Matrix World of object is expressed based on collection instance objects are # Matrix World of object is expressed based on collection instance objects are
# So real world matrix is collection world_matrix @ "world_matrix" of object # So real world matrix is collection world_matrix @ "world_matrix" of object
if is_collection:
node.matrix_world = parent_coll_matrix_world.copy()
else:
node.matrix_world = parent_coll_matrix_world @ blender_object.matrix_world.copy() node.matrix_world = parent_coll_matrix_world @ blender_object.matrix_world.copy()
# If object is parented to bone, and Rest pose is used for Armature, we need to keep the world matrix transformed relative to rest pose,
@ -229,7 +255,7 @@ class VExportTree:
# Force empty ? # Force empty ?
# For duplis, if the instancer is not displayed, we should create an empty # For duplis, if the instancer is not displayed, we should create an empty
if blender_object.is_instancer is True and blender_object.show_instancer_for_render is False: if blender_object and is_collection is False and blender_object.is_instancer is True and blender_object.show_instancer_for_render is False:
node.force_as_empty = True node.force_as_empty = True
# Storing this node # Storing this node
@ -237,10 +263,14 @@ class VExportTree:
###### Manage children ###### ###### Manage children ######
# GN instances have no children
if blender_object is None:
return
# standard children (of object, or of instance collection) # standard children (of object, or of instance collection)
if blender_bone is None: if blender_bone is None and is_collection is False and blender_object.is_instancer is False:
for child_object in blender_children[blender_object]: for child_object in blender_children[blender_object]:
if child_object.parent_bone: if child_object.parent_bone and child_object.parent_type in ("BONE", "BONE_RELATIVE"):
# Object parented to bones # Object parented to bones
# Will be managed later # Will be managed later
continue continue
@ -249,33 +279,69 @@ class VExportTree:
self.recursive_node_traverse(child_object, None, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children) self.recursive_node_traverse(child_object, None, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children)
# Collections # Collections
if blender_object.instance_type == 'COLLECTION' and blender_object.instance_collection: if is_collection is False and (blender_object.instance_type == 'COLLECTION' and blender_object.instance_collection):
if self.export_settings['gltf_hierarchy_full_collections'] is False:
for dupli_object in blender_object.instance_collection.all_objects: for dupli_object in blender_object.instance_collection.all_objects:
if dupli_object.parent is not None: if dupli_object.parent is not None:
continue continue
self.recursive_node_traverse(dupli_object, None, node.uuid, node.matrix_world, new_delta or delta, blender_children, is_children_in_collection=True) self.recursive_node_traverse(dupli_object, None, node.uuid, node.matrix_world, new_delta or delta, blender_children, is_children_in_collection=True)
else:
# Manage children objects
for child in blender_object.instance_collection.objects:
if child.users_collection[0].name != blender_object.name:
continue
self.recursive_node_traverse(child, None, node.uuid, node.matrix_world, new_delta or delta, blender_children)
# Manage children collections
for child in blender_object.instance_collection.children:
self.recursive_node_traverse(child, None, node.uuid, node.matrix_world, new_delta or delta, blender_children, is_collection=True)
if is_collection is True: # Only for gltf_hierarchy_full_collections == True
# Manage children objects
for child in blender_object.objects:
if child.users_collection[0].name != blender_object.name:
continue
self.recursive_node_traverse(child, None, node.uuid, node.matrix_world, new_delta or delta, blender_children)
# Manage children collections
for child in blender_object.children:
self.recursive_node_traverse(child, None, node.uuid, node.matrix_world, new_delta or delta, blender_children, is_collection=True)
# Armature : children are bones with no parent # Armature : children are bones with no parent
if blender_object.type == "ARMATURE" and blender_bone is None: if is_collection is False and blender_object.type == "ARMATURE" and blender_bone is None:
for b in [b for b in blender_object.pose.bones if b.parent is None]: for b in [b for b in blender_object.pose.bones if b.parent is None]:
self.recursive_node_traverse(blender_object, b, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children, node.uuid) self.recursive_node_traverse(blender_object, b, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children, node.uuid)
# Bones # Bones
if blender_object.type == "ARMATURE" and blender_bone is not None: if is_collection is False and blender_object.type == "ARMATURE" and blender_bone is not None:
for b in blender_bone.children: for b in blender_bone.children:
self.recursive_node_traverse(blender_object, b, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children, armature_uuid) self.recursive_node_traverse(blender_object, b, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children, armature_uuid)
# Object parented to bone # Object parented to bone
if blender_bone is not None: if is_collection is False and blender_bone is not None:
for child_object in [c for c in blender_children[blender_object] if c.parent_type == "BONE" and c.parent_bone is not None and c.parent_bone == blender_bone.name]: for child_object in [c for c in blender_children[blender_object] if c.parent_type == "BONE" and c.parent_bone is not None and c.parent_bone == blender_bone.name]:
self.recursive_node_traverse(child_object, None, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children) self.recursive_node_traverse(child_object, None, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children)
# Duplis # Duplis
if blender_object.is_instancer is True and blender_object.instance_type != 'COLLECTION': if is_collection is False and blender_object.is_instancer is True and blender_object.instance_type != 'COLLECTION':
depsgraph = bpy.context.evaluated_depsgraph_get() depsgraph = bpy.context.evaluated_depsgraph_get()
for (dupl, mat) in [(dup.object.original, dup.matrix_world.copy()) for dup in depsgraph.object_instances if dup.parent and id(dup.parent.original) == id(blender_object)]: for (dupl, mat) in [(dup.object.original, dup.matrix_world.copy()) for dup in depsgraph.object_instances if dup.parent and id(dup.parent.original) == id(blender_object)]:
self.recursive_node_traverse(dupl, None, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children, dupli_world_matrix=mat) self.recursive_node_traverse(dupl, None, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children, dupli_world_matrix=mat)
# Geometry Nodes instances
if self.export_settings['gltf_gn_mesh'] is True:
# Do not force export as empty
# Because a GN graph can have both geometry and instances
depsgraph = bpy.context.evaluated_depsgraph_get()
eval = blender_object.evaluated_get(depsgraph)
for inst in depsgraph.object_instances: # use only as iterator
if inst.parent == eval:
if not inst.is_instance:
continue
if type(inst.object.data).__name__ == "Mesh" and len(inst.object.data.vertices) == 0:
continue # These are nested instances; this mesh has no vertices, so it is an instancer for other instances
node.is_instancier = VExportNode.INSTANCIER
self.recursive_node_traverse(None, None, node.uuid, parent_coll_matrix_world, new_delta or delta, blender_children, dupli_world_matrix=inst.matrix_world.copy(), data=inst.object.data, original_object=blender_object, is_children_in_collection=True)
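For reviewers less familiar with the depsgraph API, here is a minimal sketch (not part of the patch) of the pattern the traversal above relies on to collect Geometry Nodes instances; list_gn_instances and obj are illustrative names only.

import bpy

def list_gn_instances(obj):
    depsgraph = bpy.context.evaluated_depsgraph_get()
    evaluated = obj.evaluated_get(depsgraph)
    found = []
    for inst in depsgraph.object_instances:  # valid only while iterating the depsgraph
        if inst.is_instance and inst.parent == evaluated:
            # Each entry carries the instanced data-block and its world matrix
            found.append((inst.object.data, inst.matrix_world.copy()))
    return found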
def get_all_objects(self): def get_all_objects(self):
return [n.uuid for n in self.nodes.values() if n.blender_type != VExportNode.BONE] return [n.uuid for n in self.nodes.values() if n.blender_type != VExportNode.BONE]
@ -320,7 +386,7 @@ class VExportTree:
def display(self, mode): def display(self, mode):
if mode == "simple": if mode == "simple":
for n in self.roots: for n in self.roots:
print(self.nodes[n].uuid, "Root", self.nodes[n].blender_object.name, "/", self.nodes[n].blender_bone.name if self.nodes[n].blender_bone else "" ) print(self.nodes[n].uuid, "Root", self.nodes[n].blender_object.name if self.nodes[n].blender_object else "GN instance", "/", self.nodes[n].blender_bone.name if self.nodes[n].blender_bone else "" )
self.nodes[n].recursive_display(self, mode) self.nodes[n].recursive_display(self, mode)
def filter_tag(self): def filter_tag(self):
@ -355,7 +421,7 @@ class VExportTree:
print("This should not happen!") print("This should not happen!")
for child in self.nodes[uuid].children: for child in self.nodes[uuid].children:
if self.nodes[uuid].blender_type == VExportNode.COLLECTION: if self.nodes[uuid].blender_type == VExportNode.INST_COLLECTION or self.nodes[uuid].is_instancier == VExportNode.INSTANCIER:
# We need to split children into 2 categories: real children, and objects inside the collection # We need to split children into 2 categories: real children, and objects inside the collection
if self.nodes[uuid].children_type[child] == VExportNode.CHILDREN_IS_IN_COLLECTION: if self.nodes[uuid].children_type[child] == VExportNode.CHILDREN_IS_IN_COLLECTION:
self.recursive_filter_tag(child, self.nodes[uuid].keep_tag) self.recursive_filter_tag(child, self.nodes[uuid].keep_tag)
@ -419,6 +485,10 @@ class VExportTree:
def node_filter_inheritable_is_kept(self, uuid): def node_filter_inheritable_is_kept(self, uuid):
if self.nodes[uuid].blender_object is None:
# geometry node instances
return True
if self.export_settings['gltf_selected'] and self.nodes[uuid].blender_object.select_get() is False: if self.export_settings['gltf_selected'] and self.nodes[uuid].blender_object.select_get() is False:
return False return False
@ -494,7 +564,10 @@ class VExportTree:
n.armature is not None and \ n.armature is not None and \
n.armature in self.nodes and \ n.armature in self.nodes and \
n.blender_type == VExportNode.OBJECT and \ n.blender_type == VExportNode.OBJECT and \
n.blender_object.type == "MESH" and \
hasattr(self.nodes[n.armature], "need_neutral_bone")]: # all skinned mesh objects where a neutral bone is needed hasattr(self.nodes[n.armature], "need_neutral_bone")]: # all skinned mesh objects where a neutral bone is needed
# Only for meshes, as curves can't have skin data (no weight painting available)
if n.armature not in added_armatures: if n.armature not in added_armatures:

View File

@ -2,6 +2,7 @@
# #
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
import bpy
import re import re
import os import os
from typing import List from typing import List
@ -13,7 +14,11 @@ from ...io.com.gltf2_io_constants import ComponentType, DataType
from ...io.exp import gltf2_io_binary_data, gltf2_io_buffer, gltf2_io_image_data from ...io.exp import gltf2_io_binary_data, gltf2_io_buffer, gltf2_io_image_data
from ...io.exp.gltf2_io_user_extensions import export_user_extensions from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from .gltf2_blender_gather_accessors import gather_accessor from .gltf2_blender_gather_accessors import gather_accessor
from .material.gltf2_blender_gather_image import get_gltf_image_from_blender_image
class AdditionalData:
def __init__(self):
self.additional_textures = []
class GlTF2Exporter: class GlTF2Exporter:
""" """
@ -59,6 +64,9 @@ class GlTF2Exporter:
textures=[] textures=[]
) )
self.additional_data = AdditionalData()
self.__buffer = gltf2_io_buffer.Buffer() self.__buffer = gltf2_io_buffer.Buffer()
self.__images = {} self.__images = {}
@ -355,6 +363,22 @@ class GlTF2Exporter:
for s in skins: for s in skins:
self.__traverse(s) self.__traverse(s)
def traverse_additional_textures(self):
if self.export_settings['gltf_unused_textures'] is True:
tab = []
for tex in self.export_settings['additional_texture_export']:
res = self.__traverse(tex)
tab.append(res)
self.additional_data.additional_textures = tab
def traverse_additional_images(self):
if self.export_settings['gltf_unused_images']:
for img in [img for img in bpy.data.images if img.source != "VIEWER"]:
# TODO manage full / partial / custom via hook ...
if img.name not in self.export_settings['exported_images'].keys():
self.__traverse(get_gltf_image_from_blender_image(img.name, self.export_settings))
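The two methods above rely on the export_settings['exported_images'] bookkeeping filled in during image encoding (1 when an image file is exported as-is, 2 when only some of its channels end up in a packed texture, absent when it is never touched). A rough sketch of how those flags read, with a hypothetical helper name:

def classify_image_usage(export_settings, image_name):
    # Hypothetical helper: 1 = image exported as-is, 2 = only some channels were
    # packed into another texture, absent = never exported by any material.
    flag = export_settings['exported_images'].get(image_name)
    return {1: "fully used", 2: "partially used", None: "unused"}[flag]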
def add_animation(self, animation: gltf2_io.Animation): def add_animation(self, animation: gltf2_io.Animation):
""" """
Add an animation to the glTF. Add an animation to the glTF.
@ -507,3 +531,38 @@ class GlTF2Exporter:
# do nothing for any type that does not match a glTF schema (primitives) # do nothing for any type that does not match a glTF schema (primitives)
return node return node
def fix_json(obj):
# TODO: move to custom JSON encoder
fixed = obj
if isinstance(obj, dict):
fixed = {}
for key, value in obj.items():
if key == 'extras' and value is not None:
fixed[key] = value
continue
if not __should_include_json_value(key, value):
continue
fixed[key] = fix_json(value)
elif isinstance(obj, list):
fixed = []
for value in obj:
fixed.append(fix_json(value))
elif isinstance(obj, float):
# force floats to int, if they are integers (prevent INTEGER_WRITTEN_AS_FLOAT validator warnings)
if int(obj) == obj:
return int(obj)
return fixed
def __should_include_json_value(key, value):
allowed_empty_collections = ["KHR_materials_unlit", "KHR_materials_specular"]
if value is None:
return False
elif __is_empty_collection(value) and key not in allowed_empty_collections:
return False
return True
def __is_empty_collection(value):
return (isinstance(value, dict) or isinstance(value, list)) and len(value) == 0
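A quick illustration of what fix_json does to a gathered dictionary; the input values are made up and only meant to show each rule (integral floats become ints, None and empty collections are dropped, allowed extension keys may stay empty).

raw = {
    "scale": 2.0,               # integral float, becomes int 2
    "name": None,               # None values are dropped
    "extensions": {},           # empty collection, dropped
    "KHR_materials_unlit": {},  # allowed to stay empty
}
print(fix_json(raw))
# -> {'scale': 2, 'KHR_materials_unlit': {}}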

View File

@ -0,0 +1,184 @@
# SPDX-FileCopyrightText: 2018-2022 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0
import bpy
from .gltf2_blender_image import TmpImageGuard, make_temp_image_copy, StoreImage, StoreData
import numpy as np
from .....io.com.gltf2_io_extensions import Extension
from ....com.gltf2_blender_conversion import get_anisotropy_rotation_blender_to_gltf
from ...material import gltf2_blender_gather_texture_info
from ..gltf2_blender_search_node_tree import detect_anisotropy_nodes, get_socket, has_image_node_from_socket, get_factor_from_socket
def export_anisotropy(blender_material, export_settings):
anisotropy_extension = {}
uvmap_infos = {}
udim_infos = {}
anisotropy_socket = get_socket(blender_material, 'Anisotropic')
anisotropic_rotation_socket = get_socket(blender_material, 'Anisotropic Rotation')
anisotropy_tangent_socket = get_socket(blender_material, 'Tangent')
if anisotropy_socket.socket is None or anisotropic_rotation_socket.socket is None or anisotropy_tangent_socket.socket is None:
return None, {}, {}
if anisotropy_socket.socket.is_linked is False and anisotropic_rotation_socket.socket.is_linked is False:
# We don't need the complex node setup, just export the value
anisotropyStrength = anisotropy_socket.socket.default_value
if anisotropyStrength != 0.0:
anisotropy_extension['anisotropyStrength'] = anisotropyStrength
anisotropyRotation = get_anisotropy_rotation_blender_to_gltf(anisotropic_rotation_socket.socket.default_value)
if anisotropyRotation != 0.0:
anisotropy_extension['anisotropyRotation'] = anisotropyRotation
if len(anisotropy_extension) == 0:
return None, {}, {}
return Extension('KHR_materials_anisotropy', anisotropy_extension, False), uvmap_infos, udim_infos
# Get complex node setup
is_anisotropy, anisotropy_data = detect_anisotropy_nodes(
anisotropy_socket,
anisotropic_rotation_socket,
anisotropy_tangent_socket,
export_settings
)
if not is_anisotropy:
# Trying to export from grayscale textures
anisotropy_texture, uvmap_info = export_anisotropy_from_grayscale_textures(blender_material, export_settings)
if anisotropy_texture is None:
return None, {}, {}
fac = get_factor_from_socket(anisotropy_socket, kind='VALUE')
if fac is None and anisotropy_texture is not None:
anisotropy_extension['anisotropyStrength'] = 1.0
elif fac != 0.0 and anisotropy_texture is not None:
anisotropy_extension['anisotropyStrength'] = fac
fac = get_factor_from_socket(anisotropic_rotation_socket, kind='VALUE')
if fac is None and anisotropy_texture is not None:
pass # Rotation 0 is default
elif fac != 0.0 and anisotropy_texture is not None:
anisotropy_extension['anisotropyRotation'] = get_anisotropy_rotation_blender_to_gltf(fac)
anisotropy_extension['anisotropyTexture'] = anisotropy_texture
uvmap_infos.update({'anisotropyTexture': uvmap_info})
return Extension('KHR_materials_anisotropy', anisotropy_extension, False), uvmap_infos, udim_infos
# Export from complex node setup
if anisotropy_data['anisotropyStrength'] != 0.0:
anisotropy_extension['anisotropyStrength'] = anisotropy_data['anisotropyStrength']
if anisotropy_data['anisotropyRotation'] != 0.0:
anisotropy_extension['anisotropyRotation'] = anisotropy_data['anisotropyRotation']
# Get texture data
# No need to check here that we have a texture; this check is already done inside detect_anisotropy_nodes
anisotropy_texture, uvmap_info , udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
anisotropy_data['tex_socket'],
(anisotropy_data['tex_socket'],),
(),
export_settings,
)
anisotropy_extension['anisotropyTexture'] = anisotropy_texture
uvmap_infos.update({'anisotropyTexture': uvmap_info})
udim_infos.update({'anisotropyTexture' : udim_info} if len(udim_info.keys()) > 0 else {})
return Extension('KHR_materials_anisotropy', anisotropy_extension, False), uvmap_infos, udim_infos
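For reference, the extension gathered above ends up serialized roughly like this in the glTF material; the values and texture index below are made up for illustration.

material_json_extensions = {
    "KHR_materials_anisotropy": {
        "anisotropyStrength": 0.6,
        "anisotropyRotation": 1.5707963,    # radians (Blender's 0-1 value times 2*pi)
        "anisotropyTexture": {"index": 3},  # index into the glTF "textures" array
    }
}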
def export_anisotropy_from_grayscale_textures(blender_material, export_settings):
# There will be a texture, with a complex calculation (no direct channel mapping)
anisotropy_socket = get_socket(blender_material, 'Anisotropic')
anisotropic_rotation_socket = get_socket(blender_material, 'Anisotropic Rotation')
anisotropy_tangent_socket = get_socket(blender_material, 'Tangent')
sockets = (anisotropy_socket, anisotropic_rotation_socket, anisotropy_tangent_socket)
# Pick the primary socket: the one that has a texture
primary_socket = anisotropy_socket
if not has_image_node_from_socket(primary_socket, export_settings):
primary_socket = anisotropic_rotation_socket
anisotropyTexture, uvmap_info, _, _ = gltf2_blender_gather_texture_info.gather_texture_info(
primary_socket,
sockets,
(),
export_settings,
filter_type='ANY')
if anisotropyTexture is None:
return None, {}
return anisotropyTexture, uvmap_info
def grayscale_anisotropy_calculation(stored, export_settings):
# Find all Blender images used
images = []
for fill in stored.values():
if isinstance(fill, StoreImage):
if fill.image not in images:
images.append(fill.image)
if not images:
# No ImageFills; use a 1x1 white pixel
pixels = np.array([1.0, 1.0, 1.0, 1.0], np.float32)
return pixels, 1, 1
width = max(image.size[0] for image in images)
height = max(image.size[1] for image in images)
buffers = {}
def rgb2gray(rgb):
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
for identifier, image in [(ident, store.image) for (ident, store) in stored.items() if isinstance(store, StoreImage)]:
tmp_buf = np.empty(width * height * 4, np.float32)
if image.size[0] == width and image.size[1] == height:
image.pixels.foreach_get(tmp_buf)
else:
# Image is the wrong size; make a temp copy and scale it.
with TmpImageGuard() as guard:
make_temp_image_copy(guard, src_image=image)
tmp_image = guard.image
tmp_image.scale(width, height)
tmp_image.pixels.foreach_get(tmp_buf)
buffers[identifier] = np.reshape(tmp_buf, [width, height, 4])
buffers[identifier] = rgb2gray(buffers[identifier])
for identifier, data in [(ident, data) for (ident, data) in stored.items() if isinstance(data, StoreData)]:
buffers[identifier] = np.full((width, height), 1) # Set to white / 1.0, as the value is exported as a factor
# Combine the image
out_buf = np.zeros((width, height, 4), np.float32)
out_buf[:,:,3] = 1.0 # A : Alpha
out_buf[:,:,2] = buffers['anisotropy'] # B : Strength (Anisotropic socket)
# Rotation needs to be converted from 0-1 to 0-2pi, turned into a unit (cos, sin) vector,
# then remapped from [-1, 1] to [0, 1] before being written to the R & G channels
buffers['anisotropic_rotation'] = buffers['anisotropic_rotation'] * 2 * np.pi
buffers['anisotropic_rotation'] = np.stack((np.cos(buffers['anisotropic_rotation']), np.sin(buffers['anisotropic_rotation'])), axis=-1)
buffers['anisotropic_rotation'] = buffers['anisotropic_rotation'] / np.linalg.norm(buffers['anisotropic_rotation'], axis=-1, keepdims=True)
buffers['anisotropic_rotation'] = (buffers['anisotropic_rotation'] + 1.0) / 2.0
out_buf[:,:,0] = buffers['anisotropic_rotation'][:,:,0] # R : Rotation X
out_buf[:,:,1] = buffers['anisotropic_rotation'][:,:,1] # G : Rotation Y
out_buf = np.reshape(out_buf, (width * height * 4))
return np.float32(out_buf), width, height, None
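A worked example of the rotation packing above, independent of the exporter (the strength value is made up):

import numpy as np

rotation = 0.25                                        # Blender's 0-1 anisotropic rotation
angle = rotation * 2 * np.pi                           # pi/2
direction = np.array([np.cos(angle), np.sin(angle)])   # ~(0.0, 1.0)
direction = direction / np.linalg.norm(direction)      # already unit length
encoded_rg = (direction + 1.0) / 2.0                   # ~(0.5, 1.0), written to R and G
strength_b = 0.6                                       # Anisotropic socket value goes to B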

View File

@ -30,7 +30,7 @@ def export_clearcoat(blender_material, export_settings):
clearcoat_enabled = True clearcoat_enabled = True
if not clearcoat_enabled: if not clearcoat_enabled:
return None, {} return None, {}, {}
if isinstance(clearcoat_roughness_socket.socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.socket.is_linked: if isinstance(clearcoat_roughness_socket.socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.socket.is_linked:
clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.socket.default_value clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.socket.default_value
@ -49,10 +49,11 @@ def export_clearcoat(blender_material, export_settings):
clearcoat_roughness_slots = (clearcoat_roughness_socket,) clearcoat_roughness_slots = (clearcoat_roughness_socket,)
uvmap_infos = {} uvmap_infos = {}
udim_infos = {}
if len(clearcoat_roughness_slots) > 0: if len(clearcoat_roughness_slots) > 0:
if has_clearcoat_texture: if has_clearcoat_texture:
clearcoat_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( clearcoat_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
clearcoat_socket, clearcoat_socket,
clearcoat_roughness_slots, clearcoat_roughness_slots,
(), (),
@ -60,9 +61,10 @@ def export_clearcoat(blender_material, export_settings):
) )
clearcoat_extension['clearcoatTexture'] = clearcoat_texture clearcoat_extension['clearcoatTexture'] = clearcoat_texture
uvmap_infos.update({'clearcoatTexture' : uvmap_info}) uvmap_infos.update({'clearcoatTexture' : uvmap_info})
udim_infos.update({'clearcoatTexture' : udim_info} if len(udim_info.keys()) > 0 else {})
if has_clearcoat_roughness_texture: if has_clearcoat_roughness_texture:
clearcoat_roughness_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( clearcoat_roughness_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
clearcoat_roughness_socket, clearcoat_roughness_socket,
clearcoat_roughness_slots, clearcoat_roughness_slots,
(), (),
@ -70,14 +72,16 @@ def export_clearcoat(blender_material, export_settings):
) )
clearcoat_extension['clearcoatRoughnessTexture'] = clearcoat_roughness_texture clearcoat_extension['clearcoatRoughnessTexture'] = clearcoat_roughness_texture
uvmap_infos.update({'clearcoatRoughnessTexture': uvmap_info}) uvmap_infos.update({'clearcoatRoughnessTexture': uvmap_info})
udim_infos.update({'clearcoatRoughnessTexture': udim_info} if len(udim_info.keys()) > 0 else {})
if has_image_node_from_socket(clearcoat_normal_socket, export_settings): if has_image_node_from_socket(clearcoat_normal_socket, export_settings):
clearcoat_normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class( clearcoat_normal_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class(
clearcoat_normal_socket, clearcoat_normal_socket,
(clearcoat_normal_socket,), (clearcoat_normal_socket,),
export_settings export_settings
) )
clearcoat_extension['clearcoatNormalTexture'] = clearcoat_normal_texture clearcoat_extension['clearcoatNormalTexture'] = clearcoat_normal_texture
uvmap_infos.update({'clearcoatNormalTexture': uvmap_info}) uvmap_infos.update({'clearcoatNormalTexture': uvmap_info})
udim_infos.update({'clearcoatNormalTexture': udim_info} if len(udim_info.keys()) > 0 else {})
return Extension('KHR_materials_clearcoat', clearcoat_extension, False), uvmap_infos return Extension('KHR_materials_clearcoat', clearcoat_extension, False), uvmap_infos, udim_infos

View File

@ -58,8 +58,8 @@ def export_emission_texture(blender_material, export_settings):
emissive = get_socket(blender_material, "Emissive") emissive = get_socket(blender_material, "Emissive")
if emissive.socket is None: if emissive.socket is None:
emissive = get_socket_from_gltf_material_node(blender_material, "Emissive") emissive = get_socket_from_gltf_material_node(blender_material, "Emissive")
emissive_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(emissive, (emissive,), (), export_settings) emissive_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(emissive, (emissive,), (), export_settings)
return emissive_texture, {'emissiveTexture': uvmap_info} return emissive_texture, {'emissiveTexture': uvmap_info}, {'emissiveTexture': udim_info} if len(udim_info.keys()) > 0 else {}
def export_emission_strength_extension(emissive_factor, export_settings): def export_emission_strength_extension(emissive_factor, export_settings):
emissive_strength_extension = {} emissive_strength_extension = {}

View File

@ -19,12 +19,13 @@ def export_sheen(blender_material, export_settings):
sheen_socket = get_socket(blender_material, "Sheen Weight") sheen_socket = get_socket(blender_material, "Sheen Weight")
if sheenTint_socket.socket is None or sheenRoughness_socket.socket is None or sheen_socket.socket is None: if sheenTint_socket.socket is None or sheenRoughness_socket.socket is None or sheen_socket.socket is None:
return None, {} return None, {}, {}
if sheen_socket.socket.is_linked is False and sheen_socket.socket.default_value == 0.0: if sheen_socket.socket.is_linked is False and sheen_socket.socket.default_value == 0.0:
return None, {} return None, {}, {}
uvmap_infos = {} uvmap_infos = {}
udim_infos = {}
#TODOExt : What to do if sheen_socket is linked? or is not between 0 and 1? #TODOExt : What to do if sheen_socket is linked? or is not between 0 and 1?
@ -46,7 +47,7 @@ def export_sheen(blender_material, export_settings):
# Texture # Texture
if has_image_node_from_socket(sheenTint_socket, export_settings): if has_image_node_from_socket(sheenTint_socket, export_settings):
original_sheenColor_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( original_sheenColor_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
sheenTint_socket, sheenTint_socket,
(sheenTint_socket,), (sheenTint_socket,),
(), (),
@ -54,6 +55,7 @@ def export_sheen(blender_material, export_settings):
) )
sheen_extension['sheenColorTexture'] = original_sheenColor_texture sheen_extension['sheenColorTexture'] = original_sheenColor_texture
uvmap_infos.update({'sheenColorTexture': uvmap_info}) uvmap_infos.update({'sheenColorTexture': uvmap_info})
udim_infos.update({'sheenColorTexture': udim_info} if len(udim_info) > 0 else {})
if sheenRoughness_non_linked is True: if sheenRoughness_non_linked is True:
fac = sheenRoughness_socket.socket.default_value fac = sheenRoughness_socket.socket.default_value
@ -69,7 +71,7 @@ def export_sheen(blender_material, export_settings):
# Texture # Texture
if has_image_node_from_socket(sheenRoughness_socket, export_settings): if has_image_node_from_socket(sheenRoughness_socket, export_settings):
original_sheenRoughness_texture, uvmap_info , _ = gltf2_blender_gather_texture_info.gather_texture_info( original_sheenRoughness_texture, uvmap_info , udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
sheenRoughness_socket, sheenRoughness_socket,
(sheenRoughness_socket,), (sheenRoughness_socket,),
(), (),
@ -77,5 +79,6 @@ def export_sheen(blender_material, export_settings):
) )
sheen_extension['sheenRoughnessTexture'] = original_sheenRoughness_texture sheen_extension['sheenRoughnessTexture'] = original_sheenRoughness_texture
uvmap_infos.update({'sheenRoughnessTexture': uvmap_info}) uvmap_infos.update({'sheenRoughnessTexture': uvmap_info})
udim_infos.update({'sheenRoughnessTexture': udim_info} if len(udim_info) > 0 else {})
return Extension('KHR_materials_sheen', sheen_extension, False), uvmap_infos return Extension('KHR_materials_sheen', sheen_extension, False), uvmap_infos, udim_infos

View File

@ -19,9 +19,10 @@ def export_specular(blender_material, export_settings):
speculartint_socket = get_socket(blender_material, 'Specular Tint') speculartint_socket = get_socket(blender_material, 'Specular Tint')
if specular_socket.socket is None or speculartint_socket.socket is None: if specular_socket.socket is None or speculartint_socket.socket is None:
return None, {} return None, {}, {}
uvmap_infos = {} uvmap_infos = {}
udim_infos = {}
specular_non_linked = isinstance(specular_socket.socket, bpy.types.NodeSocket) and not specular_socket.socket.is_linked specular_non_linked = isinstance(specular_socket.socket, bpy.types.NodeSocket) and not specular_socket.socket.is_linked
specularcolor_non_linked = isinstance(speculartint_socket.socket, bpy.types.NodeSocket) and not speculartint_socket.socket.is_linked specularcolor_non_linked = isinstance(speculartint_socket.socket, bpy.types.NodeSocket) and not speculartint_socket.socket.is_linked
@ -52,7 +53,7 @@ def export_specular(blender_material, export_settings):
# Texture # Texture
if has_image_node_from_socket(specular_socket, export_settings): if has_image_node_from_socket(specular_socket, export_settings):
specular_texture, uvmap_info, _ = gather_texture_info( specular_texture, uvmap_info, udim_info, _ = gather_texture_info(
specular_socket, specular_socket,
(specular_socket,), (specular_socket,),
(), (),
@ -60,6 +61,7 @@ def export_specular(blender_material, export_settings):
) )
specular_extension['specularTexture'] = specular_texture specular_extension['specularTexture'] = specular_texture
uvmap_infos.update({'specularTexture': uvmap_info}) uvmap_infos.update({'specularTexture': uvmap_info})
udim_infos.update({'specularTexture': udim_info} if len(udim_info) > 0 else {})
extensions_needed = True extensions_needed = True
if specularcolor_non_linked is True: if specularcolor_non_linked is True:
@ -83,7 +85,7 @@ def export_specular(blender_material, export_settings):
# Texture # Texture
if has_image_node_from_socket(speculartint_socket, export_settings): if has_image_node_from_socket(speculartint_socket, export_settings):
specularcolor_texture, uvmap_info, _ = gather_texture_info( specularcolor_texture, uvmap_info, udim_info, _ = gather_texture_info(
speculartint_socket, speculartint_socket,
(speculartint_socket,), (speculartint_socket,),
(), (),
@ -91,9 +93,10 @@ def export_specular(blender_material, export_settings):
) )
specular_extension['specularColorTexture'] = specularcolor_texture specular_extension['specularColorTexture'] = specularcolor_texture
uvmap_infos.update({'specularColorTexture': uvmap_info}) uvmap_infos.update({'specularColorTexture': uvmap_info})
udim_infos.update({'specularColorTexture': udim_info} if len(udim_info) > 0 else {})
extensions_needed = True extensions_needed = True
if extensions_needed is False: if extensions_needed is False:
return None, {} return None, {}, {}
return Extension('KHR_materials_specular', specular_extension, False), uvmap_infos return Extension('KHR_materials_specular', specular_extension, False), uvmap_infos, udim_infos

View File

@ -29,16 +29,17 @@ def export_transmission(blender_material, export_settings):
transmission_enabled = True transmission_enabled = True
if not transmission_enabled: if not transmission_enabled:
return None, {} return None, {}, {}
uvmap_info = {} uvmap_info = {}
udim_info = {}
# Pack transmission channel (R). # Pack transmission channel (R).
if has_transmission_texture: if has_transmission_texture:
transmission_slots = (transmission_socket,) transmission_slots = (transmission_socket,)
if len(transmission_slots) > 0: if len(transmission_slots) > 0:
combined_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( combined_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
transmission_socket, transmission_socket,
transmission_slots, transmission_slots,
(), (),
@ -47,4 +48,4 @@ def export_transmission(blender_material, export_settings):
if has_transmission_texture: if has_transmission_texture:
transmission_extension['transmissionTexture'] = combined_texture transmission_extension['transmissionTexture'] = combined_texture
return Extension('KHR_materials_transmission', transmission_extension, False), {'transmissionTexture': uvmap_info} return Extension('KHR_materials_transmission', transmission_extension, False), {'transmissionTexture': uvmap_info}, {'transmissionTexture': udim_info} if len(udim_info) > 0 else {}

View File

@ -25,7 +25,7 @@ def export_volume(blender_material, export_settings):
transmission_enabled = True transmission_enabled = True
if transmission_enabled is False: if transmission_enabled is False:
return None, {} return None, {}, {}
volume_extension = {} volume_extension = {}
has_thickness_texture = False has_thickness_texture = False
@ -35,7 +35,7 @@ def export_volume(blender_material, export_settings):
thickness_socket = get_socket_from_gltf_material_node(blender_material, 'Thickness') thickness_socket = get_socket_from_gltf_material_node(blender_material, 'Thickness')
if thickness_socket.socket is None: if thickness_socket.socket is None:
# If no thickness (here because there is no glTF Material Output node), no volume extension export # If no thickness (here because there is no glTF Material Output node), no volume extension export
return None, {} return None, {}, {}
density_socket = get_socket(blender_material, 'Density', volume=True) density_socket = get_socket(blender_material, 'Density', volume=True)
attenuation_color_socket = get_socket(blender_material, 'Color', volume=True) attenuation_color_socket = get_socket(blender_material, 'Color', volume=True)
@ -54,7 +54,7 @@ def export_volume(blender_material, export_settings):
val = thickness_socket.socket.default_value val = thickness_socket.socket.default_value
if val == 0.0: if val == 0.0:
# If no thickness, no volume extension export # If no thickness, no volume extension export
return None, {} return None, {}, {}
volume_extension['thicknessFactor'] = val volume_extension['thicknessFactor'] = val
elif has_image_node_from_socket(thickness_socket, export_settings): elif has_image_node_from_socket(thickness_socket, export_settings):
fac = get_factor_from_socket(thickness_socket, kind='VALUE') fac = get_factor_from_socket(thickness_socket, kind='VALUE')
@ -66,8 +66,9 @@ def export_volume(blender_material, export_settings):
if has_thickness_texture: if has_thickness_texture:
thickness_slots = (thickness_socket,) thickness_slots = (thickness_socket,)
udim_info = {}
if len(thickness_slots) > 0: if len(thickness_slots) > 0:
combined_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( combined_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
thickness_socket, thickness_socket,
thickness_slots, thickness_slots,
(), (),
@ -76,4 +77,4 @@ def export_volume(blender_material, export_settings):
if has_thickness_texture: if has_thickness_texture:
volume_extension['thicknessTexture'] = combined_texture volume_extension['thicknessTexture'] = combined_texture
return Extension('KHR_materials_volume', volume_extension, False), {'thicknessTexture': uvmap_info} return Extension('KHR_materials_volume', volume_extension, False), {'thicknessTexture': uvmap_info}, {'thicknessTexture': udim_info} if len(udim_info.keys()) > 0 else {}

View File

@ -24,6 +24,13 @@ class FillImage:
self.image = image self.image = image
self.src_chan = src_chan self.src_chan = src_chan
class FillImageTile:
"""Fills a channel with the channel src_chan from a Blender UDIM image."""
def __init__(self, image: bpy.types.Image, tile, src_chan: Channel):
self.image = image
self.tile = tile
self.src_chan = src_chan
class FillWhite: class FillWhite:
"""Fills a channel with all ones (1.0).""" """Fills a channel with all ones (1.0)."""
pass pass
@ -88,6 +95,15 @@ class ExportImage:
export_image.fill_image(image, dst_chan=chan, src_chan=chan) export_image.fill_image(image, dst_chan=chan, src_chan=chan)
return export_image return export_image
@staticmethod
def from_blender_image_tile(export_settings):
export_image = ExportImage()
original_udim = export_settings['current_udim_info']['image']
for chan in range(original_udim.channels):
export_image.fill_image_tile(original_udim, export_settings['current_udim_info']['tile'], dst_chan=chan, src_chan=chan)
return export_image
@staticmethod @staticmethod
def from_original(image: bpy.types.Image): def from_original(image: bpy.types.Image):
return ExportImage(image) return ExportImage(image)
@ -95,6 +111,9 @@ class ExportImage:
def fill_image(self, image: bpy.types.Image, dst_chan: Channel, src_chan: Channel): def fill_image(self, image: bpy.types.Image, dst_chan: Channel, src_chan: Channel):
self.fills[dst_chan] = FillImage(image, src_chan) self.fills[dst_chan] = FillImage(image, src_chan)
def fill_image_tile(self, image: bpy.types.Image, tile, dst_chan: Channel, src_chan: Channel):
self.fills[dst_chan] = FillImageTile(image, tile, src_chan)
def store_data(self, identifier, data, type='Image'): def store_data(self, identifier, data, type='Image'):
if type == "Image": # This is an image if type == "Image": # This is an image
self.stored[identifier] = StoreImage(data) self.stored[identifier] = StoreImage(data)
@ -116,12 +135,17 @@ class ExportImage:
else: else:
return False return False
def blender_image(self) -> Optional[bpy.types.Image]: def blender_image(self, export_settings) -> Optional[bpy.types.Image]:
"""If there's an existing Blender image we can use, """If there's an existing Blender image we can use,
returns it. Otherwise (if channels need packing), returns it. Otherwise (if channels need packing),
returns None. returns None.
""" """
if self.__on_happy_path(): if self.__on_happy_path():
# Store that this image is fully exported (used to decide whether to export otherwise-unused images)
for fill in self.fills.values():
export_settings['exported_images'][fill.image.name] = 1 # Fully used
break
for fill in self.fills.values(): for fill in self.fills.values():
return fill.image return fill.image
return None return None
@ -134,6 +158,16 @@ class ExportImage:
len(set(fill.image.name for fill in self.fills.values())) == 1 len(set(fill.image.name for fill in self.fills.values())) == 1
) )
def __on_happy_path_udim(self) -> bool:
# All src_chans match their dst_chan and come from the same udim image
return (
all(isinstance(fill, FillImageTile) for fill in self.fills.values()) and
all(dst_chan == fill.src_chan for dst_chan, fill in self.fills.items()) and
len(set(fill.image.name for fill in self.fills.values())) == 1 and
all(fill.tile == self.fills[list(self.fills.keys())[0]].tile for fill in self.fills.values())
)
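To make the two "happy path" checks concrete, a fills mapping of the following shape passes __on_happy_path_udim: every destination channel takes the matching channel of the same tile of one UDIM image, so the original tile file can be reused instead of re-encoding pixels. Names below are illustrative; udim_image stands for any tiled bpy.types.Image.

fills = {
    Channel.R: FillImageTile(udim_image, "1001", Channel.R),
    Channel.G: FillImageTile(udim_image, "1001", Channel.G),
    Channel.B: FillImageTile(udim_image, "1001", Channel.B),
}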
def encode(self, mime_type: Optional[str], export_settings) -> Tuple[bytes, bool]: def encode(self, mime_type: Optional[str], export_settings) -> Tuple[bytes, bool]:
self.file_format = { self.file_format = {
"image/jpeg": "JPEG", "image/jpeg": "JPEG",
@ -143,17 +177,27 @@ class ExportImage:
# Happy path = we can just use an existing Blender image # Happy path = we can just use an existing Blender image
if self.__on_happy_path(): if self.__on_happy_path():
# Store that this image is fully exported (used to decide whether to export otherwise-unused images)
for fill in self.fills.values():
export_settings['exported_images'][fill.image.name] = 1 # Fully used
break
return self.__encode_happy(export_settings), None return self.__encode_happy(export_settings), None
if self.__on_happy_path_udim():
return self.__encode_happy_tile(export_settings), None
# Unhappy path = we need to create the image that self.fills or self.stored describes # Unhappy path = we need to create the image that self.fills or self.stored describes
if self.numpy_calc is None: if self.numpy_calc is None:
return self.__encode_unhappy(export_settings), None return self.__encode_unhappy(export_settings), None
else: else:
pixels, width, height, factor = self.numpy_calc(self.stored) pixels, width, height, factor = self.numpy_calc(self.stored, export_settings)
return self.__encode_from_numpy_array(pixels, (width, height), export_settings), factor return self.__encode_from_numpy_array(pixels, (width, height), export_settings), factor
def __encode_happy(self, export_settings) -> bytes: def __encode_happy(self, export_settings) -> bytes:
return self.__encode_from_image(self.blender_image(), export_settings) return self.__encode_from_image(self.blender_image(export_settings), export_settings)
def __encode_happy_tile(self, export_settings) -> bytes:
return self.__encode_from_image_tile(self.fills[list(self.fills.keys())[0]].image, export_settings['current_udim_info']['tile'], export_settings)
def __encode_unhappy(self, export_settings) -> bytes: def __encode_unhappy(self, export_settings) -> bytes:
# We need to assemble the image out of channels. # We need to assemble the image out of channels.
@ -165,6 +209,7 @@ class ExportImage:
if isinstance(fill, FillImage): if isinstance(fill, FillImage):
if fill.image not in images: if fill.image not in images:
images.append(fill.image) images.append(fill.image)
export_settings['exported_images'][fill.image.name] = 2 # 2 = partially used
if not images: if not images:
# No ImageFills; use a 1x1 white pixel # No ImageFills; use a 1x1 white pixel
@ -244,6 +289,25 @@ class ExportImage:
tmp_image = guard.image tmp_image = guard.image
return _encode_temp_image(tmp_image, self.file_format, export_settings) return _encode_temp_image(tmp_image, self.file_format, export_settings)
def __encode_from_image_tile(self, udim_image, tile, export_settings):
src_path = bpy.path.abspath(udim_image.filepath_raw).replace("<UDIM>", tile)
if os.path.isfile(src_path):
with open(src_path, 'rb') as f:
data = f.read()
if data:
if self.file_format == 'PNG':
if data.startswith(b'\x89PNG'):
return data
elif self.file_format == 'JPEG':
if data.startswith(b'\xff\xd8\xff'):
return data
elif self.file_format == 'WEBP':
if data[8:12] == b'WEBP':
return data
# We don't manage packed UDIM images, so we should never reach this point
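A small illustration of the tile path substitution used above; the path is made up, and the assumption is that this runs inside Blender where bpy is available.

import bpy

filepath_raw = "//textures/skin.<UDIM>.png"   # filepath_raw of a tiled image
tile = "1002"
src_path = bpy.path.abspath(filepath_raw).replace("<UDIM>", tile)
# e.g. "/project/textures/skin.1002.png"; the file is then read and returned
# unchanged only if its header matches the requested PNG / JPEG / WebP format.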
def _encode_temp_image(tmp_image: bpy.types.Image, file_format: str, export_settings) -> bytes: def _encode_temp_image(tmp_image: bpy.types.Image, file_format: str, export_settings) -> bytes:
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:

View File

@ -13,20 +13,27 @@ from ....io.com import gltf2_io_debug
from ....io.exp.gltf2_io_user_extensions import export_user_extensions from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ..gltf2_blender_gather_cache import cached from ..gltf2_blender_gather_cache import cached
from .extensions.gltf2_blender_image import Channel, ExportImage, FillImage from .extensions.gltf2_blender_image import Channel, ExportImage, FillImage
from .gltf2_blender_search_node_tree import get_texture_node_from_socket, NodeSocket from .gltf2_blender_search_node_tree import get_texture_node_from_socket, detect_anisotropy_nodes
@cached @cached
def gather_image( def gather_image(
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
default_sockets: typing.Tuple[bpy.types.NodeSocket], default_sockets: typing.Tuple[bpy.types.NodeSocket],
use_tile: bool,
export_settings): export_settings):
if not __filter_image(blender_shader_sockets, export_settings): if not __filter_image(blender_shader_sockets, export_settings):
return None, None, None return None, None, None, None
image_data, udim_image = __get_image_data(blender_shader_sockets, default_sockets, use_tile, export_settings)
if udim_image is not None:
# We are in a UDIM case, so we return no image data
# This will be used later to create multiple primitives/materials/textures with UDIM information
return None, None, None, udim_image
image_data = __get_image_data(blender_shader_sockets, default_sockets, export_settings)
if image_data.empty(): if image_data.empty():
# The export image has no data # The export image has no data
return None, None, None return None, None, None, None
mime_type = __gather_mime_type(blender_shader_sockets, image_data, export_settings) mime_type = __gather_mime_type(blender_shader_sockets, image_data, export_settings)
name = __gather_name(image_data, export_settings) name = __gather_name(image_data, export_settings)
@ -41,7 +48,7 @@ def gather_image(
# In case we can't retrieve image (for example packed images, with original moved) # In case we can't retrieve image (for example packed images, with original moved)
# We don't create invalid image without uri # We don't create invalid image without uri
factor_uri = None factor_uri = None
if uri is None: return None, None, None if uri is None: return None, None, None, False
buffer_view, factor_buffer_view = __gather_buffer_view(image_data, mime_type, name, export_settings) buffer_view, factor_buffer_view = __gather_buffer_view(image_data, mime_type, name, export_settings)
@ -60,7 +67,7 @@ def gather_image(
export_user_extensions('gather_image_hook', export_settings, image, blender_shader_sockets) export_user_extensions('gather_image_hook', export_settings, image, blender_shader_sockets)
# We also return image_data, as it can be used to generate same file with another extension for WebP management # We also return image_data, as it can be used to generate same file with another extension for WebP management
return image, image_data, factor return image, image_data, factor, None
def __gather_original_uri(original_uri, export_settings): def __gather_original_uri(original_uri, export_settings):
@ -119,14 +126,14 @@ def __gather_mime_type(sockets, export_image, export_settings):
return "image/webp" return "image/webp"
else: else:
# If we keep image as is (no channel composition), we need to keep original format (for WebP) # If we keep image as is (no channel composition), we need to keep original format (for WebP)
image = export_image.blender_image() image = export_image.blender_image(export_settings)
if image is not None and __is_blender_image_a_webp(image): if image is not None and __is_blender_image_a_webp(image):
return "image/webp" return "image/webp"
return "image/png" return "image/png"
if export_settings["gltf_image_format"] == "AUTO": if export_settings["gltf_image_format"] == "AUTO":
if export_image.original is None: # We are going to create a new image if export_image.original is None: # We are going to create a new image
image = export_image.blender_image() image = export_image.blender_image(export_settings)
else: else:
# Using original image # Using original image
image = export_image.original image = export_image.original
@ -187,17 +194,51 @@ def __gather_uri(image_data, mime_type, name, export_settings):
return None, None return None, None
def __get_image_data(sockets, default_sockets, export_settings) -> ExportImage: def __get_image_data(sockets, default_sockets, use_tile, export_settings) -> ExportImage:
# For shared resources, such as images, we just store the portion of data that is needed in the glTF property # For shared resources, such as images, we just store the portion of data that is needed in the glTF property
# in a helper class. During generation of the glTF in the exporter these will then be combined to actual binary # in a helper class. During generation of the glTF in the exporter these will then be combined to actual binary
# resources. # resources.
results = [get_texture_node_from_socket(socket, export_settings) for socket in sockets] results = [get_texture_node_from_socket(socket, export_settings) for socket in sockets]
# Check if we need a simple mapping or more complex calculation if use_tile is None:
# There is currently no complex calculation for any textures # First check whether the texture used is a UDIM (tiled) image
return __get_image_data_mapping(sockets, default_sockets, results, export_settings) # In that case, we return no texture data for now, and only record that this texture is UDIM
# This will be used later
if any([r.shader_node.image.source == "TILED" for r in results if r is not None and r.shader_node.image is not None]):
return ExportImage(), [r.shader_node.image for r in results if r is not None and r.shader_node.image is not None and r.shader_node.image.source == "TILED"][0]
def __get_image_data_mapping(sockets, default_sockets, results, export_settings) -> ExportImage: # If we are here, we are in UDIM split process
# Check if we need a simple mapping or more complex calculation
# Case of Anisotropy : It can be a complex node setup, or simple grayscale textures
# In case of complex node setup, this will be a direct mapping of channels
# But in case of grayscale textures, we need to combine them with numpy calculations
# So we need to check if we have a complex node setup or not
need_to_check_anisotropy = is_anisotropy = False
try:
anisotropy_socket = [s for s in sockets if s.socket.name == 'Anisotropic'][0]
anisotropy_rotation_socket = [s for s in sockets if s.socket.name == 'Anisotropic Rotation'][0]
anisotropy_tangent_socket = [s for s in sockets if s.socket.name == 'Tangent'][0]
need_to_check_anisotropy = True
except:
need_to_check_anisotropy = False
if need_to_check_anisotropy is True:
is_anisotropy, anisotropy_data = detect_anisotropy_nodes(
anisotropy_socket,
anisotropy_rotation_socket,
anisotropy_tangent_socket,
export_settings
)
if need_to_check_anisotropy is True and is_anisotropy is False:
# We are not in a complex node setup, so we can try to get the image data from grayscale textures
return __get_image_data_grayscale_anisotropy(sockets, results, export_settings), None
return __get_image_data_mapping(sockets, default_sockets, results, use_tile, export_settings), None
def __get_image_data_mapping(sockets, default_sockets, results, use_tile, export_settings) -> ExportImage:
""" """
Simple mapping Simple mapping
Will fit for most of exported textures : RoughnessMetallic, Basecolor, normal, ... Will fit for most of exported textures : RoughnessMetallic, Basecolor, normal, ...
@ -285,7 +326,10 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
dst_chan = Channel.A dst_chan = Channel.A
if dst_chan is not None: if dst_chan is not None:
if use_tile is None:
composed_image.fill_image(result.shader_node.image, dst_chan, src_chan) composed_image.fill_image(result.shader_node.image, dst_chan, src_chan)
else:
composed_image.fill_image_tile(result.shader_node.image, export_settings['current_udim_info']['tile'], dst_chan, src_chan)
# Since metal/roughness are always used together, make sure # Since metal/roughness are always used together, make sure
# the other channel is filled. # the other channel is filled.
@ -301,7 +345,10 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
composed_image.fill_white(Channel.B) composed_image.fill_white(Channel.B)
else: else:
# copy full image...eventually following sockets might overwrite things # copy full image...eventually following sockets might overwrite things
if use_tile is None:
composed_image = ExportImage.from_blender_image(result.shader_node.image) composed_image = ExportImage.from_blender_image(result.shader_node.image)
else:
composed_image = ExportImage.from_blender_image_tile(export_settings)
# Check that we don't have some empty channels (based on weird images without any size for example) # Check that we don't have some empty channels (based on weird images without any size for example)
keys = list(composed_image.fills.keys()) # do not loop on dict, we may have to delete an element keys = list(composed_image.fills.keys()) # do not loop on dict, we may have to delete an element
@ -315,6 +362,29 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
return composed_image return composed_image
def __get_image_data_grayscale_anisotropy(sockets, results, export_settings) -> ExportImage:
"""
Calculate the anisotropy texture from grayscale textures, setting the needed data
"""
from .extensions.gltf2_blender_gather_materials_anisotropy import grayscale_anisotropy_calculation
composed_image = ExportImage()
composed_image.set_calc(grayscale_anisotropy_calculation)
results = [get_texture_node_from_socket(socket, export_settings) for socket in sockets[:-1]] #No texture from tangent
mapping = {
0: "anisotropy",
1: "anisotropic_rotation",
}
for idx, result in enumerate(results):
if get_texture_node_from_socket(sockets[idx], export_settings):
composed_image.store_data(mapping[idx], result.shader_node.image, type="Image")
else:
composed_image.store_data(mapping[idx], sockets[idx].socket.default_value, type="Data")
return composed_image
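To clarify the identifier mapping consumed later by grayscale_anisotropy_calculation, a sketch of the equivalent store_data calls for a material where the Anisotropic socket has a texture but the rotation is a plain value; strength_image stands for any bpy.types.Image and the rotation value is made up.

composed_image = ExportImage()
composed_image.set_calc(grayscale_anisotropy_calculation)
composed_image.store_data("anisotropy", strength_image, type="Image")   # grayscale strength texture
composed_image.store_data("anisotropic_rotation", 0.25, type="Data")    # plain socket value, no texture
# encode() will later hand composed_image.stored to grayscale_anisotropy_calculation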
def __is_blender_image_a_jpeg(image: bpy.types.Image) -> bool: def __is_blender_image_a_jpeg(image: bpy.types.Image) -> bool:
if image.source != 'FILE': if image.source != 'FILE':
return False return False
@ -332,3 +402,39 @@ def __is_blender_image_a_webp(image: bpy.types.Image) -> bool:
else: else:
path = image.filepath_raw.lower() path = image.filepath_raw.lower()
return path.endswith('.webp') return path.endswith('.webp')
def get_gltf_image_from_blender_image(blender_image_name, export_settings):
image_data = ExportImage.from_blender_image(bpy.data.images[blender_image_name])
name = __gather_name(image_data, export_settings)
mime_type = __get_mime_type_of_image(blender_image_name, export_settings)
uri, _ = __gather_uri(image_data, mime_type, name, export_settings)
buffer_view, _ = __gather_buffer_view(image_data, mime_type, name, export_settings)
return gltf2_io.Image(
buffer_view=buffer_view,
extensions=None,
extras=None,
mime_type=mime_type,
name=name,
uri=uri
)
def __get_mime_type_of_image(blender_image_name, export_settings):
image = bpy.data.images[blender_image_name]
if image.channels == 4:
if __is_blender_image_a_webp(image):
return "image/webp"
return "image/png"
if export_settings["gltf_image_format"] == "AUTO":
if __is_blender_image_a_jpeg(image):
return "image/jpeg"
elif __is_blender_image_a_webp(image):
return "image/webp"
return "image/png"
elif export_settings["gltf_image_format"] == "JPEG":
return "image/jpeg"

View File

@ -21,12 +21,15 @@ from .extensions.gltf2_blender_gather_materials_sheen import export_sheen
from .extensions.gltf2_blender_gather_materials_specular import export_specular from .extensions.gltf2_blender_gather_materials_specular import export_specular
from .extensions.gltf2_blender_gather_materials_transmission import export_transmission from .extensions.gltf2_blender_gather_materials_transmission import export_transmission
from .extensions.gltf2_blender_gather_materials_clearcoat import export_clearcoat from .extensions.gltf2_blender_gather_materials_clearcoat import export_clearcoat
from .extensions.gltf2_blender_gather_materials_anisotropy import export_anisotropy
from .extensions.gltf2_blender_gather_materials_ior import export_ior from .extensions.gltf2_blender_gather_materials_ior import export_ior
from .gltf2_blender_search_node_tree import \ from .gltf2_blender_search_node_tree import \
has_image_node_from_socket, \ has_image_node_from_socket, \
get_socket_from_gltf_material_node, \ get_socket_from_gltf_material_node, \
get_socket, \ get_socket, \
get_node_socket, \ get_node_socket, \
get_material_nodes, \
NodeSocket, \
get_vertex_color_info get_vertex_color_info
@cached @cached
@ -48,21 +51,31 @@ def gather_material(blender_material, export_settings):
:return: a glTF material :return: a glTF material
""" """
if not __filter_material(blender_material, export_settings): if not __filter_material(blender_material, export_settings):
return None, {"uv_info": {}, "vc_info": {'color': None, 'alpha': None, 'color_type': None, 'alpha_type': None}} return None, {"uv_info": {}, "vc_info": {'color': None, 'alpha': None, 'color_type': None, 'alpha_type': None}, "udim_info": {}}
mat_unlit, uvmap_info, vc_info = __export_unlit(blender_material, export_settings) # Reset exported images / textures nodes
export_settings['exported_texture_nodes'] = []
if blender_material.node_tree and blender_material.use_nodes:
nodes = get_material_nodes(blender_material.node_tree, [blender_material], bpy.types.ShaderNodeTexImage)
else:
nodes = []
for node in nodes:
if node[0].get("used", None) is not None:
del(node[0]['used'])
mat_unlit, uvmap_info, vc_info, udim_info = __export_unlit(blender_material, export_settings)
if mat_unlit is not None: if mat_unlit is not None:
export_user_extensions('gather_material_hook', export_settings, mat_unlit, blender_material) export_user_extensions('gather_material_hook', export_settings, mat_unlit, blender_material)
return mat_unlit, {"uv_info": uvmap_info, "vc_info": vc_info} return mat_unlit, {"uv_info": uvmap_info, "vc_info": vc_info, "udim_info": udim_info}
orm_texture, default_sockets = __gather_orm_texture(blender_material, export_settings) orm_texture, default_sockets = __gather_orm_texture(blender_material, export_settings)
emissive_factor = __gather_emissive_factor(blender_material, export_settings) emissive_factor = __gather_emissive_factor(blender_material, export_settings)
emissive_texture, uvmap_info_emissive = __gather_emissive_texture(blender_material, export_settings) emissive_texture, uvmap_info_emissive, udim_info_emissive = __gather_emissive_texture(blender_material, export_settings)
extensions, uvmap_info_extensions = __gather_extensions(blender_material, emissive_factor, export_settings) extensions, uvmap_info_extensions, udim_info_extensions = __gather_extensions(blender_material, emissive_factor, export_settings)
normal_texture, uvmap_info_normal = __gather_normal_texture(blender_material, export_settings) normal_texture, uvmap_info_normal, udim_info_normal = __gather_normal_texture(blender_material, export_settings)
occlusion_texture, uvmap_info_occlusion = __gather_occlusion_texture(blender_material, orm_texture, default_sockets, export_settings) occlusion_texture, uvmap_info_occlusion, udim_occlusion = __gather_occlusion_texture(blender_material, orm_texture, default_sockets, export_settings)
pbr_metallic_roughness, uvmap_info_pbr_metallic_roughness, vc_info = __gather_pbr_metallic_roughness(blender_material, orm_texture, export_settings) pbr_metallic_roughness, uvmap_info_pbr_metallic_roughness, vc_info, udim_info_prb_mr = __gather_pbr_metallic_roughness(blender_material, orm_texture, export_settings)
if any([i>1.0 for i in emissive_factor or []]) is True: if any([i>1.0 for i in emissive_factor or []]) is True:
# Strength is set on extension # Strength is set on extension
@ -85,12 +98,51 @@ def gather_material(blender_material, export_settings):
) )
uvmap_infos = {} uvmap_infos = {}
udim_infos = {}
# Get all texture nodes that are not used in the material
if export_settings['gltf_unused_textures'] is True:
if blender_material.node_tree and blender_material.use_nodes:
nodes = get_material_nodes(blender_material.node_tree, [blender_material], bpy.types.ShaderNodeTexImage)
else:
nodes = []
cpt_additional = 0
for node in nodes:
if node[0].get("used", None) is not None:
del(node[0]['used'])
continue
s = NodeSocket(node[0].outputs[0], node[1])
tex, uv_info_additional, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(s, (s,), (), export_settings)
if tex is not None:
export_settings['exported_images'][node[0].image.name] = 1 # Fully used
uvmap_infos.update({'additional' + str(cpt_additional): uv_info_additional})
udim_infos.update({'additional' + str(cpt_additional): udim_info})
cpt_additional += 1
export_settings['additional_texture_export'].append(tex)
# Reset
if blender_material.node_tree and blender_material.use_nodes:
nodes = get_material_nodes(blender_material.node_tree, [blender_material], bpy.types.ShaderNodeTexImage)
else:
nodes = []
for node in nodes:
if node[0].get("used", None) is not None:
del(node[0]['used'])
uvmap_infos.update(uvmap_info_emissive) uvmap_infos.update(uvmap_info_emissive)
uvmap_infos.update(uvmap_info_extensions) uvmap_infos.update(uvmap_info_extensions)
uvmap_infos.update(uvmap_info_normal) uvmap_infos.update(uvmap_info_normal)
uvmap_infos.update(uvmap_info_occlusion) uvmap_infos.update(uvmap_info_occlusion)
uvmap_infos.update(uvmap_info_pbr_metallic_roughness) uvmap_infos.update(uvmap_info_pbr_metallic_roughness)
udim_infos = {}
udim_infos.update(udim_info_prb_mr)
udim_infos.update(udim_info_normal)
udim_infos.update(udim_info_emissive)
udim_infos.update(udim_occlusion)
udim_infos.update(udim_info_extensions)
# If emissive is set, from an emissive node (not PBR) # If emissive is set, from an emissive node (not PBR)
# We need to set manually default values for # We need to set manually default values for
@ -100,10 +152,10 @@ def gather_material(blender_material, export_settings):
export_user_extensions('gather_material_hook', export_settings, material, blender_material) export_user_extensions('gather_material_hook', export_settings, material, blender_material)
return material, {"uv_info": uvmap_infos, "vc_info": vc_info} return material, {"uv_info": uvmap_infos, "vc_info": vc_info, "udim_info": udim_infos}
def __get_new_material_texture_shared(base, node): def get_new_material_texture_shared(base, node):
if node is None: if node is None:
return return
if callable(node) is True: if callable(node) is True:
@ -115,12 +167,12 @@ def __get_new_material_texture_shared(base, node):
else: else:
if hasattr(node, '__dict__'): if hasattr(node, '__dict__'):
for attr, value in node.__dict__.items(): for attr, value in node.__dict__.items():
__get_new_material_texture_shared(getattr(base, attr), value) get_new_material_texture_shared(getattr(base, attr), value)
else: else:
# For extensions (on a dict) # For extensions (on a dict)
if type(node).__name__ == 'dict': if type(node).__name__ == 'dict':
for i in node.keys(): for i in node.keys():
__get_new_material_texture_shared(base[i], node[i]) get_new_material_texture_shared(base[i], node[i])
def __filter_material(blender_material, export_settings): def __filter_material(blender_material, export_settings):
return export_settings['gltf_materials'] return export_settings['gltf_materials']
@ -162,19 +214,22 @@ def __gather_extensions(blender_material, emissive_factor, export_settings):
extensions = {} extensions = {}
uvmap_infos = {} uvmap_infos = {}
udim_infos = {}
# KHR_materials_clearcoat # KHR_materials_clearcoat
clearcoat_extension, uvmap_info = export_clearcoat(blender_material, export_settings) clearcoat_extension, uvmap_info, udim_info_clearcoat = export_clearcoat(blender_material, export_settings)
if clearcoat_extension: if clearcoat_extension:
extensions["KHR_materials_clearcoat"] = clearcoat_extension extensions["KHR_materials_clearcoat"] = clearcoat_extension
uvmap_infos.update(uvmap_infos) uvmap_infos.update(uvmap_info)
udim_infos.update(udim_info_clearcoat)
# KHR_materials_transmission # KHR_materials_transmission
transmission_extension, uvmap_info = export_transmission(blender_material, export_settings) transmission_extension, uvmap_info, udim_info_transmission = export_transmission(blender_material, export_settings)
if transmission_extension: if transmission_extension:
extensions["KHR_materials_transmission"] = transmission_extension extensions["KHR_materials_transmission"] = transmission_extension
uvmap_infos.update(uvmap_infos) uvmap_infos.update(uvmap_info)
udim_infos.update(udim_info_transmission)
# KHR_materials_emissive_strength # KHR_materials_emissive_strength
if any([i>1.0 for i in emissive_factor or []]): if any([i>1.0 for i in emissive_factor or []]):
@ -184,22 +239,32 @@ def __gather_extensions(blender_material, emissive_factor, export_settings):
# KHR_materials_volume # KHR_materials_volume
volume_extension, uvmap_info = export_volume(blender_material, export_settings) volume_extension, uvmap_info, udim_info = export_volume(blender_material, export_settings)
if volume_extension: if volume_extension:
extensions["KHR_materials_volume"] = volume_extension extensions["KHR_materials_volume"] = volume_extension
uvmap_infos.update(uvmap_info) uvmap_infos.update(uvmap_info)
udim_infos.update(udim_info)
# KHR_materials_specular # KHR_materials_specular
specular_extension, uvmap_info = export_specular(blender_material, export_settings) specular_extension, uvmap_info, udim_info = export_specular(blender_material, export_settings)
if specular_extension: if specular_extension:
extensions["KHR_materials_specular"] = specular_extension extensions["KHR_materials_specular"] = specular_extension
uvmap_infos.update(uvmap_info) uvmap_infos.update(uvmap_info)
udim_infos.update(udim_info)
# KHR_materials_sheen # KHR_materials_sheen
sheen_extension, uvmap_info = export_sheen(blender_material, export_settings) sheen_extension, uvmap_info, udim_info = export_sheen(blender_material, export_settings)
if sheen_extension: if sheen_extension:
extensions["KHR_materials_sheen"] = sheen_extension extensions["KHR_materials_sheen"] = sheen_extension
uvmap_infos.update(uvmap_info) uvmap_infos.update(uvmap_info)
udim_infos.update(udim_info)
# KHR_materials_anisotropy
anisotropy_extension, uvmap_info, udim_info = export_anisotropy(blender_material, export_settings)
if anisotropy_extension:
extensions["KHR_materials_anisotropy"] = anisotropy_extension
uvmap_infos.update(uvmap_info)
udim_infos.update(udim_info)
# KHR_materials_ior # KHR_materials_ior
# Keep this extension at the end, because we export it only if some others are exported # Keep this extension at the end, because we export it only if some others are exported
@ -207,7 +272,7 @@ def __gather_extensions(blender_material, emissive_factor, export_settings):
if ior_extension: if ior_extension:
extensions["KHR_materials_ior"] = ior_extension extensions["KHR_materials_ior"] = ior_extension
return extensions, uvmap_infos return extensions, uvmap_infos, udim_infos
def __gather_extras(blender_material, export_settings): def __gather_extras(blender_material, export_settings):
@ -222,11 +287,11 @@ def __gather_name(blender_material, export_settings):
def __gather_normal_texture(blender_material, export_settings): def __gather_normal_texture(blender_material, export_settings):
normal = get_socket(blender_material, "Normal") normal = get_socket(blender_material, "Normal")
normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class( normal_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class(
normal, normal,
(normal,), (normal,),
export_settings) export_settings)
return normal_texture, {"normalTexture" : uvmap_info} return normal_texture, {"normalTexture" : uvmap_info}, {'normalTexture': udim_info } if len(udim_info.keys()) > 0 else {}
def __gather_orm_texture(blender_material, export_settings): def __gather_orm_texture(blender_material, export_settings):
@ -270,7 +335,7 @@ def __gather_orm_texture(blender_material, export_settings):
return None, () return None, ()
# Double-check this will pass the filter in texture_info # Double-check this will pass the filter in texture_info
info, _, _ = gltf2_blender_gather_texture_info.gather_texture_info(result[0], result, default_sockets, export_settings) info, _, _, _ = gltf2_blender_gather_texture_info.gather_texture_info(result[0], result, default_sockets, export_settings)
if info is None: if info is None:
return None, () return None, ()
@ -280,13 +345,15 @@ def __gather_occlusion_texture(blender_material, orm_texture, default_sockets, e
occlusion = get_socket(blender_material, "Occlusion") occlusion = get_socket(blender_material, "Occlusion")
if occlusion.socket is None: if occlusion.socket is None:
occlusion = get_socket_from_gltf_material_node(blender_material, "Occlusion") occlusion = get_socket_from_gltf_material_node(blender_material, "Occlusion")
occlusion_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_occlusion_texture_info_class( if occlusion.socket is None:
return None, {}, {}
occlusion_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_material_occlusion_texture_info_class(
occlusion, occlusion,
orm_texture or (occlusion,), orm_texture or (occlusion,),
default_sockets, default_sockets,
export_settings) export_settings)
return occlusion_texture, \ return occlusion_texture, \
{"occlusionTexture" : uvmap_info} {"occlusionTexture" : uvmap_info}, {'occlusionTexture': udim_info } if len(udim_info.keys()) > 0 else {}
def __gather_pbr_metallic_roughness(blender_material, orm_texture, export_settings): def __gather_pbr_metallic_roughness(blender_material, orm_texture, export_settings):
@ -300,9 +367,9 @@ def __export_unlit(blender_material, export_settings):
info = gltf2_unlit.detect_shadeless_material(blender_material, export_settings) info = gltf2_unlit.detect_shadeless_material(blender_material, export_settings)
if info is None: if info is None:
return None, {}, {"color": None, "alpha": None, "color_type": None, "alpha_type": None} return None, {}, {"color": None, "alpha": None, "color_type": None, "alpha_type": None}, {}
base_color_texture, uvmap_info = gltf2_unlit.gather_base_color_texture(info, export_settings) base_color_texture, uvmap_info, udim_info = gltf2_unlit.gather_base_color_texture(info, export_settings)
vc_info = get_vertex_color_info(info.get('rgb_socket'), info.get('alpha_socket'), export_settings) vc_info = get_vertex_color_info(info.get('rgb_socket'), info.get('alpha_socket'), export_settings)
@ -331,7 +398,7 @@ def __export_unlit(blender_material, export_settings):
export_user_extensions('gather_material_unlit_hook', export_settings, material, blender_material) export_user_extensions('gather_material_unlit_hook', export_settings, material, blender_material)
return material, uvmap_info, vc_info return material, uvmap_info, vc_info, udim_info
def get_active_uvmap_index(blender_mesh): def get_active_uvmap_index(blender_mesh):
# retrieve active render UVMap # retrieve active render UVMap
@ -348,9 +415,13 @@ def get_final_material(mesh, blender_material, attr_indices, base_material, uvma
# First, we need to calculate all index of UVMap # First, we need to calculate all index of UVMap
indices = {} indices = {}
additional_indices = 0
for m, v in uvmap_info.items(): for m, v in uvmap_info.items():
if m.startswith("additional") and additional_indices <= int(m[10:]):
additional_indices += 1
if not 'type' in v.keys(): if not 'type' in v.keys():
continue continue
@ -367,7 +438,7 @@ def get_final_material(mesh, blender_material, attr_indices, base_material, uvma
indices[m] = attr_indices[v['value']] indices[m] = attr_indices[v['value']]
# Now we have all needed indices, let's create a set that can be used for caching, so containing all possible textures # Now we have all needed indices, let's create a set that can be used for caching, so containing all possible textures
all_textures = get_all_textures() all_textures = get_all_textures(additional_indices)
caching_indices = [] caching_indices = []
for tex in all_textures: for tex in all_textures:
@ -398,44 +469,51 @@ def __get_final_material_with_indices(blender_material, base_material, caching_i
return base_material return base_material
material = deepcopy(base_material) material = deepcopy(base_material)
__get_new_material_texture_shared(base_material, material) get_new_material_texture_shared(base_material, material)
for tex, ind in zip(get_all_textures(), caching_indices): for tex, ind in zip(get_all_textures(len(caching_indices) - len(get_all_textures())), caching_indices):
if ind is None: if ind is None:
continue continue
# Need to check if texture is not None, because it can be None for non-managed UDIM textures
if tex == "emissiveTexture": if tex == "emissiveTexture":
material.emissive_texture.tex_coord = ind if material.emissive_texture: material.emissive_texture.tex_coord = ind
elif tex == "normalTexture": elif tex == "normalTexture":
material.normal_texture.tex_coord = ind if material.normal_texture: material.normal_texture.tex_coord = ind
elif tex == "occlusionTexture": elif tex == "occlusionTexture":
material.occlusion_texture.tex_coord = ind if material.occlusion_texture: material.occlusion_texture.tex_coord = ind
elif tex == "baseColorTexture": elif tex == "baseColorTexture":
material.pbr_metallic_roughness.base_color_texture.tex_coord = ind if material.pbr_metallic_roughness.base_color_texture: material.pbr_metallic_roughness.base_color_texture.tex_coord = ind
elif tex == "metallicRoughnessTexture": elif tex == "metallicRoughnessTexture":
material.pbr_metallic_roughness.metallic_roughness_texture.tex_coord = ind if material.pbr_metallic_roughness.metallic_roughness_texture: material.pbr_metallic_roughness.metallic_roughness_texture.tex_coord = ind
elif tex == "clearcoatTexture": elif tex == "clearcoatTexture":
material.extensions["KHR_materials_clearcoat"].extension['clearcoatTexture'].tex_coord = ind if material.extensions["KHR_materials_clearcoat"].extension['clearcoatTexture']: material.extensions["KHR_materials_clearcoat"].extension['clearcoatTexture'].tex_coord = ind
elif tex == "clearcoatRoughnessTexture": elif tex == "clearcoatRoughnessTexture":
material.extensions["KHR_materials_clearcoat"].extension['clearcoatRoughnessTexture'].tex_coord = ind if material.extensions["KHR_materials_clearcoat"].extension['clearcoatRoughnessTexture']: material.extensions["KHR_materials_clearcoat"].extension['clearcoatRoughnessTexture'].tex_coord = ind
elif tex == "clearcoatNormalTexture": elif tex == "clearcoatNormalTexture":
material.extensions["KHR_materials_clearcoat"].extension['clearcoatNormalTexture'].tex_coord = ind if material.extensions["KHR_materials_clearcoat"].extension['clearcoatNormalTexture']: material.extensions["KHR_materials_clearcoat"].extension['clearcoatNormalTexture'].tex_coord = ind
elif tex == "transmissionTexture": elif tex == "transmissionTexture":
material.extensions["KHR_materials_transmission"].extension['transmissionTexture'].tex_coord = ind if material.extensions["KHR_materials_transmission"].extension['transmissionTexture']: material.extensions["KHR_materials_transmission"].extension['transmissionTexture'].tex_coord = ind
elif tex == "specularTexture": elif tex == "specularTexture":
material.extensions["KHR_materials_specular"].extension['specularTexture'].tex_coord = ind if material.extensions["KHR_materials_specular"].extension['specularTexture']: material.extensions["KHR_materials_specular"].extension['specularTexture'].tex_coord = ind
elif tex == "specularColorTexture": elif tex == "specularColorTexture":
material.extensions["KHR_materials_specular"].extension['specularColorTexture'].tex_coord = ind if material.extensions["KHR_materials_specular"].extension['specularColorTexture']: material.extensions["KHR_materials_specular"].extension['specularColorTexture'].tex_coord = ind
elif tex == "sheenColorTexture": elif tex == "sheenColorTexture":
material.extensions["KHR_materials_sheen"].extension['sheenColorTexture'].tex_coord = ind if material.extensions["KHR_materials_sheen"].extension['sheenColorTexture']: material.extensions["KHR_materials_sheen"].extension['sheenColorTexture'].tex_coord = ind
elif tex == "sheenRoughnessTexture": elif tex == "sheenRoughnessTexture":
material.extensions["KHR_materials_sheen"].extension['sheenRoughnessTexture'].tex_coord = ind if material.extensions["KHR_materials_sheen"].extension['sheenRoughnessTexture']: material.extensions["KHR_materials_sheen"].extension['sheenRoughnessTexture'].tex_coord = ind
elif tex == "thicknessTexture": elif tex == "thicknessTexture":
material.extensions["KHR_materials_volume"].extension['thicknessTexture'].tex_coord = ind if material.extensions["KHR_materials_volume"].extension['thicknessTexture']: material.extensions["KHR_materials_volume"].extension['thicknessTexture'].tex_coord = ind
elif tex == "anisotropyTexture":
if material.extensions["KHR_materials_anisotropy"].extension['anisotropyTexture']: material.extensions["KHR_materials_anisotropy"].extension['anisotropyTexture'].tex_coord = ind
elif tex.startswith("additional"):
export_settings['additional_texture_export'][export_settings['additional_texture_export_current_idx'] + int(tex[10:])].tex_coord = ind
else: else:
print_console("ERROR", "some Textures tex coord are not managed") print_console("ERROR", "some Textures tex coord are not managed")
export_settings['additional_texture_export_current_idx'] = len(export_settings['additional_texture_export'])
return material return material
@ -450,7 +528,16 @@ def get_material_from_idx(material_idx, materials, export_settings):
def get_base_material(material_idx, materials, export_settings): def get_base_material(material_idx, materials, export_settings):
material = None material = None
material_info = {"uv_info": {}, "vc_info": {"color": None, "alpha": None, "color_type": None, "alpha_type": None}} material_info = {
"uv_info": {},
"vc_info": {
"color": None,
"alpha": None,
"color_type": None,
"alpha_type": None
},
"udim_info": {}
}
mat = get_material_from_idx(material_idx, materials, export_settings) mat = get_material_from_idx(material_idx, materials, export_settings)
if mat is not None: if mat is not None:
@ -460,7 +547,7 @@ def get_base_material(material_idx, materials, export_settings):
) )
return material, material_info return material, material_info
def get_all_textures(): def get_all_textures(idx=0):
# Make sure to have all texture here, always in same order # Make sure to have all texture here, always in same order
tab = [] tab = []
@ -478,5 +565,9 @@ def get_all_textures():
tab.append("sheenColorTexture") tab.append("sheenColorTexture")
tab.append("sheenRoughnessTexture") tab.append("sheenRoughnessTexture")
tab.append("thicknessTexture") tab.append("thicknessTexture")
tab.append("anisotropyTexture")
for i in range(idx):
tab.append("additional" + str(i))
return tab return tab
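For readers following the tex_coord caching above: the fixed slot order returned by get_all_textures() is zipped against the cached per-material indices, with any trailing "additionalN" slots recovered from the length difference. A small standalone sketch of that pairing, with a shortened illustrative slot list rather than the exporter's full one:

BASE_SLOTS = [
    "emissiveTexture", "normalTexture", "occlusionTexture",
    "baseColorTexture", "metallicRoughnessTexture",
    "thicknessTexture", "anisotropyTexture",
]

def all_slots(additional_count=0):
    # Same idea as get_all_textures(idx): a stable base order, then "additionalN" slots.
    return BASE_SLOTS + ["additional%d" % i for i in range(additional_count)]

def pair_slots_with_indices(caching_indices):
    # Mirrors len(caching_indices) - len(get_all_textures()): the number of extra
    # slots is whatever does not fit in the base list.
    extra = max(0, len(caching_indices) - len(BASE_SLOTS))
    return [(slot, idx) for slot, idx in zip(all_slots(extra), caching_indices)
            if idx is not None]

# Example: seven base slots plus two extra image nodes exported for this material.
print(pair_slots_with_indices([0, None, 0, 1, 1, None, 0, 2, 2]))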

View File

@ -20,14 +20,17 @@ from .gltf2_blender_search_node_tree import \
@cached @cached
def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings): def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings):
if not __filter_pbr_material(blender_material, export_settings): if not __filter_pbr_material(blender_material, export_settings):
return None, {}, {'color': None, 'alpha': None, 'color_type': None, 'alpha_type': None} return None, {}, {'color': None, 'alpha': None, 'color_type': None, 'alpha_type': None}, {}
uvmap_infos = {} uvmap_infos = {}
udim_infos = {}
base_color_texture, uvmap_info, _ = __gather_base_color_texture(blender_material, export_settings) base_color_texture, uvmap_info, udim_info_bc, _ = __gather_base_color_texture(blender_material, export_settings)
uvmap_infos.update(uvmap_info) uvmap_infos.update(uvmap_info)
metallic_roughness_texture, uvmap_info, _ = __gather_metallic_roughness_texture(blender_material, orm_texture, export_settings) udim_infos.update(udim_info_bc)
metallic_roughness_texture, uvmap_info, udim_info_mr, _ = __gather_metallic_roughness_texture(blender_material, orm_texture, export_settings)
uvmap_infos.update(uvmap_info) uvmap_infos.update(uvmap_info)
udim_infos.update(udim_info_mr)
base_color_factor, vc_info = __gather_base_color_factor(blender_material, export_settings) base_color_factor, vc_info = __gather_base_color_factor(blender_material, export_settings)
@ -43,7 +46,7 @@ def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export
export_user_extensions('gather_material_pbr_metallic_roughness_hook', export_settings, material, blender_material, orm_texture) export_user_extensions('gather_material_pbr_metallic_roughness_hook', export_settings, material, blender_material, orm_texture)
return material, uvmap_infos, vc_info return material, uvmap_infos, vc_info, udim_infos
def __filter_pbr_material(blender_material, export_settings): def __filter_pbr_material(blender_material, export_settings):
@ -103,10 +106,10 @@ def __gather_base_color_texture(blender_material, export_settings):
if socket.socket is not None and has_image_node_from_socket(socket, export_settings) if socket.socket is not None and has_image_node_from_socket(socket, export_settings)
) )
if not inputs: if not inputs:
return None, {}, None return None, {}, {}, None
tex, uvmap_info, factor = gather_texture_info(inputs[0], inputs, (), export_settings) tex, uvmap_info, udim_info, factor = gather_texture_info(inputs[0], inputs, (), export_settings)
return tex, {'baseColorTexture': uvmap_info}, factor return tex, {'baseColorTexture': uvmap_info}, {'baseColorTexture': udim_info} if len(udim_info.keys()) > 0 else {}, factor
def __gather_extensions(blender_material, export_settings): def __gather_extensions(blender_material, export_settings):
@ -143,7 +146,7 @@ def __gather_metallic_roughness_texture(blender_material, orm_texture, export_se
if not hasMetal and not hasRough: if not hasMetal and not hasRough:
metallic_roughness = get_socket_from_gltf_material_node(blender_material, "MetallicRoughness") metallic_roughness = get_socket_from_gltf_material_node(blender_material, "MetallicRoughness")
if metallic_roughness is None or not has_image_node_from_socket(metallic_roughness, export_settings): if metallic_roughness is None or not has_image_node_from_socket(metallic_roughness, export_settings):
return None, {}, None return None, {}, {}, None
elif not hasMetal: elif not hasMetal:
texture_input = (roughness_socket,) texture_input = (roughness_socket,)
default_sockets = (metallic_socket.socket,) default_sockets = (metallic_socket.socket,)
@ -154,14 +157,14 @@ def __gather_metallic_roughness_texture(blender_material, orm_texture, export_se
texture_input = (metallic_socket, roughness_socket) texture_input = (metallic_socket, roughness_socket)
default_sockets = () default_sockets = ()
tex, uvmap_info, factor = gather_texture_info( tex, uvmap_info, udim_info, factor = gather_texture_info(
texture_input[0], texture_input[0],
orm_texture or texture_input, orm_texture or texture_input,
default_sockets, default_sockets,
export_settings, export_settings,
) )
return tex, {'metallicRoughnessTexture': uvmap_info}, factor return tex, {'metallicRoughnessTexture': uvmap_info}, {'metallicRoughnessTexture' : udim_info} if len(udim_info.keys()) > 0 else {}, factor
def __gather_roughness_factor(blender_material, export_settings): def __gather_roughness_factor(blender_material, export_settings):
if not blender_material.use_nodes: if not blender_material.use_nodes:
@ -185,5 +188,3 @@ def get_default_pbr_for_emissive_node():
metallic_roughness_texture=None, metallic_roughness_texture=None,
roughness_factor=None roughness_factor=None
) )

View File

@ -135,12 +135,12 @@ def gather_base_color_texture(info, export_settings):
# because gather_image determines how to pack images based on the # because gather_image determines how to pack images based on the
# names of sockets, and the names are hard-coded to a Principled # names of sockets, and the names are hard-coded to a Principled
# style graph. # style graph.
unlit_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info( unlit_texture, uvmap_info, udim_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
sockets[0], sockets[0],
sockets, sockets,
(), (),
export_settings, export_settings,
) )
return unlit_texture, {'baseColorTexture': uvmap_info} return unlit_texture, {'baseColorTexture': uvmap_info}, {'baseColorTexture':udim_info} if len(udim_info.keys()) > 0 else {}
return None, {} return None, {}, {}
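The pattern {name: udim_info} if len(udim_info.keys()) > 0 else {} recurs for every texture slot in this commit; a tiny helper expressing the same guard (purely illustrative, the exporter keeps it inline):

def wrap_udim_info(slot_name, udim_info):
    # Record the slot only when there is UDIM data, so empty dicts never
    # end up in the merged udim_infos mapping.
    return {slot_name: udim_info} if udim_info else {}

merged = {}
merged.update(wrap_udim_info("baseColorTexture", {"udim": True}))
merged.update(wrap_udim_info("normalTexture", {}))  # skipped: no UDIM data
print(merged)  # {'baseColorTexture': {'udim': True}}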

View File

@ -21,6 +21,7 @@ from . import gltf2_blender_gather_image
def gather_texture( def gather_texture(
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket], blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
default_sockets, default_sockets,
use_tile: bool,
export_settings): export_settings):
""" """
Gather texture sampling information and image channels from a blender shader texture attached to a shader socket. Gather texture sampling information and image channels from a blender shader texture attached to a shader socket.
@ -31,9 +32,9 @@ def gather_texture(
""" """
if not __filter_texture(blender_shader_sockets, export_settings): if not __filter_texture(blender_shader_sockets, export_settings):
return None, None return None, None, False
source, webp_image, image_data, factor = __gather_source(blender_shader_sockets, default_sockets, export_settings) source, webp_image, image_data, factor, udim_image = __gather_source(blender_shader_sockets, default_sockets, use_tile, export_settings)
exts, remove_source = __gather_extensions(blender_shader_sockets, source, webp_image, image_data, export_settings) exts, remove_source = __gather_extensions(blender_shader_sockets, source, webp_image, image_data, export_settings)
@ -48,11 +49,11 @@ def gather_texture(
# although valid, most viewers can't handle missing source properties # although valid, most viewers can't handle missing source properties
# This can have None source for "keep original", when original can't be found # This can have None source for "keep original", when original can't be found
if texture.source is None and remove_source is False: if texture.source is None and remove_source is False:
return None, None return None, None, udim_image
export_user_extensions('gather_texture_hook', export_settings, texture, blender_shader_sockets) export_user_extensions('gather_texture_hook', export_settings, texture, blender_shader_sockets)
return texture, factor return texture, factor, udim_image
def __filter_texture(blender_shader_sockets, export_settings): def __filter_texture(blender_shader_sockets, export_settings):
@ -196,8 +197,8 @@ def __gather_sampler(blender_shader_sockets, export_settings):
export_settings) export_settings)
def __gather_source(blender_shader_sockets, default_sockets, export_settings): def __gather_source(blender_shader_sockets, default_sockets, use_tile, export_settings):
source, image_data, factor = gltf2_blender_gather_image.gather_image(blender_shader_sockets, default_sockets, export_settings) source, image_data, factor, udim_image = gltf2_blender_gather_image.gather_image(blender_shader_sockets, default_sockets, use_tile, export_settings)
if export_settings['gltf_keep_original_textures'] is False \ if export_settings['gltf_keep_original_textures'] is False \
@ -207,7 +208,7 @@ def __gather_source(blender_shader_sockets, default_sockets, export_settings):
if export_settings['gltf_webp_fallback'] is False: if export_settings['gltf_webp_fallback'] is False:
# Already managed in __gather_extensions # Already managed in __gather_extensions
return source, None, image_data, factor return source, None, image_data, factor, udim_image
else: else:
# Need to create a PNG texture # Need to create a PNG texture
@ -231,7 +232,7 @@ def __gather_source(blender_shader_sockets, default_sockets, export_settings):
png_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings) png_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings)
# We inverted the png & WebP image, to have the png as main source # We inverted the png & WebP image, to have the png as main source
return png_image, source, image_data, factor return png_image, source, image_data, factor, udim_image
return source, None, image_data, factor return source, None, image_data, factor, udim_image
# Helpers # Helpers

View File

@ -47,11 +47,15 @@ def __gather_texture_info_helper(
filter_type: str, filter_type: str,
export_settings): export_settings):
if not __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings): if not __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings):
return None, {}, None return None, {}, {}, None
tex_transform, uvmap_info = __gather_texture_transform_and_tex_coord(primary_socket, export_settings) tex_transform, uvmap_info = __gather_texture_transform_and_tex_coord(primary_socket, export_settings)
index, factor = __gather_index(blender_shader_sockets, default_sockets, export_settings) index, factor, udim_image = __gather_index(blender_shader_sockets, default_sockets, None, export_settings)
if udim_image is not None:
udim_info = {'udim': udim_image is not None, 'image': udim_image, 'sockets': blender_shader_sockets}
else:
udim_info = {}
fields = { fields = {
'extensions': __gather_extensions(tex_transform, export_settings), 'extensions': __gather_extensions(tex_transform, export_settings),
@ -72,11 +76,43 @@ def __gather_texture_info_helper(
texture_info = gltf2_io.MaterialOcclusionTextureInfoClass(**fields) texture_info = gltf2_io.MaterialOcclusionTextureInfoClass(**fields)
if texture_info.index is None: if texture_info.index is None:
return None, {}, None return None, {} if udim_image is None else uvmap_info, udim_info, None
export_user_extensions('gather_texture_info_hook', export_settings, texture_info, blender_shader_sockets) export_user_extensions('gather_texture_info_hook', export_settings, texture_info, blender_shader_sockets)
return texture_info, uvmap_info, factor return texture_info, uvmap_info, udim_info, factor
def gather_udim_texture_info(
primary_socket: bpy.types.NodeSocket,
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
udim_info,
tex,
export_settings):
tex_transform, _ = __gather_texture_transform_and_tex_coord(primary_socket, export_settings)
export_settings['current_udim_info'] = udim_info
index, _, _ = __gather_index(blender_shader_sockets, (), udim_info['image'].name + str(udim_info['tile']), export_settings)
export_settings['current_udim_info'] = {}
fields = {
'extensions': __gather_extensions(tex_transform, export_settings),
'extras': __gather_extras(blender_shader_sockets, export_settings),
'index': index,
'tex_coord': None # This will be set later, as some data are dependent on the mesh or object
}
if tex in ["normalTexture", "clearcoatNormalTexture"]:
fields['scale'] = __gather_normal_scale(primary_socket, export_settings)
texture_info = gltf2_io.MaterialNormalTextureInfoClass(**fields)
elif tex == "occlusionTexture":
fields['strength'] = __gather_occlusion_strength(primary_socket, export_settings)
texture_info = gltf2_io.MaterialOcclusionTextureInfoClass(**fields)
else:
texture_info = gltf2_io.TextureInfo(**fields)
export_user_extensions('gather_udim_texture_info_hook', export_settings, texture_info, blender_shader_sockets)
return texture_info
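The udim_info dictionaries passed into gather_udim_texture_info() carry the tiled image, the sockets it was found on, and the tile number being exported. The field names below are taken from the accesses above; the per-tile expansion loop and the mocked image are only a hedged illustration of how a caller might use them:

# Mocked stand-in for a bpy.types.Image with source == 'TILED'; only the fields
# accessed above (name, tiles[n].number) are modelled.
class FakeUDIMImage:
    name = "BaseColor"
    class Tile:
        def __init__(self, number):
            self.number = number
    tiles = [Tile(1001), Tile(1002), Tile(1003)]

udim_image = FakeUDIMImage()
base_info = {"udim": True, "image": udim_image, "sockets": ()}

# One glTF texture per tile: extend the dict with the tile number, the same key
# gather_udim_texture_info() reads via udim_info['tile'].
for tile in udim_image.tiles:
    info = dict(base_info, tile=tile.number)
    print(info["image"].name + str(info["tile"]))  # e.g. "BaseColor1001"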
def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings): def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings):
@ -146,9 +182,9 @@ def __gather_occlusion_strength(primary_socket, export_settings):
return None return None
def __gather_index(blender_shader_sockets, default_sockets, export_settings): def __gather_index(blender_shader_sockets, default_sockets, use_tile, export_settings):
# We just put the actual shader into the 'index' member # We just put the actual shader into the 'index' member
return gltf2_blender_gather_texture.gather_texture(blender_shader_sockets, default_sockets, export_settings) return gltf2_blender_gather_texture.gather_texture(blender_shader_sockets, default_sockets, use_tile, export_settings)
def __gather_texture_transform_and_tex_coord(primary_socket, export_settings): def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
@ -161,6 +197,8 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
result_tex = get_texture_node_from_socket(primary_socket, export_settings) result_tex = get_texture_node_from_socket(primary_socket, export_settings)
blender_shader_node = result_tex.shader_node blender_shader_node = result_tex.shader_node
blender_shader_node['used'] = True
# Skip over UV wrapping stuff (it goes in the sampler) # Skip over UV wrapping stuff (it goes in the sampler)
result = detect_manual_uv_wrapping(blender_shader_node, result_tex.group_path) result = detect_manual_uv_wrapping(blender_shader_node, result_tex.group_path)
if result: if result:

View File

@ -115,6 +115,10 @@ def from_socket(start_socket: NodeTreeSearchResult,
if start_socket.socket is None: if start_socket.socket is None:
return [] return []
# Search if direct node of the socket matches the filter
if shader_node_filter(start_socket.socket.node):
return [NodeTreeSearchResult(start_socket.socket.node, [], start_socket.group_path.copy())]
return __search_from_socket(start_socket.socket, shader_node_filter, [], start_socket.group_path) return __search_from_socket(start_socket.socket, shader_node_filter, [], start_socket.group_path)
@cached @cached
@ -347,6 +351,7 @@ def get_texture_transform_from_mapping_node(mapping_node):
return None return None
mapping_transform = {} mapping_transform = {}
if mapping_node.node.vector_type != "VECTOR":
mapping_transform["offset"] = [mapping_node.node.inputs['Location'].default_value[0], mapping_node.node.inputs['Location'].default_value[1]] mapping_transform["offset"] = [mapping_node.node.inputs['Location'].default_value[0], mapping_node.node.inputs['Location'].default_value[1]]
mapping_transform["rotation"] = mapping_node.node.inputs['Rotation'].default_value[2] mapping_transform["rotation"] = mapping_node.node.inputs['Rotation'].default_value[2]
mapping_transform["scale"] = [mapping_node.node.inputs['Scale'].default_value[0], mapping_node.node.inputs['Scale'].default_value[1]] mapping_transform["scale"] = [mapping_node.node.inputs['Scale'].default_value[0], mapping_node.node.inputs['Scale'].default_value[1]]
@ -492,3 +497,122 @@ def get_attribute_name(socket, export_settings):
return True, None, True return True, None, True
return False, None, None return False, None, None
def detect_anisotropy_nodes(
anisotropy_socket,
anisotropy_rotation_socket,
anisotropy_tangent_socket,
export_settings):
"""
Detects if the material uses anisotropy and returns the corresponding data.
:param anisotropy_socket: the anisotropy socket
:param anisotropy_rotation_socket: the anisotropy rotation socket
:param anisotropy_tangent_socket: the anisotropy tangent socket
:param export_settings: the export settings
:return: a tuple (is_anisotropy, anisotropy_data)
"""
if anisotropy_socket.socket is None:
return False, None
if anisotropy_rotation_socket.socket is None:
return False, None
if anisotropy_tangent_socket.socket is None:
return False, None
# Check that tangent is linked to a tangent node, with UVMap as input
tangent_node = previous_node(anisotropy_tangent_socket)
if tangent_node.node is None or tangent_node.node.type != "TANGENT":
return False, None
if tangent_node.node.direction_type != "UV_MAP":
return False, None
# Check that anisotropy is linked to a multiply node
if not anisotropy_socket.socket.is_linked:
return False, None
if not anisotropy_rotation_socket.socket.is_linked:
return False, None
if not anisotropy_tangent_socket.socket.is_linked:
return False, None
anisotropy_multiply_node = anisotropy_socket.socket.links[0].from_node
if anisotropy_multiply_node is None or anisotropy_multiply_node.type != "MATH":
return False, None
if anisotropy_multiply_node.operation != "MULTIPLY":
return False, None
# this multiply node should have the first input linked to separate XYZ, on Z
if not anisotropy_multiply_node.inputs[0].is_linked:
return False, None
separate_xyz_node = anisotropy_multiply_node.inputs[0].links[0].from_node
if separate_xyz_node is None or separate_xyz_node.type != "SEPXYZ":
return False, None
separate_xyz_z_socket = anisotropy_multiply_node.inputs[0].links[0].from_socket
if separate_xyz_z_socket.name != "Z":
return False, None
# This separate XYZ node output should be linked to ArcTan2 node (X on inputs[1], Y on inputs[0])
if not separate_xyz_node.outputs[0].is_linked:
return False, None
arctan2_node = separate_xyz_node.outputs[0].links[0].to_node
if arctan2_node.type != "MATH":
return False, None
if arctan2_node.operation != "ARCTAN2":
return False, None
if arctan2_node.inputs[0].links[0].from_socket.name != "Y":
return False, None
if arctan2_node.inputs[1].links[0].from_socket.name != "X":
return False, None
# This arctan2 node output should be linked to anisotropy rotation (Math add node)
if not arctan2_node.outputs[0].is_linked:
return False, None
anisotropy_rotation_node = arctan2_node.outputs[0].links[0].to_node
if anisotropy_rotation_node.type != "MATH":
return False, None
if anisotropy_rotation_node.operation != "ADD":
return False, None
# This anisotropy rotation node should have the output linked to rotation conversion node
if not anisotropy_rotation_node.outputs[0].is_linked:
return False, None
rotation_conversion_node = anisotropy_rotation_node.outputs[0].links[0].to_node
if rotation_conversion_node.type != "MATH":
return False, None
if rotation_conversion_node.operation != "DIVIDE":
return False, None
# This rotation conversion node should have 2*pi (6.283185) as its second input value
if abs(rotation_conversion_node.inputs[1].default_value - 6.283185) > 0.0001:
return False, None
# This rotation conversion node should have the output linked to anisotropy rotation socket of Principled BSDF
if not rotation_conversion_node.outputs[0].is_linked:
return False, None
if rotation_conversion_node.outputs[0].links[0].to_socket.name != "Anisotropic Rotation":
return False, None
if rotation_conversion_node.outputs[0].links[0].to_node.type != "BSDF_PRINCIPLED":
return False, None
# Separate XYZ node should have the input linked to anisotropy multiply Add node (for normalization)
if not separate_xyz_node.inputs[0].is_linked:
return False, None
anisotropy_multiply_add_node = separate_xyz_node.inputs[0].links[0].from_node
if anisotropy_multiply_add_node.type != "VECT_MATH":
return False, None
if anisotropy_multiply_add_node.operation != "MULTIPLY_ADD":
return False, None
if list(anisotropy_multiply_add_node.inputs[1].default_value) != [2.0, 2.0, 1.0]:
return False, None
if list(anisotropy_multiply_add_node.inputs[2].default_value) != [-1.0, -1.0, 0.0]:
return False, None
if not anisotropy_multiply_add_node.inputs[0].is_linked:
return False, None
# This anisotropy multiply Add node should have the first input linked to a texture node
anisotropy_texture_node = anisotropy_multiply_add_node.inputs[0].links[0].from_node
if anisotropy_texture_node.type != "TEX_IMAGE":
return False, None
tex_ok = has_image_node_from_socket(NodeSocket(anisotropy_multiply_add_node.inputs[0], anisotropy_socket.group_path), export_settings)
if tex_ok is False:
return False, None
return True, {
'anisotropyStrength': get_const_from_socket(NodeSocket(anisotropy_multiply_node.inputs[1], anisotropy_socket.group_path), 'VALUE'),
'anisotropyRotation': get_const_from_socket(NodeSocket(anisotropy_rotation_node.inputs[1], anisotropy_socket.group_path), 'VALUE'),
'tangent': tangent_node.node.uv_map,
'tex_socket': NodeSocket(anisotropy_multiply_add_node.inputs[0], anisotropy_socket.group_path),
}
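Put together, the node chain validated above encodes the anisotropy convention used here: the texture's RG channels are remapped from [0, 1] to a direction in [-1, 1], its B channel scales the strength, and the direction's angle plus the rotation constant is wrapped into Blender's 0-1 Anisotropic Rotation by the divide-by-2*pi node. A hedged numeric sketch of that relationship (pure Python, not exporter code; the function name is made up):

from math import atan2, pi

def principled_inputs_from_anisotropy_texel(r, g, b, strength, rotation):
    # Multiply-Add node: (2, 2, 1) * rgb + (-1, -1, 0) -> direction x, y and raw strength z
    x, y, z = 2.0 * r - 1.0, 2.0 * g - 1.0, b
    # ArcTan2(Y, X) -> Add rotation -> Divide by 2*pi, as checked on the node graph above
    anisotropic_rotation = (atan2(y, x) + rotation) / (2.0 * pi)
    # Multiply node: strength channel of the texture times anisotropyStrength
    anisotropic = z * strength
    return anisotropic, anisotropic_rotation

# A texel pointing along +U at full strength, with no extra rotation:
print(principled_inputs_from_anisotropy_texel(1.0, 0.5, 1.0, 0.8, 0.0))  # (0.8, 0.0)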

View File

@ -0,0 +1,116 @@
# SPDX-FileCopyrightText: 2018-2023 The glTF-Blender-IO authors
#
# SPDX-License-Identifier: Apache-2.0
from ...io.com.gltf2_io import TextureInfo
from .gltf2_blender_texture import texture
from ..com.gltf2_blender_conversion import get_anisotropy_rotation_gltf_to_blender
from math import pi
from mathutils import Vector
def anisotropy(
mh,
location,
anisotropy_socket,
anisotropy_rotation_socket,
anisotropy_tangent_socket
):
if anisotropy_socket is None or anisotropy_rotation_socket is None or anisotropy_tangent_socket is None:
return
x, y = location
try:
ext = mh.pymat.extensions['KHR_materials_anisotropy']
except Exception:
return
anisotropy_strength = ext.get('anisotropyStrength', 0)
anisotropy_rotation = ext.get('anisotropyRotation', 0)
tex_info = ext.get('anisotropyTexture')
if tex_info is not None:
tex_info = TextureInfo.from_dict(tex_info)
# We are going to use UVMap of Normal map if it exists, as input for the anisotropy tangent
if tex_info is None:
anisotropy_socket.default_value = anisotropy_strength
anisotropy_rotation_socket.default_value = get_anisotropy_rotation_gltf_to_blender(anisotropy_rotation)
return
# Tangent node
node = mh.node_tree.nodes.new('ShaderNodeTangent')
node.direction_type = "UV_MAP"
node.location = x - 180, y - 200
uv_idx = tex_info.tex_coord or 0
# Get the UVMap of the normal map if available (otherwise keep the first available UVMap, uv_idx = 0)
tex_info_normal = mh.pymat.normal_texture
if tex_info_normal is not None:
try:
uv_idx = tex_info.extensions['KHR_texture_transform']['texCoord']
except Exception:
pass
node.uv_map = 'UVMap' if uv_idx == 0 else 'UVMap.%03d' % uv_idx
mh.node_tree.links.new(anisotropy_tangent_socket, node.outputs['Tangent'])
# Multiply node
multiply_node = mh.node_tree.nodes.new('ShaderNodeMath')
multiply_node.label = 'Anisotropy strength'
multiply_node.operation = 'MULTIPLY'
multiply_node.location = x - 180, y + 200
mh.node_tree.links.new(anisotropy_socket, multiply_node.outputs[0])
multiply_node.inputs[1].default_value = anisotropy_strength
# Divide node
divide_node = mh.node_tree.nodes.new('ShaderNodeMath')
divide_node.label = 'Rotation conversion'
divide_node.operation = 'DIVIDE'
divide_node.location = x - 180, y
mh.node_tree.links.new(anisotropy_rotation_socket, divide_node.outputs[0])
divide_node.inputs[1].default_value = 2 * pi
# Rotation node
rotation_node = mh.node_tree.nodes.new('ShaderNodeMath')
rotation_node.label = 'Anisotropy rotation'
rotation_node.operation = 'ADD'
rotation_node.location = x - 180*2, y
mh.node_tree.links.new(divide_node.inputs[0], rotation_node.outputs[0])
rotation_node.inputs[1].default_value = anisotropy_rotation
# ArcTan node
arctan_node = mh.node_tree.nodes.new('ShaderNodeMath')
arctan_node.label = 'ArcTan2'
arctan_node.operation = 'ARCTAN2'
arctan_node.location = x - 180*3, y
mh.node_tree.links.new(rotation_node.inputs[0], arctan_node.outputs[0])
# Separate XYZ
sep_node = mh.node_tree.nodes.new('ShaderNodeSeparateXYZ')
sep_node.location = x - 180*4, y
mh.node_tree.links.new(arctan_node.inputs[0], sep_node.outputs[1])
mh.node_tree.links.new(arctan_node.inputs[1], sep_node.outputs[0])
mh.node_tree.links.new(multiply_node.inputs[0], sep_node.outputs[2])
# Multiply add node
multiply_add_node = mh.node_tree.nodes.new('ShaderNodeVectorMath')
multiply_add_node.location = x - 180*5, y
multiply_add_node.operation = 'MULTIPLY_ADD'
multiply_add_node.inputs[1].default_value = Vector((2, 2, 1))
multiply_add_node.inputs[2].default_value = Vector((-1, -1, 0))
mh.node_tree.links.new(sep_node.inputs[0], multiply_add_node.outputs[0])
# Texture
texture(
mh,
tex_info=tex_info,
label='ANISOTROPY',
location=(x - 180*6, y),
is_data=True,
color_socket=multiply_add_node.inputs[0]
)
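The import chain above mirrors the exporter's detection: get_anisotropy_rotation_gltf_to_blender() and the Divide node both map the glTF rotation, stored in radians, onto Blender's Anisotropic Rotation input, which is assumed here to be a fraction of a full turn. A minimal sketch under that assumption, with hypothetical helper names:

from math import pi

def gltf_rotation_to_blender(rotation_radians):
    # Assumption: Blender's Principled "Anisotropic Rotation" expects turns (0..1),
    # while the glTF extension stores radians.
    return rotation_radians / (2.0 * pi)

def blender_rotation_to_gltf(rotation_turns):
    return rotation_turns * 2.0 * pi

assert abs(gltf_rotation_to_blender(pi) - 0.5) < 1e-9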

View File

@ -458,6 +458,9 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
attribute_type[attr] attribute_type[attr]
) )
if blender_attribute_data_type is None:
continue
blender_attribute = mesh.attributes.new(attr, blender_attribute_data_type, 'POINT') blender_attribute = mesh.attributes.new(attr, blender_attribute_data_type, 'POINT')
if DataType.num_elements(attribute_type[attr]) == 1: if DataType.num_elements(attribute_type[attr]) == 1:
blender_attribute.data.foreach_set('value', attribute_data[idx].flatten()) blender_attribute.data.foreach_set('value', attribute_data[idx].flatten())

View File

@ -15,6 +15,7 @@ from .gltf2_blender_KHR_materials_ior import ior
from .gltf2_blender_KHR_materials_volume import volume from .gltf2_blender_KHR_materials_volume import volume
from .gltf2_blender_KHR_materials_specular import specular from .gltf2_blender_KHR_materials_specular import specular
from .gltf2_blender_KHR_materials_sheen import sheen from .gltf2_blender_KHR_materials_sheen import sheen
from .gltf2_blender_KHR_materials_anisotropy import anisotropy
class MaterialHelper: class MaterialHelper:
"""Helper class. Stores material stuff to be passed around everywhere.""" """Helper class. Stores material stuff to be passed around everywhere."""
@ -158,6 +159,14 @@ def pbr_metallic_roughness(mh: MaterialHelper):
specular_tint_socket=pbr_node.inputs['Specular Tint'] specular_tint_socket=pbr_node.inputs['Specular Tint']
) )
anisotropy(
mh,
location=locs['anisotropy'],
anisotropy_socket=pbr_node.inputs['Anisotropic'],
anisotropy_rotation_socket=pbr_node.inputs['Anisotropic Rotation'],
anisotropy_tangent_socket=pbr_node.inputs['Tangent']
)
sheen( sheen(
mh, mh,
location_sheenTint=locs['sheenColorTexture'], location_sheenTint=locs['sheenColorTexture'],
@ -201,6 +210,11 @@ def calc_locations(mh):
except: except:
specular_ext = {} specular_ext = {}
try:
anisotropy_ext = mh.pymat.extensions['KHR_materials_anisotropy']
except:
anisotropy_ext = {}
try: try:
sheen_ext = mh.pymat.extensions['KHR_materials_sheen'] sheen_ext = mh.pymat.extensions['KHR_materials_sheen']
except: except:
@ -224,6 +238,9 @@ def calc_locations(mh):
locs['specularColorTexture'] = (x, y) locs['specularColorTexture'] = (x, y)
if 'specularColorTexture' in specular_ext: if 'specularColorTexture' in specular_ext:
y -= height y -= height
locs['anisotropy'] = (x, y)
if 'anisotropyTexture' in anisotropy_ext:
y -= height
locs['sheenRoughnessTexture'] = (x, y) locs['sheenRoughnessTexture'] = (x, y)
if 'sheenRoughnessTexture' in sheen_ext: if 'sheenRoughnessTexture' in sheen_ext:
y -= height y -= height

View File

@ -55,7 +55,8 @@ class glTFImporter():
'KHR_materials_sheen', 'KHR_materials_sheen',
'KHR_materials_ior', 'KHR_materials_ior',
'KHR_materials_volume', 'KHR_materials_volume',
'EXT_texture_webp' 'EXT_texture_webp',
'KHR_materials_anisotropy'
] ]
# Add extensions required supported by custom import extensions # Add extensions required supported by custom import extensions

View File

@ -5,8 +5,8 @@
bl_info = { bl_info = {
"name": "Node Wrangler", "name": "Node Wrangler",
"author": "Bartek Skorupa, Greg Zaal, Sebastian Koenig, Christian Brinkmann, Florian Meyer", "author": "Bartek Skorupa, Greg Zaal, Sebastian Koenig, Christian Brinkmann, Florian Meyer",
"version": (3, 46), "version": (3, 47),
"blender": (3, 6, 0), "blender": (4, 0, 0),
"location": "Node Editor Toolbar or Shift-W", "location": "Node Editor Toolbar or Shift-W",
"description": "Various tools to enhance and speed up node-based workflow", "description": "Various tools to enhance and speed up node-based workflow",
"warning": "", "warning": "",

View File

@ -20,10 +20,6 @@ def drawlayout(context, layout, mode='non-panel'):
col.menu(NWMergeNodesMenu.bl_idname) col.menu(NWMergeNodesMenu.bl_idname)
col.separator() col.separator()
col = layout.column(align=True)
col.menu(NWSwitchNodeTypeMenu.bl_idname, text="Switch Node Type")
col.separator()
if tree_type == 'ShaderNodeTree': if tree_type == 'ShaderNodeTree':
col = layout.column(align=True) col = layout.column(align=True)
col.operator(operators.NWAddTextureSetup.bl_idname, text="Add Texture Setup", icon='NODE_SEL') col.operator(operators.NWAddTextureSetup.bl_idname, text="Add Texture Setup", icon='NODE_SEL')
@ -385,32 +381,8 @@ class NWSwitchNodeTypeMenu(Menu, NWBase):
def draw(self, context): def draw(self, context):
layout = self.layout layout = self.layout
categories = [c for c in node_categories_iter(context) layout.label(text="This operator is removed due to the changes of node menus.", icon='ERROR')
if c.name not in ['Group', 'Script']] layout.label(text="A native implementation of the function is expected in the future.")
for cat in categories:
idname = f"NODE_MT_nw_switch_{cat.identifier}_submenu"
if hasattr(bpy.types, idname):
layout.menu(idname)
else:
layout.label(text="Unable to load altered node lists.")
layout.label(text="Please re-enable Node Wrangler.")
break
def draw_switch_category_submenu(self, context):
layout = self.layout
if self.category.name == 'Layout':
for node in self.category.items(context):
if node.nodetype != 'NodeFrame':
props = layout.operator(operators.NWSwitchNodeType.bl_idname, text=node.label)
props.to_type = node.nodetype
else:
for node in self.category.items(context):
if isinstance(node, NodeItemCustom):
node.draw(self, layout, context)
continue
props = layout.operator(operators.NWSwitchNodeType.bl_idname, text=node.label)
props.to_type = node.nodetype
# #
# APPENDAGES TO EXISTING UI # APPENDAGES TO EXISTING UI

View File

@ -914,195 +914,6 @@ class NWReloadImages(Operator):
return {'CANCELLED'} return {'CANCELLED'}
class NWSwitchNodeType(Operator, NWBase):
"""Switch type of selected nodes """
bl_idname = "node.nw_swtch_node_type"
bl_label = "Switch Node Type"
bl_options = {'REGISTER', 'UNDO'}
to_type: StringProperty(
name="Switch to type",
default='',
)
def execute(self, context):
to_type = self.to_type
if len(to_type) == 0:
return {'CANCELLED'}
nodes, links = get_nodes_links(context)
# Those types of nodes will not swap.
src_excludes = ('NodeFrame')
# Those attributes of nodes will be copied if possible
attrs_to_pass = ('color', 'hide', 'label', 'mute', 'parent',
'show_options', 'show_preview', 'show_texture',
'use_alpha', 'use_clamp', 'use_custom_color', 'location'
)
selected = [n for n in nodes if n.select]
reselect = []
for node in [n for n in selected if
n.rna_type.identifier not in src_excludes and
n.rna_type.identifier != to_type]:
new_node = nodes.new(to_type)
for attr in attrs_to_pass:
if hasattr(node, attr) and hasattr(new_node, attr):
setattr(new_node, attr, getattr(node, attr))
# set image datablock of dst to image of src
if hasattr(node, 'image') and hasattr(new_node, 'image'):
if node.image:
new_node.image = node.image
# Special cases
if new_node.type == 'SWITCH':
new_node.hide = True
# Dictionaries: src_sockets and dst_sockets:
# 'INPUTS': input sockets ordered by type (entry 'MAIN' main type of inputs).
# 'OUTPUTS': output sockets ordered by type (entry 'MAIN' main type of outputs).
# in 'INPUTS' and 'OUTPUTS':
# 'SHADER', 'RGBA', 'VECTOR', 'VALUE' - sockets of those types.
# socket entry:
# (index_in_type, socket_index, socket_name, socket_default_value, socket_links)
src_sockets = {
'INPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE': [], 'MAIN': None},
'OUTPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE': [], 'MAIN': None},
}
dst_sockets = {
'INPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE': [], 'MAIN': None},
'OUTPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE': [], 'MAIN': None},
}
types_order_one = 'SHADER', 'RGBA', 'VECTOR', 'VALUE'
types_order_two = 'SHADER', 'VECTOR', 'RGBA', 'VALUE'
# check src node to set src_sockets values and dst node to set dst_sockets dict values
for sockets, nd in ((src_sockets, node), (dst_sockets, new_node)):
# Check node's inputs and outputs and fill proper entries in "sockets" dict
for in_out, in_out_name in ((nd.inputs, 'INPUTS'), (nd.outputs, 'OUTPUTS')):
# enumerate in inputs, then in outputs
# find name, default value and links of socket
for i, socket in enumerate(in_out):
the_name = socket.name
dval = None
# Not every socket, especially in outputs has "default_value"
if hasattr(socket, 'default_value'):
dval = socket.default_value
socket_links = []
for lnk in socket.links:
socket_links.append(lnk)
# check type of socket to fill proper keys.
for the_type in types_order_one:
if socket.type == the_type:
# create values for sockets['INPUTS'][the_type] and sockets['OUTPUTS'][the_type]
# entry structure: (index_in_type, socket_index, socket_name,
# socket_default_value, socket_links)
sockets[in_out_name][the_type].append(
(len(sockets[in_out_name][the_type]), i, the_name, dval, socket_links))
# Check which of the types in inputs/outputs is considered to be "main".
# Set values of sockets['INPUTS']['MAIN'] and sockets['OUTPUTS']['MAIN']
for type_check in types_order_one:
if sockets[in_out_name][type_check]:
sockets[in_out_name]['MAIN'] = type_check
break
matches = {
'INPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE_NAME': [], 'VALUE': [], 'MAIN': []},
'OUTPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE_NAME': [], 'VALUE': [], 'MAIN': []},
}
for inout, soctype in (
('INPUTS', 'MAIN',),
('INPUTS', 'SHADER',),
('INPUTS', 'RGBA',),
('INPUTS', 'VECTOR',),
('INPUTS', 'VALUE',),
('OUTPUTS', 'MAIN',),
('OUTPUTS', 'SHADER',),
('OUTPUTS', 'RGBA',),
('OUTPUTS', 'VECTOR',),
('OUTPUTS', 'VALUE',),
):
if src_sockets[inout][soctype] and dst_sockets[inout][soctype]:
if soctype == 'MAIN':
sc = src_sockets[inout][src_sockets[inout]['MAIN']]
dt = dst_sockets[inout][dst_sockets[inout]['MAIN']]
else:
sc = src_sockets[inout][soctype]
dt = dst_sockets[inout][soctype]
# start with 'dt' to determine number of possibilities.
for i, soc in enumerate(dt):
# if src main has enough entries - match them with dst main sockets by indexes.
if len(sc) > i:
matches[inout][soctype].append(((sc[i][1], sc[i][3]), (soc[1], soc[3])))
# add 'VALUE_NAME' criterion to inputs.
if inout == 'INPUTS' and soctype == 'VALUE':
for s in sc:
if s[2] == soc[2]: # if names match
# append src (index, dval), dst (index, dval)
matches['INPUTS']['VALUE_NAME'].append(((s[1], s[3]), (soc[1], soc[3])))
# When src ['INPUTS']['MAIN'] is 'VECTOR' replace 'MAIN' with matches VECTOR if possible.
# This creates better links when relinking textures.
if src_sockets['INPUTS']['MAIN'] == 'VECTOR' and matches['INPUTS']['VECTOR']:
matches['INPUTS']['MAIN'] = matches['INPUTS']['VECTOR']
# Pass default values and RELINK:
for tp in ('MAIN', 'SHADER', 'RGBA', 'VECTOR', 'VALUE_NAME', 'VALUE'):
# INPUTS: Base on matches in proper order.
for (src_i, src_dval), (dst_i, dst_dval) in matches['INPUTS'][tp]:
# pass dvals
if src_dval and dst_dval and tp in {'RGBA', 'VALUE_NAME'}:
new_node.inputs[dst_i].default_value = src_dval
# Special case: switch to math
if node.type in {'MIX_RGB', 'ALPHAOVER', 'ZCOMBINE'} and\
new_node.type == 'MATH' and\
tp == 'MAIN':
new_dst_dval = max(src_dval[0], src_dval[1], src_dval[2])
new_node.inputs[dst_i].default_value = new_dst_dval
if node.type == 'MIX_RGB':
if node.blend_type in [o[0] for o in operations]:
new_node.operation = node.blend_type
# Special case: switch from math to some types
if node.type == 'MATH' and\
new_node.type in {'MIX_RGB', 'ALPHAOVER', 'ZCOMBINE'} and\
tp == 'MAIN':
for i in range(3):
new_node.inputs[dst_i].default_value[i] = src_dval
if new_node.type == 'MIX_RGB':
if node.operation in [t[0] for t in blend_types]:
new_node.blend_type = node.operation
# Set Fac of MIX_RGB to 1.0
new_node.inputs[0].default_value = 1.0
# make link only when dst matching input is not linked already.
if node.inputs[src_i].links and not new_node.inputs[dst_i].links:
in_src_link = node.inputs[src_i].links[0]
in_dst_socket = new_node.inputs[dst_i]
connect_sockets(in_src_link.from_socket, in_dst_socket)
links.remove(in_src_link)
# OUTPUTS: Base on matches in proper order.
for (src_i, src_dval), (dst_i, dst_dval) in matches['OUTPUTS'][tp]:
for out_src_link in node.outputs[src_i].links:
out_dst_socket = new_node.outputs[dst_i]
connect_sockets(out_dst_socket, out_src_link.to_socket)
# relink rest inputs if possible, no criteria
for src_inp in node.inputs:
for dst_inp in new_node.inputs:
if src_inp.links and not dst_inp.links:
src_link = src_inp.links[0]
connect_sockets(src_link.from_socket, dst_inp)
links.remove(src_link)
# relink rest outputs if possible, base on node kind if any left.
for src_o in node.outputs:
for out_src_link in src_o.links:
for dst_o in new_node.outputs:
if src_o.type == dst_o.type:
connect_sockets(dst_o, out_src_link.to_socket)
# relink rest outputs no criteria if any left. Link all from first output.
for src_o in node.outputs:
for out_src_link in src_o.links:
if new_node.outputs:
connect_sockets(new_node.outputs[0], out_src_link.to_socket)
nodes.remove(node)
force_update(context)
return {'FINISHED'}
class NWMergeNodes(Operator, NWBase): class NWMergeNodes(Operator, NWBase):
bl_idname = "node.nw_merge_nodes" bl_idname = "node.nw_merge_nodes"
bl_label = "Merge Nodes" bl_label = "Merge Nodes"
@ -1960,7 +1771,7 @@ class NWAddPrincipledSetup(Operator, NWBase, ImportHelper):
['Roughness', rough_abbr + gloss_abbr, None], ['Roughness', rough_abbr + gloss_abbr, None],
['Normal', normal_abbr + bump_abbr, None], ['Normal', normal_abbr + bump_abbr, None],
['Transmission Weight', tags.transmission.split(' '), None], ['Transmission Weight', tags.transmission.split(' '), None],
['Emission', tags.emission.split(' '), None], ['Emission Color', tags.emission.split(' '), None],
['Alpha', tags.alpha.split(' '), None], ['Alpha', tags.alpha.split(' '), None],
['Ambient Occlusion', tags.ambient_occlusion.split(' '), None], ['Ambient Occlusion', tags.ambient_occlusion.split(' '), None],
] ]
@ -2076,8 +1887,8 @@ class NWAddPrincipledSetup(Operator, NWBase, ImportHelper):
# This is a simple connection Texture --> Input slot # This is a simple connection Texture --> Input slot
link = connect_sockets(active_node.inputs[sname[0]], texture_node.outputs[0]) link = connect_sockets(active_node.inputs[sname[0]], texture_node.outputs[0])
# Use non-color for all but 'Base Color' Textures # Use non-color except for color inputs
if not sname[0] in ['Base Color', 'Emission'] and texture_node.image: if sname[0] not in ['Base Color', 'Emission Color'] and texture_node.image:
texture_node.image.colorspace_settings.is_data = True texture_node.image.colorspace_settings.is_data = True
else: else:
@ -2976,7 +2787,6 @@ classes = (
NWPreviewNode, NWPreviewNode,
NWFrameSelected, NWFrameSelected,
NWReloadImages, NWReloadImages,
NWSwitchNodeType,
NWMergeNodes, NWMergeNodes,
NWBatchChangeNodes, NWBatchChangeNodes,
NWChangeMixFactor, NWChangeMixFactor,

View File

@ -162,7 +162,6 @@ class NWNodeWrangler(bpy.types.AddonPreferences):
# #
# REGISTER/UNREGISTER CLASSES AND KEYMAP ITEMS # REGISTER/UNREGISTER CLASSES AND KEYMAP ITEMS
# #
switch_category_menus = []
addon_keymaps = [] addon_keymaps = []
# kmi_defs entry: (identifier, key, action, CTRL, SHIFT, ALT, props, nice name) # kmi_defs entry: (identifier, key, action, CTRL, SHIFT, ALT, props, nice name)
# props entry: (property name, property value) # props entry: (property name, property value)
@ -392,28 +391,8 @@ def register():
setattr(kmi.properties, prop, value) setattr(kmi.properties, prop, value)
addon_keymaps.append((km, kmi)) addon_keymaps.append((km, kmi))
# switch submenus
switch_category_menus.clear()
for cat in node_categories_iter(None):
if cat.name not in ['Group', 'Script']:
idname = f"NODE_MT_nw_switch_{cat.identifier}_submenu"
switch_category_type = type(idname, (bpy.types.Menu,), {
"bl_space_type": 'NODE_EDITOR',
"bl_label": cat.name,
"category": cat,
"poll": cat.poll,
"draw": interface.draw_switch_category_submenu,
})
switch_category_menus.append(switch_category_type)
bpy.utils.register_class(switch_category_type)
def unregister(): def unregister():
for cat_types in switch_category_menus:
bpy.utils.unregister_class(cat_types)
switch_category_menus.clear()
# keymaps # keymaps
for km, kmi in addon_keymaps: for km, kmi in addon_keymaps:

View File

@ -6,8 +6,8 @@ bl_info = {
"name": "Collection Manager", "name": "Collection Manager",
"description": "Manage collections and their objects", "description": "Manage collections and their objects",
"author": "Ryan Inch", "author": "Ryan Inch",
"version": (2, 24, 8), "version": (2, 24, 9),
"blender": (3, 0, 0), "blender": (4, 0, 0),
"location": "View3D - Object Mode (Shortcut - M)", "location": "View3D - Object Mode (Shortcut - M)",
"warning": '', # used for warning icon and text in addons panel "warning": '', # used for warning icon and text in addons panel
"doc_url": "{BLENDER_MANUAL_URL}/addons/interface/collection_manager.html", "doc_url": "{BLENDER_MANUAL_URL}/addons/interface/collection_manager.html",

View File

@ -50,7 +50,7 @@ def get_tool_text(self):
return self["tool_text_color"] return self["tool_text_color"]
else: else:
color = bpy.context.preferences.themes[0].user_interface.wcol_tool.text color = bpy.context.preferences.themes[0].user_interface.wcol_tool.text
self["tool_text_color"] = color.r, color.g, color.b self["tool_text_color"] = color[0], color[1], color[2]
return self["tool_text_color"] return self["tool_text_color"]
def set_tool_text(self, values): def set_tool_text(self, values):
@ -62,7 +62,7 @@ def get_tool_text_sel(self):
return self["tool_text_sel_color"] return self["tool_text_sel_color"]
else: else:
color = bpy.context.preferences.themes[0].user_interface.wcol_tool.text_sel color = bpy.context.preferences.themes[0].user_interface.wcol_tool.text_sel
self["tool_text_sel_color"] = color.r, color.g, color.b self["tool_text_sel_color"] = color[0], color[1], color[2]
return self["tool_text_sel_color"] return self["tool_text_sel_color"]
def set_tool_text_sel(self, values): def set_tool_text_sel(self, values):
@ -98,11 +98,11 @@ def get_tool_outline(self):
return self["tool_outline_color"] return self["tool_outline_color"]
else: else:
color = bpy.context.preferences.themes[0].user_interface.wcol_tool.outline color = bpy.context.preferences.themes[0].user_interface.wcol_tool.outline
self["tool_outline_color"] = color.r, color.g, color.b self["tool_outline_color"] = color[0], color[1], color[2], color[3]
return self["tool_outline_color"] return self["tool_outline_color"]
def set_tool_outline(self, values): def set_tool_outline(self, values):
self["tool_outline_color"] = values[0], values[1], values[2] self["tool_outline_color"] = values[0], values[1], values[2], values[3]
def get_menu_back_text(self): def get_menu_back_text(self):
@ -110,7 +110,7 @@ def get_menu_back_text(self):
return self["menu_back_text_color"] return self["menu_back_text_color"]
else: else:
color = bpy.context.preferences.themes[0].user_interface.wcol_menu_back.text color = bpy.context.preferences.themes[0].user_interface.wcol_menu_back.text
self["menu_back_text_color"] = color.r, color.g, color.b self["menu_back_text_color"] = color[0], color[1], color[2]
return self["menu_back_text_color"] return self["menu_back_text_color"]
def set_menu_back_text(self, values): def set_menu_back_text(self, values):
@ -134,11 +134,11 @@ def get_menu_back_outline(self):
return self["menu_back_outline_color"] return self["menu_back_outline_color"]
else: else:
color = bpy.context.preferences.themes[0].user_interface.wcol_menu_back.outline color = bpy.context.preferences.themes[0].user_interface.wcol_menu_back.outline
self["menu_back_outline_color"] = color.r, color.g, color.b self["menu_back_outline_color"] = color[0], color[1], color[2], color[3]
return self["menu_back_outline_color"] return self["menu_back_outline_color"]
def set_menu_back_outline(self, values): def set_menu_back_outline(self, values):
self["menu_back_outline_color"] = values[0], values[1], values[2] self["menu_back_outline_color"] = values[0], values[1], values[2], values[3]
def get_tooltip_text(self): def get_tooltip_text(self):
@ -146,7 +146,7 @@ def get_tooltip_text(self):
return self["tooltip_text_color"] return self["tooltip_text_color"]
else: else:
color = bpy.context.preferences.themes[0].user_interface.wcol_tooltip.text color = bpy.context.preferences.themes[0].user_interface.wcol_tooltip.text
self["tooltip_text_color"] = color.r, color.g, color.b self["tooltip_text_color"] = color[0], color[1], color[2]
return self["tooltip_text_color"] return self["tooltip_text_color"]
def set_tooltip_text(self, values): def set_tooltip_text(self, values):
@ -170,11 +170,11 @@ def get_tooltip_outline(self):
return self["tooltip_outline_color"] return self["tooltip_outline_color"]
else: else:
color = bpy.context.preferences.themes[0].user_interface.wcol_tooltip.outline color = bpy.context.preferences.themes[0].user_interface.wcol_tooltip.outline
self["tooltip_outline_color"] = color.r, color.g, color.b self["tooltip_outline_color"] = color[0], color[1], color[2], color[3]
return self["tooltip_outline_color"] return self["tooltip_outline_color"]
def set_tooltip_outline(self, values): def set_tooltip_outline(self, values):
self["tooltip_outline_color"] = values[0], values[1], values[2] self["tooltip_outline_color"] = values[0], values[1], values[2], values[3]
class CMPreferences(AddonPreferences): class CMPreferences(AddonPreferences):
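The theme getters above change because widget outline colors carry an alpha channel in Blender 4.0, so the cached id-properties switch from RGB triples to full RGBA tuples and the draw code further down no longer appends its own alpha. A minimal sketch of the same getter/setter pattern on a stand-alone preferences class (names here are illustrative, not Collection Manager's):

import bpy
from bpy.types import AddonPreferences
from bpy.props import FloatVectorProperty

def _get_outline(self):
    if self.get("outline_color") is None:
        # Fall back to the current theme; in 4.0 the outline color has four channels.
        color = bpy.context.preferences.themes[0].user_interface.wcol_tool.outline
        self["outline_color"] = color[0], color[1], color[2], color[3]
    return self["outline_color"]

def _set_outline(self, values):
    self["outline_color"] = values[0], values[1], values[2], values[3]

class DemoPreferences(AddonPreferences):
    bl_idname = __package__  # registered from the add-on's register(), not shown here

    outline_color: FloatVectorProperty(
        name="Outline",
        subtype='COLOR', size=4, min=0.0, max=1.0,
        get=_get_outline, set=_set_outline,
    )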

View File

@ -771,7 +771,7 @@ def draw_callback_px(self, context):
main_window = self.areas["Main Window"] main_window = self.areas["Main Window"]
outline_color = addon_prefs.qcd_ogl_widget_menu_back_outline outline_color = addon_prefs.qcd_ogl_widget_menu_back_outline
background_color = addon_prefs.qcd_ogl_widget_menu_back_inner background_color = addon_prefs.qcd_ogl_widget_menu_back_inner
draw_rounded_rect(main_window, line_shader, outline_color[:] + (1,), outline=True) draw_rounded_rect(main_window, line_shader, outline_color[:], outline=True)
draw_rounded_rect(main_window, shader, background_color) draw_rounded_rect(main_window, shader, background_color)
# draw window title # draw window title
@ -852,7 +852,7 @@ def draw_callback_px(self, context):
# draw button # draw button
outline_color = addon_prefs.qcd_ogl_widget_tool_outline outline_color = addon_prefs.qcd_ogl_widget_tool_outline
draw_rounded_rect(button_area, line_shader, outline_color[:] + (1,), tl, tr, bl, br, outline=True) draw_rounded_rect(button_area, line_shader, outline_color[:], tl, tr, bl, br, outline=True)
draw_rounded_rect(button_area, shader, button_color, tl, tr, bl, br) draw_rounded_rect(button_area, shader, button_color, tl, tr, bl, br)
# ACTIVE OBJECT # ACTIVE OBJECT
@ -979,7 +979,7 @@ def draw_tooltip(self, context, shader, line_shader, message):
outline_color = addon_prefs.qcd_ogl_widget_tooltip_outline outline_color = addon_prefs.qcd_ogl_widget_tooltip_outline
background_color = addon_prefs.qcd_ogl_widget_tooltip_inner background_color = addon_prefs.qcd_ogl_widget_tooltip_inner
draw_rounded_rect(tooltip, line_shader, outline_color[:] + (1,), outline=True) draw_rounded_rect(tooltip, line_shader, outline_color[:], outline=True)
draw_rounded_rect(tooltip, shader, background_color) draw_rounded_rect(tooltip, shader, background_color)
line_pos = padding + line_height line_pos = padding + line_height

View File

@ -1,6 +0,0 @@
import bpy
sun_props = bpy.context.scene.sun_pos_properties
sun_props.UTC_zone = 8
sun_props.latitude = 29.558300
sun_props.longitude = 106.567000

View File

@ -1,6 +0,0 @@
import bpy
sun_props = bpy.context.scene.sun_pos_properties
sun_props.UTC_zone = 1
sun_props.latitude = -4.325000
sun_props.longitude = 15.322200

View File

@ -1,6 +0,0 @@
import bpy
sun_props = bpy.context.scene.sun_pos_properties
sun_props.UTC_zone = 0
sun_props.latitude = 51.507200
sun_props.longitude = -0.127500

View File

@ -1,6 +0,0 @@
import bpy
sun_props = bpy.context.scene.sun_pos_properties
sun_props.UTC_zone = -5
sun_props.latitude = 40.661100
sun_props.longitude = -73.943900

View File

@ -1,6 +0,0 @@
import bpy
sun_props = bpy.context.scene.sun_pos_properties
sun_props.UTC_zone = -3
sun_props.latitude = -23.550000
sun_props.longitude = -46.633300

View File

@ -1,6 +0,0 @@
import bpy
sun_props = bpy.context.scene.sun_pos_properties
sun_props.UTC_zone = 10
sun_props.latitude = -33.865000
sun_props.longitude = 151.209000
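The six files removed above are plain preset scripts: each one just restores UTC_zone, latitude and longitude on the scene's sun_pos_properties. They are the kind of file an AddPresetBase operator writes out from its preset_defines/preset_values lists; a hedged sketch of such an operator (class, menu and subdir names are illustrative, not the add-on's actual ones):

from bl_operators.presets import AddPresetBase
from bpy.types import Operator

class SUNPOS_OT_add_preset(AddPresetBase, Operator):
    """Save the current sun position settings as a preset"""
    bl_idname = "world.sunpos_add_preset"
    bl_label = "Add Sun Position Preset"
    preset_menu = "SUNPOS_PT_presets"          # panel/menu that lists the presets
    preset_subdir = "operator/sun_position"    # where the .py preset files are written

    # Lines written once at the top of every preset file.
    preset_defines = ["sun_props = bpy.context.scene.sun_pos_properties"]
    # Properties whose current values are written as assignments, as in the files above.
    preset_values = [
        "sun_props.UTC_zone",
        "sun_props.latitude",
        "sun_props.longitude",
    ]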

View File

@ -851,4 +851,3 @@ def create_sample(obj):
arm.edit_bones.active = bone arm.edit_bones.active = bone
if bcoll := arm.collections.active: if bcoll := arm.collections.active:
bcoll.assign(bone) bcoll.assign(bone)
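The Rigify sample hunk above already uses the Blender 4.0 bone-collection API (arm.collections.active, bcoll.assign()). For reference, a minimal stand-alone sketch of that API, assuming an armature object in Edit Mode (the bone name is made up):

import bpy

arm = bpy.context.object.data             # armature datablock of the active object
bone = arm.edit_bones.new("sample_bone")  # edit bones are only available in Edit Mode
bone.head = (0.0, 0.0, 0.0)
bone.tail = (0.0, 0.0, 1.0)

# Assign the new bone to the active bone collection, if there is one.
if bcoll := arm.collections.active:
    bcoll.assign(bone)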

View File

@ -31,10 +31,11 @@ if "bpy" in locals():
importlib.reload(properties) importlib.reload(properties)
importlib.reload(ui_sun) importlib.reload(ui_sun)
importlib.reload(hdr) importlib.reload(hdr)
importlib.reload(sun_calc)
importlib.reload(translations) importlib.reload(translations)
else: else:
from . import properties, ui_sun, hdr, translations from . import properties, ui_sun, hdr, sun_calc, translations
import bpy import bpy
from bpy.app.handlers import persistent from bpy.app.handlers import persistent
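The hunk above adds sun_calc to the add-on's reload block. The reason is the standard "Reload Scripts" idiom: every submodule pulled in with "from . import" also needs an explicit importlib.reload(), otherwise the already-imported module object keeps running its old code. A stripped-down sketch of that idiom for a hypothetical add-on __init__.py:

# __init__.py of a hypothetical add-on package
if "bpy" in locals():                 # True when Blender re-runs an already loaded add-on
    import importlib
    importlib.reload(properties)
    importlib.reload(sun_calc)        # without this line, reloads keep the stale sun_calc
else:
    from . import properties, sun_calc

import bpy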

View File

@ -5,9 +5,6 @@
import bpy import bpy
from bpy.app.handlers import persistent from bpy.app.handlers import persistent
import gpu
from gpu_extras.batch import batch_for_shader
from mathutils import Euler, Vector from mathutils import Euler, Vector
from math import degrees, radians, pi, sin, cos, asin, acos, tan, floor from math import degrees, radians, pi, sin, cos, asin, acos, tan, floor

View File

@ -3,13 +3,11 @@
# SPDX-License-Identifier: GPL-2.0-or-later # SPDX-License-Identifier: GPL-2.0-or-later
import bpy import bpy
from bpy.types import Operator, Menu from bpy.types import Operator
from bl_operators.presets import AddPresetBase from bl_operators.presets import AddPresetBase
from bl_ui.utils import PresetPanel from bl_ui.utils import PresetPanel
import os
from math import degrees
from .sun_calc import format_lat_long, format_time, format_hms, sun from .sun_calc import format_time, format_hms, sun
# ------------------------------------------------------------------- # -------------------------------------------------------------------