From 5768826f19dd601a9fca092da65314d4061d0d5e Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 18 Mar 2024 08:19:15 +0100 Subject: [PATCH 1/4] Export_x3d: Fixed light radius variable Changed light radius variable from distance to shadow_soft_size --- io_scene_x3d/export_x3d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io_scene_x3d/export_x3d.py b/io_scene_x3d/export_x3d.py index e26af775a..6bbd1446b 100644 --- a/io_scene_x3d/export_x3d.py +++ b/io_scene_x3d/export_x3d.py @@ -417,7 +417,7 @@ def export(file, location = matrix.to_translation()[:] - radius = lamp.distance * math.cos(beamWidth) + radius = lamp.shadow_soft_size * math.cos(beamWidth) # radius = lamp.dist*math.cos(beamWidth) ident_step = ident + (' ' * (-len(ident) + \ fw('%s\n') -- 2.30.2 From 8f43b9ab0e803418c7ad2e4b70823937e5351d8a Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 18 Mar 2024 08:19:47 +0100 Subject: [PATCH 2/4] Export_x3d: Changed light radius variable Changed light radius variable #105235 --- io_scene_x3d/export_x3d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io_scene_x3d/export_x3d.py b/io_scene_x3d/export_x3d.py index 6bbd1446b..a168c9fa7 100644 --- a/io_scene_x3d/export_x3d.py +++ b/io_scene_x3d/export_x3d.py @@ -417,7 +417,7 @@ def export(file, location = matrix.to_translation()[:] - radius = lamp.shadow_soft_size * math.cos(beamWidth) + radius = lamp.cutoff_distance * math.cos(beamWidth) # radius = lamp.dist*math.cos(beamWidth) ident_step = ident + (' ' * (-len(ident) + \ fw('%s\n') -- 2.30.2 From be736167b8cd8b7bb515a4e17daebb6aefef7efd Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 18 Mar 2024 08:20:19 +0100 Subject: [PATCH 3/4] Import_x3d: Fixed light radius variable Changed light radius variable from distance to cutoff_distance #105235 --- io_scene_x3d/import_x3d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io_scene_x3d/import_x3d.py b/io_scene_x3d/import_x3d.py index d481145d9..4c2155163 100644 --- a/io_scene_x3d/import_x3d.py +++ b/io_scene_x3d/import_x3d.py @@ -3171,7 +3171,7 @@ def importLamp_PointLight(node, ancestry): bpylamp = bpy.data.lights.new(vrmlname, 'POINT') bpylamp.energy = intensity - bpylamp.distance = radius + bpylamp.cutoff_distance = radius bpylamp.color = color mtx = Matrix.Translation(Vector(location)) @@ -3220,7 +3220,7 @@ def importLamp_SpotLight(node, ancestry): bpylamp = bpy.data.lights.new(vrmlname, 'SPOT') bpylamp.energy = intensity - bpylamp.distance = radius + bpylamp.cutoff_distance = radius bpylamp.color = color bpylamp.spot_size = cutOffAngle if beamWidth > cutOffAngle: -- 2.30.2 From 983c719aa02f66a4ec029fcdd7f6423bef4d89a7 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 18 Mar 2024 08:26:27 +0100 Subject: [PATCH 4/4] Merge branch 'main' into blender-v4.1-release revert Merge branch 'main' into blender-v4.1-release --- io_scene_3ds/__init__.py | 45 +--- io_scene_3ds/export_3ds.py | 2 +- io_scene_3ds/import_3ds.py | 24 +- io_scene_gltf2/__init__.py | 123 ++------- .../blender/com/gltf2_blender_data_path.py | 10 +- .../blender/com/gltf2_blender_ui.py | 2 +- .../gltf2_blender_gather_fcurves_animation.py | 6 +- .../gltf2_blender_gather_fcurves_channels.py | 105 +++----- .../gltf2_blender_gather_fcurves_keyframes.py | 26 +- .../gltf2_blender_gather_fcurves_sampler.py | 19 +- .../animation/gltf2_blender_gather_action.py | 119 +------- .../gltf2_blender_gather_animation_utils.py | 11 +- .../gltf2_blender_gather_animations.py | 2 +- 
.../gltf2_blender_gather_keyframes.py | 22 +- .../gltf2_blender_gather_scene_animation.py | 6 +- .../animation/gltf2_blender_gather_tracks.py | 3 - .../armature/armature_action_sampled.py | 24 +- .../sampled/armature/armature_channels.py | 9 +- ...blender_gather_animation_sampling_cache.py | 41 +-- ...f2_blender_gather_object_action_sampled.py | 24 +- .../gltf2_blender_gather_object_channels.py | 8 +- .../gltf2_blender_gather_sk_keyframes.py | 74 +---- .../blender/exp/gltf2_blender_export.py | 22 +- .../blender/exp/gltf2_blender_gather_cache.py | 2 +- .../exp/gltf2_blender_gather_joints.py | 2 +- .../exp/gltf2_blender_gather_lights.py | 5 +- .../blender/exp/gltf2_blender_gather_mesh.py | 3 +- .../blender/exp/gltf2_blender_gather_nodes.py | 3 +- ...tf2_blender_gather_primitive_attributes.py | 13 +- .../exp/gltf2_blender_gather_primitives.py | 3 +- ...gltf2_blender_gather_primitives_extract.py | 29 +- .../blender/exp/gltf2_blender_gather_skins.py | 12 +- .../blender/exp/gltf2_blender_gather_tree.py | 65 +---- .../material/gltf2_blender_gather_image.py | 3 +- .../gltf2_blender_gather_materials.py | 8 +- .../material/gltf2_blender_gather_texture.py | 7 +- .../gltf2_blender_gather_texture_info.py | 13 +- .../gltf2_blender_search_node_tree.py | 255 ++++-------------- .../blender/imp/gltf2_blender_mesh.py | 8 +- .../blender/imp/gltf2_blender_node.py | 2 +- .../gltf2_io_draco_compression_extension.py | 13 +- io_scene_gltf2/io/com/gltf2_io.py | 2 +- io_scene_gltf2/io/com/gltf2_io_debug.py | 105 ++++---- .../gltf2_io_draco_compression_extension.py | 9 +- .../gltf2_io_draco_compression_extension.py | 7 +- .../io/exp/gltf2_io_user_extensions.py | 5 +- io_scene_gltf2/io/imp/gltf2_io_gltf.py | 6 +- .../io/imp/gltf2_io_user_extensions.py | 4 +- object_print3d_utils/export.py | 5 - 49 files changed, 329 insertions(+), 987 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index af427f354..a6f85bb97 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -13,13 +13,12 @@ from bpy.props import ( EnumProperty, FloatProperty, StringProperty, - CollectionProperty, ) import bpy bl_info = { "name": "Autodesk 3DS format", "author": "Bob Holcomb, Campbell Barton, Sebastian Schrand", - "version": (2, 5, 0), + "version": (2, 4, 9), "blender": (4, 1, 0), "location": "File > Import-Export", "description": "3DS Import/Export meshes, UVs, materials, textures, " @@ -47,9 +46,6 @@ class Import3DS(bpy.types.Operator, ImportHelper): filename_ext = ".3ds" filter_glob: StringProperty(default="*.3ds", options={'HIDDEN'}) - filepath: StringProperty(subtype='FILE_PATH', options={'SKIP_SAVE'}) - files: CollectionProperty(type=bpy.types.OperatorFileListElement, options={'HIDDEN', 'SKIP_SAVE'}) - directory: StringProperty(subtype='DIR_PATH') constrain_size: FloatProperty( name="Constrain Size", @@ -102,20 +98,15 @@ class Import3DS(bpy.types.Operator, ImportHelper): description="Transform to matrix world", default=False, ) - use_collection: BoolProperty( - name="Collection", - description="Create a new collection", - default=False, - ) use_cursor: BoolProperty( name="Cursor Origin", description="Read the 3D cursor location", default=False, ) - def execute(self, context): from . 
import import_3ds + keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob", @@ -132,17 +123,6 @@ class Import3DS(bpy.types.Operator, ImportHelper): pass -class MAX3DS_FH_import(bpy.types.FileHandler): - bl_idname = "MAX3DS_FH_import" - bl_label = "File handler for 3ds import" - bl_import_operator = "import_scene.max3ds" - bl_file_extensions = ".3ds;.3DS" - - @classmethod - def poll_drop(cls, context): - return (context.area and context.area.type == 'VIEW_3D') - - class MAX3DS_PT_import_include(bpy.types.Panel): bl_space_type = 'FILE_BROWSER' bl_region_type = 'TOOL_PROPS' @@ -172,9 +152,6 @@ class MAX3DS_PT_import_include(bpy.types.Panel): layrow.prop(operator, "use_keyframes") layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER') layrow = layout.row(align=True) - layrow.prop(operator, "use_collection") - layrow.label(text="", icon='OUTLINER_COLLECTION' if operator.use_collection else 'GROUP') - layrow = layout.row(align=True) layrow.prop(operator, "use_cursor") layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR') @@ -258,16 +235,16 @@ class Export3DS(bpy.types.Operator, ExportHelper): description="Object types to export", default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, ) - use_keyframes: BoolProperty( - name="Animation", - description="Write the keyframe data", - default=True, - ) use_hierarchy: BoolProperty( name="Hierarchy", description="Export hierarchy chunks", default=False, ) + use_keyframes: BoolProperty( + name="Animation", + description="Write the keyframe data", + default=False, + ) use_cursor: BoolProperty( name="Cursor Origin", description="Save the 3D cursor location", @@ -319,12 +296,12 @@ class MAX3DS_PT_export_include(bpy.types.Panel): layrow.label(text="", icon='RESTRICT_SELECT_OFF' if operator.use_selection else 'RESTRICT_SELECT_ON') layout.column().prop(operator, "object_filter") layrow = layout.row(align=True) - layrow.prop(operator, "use_keyframes") - layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER') - layrow = layout.row(align=True) layrow.prop(operator, "use_hierarchy") layrow.label(text="", icon='OUTLINER' if operator.use_hierarchy else 'CON_CHILDOF') layrow = layout.row(align=True) + layrow.prop(operator, "use_keyframes") + layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER') + layrow = layout.row(align=True) layrow.prop(operator, "use_cursor") layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR') @@ -369,7 +346,6 @@ def menu_func_import(self, context): def register(): bpy.utils.register_class(Import3DS) - bpy.utils.register_class(MAX3DS_FH_import) bpy.utils.register_class(MAX3DS_PT_import_include) bpy.utils.register_class(MAX3DS_PT_import_transform) bpy.utils.register_class(Export3DS) @@ -381,7 +357,6 @@ def register(): def unregister(): bpy.utils.unregister_class(Import3DS) - bpy.utils.unregister_class(MAX3DS_FH_import) bpy.utils.unregister_class(MAX3DS_PT_import_include) bpy.utils.unregister_class(MAX3DS_PT_import_transform) bpy.utils.unregister_class(Export3DS) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 67e39b3f4..3ab9f723e 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1563,7 +1563,7 @@ def make_ambient_node(world): ########## def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, use_selection=False, - object_filter=None, use_keyframes=True, use_hierarchy=False, global_matrix=None, 
use_cursor=False): + object_filter=None, use_hierarchy=False, use_keyframes=False, global_matrix=None, use_cursor=False): """Save the Blender scene to a 3ds file.""" # Time the export diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 70a422932..0142d06cc 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -10,7 +10,6 @@ import struct import mathutils from bpy_extras.image_utils import load_image from bpy_extras.node_shader_utils import PrincipledBSDFWrapper -from pathlib import Path BOUNDS_3DS = [] @@ -1672,12 +1671,10 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, object_dictionary.clear() object_matrix.clear() - """ if APPLY_MATRIX: for ob in imported_objects: if ob.type == 'MESH': ob.data.transform(ob.matrix_local.inverted()) - """ if UNITS: unit_mtx = mathutils.Matrix.Scale(MEASURE,4) @@ -1771,27 +1768,12 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, file.close() -def load(operator, context, files=None, directory="", filepath="", constrain_size=0.0, use_scene_unit=False, +def load(operator, context, filepath="", constrain_size=0.0, use_scene_unit=False, use_image_search=True, object_filter=None, use_world_matrix=False, use_keyframes=True, - use_apply_transform=True, global_matrix=None, use_cursor=False, use_center_pivot=False, use_collection=False): + use_apply_transform=True, global_matrix=None, use_cursor=False, use_center_pivot=False): - # Get the active collection - collection_init = context.view_layer.active_layer_collection.collection - - # Load each selected file - for file in files: - # Create new collections if activated (collection name = 3ds file name) - if use_collection: - collection = bpy.data.collections.new(Path(file.name).stem) - context.scene.collection.children.link(collection) - context.view_layer.active_layer_collection = context.view_layer.layer_collection.children[collection.name] - load_3ds(Path(directory, file.name), context, CONSTRAIN=constrain_size, UNITS=use_scene_unit, + load_3ds(filepath, context, CONSTRAIN=constrain_size, UNITS=use_scene_unit, IMAGE_SEARCH=use_image_search, FILTER=object_filter, WORLD_MATRIX=use_world_matrix, KEYFRAME=use_keyframes, APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix, CURSOR=use_cursor, PIVOT=use_center_pivot,) - # Retrive the initial collection as active - active = context.view_layer.layer_collection.children.get(collection_init.name) - if active is not None: - context.view_layer.active_layer_collection = active - return {'FINISHED'} diff --git a/io_scene_gltf2/__init__.py b/io_scene_gltf2/__init__.py index a53e36f13..7554b05e4 100755 --- a/io_scene_gltf2/__init__.py +++ b/io_scene_gltf2/__init__.py @@ -5,7 +5,7 @@ bl_info = { 'name': 'glTF 2.0 format', 'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors', - "version": (4, 2, 8), + "version": (4, 1, 62), 'blender': (4, 1, 0), 'location': 'File > Import-Export', 'description': 'Import-Export as glTF 2.0', @@ -600,10 +600,6 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): 'Export actions (actives and on NLA tracks) as separate animations'), ('ACTIVE_ACTIONS', 'Active actions merged', 'All the currently assigned actions become one glTF animation'), - ('BROADCAST', 'Broadcast actions', - 'Broadcast all compatible actions to all objects. 
' - 'Animated objects will get all actions compatible with them, ' - 'others will get no animation at all'), ('NLA_TRACKS', 'NLA Tracks', 'Export individual NLA Tracks as separate animation'), ('SCENE', 'Scene', @@ -648,15 +644,6 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): default=False ) - export_leaf_bone: BoolProperty( - name='Add Leaf Bones', - description=( - 'Append a final bone to the end of each chain to specify last bone length ' - '(use this when you intend to edit the armature from exported data)' - ), - default=False - ) - export_optimize_animation_size: BoolProperty( name='Optimize Animation Size', description=( @@ -686,15 +673,6 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): default=False ) - export_optimize_armature_disable_viewport: BoolProperty( - name='Disable viewport if possible', - description=( - "When exporting armature, disable viewport for other objects, " - "for performance. Drivers on shape keys for skined meshes prevent this optimization for now" - ), - default=False - ) - export_negative_frame: EnumProperty( name='Negative Frames', items=(('SLIDE', 'Slide', @@ -889,15 +867,6 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): default=False ) - export_extra_animations: BoolProperty( - name='Prepare extra animations', - description=( - 'Export additional animations' - 'This feature is not standard and needs an external extension to be included in the glTF file' - ), - default=False - ) - # Custom scene property for saving settings scene_key = "glTF2ExportSettings" @@ -964,8 +933,6 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): def execute(self, context): import os import datetime - import logging - from .io.com.gltf2_io_debug import Log from .blender.exp import gltf2_blender_export from .io.com.gltf2_io_path import path_to_uri @@ -977,8 +944,6 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): # All custom export settings are stored in this container. 
export_settings = {} - export_settings['loglevel'] = logging.INFO - export_settings['exported_images'] = {} export_settings['exported_texture_nodes'] = [] export_settings['additional_texture_export'] = [] @@ -1048,7 +1013,6 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): export_settings['gltf_flatten_bones_hierarchy'] = self.export_hierarchy_flatten_bones export_settings['gltf_flatten_obj_hierarchy'] = self.export_hierarchy_flatten_objs export_settings['gltf_armature_object_remove'] = self.export_armature_object_remove - export_settings['gltf_leaf_bone'] = self.export_leaf_bone if self.export_animations: export_settings['gltf_frame_range'] = self.export_frame_range export_settings['gltf_force_sampling'] = self.export_force_sampling @@ -1067,14 +1031,12 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): export_settings['gltf_optimize_animation'] = self.export_optimize_animation_size export_settings['gltf_optimize_animation_keep_armature'] = self.export_optimize_animation_keep_anim_armature export_settings['gltf_optimize_animation_keep_object'] = self.export_optimize_animation_keep_anim_object - export_settings['gltf_optimize_armature_disable_viewport'] = self.export_optimize_armature_disable_viewport export_settings['gltf_export_anim_single_armature'] = self.export_anim_single_armature export_settings['gltf_export_reset_pose_bones'] = self.export_reset_pose_bones export_settings['gltf_export_reset_sk_data'] = self.export_morph_reset_sk_data export_settings['gltf_bake_animation'] = self.export_bake_animation export_settings['gltf_negative_frames'] = self.export_negative_frame export_settings['gltf_anim_slide_to_zero'] = self.export_anim_slide_to_zero - export_settings['gltf_export_extra_animations'] = self.export_extra_animations else: export_settings['gltf_frame_range'] = False export_settings['gltf_force_sampling'] = False @@ -1082,11 +1044,9 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): export_settings['gltf_optimize_animation'] = False export_settings['gltf_optimize_animation_keep_armature'] = False export_settings['gltf_optimize_animation_keep_object'] = False - export_settings['gltf_optimize_armature_disable_viewport'] = False export_settings['gltf_export_anim_single_armature'] = False export_settings['gltf_export_reset_pose_bones'] = False export_settings['gltf_export_reset_sk_data'] = False - export_settings['gltf_export_extra_animations'] = False export_settings['gltf_skins'] = self.export_skins if self.export_skins: export_settings['gltf_all_vertex_influences'] = self.export_all_influences @@ -1168,19 +1128,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base): export_settings['pre_export_callbacks'] = pre_export_callbacks export_settings['post_export_callbacks'] = post_export_callbacks - - # Initialize logging for export - export_settings['log'] = Log(export_settings['loglevel']) - - res = gltf2_blender_export.save(context, export_settings) - - # Display popup log, if any - for message_type, message in export_settings['log'].messages(): - self.report({message_type}, message) - - export_settings['log'].flush() - - return res + return gltf2_blender_export.save(context, export_settings) def draw(self, context): pass # Is needed to get panels available @@ -1708,7 +1656,7 @@ class GLTF_PT_export_animation(bpy.types.Panel): layout.prop(operator, 'export_nla_strips_merged_animation_name') row = layout.row() - row.active = operator.export_force_sampling and operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS', 'BROACAST'] + row.active = operator.export_force_sampling and 
operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS'] row.prop(operator, 'export_bake_animation') if operator.export_animation_mode == "SCENE": layout.prop(operator, 'export_anim_scene_split_object') @@ -1767,11 +1715,11 @@ class GLTF_PT_export_animation_ranges(bpy.types.Panel): layout.prop(operator, 'export_current_frame') row = layout.row() - row.active = operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS', 'BROADCAST', 'NLA_TRACKS'] + row.active = operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS', 'NLA_TRACKS'] row.prop(operator, 'export_frame_range') layout.prop(operator, 'export_anim_slide_to_zero') row = layout.row() - row.active = operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS', 'BROADCAST', 'NLA_TRACKS'] + row.active = operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS', 'NLA_TRACKS'] layout.prop(operator, 'export_negative_frame') class GLTF_PT_export_animation_armature(bpy.types.Panel): @@ -1851,7 +1799,7 @@ class GLTF_PT_export_animation_sampling(bpy.types.Panel): def draw_header(self, context): sfile = context.space_data operator = sfile.active_operator - self.layout.active = operator.export_animations and operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS', 'BROADCAST'] + self.layout.active = operator.export_animations and operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS'] self.layout.prop(operator, "export_force_sampling", text="") def draw(self, context): @@ -1899,36 +1847,6 @@ class GLTF_PT_export_animation_optimize(bpy.types.Panel): row = layout.row() row.prop(operator, 'export_optimize_animation_keep_anim_object') - row = layout.row() - row.prop(operator, 'export_optimize_armature_disable_viewport') - -class GLTF_PT_export_animation_extra(bpy.types.Panel): - bl_space_type = 'FILE_BROWSER' - bl_region_type = 'TOOL_PROPS' - bl_label = "Extra Animations" - bl_parent_id = "GLTF_PT_export_animation" - bl_options = {'DEFAULT_CLOSED'} - - @classmethod - def poll(cls, context): - sfile = context.space_data - operator = sfile.active_operator - - return operator.bl_idname == "EXPORT_SCENE_OT_gltf" and \ - operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS'] - - def draw(self, context): - layout = self.layout - layout.use_property_split = True - layout.use_property_decorate = False # No animation. 
- - sfile = context.space_data - operator = sfile.active_operator - - layout.active = operator.export_animations - - layout.prop(operator, 'export_extra_animations') - class GLTF_PT_export_user_extensions(bpy.types.Panel): bl_space_type = 'FILE_BROWSER' @@ -2136,18 +2054,14 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper): gltf_importer.read() gltf_importer.checks() - gltf_importer.log.info("Data are loaded, start creating Blender stuff") + print("Data are loaded, start creating Blender stuff") start_time = time.time() BlenderGlTF.create(gltf_importer) elapsed_s = "{:.2f}s".format(time.time() - start_time) - gltf_importer.log.info("glTF import finished in " + elapsed_s) + print("glTF import finished in " + elapsed_s) - # Display popup log, if any - for message_type, message in gltf_importer.log.messages(): - self.report({message_type}, message) - - gltf_importer.log.flush() + gltf_importer.log.removeHandler(gltf_importer.log_handler) return {'FINISHED'} @@ -2157,16 +2071,16 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper): def set_debug_log(self): import logging - if bpy.app.debug_value == 0: # Default values => Display all messages except debug ones - self.loglevel = logging.INFO - elif bpy.app.debug_value == 1: - self.loglevel = logging.WARNING - elif bpy.app.debug_value == 2: - self.loglevel = logging.ERROR - elif bpy.app.debug_value == 3: + if bpy.app.debug_value == 0: self.loglevel = logging.CRITICAL - elif bpy.app.debug_value == 4: - self.loglevel = logging.DEBUG + elif bpy.app.debug_value == 1: + self.loglevel = logging.ERROR + elif bpy.app.debug_value == 2: + self.loglevel = logging.WARNING + elif bpy.app.debug_value == 3: + self.loglevel = logging.INFO + else: + self.loglevel = logging.NOTSET class GLTF2_filter_action(bpy.types.PropertyGroup): @@ -2262,7 +2176,6 @@ classes = ( GLTF_PT_export_animation_shapekeys, GLTF_PT_export_animation_sampling, GLTF_PT_export_animation_optimize, - GLTF_PT_export_animation_extra, GLTF_PT_export_gltfpack, GLTF_PT_export_user_extensions, ImportGLTF2, diff --git a/io_scene_gltf2/blender/com/gltf2_blender_data_path.py b/io_scene_gltf2/blender/com/gltf2_blender_data_path.py index 2462d24aa..b5a34ef12 100755 --- a/io_scene_gltf2/blender/com/gltf2_blender_data_path.py +++ b/io_scene_gltf2/blender/com/gltf2_blender_data_path.py @@ -5,19 +5,11 @@ def get_target_property_name(data_path: str) -> str: """Retrieve target property.""" - - if data_path.endswith("]"): - return None - else: - return data_path.rsplit('.', 1)[-1] + return data_path.rsplit('.', 1)[-1] def get_target_object_path(data_path: str) -> str: """Retrieve target object data path without property""" - if data_path.endswith("]"): - return data_path.rsplit('[', 1)[0] - elif data_path.startswith("pose.bones["): - return data_path[:data_path.find('"]')] + '"]' path_split = data_path.rsplit('.', 1) self_targeting = len(path_split) < 2 if self_targeting: diff --git a/io_scene_gltf2/blender/com/gltf2_blender_ui.py b/io_scene_gltf2/blender/com/gltf2_blender_ui.py index 03f92e623..25e3312b7 100644 --- a/io_scene_gltf2/blender/com/gltf2_blender_ui.py +++ b/io_scene_gltf2/blender/com/gltf2_blender_ui.py @@ -564,7 +564,7 @@ class SCENE_PT_gltf2_action_filter(bpy.types.Panel): def poll(self, context): sfile = context.space_data operator = sfile.active_operator - return operator.export_animation_mode in ["ACTIONS", "ACTIVE_ACTIONS", "BROADCAST"] + return operator.export_animation_mode in ["ACTIONS", "ACTIVE_ACTIONS"] def draw_header(self, context): sfile = context.space_data 
diff --git a/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_animation.py b/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_animation.py index ae5116110..56ecfaea8 100644 --- a/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_animation.py +++ b/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_animation.py @@ -16,7 +16,7 @@ def gather_animation_fcurves( name = __gather_name(blender_action, export_settings) - channels, to_be_sampled, extra_samplers = __gather_channels_fcurves(obj_uuid, blender_action, export_settings) + channels, to_be_sampled = __gather_channels_fcurves(obj_uuid, blender_action, export_settings) animation = gltf2_io.Animation( channels=channels, @@ -27,12 +27,12 @@ def gather_animation_fcurves( ) if not animation.channels: - return None, to_be_sampled, extra_samplers + return None, to_be_sampled blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object export_user_extensions('animation_gather_fcurve', export_settings, blender_object, blender_action) - return animation, to_be_sampled, extra_samplers + return animation, to_be_sampled def __gather_name(blender_action: bpy.types.Action, export_settings diff --git a/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channels.py b/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channels.py index af1f9a149..7fa5a2516 100644 --- a/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channels.py +++ b/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_channels.py @@ -6,6 +6,7 @@ import bpy import typing from .....io.exp.gltf2_io_user_extensions import export_user_extensions from .....blender.com.gltf2_blender_data_path import skip_sk +from .....io.com import gltf2_io_debug from .....io.com import gltf2_io from ....exp.gltf2_blender_gather_cache import cached from ....com.gltf2_blender_data_path import get_target_object_path, get_target_property_name, get_rotation_modes @@ -22,40 +23,25 @@ def gather_animation_fcurves_channels( export_settings ): - channels_to_perform, to_be_sampled, extra_channels_to_perform = get_channel_groups(obj_uuid, blender_action, export_settings) + channels_to_perform, to_be_sampled = get_channel_groups(obj_uuid, blender_action, export_settings) custom_range = None if blender_action.use_frame_range: custom_range = (blender_action.frame_start, blender_action.frame_end) channels = [] - extra_samplers = [] - - for chan in [chan for chan in channels_to_perform.values() if len(chan['properties']) != 0]: + for chan in [chan for chan in channels_to_perform.values() if len(chan['properties']) != 0 and chan['type'] != "EXTRA"]: for channel_group in chan['properties'].values(): channel = __gather_animation_fcurve_channel(chan['obj_uuid'], channel_group, chan['bone'], custom_range, export_settings) if channel is not None: channels.append(channel) - if export_settings['gltf_export_extra_animations']: - for chan in [chan for chan in extra_channels_to_perform.values() if len(chan['properties']) != 0]: - for channel_group_name, channel_group in chan['properties'].items(): - - # No glTF channel here, as we don't have any target - # Trying to retrieve sampler directly - sampler = __gather_sampler(obj_uuid, tuple(channel_group), None, custom_range, True, export_settings) - if sampler is not None: - extra_samplers.append((channel_group_name, sampler, "OBJECT", None)) + return channels, to_be_sampled - return channels, 
to_be_sampled, extra_samplers - - -def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_settings, no_sample_option=False): - # no_sample_option is used when we want to retrieve all SK channels, to be evaluate. +def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_settings): targets = {} - targets_extra = {} blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object @@ -75,33 +61,20 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s # example of target_property : location, rotation_quaternion, value target_property = get_target_property_name(fcurve.data_path) except: - export_settings['log'].warning("Invalid animation fcurve data path on action {}".format(blender_action.name)) + gltf2_io_debug.print_console("WARNING", "Invalid animation fcurve name on action {}".format(blender_action.name)) continue object_path = get_target_object_path(fcurve.data_path) # find the object affected by this action # object_path : blank for blender_object itself, key_blocks[""] for SK, pose.bones[""] for bones if not object_path: - if fcurve.data_path.startswith("["): - target = blender_object - type_ = "EXTRA" - else: - target = blender_object - type_ = "OBJECT" + target = blender_object + type_ = "OBJECT" else: try: target = get_object_from_datapath(blender_object, object_path) - if blender_object.type == "ARMATURE" and fcurve.data_path.startswith("pose.bones["): - if target_property is not None: - if get_target(target_property) is not None: - type_ = "BONE" - else: - type_ = "EXTRA" - else: - type_ = "EXTRA" - - + type_ = "BONE" else: type_ = "EXTRA" if blender_object.type == "MESH" and object_path.startswith("key_blocks"): @@ -122,10 +95,10 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s type_ = "SK" except: # Something is wrong, for example a bone animation is linked to an object mesh... 
- export_settings['log'].warning("Invalid animation fcurve data path on action {}".format(blender_action.name)) + gltf2_io_debug.print_console("WARNING", "Animation target {} not found".format(object_path)) continue else: - export_settings['log'].warning("Animation target {} not found".format(object_path)) + gltf2_io_debug.print_console("WARNING", "Animation target {} not found".format(object_path)) continue # Detect that object or bone are not multiple keyed for euler and quaternion @@ -135,25 +108,6 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s multiple_rotation_mode_detected[target] = True continue - if type_ == "EXTRA": - # No group by property, because we are going to export fcurve separately - # We are going to evaluate fcurve, so no check if need to be sampled - if target_property is None: - target_property = fcurve.data_path - if not target_property.startswith("pose.bones["): - target_property = fcurve.data_path - target_data = targets_extra.get(target, {}) - target_data['type'] = type_ - target_data['bone'] = target.name - target_data['obj_uuid'] = obj_uuid - target_properties = target_data.get('properties', {}) - channels = target_properties.get(target_property, []) - channels.append(fcurve) - target_properties[target_property] = channels - target_data['properties'] = target_properties - targets_extra[target] = target_data - continue - # group channels by target object and affected property of the target target_data = targets.get(target, {}) target_data['type'] = type_ @@ -168,7 +122,7 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s targets[target] = target_data for targ in multiple_rotation_mode_detected.keys(): - export_settings['log'].warning("Multiple rotation mode detected for {}".format(targ.name)) + gltf2_io_debug.print_console("WARNING", "Multiple rotation mode detected for {}".format(targ.name)) # Now that all curves are extracted, # - check that there is no normal + delta transforms @@ -194,7 +148,7 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s # Check if the property can be exported without sampling new_properties = {} for prop in target_data['properties'].keys(): - if no_sample_option is False and needs_baking(obj_uuid, target_data['properties'][prop], export_settings) is True: + if needs_baking(obj_uuid, target_data['properties'][prop], export_settings) is True: to_be_sampled.append((obj_uuid, target_data['type'], get_channel_from_target(get_target(prop)), target_data['bone'])) # bone can be None if not a bone :) else: new_properties[prop] = target_data['properties'][prop] @@ -211,7 +165,7 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s to_be_sampled = list(set(to_be_sampled)) - return targets, to_be_sampled, targets_extra + return targets, to_be_sampled def __get_channel_group_sorted(channels: typing.Tuple[bpy.types.FCurve], blender_object: bpy.types.Object): @@ -272,7 +226,7 @@ def __gather_animation_fcurve_channel(obj_uuid: str, __target= __gather_target(obj_uuid, channel_group, bone, export_settings) if __target.path is not None: - sampler = __gather_sampler(obj_uuid, channel_group, bone, custom_range, False, export_settings) + sampler = __gather_sampler(obj_uuid, channel_group, bone, custom_range, export_settings) if sampler is None: # After check, no need to animate this node for this channel @@ -307,10 +261,9 @@ def __gather_sampler(obj_uuid: str, channel_group: typing.Tuple[bpy.types.FCurve], bone: 
typing.Optional[str], custom_range: typing.Optional[set], - extra_mode: bool, export_settings) -> gltf2_io.AnimationSampler: - return gather_animation_fcurves_sampler(obj_uuid, channel_group, bone, custom_range, extra_mode, export_settings) + return gather_animation_fcurves_sampler(obj_uuid, channel_group, bone, custom_range, export_settings) def needs_baking(obj_uuid: str, channels: typing.Tuple[bpy.types.FCurve], @@ -328,24 +281,24 @@ def needs_baking(obj_uuid: str, # Sampling due to unsupported interpolation interpolation = [c for c in channels if c is not None][0].keyframe_points[0].interpolation if interpolation not in ["BEZIER", "LINEAR", "CONSTANT"]: - export_settings['log'].warning( - "Baking animation because of an unsupported interpolation method: {}".format(interpolation) - ) + gltf2_io_debug.print_console("WARNING", + "Baking animation because of an unsupported interpolation method: {}".format( + interpolation) + ) return True if any(any(k.interpolation != interpolation for k in c.keyframe_points) for c in channels if c is not None): # There are different interpolation methods in one action group - export_settings['log'].warning( - "Baking animation because there are keyframes with different " + gltf2_io_debug.print_console("WARNING", + "Baking animation because there are keyframes with different " "interpolation methods in one channel" - ) + ) return True if not all_equal([len(c.keyframe_points) for c in channels if c is not None]): - export_settings['log'].warning( - "Baking animation because the number of keyframes is not " - "equal for all channel tracks" - ) + gltf2_io_debug.print_console("WARNING", + "Baking animation because the number of keyframes is not " + "equal for all channel tracks") return True if len([c for c in channels if c is not None][0].keyframe_points) <= 1: @@ -354,7 +307,8 @@ def needs_baking(obj_uuid: str, if not all_equal(list(zip([[k.co[0] for k in c.keyframe_points] for c in channels if c is not None]))): # The channels have differently located keyframes - export_settings['log'].warning("Baking animation because of differently located keyframes in one channel") + gltf2_io_debug.print_console("WARNING", + "Baking animation because of differently located keyframes in one channel") return True if export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE": @@ -362,7 +316,8 @@ def needs_baking(obj_uuid: str, if isinstance(animation_target, bpy.types.PoseBone): if len(animation_target.constraints) != 0: # Constraints such as IK act on the bone -> can not be represented in glTF atm - export_settings['log'].warning("Baking animation because of unsupported constraints acting on the bone") + gltf2_io_debug.print_console("WARNING", + "Baking animation because of unsupported constraints acting on the bone") return True return False diff --git a/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_keyframes.py b/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_keyframes.py index d236eea16..82ef9835f 100644 --- a/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_keyframes.py +++ b/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_keyframes.py @@ -16,12 +16,11 @@ def gather_fcurve_keyframes( channel_group: typing.Tuple[bpy.types.FCurve], bone: typing.Optional[str], custom_range: typing.Optional[set], - extra_mode: bool, export_settings): keyframes = [] - non_keyed_values = gather_non_keyed_values(obj_uuid, channel_group, bone, extra_mode, export_settings) + 
non_keyed_values = __gather_non_keyed_values(obj_uuid, channel_group, bone, export_settings) # Just use the keyframes as they are specified in blender # Note: channels has some None items only for SK if some SK are not animated @@ -46,7 +45,7 @@ def gather_fcurve_keyframes( key.value = [c.evaluate(frame) for c in channel_group if c is not None] # Complete key with non keyed values, if needed if len([c for c in channel_group if c is not None]) != key.get_target_len(): - complete_key(key, non_keyed_values) + __complete_key(key, non_keyed_values) # compute tangents for cubic spline interpolation if [c for c in channel_group if c is not None][0].keyframe_points[0].interpolation == "BEZIER": @@ -88,18 +87,13 @@ def gather_fcurve_keyframes( return keyframes -def gather_non_keyed_values( +def __gather_non_keyed_values( obj_uuid: str, channel_group: typing.Tuple[bpy.types.FCurve], bone: typing.Optional[str], - extra_mode: bool, export_settings ) -> typing.Tuple[typing.Optional[float]]: - if extra_mode is True: - # No need to check if there are non non keyed values, as we export fcurve independently - return [None] - blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object non_keyed_values = [] @@ -138,7 +132,7 @@ def gather_non_keyed_values( if i in indices: non_keyed_values.append(None) else: - if bone is None: + if bone is None is None: non_keyed_values.append({ "delta_location" : blender_object.delta_location, "delta_rotation_euler" : blender_object.delta_rotation_euler, @@ -175,16 +169,16 @@ def gather_non_keyed_values( shapekeys_idx[cpt_sk] = sk.name cpt_sk += 1 - for idx_c, channel in enumerate(channel_group): - if channel is None: - non_keyed_values.append(blender_object.data.shape_keys.key_blocks[shapekeys_idx[idx_c]].value) - else: - non_keyed_values.append(None) + for idx_c, channel in enumerate(channel_group): + if channel is None: + non_keyed_values.append(blender_object.data.shape_keys.key_blocks[shapekeys_idx[idx_c]].value) + else: + non_keyed_values.append(None) return tuple(non_keyed_values) -def complete_key(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]): +def __complete_key(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]): """ Complete keyframe with non keyed values """ diff --git a/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_sampler.py b/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_sampler.py index 99d142357..984b5f693 100644 --- a/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_sampler.py +++ b/io_scene_gltf2/blender/exp/animation/fcurves/gltf2_blender_gather_fcurves_sampler.py @@ -23,7 +23,6 @@ def gather_animation_fcurves_sampler( channel_group: typing.Tuple[bpy.types.FCurve], bone: typing.Optional[str], custom_range: typing.Optional[set], - extra_mode: bool, export_settings ) -> gltf2_io.AnimationSampler: @@ -34,7 +33,6 @@ def gather_animation_fcurves_sampler( channel_group, bone, custom_range, - extra_mode, export_settings) if keyframes is None: @@ -42,7 +40,7 @@ def gather_animation_fcurves_sampler( return None # Now we are raw input/output, we need to convert to glTF data - input, output = __convert_keyframes(obj_uuid, channel_group, bone, keyframes, extra_mode, export_settings) + input, output = __convert_keyframes(obj_uuid, channel_group, bone, keyframes, export_settings) sampler = gltf2_io.AnimationSampler( extensions=None, @@ -64,18 +62,16 @@ def __gather_keyframes( channel_group: typing.Tuple[bpy.types.FCurve], bone: 
typing.Optional[str], custom_range: typing.Optional[set], - extra_mode: bool, export_settings ): - return gather_fcurve_keyframes(obj_uuid, channel_group, bone, custom_range, extra_mode, export_settings) + return gather_fcurve_keyframes(obj_uuid, channel_group, bone, custom_range, export_settings) def __convert_keyframes( obj_uuid: str, channel_group: typing.Tuple[bpy.types.FCurve], bone_name: typing.Optional[str], keyframes, - extra_mode: bool, export_settings): times = [k.seconds for k in keyframes] @@ -141,17 +137,6 @@ def __convert_keyframes( values = [] fps = (bpy.context.scene.render.fps * bpy.context.scene.render.fps_base) for keyframe in keyframes: - - if extra_mode is True: - # Export as is, without trying to convert - keyframe_value = keyframe.value - if keyframe.in_tangent is not None: - keyframe_value = keyframe.in_tangent + keyframe_value - if keyframe.out_tangent is not None: - keyframe_value = keyframe_value + keyframe.out_tangent - values += keyframe_value - continue - # Transform the data and build gltf control points value = gltf2_blender_math.transform(keyframe.value, target_datapath, transform, need_rotation_correction) if is_yup and bone_name is None: diff --git a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_action.py b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_action.py index 287037d2c..2f370e17c 100644 --- a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_action.py +++ b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_action.py @@ -5,6 +5,7 @@ import bpy import typing from ....io.com import gltf2_io +from ....io.com.gltf2_io_debug import print_console from ....io.exp.gltf2_io_user_extensions import export_user_extensions from ....blender.com.gltf2_blender_conversion import get_gltf_interpolation from ...com.gltf2_blender_data_path import is_bone_anim_channel @@ -260,25 +261,6 @@ def gather_action_animations( obj_uuid: int, current_use_nla = blender_object.animation_data.use_nla blender_object.animation_data.use_nla = False - # Try to disable all except armature in viewport, for performance - if export_settings['gltf_optimize_armature_disable_viewport'] \ - and export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE": - - # If the skinned mesh has driver(s), we can't disable it to bake armature. - need_to_enable_again = False - sk_drivers = get_sk_drivers(obj_uuid, export_settings) - if len(sk_drivers) == 0: - need_to_enable_again = True - # Before baking, disabling from viewport all meshes - for obj in [n.blender_object for n in export_settings['vtree'].nodes.values() if n.blender_type in - [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]: - obj.hide_viewport = True - export_settings['vtree'].nodes[obj_uuid].blender_object.hide_viewport = False - else: - export_settings['log'].warning("Can't disable viewport because of drivers") - export_settings['gltf_optimize_armature_disable_viewport'] = False # We changed the option here, so we don't need to re-check it later, during - - export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, False) ######## Export @@ -299,7 +281,7 @@ def gather_action_animations( obj_uuid: int, export_user_extensions('post_animation_switch_hook', export_settings, blender_object, blender_action, track_name, on_type) except: error = "Action is readonly. Please check NLA editor" - export_settings['log'].warning("Animation '{}' could not be exported. 
Cause: {}".format(blender_action.name, error)) + print_console("WARNING", "Animation '{}' could not be exported. Cause: {}".format(blender_action.name, error)) continue if on_type == "SHAPEKEY": @@ -314,9 +296,9 @@ def gather_action_animations( obj_uuid: int, if export_settings['gltf_force_sampling'] is True: if export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE": - animation, extra_samplers = gather_action_armature_sampled(obj_uuid, blender_action, None, export_settings) + animation = gather_action_armature_sampled(obj_uuid, blender_action, None, export_settings) elif on_type == "OBJECT": - animation, extra_samplers = gather_action_object_sampled(obj_uuid, blender_action, None, export_settings) + animation = gather_action_object_sampled(obj_uuid, blender_action, None, export_settings) else: animation = gather_action_sk_sampled(obj_uuid, blender_action, None, export_settings) else: @@ -325,7 +307,7 @@ def gather_action_animations( obj_uuid: int, # - animation on fcurves # - fcurve that cannot be handled not sampled, to be sampled # to_be_sampled is : (object_uuid , type , prop, optional(bone.name) ) - animation, to_be_sampled, extra_samplers = gather_animation_fcurves(obj_uuid, blender_action, export_settings) + animation, to_be_sampled = gather_animation_fcurves(obj_uuid, blender_action, export_settings) for (obj_uuid, type_, prop, bone) in to_be_sampled: if type_ == "BONE": channel = gather_sampled_bone_channel(obj_uuid, bone, prop, blender_action.name, True, get_gltf_interpolation("LINEAR"), export_settings) @@ -336,7 +318,7 @@ def gather_action_animations( obj_uuid: int, elif type_ == "EXTRA": channel = None else: - export_settings['log'].error("Type unknown. Should not happen") + print("Type unknown. Should not happen") if animation is None and channel is not None: # If all channels need to be sampled, no animation was created @@ -352,11 +334,6 @@ def gather_action_animations( obj_uuid: int, if channel is not None: animation.channels.append(channel) - # Add extra samplers - # Because this is not core glTF specification, you can add extra samplers using hook - if export_settings['gltf_export_extra_animations'] and len(extra_samplers) != 0: - export_user_extensions('extra_animation_manage', export_settings, extra_samplers, obj_uuid, blender_object, blender_action, animation) - # If we are in a SK animation, and we need to bake (if there also in TRS anim) if len([a for a in blender_actions if a[2] == "OBJECT"]) == 0 and on_type == "SHAPEKEY": if export_settings['gltf_bake_animation'] is True and export_settings['gltf_force_sampling'] is True: @@ -366,7 +343,7 @@ def gather_action_animations( obj_uuid: int, if obj_uuid not in export_settings['ranges'].keys(): export_settings['ranges'][obj_uuid] = {} export_settings['ranges'][obj_uuid][obj_uuid] = export_settings['ranges'][obj_uuid][blender_action.name] - channels, _ = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) + channels = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) if channels is not None: if animation is None: animation = gltf2_io.Animation( @@ -454,15 +431,6 @@ def gather_action_animations( obj_uuid: int, if blender_object and current_world_matrix is not None: blender_object.matrix_world = current_world_matrix - if export_settings['gltf_optimize_armature_disable_viewport'] \ - and export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE": - if need_to_enable_again is True: - # And now, restoring meshes in viewport - for node, obj in [(n, 
n.blender_object) for n in export_settings['vtree'].nodes.values() if n.blender_type in - [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]: - obj.hide_viewport = node.default_hide_viewport - export_settings['vtree'].nodes[obj_uuid].blender_object.hide_viewport = export_settings['vtree'].nodes[obj_uuid].default_hide_viewport - export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, True) return animations, tracks @@ -479,9 +447,6 @@ def __get_blender_actions(obj_uuid: str, export_user_extensions('pre_gather_actions_hook', export_settings, blender_object) - if export_settings['gltf_animation_mode'] == "BROADCAST": - return __get_blender_actions_broadcast(obj_uuid, export_settings) - if blender_object and blender_object.animation_data is not None: # Collect active action. if blender_object.animation_data.action is not None: @@ -502,7 +467,6 @@ def __get_blender_actions(obj_uuid: str, # so skip them for now and only write single-strip tracks. non_muted_strips = [strip for strip in track.strips if strip.action is not None and strip.mute is False] if track.strips is None or len(non_muted_strips) != 1: - export_settings['log'].warning("NLA track '{}' has {} strips, but only single-strip tracks are supported in 'actions' mode.".format(track.name, len(track.strips)), popup=True) continue for strip in non_muted_strips: @@ -607,72 +571,3 @@ def __gather_extras(blender_action, export_settings): if export_settings['gltf_extras']: return generate_extras(blender_action) return None - -def __get_blender_actions_broadcast(obj_uuid, export_settings): - blender_actions = [] - blender_tracks = {} - action_on_type = {} - - blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object - - # Note : Like in FBX exporter: - # - Object with animation data will get all actions - # - Object without animation will not get any action - - # Collect all actions - for blender_action in bpy.data.actions: - if hasattr(bpy.data.scenes[0], "gltf_action_filter") \ - and id(blender_action) in [id(item.action) for item in bpy.data.scenes[0].gltf_action_filter if item.keep is False]: - continue # We ignore this action - - # Keep all actions on objects (no Shapekey animation, No armature animation (on bones)) - if blender_action.id_root == "OBJECT": #TRS and Bone animations - if blender_object.animation_data is None: - continue - if blender_object and blender_object.type == "ARMATURE" and __is_armature_action(blender_action): - blender_actions.append(blender_action) - blender_tracks[blender_action.name] = None - action_on_type[blender_action.name] = "OBJECT" - elif blender_object.type == "MESH": - if not __is_armature_action(blender_action): - blender_actions.append(blender_action) - blender_tracks[blender_action.name] = None - action_on_type[blender_action.name] = "OBJECT" - elif blender_action.id_root == "KEY": - if blender_object.type != "MESH" or blender_object.data is None or blender_object.data.shape_keys is None or blender_object.data.shape_keys.animation_data is None: - continue - # Checking that the object has some SK and some animation on it - if blender_object is None: - continue - if blender_object.type != "MESH": - continue - if blender_object.data is None or blender_object.data.shape_keys is None: - continue - blender_actions.append(blender_action) - blender_tracks[blender_action.name] = None - action_on_type[blender_action.name] = "SHAPEKEY" - - - # Use a class to get parameters, to be able to modify them - class GatherActionHookParameters: - def 
__init__(self, blender_actions, blender_tracks, action_on_type): - self.blender_actions = blender_actions - self.blender_tracks = blender_tracks - self.action_on_type = action_on_type - - gatheractionhookparams = GatherActionHookParameters(blender_actions, blender_tracks, action_on_type) - - export_user_extensions('gather_actions_hook', export_settings, blender_object, gatheractionhookparams) - - # Get params back from hooks - blender_actions = gatheractionhookparams.blender_actions - blender_tracks = gatheractionhookparams.blender_tracks - action_on_type = gatheractionhookparams.action_on_type - - # Remove duplicate actions. - blender_actions = list(set(blender_actions)) - # sort animations alphabetically (case insensitive) so they have a defined order and match Blender's Action list - blender_actions.sort(key = lambda a: a.name.lower()) - - return [(blender_action, blender_tracks[blender_action.name], action_on_type[blender_action.name]) for blender_action in blender_actions] - diff --git a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animation_utils.py b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animation_utils.py index e6d9884c1..a6c0e6509 100644 --- a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animation_utils.py +++ b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animation_utils.py @@ -8,6 +8,7 @@ from mathutils import Matrix from ....blender.com.gltf2_blender_data_path import get_sk_exported from ....io.com import gltf2_io from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ....io.com.gltf2_io_debug import print_console from ..gltf2_blender_gather_tree import VExportNode from .sampled.armature.armature_action_sampled import gather_action_armature_sampled from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled @@ -129,7 +130,7 @@ def merge_tracks_perform(merged_tracks, animations, export_settings): for channel in animations[anim_idx].channels: if (channel.target.node, channel.target.path) in already_animated: - export_settings['log'].warning("Some strips have same channel animation ({}), on node {} !".format(channel.target.path, channel.target.node.name)) + print_console("WARNING", "Some strips have same channel animation ({}), on node {} !".format(channel.target.path, channel.target.node.name)) continue animations[base_animation_idx].channels.append(channel) animations[base_animation_idx].channels[-1].sampler = animations[base_animation_idx].channels[-1].sampler + offset_sampler @@ -161,9 +162,6 @@ def merge_tracks_perform(merged_tracks, animations, export_settings): def bake_animation(obj_uuid: str, animation_key: str, export_settings, mode=None): - # Bake situation does not export any extra animation channels, as we bake TRS + weights on Track or scene level, without direct - # Access to fcurve and action data - # if there is no animation in file => no need to bake if len(bpy.data.actions) == 0: return None @@ -182,7 +180,8 @@ def bake_animation(obj_uuid: str, animation_key: str, export_settings, mode=None # (skinned meshes TRS must be ignored, says glTF specification) if export_settings['vtree'].nodes[obj_uuid].skin is None: if mode is None or mode == "OBJECT": - animation, _ = gather_action_object_sampled(obj_uuid, None, animation_key, export_settings) + animation = gather_action_object_sampled(obj_uuid, None, animation_key, export_settings) + # Need to bake sk only if not linked to a driver sk by parent armature # In case of NLA track export, no baking of 
SK @@ -228,7 +227,7 @@ def bake_animation(obj_uuid: str, animation_key: str, export_settings, mode=None # We need to bake all bones. Because some bone can have some constraints linking to # some other armature bones, for example - animation, _ = gather_action_armature_sampled(obj_uuid, None, animation_key, export_settings) + animation = gather_action_armature_sampled(obj_uuid, None, animation_key, export_settings) link_samplers(animation, export_settings) if animation is not None: return animation diff --git a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animations.py b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animations.py index cb5a70f7f..405cc6e3b 100644 --- a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animations.py +++ b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_animations.py @@ -14,7 +14,7 @@ def gather_animations(export_settings): export_settings['ranges'] = {} export_settings['slide'] = {} - if export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + if export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: return gather_actions_animations(export_settings) elif export_settings['gltf_animation_mode'] == "SCENE": return gather_scene_animations(export_settings) diff --git a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py index d1e1f8adc..c08dfa247 100644 --- a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py +++ b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_keyframes.py @@ -15,21 +15,12 @@ class Keyframe: self.__length_morph = 0 # Note: channels has some None items only for SK if some SK are not animated if bake_channel is None: - if not all([c == None for c in channels]): - self.target = [c for c in channels if c is not None][0].data_path.split('.')[-1] - if self.target != "value": - self.__indices = [c.array_index for c in channels] - else: - self.__indices = [i for i, c in enumerate(channels) if c is not None] - self.__length_morph = len(channels) + self.target = [c for c in channels if c is not None][0].data_path.split('.')[-1] + if self.target != "value": + self.__indices = [c.array_index for c in channels] else: - # If all channels are None (baking evaluate SK case) - self.target = "value" - self.__indices = [] + self.__indices = [i for i, c in enumerate(channels) if c is not None] self.__length_morph = len(channels) - for i in range(self.get_target_len()): - self.__indices.append(i) - else: if bake_channel == "value": self.__length_morph = len(channels) @@ -56,7 +47,10 @@ class Keyframe: "rotation_quaternion": 4, "scale": 3, "value": self.__length_morph - }.get(self.target, 1) + }.get(self.target) + + if length is None: + raise RuntimeError("Animations with target type '{}' are not supported.".format(self.target)) return length diff --git a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_scene_animation.py b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_scene_animation.py index 8afc79cb6..359f379a4 100644 --- a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_scene_animation.py +++ b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_scene_animation.py @@ -68,7 +68,7 @@ def gather_scene_animations(export_settings): if blender_object and blender_object.type != "ARMATURE": # We have to check if this is a skinned mesh, because we don't have to force animation baking on this case if 
export_settings['vtree'].nodes[obj_uuid].skin is None: - channels, _ = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) + channels = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) if channels is not None: total_channels.extend(channels) if export_settings['gltf_morph_anim'] and blender_object.type == "MESH" \ @@ -90,11 +90,11 @@ def gather_scene_animations(export_settings): elif blender_object is None: # This is GN instances # Currently, not checking if this instance is skinned.... #TODO - channels, _ = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) + channels = gather_object_sampled_channels(obj_uuid, obj_uuid, export_settings) if channels is not None: total_channels.extend(channels) else: - channels, _ = gather_armature_sampled_channels(obj_uuid, obj_uuid, export_settings) + channels = gather_armature_sampled_channels(obj_uuid, obj_uuid, export_settings) if channels is not None: total_channels.extend(channels) diff --git a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_tracks.py b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_tracks.py index 08917a403..58e59faa5 100644 --- a/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_tracks.py +++ b/io_scene_gltf2/blender/exp/animation/gltf2_blender_gather_tracks.py @@ -47,9 +47,6 @@ def gather_track_animations( obj_uuid: int, animations = [] - # Bake situation does not export any extra animation channels, as we bake TRS + weights on Track or scene level, without direct - # Access to fcurve and action data - blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object # Collect all tracks affecting this object. blender_tracks = __get_blender_tracks(obj_uuid, export_settings) diff --git a/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_action_sampled.py b/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_action_sampled.py index f35b2ce52..34fc43f14 100644 --- a/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_action_sampled.py +++ b/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_action_sampled.py @@ -5,9 +5,9 @@ import bpy import typing from ......io.exp.gltf2_io_user_extensions import export_user_extensions +from ......io.com.gltf2_io_debug import print_console from ......io.com import gltf2_io from .....com.gltf2_blender_extras import generate_extras -from ...fcurves.gltf2_blender_gather_fcurves_sampler import gather_animation_fcurves_sampler from .armature_channels import gather_armature_sampled_channels @@ -22,42 +22,28 @@ def gather_action_armature_sampled(armature_uuid: str, blender_action: typing.Op name = __gather_name(blender_action, armature_uuid, cache_key, export_settings) try: - channels, extra_channels = __gather_channels(armature_uuid, blender_action.name if blender_action else cache_key, export_settings) animation = gltf2_io.Animation( - channels=channels, + channels=__gather_channels(armature_uuid, blender_action.name if blender_action else cache_key, export_settings), extensions=None, extras=__gather_extras(blender_action, export_settings), name=name, samplers=[] # We need to gather the samplers after gathering all channels --> populate this list in __link_samplers ) except RuntimeError as error: - export_settings['log'].warning("Animation '{}' could not be exported. Cause: {}".format(name, error)) + print_console("WARNING", "Animation '{}' could not be exported. 
Cause: {}".format(name, error)) return None export_user_extensions('pre_gather_animation_hook', export_settings, animation, blender_action, blender_object) - - extra_samplers = [] - if export_settings['gltf_export_extra_animations']: - for chan in [chan for chan in extra_channels.values() if len(chan['properties']) != 0]: - for channel_group_name, channel_group in chan['properties'].items(): - - # No glTF channel here, as we don't have any target - # Trying to retrieve sampler directly - sampler = gather_animation_fcurves_sampler(armature_uuid, tuple(channel_group), None, None, True, export_settings) - if sampler is not None: - extra_samplers.append((channel_group_name, sampler)) - - if not animation.channels: - return None, extra_samplers + return None # To allow reuse of samplers in one animation : This will be done later, when we know all channels are here export_user_extensions('gather_animation_hook', export_settings, animation, blender_action, blender_object) # For compatibility for older version export_user_extensions('animation_action_armature_sampled', export_settings, animation, blender_object, blender_action, cache_key) - return animation, extra_samplers + return animation def __gather_name(blender_action: bpy.types.Action, armature_uuid: str, diff --git a/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channels.py b/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channels.py index 3853215fd..c0334ecd0 100644 --- a/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channels.py +++ b/io_scene_gltf2/blender/exp/animation/sampled/armature/armature_channels.py @@ -18,18 +18,17 @@ from .armature_sampler import gather_bone_sampled_animation_sampler def gather_armature_sampled_channels(armature_uuid, blender_action_name, export_settings) -> typing.List[gltf2_io.AnimationChannel]: channels = [] - extra_channels = {} # Then bake all bones bones_to_be_animated = [] bones_uuid = export_settings["vtree"].get_all_bones(armature_uuid) - bones_to_be_animated = [export_settings["vtree"].nodes[b].blender_bone.name for b in bones_uuid if export_settings["vtree"].nodes[b].leaf_reference is None] + bones_to_be_animated = [export_settings["vtree"].nodes[b].blender_bone.name for b in bones_uuid] # List of really animated bones is needed for optimization decision list_of_animated_bone_channels = {} if armature_uuid != blender_action_name and blender_action_name in bpy.data.actions: # Not bake situation - channels_animated, to_be_sampled, extra_channels = get_channel_groups(armature_uuid, bpy.data.actions[blender_action_name], export_settings) + channels_animated, to_be_sampled = get_channel_groups(armature_uuid, bpy.data.actions[blender_action_name], export_settings) for chan in [chan for chan in channels_animated.values() if chan['bone'] is not None]: for prop in chan['properties'].keys(): list_of_animated_bone_channels[ @@ -89,7 +88,7 @@ def gather_armature_sampled_channels(armature_uuid, blender_action_name, export_ if channel is not None: channels.append(channel) - return channels, extra_channels + return channels def gather_sampled_bone_channel( armature_uuid: str, @@ -153,7 +152,7 @@ def __gather_sampler(armature_uuid, bone, channel, action_name, node_channel_is_ def __gather_armature_object_channel(obj_uuid: str, blender_action, export_settings): channels = [] - channels_animated, to_be_sampled, extra_channels = get_channel_groups(obj_uuid, blender_action, export_settings) + channels_animated, to_be_sampled = get_channel_groups(obj_uuid, blender_action, 
export_settings) # Remove all channel linked to bones, keep only directly object channels channels_animated = [c for c in channels_animated.values() if c['type'] == "OBJECT"] to_be_sampled = [c for c in to_be_sampled if c[1] == "OBJECT"] diff --git a/io_scene_gltf2/blender/exp/animation/sampled/gltf2_blender_gather_animation_sampling_cache.py b/io_scene_gltf2/blender/exp/animation/sampled/gltf2_blender_gather_animation_sampling_cache.py index 4c823ead5..1cfe15be4 100644 --- a/io_scene_gltf2/blender/exp/animation/sampled/gltf2_blender_gather_animation_sampling_cache.py +++ b/io_scene_gltf2/blender/exp/animation/sampled/gltf2_blender_gather_animation_sampling_cache.py @@ -35,27 +35,6 @@ def get_cache_data(path: str, if export_settings['gltf_animation_mode'] in "NLA_TRACKS": obj_uuids = [blender_obj_uuid] - # If there is only 1 object to cache, we can disable viewport for other objects (for performance) - # This can be on these cases: - # - TRACK mode - # - Only one object to cache (but here, no really useful for performance) - # - Action mode, where some object have multiple actions - # - For this case, on first call, we will cache active action for all objects - # - On next calls, we will cache only the action of current object, so we can disable viewport for others - # For armature : We already checked that we can disable viewport (in case of drivers, this is currently not possible) - - need_to_enable_again = False - if export_settings['gltf_optimize_armature_disable_viewport'] is True and len(obj_uuids) == 1: - need_to_enable_again = True - # Before baking, disabling from viewport all meshes - for obj in [n.blender_object for n in export_settings['vtree'].nodes.values() if n.blender_type in - [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]: - if obj is None: - continue - obj.hide_viewport = True - export_settings['vtree'].nodes[obj_uuids[0]].blender_object.hide_viewport = False - - depsgraph = bpy.context.evaluated_depsgraph_get() frame = min_ @@ -115,7 +94,7 @@ def get_cache_data(path: str, if export_settings['vtree'].nodes[obj_uuid].blender_type != VExportNode.COLLECTION: if blender_obj and blender_obj.animation_data and blender_obj.animation_data.action \ - and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: if blender_obj.animation_data.action.name not in data[obj_uuid].keys(): data[obj_uuid][blender_obj.animation_data.action.name] = {} data[obj_uuid][blender_obj.animation_data.action.name]['matrix'] = {} @@ -146,7 +125,7 @@ def get_cache_data(path: str, if blender_obj and blender_obj.type == "ARMATURE": bones = export_settings['vtree'].get_all_bones(obj_uuid) if blender_obj.animation_data and blender_obj.animation_data.action \ - and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: if 'bone' not in data[obj_uuid][blender_obj.animation_data.action.name].keys(): data[obj_uuid][blender_obj.animation_data.action.name]['bone'] = {} elif blender_obj.animation_data \ @@ -157,7 +136,7 @@ def get_cache_data(path: str, if 'bone' not in data[obj_uuid][obj_uuid].keys(): data[obj_uuid][obj_uuid]['bone'] = {} - for bone_uuid in [bone for bone in bones if export_settings['vtree'].nodes[bone].leaf_reference is None]: + for bone_uuid in bones: blender_bone = export_settings['vtree'].nodes[bone_uuid].blender_bone if 
export_settings['vtree'].nodes[bone_uuid].parent_uuid is not None and export_settings['vtree'].nodes[export_settings['vtree'].nodes[bone_uuid].parent_uuid].blender_type == VExportNode.BONE: @@ -176,7 +155,7 @@ def get_cache_data(path: str, matrix = matrix @ blender_obj.matrix_world if blender_obj.animation_data and blender_obj.animation_data.action \ - and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: if blender_bone.name not in data[obj_uuid][blender_obj.animation_data.action.name]['bone'].keys(): data[obj_uuid][blender_obj.animation_data.action.name]['bone'][blender_bone.name] = {} data[obj_uuid][blender_obj.animation_data.action.name]['bone'][blender_bone.name][frame] = matrix @@ -208,7 +187,7 @@ def get_cache_data(path: str, and blender_obj.data.shape_keys is not None \ and blender_obj.data.shape_keys.animation_data is not None \ and blender_obj.data.shape_keys.animation_data.action is not None \ - and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: if blender_obj.data.shape_keys.animation_data.action.name not in data[obj_uuid].keys(): data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name] = {} @@ -254,7 +233,7 @@ def get_cache_data(path: str, if dr_obj not in data.keys(): data[dr_obj] = {} if blender_obj.animation_data and blender_obj.animation_data.action \ - and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS", "BROADCAST"]: + and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]: if obj_uuid + "_" + blender_obj.animation_data.action.name not in data[dr_obj]: # Using uuid of armature + armature animation name as animation name data[dr_obj][obj_uuid + "_" + blender_obj.animation_data.action.name] = {} data[dr_obj][obj_uuid + "_" + blender_obj.animation_data.action.name]['sk'] = {} @@ -275,14 +254,6 @@ def get_cache_data(path: str, data[dr_obj][obj_uuid + "_" + obj_uuid]['sk'][None][frame] = [k.value for k in get_sk_exported(driver_object.data.shape_keys.key_blocks)] frame += step - - # And now, restoring meshes in viewport - for node, obj in [(n, n.blender_object) for n in export_settings['vtree'].nodes.values() if n.blender_type in - [VExportNode.OBJECT, VExportNode.ARMATURE, VExportNode.COLLECTION]]: - obj.hide_viewport = node.default_hide_viewport - export_settings['vtree'].nodes[obj_uuids[0]].blender_object.hide_viewport = export_settings['vtree'].nodes[obj_uuids[0]].default_hide_viewport - - return data # For perf, we may be more precise, and get a list of ranges to be exported that include all needed frames diff --git a/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_action_sampled.py b/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_action_sampled.py index 134d8f6bd..20db20be0 100644 --- a/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_action_sampled.py +++ b/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_action_sampled.py @@ -7,45 +7,29 @@ import typing from ......io.com import gltf2_io from ......io.exp.gltf2_io_user_extensions import export_user_extensions from .....com.gltf2_blender_extras import generate_extras -from ...fcurves.gltf2_blender_gather_fcurves_sampler import gather_animation_fcurves_sampler from .gltf2_blender_gather_object_channels import 
gather_object_sampled_channels def gather_action_object_sampled(object_uuid: str, blender_action: typing.Optional[bpy.types.Action], cache_key: str, export_settings): - extra_samplers = [] - # If no animation in file, no need to bake if len(bpy.data.actions) == 0: - return None, extra_samplers + return None - channels, extra_channels = __gather_channels(object_uuid, blender_action.name if blender_action else cache_key, export_settings) animation = gltf2_io.Animation( - channels=channels, + channels=__gather_channels(object_uuid, blender_action.name if blender_action else cache_key, export_settings), extensions=None, extras=__gather_extras(blender_action, export_settings), name=__gather_name(object_uuid, blender_action, cache_key, export_settings), samplers=[] ) - if export_settings['gltf_export_extra_animations']: - for chan in [chan for chan in extra_channels.values() if len(chan['properties']) != 0]: - for channel_group_name, channel_group in chan['properties'].items(): - - # No glTF channel here, as we don't have any target - # Trying to retrieve sampler directly - sampler = gather_animation_fcurves_sampler(object_uuid, tuple(channel_group), None, None, True, export_settings) - if sampler is not None: - extra_samplers.append((channel_group_name, sampler, "OBJECT", None)) - - - if not animation.channels: - return None, extra_samplers + return None blender_object = export_settings['vtree'].nodes[object_uuid].blender_object export_user_extensions('animation_action_object_sampled', export_settings, animation, blender_object, blender_action, cache_key) - return animation, extra_samplers + return animation def __gather_name(object_uuid: str, blender_action: typing.Optional[bpy.types.Action], cache_key: str, export_settings): if blender_action: diff --git a/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channels.py b/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channels.py index 299ed1db3..b7c476ae0 100644 --- a/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channels.py +++ b/io_scene_gltf2/blender/exp/animation/sampled/object/gltf2_blender_gather_object_channels.py @@ -15,15 +15,11 @@ from .gltf2_blender_gather_object_channel_target import gather_object_sampled_ch def gather_object_sampled_channels(object_uuid: str, blender_action_name: str, export_settings) -> typing.List[gltf2_io.AnimationChannel]: channels = [] - extra_channels = {} - - # Bake situation does not export any extra animation channels, as we bake TRS + weights on Track or scene level, without direct - # Access to fcurve and action data list_of_animated_channels = {} if object_uuid != blender_action_name and blender_action_name in bpy.data.actions: # Not bake situation - channels_animated, to_be_sampled, extra_channels = get_channel_groups(object_uuid, bpy.data.actions[blender_action_name], export_settings) + channels_animated, to_be_sampled = get_channel_groups(object_uuid, bpy.data.actions[blender_action_name], export_settings) for chan in [chan for chan in channels_animated.values() if chan['bone'] is None]: for prop in chan['properties'].keys(): list_of_animated_channels[ @@ -49,7 +45,7 @@ def gather_object_sampled_channels(object_uuid: str, blender_action_name: str, e export_user_extensions('animation_gather_object_channel', export_settings, blender_object, blender_action_name) - return channels if len(channels) > 0 else None, extra_channels + return channels if len(channels) > 0 else None @cached def 
gather_sampled_object_channel( diff --git a/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_keyframes.py b/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_keyframes.py index 5a8ae7693..303a1fc21 100644 --- a/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_keyframes.py +++ b/io_scene_gltf2/blender/exp/animation/sampled/shapekeys/gltf2_blender_gather_sk_keyframes.py @@ -2,14 +2,10 @@ # # SPDX-License-Identifier: Apache-2.0 -import bpy -import typing import numpy as np from ......blender.com.gltf2_blender_data_path import get_sk_exported from ....gltf2_blender_gather_cache import cached from ...gltf2_blender_gather_keyframes import Keyframe -from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups -from ...fcurves.gltf2_blender_gather_fcurves_keyframes import gather_non_keyed_values from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data @@ -26,54 +22,20 @@ def gather_sk_sampled_keyframes(obj_uuid, frame = start_frame step = export_settings['gltf_frame_step'] blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object + while frame <= end_frame: + key = Keyframe([None] * (len(get_sk_exported(blender_obj.data.shape_keys.key_blocks))), frame, 'value') + key.value_total = get_cache_data( + 'sk', + obj_uuid, + None, + action_name, + frame, + step, + export_settings + ) - - if export_settings['gltf_optimize_armature_disable_viewport'] is True: - # Using this option, we miss the drivers :( - # No solution exists for now. In the future, we should be able to copy a driver - if action_name in bpy.data.actions: - channel_group, _ = get_channel_groups(obj_uuid, bpy.data.actions[action_name], export_settings, no_sample_option=True) - elif blender_obj.data.shape_keys.animation_data and blender_obj.data.shape_keys.animation_data.action: - channel_group, _ = get_channel_groups(obj_uuid, blender_obj.data.shape_keys.animation_data.action, export_settings, no_sample_option=True) - else: - channel_group = {} - channels = [None] * len(get_sk_exported(blender_obj.data.shape_keys.key_blocks)) - - # One day, if we will be able to bake drivers or evaluate it the right way, we can add here the driver fcurves - - for chan in channel_group.values(): - channels = chan['properties']['value'] - break - - non_keyed_values = gather_non_keyed_values(obj_uuid, channels, None, export_settings) - - while frame <= end_frame: - key = Keyframe(channels, frame, None) - key.value = [c.evaluate(frame) for c in channels if c is not None] - # Complete key with non keyed values, if needed - if len([c for c in channels if c is not None]) != key.get_target_len(): - complete_key(key, non_keyed_values) - - keyframes.append(key) - frame += step - - else: - # Full bake, we will go frame by frame. 
This can take time (more than using evaluate) - - while frame <= end_frame: - key = Keyframe([None] * (len(get_sk_exported(blender_obj.data.shape_keys.key_blocks))), frame, 'value') - key.value_total = get_cache_data( - 'sk', - obj_uuid, - None, - action_name, - frame, - step, - export_settings - ) - - keyframes.append(key) - frame += step + keyframes.append(key) + frame += step if len(keyframes) == 0: # For example, option CROP negative frames, but all are negatives @@ -92,13 +54,3 @@ def gather_sk_sampled_keyframes(obj_uuid, def fcurve_is_constant(keyframes): return all([j < 0.0001 for j in np.ptp([[k.value[i] for i in range(len(keyframes[0].value))] for k in keyframes], axis=0)]) - -#TODO de-duplicate, but import issue??? -def complete_key(key: Keyframe, non_keyed_values: typing.Tuple[typing.Optional[float]]): - """ - Complete keyframe with non keyed values - """ - for i in range(0, key.get_target_len()): - if i in key.get_indices(): - continue # this is a keyed array_index or a SK animated - key.set_value_index(i, non_keyed_values[i]) diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_export.py b/io_scene_gltf2/blender/exp/gltf2_blender_export.py index 5094e0d1c..2408b25fd 100755 --- a/io_scene_gltf2/blender/exp/gltf2_blender_export.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_export.py @@ -10,6 +10,7 @@ import bpy import sys import traceback +from ...io.com.gltf2_io_debug import print_console, print_newline from ...io.exp import gltf2_io_export from ...io.exp import gltf2_io_draco_compression_extension from ...io.exp.gltf2_io_user_extensions import export_user_extensions @@ -29,7 +30,7 @@ def save(context, export_settings): if not export_settings['gltf_current_frame']: bpy.context.scene.frame_set(0) - __notify_start(context, export_settings) + __notify_start(context) start_time = time.time() pre_export_callbacks = export_settings["pre_export_callbacks"] for callback in pre_export_callbacks: @@ -43,11 +44,10 @@ def save(context, export_settings): __write_file(json, buffer, export_settings) end_time = time.time() - __notify_end(context, end_time - start_time, export_settings) + __notify_end(context, end_time - start_time) if not export_settings['gltf_current_frame']: bpy.context.scene.frame_set(int(original_frame)) - return {'FINISHED'} @@ -178,7 +178,7 @@ def __postprocess_with_gltfpack(export_settings): try: subprocess.run([gltfpack_binary_file_path] + options + parameters, check=True) except subprocess.CalledProcessError as e: - export_settings['log'].error("Calling gltfpack was not successful") + print_console('ERROR', "Calling gltfpack was not successful") def __write_file(json, buffer, export_settings): try: @@ -196,18 +196,18 @@ def __write_file(json, buffer, export_settings): tb_info = traceback.extract_tb(tb) for tbi in tb_info: filename, line, func, text = tbi - export_settings['log'].error('An error occurred on line {} in statement {}'.format(line, text)) - export_settings['log'].error(str(e)) + print_console('ERROR', 'An error occurred on line {} in statement {}'.format(line, text)) + print_console('ERROR', str(e)) raise e -def __notify_start(context, export_settings): - export_settings['log'].info('Starting glTF 2.0 export') +def __notify_start(context): + print_console('INFO', 'Starting glTF 2.0 export') context.window_manager.progress_begin(0, 100) context.window_manager.progress_update(0) -def __notify_end(context, elapsed, export_settings): - export_settings['log'].info('Finished glTF 2.0 export in {} s'.format(elapsed)) +def __notify_end(context, elapsed): + 
print_console('INFO', 'Finished glTF 2.0 export in {} s'.format(elapsed)) context.window_manager.progress_end() - print() + print_newline() diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_cache.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_cache.py index a862644ed..961de6bae 100755 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_cache.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_cache.py @@ -100,7 +100,7 @@ def datacache(func): # Here are the key used: result[obj_uuid][action_name][path][bone][frame] return result[cache_key_args[1]][cache_key_args[3]][cache_key_args[0]][cache_key_args[2]][cache_key_args[4]] # object is in cache, but not this action - # We need to not erase other actions of this object + # We need to keep other actions elif cache_key_args[3] not in func.__cache[cache_key_args[1]].keys(): result = func(*args, only_gather_provided=True) # The result can contains multiples animations, in case this is an armature with drivers diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py index a0e305c74..4a09a5fb2 100755 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_joints.py @@ -86,7 +86,7 @@ def gather_joint_vnode(vnode, export_settings): extras=__gather_extras(blender_bone, export_settings), matrix=None, mesh=None, - name=blender_bone.name if vtree.nodes[vnode].leaf_reference is None else vtree.nodes[vtree.nodes[vnode].leaf_reference].blender_bone.name + '_leaf', + name=blender_bone.name, rotation=rotation, scale=scale, skin=None, diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py index fa32334c3..04ad435c6 100644 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_lights.py @@ -36,7 +36,7 @@ def gather_lights_punctual(blender_lamp, export_settings) -> Optional[Dict[str, def __filter_lights_punctual(blender_lamp, export_settings) -> bool: if blender_lamp.type in ["HEMI", "AREA"]: - export_settings['log'].warning("Unsupported light source {}".format(blender_lamp.type)) + gltf2_io_debug.print_console("WARNING", "Unsupported light source {}".format(blender_lamp.type)) return False return True @@ -63,7 +63,8 @@ def __gather_intensity(blender_lamp, export_settings) -> Optional[float]: quadratic_falloff_node = result[0].shader_node emission_strength = quadratic_falloff_node.inputs["Strength"].default_value / (math.pi * 4.0) else: - export_settings['log'].warning('No quadratic light falloff node attached to emission strength property') + gltf2_io_debug.print_console('WARNING', + 'No quadratic light falloff node attached to emission strength property') emission_strength = blender_lamp.energy else: emission_strength = emission_node.inputs["Strength"].default_value diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_mesh.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_mesh.py index 5c6a3d8bb..fc4a29f7b 100755 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_mesh.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_mesh.py @@ -6,6 +6,7 @@ import bpy from typing import Optional, Dict, List, Any, Tuple from ...io.com import gltf2_io from ...blender.com.gltf2_blender_data_path import get_sk_exported +from ...io.com.gltf2_io_debug import print_console from ...io.exp.gltf2_io_user_extensions import export_user_extensions from 
..com.gltf2_blender_extras import generate_extras from . import gltf2_blender_gather_primitives @@ -58,7 +59,7 @@ def gather_mesh(blender_mesh: bpy.types.Mesh, ) if len(mesh.primitives) == 0: - export_settings['log'].warning("Mesh '{}' has no primitives and will be omitted.".format(mesh.name)) + print_console("WARNING", "Mesh '{}' has no primitives and will be omitted.".format(mesh.name)) return None blender_object = None diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py index c24c854ed..341f2ba2e 100755 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_nodes.py @@ -6,6 +6,7 @@ import math import bpy from mathutils import Matrix, Quaternion, Vector +from ...io.com.gltf2_io_debug import print_console from ...io.com import gltf2_io from ...io.com import gltf2_io_extensions from ...io.exp.gltf2_io_user_extensions import export_user_extensions @@ -250,7 +251,7 @@ def __gather_mesh(vnode, blender_object, export_settings): # Be sure that object is valid (no NaN for example) res = blender_object.data.validate() if res is True: - export_settings['log'].warning("Mesh " + blender_object.data.name + " is not valid, and may be exported wrongly") + print_console("WARNING", "Mesh " + blender_object.data.name + " is not valid, and may be exported wrongly") modifiers = blender_object.modifiers if len(modifiers) == 0: diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitive_attributes.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitive_attributes.py index d4bea2c5e..f8ddf12f9 100755 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitive_attributes.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitive_attributes.py @@ -73,10 +73,8 @@ def __gather_skins(blender_primitive, export_settings): # Set warning, for the case where there are more group of 4 weights needed # Warning for the case where we are in the same group, will be done later (for example, 3 weights needed, but 2 wanted by user) if max_bone_set_index > wanted_max_bone_set_index: - export_settings['log'].warning( - "There are more than {} joint vertex influences." - "The {} with highest weight will be used (and normalized).".format(export_settings['gltf_vertex_influences_nb'], export_settings['gltf_vertex_influences_nb']) - ) + gltf2_io_debug.print_console("WARNING", "There are more than {} joint vertex influences." + "The {} with highest weight will be used (and normalized).".format(export_settings['gltf_vertex_influences_nb'], export_settings['gltf_vertex_influences_nb'])) # Take into account only the first set of 4 weights max_bone_set_index = wanted_max_bone_set_index @@ -101,10 +99,9 @@ def __gather_skins(blender_primitive, export_settings): idx = 4-1-i if not all(weight[:, idx]): if warning_done is False: - export_settings['log'].warning( - "There are more than {} joint vertex influences." - "The {} with highest weight will be used (and normalized).".format(export_settings['gltf_vertex_influences_nb'], export_settings['gltf_vertex_influences_nb']) - ) + gltf2_io_debug.print_console("WARNING", "There are more than {} joint vertex influences." 
+ "The {} with highest weight will be used (and normalized).".format(export_settings['gltf_vertex_influences_nb'], export_settings['gltf_vertex_influences_nb'])) + warning_done = True weight[:, idx] = 0.0 diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives.py index c88c18f59..746c434fd 100755 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives.py @@ -6,6 +6,7 @@ import bpy from typing import List, Optional, Tuple import numpy as np from ...io.com import gltf2_io, gltf2_io_constants, gltf2_io_extensions +from ...io.com.gltf2_io_debug import print_console from ...blender.com.gltf2_blender_data_path import get_sk_exported from ...io.exp import gltf2_io_binary_data from .gltf2_blender_gather_cache import cached, cached_by_key @@ -191,7 +192,7 @@ def __gather_indices(blender_primitive, blender_mesh, modifiers, export_settings component_type = gltf2_io_constants.ComponentType.UnsignedInt indices = indices.astype(np.uint32, copy=False) else: - export_settings['log'].error('A mesh contains too many vertices (' + str(max_index) + ') and needs to be split before export.') + print_console('ERROR', 'A mesh contains too many vertices (' + str(max_index) + ') and needs to be split before export.') return None element_type = gltf2_io_constants.DataType.Scalar diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives_extract.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives_extract.py index e220a5395..8de8e086d 100644 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives_extract.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_primitives_extract.py @@ -6,6 +6,7 @@ import numpy as np from copy import deepcopy from mathutils import Vector from ...blender.com.gltf2_blender_data_path import get_sk_exported +from ...io.com.gltf2_io_debug import print_console from ...io.com.gltf2_io_constants import ROUNDING_DIGIT from ...io.exp.gltf2_io_user_extensions import export_user_extensions from ...io.com import gltf2_io_constants @@ -18,7 +19,7 @@ from . import gltf2_blender_gather_skins def extract_primitives(materials, blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings): """Extract primitives from a mesh.""" - export_settings['log'].info("Extracting primitive: " + blender_mesh.name) + print_console('INFO', 'Extracting primitive: ' + blender_mesh.name) primitive_creator = PrimitiveCreator(materials, blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings) primitive_creator.prepare_data() @@ -77,7 +78,7 @@ class PrimitiveCreator: self.blender_mesh.calc_tangents() self.use_tangents = True except Exception: - self.export_settings['log'].warning("{}: Could not calculate tangents. Please try to triangulate the mesh first.".format(self.blender_mesh.name), popup=True) + print_console('WARNING', 'Could not calculate tangents. 
Please try to triangulate the mesh first.') self.tex_coord_max = 0 if self.export_settings['gltf_texcoords']: @@ -186,7 +187,7 @@ class PrimitiveCreator: # Seems we sometime can have name collision about attributes # Avoid crash and ignoring one of duplicated attribute name if attr['gltf_attribute_name'] in [a['gltf_attribute_name'] for a in self.blender_attributes]: - self.export_settings['log'].warning('Attribute collision name: ' + blender_attribute.name + ", ignoring one of them") + print_console('WARNING', 'Attribute collision name: ' + blender_attribute.name + ", ignoring one of them") continue self.blender_attributes.append(attr) @@ -425,7 +426,7 @@ class PrimitiveCreator: self.blender_mesh.attributes[attr].data.foreach_get('vector', data) data = data.reshape(-1, 2) else: - self.export_settings['log'].warning('We are not managing this case (UVMap as custom attribute for unknown type)') + print_console('WARNING', 'We are not managing this case yet (UVMap as custom attribute for unknown type)') continue # Blender UV space -> glTF UV space # u,v -> u,1-v @@ -447,7 +448,7 @@ class PrimitiveCreator: pass elif material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is not None: - self.export_settings['log'].warning('We are not managing this case (Vertex Color alpha without color)') + print_console('WARNING', 'We are not managing this case (Vertex Color alpha without color)') else: vc_color_name = None @@ -474,7 +475,7 @@ class PrimitiveCreator: if materials_use_vc is not None and materials_use_vc != vc_key: if warning_already_displayed is False: - self.export_settings['log'].warning('glTF specification does not allow this case (multiple materials with different Vertex Color)') + print_console('WARNING', 'glTF specification does not allow this case (multiple materials with different Vertex Color)') warning_already_displayed = True materials_use_vc = vc_key @@ -519,12 +520,12 @@ class PrimitiveCreator: all_uvmaps[tex] = uvmap_name if len(set(all_uvmaps.values())) > 1: - self.export_settings['log'].warning('We are not managing this case (multiple UVMap for UDIM)') + print_console('WARNING', 'We are not managing this case (multiple UVMap for UDIM)') new_prim_indices[material_idx] = self.prim_indices[material_idx] self.additional_materials.append(None) continue - self.export_settings['log'].info('Splitting UDIM tiles into different primitives/materials') + print_console('INFO', 'Splitting UDIM tiles into different primitives/materials') # Retrieve UDIM images tex = list(material_info['udim_info'].keys())[0] image = material_info['udim_info'][tex]['image'] @@ -622,7 +623,7 @@ class PrimitiveCreator: elif tex == "anisotropyTexture": new_material.extensions["KHR_materials_anisotropy"].extension['anisotropyTexture'] = new_tex else: - self.export_settings['log'].warning('We are not managing this case (UDIM for {})'.format(tex)) + print_console('WARNING', 'We are not managing this case yet (UDIM for {})'.format(tex)) self.additional_materials.append((new_material, material_info, int(str(id(base_material)) + str(u) + str(v)))) @@ -695,7 +696,7 @@ class PrimitiveCreator: has_triangle_primitive = len(primitives) != 0 primitives.extend(self.primitive_creation_edges_and_points()) - self.export_settings['log'].info('Primitives created: %d' % len(primitives)) + print_console('INFO', 'Primitives created: %d' % len(primitives)) return primitives, [None]*len(primitives), self.attributes if has_triangle_primitive else None @@ -768,7 +769,7 @@ class PrimitiveCreator: # No 
material for them, so only one primitive for each primitives.extend(self.primitive_creation_edges_and_points()) - self.export_settings['log'].info('Primitives created: %d' % len(primitives)) + print_console('INFO', 'Primitives created: %d' % len(primitives)) return primitives @@ -1060,7 +1061,7 @@ class PrimitiveCreator: elif attr['blender_domain'] in ['FACE']: data = np.empty(len(self.blender_mesh.polygons) * attr['len'], dtype=attr['type']) else: - self.export_settings['log'].error("domain not known") + print_console("ERROR", "domain not known") if attr['blender_data_type'] == "BYTE_COLOR": self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('color', data) @@ -1092,7 +1093,7 @@ class PrimitiveCreator: self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data) data = data.reshape(-1, attr['len']) else: - self.export_settings['log'].error("blender type not found " + attr['blender_data_type']) + print_console('ERROR',"blender type not found " + attr['blender_data_type']) if attr['blender_domain'] in ['CORNER']: for i in range(attr['len']): @@ -1128,7 +1129,7 @@ class PrimitiveCreator: self.dots[attr['gltf_attribute_name'] + str(i)] = data_attr[:, i] else: - self.export_settings['log'].error("domain not known") + print_console("ERROR", "domain not known") def __get_uvs_attribute(self, blender_uv_idx, attr): layer = self.blender_mesh.uv_layers[blender_uv_idx] diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py index 3ca1b8ffb..e5e755469 100755 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_skins.py @@ -90,17 +90,7 @@ def __gather_inverse_bind_matrices(armature_uuid, export_settings): matrices = [] for b in bones_uuid: - if export_settings['vtree'].nodes[b].leaf_reference is None: - __collect_matrices(blender_armature_object.pose.bones[export_settings['vtree'].nodes[b].blender_bone.name]) - else: - inverse_bind_matrix = ( - axis_basis_change @ - ( - blender_armature_object.matrix_world @ - export_settings['vtree'].nodes[export_settings['vtree'].nodes[b].leaf_reference].matrix_world_tail - ) - ).inverted_safe() - matrices.append(inverse_bind_matrix) # Leaf bone + __collect_matrices(blender_armature_object.pose.bones[export_settings['vtree'].nodes[b].blender_bone.name]) # flatten the matrices inverse_matrices = [] diff --git a/io_scene_gltf2/blender/exp/gltf2_blender_gather_tree.py b/io_scene_gltf2/blender/exp/gltf2_blender_gather_tree.py index 5cff45c93..9466098d3 100644 --- a/io_scene_gltf2/blender/exp/gltf2_blender_gather_tree.py +++ b/io_scene_gltf2/blender/exp/gltf2_blender_gather_tree.py @@ -54,9 +54,6 @@ class VExportNode: self.blender_object = None self.blender_bone = None - self.leaf_reference = None # For leaf bones only - - self.default_hide_viewport = False # Need to store the default value for meshes in case of animation baking on armature self.force_as_empty = False # Used for instancer display @@ -163,17 +160,14 @@ class VExportTree: node.blender_type = VExportNode.COLLECTION elif blender_object.type == "ARMATURE": node.blender_type = VExportNode.ARMATURE - node.default_hide_viewport = blender_object.hide_viewport elif blender_object.type == "CAMERA": node.blender_type = VExportNode.CAMERA elif blender_object.type == "LIGHT": node.blender_type = VExportNode.LIGHT elif blender_object.instance_type == "COLLECTION": node.blender_type = VExportNode.INST_COLLECTION - 
node.default_hide_viewport = blender_object.hide_viewport else: node.blender_type = VExportNode.OBJECT - node.default_hide_viewport = blender_object.hide_viewport # For meshes with armature modifier (parent is armature), keep armature uuid if node.blender_type == VExportNode.OBJECT: @@ -182,10 +176,8 @@ class VExportTree: if parent_uuid is None or not self.nodes[parent_uuid].blender_type == VExportNode.ARMATURE: # correct workflow is to parent skinned mesh to armature, but ... # all users don't use correct workflow - self.export_settings['log'].warning( - "Armature must be the parent of skinned mesh" - "Armature is selected by its name, but may be false in case of instances" - ) + print("WARNING: Armature must be the parent of skinned mesh") + print("Armature is selected by its name, but may be false in case of instances") # Search an armature by name, and use the first found # This will be done after all objects are setup node.armature_needed = modifiers["ARMATURE"].object.name @@ -249,13 +241,9 @@ class VExportTree: if self.export_settings['gltf_rest_position_armature'] is False: # Use pose bone for TRS node.matrix_world = self.nodes[node.armature].matrix_world @ blender_bone.matrix - if self.export_settings['gltf_leaf_bone'] is True: - node.matrix_world_tail = self.nodes[node.armature].matrix_world @ Matrix.Translation(blender_bone.tail) - node.matrix_world_tail = node.matrix_world_tail @ self.axis_basis_change else: # Use edit bone for TRS --> REST pose will be used node.matrix_world = self.nodes[node.armature].matrix_world @ blender_bone.bone.matrix_local - # Tail will be set after, as we need to be in edit mode node.matrix_world = node.matrix_world @ self.axis_basis_change if delta is True: @@ -430,7 +418,7 @@ class VExportTree: elif parent_keep_tag is False: self.nodes[uuid].keep_tag = False else: - self.export_settings['log'].error("This should not happen") + print("This should not happen!") for child in self.nodes[uuid].children: if self.nodes[uuid].blender_type == VExportNode.INST_COLLECTION or self.nodes[uuid].is_instancier == VExportNode.INSTANCIER: @@ -563,56 +551,13 @@ class VExportTree: del n.armature_needed def bake_armature_bone_list(self): - - if self.export_settings['gltf_leaf_bone'] is True: - self.add_leaf_bones() - # Used to store data in armature vnode # If armature is removed from export # Data are still available, even if armature is not exported (so bones are re-parented) for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]: - self.get_all_bones(n.uuid) self.get_root_bones_uuid(n.uuid) - def add_leaf_bones(self): - - # If we are using rest pose, we need to get tail of editbone, going to edit mode for each armature - if self.export_settings['gltf_rest_position_armature'] is True: - for obj_uuid in [n for n in self.nodes if self.nodes[n].blender_type == VExportNode.ARMATURE]: - armature = self.nodes[obj_uuid].blender_object - bpy.context.view_layer.objects.active = armature - bpy.ops.object.mode_set(mode="EDIT") - - for bone in armature.data.edit_bones: - if len(bone.children) == 0: - self.nodes[self.nodes[obj_uuid].bones[bone.name]].matrix_world_tail = armature.matrix_world @ Matrix.Translation(bone.tail) @ self.axis_basis_change - - bpy.ops.object.mode_set(mode="OBJECT") - - - for bone_uuid in [n for n in self.nodes if self.nodes[n].blender_type == VExportNode.BONE \ - and len(self.nodes[n].children) == 0]: - - bone_node = self.nodes[bone_uuid] - - # Add a new node - node = VExportNode() - node.uuid = str(uuid.uuid4()) - 
node.parent_uuid = bone_uuid - node.parent_bone_uuid = bone_uuid - node.blender_object = bone_node.blender_object - node.armature = bone_node.armature - node.blender_type = VExportNode.BONE - node.leaf_reference = bone_uuid - node.keep_tag = True - - node.matrix_world = bone_node.matrix_world_tail.copy() - - self.add_children(bone_uuid, node.uuid) - self.add_node(node) - - def add_neutral_bones(self): added_armatures = [] for n in [n for n in self.nodes.values() if \ @@ -625,7 +570,7 @@ class VExportTree: # Be sure to add it to really exported meshes if n.node.skin is None: - self.export_settings['log'].warning("{} has no skin, skipping adding neutral bone data on it.".format(n.blender_object.name)) + print("WARNING: {} has no skin, skipping adding neutral bone data on it.".format(n.blender_object.name)) continue if n.armature not in added_armatures: @@ -741,5 +686,5 @@ class VExportTree: if len(self.get_root_bones_uuid(arma_uuid)) > 1: # We can't remove armature self.export_settings['gltf_armature_object_remove'] = False - self.export_settings['log'].warning("We can't remove armature object because some armatures have multiple root bones.") + print("WARNING: We can't remove armature object because some armatures have multiple root bones.") break diff --git a/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_image.py b/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_image.py index 4ae477532..c4da595fa 100644 --- a/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_image.py +++ b/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_image.py @@ -340,7 +340,8 @@ def __get_image_data_mapping(sockets, results, use_tile, export_settings) -> Exp keys = list(composed_image.fills.keys()) # do not loop on dict, we may have to delete an element for k in [k for k in keys if isinstance(composed_image.fills[k], FillImage)]: if composed_image.fills[k].image.size[0] == 0 or composed_image.fills[k].image.size[1] == 0: - export_settings['log'].warning("Image '{}' has no size and cannot be exported.".format( + gltf2_io_debug.print_console("WARNING", + "Image '{}' has no size and cannot be exported.".format( composed_image.fills[k].image)) del composed_image.fills[k] diff --git a/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials.py b/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials.py index c9baeb3aa..75a57d2f4 100644 --- a/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials.py +++ b/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_materials.py @@ -8,6 +8,7 @@ import bpy from ....io.com import gltf2_io from ....io.com.gltf2_io_extensions import Extension from ....io.exp.gltf2_io_user_extensions import export_user_extensions +from ....io.com.gltf2_io_debug import print_console from ...com.gltf2_blender_extras import generate_extras from ..gltf2_blender_gather_cache import cached, cached_by_key from . 
import gltf2_blender_gather_materials_unlit @@ -327,10 +328,9 @@ def __gather_orm_texture(blender_material, export_settings): result = (occlusion, roughness_socket, metallic_socket) if not gltf2_blender_gather_texture_info.check_same_size_images(result, export_settings): - export_settings['log'].info( + print_console("INFO", "Occlusion and metal-roughness texture will be exported separately " - "(use same-sized images if you want them combined)" - ) + "(use same-sized images if you want them combined)") return None # Double-check this will past the filter in texture_info @@ -508,7 +508,7 @@ def __get_final_material_with_indices(blender_material, base_material, caching_i elif tex.startswith("additional"): export_settings['additional_texture_export'][export_settings['additional_texture_export_current_idx'] + int(tex[10:])].tex_coord = ind else: - export_settings['log'].error("some Textures tex coord are not managed") + print_console("ERROR", "some Textures tex coord are not managed") export_settings['additional_texture_export_current_idx'] = len(export_settings['additional_texture_export']) diff --git a/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture.py b/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture.py index 253f494aa..4637eafda 100644 --- a/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture.py +++ b/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture.py @@ -168,10 +168,9 @@ def __gather_name(blender_shader_sockets, export_settings): def __gather_sampler(blender_shader_sockets, export_settings): shader_nodes = [get_texture_node_from_socket(socket, export_settings) for socket in blender_shader_sockets] if len(shader_nodes) > 1: - export_settings['log'].warning( - "More than one shader node tex image used for a texture. " - "The resulting glTF sampler will behave like the first shader node tex image." - ) + gltf2_io_debug.print_console("WARNING", + "More than one shader node tex image used for a texture. " + "The resulting glTF sampler will behave like the first shader node tex image.") first_valid_shader_node = next(filter(lambda x: x is not None, shader_nodes)) # group_path can't be a list, so transform it to str diff --git a/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture_info.py b/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture_info.py index 4e7ac659c..9fef019ea 100644 --- a/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture_info.py +++ b/io_scene_gltf2/blender/exp/material/gltf2_blender_gather_texture_info.py @@ -163,13 +163,12 @@ def __gather_normal_scale(primary_socket, export_settings): def __gather_occlusion_strength(primary_socket, export_settings): # Look for a MixRGB node that mixes with pure white in front of # primary_socket. The mix factor gives the occlusion strength. 
- nav = primary_socket.to_node_nav() - nav.move_back() - if nav.moved and nav.node.type == 'MIX' and nav.node.blend_type == 'MIX': - fac = nav.get_constant('Factor') + node = previous_node(primary_socket) + if node and node.node.type == 'MIX' and node.node.blend_type == 'MIX': + fac = get_const_from_socket(NodeSocket(node.node.inputs['Factor'], node.group_path), kind='VALUE') + col1 = get_const_from_socket(NodeSocket(node.node.inputs[6], node.group_path), kind='RGB') + col2 = get_const_from_socket(NodeSocket(node.node.inputs[7], node.group_path), kind='RGB') if fac is not None: - col1 = nav.get_constant('#A_Color') - col2 = nav.get_constant('#B_Color') if col1 == [1.0, 1.0, 1.0] and col2 is None: return fac if col1 is None and col2 == [1.0, 1.0, 1.0]: @@ -204,7 +203,7 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings): texture_transform = None if node.node and node.node.type == 'MAPPING': - texture_transform = get_texture_transform_from_mapping_node(node, export_settings) + texture_transform = get_texture_transform_from_mapping_node(node) node = previous_node(NodeSocket(node.node.inputs['Vector'], node.group_path)) uvmap_info = {} diff --git a/io_scene_gltf2/blender/exp/material/gltf2_blender_search_node_tree.py b/io_scene_gltf2/blender/exp/material/gltf2_blender_search_node_tree.py index 49bd2813d..a5bd46b50 100644 --- a/io_scene_gltf2/blender/exp/material/gltf2_blender_search_node_tree.py +++ b/io_scene_gltf2/blender/exp/material/gltf2_blender_search_node_tree.py @@ -11,6 +11,7 @@ from mathutils import Vector, Matrix from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached from ...com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name, get_gltf_old_group_node_name from ....blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf +from io_scene_gltf2.io.com import gltf2_io_debug import typing @@ -183,210 +184,11 @@ def get_socket_from_gltf_material_node(blender_material: bpy.types.Material, nam return NodeSocket(None, None) - -class NodeNav: - """Helper for navigating through node trees.""" - def __init__(self, node, in_socket=None, out_socket=None): - self.node = node # Current node - self.out_socket = out_socket # Socket through which we arrived at this node - self.in_socket = in_socket # Socket through which we will leave this node - self.stack = [] # Stack of (group node, socket) pairs descended through to get here - self.moved = False # Whether the last move_back call moved back or not - - def copy(self): - new = NodeNav(self.node) - new.assign(self) - return new - - def assign(self, other): - self.node = other.node - self.in_socket = other.in_socket - self.out_socket = other.out_socket - self.stack = other.stack.copy() - self.moved = other.moved - - def select_input_socket(self, in_soc): - """Selects an input socket. - - Most operations that operate on the input socket can be passed an in_soc - parameter to select an input socket before running. - """ - if in_soc is None: - # Keep current selected input socket - return - elif isinstance(in_soc, bpy.types.NodeSocket): - assert in_soc.node == self.node - self.in_socket = in_soc - elif isinstance(in_soc, int): - self.in_socket = self.node.inputs[in_soc] - else: - assert isinstance(in_soc, str) - # An identifier like "#A_Color" selects a socket by - # identifier. This is useful for sockets that cannot be - # selected because of non-unique names. 
- if in_soc.startswith('#'): - ident = in_soc.removeprefix('#') - for socket in self.node.inputs: - if socket.identifier == ident: - self.in_socket = socket - return - # Select by regular name - self.in_socket = self.node.inputs[in_soc] - - def get_out_socket_index(self): - assert self.out_socket - for i, soc in enumerate(self.node.outputs): - if soc == self.out_socket: - return i - assert False - - def descend(self): - """Descend into a group node.""" - if self.node and self.node.type == 'GROUP' and self.node.node_tree and self.out_socket: - i = self.get_out_socket_index() - self.stack.append((self.node, self.out_socket)) - self.node = next(node for node in self.node.node_tree.nodes if node.type == 'GROUP_OUTPUT') - self.in_socket = self.node.inputs[i] - self.out_socket = None - - def ascend(self): - """Ascend from a group input node back to the group node.""" - if self.stack and self.node and self.node.type == 'GROUP_INPUT' and self.out_socket: - i = self.get_out_socket_index() - self.node, self.out_socket = self.stack.pop() - self.in_socket = self.node.inputs[i] - - def move_back(self, in_soc=None): - """Move backwards through an input socket to the next node.""" - self.moved = False - - self.select_input_socket(in_soc) - - if not self.in_socket or not self.in_socket.is_linked: - return - - # Warning, slow! socket.links is O(total number of links)! - link = self.in_socket.links[0] - - self.node = link.from_node - self.out_socket = link.from_socket - self.in_socket = None - self.moved = True - - # Continue moving - if self.node.type == 'REROUTE': - self.move_back(0) - elif self.node.type == 'GROUP': - self.descend() - self.move_back() - elif self.node.type == 'GROUP_INPUT': - self.ascend() - self.move_back() - - def peek_back(self, in_soc=None): - """Peeks backwards through an input socket without modifying self.""" - s = self.copy() - s.select_input_socket(in_soc) - s.move_back() - return s - - def get_constant(self, in_soc=None): - """Gets a constant from an input socket. Returns None if non-constant.""" - self.select_input_socket(in_soc) - - if not self.in_socket: - return None - - # Get constant from unlinked socket's default value - if not self.in_socket.is_linked: - if self.in_socket.type == 'RGBA': - color = list(self.in_socket.default_value) - color = color[:3] # drop unused alpha component (assumes shader tree) - return color - - elif self.in_socket.type == 'SHADER': - # Treat unlinked shader sockets as black - return [0.0, 0.0, 0.0] - - elif self.in_socket.type == 'VECTOR': - return list(self.in_socket.default_value) - - elif self.in_socket.type == 'VALUE': - return self.in_socket.default_value - - else: - return None - - # Check for a constant in the next node - nav = self.peek_back() - if nav.moved: - if self.in_socket.type == 'RGBA': - if nav.node.type == 'RGB': - color = list(nav.out_socket.default_value) - color = color[:3] # drop unused alpha component (assumes shader tree) - return color - - elif self.in_socket.type == 'VALUE': - if nav.node.type == 'VALUE': - return nav.node.out_socket.default_value - - return None - - def get_factor(self, in_soc=None): - """Gets a factor, eg. metallicFactor. 
Either a constant or constant multiplier.""" - self.select_input_socket(in_soc) - - if not self.in_socket: - return None - - # Constant - fac = self.get_constant() - if fac is not None: - return fac - - # Multiplied by constant - nav = self.peek_back() - if nav.moved: - x1, x2 = None, None - - if self.in_socket.type == 'RGBA': - is_mul = ( - nav.node.type == 'MIX' and - nav.node.data_type == 'RGBA' and - nav.node.blend_type == 'MULTIPLY' - ) - if is_mul: - # TODO: check factor is 1? - x1 = nav.get_constant('#A_Color') - x2 = nav.get_constant('#B_Color') - - elif self.in_socket.type == 'VALUE': - if nav.node.type == 'MATH' and nav.node.operation == 'MULTIPLY': - x1 = nav.get_constant(0) - x2 = nav.get_constant(1) - - if x1 is not None and x2 is None: return x1 - if x2 is not None and x1 is None: return x2 - - return None - - class NodeSocket: def __init__(self, socket, group_path): self.socket = socket self.group_path = group_path - def to_node_nav(self): - assert self.socket - nav = NodeNav( - self.socket.node, - out_socket=self.socket if self.socket.is_output else None, - in_socket=self.socket if not self.socket.is_output else None, - ) - # No output socket information - nav.stack = [(node, None) for node in self.group_path] - return nav - class ShNode: def __init__(self, node, group_path): self.node = node @@ -441,15 +243,52 @@ def get_socket(blender_material: bpy.types.Material, name: str, volume=False): return NodeSocket(None, None) - -# Old, prefer NodeNav.get_factor in new code def get_factor_from_socket(socket, kind): - return socket.to_node_nav().get_factor() + """ + For baseColorFactor, metallicFactor, etc. + Get a constant value from a socket, or a constant value + from a MULTIPLY node just before the socket. + kind is either 'RGB' or 'VALUE'. + """ + fac = get_const_from_socket(socket, kind) + if fac is not None: + return fac + node = previous_node(socket) + if node.node is not None: + x1, x2 = None, None + if kind == 'RGB': + if node.node.type == 'MIX' and node.node.data_type == "RGBA" and node.node.blend_type == 'MULTIPLY': + # TODO: handle factor in inputs[0]? 
+ x1 = get_const_from_socket(NodeSocket(node.node.inputs[6], node.group_path), kind) + x2 = get_const_from_socket(NodeSocket(node.node.inputs[7], node.group_path), kind) + if kind == 'VALUE': + if node.node.type == 'MATH' and node.node.operation == 'MULTIPLY': + x1 = get_const_from_socket(NodeSocket(node.node.inputs[0], node.group_path), kind) + x2 = get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind) + if x1 is not None and x2 is None: return x1 + if x2 is not None and x1 is None: return x2 + + return None -# Old, prefer NodeNav.get_constant in new code def get_const_from_socket(socket, kind): - return socket.to_node_nav().get_constant() + if not socket.socket.is_linked: + if kind == 'RGB': + if socket.socket.type != 'RGBA': return None + return list(socket.socket.default_value)[:3] + if kind == 'VALUE': + if socket.socket.type != 'VALUE': return None + return socket.socket.default_value + + # Handle connection to a constant RGB/Value node + prev_node = previous_node(socket) + if prev_node.node is not None: + if kind == 'RGB' and prev_node.node.type == 'RGB': + return list(prev_node.node.outputs[0].default_value)[:3] + if kind == 'VALUE' and prev_node.node.type == 'VALUE': + return prev_node.node.outputs[0].default_value + + return None def previous_socket(socket: NodeSocket): @@ -493,9 +332,9 @@ def previous_node(socket: NodeSocket): return ShNode(prev_socket.socket.node, prev_socket.group_path) return ShNode(None, None) -def get_texture_transform_from_mapping_node(mapping_node, export_settings): +def get_texture_transform_from_mapping_node(mapping_node): if mapping_node.node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]: - export_settings['log'].warning( + gltf2_io_debug.print_console("WARNING", "Skipping exporting texture transform because it had type " + mapping_node.node.vector_type + "; recommend using POINT instead" ) @@ -505,7 +344,7 @@ def get_texture_transform_from_mapping_node(mapping_node, export_settings): rotation_0, rotation_1 = mapping_node.node.inputs['Rotation'].default_value[0], mapping_node.node.inputs['Rotation'].default_value[1] if rotation_0 or rotation_1: # TODO: can we handle this? - export_settings['log'].warning( + gltf2_io_debug.print_console("WARNING", "Skipping exporting texture transform because it had non-zero " "rotations in the X/Y direction; only a Z rotation can be exported!" 
) @@ -541,7 +380,7 @@ def get_texture_transform_from_mapping_node(mapping_node, export_settings): mapping_transform = inverted(mapping_transform) if mapping_transform is None: - export_settings['log'].warning( + gltf2_io_debug.print_console("WARNING", "Skipping exporting texture transform with type TEXTURE because " "we couldn't convert it to TRS; recommend using POINT instead" ) diff --git a/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py b/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py index 10dc46f1b..3f5d31c9a 100755 --- a/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py +++ b/io_scene_gltf2/blender/imp/gltf2_blender_mesh.py @@ -6,6 +6,7 @@ import bpy from mathutils import Matrix import numpy as np from ...io.imp.gltf2_io_user_extensions import import_user_extensions +from ...io.com.gltf2_io_debug import print_console from ...io.imp.gltf2_io_binary import BinaryData from ...io.com.gltf2_io_constants import DataType, ComponentType from ...blender.com.gltf2_blender_conversion import get_attribute_type @@ -156,8 +157,7 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob): vert_index_base = len(vert_locs) if prim.extensions is not None and 'KHR_draco_mesh_compression' in prim.extensions: - - gltf.log.info('Draco Decoder: Decode primitive {}'.format(pymesh.name or '[unnamed]')) + print_console('INFO', 'Draco Decoder: Decode primitive {}'.format(pymesh.name or '[unnamed]')) decode_primitive(gltf, prim) import_user_extensions('gather_import_decode_primitive', gltf, pymesh, prim, skin_idx) @@ -319,7 +319,7 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob): layer = mesh.uv_layers.new(name=name) if layer is None: - gltf.log.warning("WARNING: UV map is ignored because the maximum number of UV layers has been reached.") + print("WARNING: UV map is ignored because the maximum number of UV layers has been reached.") break layer.uv.foreach_set('vector', squish(loop_uvs[uv_i], np.float32)) @@ -639,7 +639,7 @@ def skin_into_bind_pose(gltf, skin_idx, vert_joints, vert_weights, locs, vert_no # We set all weight ( aka 1.0 ) to the first bone zeros_indices = np.where(weight_sums == 0)[0] if zeros_indices.shape[0] > 0: - gltf.log.error('File is invalid: Some vertices are not assigned to bone(s) ') + print_console('ERROR', 'File is invalid: Some vertices are not assigned to bone(s) ') vert_weights[0][:, 0][zeros_indices] = 1.0 # Assign to first bone with all weight # Reprocess IBM for these vertices diff --git a/io_scene_gltf2/blender/imp/gltf2_blender_node.py b/io_scene_gltf2/blender/imp/gltf2_blender_node.py index 19100efcc..ed1d1945b 100755 --- a/io_scene_gltf2/blender/imp/gltf2_blender_node.py +++ b/io_scene_gltf2/blender/imp/gltf2_blender_node.py @@ -269,7 +269,7 @@ class BlenderNode(): if cache_key is not None and cache_key in pymesh.blender_name: mesh = bpy.data.meshes[pymesh.blender_name[cache_key]] else: - gltf.log.info("Blender create Mesh node {}".format(pymesh.name or pynode.mesh)) + gltf.log.info("Blender create Mesh node %s", pymesh.name or pynode.mesh) mesh = BlenderMesh.create(gltf, pynode.mesh, pynode.skin) if cache_key is not None: pymesh.blender_name[cache_key] = mesh.name diff --git a/io_scene_gltf2/blender/imp/gltf2_io_draco_compression_extension.py b/io_scene_gltf2/blender/imp/gltf2_io_draco_compression_extension.py index 105c4ed40..e8911bee1 100644 --- a/io_scene_gltf2/blender/imp/gltf2_io_draco_compression_extension.py +++ b/io_scene_gltf2/blender/imp/gltf2_io_draco_compression_extension.py @@ -6,6 +6,7 @@ from ctypes import * from ...io.com.gltf2_io import BufferView 
from ...io.imp.gltf2_io_binary import BinaryData +from ...io.com.gltf2_io_debug import print_console from ...io.com.gltf2_io_draco_compression_extension import dll_path @@ -62,7 +63,7 @@ def decode_primitive(gltf, prim): # Create Draco decoder. draco_buffer = bytes(BinaryData.get_buffer_view(gltf, extension['bufferView'])) if not dll.decoderDecode(decoder, draco_buffer, len(draco_buffer)): - gltf.log.error('Draco Decoder: Unable to decode. Skipping primitive {}.'.format(name)) + print_console('ERROR', 'Draco Decoder: Unable to decode. Skipping primitive {}.'.format(name)) return # Choose a buffer index which does not yet exist, skipping over existing glTF buffers yet to be loaded @@ -75,10 +76,10 @@ def decode_primitive(gltf, prim): # Read indices. index_accessor = gltf.data.accessors[prim.indices] if dll.decoderGetIndexCount(decoder) != index_accessor.count: - gltf.log.warning('Draco Decoder: Index count of accessor and decoded index count does not match. Updating accessor.') + print_console('WARNING', 'Draco Decoder: Index count of accessor and decoded index count does not match. Updating accessor.') index_accessor.count = dll.decoderGetIndexCount(decoder) if not dll.decoderReadIndices(decoder, index_accessor.component_type): - gltf.log.error('Draco Decoder: Unable to decode indices. Skipping primitive {}.'.format(name)) + print_console('ERROR', 'Draco Decoder: Unable to decode indices. Skipping primitive {}.'.format(name)) return indices_byte_length = dll.decoderGetIndicesByteLength(decoder) @@ -101,15 +102,15 @@ def decode_primitive(gltf, prim): for attr_idx, attr in enumerate(extension['attributes']): dracoId = extension['attributes'][attr] if attr not in prim.attributes: - gltf.log.error('Draco Decoder: Draco attribute {} not in primitive attributes. Skipping primitive {}.'.format(attr, name)) + print_console('ERROR', 'Draco Decoder: Draco attribute {} not in primitive attributes. Skipping primitive {}.'.format(attr, name)) return accessor = gltf.data.accessors[prim.attributes[attr]] if dll.decoderGetVertexCount(decoder) != accessor.count: - gltf.log.warning('Draco Decoder: Vertex count of accessor and decoded vertex count does not match for attribute {}. Updating accessor.'.format(attr, name)) + print_console('WARNING', 'Draco Decoder: Vertex count of accessor and decoded vertex count does not match for attribute {}. Updating accessor.'.format(attr, name)) accessor.count = dll.decoderGetVertexCount(decoder) if not dll.decoderReadAttribute(decoder, dracoId, accessor.component_type, accessor.type.encode()): - gltf.log.error('Draco Decoder: Could not decode attribute {}. Skipping primitive {}.'.format(attr, name)) + print_console('ERROR', 'Draco Decoder: Could not decode attribute {}. 
Skipping primitive {}.'.format(attr, name)) return byte_length = dll.decoderGetAttributeByteLength(decoder, dracoId) diff --git a/io_scene_gltf2/io/com/gltf2_io.py b/io_scene_gltf2/io/com/gltf2_io.py index e178e8d86..188ec8e9b 100755 --- a/io_scene_gltf2/io/com/gltf2_io.py +++ b/io_scene_gltf2/io/com/gltf2_io.py @@ -42,7 +42,7 @@ def from_union(fs, x): tb_info = traceback.extract_tb(tb) for tbi in tb_info: filename, line, func, text = tbi - print('ERROR', 'An error occurred on line {} in statement {}'.format(line, text)) + gltf2_io_debug.print_console('ERROR', 'An error occurred on line {} in statement {}'.format(line, text)) assert False diff --git a/io_scene_gltf2/io/com/gltf2_io_debug.py b/io_scene_gltf2/io/com/gltf2_io_debug.py index d614792ce..8cb5f315d 100755 --- a/io_scene_gltf2/io/com/gltf2_io_debug.py +++ b/io_scene_gltf2/io/com/gltf2_io_debug.py @@ -8,17 +8,48 @@ import time import logging -import logging.handlers # # Globals # +OUTPUT_LEVELS = ['ERROR', 'WARNING', 'INFO', 'PROFILE', 'DEBUG', 'VERBOSE'] + +g_current_output_level = 'DEBUG' g_profile_started = False g_profile_start = 0.0 g_profile_end = 0.0 g_profile_delta = 0.0 +# +# Functions +# + + +def set_output_level(level): + """Set an output debug level.""" + global g_current_output_level + + if OUTPUT_LEVELS.index(level) < 0: + return + + g_current_output_level = level + + +def print_console(level, output): + """Print to Blender console with a given header and output.""" + global OUTPUT_LEVELS + global g_current_output_level + + if OUTPUT_LEVELS.index(level) > OUTPUT_LEVELS.index(g_current_output_level): + return + + print(get_timestamp() + " | " + level + ': ' + output) + + +def print_newline(): + """Print a new line to Blender console.""" + print() def get_timestamp(): @@ -26,13 +57,23 @@ def get_timestamp(): return time.strftime("%H:%M:%S", current_time) +def print_timestamp(label=None): + """Print a timestamp to Blender console.""" + output = 'Timestamp: ' + get_timestamp() + + if label is not None: + output = output + ' (' + label + ')' + + print_console('PROFILE', output) + + def profile_start(): """Start profiling by storing the current time.""" global g_profile_start global g_profile_started if g_profile_started: - print('ERROR', 'Profiling already started') + print_console('ERROR', 'Profiling already started') return g_profile_started = True @@ -47,7 +88,7 @@ def profile_end(label=None): global g_profile_started if not g_profile_started: - print('ERROR', 'Profiling not started') + print_console('ERROR', 'Profiling not started') return g_profile_started = False @@ -60,60 +101,16 @@ def profile_end(label=None): if label is not None: output = output + ' (' + label + ')' - print('PROFILE', output) + print_console('PROFILE', output) +# TODO: need to have a unique system for logging importer/exporter +# TODO: this logger is used for importer, but in io and in blender part, but is written here in a _io_ file class Log: def __init__(self, loglevel): self.logger = logging.getLogger('glTFImporter') - - # For console display - self.console_handler = logging.StreamHandler() - formatter = logging.Formatter('%(asctime)s | %(levelname)s: %(message)s', "%H:%M:%S") - self.console_handler.setFormatter(formatter) - - # For popup display - self.popup_handler = logging.handlers.MemoryHandler(1024*10) - - self.logger.addHandler(self.console_handler) - #self.logger.addHandler(self.popup_handler) => Make sure to not attach the popup handler to the logger - + self.hdlr = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s 
%(levelname)s %(message)s') + self.hdlr.setFormatter(formatter) + self.logger.addHandler(self.hdlr) self.logger.setLevel(int(loglevel)) - - def error(self, message, popup=False): - self.logger.error(message) - if popup: - self.popup_handler.buffer.append(('ERROR', message)) - - def warning(self, message, popup=False): - self.logger.warning(message) - if popup: - self.popup_handler.buffer.append(('WARNING', message)) - - def info(self, message, popup=False): - self.logger.info(message) - if popup: - self.popup_handler.buffer.append(('INFO', message)) - - def debug(self, message, popup=False): - self.logger.debug(message) - if popup: - self.popup_handler.buffer.append(('DEBUG', message)) - - def critical(self, message, popup=False): - self.logger.critical(message) - if popup: - self.popup_handler.buffer.append(('ERROR', message)) # There is no Critical level in Blender, so we use error - - def profile(self, message, popup=False): # There is no profile level in logging, so we use info - self.logger.info(message) - if popup: - self.popup_handler.buffer.append(('PROFILE', message)) - - def messages(self): - return self.popup_handler.buffer - - def flush(self): - self.logger.removeHandler(self.console_handler) - self.popup_handler.flush() - self.logger.removeHandler(self.popup_handler) diff --git a/io_scene_gltf2/io/com/gltf2_io_draco_compression_extension.py b/io_scene_gltf2/io/com/gltf2_io_draco_compression_extension.py index b85183286..58f81125f 100644 --- a/io_scene_gltf2/io/com/gltf2_io_draco_compression_extension.py +++ b/io_scene_gltf2/io/com/gltf2_io_draco_compression_extension.py @@ -7,6 +7,9 @@ import sys from pathlib import Path import bpy +from ...io.com.gltf2_io_debug import print_console + + def dll_path() -> Path: """ Get the DLL path depending on the underlying platform. 
@@ -34,7 +37,7 @@ def dll_path() -> Path: }.get(sys.platform) if path is None or library_name is None: - print('WARNING', 'Unsupported platform {}, Draco mesh compression is unavailable'.format(sys.platform)) + print_console('WARNING', 'Unsupported platform {}, Draco mesh compression is unavailable'.format(sys.platform)) return path / library_name @@ -48,7 +51,7 @@ def dll_exists(quiet=False) -> bool: exists = path.exists() and path.is_file() if quiet is False: if exists: - print('INFO', 'Draco mesh compression is available, use library at %s' % dll_path().absolute()) + print_console('INFO', 'Draco mesh compression is available, use library at %s' % dll_path().absolute()) else: - print('ERROR', 'Draco mesh compression is not available because library could not be found at %s' % dll_path().absolute()) + print_console('ERROR', 'Draco mesh compression is not available because library could not be found at %s' % dll_path().absolute()) return exists diff --git a/io_scene_gltf2/io/exp/gltf2_io_draco_compression_extension.py b/io_scene_gltf2/io/exp/gltf2_io_draco_compression_extension.py index 5e37929ad..65cdc4b5c 100644 --- a/io_scene_gltf2/io/exp/gltf2_io_draco_compression_extension.py +++ b/io_scene_gltf2/io/exp/gltf2_io_draco_compression_extension.py @@ -6,6 +6,7 @@ from ctypes import * from pathlib import Path from ...io.exp.gltf2_io_binary_data import BinaryData +from ...io.com.gltf2_io_debug import print_console from ...io.com.gltf2_io_draco_compression_extension import dll_path @@ -89,7 +90,7 @@ def __traverse_node(node, f): def __encode_node(node, dll, export_settings, encoded_primitives_cache): if node.mesh is not None: - export_settings['log'].info('Draco encoder: Encoding mesh {}.'.format(node.name)) + print_console('INFO', 'Draco encoder: Encoding mesh {}.'.format(node.name)) for primitive in node.mesh.primitives: __encode_primitive(primitive, dll, export_settings, encoded_primitives_cache) @@ -111,7 +112,7 @@ def __encode_primitive(primitive, dll, export_settings, encoded_primitives_cache return if 'POSITION' not in attributes: - export_settings['log'].warning('Draco encoder: Primitive without positions encountered. Skipping.') + print_console('WARNING', 'Draco encoder: Primitive without positions encountered. Skipping.') return positions = attributes['POSITION'] @@ -140,7 +141,7 @@ def __encode_primitive(primitive, dll, export_settings, encoded_primitives_cache preserve_triangle_order = primitive.targets is not None and len(primitive.targets) > 0 if not dll.encoderEncode(encoder, preserve_triangle_order): - export_settings['log'].error('Could not encode primitive. Skipping primitive.') + print_console('ERROR', 'Could not encode primitive. 
Skipping primitive.') byte_length = dll.encoderGetByteLength(encoder) encoded_data = bytes(byte_length) diff --git a/io_scene_gltf2/io/exp/gltf2_io_user_extensions.py b/io_scene_gltf2/io/exp/gltf2_io_user_extensions.py index de4db9e9c..62e40ca2e 100644 --- a/io_scene_gltf2/io/exp/gltf2_io_user_extensions.py +++ b/io_scene_gltf2/io/exp/gltf2_io_user_extensions.py @@ -13,6 +13,5 @@ def export_user_extensions(hook_name, export_settings, *args): try: hook(*args, export_settings) except Exception as e: - export_settings['log'].error("Extension hook", hook_name, "fails on", extension) - export_settings['log'].error(str(e)) - + print(hook_name, "fails on", extension) + print(str(e)) diff --git a/io_scene_gltf2/io/imp/gltf2_io_gltf.py b/io_scene_gltf2/io/imp/gltf2_io_gltf.py index 039683a9b..bda8609eb 100755 --- a/io_scene_gltf2/io/imp/gltf2_io_gltf.py +++ b/io_scene_gltf2/io/imp/gltf2_io_gltf.py @@ -32,9 +32,11 @@ class glTFImporter(): self.variant_mapping = {} # Used to map between mgltf material idx and blender material, for Variants if 'loglevel' not in self.import_settings.keys(): - self.import_settings['loglevel'] = logging.CRITICAL + self.import_settings['loglevel'] = logging.ERROR - self.log = Log(import_settings['loglevel']) + log = Log(import_settings['loglevel']) + self.log = log.logger + self.log_handler = log.hdlr # TODO: move to a com place? self.extensions_managed = [ diff --git a/io_scene_gltf2/io/imp/gltf2_io_user_extensions.py b/io_scene_gltf2/io/imp/gltf2_io_user_extensions.py index 84aa8e370..c55b70e38 100644 --- a/io_scene_gltf2/io/imp/gltf2_io_user_extensions.py +++ b/io_scene_gltf2/io/imp/gltf2_io_user_extensions.py @@ -9,5 +9,5 @@ def import_user_extensions(hook_name, gltf, *args): try: hook(*args, gltf) except Exception as e: - gltf.log.error(hook_name, "fails on", extension) - gltf.log.error(str(e)) + print(hook_name, "fails on", extension) + print(str(e)) diff --git a/object_print3d_utils/export.py b/object_print3d_utils/export.py index bd393547d..a971833ab 100644 --- a/object_print3d_utils/export.py +++ b/object_print3d_utils/export.py @@ -72,11 +72,6 @@ def write_mesh(context, report_cb): obj = layer.objects.active export_data_layers = print_3d.use_data_layers - # Make sure at least one object is selected. - if not context.selected_objects: - report_cb({'ERROR'}, "No objects selected") - return False - # Create name 'export_path/blendname-objname' # add the filename component if bpy.data.is_saved: -- 2.30.2
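Note on the restored logging helpers: patch 4/4 brings back the level-filtered console logging in io_scene_gltf2/io/com/gltf2_io_debug.py (set_output_level / print_console) in place of the per-settings logger. A minimal usage sketch, assuming the add-on is enabled inside Blender so the io_scene_gltf2 package is importable; the 'WARNING' threshold below is only an example, not the module default (which is 'DEBUG'):

    from io_scene_gltf2.io.com import gltf2_io_debug

    # OUTPUT_LEVELS is ordered ['ERROR', 'WARNING', 'INFO', 'PROFILE', 'DEBUG', 'VERBOSE'];
    # print_console() drops any message whose level comes after the current threshold.
    gltf2_io_debug.set_output_level('WARNING')

    gltf2_io_debug.print_console('ERROR', 'Draco mesh compression is unavailable')  # printed
    gltf2_io_debug.print_console('INFO', 'Decoding primitive')                      # suppressed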