3D Print Toolbox: Add hollow out #105194

Merged
Mikhail Rachinskiy merged 9 commits from usfreitas/blender-addons:hollow into main 2024-03-18 12:24:30 +01:00
38 changed files with 310 additions and 205 deletions
Showing only changes of commit 913618d0a2

View File

@ -102,12 +102,18 @@ class Import3DS(bpy.types.Operator, ImportHelper):
description="Transform to matrix world",
default=False,
)
use_collection: BoolProperty(
name="Collection",
description="Create a new collection",
default=False,
)
use_cursor: BoolProperty(
name="Cursor Origin",
description="Read the 3D cursor location",
default=False,
)
def execute(self, context):
from . import import_3ds
keywords = self.as_keywords(ignore=("axis_forward",
@ -166,6 +172,9 @@ class MAX3DS_PT_import_include(bpy.types.Panel):
layrow.prop(operator, "use_keyframes")
layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER')
layrow = layout.row(align=True)
layrow.prop(operator, "use_collection")
layrow.label(text="", icon='OUTLINER_COLLECTION' if operator.use_collection else 'GROUP')
layrow = layout.row(align=True)
layrow.prop(operator, "use_cursor")
layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR')
@ -249,14 +258,14 @@ class Export3DS(bpy.types.Operator, ExportHelper):
description="Object types to export",
default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'},
)
use_hierarchy: BoolProperty(
name="Hierarchy",
description="Export hierarchy chunks",
default=False,
)
use_keyframes: BoolProperty(
name="Animation",
description="Write the keyframe data",
default=True,
)
use_hierarchy: BoolProperty(
name="Hierarchy",
description="Export hierarchy chunks",
default=False,
)
use_cursor: BoolProperty(
@ -310,12 +319,12 @@ class MAX3DS_PT_export_include(bpy.types.Panel):
layrow.label(text="", icon='RESTRICT_SELECT_OFF' if operator.use_selection else 'RESTRICT_SELECT_ON')
layout.column().prop(operator, "object_filter")
layrow = layout.row(align=True)
layrow.prop(operator, "use_hierarchy")
layrow.label(text="", icon='OUTLINER' if operator.use_hierarchy else 'CON_CHILDOF')
layrow = layout.row(align=True)
layrow.prop(operator, "use_keyframes")
layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER')
layrow = layout.row(align=True)
layrow.prop(operator, "use_hierarchy")
layrow.label(text="", icon='OUTLINER' if operator.use_hierarchy else 'CON_CHILDOF')
layrow = layout.row(align=True)
layrow.prop(operator, "use_cursor")
layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR')

View File

@ -1563,7 +1563,7 @@ def make_ambient_node(world):
##########
def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, use_selection=False,
object_filter=None, use_hierarchy=False, use_keyframes=False, global_matrix=None, use_cursor=False):
object_filter=None, use_keyframes=True, use_hierarchy=False, global_matrix=None, use_cursor=False):
"""Save the Blender scene to a 3ds file."""
# Time the export

View File

@ -10,6 +10,7 @@ import struct
import mathutils
from bpy_extras.image_utils import load_image
from bpy_extras.node_shader_utils import PrincipledBSDFWrapper
from pathlib import Path
BOUNDS_3DS = []
@ -1772,11 +1773,25 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True,
def load(operator, context, files=None, directory="", filepath="", constrain_size=0.0, use_scene_unit=False,
use_image_search=True, object_filter=None, use_world_matrix=False, use_keyframes=True,
use_apply_transform=True, global_matrix=None, use_cursor=False, use_center_pivot=False):
use_apply_transform=True, global_matrix=None, use_cursor=False, use_center_pivot=False, use_collection=False):
for f in files:
load_3ds(os.path.join(directory, f.name), context, CONSTRAIN=constrain_size, UNITS=use_scene_unit,
# Get the active collection
collection_init = context.view_layer.active_layer_collection.collection
# Load each selected file
for file in files:
# Create new collections if activated (collection name = 3ds file name)
if use_collection:
collection = bpy.data.collections.new(Path(file.name).stem)
context.scene.collection.children.link(collection)
context.view_layer.active_layer_collection = context.view_layer.layer_collection.children[collection.name]
load_3ds(Path(directory, file.name), context, CONSTRAIN=constrain_size, UNITS=use_scene_unit,
IMAGE_SEARCH=use_image_search, FILTER=object_filter, WORLD_MATRIX=use_world_matrix, KEYFRAME=use_keyframes,
APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix, CURSOR=use_cursor, PIVOT=use_center_pivot,)
# Restore the initial collection as the active one
active = context.view_layer.layer_collection.children.get(collection_init.name)
if active is not None:
context.view_layer.active_layer_collection = active
return {'FINISHED'}
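
In isolation, the collection-per-file pattern introduced above reduces to the following sketch (standalone, with a hypothetical file path):

import bpy
from pathlib import Path

filepath = "/tmp/demo.3ds"  # hypothetical path

# Name the collection after the file and link it under the scene collection.
collection = bpy.data.collections.new(Path(filepath).stem)
bpy.context.scene.collection.children.link(collection)

# Making it the active layer collection ensures objects created during the
# import are linked into it.
layer_coll = bpy.context.view_layer.layer_collection.children[collection.name]
bpy.context.view_layer.active_layer_collection = layer_coll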

View File

@ -5,7 +5,7 @@
bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
"version": (4, 2, 5),
"version": (4, 2, 8),
'blender': (4, 1, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',
@ -648,6 +648,15 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
default=False
)
export_leaf_bone: BoolProperty(
name='Add Leaf Bones',
description=(
'Append a final bone to the end of each chain to specify last bone length '
'(use this when you intend to edit the armature from exported data)'
),
default=False
)
export_optimize_animation_size: BoolProperty(
name='Optimize Animation Size',
description=(
@ -955,6 +964,8 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
def execute(self, context):
import os
import datetime
import logging
from .io.com.gltf2_io_debug import Log
from .blender.exp import gltf2_blender_export
from .io.com.gltf2_io_path import path_to_uri
@ -966,6 +977,8 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
# All custom export settings are stored in this container.
export_settings = {}
export_settings['loglevel'] = logging.INFO
export_settings['exported_images'] = {}
export_settings['exported_texture_nodes'] = []
export_settings['additional_texture_export'] = []
@ -1035,6 +1048,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_settings['gltf_flatten_bones_hierarchy'] = self.export_hierarchy_flatten_bones
export_settings['gltf_flatten_obj_hierarchy'] = self.export_hierarchy_flatten_objs
export_settings['gltf_armature_object_remove'] = self.export_armature_object_remove
export_settings['gltf_leaf_bone'] = self.export_leaf_bone
if self.export_animations:
export_settings['gltf_frame_range'] = self.export_frame_range
export_settings['gltf_force_sampling'] = self.export_force_sampling
@ -1154,7 +1168,19 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_settings['pre_export_callbacks'] = pre_export_callbacks
export_settings['post_export_callbacks'] = post_export_callbacks
return gltf2_blender_export.save(context, export_settings)
# Initialize logging for export
export_settings['log'] = Log(export_settings['loglevel'])
res = gltf2_blender_export.save(context, export_settings)
# Display popup log, if any
for message_type, message in export_settings['log'].messages():
self.report({message_type}, message)
export_settings['log'].flush()
return res
def draw(self, context):
pass # Is needed to get panels available
@ -2110,14 +2136,18 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
gltf_importer.read()
gltf_importer.checks()
print("Data are loaded, start creating Blender stuff")
gltf_importer.log.info("Data are loaded, start creating Blender stuff")
start_time = time.time()
BlenderGlTF.create(gltf_importer)
elapsed_s = "{:.2f}s".format(time.time() - start_time)
print("glTF import finished in " + elapsed_s)
gltf_importer.log.info("glTF import finished in " + elapsed_s)
gltf_importer.log.removeHandler(gltf_importer.log_handler)
# Display popup log, if any
for message_type, message in gltf_importer.log.messages():
self.report({message_type}, message)
gltf_importer.log.flush()
return {'FINISHED'}
@ -2127,16 +2157,16 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
def set_debug_log(self):
import logging
if bpy.app.debug_value == 0:
self.loglevel = logging.CRITICAL
elif bpy.app.debug_value == 1:
self.loglevel = logging.ERROR
elif bpy.app.debug_value == 2:
self.loglevel = logging.WARNING
elif bpy.app.debug_value == 3:
if bpy.app.debug_value == 0: # Default value => display all messages except debug ones
self.loglevel = logging.INFO
else:
self.loglevel = logging.NOTSET
elif bpy.app.debug_value == 1:
self.loglevel = logging.WARNING
elif bpy.app.debug_value == 2:
self.loglevel = logging.ERROR
elif bpy.app.debug_value == 3:
self.loglevel = logging.CRITICAL
elif bpy.app.debug_value == 4:
self.loglevel = logging.DEBUG
class GLTF2_filter_action(bpy.types.PropertyGroup):
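
As a usage note, `bpy.app.debug_value` mirrors Blender's `--debug-value` command-line option, so the remapped levels can be summarized as below. A sketch only; the fallback for values above 4 is an assumption, since the hunk does not show an else branch:

import logging

# Hedged summary of the new mapping (0 is the default):
# 0 -> INFO (everything except debug), 1 -> WARNING, 2 -> ERROR,
# 3 -> CRITICAL, 4 -> DEBUG.
LEVELS = {0: logging.INFO, 1: logging.WARNING, 2: logging.ERROR,
          3: logging.CRITICAL, 4: logging.DEBUG}

def level_for(debug_value):
    # NOTSET fallback is an assumption, not shown in the diff.
    return LEVELS.get(debug_value, logging.NOTSET)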

View File

@ -6,7 +6,6 @@ import bpy
import typing
from .....io.exp.gltf2_io_user_extensions import export_user_extensions
from .....blender.com.gltf2_blender_data_path import skip_sk
from .....io.com import gltf2_io_debug
from .....io.com import gltf2_io
from ....exp.gltf2_blender_gather_cache import cached
from ....com.gltf2_blender_data_path import get_target_object_path, get_target_property_name, get_rotation_modes
@ -76,7 +75,7 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s
# example of target_property : location, rotation_quaternion, value
target_property = get_target_property_name(fcurve.data_path)
except:
gltf2_io_debug.print_console("WARNING", "Invalid animation fcurve name on action {}".format(blender_action.name))
export_settings['log'].warning("Invalid animation fcurve data path on action {}".format(blender_action.name))
continue
object_path = get_target_object_path(fcurve.data_path)
@ -123,10 +122,10 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s
type_ = "SK"
except:
# Something is wrong, for example a bone animation is linked to an object mesh...
gltf2_io_debug.print_console("WARNING", "Animation target {} not found".format(object_path))
export_settings['log'].warning("Invalid animation fcurve data path on action {}".format(blender_action.name))
continue
else:
gltf2_io_debug.print_console("WARNING", "Animation target {} not found".format(object_path))
export_settings['log'].warning("Animation target {} not found".format(object_path))
continue
# Detect that object or bone are not multiple keyed for euler and quaternion
@ -169,7 +168,7 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s
targets[target] = target_data
for targ in multiple_rotation_mode_detected.keys():
gltf2_io_debug.print_console("WARNING", "Multiple rotation mode detected for {}".format(targ.name))
export_settings['log'].warning("Multiple rotation mode detected for {}".format(targ.name))
# Now that all curves are extracted,
# - check that there is no normal + delta transforms
@ -329,24 +328,24 @@ def needs_baking(obj_uuid: str,
# Sampling due to unsupported interpolation
interpolation = [c for c in channels if c is not None][0].keyframe_points[0].interpolation
if interpolation not in ["BEZIER", "LINEAR", "CONSTANT"]:
gltf2_io_debug.print_console("WARNING",
"Baking animation because of an unsupported interpolation method: {}".format(
interpolation)
export_settings['log'].warning(
"Baking animation because of an unsupported interpolation method: {}".format(interpolation)
)
return True
if any(any(k.interpolation != interpolation for k in c.keyframe_points) for c in channels if c is not None):
# There are different interpolation methods in one action group
gltf2_io_debug.print_console("WARNING",
export_settings['log'].warning(
"Baking animation because there are keyframes with different "
"interpolation methods in one channel"
)
return True
if not all_equal([len(c.keyframe_points) for c in channels if c is not None]):
gltf2_io_debug.print_console("WARNING",
export_settings['log'].warning(
"Baking animation because the number of keyframes is not "
"equal for all channel tracks")
"equal for all channel tracks"
)
return True
if len([c for c in channels if c is not None][0].keyframe_points) <= 1:
@ -355,8 +354,7 @@ def needs_baking(obj_uuid: str,
if not all_equal(list(zip([[k.co[0] for k in c.keyframe_points] for c in channels if c is not None]))):
# The channels have differently located keyframes
gltf2_io_debug.print_console("WARNING",
"Baking animation because of differently located keyframes in one channel")
export_settings['log'].warning("Baking animation because of differently located keyframes in one channel")
return True
if export_settings['vtree'].nodes[obj_uuid].blender_object.type == "ARMATURE":
@ -364,8 +362,7 @@ def needs_baking(obj_uuid: str,
if isinstance(animation_target, bpy.types.PoseBone):
if len(animation_target.constraints) != 0:
# Constraints such as IK act on the bone -> can not be represented in glTF atm
gltf2_io_debug.print_console("WARNING",
"Baking animation because of unsupported constraints acting on the bone")
export_settings['log'].warning("Baking animation because of unsupported constraints acting on the bone")
return True
return False

View File

@ -5,7 +5,6 @@
import bpy
import typing
from ....io.com import gltf2_io
from ....io.com.gltf2_io_debug import print_console
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....blender.com.gltf2_blender_conversion import get_gltf_interpolation
from ...com.gltf2_blender_data_path import is_bone_anim_channel
@ -276,7 +275,7 @@ def gather_action_animations( obj_uuid: int,
obj.hide_viewport = True
export_settings['vtree'].nodes[obj_uuid].blender_object.hide_viewport = False
else:
print_console("WARNING", "Can't disable viewport because of drivers")
export_settings['log'].warning("Can't disable viewport because of drivers")
export_settings['gltf_optimize_armature_disable_viewport'] = False # We changed the option here, so we don't need to re-check it later, during
@ -300,7 +299,7 @@ def gather_action_animations( obj_uuid: int,
export_user_extensions('post_animation_switch_hook', export_settings, blender_object, blender_action, track_name, on_type)
except:
error = "Action is readonly. Please check NLA editor"
print_console("WARNING", "Animation '{}' could not be exported. Cause: {}".format(blender_action.name, error))
export_settings['log'].warning("Animation '{}' could not be exported. Cause: {}".format(blender_action.name, error))
continue
if on_type == "SHAPEKEY":
@ -337,7 +336,7 @@ def gather_action_animations( obj_uuid: int,
elif type_ == "EXTRA":
channel = None
else:
print("Type unknown. Should not happen")
export_settings['log'].error("Type unknown. Should not happen")
if animation is None and channel is not None:
# If all channels need to be sampled, no animation was created
@ -503,6 +502,7 @@ def __get_blender_actions(obj_uuid: str,
# so skip them for now and only write single-strip tracks.
non_muted_strips = [strip for strip in track.strips if strip.action is not None and strip.mute is False]
if track.strips is None or len(non_muted_strips) != 1:
export_settings['log'].warning("NLA track '{}' has {} strips, but only single-strip tracks are supported in 'actions' mode.".format(track.name, len(track.strips)), popup=True)
continue
for strip in non_muted_strips:

View File

@ -8,7 +8,6 @@ from mathutils import Matrix
from ....blender.com.gltf2_blender_data_path import get_sk_exported
from ....io.com import gltf2_io
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....io.com.gltf2_io_debug import print_console
from ..gltf2_blender_gather_tree import VExportNode
from .sampled.armature.armature_action_sampled import gather_action_armature_sampled
from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled
@ -130,7 +129,7 @@ def merge_tracks_perform(merged_tracks, animations, export_settings):
for channel in animations[anim_idx].channels:
if (channel.target.node, channel.target.path) in already_animated:
print_console("WARNING", "Some strips have same channel animation ({}), on node {} !".format(channel.target.path, channel.target.node.name))
export_settings['log'].warning("Some strips have same channel animation ({}), on node {} !".format(channel.target.path, channel.target.node.name))
continue
animations[base_animation_idx].channels.append(channel)
animations[base_animation_idx].channels[-1].sampler = animations[base_animation_idx].channels[-1].sampler + offset_sampler

View File

@ -5,7 +5,6 @@
import bpy
import typing
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......io.com.gltf2_io_debug import print_console
from ......io.com import gltf2_io
from .....com.gltf2_blender_extras import generate_extras
from ...fcurves.gltf2_blender_gather_fcurves_sampler import gather_animation_fcurves_sampler
@ -32,7 +31,7 @@ def gather_action_armature_sampled(armature_uuid: str, blender_action: typing.Op
samplers=[] # We need to gather the samplers after gathering all channels --> populate this list in __link_samplers
)
except RuntimeError as error:
print_console("WARNING", "Animation '{}' could not be exported. Cause: {}".format(name, error))
export_settings['log'].warning("Animation '{}' could not be exported. Cause: {}".format(name, error))
return None
export_user_extensions('pre_gather_animation_hook', export_settings, animation, blender_action, blender_object)

View File

@ -23,7 +23,7 @@ def gather_armature_sampled_channels(armature_uuid, blender_action_name, export_
# Then bake all bones
bones_to_be_animated = []
bones_uuid = export_settings["vtree"].get_all_bones(armature_uuid)
bones_to_be_animated = [export_settings["vtree"].nodes[b].blender_bone.name for b in bones_uuid]
bones_to_be_animated = [export_settings["vtree"].nodes[b].blender_bone.name for b in bones_uuid if export_settings["vtree"].nodes[b].leaf_reference is None]
# List of really animated bones is needed for optimization decision
list_of_animated_bone_channels = {}

View File

@ -157,7 +157,7 @@ def get_cache_data(path: str,
if 'bone' not in data[obj_uuid][obj_uuid].keys():
data[obj_uuid][obj_uuid]['bone'] = {}
for bone_uuid in bones:
for bone_uuid in [bone for bone in bones if export_settings['vtree'].nodes[bone].leaf_reference is None]:
blender_bone = export_settings['vtree'].nodes[bone_uuid].blender_bone
if export_settings['vtree'].nodes[bone_uuid].parent_uuid is not None and export_settings['vtree'].nodes[export_settings['vtree'].nodes[bone_uuid].parent_uuid].blender_type == VExportNode.BONE:

View File

@ -10,7 +10,6 @@ import bpy
import sys
import traceback
from ...io.com.gltf2_io_debug import print_console, print_newline
from ...io.exp import gltf2_io_export
from ...io.exp import gltf2_io_draco_compression_extension
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
@ -30,7 +29,7 @@ def save(context, export_settings):
if not export_settings['gltf_current_frame']:
bpy.context.scene.frame_set(0)
__notify_start(context)
__notify_start(context, export_settings)
start_time = time.time()
pre_export_callbacks = export_settings["pre_export_callbacks"]
for callback in pre_export_callbacks:
@ -44,10 +43,11 @@ def save(context, export_settings):
__write_file(json, buffer, export_settings)
end_time = time.time()
__notify_end(context, end_time - start_time)
__notify_end(context, end_time - start_time, export_settings)
if not export_settings['gltf_current_frame']:
bpy.context.scene.frame_set(int(original_frame))
return {'FINISHED'}
@ -178,7 +178,7 @@ def __postprocess_with_gltfpack(export_settings):
try:
subprocess.run([gltfpack_binary_file_path] + options + parameters, check=True)
except subprocess.CalledProcessError as e:
print_console('ERROR', "Calling gltfpack was not successful")
export_settings['log'].error("Calling gltfpack was not successful")
def __write_file(json, buffer, export_settings):
try:
@ -196,18 +196,18 @@ def __write_file(json, buffer, export_settings):
tb_info = traceback.extract_tb(tb)
for tbi in tb_info:
filename, line, func, text = tbi
print_console('ERROR', 'An error occurred on line {} in statement {}'.format(line, text))
print_console('ERROR', str(e))
export_settings['log'].error('An error occurred on line {} in statement {}'.format(line, text))
export_settings['log'].error(str(e))
raise e
def __notify_start(context):
print_console('INFO', 'Starting glTF 2.0 export')
def __notify_start(context, export_settings):
export_settings['log'].info('Starting glTF 2.0 export')
context.window_manager.progress_begin(0, 100)
context.window_manager.progress_update(0)
def __notify_end(context, elapsed):
print_console('INFO', 'Finished glTF 2.0 export in {} s'.format(elapsed))
def __notify_end(context, elapsed, export_settings):
export_settings['log'].info('Finished glTF 2.0 export in {} s'.format(elapsed))
context.window_manager.progress_end()
print_newline()
print()

View File

@ -86,7 +86,7 @@ def gather_joint_vnode(vnode, export_settings):
extras=__gather_extras(blender_bone, export_settings),
matrix=None,
mesh=None,
name=blender_bone.name,
name=blender_bone.name if vtree.nodes[vnode].leaf_reference is None else vtree.nodes[vtree.nodes[vnode].leaf_reference].blender_bone.name + '_leaf',
rotation=rotation,
scale=scale,
skin=None,

View File

@ -36,7 +36,7 @@ def gather_lights_punctual(blender_lamp, export_settings) -> Optional[Dict[str,
def __filter_lights_punctual(blender_lamp, export_settings) -> bool:
if blender_lamp.type in ["HEMI", "AREA"]:
gltf2_io_debug.print_console("WARNING", "Unsupported light source {}".format(blender_lamp.type))
export_settings['log'].warning("Unsupported light source {}".format(blender_lamp.type))
return False
return True
@ -63,8 +63,7 @@ def __gather_intensity(blender_lamp, export_settings) -> Optional[float]:
quadratic_falloff_node = result[0].shader_node
emission_strength = quadratic_falloff_node.inputs["Strength"].default_value / (math.pi * 4.0)
else:
gltf2_io_debug.print_console('WARNING',
'No quadratic light falloff node attached to emission strength property')
export_settings['log'].warning('No quadratic light falloff node attached to emission strength property')
emission_strength = blender_lamp.energy
else:
emission_strength = emission_node.inputs["Strength"].default_value

View File

@ -6,7 +6,6 @@ import bpy
from typing import Optional, Dict, List, Any, Tuple
from ...io.com import gltf2_io
from ...blender.com.gltf2_blender_data_path import get_sk_exported
from ...io.com.gltf2_io_debug import print_console
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras
from . import gltf2_blender_gather_primitives
@ -59,7 +58,7 @@ def gather_mesh(blender_mesh: bpy.types.Mesh,
)
if len(mesh.primitives) == 0:
print_console("WARNING", "Mesh '{}' has no primitives and will be omitted.".format(mesh.name))
export_settings['log'].warning("Mesh '{}' has no primitives and will be omitted.".format(mesh.name))
return None
blender_object = None

View File

@ -6,7 +6,6 @@ import math
import bpy
from mathutils import Matrix, Quaternion, Vector
from ...io.com.gltf2_io_debug import print_console
from ...io.com import gltf2_io
from ...io.com import gltf2_io_extensions
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
@ -251,7 +250,7 @@ def __gather_mesh(vnode, blender_object, export_settings):
# Be sure that object is valid (no NaN for example)
res = blender_object.data.validate()
if res is True:
print_console("WARNING", "Mesh " + blender_object.data.name + " is not valid, and may be exported wrongly")
export_settings['log'].warning("Mesh " + blender_object.data.name + " is not valid, and may be exported wrongly")
modifiers = blender_object.modifiers
if len(modifiers) == 0:

View File

@ -73,8 +73,10 @@ def __gather_skins(blender_primitive, export_settings):
# Set warning, for the case where there are more group of 4 weights needed
# Warning for the case where we are in the same group, will be done later (for example, 3 weights needed, but 2 wanted by user)
if max_bone_set_index > wanted_max_bone_set_index:
gltf2_io_debug.print_console("WARNING", "There are more than {} joint vertex influences."
"The {} with highest weight will be used (and normalized).".format(export_settings['gltf_vertex_influences_nb'], export_settings['gltf_vertex_influences_nb']))
export_settings['log'].warning(
"There are more than {} joint vertex influences."
"The {} with highest weight will be used (and normalized).".format(export_settings['gltf_vertex_influences_nb'], export_settings['gltf_vertex_influences_nb'])
)
# Take into account only the first set of 4 weights
max_bone_set_index = wanted_max_bone_set_index
@ -99,9 +101,10 @@ def __gather_skins(blender_primitive, export_settings):
idx = 4-1-i
if not all(weight[:, idx]):
if warning_done is False:
gltf2_io_debug.print_console("WARNING", "There are more than {} joint vertex influences."
"The {} with highest weight will be used (and normalized).".format(export_settings['gltf_vertex_influences_nb'], export_settings['gltf_vertex_influences_nb']))
export_settings['log'].warning(
"There are more than {} joint vertex influences."
"The {} with highest weight will be used (and normalized).".format(export_settings['gltf_vertex_influences_nb'], export_settings['gltf_vertex_influences_nb'])
)
warning_done = True
weight[:, idx] = 0.0

View File

@ -6,7 +6,6 @@ import bpy
from typing import List, Optional, Tuple
import numpy as np
from ...io.com import gltf2_io, gltf2_io_constants, gltf2_io_extensions
from ...io.com.gltf2_io_debug import print_console
from ...blender.com.gltf2_blender_data_path import get_sk_exported
from ...io.exp import gltf2_io_binary_data
from .gltf2_blender_gather_cache import cached, cached_by_key
@ -192,7 +191,7 @@ def __gather_indices(blender_primitive, blender_mesh, modifiers, export_settings
component_type = gltf2_io_constants.ComponentType.UnsignedInt
indices = indices.astype(np.uint32, copy=False)
else:
print_console('ERROR', 'A mesh contains too many vertices (' + str(max_index) + ') and needs to be split before export.')
export_settings['log'].error('A mesh contains too many vertices (' + str(max_index) + ') and needs to be split before export.')
return None
element_type = gltf2_io_constants.DataType.Scalar

View File

@ -6,7 +6,6 @@ import numpy as np
from copy import deepcopy
from mathutils import Vector
from ...blender.com.gltf2_blender_data_path import get_sk_exported
from ...io.com.gltf2_io_debug import print_console
from ...io.com.gltf2_io_constants import ROUNDING_DIGIT
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ...io.com import gltf2_io_constants
@ -19,7 +18,7 @@ from . import gltf2_blender_gather_skins
def extract_primitives(materials, blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings):
"""Extract primitives from a mesh."""
print_console('INFO', 'Extracting primitive: ' + blender_mesh.name)
export_settings['log'].info("Extracting primitive: " + blender_mesh.name)
primitive_creator = PrimitiveCreator(materials, blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings)
primitive_creator.prepare_data()
@ -78,7 +77,7 @@ class PrimitiveCreator:
self.blender_mesh.calc_tangents()
self.use_tangents = True
except Exception:
print_console('WARNING', 'Could not calculate tangents. Please try to triangulate the mesh first.')
self.export_settings['log'].warning("{}: Could not calculate tangents. Please try to triangulate the mesh first.".format(self.blender_mesh.name), popup=True)
self.tex_coord_max = 0
if self.export_settings['gltf_texcoords']:
@ -187,7 +186,7 @@ class PrimitiveCreator:
# Seems we sometime can have name collision about attributes
# Avoid crash and ignoring one of duplicated attribute name
if attr['gltf_attribute_name'] in [a['gltf_attribute_name'] for a in self.blender_attributes]:
print_console('WARNING', 'Attribute collision name: ' + blender_attribute.name + ", ignoring one of them")
self.export_settings['log'].warning('Attribute collision name: ' + blender_attribute.name + ", ignoring one of them")
continue
self.blender_attributes.append(attr)
@ -426,7 +425,7 @@ class PrimitiveCreator:
self.blender_mesh.attributes[attr].data.foreach_get('vector', data)
data = data.reshape(-1, 2)
else:
print_console('WARNING', 'We are not managing this case yet (UVMap as custom attribute for unknown type)')
self.export_settings['log'].warning('We are not managing this case (UVMap as custom attribute for unknown type)')
continue
# Blender UV space -> glTF UV space
# u,v -> u,1-v
@ -448,7 +447,7 @@ class PrimitiveCreator:
pass
elif material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is not None:
print_console('WARNING', 'We are not managing this case (Vertex Color alpha without color)')
self.export_settings['log'].warning('We are not managing this case (Vertex Color alpha without color)')
else:
vc_color_name = None
@ -475,7 +474,7 @@ class PrimitiveCreator:
if materials_use_vc is not None and materials_use_vc != vc_key:
if warning_already_displayed is False:
print_console('WARNING', 'glTF specification does not allow this case (multiple materials with different Vertex Color)')
self.export_settings['log'].warning('glTF specification does not allow this case (multiple materials with different Vertex Color)')
warning_already_displayed = True
materials_use_vc = vc_key
@ -520,12 +519,12 @@ class PrimitiveCreator:
all_uvmaps[tex] = uvmap_name
if len(set(all_uvmaps.values())) > 1:
print_console('WARNING', 'We are not managing this case (multiple UVMap for UDIM)')
self.export_settings['log'].warning('We are not managing this case (multiple UVMap for UDIM)')
new_prim_indices[material_idx] = self.prim_indices[material_idx]
self.additional_materials.append(None)
continue
print_console('INFO', 'Splitting UDIM tiles into different primitives/materials')
self.export_settings['log'].info('Splitting UDIM tiles into different primitives/materials')
# Retrieve UDIM images
tex = list(material_info['udim_info'].keys())[0]
image = material_info['udim_info'][tex]['image']
@ -623,7 +622,7 @@ class PrimitiveCreator:
elif tex == "anisotropyTexture":
new_material.extensions["KHR_materials_anisotropy"].extension['anisotropyTexture'] = new_tex
else:
print_console('WARNING', 'We are not managing this case yet (UDIM for {})'.format(tex))
self.export_settings['log'].warning('We are not managing this case (UDIM for {})'.format(tex))
self.additional_materials.append((new_material, material_info, int(str(id(base_material)) + str(u) + str(v))))
@ -696,7 +695,7 @@ class PrimitiveCreator:
has_triangle_primitive = len(primitives) != 0
primitives.extend(self.primitive_creation_edges_and_points())
print_console('INFO', 'Primitives created: %d' % len(primitives))
self.export_settings['log'].info('Primitives created: %d' % len(primitives))
return primitives, [None]*len(primitives), self.attributes if has_triangle_primitive else None
@ -769,7 +768,7 @@ class PrimitiveCreator:
# No material for them, so only one primitive for each
primitives.extend(self.primitive_creation_edges_and_points())
print_console('INFO', 'Primitives created: %d' % len(primitives))
self.export_settings['log'].info('Primitives created: %d' % len(primitives))
return primitives
@ -1061,7 +1060,7 @@ class PrimitiveCreator:
elif attr['blender_domain'] in ['FACE']:
data = np.empty(len(self.blender_mesh.polygons) * attr['len'], dtype=attr['type'])
else:
print_console("ERROR", "domain not known")
self.export_settings['log'].error("domain not known")
if attr['blender_data_type'] == "BYTE_COLOR":
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('color', data)
@ -1093,7 +1092,7 @@ class PrimitiveCreator:
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data)
data = data.reshape(-1, attr['len'])
else:
print_console('ERROR',"blender type not found " + attr['blender_data_type'])
self.export_settings['log'].error("blender type not found " + attr['blender_data_type'])
if attr['blender_domain'] in ['CORNER']:
for i in range(attr['len']):
@ -1129,7 +1128,7 @@ class PrimitiveCreator:
self.dots[attr['gltf_attribute_name'] + str(i)] = data_attr[:, i]
else:
print_console("ERROR", "domain not known")
self.export_settings['log'].error("domain not known")
def __get_uvs_attribute(self, blender_uv_idx, attr):
layer = self.blender_mesh.uv_layers[blender_uv_idx]

View File

@ -90,7 +90,17 @@ def __gather_inverse_bind_matrices(armature_uuid, export_settings):
matrices = []
for b in bones_uuid:
if export_settings['vtree'].nodes[b].leaf_reference is None:
__collect_matrices(blender_armature_object.pose.bones[export_settings['vtree'].nodes[b].blender_bone.name])
else:
inverse_bind_matrix = (
axis_basis_change @
(
blender_armature_object.matrix_world @
export_settings['vtree'].nodes[export_settings['vtree'].nodes[b].leaf_reference].matrix_world_tail
)
).inverted_safe()
matrices.append(inverse_bind_matrix) # Leaf bone
# flatten the matrices
inverse_matrices = []

View File

@ -54,6 +54,7 @@ class VExportNode:
self.blender_object = None
self.blender_bone = None
self.leaf_reference = None # For leaf bones only
self.default_hide_viewport = False # Need to store the default value for meshes in case of animation baking on armature
@ -181,8 +182,10 @@ class VExportTree:
if parent_uuid is None or not self.nodes[parent_uuid].blender_type == VExportNode.ARMATURE:
# correct workflow is to parent skinned mesh to armature, but ...
# all users don't use correct workflow
print("WARNING: Armature must be the parent of skinned mesh")
print("Armature is selected by its name, but may be false in case of instances")
self.export_settings['log'].warning(
"Armature must be the parent of skinned mesh"
"Armature is selected by its name, but may be false in case of instances"
)
# Search an armature by name, and use the first found
# This will be done after all objects are setup
node.armature_needed = modifiers["ARMATURE"].object.name
@ -246,9 +249,13 @@ class VExportTree:
if self.export_settings['gltf_rest_position_armature'] is False:
# Use pose bone for TRS
node.matrix_world = self.nodes[node.armature].matrix_world @ blender_bone.matrix
if self.export_settings['gltf_leaf_bone'] is True:
node.matrix_world_tail = self.nodes[node.armature].matrix_world @ Matrix.Translation(blender_bone.tail)
node.matrix_world_tail = node.matrix_world_tail @ self.axis_basis_change
else:
# Use edit bone for TRS --> REST pose will be used
node.matrix_world = self.nodes[node.armature].matrix_world @ blender_bone.bone.matrix_local
# Tail will be set after, as we need to be in edit mode
node.matrix_world = node.matrix_world @ self.axis_basis_change
if delta is True:
@ -423,7 +430,7 @@ class VExportTree:
elif parent_keep_tag is False:
self.nodes[uuid].keep_tag = False
else:
print("This should not happen!")
self.export_settings['log'].error("This should not happen")
for child in self.nodes[uuid].children:
if self.nodes[uuid].blender_type == VExportNode.INST_COLLECTION or self.nodes[uuid].is_instancier == VExportNode.INSTANCIER:
@ -556,13 +563,56 @@ class VExportTree:
del n.armature_needed
def bake_armature_bone_list(self):
if self.export_settings['gltf_leaf_bone'] is True:
self.add_leaf_bones()
# Used to store data in armature vnode
# If armature is removed from export
# Data are still available, even if armature is not exported (so bones are re-parented)
for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]:
self.get_all_bones(n.uuid)
self.get_root_bones_uuid(n.uuid)
def add_leaf_bones(self):
# If we are using rest pose, we need to get tail of editbone, going to edit mode for each armature
if self.export_settings['gltf_rest_position_armature'] is True:
for obj_uuid in [n for n in self.nodes if self.nodes[n].blender_type == VExportNode.ARMATURE]:
armature = self.nodes[obj_uuid].blender_object
bpy.context.view_layer.objects.active = armature
bpy.ops.object.mode_set(mode="EDIT")
for bone in armature.data.edit_bones:
if len(bone.children) == 0:
self.nodes[self.nodes[obj_uuid].bones[bone.name]].matrix_world_tail = armature.matrix_world @ Matrix.Translation(bone.tail) @ self.axis_basis_change
bpy.ops.object.mode_set(mode="OBJECT")
for bone_uuid in [n for n in self.nodes if self.nodes[n].blender_type == VExportNode.BONE \
and len(self.nodes[n].children) == 0]:
bone_node = self.nodes[bone_uuid]
# Add a new node
node = VExportNode()
node.uuid = str(uuid.uuid4())
node.parent_uuid = bone_uuid
node.parent_bone_uuid = bone_uuid
node.blender_object = bone_node.blender_object
node.armature = bone_node.armature
node.blender_type = VExportNode.BONE
node.leaf_reference = bone_uuid
node.keep_tag = True
node.matrix_world = bone_node.matrix_world_tail.copy()
self.add_children(bone_uuid, node.uuid)
self.add_node(node)
def add_neutral_bones(self):
added_armatures = []
for n in [n for n in self.nodes.values() if \
@ -575,7 +625,7 @@ class VExportTree:
# Be sure to add it to really exported meshes
if n.node.skin is None:
print("WARNING: {} has no skin, skipping adding neutral bone data on it.".format(n.blender_object.name))
self.export_settings['log'].warning("{} has no skin, skipping adding neutral bone data on it.".format(n.blender_object.name))
continue
if n.armature not in added_armatures:
@ -691,5 +741,5 @@ class VExportTree:
if len(self.get_root_bones_uuid(arma_uuid)) > 1:
# We can't remove armature
self.export_settings['gltf_armature_object_remove'] = False
print("WARNING: We can't remove armature object because some armatures have multiple root bones.")
self.export_settings['log'].warning("We can't remove armature object because some armatures have multiple root bones.")
break

View File

@ -47,7 +47,7 @@ def gather_image(
# In case we can't retrieve image (for example packed images, with original moved)
# We don't create invalid image without uri
factor_uri = None
if uri is None: return None, None, None, False
if uri is None: return None, None, None, None
buffer_view, factor_buffer_view = __gather_buffer_view(image_data, mime_type, name, export_settings)
@ -340,8 +340,7 @@ def __get_image_data_mapping(sockets, results, use_tile, export_settings) -> Exp
keys = list(composed_image.fills.keys()) # do not loop on dict, we may have to delete an element
for k in [k for k in keys if isinstance(composed_image.fills[k], FillImage)]:
if composed_image.fills[k].image.size[0] == 0 or composed_image.fills[k].image.size[1] == 0:
gltf2_io_debug.print_console("WARNING",
"Image '{}' has no size and cannot be exported.".format(
export_settings['log'].warning("Image '{}' has no size and cannot be exported.".format(
composed_image.fills[k].image))
del composed_image.fills[k]

View File

@ -8,7 +8,6 @@ import bpy
from ....io.com import gltf2_io
from ....io.com.gltf2_io_extensions import Extension
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....io.com.gltf2_io_debug import print_console
from ...com.gltf2_blender_extras import generate_extras
from ..gltf2_blender_gather_cache import cached, cached_by_key
from . import gltf2_blender_gather_materials_unlit
@ -328,9 +327,10 @@ def __gather_orm_texture(blender_material, export_settings):
result = (occlusion, roughness_socket, metallic_socket)
if not gltf2_blender_gather_texture_info.check_same_size_images(result, export_settings):
print_console("INFO",
export_settings['log'].info(
"Occlusion and metal-roughness texture will be exported separately "
"(use same-sized images if you want them combined)")
"(use same-sized images if you want them combined)"
)
return None
# Double-check this will past the filter in texture_info
@ -508,7 +508,7 @@ def __get_final_material_with_indices(blender_material, base_material, caching_i
elif tex.startswith("additional"):
export_settings['additional_texture_export'][export_settings['additional_texture_export_current_idx'] + int(tex[10:])].tex_coord = ind
else:
print_console("ERROR", "some Textures tex coord are not managed")
export_settings['log'].error("some Textures tex coord are not managed")
export_settings['additional_texture_export_current_idx'] = len(export_settings['additional_texture_export'])

View File

@ -31,7 +31,7 @@ def gather_texture(
"""
if not __filter_texture(blender_shader_sockets, export_settings):
return None, None, False
return None, None, None
source, webp_image, image_data, factor, udim_image = __gather_source(blender_shader_sockets, use_tile, export_settings)
@ -168,9 +168,10 @@ def __gather_name(blender_shader_sockets, export_settings):
def __gather_sampler(blender_shader_sockets, export_settings):
shader_nodes = [get_texture_node_from_socket(socket, export_settings) for socket in blender_shader_sockets]
if len(shader_nodes) > 1:
gltf2_io_debug.print_console("WARNING",
export_settings['log'].warning(
"More than one shader node tex image used for a texture. "
"The resulting glTF sampler will behave like the first shader node tex image.")
"The resulting glTF sampler will behave like the first shader node tex image."
)
first_valid_shader_node = next(filter(lambda x: x is not None, shader_nodes))
# group_path can't be a list, so transform it to str

View File

@ -204,7 +204,7 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
texture_transform = None
if node.node and node.node.type == 'MAPPING':
texture_transform = get_texture_transform_from_mapping_node(node)
texture_transform = get_texture_transform_from_mapping_node(node, export_settings)
node = previous_node(NodeSocket(node.node.inputs['Vector'], node.group_path))
uvmap_info = {}

View File

@ -11,7 +11,6 @@ from mathutils import Vector, Matrix
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from ...com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name, get_gltf_old_group_node_name
from ....blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf
from io_scene_gltf2.io.com import gltf2_io_debug
import typing
@ -494,9 +493,9 @@ def previous_node(socket: NodeSocket):
return ShNode(prev_socket.socket.node, prev_socket.group_path)
return ShNode(None, None)
def get_texture_transform_from_mapping_node(mapping_node):
def get_texture_transform_from_mapping_node(mapping_node, export_settings):
if mapping_node.node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]:
gltf2_io_debug.print_console("WARNING",
export_settings['log'].warning(
"Skipping exporting texture transform because it had type " +
mapping_node.node.vector_type + "; recommend using POINT instead"
)
@ -506,7 +505,7 @@ def get_texture_transform_from_mapping_node(mapping_node):
rotation_0, rotation_1 = mapping_node.node.inputs['Rotation'].default_value[0], mapping_node.node.inputs['Rotation'].default_value[1]
if rotation_0 or rotation_1:
# TODO: can we handle this?
gltf2_io_debug.print_console("WARNING",
export_settings['log'].warning(
"Skipping exporting texture transform because it had non-zero "
"rotations in the X/Y direction; only a Z rotation can be exported!"
)
@ -542,7 +541,7 @@ def get_texture_transform_from_mapping_node(mapping_node):
mapping_transform = inverted(mapping_transform)
if mapping_transform is None:
gltf2_io_debug.print_console("WARNING",
export_settings['log'].warning(
"Skipping exporting texture transform with type TEXTURE because "
"we couldn't convert it to TRS; recommend using POINT instead"
)

View File

@ -6,7 +6,6 @@ import bpy
from mathutils import Matrix
import numpy as np
from ...io.imp.gltf2_io_user_extensions import import_user_extensions
from ...io.com.gltf2_io_debug import print_console
from ...io.imp.gltf2_io_binary import BinaryData
from ...io.com.gltf2_io_constants import DataType, ComponentType
from ...blender.com.gltf2_blender_conversion import get_attribute_type
@ -157,7 +156,8 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
vert_index_base = len(vert_locs)
if prim.extensions is not None and 'KHR_draco_mesh_compression' in prim.extensions:
print_console('INFO', 'Draco Decoder: Decode primitive {}'.format(pymesh.name or '[unnamed]'))
gltf.log.info('Draco Decoder: Decode primitive {}'.format(pymesh.name or '[unnamed]'))
decode_primitive(gltf, prim)
import_user_extensions('gather_import_decode_primitive', gltf, pymesh, prim, skin_idx)
@ -319,7 +319,7 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
layer = mesh.uv_layers.new(name=name)
if layer is None:
print("WARNING: UV map is ignored because the maximum number of UV layers has been reached.")
gltf.log.warning("WARNING: UV map is ignored because the maximum number of UV layers has been reached.")
break
layer.uv.foreach_set('vector', squish(loop_uvs[uv_i], np.float32))
@ -639,7 +639,7 @@ def skin_into_bind_pose(gltf, skin_idx, vert_joints, vert_weights, locs, vert_no
# We set all weight ( aka 1.0 ) to the first bone
zeros_indices = np.where(weight_sums == 0)[0]
if zeros_indices.shape[0] > 0:
print_console('ERROR', 'File is invalid: Some vertices are not assigned to bone(s) ')
gltf.log.error('File is invalid: Some vertices are not assigned to bone(s) ')
vert_weights[0][:, 0][zeros_indices] = 1.0 # Assign to first bone with all weight
# Reprocess IBM for these vertices

View File

@ -269,7 +269,7 @@ class BlenderNode():
if cache_key is not None and cache_key in pymesh.blender_name:
mesh = bpy.data.meshes[pymesh.blender_name[cache_key]]
else:
gltf.log.info("Blender create Mesh node %s", pymesh.name or pynode.mesh)
gltf.log.info("Blender create Mesh node {}".format(pymesh.name or pynode.mesh))
mesh = BlenderMesh.create(gltf, pynode.mesh, pynode.skin)
if cache_key is not None:
pymesh.blender_name[cache_key] = mesh.name

View File

@ -6,7 +6,6 @@ from ctypes import *
from ...io.com.gltf2_io import BufferView
from ...io.imp.gltf2_io_binary import BinaryData
from ...io.com.gltf2_io_debug import print_console
from ...io.com.gltf2_io_draco_compression_extension import dll_path
@ -63,7 +62,7 @@ def decode_primitive(gltf, prim):
# Create Draco decoder.
draco_buffer = bytes(BinaryData.get_buffer_view(gltf, extension['bufferView']))
if not dll.decoderDecode(decoder, draco_buffer, len(draco_buffer)):
print_console('ERROR', 'Draco Decoder: Unable to decode. Skipping primitive {}.'.format(name))
gltf.log.error('Draco Decoder: Unable to decode. Skipping primitive {}.'.format(name))
return
# Choose a buffer index which does not yet exist, skipping over existing glTF buffers yet to be loaded
@ -76,10 +75,10 @@ def decode_primitive(gltf, prim):
# Read indices.
index_accessor = gltf.data.accessors[prim.indices]
if dll.decoderGetIndexCount(decoder) != index_accessor.count:
print_console('WARNING', 'Draco Decoder: Index count of accessor and decoded index count does not match. Updating accessor.')
gltf.log.warning('Draco Decoder: Index count of accessor and decoded index count does not match. Updating accessor.')
index_accessor.count = dll.decoderGetIndexCount(decoder)
if not dll.decoderReadIndices(decoder, index_accessor.component_type):
print_console('ERROR', 'Draco Decoder: Unable to decode indices. Skipping primitive {}.'.format(name))
gltf.log.error('Draco Decoder: Unable to decode indices. Skipping primitive {}.'.format(name))
return
indices_byte_length = dll.decoderGetIndicesByteLength(decoder)
@ -102,15 +101,15 @@ def decode_primitive(gltf, prim):
for attr_idx, attr in enumerate(extension['attributes']):
dracoId = extension['attributes'][attr]
if attr not in prim.attributes:
print_console('ERROR', 'Draco Decoder: Draco attribute {} not in primitive attributes. Skipping primitive {}.'.format(attr, name))
gltf.log.error('Draco Decoder: Draco attribute {} not in primitive attributes. Skipping primitive {}.'.format(attr, name))
return
accessor = gltf.data.accessors[prim.attributes[attr]]
if dll.decoderGetVertexCount(decoder) != accessor.count:
print_console('WARNING', 'Draco Decoder: Vertex count of accessor and decoded vertex count does not match for attribute {}. Updating accessor.'.format(attr, name))
gltf.log.warning('Draco Decoder: Vertex count of accessor and decoded vertex count does not match for attribute {}. Updating accessor.'.format(attr))
accessor.count = dll.decoderGetVertexCount(decoder)
if not dll.decoderReadAttribute(decoder, dracoId, accessor.component_type, accessor.type.encode()):
print_console('ERROR', 'Draco Decoder: Could not decode attribute {}. Skipping primitive {}.'.format(attr, name))
gltf.log.error('Draco Decoder: Could not decode attribute {}. Skipping primitive {}.'.format(attr, name))
return
byte_length = dll.decoderGetAttributeByteLength(decoder, dracoId)

View File

@ -42,7 +42,7 @@ def from_union(fs, x):
tb_info = traceback.extract_tb(tb)
for tbi in tb_info:
filename, line, func, text = tbi
gltf2_io_debug.print_console('ERROR', 'An error occurred on line {} in statement {}'.format(line, text))
print('ERROR', 'An error occurred on line {} in statement {}'.format(line, text))
assert False

View File

@ -8,48 +8,17 @@
import time
import logging
import logging.handlers
#
# Globals
#
OUTPUT_LEVELS = ['ERROR', 'WARNING', 'INFO', 'PROFILE', 'DEBUG', 'VERBOSE']
g_current_output_level = 'DEBUG'
g_profile_started = False
g_profile_start = 0.0
g_profile_end = 0.0
g_profile_delta = 0.0
#
# Functions
#
def set_output_level(level):
"""Set an output debug level."""
global g_current_output_level
if OUTPUT_LEVELS.index(level) < 0:
return
g_current_output_level = level
def print_console(level, output):
"""Print to Blender console with a given header and output."""
global OUTPUT_LEVELS
global g_current_output_level
if OUTPUT_LEVELS.index(level) > OUTPUT_LEVELS.index(g_current_output_level):
return
print(get_timestamp() + " | " + level + ': ' + output)
def print_newline():
"""Print a new line to Blender console."""
print()
def get_timestamp():
@ -57,23 +26,13 @@ def get_timestamp():
return time.strftime("%H:%M:%S", current_time)
def print_timestamp(label=None):
"""Print a timestamp to Blender console."""
output = 'Timestamp: ' + get_timestamp()
if label is not None:
output = output + ' (' + label + ')'
print_console('PROFILE', output)
def profile_start():
"""Start profiling by storing the current time."""
global g_profile_start
global g_profile_started
if g_profile_started:
print_console('ERROR', 'Profiling already started')
print('ERROR', 'Profiling already started')
return
g_profile_started = True
@ -88,7 +47,7 @@ def profile_end(label=None):
global g_profile_started
if not g_profile_started:
print_console('ERROR', 'Profiling not started')
print('ERROR', 'Profiling not started')
return
g_profile_started = False
@ -101,16 +60,60 @@ def profile_end(label=None):
if label is not None:
output = output + ' (' + label + ')'
print_console('PROFILE', output)
print('PROFILE', output)
# TODO: need a unified logging system for importer/exporter
# TODO: this logger is used by the importer in both the io and blender parts, but lives here in an _io_ file
class Log:
def __init__(self, loglevel):
self.logger = logging.getLogger('glTFImporter')
self.hdlr = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
self.hdlr.setFormatter(formatter)
self.logger.addHandler(self.hdlr)
# For console display
self.console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s | %(levelname)s: %(message)s', "%H:%M:%S")
self.console_handler.setFormatter(formatter)
# For popup display
self.popup_handler = logging.handlers.MemoryHandler(1024*10)
self.logger.addHandler(self.console_handler)
#self.logger.addHandler(self.popup_handler) # deliberately not attached; popup messages are buffered manually by the methods below
self.logger.setLevel(int(loglevel))
def error(self, message, popup=False):
self.logger.error(message)
if popup:
self.popup_handler.buffer.append(('ERROR', message))
def warning(self, message, popup=False):
self.logger.warning(message)
if popup:
self.popup_handler.buffer.append(('WARNING', message))
def info(self, message, popup=False):
self.logger.info(message)
if popup:
self.popup_handler.buffer.append(('INFO', message))
def debug(self, message, popup=False):
self.logger.debug(message)
if popup:
self.popup_handler.buffer.append(('DEBUG', message))
def critical(self, message, popup=False):
self.logger.critical(message)
if popup:
self.popup_handler.buffer.append(('ERROR', message)) # There is no Critical level in Blender, so we use error
def profile(self, message, popup=False): # There is no profile level in logging, so we use info
self.logger.info(message)
if popup:
self.popup_handler.buffer.append(('PROFILE', message))
def messages(self):
return self.popup_handler.buffer
def flush(self):
self.logger.removeHandler(self.console_handler)
self.popup_handler.flush()
self.logger.removeHandler(self.popup_handler)
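
Putting the class in context, the exporter's execute() earlier in this diff drives it roughly like this sketch (assuming the Log class above is in scope):

import logging

# Hedged sketch of the Log lifecycle as used by the export operator above.
log = Log(logging.INFO)                        # attaches the console handler
log.warning("Surfaced in the UI", popup=True)  # buffered for the popup
for message_type, message in log.messages():   # e.g. ('WARNING', '...')
    print(message_type, message)               # the operator calls self.report() here
log.flush()                                    # detach handlers, clear the buffer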

View File

@ -7,9 +7,6 @@ import sys
from pathlib import Path
import bpy
from ...io.com.gltf2_io_debug import print_console
def dll_path() -> Path:
"""
Get the DLL path depending on the underlying platform.
@ -37,7 +34,7 @@ def dll_path() -> Path:
}.get(sys.platform)
if path is None or library_name is None:
print_console('WARNING', 'Unsupported platform {}, Draco mesh compression is unavailable'.format(sys.platform))
print('WARNING', 'Unsupported platform {}, Draco mesh compression is unavailable'.format(sys.platform))
return path / library_name
@ -51,7 +48,7 @@ def dll_exists(quiet=False) -> bool:
exists = path.exists() and path.is_file()
if quiet is False:
if exists:
print_console('INFO', 'Draco mesh compression is available, use library at %s' % dll_path().absolute())
print('INFO', 'Draco mesh compression is available, use library at %s' % dll_path().absolute())
else:
print_console('ERROR', 'Draco mesh compression is not available because library could not be found at %s' % dll_path().absolute())
print('ERROR', 'Draco mesh compression is not available because library could not be found at %s' % dll_path().absolute())
return exists

View File

@ -6,7 +6,6 @@ from ctypes import *
from pathlib import Path
from ...io.exp.gltf2_io_binary_data import BinaryData
from ...io.com.gltf2_io_debug import print_console
from ...io.com.gltf2_io_draco_compression_extension import dll_path
@ -90,7 +89,7 @@ def __traverse_node(node, f):
def __encode_node(node, dll, export_settings, encoded_primitives_cache):
if node.mesh is not None:
print_console('INFO', 'Draco encoder: Encoding mesh {}.'.format(node.name))
export_settings['log'].info('Draco encoder: Encoding mesh {}.'.format(node.name))
for primitive in node.mesh.primitives:
__encode_primitive(primitive, dll, export_settings, encoded_primitives_cache)
@ -112,7 +111,7 @@ def __encode_primitive(primitive, dll, export_settings, encoded_primitives_cache
return
if 'POSITION' not in attributes:
print_console('WARNING', 'Draco encoder: Primitive without positions encountered. Skipping.')
export_settings['log'].warning('Draco encoder: Primitive without positions encountered. Skipping.')
return
positions = attributes['POSITION']
@ -141,7 +140,7 @@ def __encode_primitive(primitive, dll, export_settings, encoded_primitives_cache
preserve_triangle_order = primitive.targets is not None and len(primitive.targets) > 0
if not dll.encoderEncode(encoder, preserve_triangle_order):
print_console('ERROR', 'Could not encode primitive. Skipping primitive.')
export_settings['log'].error('Could not encode primitive. Skipping primitive.')
byte_length = dll.encoderGetByteLength(encoder)
encoded_data = bytes(byte_length)

View File

@ -13,5 +13,6 @@ def export_user_extensions(hook_name, export_settings, *args):
try:
hook(*args, export_settings)
except Exception as e:
print(hook_name, "fails on", extension)
print(str(e))
export_settings['log'].error("Extension hook", hook_name, "fails on", extension)
export_settings['log'].error(str(e))
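
For reference, the hooks dispatched here live on user extension objects. A minimal sketch of one whose failure would now be routed to the log — `gather_node_hook` is a real exporter hook name, but the class body is illustrative:

# Hedged sketch of a user extension hook called via export_user_extensions().
class glTF2ExportUserExtension:
    def gather_node_hook(self, gltf2_object, blender_object, export_settings):
        # An exception raised here is caught above and reported through
        # export_settings['log'].error(...) instead of a bare print().
        gltf2_object.extras = {"source_object": blender_object.name}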

View File

@ -32,11 +32,9 @@ class glTFImporter():
self.variant_mapping = {} # Used to map between mgltf material idx and blender material, for Variants
if 'loglevel' not in self.import_settings.keys():
self.import_settings['loglevel'] = logging.ERROR
self.import_settings['loglevel'] = logging.CRITICAL
log = Log(import_settings['loglevel'])
self.log = log.logger
self.log_handler = log.hdlr
self.log = Log(import_settings['loglevel'])
# TODO: move to a com place?
self.extensions_managed = [

View File

@ -9,5 +9,5 @@ def import_user_extensions(hook_name, gltf, *args):
try:
hook(*args, gltf)
except Exception as e:
print(hook_name, "fails on", extension)
print(str(e))
gltf.log.error("{} fails on {}".format(hook_name, extension))
gltf.log.error(str(e))

View File

@ -417,7 +417,7 @@ def export(file,
location = matrix.to_translation()[:]
radius = lamp.distance * math.cos(beamWidth)
radius = lamp.cutoff_distance * math.cos(beamWidth)
# radius = lamp.dist*math.cos(beamWidth)
ident_step = ident + (' ' * (-len(ident) + \
fw('%s<SpotLight ' % ident)))
@ -479,7 +479,7 @@ def export(file,
fw(ident_step + 'color="%.4f %.4f %.4f"\n' % clamp_color(light.color))
fw(ident_step + 'intensity="%.4f"\n' % intensity)
fw(ident_step + 'radius="%.4f" \n' % light.distance)
fw(ident_step + 'radius="%.4f" \n' % light.cutoff_distance)
fw(ident_step + 'location="%.4f %.4f %.4f"\n' % location)
fw(ident_step + '/>\n')

View File

@ -3171,7 +3171,7 @@ def importLamp_PointLight(node, ancestry):
bpylamp = bpy.data.lights.new(vrmlname, 'POINT')
bpylamp.energy = intensity
bpylamp.distance = radius
bpylamp.cutoff_distance = radius
bpylamp.color = color
mtx = Matrix.Translation(Vector(location))
@ -3220,7 +3220,7 @@ def importLamp_SpotLight(node, ancestry):
bpylamp = bpy.data.lights.new(vrmlname, 'SPOT')
bpylamp.energy = intensity
bpylamp.distance = radius
bpylamp.cutoff_distance = radius
bpylamp.color = color
bpylamp.spot_size = cutOffAngle
if beamWidth > cutOffAngle:
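
Both X3D hunks track the same Blender Python API change: `Light.distance` no longer exists, and `cutoff_distance` (together with `use_custom_distance`) is the closest stand-in. A hedged sketch:

import bpy

# Hedged sketch: cutoff_distance stands in for the removed Light.distance.
light = bpy.data.lights.new("vrml_point", 'POINT')  # hypothetical name
light.use_custom_distance = True  # cutoff_distance only applies when enabled
light.cutoff_distance = 25.0      # roughly what `distance` used to express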

View File

@ -1839,6 +1839,7 @@ class NWAddPrincipledSetup(Operator, NWBase, ImportHelper):
# If Bump add bump node in between
bump_node_texture = nodes.new(type='ShaderNodeTexImage')
img = bpy.data.images.load(path.join(import_path, sname[2]))
img.colorspace_settings.is_data = True
bump_node_texture.image = img
bump_node_texture.label = 'Bump'
@ -1857,6 +1858,7 @@ class NWAddPrincipledSetup(Operator, NWBase, ImportHelper):
# If Normal add normal node in between
normal_node_texture = nodes.new(type='ShaderNodeTexImage')
img = bpy.data.images.load(path.join(import_path, sname[2]))
img.colorspace_settings.is_data = True
normal_node_texture.image = img
normal_node_texture.label = 'Normal'
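
The Node Wrangler change above marks bump and normal images as non-color data at load time; in isolation the pattern is (file path hypothetical):

import bpy

# Hedged sketch: load an image and flag it as non-color data, as the
# Principled setup now does for bump/normal maps.
img = bpy.data.images.load("/tmp/normal.png")  # hypothetical path
img.colorspace_settings.is_data = True  # same effect as Color Space: Non-Color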