FBX IO: Speed up animation simplification using NumPy #104904
@@ -320,8 +320,8 @@ def get_shadeless_node(dest_node_tree):
     output_node = node_tree.nodes.new('NodeGroupOutput')
     input_node = node_tree.nodes.new('NodeGroupInput')

-    node_tree.outputs.new('NodeSocketShader', 'Shader')
-    node_tree.inputs.new('NodeSocketColor', 'Color')
+    node_tree.interface.new_socket('Shader', in_out='OUTPUT', socket_type='NodeSocketShader')
+    node_tree.interface.new_socket('Color', in_out='INPUT', socket_type='NodeSocketColor')

     # This could be faster as a transparent shader, but then no ambient occlusion
     diffuse_shader = node_tree.nodes.new('ShaderNodeBsdfDiffuse')
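Note: the hunk above tracks the Blender 4.0 node-group API, where `node_tree.inputs.new()` and `node_tree.outputs.new()` were removed in favour of the tree interface. A minimal sketch of the pattern, using a hypothetical throwaway group created only for illustration:

    import bpy

    # Hypothetical group; only the interface calls below mirror the hunk.
    group = bpy.data.node_groups.new("Illustration Group", 'ShaderNodeTree')

    # Blender 3.x style (removed in 4.0):
    #   group.outputs.new('NodeSocketShader', 'Shader')
    #   group.inputs.new('NodeSocketColor', 'Color')

    # Blender 4.0 style: sockets are declared on the tree interface.
    group.interface.new_socket('Shader', in_out='OUTPUT', socket_type='NodeSocketShader')
    group.interface.new_socket('Color', in_out='INPUT', socket_type='NodeSocketColor')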
@@ -1079,7 +1079,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
         if self.shader in {'PRINCIPLED', 'SHADELESS'}:
             node_tree.links.new(core_shader.inputs[0], tex_image.outputs['Color'])
         elif self.shader == 'EMISSION':
-            node_tree.links.new(core_shader.inputs['Emission'], tex_image.outputs['Color'])
+            node_tree.links.new(core_shader.inputs['Emission Color'], tex_image.outputs['Color'])

         if self.use_transparency:
             if self.shader in {'PRINCIPLED', 'EMISSION'}:
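The 'Emission' to 'Emission Color' rename follows the Principled BSDF socket names in Blender 4.0; looking the input up under the old name raises a KeyError there. A hedged, version-tolerant sketch (the fallback lookup is illustrative, not part of the add-on), assuming `core_shader` is a Principled BSDF node and `tex_image` an Image Texture node as in the hunk above:

    # Prefer the 4.0 socket name, fall back to the 3.x one.
    emission_input = (core_shader.inputs.get('Emission Color')
                      or core_shader.inputs.get('Emission'))
    node_tree.links.new(emission_input, tex_image.outputs['Color'])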
@@ -1608,17 +1608,6 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
     mesh_version.add_variable("mesh", _3ds_uint(3))
     object_info.add_subchunk(mesh_version)

-    # Add MASTERSCALE element
-    mscale = _3ds_chunk(MASTERSCALE)
-    mscale.add_variable("scale", _3ds_float(1.0))
-    object_info.add_subchunk(mscale)
-
-    # Add 3D cursor location
-    if use_cursor:
-        cursor_chunk = _3ds_chunk(O_CONSTS)
-        cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location))
-        object_info.add_subchunk(cursor_chunk)
-
     # Init main keyframe data chunk
     if use_keyframes:
         revision = 0x0005
@@ -1627,92 +1616,6 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
         curtime = scene.frame_current
         kfdata = make_kfdata(revision, start, stop, curtime)

-    # Add AMBIENT color
-    if world is not None and 'WORLD' in object_filter:
-        ambient_chunk = _3ds_chunk(AMBIENTLIGHT)
-        ambient_light = _3ds_chunk(RGB)
-        ambient_light.add_variable("ambient", _3ds_float_color(world.color))
-        ambient_chunk.add_subchunk(ambient_light)
-        object_info.add_subchunk(ambient_chunk)
-
-        # Add BACKGROUND and BITMAP
-        if world.use_nodes:
-            bgtype = 'BACKGROUND'
-            ntree = world.node_tree.links
-            background_color_chunk = _3ds_chunk(RGB)
-            background_chunk = _3ds_chunk(SOLIDBACKGND)
-            background_flag = _3ds_chunk(USE_SOLIDBGND)
-            bgmixer = 'BACKGROUND', 'MIX', 'MIX_RGB'
-            bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD'
-            bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT'
-            bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color)
-            bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype)
-            bg_image = next((lk.from_node.image for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False)
-            gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False)
-            background_color_chunk.add_variable("color", _3ds_float_color(bg_color))
-            background_chunk.add_subchunk(background_color_chunk)
-            if bg_image:
-                background_image = _3ds_chunk(BITMAP)
-                background_flag = _3ds_chunk(USE_BITMAP)
-                background_image.add_variable("image", _3ds_string(sane_name(bg_image.name)))
-                object_info.add_subchunk(background_image)
-            object_info.add_subchunk(background_chunk)
-
-            # Add VGRADIENT chunk
-            if gradient and len(gradient) >= 3:
-                gradient_chunk = _3ds_chunk(VGRADIENT)
-                background_flag = _3ds_chunk(USE_VGRADIENT)
-                gradient_chunk.add_variable("midpoint", _3ds_float(gradient[1].position))
-                gradient_topcolor_chunk = _3ds_chunk(RGB)
-                gradient_topcolor_chunk.add_variable("color", _3ds_float_color(gradient[2].color[:3]))
-                gradient_chunk.add_subchunk(gradient_topcolor_chunk)
-                gradient_midcolor_chunk = _3ds_chunk(RGB)
-                gradient_midcolor_chunk.add_variable("color", _3ds_float_color(gradient[1].color[:3]))
-                gradient_chunk.add_subchunk(gradient_midcolor_chunk)
-                gradient_lowcolor_chunk = _3ds_chunk(RGB)
-                gradient_lowcolor_chunk.add_variable("color", _3ds_float_color(gradient[0].color[:3]))
-                gradient_chunk.add_subchunk(gradient_lowcolor_chunk)
-                object_info.add_subchunk(gradient_chunk)
-            object_info.add_subchunk(background_flag)
-
-            # Add FOG
-            fognode = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_ABSORPTION' and lk.to_socket.node.type in bgshade), False)
-            if fognode:
-                fog_chunk = _3ds_chunk(FOG)
-                fog_color_chunk = _3ds_chunk(RGB)
-                use_fog_flag = _3ds_chunk(USE_FOG)
-                fog_density = fognode.inputs['Density'].default_value * 100
-                fog_color_chunk.add_variable("color", _3ds_float_color(fognode.inputs[0].default_value[:3]))
-                fog_chunk.add_variable("nearplane", _3ds_float(world.mist_settings.start))
-                fog_chunk.add_variable("nearfog", _3ds_float(fog_density * 0.5))
-                fog_chunk.add_variable("farplane", _3ds_float(world.mist_settings.depth))
-                fog_chunk.add_variable("farfog", _3ds_float(fog_density + fog_density * 0.5))
-                fog_chunk.add_subchunk(fog_color_chunk)
-                object_info.add_subchunk(fog_chunk)
-
-            # Add LAYER FOG
-            foglayer = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_SCATTER' and lk.to_socket.node.type in bgshade), False)
-            if foglayer:
-                layerfog_flag = 0
-                if world.mist_settings.falloff == 'QUADRATIC':
-                    layerfog_flag |= 0x1
-                if world.mist_settings.falloff == 'INVERSE_QUADRATIC':
-                    layerfog_flag |= 0x2
-                layerfog_chunk = _3ds_chunk(LAYER_FOG)
-                layerfog_color_chunk = _3ds_chunk(RGB)
-                use_fog_flag = _3ds_chunk(USE_LAYER_FOG)
-                layerfog_color_chunk.add_variable("color", _3ds_float_color(foglayer.inputs[0].default_value[:3]))
-                layerfog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start))
-                layerfog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.height))
-                layerfog_chunk.add_variable("density", _3ds_float(foglayer.inputs[1].default_value))
-                layerfog_chunk.add_variable("flags", _3ds_uint(layerfog_flag))
-                layerfog_chunk.add_subchunk(layerfog_color_chunk)
-                object_info.add_subchunk(layerfog_chunk)
-            if fognode or foglayer and layer.use_pass_mist:
-                object_info.add_subchunk(use_fog_flag)
-        if use_keyframes and world.animation_data or world.node_tree.animation_data:
-            kfdata.add_subchunk(make_ambient_node(world))
-
     # Make a list of all materials used in the selected meshes (use dictionary, each material is added once)
     materialDict = {}
     mesh_objects = []
@@ -1781,10 +1684,107 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
                 f.material_index = 0


-    # Make material chunks for all materials used in the meshes
+    # Make MATERIAL chunks for all materials used in the meshes
     for ma_image in materialDict.values():
         object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1]))

+    # Add MASTERSCALE element
+    mscale = _3ds_chunk(MASTERSCALE)
+    mscale.add_variable("scale", _3ds_float(1.0))
+    object_info.add_subchunk(mscale)
+
+    # Add 3D cursor location
+    if use_cursor:
+        cursor_chunk = _3ds_chunk(O_CONSTS)
+        cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location))
+        object_info.add_subchunk(cursor_chunk)
+
+    # Add AMBIENT color
+    if world is not None and 'WORLD' in object_filter:
+        ambient_chunk = _3ds_chunk(AMBIENTLIGHT)
+        ambient_light = _3ds_chunk(RGB)
+        ambient_light.add_variable("ambient", _3ds_float_color(world.color))
+        ambient_chunk.add_subchunk(ambient_light)
+        object_info.add_subchunk(ambient_chunk)
+
+        # Add BACKGROUND and BITMAP
+        if world.use_nodes:
+            bgtype = 'BACKGROUND'
+            ntree = world.node_tree.links
+            background_color_chunk = _3ds_chunk(RGB)
+            background_chunk = _3ds_chunk(SOLIDBACKGND)
+            background_flag = _3ds_chunk(USE_SOLIDBGND)
+            bgmixer = 'BACKGROUND', 'MIX', 'MIX_RGB'
+            bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD'
+            bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT'
+            bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color)
+            bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype)
+            bg_image = next((lk.from_node.image for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False)
+            gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False)
+            background_color_chunk.add_variable("color", _3ds_float_color(bg_color))
+            background_chunk.add_subchunk(background_color_chunk)
+            if bg_image and bg_image is not None:
+                background_image = _3ds_chunk(BITMAP)
+                background_flag = _3ds_chunk(USE_BITMAP)
+                background_image.add_variable("image", _3ds_string(sane_name(bg_image.name)))
+                object_info.add_subchunk(background_image)
+            object_info.add_subchunk(background_chunk)
+
+            # Add VGRADIENT chunk
+            if gradient and len(gradient) >= 3:
+                gradient_chunk = _3ds_chunk(VGRADIENT)
+                background_flag = _3ds_chunk(USE_VGRADIENT)
+                gradient_chunk.add_variable("midpoint", _3ds_float(gradient[1].position))
+                gradient_topcolor_chunk = _3ds_chunk(RGB)
+                gradient_topcolor_chunk.add_variable("color", _3ds_float_color(gradient[2].color[:3]))
+                gradient_chunk.add_subchunk(gradient_topcolor_chunk)
+                gradient_midcolor_chunk = _3ds_chunk(RGB)
+                gradient_midcolor_chunk.add_variable("color", _3ds_float_color(gradient[1].color[:3]))
+                gradient_chunk.add_subchunk(gradient_midcolor_chunk)
+                gradient_lowcolor_chunk = _3ds_chunk(RGB)
+                gradient_lowcolor_chunk.add_variable("color", _3ds_float_color(gradient[0].color[:3]))
+                gradient_chunk.add_subchunk(gradient_lowcolor_chunk)
+                object_info.add_subchunk(gradient_chunk)
+            object_info.add_subchunk(background_flag)
+
+            # Add FOG
+            fognode = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_ABSORPTION' and lk.to_socket.node.type in bgshade), False)
+            if fognode:
+                fog_chunk = _3ds_chunk(FOG)
+                fog_color_chunk = _3ds_chunk(RGB)
+                use_fog_flag = _3ds_chunk(USE_FOG)
+                fog_density = fognode.inputs['Density'].default_value * 100
+                fog_color_chunk.add_variable("color", _3ds_float_color(fognode.inputs[0].default_value[:3]))
+                fog_chunk.add_variable("nearplane", _3ds_float(world.mist_settings.start))
+                fog_chunk.add_variable("nearfog", _3ds_float(fog_density * 0.5))
+                fog_chunk.add_variable("farplane", _3ds_float(world.mist_settings.depth))
+                fog_chunk.add_variable("farfog", _3ds_float(fog_density + fog_density * 0.5))
+                fog_chunk.add_subchunk(fog_color_chunk)
+                object_info.add_subchunk(fog_chunk)
+
+            # Add LAYER FOG
+            foglayer = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_SCATTER' and lk.to_socket.node.type in bgshade), False)
+            if foglayer:
+                layerfog_flag = 0
+                if world.mist_settings.falloff == 'QUADRATIC':
+                    layerfog_flag |= 0x1
+                if world.mist_settings.falloff == 'INVERSE_QUADRATIC':
+                    layerfog_flag |= 0x2
+                layerfog_chunk = _3ds_chunk(LAYER_FOG)
+                layerfog_color_chunk = _3ds_chunk(RGB)
+                use_fog_flag = _3ds_chunk(USE_LAYER_FOG)
+                layerfog_color_chunk.add_variable("color", _3ds_float_color(foglayer.inputs[0].default_value[:3]))
+                layerfog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start))
+                layerfog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.height))
+                layerfog_chunk.add_variable("density", _3ds_float(foglayer.inputs[1].default_value))
+                layerfog_chunk.add_variable("flags", _3ds_uint(layerfog_flag))
+                layerfog_chunk.add_subchunk(layerfog_color_chunk)
+                object_info.add_subchunk(layerfog_chunk)
+            if fognode or foglayer and layer.use_pass_mist:
+                object_info.add_subchunk(use_fog_flag)
+        if use_keyframes and world.animation_data or (world.node_tree and world.node_tree.animation_data):
+            kfdata.add_subchunk(make_ambient_node(world))
+
     # Collect translation for transformation matrix
     translation = {}
     rotation = {}
@@ -1938,11 +1938,10 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
         obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
         obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
         obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
-        if ob.parent is None or (ob.parent.name not in object_id):
-            obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
-        else:  # Get the parent ID from the object_id dict
+        if ob.parent is not None and (ob.parent.name in object_id):
+            obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
             obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
         obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
         object_chunk.add_subchunk(obj_hierarchy_chunk)

     # Add light object and hierarchy chunks to object info
@@ -1976,11 +1975,10 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
         obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
         obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
         obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
-        if ob.parent is None or (ob.parent.name not in object_id):
-            obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
-        else:  # Get the parent ID from the object_id dict
+        if ob.parent is not None and (ob.parent.name in object_id):
+            obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
             obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
         obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
         object_chunk.add_subchunk(obj_hierarchy_chunk)

     # Add light object and hierarchy chunks to object info
@@ -2021,4 +2019,4 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
     # Debugging only: dump the chunk hierarchy
     # primary.dump()

     return {'FINISHED'}
@@ -263,19 +263,19 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
         img_wrap = contextWrapper.base_color_texture
         links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
         links.new(mixer.outputs['Color'], shader.inputs['Base Color'])
+    elif mapto == 'ROUGHNESS':
+        img_wrap = contextWrapper.roughness_texture
+    elif mapto == 'METALLIC':
+        shader.location = (300,300)
+        img_wrap = contextWrapper.metallic_texture
     elif mapto == 'SPECULARITY':
+        shader.location = (300,0)
         img_wrap = contextWrapper.specular_tint_texture
     elif mapto == 'ALPHA':
-        shader.location = (0, -300)
+        shader.location = (-300,0)
         img_wrap = contextWrapper.alpha_texture
-    elif mapto == 'METALLIC':
-        shader.location = (300, 300)
-        img_wrap = contextWrapper.metallic_texture
-    elif mapto == 'ROUGHNESS':
-        shader.location = (300, 0)
-        img_wrap = contextWrapper.roughness_texture
     elif mapto == 'EMISSION':
-        shader.location = (-300, -600)
+        shader.location = (0,-900)
         img_wrap = contextWrapper.emission_color_texture
     elif mapto == 'NORMAL':
         shader.location = (300, 300)
@@ -641,8 +641,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
         temp_data = file.read(SZ_U_INT * 2)
         track_chunk.bytes_read += SZ_U_INT * 2
         nkeys = read_long(track_chunk)
-        if nkeys == 0:
-            keyframe_data[0] = default_data
         for i in range(nkeys):
             nframe = read_long(track_chunk)
             nflags = read_short(track_chunk)
@@ -657,8 +655,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
         temp_data = file.read(SZ_U_SHORT * 5)
         track_chunk.bytes_read += SZ_U_SHORT * 5
         nkeys = read_long(track_chunk)
-        if nkeys == 0:
-            keyframe_angle[0] = default_value
         for i in range(nkeys):
             nframe = read_long(track_chunk)
             nflags = read_short(track_chunk)
@@ -815,7 +811,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
            if contextWorld is None:
                path, filename = os.path.split(file.name)
                realname, ext = os.path.splitext(filename)
-               newWorld = bpy.data.worlds.new("Fog: " + realname)
+               contextWorld = bpy.data.worlds.new("Fog: " + realname)
                context.scene.world = contextWorld
            contextWorld.use_nodes = True
            links = contextWorld.node_tree.links
@@ -1333,7 +1329,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

        elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'AMBIENT':  # Ambient
            keyframe_data = {}
-           default_data = child.color[:]
+           keyframe_data[0] = child.color[:]
            child.color = read_track_data(new_chunk)[0]
            ambinode.inputs[0].default_value[:3] = child.color
            ambilite.outputs[0].default_value[:3] = child.color
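All of these keyframe hunks apply the same simplification: instead of carrying a separate `default_data`/`default_value` and writing it only when a track has zero keys, frame 0 is pre-seeded with the object's current value and is simply overwritten when the track does contain keys. A sketch of the pattern using the ambient-colour case (the keyframe-insertion call is an assumption and is not visible in these hunks):

    keyframe_data = {}
    keyframe_data[0] = child.color[:]             # seed frame 0 with the current value
    child.color = read_track_data(new_chunk)[0]   # populates keyframe_data; key 0 is replaced if keys exist
    for frame, value in keyframe_data.items():
        child.color = value
        child.keyframe_insert(data_path="color", frame=frame)  # assumed follow-up, not shown above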
@@ -1347,7 +1343,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

        elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'LIGHT':  # Color
            keyframe_data = {}
-           default_data = child.data.color[:]
+           keyframe_data[0] = child.data.color[:]
            child.data.color = read_track_data(new_chunk)[0]
            for keydata in keyframe_data.items():
                child.data.color = keydata[1]
@@ -1356,7 +1352,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

        elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'OBJECT':  # Translation
            keyframe_data = {}
-           default_data = child.location[:]
+           keyframe_data[0] = child.location[:]
            child.location = read_track_data(new_chunk)[0]
            if child.type in {'LIGHT', 'CAMERA'}:
                trackposition[0] = child.location
@@ -1408,12 +1404,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

        elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and tracktype == 'OBJECT':  # Rotation
            keyframe_rotation = {}
+           keyframe_rotation[0] = child.rotation_axis_angle[:]
            tflags = read_short(new_chunk)
            temp_data = file.read(SZ_U_INT * 2)
            new_chunk.bytes_read += SZ_U_INT * 2
            nkeys = read_long(new_chunk)
-           if nkeys == 0:
-               keyframe_rotation[0] = child.rotation_axis_angle[:]
            if tflags & 0x8:  # Flag 0x8 locks X axis
                child.lock_rotation[0] = True
            if tflags & 0x10:  # Flag 0x10 locks Y axis
@@ -1446,7 +1441,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

        elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and tracktype == 'OBJECT':  # Scale
            keyframe_data = {}
-           default_data = child.scale[:]
+           keyframe_data[0] = child.scale[:]
            child.scale = read_track_data(new_chunk)[0]
            if contextTrack_flag & 0x8:  # Flag 0x8 locks X axis
                child.lock_scale[0] = True
@@ -1466,7 +1461,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

        elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracktype == 'OBJECT':  # Roll angle
            keyframe_angle = {}
-           default_value = child.rotation_euler.y
+           keyframe_angle[0] = child.rotation_euler.y
            child.rotation_euler.y = read_track_angle(new_chunk)[0]
            for keydata in keyframe_angle.items():
                child.rotation_euler.y = keydata[1]
@@ -1476,7 +1471,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

        elif KEYFRAME and new_chunk.ID == FOV_TRACK_TAG and tracking == 'CAMERA':  # Field of view
            keyframe_angle = {}
-           default_value = child.data.angle
+           keyframe_angle[0] = child.data.angle
            child.data.angle = read_track_angle(new_chunk)[0]
            for keydata in keyframe_angle.items():
                child.data.lens = (child.data.sensor_width / 2) / math.tan(keydata[1] / 2)
@@ -1485,7 +1480,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
        elif KEYFRAME and new_chunk.ID == HOTSPOT_TRACK_TAG and tracking == 'LIGHT' and spotting == 'SPOT':  # Hotspot
            keyframe_angle = {}
            cone_angle = math.degrees(child.data.spot_size)
-           default_value = cone_angle-(child.data.spot_blend * math.floor(cone_angle))
+           keyframe_angle[0] = cone_angle-(child.data.spot_blend * math.floor(cone_angle))
            hot_spot = math.degrees(read_track_angle(new_chunk)[0])
            child.data.spot_blend = 1.0 - (hot_spot / cone_angle)
            for keydata in keyframe_angle.items():
@@ -1494,7 +1489,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

        elif KEYFRAME and new_chunk.ID == FALLOFF_TRACK_TAG and tracking == 'LIGHT' and spotting == 'SPOT':  # Falloff
            keyframe_angle = {}
-           default_value = math.degrees(child.data.spot_size)
+           keyframe_angle[0] = math.degrees(child.data.spot_size)
            child.data.spot_size = read_track_angle(new_chunk)[0]
            for keydata in keyframe_angle.items():
                child.data.spot_size = keydata[1]
@@ -5,7 +5,7 @@
 bl_info = {
     "name": "FBX format",
     "author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
-    "version": (5, 8, 3),
+    "version": (5, 8, 6),
     "blender": (3, 6, 0),
     "location": "File > Import-Export",
     "description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",
@@ -250,9 +250,8 @@ class FBXElem:
             for elem in self.elems:
                 offset = elem._calc_offsets(offset, (elem is elem_last))
             offset += _BLOCK_SENTINEL_LENGTH
-        elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
-            if not is_last:
-                offset += _BLOCK_SENTINEL_LENGTH
+        elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
+            offset += _BLOCK_SENTINEL_LENGTH

         return offset

@@ -282,9 +281,8 @@ class FBXElem:
                 assert(elem.id != b'')
                 elem._write(write, tell, (elem is elem_last))
             write(_BLOCK_SENTINEL_DATA)
-        elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
-            if not is_last:
-                write(_BLOCK_SENTINEL_DATA)
+        elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
+            write(_BLOCK_SENTINEL_DATA)


 def _write_timedate_hack(elem_root):
@@ -1810,18 +1810,16 @@ def fbx_data_armature_elements(root, arm_obj, scene_data):
         elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION)
         elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0)  # Only vague idea what it is...

-        # Pre-process vertex weights (also to check vertices assigned to more than four bones).
+        # Pre-process vertex weights so that the vertices only need to be iterated once.
         ob = ob_obj.bdata
         bo_vg_idx = {bo_obj.bdata.name: ob.vertex_groups[bo_obj.bdata.name].index
                      for bo_obj in clusters.keys() if bo_obj.bdata.name in ob.vertex_groups}
         valid_idxs = set(bo_vg_idx.values())
         vgroups = {vg.index: {} for vg in ob.vertex_groups}
-        verts_vgroups = (sorted(((vg.group, vg.weight) for vg in v.groups if vg.weight and vg.group in valid_idxs),
-                                key=lambda e: e[1], reverse=True)
-                         for v in me.vertices)
-        for idx, vgs in enumerate(verts_vgroups):
-            for vg_idx, w in vgs:
-                vgroups[vg_idx][idx] = w
+        for idx, v in enumerate(me.vertices):
+            for vg in v.groups:
+                if (w := vg.weight) and (vg_idx := vg.group) in valid_idxs:
+                    vgroups[vg_idx][idx] = w

         for bo_obj, clstr_key in clusters.items():
             bo = bo_obj.bdata
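The rewrite above gathers vertex weights in one plain pass; the removed generator sorted each vertex's weights, which per the removed comment was only needed for an old more-than-four-bones check. A minimal sketch of the resulting structure, assuming `ob` is the mesh object, `me` its mesh data and `valid_idxs` the vertex-group indices that map to exported bones:

    # vgroups maps vertex-group index -> {vertex index: weight}
    vgroups = {vg.index: {} for vg in ob.vertex_groups}
    for vert_idx, vert in enumerate(me.vertices):
        for vg_entry in vert.groups:
            # Skip zero weights and groups that do not belong to any deform bone.
            if (weight := vg_entry.weight) and (group_idx := vg_entry.group) in valid_idxs:
                vgroups[group_idx][vert_idx] = weight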
@@ -2784,7 +2784,9 @@ class FbxImportHelperNode:
         for i, w in combined_weights.items():
             indices.append(i)
             if len(w) > 1:
-                weights.append(sum(w) / len(w))
+                # Add ignored child weights to the current bone's weight.
+                # XXX - Weights that sum to more than 1.0 get clamped to 1.0 when set in the vertex group.
+                weights.append(sum(w))
             else:
                 weights.append(w[0])

@@ -5,7 +5,7 @@
 bl_info = {
     'name': 'glTF 2.0 format',
     'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
-    "version": (4, 0, 32),
+    "version": (4, 0, 34),
     'blender': (4, 0, 0),
     'location': 'File > Import-Export',
     'description': 'Import-Export as glTF 2.0',
@@ -147,7 +147,6 @@ class ExportImage:

         # Unhappy path = we need to create the image self.fills describes or self.stores describes
         if self.numpy_calc is None:
-            print(">2")
             return self.__encode_unhappy(export_settings), None
         else:
             pixels, width, height, factor = self.numpy_calc(self.stored)
@@ -135,6 +135,7 @@ class BlenderNode():
             bpy.data.collections.new(BLENDER_GLTF_SPECIAL_COLLECTION)
             bpy.data.scenes[gltf.blender_scene].collection.children.link(bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION])
             bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_viewport = True
+            bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_render = True

             # Create an icosphere, and assign it to the collection
             bpy.ops.mesh.primitive_ico_sphere_add(radius=1, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
@@ -187,7 +188,10 @@ class BlenderNode():
         arma_mat = vnode.editbone_arma_mat
         editbone.head = arma_mat @ Vector((0, 0, 0))
         editbone.tail = arma_mat @ Vector((0, 1, 0))
-        editbone.length = vnode.bone_length
+        if gltf.import_settings['bone_heuristic'] == "BLENDER":
+            editbone.length = vnode.bone_length / max(blender_arma.scale)
+        else:
+            editbone.length = vnode.bone_length
         editbone.align_roll(arma_mat @ Vector((0, 0, 1)) - editbone.head)

         if isinstance(id, int):
@@ -225,7 +229,8 @@ class BlenderNode():

         if gltf.import_settings['bone_heuristic'] == "BLENDER":
             pose_bone.custom_shape = bpy.data.objects[gltf.bone_shape]
-            pose_bone.custom_shape_scale_xyz = Vector([0.1, 0.1, 0.1])
+            armature_max_dim = max([blender_arma.dimensions[0] / blender_arma.scale[0], blender_arma.dimensions[1] / blender_arma.scale[1], blender_arma.dimensions[2] / blender_arma.scale[2]])
+            pose_bone.custom_shape_scale_xyz = Vector([armature_max_dim * 0.2] * 3)

     @staticmethod
     def create_mesh_object(gltf, vnode):
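Both bone hunks above compensate for the armature object's scale when the "BLENDER" bone heuristic is selected: the edit-bone length is divided by the largest scale component, and the custom-shape size is derived from the armature's dimensions with the object scale factored out. A condensed sketch of the sizing rule, assuming `blender_arma` is the armature object and `pose_bone` already has its custom shape assigned:

    from mathutils import Vector

    # Largest armature dimension with the object scale factored out.
    armature_max_dim = max(blender_arma.dimensions[i] / blender_arma.scale[i] for i in range(3))
    # Show the custom bone shape at 20% of that dimension on all three axes.
    pose_bone.custom_shape_scale_xyz = Vector((armature_max_dim * 0.2,) * 3)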
@@ -539,7 +539,7 @@ class NWPreviewNode(Operator, NWBase):

         if not viewer_socket:
             # create viewer socket
-            viewer_socket = node.node_tree.interface.new_socket(viewer_socket_name, in_out={'OUTPUT'}, socket_type=socket_type)
+            viewer_socket = node.node_tree.interface.new_socket(viewer_socket_name, in_out='OUTPUT', socket_type=socket_type)
             viewer_socket.NWViewerSocket = True
         return viewer_socket

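The one-character fix above matters because `interface.new_socket()` takes `in_out` as a single enum string ('INPUT' or 'OUTPUT'); passing the set `{'OUTPUT'}` is rejected by the enum property. A hedged sketch, with `tree` standing in for any node tree that should expose a viewer output:

    # Illustrative names; only the in_out keyword style is the point here.
    viewer_socket = tree.interface.new_socket("Viewer", in_out='OUTPUT', socket_type='NodeSocketColor')
    viewer_socket.NWViewerSocket = True  # custom flag registered by Node Wrangler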
@@ -99,9 +99,6 @@ class VIEW3D_MT_Pose(Menu):
         layout.operator("pose.quaternions_flip")
         layout.operator_context = 'INVOKE_AREA'
         layout.separator()
-        layout.operator("armature.armature_layers", text="Change Armature Layers...")
-        layout.operator("pose.bone_layers", text="Change Bone Layers...")
-        layout.separator()
         layout.menu("VIEW3D_MT_pose_showhide")
         layout.menu("VIEW3D_MT_bone_options_toggle", text="Bone Settings")
