Images as Planes: Improve option panels #104936
@@ -5,8 +5,8 @@
 bl_info = {
     "name": "AnimAll",
     "author": "Daniel Salazar (ZanQdo), Damien Picard (pioverfour)",
-    "version": (0, 9, 6),
-    "blender": (3, 3, 0),
+    "version": (0, 10, 0),
+    "blender": (4, 0, 0),
     "location": "3D View > Toolbox > Animation tab > AnimAll",
     "description": "Allows animation of mesh, lattice, curve and surface data",
     "warning": "",
@@ -22,6 +22,7 @@ from bpy.app.translations import (pgettext_iface as iface_,
                                   pgettext_data as data_)
 from . import translations

+import re

 # Property Definitions
 class AnimallProperties(bpy.types.PropertyGroup):
@@ -49,10 +50,12 @@ class AnimallProperties(bpy.types.PropertyGroup):
         name="Vertex Bevel",
         description="Insert keyframes on vertex bevel weight",
         default=False)
-    # key_vertex_crease: BoolProperty(
-    #     name="Vertex Crease",
-    #     description="Insert keyframes on vertex crease weight",
-    #     default=False)
+
+    key_vertex_crease: BoolProperty(
+        name="Vertex Crease",
+        description="Insert keyframes on vertex crease weight",
+        default=False)
+
     key_vertex_group: BoolProperty(
         name="Vertex Group",
         description="Insert keyframes on active vertex group values",
@@ -67,8 +70,8 @@ class AnimallProperties(bpy.types.PropertyGroup):
         description="Insert keyframes on edge creases",
         default=False)

-    key_attribute: BoolProperty(
-        name="Attribute",
+    key_active_attribute: BoolProperty(
+        name="Active Attribute",
         description="Insert keyframes on active attribute values",
         default=False)
     key_uvs: BoolProperty(
@@ -115,6 +118,55 @@ def delete_key(data, key):
         pass


+def get_attribute(data, name, type=None, domain=None):
+    if name in data.attributes:
+        return data.attributes[name]
+    if type is not None and domain is not None:
+        return data.attributes.new(name, type, domain)
+
+
+def get_attribute_paths(data, attribute, key_selected):
+    # Cannot animate string attributes?
+    if attribute.data_type == 'STRING':
+        yield ("", "")
+
+    if attribute.data_type in {'FLOAT', 'INT', 'BOOLEAN', 'INT8'}:
+        attribute_key = "value"
+    elif attribute.data_type in {'FLOAT_COLOR', 'BYTE_COLOR'}:
+        attribute_key = "color"
+    elif attribute.data_type in {'FLOAT_VECTOR', 'FLOAT2'}:
+        attribute_key = "vector"
+
+    if attribute.domain == 'POINT':
+        group = data_("Vertex %s")
+    elif attribute.domain == 'EDGE':
+        group = data_("Edge %s")
+    elif attribute.domain == 'FACE':
+        group = data_("Face %s")
+    elif attribute.domain == 'CORNER':
+        group = data_("Loop %s")
+
+    for e_i, _attribute_data in enumerate(attribute.data):
+        if (not key_selected
+                or attribute.domain == 'POINT' and data.vertices[e_i].select
+                or attribute.domain == 'EDGE' and data.edges[e_i].select
+                or attribute.domain == 'FACE' and data.polygons[e_i].select
+                or attribute.domain == 'CORNER' and is_selected_vert_loop(data, e_i)):
+            yield (f'attributes["{attribute.name}"].data[{e_i}].{attribute_key}', group % e_i)
+
+
+def insert_attribute_key(data, attribute, key_selected):
+    for path, group in get_attribute_paths(data, attribute, key_selected):
+        if path:
+            insert_key(data, path, group=group)
+
+
+def delete_attribute_key(data, attribute, key_selected):
+    for path, group in get_attribute_paths(data, attribute, key_selected):
+        if path:
+            delete_key(data, path)
+
+
 def is_selected_vert_loop(data, loop_i):
     """Get selection status of vertex corresponding to a loop"""
     vertex_index = data.loops[loop_i].vertex_index
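
Note: the refactor above replaces four near-identical per-domain keying loops with a single generator that yields (data_path, group) pairs resolved against the mesh datablock. The insert_key()/delete_key() helpers it feeds are defined earlier in the add-on and are not shown in this diff; as a rough sketch, insert_key() presumably reduces to the following (error handling elided), since bpy.types.ID.keyframe_insert() accepts exactly these string data paths:

    def insert_key(data, path, group=None):
        # "data" is an ID datablock (a Mesh here); "path" is an RNA data path such as
        # 'attributes["crease_vert"].data[3].value' yielded by get_attribute_paths().
        if group is not None:
            data.keyframe_insert(path, group=group)
        else:
            data.keyframe_insert(path)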

@@ -126,7 +178,7 @@ def is_selected_vert_loop(data, loop_i):
 class VIEW3D_PT_animall(Panel):
     bl_space_type = 'VIEW_3D'
     bl_region_type = 'UI'
-    bl_category = "Animate"
+    bl_category = "Animation"
     bl_label = ''

     @classmethod
@@ -161,6 +213,7 @@ class VIEW3D_PT_animall(Panel):
             col = layout.column(heading="Points", align=True)
             col.prop(animall_properties, "key_point_location")
             col.prop(animall_properties, "key_vertex_bevel", text="Bevel")
+            col.prop(animall_properties, "key_vertex_crease", text="Crease")
             col.prop(animall_properties, "key_vertex_group")

             col = layout.column(heading="Edges", align=True)
@@ -171,7 +224,7 @@ class VIEW3D_PT_animall(Panel):
             col.prop(animall_properties, "key_material_index")

             col = layout.column(heading="Others", align=True)
-            col.prop(animall_properties, "key_attribute")
+            col.prop(animall_properties, "key_active_attribute")
             col.prop(animall_properties, "key_uvs")
             col.prop(animall_properties, "key_shape_key")

@@ -179,10 +232,10 @@ class VIEW3D_PT_animall(Panel):
-            if (obj.data.animation_data is not None
-                    and obj.data.animation_data.action is not None):
-                for fcurve in context.active_object.data.animation_data.action.fcurves:
-                    if fcurve.data_path.startswith("vertex_colors"):
+            if bpy.ops.anim.update_attribute_animation_animall.poll():
                 col = layout.column(align=True)
-                col.label(text="Object includes old-style vertex colors. Consider updating them.", icon="ERROR")
-                col.operator("anim.update_vertex_color_animation_animall", icon="FILE_REFRESH")
+                col.label(text="Object includes old-style attributes. Consider updating them.", icon="ERROR")
+                col.operator("anim.update_attribute_animation_animall", icon="FILE_REFRESH")
-                break

         elif obj.type in {'CURVE', 'SURFACE'}:
@@ -315,13 +368,12 @@ class ANIM_OT_insert_keyframe_animall(Operator):
                             insert_key(vert, 'co', group=data_("Vertex %s") % v_i)

                 if animall_properties.key_vertex_bevel:
-                    for v_i, vert in enumerate(data.vertices):
-                        if not animall_properties.key_selected or vert.select:
-                            insert_key(vert, 'bevel_weight', group=data_("Vertex %s") % v_i)
-                # if animall_properties.key_vertex_crease:
-                #     for v_i, vert in enumerate(data.vertices):
-                #         if not animall_properties.key_selected or vert.select:
-                #             insert_key(vert, 'crease', group=data_("Vertex %s") % v_i)
+                    attribute = get_attribute(data, "bevel_weight_vert", 'FLOAT', 'POINT')
+                    insert_attribute_key(data, attribute, animall_properties.key_selected)
+
+                if animall_properties.key_vertex_crease:
+                    attribute = get_attribute(data, "crease_vert", 'FLOAT', 'POINT')
+                    insert_attribute_key(data, attribute, animall_properties.key_selected)

                 if animall_properties.key_vertex_group:
                     for v_i, vert in enumerate(data.vertices):
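
Note: Blender 4.0 dropped the dedicated bevel_weight/crease properties on mesh vertices and edges in favour of named generic attributes, which is why keying now goes through get_attribute() and insert_attribute_key(). A self-contained sketch of the same idea, assuming an active mesh object in the scene:

    import bpy

    mesh = bpy.context.object.data
    # Fetch or create the float point attribute that 4.0 uses for vertex bevel weights.
    attr = (mesh.attributes.get("bevel_weight_vert")
            or mesh.attributes.new("bevel_weight_vert", 'FLOAT', 'POINT'))
    # Key the first vertex's weight on the mesh datablock itself.
    mesh.keyframe_insert(f'attributes["{attr.name}"].data[0].value', group="Vertex 0")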

@@ -330,55 +382,31 @@ class ANIM_OT_insert_keyframe_animall(Operator):
                             insert_key(group, 'weight', group=data_("Vertex %s") % v_i)

                 if animall_properties.key_edge_bevel:
-                    for e_i, edge in enumerate(data.edges):
-                        if not animall_properties.key_selected or edge.select:
-                            insert_key(edge, 'bevel_weight', group=data_("Edge %s") % e_i)
+                    attribute = get_attribute(data, "bevel_weight_edge", 'FLOAT', 'EDGE')
+                    insert_attribute_key(data, attribute, animall_properties.key_selected)

                 if animall_properties.key_edge_crease:
-                    for e_i, edge in enumerate(data.edges):
-                        if not animall_properties.key_selected or edge.select:
-                            insert_key(edge, 'crease', group=data_("Edge %s") % e_i)
+                    attribute = get_attribute(data, "crease_edge", 'FLOAT', 'EDGE')
+                    insert_attribute_key(data, attribute, animall_properties.key_selected)

                 if animall_properties.key_material_index:
                     for p_i, polygon in enumerate(data.polygons):
                         if not animall_properties.key_selected or polygon.select:
                             insert_key(polygon, 'material_index', group=data_("Face %s") % p_i)

-                if animall_properties.key_attribute:
+                if animall_properties.key_active_attribute:
                     if data.attributes.active is not None:
-                        attribute = data.attributes.active
-                        if attribute.data_type != 'STRING':
-                            # Cannot animate string attributes?
-                            if attribute.data_type in {'FLOAT', 'INT', 'BOOLEAN', 'INT8'}:
-                                attribute_key = "value"
-                            elif attribute.data_type in {'FLOAT_COLOR', 'BYTE_COLOR'}:
-                                attribute_key = "color"
-                            elif attribute.data_type in {'FLOAT_VECTOR', 'FLOAT2'}:
-                                attribute_key = "vector"
-
-                            if attribute.domain == 'POINT':
-                                group = data_("Vertex %s")
-                            elif attribute.domain == 'EDGE':
-                                group = data_("Edge %s")
-                            elif attribute.domain == 'FACE':
-                                group = data_("Face %s")
-                            elif attribute.domain == 'CORNER':
-                                group = data_("Loop %s")
-
-                            for e_i, _attribute_data in enumerate(attribute.data):
-                                if (not animall_properties.key_selected
-                                        or attribute.domain == 'POINT' and data.vertices[e_i].select
-                                        or attribute.domain == 'EDGE' and data.edges[e_i].select
-                                        or attribute.domain == 'FACE' and data.polygons[e_i].select
-                                        or attribute.domain == 'CORNER' and is_selected_vert_loop(data, e_i)):
-                                    insert_key(data, f'attributes["{attribute.name}"].data[{e_i}].{attribute_key}',
-                                               group=group % e_i)
+                        for path, group in get_attribute_paths(
+                                data, data.attributes.active,
+                                animall_properties.key_selected):
+                            if path:
+                                insert_key(data, path, group=group)

                 if animall_properties.key_uvs:
                     if data.uv_layers.active is not None:
                         for uv_i, uv in enumerate(data.uv_layers.active.data):
                             if not animall_properties.key_selected or uv.select:
-                                insert_key(uv, 'uv', group=data_("UV layer %s") % uv_i)
+                                insert_key(uv, 'uv', group=data_("UV Layer %s") % uv_i)

                 if animall_properties.key_shape_key:
                     if obj.active_shape_key_index > 0:
@@ -402,9 +430,15 @@ class ANIM_OT_insert_keyframe_animall(Operator):
                         if obj.active_shape_key_index > 0:
                             CV = obj.active_shape_key.data[global_spline_index]
                             insert_key(CV, 'co', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
-                            insert_key(CV, 'handle_left', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
-                            insert_key(CV, 'handle_right', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
-                            insert_key(CV, 'radius', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
+                            insert_key(
+                                CV, 'handle_left', group=data_("%s Spline %s CV %s") %
+                                (sk_name, s_i, v_i))
+                            insert_key(
+                                CV, 'handle_right', group=data_("%s Spline %s CV %s") %
+                                (sk_name, s_i, v_i))
+                            insert_key(
+                                CV, 'radius', group=data_("%s Spline %s CV %s") %
+                                (sk_name, s_i, v_i))
                             insert_key(CV, 'tilt', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
                             global_spline_index += 1

@@ -414,7 +448,8 @@ class ANIM_OT_insert_keyframe_animall(Operator):
                         if obj.active_shape_key_index > 0:
                             CV = obj.active_shape_key.data[global_spline_index]
                             insert_key(CV, 'co', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
-                            insert_key(CV, 'radius', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
+                            insert_key(
+                                CV, 'radius', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
                             insert_key(CV, 'tilt', group=data_("%s Spline %s CV %s") % (sk_name, s_i, v_i))
                             global_spline_index += 1

@@ -443,15 +478,22 @@ class ANIM_OT_delete_keyframe_animall(Operator):
         for obj in objects:
             data = obj.data
             if obj.type == 'MESH':
                 bpy.ops.object.mode_set(mode='OBJECT')

                 if animall_properties.key_point_location:
                     for vert in data.vertices:
                         if not animall_properties.key_selected or vert.select:
                             delete_key(vert, 'co')

                 if animall_properties.key_vertex_bevel:
-                    for vert in data.vertices:
-                        if not animall_properties.key_selected or vert.select:
-                            delete_key(vert, 'bevel_weight')
+                    attribute = get_attribute(data, "bevel_weight_vert", 'FLOAT', 'POINT')
+                    if attribute is not None:
+                        delete_attribute_key(data, attribute, animall_properties.key_selected)
+
+                if animall_properties.key_vertex_crease:
+                    attribute = get_attribute(data, "crease_vert", 'FLOAT', 'POINT')
+                    if attribute is not None:
+                        delete_attribute_key(data, attribute, animall_properties.key_selected)

                 if animall_properties.key_vertex_group:
                     for vert in data.vertices:
@@ -459,20 +501,20 @@ class ANIM_OT_delete_keyframe_animall(Operator):
                         for group in vert.groups:
                             delete_key(group, 'weight')

-                # if animall_properties.key_vertex_crease:
-                #     for vert in data.vertices:
-                #         if not animall_properties.key_selected or vert.select:
-                #             delete_key(vert, 'crease')
-
                 if animall_properties.key_edge_bevel:
-                    for edge in data.edges:
-                        if not animall_properties.key_selected or edge.select:
-                            delete_key(edge, 'bevel_weight')
+                    attribute = get_attribute(data, "bevel_weight_edge", 'FLOAT', 'EDGE')
+                    if attribute is not None:
+                        delete_attribute_key(data, attribute, animall_properties.key_selected)

                 if animall_properties.key_edge_crease:
-                    for edge in data.edges:
-                        if not animall_properties.key_selected or vert.select:
-                            delete_key(edge, 'crease')
+                    attribute = get_attribute(data, "crease_edge", 'FLOAT', 'EDGE')
+                    if attribute is not None:
+                        delete_attribute_key(data, attribute, animall_properties.key_selected)

                 if animall_properties.key_material_index:
                     for p_i, polygon in enumerate(data.polygons):
                         if not animall_properties.key_selected or polygon.select:
                             delete_key(polygon, 'material_index')

                 if animall_properties.key_shape_key:
                     if obj.active_shape_key:
@@ -486,25 +528,15 @@ class ANIM_OT_delete_keyframe_animall(Operator):
                             if not animall_properties.key_selected or uv.select:
                                 delete_key(uv, 'uv')

-                if animall_properties.key_attribute:
+                if animall_properties.key_active_attribute:
                     if data.attributes.active is not None:
-                        attribute = data.attributes.active
-                        if attribute.data_type != 'STRING':
-                            # Cannot animate string attributes?
-                            if attribute.data_type in {'FLOAT', 'INT', 'BOOLEAN', 'INT8'}:
-                                attribute_key = "value"
-                            elif attribute.data_type in {'FLOAT_COLOR', 'BYTE_COLOR'}:
-                                attribute_key = "color"
-                            elif attribute.data_type in {'FLOAT_VECTOR', 'FLOAT2'}:
-                                attribute_key = "vector"
+                        for path, _group in get_attribute_paths(
+                                data, data.attributes.active,
+                                animall_properties.key_selected):
+                            if path:
+                                delete_key(data, path)

-                            for e_i, _attribute_data in enumerate(attribute.data):
-                                if (not animall_properties.key_selected
-                                        or attribute.domain == 'POINT' and data.vertices[e_i].select
-                                        or attribute.domain == 'EDGE' and data.edges[e_i].select
-                                        or attribute.domain == 'FACE' and data.polygons[e_i].select
-                                        or attribute.domain == 'CORNER' and is_selected_vert_loop(data, e_i)):
-                                    delete_key(data, f'attributes["{attribute.name}"].data[{e_i}].{attribute_key}')
                 bpy.ops.object.mode_set(mode=mode)

             elif obj.type == 'LATTICE':
                 if animall_properties.key_shape_key:
@@ -588,12 +620,20 @@ class ANIM_OT_clear_animation_animall(Operator):
         return {'FINISHED'}


-class ANIM_OT_update_vertex_color_animation_animall(Operator):
-    bl_label = "Update Vertex Color Animation"
-    bl_idname = "anim.update_vertex_color_animation_animall"
-    bl_description = "Update old vertex color channel formats from pre-3.3 versions"
+class ANIM_OT_update_attribute_animation_animall(Operator):
+    bl_label = "Update Attribute Animation"
+    bl_idname = "anim.update_attribute_animation_animall"
+    bl_description = "Update attributes from the old format"
     bl_options = {'REGISTER', 'UNDO'}

+    path_re = re.compile(r"^vertex_colors|(vertices|edges)\[([0-9]+)\]\.(bevel_weight|crease)")
+    attribute_map = {
+        ("vertices", "bevel_weight"): ("bevel_weight_vert", "FLOAT", "POINT"),
+        ("edges", "bevel_weight"): ("bevel_weight_edge", "FLOAT", "EDGE"),
+        ("vertices", "crease"): ("crease_vert", "FLOAT", "POINT"),
+        ("edges", "crease"): ("crease_edge", "FLOAT", "EDGE"),
+    }
+
     @classmethod
     def poll(self, context):
         if (context.active_object is None
@@ -602,21 +642,30 @@ class ANIM_OT_update_vertex_color_animation_animall(Operator):
                 or context.active_object.data.animation_data.action is None):
             return False
         for fcurve in context.active_object.data.animation_data.action.fcurves:
-            if fcurve.data_path.startswith("vertex_colors"):
+            if self.path_re.match(fcurve.data_path):
                 return True

     def execute(self, context):
         for fcurve in context.active_object.data.animation_data.action.fcurves:
             if fcurve.data_path.startswith("vertex_colors"):
+                # Update pre-3.3 vertex colors
                 fcurve.data_path = fcurve.data_path.replace("vertex_colors", "attributes")
+            else:
+                # Update pre-4.0 attributes
+                match = self.path_re.match(fcurve.data_path)
+                if match is None:
+                    continue
+                domain, index, src_attribute = match.groups()
+                attribute, type, domain = self.attribute_map[(domain, src_attribute)]
+                get_attribute(context.active_object.data, attribute, type, domain)
+                fcurve.data_path = f'attributes["{attribute}"].data[{index}].value'
         return {'FINISHED'}


 # Add-ons Preferences Update Panel

 # Define Panel classes for updating
-panels = [
-    VIEW3D_PT_animall
-    ]
+panels = [VIEW3D_PT_animall]


 def update_panel(self, context):
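
Note on the migration operator above: pre-3.3 F-curves get a plain prefix swap ("vertex_colors..." becomes "attributes..."), while pre-4.0 bevel/crease paths are matched by path_re, looked up in attribute_map, and repointed at the corresponding generic attribute; get_attribute() is called first so the rewritten path resolves to something that exists. A worked example of the regex half, runnable outside Blender:

    import re

    path_re = re.compile(r"^vertex_colors|(vertices|edges)\[([0-9]+)\]\.(bevel_weight|crease)")
    domain, index, prop = path_re.match('vertices[12].bevel_weight').groups()
    # attribute_map[("vertices", "bevel_weight")] gives ("bevel_weight_vert", "FLOAT", "POINT"),
    # so the F-curve is repointed at:
    print(f'attributes["bevel_weight_vert"].data[{index}].value')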

@@ -643,7 +692,7 @@ class AnimallAddonPreferences(AddonPreferences):
     category: StringProperty(
         name="Tab Category",
         description="Choose a name for the category of the panel",
-        default="Animate",
+        default="Animation",
         update=update_panel
     )

@@ -658,7 +707,7 @@ class AnimallAddonPreferences(AddonPreferences):
 register_classes, unregister_classes = bpy.utils.register_classes_factory(
     (AnimallProperties, VIEW3D_PT_animall, ANIM_OT_insert_keyframe_animall,
      ANIM_OT_delete_keyframe_animall, ANIM_OT_clear_animation_animall,
-     ANIM_OT_update_vertex_color_animation_animall, AnimallAddonPreferences))
+     ANIM_OT_update_attribute_animation_animall, AnimallAddonPreferences))


 def register():
     register_classes()

@@ -12,10 +12,10 @@
 translations_tuple = (
     (("*", ""),
      ((), ()),
-     ("fr_FR", "Project-Id-Version: AnimAll 0.9.6 (0)\n",
+     ("fr_FR", "Project-Id-Version: AnimAll 0.10.0 (0)\n",
       (False,
        ("Blender's translation file (po format).",
-        "Copyright (C) 2022 The Blender Foundation.",
+        "Copyright (C) 2022-2023 The Blender Foundation.",
         "This file is distributed under the same license as the Blender package.",
         "Damien Picard <dam.pic@free.fr>, 2022."))),
     ),
@@ -59,7 +59,7 @@ translations_tuple = (
     (("Operator", "Delete Key"),
      (("bpy.types.ANIM_OT_delete_keyframe_animall",),
       ()),
-     ("fr_FR", "Supprimer image clé",
+     ("fr_FR", "Supprimer l’image clé",
      (False, ())),
     ),
     (("*", "Delete a Keyframe"),
@@ -68,16 +68,16 @@ translations_tuple = (
     ("fr_FR", "Supprimer une image clé",
      (False, ())),
    ),
-   (("Operator", "Update Vertex Color Animation"),
-    (("bpy.types.ANIM_OT_update_vertex_color_animation_animall",),
+   (("Operator", "Update Attribute Animation"),
+    (("bpy.types.ANIM_OT_update_attribute_animation_animall",),
      ()),
-    ("fr_FR", "Mettre à jour l’animation des couleurs de sommets",
+    ("fr_FR", "Mettre à jour l’animation des attributs",
     (False, ())),
    ),
-   (("*", "Update old vertex color channel formats from pre-3.3 versions"),
-    (("bpy.types.ANIM_OT_update_vertex_color_animation_animall",),
+   (("*", "Update attributes from the old format"),
+    (("bpy.types.ANIM_OT_update_attribute_animation_animall",),
     ()),
-    ("fr_FR", "Mettre à jour les formats des canaux depuis les versions antérieures à la 3.3",
+    ("fr_FR", "Mettre à jour les attributs depuis l’ancien format",
     (False, ())),
    ),
    (("*", "Animate"),
@@ -87,7 +87,7 @@ translations_tuple = (
     (False, ())),
    ),
    (("*", "Insert keyframes on active attribute values"),
-    (("bpy.types.AnimallProperties.key_attribute",),
+    (("bpy.types.AnimallProperties.key_active_attribute",),
     ()),
    ("fr_FR", "Insérer des clés sur l’attribut actif",
     (False, ())),
@@ -98,6 +98,12 @@ translations_tuple = (
    ("fr_FR", "Insérer des clés sur les poids de biseau d’arête",
     (False, ())),
   ),
+  (("*", "Edge Crease"),
+   (("bpy.types.AnimallProperties.key_edge_crease",),
+    ()),
+   ("fr_FR", "Plis d’arêtes",
+    (False, ())),
+  ),
   (("*", "Insert keyframes on edge creases"),
    (("bpy.types.AnimallProperties.key_edge_crease",),
     ()),
@@ -158,6 +164,12 @@ translations_tuple = (
    ("fr_FR", "Insérer des clés sur les poids de biseau des sommets",
     (False, ())),
   ),
+  (("*", "Insert keyframes on vertex crease weight"),
+   (("bpy.types.AnimallProperties.key_vertex_crease",),
+    ()),
+   ("fr_FR", "Insérer des clés sur les plis de sommets",
+    (False, ())),
+  ),
   (("*", "Insert keyframes on active vertex group values"),
    (("bpy.types.AnimallProperties.key_vertex_group",),
     ()),
@@ -165,190 +177,187 @@ translations_tuple = (
    (False, ())),
   ),
   (("*", "AnimAll"),
-   (("scripts/addons/animation_animall/__init__.py:138",
-     "Add-on AnimAll info: name"),
+   (("Add-on AnimAll info: name",),
     ()),
    ("fr_FR", "AnimAll",
     (False, ())),
   ),
   (("*", "Key:"),
-   (("scripts/addons/animation_animall/__init__.py:146",),
+   (("scripts/addons/animation_animall/__init__.py:200",),
    ()),
   ("fr_FR", "Insérer :",
    (False, ())),
  ),
  (("*", "Tab Category:"),
-  (("scripts/addons/animation_animall/__init__.py:653",),
+  (("scripts/addons/animation_animall/__init__.py:704",),
   ()),
  ("fr_FR", "Catégorie d’onglet :",
   (False, ())),
 ),
 (("*", "Points"),
-  (("scripts/addons/animation_animall/__init__.py:152",
-    "scripts/addons/animation_animall/__init__.py:159",
-    "scripts/addons/animation_animall/__init__.py:188"),
+  (("scripts/addons/animation_animall/__init__.py:206",
+    "scripts/addons/animation_animall/__init__.py:213",
+    "scripts/addons/animation_animall/__init__.py:243"),
   ()),
  ("fr_FR", "Points",
   (False, ())),
 ),
 (("*", "Others"),
-  (("scripts/addons/animation_animall/__init__.py:155",
-    "scripts/addons/animation_animall/__init__.py:171",
-    "scripts/addons/animation_animall/__init__.py:196"),
+  (("scripts/addons/animation_animall/__init__.py:209",
+    "scripts/addons/animation_animall/__init__.py:226",
+    "scripts/addons/animation_animall/__init__.py:251"),
   ()),
  ("fr_FR", "Autres",
   (False, ())),
 ),
 (("*", "Bevel"),
-  (("scripts/addons/animation_animall/__init__.py:161",
-    "scripts/addons/animation_animall/__init__.py:165"),
+  (("scripts/addons/animation_animall/__init__.py:215",
+    "scripts/addons/animation_animall/__init__.py:220"),
   ()),
  ("fr_FR", "Biseau",
   (False, ())),
 ),
 (("*", "Edges"),
-  (("scripts/addons/animation_animall/__init__.py:164",),
+  (("scripts/addons/animation_animall/__init__.py:219",),
   ()),
  ("fr_FR", "Arêtes",
   (False, ())),
 ),
 (("*", "Crease"),
-  (("scripts/addons/animation_animall/__init__.py:166",),
+  (("scripts/addons/animation_animall/__init__.py:216",
+    "scripts/addons/animation_animall/__init__.py:221",),
   ()),
  ("fr_FR", "Plis",
   (False, ())),
 ),
 (("*", "Faces"),
-  (("scripts/addons/animation_animall/__init__.py:168",),
+  (("scripts/addons/animation_animall/__init__.py:223",),
   ()),
  ("fr_FR", "Faces",
   (False, ())),
 ),
 (("*", "\"Location\" and \"Shape Key\" are redundant?"),
-  (("scripts/addons/animation_animall/__init__.py:218",),
+  (("scripts/addons/animation_animall/__init__.py:273",),
   ()),
  ("fr_FR", "\"Position\" et \"Clé de forme\" sont redondants ?",
   (False, ())),
 ),
 (("*", "Splines"),
-  (("scripts/addons/animation_animall/__init__.py:193",),
+  (("scripts/addons/animation_animall/__init__.py:248",),
   ()),
  ("fr_FR", "Splines",
   (False, ())),
 ),
 (("*", "Maybe set \"%s\" to 1.0?"),
-  (("scripts/addons/animation_animall/__init__.py:209",
-    "scripts/addons/animation_animall/__init__.py:209"),
+  (("scripts/addons/animation_animall/__init__.py:264",),
   ()),
  ("fr_FR", "Essayez de mettre « %s » à 1.0 ?",
   (False, ())),
 ),
 (("*", "Cannot key on Basis Shape"),
-  (("scripts/addons/animation_animall/__init__.py:212",),
+  (("scripts/addons/animation_animall/__init__.py:267",),
   ()),
  ("fr_FR", "Impossible d’ajouter une clé sur la forme de base",
   (False, ())),
 ),
 (("*", "No active Shape Key"),
-  (("scripts/addons/animation_animall/__init__.py:215",),
+  (("scripts/addons/animation_animall/__init__.py:270",),
   ()),
  ("fr_FR", "Pas de clé de forme active",
   (False, ())),
 ),
 (("*", "Clear Animation could not be performed"),
-  (("scripts/addons/animation_animall/__init__.py:581",),
+  (("scripts/addons/animation_animall/__init__.py:615",),
   ()),
  ("fr_FR", "La suppression de l’animation n’a pas pu aboutir",
   (False, ())),
 ),
-(("*", "Object includes old-style vertex colors. Consider updating them."),
-  (("scripts/addons/animation_animall/__init__.py:182",),
+(("*", "Object includes old-style attributes. Consider updating them."),
+  (("scripts/addons/animation_animall/__init__.py:237",),
   ()),
- ("fr_FR", "L’objet contient des couleurs de sommets à l’ancien format. Veuillez les mettre à jour",
+ ("fr_FR", "L’objet contient des attributs à l’ancien format. Veuillez les mettre à jour.",
   (False, ())),
 ),
 (("*", "Vertex %s"),
-  (("scripts/addons/animation_animall/__init__.py:358",
-    "scripts/addons/animation_animall/__init__.py:313",
-    "scripts/addons/animation_animall/__init__.py:318",
-    "scripts/addons/animation_animall/__init__.py:328"),
+  (("scripts/addons/animation_animall/__init__.py:141",
+    "scripts/addons/animation_animall/__init__.py:368",
+    "scripts/addons/animation_animall/__init__.py:382",
+    "scripts/addons/animation_animall/__init__.py:416"),
   ()),
  ("fr_FR", "Sommet %s",
   (False, ())),
 ),
 (("*", "Edge %s"),
-  (("scripts/addons/animation_animall/__init__.py:360",
-    "scripts/addons/animation_animall/__init__.py:333",
-    "scripts/addons/animation_animall/__init__.py:338"),
+  (("scripts/addons/animation_animall/__init__.py:143",),
   ()),
  ("fr_FR", "Arête %s",
   (False, ())),
 ),
 (("*", "Point %s"),
-  (("scripts/addons/animation_animall/__init__.py:265",),
+  (("scripts/addons/animation_animall/__init__.py:320",),
   ()),
  ("fr_FR", "Point %s",
   (False, ())),
 ),
 (("*", "Spline %s"),
-  (("scripts/addons/animation_animall/__init__.py:273",),
+  (("scripts/addons/animation_animall/__init__.py:328",),
   ()),
  ("fr_FR", "Spline %s",
   (False, ())),
 ),
 (("*", "Face %s"),
-  (("scripts/addons/animation_animall/__init__.py:343",
-    "scripts/addons/animation_animall/__init__.py:362"),
+  (("scripts/addons/animation_animall/__init__.py:145",
    "scripts/addons/animation_animall/__init__.py:395"),
   ()),
  ("fr_FR", "Face %s",
   (False, ())),
 ),
 (("*", "%s Point %s"),
-  (("scripts/addons/animation_animall/__init__.py:260",),
+  (("scripts/addons/animation_animall/__init__.py:315",),
   ()),
  ("fr_FR", "%s Point %s",
   (False, ())),
 ),
 (("*", "Loop %s"),
-  (("scripts/addons/animation_animall/__init__.py:364",),
+  (("scripts/addons/animation_animall/__init__.py:147",),
   ()),
  ("fr_FR", "Boucle %s",
   (False, ())),
 ),
-(("*", "UV layer %s"),
-  (("scripts/addons/animation_animall/__init__.py:379",),
+(("*", "UV Layer %s"),
+  (("scripts/addons/animation_animall/__init__.py:409",),
   ()),
  ("fr_FR", "Calque UV %s",
   (False, ())),
 ),
 (("*", "%s Vertex %s"),
-  (("scripts/addons/animation_animall/__init__.py:386",),
+  (("scripts/addons/animation_animall/__init__.py:416",),
   ()),
  ("fr_FR", "%s Sommet %s",
   (False, ())),
 ),
 (("*", "Spline %s CV %s"),
-  (("scripts/addons/animation_animall/__init__.py:283",
-    "scripts/addons/animation_animall/__init__.py:284",
-    "scripts/addons/animation_animall/__init__.py:285",
-    "scripts/addons/animation_animall/__init__.py:288",
-    "scripts/addons/animation_animall/__init__.py:291",
-    "scripts/addons/animation_animall/__init__.py:297",
-    "scripts/addons/animation_animall/__init__.py:300",
-    "scripts/addons/animation_animall/__init__.py:303"),
+  (("scripts/addons/animation_animall/__init__.py:338",
    "scripts/addons/animation_animall/__init__.py:339",
    "scripts/addons/animation_animall/__init__.py:340",
    "scripts/addons/animation_animall/__init__.py:343",
    "scripts/addons/animation_animall/__init__.py:346",
    "scripts/addons/animation_animall/__init__.py:352",
    "scripts/addons/animation_animall/__init__.py:355",
    "scripts/addons/animation_animall/__init__.py:358"),
   ()),
  ("fr_FR", "Spline %s Point %s",
   (False, ())),
 ),
 (("*", "%s Spline %s CV %s"),
-  (("scripts/addons/animation_animall/__init__.py:402",
-    "scripts/addons/animation_animall/__init__.py:403",
-    "scripts/addons/animation_animall/__init__.py:404",
-    "scripts/addons/animation_animall/__init__.py:405",
-    "scripts/addons/animation_animall/__init__.py:406",
-    "scripts/addons/animation_animall/__init__.py:414",
-    "scripts/addons/animation_animall/__init__.py:415",
-    "scripts/addons/animation_animall/__init__.py:416"),
+  (("scripts/addons/animation_animall/__init__.py:432",
    "scripts/addons/animation_animall/__init__.py:434",
    "scripts/addons/animation_animall/__init__.py:437",
    "scripts/addons/animation_animall/__init__.py:440",
    "scripts/addons/animation_animall/__init__.py:442",
    "scripts/addons/animation_animall/__init__.py:450",
    "scripts/addons/animation_animall/__init__.py:452",
    "scripts/addons/animation_animall/__init__.py:453"),
   ()),
  ("fr_FR", "%s Spline %s Point %s",
   (False, ())),
@@ -460,7 +460,6 @@ def create_glass_material(matname, replace, rv=0.333, gv=0.342, bv=0.9):

     node = nodes.new('ShaderNodeBsdfGlossy')
     node.name = 'Glossy_0'
-    node.distribution = 'SHARP'
     node.location = 250, 100

     node = nodes.new('ShaderNodeBsdfTransparent')

@@ -6,7 +6,7 @@ bl_info = {
     "name": "Grease Pencil Tools",
     "description": "Extra tools for Grease Pencil",
     "author": "Samuel Bernou, Antonio Vazquez, Daniel Martinez Lara, Matias Mendiola",
-    "version": (1, 8, 1),
+    "version": (1, 8, 2),
     "blender": (3, 0, 0),
     "location": "Sidebar > Grease Pencil > Grease Pencil Tools",
     "warning": "",

@@ -49,10 +49,10 @@ def get_reduced_area_coord(context):

     ## minus tool leftbar + sidebar right
     regs = context.area.regions
-    toolbar = regs[2]
-    sidebar = regs[3]
-    header = regs[0]
-    tool_header = regs[1]
+    toolbar = next((r for r in regs if r.type == 'TOOLS'), None)
+    sidebar = next((r for r in regs if r.type == 'UI'), None)
+    header = next((r for r in regs if r.type == 'HEADER'), None)
+    tool_header = next((r for r in regs if r.type == 'TOOL_HEADER'), None)
     up_margin = down_margin = 0
     if tool_header.alignment == 'TOP':
         up_margin += tool_header.height
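
Note: indexing context.area.regions positionally was the bug here; region order is not stable across Blender versions or area layouts, so the fix selects regions by their type instead. The same pattern as a small reusable helper, sketched under the assumption that it runs with a valid area in context:

    def find_region(area, region_type):
        # Return the first region of the requested type, or None if the area lacks one.
        return next((r for r in area.regions if r.type == region_type), None)

    # e.g. sidebar = find_region(bpy.context.area, 'UI')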

@@ -5,8 +5,8 @@
 bl_info = {
     "name": "Import Images as Planes",
     "author": "Florian Meyer (tstscr), mont29, matali, Ted Schundler (SpkyElctrc), mrbimax",
-    "version": (3, 5, 0),
-    "blender": (2, 91, 0),
+    "version": (3, 5, 1),
+    "blender": (4, 0, 0),
     "location": "File > Import > Images as Planes or Add > Image > Images as Planes",
     "description": "Imports images and creates planes with the appropriate aspect ratio. "
                    "The images are mapped to the planes.",
@@ -25,7 +25,10 @@ from math import pi

 import bpy
 from bpy.types import Operator
-from bpy.app.translations import pgettext_tip as tip_
+from bpy.app.translations import (
+    pgettext_tip as tip_,
+    contexts as i18n_contexts
+)
 from mathutils import Vector

 from bpy.props import (
@@ -151,6 +154,9 @@ def load_images(filenames, directory, force_reload=False, frame_start=1, find_se
         file_iter = zip(filenames, repeat(1), repeat(1))

     for filename, offset, frames in file_iter:
+        if not os.path.isfile(bpy.path.abspath(os.path.join(directory, filename))):
+            continue
+
         image = load_image(filename, directory, check_existing=True, force_reload=force_reload)

         # Size is unavailable for sequences, so we grab it early
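
Note: the added guard skips files that do not exist before attempting a load. bpy.path.abspath() matters here because Blender paths may be .blend-relative (a leading "//"); a minimal sketch of the same check with hypothetical inputs:

    import os
    import bpy

    directory, filename = "//textures", "wood.png"  # hypothetical values
    full_path = bpy.path.abspath(os.path.join(directory, filename))
    if not os.path.isfile(full_path):
        print(f"Skipping missing image: {full_path}")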

@@ -734,7 +740,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
         ('HASHED', "Hashed","Use noise to dither the binary visibility (works well with multi-samples)"),
         ('OPAQUE', "Opaque","Render surface without transparency"),
     )
-    blend_method: EnumProperty(name="Blend Mode", items=BLEND_METHODS, default='BLEND', description="Blend Mode for Transparent Faces")
+    blend_method: EnumProperty(
+        name="Blend Mode", items=BLEND_METHODS, default='BLEND',
+        description="Blend Mode for Transparent Faces", translation_context=i18n_contexts.id_material)

     SHADOW_METHODS = (
         ('CLIP', "Clip","Use the alpha threshold to clip the visibility (binary visibility)"),
@@ -742,7 +750,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
         ('OPAQUE',"Opaque","Material will cast shadows without transparency"),
         ('NONE',"None","Material will cast no shadow"),
     )
-    shadow_method: EnumProperty(name="Shadow Mode", items=SHADOW_METHODS, default='CLIP', description="Shadow mapping method")
+    shadow_method: EnumProperty(
+        name="Shadow Mode", items=SHADOW_METHODS, default='CLIP',
+        description="Shadow mapping method", translation_context=i18n_contexts.id_material)

     use_backface_culling: BoolProperty(
         name="Backface Culling", default=False,
@@ -923,11 +933,11 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
         if context.active_object and context.active_object.mode != 'OBJECT':
             bpy.ops.object.mode_set(mode='OBJECT')

-        self.import_images(context)
+        ret_code = self.import_images(context)

         context.preferences.edit.use_enter_edit_mode = editmode

-        return {'FINISHED'}
+        return ret_code

     def import_images(self, context):

@@ -939,6 +949,10 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
             find_sequences=self.image_sequence
         ))

+        if not images:
+            self.report({'WARNING'}, "Please select at least an image.")
+            return {'CANCELLED'}
+
         # Create individual planes
         planes = [self.single_image_spec_to_plane(context, img_spec) for img_spec in images]

@@ -962,6 +976,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):

         # all done!
         self.report({'INFO'}, tip_("Added {} Image Plane(s)").format(len(planes)))
+        return {'FINISHED'}

     # operate on a single image
     def single_image_spec_to_plane(self, context, img_spec):
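
Note: execute() now propagates the status set returned by import_images() instead of always returning {'FINISHED'}, so the new empty-selection branch actually cancels the operator. Blender operators communicate their outcome through such sets; a minimal sketch of the pattern with the surrounding code stripped away:

    def import_images(self, context):
        images = []  # stand-in for whatever load_images() produced
        if not images:
            self.report({'WARNING'}, "Please select at least an image.")
            return {'CANCELLED'}
        # ... create the planes ...
        return {'FINISHED'}

    def execute(self, context):
        return self.import_images(context)  # pass the status through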

@@ -30,6 +30,8 @@ if "bpy" in locals():
 import os
 import bpy

+from bpy.app.translations import contexts as i18n_contexts
+
 from bpy.props import (
     StringProperty,
     BoolProperty,
@@ -58,6 +60,7 @@ class ExportUVLayout(bpy.types.Operator):
         name="Modified",
         description="Exports UVs from the modified mesh",
         default=False,
+        translation_context=i18n_contexts.id_mesh,
     )
     mode: EnumProperty(
         items=(
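
Note: both this file and the Images as Planes changes above attach a translation_context to short UI strings. Labels like "Modified" or "Blend Mode" are ambiguous in isolation, and the context lets translators give each usage its own translation. The property pattern in a sketch, with the property name being illustrative only:

    from bpy.app.translations import contexts as i18n_contexts
    from bpy.props import BoolProperty

    export_modified: BoolProperty(
        name="Modified",  # looked up in the mesh context rather than the default one
        default=False,
        translation_context=i18n_contexts.id_mesh,
    )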

@@ -54,6 +54,8 @@ MATSHINESS = 0xA040  # Specular intensity of the object/material (percent)
 MATSHIN2 = 0xA041  # Reflection of the object/material (percent)
 MATSHIN3 = 0xA042  # metallic/mirror of the object/material (percent)
 MATTRANS = 0xA050  # Transparency value (100-OpacityValue) (percent)
+MATXPFALL = 0xA052  # Transparency falloff ratio (percent)
+MATREFBLUR = 0xA053  # Reflection blurring ratio (percent)
 MATSELFILLUM = 0xA080  # Material self illumination flag
 MATSELFILPCT = 0xA084  # Self illumination strength (percent)
 MATWIRE = 0xA085  # Material wireframe rendered flag
@@ -626,8 +628,11 @@ def make_material_texture_chunk(chunk_id, texslots, pct):
             mat_sub_mapflags.add_variable("mapflags", _3ds_ushort(mapflags))
             mat_sub.add_subchunk(mat_sub_mapflags)

-            mat_sub_texblur = _3ds_chunk(MAT_MAP_TEXBLUR)  # Based on observation this is usually 1.0
-            mat_sub_texblur.add_variable("maptexblur", _3ds_float(1.0))
+            texblur = 0.0
+            mat_sub_texblur = _3ds_chunk(MAT_MAP_TEXBLUR)
+            if texslot.socket_dst.identifier in {'Base Color', 'Specular Tint'}:
+                texblur = texslot.node_dst.inputs['Sheen Weight'].default_value
+            mat_sub_texblur.add_variable("maptexblur", _3ds_float(round(texblur, 6)))
             mat_sub.add_subchunk(mat_sub_texblur)

             mat_sub_uscale = _3ds_chunk(MAT_MAP_USCALE)
@@ -650,12 +655,15 @@ def make_material_texture_chunk(chunk_id, texslots, pct):
             mat_sub_angle.add_variable("mapangle", _3ds_float(round(texslot.rotation[2], 6)))
             mat_sub.add_subchunk(mat_sub_angle)

-            if texslot.socket_dst.identifier in {'Base Color', 'Specular Tint'}:
-                rgb = _3ds_chunk(MAP_COL1)  # Add tint color
-                base = texslot.owner_shader.material.diffuse_color[:3]
-                spec = texslot.owner_shader.material.specular_color[:]
-                rgb.add_variable("mapcolor", _3ds_rgb_color(spec if texslot.socket_dst.identifier == 'Specular Tint' else base))
-                mat_sub.add_subchunk(rgb)
+            if texslot.socket_dst.identifier in {'Base Color', 'Specular Tint'}:  # Add tint color
+                tint = texslot.socket_dst.identifier == 'Base Color' and texslot.image.colorspace_settings.name == 'Non-Color'
+                if tint or texslot.socket_dst.identifier == 'Specular Tint':
+                    tint1 = _3ds_chunk(MAP_COL1)
+                    tint2 = _3ds_chunk(MAP_COL2)
+                    tint1.add_variable("tint1", _3ds_rgb_color(texslot.node_dst.inputs['Coat Tint'].default_value[:3]))
+                    tint2.add_variable("tint2", _3ds_rgb_color(texslot.node_dst.inputs['Sheen Tint'].default_value[:3]))
+                    mat_sub.add_subchunk(tint1)
+                    mat_sub.add_subchunk(tint2)

             # Store all textures for this mapto in order. This at least is what the
             # 3DS exporter did so far, afaik most readers will just skip over 2nd textures
@@ -703,7 +711,9 @@ def make_material_chunk(material, image):
             material_chunk.add_subchunk(make_percent_subchunk(MATSHIN2, wrap.specular))
             material_chunk.add_subchunk(make_percent_subchunk(MATSHIN3, wrap.metallic))
             material_chunk.add_subchunk(make_percent_subchunk(MATTRANS, 1 - wrap.alpha))
+            material_chunk.add_subchunk(make_percent_subchunk(MATXPFALL, wrap.transmission))
             material_chunk.add_subchunk(make_percent_subchunk(MATSELFILPCT, wrap.emission_strength))
+            material_chunk.add_subchunk(make_percent_subchunk(MATREFBLUR, wrap.node_principled_bsdf.inputs['Coat Weight'].default_value))
             material_chunk.add_subchunk(shading)

             primary_tex = False
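
Note: the two new chunk IDs let the exporter round-trip more Principled BSDF sliders: MATXPFALL (0xA052) carries transparency falloff from Transmission, and MATREFBLUR (0xA053) carries reflection blur from Coat Weight. Both are written through make_percent_subchunk(), i.e. as 3DS percent chunks. A sketch of what such a chunk amounts to on disk, assuming the format's little-endian id+length headers and the INT_PERCENTAGE (0x0030) subchunk:

    import struct

    def percent_chunk(chunk_id, value):
        pct = max(0, min(100, round(value * 100)))
        sub = struct.pack("<HIh", 0x0030, 8, pct)                # subchunk: id, length, short percent
        return struct.pack("<HI", chunk_id, 6 + len(sub)) + sub  # enclosing chunk header

    data = percent_chunk(0xA052, 0.25)  # MATXPFALL at 25%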

@@ -1608,6 +1618,86 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
     mesh_version.add_variable("mesh", _3ds_uint(3))
     object_info.add_subchunk(mesh_version)

+    # Init main keyframe data chunk
+    if use_keyframes:
+        revision = 0x0005
+        stop = scene.frame_end
+        start = scene.frame_start
+        curtime = scene.frame_current
+        kfdata = make_kfdata(revision, start, stop, curtime)
+
+    # Make a list of all materials used in the selected meshes (use dictionary, each material is added once)
+    materialDict = {}
+    mesh_objects = []
+
+    if use_selection:
+        objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)]
+    else:
+        objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer)]
+
+    empty_objects = [ob for ob in objects if ob.type == 'EMPTY']
+    light_objects = [ob for ob in objects if ob.type == 'LIGHT']
+    camera_objects = [ob for ob in objects if ob.type == 'CAMERA']
+
+    for ob in objects:
+        # Get derived objects
+        derived_dict = bpy_extras.io_utils.create_derived_objects(depsgraph, [ob])
+        derived = derived_dict.get(ob)
+
+        if derived is None:
+            continue
+
+        for ob_derived, mtx in derived:
+            if ob.type not in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META'}:
+                continue
+
+            try:
+                data = ob_derived.to_mesh()
+            except:
+                data = None
+
+            if data:
+                matrix = global_matrix @ mtx
+                data.transform(matrix)
+                data.transform(mtx_scale)
+                mesh_objects.append((ob_derived, data, matrix))
+                ma_ls = data.materials
+                ma_ls_len = len(ma_ls)
+
+                # Get material/image tuples
+                if data.uv_layers:
+                    if not ma_ls:
+                        ma = ma_name = None
+
+                    for f, uf in zip(data.polygons, data.uv_layers.active.data):
+                        if ma_ls:
+                            ma_index = f.material_index
+                            if ma_index >= ma_ls_len:
+                                ma_index = f.material_index = 0
+                            ma = ma_ls[ma_index]
+                            ma_name = None if ma is None else ma.name
+                        # Else they're already set to None
+
+                        img = get_uv_image(ma)
+                        img_name = None if img is None else img.name
+
+                        materialDict.setdefault((ma_name, img_name), (ma, img))
+
+                else:
+                    for ma in ma_ls:
+                        if ma:  # Material may be None so check its not
+                            materialDict.setdefault((ma.name, None), (ma, None))
+
+                # Why 0 Why!
+                for f in data.polygons:
+                    if f.material_index >= ma_ls_len:
+                        f.material_index = 0
+
+
+    # Make MATERIAL chunks for all materials used in the meshes
+    for ma_image in materialDict.values():
+        object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1]))
+
     # Add MASTERSCALE element
     mscale = _3ds_chunk(MASTERSCALE)
     mscale.add_variable("scale", _3ds_float(1.0))
@@ -1619,14 +1709,6 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
         cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location))
         object_info.add_subchunk(cursor_chunk)

-    # Init main keyframe data chunk
-    if use_keyframes:
-        revision = 0x0005
-        stop = scene.frame_end
-        start = scene.frame_start
-        curtime = scene.frame_current
-        kfdata = make_kfdata(revision, start, stop, curtime)
-
     # Add AMBIENT color
     if world is not None and 'WORLD' in object_filter:
         ambient_chunk = _3ds_chunk(AMBIENTLIGHT)
@@ -1710,81 +1792,9 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
             object_info.add_subchunk(layerfog_chunk)
         if fognode or foglayer and layer.use_pass_mist:
             object_info.add_subchunk(use_fog_flag)
-        if use_keyframes and world.animation_data or world.node_tree.animation_data:
+        if use_keyframes and world.animation_data or (world.node_tree and world.node_tree.animation_data):
             kfdata.add_subchunk(make_ambient_node(world))

-    # Make a list of all materials used in the selected meshes (use dictionary, each material is added once)
-    materialDict = {}
-    mesh_objects = []
-
-    if use_selection:
-        objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)]
-    else:
-        objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer)]
-
-    empty_objects = [ob for ob in objects if ob.type == 'EMPTY']
-    light_objects = [ob for ob in objects if ob.type == 'LIGHT']
-    camera_objects = [ob for ob in objects if ob.type == 'CAMERA']
-
-    for ob in objects:
-        # Get derived objects
-        derived_dict = bpy_extras.io_utils.create_derived_objects(depsgraph, [ob])
-        derived = derived_dict.get(ob)
-
-        if derived is None:
-            continue
-
-        for ob_derived, mtx in derived:
-            if ob.type not in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META'}:
-                continue
-
-            try:
-                data = ob_derived.to_mesh()
-            except:
-                data = None
-
-            if data:
-                matrix = global_matrix @ mtx
-                data.transform(matrix)
-                data.transform(mtx_scale)
-                mesh_objects.append((ob_derived, data, matrix))
-                ma_ls = data.materials
-                ma_ls_len = len(ma_ls)
-
-                # Get material/image tuples
-                if data.uv_layers:
-                    if not ma_ls:
-                        ma = ma_name = None
-
-                    for f, uf in zip(data.polygons, data.uv_layers.active.data):
-                        if ma_ls:
-                            ma_index = f.material_index
-                            if ma_index >= ma_ls_len:
-                                ma_index = f.material_index = 0
-                            ma = ma_ls[ma_index]
-                            ma_name = None if ma is None else ma.name
-                        # Else they're already set to None
-
-                        img = get_uv_image(ma)
-                        img_name = None if img is None else img.name
-
-                        materialDict.setdefault((ma_name, img_name), (ma, img))
-
-                else:
-                    for ma in ma_ls:
-                        if ma:  # Material may be None so check its not
-                            materialDict.setdefault((ma.name, None), (ma, None))
-
-                # Why 0 Why!
-                for f in data.polygons:
-                    if f.material_index >= ma_ls_len:
-                        f.material_index = 0
-
-
-    # Make material chunks for all materials used in the meshes
-    for ma_image in materialDict.values():
-        object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1]))
-
     # Collect translation for transformation matrix
     translation = {}
     rotation = {}
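
Note: this reshuffle initializes the keyframe-data chunk and collects materials/meshes before the world and fog handling, so kfdata already exists when make_ambient_node() is attached, and the ambient check no longer dereferences world.node_tree on worlds that have none. One subtlety worth keeping in mind when reading that condition: "and" binds tighter than "or", as this check confirms:

    use_keyframes, anim, node_anim = True, False, True
    assert (use_keyframes and anim or node_anim) == ((use_keyframes and anim) or node_anim)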

@@ -90,6 +90,7 @@ MAT_SHIN_MAP = 0xA33C  # This is a header for a new roughness map
 MAT_SELFI_MAP = 0xA33D  # This is a header for a new emission map
 MAT_MAP_FILEPATH = 0xA300  # This holds the file name of the texture
 MAT_MAP_TILING = 0xA351  # 2nd bit (from LSB) is mirror UV flag
+MAT_MAP_TEXBLUR = 0xA353  # Texture blurring factor (float 0-1)
 MAT_MAP_USCALE = 0xA354  # U axis scaling
 MAT_MAP_VSCALE = 0xA356  # V axis scaling
 MAT_MAP_UOFFSET = 0xA358  # U axis offset
@@ -244,7 +245,7 @@ def skip_to_end(file, skip_chunk):
 # MATERIALS #
 #############

-def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tintcolor, mapto):
+def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tint1, tint2, mapto):
     shader = contextWrapper.node_principled_bsdf
     nodetree = contextWrapper.material.node_tree
     shader.location = (-300, 0)
@@ -256,24 +257,33 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
         mixer.label = "Mixer"
         mixer.inputs[0].default_value = pct / 100
         mixer.inputs[1].default_value = (
-            tintcolor[:3] + [1] if tintcolor else
-            shader.inputs['Base Color'].default_value[:]
-        )
+            tint1[:3] + [1] if tint1 else shader.inputs['Base Color'].default_value[:])
         contextWrapper._grid_to_location(1, 2, dst_node=mixer, ref_node=shader)
         img_wrap = contextWrapper.base_color_texture
-        links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
         links.new(mixer.outputs['Color'], shader.inputs['Base Color'])
+        if tint2 is not None:
+            img_wrap.colorspace_name = 'Non-Color'
+            mixer.inputs[2].default_value = tint2[:3] + [1]
+            links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[0])
+        else:
+            links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
     elif mapto == 'ROUGHNESS':
         img_wrap = contextWrapper.roughness_texture
     elif mapto == 'METALLIC':
         shader.location = (300,300)
         img_wrap = contextWrapper.metallic_texture
     elif mapto == 'SPECULARITY':
-        shader.location = (0,-300)
+        shader.location = (300,0)
         img_wrap = contextWrapper.specular_tint_texture
+        if tint1:
+            img_wrap.node_dst.inputs['Coat Tint'].default_value = tint1[:3] + [1]
+        if tint2:
+            img_wrap.node_dst.inputs['Sheen Tint'].default_value = tint2[:3] + [1]
     elif mapto == 'ALPHA':
-        shader.location = (300,300)
+        shader.location = (-300,0)
         img_wrap = contextWrapper.alpha_texture
         img_wrap.use_alpha = False
         links.new(img_wrap.node_image.outputs['Color'], img_wrap.socket_dst)
     elif mapto == 'EMISSION':
         shader.location = (0,-900)
         img_wrap = contextWrapper.emission_color_texture
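
Note: the importer now mirrors the exporter's two tint chunks. read_texture() below collects MAT_MAP_COL1/MAT_MAP_COL2 into tint1/tint2 and threads both through add_texture_to_material(), where base-color maps flagged Non-Color blend between the tints and specularity maps feed Coat Tint/Sheen Tint. The call shape with placeholder values, for reference:

    # tint1/tint2 are RGB sequences read from the COL1/COL2 chunks, or None when absent.
    add_texture_to_material(img, contextWrapper, pct=70, extend='wrap', alpha=False,
                            scale=(1.0, 1.0, 1), offset=(0.0, 0.0, 0), angle=0.0,
                            tint1=None, tint2=None, mapto='COLOR')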

@@ -310,22 +320,24 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
         img_wrap.extension = 'CLIP'

     if alpha == 'alpha':
-        own_node = img_wrap.node_image
-        contextWrapper.material.blend_method = 'HASHED'
-        links.new(own_node.outputs['Alpha'], img_wrap.socket_dst)
         for link in links:
             if link.from_node.type == 'TEX_IMAGE' and link.to_node.type == 'MIX_RGB':
                 tex = link.from_node.image.name
+                own_node = img_wrap.node_image
                 own_map = img_wrap.node_mapping
                 if tex == image.name:
                     links.new(link.from_node.outputs['Alpha'], img_wrap.socket_dst)
                     try:
                         nodes.remove(own_map)
                         nodes.remove(own_node)
                     except:
                         pass
                     for imgs in bpy.data.images:
                         if imgs.name[-3:].isdigit():
                             if not imgs.users:
                                 bpy.data.images.remove(imgs)
+                else:
+                    links.new(img_wrap.node_image.outputs['Alpha'], img_wrap.socket_dst)
+        contextWrapper.material.blend_method = 'HASHED'

     shader.location = (300, 300)
     contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)
@@ -350,6 +362,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
     contextColor = None
     contextWrapper = None
     contextMatrix = None
+    contextReflection = None
+    contextTransmission = None
     contextMesh_vertls = None
     contextMesh_facels = None
     contextMesh_flag = None
@@ -518,7 +532,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
     def read_texture(new_chunk, temp_chunk, name, mapto):
         uscale, vscale, uoffset, voffset, angle = 1.0, 1.0, 0.0, 0.0, 0.0
         contextWrapper.use_nodes = True
-        tintcolor = None
+        tint1 = tint2 = None
         extend = 'wrap'
         alpha = False
         pct = 70
@@ -526,11 +540,13 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
         contextWrapper.base_color = contextColor[:]
         contextWrapper.metallic = contextMaterial.metallic
         contextWrapper.roughness = contextMaterial.roughness
+        contextWrapper.transmission = contextTransmission
         contextWrapper.specular = contextMaterial.specular_intensity
         contextWrapper.specular_tint = contextMaterial.specular_color[:]
         contextWrapper.emission_color = contextMaterial.line_color[:3]
         contextWrapper.emission_strength = contextMaterial.line_priority / 100
         contextWrapper.alpha = contextMaterial.diffuse_color[3] = contextAlpha
+        contextWrapper.node_principled_bsdf.inputs['Coat Weight'].default_value = contextReflection

         while (new_chunk.bytes_read < new_chunk.length):
             read_chunk(file, temp_chunk)
@@ -542,14 +558,10 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 img = load_image(texture_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True)
                 temp_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed

-            elif temp_chunk.ID == MAT_MAP_USCALE:
-                uscale = read_float(temp_chunk)
-            elif temp_chunk.ID == MAT_MAP_VSCALE:
-                vscale = read_float(temp_chunk)
-            elif temp_chunk.ID == MAT_MAP_UOFFSET:
-                uoffset = read_float(temp_chunk)
-            elif temp_chunk.ID == MAT_MAP_VOFFSET:
-                voffset = read_float(temp_chunk)
             elif temp_chunk.ID == MAT_BUMP_PERCENT:
                 contextWrapper.normalmap_strength = (float(read_short(temp_chunk) / 100))
+            elif mapto in {'COLOR', 'SPECULARITY'} and temp_chunk.ID == MAT_MAP_TEXBLUR:
+                contextWrapper.node_principled_bsdf.inputs['Sheen Weight'].default_value = float(read_float(temp_chunk))

             elif temp_chunk.ID == MAT_MAP_TILING:
                 """Control bit flags, where 0x1 activates decaling, 0x2 activates mirror,
@@ -578,11 +590,20 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 if tiling & 0x200:
                     tint = 'RGBtint'

+            elif temp_chunk.ID == MAT_MAP_USCALE:
+                uscale = read_float(temp_chunk)
+            elif temp_chunk.ID == MAT_MAP_VSCALE:
+                vscale = read_float(temp_chunk)
+            elif temp_chunk.ID == MAT_MAP_UOFFSET:
+                uoffset = read_float(temp_chunk)
+            elif temp_chunk.ID == MAT_MAP_VOFFSET:
+                voffset = read_float(temp_chunk)
             elif temp_chunk.ID == MAT_MAP_ANG:
                 angle = read_float(temp_chunk)

             elif temp_chunk.ID == MAT_MAP_COL1:
-                tintcolor = read_byte_color(temp_chunk)
+                tint1 = read_byte_color(temp_chunk)
+            elif temp_chunk.ID == MAT_MAP_COL2:
+                tint2 = read_byte_color(temp_chunk)

             skip_to_end(file, temp_chunk)
             new_chunk.bytes_read += temp_chunk.bytes_read
@@ -590,7 +611,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
         # add the map to the material in the right channel
         if img:
             add_texture_to_material(img, contextWrapper, pct, extend, alpha, (uscale, vscale, 1),
-                                    (uoffset, voffset, 0), angle, tintcolor, mapto)
+                                    (uoffset, voffset, 0), angle, tint1, tint2, mapto)

 def apply_constrain(vec):
     convector = mathutils.Vector.Fill(3, (CONSTRAIN * 0.1))
@@ -627,7 +648,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
     hyp = math.sqrt(pow(plane.x,2) + pow(plane.y,2))
     dia = math.sqrt(pow(hyp,2) + pow(plane.z,2))
     yaw = math.atan2(math.copysign(hyp, sign_xy), axis_xy)
-    bow = math.acos(hyp / dia)
+    bow = math.acos(hyp / dia) if dia != 0 else 0
     turn = angle - yaw if check_sign else angle + yaw
     tilt = angle - bow if loca.z > target.z else angle + bow
     pan = yaw if check_axes else turn
|
||||
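Reviewer note: the `if dia != 0 else 0` guard above fixes a ZeroDivisionError when an object sits exactly on its target, where `hyp` and `dia` are both zero. A standalone sketch of the guarded elevation computation, with a hypothetical direction vector `plane` (not the patch's exact surrounding code):

import math
import mathutils

def elevation_angle(loca, target):
    # Direction from the object to the target it is tracking.
    plane = mathutils.Vector(target) - mathutils.Vector(loca)
    hyp = math.sqrt(plane.x ** 2 + plane.y ** 2)  # length projected onto the XY plane
    dia = math.sqrt(hyp ** 2 + plane.z ** 2)      # full distance to the target
    # acos(hyp / dia) is the angle between the view ray and the XY plane;
    # return 0 in the degenerate case instead of dividing by zero.
    return math.acos(hyp / dia) if dia != 0 else 0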
@ -891,6 +912,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
# If material chunk
elif new_chunk.ID == MATERIAL:
contextAlpha = True
contextReflection = False
contextTransmission = False
contextColor = mathutils.Color((0.8, 0.8, 0.8))
contextMaterial = bpy.data.materials.new('Material')
contextWrapper = PrincipledBSDFWrapper(contextMaterial, is_readonly=False, use_nodes=False)
@ -979,6 +1002,24 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
contextMaterial.blend_method = 'BLEND'
new_chunk.bytes_read += temp_chunk.bytes_read

elif new_chunk.ID == MAT_XPFALL:
read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT:
contextTransmission = float(abs(read_short(temp_chunk) / 100))
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read

elif new_chunk.ID == MAT_REFBLUR:
read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT:
contextReflection = float(read_short(temp_chunk) / 100)
elif temp_chunk.ID == PCT_FLOAT:
contextReflection = float(read_float(temp_chunk))
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read

elif new_chunk.ID == MAT_SELF_ILPCT:
read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT:
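Reviewer note: MAT_XPFALL, MAT_REFBLUR and MAT_SELF_ILPCT all repeat the same read-a-percentage dance. A possible follow-up refactor, sketched with the helpers and chunk IDs already used in this file (read_chunk, read_short, read_float, skip_to_end, PCT_SHORT, PCT_FLOAT):

def read_percentage_chunk(file, temp_chunk, default=0.0):
    # A 3DS percentage sub-chunk stores its value either as a short (0-100)
    # or as a float; normalise both to the 0.0-1.0 range.
    read_chunk(file, temp_chunk)
    if temp_chunk.ID == PCT_SHORT:
        return read_short(temp_chunk) / 100
    if temp_chunk.ID == PCT_FLOAT:
        return read_float(temp_chunk)
    skip_to_end(file, temp_chunk)
    return default

The caller would still bump new_chunk.bytes_read by temp_chunk.bytes_read afterwards, exactly as each branch above does.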
@ -996,11 +1037,13 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
contextWrapper.base_color = contextColor[:]
contextWrapper.metallic = contextMaterial.metallic
contextWrapper.roughness = contextMaterial.roughness
contextWrapper.transmission = contextTransmission
contextWrapper.specular = contextMaterial.specular_intensity
contextWrapper.specular_tint = contextMaterial.specular_color[:]
contextWrapper.emission_color = contextMaterial.line_color[:3]
contextWrapper.emission_strength = contextMaterial.line_priority / 100
contextWrapper.alpha = contextMaterial.diffuse_color[3] = contextAlpha
contextWrapper.node_principled_bsdf.inputs['Coat Weight'].default_value = contextReflection
contextWrapper.use_nodes = False
if shading >= 3:
contextWrapper.use_nodes = True
@ -1125,6 +1168,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
context.view_layer.active_layer_collection.collection.objects.link(contextLamp)
imported_objects.append(contextLamp)
object_dictionary[contextObName] = contextLamp
contextLamp.data.use_shadow = False
contextLamp.location = read_float_array(new_chunk) # Position
contextMatrix = None # Reset matrix
elif CreateLightObject and new_chunk.ID == COLOR_F: # Color
@ -1141,7 +1185,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
# If spotlight chunk
elif CreateLightObject and new_chunk.ID == LIGHT_SPOTLIGHT: # Spotlight
contextLamp.data.type = 'SPOT'
contextLamp.data.use_shadow = False
spot = mathutils.Vector(read_float_array(new_chunk)) # Spot location
aim = calc_target(contextLamp.location, spot) # Target
contextLamp.rotation_euler.x = aim[0]
@ -1381,6 +1424,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'TARGET': # Target position
keyframe_data = {}
location = child.location
keyframe_data[0] = trackposition[0]
target = mathutils.Vector(read_track_data(new_chunk)[0])
direction = calc_target(location, target)
child.rotation_euler.x = direction[0]
@ -5,8 +5,8 @@
bl_info = {
"name": "FBX format",
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
"version": (5, 8, 3),
"blender": (3, 6, 0),
"version": (5, 8, 12),
"blender": (4, 0, 0),
"location": "File > Import-Export",
"description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",
"warning": "",
@ -250,8 +250,7 @@ class FBXElem:
for elem in self.elems:
offset = elem._calc_offsets(offset, (elem is elem_last))
offset += _BLOCK_SENTINEL_LENGTH
elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
if not is_last:
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
offset += _BLOCK_SENTINEL_LENGTH

return offset
@ -282,8 +281,7 @@ class FBXElem:
assert(elem.id != b'')
elem._write(write, tell, (elem is elem_last))
write(_BLOCK_SENTINEL_DATA)
elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
if not is_last:
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
write(_BLOCK_SENTINEL_DATA)
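Reviewer note: both hunks fold the nested `if not is_last:` into the condition, and additionally make the always-sentinel ids emit a sentinel even when the element is last. As I read the new condition, the rule is (sketch, not part of the patch):

def needs_block_sentinel(elem, is_last):
    # Elements with children always close with a sentinel. Childless elements
    # only need one when they carry no properties and are not the last sibling,
    # unless their id is in the always-sentinel set.
    if elem.elems:
        return True
    return (not elem.props and not is_last) or elem.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL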
@ -1810,17 +1810,15 @@ def fbx_data_armature_elements(root, arm_obj, scene_data):
elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION)
elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0) # Only vague idea what it is...

# Pre-process vertex weights (also to check vertices assigned to more than four bones).
# Pre-process vertex weights so that the vertices only need to be iterated once.
ob = ob_obj.bdata
bo_vg_idx = {bo_obj.bdata.name: ob.vertex_groups[bo_obj.bdata.name].index
for bo_obj in clusters.keys() if bo_obj.bdata.name in ob.vertex_groups}
valid_idxs = set(bo_vg_idx.values())
vgroups = {vg.index: {} for vg in ob.vertex_groups}
verts_vgroups = (sorted(((vg.group, vg.weight) for vg in v.groups if vg.weight and vg.group in valid_idxs),
key=lambda e: e[1], reverse=True)
for v in me.vertices)
for idx, vgs in enumerate(verts_vgroups):
for vg_idx, w in vgs:
for idx, v in enumerate(me.vertices):
for vg in v.groups:
if (w := vg.weight) and (vg_idx := vg.group) in valid_idxs:
vgroups[vg_idx][idx] = w

for bo_obj, clstr_key in clusters.items():
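Reviewer note: the per-vertex sort was only needed by the old more-than-four-bones check, so the rewrite can group weights in a single pass. A self-contained toy run of the new grouping logic:

valid_idxs = {0, 2}               # vertex groups actually deformed by bones
vgroups = {0: {}, 1: {}, 2: {}}   # group index -> {vertex index: weight}

# Stand-ins for me.vertices / v.groups: (group index, weight) pairs per vertex.
vertices = [
    [(0, 0.75), (1, 0.25)],
    [(2, 0.0), (0, 1.0)],
]
for idx, groups in enumerate(vertices):
    for vg_idx, weight in groups:
        # Zero weights and groups with no matching bone are skipped, as in the
        # walrus-based condition above.
        if weight and vg_idx in valid_idxs:
            vgroups[vg_idx][idx] = weight

assert vgroups == {0: {0: 0.75, 1: 1.0}, 1: {}, 2: {}}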
@ -1318,42 +1318,154 @@ class AnimationCurveNodeWrapper:
min_reldiff_fac = fac * 1.0e-3 # min relative value evolution: 0.1% of current 'order of magnitude'.
min_absdiff_fac = 0.1 # A tenth of reldiff...

are_keyed = []
for values, frame_write_mask in zip(self._frame_values_array, self._frame_write_mask_array):
# Initialise to no frames written.
frame_write_mask[:] = False
# Initialise to no values enabled for writing.
self._frame_write_mask_array[:] = False

# Create views of the 'previous' and 'current' mask and values. The memoryview, .data, of each array is used
# for its iteration and indexing performance compared to the array.
key = values[1:].data
p_key = values[:-1].data
key_write = frame_write_mask[1:].data
p_key_write = frame_write_mask[:-1].data
# Values are enabled for writing if they differ enough from either of their adjacent values or if they differ
# enough from the closest previous value that is enabled due to either of these conditions.
for sampled_values, enabled_mask in zip(self._frame_values_array, self._frame_write_mask_array):
# Create overlapping views of the 'previous' (all but the last) and 'current' (all but the first)
# `sampled_values` and `enabled_mask`.
# Calculate absolute values from `sampled_values` so that the 'previous' and 'current' absolute arrays can
# be views into the same array instead of separately calculated arrays.
abs_sampled_values = np.abs(sampled_values)
# 'previous' views.
p_val_view = sampled_values[:-1]
p_abs_val_view = abs_sampled_values[:-1]
p_enabled_mask_view = enabled_mask[:-1]
# 'current' views.
c_val_view = sampled_values[1:]
c_abs_val_view = abs_sampled_values[1:]
c_enabled_mask_view = enabled_mask[1:]

p_keyedval = values[0]
is_keyed = False
for idx, (val, p_val) in enumerate(zip(key, p_key)):
if val == p_val:
# Never write keyframe when value is exactly the same as prev one!
continue
# This is contracted form of relative + absolute-near-zero difference:
# absdiff = abs(a - b)
# if absdiff < min_reldiff_fac * min_absdiff_fac:
# If enough difference from previous sampled value, enable the current value *and* the previous one!
# The difference check is symmetrical, so this will compare each value to both of its adjacent values.
# Unless it is forcefully enabled later, this is the only way that the first value can be enabled.
# This is a contracted form of relative + absolute-near-zero difference:
# def is_different(a, b):
# abs_diff = abs(a - b)
# if abs_diff < min_reldiff_fac * min_absdiff_fac:
# return False
# return (absdiff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac
# return (abs_diff / ((abs(a) + abs(b)) / 2)) > min_reldiff_fac
# Note that we ignore the '/ 2' part here, since it's not much significant for us.
if abs(val - p_val) > (min_reldiff_fac * max(abs(val) + abs(p_val), min_absdiff_fac)):
# If enough difference from previous sampled value, key this value *and* the previous one!
key_write[idx] = True
p_key_write[idx] = True
p_keyedval = val
is_keyed = True
elif abs(val - p_keyedval) > (min_reldiff_fac * max((abs(val) + abs(p_keyedval)), min_absdiff_fac)):
# Else, if enough difference from previous keyed value, key this value only!
key_write[idx] = True
p_keyedval = val
is_keyed = True
are_keyed.append(is_keyed)
# Contracted form using only builtin Python functions:
# return abs(a - b) > (min_reldiff_fac * max(abs(a) + abs(b), min_absdiff_fac))
abs_diff = np.abs(c_val_view - p_val_view)
different_if_greater_than = min_reldiff_fac * np.maximum(c_abs_val_view + p_abs_val_view, min_absdiff_fac)
enough_diff_p_val_mask = abs_diff > different_if_greater_than
# Enable both the current values *and* the previous values where `enough_diff_p_val_mask` is True. Some
# values may get set to True twice because the views overlap, but this is not a problem.
p_enabled_mask_view[enough_diff_p_val_mask] = True
c_enabled_mask_view[enough_diff_p_val_mask] = True

# Else, if enough difference from previous enabled value, enable the current value only!
# For each 'current' value, get the index of the nearest previous enabled value in `sampled_values` (or
# itself if the value is enabled).
# Start with an array that is the index of the 'current' value in `sampled_values`. The 'current' values are
# all but the first value, so the indices will be from 1 to `len(sampled_values)` exclusive.
# Let len(sampled_values) == 9:
# [1, 2, 3, 4, 5, 6, 7, 8]
p_enabled_idx_in_sampled_values = np.arange(1, len(sampled_values))
# Replace the indices of all disabled values with 0 in preparation of filling them in with the index of the
# nearest previous enabled value. We choose to replace with 0 so that if there is no nearest previous
# enabled value, we instead default to `sampled_values[0]`.
c_val_disabled_mask = ~c_enabled_mask_view
# Let `c_val_disabled_mask` be:
# [F, F, T, F, F, T, T, T]
# Set indices to 0 where `c_val_disabled_mask` is True:
# [1, 2, 3, 4, 5, 6, 7, 8]
# v v v v
# [1, 2, 0, 4, 5, 0, 0, 0]
p_enabled_idx_in_sampled_values[c_val_disabled_mask] = 0
# Accumulative maximum travels across the array from left to right, filling in the zeroed indices with the
# maximum value so far, which will be the closest previous enabled index because the non-zero indices are
# strictly increasing.
# [1, 2, 0, 4, 5, 0, 0, 0]
# v v v v
# [1, 2, 2, 4, 5, 5, 5, 5]
p_enabled_idx_in_sampled_values = np.maximum.accumulate(p_enabled_idx_in_sampled_values)
# Only disabled values need to be checked against their nearest previous enabled values.
# We can additionally ignore all values which equal their immediately previous value because those values
# will never be enabled if they were not enabled by the earlier difference check against immediately
# previous values.
p_enabled_diff_to_check_mask = np.logical_and(c_val_disabled_mask, p_val_view != c_val_view)
# Convert from a mask to indices because we need the indices later and because the array of indices will
# usually be smaller than the mask array making it faster to index other arrays with.
p_enabled_diff_to_check_idx = np.flatnonzero(p_enabled_diff_to_check_mask)
# `p_enabled_idx_in_sampled_values` from earlier:
# [1, 2, 2, 4, 5, 5, 5, 5]
# `p_enabled_diff_to_check_mask` assuming no values equal their immediately previous value:
# [F, F, T, F, F, T, T, T]
# `p_enabled_diff_to_check_idx`:
# [ 2, 5, 6, 7]
# `p_enabled_idx_in_sampled_values_to_check`:
# [ 2, 5, 5, 5]
p_enabled_idx_in_sampled_values_to_check = p_enabled_idx_in_sampled_values[p_enabled_diff_to_check_idx]
# Get the 'current' disabled values that need to be checked.
c_val_to_check = c_val_view[p_enabled_diff_to_check_idx]
c_abs_val_to_check = c_abs_val_view[p_enabled_diff_to_check_idx]
# Get the nearest previous enabled value for each value to be checked.
nearest_p_enabled_val = sampled_values[p_enabled_idx_in_sampled_values_to_check]
abs_nearest_p_enabled_val = np.abs(nearest_p_enabled_val)
# Check the relative + absolute-near-zero difference again, but against the nearest previous enabled value
# this time.
abs_diff = np.abs(c_val_to_check - nearest_p_enabled_val)
different_if_greater_than = (min_reldiff_fac
* np.maximum(c_abs_val_to_check + abs_nearest_p_enabled_val, min_absdiff_fac))
enough_diff_p_enabled_val_mask = abs_diff > different_if_greater_than
# If there are any that are different enough from the previous enabled value, then we have to check them all
# iteratively because enabling a new value can change the nearest previous enabled value of some elements,
# which changes their relative + absolute-near-zero difference:
# `p_enabled_diff_to_check_idx`:
# [2, 5, 6, 7]
# `p_enabled_idx_in_sampled_values_to_check`:
# [2, 5, 5, 5]
# Let `enough_diff_p_enabled_val_mask` be:
# [F, F, T, T]
# The first index that is newly enabled is 6:
# [2, 5,>6<,5]
# But 6 > 5, so the next value's nearest previous enabled index is also affected:
# [2, 5, 6,>6<]
# We had calculated a newly enabled index of 7 too, but that was calculated against the old nearest previous
# enabled index of 5, which has now been updated to 6, so whether 7 is enabled or not needs to be
# recalculated:
# [F, F, T, ?]
if np.any(enough_diff_p_enabled_val_mask):
# Accessing .data, the memoryview of the array, iteratively or by individual index is faster than doing
# the same with the array itself.
zipped = zip(p_enabled_diff_to_check_idx.data,
c_val_to_check.data,
c_abs_val_to_check.data,
p_enabled_idx_in_sampled_values_to_check.data,
enough_diff_p_enabled_val_mask.data)
# While iterating, we could set updated values into `enough_diff_p_enabled_val_mask` as we go and then
# update `enabled_mask` in bulk after the iteration, but if we're going to update an array while
# iterating, we may as well update `enabled_mask` directly instead and skip the bulk update.
# Additionally, the number of `True` writes to `enabled_mask` is usually much less than the number of
# updates that would be required to `enough_diff_p_enabled_val_mask`.
c_enabled_mask_view_mv = c_enabled_mask_view.data

# While iterating, keep track of the most recent newly enabled index, so we can tell when we need to
# recalculate whether the current value needs to be enabled.
new_p_enabled_idx = -1
# Keep track of its value too for performance.
new_p_enabled_val = -1
new_abs_p_enabled_val = -1
for cur_idx, c_val, c_abs_val, old_p_enabled_idx, enough_diff in zipped:
if new_p_enabled_idx > old_p_enabled_idx:
# The nearest previous enabled value is newly enabled and was not included when
# `enough_diff_p_enabled_val_mask` was calculated, so whether the current value is different
# enough needs to be recalculated using the newly enabled value.
# Check if the relative + absolute-near-zero difference is enough to enable this value.
enough_diff = (abs(c_val - new_p_enabled_val)
> (min_reldiff_fac * max(c_abs_val + new_abs_p_enabled_val, min_absdiff_fac)))
if enough_diff:
# The current value needs to be enabled.
c_enabled_mask_view_mv[cur_idx] = True
# Update the index and values for this newly enabled value.
new_p_enabled_idx = cur_idx
new_p_enabled_val = c_val
new_abs_p_enabled_val = c_abs_val

# If we write nothing (action doing nothing) and are in 'force_keep' mode, we key everything! :P
# See T41766.
@ -1362,7 +1474,9 @@ class AnimationCurveNodeWrapper:
# one key in this case.
# See T41719, T41605, T41254...
if self.force_keying or (force_keep and not self):
are_keyed[:] = [True] * len(are_keyed)
are_keyed = [True] * len(self._frame_write_mask_array)
else:
are_keyed = np.any(self._frame_write_mask_array, axis=1)

# If we did key something, ensure first and last sampled values are keyed as well.
if self.force_startend_keying:
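Reviewer note: the `np.maximum.accumulate` fill is the heart of the vectorisation; it replaces the old running `p_keyedval` variable. A standalone demo with the example arrays from the comments (assumes only numpy):

import numpy as np

enabled = np.array([False, True, True, False, True, True, False, False, False])
# Indices of the 'current' values (all but the first sampled value).
idx = np.arange(1, len(enabled))
# Zero out indices whose value is disabled...
idx[~enabled[1:]] = 0
# ...then forward-fill with the running maximum, so every slot holds the index
# of the nearest previous enabled value (0 when there is none yet).
print(np.maximum.accumulate(idx))  # [1 2 2 4 5 5 5 5]

The remaining scalar loop is only entered when a new enable invalidates some of the precomputed nearest-previous indices, which should be rare for typical animation data.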
@ -629,7 +629,7 @@ def _transformation_curves_gen(item, values_arrays, channel_keys):
# Create matrices/euler from the initial transformation values of this item.
# These variables will be updated in-place as we iterate through each frame.
lcl_translation_mat = Matrix.Translation(transform_data.loc)
lcl_rotation_eul = Euler(transform_data.rot, transform_data.rot_ord)
lcl_rotation_eul = Euler(convert_deg_to_rad_iter(transform_data.rot), transform_data.rot_ord)
lcl_scaling_mat = Matrix()
lcl_scaling_mat[0][0], lcl_scaling_mat[1][1], lcl_scaling_mat[2][2] = transform_data.sca
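Reviewer note: the fix routes the FBX rotation, which is stored in degrees, through a conversion before building the Euler, since mathutils expects radians. If `convert_deg_to_rad_iter` is not already in scope here, it is presumably equivalent to this sketch (returning a concrete tuple so Euler() can consume it):

import math

def convert_deg_to_rad_iter(values):
    # Convert an iterable of angles in degrees to radians.
    return tuple(math.radians(v) for v in values)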
@ -2784,7 +2784,9 @@ class FbxImportHelperNode:
for i, w in combined_weights.items():
indices.append(i)
if len(w) > 1:
weights.append(sum(w) / len(w))
# Add ignored child weights to the current bone's weight.
# XXX - Weights that sum to more than 1.0 get clamped to 1.0 when set in the vertex group.
weights.append(sum(w))
else:
weights.append(w[0])
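Reviewer note: replacing the average with the plain sum changes the result whenever a vertex is weighted by both a bone and one of its ignored children; the clamping the XXX comment mentions happens later, in the vertex group assignment. A tiny worked example (standalone):

combined_weights = {7: [0.6, 0.9]}  # vertex 7: own weight plus one ignored child's weight
for i, w in combined_weights.items():
    total = sum(w)             # 1.5 with this patch (the old average gave 0.75)
    clamped = min(total, 1.0)  # what the vertex group will effectively store
    print(i, total, clamped)   # 7 1.5 1.0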
@ -3468,31 +3470,56 @@ def load(operator, context, filepath="",
def _():
fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxShape'))

# - FBX | - Blender equivalent
# Mesh | `Mesh`
# BlendShape | `Key`
# BlendShapeChannel | `ShapeKey`, but without its `.data`.
# Shape | `ShapeKey.data`, but also includes normals and the values are relative to the base Mesh
# | instead of being absolute. The data is sparse, so each Shape has an "Indexes" array too.
# | FBX 2020 introduced 'Modern Style' Shapes that also support tangents, binormals, vertex
# | colors and UVs, and can be absolute values instead of relative, but 'Modern Style' Shapes
# | are not currently supported.
#
# The FBX connections between Shapes and Meshes form multiple many-many relationships:
# Mesh >-< BlendShape >-< BlendShapeChannel >-< Shape
# In practice, the relationships are almost never many-many and are more typically 1-many or 1-1:
# Mesh --- BlendShape:
# usually 1-1 and the FBX SDK might enforce that each BlendShape is connected to at most one Mesh.
# BlendShape --< BlendShapeChannel:
# usually 1-many.
# BlendShapeChannel --- or uncommonly --< Shape:
# usually 1-1, but 1-many is a documented feature.

def connections_gen(c_src_uuid, fbx_id, fbx_type):
"""Helper to reduce duplicate code"""
# Rarely, an imported FBX file will have duplicate connections. For Shape Key related connections, FBX
# appears to ignore the duplicates, or overwrite the existing duplicates such that the end result is the
# same as ignoring them, so keep a set of the seen connections and ignore any duplicates.
seen_connections = set()
for c_dst_uuid, ctype in fbx_connection_map.get(c_src_uuid, ()):
if ctype.props[0] != b'OO':
# 'Object-Object' connections only.
continue
fbx_data, bl_data = fbx_table_nodes.get(c_dst_uuid, (None, None))
if fbx_data is None or fbx_data.id != fbx_id or fbx_data.props[2] != fbx_type:
# Either `c_dst_uuid` doesn't exist, or it has a different id or type.
continue
connection_key = (c_src_uuid, c_dst_uuid)
if connection_key in seen_connections:
# The connection is a duplicate, skip it.
continue
seen_connections.add(connection_key)
yield c_dst_uuid, fbx_data, bl_data

mesh_to_shapes = {}
for s_uuid, s_item in fbx_table_nodes.items():
fbx_sdata, bl_sdata = s_item = fbx_table_nodes.get(s_uuid, (None, None))
for s_uuid, (fbx_sdata, _bl_sdata) in fbx_table_nodes.items():
if fbx_sdata is None or fbx_sdata.id != b'Geometry' or fbx_sdata.props[2] != b'Shape':
continue

# shape -> blendshapechannel -> blendshape -> mesh.
for bc_uuid, bc_ctype in fbx_connection_map.get(s_uuid, ()):
if bc_ctype.props[0] != b'OO':
continue
fbx_bcdata, _bl_bcdata = fbx_table_nodes.get(bc_uuid, (None, None))
if fbx_bcdata is None or fbx_bcdata.id != b'Deformer' or fbx_bcdata.props[2] != b'BlendShapeChannel':
continue
for bs_uuid, bs_ctype in fbx_connection_map.get(bc_uuid, ()):
if bs_ctype.props[0] != b'OO':
continue
fbx_bsdata, _bl_bsdata = fbx_table_nodes.get(bs_uuid, (None, None))
if fbx_bsdata is None or fbx_bsdata.id != b'Deformer' or fbx_bsdata.props[2] != b'BlendShape':
continue
for m_uuid, m_ctype in fbx_connection_map.get(bs_uuid, ()):
if m_ctype.props[0] != b'OO':
continue
fbx_mdata, bl_mdata = fbx_table_nodes.get(m_uuid, (None, None))
if fbx_mdata is None or fbx_mdata.id != b'Geometry' or fbx_mdata.props[2] != b'Mesh':
continue
for bc_uuid, fbx_bcdata, _bl_bcdata in connections_gen(s_uuid, b'Deformer', b'BlendShapeChannel'):
for bs_uuid, _fbx_bsdata, _bl_bsdata in connections_gen(bc_uuid, b'Deformer', b'BlendShape'):
for m_uuid, _fbx_mdata, bl_mdata in connections_gen(bs_uuid, b'Geometry', b'Mesh'):
# Blenmeshes are assumed already created at that time!
assert(isinstance(bl_mdata, bpy.types.Mesh))
# Group shapes by mesh so that each mesh only needs to be processed once for all of its shape
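Reviewer note: all three uuid/type/duplicate checks now live in one place. The duplicate handling reduces to a classic seen-set generator, shown here in isolation:

def dedup_edges(edges):
    # Core of connections_gen's seen_connections handling: yield each
    # (src, dst) edge once and silently drop later duplicates.
    seen = set()
    for edge in edges:
        if edge not in seen:
            seen.add(edge)
            yield edge

assert list(dedup_edges([(1, 2), (1, 2), (2, 3)])) == [(1, 2), (2, 3)]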
@ -5,7 +5,7 @@
bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
"version": (4, 0, 33),
"version": (4, 0, 43),
'blender': (4, 0, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',
@ -144,13 +144,10 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
'Most efficient and portable, but more difficult to edit later'),
('GLTF_SEPARATE', 'glTF Separate (.gltf + .bin + textures)',
'Exports multiple files, with separate JSON, binary and texture data. '
'Easiest to edit later'),
('GLTF_EMBEDDED', 'glTF Embedded (.gltf)',
'Exports a single file, with all data packed in JSON. '
'Less efficient than binary, but easier to edit later')),
'Easiest to edit later')),
description=(
'Output format and embedding options. Binary is most efficient, '
'but JSON (embedded or separate) may be easier to edit later'
'Output format. Binary is most efficient, '
'but JSON may be easier to edit later'
),
default='GLB', #Warning => If you change the default, need to change the default filter too
update=on_export_format_changed,
@ -174,13 +171,13 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_image_format: EnumProperty(
name='Images',
items=(('AUTO', 'Automatic',
'Save PNGs as PNGs, JPEGs as JPEGs, WEBPs as WEBPs. '
'If neither one, use PNG'),
'Save PNGs as PNGs, JPEGs as JPEGs, WebPs as WebPs. '
'For other formats, use PNG'),
('JPEG', 'JPEG Format (.jpg)',
'Save images as JPEGs. (Images that need alpha are saved as PNGs though.) '
'Be aware of a possible loss in quality'),
('WEBP', 'Webp Format',
'Save images as WEBPs as main image (no fallback)'),
('WEBP', 'WebP Format',
'Save images as WebPs as main image (no fallback)'),
('NONE', 'None',
'Don\'t export images'),
),
@ -192,18 +189,18 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
)

export_image_add_webp: BoolProperty(
name='Create Webp',
name='Create WebP',
description=(
"Creates webp textures for every textures. "
"For already webp textures, nothing happen"
"Creates WebP textures for every texture. "
"For already WebP textures, nothing happens"
),
default=False
)

export_image_webp_fallback: BoolProperty(
name='Webp fallback',
name='WebP fallback',
description=(
"For all webp textures, create a PNG fallback texture."
"For all WebP textures, create a PNG fallback texture"
),
default=False
)
@ -641,7 +638,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):

export_try_sparse_sk: BoolProperty(
name='Use Sparse Accessor if better',
description='Try using Sparce Accessor if it save space',
description='Try using Sparse Accessor if it saves space',
default=True
)

@ -654,8 +651,8 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_gpu_instances: BoolProperty(
name='GPU Instances',
description='Export using EXT_mesh_gpu_instancing. '
'Limited to children of a same Empty. '
'multiple Materials might be omitted',
'Limited to children of a given Empty. '
'Multiple materials might be omitted',
default=False
)

@ -1648,7 +1645,7 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
items=(
("BLENDER", "Blender (best for import/export round trip)",
"Good for re-importing glTFs exported from Blender, "
"and re-exporting glTFs to glTFs after Blender editing"
"and re-exporting glTFs to glTFs after Blender editing. "
"Bone tips are placed on their local +Y axis (in glTF space)"),
("TEMPERANCE", "Temperance (average)",
"Decent all-around strategy. "
@ -1674,10 +1671,10 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
)

import_webp_texture: BoolProperty(
name='Import Webp textures',
name='Import WebP textures',
description=(
"If a texture exists in webp format,"
"loads the webp texture instead of the fallback png/jpg one"
"If a texture exists in WebP format, "
"loads the WebP texture instead of the fallback PNG/JPEG one"
),
default=False,
)

@ -13,8 +13,8 @@ from ...com.gltf2_blender_extras import generate_extras
from ..gltf2_blender_gather_cache import cached
from ..gltf2_blender_gather_tree import VExportNode
from .fcurves.gltf2_blender_gather_fcurves_animation import gather_animation_fcurves
from .sampled.armature.gltf2_blender_gather_armature_action_sampled import gather_action_armature_sampled
from .sampled.armature.gltf2_blender_gather_armature_channels import gather_sampled_bone_channel
from .sampled.armature.armature_action_sampled import gather_action_armature_sampled
from .sampled.armature.armature_channels import gather_sampled_bone_channel
from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled
from .sampled.shapekeys.gltf2_blender_gather_sk_action_sampled import gather_action_sk_sampled
from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels, gather_sampled_object_channel

@ -10,7 +10,7 @@ from ....io.com import gltf2_io
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....io.com.gltf2_io_debug import print_console
from ..gltf2_blender_gather_tree import VExportNode
from .sampled.armature.gltf2_blender_gather_armature_action_sampled import gather_action_armature_sampled
from .sampled.armature.armature_action_sampled import gather_action_armature_sampled
from .sampled.object.gltf2_blender_gather_object_action_sampled import gather_action_object_sampled
from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel
from .gltf2_blender_gather_drivers import get_sk_drivers

@ -7,7 +7,7 @@ from ....io.com import gltf2_io
from ...com.gltf2_blender_extras import generate_extras
from ..gltf2_blender_gather_tree import VExportNode
from .gltf2_blender_gather_drivers import get_sk_drivers
from .sampled.armature.gltf2_blender_gather_armature_channels import gather_armature_sampled_channels
from .sampled.armature.armature_channels import gather_armature_sampled_channels
from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels
from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sk_sampled_channels
from .gltf2_blender_gather_animation_utils import link_samplers, add_slide_data

@ -8,7 +8,7 @@ from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......io.com.gltf2_io_debug import print_console
from ......io.com import gltf2_io
from .....com.gltf2_blender_extras import generate_extras
from .gltf2_blender_gather_armature_channels import gather_armature_sampled_channels
from .armature_channels import gather_armature_sampled_channels

@ -6,15 +6,15 @@ import bpy
import typing
from ......io.com import gltf2_io
from ......io.exp.gltf2_io_user_extensions import export_user_extensions
from ......blender.com.gltf2_blender_conversion import get_gltf_interpolation
from .....com.gltf2_blender_conversion import get_gltf_interpolation
from .....com.gltf2_blender_conversion import get_target, get_channel_from_target
from ...fcurves.gltf2_blender_gather_fcurves_channels import get_channel_groups
from ...fcurves.gltf2_blender_gather_fcurves_channels import needs_baking
from ...gltf2_blender_gather_drivers import get_sk_drivers
from ..object.gltf2_blender_gather_object_channels import gather_sampled_object_channel
from ..shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel
from .gltf2_blender_gather_armature_channel_target import gather_armature_sampled_channel_target
from .gltf2_blender_gather_armature_sampler import gather_bone_sampled_animation_sampler
from .armature_channel_target import gather_armature_sampled_channel_target
from .armature_sampler import gather_bone_sampled_animation_sampler

def gather_armature_sampled_channels(armature_uuid, blender_action_name, export_settings) -> typing.List[gltf2_io.AnimationChannel]:
channels = []
@ -13,7 +13,7 @@ from .....com import gltf2_blender_math
from ....gltf2_blender_gather_accessors import gather_accessor
from ....gltf2_blender_gather_cache import cached
from ....gltf2_blender_gather_tree import VExportNode
from .gltf2_blender_gather_armature_keyframes import gather_bone_sampled_keyframes
from .armature_keyframes import gather_bone_sampled_keyframes

@cached
def gather_bone_sampled_animation_sampler(
@ -85,9 +85,6 @@ def __create_buffer(exporter, export_settings):
buffer = bytes()
if export_settings['gltf_format'] == 'GLB':
buffer = exporter.finalize_buffer(export_settings['gltf_filedirectory'], is_glb=True)
else:
if export_settings['gltf_format'] == 'GLTF_EMBEDDED':
exporter.finalize_buffer(export_settings['gltf_filedirectory'])
else:
exporter.finalize_buffer(export_settings['gltf_filedirectory'],
export_settings['gltf_binaryfilename'])
@ -119,7 +116,7 @@ def __fix_json(obj):

def __should_include_json_value(key, value):
allowed_empty_collections = ["KHR_materials_unlit", "KHR_materials_specular"]
allowed_empty_collections = ["KHR_materials_unlit"]

if value is None:
return False
@ -154,6 +154,7 @@ def get_texture_transform_from_mapping_node(mapping_node):
return None

mapping_transform = {}
if mapping_node.vector_type != "VECTOR":
mapping_transform["offset"] = [mapping_node.inputs['Location'].default_value[0], mapping_node.inputs['Location'].default_value[1]]
mapping_transform["rotation"] = mapping_node.inputs['Rotation'].default_value[2]
mapping_transform["scale"] = [mapping_node.inputs['Scale'].default_value[0], mapping_node.inputs['Scale'].default_value[1]]
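Reviewer note: for readers new to this helper, the dict built above is what later becomes a KHR_texture_transform extension. A toy illustration of the same extraction with a stubbed node (the stub is hypothetical and skips Blender's .default_value indirection):

class MappingStub:
    def __init__(self, location, rotation, scale):
        self.inputs = {'Location': location, 'Rotation': rotation, 'Scale': scale}

node = MappingStub([0.25, 0.5, 0.0], [0.0, 0.0, 1.57], [2.0, 2.0, 1.0])
mapping_transform = {
    "offset": node.inputs['Location'][:2],   # only X/Y are meaningful in UV space
    "rotation": node.inputs['Rotation'][2],  # only the Z rotation applies to 2D UVs
    "scale": node.inputs['Scale'][:2],
}
print(mapping_transform)  # {'offset': [0.25, 0.5], 'rotation': 1.57, 'scale': [2.0, 2.0]}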
@ -117,7 +117,7 @@ class GlTF2Exporter:
f.write(self.__buffer.to_bytes())
uri = buffer_name
else:
uri = self.__buffer.to_embed_string()
pass # This is no more possible, we don't export embedded buffers

buffer = gltf2_io.Buffer(
byte_length=self.__buffer.byte_length,
@ -320,6 +320,20 @@ class GlTF2Exporter:
len_ = len([i for i in self.nodes_idx_to_remove if i < skin.skeleton])
skin.skeleton = skin.skeleton - len_

# Remove animation channels that was targeting a node that will be removed
new_animation_list = []
for animation in self.__gltf.animations:
new_channel_list = []
for channel in animation.channels:
if channel.target.node not in self.nodes_idx_to_remove:
new_channel_list.append(channel)
animation.channels = new_channel_list
if len(animation.channels) > 0:
new_animation_list.append(animation)
self.__gltf.animations = new_animation_list

#TODO: remove unused animation accessors?

# And now really remove nodes
self.__gltf.nodes = [node for idx, node in enumerate(self.__gltf.nodes) if idx not in self.nodes_idx_to_remove]
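Reviewer note: the `skin.skeleton` adjustment above is the general index-remapping rule for any node reference that survives the removal; sketched on its own:

nodes_idx_to_remove = {1, 3}

def remap(old_idx):
    # A surviving node index shifts down by the number of removed indices below it.
    return old_idx - sum(1 for i in nodes_idx_to_remove if i < old_idx)

assert [remap(i) for i in (0, 2, 4)] == [0, 1, 2]

The new channel filter then guarantees no animation still points at a removed node, and animations left with no channels are dropped entirely.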
@ -9,6 +9,7 @@ from ...material.gltf2_blender_gather_texture_info import gather_texture_info

def export_specular(blender_material, export_settings):
specular_extension = {}
extensions_needed = False

specular_socket = gltf2_blender_get.get_socket(blender_material, 'Specular IOR Level')
speculartint_socket = gltf2_blender_get.get_socket(blender_material, 'Specular Tint')
@ -23,18 +24,27 @@ def export_specular(blender_material, export_settings):

if specular_non_linked is True:
fac = specular_socket.default_value
if fac != 1.0:
fac = fac * 2.0
if fac < 1.0:
specular_extension['specularFactor'] = fac
if fac == 0.0:
return None, {}
extensions_needed = True
elif fac > 1.0:
# glTF specularFactor should be <= 1.0, so we will multiply ColorFactory by specularFactor, and set SpecularFactor to 1.0 (default value)
extensions_needed = True
else:
pass # If fac == 1.0, no need to export specularFactor, the default value is 1.0

else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(specular_socket, kind='VALUE')
if fac is not None and fac != 1.0:
fac = fac * 2.0 if fac is not None else None
if fac is not None and fac < 1.0:
specular_extension['specularFactor'] = fac

if fac == 0.0:
return None, {}
extensions_needed = True
elif fac is not None and fac > 1.0:
# glTF specularFactor should be <= 1.0, so we will multiply ColorFactory by specularFactor, and set SpecularFactor to 1.0 (default value)
extensions_needed = True

# Texture
if gltf2_blender_get.has_image_node_from_socket(specular_socket):
@ -46,16 +56,26 @@ def export_specular(blender_material, export_settings):
)
specular_extension['specularTexture'] = original_specular_texture
uvmap_infos.update({'specularTexture': uvmap_info})
extensions_needed = True

if specularcolor_non_linked is True:
color = speculartint_socket.default_value[:3]
if fac is not None and fac > 1.0:
color = (color[0] * fac, color[1] * fac, color[2] * fac)
specular_extension['specularColorFactor'] = color if color != (1.0, 1.0, 1.0) else None
if color != (1.0, 1.0, 1.0):
specular_extension['specularColorFactor'] = color
extensions_needed = True

else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(speculartint_socket, kind='RGB')
if fac is not None and fac != (1.0, 1.0, 1.0):
specular_extension['specularColorFactor'] = fac
fac_color = gltf2_blender_get.get_factor_from_socket(speculartint_socket, kind='RGB')
if fac_color is not None and fac is not None and fac > 1.0:
fac_color = (fac_color[0] * fac, fac_color[1] * fac, fac_color[2] * fac)
elif fac_color is None and fac is not None and fac > 1.0:
fac_color = (fac, fac, fac)
specular_extension['specularColorFactor'] = fac_color if fac_color != (1.0, 1.0, 1.0) else None
if fac_color != (1.0, 1.0, 1.0):
extensions_needed = True

# Texture
if gltf2_blender_get.has_image_node_from_socket(speculartint_socket):
@ -67,5 +87,9 @@ def export_specular(blender_material, export_settings):
)
specular_extension['specularColorTexture'] = original_specularcolor_texture
uvmap_infos.update({'specularColorTexture': uvmap_info})
extensions_needed = True

if extensions_needed is False:
return None, {}

return Extension('KHR_materials_specular', specular_extension, False), uvmap_infos
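Reviewer note: the `* 2.0` here and the matching `/ 2.0` in the importer hunks further down come from Blender's Specular IOR Level socket being 0.5-based, while glTF's specularFactor defaults to 1.0 and must stay within [0, 1]. The export rule, condensed into one hypothetical function:

def to_gltf_specular_factor(blender_specular):
    # Blender socket value 0.5 maps to glTF specularFactor 1.0.
    fac = blender_specular * 2.0
    if fac == 1.0:
        return None  # the glTF default, no need to write it
    if fac > 1.0:
        return 1.0   # overflow is folded into specularColorFactor instead
    return fac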
@ -59,7 +59,7 @@ def gather_image(

export_user_extensions('gather_image_hook', export_settings, image, blender_shader_sockets)

# We also return image_data, as it can be used to generate same file with another extension for webp management
# We also return image_data, as it can be used to generate same file with another extension for WebP management
return image, image_data, factor

def __gather_original_uri(original_uri, export_settings):
@ -118,7 +118,7 @@ def __gather_mime_type(sockets, export_image, export_settings):
if export_settings["gltf_image_format"] == "WEBP":
return "image/webp"
else:
# If we keep image as is (no channel composition), we need to keep original format (for webp)
# If we keep image as is (no channel composition), we need to keep original format (for WebP)
image = export_image.blender_image()
if image is not None and __is_blender_image_a_webp(image):
return "image/webp"

@ -168,14 +168,14 @@ def __gather_extensions(blender_material, emissive_factor, export_settings):
clearcoat_extension, uvmap_info = export_clearcoat(blender_material, export_settings)
if clearcoat_extension:
extensions["KHR_materials_clearcoat"] = clearcoat_extension
uvmap_infos.update(uvmap_infos)
uvmap_infos.update(uvmap_info)

# KHR_materials_transmission

transmission_extension, uvmap_info = export_transmission(blender_material, export_settings)
if transmission_extension:
extensions["KHR_materials_transmission"] = transmission_extension
uvmap_infos.update(uvmap_infos)
uvmap_infos.update(uvmap_info)

# KHR_materials_emissive_strength
if any([i>1.0 for i in emissive_factor or []]):

@ -70,7 +70,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,

ext_webp = {}

# If user want to keep original textures, and these textures are webp, we need to remove source from
# If user want to keep original textures, and these textures are WebP, we need to remove source from
# gltf2_io.Texture, and populate extension
if export_settings['gltf_keep_original_textures'] is True \
and source is not None \
@ -79,19 +79,19 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
remove_source = True
required = True

# If user want to export in webp format (so without fallback in png/jpg)
# If user want to export in WebP format (so without fallback in png/jpg)
if export_settings['gltf_image_format'] == "WEBP":
# We create all image without fallback
ext_webp["source"] = source
remove_source = True
required = True

# If user doesn't want to export in webp format, but want webp too. Texture is not webp
# If user doesn't want to export in WebP format, but want WebP too. Texture is not WebP
if export_settings['gltf_image_format'] != "WEBP" \
and export_settings['gltf_add_webp'] \
and source is not None \
and source.mime_type != "image/webp":
# We need here to create some webp textures
# We need here to create some WebP textures

new_mime_type = "image/webp"
new_data, _ = image_data.encode(new_mime_type, export_settings)
@ -116,7 +116,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
ext_webp["source"] = webp_image

# If user doesn't want to export in webp format, but want webp too. Texture is webp
# If user doesn't want to export in WebP format, but want WebP too. Texture is WebP
if export_settings['gltf_image_format'] != "WEBP" \
and source is not None \
and source.mime_type == "image/webp":
@ -127,7 +127,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
remove_source = True
required = True

# If user doesn't want to export in webp format, but want webp too as fallback. Texture is webp
# If user doesn't want to export in webp format, but want WebP too as fallback. Texture is WebP
if export_settings['gltf_image_format'] != "WEBP" \
and webp_image is not None \
and export_settings['gltf_webp_fallback'] is True:
@ -209,7 +209,7 @@ def __gather_source(blender_shader_sockets, default_sockets, export_settings):

png_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings)

# We inverted the png & webp image, to have the png as main source
# We inverted the png & WebP image, to have the png as main source
return png_image, source, image_data, factor
return source, None, image_data, factor

@ -39,7 +39,7 @@ def specular(mh, location_specular,
x_specularcolor, y_specularcolor = location_specular_tint

if tex_specular_info is None:
specular_socket.default_value = specular_factor
specular_socket.default_value = specular_factor / 2.0
else:
# Mix specular factor
if specular_factor != 1.0:
@ -51,7 +51,7 @@ def specular(mh, location_specular,
mh.node_tree.links.new(specular_socket, node.outputs[0])
# Inputs
specular_socket = node.inputs[0]
node.inputs[1].default_value = specular_factor
node.inputs[1].default_value = specular_factor / 2.0
x_specular -= 200

texture(

@ -135,6 +135,7 @@ class BlenderNode():
bpy.data.collections.new(BLENDER_GLTF_SPECIAL_COLLECTION)
bpy.data.scenes[gltf.blender_scene].collection.children.link(bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION])
bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_viewport = True
bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_render = True

# Create an icosphere, and assign it to the collection
bpy.ops.mesh.primitive_ico_sphere_add(radius=1, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))

@ -50,9 +50,6 @@ def pbr_metallic_roughness(mh: MaterialHelper):
# This value may be overridden later if IOR extension is set on file
pbr_node.inputs['IOR'].default_value = GLTF_IOR

pbr_node.inputs['Specular IOR Level'].default_value = 0.0 # Will be overridden by KHR_materials_specular if set
pbr_node.inputs['Specular Tint'].default_value = [0.0]*3 + [1.0] # Will be overridden by KHR_materials_specular if set

if mh.pymat.occlusion_texture is not None:
if mh.settings_node is None:
mh.settings_node = make_settings_node(mh)

@ -41,7 +41,7 @@ def texture(
if forced_image is None:

if mh.gltf.import_settings['import_webp_texture'] is True:
# Get the webp image if there is one
# Get the WebP image if there is one
if pytexture.extensions \
and 'EXT_texture_webp' in pytexture.extensions \
and pytexture.extensions['EXT_texture_webp']['source'] is not None:

@ -47,8 +47,5 @@ class Buffer:
def to_bytes(self):
return self.__data

def to_embed_string(self):
return 'data:application/octet-stream;base64,' + base64.b64encode(self.__data).decode('ascii')

def clear(self):
self.__data = b""
@ -20,10 +20,6 @@ def drawlayout(context, layout, mode='non-panel'):
|
||||
col.menu(NWMergeNodesMenu.bl_idname)
|
||||
col.separator()
|
||||
|
||||
col = layout.column(align=True)
|
||||
col.menu(NWSwitchNodeTypeMenu.bl_idname, text="Switch Node Type")
|
||||
col.separator()
|
||||
|
||||
if tree_type == 'ShaderNodeTree':
|
||||
col = layout.column(align=True)
|
||||
col.operator(operators.NWAddTextureSetup.bl_idname, text="Add Texture Setup", icon='NODE_SEL')
|
||||
@ -385,32 +381,8 @@ class NWSwitchNodeTypeMenu(Menu, NWBase):
|
||||
|
||||
def draw(self, context):
|
||||
layout = self.layout
|
||||
categories = [c for c in node_categories_iter(context)
|
||||
if c.name not in ['Group', 'Script']]
|
||||
for cat in categories:
|
||||
idname = f"NODE_MT_nw_switch_{cat.identifier}_submenu"
|
||||
if hasattr(bpy.types, idname):
|
||||
layout.menu(idname)
|
||||
else:
|
||||
layout.label(text="Unable to load altered node lists.")
|
||||
layout.label(text="Please re-enable Node Wrangler.")
|
||||
break
|
||||
|
||||
|
||||
def draw_switch_category_submenu(self, context):
|
||||
layout = self.layout
|
||||
if self.category.name == 'Layout':
|
||||
for node in self.category.items(context):
|
||||
if node.nodetype != 'NodeFrame':
|
||||
props = layout.operator(operators.NWSwitchNodeType.bl_idname, text=node.label)
|
||||
props.to_type = node.nodetype
|
||||
else:
|
||||
for node in self.category.items(context):
|
||||
if isinstance(node, NodeItemCustom):
|
||||
node.draw(self, layout, context)
|
||||
continue
|
||||
props = layout.operator(operators.NWSwitchNodeType.bl_idname, text=node.label)
|
||||
props.to_type = node.nodetype
|
||||
layout.label(text="This operator is removed due to the changes of node menus.", icon='ERROR')
|
||||
layout.label(text="A native implementation of the function is expected in the future.")
|
||||
|
||||
#
|
||||
# APPENDAGES TO EXISTING UI
|
||||
|
@ -914,195 +914,6 @@ class NWReloadImages(Operator):
|
||||
return {'CANCELLED'}
|
||||
|
||||
|
||||
class NWSwitchNodeType(Operator, NWBase):
|
||||
"""Switch type of selected nodes """
|
||||
bl_idname = "node.nw_swtch_node_type"
|
||||
bl_label = "Switch Node Type"
|
||||
bl_options = {'REGISTER', 'UNDO'}
|
||||
|
||||
to_type: StringProperty(
|
||||
name="Switch to type",
|
||||
default='',
|
||||
)
|
||||
|
||||
def execute(self, context):
|
||||
to_type = self.to_type
|
||||
if len(to_type) == 0:
|
||||
return {'CANCELLED'}
|
||||
|
||||
nodes, links = get_nodes_links(context)
|
||||
# Those types of nodes will not swap.
|
||||
src_excludes = ('NodeFrame')
|
||||
# Those attributes of nodes will be copied if possible
|
||||
attrs_to_pass = ('color', 'hide', 'label', 'mute', 'parent',
|
||||
'show_options', 'show_preview', 'show_texture',
|
||||
'use_alpha', 'use_clamp', 'use_custom_color', 'location'
|
||||
)
|
||||
selected = [n for n in nodes if n.select]
|
||||
reselect = []
|
||||
for node in [n for n in selected if
|
||||
n.rna_type.identifier not in src_excludes and
|
||||
n.rna_type.identifier != to_type]:
|
||||
new_node = nodes.new(to_type)
|
||||
for attr in attrs_to_pass:
|
||||
if hasattr(node, attr) and hasattr(new_node, attr):
|
||||
setattr(new_node, attr, getattr(node, attr))
|
||||
# set image datablock of dst to image of src
|
||||
if hasattr(node, 'image') and hasattr(new_node, 'image'):
|
||||
if node.image:
|
||||
new_node.image = node.image
|
||||
# Special cases
|
||||
if new_node.type == 'SWITCH':
|
||||
new_node.hide = True
|
||||
# Dictionaries: src_sockets and dst_sockets:
|
||||
# 'INPUTS': input sockets ordered by type (entry 'MAIN' main type of inputs).
|
||||
# 'OUTPUTS': output sockets ordered by type (entry 'MAIN' main type of outputs).
|
||||
# in 'INPUTS' and 'OUTPUTS':
|
||||
# 'SHADER', 'RGBA', 'VECTOR', 'VALUE' - sockets of those types.
|
||||
# socket entry:
|
||||
# (index_in_type, socket_index, socket_name, socket_default_value, socket_links)
|
||||
src_sockets = {
|
||||
'INPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE': [], 'MAIN': None},
|
||||
'OUTPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE': [], 'MAIN': None},
|
||||
}
|
||||
dst_sockets = {
|
||||
'INPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE': [], 'MAIN': None},
|
||||
'OUTPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE': [], 'MAIN': None},
|
||||
}
|
||||
types_order_one = 'SHADER', 'RGBA', 'VECTOR', 'VALUE'
|
||||
types_order_two = 'SHADER', 'VECTOR', 'RGBA', 'VALUE'
|
||||
# check src node to set src_sockets values and dst node to set dst_sockets dict values
|
||||
for sockets, nd in ((src_sockets, node), (dst_sockets, new_node)):
|
||||
# Check node's inputs and outputs and fill proper entries in "sockets" dict
|
||||
for in_out, in_out_name in ((nd.inputs, 'INPUTS'), (nd.outputs, 'OUTPUTS')):
|
||||
# enumerate in inputs, then in outputs
|
||||
# find name, default value and links of socket
|
||||
for i, socket in enumerate(in_out):
|
||||
the_name = socket.name
|
||||
dval = None
|
||||
# Not every socket, especially in outputs has "default_value"
|
||||
if hasattr(socket, 'default_value'):
|
||||
dval = socket.default_value
|
||||
socket_links = []
|
||||
for lnk in socket.links:
|
||||
socket_links.append(lnk)
|
||||
# check type of socket to fill proper keys.
|
||||
for the_type in types_order_one:
|
||||
if socket.type == the_type:
|
||||
# create values for sockets['INPUTS'][the_type] and sockets['OUTPUTS'][the_type]
|
||||
# entry structure: (index_in_type, socket_index, socket_name,
|
||||
# socket_default_value, socket_links)
|
||||
sockets[in_out_name][the_type].append(
|
||||
(len(sockets[in_out_name][the_type]), i, the_name, dval, socket_links))
|
||||
# Check which of the types in inputs/outputs is considered to be "main".
|
||||
# Set values of sockets['INPUTS']['MAIN'] and sockets['OUTPUTS']['MAIN']
|
||||
for type_check in types_order_one:
|
||||
if sockets[in_out_name][type_check]:
|
||||
sockets[in_out_name]['MAIN'] = type_check
|
||||
break
|
||||
|
||||
matches = {
|
||||
'INPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE_NAME': [], 'VALUE': [], 'MAIN': []},
|
||||
            'OUTPUTS': {'SHADER': [], 'RGBA': [], 'VECTOR': [], 'VALUE_NAME': [], 'VALUE': [], 'MAIN': []},
        }

        for inout, soctype in (
                ('INPUTS', 'MAIN',),
                ('INPUTS', 'SHADER',),
                ('INPUTS', 'RGBA',),
                ('INPUTS', 'VECTOR',),
                ('INPUTS', 'VALUE',),
                ('OUTPUTS', 'MAIN',),
                ('OUTPUTS', 'SHADER',),
                ('OUTPUTS', 'RGBA',),
                ('OUTPUTS', 'VECTOR',),
                ('OUTPUTS', 'VALUE',),
        ):
            if src_sockets[inout][soctype] and dst_sockets[inout][soctype]:
                if soctype == 'MAIN':
                    sc = src_sockets[inout][src_sockets[inout]['MAIN']]
                    dt = dst_sockets[inout][dst_sockets[inout]['MAIN']]
                else:
                    sc = src_sockets[inout][soctype]
                    dt = dst_sockets[inout][soctype]
                # start with 'dt' to determine number of possibilities.
                for i, soc in enumerate(dt):
                    # if src main has enough entries - match them with dst main sockets by indexes.
                    if len(sc) > i:
                        matches[inout][soctype].append(((sc[i][1], sc[i][3]), (soc[1], soc[3])))
                    # add 'VALUE_NAME' criterion to inputs.
                    if inout == 'INPUTS' and soctype == 'VALUE':
                        for s in sc:
                            if s[2] == soc[2]:  # if names match
                                # append src (index, dval), dst (index, dval)
                                matches['INPUTS']['VALUE_NAME'].append(((s[1], s[3]), (soc[1], soc[3])))

        # When src ['INPUTS']['MAIN'] is 'VECTOR' replace 'MAIN' with matches VECTOR if possible.
        # This creates better links when relinking textures.
        if src_sockets['INPUTS']['MAIN'] == 'VECTOR' and matches['INPUTS']['VECTOR']:
            matches['INPUTS']['MAIN'] = matches['INPUTS']['VECTOR']

        # Pass default values and RELINK:
        for tp in ('MAIN', 'SHADER', 'RGBA', 'VECTOR', 'VALUE_NAME', 'VALUE'):
            # INPUTS: Base on matches in proper order.
            for (src_i, src_dval), (dst_i, dst_dval) in matches['INPUTS'][tp]:
                # pass dvals
                if src_dval and dst_dval and tp in {'RGBA', 'VALUE_NAME'}:
                    new_node.inputs[dst_i].default_value = src_dval
                # Special case: switch to math
                if node.type in {'MIX_RGB', 'ALPHAOVER', 'ZCOMBINE'} and\
                        new_node.type == 'MATH' and\
                        tp == 'MAIN':
                    new_dst_dval = max(src_dval[0], src_dval[1], src_dval[2])
                    new_node.inputs[dst_i].default_value = new_dst_dval
                    if node.type == 'MIX_RGB':
                        if node.blend_type in [o[0] for o in operations]:
                            new_node.operation = node.blend_type
                # Special case: switch from math to some types
                if node.type == 'MATH' and\
                        new_node.type in {'MIX_RGB', 'ALPHAOVER', 'ZCOMBINE'} and\
                        tp == 'MAIN':
                    for i in range(3):
                        new_node.inputs[dst_i].default_value[i] = src_dval
                    if new_node.type == 'MIX_RGB':
                        if node.operation in [t[0] for t in blend_types]:
                            new_node.blend_type = node.operation
                        # Set Fac of MIX_RGB to 1.0
                        new_node.inputs[0].default_value = 1.0
                # make link only when dst matching input is not linked already.
                if node.inputs[src_i].links and not new_node.inputs[dst_i].links:
                    in_src_link = node.inputs[src_i].links[0]
                    in_dst_socket = new_node.inputs[dst_i]
                    connect_sockets(in_src_link.from_socket, in_dst_socket)
                    links.remove(in_src_link)
            # OUTPUTS: Base on matches in proper order.
            for (src_i, src_dval), (dst_i, dst_dval) in matches['OUTPUTS'][tp]:
                for out_src_link in node.outputs[src_i].links:
                    out_dst_socket = new_node.outputs[dst_i]
                    connect_sockets(out_dst_socket, out_src_link.to_socket)
        # relink rest inputs if possible, no criteria
        for src_inp in node.inputs:
            for dst_inp in new_node.inputs:
                if src_inp.links and not dst_inp.links:
                    src_link = src_inp.links[0]
                    connect_sockets(src_link.from_socket, dst_inp)
                    links.remove(src_link)
        # relink rest outputs if possible, base on node kind if any left.
        for src_o in node.outputs:
            for out_src_link in src_o.links:
                for dst_o in new_node.outputs:
                    if src_o.type == dst_o.type:
                        connect_sockets(dst_o, out_src_link.to_socket)
        # relink rest outputs no criteria if any left. Link all from first output.
        for src_o in node.outputs:
            for out_src_link in src_o.links:
                if new_node.outputs:
                    connect_sockets(new_node.outputs[0], out_src_link.to_socket)
        nodes.remove(node)
        force_update(context)
        return {'FINISHED'}
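For readers following the relink pass above, here is a toy illustration of the `matches` structure it consumes (values invented, not taken from the add-on): each criterion maps to a list pairing a source socket's (index, default value) with the destination's.

# Toy data only: matches[inout][criterion] holds
# ((src_index, src_default), (dst_index, dst_default)) pairs.
matches_example = {
    'INPUTS': {
        'MAIN': [], 'SHADER': [], 'VECTOR': [],
        'RGBA': [((1, (0.8, 0.8, 0.8, 1.0)), (1, (0.5, 0.5, 0.5, 1.0)))],
        'VALUE_NAME': [((2, 0.5), (3, 0.5))],
        'VALUE': [((2, 0.5), (0, 1.0))],
    },
    'OUTPUTS': {
        'MAIN': [((0, None), (0, None))], 'SHADER': [], 'RGBA': [],
        'VECTOR': [], 'VALUE_NAME': [], 'VALUE': [],
    },
}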


class NWMergeNodes(Operator, NWBase):
    bl_idname = "node.nw_merge_nodes"
    bl_label = "Merge Nodes"
@ -2976,7 +2787,6 @@ classes = (
    NWPreviewNode,
    NWFrameSelected,
    NWReloadImages,
    NWSwitchNodeType,
    NWMergeNodes,
    NWBatchChangeNodes,
    NWChangeMixFactor,
@ -162,7 +162,6 @@ class NWNodeWrangler(bpy.types.AddonPreferences):
#
# REGISTER/UNREGISTER CLASSES AND KEYMAP ITEMS
#
switch_category_menus = []
addon_keymaps = []
# kmi_defs entry: (identifier, key, action, CTRL, SHIFT, ALT, props, nice name)
# props entry: (property name, property value)
@ -392,28 +391,8 @@ def register():
                    setattr(kmi.properties, prop, value)
            addon_keymaps.append((km, kmi))

    # switch submenus
    switch_category_menus.clear()
    for cat in node_categories_iter(None):
        if cat.name not in ['Group', 'Script']:
            idname = f"NODE_MT_nw_switch_{cat.identifier}_submenu"
            switch_category_type = type(idname, (bpy.types.Menu,), {
                "bl_space_type": 'NODE_EDITOR',
                "bl_label": cat.name,
                "category": cat,
                "poll": cat.poll,
                "draw": interface.draw_switch_category_submenu,
            })

            switch_category_menus.append(switch_category_type)

            bpy.utils.register_class(switch_category_type)
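A note on the `type()` call above: it builds one `bpy.types.Menu` subclass per node category at register time. A minimal standalone sketch of the same pattern (all names invented):

import bpy

def make_category_menu(identifier, label):
    # Dynamically build and return a Menu subclass, as register() does above.
    def draw(self, context):
        self.layout.label(text=label)
    return type(
        f"NODE_MT_example_{identifier}_submenu",
        (bpy.types.Menu,),
        {"bl_label": label, "draw": draw},
    )

# Usage: create and register the generated class (unregister it on teardown).
menu_cls = make_category_menu("shader", "Shader")
bpy.utils.register_class(menu_cls)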


def unregister():
    for cat_types in switch_category_menus:
        bpy.utils.unregister_class(cat_types)
    switch_category_menus.clear()

    # keymaps
    for km, kmi in addon_keymaps:
@ -6,8 +6,8 @@ bl_info = {
    "name": "Collection Manager",
    "description": "Manage collections and their objects",
    "author": "Ryan Inch",
    "version": (2, 24, 8),
    "blender": (3, 0, 0),
    "version": (2, 24, 9),
    "blender": (4, 0, 0),
    "location": "View3D - Object Mode (Shortcut - M)",
    "warning": '',  # used for warning icon and text in addons panel
    "doc_url": "{BLENDER_MANUAL_URL}/addons/interface/collection_manager.html",
@ -50,7 +50,7 @@ def get_tool_text(self):
        return self["tool_text_color"]
    else:
        color = bpy.context.preferences.themes[0].user_interface.wcol_tool.text
        self["tool_text_color"] = color.r, color.g, color.b
        self["tool_text_color"] = color[0], color[1], color[2]
        return self["tool_text_color"]

def set_tool_text(self, values):
@ -62,7 +62,7 @@ def get_tool_text_sel(self):
        return self["tool_text_sel_color"]
    else:
        color = bpy.context.preferences.themes[0].user_interface.wcol_tool.text_sel
        self["tool_text_sel_color"] = color.r, color.g, color.b
        self["tool_text_sel_color"] = color[0], color[1], color[2]
        return self["tool_text_sel_color"]

def set_tool_text_sel(self, values):
@ -98,11 +98,11 @@ def get_tool_outline(self):
        return self["tool_outline_color"]
    else:
        color = bpy.context.preferences.themes[0].user_interface.wcol_tool.outline
        self["tool_outline_color"] = color.r, color.g, color.b
        self["tool_outline_color"] = color[0], color[1], color[2], color[3]
        return self["tool_outline_color"]

def set_tool_outline(self, values):
    self["tool_outline_color"] = values[0], values[1], values[2]
    self["tool_outline_color"] = values[0], values[1], values[2], values[3]


def get_menu_back_text(self):
@ -110,7 +110,7 @@ def get_menu_back_text(self):
        return self["menu_back_text_color"]
    else:
        color = bpy.context.preferences.themes[0].user_interface.wcol_menu_back.text
        self["menu_back_text_color"] = color.r, color.g, color.b
        self["menu_back_text_color"] = color[0], color[1], color[2]
        return self["menu_back_text_color"]

def set_menu_back_text(self, values):
@ -134,11 +134,11 @@ def get_menu_back_outline(self):
        return self["menu_back_outline_color"]
    else:
        color = bpy.context.preferences.themes[0].user_interface.wcol_menu_back.outline
        self["menu_back_outline_color"] = color.r, color.g, color.b
        self["menu_back_outline_color"] = color[0], color[1], color[2], color[3]
        return self["menu_back_outline_color"]

def set_menu_back_outline(self, values):
    self["menu_back_outline_color"] = values[0], values[1], values[2]
    self["menu_back_outline_color"] = values[0], values[1], values[2], values[3]


def get_tooltip_text(self):
@ -146,7 +146,7 @@ def get_tooltip_text(self):
        return self["tooltip_text_color"]
    else:
        color = bpy.context.preferences.themes[0].user_interface.wcol_tooltip.text
        self["tooltip_text_color"] = color.r, color.g, color.b
        self["tooltip_text_color"] = color[0], color[1], color[2]
        return self["tooltip_text_color"]

def set_tooltip_text(self, values):
@ -170,11 +170,11 @@ def get_tooltip_outline(self):
        return self["tooltip_outline_color"]
    else:
        color = bpy.context.preferences.themes[0].user_interface.wcol_tooltip.outline
        self["tooltip_outline_color"] = color.r, color.g, color.b
        self["tooltip_outline_color"] = color[0], color[1], color[2], color[3]
        return self["tooltip_outline_color"]

def set_tooltip_outline(self, values):
    self["tooltip_outline_color"] = values[0], values[1], values[2]
    self["tooltip_outline_color"] = values[0], values[1], values[2], values[3]


class CMPreferences(AddonPreferences):
@ -771,7 +771,7 @@ def draw_callback_px(self, context):
    main_window = self.areas["Main Window"]
    outline_color = addon_prefs.qcd_ogl_widget_menu_back_outline
    background_color = addon_prefs.qcd_ogl_widget_menu_back_inner
    draw_rounded_rect(main_window, line_shader, outline_color[:] + (1,), outline=True)
    draw_rounded_rect(main_window, line_shader, outline_color[:], outline=True)
    draw_rounded_rect(main_window, shader, background_color)

    # draw window title
@ -852,7 +852,7 @@ def draw_callback_px(self, context):

    # draw button
    outline_color = addon_prefs.qcd_ogl_widget_tool_outline
    draw_rounded_rect(button_area, line_shader, outline_color[:] + (1,), tl, tr, bl, br, outline=True)
    draw_rounded_rect(button_area, line_shader, outline_color[:], tl, tr, bl, br, outline=True)
    draw_rounded_rect(button_area, shader, button_color, tl, tr, bl, br)

    # ACTIVE OBJECT
@ -979,7 +979,7 @@ def draw_tooltip(self, context, shader, line_shader, message):

    outline_color = addon_prefs.qcd_ogl_widget_tooltip_outline
    background_color = addon_prefs.qcd_ogl_widget_tooltip_inner
    draw_rounded_rect(tooltip, line_shader, outline_color[:] + (1,), outline=True)
    draw_rounded_rect(tooltip, line_shader, outline_color[:], outline=True)
    draw_rounded_rect(tooltip, shader, background_color)

    line_pos = padding + line_height
@ -89,10 +89,10 @@ class ActionSlot(PropertyGroup, ActionSlotBase):

    target_space: EnumProperty(
        name="Transform Space",
        items=[("WORLD", "World Space", "World Space"),
               ("POSE", "Pose Space", "Pose Space"),
               ("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent"),
               ("LOCAL", "Local Space", "Local Space")],
        items=[("WORLD", "World Space", "World Space", 0),
               # ("POSE", "Pose Space", "Pose Space", 1),
               # ("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent", 2),
               ("LOCAL", "Local Space", "Local Space", 3)],
        default="LOCAL"
    )
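Why the items gain explicit numbers when two of them are disabled: Blender stores an enum property's value as its item number, so keeping `0` for WORLD and `3` for LOCAL preserves values already saved in existing files; re-numbering the remaining items would silently remap them. A minimal sketch of the 4-tuple item form (toy property group):

import bpy
from bpy.props import EnumProperty

class ExampleSlot(bpy.types.PropertyGroup):
    target_space: EnumProperty(
        name="Transform Space",
        items=[
            # (identifier, UI name, description, stable numeric ID)
            ("WORLD", "World Space", "World Space", 0),
            ("LOCAL", "Local Space", "Local Space", 3),
        ],
        default="LOCAL",
    )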
@ -153,9 +153,10 @@ def pVisRotExec(bone, active, context):

def pVisScaExec(bone, active, context):
    obj_bone = bone.id_data
    bone.scale = getmat(bone, active, context,
                        not obj_bone.data.bones[bone.name].use_inherit_scale)\
        .to_scale()
    bone.scale = getmat(
        bone, active, context,
        obj_bone.data.bones[bone.name].inherit_scale not in {'NONE', 'NONE_LEGACY'}
    ).to_scale()
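The rewrite tracks another 4.0 API removal: the boolean `Bone.use_inherit_scale` is gone, replaced by the `inherit_scale` enum, where only the 'NONE' and 'NONE_LEGACY' modes disable scale inheritance entirely. A small helper expressing the new test (helper name invented):

def bone_inherits_scale(pose_bone):
    # Hypothetical helper: look up the rest bone and report whether it
    # inherits any scale under the enum-based API.
    data_bone = pose_bone.id_data.data.bones[pose_bone.name]
    return data_bone.inherit_scale not in {'NONE', 'NONE_LEGACY'}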


def pDrwExec(bone, active, context):
@ -18,7 +18,7 @@
bl_info = {
    "name": "Sun Position",
    "author": "Michael Martin, Damien Picard",
    "version": (3, 5, 3),
    "version": (3, 5, 4),
    "blender": (3, 2, 0),
    "location": "World > Sun Position",
    "description": "Show sun position with objects and/or sky texture",
@ -6,6 +6,7 @@

import bpy
from bpy.props import FloatProperty, FloatVectorProperty
from bpy.app.translations import pgettext_iface as iface_
import gpu
from gpu_extras.batch import batch_for_shader
from mathutils import Vector
@ -77,7 +78,19 @@ class SUNPOS_OT_ShowHdr(bpy.types.Operator):
    @classmethod
    def poll(self, context):
        sun_props = context.scene.sun_pos_properties
        return sun_props.hdr_texture and sun_props.sun_object is not None
        if sun_props.sun_object is None:
            self.poll_message_set("Please select a Sun object")
            return False
        if not sun_props.hdr_texture:
            self.poll_message_set("Please select an Environment Texture node")
            return False

        nt = context.scene.world.node_tree.nodes
        env_tex_node = nt.get(context.scene.sun_pos_properties.hdr_texture)
        if env_tex_node is None or env_tex_node.type != "TEX_ENVIRONMENT":
            self.poll_message_set("Please select a valid Environment Texture node")
            return False
        return True
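`poll_message_set()` (available since Blender 3.0) lets a failing `poll` explain itself in the disabled button's tooltip instead of silently returning False, which is what the expanded checks above provide. A minimal standalone sketch:

import bpy

class EXAMPLE_OT_needs_object(bpy.types.Operator):
    bl_idname = "example.needs_object"
    bl_label = "Needs Active Object"

    @classmethod
    def poll(cls, context):
        if context.active_object is None:
            # Shown as the tooltip on the greyed-out button.
            cls.poll_message_set("Please select an object")
            return False
        return True

    def execute(self, context):
        return {'FINISHED'}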

    def update(self, context, event):
        sun_props = context.scene.sun_pos_properties
@ -248,8 +261,8 @@ class SUNPOS_OT_ShowHdr(bpy.types.Operator):
        self.initial_azimuth = context.scene.sun_pos_properties.hdr_azimuth

        context.workspace.status_text_set(
            "Enter/LMB: confirm, Esc/RMB: cancel,"
            " MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure")
            iface_("Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, "
                   "mouse wheel: zoom, Ctrl + mouse wheel: set exposure"))

        self._handle = bpy.types.SpaceView3D.draw_handler_add(
            draw_callback_px, (self, context), 'WINDOW', 'POST_PIXEL'
@ -116,6 +116,7 @@ class SunPosProperties(PropertyGroup):
        description="Enter coordinates from an online map",
        get=get_coordinates,
        set=set_coordinates,
        default="00°00′00.00″ 00°00′00.00″",
        options={'SKIP_SAVE'})

    latitude: FloatProperty(
@ -416,6 +416,19 @@ translations_tuple = (
     ("fr_FR", "Année",
      (False, ())),
     ),
    (("*", "Unknown projection"),
     (("scripts/addons/sun_position/hdr.py:181",),
      ()),
     ("fr_FR", "Projection inconnue",
      (False, ())),
     ),
    (("*", "Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure"),
     (("scripts/addons/sun_position/hdr.py:252",),
      ()),
     ("fr_FR", "Entrée/ClicG : Confirmer, Échap/ClicD : Annuler, ClicM : défiler, "
               "molette : zoom, Ctrl + molette : exposition",
      (False, ())),
     ),
    (("*", "Could not find 3D View"),
     (("scripts/addons/sun_position/hdr.py:263",),
      ()),
@ -428,12 +441,6 @@ translations_tuple = (
     ("fr_FR", "Veuillez utiliser un nœud de texture d’environnement",
      (False, ())),
     ),
    (("*", "Unknown projection"),
     (("scripts/addons/sun_position/hdr.py:181",),
      ()),
     ("fr_FR", "Projection inconnue",
      (False, ())),
     ),
    (("*", "Show options and info:"),
     (("scripts/addons/sun_position/properties.py:297",),
      ()),
@ -344,7 +344,9 @@ class UI_OT_i18n_addon_translation_export(Operator):
            if not lng.use:
                print("Skipping {} language ({}).".format(lng.name, lng.uid))
                continue
            uid = utils_i18n.find_best_isocode_matches(lng.uid, trans.trans.keys())
            translation_keys = {k for k in trans.trans.keys()
                                if k != self.settings.PARSER_TEMPLATE_ID}
            uid = utils_i18n.find_best_isocode_matches(lng.uid, translation_keys)
            if uid:
                uids.append(uid[0])

@ -357,8 +359,8 @@ class UI_OT_i18n_addon_translation_export(Operator):
            if not os.path.isfile(path):
                continue
            msgs = utils_i18n.I18nMessages(kind='PO', src=path, settings=self.settings)
            msgs.update(trans.msgs[self.settings.PARSER_TEMPLATE_ID])
            trans.msgs[uid] = msgs
            msgs.update(trans.trans[self.settings.PARSER_TEMPLATE_ID])
            trans.trans[uid] = msgs

        trans.write(kind='PO', langs=set(uids))