Fix #104875: Animated dupli instances export frozen in place #104876
@@ -641,7 +641,6 @@ class discombobulator_dodads_list(Menu):
     bl_idname = "OBJECT_MT_discombobulator_dodad_list"
     bl_label = "List of saved Doodads"
     bl_description = "List of the saved Doodad Object Names"
-    bl_options = {"REGISTER"}

     def draw(self, context):
         layout = self.layout
@@ -660,7 +659,6 @@ class discombob_help(Menu):
     bl_idname = "HELP_MT_discombobulator"
     bl_label = "Usage Information"
     bl_description = "Help"
-    bl_options = {"REGISTER"}

     def draw(self, context):
         layout = self.layout
@@ -7,7 +7,7 @@
 bl_info = {
     "name": "Is key Free",
     "author": "Antonio Vazquez (antonioya)",
-    "version": (1, 1, 2),
+    "version": (1, 1, 3),
     "blender": (2, 80, 0),
     "location": "Text Editor > Sidebar > Dev Tab",
     "description": "Find free shortcuts, inform about used and print a key list",
@@ -16,6 +16,7 @@ bl_info = {
 }

 import bpy

 from bpy.props import (
         BoolProperty,
         EnumProperty,
@@ -28,6 +29,7 @@ from bpy.types import (
         PropertyGroup,
         )

+import unicodedata

 # ------------------------------------------------------
 # Class to find keymaps
@@ -498,6 +500,15 @@ class IsKeyFreeRunExportKeys(Operator):
         except:
             return None

+    def unicodelen(self, string):
+        n = 0
+        for c in string:
+            if unicodedata.east_asian_width(c) in 'FWA':
+                n += 2
+            else:
+                n += 1
+        return n
+
     def execute(self, context):
         wm = bpy.context.window_manager
         from collections import defaultdict
@@ -536,7 +547,7 @@ class IsKeyFreeRunExportKeys(Operator):
             textblock.write("\n[%s]\nEntries: %s\n\n" % (ctx, len(mykeys[ctx])))
             line_k = sorted(mykeys[ctx])
             for keys in line_k:
-                add_ticks = "-" * (max_line - (len(keys[0]) + len(keys[1])))
+                add_ticks = "-" * (max_line - (self.unicodelen(keys[0]) + len(keys[1])))
                 entries = "{ticks} {entry}".format(ticks=add_ticks, entry=keys[1])
                 textblock.write("{name} {entry}\n".format(name=keys[0], entry=entries))
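
The padding fix above works because `len()` counts code points, not display columns. A minimal standalone sketch (not part of the patch) of why `unicodedata.east_asian_width()` is the right measure for lining up the dashes:

```python
import unicodedata

def display_width(string):
    # Fullwidth ('F'), Wide ('W') and Ambiguous ('A') glyphs, e.g. CJK
    # characters, occupy two terminal columns instead of one.
    return sum(2 if unicodedata.east_asian_width(c) in 'FWA' else 1 for c in string)

print(len("キー"), display_width("キー"))  # 2 vs. 4: len() under-counts the columns
```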
@@ -12,6 +12,7 @@ class StormHydraRenderEngine(bpy.types.HydraRenderEngine):

     bl_use_preview = True
     bl_use_gpu_context = True
+    bl_use_materialx = True

     bl_delegate_id = 'HdStormRendererPlugin'
@@ -5,7 +5,7 @@
 bl_info = {
     "name": "FBX format",
     "author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
-    "version": (5, 7, 1),
+    "version": (5, 7, 3),
     "blender": (3, 6, 0),
     "location": "File > Import-Export",
     "description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",
@@ -526,6 +526,70 @@ def blen_read_object_transform_preprocess(fbx_props, fbx_obj, rot_alt_mat, use_p

 # ---------
 # Animation
+def _blen_read_object_transform_do_anim(transform_data, lcl_translation_mat, lcl_rot_euler, lcl_scale_mat,
+                                        extra_pre_matrix, extra_post_matrix):
+    """Specialized version of blen_read_object_transform_do for animation that pre-calculates the non-animated matrices
+    and returns a function that calculates (base_mat @ geom_mat). See the comments in blen_read_object_transform_do for
+    a full description of what this function is doing.
+
+    The lcl_translation_mat, lcl_rot_euler and lcl_scale_mat arguments should have their values updated each frame and
+    then calling the returned function will calculate the matrix for the current frame.
+
+    extra_pre_matrix and extra_post_matrix are any extra matrices to multiply first/last."""
+    # Translation
+    geom_loc = Matrix.Translation(transform_data.geom_loc)
+
+    # Rotation
+    def to_rot_xyz(rot):
+        # All the rotations that can be precalculated have a fixed XYZ order.
+        return Euler(convert_deg_to_rad_iter(rot), 'XYZ').to_matrix().to_4x4()
+    pre_rot = to_rot_xyz(transform_data.pre_rot)
+    pst_rot_inv = to_rot_xyz(transform_data.pst_rot).inverted_safe()
+    geom_rot = to_rot_xyz(transform_data.geom_rot)
+
+    # Offsets and pivots
+    rot_ofs = Matrix.Translation(transform_data.rot_ofs)
+    rot_piv = Matrix.Translation(transform_data.rot_piv)
+    rot_piv_inv = rot_piv.inverted_safe()
+    sca_ofs = Matrix.Translation(transform_data.sca_ofs)
+    sca_piv = Matrix.Translation(transform_data.sca_piv)
+    sca_piv_inv = sca_piv.inverted_safe()
+
+    # Scale
+    geom_scale = Matrix()
+    geom_scale[0][0], geom_scale[1][1], geom_scale[2][2] = transform_data.geom_sca
+
+    # Some matrices can be combined in advance, using the associative property of matrix multiplication, so that less
+    # matrix multiplication is required each frame.
+    geom_mat = geom_loc @ geom_rot @ geom_scale
+    post_lcl_translation = rot_ofs @ rot_piv @ pre_rot
+    post_lcl_rotation = transform_data.rot_alt_mat @ pst_rot_inv @ rot_piv_inv @ sca_ofs @ sca_piv
+    post_lcl_scaling = sca_piv_inv @ geom_mat @ extra_post_matrix
+
+    # Get the bound to_matrix method to avoid re-binding it on each call.
+    lcl_rot_euler_to_matrix_3x3 = lcl_rot_euler.to_matrix
+    # Get the unbound Matrix.to_4x4 method to avoid having to look it up again on each call.
+    matrix_to_4x4 = Matrix.to_4x4
+
+    if extra_pre_matrix == Matrix():
+        # There aren't any other matrices that must be multiplied before lcl_translation_mat that extra_pre_matrix can
+        # be combined with, so skip extra_pre_matrix when it's the identity matrix.
+        return lambda: (lcl_translation_mat @
+                        post_lcl_translation @
+                        matrix_to_4x4(lcl_rot_euler_to_matrix_3x3()) @
+                        post_lcl_rotation @
+                        lcl_scale_mat @
+                        post_lcl_scaling)
+    else:
+        return lambda: (extra_pre_matrix @
+                        lcl_translation_mat @
+                        post_lcl_translation @
+                        matrix_to_4x4(lcl_rot_euler_to_matrix_3x3()) @
+                        post_lcl_rotation @
+                        lcl_scale_mat @
+                        post_lcl_scaling)
+
+
 def _transformation_curves_gen(item, values_arrays, channel_keys):
     """Yields flattened location/rotation/scaling values for imported PoseBone/Object Lcl Translation/Rotation/Scaling
     animation curve values.
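
The point of this new helper is that only the Lcl Translation/Rotation/Scaling terms change per frame, so every constant factor around them can be folded once up front thanks to the associativity of matrix multiplication. A minimal sketch of the same optimization using plain NumPy 4x4 matrices (illustrative names, not from the patch):

```python
import numpy as np

rng = np.random.default_rng(0)
pre, a, b, post = (rng.standard_normal((4, 4)) for _ in range(4))

def per_frame_naive(anim_mat):
    # Five matrix multiplications on every frame.
    return pre @ a @ anim_mat @ b @ post

# Fold the constant factors once; per-frame cost drops to two multiplications.
left = pre @ a
right = b @ post

def per_frame_folded(anim_mat):
    return left @ anim_mat @ right

anim = rng.standard_normal((4, 4))
assert np.allclose(per_frame_naive(anim), per_frame_folded(anim))
```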
@@ -547,77 +611,85 @@ def _transformation_curves_gen(item, values_arrays, channel_keys):
     rot_eul_prev = bl_obj.rotation_euler.copy()
     rot_quat_prev = bl_obj.rotation_quaternion.copy()

-    # Pre-compute inverted local rest matrix of the bone, if relevant.
-    restmat_inv = item.get_bind_matrix().inverted_safe() if item.is_bone else None
+    # Pre-compute combined pre-matrix
+    # Remove that rest pose matrix from current matrix (also in parent space) by computing the inverted local rest
+    # matrix of the bone, if relevant.
+    combined_pre_matrix = item.get_bind_matrix().inverted_safe() if item.is_bone else Matrix()
+    # item.pre_matrix will contain any correction for a parent's correction matrix or the global matrix
+    if item.pre_matrix:
+        combined_pre_matrix @= item.pre_matrix

-    transform_prop_to_attr = {
-        b'Lcl Translation': transform_data.loc,
-        b'Lcl Rotation': transform_data.rot,
-        b'Lcl Scaling': transform_data.sca,
-    }
+    # Pre-compute combined post-matrix
+    # Compensate for changes in the local matrix during processing
+    combined_post_matrix = item.anim_compensation_matrix.copy() if item.anim_compensation_matrix else Matrix()
+    # item.post_matrix will contain any correction for lights, camera and bone orientation
+    if item.post_matrix:
+        combined_post_matrix @= item.post_matrix
+
+    # Create matrices/euler from the initial transformation values of this item.
+    # These variables will be updated in-place as we iterate through each frame.
+    lcl_translation_mat = Matrix.Translation(transform_data.loc)
+    lcl_rotation_eul = Euler(transform_data.rot, transform_data.rot_ord)
+    lcl_scaling_mat = Matrix()
+    lcl_scaling_mat[0][0], lcl_scaling_mat[1][1], lcl_scaling_mat[2][2] = transform_data.sca
+
+    # Create setters into lcl_translation_mat, lcl_rotation_eul and lcl_scaling_mat for each values_array and convert
+    # any rotation values into radians.
+    lcl_setters = []
+    values_arrays_converted = []
+    for values_array, (fbx_prop, channel) in zip(values_arrays, channel_keys):
+        if fbx_prop == b'Lcl Translation':
+            # lcl_translation_mat.translation[channel] = value
+            setter = partial(setitem, lcl_translation_mat.translation, channel)
+        elif fbx_prop == b'Lcl Rotation':
+            # FBX rotations are in degrees, but Blender uses radians, so convert all rotation values in advance.
+            values_array = np.deg2rad(values_array)
+            # lcl_rotation_eul[channel] = value
+            setter = partial(setitem, lcl_rotation_eul, channel)
+        else:
+            assert(fbx_prop == b'Lcl Scaling')
+            # lcl_scaling_mat[channel][channel] = value
+            setter = partial(setitem, lcl_scaling_mat[channel], channel)
+        lcl_setters.append(setter)
+        values_arrays_converted.append(values_array)

-    # Create a setter into transform_data for each values array. e.g. a values array for 'Lcl Scaling' with channel == 2
-    # would set transform_data.sca[2].
-    setters = [partial(setitem, transform_prop_to_attr[fbx_prop], channel) for fbx_prop, channel in channel_keys]
     # Create an iterator that gets one value from each array. Each iterated tuple will be all the imported
     # Lcl Translation/Lcl Rotation/Lcl Scaling values for a single frame, in that order.
     # Note that an FBX animation does not have to animate all the channels, so only the animated channels of each
     # property will be present.
     # .data, the memoryview of an np.ndarray, is faster to iterate than the ndarray itself.
-    frame_values_it = zip(*(arr.data for arr in values_arrays))
+    frame_values_it = zip(*(arr.data for arr in values_arrays_converted))

-    # Pre-get/calculate these to slightly reduce the work done inside the loop.
-    anim_compensation_matrix = item.anim_compensation_matrix
-    do_anim_compensation_matrix = bool(anim_compensation_matrix)
-
-    pre_matrix = item.pre_matrix
-    do_pre_matrix = bool(pre_matrix)
-
-    post_matrix = item.post_matrix
-    do_post_matrix = bool(post_matrix)
-
-    do_restmat_inv = bool(restmat_inv)
-
-    decompose = Matrix.decompose
-    to_axis_angle = Quaternion.to_axis_angle
-    to_euler = Quaternion.to_euler
+    # Getting the unbound methods in advance avoids having to look them up again on each call within the loop.
+    mat_decompose = Matrix.decompose
+    quat_to_axis_angle = Quaternion.to_axis_angle
+    quat_to_euler = Quaternion.to_euler
+    quat_dot = Quaternion.dot
+
+    calc_mat = _blen_read_object_transform_do_anim(transform_data,
+                                                   lcl_translation_mat, lcl_rotation_eul, lcl_scaling_mat,
+                                                   combined_pre_matrix, combined_post_matrix)

     # Iterate through the values for each frame.
     for frame_values in frame_values_it:
-        # Set each value into its corresponding attribute in transform_data.
-        for setter, value in zip(setters, frame_values):
-            setter(value)
+        # Set each value into its corresponding lcl matrix/euler.
+        for lcl_setter, value in zip(lcl_setters, frame_values):
+            lcl_setter(value)

         # Calculate the updated matrix for this frame.
-        mat, _, _ = blen_read_object_transform_do(transform_data)
-
-        # compensate for changes in the local matrix during processing
-        if do_anim_compensation_matrix:
-            mat = mat @ anim_compensation_matrix
-
-        # apply pre- and post matrix
-        # post-matrix will contain any correction for lights, camera and bone orientation
-        # pre-matrix will contain any correction for a parent's correction matrix or the global matrix
-        if do_pre_matrix:
-            mat = pre_matrix @ mat
-        if do_post_matrix:
-            mat = mat @ post_matrix
-
-        # And now, remove that rest pose matrix from current mat (also in parent space).
-        if do_restmat_inv:
-            mat = restmat_inv @ mat
+        mat = calc_mat()

         # Now we have a virtual matrix of transform from AnimCurves, we can yield keyframe values!
-        loc, rot, sca = decompose(mat)
+        loc, rot, sca = mat_decompose(mat)
         if rot_mode == 'QUATERNION':
-            if rot_quat_prev.dot(rot) < 0.0:
+            if quat_dot(rot_quat_prev, rot) < 0.0:
                 rot = -rot
             rot_quat_prev = rot
         elif rot_mode == 'AXIS_ANGLE':
-            vec, ang = to_axis_angle(rot)
+            vec, ang = quat_to_axis_angle(rot)
             rot = ang, vec.x, vec.y, vec.z
         else:  # Euler
-            rot = to_euler(rot, rot_mode, rot_eul_prev)
+            rot = quat_to_euler(rot, rot_mode, rot_eul_prev)
             rot_eul_prev = rot

     # Yield order matches the order that the location/rotation/scale FCurves are created in.
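
The QUATERNION branch above preserves keyframe continuity: `q` and `-q` describe the same rotation, but if consecutive keys sit on opposite signs the interpolation takes the long way around. A self-contained sketch of the sign-flip rule (plain tuples, illustrative only):

```python
def keep_quat_continuity(quats):
    # Negate any quaternion whose dot product with the previous key is
    # negative, so consecutive keys stay on the same hemisphere.
    out = [quats[0]]
    for q in quats[1:]:
        if sum(a * b for a, b in zip(out[-1], q)) < 0.0:
            q = tuple(-c for c in q)
        out.append(q)
    return out

keys = [(1.0, 0.0, 0.0, 0.0), (-0.999, 0.01, 0.0, 0.0)]
print(keep_quat_continuity(keys))  # second key is sign-flipped to stay near the first
```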
@@ -626,39 +698,6 @@ def _transformation_curves_gen(item, values_arrays, channel_keys):
     yield from sca


-def blen_read_animation_channel_curves(curves):
-    """Read one or (very rarely) more animation curves, that affect a single channel of a single property, from FBX
-    data.
-
-    When there are multiple curves, they will be combined into a single sorted animation curve with later curves taking
-    precedence when the curves contain duplicate times.
-
-    It is expected that there will almost never be more than a single curve to read because FBX's default animation
-    system only uses the first curve assigned to a channel.
-
-    Returns an array of sorted, unique FBX keyframe times and an array of values for each of those keyframe times."""
-    if len(curves) > 1:
-        times_and_values_tuples = list(map(blen_read_single_animation_curve, curves))
-        # The FBX animation system's default implementation only uses the first curve assigned to a channel.
-        # Additional curves per channel are allowed by the FBX specification, but the handling of these curves is
-        # considered the responsibility of the application that created them. Note that each curve node is expected to
-        # have a unique set of channels, so these additional curves with the same channel would have to belong to
-        # separate curve nodes. See the FBX SDK documentation for FbxAnimCurveNode.
-
-        # Combine the curves together to produce a single array of sorted keyframe times and a single array of values.
-        # The arrays are concatenated in reverse so that if there are duplicate times in the read curves, then only the
-        # value of the last occurrence is kept.
-        all_times = np.concatenate([t[0] for t in reversed(times_and_values_tuples)])
-        all_values = np.concatenate([t[1] for t in reversed(times_and_values_tuples)])
-        # Get the unique, sorted times and the index in all_times of the first occurrence of each unique value.
-        sorted_unique_times, unique_indices_in_all_times = np.unique(all_times, return_index=True)
-
-        values_of_sorted_unique_times = all_values[unique_indices_in_all_times]
-        return sorted_unique_times, values_of_sorted_unique_times
-    else:
-        return blen_read_single_animation_curve(curves[0])
-
-
 def _combine_curve_keyframe_times(times_and_values_tuples, initial_values):
     """Combine multiple parsed animation curves, that affect different channels, such that every animation curve
     contains the keyframes from every other curve, interpolating the values for the newly inserted keyframes in each
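
For reference, the removed combiner deduplicated overlapping curves with a compact NumPy idiom: concatenate the arrays in reverse so that, where key times collide, `np.unique(..., return_index=True)` keeps the value from the curve read last. A small sketch with made-up data:

```python
import numpy as np

t1, v1 = np.array([0.0, 1.0, 2.0]), np.array([10.0, 11.0, 12.0])
t2, v2 = np.array([1.0, 3.0]), np.array([99.0, 13.0])  # later curve, overrides t=1.0

# Later curves first, so their values become the first occurrence of each time.
all_t = np.concatenate([t2, t1])
all_v = np.concatenate([v2, v1])
uniq_t, first_idx = np.unique(all_t, return_index=True)
print(uniq_t, all_v[first_idx])  # [0. 1. 2. 3.] [10. 99. 12. 13.]
```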
@@ -779,8 +818,8 @@ def _convert_fbx_time_to_blender_time(key_times, blen_start_offset, fbx_start_of
     return key_times


-def blen_read_single_animation_curve(fbx_curve):
-    """Read a single animation curve from FBX data.
+def blen_read_animation_curve(fbx_curve):
+    """Read an animation curve from FBX data.

     The parsed keyframe times are guaranteed to be strictly increasing."""
     key_times = parray_as_ndarray(elem_prop_first(elem_find_first(fbx_curve, b'KeyTime')))
@@ -851,11 +890,19 @@ def blen_read_animations_action_item(action, item, cnodes, fps, anim_offset, glo
     """
     from bpy.types import Object, PoseBone, ShapeKey, Material, Camera

-    fbx_curves: dict[bytes, dict[int, list[FBXElem]]] = {}
+    fbx_curves: dict[bytes, dict[int, FBXElem]] = {}
     for curves, fbxprop in cnodes.values():
         channels_dict = fbx_curves.setdefault(fbxprop, {})
         for (fbx_acdata, _blen_data), channel in curves.values():
-            channels_dict.setdefault(channel, []).append(fbx_acdata)
+            if channel in channels_dict:
+                # Ignore extra curves when one has already been found for this channel because FBX's default animation
+                # system implementation only uses the first curve assigned to a channel.
+                # Additional curves per channel are allowed by the FBX specification, but the handling of these curves
+                # is considered the responsibility of the application that created them. Note that each curve node is
+                # expected to have a unique set of channels, so these additional curves with the same channel would have
+                # to belong to separate curve nodes. See the FBX SDK documentation for FbxAnimCurveNode.
+                continue
+            channels_dict[channel] = fbx_acdata

     # Leave if no curves are attached (if a blender curve is attached to scale but without keys it defaults to 0).
     if len(fbx_curves) == 0:
@@ -894,23 +941,23 @@ def blen_read_animations_action_item(action, item, cnodes, fps, anim_offset, glo
                    for prop, nbr_channels, grpname in props for channel in range(nbr_channels)]

     if isinstance(item, Material):
-        for fbxprop, channel_to_curves in fbx_curves.items():
+        for fbxprop, channel_to_curve in fbx_curves.items():
             assert(fbxprop == b'DiffuseColor')
-            for channel, curves in channel_to_curves.items():
+            for channel, curve in channel_to_curve.items():
                 assert(channel in {0, 1, 2})
                 blen_curve = blen_curves[channel]
-                fbx_key_times, values = blen_read_animation_channel_curves(curves)
+                fbx_key_times, values = blen_read_animation_curve(curve)
                 blen_store_keyframes(fbx_key_times, blen_curve, values, anim_offset, fps)

     elif isinstance(item, ShapeKey):
         deform_values = shape_key_deforms.setdefault(item, [])
-        for fbxprop, channel_to_curves in fbx_curves.items():
+        for fbxprop, channel_to_curve in fbx_curves.items():
             assert(fbxprop == b'DeformPercent')
-            for channel, curves in channel_to_curves.items():
+            for channel, curve in channel_to_curve.items():
                 assert(channel == 0)
                 blen_curve = blen_curves[channel]

-                fbx_key_times, values = blen_read_animation_channel_curves(curves)
+                fbx_key_times, values = blen_read_animation_curve(curve)
                 # A fully activated shape key in FBX DeformPercent is 100.0 whereas it is 1.0 in Blender.
                 values = values / 100.0
                 blen_store_keyframes(fbx_key_times, blen_curve, values, anim_offset, fps)
@@ -921,15 +968,15 @@ def blen_read_animations_action_item(action, item, cnodes, fps, anim_offset, glo
         deform_values.append(values.max())

     elif isinstance(item, Camera):
-        for fbxprop, channel_to_curves in fbx_curves.items():
+        for fbxprop, channel_to_curve in fbx_curves.items():
             is_focus_distance = fbxprop == b'FocusDistance'
             assert(fbxprop == b'FocalLength' or is_focus_distance)
-            for channel, curves in channel_to_curves.items():
+            for channel, curve in channel_to_curve.items():
                 assert(channel == 0)
                 # The indices are determined by the creation of the `props` list above.
                 blen_curve = blen_curves[1 if is_focus_distance else 0]

-                fbx_key_times, values = blen_read_animation_channel_curves(curves)
+                fbx_key_times, values = blen_read_animation_curve(curve)
                 if is_focus_distance:
                     # Remap the imported values from FBX to Blender.
                     values = values / 1000.0
@@ -950,13 +997,13 @@ def blen_read_animations_action_item(action, item, cnodes, fps, anim_offset, glo
         times_and_values_tuples = []
         initial_values = []
         channel_keys = []
-        for fbxprop, channel_to_curves in fbx_curves.items():
+        for fbxprop, channel_to_curve in fbx_curves.items():
             if fbxprop not in transform_prop_to_attr:
                 # Currently, we only care about transformation curves.
                 continue
-            for channel, curves in channel_to_curves.items():
+            for channel, curve in channel_to_curve.items():
                 assert(channel in {0, 1, 2})
-                fbx_key_times, values = blen_read_animation_channel_curves(curves)
+                fbx_key_times, values = blen_read_animation_curve(curve)

                 channel_keys.append((fbxprop, channel))

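The dict now stores a single curve per channel instead of a list, implementing FBX's default "first curve assigned to a channel wins" rule. The keep-first pattern in isolation (placeholder data, not the importer's types):

```python
channels_dict = {}
incoming = [(0, "curveA"), (0, "curveB"), (1, "curveC")]  # hypothetical (channel, curve) pairs
for channel, curve in incoming:
    if channel in channels_dict:
        continue  # extra curves on an already-claimed channel are ignored
    channels_dict[channel] = curve
print(channels_dict)  # {0: 'curveA', 1: 'curveC'}
```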
@@ -386,48 +386,22 @@ class RigifyBoneCollectionReference(bpy.types.PropertyGroup):
     uid: IntProperty(name="Unique ID", default=-1)

     def find_collection(self, *, update=False, raise_error=False) -> bpy.types.BoneCollection | None:
-        uid = self.uid
-        if uid < 0:
-            return None
-
-        arm = self.id_data.data
-
-        if name := self.get("name", ""):
-            name_coll = arm.collections.get(name)
-
-            if name_coll and name_coll.rigify_uid == uid:
-                return name_coll
-
-        for coll in arm.collections:
-            if coll.rigify_uid == uid:
-                if update:
-                    self["name"] = coll.name
-                return coll
-
-        if raise_error:
-            raise utils.errors.MetarigError(f"Broken bone collection reference: {name} #{uid}")
-
-        return None
+        return utils.layers.resolve_collection_reference(self.id_data, self, update=update, raise_error=raise_error)

     def set_collection(self, coll: bpy.types.BoneCollection | None):
-        if not coll:
+        if coll is None:
             self.uid = -1
             self["name"] = ""
-            return
-
-        if coll.rigify_uid < 0:
-            coll.rigify_uid = utils.misc.choose_next_uid(coll.id_data.collections, "rigify_uid")
-
-        self.uid = coll.rigify_uid
-        self["name"] = coll.name
+        else:
+            self.uid = utils.layers.ensure_collection_uid(coll)
+            self["name"] = coll.name

     def _name_get(self):
         if coll := self.find_collection(update=False):
             return coll.name

         if self.uid >= 0:
-            if name := self.get("name", ""):
-                return f"? {name} #{self.uid}"
+            return self.get('name') or '?'

         return ""
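`find_collection` now delegates to the shared resolver, which tries the cached name first, falls back to a uid scan, and can heal a stale cached name. A dict-based sketch of that two-step lookup (not the Blender API, just the shape of the logic):

```python
def resolve(collections, ref, update=False):
    # `collections` maps name -> uid here; in Rigify it is arm.collections.
    name, uid = ref.get("name", ""), ref["uid"]
    if uid < 0:
        return None
    if name and collections.get(name) == uid:
        return name  # exact match of cached name and uid
    for coll_name, coll_uid in collections.items():
        if coll_uid == uid:
            if update:
                ref["name"] = coll_name  # heal the stale cached name
            return coll_name
    return None

ref = {"name": "Old Name", "uid": 7}
print(resolve({"Renamed": 7}, ref, update=True), ref["name"])  # Renamed Renamed
```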
@@ -10,7 +10,8 @@ from typing import Optional, TYPE_CHECKING

 from .utils.errors import MetarigError
 from .utils.bones import new_bone
-from .utils.layers import ORG_COLLECTION, MCH_COLLECTION, DEF_COLLECTION, ROOT_COLLECTION, set_bone_layers
+from .utils.layers import (ORG_COLLECTION, MCH_COLLECTION, DEF_COLLECTION, ROOT_COLLECTION, set_bone_layers,
+                           validate_collection_references)
 from .utils.naming import (ORG_PREFIX, MCH_PREFIX, DEF_PREFIX, ROOT_NAME, make_original_name,
                            change_name_side, get_name_side, Side)
 from .utils.widgets import WGT_PREFIX, WGT_GROUP_PREFIX
@@ -214,9 +215,11 @@ class Generator(base_generate.BaseGenerator):
     def ensure_root_bone_collection(self):
         collections = self.metarig.data.collections

+        validate_collection_references(self.metarig)
+
         if ROOT_COLLECTION not in collections:
             coll = collections.new(ROOT_COLLECTION)
-            coll.rigify_ui_row = choose_next_uid(collections, 'rigify_ui_row', min_value=1)
+            coll.rigify_ui_row = 2 + choose_next_uid(collections, 'rigify_ui_row', min_value=1)

     def __duplicate_rig(self):
         obj = self.obj
@@ -92,7 +92,7 @@ def make_metarig_add_execute(module):


 def make_metarig_menu_func(bl_idname: str, text: str):
-    """ For some reason lambda's don't work for adding multiple menu
+    """ For some reason lambdas don't work for adding multiple menu
     items, so we use this instead to generate the functions.
     """
     def metarig_menu(self, _context):
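The docstring's "lambdas don't work" refers to Python's late-binding closures: a lambda created in a loop captures the loop variable itself, not its value at that iteration, so every menu entry would end up pointing at the last metarig. A minimal demonstration (generic Python, unrelated to the Blender API):

```python
funcs_bad = [lambda: name for name in ("A", "B")]
print([f() for f in funcs_bad])  # ['B', 'B'] - both lambdas see the final value

def make(name):
    def func():
        return name  # bound to this call's own `name`
    return func

funcs_good = [make(name) for name in ("A", "B")]
print([f() for f in funcs_good])  # ['A', 'B']
```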
@@ -52,9 +52,7 @@ def create(obj): # noqa
     arm.collections.remove(bcoll)

     def add_bone_collection(name, *, ui_row=0, ui_title='', sel_set=False, color_set_id=0):
-        uid = len(arm.collections)
         new_bcoll = arm.collections.new(name)
-        new_bcoll.rigify_uid = uid
         new_bcoll.rigify_ui_row = ui_row
         new_bcoll.rigify_ui_title = ui_title
         new_bcoll.rigify_sel_set = sel_set
@@ -5,7 +5,7 @@
 import bpy
 import importlib

-from ..utils.layers import is_collection_ref_list_prop, mirror_ref_list
+from ..utils.layers import REFS_TOGGLE_SUFFIX, REFS_LIST_SUFFIX, is_collection_ref_list_prop, copy_ref_list
 from ..utils.naming import Side, get_name_base_and_sides, mirror_name
 from ..utils.misc import property_to_python

@@ -59,10 +59,10 @@ class POSE_OT_rigify_copy_single_parameter(bpy.types.Operator):
         num_copied = 0

         # If copying collection references, include the toggle
-        is_coll_refs = self.property_name.endswith("_coll_refs")
+        is_coll_refs = self.property_name.endswith(REFS_LIST_SUFFIX)
         if is_coll_refs:
             assert is_collection_ref_list_prop(value)
-            coll_refs_toggle_prop = self.property_name[:-10] + "_layers_extra"
+            coll_refs_toggle_prop = self.property_name[:-len(REFS_LIST_SUFFIX)] + REFS_TOGGLE_SUFFIX
             coll_refs_toggle_val = getattr(params, coll_refs_toggle_prop)

         # Copy to different bones of appropriate rig types
@@ -73,8 +73,6 @@ class POSE_OT_rigify_copy_single_parameter(bpy.types.Operator):
                 rig_class = get_rig_class(rig_type)

                 if rig_class and issubclass(rig_class, filter_rig_class):
-                    new_value = value
-
                     # If mirror requested and copying to a different side bone, mirror the value
                     do_mirror = False

@@ -87,11 +85,10 @@ class POSE_OT_rigify_copy_single_parameter(bpy.types.Operator):
                     # Assign the final value
                     sel_params = get_rigify_params(sel_pbone)

-                    if is_coll_refs and do_mirror:
-                        mirror_ref_list(getattr(sel_params, self.property_name), value)
-                    elif do_mirror:
-                        setattr(sel_params, self.property_name, mirror_name(value))
+                    if is_coll_refs:
+                        copy_ref_list(getattr(sel_params, self.property_name), value, mirror=do_mirror)
                     else:
+                        new_value = mirror_name(value) if do_mirror else value
                         setattr(sel_params, self.property_name, new_value)

                     if is_coll_refs:
@@ -155,10 +152,10 @@ def copy_rigify_params(from_bone: bpy.types.PoseBone, to_bone: bpy.types.PoseBon
     to_params_typed = get_rigify_params(to_bone)

     for prop_name in param_dict.keys():
-        if prop_name.endswith("_coll_refs"):
+        if prop_name.endswith(REFS_LIST_SUFFIX):
             ref_list = getattr(from_params_typed, prop_name)
             if is_collection_ref_list_prop(ref_list):
-                mirror_ref_list(getattr(to_params_typed, prop_name), ref_list)
+                copy_ref_list(getattr(to_params_typed, prop_name), ref_list, mirror=True)
             else:
                 to_bone['rigify_parameters'] = param_dict
         else:
@@ -405,10 +405,6 @@ def create_sample(obj):
     bpy.ops.object.mode_set(mode='EDIT')
     arm = obj.data

-    def assign_bone_collections(pose_bone):
-        if active := arm.collections.active:
-            active.assign(pose_bone)
-
     bones = {}

     bone = arm.edit_bones.new('thigh.L')
@@ -178,7 +178,7 @@ class Rig(BasicChainRig):

         return parent

-    def get_control_node_layers(self, node: ControlBoneNode) -> list[bool]:
+    def get_control_node_layers(self, node: ControlBoneNode) -> list[bpy.types.BoneCollection]:
         layers = None

         # Secondary Layers used for the middle pivot
rigify/ui.py
@@ -16,7 +16,7 @@ from typing import TYPE_CHECKING, Callable, Any
 from mathutils import Color

 from .utils.errors import MetarigError
-from .utils.layers import ROOT_COLLECTION
+from .utils.layers import ROOT_COLLECTION, validate_collection_references
 from .utils.rig import write_metarig, get_rigify_type, get_rigify_target_rig, \
     get_rigify_colors, get_rigify_params
 from .utils.widgets import write_widget
@@ -306,6 +306,8 @@ class DATA_PT_rigify_collection_list(bpy.types.Panel):
         col.operator("armature.collection_move", icon='TRIA_UP', text="").direction = 'UP'
         col.operator("armature.collection_move", icon='TRIA_DOWN', text="").direction = 'DOWN'

+        layout.operator(operator='armature.rigify_validate_layers')
+
         if active_coll:
             col = layout.column()
             col.use_property_split = True
@@ -1062,6 +1064,29 @@ class UpgradeMetarigLayers(bpy.types.Operator):
         return {'FINISHED'}


+class ValidateMetarigLayers(bpy.types.Operator):
+    """Validates references from rig component settings to bone collections"""
+
+    bl_idname = "armature.rigify_validate_layers"
+    bl_label = "Validate Collection References"
+    bl_description = 'Validate references from rig component settings to bone collections. Always run this both '\
+                     'before and after joining two metarig armature objects into one to avoid glitches'
+    bl_options = {'UNDO'}
+
+    @classmethod
+    def poll(cls, context):
+        return is_valid_metarig(context) and context.object.mode != 'EDIT'
+
+    def execute(self, context):
+        obj = verify_armature_obj(context.object)
+        messages = validate_collection_references(obj)
+        for msg in messages:
+            self.report({'WARNING'}, msg)
+        if not messages:
+            self.report({'INFO'}, "No issues detected.")
+        return {'FINISHED'}
+
+
 class Sample(bpy.types.Operator):
     """Create a sample metarig to be modified before generating the final rig"""

@@ -1695,6 +1720,7 @@ classes = (
     Generate,
     UpgradeMetarigTypes,
     UpgradeMetarigLayers,
+    ValidateMetarigLayers,
     Sample,
     VIEW3D_MT_rigify,
     EncodeMetarig,
@@ -7,7 +7,7 @@ from bpy.types import Action, Mesh, Armature
 from bl_math import clamp

 from .errors import MetarigError
-from .misc import MeshObject, IdPropSequence
+from .misc import MeshObject, IdPropSequence, verify_mesh_obj
 from .naming import Side, get_name_side, change_name_side, mirror_name
 from .bones import BoneUtilityMixin
 from .mechanism import MechanismUtilityMixin, driver_var_transform, quote_property
@@ -415,7 +415,7 @@ class ActionLayerBuilder(GeneratorPlugin, BoneUtilityMixin, MechanismUtilityMixi
     def rig_bones(self):
         if self.layers:
             self.child_meshes = [
-                child
+                verify_mesh_obj(child)
                 for child in self.generator.obj.children_recursive
                 if child.type == 'MESH'
             ]
@@ -3,12 +3,19 @@
 # SPDX-License-Identifier: GPL-2.0-or-later

 import bpy
+import random
+import re
+import zlib

+from collections import defaultdict
 from typing import TYPE_CHECKING, Sequence, Optional, Mapping, Iterable, Any

 from bpy.types import bpy_prop_collection  # noqa
 from bpy.types import Bone, UILayout, Object, PoseBone, Armature, BoneCollection, EditBone
+from idprop.types import IDPropertyGroup
+from rna_prop_ui import rna_idprop_value_to_python

+from .errors import MetarigError
 from .misc import ArmatureObject
 from .naming import mirror_name_fuzzy

@@ -22,6 +29,9 @@ DEF_COLLECTION = "DEF"
 ORG_COLLECTION = "ORG"
 MCH_COLLECTION = "MCH"

+REFS_TOGGLE_SUFFIX = '_layers_extra'
+REFS_LIST_SUFFIX = "_coll_refs"
+

 def set_bone_layers(bone: Bone | EditBone, layers: Sequence[BoneCollection], *, combine=False):
     if not layers:
@@ -61,13 +71,148 @@ def is_collection_ref_list_prop(param: Any) -> bool:
             all(isinstance(item, RigifyBoneCollectionReference) for item in param))


-def mirror_ref_list(to_ref_list, from_ref_list):
+def copy_ref_list(to_ref_list, from_ref_list, *, mirror=False):
     """Copy collection references between two RigifyBoneCollectionReference lists."""
     to_ref_list.clear()

     for ref in from_ref_list:
         to_ref = to_ref_list.add()
         to_ref['uid'] = ref['uid']
         to_ref['name'] = ref['name']
-        to_ref.name = mirror_name_fuzzy(ref.name)
+
+        if mirror:
+            to_ref.name = mirror_name_fuzzy(ref.name)
+
+
+def ensure_collection_uid(bcoll: BoneCollection):
+    """Retrieve the uid of the given bone collection, assigning a new one if necessary."""
+    uid = bcoll.rigify_uid
+    if uid >= 0:
+        return uid
+
+    # Choose the initial uid value
+    max_uid = 0x7fffffff
+
+    if re.fullmatch(r"Bones(\.\d+)?", bcoll.name):
+        # Use random numbers for collections with the default name
+        uid = random.randint(0, max_uid)
+    else:
+        uid = zlib.adler32(bcoll.name.encode("utf-8")) & max_uid
+
+    # Ensure the uid is unique within the armature
+    used_ids = set(coll.rigify_uid for coll in bcoll.id_data.collections)
+
+    while uid in used_ids:
+        uid = random.randint(0, max_uid)
+
+    assert uid >= 0
+    bcoll.rigify_uid = uid
+    return uid
+
+
+def resolve_collection_reference(obj: ArmatureObject, ref: Any, *,
+                                 update=False, raise_error=False) -> bpy.types.BoneCollection | None:
+    """
+    Find the bone collection referenced by the given reference.
+    The reference should be RigifyBoneCollectionReference, either typed or as a raw idproperty.
+    """
+    uid = ref["uid"]
+    if uid < 0:
+        return None
+
+    arm = obj.data
+
+    name = ref.get("name", "")
+    name_coll = arm.collections.get(name) if name else None
+
+    # First try an exact match of both name and uid
+    if name_coll and name_coll.rigify_uid == uid:
+        return name_coll
+
+    # Then try searching by the uid
+    for coll in arm.collections:
+        if coll.rigify_uid == uid:
+            if update:
+                ref["name"] = coll.name
+            return coll
+
+    # Fallback to lookup by name only if possible
+    if name_coll:
+        if update:
+            ref["uid"] = ensure_collection_uid(name_coll)
+        return name_coll
+
+    if raise_error:
+        raise MetarigError(f"Broken bone collection reference: {name} #{uid}")
+
+    return None
+
+
+def validate_collection_references(obj: ArmatureObject):
+    # Scan and update all references. This uses raw idprop access
+    # to avoid depending on valid rig component definitions.
+    refs = defaultdict(list)
+    warnings = []
+
+    for pose_bone in obj.pose.bones:
+        params = pose_bone.get("rigify_parameters")
+        if not params:
+            continue
+
+        for prop_name, prop_value in params.items():
+            prop_name: str
+
+            # Filter for reference list properties
+            if not prop_name.endswith(REFS_LIST_SUFFIX):
+                continue
+
+            value = rna_idprop_value_to_python(prop_value)
+            if not isinstance(value, list):
+                continue
+
+            for item in value:
+                # Scan valid reference items
+                if not isinstance(item, IDPropertyGroup):
+                    continue
+
+                name = item.get("name")
+                if not name or item.get("uid", -1) < 0:
+                    continue
+
+                ref_coll = resolve_collection_reference(obj, item, update=True)
+
+                if ref_coll:
+                    refs[ref_coll.name].append(item)
+                else:
+                    stem = prop_name[:-len(REFS_LIST_SUFFIX)].replace("_", " ").title()
+                    warnings.append(f"bone {pose_bone.name} has a broken reference to {stem} collection '{name}'")
+                    print(f"RIGIFY: {warnings[-1]}")
+
+    # Ensure uids are unique
+    known_uids = dict()
+
+    for bcoll in obj.data.collections:
+        uid = bcoll.rigify_uid
+        if uid < 0:
+            continue
+
+        prev_use = known_uids.get(uid)
+
+        if prev_use is not None:
+            warnings.append(f"collection {bcoll.name} has the same uid {uid} as {prev_use}")
+            print(f"RIGIFY: {warnings[-1]}")
+
+            # Replace the uid
+            bcoll.rigify_uid = -1
+            uid = ensure_collection_uid(bcoll)
+
+            for ref in refs[bcoll.name]:
+                ref["uid"] = uid
+
+        known_uids[uid] = bcoll.name
+
+    return warnings


 ##############################################
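
`ensure_collection_uid` hashes the collection name so that regenerating the same metarig yields the same uid, and re-rolls randomly on a collision (or for default "Bones"-style names). The core of the scheme, reduced to a standalone sketch:

```python
import random
import zlib

MAX_UID = 0x7fffffff

def pick_uid(name, used_ids):
    # Name-derived hash keeps uids stable across runs; collisions re-roll randomly.
    uid = zlib.adler32(name.encode("utf-8")) & MAX_UID
    while uid in used_ids:
        uid = random.randint(0, MAX_UID)
    used_ids.add(uid)
    return uid

used = set()
print(pick_uid("Torso", used), pick_uid("Torso", used))  # second call collides and re-rolls
```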
@@ -83,8 +228,8 @@ class ControlLayersOption:
         self.toggle_default = toggle_default
         self.description = description

-        self.toggle_option = self.name+'_layers_extra'
-        self.refs_option = self.name + '_coll_refs'
+        self.toggle_option = self.name + REFS_TOGGLE_SUFFIX
+        self.refs_option = self.name + REFS_LIST_SUFFIX

         if toggle_name:
             self.toggle_name = toggle_name
@@ -195,8 +340,9 @@ class ControlLayersOption:

         for i, ref in enumerate(refs):
             row = col.row(align=True)
-            row.prop(ref, "name", text="")
+            row.alert = ref.uid >= 0 and not ref.find_collection()
+            row.prop(ref, "name", text="")
+            row.alert = False

             props = row.operator(operator="pose.rigify_collection_ref_remove", text="", icon="REMOVE")
             props.prop_name = self.refs_option
@@ -257,7 +257,7 @@ def make_driver(owner: bpy_struct, prop: str, *, index=-1, type='SUM',

     Specification format:
         If the variables argument is a dictionary, keys specify variable names.
-        Otherwise, names are set to var, var1, var2, ... etc:
+        Otherwise, names are set to var, var1, var2, ... etc.:

         variables = [ ..., ..., ... ]
         variables = { 'var': ..., 'var1': ..., 'var2': ... }
@@ -357,7 +357,7 @@ def verify_mesh_obj(obj: bpy.types.Object) -> MeshObject:
     return obj  # noqa


-class IdPropSequence(typing.Mapping[str, T], ABC):
+class IdPropSequence(typing.Mapping[str, T], typing.Sequence[T], ABC):
     def __getitem__(self, item: str | int) -> T:
         pass
@@ -10,7 +10,6 @@ import enum

 from typing import Optional, TYPE_CHECKING

-from .misc import map_list

 if TYPE_CHECKING:
     from ..base_generate import BaseGenerator
@@ -184,7 +184,8 @@ def resolve_layer_names(layers):


 def upgrade_metarig_layers(metarig: ArmatureObject):
-    from .layers import DEF_COLLECTION, MCH_COLLECTION, ORG_COLLECTION, ROOT_COLLECTION
+    from .layers import (REFS_LIST_SUFFIX, DEF_COLLECTION, MCH_COLLECTION, ORG_COLLECTION, ROOT_COLLECTION,
+                         ensure_collection_uid)

     arm = metarig.data

@@ -195,10 +196,6 @@ def upgrade_metarig_layers(metarig: ArmatureObject):
         if m := re.match(r'^Layer (\d+)', coll.name):
             coll_table[int(m[1]) - 1] = coll

-    # Assign UIDs from layer index
-    for idx, coll in coll_table.items():
-        coll.rigify_uid = idx
-
     # Assign names to special layers if they exist
     special_layers = {28: ROOT_COLLECTION, 29: DEF_COLLECTION, 30: MCH_COLLECTION, 31: ORG_COLLECTION}

@@ -226,15 +223,13 @@ def upgrade_metarig_layers(metarig: ArmatureObject):
         if new_name:
             if not coll:
                 coll = arm.collections.new(new_name)
-                coll.rigify_uid = i
                 coll_table[i] = coll
             else:
                 coll.name = new_name

         if coll:
             coll_idx = find_index(arm.collections, coll)
-            if hasattr(arm.collections, 'move'):
-                arm.collections.move(coll_idx, cur_idx)
+            arm.collections.move(coll_idx, cur_idx)
             cur_idx += 1

             coll.rigify_ui_row = layer.get("row", 1)
@@ -282,7 +277,7 @@ def upgrade_metarig_layers(metarig: ArmatureObject):
         # Work around the stupid legacy default where one layer is implicitly selected
         for name_stem in default_map.get(get_rigify_type(pose_bone), []):
             prop_name = name_stem + "_layers"
-            if prop_name not in params and name_stem + "_coll_refs" not in params:
+            if prop_name not in params and name_stem + REFS_LIST_SUFFIX not in params:
                 params[prop_name] = default_layers

         for prop_name, prop_value in list(params.items()):
@@ -292,9 +287,11 @@ def upgrade_metarig_layers(metarig: ArmatureObject):
             for i, show in enumerate(prop_value.to_list()):
                 if show:
                     coll = coll_table.get(i)
-                    entries.append({"uid": i, "name": coll.name if coll else "<?>"})
+                    uid = ensure_collection_uid(coll) if coll else i
+                    name = coll.name if coll else f"Layer {i+1}"
+                    entries.append({"uid": uid, "name": name})

-            params[prop_name[:-7] + "_coll_refs"] = entries
+            params[prop_name[:-7] + REFS_LIST_SUFFIX] = entries

             del params[prop_name]

@@ -472,7 +469,7 @@ def write_metarig(obj: ArmatureObject, layers=False, func_name="create",
     Write a metarig as a python script, this rig is to have all info needed for
     generating the real rig with rigify.
     """
-    from .. import RigifyBoneCollectionReference
+    from .layers import REFS_LIST_SUFFIX, is_collection_ref_list_prop

     code = [
         "import bpy\n",
@@ -535,9 +532,7 @@ def write_metarig(obj: ArmatureObject, layers=False, func_name="create",
     args = ', '.join(f'{k}={repr(v)}' for k, v in collection_attrs.items())

     code.append(f"    def add_bone_collection(name, *, {args}):")
-    code.append(f"        uid = len(arm.collections)")
     code.append(f"        new_bcoll = arm.collections.new(name)")
-    code.append(f"        new_bcoll.rigify_uid = uid")
     for k, _v in collection_attrs.items():
         code.append(f"        new_bcoll.rigify_{k} = {k}")
     code.append("        bone_collections[name] = new_bcoll")
@@ -613,8 +608,7 @@ def write_metarig(obj: ArmatureObject, layers=False, func_name="create",
     param = _get_property_value(rigify_parameters, param_name)

     if isinstance(param, bpy_prop_collection):
-        if (layers and param_name.endswith("_coll_refs") and
-                all(isinstance(item, RigifyBoneCollectionReference) for item in param)):
+        if layers and param_name.endswith(REFS_LIST_SUFFIX) and is_collection_ref_list_prop(param):
             bcoll_set = [item.find_collection() for item in param]
             bcoll_set = [bcoll for bcoll in bcoll_set if bcoll is not None]
             if len(bcoll_set) > 0:
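The upgrade path above turns a legacy 32-entry boolean layer mask into a list of `{"uid", "name"}` reference entries, falling back to a "Layer N" placeholder name when the collection for an index no longer exists. A sketch with hypothetical data:

```python
coll_table = {1: "FK Limbs"}  # hypothetical layer index -> collection name
layers_mask = [False, True, False, True]

entries = [
    {"uid": i, "name": coll_table.get(i, f"Layer {i + 1}")}
    for i, show in enumerate(layers_mask) if show
]
print(entries)  # [{'uid': 1, 'name': 'FK Limbs'}, {'uid': 3, 'name': 'Layer 4'}]
```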
@@ -370,7 +370,7 @@ class SwitchParentBuilder(GeneratorPlugin, MechanismUtilityMixin):
         prop_id = child['prop_id'] = child['prop_id'] or 'parent_switch'

         parent_names = [parent[1] or strip_prefix(parent[0])
-                        for parent in [(None, 'None'), *parent_bones]]
+                        for parent in [('None', 'None'), *parent_bones]]
         parent_str = ', '.join(['%s (%d)' % (name, i) for i, name in enumerate(parent_names)])

         ctrl_bone = child['ctrl_bone'] or bone
@@ -113,7 +113,6 @@ class PIE_MT_SelectionsEM(Menu):
 class PIE_MT_SelectAllBySelection(Menu):
     bl_idname = "PIE_MT_selectallbyselection"
     bl_label = "Verts Edges Faces"
-    bl_options = {'REGISTER', 'UNDO'}

     def draw(self, context):
         layout = self.layout
@@ -194,7 +193,6 @@ class PIE_OT_vertsedgesfacesop(Operator):
 class PIE_MT_SelectLoopSelection(Menu):
     bl_idname = "OBJECT_MT_selectloopselection"
     bl_label = "Verts Edges Faces"
-    bl_options = {'REGISTER', 'UNDO'}

     def draw(self, context):
         layout = self.layout