Pose Library: Update to use the asset shelf (when enabled) #104546

Merged
Julian Eisel merged 33 commits from asset-shelf into main 2023-08-04 15:00:21 +02:00
14 changed files with 234 additions and 31 deletions
Showing only changes of commit f2a28e5dd4 - Show all commits

View File

@ -16,7 +16,7 @@ import bpy
bl_info = {
"name": "Autodesk 3DS format",
"author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand",
"version": (2, 4, 1),
"version": (2, 4, 3),
"blender": (3, 6, 0),
"location": "File > Import-Export",
"description": "3DS Import/Export meshes, UVs, materials, textures, "
@ -109,6 +109,11 @@ class Export3DS(bpy.types.Operator, ExportHelper):
description="Export selected objects only",
default=False,
)
use_hierarchy: bpy.props.BoolProperty(
name="Export Hierarchy",
description="Export hierarchy chunks",
default=False,
)
write_keyframe: BoolProperty(
name="Write Keyframe",
description="Write the keyframe data",

View File

@ -87,6 +87,8 @@ MASTERSCALE = 0x0100 # Master scale factor
OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object
OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object
OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object
OBJECT_HIERARCHY = 0x4F00 # Hierarchy id of the object
OBJECT_PARENT = 0x4F10 # Parent id of the object
# >------ Sub defines of LIGHT
LIGHT_MULTIPLIER = 0x465B # The light energy factor
@ -1477,7 +1479,7 @@ def make_ambient_node(world):
# EXPORT #
##########
def save(operator, context, filepath="", use_selection=False, write_keyframe=False, global_matrix=None):
def save(operator, context, filepath="", use_selection=False, use_hierarchy=False, write_keyframe=False, global_matrix=None):
"""Save the Blender scene to a 3ds file."""
# Time the export
@ -1608,6 +1610,7 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
scale = {}
# Give all objects a unique ID and build a dictionary from object name to object id
object_id = {}
name_id = {}
for ob, data, matrix in mesh_objects:
@ -1615,6 +1618,7 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
rotation[ob.name] = ob.rotation_euler.to_quaternion().inverted()
scale[ob.name] = ob.scale
name_id[ob.name] = len(name_id)
object_id[ob.name] = len(object_id)
for ob in empty_objects:
translation[ob.name] = ob.location
@ -1622,6 +1626,12 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
scale[ob.name] = ob.scale
name_id[ob.name] = len(name_id)
for ob in light_objects:
object_id[ob.name] = len(object_id)
for ob in camera_objects:
object_id[ob.name] = len(object_id)
# Create object chunks for all meshes
i = 0
for ob, mesh, matrix in mesh_objects:
@ -1633,20 +1643,32 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
# Make a mesh chunk out of the mesh
object_chunk.add_subchunk(make_mesh_chunk(ob, mesh, matrix, materialDict, translation))
# Ensure the mesh has no over sized arrays, skip ones that do!
# Add hierarchy chunk with ID from object_id dictionary
if use_hierarchy:
obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
# Add parent chunk if object has a parent
if ob.parent is not None and (ob.parent.name in object_id):
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
object_chunk.add_subchunk(obj_hierarchy_chunk)
# ensure the mesh has no oversized arrays - skip ones that do!
# Otherwise we can't write since the array size won't fit into USHORT
if object_chunk.validate():
object_info.add_subchunk(object_chunk)
else:
operator.report({'WARNING'}, "Object %r can't be written into a 3DS file")
# Export kf object node
# Export object node
if write_keyframe:
kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id))
i += i
# Create chunks for all empties, only requires a kf object node
# Create chunks for all empties - only requires an object node
if write_keyframe:
for ob in empty_objects:
kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id))
@ -1654,15 +1676,15 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
# Create light object chunks
for ob in light_objects:
object_chunk = _3ds_chunk(OBJECT)
light_chunk = _3ds_chunk(OBJECT_LIGHT)
obj_light_chunk = _3ds_chunk(OBJECT_LIGHT)
color_float_chunk = _3ds_chunk(RGB)
energy_factor = _3ds_chunk(LIGHT_MULTIPLIER)
light_energy_factor = _3ds_chunk(LIGHT_MULTIPLIER)
object_chunk.add_variable("light", _3ds_string(sane_name(ob.name)))
light_chunk.add_variable("location", _3ds_point_3d(ob.location))
obj_light_chunk.add_variable("location", _3ds_point_3d(ob.location))
color_float_chunk.add_variable("color", _3ds_float_color(ob.data.color))
energy_factor.add_variable("energy", _3ds_float(ob.data.energy * 0.001))
light_chunk.add_subchunk(color_float_chunk)
light_chunk.add_subchunk(energy_factor)
light_energy_factor.add_variable("energy", _3ds_float(ob.data.energy * 0.001))
obj_light_chunk.add_subchunk(color_float_chunk)
obj_light_chunk.add_subchunk(light_energy_factor)
if ob.data.type == 'SPOT':
cone_angle = math.degrees(ob.data.spot_size)
@ -1684,10 +1706,24 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
if ob.data.use_square:
spot_square_chunk = _3ds_chunk(LIGHT_SPOT_RECTANGLE)
spotlight_chunk.add_subchunk(spot_square_chunk)
light_chunk.add_subchunk(spotlight_chunk)
obj_light_chunk.add_subchunk(spotlight_chunk)
# Add light to object info
object_chunk.add_subchunk(light_chunk)
# Add light to object chunk
object_chunk.add_subchunk(obj_light_chunk)
# Add hierarchy chunks with ID from object_id dictionary
if use_hierarchy:
obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
if ob.parent is None or (ob.parent.name not in object_id):
obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
else: # Get the parent ID from the object_id dict
obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
object_chunk.add_subchunk(obj_hierarchy_chunk)
# Add light object and hierarchy chunks to object info
object_info.add_subchunk(object_chunk)
# Export light and spotlight target node
@ -1714,6 +1750,20 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
camera_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6)))
camera_chunk.add_variable("lens", _3ds_float(ob.data.lens))
object_chunk.add_subchunk(camera_chunk)
# Add hierarchy chunks with ID from object_id dictionary
if use_hierarchy:
obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
if ob.parent is None or (ob.parent.name not in object_id):
obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
else: # Get the parent ID from the object_id dict
obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
object_chunk.add_subchunk(obj_hierarchy_chunk)
# Add camera object and hierarchy chunks to object info
object_info.add_subchunk(object_chunk)
# Export camera and target node

View File

@ -92,6 +92,8 @@ MAT_MAP_BCOL = 0xA368 # Blue mapping
OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object
OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object
OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object
OBJECT_HIERARCHY = 0x4F00 # This lets us know the hierarchy id of the object
OBJECT_PARENT = 0x4F10 # This lets us know the parent id of the object
# >------ Sub defines of LIGHT
LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight
@ -322,6 +324,9 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)
childs_list = []
parent_list = []
def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE):
contextObName = None
@ -461,6 +466,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
temp_chunk = Chunk()
CreateBlenderObject = False
CreateCameraObject = False
CreateLightObject = False
CreateTrackData = False
@ -924,6 +930,23 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
contextMatrix = mathutils.Matrix(
(data[:3] + [0], data[3:6] + [0], data[6:9] + [0], data[9:] + [1])).transposed()
# If hierarchy chunk
elif new_chunk.ID == OBJECT_HIERARCHY:
child_id = read_short(new_chunk)
childs_list.insert(child_id, contextObName)
parent_list.insert(child_id, None)
if child_id in parent_list:
idp = parent_list.index(child_id)
parent_list[idp] = contextObName
elif new_chunk.ID == OBJECT_PARENT:
parent_id = read_short(new_chunk)
if parent_id > len(childs_list):
parent_list[child_id] = parent_id
parent_list.extend([None]*(parent_id-len(parent_list)))
parent_list.insert(parent_id, contextObName)
elif parent_id < len(childs_list):
parent_list[child_id] = childs_list[parent_id]
# If light chunk
elif contextObName and new_chunk.ID == OBJECT_LIGHT: # Basic lamp support
newLamp = bpy.data.lights.new("Lamp", 'POINT')
@ -934,9 +957,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
temp_data = file.read(SZ_3FLOAT)
contextLamp.location = struct.unpack('<3f', temp_data)
new_chunk.bytes_read += SZ_3FLOAT
contextMatrix = None # Reset matrix
CreateBlenderObject = False
CreateLightObject = True
contextMatrix = None # Reset matrix
elif CreateLightObject and new_chunk.ID == COLOR_F: # Light color
temp_data = file.read(SZ_3FLOAT)
contextLamp.data.color = struct.unpack('<3f', temp_data)
@ -973,6 +996,21 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
contextLamp.data.show_cone = True
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_RECTANGLE: # Square
contextLamp.data.use_square = True
elif CreateLightObject and new_chunk.ID == OBJECT_HIERARCHY:
child_id = read_short(new_chunk)
childs_list.insert(child_id, contextObName)
parent_list.insert(child_id, None)
if child_id in parent_list:
idp = parent_list.index(child_id)
parent_list[idp] = contextObName
elif CreateLightObject and new_chunk.ID == OBJECT_PARENT:
parent_id = read_short(new_chunk)
if parent_id > len(childs_list):
parent_list[child_id] = parent_id
parent_list.extend([None]*(parent_id-len(parent_list)))
parent_list.insert(parent_id, contextObName)
elif parent_id < len(childs_list):
parent_list[child_id] = childs_list[parent_id]
# If camera chunk
elif contextObName and new_chunk.ID == OBJECT_CAMERA: # Basic camera support
@ -996,8 +1034,24 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
temp_data = file.read(SZ_FLOAT)
contextCamera.data.lens = float(struct.unpack('<f', temp_data)[0]) # Focus
new_chunk.bytes_read += SZ_FLOAT
contextMatrix = None # Reset matrix
CreateBlenderObject = False
CreateCameraObject = True
contextMatrix = None # Reset matrix
elif CreateCameraObject and new_chunk.ID == OBJECT_HIERARCHY:
child_id = read_short(new_chunk)
childs_list.insert(child_id, contextObName)
parent_list.insert(child_id, None)
if child_id in parent_list:
idp = parent_list.index(child_id)
parent_list[idp] = contextObName
elif CreateCameraObject and new_chunk.ID == OBJECT_PARENT:
parent_id = read_short(new_chunk)
if parent_id > len(childs_list):
parent_list[child_id] = parent_id
parent_list.extend([None]*(parent_id-len(parent_list)))
parent_list.insert(parent_id, contextObName)
elif parent_id < len(childs_list):
parent_list[child_id] = childs_list[parent_id]
# start keyframe section
elif new_chunk.ID == EDITKEYFRAME:
@ -1296,12 +1350,22 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
#pivot_list[ind] += pivot_list[parent] # Not sure this is correct, should parent space matrix be applied before combining?
# if parent name
for par, objs in parent_dictionary.items():
parent = object_dictionary.get(par)
for ob in objs:
if parent is not None:
ob.parent = parent
# If hierarchy
hierarchy = dict(zip(childs_list, parent_list))
hierarchy.pop(None, ...)
for idt, (child, parent) in enumerate(hierarchy.items()):
child_obj = object_dictionary.get(child)
parent_obj = object_dictionary.get(parent)
if child_obj and parent_obj is not None:
child_obj.parent = parent_obj
# fix pivots
for ind, ob in enumerate(object_list):
if ob.type == 'MESH':
@ -1312,14 +1376,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
ob.data.transform(pivot_matrix)
def load_3ds(filepath,
context,
CONSTRAIN=10.0,
IMAGE_SEARCH=True,
WORLD_MATRIX=False,
KEYFRAME=True,
APPLY_MATRIX=True,
CONVERSE=None):
def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None):
print("importing 3DS: %r..." % (filepath), end="")

View File

@ -3,7 +3,7 @@
bl_info = {
"name": "FBX format",
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
"version": (5, 3, 0),
"version": (5, 3, 3),
"blender": (3, 6, 0),
"location": "File > Import-Export",
"description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",

View File

@ -4,6 +4,7 @@
# Script copyright (C) 2013 Blender Foundation
BOOL = b'C'[0]
INT8 = b'Z'[0]
INT16 = b'Y'[0]
INT32 = b'I'[0]
INT64 = b'L'[0]

View File

@ -56,6 +56,13 @@ class FBXElem:
self.props_type.append(data_types.BOOL)
self.props.append(data)
def add_int8(self, data):
    """Append *data* to this element's properties as a signed 8-bit integer."""
    assert(isinstance(data, int))
    # Pack as a little-endian signed char ('<b') before storing.
    data = pack('<b', data)
    self.props_type.append(data_types.INT8)
    self.props.append(data)
def add_int16(self, data):
assert(isinstance(data, int))
data = pack('<h', data)

View File

@ -77,7 +77,7 @@ from .fbx_utils import (
# Animation.
AnimationCurveNodeWrapper,
# Objects.
ObjectWrapper, fbx_name_class,
ObjectWrapper, fbx_name_class, ensure_object_not_in_edit_mode,
# Top level.
FBXExportSettingsMedia, FBXExportSettings, FBXExportData,
)
@ -2899,20 +2899,23 @@ def fbx_data_from_scene(scene, depsgraph, settings):
_objs_indices = {}
for ma, (ma_key, ob_objs) in data_materials.items():
for ob_obj in ob_objs:
connections.append((b"OO", get_fbx_uuid_from_key(ma_key), ob_obj.fbx_uuid, None))
# Get index of this material for this object (or dupliobject).
# Material indices for mesh faces are determined by their order in 'ma to ob' connections.
# Only materials for meshes currently...
# Note in case of dupliobjects a same me/ma idx will be generated several times...
# Should not be an issue in practice, and it's needed in case we export duplis but not the original!
if ob_obj.type not in BLENDER_OBJECT_TYPES_MESHLIKE:
connections.append((b"OO", get_fbx_uuid_from_key(ma_key), ob_obj.fbx_uuid, None))
continue
_mesh_key, me, _free = data_meshes[ob_obj]
material_indices = mesh_material_indices.setdefault(me, {})
if ma in material_indices:
# Material has already been found for this mesh.
# XXX If a mesh has multiple material slots with the same material, they are combined into one slot.
# Even if duplicate materials were exported without combining them into one slot, keeping duplicate
# materials separated does not appear to be common behaviour of external software when importing FBX.
continue
connections.append((b"OO", get_fbx_uuid_from_key(ma_key), ob_obj.fbx_uuid, None))
idx = _objs_indices[ob_obj] = _objs_indices.get(ob_obj, -1) + 1
material_indices[ma] = idx
del _objs_indices
@ -3494,6 +3497,14 @@ def save(operator, context,
ctx_objects = context.view_layer.objects
if use_visible:
ctx_objects = tuple(obj for obj in ctx_objects if obj.visible_get())
# Ensure no Objects are in Edit mode.
# Copy to a tuple for safety, to avoid the risk of modifying ctx_objects while iterating.
for obj in tuple(ctx_objects):
if not ensure_object_not_in_edit_mode(context, obj):
operator.report({'ERROR'}, "%s could not be set out of Edit Mode, so cannot be exported" % obj.name)
return {'CANCELLED'}
kwargs_mod["context_objects"] = ctx_objects
depsgraph = context.evaluated_depsgraph_get()
@ -3525,6 +3536,16 @@ def save(operator, context,
else:
data_seq = tuple((scene, scene.name, 'objects') for scene in bpy.data.scenes if scene.objects)
# Ensure no Objects are in Edit mode.
for data, data_name, data_obj_propname in data_seq:
# Copy to a tuple for safety, to avoid the risk of modifying the data prop while iterating it.
for obj in tuple(getattr(data, data_obj_propname)):
if not ensure_object_not_in_edit_mode(context, obj):
operator.report({'ERROR'},
"%s in %s could not be set out of Edit Mode, so cannot be exported"
% (obj.name, data_name))
return {'CANCELLED'}
# call this function within a loop with BATCH_ENABLE == False
new_fbxpath = fbxpath # own dir option modifies, we need to keep an original

View File

@ -28,6 +28,7 @@ for each property.
The types are as follows:
* 'Z': - INT8
* 'Y': - INT16
* 'C': - BOOL
* 'I': - INT32
@ -106,6 +107,7 @@ def unpack_array(read, array_type, array_stride, array_byteswap):
read_data_dict = {
b'Z'[0]: lambda read: unpack(b'<b', read(1))[0], # 8 bit int
b'Y'[0]: lambda read: unpack(b'<h', read(2))[0], # 16 bit int
b'C'[0]: lambda read: unpack(b'?', read(1))[0], # 1 bit bool (yes/no)
b'I'[0]: lambda read: unpack(b'<i', read(4))[0], # 32 bit int
@ -221,6 +223,7 @@ def parse(fn, use_namedtuple=True):
data_types = type(array)("data_types")
data_types.__dict__.update(
dict(
INT8 = b'Z'[0],
INT16 = b'Y'[0],
BOOL = b'C'[0],
INT32 = b'I'[0],

View File

@ -540,6 +540,58 @@ def fast_first_axis_unique(ar, return_unique=True, return_index=False, return_in
return result
def ensure_object_not_in_edit_mode(context, obj):
    """Attempt to take *obj* out of Edit mode so it can be exported.

    Objects in Edit mode usually cannot be exported because much of the API used when exporting is not available for
    Objects in Edit mode.

    Exiting the currently active Object (and any other Objects opened in multi-editing) from Edit mode is simple and
    should be done with `bpy.ops.object.mode_set(mode='OBJECT')` instead of using this function.

    This function is for the rare case where an Object is in Edit mode, but the current context mode is not Edit mode.
    This can occur from a state where the current context mode is Edit mode, but then the active Object of the current
    View Layer is changed to a different Object that is not in Edit mode. This changes the current context mode, but
    leaves the other Object(s) in Edit mode.

    Returns True if *obj* is not in Edit mode (or could be taken out of it), False if it could not be changed.
    The active Object of the View Layer is restored before returning, and any temporary Scene Collection link is
    removed.
    """
    if obj.mode != 'EDIT':
        return True

    # Get the active View Layer.
    view_layer = context.view_layer

    # A View Layer belongs to a scene.
    scene = view_layer.id_data

    # Get the current active Object of this View Layer, so we can restore it once done.
    orig_active = view_layer.objects.active

    # Check if obj is in the View Layer. If obj is not in the View Layer, it cannot be set as the active Object.
    # We don't use `obj.name in view_layer.objects` because an Object from a Library could have the same name.
    is_in_view_layer = any(o == obj for o in view_layer.objects)

    do_unlink_from_scene_collection = False
    try:
        if not is_in_view_layer:
            # There might not be any enabled collections in the View Layer, so link obj into the Scene Collection
            # instead, which is always available to all View Layers of that Scene.
            scene.collection.objects.link(obj)
            do_unlink_from_scene_collection = True
        view_layer.objects.active = obj

        # Now we're finally ready to attempt to change obj's mode.
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')
        if obj.mode == 'EDIT':
            # The Object could not be set out of EDIT mode and therefore cannot be exported.
            return False
    finally:
        # Always restore the original active Object and unlink obj from the Scene Collection if it had to be linked.
        view_layer.objects.active = orig_active
        if do_unlink_from_scene_collection:
            scene.collection.objects.unlink(obj)
    return True
# ##### UIDs code. #####
# ID class (mere int).
@ -722,6 +774,10 @@ def elem_data_single_bool(elem, name, value):
return _elem_data_single(elem, name, value, "add_bool")
def elem_data_single_int8(elem, name, value):
return _elem_data_single(elem, name, value, "add_int8")
def elem_data_single_int16(elem, name, value):
return _elem_data_single(elem, name, value, "add_int16")

View File

@ -27,6 +27,7 @@ for each property.
The types are as follows:
* 'Z': - INT8
* 'Y': - INT16
* 'C': - BOOL
* 'I': - INT32
@ -65,6 +66,8 @@ def parse_json_rec(fbx_root, json_node):
for d, dt in zip(data, data_types):
if dt == "C":
e.add_bool(d)
elif dt == "Z":
e.add_int8(d)
elif dt == "Y":
e.add_int16(d)
elif dt == "I":

View File

@ -67,6 +67,7 @@ def unpack_array(read, array_type, array_stride, array_byteswap):
read_data_dict = {
b'Z'[0]: lambda read: unpack(b'<b', read(1))[0], # byte
b'Y'[0]: lambda read: unpack(b'<h', read(2))[0], # 16 bit int
b'C'[0]: lambda read: unpack(b'?', read(1))[0], # 1 bit bool (yes/no)
b'I'[0]: lambda read: unpack(b'<i', read(4))[0], # 32 bit int

View File

@ -658,7 +658,7 @@ class BTool_FindBrush(Operator):
bpy.ops.object.select_all(action="TOGGLE")
bpy.ops.object.select_all(action="DESELECT")
bpy.context.view_layer.objects.active = ob
ob.set_select(state=True)
ob.select_set(state=True)
return {"FINISHED"}

View File

@ -142,7 +142,7 @@ def write_mesh(context, report_cb):
filepath=filepath,
apply_modifiers=True,
export_selected_objects=True,
scaling_factor=global_scale,
global_scale=global_scale,
path_mode=path_mode,
export_normals=export_data_layers,
export_uv=export_data_layers,

View File

@ -311,7 +311,6 @@ class PoseAssetUser:
if not (
context.object
and context.object.mode == "POSE" # This condition may not be desired.
and context.asset_library_ref
and context.asset_file_handle
):
return False