new addon simple_deform_helper #104464

Closed
EMM wants to merge 29 commits from Guai_Wo_Ge_EMM/blender-addons:simple_deform_helper into main

12 changed files with 466 additions and 190 deletions
Showing only changes of commit 70677c3b82

View File

@@ -2,4 +2,4 @@ ${CommitTitle}
${CommitBody}
Pull Request #${PullRequestIndex}
Pull Request: https://projects.blender.org/blender/blender-addons/pulls/${PullRequestIndex}

View File

@@ -1,3 +1,3 @@
${PullRequestTitle}
Pull Request #${PullRequestIndex}
Pull Request: https://projects.blender.org/blender/blender-addons/pulls/${PullRequestIndex}

View File

@@ -11,7 +11,7 @@ bl_info = {
"name": "Archimesh",
"author": "Antonio Vazquez (antonioya)",
"location": "View3D > Add Mesh / Sidebar > Create Tab",
"version": (1, 2, 4),
"version": (1, 2, 5),
"blender": (3, 0, 0),
"description": "Generate rooms, doors, windows, and other architecture objects",
"doc_url": "{BLENDER_MANUAL_URL}/addons/add_mesh/archimesh.html",

View File

@@ -108,8 +108,7 @@ class ARCHIMESH_OT_Hole(Operator):
# ---------------------------------------
for child in obj.parent.children:
# noinspection PyBroadException
try:
if child["archimesh.ctrl_hole"]:
if "archimesh.ctrl_hole" in child and child["archimesh.ctrl_hole"]:
# apply scale
t = parentobj.RoomGenerator[0].wall_width
if t > 0:
@@ -117,11 +116,8 @@ class ARCHIMESH_OT_Hole(Operator):
else:
child.scale.y = 1
# add boolean modifier
if isboolean(myroom, child) is False:
if not isboolean(myroom, child):
set_modifier_boolean(myroom, child)
except:
# print("Unexpected error:" + str(sys.exc_info()))
pass
# ---------------------------------------
# Now add the modifiers to baseboard
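Custom properties on Blender IDs behave like a mapping, so the membership test above replaces a blanket try/except that also swallowed unrelated errors. As an illustrative sketch (not part of the patch), the same check can be written with .get(), which bpy ID types also support:

    for child in obj.parent.children:
        # .get() returns None when the key is missing, so one call covers
        # both the presence check and the truthiness check.
        if child.get("archimesh.ctrl_hole"):
            set_modifier_boolean(myroom, child)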

View File

@@ -172,9 +172,7 @@ def set_modifier_solidify(myobject, width):
# Add modifier (boolean)
# --------------------------------------------------------------------
def set_modifier_boolean(myobject, bolobject):
bpy.context.view_layer.objects.active = myobject
if bpy.context.view_layer.objects.active.name == myobject.name:
boolean_modifier = context.object.modifiers.new("", 'BOOLEAN')
boolean_modifier = myobject.modifiers.new("", 'BOOLEAN')
boolean_modifier.operation = 'DIFFERENCE'
boolean_modifier.object = bolobject
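Besides dropping the active-object juggling, this rewrite removes a latent bug: the deleted line referenced a bare `context`, which does not appear to be defined in that function's scope. modifiers.new() works on any object directly; a minimal usage sketch with hypothetical object names:

    import bpy

    cube = bpy.data.objects["Cube"]          # hypothetical names
    cutter = bpy.data.objects["Cutter"]
    mod = cube.modifiers.new("", 'BOOLEAN')  # no need to change the active object
    mod.operation = 'DIFFERENCE'
    mod.object = cutter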

View File

@@ -2,9 +2,9 @@
bl_info = {
"name": "FBX format",
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier",
"version": (4, 37, 5),
"blender": (3, 4, 0),
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
"version": (5, 1, 0),
"blender": (3, 6, 0),
"location": "File > Import-Export",
"description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",
"warning": "",
@@ -482,6 +482,12 @@ class ExportFBX(bpy.types.Operator, ExportHelper):
description="Export vertex color attributes",
default='SRGB',
)
prioritize_active_color: BoolProperty(
name="Prioritize Active Color",
description="Make sure active color will be exported first. Could be important "
"since some other software can discard other color attributes besides the first one",
default=False,
)
use_subsurf: BoolProperty(
name="Export Subdivision Surface",
description="Export the last Catmull-Rom subdivision modifier as FBX subdivision "
@@ -787,6 +793,7 @@ class FBX_PT_export_geometry(bpy.types.Panel):
#~ sub.enabled = operator.mesh_smooth_type in {'OFF'}
sub.prop(operator, "use_tspace")
layout.prop(operator, "colors_type")
layout.prop(operator, "prioritize_active_color")
class FBX_PT_export_armature(bpy.types.Panel):
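The ordering this option requests is implemented further down in the exporter with a stable sort on an equality key. In isolation, with hypothetical layer names:

    active = "Col"
    layers = ["AO", "PaintMask", "Col"]
    # reverse=True moves the single True key (the active layer) to the front;
    # Python's sort is stable, so the other layers keep their relative order.
    ordered = sorted(layers, key=lambda x: x == active, reverse=True)
    # -> ["Col", "AO", "PaintMask"]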

View File

@@ -11,6 +11,7 @@ import os
import time
from itertools import zip_longest, chain
from functools import cache
if "bpy" in locals():
import importlib
@@ -47,9 +48,10 @@ from .fbx_utils import (
# Miscellaneous utils.
PerfMon,
units_blender_to_fbx_factor, units_convertor, units_convertor_iter,
matrix4_to_array, similar_values, similar_values_iter, astype_view_signedness, fast_first_axis_unique,
matrix4_to_array, similar_values, shape_difference_exclude_similar, astype_view_signedness, fast_first_axis_unique,
fast_first_axis_flat,
# Mesh transform helpers.
vcos_transformed_gen, nors_transformed_gen, vcos_transformed, nors_transformed,
vcos_transformed_gen, vcos_transformed, nors_transformed,
# UUID from key.
get_fbx_uuid_from_key,
# Key generators.
@@ -760,15 +762,19 @@ def fbx_data_mesh_shapes_elements(root, me_obj, me, scene_data, fbx_me_tmpl, fbx
for shape, (channel_key, geom_key, shape_verts_co, shape_verts_idx) in shapes.items():
# Use vgroups as weights, if defined.
if shape.vertex_group and shape.vertex_group in me_obj.bdata.vertex_groups:
shape_verts_weights = array.array(data_types.ARRAY_FLOAT64, [0.0]) * (len(shape_verts_co) // 3)
shape_verts_weights = np.zeros(len(shape_verts_idx), dtype=np.float64)
# It's slightly faster to iterate and index the underlying memoryview objects
mv_shape_verts_weights = shape_verts_weights.data
mv_shape_verts_idx = shape_verts_idx.data
vg_idx = me_obj.bdata.vertex_groups[shape.vertex_group].index
for sk_idx, v_idx in enumerate(shape_verts_idx):
for sk_idx, v_idx in enumerate(mv_shape_verts_idx):
for vg in vertices[v_idx].groups:
if vg.group == vg_idx:
shape_verts_weights[sk_idx] = vg.weight * 100.0
mv_shape_verts_weights[sk_idx] = vg.weight
break
shape_verts_weights *= 100.0
else:
shape_verts_weights = array.array(data_types.ARRAY_FLOAT64, [100.0]) * (len(shape_verts_co) // 3)
shape_verts_weights = np.full(len(shape_verts_idx), 100.0, dtype=np.float64)
channels.append((channel_key, shape, shape_verts_weights))
geom = elem_data_single_int64(root, b"Geometry", get_fbx_uuid_from_key(geom_key))
@@ -784,8 +790,7 @@ def fbx_data_mesh_shapes_elements(root, me_obj, me, scene_data, fbx_me_tmpl, fbx
elem_data_single_int32_array(geom, b"Indexes", shape_verts_idx)
elem_data_single_float64_array(geom, b"Vertices", shape_verts_co)
if write_normals:
elem_data_single_float64_array(geom, b"Normals",
array.array(data_types.ARRAY_FLOAT64, [0.0]) * len(shape_verts_co))
elem_data_single_float64_array(geom, b"Normals", np.zeros(len(shape_verts_idx) * 3, dtype=np.float64))
# Yiha! BindPose for shapekeys too! Dodecasigh...
# XXX Not sure yet whether several bindposes on same mesh are allowed, or not... :/
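The memoryview trick used in the weights loop, reduced to a standalone sketch with made-up sizes: indexing an array's .data memoryview from pure Python skips NumPy's per-element scalar boxing, which is what makes it slightly faster here.

    import numpy as np

    weights = np.zeros(4, dtype=np.float64)
    indices = np.arange(4, dtype=np.int32)
    mv_weights = weights.data              # memoryview over the float64 buffer
    mv_indices = indices.data              # iterating this yields plain ints
    for sk_idx, v_idx in enumerate(mv_indices):
        mv_weights[sk_idx] = float(v_idx) * 0.5   # writes through to 'weights'
    # weights is now [0.0, 0.5, 1.0, 1.5]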
@@ -897,19 +902,60 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
# We do loose edges as two-vertices faces, if enabled...
#
# Note we have to process edges at the same time, as they are based on polygons' loops...
# Total number of loops, including any extra added for loose edges.
loop_nbr = len(me.loops)
t_pvi = array.array(data_types.ARRAY_INT32, (0,)) * loop_nbr
t_ls = [None] * len(me.polygons)
me.loops.foreach_get("vertex_index", t_pvi)
# dtypes matching the C data. Matching the C datatype avoids iteration and casting of every element in foreach_get's
# C code.
bl_vertex_index_dtype = bl_edge_index_dtype = bl_loop_index_dtype = np.uintc
# Start vertex indices of loops. May contain elements for loops added for the export of loose edges.
t_lvi = np.empty(len(me.loops), dtype=bl_vertex_index_dtype)
# Loop start indices of polygons. May contain elements for the polygons added for the export of loose edges.
t_ls = np.empty(len(me.polygons), dtype=bl_loop_index_dtype)
# Vertex indices of edges (unsorted, unlike Mesh.edge_keys), flattened into an array twice the length of the number
# of edges.
t_ev = np.empty(len(me.edges) * 2, dtype=bl_vertex_index_dtype)
# Each edge has two vertex indices, so it's useful to view the array as 2d where each element on the first axis is a
# pair of vertex indices
t_ev_pair_view = t_ev.view()
t_ev_pair_view.shape = (-1, 2)
# Edge indices of loops. May contain elements for loops added for the export of loose edges.
t_lei = np.empty(len(me.loops), dtype=bl_edge_index_dtype)
me.loops.foreach_get("vertex_index", t_lvi)
me.polygons.foreach_get("loop_start", t_ls)
me.edges.foreach_get("vertices", t_ev)
me.loops.foreach_get("edge_index", t_lei)
# Add "fake" faces for loose edges.
# Add "fake" faces for loose edges. Each "fake" face consists of two loops creating a new 2-sided polygon.
if scene_data.settings.use_mesh_edges:
t_le = tuple(e.vertices for e in me.edges if e.is_loose)
t_pvi.extend(chain(*t_le))
t_ls.extend(range(loop_nbr, loop_nbr + len(t_le) * 2, 2))
bl_edge_is_loose_dtype = bool
# Get the mask of edges that are loose
loose_mask = np.empty(len(me.edges), dtype=bl_edge_is_loose_dtype)
me.edges.foreach_get('is_loose', loose_mask)
indices_of_loose_edges = np.flatnonzero(loose_mask)
# Since we add two loops per loose edge, repeat the indices so that there's one for each new loop
new_loop_edge_indices = np.repeat(indices_of_loose_edges, 2)
# Get the loose edge vertex index pairs
t_le = t_ev_pair_view[loose_mask]
# append will automatically flatten the pairs in t_le
t_lvi = np.append(t_lvi, t_le)
t_lei = np.append(t_lei, new_loop_edge_indices)
# Two loops are added per loose edge
loop_nbr += 2 * len(t_le)
t_ls = np.append(t_ls, np.arange(len(me.loops), loop_nbr, 2, dtype=t_ls.dtype))
del t_le
del loose_mask
del indices_of_loose_edges
del new_loop_edge_indices
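The loose-edge expansion above, as a self-contained sketch with made-up mesh data:

    import numpy as np

    edges = np.array([[0, 1], [1, 2], [5, 6]], dtype=np.uintc)    # vertex pair per edge
    loose_mask = np.array([False, False, True])                   # edge 2 is loose
    indices_of_loose_edges = np.flatnonzero(loose_mask)           # -> [2]
    new_loop_edge_indices = np.repeat(indices_of_loose_edges, 2)  # one edge index per new loop
    t_le = edges[loose_mask]                                      # -> [[5, 6]]
    # np.append flattens, so the two new loops get vertex indices 5 and 6.
    loop_vertex_indices = np.append(np.array([0, 1, 2], dtype=np.uintc), t_le)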
# Edges...
# Note: Edges are represented as a loop here: each edge uses a single index, which refers to the polygon array.
@@ -919,83 +965,136 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
# for loose edges).
# We also have to store a mapping from real edges to their indices in this array, for edge-mapped data
# (like e.g. crease).
t_eli = array.array(data_types.ARRAY_INT32)
edges_map = {}
edges_nbr = 0
if t_ls and t_pvi:
# t_ls is loop start indices of polygons, but we want to use it to indicate the end loop of each polygon.
# The loop end index of a polygon is the loop start index of the next polygon minus one, so the first element of
# t_ls will be ignored, and we need to add an extra element at the end to signify the end of the last polygon.
# If we were to add another polygon to the mesh, its loop start index would be the next loop index.
t_ls = set(t_ls[1:])
t_ls.add(loop_nbr)
todo_edges = [None] * len(me.edges) * 2
# Sigh, cannot access edge.key through foreach_get... :/
me.edges.foreach_get("vertices", todo_edges)
todo_edges = set((v1, v2) if v1 < v2 else (v2, v1) for v1, v2 in zip(*(iter(todo_edges),) * 2))
eli_fbx_dtype = np.int32
li = 0
vi = vi_start = t_pvi[0]
for li_next, vi_next in enumerate(t_pvi[1:] + t_pvi[:1], start=1):
if li_next in t_ls: # End of a poly's loop.
vi2 = vi_start
vi_start = vi_next
else:
vi2 = vi_next
# Edge index of each unique edge-key, used to map per-edge data to unique edge-keys (t_pvi).
t_pvi_edge_indices = np.empty(0, dtype=t_lei.dtype)
e_key = (vi, vi2) if vi < vi2 else (vi2, vi)
if e_key in todo_edges:
t_eli.append(li)
todo_edges.remove(e_key)
edges_map[e_key] = edges_nbr
edges_nbr += 1
pvi_fbx_dtype = np.int32
if t_ls.size and t_lvi.size:
# Get unsorted edge keys by indexing the edge->vertex-indices array by the loop->edge-index array.
t_pvi_edge_keys = t_ev_pair_view[t_lei]
vi = vi_next
li = li_next
# End of edges!
# Sort each [edge_start_n, edge_end_n] pair to get edge keys. Heapsort seems to be the fastest for this specific
# use case.
t_pvi_edge_keys.sort(axis=1, kind='heapsort')
# Note that finding unique edge keys means that if there are multiple edges that share the same vertices (which
# shouldn't normally happen), only the first edge found in loops will be exported along with its per-edge data.
# To export separate edges that share the same vertices, fast_first_axis_unique can be replaced with np.unique
# with t_lei as the first argument, finding unique edges rather than unique edge keys.
#
# Since we want the unique values in their original order, the only part we care about is the indices of the
# first occurrence of the unique elements in t_pvi_edge_keys, so we can use our fast uniqueness helper function.
t_eli = fast_first_axis_unique(t_pvi_edge_keys, return_unique=False, return_index=True)
# To get the indices of the elements in t_pvi_edge_keys that produce unique values, but in the original order of
# t_pvi_edge_keys, t_eli must be sorted.
# Due to loops and their edge keys tending to have a partial ordering within meshes, sorting with kind='stable'
# with radix sort tends to be faster than the default of kind='quicksort' with introsort.
t_eli.sort(kind='stable')
# Edge index of each element in unique t_pvi_edge_keys, used to map per-edge data such as sharp and creases.
t_pvi_edge_indices = t_lei[t_eli]
# We have to ^-1 last index of each loop.
for ls in t_ls:
t_pvi[ls - 1] ^= -1
# Ensure t_pvi is the correct number of bits before inverting.
t_pvi = astype_view_signedness(t_lvi, pvi_fbx_dtype)
# The index of the end of each loop is one before the index of the start of the next loop.
t_pvi[t_ls[1:] - 1] ^= -1
# The index of the end of the last loop will be the very last index.
t_pvi[-1] ^= -1
del t_pvi_edge_keys
else:
# Should be empty, but make sure it's the correct type.
t_pvi = np.empty(0, dtype=pvi_fbx_dtype)
t_eli = np.empty(0, dtype=eli_fbx_dtype)
# And finally we can write data!
t_pvi = astype_view_signedness(t_pvi, pvi_fbx_dtype)
t_eli = astype_view_signedness(t_eli, eli_fbx_dtype)
elem_data_single_int32_array(geom, b"PolygonVertexIndex", t_pvi)
elem_data_single_int32_array(geom, b"Edges", t_eli)
del t_lvi
del t_pvi
del t_ls
del t_eli
del t_ev
del t_ev_pair_view
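The ^= -1 writes encode FBX's PolygonVertexIndex convention: the last index of every polygon is stored bitwise-negated, and x ^ -1 == -x - 1. A tiny sketch with two triangles:

    import numpy as np

    t_pvi = np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)  # loop vertex indices
    t_ls = np.array([0, 3], dtype=np.int32)               # loop start of each polygon
    t_pvi[t_ls[1:] - 1] ^= -1                             # end of every polygon but the last
    t_pvi[-1] ^= -1                                       # end of the last polygon
    # t_pvi is now [0, 1, -3, 3, 4, -6]; decoding applies the same XOR again.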
# And now, layers!
# Smoothing.
if smooth_type in {'FACE', 'EDGE'}:
t_ps = None
ps_fbx_dtype = np.int32
poly_use_smooth_dtype = bool
edge_use_sharp_dtype = bool
_map = b""
if smooth_type == 'FACE':
t_ps = array.array(data_types.ARRAY_INT32, (0,)) * len(me.polygons)
t_ps = np.empty(len(me.polygons), dtype=poly_use_smooth_dtype)
me.polygons.foreach_get("use_smooth", t_ps)
_map = b"ByPolygon"
else: # EDGE
_map = b"ByEdge"
if t_pvi_edge_indices.size:
# Write Edge Smoothing.
# Note edge is sharp also if it's used by more than two faces, or one of its faces is flat.
t_ps = array.array(data_types.ARRAY_INT32, (0,)) * edges_nbr
sharp_edges = set()
temp_sharp_edges = {}
for p in me.polygons:
if not p.use_smooth:
sharp_edges.update(p.edge_keys)
continue
for k in p.edge_keys:
if temp_sharp_edges.setdefault(k, 0) > 1:
sharp_edges.add(k)
mesh_poly_nbr = len(me.polygons)
mesh_edge_nbr = len(me.edges)
mesh_loop_nbr = len(me.loops)
# t_ls and t_lei may contain extra polygons or loops added for loose edges that are not present in the
# mesh data, so create views that exclude the extra data added for loose edges.
mesh_t_ls_view = t_ls[:mesh_poly_nbr]
mesh_t_lei_view = t_lei[:mesh_loop_nbr]
# - Get sharp edges from flat shaded faces
# Get the 'use_smooth' attribute of all polygons.
p_use_smooth_mask = np.empty(mesh_poly_nbr, dtype=poly_use_smooth_dtype)
me.polygons.foreach_get('use_smooth', p_use_smooth_mask)
# Invert to get all flat shaded polygons.
p_flat_mask = np.invert(p_use_smooth_mask, out=p_use_smooth_mask)
# Convert flat shaded polygons to flat shaded loops by repeating each element by the number of sides of
# that polygon.
# Polygon sides can be calculated from the element-wise difference of loop starts appended by the number
# of loops. Alternatively, polygon sides can be retrieved directly from the 'loop_total' attribute of
# polygons, but since we already have t_ls, it tends to be quicker to calculate from t_ls when above
# around 10_000 polygons.
polygon_sides = np.diff(mesh_t_ls_view, append=mesh_loop_nbr)
p_flat_loop_mask = np.repeat(p_flat_mask, polygon_sides)
# Convert flat shaded loops to flat shaded (sharp) edge indices.
# Note that if an edge is in multiple loops that are part of flat shaded faces, its edge index will end
# up in sharp_edge_indices_from_polygons multiple times.
sharp_edge_indices_from_polygons = mesh_t_lei_view[p_flat_loop_mask]
# - Get sharp edges from edges marked as sharp
e_use_sharp_mask = np.empty(mesh_edge_nbr, dtype=edge_use_sharp_dtype)
me.edges.foreach_get('use_edge_sharp', e_use_sharp_mask)
# - Get sharp edges from edges used by more than two loops (and therefore more than two faces)
e_more_than_two_faces_mask = np.bincount(mesh_t_lei_view, minlength=mesh_edge_nbr) > 2
# - Combine with edges that are sharp because they're in more than two faces
e_use_sharp_mask = np.logical_or(e_use_sharp_mask, e_more_than_two_faces_mask, out=e_use_sharp_mask)
# - Combine with edges that are sharp because a polygon they're in has flat shading
e_use_sharp_mask[sharp_edge_indices_from_polygons] = True
# - Convert sharp edges to sharp edge keys (t_pvi)
ek_use_sharp_mask = e_use_sharp_mask[t_pvi_edge_indices]
# - Sharp edges are indicated in FBX as zero (False), so invert
t_ps = np.invert(ek_use_sharp_mask, out=ek_use_sharp_mask)
del ek_use_sharp_mask
del e_use_sharp_mask
del sharp_edge_indices_from_polygons
del p_flat_loop_mask
del polygon_sides
del p_flat_mask
del p_use_smooth_mask
del mesh_t_lei_view
del mesh_t_ls_view
else:
temp_sharp_edges[k] += 1
del temp_sharp_edges
for e in me.edges:
if e.key not in edges_map:
continue # Only loose edges, in theory!
t_ps[edges_map[e.key]] = not (e.use_edge_sharp or (e.key in sharp_edges))
_map = b"ByEdge"
t_ps = np.empty(0, dtype=ps_fbx_dtype)
t_ps = t_ps.astype(ps_fbx_dtype, copy=False)
lay_smooth = elem_data_single_int32(geom, b"LayerElementSmoothing", 0)
elem_data_single_int32(lay_smooth, b"Version", FBX_GEOMETRY_SMOOTHING_VERSION)
elem_data_single_string(lay_smooth, b"Name", b"")
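The sharpness logic condensed to toy data: an edge ends up sharp when it is flagged sharp, used by more than two faces, or used by any flat-shaded face.

    import numpy as np

    t_lei = np.array([0, 1, 2, 0, 3, 4, 0, 5, 6])  # edge index of each loop (3 triangles)
    t_ls = np.array([0, 3, 6])                     # loop starts
    flat_poly = np.array([False, True, False])     # polygon 1 is flat shaded
    sides = np.diff(t_ls, append=len(t_lei))       # -> [3, 3, 3]
    flat_loops = np.repeat(flat_poly, sides)       # per-loop flat mask
    sharp = np.bincount(t_lei, minlength=7) > 2    # edge 0 is used by three faces
    sharp[t_lei[flat_loops]] = True                # edges of flat faces are sharp too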
@@ -1003,16 +1102,29 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
elem_data_single_string(lay_smooth, b"ReferenceInformationType", b"Direct")
elem_data_single_int32_array(lay_smooth, b"Smoothing", t_ps) # Sigh, int32 for bool...
del t_ps
del t_ls
del t_lei
# Edge crease for subdivision
if write_crease:
t_ec = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * edges_nbr
for e in me.edges:
if e.key not in edges_map:
continue # Only loose edges, in theory!
ec_fbx_dtype = np.float64
if t_pvi_edge_indices.size:
ec_bl_dtype = np.single
t_ec_raw = np.empty(len(me.edges), dtype=ec_bl_dtype)
me.edges.foreach_get('crease', t_ec_raw)
# Convert to t_pvi edge-keys.
t_ec_ek_raw = t_ec_raw[t_pvi_edge_indices]
# Blender squares those values before sending them to OpenSubdiv, while other software does not,
# so we need to compensate for that to get similar results through FBX...
t_ec[edges_map[e.key]] = e.crease * e.crease
# Use the precision of the fbx dtype for the calculation since it's usually higher precision.
t_ec_ek_raw = t_ec_ek_raw.astype(ec_fbx_dtype, copy=False)
t_ec = np.square(t_ec_ek_raw, out=t_ec_ek_raw)
del t_ec_ek_raw
del t_ec_raw
else:
t_ec = np.empty(0, dtype=ec_fbx_dtype)
lay_crease = elem_data_single_int32(geom, b"LayerElementEdgeCrease", 0)
elem_data_single_int32(lay_crease, b"Version", FBX_GEOMETRY_CREASE_VERSION)
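The crease compensation in isolation (illustrative values):

    import numpy as np

    crease_bl = np.array([0.0, 0.5, 1.0], dtype=np.single)  # as from foreach_get
    # Blender squares crease values before handing them to OpenSubdiv, so the
    # export squares them too for matching results on the other side.
    crease_fbx = np.square(crease_bl.astype(np.float64))    # -> [0.0, 0.25, 1.0]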
@@ -1023,7 +1135,7 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
del t_ec
# And we are done with edges!
del edges_map
del t_pvi_edge_indices
# Loop normals.
tspacenumber = 0
@@ -1033,28 +1145,35 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
# but this does not seem well supported by apps currently...
me.calc_normals_split()
t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 3
ln_bl_dtype = np.single
ln_fbx_dtype = np.float64
t_ln = np.empty(len(me.loops) * 3, dtype=ln_bl_dtype)
me.loops.foreach_get("normal", t_ln)
t_ln = nors_transformed_gen(t_ln, geom_mat_no)
t_ln = nors_transformed(t_ln, geom_mat_no, ln_fbx_dtype)
if 0:
t_ln = tuple(t_ln) # No choice... :/
lnidx_fbx_dtype = np.int32
lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
elem_data_single_int32(lay_nor, b"Version", FBX_GEOMETRY_NORMAL_VERSION)
elem_data_single_string(lay_nor, b"Name", b"")
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"IndexToDirect")
ln2idx = tuple(set(t_ln))
elem_data_single_float64_array(lay_nor, b"Normals", chain(*ln2idx))
# Tuple of unique sorted normals and then the index in the unique sorted normals of each normal in t_ln.
# Since we don't care about how the normals are sorted, only that they're unique, we can use the fast unique
# helper function.
t_ln, t_lnidx = fast_first_axis_unique(t_ln.reshape(-1, 3), return_inverse=True)
# Convert to the type for fbx
t_lnidx = astype_view_signedness(t_lnidx, lnidx_fbx_dtype)
elem_data_single_float64_array(lay_nor, b"Normals", t_ln)
# Normal weights, no idea what it is.
# t_lnw = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(ln2idx)
# t_lnw = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(t_ln)
# elem_data_single_float64_array(lay_nor, b"NormalsW", t_lnw)
ln2idx = {nor: idx for idx, nor in enumerate(ln2idx)}
elem_data_single_int32_array(lay_nor, b"NormalsIndex", (ln2idx[n] for n in t_ln))
elem_data_single_int32_array(lay_nor, b"NormalsIndex", t_lnidx)
del ln2idx
del t_lnidx
# del t_lnw
else:
lay_nor = elem_data_single_int32(geom, b"LayerElementNormal", 0)
@@ -1062,7 +1181,7 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
elem_data_single_string(lay_nor, b"Name", b"")
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Normals", chain(*t_ln))
elem_data_single_float64_array(lay_nor, b"Normals", t_ln)
# Normal weights, no idea what it is.
# t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops)
# elem_data_single_float64_array(lay_nor, b"NormalsW", t_ln)
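What the IndexToDirect branch computes with fast_first_axis_unique can be sketched with plain np.unique; the helper exists because np.unique sorts its output and is slower on this data, but the unique/inverse pairing is the same idea:

    import numpy as np

    normals = np.array([[0., 0., 1.], [1., 0., 0.], [0., 0., 1.]])
    unique, inverse = np.unique(normals, axis=0, return_inverse=True)
    # unique  -> deduplicated rows, written to b"Normals"
    # inverse -> per-loop index into unique, written to b"NormalsIndex"
    assert np.array_equal(unique[inverse], normals)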
@@ -1073,9 +1192,10 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
tspacenumber = len(me.uv_layers)
if tspacenumber:
# We can only compute tspace on tessellated meshes, need to check that here...
t_lt = [None] * len(me.polygons)
lt_bl_dtype = np.uintc
t_lt = np.empty(len(me.polygons), dtype=lt_bl_dtype)
me.polygons.foreach_get("loop_total", t_lt)
if any((lt > 4 for lt in t_lt)):
if (t_lt > 4).any():
del t_lt
scene_data.settings.report(
{'WARNING'},
@@ -1084,7 +1204,7 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
else:
del t_lt
num_loops = len(me.loops)
t_ln = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * num_loops * 3
t_ln = np.empty(num_loops * 3, dtype=ln_bl_dtype)
# t_lnw = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops)
uv_names = [uvlayer.name for uvlayer in me.uv_layers]
# Annoying, `me.calc_tangent` errors in case there is no geometry...
@@ -1102,7 +1222,7 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Binormals",
chain(*nors_transformed_gen(t_ln, geom_mat_no)))
nors_transformed(t_ln, geom_mat_no, ln_fbx_dtype))
# Binormal weights, no idea what it is.
# elem_data_single_float64_array(lay_nor, b"BinormalsW", t_lnw)
@@ -1115,7 +1235,7 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
elem_data_single_string(lay_nor, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_nor, b"ReferenceInformationType", b"Direct")
elem_data_single_float64_array(lay_nor, b"Tangents",
chain(*nors_transformed_gen(t_ln, geom_mat_no)))
nors_transformed(t_ln, geom_mat_no, ln_fbx_dtype))
# Tangent weights, no idea what it is.
# elem_data_single_float64_array(lay_nor, b"TangentsW", t_lnw)
@@ -1129,70 +1249,164 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
colors_type = scene_data.settings.colors_type
vcolnumber = 0 if colors_type == 'NONE' else len(me.color_attributes)
if vcolnumber:
def _coltuples_gen(raw_cols):
return zip(*(iter(raw_cols),) * 4)
color_prop_name = "color_srgb" if colors_type == 'SRGB' else "color"
# ByteColorAttribute color also gets returned by the API as single precision float
bl_lc_dtype = np.single
bl_lvi_dtype = np.uintc
fbx_lc_dtype = np.float64
fbx_lcidx_dtype = np.int32
t_lvi = None
for colindex, collayer in enumerate(me.color_attributes):
color_attributes = me.color_attributes
if scene_data.settings.prioritize_active_color:
active_color = me.color_attributes.active_color
color_attributes = sorted(color_attributes, key=lambda x: x == active_color, reverse=True)
for colindex, collayer in enumerate(color_attributes):
is_point = collayer.domain == "POINT"
vcollen = len(me.vertices if is_point else me.loops)
t_lc = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * vcollen * 4
# Each rgba component is flattened in the array
t_lc = np.empty(vcollen * 4, dtype=bl_lc_dtype)
collayer.data.foreach_get(color_prop_name, t_lc)
lay_vcol = elem_data_single_int32(geom, b"LayerElementColor", colindex)
elem_data_single_int32(lay_vcol, b"Version", FBX_GEOMETRY_VCOLOR_VERSION)
elem_data_single_string_unicode(lay_vcol, b"Name", collayer.name)
elem_data_single_string(lay_vcol, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_vcol, b"ReferenceInformationType", b"IndexToDirect")
col2idx = tuple(set(_coltuples_gen(t_lc)))
elem_data_single_float64_array(lay_vcol, b"Colors", chain(*col2idx)) # Flatten again...
# Use the fast uniqueness helper function since we don't care about sorting.
t_lc, col_indices = fast_first_axis_unique(t_lc.reshape(-1, 4), return_inverse=True)
col2idx = {col: idx for idx, col in enumerate(col2idx)}
col_indices = list(col2idx[c] for c in _coltuples_gen(t_lc))
if is_point:
# for "point" domain colors, we could directly emit them
# with a "ByVertex" mapping type, but some software does not
# properly understand that. So expand to full "ByPolygonVertex"
# index map.
col_indices = list((col_indices[c.vertex_index] for c in me.loops))
if t_lvi is None:
t_lvi = np.empty(len(me.loops), dtype=bl_lvi_dtype)
me.loops.foreach_get("vertex_index", t_lvi)
col_indices = col_indices[t_lvi]
t_lc = t_lc.astype(fbx_lc_dtype, copy=False)
col_indices = astype_view_signedness(col_indices, fbx_lcidx_dtype)
elem_data_single_float64_array(lay_vcol, b"Colors", t_lc)
elem_data_single_int32_array(lay_vcol, b"ColorIndex", col_indices)
del col2idx
del t_lc
del _coltuples_gen
del col_indices
del t_lvi
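The POINT-domain expansion is plain fancy indexing; a toy version:

    import numpy as np

    per_vertex_idx = np.array([7, 8, 9], dtype=np.int32)  # color index per vertex
    t_lvi = np.array([0, 1, 2, 2, 1, 0], dtype=np.uintc)  # vertex index of each loop
    per_loop_idx = per_vertex_idx[t_lvi]                  # -> [7, 8, 9, 9, 8, 7]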
# Write UV layers.
# Note: LayerElementTexture is deprecated since FBX 2011 - luckily!
# Textures are now only related to materials, in FBX!
uvnumber = len(me.uv_layers)
if uvnumber:
luv_bl_dtype = np.single
luv_fbx_dtype = np.float64
lv_idx_bl_dtype = np.uintc
lv_idx_fbx_dtype = np.int32
t_luv = np.empty(len(me.loops) * 2, dtype=luv_bl_dtype)
# Fast view for sort-based uniqueness of pairs.
t_luv_fast_pair_view = fast_first_axis_flat(t_luv.reshape(-1, 2))
# It must be a view of t_luv otherwise it won't update when t_luv is updated.
assert(t_luv_fast_pair_view.base is t_luv)
# Looks like this mapping is also expected to convey UV islands (arg..... :((((( ).
# So we need to generate unique triplets (uv, vertex_idx) here, not only just based on UV values.
def _uvtuples_gen(raw_uvs, raw_lvidxs):
return zip(zip(*(iter(raw_uvs),) * 2), raw_lvidxs)
t_luv = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 2
t_lvidx = array.array(data_types.ARRAY_INT32, (0,)) * len(me.loops)
t_lvidx = np.empty(len(me.loops), dtype=lv_idx_bl_dtype)
me.loops.foreach_get("vertex_index", t_lvidx)
# If we were to create a combined array of (uv, vertex_idx) elements, we could find unique triplets by sorting
# that array by first sorting by the vertex_idx column and then sorting by the uv column using a stable sorting
# algorithm.
# This is exactly what we'll do, but without creating the combined array, because only the uv elements are
# included in the export and the vertex_idx column is the same for every uv layer.
# Because the vertex_idx column is the same for every uv layer, the vertex_idx column can be sorted in advance.
# argsort gets the indices that sort the array, which are needed to be able to sort the array of uv pairs in the
# same way to create the indices that recreate the full uvs from the unique uvs.
# Loops and vertices tend to naturally have a partial ordering, which makes sorting with kind='stable' (radix
# sort) faster than the default of kind='quicksort' (introsort) in most cases.
perm_vidx = t_lvidx.argsort(kind='stable')
# Mask and uv indices arrays will be modified and re-used by each uv layer.
unique_mask = np.empty(len(me.loops), dtype=np.bool_)
unique_mask[:1] = True
uv_indices = np.empty(len(me.loops), dtype=lv_idx_fbx_dtype)
for uvindex, uvlayer in enumerate(me.uv_layers):
uvlayer.data.foreach_get("uv", t_luv)
lay_uv = elem_data_single_int32(geom, b"LayerElementUV", uvindex)
elem_data_single_int32(lay_uv, b"Version", FBX_GEOMETRY_UV_VERSION)
elem_data_single_string_unicode(lay_uv, b"Name", uvlayer.name)
elem_data_single_string(lay_uv, b"MappingInformationType", b"ByPolygonVertex")
elem_data_single_string(lay_uv, b"ReferenceInformationType", b"IndexToDirect")
uv_ids = tuple(set(_uvtuples_gen(t_luv, t_lvidx)))
elem_data_single_float64_array(lay_uv, b"UV", chain(*(uv for uv, vidx in uv_ids))) # Flatten again...
uvlayer.data.foreach_get("uv", t_luv)
uv2idx = {uv_id: idx for idx, uv_id in enumerate(uv_ids)}
elem_data_single_int32_array(lay_uv, b"UVIndex", (uv2idx[uv_id] for uv_id in _uvtuples_gen(t_luv, t_lvidx)))
del uv2idx
del uv_ids
del t_luv
# t_luv_fast_pair_view is a view in a dtype that compares elements by individual bytes, but float types have
# separate byte representations of positive and negative zero. For uniqueness, these should be considered
# the same, so replace all -0.0 with 0.0 in advance.
t_luv[t_luv == -0.0] = 0.0
# These steps to create unique_uv_pairs are the same as how np.unique would find unique values by sorting a
# structured array where each element is a triplet of (uv, vertex_idx), except uv and vertex_idx are
# separate arrays here and vertex_idx has already been sorted in advance.
# Sort according to the vertex_idx column, using the precalculated indices that sort it.
sorted_t_luv_fast = t_luv_fast_pair_view[perm_vidx]
# Get the indices that would sort the sorted uv pairs. Stable sorting must be used to maintain the sorting
# of the vertex indices.
perm_uv_pairs = sorted_t_luv_fast.argsort(kind='stable')
# Use the indices to sort both the uv pairs and the vertex_idx columns.
perm_combined = perm_vidx[perm_uv_pairs]
sorted_vidx = t_lvidx[perm_combined]
sorted_t_luv_fast = sorted_t_luv_fast[perm_uv_pairs]
# Create a mask where either the uv pair doesn't equal the previous value in the array, or the vertex index
# doesn't equal the previous value, these will be the unique uv-vidx triplets.
# For an imaginary triplet array:
# ...
# [(0.4, 0.2), 0]
# [(0.4, 0.2), 1] -> Unique because vertex index different from previous
# [(0.4, 0.2), 2] -> Unique because vertex index different from previous
# [(0.7, 0.6), 2] -> Unique because uv different from previous
# [(0.7, 0.6), 2]
# ...
# Output the result into unique_mask.
np.logical_or(sorted_t_luv_fast[1:] != sorted_t_luv_fast[:-1], sorted_vidx[1:] != sorted_vidx[:-1],
out=unique_mask[1:])
# Get each uv pair marked as unique by the unique_mask and then view as the original dtype.
unique_uvs = sorted_t_luv_fast[unique_mask].view(luv_bl_dtype)
# NaN values are considered invalid and indicate a bug somewhere else in Blender or in an addon; we want
# these bugs to be reported instead of hidden by letting the export continue.
if np.isnan(unique_uvs).any():
raise RuntimeError("UV layer %s on %r has invalid UVs containing NaN values" % (uvlayer.name, me))
# Convert to the type needed for fbx
unique_uvs = unique_uvs.astype(luv_fbx_dtype, copy=False)
# Set the indices of pairs in unique_uvs that reconstruct the pairs in t_luv into uv_indices.
# uv_indices will then be the same as an inverse array returned by np.unique with return_inverse=True.
uv_indices[perm_combined] = np.cumsum(unique_mask, dtype=uv_indices.dtype) - 1
elem_data_single_float64_array(lay_uv, b"UV", unique_uvs)
elem_data_single_int32_array(lay_uv, b"UVIndex", uv_indices)
del unique_uvs
del sorted_t_luv_fast
del sorted_vidx
del perm_uv_pairs
del perm_combined
del uv_indices
del unique_mask
del perm_vidx
del t_lvidx
del _uvtuples_gen
del t_luv
del t_luv_fast_pair_view
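Conceptually, the sort-based steps above compute what np.unique would on a combined (u, v, vertex_idx) array; the exporter just never materializes that array and reuses the vertex_idx sort across layers. A compact equivalent on toy data:

    import numpy as np

    uvs = np.array([[0.4, 0.2], [0.4, 0.2], [0.7, 0.6]], dtype=np.single)
    vidx = np.array([0, 1, 1], dtype=np.uintc)
    triplets = np.column_stack((uvs, vidx))   # one (u, v, vertex_idx) row per loop
    unique, inverse = np.unique(triplets, axis=0, return_inverse=True)
    unique_uvs = unique[:, :2]   # what goes into b"UV"
    uv_indices = inverse         # what goes into b"UVIndex"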
# Face's materials.
me_fbxmaterials_idx = scene_data.mesh_material_indices.get(me)
@@ -1205,16 +1419,27 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
elem_data_single_string(lay_ma, b"Name", b"")
nbr_mats = len(me_fbxmaterials_idx)
if nbr_mats > 1:
t_pm = array.array(data_types.ARRAY_INT32, (0,)) * len(me.polygons)
bl_pm_dtype = np.uintc
fbx_pm_dtype = np.int32
t_pm = np.empty(len(me.polygons), dtype=bl_pm_dtype)
me.polygons.foreach_get("material_index", t_pm)
# We have to validate mat indices, and map them to FBX indices.
# Note a mat might not be in me_fbxmats_idx (e.g. node mats are ignored).
def_ma = next(me_fbxmaterials_idx[m] for m in me_blmaterials if m in me_fbxmaterials_idx)
blmaterials_to_fbxmaterials_idxs = [me_fbxmaterials_idx.get(m, def_ma) for m in me_blmaterials]
ma_idx_limit = len(blmaterials_to_fbxmaterials_idxs)
_gen = (blmaterials_to_fbxmaterials_idxs[m] if m < ma_idx_limit else def_ma for m in t_pm)
t_pm = array.array(data_types.ARRAY_INT32, _gen)
# Note a mat might not be in me_fbxmaterials_idx (e.g. node mats are ignored).
# The first valid material will be used for materials out of bounds of me_blmaterials or materials not
# in me_fbxmaterials_idx.
def_me_blmaterial_idx, def_ma = next(
(i, me_fbxmaterials_idx[m]) for i, m in enumerate(me_blmaterials) if m in me_fbxmaterials_idx)
# Set material indices that are out of bounds to the default material index
mat_idx_limit = len(me_blmaterials)
t_pm[t_pm >= mat_idx_limit] = def_me_blmaterial_idx
# Map to FBX indices. Materials not in me_fbxmaterials_idx will be set to the default material index.
blmat_fbx_idx = np.fromiter((me_fbxmaterials_idx.get(m, def_ma) for m in me_blmaterials),
dtype=fbx_pm_dtype)
t_pm = blmat_fbx_idx[t_pm]
elem_data_single_string(lay_ma, b"MappingInformationType", b"ByPolygon")
# XXX Logically, should be "Direct" reference type, since we do not have any index array, and have one
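The material remapping is a small lookup-table pattern; with toy data:

    import numpy as np

    t_pm = np.array([0, 2, 5], dtype=np.uintc)  # per-polygon Blender material indices
    lut = np.array([0, 1, 1], dtype=np.int32)   # Blender slot -> FBX material index
    t_pm[t_pm >= len(lut)] = 0                  # out-of-range slots fall back to the default
    fbx_pm = lut[t_pm]                          # -> [0, 1, 0]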
@@ -2375,6 +2600,18 @@ def fbx_data_from_scene(scene, depsgraph, settings):
# ShapeKeys.
data_deformers_shape = {}
geom_mat_co = settings.global_matrix if settings.bake_space_transform else None
co_bl_dtype = np.single
co_fbx_dtype = np.float64
idx_fbx_dtype = np.int32
def empty_verts_fallbacks():
"""Create fallback arrays for when there are no verts"""
# FBX does not like empty shapes (makes Unity crash e.g.).
# To prevent this, we add a vertex that does nothing, but it keeps the shape key intact
single_vert_co = np.zeros((1, 3), dtype=co_fbx_dtype)
single_vert_idx = np.zeros(1, dtype=idx_fbx_dtype)
return single_vert_co, single_vert_idx
for me_key, me, _free in data_meshes.values():
if not (me.shape_keys and len(me.shape_keys.key_blocks) > 1): # We do not want basis-only relative skeys...
continue
@@ -2382,41 +2619,44 @@ def fbx_data_from_scene(scene, depsgraph, settings):
continue
shapes_key = get_blender_mesh_shape_key(me)
# We gather all vcos first, since some skeys may be based on others...
_cos = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.vertices) * 3
me.vertices.foreach_get("co", _cos)
v_cos = tuple(vcos_transformed_gen(_cos, geom_mat_co))
sk_cos = {}
for shape in me.shape_keys.key_blocks[1:]:
shape.data.foreach_get("co", _cos)
sk_cos[shape] = tuple(vcos_transformed_gen(_cos, geom_mat_co))
sk_base = me.shape_keys.key_blocks[0]
# Get and cache only the cos that we need
@cache
def sk_cos(shape_key):
_cos = np.empty(len(me.vertices) * 3, dtype=co_bl_dtype)
if shape_key == sk_base:
me.vertices.foreach_get("co", _cos)
else:
shape_key.data.foreach_get("co", _cos)
return vcos_transformed(_cos, geom_mat_co, co_fbx_dtype)
for shape in me.shape_keys.key_blocks[1:]:
# Only write vertices really different from org coordinates!
shape_verts_co = []
shape_verts_idx = []
# Only write vertices really different from base coordinates!
relative_key = shape.relative_key
if shape == relative_key:
# Shape is its own relative key, so it does nothing
shape_verts_co, shape_verts_idx = empty_verts_fallbacks()
else:
sv_cos = sk_cos(shape)
ref_cos = sk_cos(shape.relative_key)
sv_cos = sk_cos[shape]
ref_cos = v_cos if shape.relative_key == sk_base else sk_cos[shape.relative_key]
for idx, (sv_co, ref_co) in enumerate(zip(sv_cos, ref_cos)):
if similar_values_iter(sv_co, ref_co):
# Note: Maybe this is a bit too simplistic, should we use real shape base here? Though FBX does not
# have this at all... Anyway, this should cover most common cases imho.
continue
shape_verts_co.extend(Vector(sv_co) - Vector(ref_co))
shape_verts_idx.append(idx)
# Exclude cos similar to ref_cos and get the indices of the cos that remain
shape_verts_co, shape_verts_idx = shape_difference_exclude_similar(sv_cos, ref_cos)
# FBX does not like empty shapes (makes Unity crash e.g.).
# To prevent this, we add a vertex that does nothing, but it keeps the shape key intact
if not shape_verts_co:
shape_verts_co.extend((0, 0, 0))
shape_verts_idx.append(0)
if not shape_verts_co.size:
shape_verts_co, shape_verts_idx = empty_verts_fallbacks()
else:
# Ensure the indices are of the correct type
shape_verts_idx = astype_view_signedness(shape_verts_idx, idx_fbx_dtype)
channel_key, geom_key = get_blender_mesh_shape_channel_key(me, shape)
data = (channel_key, geom_key, shape_verts_co, shape_verts_idx)
data_deformers_shape.setdefault(me, (me_key, shapes_key, {}))[2][shape] = data
del sk_cos
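functools.cache keys on its argument, so each shape key's coordinates are fetched and transformed at most once, and only for keys that are actually referenced. A sketch with a hypothetical stand-in:

    from functools import cache

    calls = 0

    @cache
    def cos_for(shape_key_name):      # stand-in for the sk_cos(shape_key) above
        global calls
        calls += 1
        return [len(shape_key_name)]  # pretend this is the transformed co array

    cos_for("Basis"); cos_for("Smile"); cos_for("Basis")
    assert calls == 2                 # "Basis" computed once, then served from cache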
perfmon.step("FBX export prepare: Wrapping Armatures...")
# Armatures!
@@ -3055,6 +3295,7 @@ def save_single(operator, scene, depsgraph, filepath="",
bake_space_transform=False,
armature_nodetype='NULL',
colors_type='SRGB',
prioritize_active_color=False,
**kwargs
):
@@ -3122,7 +3363,7 @@ def save_single(operator, scene, depsgraph, filepath="",
add_leaf_bones, bone_correction_matrix, bone_correction_matrix_inv,
bake_anim, bake_anim_use_all_bones, bake_anim_use_nla_strips, bake_anim_use_all_actions,
bake_anim_step, bake_anim_simplify_factor, bake_anim_force_startend_keying,
False, media_settings, use_custom_props, colors_type,
False, media_settings, use_custom_props, colors_type, prioritize_active_color
)
import bpy_extras.io_utils

View File

@@ -260,18 +260,46 @@ def similar_values_iter(v1, v2, e=1e-6):
return False
return True
def shape_difference_exclude_similar(sv_cos, ref_cos, e=1e-6):
"""Return a tuple of:
the difference between the vertex cos in sv_cos and ref_cos, excluding any that are nearly the same,
and the indices of the vertices that are not nearly the same"""
assert(sv_cos.size == ref_cos.size)
# Create views of 1 co per row of the arrays, only making copies if needed.
sv_cos = sv_cos.reshape(-1, 3)
ref_cos = ref_cos.reshape(-1, 3)
# Quick check for equality
if np.array_equal(sv_cos, ref_cos):
# There's no difference between the two arrays.
empty_cos = np.empty((0, 3), dtype=sv_cos.dtype)
empty_indices = np.empty(0, dtype=np.int32)
return empty_cos, empty_indices
# Note that unlike math.isclose(a, b), np.isclose(a, b) is not symmetric: the second argument 'b' is
# considered to be the reference value.
# Note that atol=0 will mean that if only one co component being compared is zero, they won't be considered close.
similar_mask = np.isclose(sv_cos, ref_cos, atol=0, rtol=e)
# A co is only similar if every component in it is similar.
co_similar_mask = np.all(similar_mask, axis=1)
# Get the indices of cos that are not similar.
not_similar_verts_idx = np.flatnonzero(~co_similar_mask)
# Subtracting first over the entire arrays and then indexing seems faster than indexing both arrays first and then
# subtracting, until less than about 3% of the cos are being indexed.
difference_cos = (sv_cos - ref_cos)[not_similar_verts_idx]
return difference_cos, not_similar_verts_idx
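The core of shape_difference_exclude_similar on toy cos:

    import numpy as np

    sv_cos = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]])
    ref_cos = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 4.0]])
    similar = np.all(np.isclose(sv_cos, ref_cos, atol=0, rtol=1e-6), axis=1)
    idx = np.flatnonzero(~similar)    # -> [1]
    diff = (sv_cos - ref_cos)[idx]    # -> [[0., 0., -1.]]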
def vcos_transformed_gen(raw_cos, m=None):
# Note: we could most likely get much better performances with numpy, but will leave this as TODO for now.
gen = zip(*(iter(raw_cos),) * 3)
return gen if m is None else (m @ Vector(v) for v in gen)
def nors_transformed_gen(raw_nors, m=None):
# Great, now normals are also expected 4D!
# XXX Back to 3D normals for now!
# gen = zip(*(iter(raw_nors),) * 3 + (_infinite_gen(1.0),))
gen = zip(*(iter(raw_nors),) * 3)
return gen if m is None else (m @ Vector(v) for v in gen)
def _mat4_vec3_array_multiply(mat4, vec3_array, dtype=None, return_4d=False):
"""Multiply a 4d matrix by each 3d vector in an array and return as an array of either 3d or 4d vectors.
@@ -1461,7 +1489,7 @@ FBXExportSettings = namedtuple("FBXExportSettings", (
"bone_correction_matrix", "bone_correction_matrix_inv",
"bake_anim", "bake_anim_use_all_bones", "bake_anim_use_nla_strips", "bake_anim_use_all_actions",
"bake_anim_step", "bake_anim_simplify_factor", "bake_anim_force_startend_keying",
"use_metadata", "media_settings", "use_custom_props", "colors_type",
"use_metadata", "media_settings", "use_custom_props", "colors_type", "prioritize_active_color"
))
# Helper container gathering some data we need multiple times:

View File

@@ -4,7 +4,7 @@
bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
"version": (3, 6, 4),
"version": (3, 6, 5),
'blender': (3, 5, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',

View File

@@ -137,5 +137,5 @@ def __convert_keyframes(obj_uuid: str, channel: str, keyframes, action_name: str
return input, output
def __gather_interpolation(export_settings):
# TODO: check if the bone was animated with CONSTANT
# TODO: check if the object was animated with CONSTANT
return 'LINEAR'

View File

@@ -103,5 +103,5 @@ def __convert_keyframes(obj_uuid, keyframes, action_name: str, export_settings):
return input, output
def __gather_interpolation(export_settings):
# TODO: check if the bone was animated with CONSTANT
# TODO: check if the SK was animated with CONSTANT
return 'LINEAR'

View File

@@ -5,6 +5,7 @@ import numpy as np
from ...io.com import gltf2_io, gltf2_io_constants, gltf2_io_debug
from ...io.exp import gltf2_io_binary_data
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
def gather_primitive_attributes(blender_primitive, export_settings):
@@ -151,6 +152,8 @@ def __gather_attribute(blender_primitive, attribute, export_settings):
data['data'] += 0.5 # bias for rounding
data['data'] = data['data'].astype(np.uint16)
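astype() truncates toward zero, so the +0.5 bias turns the cast into round-half-up; for example:

    import numpy as np

    data = np.array([0.2, 0.5, 0.9, 65534.7], dtype=np.float32)
    rounded = (data + 0.5).astype(np.uint16)   # -> [0, 1, 1, 65535]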
export_user_extensions('gather_attribute_change', export_settings, attribute, data, True)
return { attribute : gltf2_io.Accessor(
buffer_view=gltf2_io_binary_data.BinaryData(data['data'].tobytes(), gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER),
byte_offset=None,
@@ -171,6 +174,9 @@ def __gather_attribute(blender_primitive, attribute, export_settings):
return __gather_skins(blender_primitive, export_settings)
else:
export_user_extensions('gather_attribute_change', export_settings, attribute, data, False)
return {
attribute: array_to_accessor(
data['data'],