FBX IO: Corner vert/edge and edge verts access with attributes #104648

Merged
3 changed files with 23 additions and 21 deletions

View File

@@ -912,29 +912,26 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
     # dtypes matching the C data. Matching the C datatype avoids iteration and casting of every element in foreach_get's
     # C code.
-    bl_vertex_index_dtype = bl_edge_index_dtype = bl_loop_index_dtype = np.uintc
+    bl_loop_index_dtype = np.uintc
-    # Start vertex indices of loops. May contain elements for loops added for the export of loose edges.
-    t_lvi = np.empty(len(me.loops), dtype=bl_vertex_index_dtype)
+    # Start vertex indices of loops (corners). May contain elements for loops added for the export of loose edges.
+    t_lvi = MESH_ATTRIBUTE_CORNER_VERT.to_ndarray(attributes)
     # Loop start indices of polygons. May contain elements for the polygons added for the export of loose edges.
     t_ls = np.empty(len(me.polygons), dtype=bl_loop_index_dtype)
     # Vertex indices of edges (unsorted, unlike Mesh.edge_keys), flattened into an array twice the length of the number
     # of edges.
-    t_ev = np.empty(len(me.edges) * 2, dtype=bl_vertex_index_dtype)
+    t_ev = MESH_ATTRIBUTE_EDGE_VERTS.to_ndarray(attributes)
     # Each edge has two vertex indices, so it's useful to view the array as 2d where each element on the first axis is a
     # pair of vertex indices
     t_ev_pair_view = t_ev.view()
     t_ev_pair_view.shape = (-1, 2)
-    # Edge indices of loops. May contain elements for loops added for the export of loose edges.
-    t_lei = np.empty(len(me.loops), dtype=bl_edge_index_dtype)
+    # Edge indices of loops (corners). May contain elements for loops added for the export of loose edges.
+    t_lei = MESH_ATTRIBUTE_CORNER_EDGE.to_ndarray(attributes)
-    me.loops.foreach_get("vertex_index", t_lvi)
     me.polygons.foreach_get("loop_start", t_ls)
-    me.edges.foreach_get("vertices", t_ev)
-    me.loops.foreach_get("edge_index", t_lei)
     # Add "fake" faces for loose edges. Each "fake" face consists of two loops creating a new 2-sided polygon.
     if scene_data.settings.use_mesh_edges:
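
The `to_ndarray` helpers above replace the allocate-then-`foreach_get` pattern with a direct read of the mesh attribute. A minimal sketch of what such a helper has to do, assuming Blender 3.6+ where corner vertices live in the built-in `.corner_vert` INT attribute on the corner domain (the function name and structure here are illustrative, not the addon's actual helper):

```python
import numpy as np

def corner_vert_to_ndarray(attributes):
    # ".corner_vert" is assumed to be the built-in INT corner-domain attribute
    # holding the vertex index of each corner (loop).
    attr = attributes[".corner_vert"]
    # Matching the C int dtype lets foreach_get fill the buffer directly
    # instead of iterating and casting every element.
    array = np.empty(len(attr.data), dtype=np.intc)
    attr.data.foreach_get("value", array)
    return array
```

The same idea covers `.corner_edge` and `.edge_verts`; the flat edge-verts array is then viewed as shape `(-1, 2)` so each row is one edge's vertex pair, which is exactly what `t_ev_pair_view` does above.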

View File

@@ -414,8 +414,13 @@ def nors_transformed(raw_nors, m=None, dtype=None):
 def astype_view_signedness(arr, new_dtype):
-    """Unsafely views arr as new_dtype if the itemsize and byteorder of arr matches but the signedness does not,
-    otherwise calls np.ndarray.astype with copy=False.
+    """Unsafely views arr as new_dtype if the itemsize and byteorder of arr match but the signedness does not.
+
+    Safely views arr as new_dtype if both arr and new_dtype have the same itemsize, byteorder and signedness, but could
+    have a different character code, e.g. 'i' and 'l'. np.ndarray.astype with copy=False does not normally create this
+    view, but Blender can be picky about the character code used, so this function will create the view.
Review

I do not understand that comment; besides the `i` vs. `u` special case, this function just returns the result of `np.ndarray.astype`. So how can it force the described behavior regarding cases like `i` vs. `l`, if `np.ndarray.astype` does not handle it?

Review

Before this patch, the explicit creation of a view only occurred when `arr` and `new_dtype` were both integer types, but had opposite signedness (and had the same itemsize and byteorder).

This patch changes the explicit creation of a view to no longer require that both inputs have opposite signedness, just that both inputs are either signed or unsigned integers (and have the same itemsize and byteorder).

The dtype 'kind' is not the character code that describes the buffer data; it is a NumPy-specific character that describes the general kind of data in the buffer. In this case, the two kinds are `'i'` signed integer (of any size) and `'u'` unsigned integer (of any size). https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html

On a system like mine where the C `long` and `int` have the same itemsize, NumPy creates `long` arrays when I use the `np.int32` type. Using the `np.intc` type instead will force the creation of an `int` array, which is what `foreach_get/set` are expecting. But, because these two types only differ by their `i` and `l` character codes, `np.ndarray.astype` appears to consider the two types equal and therefore simply returns the array itself when using `copy=False`, instead of creating a view with the character code of the `new_dtype`, e.g. `long_array.astype(np.intc, copy=False) is long_array` is `True`, whereas it would have been `False` if a `np.intc` view was created.

Confusingly, `np.int32 == np.intc` is `False`, but `np.dtype(np.int32) == np.dtype(np.intc)` is `True`; I don't know whether this is intended NumPy behaviour or a bug.

Eventually I aim to do some work on Blender's buffer support in `foreach_get/set` and other areas of the Python API so that Blender stops caring about specific character codes so long as the 'kind' of data, the itemsize and byteorder are correct, but for now, the character code of the array passed to `foreach_get/set` is important for whether Blender uses the array as a buffer (fast) or a sequence (slow).

Edit: In this case, the `long` arrays are coming from the parsed .fbx because `data_types.ARRAY_INT32` ends up being `l`: it finds the character code for a 32-bit integer by iterating through `'ilq'`. If both `i` and `l` are 32-bit, then `l` is used because it is iterated last.
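
A short sketch of the behaviour described above. The printed results assume a platform such as Windows where C `long` and `int` are both 32-bit, so `'l'` arrays and `np.intc` differ only by character code; on platforms with a 64-bit `long`, the last two lines would behave differently:

```python
import numpy as np

long_array = np.zeros(4, dtype=np.dtype('l'))  # 'l': C long

# The scalar types differ, but the dtypes compare equal because dtype
# equality only considers kind, itemsize and byteorder:
print(np.int32 == np.intc)                      # False (on this platform)
print(np.dtype(np.int32) == np.dtype(np.intc))  # True

# astype(copy=False) sees equal dtypes and returns the array unchanged,
# keeping the 'l' character code:
print(long_array.astype(np.intc, copy=False) is long_array)  # True

# An explicit view does switch the character code to 'i' (C int):
print(long_array.view(np.intc).dtype.char)      # 'i'
```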

Review

Thanks, I understand better now!
+    Otherwise, calls np.ndarray.astype with copy=False.
+
     The benefit of copy=False is that if the array can be safely viewed as the new type, then a view is made, instead of
     a copy with the new type.
@@ -436,13 +441,14 @@ def astype_view_signedness(arr, new_dtype):
     # else is left to .astype.
     arr_kind = arr_dtype.kind
     new_kind = new_dtype.kind
+    # Signed and unsigned int are opposite in terms of signedness. Other types don't have signedness.
+    integer_kinds = {'i', 'u'}
     if (
-            # Signed and unsigned int are opposite in terms of signedness. Other types don't have signedness.
-            ((arr_kind == 'i' and new_kind == 'u') or (arr_kind == 'u' and new_kind == 'i'))
+            arr_kind in integer_kinds and new_kind in integer_kinds
             and arr_dtype.itemsize == new_dtype.itemsize
             and arr_dtype.byteorder == new_dtype.byteorder
     ):
-        # new_dtype has opposite signedness and matching itemsize and byteorder, so return a view of the new type.
+        # arr and new_dtype have signedness and matching itemsize and byteorder, so return a view of the new type.
         return arr.view(new_dtype)
     else:
         return arr.astype(new_dtype, copy=False)
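
A brief usage sketch of the patched helper, assuming the usual 32-bit C int and native byte order:

```python
import numpy as np

ints = np.arange(4, dtype=np.intc)

# Same itemsize and byteorder, opposite signedness: an unsafe view is
# returned, sharing the original buffer.
uints = astype_view_signedness(ints, np.uintc)
assert uints.base is ints

# Different itemsize: falls through to astype(copy=False), which copies here.
wide = astype_view_signedness(ints, np.int64)
assert wide.base is not ints
```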

View File

@@ -1420,8 +1420,7 @@ def blen_read_geom_layer_normal(fbx_obj, mesh, xform=None):
             mesh.loops.foreach_set("normal", loop_normals.ravel())
         elif blen_data_type == "Vertices":
             # We have to copy vnors to lnors! Far from elegant, but simple.
-            loop_vertex_indices = np.empty(len(mesh.loops), dtype=np.uintc)
-            mesh.loops.foreach_get("vertex_index", loop_vertex_indices)
+            loop_vertex_indices = MESH_ATTRIBUTE_CORNER_VERT.to_ndarray(mesh.attributes)
             mesh.loops.foreach_set("normal", bdata[loop_vertex_indices].ravel())
     return True
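
The `bdata[loop_vertex_indices]` fancy indexing above is the whole vnors-to-lnors copy. A toy illustration with made-up values, not real mesh data:

```python
import numpy as np

# One normal per vertex.
vertex_normals = np.array([[0.0, 0.0, 1.0],
                           [0.0, 1.0, 0.0]])
# The vertex used by each loop (corner).
corner_verts = np.array([0, 1, 1, 0])
# Indexing gathers each vertex normal into every loop that uses that vertex.
loop_normals = vertex_normals[corner_verts]  # shape (4, 3)
print(loop_normals.ravel())                  # flat buffer for foreach_set
```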
@@ -1478,7 +1477,7 @@ def blen_read_geom(fbx_tmpl, fbx_obj, settings):
     mesh.vertices.foreach_set("co", fbx_verts.ravel())
     if tot_loops:
-        bl_loop_start_dtype = bl_loop_vertex_index_dtype = np.uintc
+        bl_loop_start_dtype = np.uintc
         mesh.loops.add(tot_loops)
         # The end of each polygon is specified by an inverted index.
@@ -1489,7 +1488,8 @@ def blen_read_geom(fbx_tmpl, fbx_obj, settings):
         # Un-invert the loop ends.
         fbx_polys[fbx_loop_end_idx] ^= -1
         # Set loop vertex indices, casting to the Blender C type first for performance.
-        mesh.loops.foreach_set("vertex_index", astype_view_signedness(fbx_polys, bl_loop_vertex_index_dtype))
+        MESH_ATTRIBUTE_CORNER_VERT.foreach_set(
+            attributes, astype_view_signedness(fbx_polys, MESH_ATTRIBUTE_CORNER_VERT.dtype))
         poly_loop_starts = np.empty(tot_polys, dtype=bl_loop_start_dtype)
         # The first loop is always a loop start.
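
For context on the surrounding code: FBX marks the last vertex index of each polygon by storing it bitwise-negated (`x` becomes `~x`, i.e. `-x - 1`), and `^= -1` flips all bits to undo that. A small standalone sketch with toy polygon data:

```python
import numpy as np

# Two polygons, (0, 1, 2) and (3, 4, 5, 6); the last index of each is ~x.
fbx_polys = np.array([0, 1, -3, 3, 4, 5, -7])
# Loop ends are exactly the negative entries.
fbx_loop_end_idx = np.flatnonzero(fbx_polys < 0)
# x ^ -1 == ~x, so this restores the original indices in place.
fbx_polys[fbx_loop_end_idx] ^= -1
print(fbx_polys)  # [0 1 2 3 4 5 6]
```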
@@ -1506,7 +1506,6 @@ def blen_read_geom(fbx_tmpl, fbx_obj, settings):
     if tot_edges:
         # edges in fact index the polygons (NOT the vertices)
-        bl_edge_vertex_indices_dtype = np.uintc
         # The first vertex index of each edge is the vertex index of the corresponding loop in fbx_polys.
         edges_a = fbx_polys[fbx_edges]
@@ -1530,12 +1529,12 @@ def blen_read_geom(fbx_tmpl, fbx_obj, settings):
         # Stack edges_a and edges_b as individual columns like np.column_stack((edges_a, edges_b)).
         # np.concatenate is used because np.column_stack doesn't allow specifying the dtype of the returned array.
         edges_conv = np.concatenate((edges_a.reshape(-1, 1), edges_b.reshape(-1, 1)),
-                                    axis=1, dtype=bl_edge_vertex_indices_dtype, casting='unsafe')
+                                    axis=1, dtype=MESH_ATTRIBUTE_EDGE_VERTS.dtype, casting='unsafe')
         # Add the edges and set their vertex indices.
         mesh.edges.add(len(edges_conv))
         # ravel() because edges_conv must be flat and C-contiguous when passed to foreach_set.
-        mesh.edges.foreach_set("vertices", edges_conv.ravel())
+        MESH_ATTRIBUTE_EDGE_VERTS.foreach_set(attributes, edges_conv.ravel())
     elif tot_edges:
         print("ERROR: No polygons, but edges exist. Ignoring the edges!")