FBX IO: Speed up animation import using NumPy #104856

Merged
Thomas Barlow merged 12 commits from Mysteryem/blender-addons:fbx_import_anim_numpy_p1 into main 2023-09-04 22:07:45 +02:00
Showing only changes of commit f91d576625


@@ -524,6 +524,593 @@ def blen_read_object_transform_preprocess(fbx_props, fbx_obj, rot_alt_mat, use_p
# ---------
# Animation

def _transformation_curves_gen(item, values_arrays, channel_keys):
    from operator import setitem
    from functools import partial

    if item.is_bone:
        bl_obj = item.bl_obj.pose.bones[item.bl_bone]
    else:
        bl_obj = item.bl_obj

    rot_mode = bl_obj.rotation_mode
    transform_data = item.fbx_transform_data
    rot_eul_prev = bl_obj.rotation_euler.copy()
    rot_quat_prev = bl_obj.rotation_quaternion.copy()

    # Pre-compute inverted local rest matrix of the bone, if relevant.
    restmat_inv = item.get_bind_matrix().inverted_safe() if item.is_bone else None

    transform_prop_to_attr = {
        b'Lcl Translation': transform_data.loc,
        b'Lcl Rotation': transform_data.rot,
        b'Lcl Scaling': transform_data.sca,
    }

    # Pre-get/calculate these to reduce the work done inside the hot loop.
    anim_compensation_matrix = item.anim_compensation_matrix
    do_anim_compensation_matrix = bool(anim_compensation_matrix)
    pre_matrix = item.pre_matrix
    do_pre_matrix = bool(pre_matrix)
    post_matrix = item.post_matrix
    do_post_matrix = bool(post_matrix)
    do_restmat_inv = bool(restmat_inv)
Review

`transform_data.scale[2]` I believe?
Review

In this case `.sca` is correct; the `FBXTransformData` namedtuple uses rather short attribute names.
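For context, a minimal sketch of the namedtuple in question (abridged for illustration; the real definition in `fbx_utils.py` has many more fields, such as rotation offsets and pivots):

```python
from collections import namedtuple

# Abridged illustration of FBXTransformData's short attribute names.
FBXTransformData = namedtuple("FBXTransformData", ("loc", "rot", "sca"))

transform_data = FBXTransformData(loc=[0.0, 0.0, 0.0], rot=[0.0, 0.0, 0.0], sca=[1.0, 1.0, 1.0])
transform_data.sca[2] = 2.0  # The Z scale channel, i.e. what `transform_data.sca[2]` refers to above.
```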
    # Create a setter into transform_data for each values array. e.g. a values array for 'Lcl Scaling' with channel == 2
    # would set transform_data.sca[2].
    # TODO: Might be faster to create a list of each transform_prop_to_attr[fbx_prop] and a list of channels, then zip
    #  both and in the main loop, do transform_data_attr[channel] = value
    setters = [partial(setitem, transform_prop_to_attr[fbx_prop], channel) for fbx_prop, channel in channel_keys]
    zipped_values_iterators = zip(*(iter(arr.data) for arr in values_arrays))

    # TODO: Rather than having to get the Matrix/Quaternion methods upon each call within the loop, we can instead get
    #  them in advance.
    #  Before the loop:
    #  `mat_decompose = Matrix.decompose`
    #  then within the loop:
    #  `mat_decompose(mat)`
    for values in zipped_values_iterators:
        for setter, value in zip(setters, values):
            setter(value)
        mat, _, _ = blen_read_object_transform_do(transform_data)

        # compensate for changes in the local matrix during processing
        if do_anim_compensation_matrix:
            mat = mat @ anim_compensation_matrix

        # apply pre- and post matrix
        # post-matrix will contain any correction for lights, camera and bone orientation
        # pre-matrix will contain any correction for a parent's correction matrix or the global matrix
        if do_pre_matrix:
            mat = pre_matrix @ mat
        if do_post_matrix:
            mat = mat @ post_matrix

        # And now, remove that rest pose matrix from current mat (also in parent space).
        if do_restmat_inv:
            mat = restmat_inv @ mat

        # Now we have a virtual matrix of transform from AnimCurves, we can insert keyframes!
        loc, rot, sca = mat.decompose()
        if rot_mode == 'QUATERNION':
            if rot_quat_prev.dot(rot) < 0.0:
                rot = -rot
            rot_quat_prev = rot
        elif rot_mode == 'AXIS_ANGLE':
            vec, ang = rot.to_axis_angle()
            rot = ang, vec.x, vec.y, vec.z
        else:  # Euler
            rot = rot.to_euler(rot_mode, rot_eul_prev)
            rot_eul_prev = rot

        # Yield order matches the order that the location/rotation/scale FCurves are created in.
        yield from loc
        yield from rot
        yield from sca
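A standalone sketch of the `partial(setitem, ...)` trick used for the setters above, so that the hot loop only performs a single call per channel:

```python
from functools import partial
from operator import setitem

sca = [1.0, 1.0, 1.0]
# Behaves like `lambda value: sca.__setitem__(2, value)`, but without an extra
# Python-level function body to execute on every call.
set_sca_z = partial(setitem, sca, 2)
set_sca_z(5.0)
assert sca == [1.0, 1.0, 5.0]
```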

def _combine_same_property_curves(times_and_values_tuples):
    """Combine multiple sorted animation curves that affect the same property into a single sorted animation curve."""
    if len(times_and_values_tuples) > 1:
        # TODO: Print a warning to the console that more than one curve was found
        # The FBX animation system's default implementation only uses the first curve assigned to a channel.
        # Additional curves per channel are allowed by the FBX specification, but the handling of these curves is
        # considered the responsibility of the application that created them. Note that each curve node is expected to
        # have a unique set of channels, so these additional curves with the same channel would have to belong to
        # separate curve nodes. See the FBX SDK documentation for FbxAnimCurveNode.

        # Concatenate all the times into one array and all the values into one array.
        all_times = np.concatenate([t[0] for t in times_and_values_tuples])
        all_values = np.concatenate([t[1] for t in times_and_values_tuples])
        # Get the unique, sorted times and the index in all_times of the first occurrence of each unique value.
        sorted_unique_times, unique_indices_in_all_times = np.unique(all_times, return_index=True)
        values_of_sorted_unique_times = all_values[unique_indices_in_all_times]
        return sorted_unique_times, values_of_sorted_unique_times
        # # Get the indices that would sort all_times.
        # # Use a stable algorithm so that if there are any duplicate times, they maintain their original order.
        # perm = np.argsort(all_times, kind='stable')
        # # Use the indices to sort both all_times and all_values.
        # all_times = all_times[perm]
        # all_values = all_values[perm]
    else:
        return times_and_values_tuples[0]
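How `np.unique(..., return_index=True)` implements the "first curve wins" behaviour described in the comment, in isolation:

```python
import numpy as np

# Keys from two curves for the same channel, concatenated; both have a key at time 20.0.
all_times = np.array([10.0, 20.0, 30.0, 20.0, 40.0])
all_values = np.array([1.0, 2.0, 3.0, -2.0, 4.0])
times, first_occurrence = np.unique(all_times, return_index=True)
values = all_values[first_occurrence]
# times  -> [10., 20., 30., 40.]
# values -> [ 1.,  2.,  3.,  4.]  (the later duplicate at time 20.0, value -2.0, is discarded)
```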

def _combine_curve_keyframes(times_and_values_tuples, initial_values):
    """Combine multiple sorted animation curves that affect different properties, such that every animation curve
    contains the keyframes from every other curve, interpolating the values for the newly inserted keyframes in each
    curve.

    Currently, linear interpolation is assumed, but FBX does store how keyframes should be interpolated, so correctly
    interpolating the keyframe values is a TODO."""
    # all_times = []
    # #all_values = []
    # #all_curve_idx = []
    # for i, (times, values) in enumerate(times_and_values_tuples):
    #     all_times.append(times)
    #     #all_values.append(values)
    #     #all_curve_idx.append(np.full_like(times, i))
    # all_times = np.concatenate(all_times)
    # #all_curve_idx = np.concatenate(all_curve_idx)
    # perm = np.argsort(all_times, kind='stable')
    #
    # sorted_all_times = all_times[perm]
    #
    # # Get the index in sorted_all_times of each time in all_times
    # all_times_indices = np.empty(len(perm), dtype=np.intp)
    # all_times_indices[perm] = np.arange(len(perm))
    all_times = [t[0] for t in times_and_values_tuples]
    # Get sorted unique times and the index in sorted_all_times of each time in all_times
    sorted_all_times, all_times_indices = np.unique(np.concatenate(all_times), return_inverse=True)
    #sorted_all_values = all_values[perm]
    #sorted_curve_idx = all_curve_idx[perm]

    # An alternative would be to concatenate filled arrays with the index of each array and then index that by perm,
    # then a mask for each array can be found by checking for values that equal the index of that array.
    values_arrays = []
    times_start = 0
    for (times, values), initial_value in zip(times_and_values_tuples, initial_values):
        times_end = times_start + len(times)
        # The index in sorted_all_times of each value in times
        times_indices = all_times_indices[times_start:times_end]
        # Update times_start for the next array
        times_start = times_end
        # TODO: Not sure the best way to mask out the values here, will need investigating
        #times_extended = sorted_all_times.copy()
        needs_interpolation_mask = np.full(len(sorted_all_times), True)
        needs_interpolation_mask[times_indices] = False
        #imported_times_mask = ~needs_interpolation_mask
        # # Need to find the before and after times for each time that needs interpolation
        # # Times are sorted, so the smallest and largest are simply the first and last values.
        # min_time_value = values[0]
        # max_time_value = values[-1]
        # TODO: It's possible we can get the same result faster by doing the interpolation manually, since we can
        #  figure out the before and after values for each time that needs interpolating (this would also likely make
        #  it easier to update the function to support other interpolation than just linear).
        interp_values = np.interp(sorted_all_times[needs_interpolation_mask], times, values, left=initial_value)
        # Alt, though we typically expect there to be few times that need interpolation:
        # extended_values = np.interp(sorted_all_times, times, values, left=initial_value)
        extended_values = np.empty_like(values, shape=len(sorted_all_times))
        extended_values[needs_interpolation_mask] = interp_values
        extended_values[times_indices] = values
        values_arrays.append(extended_values)
        # FIXME: If we have two curves with the same times, aren't they going to break, because they're going to try
        #  and interpolate the same times?
        #  times1 = [1,2,3,4]
        #  times2 = [1,2,3,4]
        #  sorted_all_times = [1,1,2,2,3,3,4,4]... not good
        # # Manual linear interpolation (it may be easier to extend for other interpolation):
        # # Get the index of the previous and next elements that are not interpolated
        # prev_value_indices = np.arange(len(sorted_all_times), dtype=np.intp)
        # next_value_indices = prev_value_indices.copy()
        # prev_value_indices[needs_interpolation_mask] = times_indices[0]
        # next_value_indices[needs_interpolation_mask] = times_indices[-1]
        # prev_value_indices = np.maximum.accumulate(prev_value_indices)
        # next_value_indices = np.flip(np.minimum.accumulate(np.flip(next_value_indices)))
        #
        # # TODO: May be faster to simply not index by needs_interpolation_mask every time and then only index by
        # #  needs_interpolation_mask at the end.
        # prev_times = sorted_all_times[prev_value_indices][needs_interpolation_mask]
        # prev_values = sorted_all_values[prev_value_indices][needs_interpolation_mask]
        # next_times = sorted_all_times[next_value_indices][needs_interpolation_mask]
        # next_values = sorted_all_values[next_value_indices][needs_interpolation_mask]
        #
        # interp_times = sorted_all_times[needs_interpolation_mask]
        # ifac = (interp_times - prev_times) / (next_times - prev_times)
        # interp_values = ifac * (next_values - prev_values) + prev_values
        # ifac = (sorted_all_times[needs_interpolation_mask] - prev_times) / ()
        #
        # values_extended =
        #
        # min_before = np.full_like(sorted_all_times, times[0])
        # max_after = np.full_like(sorted_all_times, times[-1])
        #
        # # FIXME: But we need the indices so we can get the before and after values and interpolate between those...
        # before_times = times_extended.copy()
        # before_times[needs_interpolation_mask] = min_time
        # before_times = np.maximum.accumulate(before_times)
        #
        # after_times = times_extended.copy()
        # after_times[needs_interpolation_mask] = max_time
        # after_times = np.flip(np.minimum.accumulate(np.flip(after_times)))
        #
        # before_times[needs_interpolation_mask]
        #
        # times_full = sorted_all_times.copy()
        # values_full = np.full_like(sorted_all_times, np.nan)
        # values_full[sorted_curve_idx == i] = values
    return sorted_all_times, values_arrays
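The core idea of `_combine_curve_keyframes`, reduced to two toy curves (illustration only):

```python
import numpy as np

loc_times, loc_values = np.array([0.0, 10.0]), np.array([0.0, 1.0])
sca_times = np.array([5.0])

# Unique times of all curves, plus the position of each curve's own times within them.
all_times, inverse = np.unique(np.concatenate([loc_times, sca_times]), return_inverse=True)
# all_times -> [0., 5., 10.]; inverse -> [0, 2, 1]

# Fill the location curve: its own keys are kept, the missing key at time 5.0 is interpolated.
needs_interp = np.full(len(all_times), True)
needs_interp[inverse[:2]] = False  # the first two entries of `inverse` belong to loc_times
extended = np.empty(len(all_times))
extended[~needs_interp] = loc_values
extended[needs_interp] = np.interp(all_times[needs_interp], loc_times, loc_values)
# extended -> [0. , 0.5, 1. ]
```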

def blen_read_invalid_animation_curve(key_times, key_values):
    """FBX will parse animation curves even when their keyframe times are invalid (not strictly increasing). It's
    unclear exactly how FBX handles invalid curves, but this matches in some cases and is how the FBX IO addon has been
    handling invalid keyframe times for a long time.

    Notably, this function will also correctly parse valid animation curves, though it is much slower than the trivial,
    regular way.

    The returned keyframe times are guaranteed to be strictly increasing."""
    sorted_unique_times = np.unique(key_times)

    # Unsure if this can be vectorized with numpy, so using iteration for now.
    def index_gen():
        idx = 0
        key_times_len = len(key_times)
        # Iterating .data, the memoryview of the array, is faster than iterating the array directly.
        for curr_fbxktime in sorted_unique_times.data:
            if key_times[idx] < curr_fbxktime:
                if idx >= 0:
                    idx += 1
                    if idx >= key_times_len:
                        idx = -1
            yield idx

    indices = np.fromiter(index_gen(), dtype=np.int64, count=len(sorted_unique_times))
    indexed_times = key_times[indices]
    indexed_values = key_values[indices]

    # Interpolate the value for each time in sorted_unique_times according to the times and values at each index and
    # the previous index.
    interpolated_values = np.empty_like(indexed_values)

    # Where the index is 0, there's no previous value to interpolate from, so we set the value without interpolating.
    # Because the indices are in increasing order, all zeroes must be at the start, so we can find the index of the
    # last zero and use that to index with a slice instead of a boolean array for performance.
    # Equivalent to, but as a slice:
    # idx_zero_mask = indices == 0
    # idx_nonzero_mask = ~idx_zero_mask
    first_nonzero_idx = np.searchsorted(indices, 0, side='right')
    idx_zero_slice = slice(0, first_nonzero_idx)  # [:first_nonzero_idx]
    idx_nonzero_slice = slice(first_nonzero_idx, None)  # [first_nonzero_idx:]

    interpolated_values[idx_zero_slice] = indexed_values[idx_zero_slice]

    indexed_times_nonzero_idx = indexed_times[idx_nonzero_slice]
    indexed_values_nonzero_idx = indexed_values[idx_nonzero_slice]
    indices_nonzero = indices[idx_nonzero_slice]
    prev_indices_nonzero = indices_nonzero - 1
    prev_indexed_times_nonzero_idx = key_times[prev_indices_nonzero]
    prev_indexed_values_nonzero_idx = key_values[prev_indices_nonzero]

    ifac_a = sorted_unique_times[idx_nonzero_slice] - prev_indexed_times_nonzero_idx
    ifac_b = indexed_times_nonzero_idx - prev_indexed_times_nonzero_idx
    # If key_times contains two (or more) duplicate times in a row, then values in `ifac_b` can be zero, which would
    # result in division by zero.
    # Use the `np.errstate` context manager to suppress printing the RuntimeWarning to the system console.
    with np.errstate(divide='ignore'):
        ifac = ifac_a / ifac_b
    interpolated_values[idx_nonzero_slice] = ((indexed_values_nonzero_idx - prev_indexed_values_nonzero_idx) * ifac
                                              + prev_indexed_values_nonzero_idx)

    # If the time to interpolate at is larger than the time in indexed_times, then the value has been extrapolated.
    # Extrapolated values are excluded.
    valid_mask = indexed_times >= sorted_unique_times
    key_times = sorted_unique_times[valid_mask]
    key_values = interpolated_values[valid_mask]

    return key_times, key_values
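The `np.searchsorted` slice trick above, shown in isolation; because `indices` is non-decreasing, a slice can stand in for a boolean mask:

```python
import numpy as np

indices = np.array([0, 0, 0, 1, 2, 4])  # non-decreasing, so all zeros sit at the front
first_nonzero_idx = np.searchsorted(indices, 0, side='right')
# first_nonzero_idx -> 3
# indices[:3] is every zero entry and indices[3:] every nonzero entry, with no mask arrays allocated.
```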

def _convert_fbx_time_to_blender_time(key_times, blen_start_offset, fbx_start_offset, fps):
    # TODO: Could move this into blen_store_keyframes since it probably doesn't need to be used anywhere else
    from .fbx_utils import FBX_KTIME
    timefac = fps / FBX_KTIME

    # Convert from FBX timing to Blender timing.
    # Cannot subtract in-place because key_times could be read directly from FBX and could be used by multiple Actions.
    key_times = key_times - fbx_start_offset
    # timefac is a Python float, so the new array will be a np.float64 array.
    key_times = key_times * timefac
    key_times += blen_start_offset

    return key_times
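A worked example of the conversion (assuming `FBX_KTIME = 46186158000`, the number of FBX time units per second defined in `fbx_utils.py`):

```python
import numpy as np

FBX_KTIME = 46186158000  # Assumed value: FBX time units ("ktimes") per second.
fps = 24.0
timefac = fps / FBX_KTIME

key_times = np.array([0, FBX_KTIME // 2, FBX_KTIME])  # 0s, 0.5s and 1s in FBX time
frames = key_times * timefac + 1.0  # blen_start_offset=1.0, fbx_start_offset=0
# frames -> [ 1., 13., 25.]  (half a second is 12 frames at 24 fps)
```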

def blen_read_single_animation_curve(fbx_curve):
    """Read a single animation curve from FBX data.

    The parsed keyframe times are guaranteed to be strictly increasing."""
    # TODO: Remove these, we can do all time conversion at the very end, just before combining times and values into a
    #  single array
    # from .fbx_utils import FBX_KTIME
    # timefac = fps / FBX_KTIME
    key_times = parray_as_ndarray(elem_prop_first(elem_find_first(fbx_curve, b'KeyTime')))
    key_values = parray_as_ndarray(elem_prop_first(elem_find_first(fbx_curve, b'KeyValueFloat')))

    assert(len(key_values) == len(key_times))

    # The FBX SDK specifies that only one key per time is allowed and that the keys are sorted in time order.
    # https://help.autodesk.com/view/FBX/2020/ENU/?guid=FBX_Developer_Help_cpp_ref_class_fbx_anim_curve_html
    all_times_strictly_increasing = (key_times[1:] > key_times[:-1]).all()
    if all_times_strictly_increasing:
        return key_times, key_values
    else:
        # TODO: Print something to the console warning that the animation curve was invalid.
        # FBX will still read animation curves even if they are invalid.
        return blen_read_invalid_animation_curve(key_times, key_values)
    # TODO: When we have transformation curves (or more than one curve per channel (optional support)), separately
    #  combine singular parsed curves and fill in the gaps with linear interpolation. .concatenate and .unique the
    #  key_times arrays with return_inverse=True. Use the lengths of each key_times array and their order in the
    #  concatenation to get the index of each of their elements in the sorted, unique concatenation.
    #  For each key_times array, create an all True array and use those indices to set values to False.
    #  Copy the sorted, unique concatenation and use this new mask to effectively delete all times that didn't come
    #  from this key_times array. Use .maximum.accumulate and a reversed .minimum.accumulate to get the first time
    #  before and first time after each time that needs its value to be interpolated. These two arrays get the start
    #  and end times to interpolate from. For each time that needs its value to be interpolated, get the values for
    #  the start and end times and then use those and the times that need their values interpolated to calculate the
    #  interpolated values.
    #  Care will need to be taken for times where there is no first value before or where there is no first value
    #  after, in which case interpolation can't take place and we'll either need to set values at the very start and
    #  end or otherwise fill the values that can't be interpolated with a default value or the first/last value in
    #  key_times.
    if not all_times_strictly_increasing:
        # We try to match how FBX behaves when it encounters an invalid KeyTime array. This doesn't quite match when
        # the maximum value is not the last value (FBX discards some keyframes whereas we don't), but it's close
        # enough.
        # Start the curve from the index of the smallest KeyTime value.
        min_idx = np.argmin(key_times) if key_times.size else 0
        key_times = key_times[min_idx:]
        key_values = key_values[min_idx:]

        max_idx = np.argmax(key_times) if key_times.size else 0
        # If the largest KeyTime value is at the last index then it's simple.
        if max_idx == key_times.size - 1:
            # Set each element to the maximum of itself and all elements before it
            key_times = np.maximum.accumulate(key_times)
        else:
            # This works the same as Blender's original animation curve parser, without the conversion from FBX time
            # to Blender time, and modified to operate on a single curve rather than multiple.
            # TODO: Maybe these should be sorted instead?
            # TODO: Maybe these should be np.maximum.accumulate-d instead?
            # Sorted unique key times
            sorted_unique_times = np.unique(key_times)

            # TODO: How is this different from np.searchsorted on np.maximum.accumulate-d times? Can we use it to
            #  find the points at which idx will increase and then np.cumsum those increases?
            def parse_invalid_curve_times_to_indices():
                idx = 0
                times = key_times.data
                num_times = len(times)
                for curr_fbxktime in sorted_unique_times.data:
                    curr_time = times[idx]
                    if curr_time < curr_fbxktime:
                        if idx >= 0:
                            idx += 1
                            if idx >= num_times:
                                # We have reached our last element for this curve, stay on it from now on...
                                idx = -1
                    yield idx

            key_time_indices = np.fromiter(parse_invalid_curve_times_to_indices(), dtype=np.int64)
            key_times = key_times[key_time_indices]
            key_values = key_values[key_time_indices]

            # Filter out invalid times
            valid_mask = key_times >= sorted_unique_times
            key_times = key_times[valid_mask]
            key_values = key_values[valid_mask]

            interpolated_values = np.empty_like(key_values)
            interpolated_values[:1] = key_values[:1]
            ifac = (sorted_unique_times[1:] - key_times[:-1]) / (key_times[1:] - key_times[:-1])
            interpolated_values[1:] = (key_values[1:] - key_values[:-1]) * ifac + key_values[:-1]
            key_values = interpolated_values
    # def parse_curve_fallback_gen():
    #     idx = 0
    #     times = key_times.data
    #     num_times = len(times)
    #     values = key_values.data
    #     # Sorted unique times
    #     sorted_unique_times = np.unique(key_times)
    #     for curr_fbxktime in sorted_unique_times.data:
    #         curr_time = times[idx]
    #         if curr_time < curr_fbxktime:
    #             if idx >= 0:
    #                 idx += 1
    #                 if idx >= num_times:
    #                     # We have reached our last element for this curve, stay on it from now on...
    #                     idx = -1
    #                 curr_time = times[idx]
    #         if curr_time >= curr_fbxktime:
    #             if idx == 0:
    #                 curr_value = values[idx]
    #             else:
    #                 # Interpolate between this key and the previous one
    #                 prev_time = times[idx - 1]
    #                 ifac = (curr_fbxktime - prev_time) / (curr_time - prev_time)
    #                 # prev_value =
    #                 curr_value = (values[idx] - values[idx - 1]) * ifac + values[idx - 1]
    #         yield curr_fbxktime, curr_value
    # structured_dtype = np.dtype([("time", key_times.dtype), ("value", key_values.dtype)])
    # times_and_values = np.fromiter(parse_curve_fallback_gen(), dtype=structured_dtype)
    # key_times = times_and_values["time"]
    # key_values = times_and_values["value"]
    # # TODO: Get some printable attribute from fbx_curve, don't print the entire fbx_curve
    # print("WARNING: Invalid animation keyframe times for %s. The key frame times that are not strictly increasing"
    #       " and the keyframes before the first keyframe chronologically have been discarded." % str(fbx_curve))
    # # We could sort the key times, but starting from the minimum value and then taking an accumulative maximum
    # # better matches FBX and older Blender behaviour when the times are not in order.
    # # FIXME: min_idx thing doesn't work, things get weird when the start and end aren't the min and max times...
    # min_idx = np.amin(key_times) if key_times.size else 0
    # """
    # Ok, so moving the max_idx to not be last has odd effects...
    # Given frames [0, 10, ..., 90, 100] and values [0, ..., 100]. Moving the last time into the:
    # second-last position: Frames -> [0, ..., 90], values -> [0, ..., 80, 85] (not 90??)
    #             [..., 80, _90_, (100)]
    #     Perhaps [..., 80, ????, (90)]
    #     Interp  [..., 80,   85, (90)]
    #     Frames  [..., 80,   90, (100)]
    #     Blender appears to agree with Unity in this case, but Blender doesn't exclude the (<value>) element
    #     whereas Unity does.
    #     Interp  [..., 80, 85, 90]
    #     Frames  [..., 80, 90, 100]
    # third-last position: Frames -> [0, ..., 70, 90], values -> [0, ..., 70, 76.6666] (not 80??)
    #             [..., 70, _80_, (90), (100)]
    #     Perhaps [..., 70, ????, ????, (90)]
    #     Interp  [..., 70, 76.6, 83.3, (90)]
    #     Frames  [..., 70, (80),   90, (100)]
    #
    #     Blender sets frame 100 to 80 instead, which would have been the next value after 70, but Blender
    #     doesn't exclude the (<value>) elements whereas Unity does.
    #     Perhaps [..., 70, (??),   ????, (80)]
    #     Interp  [..., 70, (73.3), 76.6, (80)]
    #     Frames  [..., 70,  80,      90,  100]
    # fourth-last position: Frames -> [0, ..., 60, 90], values -> [0, ..., 60, 67.5]
    #             [..., 60, _70_, (80), (90), (100)]
    #     Perhaps [..., 60, ????, ????, ????, (90)]
    #     Interp  [..., 60, 67.5,   75, 82.5, (90)]
    #     Frames  [..., 60, (70), (80),   90, (100)]
    #
    #     Blender sets frame 100 to 70 instead, which would have been the next value after 60, but Blender
    #     doesn't exclude the (<value>) elements whereas Unity does.
    #     Perhaps [..., 60, (??),   (??), ????, (70)]
    #     Interp  [..., 60, (62.5), (65), 67.5, (70)]
    #     Frames  [..., 60,  70,     80,    90,  100]
    # TODO: Try changing the 90 value to something else and see if the interpolations still hold.
    # """
    # max_idx = np.amax(key_times) if key_times.size else 0  # max idx might also get reduced by min_idx?
    # last_idx = len(key_times) - 1
    # if max_idx != last_idx:
    #     max_idx = last_idx - 2  # Seems to exclude the last two keyframes for some reason...
    # key_times = key_times[min_idx:max_idx]
    # key_values = key_values[min_idx:max_idx]
    # key_times = np.maximum.accumulate(key_times)
    # # TODO: As an alternative to finding the duplicates ourselves, we could just return key_times and key_values
    # #  as they are from here, letting Blender remove the duplicates when calling .update().
    # #  Yes, we should leave it to Blender, imagine a transform channel with values [10, 2, 5, 20] at times
    # #  [1, 12, 12, 40], and then another transform channel with times at [6, 30]. The first channel will need to
    # #  interpolate for the missing times of 6 and 30.
    # unique_mask = np.empty_like(key_times, dtype=bool)
    # # Remove keyframes at duplicate times. Blender would do this when calling .update() on the FCurve. Where there
    # # are duplicate times, Blender only keeps the last duplicate.
    # # Note that this may produce different results to software that uses the FBX SDK, because it does not remove
    # # duplicates.
    # # Because key_times is now in ascending order, unique elements are the last element and elements that are
    # # pairwise not equal.
    # # 1,1,1,2,3,3,4,5,5
    # # F,F,T,T,F,T,T,F
    # np.not_equal(key_times[:-1], key_times[1:], out=unique_mask[:-1])
    # # The last element is always kept:
    # # F,F,T,T,F,T,T,F,T
    # unique_mask[-1:] = True
    #
    # # # FIXME: This currently gets the first unique time, but if we were to import even duplicate times, when we
    # # #  .update() the FCurve, Blender keeps only the *last* unique time.
    # # # Remove duplicates. Because key_times is now in ascending order, unique elements are the first element and
    # # # elements that are pairwise not equal.
    # # # 1,1,1,2,3,3,4,5
    # # # F,F,T,T,F,T,T
    # # # The first element is always unique:
    # # # T,F,F,T,T,F,T,T
    # # unique_mask[:1] = True
    # # np.not_equal(key_times[:-1], key_times[1:], out=unique_mask[1:])
    # #
    # # #indices = np.where(unique_mask, np.arange(len(unique_mask), 0))
    # # #indices = np.maximum.accumulate(indices)
    # #
    # # Use the mask to get only the times (and their values) that are strictly increasing.
    # key_times = key_times[unique_mask]
    # key_values = key_values[unique_mask]
    # Convert from FBX timing to Blender timing.
    # Cannot subtract in-place because this curve could be used in multiple Actions.
    key_times = key_times - fbx_start_offset
    # timefac is a Python float, so the new array will be a np.float64 array.
    key_times = key_times * timefac
    key_times += blen_start_offset

    return key_times, key_values
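The strictly-increasing validity test used by this function, in isolation:

```python
import numpy as np

valid_times = np.array([0, 10, 20, 30])
invalid_times = np.array([0, 10, 20, 20, 30])  # duplicate time
print((valid_times[1:] > valid_times[:-1]).all())      # True: every key is later than the previous one
print((invalid_times[1:] > invalid_times[:-1]).all())  # False: takes the slow, invalid-curve path
```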

def blen_store_keyframes(blen_fcurve, key_times, key_values):
    """Set all keyframe times and values for a newly created FCurve.

    Linear interpolation is currently assumed."""
    # The fcurve must be newly created and thus have no keyframe_points.
    assert(len(blen_fcurve.keyframe_points) == 0)

    num_keys = len(key_times)

    # Compatible with C float type
    bl_keyframe_dtype = np.single
    # Compatible with C char type
    bl_enum_dtype = np.byte

    # TODO: Get this value once and store it as a global variable
    linear_enum_value = bpy.types.Keyframe.bl_rna.properties['interpolation'].enum_items['LINEAR'].value

    # Stack the arrays into a flattened array of flattened (frame, value) pairs.
    # Same as `np.column_stack((key_times, key_values)).ravel()`, but allows specifying the dtype.
    full_key_frame_array = np.concatenate((key_times.reshape(-1, 1), key_values.reshape(-1, 1)),
                                          dtype=bl_keyframe_dtype, casting='unsafe', axis=1).ravel()

    # Add the keyframe points to the FCurve and then set the 'co' and 'interpolation' of each point.
    blen_fcurve.keyframe_points.add(num_keys)
    blen_fcurve.keyframe_points.foreach_set('co', full_key_frame_array)
    blen_fcurve.keyframe_points.foreach_set('interpolation', np.full(num_keys, linear_enum_value, dtype=bl_enum_dtype))

    # Since we inserted our keyframes in 'ultra-fast' mode, we have to update the fcurves now.
    blen_fcurve.update()
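What the stacked `co` array looks like for a tiny curve (standalone, no `bpy` needed); `foreach_set('co', ...)` expects exactly this flattened (frame, value) layout:

```python
import numpy as np

key_times = np.array([1.0, 2.0])
key_values = np.array([0.25, 0.75])
full_key_frame_array = np.concatenate((key_times.reshape(-1, 1), key_values.reshape(-1, 1)),
                                      dtype=np.single, casting='unsafe', axis=1).ravel()
# -> [1.  , 0.25, 2.  , 0.75], i.e. (frame0, value0, frame1, value1)
# Note: the `dtype` argument of np.concatenate requires NumPy 1.20+.
```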

# TODO: Remove this function
def blen_read_animations_curves_iter(fbx_curves, blen_start_offset, fbx_start_offset, fps):
    """
    Get raw FBX AnimCurve list, and yield values for all curves at each singular curves' keyframes,
@@ -572,42 +1159,54 @@ def blen_read_animations_action_item(action, item, cnodes, fps, anim_offset, glo
     taking any pre_ and post matrix into account to transform from fbx into blender space.
     """
     from bpy.types import Object, PoseBone, ShapeKey, Material, Camera
+    # TODO: Remove this import
     from itertools import chain

-    fbx_curves = []
-    used_channels = set()
-    warn_multiple_curves_per_channel = False
+    fbx_curves: dict[bytes, dict[int, list[FBXElem]]] = {}
     for curves, fbxprop in cnodes.values():
+        channels_dict = fbx_curves.setdefault(fbxprop, {})
         for (fbx_acdata, _blen_data), channel in curves.values():
-            channel_id = (fbxprop, channel)
-            if channel_id in used_channels:
-                # The FBX animation system's default implementation only uses the first curve assigned to a channel.
-                # Additional curves per channel are allowed by the FBX specification, but the handling of these curves
-                # is considered the responsibility of the application that created them. Note that each curve node is
-                # expected to have a unique set of channels, so these additional curves with the same channel would have
-                # to belong to separate curve nodes. See the FBX SDK documentation for FbxAnimCurveNode.
-                warn_multiple_curves_per_channel = True
-            else:
-                used_channels.add(channel_id)
-                fbx_curves.append((fbxprop, channel, fbx_acdata))
-    if warn_multiple_curves_per_channel:
-        print("WARNING: Multiple animation curves per animated property channel were found for %s. All but the first"
-              "curve for each property channel has been discarded." % action.name)
+            channels_dict.setdefault(channel, []).append(fbx_acdata)
+
+    # fbx_curves = []
+    # used_channels = set()
+    # warn_multiple_curves_per_channel = False
+    # for curves, fbxprop in cnodes.values():
+    #     channels_dict = fbx_curves_props_channels.setdefault(fbxprop, {})
+    #     for (fbx_acdata, _blen_data), channel in curves.values():
+    #         channels_dict.setdefault(channel, []).append(fbx_acdata)
+    #
+    #     for (fbx_acdata, _blen_data), channel in curves.values():
+    #         channel_id = (fbxprop, channel)
+    #         if channel_id in used_channels:
+    #             # The FBX animation system's default implementation only uses the first curve assigned to a channel.
+    #             # Additional curves per channel are allowed by the FBX specification, but the handling of these
+    #             # curves is considered the responsibility of the application that created them. Note that each curve
+    #             # node is expected to have a unique set of channels, so these additional curves with the same channel
+    #             # would have to belong to separate curve nodes. See the FBX SDK documentation for FbxAnimCurveNode.
+    #             warn_multiple_curves_per_channel = True
+    #         else:
+    #             used_channels.add(channel_id)
+    #             fbx_curves.append((fbxprop, channel, fbx_acdata))
+    # if warn_multiple_curves_per_channel:
+    #     print("WARNING: Multiple animation curves per animated property channel were found for %s. All but the first"
+    #           "curve for each property channel has been discarded." % action.name)

     # Leave if no curves are attached (if a blender curve is attached to scale but without keys it defaults to 0).
     if len(fbx_curves) == 0:
         return

-    blen_curves = []
-    props = []
-    keyframes = {}
-
-    # Add each keyframe to the keyframe dict
-    def store_keyframe(fc, frame, value):
-        fc_key = (fc.data_path, fc.array_index)
-        if not keyframes.get(fc_key):
-            keyframes[fc_key] = []
-        keyframes[fc_key].extend((frame, value))
+    # TODO: Remove these
+    # blen_curves = []
+    # props = []
+    # keyframes = {}
+
+    # # Add each keyframe to the keyframe dict
+    # def store_keyframe(fc, frame, value):
+    #     fc_key = (fc.data_path, fc.array_index)
+    #     if not keyframes.get(fc_key):
+    #         keyframes[fc_key] = []
+    #     keyframes[fc_key].extend((frame, value))

     if isinstance(item, Material):
         grpname = item.name
@@ -642,115 +1241,105 @@ def blen_read_animations_action_item(action, item, cnodes, fps, anim_offset, glo
                    for prop, nbr_channels, grpname in props for channel in range(nbr_channels)]

     if isinstance(item, Material):
-        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
-            value = [0, 0, 0]
-            for v, (fbxprop, channel, _fbx_acdata) in values:
-                assert(fbxprop == b'DiffuseColor')
+        for fbxprop, channel_to_curves in fbx_curves.items():
+            assert(fbxprop == b'DiffuseColor')
+            for channel, curves in channel_to_curves.items():
                 assert(channel in {0, 1, 2})
-                value[channel] = v
-
-            for fc, v in zip(blen_curves, value):
-                store_keyframe(fc, frame, v)
+                blen_curve = blen_curves[channel]
+                parsed_curves = tuple(map(blen_read_single_animation_curve, curves))
+                fbx_key_times, values = _combine_same_property_curves(parsed_curves)
+                bl_key_times = _convert_fbx_time_to_blender_time(fbx_key_times, anim_offset, 0, fps)
+                blen_store_keyframes(blen_curve, bl_key_times, values)

     elif isinstance(item, ShapeKey):
         deform_values = shape_key_deforms.setdefault(item, [])
-        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
-            value = 0.0
-            for v, (fbxprop, channel, _fbx_acdata) in values:
-                assert(fbxprop == b'DeformPercent')
+        for fbxprop, channel_to_curves in fbx_curves.items():
+            assert(fbxprop == b'DeformPercent')
+            for channel, curves in channel_to_curves.items():
                 assert(channel == 0)
-                value = v / 100.0
-            deform_values.append(value)
-
-            for fc, v in zip(blen_curves, (value,)):
-                store_keyframe(fc, frame, v)
+                blen_curve = blen_curves[channel]
+                parsed_curves = tuple(map(blen_read_single_animation_curve, curves))
+                fbx_key_times, values = _combine_same_property_curves(parsed_curves)
+                bl_key_times = _convert_fbx_time_to_blender_time(fbx_key_times, anim_offset, 0, fps)
+                # A fully activated shape key in FBX DeformPercent is 100.0 whereas it is 1.0 in Blender.
+                values = values / 100.0
+                blen_store_keyframes(blen_curve, bl_key_times, values)
+                # Store the minimum and maximum shape key values, so that the shape key's slider range can be
+                # expanded if necessary after reading all animations.
+                deform_values.append(values.min())
+                deform_values.append(values.max())

     elif isinstance(item, Camera):
-        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
-            focal_length = 0.0
-            focus_distance = 0.0
-            for v, (fbxprop, channel, _fbx_acdata) in values:
-                assert(fbxprop == b'FocalLength' or fbxprop == b'FocusDistance')
+        for fbxprop, channel_to_curves in fbx_curves.items():
+            is_focus_distance = fbxprop == b'FocusDistance'
+            assert(fbxprop == b'FocalLength' or is_focus_distance)
+            for channel, curves in channel_to_curves.items():
                 assert(channel == 0)
-                if (fbxprop == b'FocalLength'):
-                    focal_length = v
-                elif (fbxprop == b'FocusDistance'):
-                    focus_distance = v / 1000 * global_scale
-
-            for fc, v in zip(blen_curves, (focal_length, focus_distance)):
-                store_keyframe(fc, frame, v)
+                # The indices are determined by the creation of the `props` list above.
+                blen_curve = blen_curves[1 if is_focus_distance else 0]
+                parsed_curves = tuple(map(blen_read_single_animation_curve, curves))
+                fbx_key_times, values = _combine_same_property_curves(parsed_curves)
+                bl_key_times = _convert_fbx_time_to_blender_time(fbx_key_times, anim_offset, 0, fps)
+                if is_focus_distance:
+                    # Remap the imported values from FBX to Blender.
+                    values = values / 1000.0
+                    values *= global_scale
+                blen_store_keyframes(blen_curve, bl_key_times, values)

     else:  # Object or PoseBone:
-        if item.is_bone:
-            bl_obj = item.bl_obj.pose.bones[item.bl_bone]
-        else:
-            bl_obj = item.bl_obj
         transform_data = item.fbx_transform_data
-        rot_eul_prev = bl_obj.rotation_euler.copy()
-        rot_quat_prev = bl_obj.rotation_quaternion.copy()

-        # Pre-compute inverted local rest matrix of the bone, if relevant.
-        restmat_inv = item.get_bind_matrix().inverted_safe() if item.is_bone else None
-        transform_prop_to_attr = {
-            b'Lcl Translation': transform_data.loc,
-            b'Lcl Rotation': transform_data.rot,
-            b'Lcl Scaling': transform_data.sca,
-        }
+        # Each transformation curve needs to have keyframes at the times of every other transformation curve
+        # (interpolating missing values), so that we can construct a matrix at every keyframe.

-        for frame, values in blen_read_animations_curves_iter(fbx_curves, anim_offset, 0, fps):
-            for v, (fbxprop, channel, _fbx_acdata) in values:
-                if fbxprop == b'Lcl Translation':
-                    transform_data.loc[channel] = v
-                elif fbxprop == b'Lcl Rotation':
-                    transform_data.rot[channel] = v
-                elif fbxprop == b'Lcl Scaling':
-                    transform_data.sca[channel] = v
-            mat, _, _ = blen_read_object_transform_do(transform_data)
+        times_and_values_tuples = []
+        initial_values = []
+        channel_keys = []
+        for fbxprop, channel_to_curves in fbx_curves.items():
+            if fbxprop not in transform_prop_to_attr:
+                # Currently, we only care about transformation curves.
+                continue
+            for channel, curves in channel_to_curves.items():
+                assert(channel in {0, 1, 2})
+                parsed_curves = tuple(map(blen_read_single_animation_curve, curves))
+                fbx_key_times, values = _combine_same_property_curves(parsed_curves)

-            # compensate for changes in the local matrix during processing
-            if item.anim_compensation_matrix:
-                mat = mat @ item.anim_compensation_matrix
+                channel_keys.append((fbxprop, channel))

-            # apply pre- and post matrix
-            # post-matrix will contain any correction for lights, camera and bone orientation
-            # pre-matrix will contain any correction for a parent's correction matrix or the global matrix
-            if item.pre_matrix:
-                mat = item.pre_matrix @ mat
-            if item.post_matrix:
-                mat = mat @ item.post_matrix
+                initial_values.append(transform_prop_to_attr[fbxprop][channel])

-            # And now, remove that rest pose matrix from current mat (also in parent space).
-            if restmat_inv:
-                mat = restmat_inv @ mat
+                times_and_values_tuples.append((fbx_key_times, values))

-            # Now we have a virtual matrix of transform from AnimCurves, we can insert keyframes!
-            loc, rot, sca = mat.decompose()
-            if rot_mode == 'QUATERNION':
-                if rot_quat_prev.dot(rot) < 0.0:
-                    rot = -rot
-                rot_quat_prev = rot
-            elif rot_mode == 'AXIS_ANGLE':
-                vec, ang = rot.to_axis_angle()
-                rot = ang, vec.x, vec.y, vec.z
-            else:  # Euler
-                rot = rot.to_euler(rot_mode, rot_eul_prev)
-                rot_eul_prev = rot
+        combined_fbx_times, values_arrays = _combine_curve_keyframes(times_and_values_tuples, initial_values)

-            # Add each keyframe and its value to the keyframe dict
-            for fc, value in zip(blen_curves, chain(loc, rot, sca)):
-                store_keyframe(fc, frame, value)
+        bl_key_times = _convert_fbx_time_to_blender_time(combined_fbx_times, anim_offset, 0, fps)

-    # Add all keyframe points to the fcurves at once and modify them after
-    for fc_key, key_values in keyframes.items():
-        data_path, index = fc_key
+        flattened_channel_values_gen = _transformation_curves_gen(item, values_arrays, channel_keys)

-        # Add all keyframe points at once
-        fcurve = action.fcurves.find(data_path=data_path, index=index)
-        num_keys = len(key_values) // 2
-        fcurve.keyframe_points.add(num_keys)
-        fcurve.keyframe_points.foreach_set('co', key_values)
-        linear_enum_value = bpy.types.Keyframe.bl_rna.properties['interpolation'].enum_items['LINEAR'].value
-        fcurve.keyframe_points.foreach_set('interpolation', (linear_enum_value,) * num_keys)
+        num_loc_channels = 3
+        num_rot_channels = 4 if rot_mode in {'QUATERNION', 'AXIS_ANGLE'} else 3  # Variations of EULER are all 3
+        num_sca_channels = 3
+        num_channels = num_loc_channels + num_rot_channels + num_sca_channels
+        num_frames = len(combined_fbx_times)
+        full_length = num_channels * num_frames

-    # Since we inserted our keyframes in 'ultra-fast' mode, we have to update the fcurves now.
-    for fc in blen_curves:
-        fc.update()
+        # TODO: It may be beneficial to iterate into np.float64 since the generator yields Python floats
+        flattened_channel_values = np.fromiter(flattened_channel_values_gen, dtype=np.single, count=full_length)
+        # Reshape to one row per frame and then view the transpose so that each row corresponds to a single channel.
+        # e.g.
+        # loc_channels = channel_values[:num_loc_channels]
+        # rot_channels = channel_values[num_loc_channels:num_loc_channels + num_rot_channels]
+        # sca_channels = channel_values[num_loc_channels + num_rot_channels:]
+        channel_values = flattened_channel_values.reshape(num_frames, num_channels).T
+        for blen_curve, values in zip(blen_curves, channel_values):
+            # TODO: The bl_key_times is used more than once, meaning we duplicate some of the work
+            blen_store_keyframes(blen_curve, bl_key_times, values)
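The reshape-then-transpose at the end, as a standalone sketch: the generator yields every channel of frame 0, then every channel of frame 1, and the transpose regroups the values per channel:

```python
import numpy as np

num_frames, num_channels = 2, 10  # 3 loc + 4 quaternion rot + 3 sca
flat = np.arange(num_frames * num_channels, dtype=np.single)
channel_values = flat.reshape(num_frames, num_channels).T  # one row per channel, no copy
# channel_values[0] -> [ 0., 10.]  (channel 0, e.g. loc.x, at frame 0 and frame 1)
```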

def blen_read_animations(fbx_tmpl_astack, fbx_tmpl_alayer, stacks, scene, anim_offset, global_scale):