diff --git a/io_scene_fbx/__init__.py b/io_scene_fbx/__init__.py
index 9cfebeae6..7087f1f9a 100644
--- a/io_scene_fbx/__init__.py
+++ b/io_scene_fbx/__init__.py
@@ -5,7 +5,7 @@
 bl_info = {
     "name": "FBX format",
     "author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
-    "version": (5, 11, 2),
+    "version": (5, 11, 3),
     "blender": (4, 1, 0),
     "location": "File > Import-Export",
     "description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",
diff --git a/io_scene_fbx/parse_fbx.py b/io_scene_fbx/parse_fbx.py
index 539742ec2..1af5994e2 100644
--- a/io_scene_fbx/parse_fbx.py
+++ b/io_scene_fbx/parse_fbx.py
@@ -16,6 +16,7 @@ import zlib
 from io import BytesIO
 
 from . import data_types
+from .fbx_utils_threading import MultiThreadedTaskConsumer
 
 # at the end of each nested block, there is a NUL record to indicate
 # that the sub-scope exists (i.e. to distinguish between P: and P : {})
@@ -59,16 +60,10 @@ def read_elem_start64(read):
     return end_offset, prop_count, elem_id
 
 
-def unpack_array(read, array_type, array_stride, array_byteswap):
-    length, encoding, comp_len = read_array_params(read)
-
-    data = read(comp_len)
-
-    if encoding == 0:
-        pass
-    elif encoding == 1:
-        data = zlib.decompress(data)
-
+def _create_array(data, length, array_type, array_stride, array_byteswap):
+    """Create an array from FBX data."""
+    # If the size of the data does not match the expected size of the array, then something is wrong with the code or
+    # the FBX file.
     assert(length * array_stride == len(data))
 
     data_array = array.array(array_type, data)
@@ -77,6 +72,49 @@ def unpack_array(read, array_type, array_stride, array_byteswap):
     return data_array
 
 
+def _decompress_and_insert_array(elem_props_data, index_to_set, compressed_array_args):
+    """Decompress array data and insert the created array into the FBX tree being parsed.
+
+    This is usually called from a thread other than the main thread."""
+    compressed_data, length, array_type, array_stride, array_byteswap = compressed_array_args
+
+    # zlib.decompress releases the Global Interpreter Lock, so another thread can run code while waiting for the
+    # decompression to complete.
+    data = zlib.decompress(compressed_data, bufsize=length * array_stride)
+
+    # Create and insert the array into the parsed FBX hierarchy.
+    elem_props_data[index_to_set] = _create_array(data, length, array_type, array_stride, array_byteswap)
+
+
+def unpack_array(read, array_type, array_stride, array_byteswap):
+    """Unpack an array from an FBX file being parsed.
+
+    If the array data is compressed, the compressed data is combined with the other arguments into a tuple, ready for
+    decompression on a separate thread if possible.
+
+    If the array data is not compressed, the array is created immediately.
+
+    Returns (tuple, True) or (array, False)."""
+    length, encoding, comp_len = read_array_params(read)
+
+    data = read(comp_len)
+
+    if encoding == 1:
+        # Array data requires decompression, which is done in a separate thread if possible.
+        return (data, length, array_type, array_stride, array_byteswap), True
+    else:
+        return _create_array(data, length, array_type, array_stride, array_byteswap), False
+
+
+read_array_dict = {
+    b'b'[0]: lambda read: unpack_array(read, data_types.ARRAY_BOOL, 1, False),  # bool
+    b'c'[0]: lambda read: unpack_array(read, data_types.ARRAY_BYTE, 1, False),  # ubyte
+    b'i'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT32, 4, True),  # int
+    b'l'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT64, 8, True),  # long
+    b'f'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT32, 4, False),  # float
+    b'd'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT64, 8, False),  # double
+}
+
 read_data_dict = {
     b'Z'[0]: lambda read: unpack(b'
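
Note for context (not part of the patch): below is a minimal sketch of how a caller might consume the (result, needs_decompression) pair that unpack_array now returns. The read_array_prop helper and the ThreadPoolExecutor are illustrative assumptions only; the patch itself presumably dispatches through the newly imported MultiThreadedTaskConsumer, whose wiring lies outside this excerpt.

from concurrent.futures import ThreadPoolExecutor

def read_array_prop(read, type_code, elem_props_data, index, executor):
    # Hypothetical helper, not in the patch: parse one array property and either
    # store the finished array directly or offload decompression to a worker thread.
    result, needs_decompression = read_array_dict[type_code](read)
    if needs_decompression:
        # result is the (compressed_data, length, array_type, array_stride,
        # array_byteswap) tuple. The worker decompresses it (zlib releases the GIL,
        # so this overlaps with further parsing on the main thread) and writes the
        # finished array into elem_props_data[index].
        executor.submit(_decompress_and_insert_array, elem_props_data, index, result)
    else:
        # result is already a fully constructed array.array.
        elem_props_data[index] = result

Deferring decompression this way keeps the main thread's parsing loop free of zlib work while still producing arrays in their correct slots of the parsed hierarchy.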