Fix #105088: FBX Camera Focus Distance is interpreted as millimeters #105124

Merged
Thomas Barlow merged 4 commits from Mysteryem/blender-addons:fix_105088_fbx_camera_focus_distance_mm into main 2024-01-16 21:48:22 +01:00
21 changed files with 408 additions and 98 deletions
Showing only changes of commit 24cb20a61f

View File

@@ -13,7 +13,7 @@
bl_info = {
"name": "Extra Objects",
"author": "Multiple Authors",
"version": (0, 3, 9),
"version": (0, 3, 10),
"blender": (2, 80, 0),
"location": "View3D > Add > Mesh",
"description": "Add extra mesh object types",

View File

@@ -51,6 +51,11 @@ def vSum(list):
return reduce(lambda a, b: a + b, list)
# Get a copy of the input faces, but with the normals flipped by reversing the order of the vertex indices of each face.
def flippedFaceNormals(faces):
return [list(reversed(vertexIndices)) for vertexIndices in faces]
# creates the 5 platonic solids as a base for the rest
# plato: should be one of {"4","6","8","12","20"}. decides what solid the
# outcome will be.
@@ -146,7 +151,8 @@ def createSolid(plato, vtrunc, etrunc, dual, snub):
vInput, fInput = source(dualSource[plato])
supposedSize = vSum(vInput[i] for i in fInput[0]).length / len(fInput[0])
vInput = [-i * supposedSize for i in vInput] # mirror it
return vInput, fInput
# Inverting vInput turns the mesh inside-out, so normals need to be flipped.
return vInput, flippedFaceNormals(fInput)
return source(plato)
elif 0 < vtrunc <= 0.5: # simple truncation of the source
vInput, fInput = source(plato)
@@ -161,7 +167,8 @@ def createSolid(plato, vtrunc, etrunc, dual, snub):
vInput = [i * supposedSize for i in vInput]
return vInput, fInput
vInput = [-i * supposedSize for i in vInput]
return vInput, fInput
# Inverting vInput turns the mesh inside-out, so normals need to be flipped.
return vInput, flippedFaceNormals(fInput)
# generate connection database
vDict = [{} for i in vInput]
@@ -269,6 +276,10 @@ def createSolid(plato, vtrunc, etrunc, dual, snub):
if supposedSize and not dual: # this to make the vtrunc > 1 work
supposedSize *= len(fvOutput[0]) / vSum(vOutput[i] for i in fvOutput[0]).length
vOutput = [-i * supposedSize for i in vOutput]
# Inverting vOutput turns the mesh inside-out, so normals need to be flipped.
flipNormals = True
else:
flipNormals = False
# create new faces by replacing old vert IDs by newly generated verts
ffOutput = [[] for i in fInput]
@@ -287,7 +298,10 @@ def createSolid(plato, vtrunc, etrunc, dual, snub):
ffOutput[x].append(fvOutput[i][vData[i][3].index(x) - 1])
if not dual:
return vOutput, fvOutput + feOutput + ffOutput
fOutput = fvOutput + feOutput + ffOutput
if flipNormals:
fOutput = flippedFaceNormals(fOutput)
return vOutput, fOutput
else:
# do the same procedure as above, only now on the generated mesh
# generate connection database
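
A quick aside on why the new `flippedFaceNormals` helper works: a face's normal direction is determined by its winding order, so reversing the vertex index order of each face flips the normal. A minimal, self-contained illustration (the sample `faces` data is made up):

```python
# Reversing each face's vertex order flips its winding, and therefore its normal.
def flippedFaceNormals(faces):
    return [list(reversed(vertexIndices)) for vertexIndices in faces]

faces = [[0, 1, 2], [2, 3, 0]]  # hypothetical triangle faces
assert flippedFaceNormals(faces) == [[2, 1, 0], [0, 3, 2]]
```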

View File

@@ -55,8 +55,6 @@ class AutoKeying:
options.add('INSERTKEY_VISUAL')
if prefs.edit.use_keyframe_insert_needed:
options.add('INSERTKEY_NEEDED')
if prefs.edit.use_insertkey_xyz_to_rgb:
options.add('INSERTKEY_XYZ_TO_RGB')
if ts.use_keyframe_cycle_aware:
options.add('INSERTKEY_CYCLE_AWARE')
return options

View File

@@ -911,7 +911,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
# Core functionality
def invoke(self, context, event):
engine = context.scene.render.engine
if engine not in {'CYCLES', 'BLENDER_EEVEE'}:
if engine not in {'CYCLES', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'}:
if engine != 'BLENDER_WORKBENCH':
self.report({'ERROR'}, tip_("Cannot generate materials for unknown %s render engine") % engine)
return {'CANCELLED'}
@@ -986,7 +986,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
# Configure material
engine = context.scene.render.engine
if engine in {'CYCLES', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}:
if engine in {'CYCLES', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'}:
material = self.create_cycles_material(context, img_spec)
# Create and position plane object

View File

@@ -3,7 +3,7 @@
# SPDX-License-Identifier: GPL-2.0-or-later
bl_info = {
"name": "STL format",
"name": "STL format (legacy)",
"author": "Guillaume Bouchard (Guillaum)",
"version": (1, 1, 3),
"blender": (2, 81, 6),
@@ -60,7 +60,7 @@ from bpy.types import (
@orientation_helper(axis_forward='Y', axis_up='Z')
class ImportSTL(Operator, ImportHelper):
bl_idname = "import_mesh.stl"
bl_label = "Import STL"
bl_label = "Import STL (legacy)"
bl_description = "Load STL triangle mesh data"
bl_options = {'UNDO'}
@@ -190,7 +190,7 @@ class STL_PT_import_geometry(bpy.types.Panel):
@orientation_helper(axis_forward='Y', axis_up='Z')
class ExportSTL(Operator, ExportHelper):
bl_idname = "export_mesh.stl"
bl_label = "Export STL"
bl_label = "Export STL (legacy)"
bl_description = """Save STL triangle mesh data"""
filename_ext = ".stl"
@@ -403,11 +403,11 @@ class STL_PT_export_geometry(bpy.types.Panel):
def menu_import(self, context):
self.layout.operator(ImportSTL.bl_idname, text="Stl (.stl)")
self.layout.operator(ImportSTL.bl_idname, text="Stl (.stl) (legacy)")
def menu_export(self, context):
self.layout.operator(ExportSTL.bl_idname, text="Stl (.stl)")
self.layout.operator(ExportSTL.bl_idname, text="Stl (.stl) (legacy)")
classes = (

View File

@@ -367,4 +367,4 @@ def unregister():
if __name__ == "__main__":
register()
register()

View File

@@ -174,7 +174,7 @@ def sane_name(name):
return name_fixed
# Strip non ascii chars
new_name_clean = new_name = name.encode("ASCII", "replace").decode("ASCII")[:12]
new_name_clean = new_name = name.encode("ASCII", "replace").decode("ASCII")[:16]
i = 0
while new_name in name_unique:

View File

@@ -260,6 +260,7 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
tint1[:3] + [1] if tint1 else shader.inputs['Base Color'].default_value[:])
contextWrapper._grid_to_location(1, 2, dst_node=mixer, ref_node=shader)
img_wrap = contextWrapper.base_color_texture
image.alpha_mode = 'CHANNEL_PACKED'
links.new(mixer.outputs['Color'], shader.inputs['Base Color'])
if tint2 is not None:
img_wrap.colorspace_name = 'Non-Color'

View File

@@ -5,7 +5,7 @@
bl_info = {
"name": "FBX format",
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
"version": (5, 11, 3),
"version": (5, 11, 4),
"blender": (4, 1, 0),
"location": "File > Import-Export",
"description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",

View File

@@ -4,10 +4,13 @@
try:
from . import data_types
from .fbx_utils_threading import MultiThreadedTaskConsumer
except:
import data_types
from fbx_utils_threading import MultiThreadedTaskConsumer
from struct import pack
from contextlib import contextmanager
import array
import numpy as np
import zlib
@@ -51,6 +54,57 @@ class FBXElem:
self._end_offset = -1
self._props_length = -1
@classmethod
@contextmanager
def enable_multithreading_cm(cls):
"""Temporarily enable multithreaded array compression.
The context manager handles starting up and shutting down the threads.
Only exits once all the threads are done (either all tasks were completed or an error occurred and the threads
were stopped prematurely).
Writing to a file is temporarily disabled as a safeguard."""
# __enter__()
orig_func = cls._add_compressed_array_helper
orig_write = cls._write
def insert_compressed_array(props, insert_at, data, length):
# zlib.compress releases the GIL, so can be multithreaded.
data = zlib.compress(data, 1)
comp_len = len(data)
encoding = 1
data = pack('<3I', length, encoding, comp_len) + data
props[insert_at] = data
with MultiThreadedTaskConsumer.new_cpu_bound_cm(insert_compressed_array) as wrapped_func:
try:
def _add_compressed_array_helper_multi(self, data, length):
# Append a dummy value that will be replaced with the compressed array data later.
self.props.append(...)
# The index to insert the compressed array into.
insert_at = len(self.props) - 1
# Schedule the array to be compressed on a separate thread and then inserted into the hierarchy at
# `insert_at`.
wrapped_func(self.props, insert_at, data, length)
# As an extra safeguard, temporarily replace the `_write` function to raise an error if called.
def temp_write(*_args, **_kwargs):
raise RuntimeError("Writing is not allowed until multithreaded array compression has been disabled")
cls._add_compressed_array_helper = _add_compressed_array_helper_multi
cls._write = temp_write
# Return control back to the caller of __enter__().
yield
finally:
# __exit__()
# Restore the original functions.
cls._add_compressed_array_helper = orig_func
cls._write = orig_write
# Exiting the MultiThreadedTaskConsumer context manager will wait for all scheduled tasks to complete.
def add_bool(self, data):
assert(isinstance(data, bool))
data = pack('?', data)
@@ -130,21 +184,26 @@ class FBXElem:
self.props_type.append(data_types.STRING)
self.props.append(data)
def _add_compressed_array_helper(self, data, length):
"""Note: This function may be swapped out by enable_multithreading_cm with an equivalent that supports
multithreading."""
data = zlib.compress(data, 1)
comp_len = len(data)
encoding = 1
data = pack('<3I', length, encoding, comp_len) + data
self.props.append(data)
def _add_array_helper(self, data, prop_type, length):
self.props_type.append(prop_type)
# mimic behavior of fbxconverter (also common sense)
# we could make this configurable.
encoding = 0 if len(data) <= 128 else 1
if encoding == 0:
pass
data = pack('<3I', length, encoding, len(data)) + data
self.props.append(data)
elif encoding == 1:
data = zlib.compress(data, 1)
comp_len = len(data)
data = pack('<3I', length, encoding, comp_len) + data
self.props_type.append(prop_type)
self.props.append(data)
self._add_compressed_array_helper(data, length)
def _add_parray_helper(self, data, array_type, prop_type):
assert (isinstance(data, array.array))

View File

@@ -3495,31 +3495,35 @@ def save_single(operator, scene, depsgraph, filepath="",
# Generate some data about exported scene...
scene_data = fbx_data_from_scene(scene, depsgraph, settings)
root = elem_empty(None, b"") # Root element has no id, as it is not saved per se!
# Enable multithreaded array compression in FBXElem and wait until all threads are done before exiting the context
# manager.
with encode_bin.FBXElem.enable_multithreading_cm():
# Writing elements into an FBX hierarchy can now begin.
root = elem_empty(None, b"") # Root element has no id, as it is not saved per se!
# Mostly FBXHeaderExtension and GlobalSettings.
fbx_header_elements(root, scene_data)
# Mostly FBXHeaderExtension and GlobalSettings.
fbx_header_elements(root, scene_data)
# Documents and References are pretty much void currently.
fbx_documents_elements(root, scene_data)
fbx_references_elements(root, scene_data)
# Documents and References are pretty much void currently.
fbx_documents_elements(root, scene_data)
fbx_references_elements(root, scene_data)
# Templates definitions.
fbx_definitions_elements(root, scene_data)
# Templates definitions.
fbx_definitions_elements(root, scene_data)
# Actual data.
fbx_objects_elements(root, scene_data)
# Actual data.
fbx_objects_elements(root, scene_data)
# How data are inter-connected.
fbx_connections_elements(root, scene_data)
# How data are inter-connected.
fbx_connections_elements(root, scene_data)
# Animation.
fbx_takes_elements(root, scene_data)
# Animation.
fbx_takes_elements(root, scene_data)
# Cleanup!
fbx_scene_data_cleanup(scene_data)
# Cleanup!
fbx_scene_data_cleanup(scene_data)
# And we are down, we can write the whole thing!
# And we are done, all multithreaded tasks are complete, and we can write the whole thing to file!
encode_bin.write(filepath, root, FBX_VERSION)
# Clear cached ObjectWrappers!

View File

@@ -0,0 +1,194 @@
# SPDX-FileCopyrightText: 2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later
from contextlib import contextmanager, nullcontext
import os
from queue import SimpleQueue
# Note: `bpy` cannot be imported here because this module is also used by the fbx2json.py and json2fbx.py scripts.
# For debugging/profiling purposes, can be modified at runtime to force single-threaded execution.
_MULTITHREADING_ENABLED = True
# The concurrent.futures module may not work or may not be available on WebAssembly platforms wasm32-emscripten and
# wasm32-wasi.
try:
from concurrent.futures import ThreadPoolExecutor
except ModuleNotFoundError:
_MULTITHREADING_ENABLED = False
ThreadPoolExecutor = None
else:
try:
# The module may be available, but not be fully functional. An error may be raised when attempting to start a
# new thread.
with ThreadPoolExecutor() as tpe:
# Attempt to start a thread by submitting a callable.
tpe.submit(lambda: None)
except Exception:
# Assume that multithreading is not supported and fall back to single-threaded execution.
_MULTITHREADING_ENABLED = False
def get_cpu_count():
"""Get the number of cpus assigned to the current process if that information is available on this system.
If not available, get the total number of cpus.
If the cpu count is indeterminable, it is assumed that there is only 1 cpu available."""
sched_getaffinity = getattr(os, "sched_getaffinity", None)
if sched_getaffinity is not None:
# Return the number of cpus assigned to the current process.
return len(sched_getaffinity(0))
count = os.cpu_count()
return count if count is not None else 1
class MultiThreadedTaskConsumer:
"""Helper class that encapsulates everything needed to run a function on separate threads, with a single-threaded
fallback if multithreading is not available.
Lower overhead than typical use of ThreadPoolExecutor because no Future objects are returned, which makes this class
more suitable for running many smaller tasks.
As with any threaded parallelization, because of Python's Global Interpreter Lock, only one thread can execute
Python code at a time, so threaded parallelization is only useful when the functions used release the GIL, such as
many IO related functions."""
# A special task value used to signal task consumer threads to shut down.
_SHUT_DOWN_THREADS = object()
__slots__ = ("_consumer_function", "_shared_task_queue", "_task_consumer_futures", "_executor",
"_max_consumer_threads", "_shutting_down", "_max_queue_per_consumer")
def __init__(self, consumer_function, max_consumer_threads, max_queue_per_consumer=5):
# It's recommended to use MultiThreadedTaskConsumer.new_cpu_bound_cm() instead of creating new instances
# directly.
# __init__ should only be called after checking _MULTITHREADING_ENABLED.
assert(_MULTITHREADING_ENABLED)
# The function that will be called on separate threads to consume tasks.
self._consumer_function = consumer_function
# All the threads share a single queue. This is a simplistic approach, but it is unlikely to be problematic
# unless the main thread is expected to wait a long time for the consumer threads to finish.
self._shared_task_queue = SimpleQueue()
# A reference to each thread is kept through the returned Future objects. This is used as part of determining when
# new threads should be started, and to receive and handle exceptions from the threads.
self._task_consumer_futures = []
# Create the executor.
self._executor = ThreadPoolExecutor(max_workers=max_consumer_threads)
# Technically the executor's max workers is accessible through its `._max_workers` attribute, but since that is
# private it could change without warning, so we store the max workers/consumers ourselves.
self._max_consumer_threads = max_consumer_threads
# The maximum task queue size (before another consumer thread is started) increases by this amount with every
# additional consumer thread.
self._max_queue_per_consumer = max_queue_per_consumer
# When shutting down the threads, this is set to True as an extra safeguard to prevent new tasks being
# scheduled.
self._shutting_down = False
@classmethod
def new_cpu_bound_cm(cls, consumer_function, other_cpu_bound_threads_in_use=1, hard_max_threads=32):
"""Return a context manager that, when entered, returns a wrapper around `consumer_function` that schedules
`consumer_function` to be run on a separate thread.
If the system can't use multithreading, then the context manager's returned function will instead be the input
`consumer_function` argument, causing tasks to be run immediately on the calling thread.
When exiting the context manager, it waits for all scheduled tasks to complete and prevents the creation of new
tasks, similar to calling ThreadPoolExecutor.shutdown(). For these reasons, the wrapped function should only be
called from the thread that entered the context manager, otherwise there is no guarantee that all tasks will get
scheduled before the context manager exits.
Any task that fails with an exception will cause all task consumer threads to stop.
The maximum number of threads used matches the number of cpus available up to a maximum of `hard_max_threads`.
`hard_max_threads`'s default of 32 matches ThreadPoolExecutor's default behaviour.
The maximum number of threads used is decreased by `other_cpu_bound_threads_in_use`, which defaults to `1` on the
assumption that the calling thread will also be doing CPU-bound work.
Most IO-bound tasks can probably use a ThreadPoolExecutor directly instead because there will typically be fewer
tasks and, on average, each individual task will take longer.
If needed, `cls.new_cpu_bound_cm(consumer_function, -4)` could be suitable for lots of small IO-bound tasks,
because it ensures a minimum of 5 threads, like the default ThreadPoolExecutor."""
if _MULTITHREADING_ENABLED:
max_threads = get_cpu_count() - other_cpu_bound_threads_in_use
max_threads = min(max_threads, hard_max_threads)
if max_threads > 0:
return cls(consumer_function, max_threads)._wrap_executor_cm()
# Fall back to single-threaded.
return nullcontext(consumer_function)
def _task_consumer_callable(self):
"""Callable that is run by each task consumer thread.
Signals the other task consumer threads to stop when stopped intentionally or when an exception occurs."""
try:
while True:
# Blocks until it can get a task.
task_args = self._shared_task_queue.get()
if task_args is self._SHUT_DOWN_THREADS:
# This special value signals that it's time for all the threads to stop.
break
else:
# Call the task consumer function.
self._consumer_function(*task_args)
finally:
# Either the thread has been told to shut down because it received _SHUT_DOWN_THREADS or an exception has
# occurred.
# Add _SHUT_DOWN_THREADS to the queue so that the other consumer threads will also shut down.
self._shared_task_queue.put(self._SHUT_DOWN_THREADS)
def _schedule_task(self, *args):
"""Task consumer threads are only started as tasks are added.
To mitigate starting lots of threads if many tasks are scheduled in quick succession, new threads are only
started if the number of queued tasks grows too large.
This function is a slight misuse of ThreadPoolExecutor. Normally each task to be scheduled would be submitted
through ThreadPoolExecutor.submit, but doing so is noticeably slower for small tasks. We could start new Thread
instances manually without using ThreadPoolExecutor, but ThreadPoolExecutor gives us a higher level API for
waiting for threads to finish and handling exceptions without having to implement an API using Thread ourselves.
"""
if self._shutting_down:
# Shouldn't occur through normal usage.
raise RuntimeError("Cannot schedule new tasks after shutdown")
# Schedule the task by adding it to the task queue.
self._shared_task_queue.put(args)
# Check if more consumer threads need to be added to account for the rate at which tasks are being scheduled
# compared to the rate at which tasks are being consumed.
current_consumer_count = len(self._task_consumer_futures)
if current_consumer_count < self._max_consumer_threads:
# The max queue size increases as new threads are added, otherwise, by the time the next task is added, it's
# likely that the queue size will still be over the max, causing another new thread to be added immediately.
# Increasing the max queue size whenever a new thread is started gives some time for the new thread to start
# up and begin consuming tasks before it's determined that another thread is needed.
max_queue_size_for_current_consumers = self._max_queue_per_consumer * current_consumer_count
if self._shared_task_queue.qsize() > max_queue_size_for_current_consumers:
# Add a new consumer thread because the queue has grown too large.
self._task_consumer_futures.append(self._executor.submit(self._task_consumer_callable))
@contextmanager
def _wrap_executor_cm(self):
"""Wrap the executor's context manager to instead return self._schedule_task and such that the threads
automatically start shutting down before the executor itself starts shutting down."""
# .__enter__()
# Exiting the context manager of the executor will wait for all threads to finish and prevent new
# threads from being created, as if its shutdown() method had been called.
with self._executor:
try:
yield self._schedule_task
finally:
# .__exit__()
self._shutting_down = True
# Signal all consumer threads to finish up and shut down so that the executor can shut down.
# When this is run on the same thread that schedules new tasks, this guarantees that no more tasks will
# be scheduled after the consumer threads start to shut down.
self._shared_task_queue.put(self._SHUT_DOWN_THREADS)
# Because `self._executor` was entered with a context manager, it will wait for all the consumer threads
# to finish even if we propagate an exception from one of the threads here.
for future in self._task_consumer_futures:
# .exception() waits for the future to finish and returns its raised exception or None.
ex = future.exception()
if ex is not None:
# If one of the threads raised an exception, propagate it to the main thread.
# Only the first exception will be propagated if there were multiple.
raise ex
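
To make the intended usage concrete, here is a minimal, self-contained sketch of driving `MultiThreadedTaskConsumer.new_cpu_bound_cm` with a toy consumer function (the FBX exporter and importer pair it with array compression/decompression instead); `compress_into`, `chunks` and `results` are made up for the example:

```python
import zlib
from fbx_utils_threading import MultiThreadedTaskConsumer

def compress_into(results, index, data):
    # zlib.compress releases the GIL, so consumer threads can run in parallel.
    results[index] = zlib.compress(data, 1)

chunks = [bytes(1024) for _ in range(100)]
results = [None] * len(chunks)
with MultiThreadedTaskConsumer.new_cpu_bound_cm(compress_into) as schedule:
    for i, chunk in enumerate(chunks):
        # Queues (results, i, chunk) for a consumer thread, or runs the function
        # immediately on this thread if multithreading is unavailable.
        schedule(results, i, chunk)
# Exiting the context manager waits for every scheduled task to complete.
assert all(r is not None for r in results)
```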

View File

@@ -133,10 +133,10 @@ def json2fbx(fn):
fn_fbx = "%s.fbx" % os.path.splitext(fn)[0]
print("Writing: %r " % fn_fbx, end="")
json_root = []
with open(fn) as f_json:
json_root = json.load(f_json)
fbx_root, fbx_version = parse_json(json_root)
with encode_bin.FBXElem.enable_multithreading_cm():
fbx_root, fbx_version = parse_json(json_root)
print("(Version %d) ..." % fbx_version)
encode_bin.write(fn_fbx, fbx_root, fbx_version)

View File

@@ -16,6 +16,7 @@ import zlib
from io import BytesIO
from . import data_types
from .fbx_utils_threading import MultiThreadedTaskConsumer
# at the end of each nested block, there is a NUL record to indicate
# that the sub-scope exists (i.e. to distinguish between P: and P : {})
@@ -59,16 +60,10 @@ def read_elem_start64(read):
return end_offset, prop_count, elem_id
def unpack_array(read, array_type, array_stride, array_byteswap):
length, encoding, comp_len = read_array_params(read)
data = read(comp_len)
if encoding == 0:
pass
elif encoding == 1:
data = zlib.decompress(data)
def _create_array(data, length, array_type, array_stride, array_byteswap):
"""Create an array from FBX data."""
# If the size of the data does not match the expected size of the array, then something is wrong with the code or the
# FBX file.
assert(length * array_stride == len(data))
data_array = array.array(array_type, data)
@@ -77,6 +72,49 @@ def unpack_array(read, array_type, array_stride, array_byteswap):
return data_array
def _decompress_and_insert_array(elem_props_data, index_to_set, compressed_array_args):
"""Decompress array data and insert the created array into the FBX tree being parsed.
This is usually called from a separate thread to the main thread."""
compressed_data, length, array_type, array_stride, array_byteswap = compressed_array_args
# zlib.decompress releases the Global Interpreter Lock, so another thread can run code while waiting for the
# decompression to complete.
data = zlib.decompress(compressed_data, bufsize=length * array_stride)
# Create and insert the array into the parsed FBX hierarchy.
elem_props_data[index_to_set] = _create_array(data, length, array_type, array_stride, array_byteswap)
def unpack_array(read, array_type, array_stride, array_byteswap):
"""Unpack an array from an FBX file being parsed.
If the array data is compressed, the compressed data is combined with the other arguments into a tuple to prepare
for decompressing on a separate thread if possible.
If the array data is not compressed, the array is created.
Returns (tuple, True) or (array, False)."""
length, encoding, comp_len = read_array_params(read)
data = read(comp_len)
if encoding == 1:
# Array data requires decompression, which is done in a separate thread if possible.
return (data, length, array_type, array_stride, array_byteswap), True
else:
return _create_array(data, length, array_type, array_stride, array_byteswap), False
read_array_dict = {
b'b'[0]: lambda read: unpack_array(read, data_types.ARRAY_BOOL, 1, False), # bool
b'c'[0]: lambda read: unpack_array(read, data_types.ARRAY_BYTE, 1, False), # ubyte
b'i'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT32, 4, True), # int
b'l'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT64, 8, True), # long
b'f'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT32, 4, False), # float
b'd'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT64, 8, False), # double
}
read_data_dict = {
b'Z'[0]: lambda read: unpack(b'<b', read(1))[0], # byte
b'Y'[0]: lambda read: unpack(b'<h', read(2))[0], # 16 bit int
@@ -88,12 +126,6 @@ read_data_dict = {
b'L'[0]: lambda read: unpack(b'<q', read(8))[0], # 64 bit int
b'R'[0]: lambda read: read(read_uint(read)), # binary data
b'S'[0]: lambda read: read(read_uint(read)), # string data
b'f'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT32, 4, False), # array (float)
b'i'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT32, 4, True), # array (int)
b'd'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT64, 8, False), # array (double)
b'l'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT64, 8, True), # array (long)
b'b'[0]: lambda read: unpack_array(read, data_types.ARRAY_BOOL, 1, False), # array (bool)
b'c'[0]: lambda read: unpack_array(read, data_types.ARRAY_BYTE, 1, False), # array (ubyte)
}
@@ -115,7 +147,7 @@ def init_version(fbx_version):
_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)
def read_elem(read, tell, use_namedtuple, tell_file_offset=0):
def read_elem(read, tell, use_namedtuple, decompress_array_func, tell_file_offset=0):
# [0] the offset at which this block ends
# [1] the number of properties in the scope
# [2] the length of the property list
@@ -133,7 +165,17 @@ def read_elem(read, tell, use_namedtuple, tell_file_offset=0):
for i in range(prop_count):
data_type = read(1)[0]
elem_props_data[i] = read_data_dict[data_type](read)
if data_type in read_array_dict:
val, needs_decompression = read_array_dict[data_type](read)
if needs_decompression:
# Array decompression releases the GIL, so can be multithreaded (if possible on the current system) for
# performance.
# After decompressing, the array is inserted into elem_props_data[i].
decompress_array_func(elem_props_data, i, val)
else:
elem_props_data[i] = val
else:
elem_props_data[i] = read_data_dict[data_type](read)
elem_props_type[i] = data_type
pos = tell()
@@ -176,7 +218,7 @@ def read_elem(read, tell, use_namedtuple, tell_file_offset=0):
sub_pos = start_sub_pos
while sub_pos < sub_tree_end:
elem_subtree.append(read_elem(read, tell, use_namedtuple, tell_file_offset))
elem_subtree.append(read_elem(read, tell, use_namedtuple, decompress_array_func, tell_file_offset))
sub_pos = tell()
# At the end of each subtree there should be a sentinel (an empty element with all bytes set to zero).
@@ -211,7 +253,8 @@ def parse_version(fn):
def parse(fn, use_namedtuple=True):
root_elems = []
with open(fn, 'rb') as f:
multithread_decompress_array_cm = MultiThreadedTaskConsumer.new_cpu_bound_cm(_decompress_and_insert_array)
with open(fn, 'rb') as f, multithread_decompress_array_cm as decompress_array_func:
read = f.read
tell = f.tell
@@ -222,7 +265,7 @@ def parse(fn, use_namedtuple=True):
init_version(fbx_version)
while True:
elem = read_elem(read, tell, use_namedtuple)
elem = read_elem(read, tell, use_namedtuple, decompress_array_func)
if elem is None:
break
root_elems.append(elem)
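
The read side uses the same array layout: `unpack_array` now defers only the zlib decompression, returning either a ready-made array or the tuple of arguments needed to build one. A small in-memory sketch of the uncompressed path, assuming `unpack_array` is importable from this module and that `read_array_params` reads the usual 12-byte `(length, encoding, comp_len)` header (the sample data is made up):

```python
import array
from io import BytesIO
from struct import pack

values = array.array('d', [1.0, 2.0, 3.0])
raw = values.tobytes()
payload = pack('<3I', len(values), 0, len(raw)) + raw  # encoding 0 = uncompressed
read = BytesIO(payload).read

val, needs_decompression = unpack_array(read, 'd', 8, False)
assert not needs_decompression
assert list(val) == [1.0, 2.0, 3.0]
```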

View File

@@ -20,7 +20,7 @@
bl_info = {
"name": "Material Utilities",
"author": "MichaleW, ChrisHinde",
"version": (2, 2, 1),
"version": (2, 2, 2),
"blender": (3, 0, 0),
"location": "View3D > Shift + Q key",
"description": "Menu of material tools (assign, select..) in the 3D View",

View File

@@ -16,7 +16,7 @@ def mu_assign_material_slots(object, material_list):
active_object = bpy.context.active_object
bpy.context.view_layer.objects.active = object
for s in object.material_slots:
for _ in range(len(object.material_slots)):
bpy.ops.object.material_slot_remove()
# re-add them and assign material
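
The change above fixes a classic pitfall: `bpy.ops.object.material_slot_remove()` shrinks `object.material_slots` while the old code was still iterating over it, so some slots were skipped. A plain-Python sketch of the same pattern (the `slots` list is a stand-in for the slot collection):

```python
slots = ['a', 'b', 'c', 'd']
for s in slots:
    slots.pop(0)  # analogous to bpy.ops.object.material_slot_remove()
assert slots == ['c', 'd']  # buggy: two slots survive

slots = ['a', 'b', 'c', 'd']
for _ in range(len(slots)):
    slots.pop(0)  # fixed: iterate a snapshot of the count instead
assert slots == []
```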

View File

@@ -361,7 +361,8 @@ class NWAttributeMenu(bpy.types.Menu):
for obj in objs:
if obj.data.attributes:
for attr in obj.data.attributes:
attrs.append(attr.name)
if not attr.is_internal:
attrs.append(attr.name)
attrs = list(set(attrs)) # get a unique list
if attrs:

View File

@@ -5,7 +5,7 @@
bl_info = {
"name": "3D-Print Toolbox",
"author": "Campbell Barton",
"blender": (3, 6, 0),
"blender": (4, 1, 0),
"location": "3D View > Sidebar",
"description": "Utilities for 3D printing",
"doc_url": "{BLENDER_MANUAL_URL}/addons/mesh/3d_print_toolbox.html",

View File

@@ -106,13 +106,12 @@ def write_mesh(context, report_cb):
addon_utils.enable(addon_id, default_set=False)
if export_format == 'STL':
addon_ensure("io_mesh_stl")
filepath = bpy.path.ensure_ext(filepath, ".stl")
ret = bpy.ops.export_mesh.stl(
ret = bpy.ops.wm.stl_export(
filepath=filepath,
ascii=False,
use_mesh_modifiers=True,
use_selection=True,
ascii_format=False,
apply_modifiers=True,
export_selected_objects=True,
global_scale=global_scale,
)
elif export_format == 'PLY':

View File

@@ -43,6 +43,22 @@ class PIE_OT_PivotToSelection(Operator):
# Pivot to Bottom
def origin_to_bottom(ob):
if ob.type != 'MESH':
return
init = 0
for x in ob.data.vertices:
if init == 0:
a = x.co.z
init = 1
elif x.co.z < a:
a = x.co.z
for x in ob.data.vertices:
x.co.z -= a
ob.location.z += a
class PIE_OT_PivotBottom(Operator):
bl_idname = "object.pivotobottom"
@@ -59,19 +75,9 @@ class PIE_OT_PivotBottom(Operator):
def execute(self, context):
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')
o = context.active_object
init = 0
for x in o.data.vertices:
if init == 0:
a = x.co.z
init = 1
elif x.co.z < a:
a = x.co.z
for x in o.data.vertices:
x.co.z -= a
o.location.z += a
for ob in context.selected_objects:
origin_to_bottom(ob)
return {'FINISHED'}
@@ -93,19 +99,10 @@ class PIE_OT_PivotBottom_edit(Operator):
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')
o = context.active_object
init = 0
for x in o.data.vertices:
if init == 0:
a = x.co.z
init = 1
elif x.co.z < a:
a = x.co.z
for x in o.data.vertices:
x.co.z -= a
for ob in context.selected_objects:
origin_to_bottom(ob)
o.location.z += a
bpy.ops.object.mode_set(mode='EDIT')
return {'FINISHED'}
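
As an aside, the extracted `origin_to_bottom` helper could be written more idiomatically with `min()`; a sketch of an equivalent form, not part of the patch:

```python
def origin_to_bottom(ob):
    if ob.type != 'MESH' or not ob.data.vertices:
        return
    # Find the lowest vertex, shift the mesh up so it sits at z=0 locally,
    # and move the object origin down by the same amount.
    lowest_z = min(v.co.z for v in ob.data.vertices)
    for v in ob.data.vertices:
        v.co.z -= lowest_z
    ob.location.z += lowest_z
```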

View File

@@ -77,7 +77,7 @@ class PT_VDMBaker(bpy.types.Panel):
It also has settings for name (image, texture and brush at once), resolution, compression and color depth.
"""
bl_label = 'VDM Brush Baker'
bl_idname = 'Editor_PT_LayoutPanel'
bl_idname = 'VDM_PT_bake_tools'
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = 'Tool'