Node Wrangler: Improved accuracy on Align Nodes operator #104551

Open
quackarooni wants to merge 18 commits from quackarooni/blender-addons:nw_rework_align_nodes into main

4 changed files with 74 additions and 46 deletions
Showing only changes of commit 6e5c7b7355.


@ -16,7 +16,7 @@ except:
bl_info = {
"name": "Import AutoCAD DXF Format (.dxf)",
"author": "Lukas Treyer, Manfred Moitzi (support + dxfgrabber library), Vladimir Elistratov, Bastien Montagne, Remigiusz Fiedler (AKA migius)",
"version": (0, 9, 6),
"version": (0, 9, 8),
"blender": (2, 80, 0),
"location": "File > Import > AutoCAD DXF",
"description": "Import files in the Autocad DXF format (.dxf)",


@ -9,6 +9,8 @@ __author__ = "mozman <mozman@gmx.at>"
import math
from mathutils import Vector
from . import const
from .color import TrueColor
from .styles import default_text_style
@ -733,11 +735,6 @@ def deg2vec(deg):
return math.cos(rad), math.sin(rad), 0.
def normalized(vector):
x, y, z = vector
m = (x**2 + y**2 + z**2)**0.5
return x/m, y/m, z/m
##################################################
# MTEXT inline codes
# \L Start underline
@ -850,7 +847,7 @@ class MText(DXFEntity):
self.raw_text = "".join(lines)
if xdir is None:
xdir = deg2vec(rotation)
self.xdirection = normalized(xdir)
self.xdirection = Vector(xdir).normalized()
self.set_default_extrusion()
def lines(self):
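For reference, a minimal sketch of the replacement technique in this file: the hand-rolled normalized() helper gives way to mathutils.Vector.normalized(), which returns a new unit-length vector (standalone example, not taken from the add-on):

```python
# Minimal sketch: normalize a direction with mathutils instead of a
# hand-rolled helper. Vector.normalized() returns a new unit-length
# Vector and leaves the original untouched.
from mathutils import Vector

xdir = (3.0, 4.0, 0.0)
xdirection = Vector(xdir).normalized()
print(xdirection)  # Vector((0.6, 0.8, 0.0))
```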


@ -949,7 +949,7 @@ class Do:
# create the block
if len(block_group.objects) == 0 or name not in self.known_blocks.keys():
bpy.context.screen.scene = block_scene
bpy.context.window.scene = block_scene
block_inserts = [en for en in entity if is_.insert(en.dxftype)]
bc = (en for en in entity if is_.combined_entity(en))
bs = (en for en in entity if is_.separated_entity(en) and not is_.insert(en.dxftype))
@ -985,7 +985,7 @@ class Do:
else:
bbox = self.known_blocks[name][2]
bpy.context.screen.scene = scene
bpy.context.window.scene = scene
o = bbox.copy()
# o.empty_display_size = 0.3
o.instance_type = "COLLECTION"
@ -1379,7 +1379,7 @@ class Do:
return o
def _recenter(self, scene, name):
bpy.context.screen.scene = scene
bpy.context.window.scene = scene
bpy.context.view_layer.update()
bpy.ops.object.select_all(action='DESELECT')
@ -1621,7 +1621,7 @@ class Do:
elif self.pScene is not None: # assume Proj
scene['SRID'] = re.findall(r"\+init=(.+)\s", self.pScene.srs)[0]
#bpy.context.screen.scene = scene
#bpy.context.window.scene = scene
return self.errors
# trying to import dimensions:
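For context on the scene-switching changes above, a minimal sketch of the Blender 2.80+ API: the active scene is now assigned through the window, since the pre-2.80 bpy.context.screen.scene attribute was removed (the scene name here is hypothetical):

```python
# Minimal sketch (Blender 2.80+): switch the active scene via the window.
import bpy

block_scene = bpy.data.scenes["Block Scene"]  # hypothetical scene name
bpy.context.window.scene = block_scene        # was: bpy.context.screen.scene
bpy.context.view_layer.update()               # refresh after the switch
```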


@ -137,10 +137,10 @@ FOV_TRACK_TAG = 0xB023
ROLL_TRACK_TAG = 0xB024
COL_TRACK_TAG = 0xB025
# MORPH_TRACK_TAG = 0xB026
# HOTSPOT_TRACK_TAG = 0xB027
# FALLOFF_TRACK_TAG = 0xB028
HOTSPOT_TRACK_TAG = 0xB027
FALLOFF_TRACK_TAG = 0xB028
# HIDE_TRACK_TAG = 0xB029
# OBJECT_NODE_ID = 0xB030
OBJECT_NODE_ID = 0xB030
ROOT_OBJECT = 0xFFFF
@ -308,7 +308,7 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)
def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME):
def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN_BOUNDS, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME):
from bpy_extras.image_utils import load_image
contextObName = None
@ -337,6 +337,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
SZ_4U_SHORT = struct.calcsize('4H')
SZ_4x3MAT = struct.calcsize('ffffffffffff')
object_dict = {} # object identities
object_list = [] # for hierarchy
object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent
pivot_list = [] # pivots with hierarchy handling
@ -567,7 +568,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
temp_data = file.read(SZ_U_SHORT)
nflags = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += SZ_U_SHORT
for f in range(nflags): # Check for spline terms
if nflags > 0: # Check for spline terms
temp_data = file.read(SZ_FLOAT)
new_chunk.bytes_read += SZ_FLOAT
temp_data = file.read(SZ_3FLOAT)
@ -591,7 +592,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
temp_data = file.read(SZ_U_SHORT)
nflags = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += SZ_U_SHORT
for f in range(nflags): # Check for spline terms
if nflags > 0: # Check for spline terms
temp_data = file.read(SZ_FLOAT)
new_chunk.bytes_read += SZ_FLOAT
temp_data = file.read(SZ_FLOAT)
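A minimal, self-contained sketch of the flag-guarded read used in these two hunks, against a hypothetical byte stream (the add-on reads from the .3ds file object instead):

```python
# Minimal sketch: unpack a little-endian unsigned short of key flags,
# then read the extra spline data only when any flag bit is set.
import io
import struct

SZ_U_SHORT = struct.calcsize('H')
SZ_FLOAT = struct.calcsize('f')

stream = io.BytesIO(struct.pack('<Hf', 1, 0.5))  # hypothetical data
nflags = struct.unpack('<H', stream.read(SZ_U_SHORT))[0]
if nflags > 0:  # spline terms present
    tension = struct.unpack('<f', stream.read(SZ_FLOAT))[0]
```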
@ -634,7 +635,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
# is it an object info chunk?
elif new_chunk.ID == OBJECTINFO:
process_next_chunk(context, file, new_chunk, imported_objects, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN_BOUNDS, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
# keep track of how much we read in the main chunk
new_chunk.bytes_read += temp_chunk.bytes_read
@ -898,16 +899,16 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
CreateBlenderObject = False
CreateLightObject = True
elif CreateLightObject and new_chunk.ID == COLOR_F: # color
elif CreateLightObject and new_chunk.ID == COLOR_F: # Light color
temp_data = file.read(SZ_3FLOAT)
contextLamp.data.color = struct.unpack('<3f', temp_data)
new_chunk.bytes_read += SZ_3FLOAT
elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_MULTIPLIER: # intensity
elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_MULTIPLIER: # Intensity
temp_data = file.read(SZ_FLOAT)
contextLamp.data.energy = (float(struct.unpack('f', temp_data)[0]) * 1000)
new_chunk.bytes_read += SZ_FLOAT
elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_SPOT: # spotlight
elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_SPOT: # Spotlight
temp_data = file.read(SZ_3FLOAT)
contextLamp.data.type = 'SPOT'
spot = mathutils.Vector(struct.unpack('<3f', temp_data))
@ -918,15 +919,15 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
contextLamp.rotation_euler[0] = -1 * math.copysign(angle, aim[1])
contextLamp.rotation_euler[2] = -1 * (math.radians(90) - math.acos(aim[0] / hypo))
new_chunk.bytes_read += SZ_3FLOAT
temp_data = file.read(SZ_FLOAT) # hotspot
temp_data = file.read(SZ_FLOAT) # Hotspot
hotspot = float(struct.unpack('f', temp_data)[0])
new_chunk.bytes_read += SZ_FLOAT
temp_data = file.read(SZ_FLOAT) # angle
temp_data = file.read(SZ_FLOAT) # Beam angle
beam_angle = float(struct.unpack('f', temp_data)[0])
contextLamp.data.spot_size = math.radians(beam_angle)
contextLamp.data.spot_blend = (1.0 - (hotspot / beam_angle)) * 2
new_chunk.bytes_read += SZ_FLOAT
elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_ROLL: # roll
elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_ROLL: # Roll
temp_data = file.read(SZ_FLOAT)
contextLamp.rotation_euler[1] = float(struct.unpack('f', temp_data)[0])
new_chunk.bytes_read += SZ_FLOAT
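A minimal sketch of the angle mapping applied in this hunk: the 3DS beam angle becomes spot_size (in radians) and the hotspot angle drives spot_blend (standalone arithmetic, no bpy required):

```python
# Minimal sketch: map 3DS hotspot/beam angles (degrees) onto Blender's
# spot_size (radians) and spot_blend, mirroring the hunk above.
import math

def spot_params(hotspot, beam_angle):
    spot_size = math.radians(beam_angle)
    spot_blend = (1.0 - (hotspot / beam_angle)) * 2
    return spot_size, spot_blend

print(spot_params(30.0, 45.0))  # (0.785..., 0.666...)
```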
@ -980,18 +981,20 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
# including these here means their OB_NODE_HDR are scanned
# another object is being processed
elif new_chunk.ID in {KFDATA_AMBIENT, KFDATA_OBJECT}:
elif new_chunk.ID in {KFDATA_AMBIENT, KFDATA_OBJECT, KFDATA_CAMERA, KFDATA_LIGHT}:
object_id = ROOT_OBJECT
tracking = 'OBJECT'
child = None
elif new_chunk.ID in {KFDATA_CAMERA, KFDATA_LIGHT}:
tracking = 'STUDIO'
child = None
elif new_chunk.ID in {KFDATA_TARGET, KFDATA_L_TARGET}:
tracking = 'TARGET'
child = None
elif new_chunk.ID == OBJECT_NODE_ID:
temp_data = file.read(SZ_U_SHORT)
object_id = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += 2
elif new_chunk.ID == OBJECT_NODE_HDR:
object_name, read_str_len = read_string(file)
new_chunk.bytes_read += read_str_len
@ -1012,7 +1015,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
context.view_layer.active_layer_collection.collection.objects.link(child)
imported_objects.append(child)
if tracking not in {'STUDIO', 'TARGET'} and object_name != '$AMBIENT$':
if tracking != 'TARGET' and object_name != '$AMBIENT$':
object_dict[object_id] = child
object_list.append(child)
object_parent.append(hierarchy)
pivot_list.append(mathutils.Vector((0.0, 0.0, 0.0)))
@ -1043,18 +1047,24 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
keyframe_data = {}
default_data = child.color[:]
child.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_track_data(temp_chunk)[0]
for keydata in keyframe_data.items():
child.node_tree.nodes['Background'].inputs[0].default_value[:3] = keydata[1]
child.node_tree.keyframe_insert(data_path="nodes[\"Background\"].inputs[0].default_value", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and colortrack == 'LIGHT': # Color
keyframe_data = {}
default_data = child.data.color[:]
child.data.color = read_track_data(temp_chunk)[0]
for keydata in keyframe_data.items():
child.data.color = keydata[1]
child.data.keyframe_insert(data_path="color", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking in {'OBJECT', 'STUDIO'}: # Translation
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking == 'OBJECT': # Translation
keyframe_data = {}
default_data = child.location[:]
child.location = read_track_data(temp_chunk)[0]
for keydata in keyframe_data.items():
child.location = keydata[1]
child.location = mathutils.Vector(keydata[1]) * (CONSTRAIN_BOUNDS * 0.1) if hierarchy == ROOT_OBJECT and CONSTRAIN_BOUNDS != 0.0 else keydata[1]
child.keyframe_insert(data_path="location", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking == 'TARGET': # Target position
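The track branches above all follow the same keyframing pattern; a minimal sketch, assuming an active object and a hypothetical keyframe_data dict of frame-to-value pairs filled by the track reader:

```python
# Minimal sketch: assign each keyed value, then record it with
# keyframe_insert() at the corresponding frame.
import bpy

obj = bpy.context.object              # assumed active object
keyframe_data = {1: (0.0, 0.0, 0.0),  # hypothetical track data
                 25: (1.0, 2.0, 0.5)}
for frame, value in keyframe_data.items():
    obj.location = value
    obj.keyframe_insert(data_path="location", frame=frame)
```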
@ -1076,7 +1086,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
child.rotation_euler[2] = -1*(math.radians(90)-math.acos(pos[0]/foc))
child.keyframe_insert(data_path="rotation_euler", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and tracking in {'OBJECT', 'STUDIO'}: # Rotation
elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and tracking == 'OBJECT': # Rotation
keyframe_rotation = {}
new_chunk.bytes_read += SZ_U_SHORT * 5
temp_data = file.read(SZ_U_SHORT * 5)
@ -1092,7 +1102,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
temp_data = file.read(SZ_U_SHORT)
nflags = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += SZ_U_SHORT
for f in range(nflags): # Check for spline term values
if nflags > 0: # Check for spline term values
temp_data = file.read(SZ_FLOAT)
new_chunk.bytes_read += SZ_FLOAT
temp_data = file.read(SZ_4FLOAT)
@ -1106,26 +1116,47 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler()
child.keyframe_insert(data_path="rotation_euler", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and tracking in {'OBJECT', 'STUDIO'}: # Scale
elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and tracking == 'OBJECT': # Scale
keyframe_data = {}
default_data = child.scale[:]
child.scale = read_track_data(temp_chunk)[0]
for keydata in keyframe_data.items():
child.scale = keydata[1]
child.scale = mathutils.Vector(keydata[1]) * (CONSTRAIN_BOUNDS * 0.1) if hierarchy == ROOT_OBJECT and CONSTRAIN_BOUNDS != 0.0 else keydata[1]
child.keyframe_insert(data_path="scale", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracking in {'OBJECT', 'STUDIO'}: # Roll angle
elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracking == 'OBJECT': # Roll angle
keyframe_angle = {}
default_value = child.rotation_euler[1]
child.rotation_euler[1] = read_track_angle(temp_chunk)[0]
for keydata in keyframe_angle.items():
child.rotation_euler[1] = keydata[1]
child.keyframe_insert(data_path="rotation_euler", frame=keydata[0])
child.keyframe_insert(data_path="rotation_euler", index=1, frame=keydata[0])
elif KEYFRAME and new_chunk.ID == FOV_TRACK_TAG and child.type == 'CAMERA': # Field of view
keyframe_angle = {}
default_value = child.data.angle
child.data.angle = read_track_angle(temp_chunk)[0]
for keydata in keyframe_angle.items():
child.data.lens = (child.data.sensor_width/2)/math.tan(keydata[1]/2)
child.data.keyframe_insert(data_path="lens", frame=keydata[0])
elif new_chunk.ID == HOTSPOT_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT': # Hotspot
keyframe_angle = {}
cone_angle = math.degrees(child.data.spot_size)
default_value = cone_angle-(child.data.spot_blend*math.floor(cone_angle))
hot_spot = read_track_angle(temp_chunk)[0]
child.data.spot_blend = 1.0 - (hot_spot/cone_angle)
for keydata in keyframe_angle.items():
child.data.spot_blend = 1.0 - (keydata[1]/cone_angle)
child.data.keyframe_insert(data_path="spot_blend", frame=keydata[0])
elif new_chunk.ID == FALLOFF_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT': # Falloff
keyframe_angle = {}
default_value = math.degrees(child.data.spot_size)
child.data.spot_size = read_track_angle(temp_chunk)[0]
for keydata in keyframe_angle.items():
child.data.spot_size = keydata[1]
child.data.keyframe_insert(data_path="spot_size", frame=keydata[0])
else:
buffer_size = new_chunk.length - new_chunk.bytes_read
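For the FOV track added above, a minimal sketch of the field-of-view to focal-length conversion (angle in radians, sensor width in millimetres):

```python
# Minimal sketch: convert a field of view to a focal length, as done
# for the camera FOV track above.
import math

def fov_to_lens(fov, sensor_width=36.0):
    return (sensor_width / 2) / math.tan(fov / 2)

print(fov_to_lens(math.radians(49.1)))  # ~39.4 mm on a 36 mm sensor
```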
@ -1160,11 +1191,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
if ob.parent is not None:
ob.parent = None
else:
if ob.parent != object_list[parent]:
if ob == object_list[parent]:
if ob.parent != object_dict[parent]:
if ob == object_dict[parent]:
print(' warning: Cannot assign self to parent ', ob)
else:
ob.parent = object_list[parent]
ob.parent = object_dict[parent]
# pivot_list[ind] += pivot_list[parent] # XXX, not sure this is correct, should parent space matrix be applied before combining?
# fix pivots
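A minimal sketch of the lookup change in this hunk: parents are resolved through object_dict, keyed by the node id read from OBJECT_NODE_ID, instead of by position in object_list (hypothetical ids and names):

```python
# Minimal sketch: resolve a parent by its 3DS node id (dict lookup)
# rather than by index into the imported-object list.
object_dict = {0: "Camera", 2: "Lamp", 5: "Cube"}  # node id -> object
hierarchy = {5: 2, 2: 0}                           # child id -> parent id

for child_id, parent_id in hierarchy.items():
    if child_id == parent_id:
        print(' warning: Cannot assign self to parent', child_id)
    else:
        print(object_dict[child_id], '-> parent', object_dict[parent_id])
```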
@ -1179,7 +1210,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE
def load_3ds(filepath,
context,
IMPORT_CONSTRAIN_BOUNDS=10.0,
CONSTRAIN_BOUNDS=10.0,
IMAGE_SEARCH=True,
WORLD_MATRIX=False,
KEYFRAME=True,
@ -1210,7 +1241,7 @@ def load_3ds(filepath,
file.close()
return
if IMPORT_CONSTRAIN_BOUNDS:
if CONSTRAIN_BOUNDS:
BOUNDS_3DS[:] = [1 << 30, 1 << 30, 1 << 30, -1 << 30, -1 << 30, -1 << 30]
else:
del BOUNDS_3DS[:]
@ -1224,7 +1255,7 @@ def load_3ds(filepath,
scn = context.scene
imported_objects = [] # Fill this list with objects
process_next_chunk(context, file, current_chunk, imported_objects, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN_BOUNDS, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
# fixme, make unglobal
object_dictionary.clear()
@ -1283,7 +1314,7 @@ def load_3ds(filepath,
axis_min = [1000000000] * 3
axis_max = [-1000000000] * 3
global_clamp_size = IMPORT_CONSTRAIN_BOUNDS
global_clamp_size = CONSTRAIN_BOUNDS
if global_clamp_size != 0.0:
# Get all object bounds
for ob in imported_objects:
@ -1327,7 +1358,7 @@ def load(operator,
load_3ds(filepath,
context,
IMPORT_CONSTRAIN_BOUNDS=constrain_size,
CONSTRAIN_BOUNDS=constrain_size,
IMAGE_SEARCH=use_image_search,
WORLD_MATRIX=use_world_matrix,
KEYFRAME=read_keyframe,