Node Wrangler: Improved accuracy on Align Nodes operator #104551

Open
quackarooni wants to merge 18 commits from quackarooni/blender-addons:nw_rework_align_nodes into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
5 changed files with 246 additions and 231 deletions
Showing only changes of commit cf69c82250 - Show all commits

View File

@ -151,14 +151,6 @@ def unregister():
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
# NOTES:
# why add 1 extra vertex? and remove it when done? -
# "Answer - eekadoodle - would need to re-order UV's without this since face
# order isnt always what we give blender, BMesh will solve :D"
#
# disabled scaling to size, this requires exposing bb (easy) and understanding
# how it works (needs some time)
if __name__ == "__main__":
register()

View File

@ -13,9 +13,9 @@ import mathutils
import bpy_extras
from bpy_extras import node_shader_utils
######################################################
# Data Structures
######################################################
###################
# Data Structures #
###################
# Some of the chunks that we will export
# ----- Primary Chunk, at the beginning of each file
@ -85,7 +85,10 @@ OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object
# >------ Sub defines of LIGHT
LIGHT_MULTIPLIER = 0x465B # The light energy factor
LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight
LIGHT_SPOTROLL = 0x4656 # The roll angle of the spot
LIGHT_SPOT_ROLL = 0x4656 # Light spot roll angle
LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag
LIGHT_SPOT_SEE_CONE = 0x4650 # Light spot show cone flag
LIGHT_SPOT_RECTANGLE = 0x4651 # Light spot rectangle flag
# >------ sub defines of CAMERA
OBJECT_CAM_RANGES = 0x4720 # The camera range values
@ -106,13 +109,29 @@ KFDATA_KFCURTIME = 0xB009
KFDATA_OBJECT_NODE_TAG = 0xB002
# >------ sub defines of OBJECT_NODE_TAG
OBJECT_NODE_ID = 0xB030
OBJECT_NODE_HDR = 0xB010
OBJECT_PIVOT = 0xB013
OBJECT_INSTANCE_NAME = 0xB011
POS_TRACK_TAG = 0xB020
ROT_TRACK_TAG = 0xB021
SCL_TRACK_TAG = 0xB022
AMBIENT_NODE_TAG = 0xB001 # Ambient node tag
OBJECT_NODE_TAG = 0xB002 # Object tree tag
CAMERA_NODE_TAG = 0xB003 # Camera object tag
TARGET_NODE_TAG = 0xB004 # Camera target tag
LIGHT_NODE_TAG = 0xB005 # Light object tag
LTARGET_NODE_TAG = 0xB006 # Light target tag
SPOT_NODE_TAG = 0xB007 # Spotlight tag
OBJECT_NODE_ID = 0xB030 # Object hierarchy ID
OBJECT_NODE_HDR = 0xB010 # Hierarchy tree header
OBJECT_INSTANCE_NAME = 0xB011 # Object instance name
OBJECT_PIVOT = 0xB013 # Object pivot position
OBJECT_BOUNDBOX = 0xB014 # Object boundbox
OBJECT_MORPH_SMOOTH = 0xB015 # Object smooth angle
POS_TRACK_TAG = 0xB020 # Position transform tag
ROT_TRACK_TAG = 0xB021 # Rotation transform tag
SCL_TRACK_TAG = 0xB022 # Scale transform tag
FOV_TRACK_TAG = 0xB023 # Field of view tag
ROLL_TRACK_TAG = 0xB024 # Roll transform tag
COL_TRACK_TAG = 0xB025 # Color transform tag
HOTSPOT_TRACK_TAG = 0xB027 # Hotspot transform tag
FALLOFF_TRACK_TAG = 0xB028 # Falloff transform tag
ROOT_OBJECT = 0xFFFF # Root object
# So 3ds max can open files, limit names to 12 in length
@ -120,7 +139,6 @@ SCL_TRACK_TAG = 0xB022
name_unique = [] # stores str, ascii only
name_mapping = {} # stores {orig: byte} mapping
def sane_name(name):
name_fixed = name_mapping.get(name)
if name_fixed is not None:
@ -143,13 +161,11 @@ def sane_name(name):
def uv_key(uv):
return round(uv[0], 6), round(uv[1], 6)
# size defines:
SZ_SHORT = 2
SZ_INT = 4
SZ_FLOAT = 4
class _3ds_ushort(object):
"""Class representing a short (2-byte integer) for a 3ds file.
*** This looks like an unsigned short H is unsigned from the struct docs - Cam***"""
@ -467,9 +483,9 @@ class _3ds_chunk(object):
subchunk.dump(indent + 1)
######################################################
# EXPORT
######################################################
##########
# EXPORT #
##########
def get_material_image(material):
""" Get images from paint slots."""
@ -570,12 +586,12 @@ def make_material_texture_chunk(chunk_id, texslots, pct):
if socket == 'Alpha':
mat_sub_alpha = _3ds_chunk(MAP_TILING)
alphaflag = 0x40 # summed area sampling 0x20
alphaflag |= 0x40 # summed area sampling 0x20
mat_sub_alpha.add_variable("alpha", _3ds_ushort(alphaflag))
mat_sub.add_subchunk(mat_sub_alpha)
if texslot.socket_dst.identifier in {'Base Color', 'Specular'}:
mat_sub_tint = _3ds_chunk(MAP_TILING) # RGB tint 0x200
tint = 0x80 if texslot.image.colorspace_settings.name == 'Non-Color' else 0x200
tint |= 0x80 if texslot.image.colorspace_settings.name == 'Non-Color' else 0x200
mat_sub_tint.add_variable("tint", _3ds_ushort(tint))
mat_sub.add_subchunk(mat_sub_tint)
@ -801,11 +817,11 @@ def extract_triangles(mesh):
faceflag = 0
if c_a.use_edge_sharp:
faceflag = faceflag + 0x1
faceflag |= 0x1
if b_c.use_edge_sharp:
faceflag = faceflag + 0x2
faceflag |= 0x2
if a_b.use_edge_sharp:
faceflag = faceflag + 0x4
faceflag |= 0x4
smoothgroup = polygroup[face.polygon_index]
@ -1362,7 +1378,7 @@ def save(operator,
object_chunk.add_variable("light", _3ds_string(sane_name(ob.name)))
light_chunk.add_variable("location", _3ds_point_3d(ob.location))
color_float_chunk.add_variable("color", _3ds_float_color(ob.data.color))
energy_factor.add_variable("energy", _3ds_float(ob.data.energy * .001))
energy_factor.add_variable("energy", _3ds_float(ob.data.energy * 0.001))
light_chunk.add_subchunk(color_float_chunk)
light_chunk.add_subchunk(energy_factor)
@ -1374,12 +1390,18 @@ def save(operator,
pos_y = ob.location[1] + (ob.location[0] * math.tan(math.radians(90) - ob.rotation_euler[2]))
pos_z = hypo * math.tan(math.radians(90) - ob.rotation_euler[0])
spotlight_chunk = _3ds_chunk(LIGHT_SPOTLIGHT)
spot_roll_chunk = _3ds_chunk(LIGHT_SPOTROLL)
spot_roll_chunk = _3ds_chunk(LIGHT_SPOT_ROLL)
spotlight_chunk.add_variable("target", _3ds_point_3d((pos_x, pos_y, pos_z)))
spotlight_chunk.add_variable("hotspot", _3ds_float(round(hotspot, 4)))
spotlight_chunk.add_variable("angle", _3ds_float(round(cone_angle, 4)))
spot_roll_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6)))
spotlight_chunk.add_subchunk(spot_roll_chunk)
if ob.data.show_cone:
spot_cone_chunk = _3ds_chunk(LIGHT_SPOT_SEE_CONE)
spotlight_chunk.add_subchunk(spot_cone_chunk)
if ob.data.use_square:
spot_square_chunk = _3ds_chunk(LIGHT_SPOT_RECTANGLE)
spotlight_chunk.add_subchunk(spot_square_chunk)
light_chunk.add_subchunk(spotlight_chunk)
# Add light to object info

View File

@ -12,9 +12,9 @@ from bpy_extras.node_shader_utils import PrincipledBSDFWrapper
BOUNDS_3DS = []
######################################################
# Data Structures
######################################################
###################
# Data Structures #
###################
# Some of the chunks that we will see
# ----- Primary Chunk, at the beginning of each file
@ -76,71 +76,73 @@ MAT_MAP_BCOL = 0xA368 # Blue mapping
# >------ sub defines of OBJECT
OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object
OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object
OBJECT_LIGHT_SPOT = 0x4610 # The light is a spotlight.
OBJECT_LIGHT_OFF = 0x4620 # The light off.
OBJECT_LIGHT_ATTENUATE = 0x4625
OBJECT_LIGHT_RAYSHADE = 0x4627
OBJECT_LIGHT_SHADOWED = 0x4630
OBJECT_LIGHT_LOCAL_SHADOW = 0x4640
OBJECT_LIGHT_LOCAL_SHADOW2 = 0x4641
OBJECT_LIGHT_SEE_CONE = 0x4650
OBJECT_LIGHT_SPOT_RECTANGULAR = 0x4651
OBJECT_LIGHT_SPOT_OVERSHOOT = 0x4652
OBJECT_LIGHT_SPOT_PROJECTOR = 0x4653
OBJECT_LIGHT_EXCLUDE = 0x4654
OBJECT_LIGHT_RANGE = 0x4655
OBJECT_LIGHT_ROLL = 0x4656
OBJECT_LIGHT_SPOT_ASPECT = 0x4657
OBJECT_LIGHT_RAY_BIAS = 0x4658
OBJECT_LIGHT_INNER_RANGE = 0x4659
OBJECT_LIGHT_OUTER_RANGE = 0x465A
OBJECT_LIGHT_MULTIPLIER = 0x465B
OBJECT_LIGHT_AMBIENT_LIGHT = 0x4680
OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object
OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object
#>------ Sub defines of LIGHT
LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight
LIGHT_OFF = 0x4620 # The light is off
LIGHT_ATTENUATE = 0x4625 # Light attenuate flag
LIGHT_RAYSHADE = 0x4627 # Light rayshading flag
LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag
LIGHT_LOCAL_SHADOW = 0x4640 # Light shadow values 1
LIGHT_LOCAL_SHADOW2 = 0x4641 # Light shadow values 2
LIGHT_SPOT_SEE_CONE = 0x4650 # Light spot cone flag
LIGHT_SPOT_RECTANGLE = 0x4651 # Light spot rectangle flag
LIGHT_SPOT_OVERSHOOT = 0x4652 # Light spot overshoot flag
LIGHT_SPOT_PROJECTOR = 0x4653 # Light spot bitmap name
LIGHT_EXCLUDE = 0x4654 # Light excluded objects
LIGHT_RANGE = 0x4655 # Light range
LIGHT_SPOT_ROLL = 0x4656 # The roll angle of the spot
LIGHT_SPOT_ASPECT = 0x4657 # Light spot aspect flag
LIGHT_RAY_BIAS = 0x4658 # Light ray bias value
LIGHT_INNER_RANGE = 0x4659 # The light inner range
LIGHT_OUTER_RANGE = 0x465A # The light outer range
LIGHT_MULTIPLIER = 0x465B # The light energy factor
LIGHT_AMBIENT_LIGHT = 0x4680 # Light ambient flag
# >------ sub defines of CAMERA
OBJECT_CAMERA = 0x4700 # This lets un know we are reading a camera object
OBJECT_CAM_RANGES = 0x4720 # The camera range values
# >------ sub defines of OBJECT_MESH
OBJECT_VERTICES = 0x4110 # The objects vertices
OBJECT_VERTFLAGS = 0x4111 # The objects vertex flags
OBJECT_FACES = 0x4120 # The objects faces
OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color
OBJECT_UV = 0x4140 # The UV texture coordinates
OBJECT_SMOOTH = 0x4150 # The Object smooth groups
OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix
OBJECT_MATERIAL = 0x4130 # The objects face material
OBJECT_UV = 0x4140 # The vertex UV texture coordinates
OBJECT_SMOOTH = 0x4150 # The objects face smooth groups
OBJECT_TRANS_MATRIX = 0x4160 # The objects Matrix
# >------ sub defines of EDITKEYFRAME
KFDATA_AMBIENT = 0xB001
KFDATA_OBJECT = 0xB002
KFDATA_CAMERA = 0xB003
KFDATA_TARGET = 0xB004
KFDATA_LIGHT = 0xB005
KFDATA_L_TARGET = 0xB006
KFDATA_SPOTLIGHT = 0xB007
KFDATA_KFSEG = 0xB008
KFDATA_CURTIME = 0xB009
# KFDATA_KFHDR = 0xB00A
KFDATA_AMBIENT = 0xB001 # Keyframe ambient node
KFDATA_OBJECT = 0xB002 # Keyframe object node
KFDATA_CAMERA = 0xB003 # Keyframe camera node
KFDATA_TARGET = 0xB004 # Keyframe target node
KFDATA_LIGHT = 0xB005 # Keyframe light node
KFDATA_LTARGET = 0xB006 # Keyframe light target node
KFDATA_SPOTLIGHT = 0xB007 # Keyframe spotlight node
KFDATA_KFSEG = 0xB008 # Keyframe start and stop
KFDATA_CURTIME = 0xB009 # Keyframe current frame
KFDATA_KFHDR = 0xB00A # Keyframe node header
# >------ sub defines of KEYFRAME_NODE
OBJECT_NODE_HDR = 0xB010
OBJECT_INSTANCE_NAME = 0xB011
# OBJECT_PRESCALE = 0xB012
OBJECT_PIVOT = 0xB013
# OBJECT_BOUNDBOX = 0xB014
MORPH_SMOOTH = 0xB015
POS_TRACK_TAG = 0xB020
ROT_TRACK_TAG = 0xB021
SCL_TRACK_TAG = 0xB022
FOV_TRACK_TAG = 0xB023
ROLL_TRACK_TAG = 0xB024
COL_TRACK_TAG = 0xB025
# MORPH_TRACK_TAG = 0xB026
HOTSPOT_TRACK_TAG = 0xB027
FALLOFF_TRACK_TAG = 0xB028
# HIDE_TRACK_TAG = 0xB029
OBJECT_NODE_ID = 0xB030
OBJECT_NODE_HDR = 0xB010 # Keyframe object node header
OBJECT_INSTANCE_NAME = 0xB011 # Keyframe object name for dummy objects
OBJECT_PRESCALE = 0xB012 # Keyframe object prescale
OBJECT_PIVOT = 0xB013 # Keyframe object pivot position
OBJECT_BOUNDBOX = 0xB014 # Keyframe object boundbox
MORPH_SMOOTH = 0xB015 # Auto smooth angle for keyframe mesh objects
POS_TRACK_TAG = 0xB020 # Keyframe object position track
ROT_TRACK_TAG = 0xB021 # Keyframe object rotation track
SCL_TRACK_TAG = 0xB022 # Keyframe object scale track
FOV_TRACK_TAG = 0xB023 # Keyframe camera field of view track
ROLL_TRACK_TAG = 0xB024 # Keyframe camera roll track
COL_TRACK_TAG = 0xB025 # Keyframe light color track
MORPH_TRACK_TAG = 0xB026 # Keyframe object morph smooth track
HOTSPOT_TRACK_TAG = 0xB027 # Keyframe spotlight hotspot track
FALLOFF_TRACK_TAG = 0xB028 # Keyframe spotlight falloff track
HIDE_TRACK_TAG = 0xB029 # Keyframe object hide track
OBJECT_NODE_ID = 0xB030 # Keyframe object node id
ROOT_OBJECT = 0xFFFF
@ -198,10 +200,10 @@ def read_string(file):
# print("read string", s)
return str(b''.join(s), "utf-8", "replace"), len(s) + 1
######################################################
# IMPORT
######################################################
##########
# IMPORT #
##########
def process_next_object_chunk(file, previous_chunk):
new_chunk = Chunk()
@ -210,7 +212,6 @@ def process_next_object_chunk(file, previous_chunk):
# read the next chunk
read_chunk(file, new_chunk)
def skip_to_end(file, skip_chunk):
buffer_size = skip_chunk.length - skip_chunk.bytes_read
binary_format = "%ic" % buffer_size
@ -308,7 +309,7 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)
def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN_BOUNDS, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME):
def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME):
from bpy_extras.image_utils import load_image
contextObName = None
@ -411,8 +412,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
imported_objects.append(ob)
if myContextMesh_flag:
# Bit 0 (0x1) sets edge CA visible, Bit 1 (0x2) sets edge BC visible and Bit 2 (0x4) sets edge AB visible
# In Blender we use sharp edges for those flags
"""Bit 0 (0x1) sets edge CA visible, Bit 1 (0x2) sets edge BC visible and Bit 2 (0x4) sets edge AB visible
In Blender we use sharp edges for those flags"""
for f, pl in enumerate(bmesh.polygons):
face = myContextMesh_facels[f]
faceflag = myContextMesh_flag[f]
@ -421,25 +422,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
edge_ca = bmesh.edges[bmesh.loops[pl.loop_start + 2].edge_index]
if face[2] == 0:
edge_ab, edge_bc, edge_ca = edge_ca, edge_ab, edge_bc
if faceflag == 1:
if faceflag & 0x1:
edge_ca.use_edge_sharp = True
elif faceflag == 2:
if faceflag & 0x2:
edge_bc.use_edge_sharp = True
elif faceflag == 3:
edge_ca.use_edge_sharp = True
edge_bc.use_edge_sharp = True
elif faceflag == 4:
if faceflag & 0x4:
edge_ab.use_edge_sharp = True
elif faceflag == 5:
edge_ca.use_edge_sharp = True
edge_ab.use_edge_sharp = True
elif faceflag == 6:
edge_bc.use_edge_sharp = True
edge_ab.use_edge_sharp = True
elif faceflag == 7:
edge_bc.use_edge_sharp = True
edge_ab.use_edge_sharp = True
edge_ca.use_edge_sharp = True
if myContextMesh_smooth:
for f, pl in enumerate(bmesh.polygons):
@ -461,6 +449,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
CreateBlenderObject = False
CreateLightObject = False
CreateCameraObject = False
CreateTrackData = False
def read_float_color(temp_chunk):
temp_data = file.read(SZ_3FLOAT)
@ -518,6 +507,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
voffset = read_float(temp_chunk)
elif temp_chunk.ID == MAT_MAP_TILING:
"""Control bit flags, where 0x1 activates decaling, 0x2 activates mirror,
0x8 activates inversion, 0x10 deactivates tiling, 0x20 activates summed area sampling,
0x40 activates alpha source, 0x80 activates tinting, 0x100 ignores alpha, 0x200 activates RGB tint.
Bits 0x80, 0x100, and 0x200 are only used with TEXMAP, TEX2MAP, and SPECMAP chunks.
0x40, when used with a TEXMAP, TEX2MAP, or SPECMAP chunk must be accompanied with a tint bit,
either 0x100 or 0x200, tintcolor will be processed if colorchunks are present"""
tiling = read_short(temp_chunk)
if tiling & 0x1:
extend = 'decal'
@ -527,20 +522,19 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
extend = 'invert'
elif tiling & 0x10:
extend = 'noWrap'
elif tiling & 0x20:
if tiling & 0x20:
alpha = 'sat'
elif tiling & 0x40:
if tiling & 0x40:
alpha = 'alpha'
elif tiling & 0x80:
if tiling & 0x80:
tint = 'tint'
elif tiling & 0x100:
if tiling & 0x100:
tint = 'noAlpha'
elif tiling & 0x200:
if tiling & 0x200:
tint = 'RGBtint'
elif temp_chunk.ID == MAT_MAP_ANG:
angle = read_float(temp_chunk)
print("\nwarning: UV angle mapped to z-rotation")
elif temp_chunk.ID == MAT_MAP_COL1:
tintcolor = read_byte_color(temp_chunk)
@ -635,7 +629,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
# is it an object info chunk?
elif new_chunk.ID == OBJECTINFO:
process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN_BOUNDS, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
# keep track of how much we read in the main chunk
new_chunk.bytes_read += temp_chunk.bytes_read
@ -659,7 +653,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
contextMesh_flag = None
contextMesh_smooth = None
contextMeshUV = None
# Reset matrix
contextMatrix = None
CreateBlenderObject = True
@ -673,7 +666,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_NAME:
material_name, read_str_len = read_string(file)
# plus one for the null character that ended the string
new_chunk.bytes_read += read_str_len
contextMaterial.name = material_name.rstrip() # remove trailing whitespace
@ -681,7 +673,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_AMBIENT:
read_chunk(file, temp_chunk)
# only available color is emission color
# to not lose this data, ambient color is stored in line color
if temp_chunk.ID == COLOR_F:
contextMaterial.line_color[:3] = read_float_color(temp_chunk)
elif temp_chunk.ID == COLOR_24:
@ -702,7 +694,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_SPECULAR:
read_chunk(file, temp_chunk)
# Specular color is available
if temp_chunk.ID == COLOR_F:
contextMaterial.specular_color = read_float_color(temp_chunk)
elif temp_chunk.ID == COLOR_24:
@ -758,7 +749,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
temp_chunk.bytes_read += SZ_FLOAT
contextMaterial.diffuse_color[3] = 1 - float(struct.unpack('f', temp_data)[0])
else:
print("Cannot read material transparency")
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
elif new_chunk.ID == MAT_SELF_ILPCT:
@ -827,7 +818,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_TEX2_MAP:
read_texture(new_chunk, temp_chunk, "Tex", "TEXTURE")
# mesh chunk
# If mesh chunk
elif new_chunk.ID == OBJECT_MESH:
pass
@ -838,7 +829,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
new_chunk.bytes_read += 2
contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(SZ_3FLOAT * num_verts))
new_chunk.bytes_read += SZ_3FLOAT * num_verts
# dummyvert is not used atm!
elif new_chunk.ID == OBJECT_FACES:
temp_data = file.read(SZ_U_SHORT)
@ -884,9 +874,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
contextMatrix = mathutils.Matrix(
(data[:3] + [0], data[3:6] + [0], data[6:9] + [0], data[9:] + [1])).transposed()
elif contextObName and new_chunk.ID == OBJECT_LIGHT: # Basic lamp support.
# no lamp in dict that would be confusing
# ...why not? just set CreateBlenderObject to False
elif contextObName and new_chunk.ID == OBJECT_LIGHT: # Basic lamp support
newLamp = bpy.data.lights.new("Lamp", 'POINT')
contextLamp = bpy.data.objects.new(contextObName, newLamp)
context.view_layer.active_layer_collection.collection.objects.link(contextLamp)
@ -898,17 +886,16 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
contextMatrix = None # Reset matrix
CreateBlenderObject = False
CreateLightObject = True
elif CreateLightObject and new_chunk.ID == COLOR_F: # Light color
temp_data = file.read(SZ_3FLOAT)
contextLamp.data.color = struct.unpack('<3f', temp_data)
new_chunk.bytes_read += SZ_3FLOAT
elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_MULTIPLIER: # Intensity
elif CreateLightObject and new_chunk.ID == LIGHT_MULTIPLIER: # Intensity
temp_data = file.read(SZ_FLOAT)
contextLamp.data.energy = (float(struct.unpack('f', temp_data)[0]) * 1000)
new_chunk.bytes_read += SZ_FLOAT
elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_SPOT: # Spotlight
elif CreateLightObject and new_chunk.ID == LIGHT_SPOTLIGHT: # Spotlight
temp_data = file.read(SZ_3FLOAT)
contextLamp.data.type = 'SPOT'
spot = mathutils.Vector(struct.unpack('<3f', temp_data))
@ -925,12 +912,18 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
temp_data = file.read(SZ_FLOAT) # Beam angle
beam_angle = float(struct.unpack('f', temp_data)[0])
contextLamp.data.spot_size = math.radians(beam_angle)
contextLamp.data.spot_blend = (1.0 - (hotspot / beam_angle)) * 2
contextLamp.data.spot_blend = 1.0 - (hotspot / beam_angle)
new_chunk.bytes_read += SZ_FLOAT
elif CreateLightObject and new_chunk.ID == OBJECT_LIGHT_ROLL: # Roll
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_ROLL: # Roll
temp_data = file.read(SZ_FLOAT)
contextLamp.rotation_euler[1] = float(struct.unpack('f', temp_data)[0])
new_chunk.bytes_read += SZ_FLOAT
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_SHADOWED: # Shadow
contextLamp.data.use_shadow = True
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_SEE_CONE: # Cone
contextLamp.data.show_cone = True
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_RECTANGLE: # Square
contextLamp.data.use_square = True
elif contextObName and new_chunk.ID == OBJECT_CAMERA and CreateCameraObject is False: # Basic camera support
camera = bpy.data.cameras.new("Camera")
@ -948,20 +941,24 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
new_chunk.bytes_read += SZ_3FLOAT
temp_data = file.read(SZ_FLOAT) # triangulating camera angles
direction = math.copysign(math.sqrt(pow(focus, 2) + pow(target[2], 2)), cam[1])
pitch = math.radians(90) - math.copysign(math.acos(focus / direction), cam[2])
contextCamera.rotation_euler[0] = -1 * math.copysign(pitch, cam[1])
contextCamera.rotation_euler[1] = float(struct.unpack('f', temp_data)[0])
contextCamera.rotation_euler[2] = -1 * (math.radians(90) - math.acos(cam[0] / focus))
pitch = math.radians(90)-math.copysign(math.acos(focus/direction), cam[2])
if contextCamera.location[1] > target[1]:
contextCamera.rotation_euler[0] = math.copysign(pitch, cam[1])
contextCamera.rotation_euler[2] = math.radians(180)-math.copysign(math.atan(cam[0]/focus), cam[0])
else:
contextCamera.rotation_euler[0] = -1*(math.copysign(pitch, cam[1]))
contextCamera.rotation_euler[2] = -1*(math.radians(90)-math.acos(cam[0]/focus))
contextCamera.rotation_euler[1] = float(struct.unpack('f', temp_data)[0]) # Roll
new_chunk.bytes_read += SZ_FLOAT
temp_data = file.read(SZ_FLOAT)
contextCamera.data.lens = float(struct.unpack('f', temp_data)[0])
contextCamera.data.lens = float(struct.unpack('f', temp_data)[0]) # Focus
new_chunk.bytes_read += SZ_FLOAT
contextMatrix = None # Reset matrix
CreateBlenderObject = False
CreateCameraObject = True
elif new_chunk.ID == EDITKEYFRAME:
pass
trackposition = {}
elif KEYFRAME and new_chunk.ID == KFDATA_KFSEG:
temp_data = file.read(SZ_U_INT)
@ -981,12 +978,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
# including these here means their OB_NODE_HDR are scanned
# another object is being processed
elif new_chunk.ID in {KFDATA_AMBIENT, KFDATA_OBJECT, KFDATA_CAMERA, KFDATA_LIGHT}:
elif new_chunk.ID in {KFDATA_AMBIENT, KFDATA_OBJECT, KFDATA_CAMERA, KFDATA_LIGHT, KFDATA_SPOTLIGHT}:
object_id = ROOT_OBJECT
tracking = 'OBJECT'
child = None
elif new_chunk.ID in {KFDATA_TARGET, KFDATA_L_TARGET}:
elif CreateTrackData and new_chunk.ID in {KFDATA_TARGET, KFDATA_LTARGET}:
tracking = 'TARGET'
child = None
@ -1063,8 +1060,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
keyframe_data = {}
default_data = child.location[:]
child.location = read_track_data(temp_chunk)[0]
if child.type in {'LIGHT', 'CAMERA'}:
trackposition[0] = child.location
CreateTrackData = True
for keydata in keyframe_data.items():
child.location = mathutils.Vector(keydata[1]) * (CONSTRAIN_BOUNDS * 0.1) if hierarchy == ROOT_OBJECT and CONSTRAIN_BOUNDS != 0.0 else keydata[1]
trackposition[keydata[0]] = keydata[1] # Keep track to position for target calculation
child.location = mathutils.Vector(keydata[1]) * (CONSTRAIN * 0.1) if hierarchy == ROOT_OBJECT and CONSTRAIN != 0.0 else keydata[1]
child.keyframe_insert(data_path="location", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking == 'TARGET': # Target position
@ -1074,15 +1075,23 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
foc = math.copysign(math.sqrt(pow(pos[1],2)+pow(pos[0],2)),pos[1])
hyp = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[1])
tilt = math.radians(90)-math.copysign(math.acos(foc/hyp), pos[2])
child.rotation_euler[0] = -1*math.copysign(tilt, pos[1])
if child.location[0] > target[1]:
child.rotation_euler[0] = math.copysign(tilt, pos[1])
child.rotation_euler[2] = math.radians(180)-math.copysign(math.atan(pos[0]/foc), pos[0])
else:
child.rotation_euler[0] = -1*(math.copysign(tilt, pos[1]))
child.rotation_euler[2] = -1*(math.radians(90)-math.acos(pos[0]/foc))
for keydata in keyframe_data.items():
target = keydata[1]
pos = child.location + mathutils.Vector(target)
pos = mathutils.Vector(trackposition[keydata[0]]) + mathutils.Vector(target)
foc = math.copysign(math.sqrt(pow(pos[1],2)+pow(pos[0],2)),pos[1])
hyp = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[1])
tilt = math.radians(90)-math.copysign(math.acos(foc/hyp), pos[2])
child.rotation_euler[0] = -1*math.copysign(tilt, pos[1])
if trackposition[keydata[0]][1] > target[1]:
child.rotation_euler[0] = math.copysign(tilt, pos[1])
child.rotation_euler[2] = math.radians(180)-math.copysign(math.atan(pos[0]/foc), pos[0])
else:
child.rotation_euler[0] = -1*(math.copysign(tilt, pos[1]))
child.rotation_euler[2] = -1*(math.radians(90)-math.acos(pos[0]/foc))
child.keyframe_insert(data_path="rotation_euler", frame=keydata[0])
@ -1121,7 +1130,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
default_data = child.scale[:]
child.scale = read_track_data(temp_chunk)[0]
for keydata in keyframe_data.items():
child.scale = mathutils.Vector(keydata[1]) * (CONSTRAIN_BOUNDS * 0.1) if hierarchy == ROOT_OBJECT and CONSTRAIN_BOUNDS != 0.0 else keydata[1]
child.scale = mathutils.Vector(keydata[1]) * (CONSTRAIN * 0.1) if hierarchy == ROOT_OBJECT and CONSTRAIN != 0.0 else keydata[1]
child.keyframe_insert(data_path="scale", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracking == 'OBJECT': # Roll angle
@ -1144,10 +1153,10 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
keyframe_angle = {}
cone_angle = math.degrees(child.data.spot_size)
default_value = cone_angle-(child.data.spot_blend*math.floor(cone_angle))
hot_spot = read_track_angle(temp_chunk)[0]
hot_spot = math.degrees(read_track_angle(temp_chunk)[0])
child.data.spot_blend = 1.0 - (hot_spot/cone_angle)
for keydata in keyframe_angle.items():
child.data.spot_blend = 1.0 - (keydata[1]/cone_angle)
child.data.spot_blend = 1.0 - (math.degrees(keydata[1])/cone_angle)
child.data.keyframe_insert(data_path="spot_blend", frame=keydata[0])
elif new_chunk.ID == FALLOFF_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT': # Falloff
@ -1190,6 +1199,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
if parent == ROOT_OBJECT:
if ob.parent is not None:
ob.parent = None
elif parent not in object_dict:
if ob.parent != object_list[parent]:
if ob == object_list[parent]:
print(' warning: Cannot assign self to parent ', ob)
else:
ob.parent = object_list[parent]
else:
if ob.parent != object_dict[parent]:
if ob == object_dict[parent]:
@ -1197,7 +1212,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
else:
ob.parent = object_dict[parent]
# pivot_list[ind] += pivot_list[parent] # XXX, not sure this is correct, should parent space matrix be applied before combining?
#pivot_list[ind] += pivot_list[parent] # Not sure this is correct, should parent space matrix be applied before combining?
# fix pivots
for ind, ob in enumerate(object_list):
if ob.type == 'MESH':
@ -1210,16 +1226,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
def load_3ds(filepath,
context,
CONSTRAIN_BOUNDS=10.0,
CONSTRAIN=10.0,
IMAGE_SEARCH=True,
WORLD_MATRIX=False,
KEYFRAME=True,
APPLY_MATRIX=True,
global_matrix=None):
# global SCN
# XXX
# if BPyMessages.Error_NoFile(filepath):
# return
print("importing 3DS: %r..." % (filepath), end="")
@ -1227,27 +1239,21 @@ def load_3ds(filepath,
bpy.ops.object.select_all(action='DESELECT')
time1 = time.time()
# time1 = Blender.sys.time()
current_chunk = Chunk()
file = open(filepath, 'rb')
# here we go!
# print 'reading the first chunk'
read_chunk(file, current_chunk)
if current_chunk.ID != PRIMARY:
print('\tFatal Error: Not a valid 3ds file: %r' % filepath)
file.close()
return
if CONSTRAIN_BOUNDS:
if CONSTRAIN:
BOUNDS_3DS[:] = [1 << 30, 1 << 30, 1 << 30, -1 << 30, -1 << 30, -1 << 30]
else:
del BOUNDS_3DS[:]
# IMAGE_SEARCH
# fixme, make unglobal, clear in case
object_dictionary.clear()
object_matrix.clear()
@ -1255,17 +1261,12 @@ def load_3ds(filepath,
scn = context.scene
imported_objects = [] # Fill this list with objects
process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN_BOUNDS, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
# fixme, make unglobal
object_dictionary.clear()
object_matrix.clear()
# Link the objects into this scene.
# Layers = scn.Layers
# REMOVE DUMMYVERT, - remove this in the next release when blenders internal are fixed.
if APPLY_MATRIX:
for ob in imported_objects:
if ob.type == 'MESH':
@ -1284,7 +1285,6 @@ def load_3ds(filepath,
bpy.ops.object.rotation_clear()
bpy.ops.object.location_clear()
# Done DUMMYVERT
"""
if IMPORT_AS_INSTANCE:
name = filepath.split('\\')[-1].split('/')[-1]
@ -1314,7 +1314,7 @@ def load_3ds(filepath,
axis_min = [1000000000] * 3
axis_max = [-1000000000] * 3
global_clamp_size = CONSTRAIN_BOUNDS
global_clamp_size = CONSTRAIN
if global_clamp_size != 0.0:
# Get all object bounds
for ob in imported_objects:
@ -1358,7 +1358,7 @@ def load(operator,
load_3ds(filepath,
context,
CONSTRAIN_BOUNDS=constrain_size,
CONSTRAIN=constrain_size,
IMAGE_SEARCH=use_image_search,
WORLD_MATRIX=use_world_matrix,
KEYFRAME=read_keyframe,

View File

@ -3,7 +3,7 @@
bl_info = {
"name": "Node Wrangler",
"author": "Bartek Skorupa, Greg Zaal, Sebastian Koenig, Christian Brinkmann, Florian Meyer",
"version": (3, 45),
"version": (3, 46),
"blender": (3, 6, 0),
"location": "Node Editor Toolbar or Shift-W",
"description": "Various tools to enhance and speed up node-based workflow",

View File

@ -14,6 +14,7 @@ from bpy.props import (
CollectionProperty,
)
from bpy_extras.io_utils import ImportHelper, ExportHelper
from bpy_extras.node_utils import connect_sockets
from mathutils import Vector
from os import path
from glob import glob
@ -369,13 +370,13 @@ class NWSwapLinks(Operator, NWBase):
for connection in n1_outputs:
try:
links.new(n2.outputs[connection[0]], connection[1])
connect_sockets(n2.outputs[connection[0]], connection[1])
except:
self.report({'WARNING'},
"Some connections have been lost due to differing numbers of output sockets")
for connection in n2_outputs:
try:
links.new(n1.outputs[connection[0]], connection[1])
connect_sockets(n1.outputs[connection[0]], connection[1])
except:
self.report({'WARNING'},
"Some connections have been lost due to differing numbers of output sockets")
@ -413,8 +414,8 @@ class NWSwapLinks(Operator, NWBase):
i1t = pair[0].links[0].to_socket
i2f = pair[1].links[0].from_socket
i2t = pair[1].links[0].to_socket
links.new(i1f, i2t)
links.new(i2f, i1t)
connect_sockets(i1f, i2t)
connect_sockets(i2f, i1t)
if t[1] == 1:
if len(types) == 1:
fs = t[0].links[0].from_socket
@ -425,14 +426,14 @@ class NWSwapLinks(Operator, NWBase):
i += 1
while n1.inputs[i].is_linked:
i += 1
links.new(fs, n1.inputs[i])
connect_sockets(fs, n1.inputs[i])
elif len(types) == 2:
i1f = types[0][0].links[0].from_socket
i1t = types[0][0].links[0].to_socket
i2f = types[1][0].links[0].from_socket
i2t = types[1][0].links[0].to_socket
links.new(i1f, i2t)
links.new(i2f, i1t)
connect_sockets(i1f, i2t)
connect_sockets(i2f, i1t)
else:
self.report({'WARNING'}, "This node has no input connections to swap!")
@ -703,7 +704,7 @@ class NWPreviewNode(Operator, NWBase):
make_links.append((active.outputs[out_i], geometryoutput.inputs[geometryoutindex]))
output_socket = geometryoutput.inputs[geometryoutindex]
for li_from, li_to in make_links:
base_node_tree.links.new(li_from, li_to)
connect_sockets(li_from, li_to)
tree = base_node_tree
link_end = output_socket
while tree.nodes.active != active:
@ -714,11 +715,11 @@ class NWPreviewNode(Operator, NWBase):
node_socket = node.node_tree.outputs[index]
if node_socket in delete_sockets:
delete_sockets.remove(node_socket)
tree.links.new(link_start, link_end)
connect_sockets(link_start, link_end)
# Iterate
link_end = self.ensure_group_output(node.node_tree).inputs[index]
tree = tree.nodes.active.node_tree
tree.links.new(active.outputs[out_i], link_end)
connect_sockets(active.outputs[out_i], link_end)
# Delete sockets
for socket in delete_sockets:
@ -777,7 +778,7 @@ class NWPreviewNode(Operator, NWBase):
make_links.append((active.outputs[out_i], materialout.inputs[materialout_index]))
output_socket = materialout.inputs[materialout_index]
for li_from, li_to in make_links:
base_node_tree.links.new(li_from, li_to)
connect_sockets(li_from, li_to)
# Create links through node groups until we reach the active node
tree = base_node_tree
@ -790,11 +791,11 @@ class NWPreviewNode(Operator, NWBase):
node_socket = node.node_tree.outputs[index]
if node_socket in delete_sockets:
delete_sockets.remove(node_socket)
tree.links.new(link_start, link_end)
connect_sockets(link_start, link_end)
# Iterate
link_end = self.ensure_group_output(node.node_tree).inputs[index]
tree = tree.nodes.active.node_tree
tree.links.new(active.outputs[out_i], link_end)
connect_sockets(active.outputs[out_i], link_end)
# Delete sockets
for socket in delete_sockets:
@ -1065,31 +1066,31 @@ class NWSwitchNodeType(Operator, NWBase):
if node.inputs[src_i].links and not new_node.inputs[dst_i].links:
in_src_link = node.inputs[src_i].links[0]
in_dst_socket = new_node.inputs[dst_i]
links.new(in_src_link.from_socket, in_dst_socket)
connect_sockets(in_src_link.from_socket, in_dst_socket)
links.remove(in_src_link)
# OUTPUTS: Base on matches in proper order.
for (src_i, src_dval), (dst_i, dst_dval) in matches['OUTPUTS'][tp]:
for out_src_link in node.outputs[src_i].links:
out_dst_socket = new_node.outputs[dst_i]
links.new(out_dst_socket, out_src_link.to_socket)
connect_sockets(out_dst_socket, out_src_link.to_socket)
# relink rest inputs if possible, no criteria
for src_inp in node.inputs:
for dst_inp in new_node.inputs:
if src_inp.links and not dst_inp.links:
src_link = src_inp.links[0]
links.new(src_link.from_socket, dst_inp)
connect_sockets(src_link.from_socket, dst_inp)
links.remove(src_link)
# relink rest outputs if possible, base on node kind if any left.
for src_o in node.outputs:
for out_src_link in src_o.links:
for dst_o in new_node.outputs:
if src_o.type == dst_o.type:
links.new(dst_o, out_src_link.to_socket)
connect_sockets(dst_o, out_src_link.to_socket)
# relink rest outputs no criteria if any left. Link all from first output.
for src_o in node.outputs:
for out_src_link in src_o.links:
if new_node.outputs:
links.new(new_node.outputs[0], out_src_link.to_socket)
connect_sockets(new_node.outputs[0], out_src_link.to_socket)
nodes.remove(node)
force_update(context)
return {'FINISHED'}
@ -1178,16 +1179,16 @@ class NWMergeNodes(Operator, NWBase):
# outputs to the multi input socket.
if i < len(socket_indices) - 1:
ind = socket_indices[i]
links.new(node.outputs[0], new_node.inputs[ind])
connect_sockets(node.outputs[0], new_node.inputs[ind])
else:
outputs_for_multi_input.insert(0, node.outputs[0])
if outputs_for_multi_input != []:
ind = socket_indices[-1]
for output in outputs_for_multi_input:
links.new(output, new_node.inputs[ind])
connect_sockets(output, new_node.inputs[ind])
if prev_links != []:
for link in prev_links:
links.new(new_node.outputs[0], link.to_node.inputs[0])
connect_sockets(new_node.outputs[0], link.to_node.inputs[0])
return new_node
def execute(self, context):
@ -1448,19 +1449,19 @@ class NWMergeNodes(Operator, NWBase):
# Prevent cyclic dependencies when nodes to be merged are linked to one another.
# Link only if "to_node" index not in invalid indexes list.
if not self.link_creates_cycle(ss_link, invalid_nodes):
links.new(get_first_enabled_output(last_add), ss_link.to_socket)
connect_sockets(get_first_enabled_output(last_add), ss_link.to_socket)
# add links from last_add to all links 'to_socket' of out links of first selected.
for fs_link in first_selected_output.links:
# Link only if "to_node" index not in invalid indexes list.
if not self.link_creates_cycle(fs_link, invalid_nodes):
links.new(get_first_enabled_output(last_add), fs_link.to_socket)
connect_sockets(get_first_enabled_output(last_add), fs_link.to_socket)
# add link from "first" selected and "first" add node
node_to = nodes[count_after - 1]
links.new(first_selected_output, node_to.inputs[first])
connect_sockets(first_selected_output, node_to.inputs[first])
if node_to.type == 'ZCOMBINE':
for fs_out in first_selected.outputs:
if fs_out != first_selected_output and fs_out.name in ('Z', 'Depth'):
links.new(fs_out, node_to.inputs[1])
connect_sockets(fs_out, node_to.inputs[1])
break
# add links between added ADD nodes and between selected and ADD nodes
for i in range(count_adds):
@ -1469,21 +1470,21 @@ class NWMergeNodes(Operator, NWBase):
node_to = nodes[index - 1]
node_to_input_i = first
node_to_z_i = 1 # if z combine - link z to first z input
links.new(get_first_enabled_output(node_from), node_to.inputs[node_to_input_i])
connect_sockets(get_first_enabled_output(node_from), node_to.inputs[node_to_input_i])
if node_to.type == 'ZCOMBINE':
for from_out in node_from.outputs:
if from_out != get_first_enabled_output(node_from) and from_out.name in ('Z', 'Depth'):
links.new(from_out, node_to.inputs[node_to_z_i])
connect_sockets(from_out, node_to.inputs[node_to_z_i])
if len(nodes_list) > 1:
node_from = nodes[nodes_list[i + 1][0]]
node_to = nodes[index]
node_to_input_i = second
node_to_z_i = 3 # if z combine - link z to second z input
links.new(get_first_enabled_output(node_from), node_to.inputs[node_to_input_i])
connect_sockets(get_first_enabled_output(node_from), node_to.inputs[node_to_input_i])
if node_to.type == 'ZCOMBINE':
for from_out in node_from.outputs:
if from_out != get_first_enabled_output(node_from) and from_out.name in ('Z', 'Depth'):
links.new(from_out, node_to.inputs[node_to_z_i])
connect_sockets(from_out, node_to.inputs[node_to_z_i])
index -= 1
# set "last" of added nodes as active
nodes.active = last_add
@ -1691,7 +1692,7 @@ class NWCopySettings(Operator, NWBase):
new_node.location = node_loc
for str_from, str_to in reconnections:
node_tree.links.new(eval(str_from), eval(str_to))
node_tree.connect_sockets(eval(str_from), eval(str_to))
success_names.append(new_node.name)
@ -1860,7 +1861,7 @@ class NWAddTextureSetup(Operator, NWBase):
x_offset = x_offset + image_texture_node.width + padding
image_texture_node.location = [locx - x_offset, locy]
nodes.active = image_texture_node
links.new(image_texture_node.outputs[0], target_input)
connect_sockets(image_texture_node.outputs[0], target_input)
# The mapping setup following this will connect to the first input of this image texture.
target_input = image_texture_node.inputs[0]
@ -1872,7 +1873,7 @@ class NWAddTextureSetup(Operator, NWBase):
mapping_node = nodes.new('ShaderNodeMapping')
x_offset = x_offset + mapping_node.width + padding
mapping_node.location = [locx - x_offset, locy]
links.new(mapping_node.outputs[0], target_input)
connect_sockets(mapping_node.outputs[0], target_input)
# Add Texture Coordinates node.
tex_coord_node = nodes.new('ShaderNodeTexCoord')
@ -1882,7 +1883,7 @@ class NWAddTextureSetup(Operator, NWBase):
is_procedural_texture = is_texture_node and node.type != 'TEX_IMAGE'
use_generated_coordinates = is_procedural_texture or use_environment_texture
tex_coord_output = tex_coord_node.outputs[0 if use_generated_coordinates else 2]
links.new(tex_coord_output, mapping_node.inputs[0])
connect_sockets(tex_coord_output, mapping_node.inputs[0])
return {'FINISHED'}
@ -2007,7 +2008,7 @@ class NWAddPrincipledSetup(Operator, NWBase, ImportHelper):
disp_node = nodes.new(type='ShaderNodeDisplacement')
# Align the Displacement node under the active Principled BSDF node
disp_node.location = active_node.location + Vector((100, -700))
link = links.new(disp_node.inputs[0], disp_texture.outputs[0])
link = connect_sockets(disp_node.inputs[0], disp_texture.outputs[0])
# TODO Turn on true displacement in the material
# Too complicated for now
@ -2016,7 +2017,7 @@ class NWAddPrincipledSetup(Operator, NWBase, ImportHelper):
output_node = [n for n in nodes if n.bl_idname == 'ShaderNodeOutputMaterial']
if output_node:
if not output_node[0].inputs[2].is_linked:
link = links.new(output_node[0].inputs[2], disp_node.outputs[0])
link = connect_sockets(output_node[0].inputs[2], disp_node.outputs[0])
continue
@ -2046,13 +2047,13 @@ class NWAddPrincipledSetup(Operator, NWBase, ImportHelper):
if match_normal:
# If Normal add normal node in between
normal_node = nodes.new(type='ShaderNodeNormalMap')
link = links.new(normal_node.inputs[1], texture_node.outputs[0])
link = connect_sockets(normal_node.inputs[1], texture_node.outputs[0])
elif match_bump:
# If Bump add bump node in between
normal_node = nodes.new(type='ShaderNodeBump')
link = links.new(normal_node.inputs[2], texture_node.outputs[0])
link = connect_sockets(normal_node.inputs[2], texture_node.outputs[0])
link = links.new(active_node.inputs[sname[0]], normal_node.outputs[0])
link = connect_sockets(active_node.inputs[sname[0]], normal_node.outputs[0])
normal_node_texture = texture_node
elif sname[0] == 'Roughness':
@ -2063,19 +2064,19 @@ class NWAddPrincipledSetup(Operator, NWBase, ImportHelper):
if match_rough:
# If Roughness nothing to to
link = links.new(active_node.inputs[sname[0]], texture_node.outputs[0])
link = connect_sockets(active_node.inputs[sname[0]], texture_node.outputs[0])
elif match_gloss:
# If Gloss Map add invert node
invert_node = nodes.new(type='ShaderNodeInvert')
link = links.new(invert_node.inputs[1], texture_node.outputs[0])
link = connect_sockets(invert_node.inputs[1], texture_node.outputs[0])
link = links.new(active_node.inputs[sname[0]], invert_node.outputs[0])
link = connect_sockets(active_node.inputs[sname[0]], invert_node.outputs[0])
roughness_node = texture_node
else:
# This is a simple connection Texture --> Input slot
link = links.new(active_node.inputs[sname[0]], texture_node.outputs[0])
link = connect_sockets(active_node.inputs[sname[0]], texture_node.outputs[0])
# Use non-color for all but 'Base Color' Textures
if not sname[0] in ['Base Color', 'Emission'] and texture_node.image:
@ -2120,15 +2121,15 @@ class NWAddPrincipledSetup(Operator, NWBase, ImportHelper):
sum(n.location.y for n in texture_nodes) / len(texture_nodes)))
reroute.location = tex_coords + Vector((-50, -120))
for texture_node in texture_nodes:
link = links.new(texture_node.inputs[0], reroute.outputs[0])
link = links.new(reroute.inputs[0], mapping.outputs[0])
link = connect_sockets(texture_node.inputs[0], reroute.outputs[0])
link = connect_sockets(reroute.inputs[0], mapping.outputs[0])
else:
link = links.new(texture_nodes[0].inputs[0], mapping.outputs[0])
link = connect_sockets(texture_nodes[0].inputs[0], mapping.outputs[0])
# Connect texture_coordiantes to mapping node
texture_input = nodes.new(type='ShaderNodeTexCoord')
texture_input.location = mapping.location + Vector((-200, 0))
link = links.new(mapping.inputs[0], texture_input.outputs[2])
link = connect_sockets(mapping.inputs[0], texture_input.outputs[2])
# Create frame around tex coords and mapping
frame = nodes.new(type='NodeFrame')
@ -2232,8 +2233,8 @@ class NWAddReroutes(Operator, NWBase):
n = nodes.new('NodeReroute')
nodes.active = n
for link in output.links:
links.new(n.outputs[0], link.to_socket)
links.new(output, n.inputs[0])
connect_sockets(n.outputs[0], link.to_socket)
connect_sockets(output, n.inputs[0])
n.location = loc
post_select.append(n)
reroutes_count += 1
@ -2325,7 +2326,7 @@ class NWLinkActiveToSelected(Operator, NWBase):
for input in node.inputs:
if input.type == out.type or node.type == 'REROUTE':
if replace or not input.is_linked:
links.new(out, input)
connect_sockets(out, input)
if not use_node_name and not use_outputs_names:
doit = False
break
@ -2590,7 +2591,7 @@ class NWLinkToOutputNode(Operator):
elif tree_type == 'GeometryNodeTree':
if active.outputs[output_index].type != 'GEOMETRY':
return {'CANCELLED'}
links.new(active.outputs[output_index], output_node.inputs[out_input_index])
connect_sockets(active.outputs[output_index], output_node.inputs[out_input_index])
force_update(context) # viewport render does not update
@ -2611,7 +2612,7 @@ class NWMakeLink(Operator, NWBase):
n1 = nodes[context.scene.NWLazySource]
n2 = nodes[context.scene.NWLazyTarget]
links.new(n1.outputs[self.from_socket], n2.inputs[self.to_socket])
connect_sockets(n1.outputs[self.from_socket], n2.inputs[self.to_socket])
force_update(context)
@ -2635,7 +2636,7 @@ class NWCallInputsMenu(Operator, NWBase):
if len(n2.inputs) > 1:
bpy.ops.wm.call_menu("INVOKE_DEFAULT", name=NWConnectionListInputs.bl_idname)
elif len(n2.inputs) == 1:
links.new(n1.outputs[self.from_socket], n2.inputs[0])
connect_sockets(n1.outputs[self.from_socket], n2.inputs[0])
return {'FINISHED'}
@ -3019,7 +3020,7 @@ class NWResetNodes(bpy.types.Operator):
new_node.location = node_loc
for str_from, str_to in reconnections:
node_tree.links.new(eval(str_from), eval(str_to))
connect_sockets(eval(str_from), eval(str_to))
new_node.select = False
success_names.append(new_node.name)