New Addon: Import Autodesk .max #105013

Closed
Sebastian Sille wants to merge 136 commits from (deleted):nrgsille-import_max into main

2 changed files with 120 additions and 100 deletions
Showing only changes of commit 09f1219145

View File

@@ -66,6 +66,17 @@ class Import3DS(bpy.types.Operator, ImportHelper):
                     "(Warning, may be slow)",
         default=True,
     )
+    object_filter: EnumProperty(
+        name="Object Filter", options={'ENUM_FLAG'},
+        items=(('WORLD',"World".rjust(11),"",'WORLD_DATA',0x1),
+               ('MESH',"Mesh".rjust(11),"",'MESH_DATA',0x2),
+               ('LIGHT',"Light".rjust(12),"",'LIGHT_DATA',0x4),
+               ('CAMERA',"Camera".rjust(11),"",'CAMERA_DATA',0x8),
+               ('EMPTY',"Empty".rjust(11),"",'EMPTY_DATA',0x10),
+               ),
+        description="Object types to export",
+        default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'},
+    )
     use_apply_transform: BoolProperty(
         name="Apply Transform",
         description="Workaround for object transformations "
@@ -124,6 +135,7 @@ class MAX3DS_PT_import_include(bpy.types.Panel):
         operator = sfile.active_operator
         layout.prop(operator, "use_image_search")
+        layout.column().prop(operator, "object_filter")
         layout.prop(operator, "read_keyframe")
@@ -186,7 +198,7 @@ class Export3DS(bpy.types.Operator, ExportHelper):
         description="Export selected objects only",
         default=False,
     )
-    object_filter: bpy.props.EnumProperty(
+    object_filter: EnumProperty(
         name="Object Filter", options={'ENUM_FLAG'},
         items=(('WORLD', "World".rjust(11), "", 'WORLD_DATA',0x1),
                ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2),

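For context on the new property: an EnumProperty declared with options={'ENUM_FLAG'} reaches the operator as a Python set of the enabled identifiers, and that set is what load() later forwards to the importer as FILTER. A minimal plain-Python sketch of the resulting checks (the object_filter value below is a made-up example):

# Hypothetical set an 'ENUM_FLAG' EnumProperty would hand to the operator:
object_filter = {'MESH', 'CAMERA'}  # user left WORLD, LIGHT and EMPTY unchecked

# process_next_chunk() derives per-type switches from simple set membership:
CreateWorld = 'WORLD' in object_filter    # False -> AMBIENTLIGHT chunks are skipped
CreateMesh = 'MESH' in object_filter      # True  -> vertex/face/UV chunks are read
CreateLight = 'LIGHT' in object_filter    # False -> OBJECT_LIGHT chunks are skipped
CreateCamera = 'CAMERA' in object_filter  # True  -> OBJECT_CAMERA chunks are read
CreateEmpty = 'EMPTY' in object_filter    # False -> '$$$DUMMY' empties are not created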
View File

@@ -25,7 +25,6 @@ PRIMARY = 0x4D4D
 # >----- Main Chunks
 OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information
 VERSION = 0x0002 # This gives the version of the .3ds file
-AMBIENTLIGHT = 0x2100 # The color of the ambient light
 EDITKEYFRAME = 0xB000 # This is the header for all of the key frame info
 # >----- Data Chunks, used for various attributes
@@ -38,6 +37,7 @@ PCT_FLOAT = 0x0031 # percentage float
 MASTERSCALE = 0x0100 # Master scale factor
 # >----- sub defines of OBJECTINFO
+AMBIENTLIGHT = 0x2100 # The color of the ambient light
 MATERIAL = 0xAFFF # This stored the texture info
 OBJECT = 0x4000 # This stores the faces, vertices, etc...
@@ -131,13 +131,13 @@ OBJECT_SMOOTH = 0x4150 # The objects face smooth groups
 OBJECT_TRANS_MATRIX = 0x4160 # The objects Matrix
 # >------ sub defines of EDITKEYFRAME
-KFDATA_AMBIENT = 0xB001 # Keyframe ambient node
-KFDATA_OBJECT = 0xB002 # Keyframe object node
-KFDATA_CAMERA = 0xB003 # Keyframe camera node
-KFDATA_TARGET = 0xB004 # Keyframe target node
-KFDATA_LIGHT = 0xB005 # Keyframe light node
-KFDATA_LTARGET = 0xB006 # Keyframe light target node
-KFDATA_SPOTLIGHT = 0xB007 # Keyframe spotlight node
+KF_AMBIENT = 0xB001 # Keyframe ambient node
+KF_OBJECT = 0xB002 # Keyframe object node
+KF_OBJECT_CAMERA = 0xB003 # Keyframe camera node
+KF_TARGET_CAMERA = 0xB004 # Keyframe target node
+KF_OBJECT_LIGHT = 0xB005 # Keyframe light node
+KF_TARGET_LIGHT = 0xB006 # Keyframe light target node
+KF_OBJECT_SPOT_LIGHT = 0xB007 # Keyframe spotlight node
 KFDATA_KFSEG = 0xB008 # Keyframe start and stop
 KFDATA_CURTIME = 0xB009 # Keyframe current frame
 KFDATA_KFHDR = 0xB00A # Keyframe node header
@@ -326,7 +326,7 @@ childs_list = []
 parent_list = []
 def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN,
-                       IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE):
+                       FILTER, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE):
     contextObName = None
     contextLamp = None
@@ -470,6 +470,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
     CreateLightObject = False
     CreateTrackData = False
+    CreateWorld = 'WORLD' in FILTER
+    CreateMesh = 'MESH' in FILTER
+    CreateLight = 'LIGHT' in FILTER
+    CreateCamera = 'CAMERA' in FILTER
+    CreateEmpty = 'EMPTY' in FILTER
     def read_short(temp_chunk):
         temp_data = file.read(SZ_U_SHORT)
         temp_chunk.bytes_read += SZ_U_SHORT
@@ -666,7 +672,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 print("\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version)
         # is it an ambient light chunk?
-        elif new_chunk.ID == AMBIENTLIGHT:
+        elif CreateWorld and new_chunk.ID == AMBIENTLIGHT:
             path, filename = os.path.split(file.name)
             realname, ext = os.path.splitext(filename)
             world = bpy.data.worlds.new("Ambient: " + realname)
@@ -683,7 +689,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
         # is it an object info chunk?
         elif new_chunk.ID == OBJECTINFO:
             process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN,
-                               IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE)
+                               FILTER, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE)
             # keep track of how much we read in the main chunk
             new_chunk.bytes_read += temp_chunk.bytes_read
@@ -692,15 +698,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
         elif new_chunk.ID == OBJECT:
             if CreateBlenderObject:
-                putContextMesh(
-                    context,
-                    contextMesh_vertls,
-                    contextMesh_facels,
-                    contextMesh_flag,
-                    contextMeshMaterials,
-                    contextMesh_smooth,
-                    WORLD_MATRIX
-                )
+                putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMesh_flag,
+                               contextMeshMaterials, contextMesh_smooth, WORLD_MATRIX)
                 contextMesh_vertls = []
                 contextMesh_facels = []
                 contextMeshMaterials = []
@@ -709,7 +709,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             contextMeshUV = None
             contextMatrix = None
-            CreateBlenderObject = True
+            CreateBlenderObject = True if CreateMesh else False
             contextObName, read_str_len = read_string(file)
             new_chunk.bytes_read += read_str_len
@@ -867,13 +867,13 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
         elif new_chunk.ID == OBJECT_MESH:
             pass
-        elif new_chunk.ID == OBJECT_VERTICES:
+        elif CreateMesh and new_chunk.ID == OBJECT_VERTICES:
             """Worldspace vertex locations"""
             num_verts = read_short(new_chunk)
             contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(SZ_3FLOAT * num_verts))
             new_chunk.bytes_read += SZ_3FLOAT * num_verts
-        elif new_chunk.ID == OBJECT_FACES:
+        elif CreateMesh and new_chunk.ID == OBJECT_FACES:
             num_faces = read_short(new_chunk)
             temp_data = file.read(SZ_4U_SHORT * num_faces)
             new_chunk.bytes_read += SZ_4U_SHORT * num_faces # 4 short ints x 2 bytes each
@@ -881,7 +881,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             contextMesh_flag = [contextMesh_facels[i] for i in range(3, (num_faces * 4) + 3, 4)]
             contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)]
-        elif new_chunk.ID == OBJECT_MATERIAL:
+        elif CreateMesh and new_chunk.ID == OBJECT_MATERIAL:
             material_name, read_str_len = read_string(file)
             new_chunk.bytes_read += read_str_len # remove 1 null character.
             num_faces_using_mat = read_short(new_chunk)
@@ -891,19 +891,19 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             contextMeshMaterials.append((material_name, temp_data))
             # look up the material in all the materials
-        elif new_chunk.ID == OBJECT_SMOOTH:
+        elif CreateMesh and new_chunk.ID == OBJECT_SMOOTH:
             temp_data = file.read(SZ_U_INT * num_faces)
             smoothgroup = struct.unpack('<%dI' % (num_faces), temp_data)
             new_chunk.bytes_read += SZ_U_INT * num_faces
             contextMesh_smooth = smoothgroup
-        elif new_chunk.ID == OBJECT_UV:
+        elif CreateMesh and new_chunk.ID == OBJECT_UV:
             num_uv = read_short(new_chunk)
             temp_data = file.read(SZ_2FLOAT * num_uv)
             new_chunk.bytes_read += SZ_2FLOAT * num_uv
             contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)
-        elif new_chunk.ID == OBJECT_TRANS_MATRIX:
+        elif CreateMesh and new_chunk.ID == OBJECT_TRANS_MATRIX:
             # How do we know the matrix size? 54 == 4x4 48 == 4x3
             temp_data = file.read(SZ_4x3MAT)
             mtx = list(struct.unpack('<ffffffffffff', temp_data))
@@ -912,21 +912,25 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 (mtx[:3] + [0], mtx[3:6] + [0], mtx[6:9] + [0], mtx[9:] + [1])).transposed()
         # If hierarchy chunk
-        elif new_chunk.ID == OBJECT_HIERARCHY:
+        elif CreateMesh and new_chunk.ID == OBJECT_HIERARCHY:
             child_id = get_hierarchy(new_chunk)
-        elif new_chunk.ID == OBJECT_PARENT:
+        elif CreateMesh and new_chunk.ID == OBJECT_PARENT:
             get_parent(new_chunk, child_id)
         # If light chunk
-        elif contextObName and new_chunk.ID == OBJECT_LIGHT: # Basic lamp support
-            newLamp = bpy.data.lights.new("Lamp", 'POINT')
-            contextLamp = bpy.data.objects.new(contextObName, newLamp)
-            context.view_layer.active_layer_collection.collection.objects.link(contextLamp)
-            imported_objects.append(contextLamp)
-            object_dictionary[contextObName] = contextLamp
-            contextLamp.location = read_float_array(new_chunk) # Position
+        elif new_chunk.ID == OBJECT_LIGHT: # Basic lamp support
             CreateBlenderObject = False
-            CreateLightObject = True
+            if not CreateLight:
+                contextObName = None
+                skip_to_end(file, new_chunk)
+            else:
+                CreateLightObject = True
+                light = bpy.data.lights.new("Lamp", 'POINT')
+                contextLamp = bpy.data.objects.new(contextObName, light)
+                context.view_layer.active_layer_collection.collection.objects.link(contextLamp)
+                imported_objects.append(contextLamp)
+                object_dictionary[contextObName] = contextLamp
+                contextLamp.location = read_float_array(new_chunk) # Position
             contextMatrix = None # Reset matrix
         elif CreateLightObject and new_chunk.ID == COLOR_F: # Color
             contextLamp.data.color = read_float_array(new_chunk)
@@ -964,21 +968,25 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             get_parent(new_chunk, child_id)
         # If camera chunk
-        elif contextObName and new_chunk.ID == OBJECT_CAMERA: # Basic camera support
-            camera = bpy.data.cameras.new("Camera")
-            contextCamera = bpy.data.objects.new(contextObName, camera)
-            context.view_layer.active_layer_collection.collection.objects.link(contextCamera)
-            imported_objects.append(contextCamera)
-            object_dictionary[contextObName] = contextCamera
-            contextCamera.location = read_float_array(new_chunk) # Position
-            focus = mathutils.Vector(read_float_array(new_chunk))
-            direction = calc_target(contextCamera.location, focus) # Target
-            contextCamera.rotation_euler[0] = direction[0]
-            contextCamera.rotation_euler[1] = read_float(new_chunk) # Roll
-            contextCamera.rotation_euler[2] = direction[1]
-            contextCamera.data.lens = read_float(new_chunk) # Focal length
+        elif new_chunk.ID == OBJECT_CAMERA: # Basic camera support
             CreateBlenderObject = False
-            CreateCameraObject = True
+            if not CreateCamera:
+                contextObName = None
+                skip_to_end(file, new_chunk)
+            else:
+                CreateCameraObject = True
+                camera = bpy.data.cameras.new("Camera")
+                contextCamera = bpy.data.objects.new(contextObName, camera)
+                context.view_layer.active_layer_collection.collection.objects.link(contextCamera)
+                imported_objects.append(contextCamera)
+                object_dictionary[contextObName] = contextCamera
+                contextCamera.location = read_float_array(new_chunk) # Position
+                focus = mathutils.Vector(read_float_array(new_chunk))
+                direction = calc_target(contextCamera.location, focus) # Target
+                contextCamera.rotation_euler[0] = direction[0]
+                contextCamera.rotation_euler[1] = read_float(new_chunk) # Roll
+                contextCamera.rotation_euler[2] = direction[1]
+                contextCamera.data.lens = read_float(new_chunk) # Focal length
             contextMatrix = None # Reset matrix
         elif CreateCameraObject and new_chunk.ID == OBJECT_HIERARCHY: # Hierarchy
             child_id = get_hierarchy(new_chunk)
@@ -1000,15 +1008,27 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 context.scene.frame_current = current
         # including these here means their OB_NODE_HDR are scanned
-        # another object is being processed
-        elif new_chunk.ID in {KFDATA_AMBIENT, KFDATA_OBJECT, KFDATA_CAMERA, KFDATA_LIGHT, KFDATA_SPOTLIGHT}:
-            object_id = ROOT_OBJECT
-            tracking = 'OBJECT'
+        elif new_chunk.ID in {KF_AMBIENT, KF_OBJECT, KF_OBJECT_CAMERA, KF_OBJECT_LIGHT, KF_OBJECT_SPOT_LIGHT}:
+            tracktype = str([kf for kf,ck in globals().items() if ck == new_chunk.ID][0]).split("_")[1]
+            tracking = str([kf for kf,ck in globals().items() if ck == new_chunk.ID][0]).split("_")[-1]
+            spotting = str([kf for kf,ck in globals().items() if ck == new_chunk.ID][0]).split("_")[-2]
+            object_id = hierarchy = ROOT_OBJECT
             child = None
+            if not CreateWorld and tracking == 'AMBIENT':
+                skip_to_end(file, new_chunk)
+            if not CreateLight and tracking == 'LIGHT':
+                skip_to_end(file, new_chunk)
+            if not CreateCamera and tracking == 'CAMERA':
+                skip_to_end(file, new_chunk)
-        elif CreateTrackData and new_chunk.ID in {KFDATA_TARGET, KFDATA_LTARGET}:
-            tracking = 'TARGET'
+        elif CreateTrackData and new_chunk.ID in {KF_TARGET_CAMERA, KF_TARGET_LIGHT}:
+            tracktype = str([kf for kf,ck in globals().items() if ck == new_chunk.ID][0]).split("_")[1]
+            tracking = str([kf for kf,ck in globals().items() if ck == new_chunk.ID][0]).split("_")[-1]
             child = None
+            if not CreateLight and tracking == 'LIGHT':
+                skip_to_end(file, new_chunk)
+            if not CreateCamera and tracking == 'CAMERA':
+                skip_to_end(file, new_chunk)
         elif new_chunk.ID == OBJECT_NODE_ID:
             object_id = read_short(new_chunk)
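The tracktype/tracking/spotting lookups above recover the track category from the renamed KF_* constant's own name instead of hard-coding a value per chunk ID. A standalone sketch of that reverse lookup (constants copied from this diff; the classify() helper and the example values are only for illustration):

KF_AMBIENT = 0xB001
KF_OBJECT = 0xB002
KF_OBJECT_CAMERA = 0xB003
KF_TARGET_CAMERA = 0xB004
KF_OBJECT_LIGHT = 0xB005
KF_TARGET_LIGHT = 0xB006
KF_OBJECT_SPOT_LIGHT = 0xB007

def classify(chunk_id):
    # Find the KF_* constant whose value matches the chunk ID, then split its name.
    name = [kf for kf, ck in globals().items() if ck == chunk_id][0]
    tracktype = name.split("_")[1]   # e.g. 'OBJECT', 'TARGET' or 'AMBIENT'
    tracking = name.split("_")[-1]   # e.g. 'AMBIENT', 'OBJECT', 'CAMERA' or 'LIGHT'
    spotting = name.split("_")[-2]   # 'SPOT' only for the spotlight node
    return tracktype, tracking, spotting

print(classify(KF_OBJECT_SPOT_LIGHT))  # ('OBJECT', 'LIGHT', 'SPOT')
print(classify(KF_TARGET_CAMERA))      # ('TARGET', 'CAMERA', 'TARGET')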
@@ -1016,22 +1036,21 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
         elif new_chunk.ID == OBJECT_NODE_HDR:
             object_name, read_str_len = read_string(file)
             new_chunk.bytes_read += read_str_len
-            temp_data = file.read(SZ_U_SHORT * 2)
-            new_chunk.bytes_read += 4
+            new_data = file.read(SZ_U_INT)
+            new_chunk.bytes_read += SZ_U_INT
             hierarchy = read_short(new_chunk)
             child = object_dictionary.get(object_name)
-            colortrack = 'LIGHT'
             if child is None:
-                if object_name == '$AMBIENT$':
+                if CreateWorld and object_name == '$AMBIENT$':
                     child = context.scene.world
                     child.use_nodes = True
-                    colortrack = 'AMBIENT'
-                else:
+                elif CreateEmpty and object_name == '$$$DUMMY':
                     child = bpy.data.objects.new(object_name, None) # Create an empty object
                     context.view_layer.active_layer_collection.collection.objects.link(child)
                     imported_objects.append(child)
+                else:
+                    tracking = tracktype = None
-            if tracking != 'TARGET' and object_name != '$AMBIENT$':
+            if tracktype != 'TARGET' and tracking != 'AMBIENT':
                 object_dict[object_id] = child
                 object_list.append(child)
                 object_parent.append(hierarchy)
@@ -1042,7 +1061,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             parent_dictionary.setdefault(parent_name, []).append(child)
             new_chunk.bytes_read += read_str_len
-        elif new_chunk.ID == OBJECT_INSTANCE_NAME:
+        elif new_chunk.ID == OBJECT_INSTANCE_NAME and tracking == 'OBJECT':
             instance_name, read_str_len = read_string(file)
             if child.name == '$$$DUMMY':
                 child.name = instance_name
@@ -1055,16 +1074,16 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             object_dictionary[child.name] = child
             new_chunk.bytes_read += read_str_len
-        elif new_chunk.ID == OBJECT_PIVOT: # Pivot
+        elif new_chunk.ID == OBJECT_PIVOT and tracking == 'OBJECT': # Pivot
             pivot = read_float_array(new_chunk)
             pivot_list[len(pivot_list) - 1] = mathutils.Vector(pivot)
-        elif new_chunk.ID == MORPH_SMOOTH and child.type == 'MESH': # Smooth angle
+        elif new_chunk.ID == MORPH_SMOOTH and tracking == 'OBJECT': # Smooth angle
             child.data.use_auto_smooth = True
             smooth_angle = read_float(new_chunk)
             child.data.auto_smooth_angle = smooth_angle
-        elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and colortrack == 'AMBIENT': # Ambient
+        elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'AMBIENT': # Ambient
             keyframe_data = {}
             default_data = child.color[:]
             child.color = read_track_data(new_chunk)[0]
@@ -1076,7 +1095,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 child.node_tree.keyframe_insert(data_path="nodes[\"Background\"].inputs[0].default_value", frame=keydata[0])
             contextTrack_flag = False
-        elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and colortrack == 'LIGHT': # Color
+        elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'LIGHT': # Color
             keyframe_data = {}
             default_data = child.data.color[:]
             child.data.color = read_track_data(new_chunk)[0]
@@ -1085,7 +1104,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 child.data.keyframe_insert(data_path="color", frame=keydata[0])
             contextTrack_flag = False
-        elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking == 'OBJECT': # Translation
+        elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'OBJECT': # Translation
             keyframe_data = {}
             default_data = child.location[:]
             child.location = read_track_data(new_chunk)[0]
@@ -1113,7 +1132,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                     child.keyframe_insert(data_path="location", index=2, frame=keydata[0])
             contextTrack_flag = False
-        elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking == 'TARGET': # Target position
+        elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'TARGET': # Target position
             keyframe_data = {}
             location = child.location
             target = mathutils.Vector(read_track_data(new_chunk)[0])
@@ -1137,7 +1156,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 child.keyframe_insert(data_path="rotation_euler", index=2, frame=keydata[0])
             contextTrack_flag = False
-        elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and tracking == 'OBJECT': # Rotation
+        elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and tracktype == 'OBJECT': # Rotation
             keyframe_rotation = {}
             tflags = read_short(new_chunk)
             temp_data = file.read(SZ_U_INT * 2)
@@ -1177,7 +1196,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 if not tflags & 0x400: # Flag 0x400 unlinks Z axis
                     child.keyframe_insert(data_path="rotation_euler", index=2, frame=keydata[0])
-        elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and tracking == 'OBJECT': # Scale
+        elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and tracktype == 'OBJECT': # Scale
             keyframe_data = {}
             default_data = child.scale[:]
             child.scale = read_track_data(new_chunk)[0]
@@ -1197,7 +1216,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                     child.keyframe_insert(data_path="scale", index=2, frame=keydata[0])
             contextTrack_flag = False
-        elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracking == 'OBJECT': # Roll angle
+        elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracktype == 'OBJECT': # Roll angle
             keyframe_angle = {}
             default_value = child.rotation_euler[1]
             child.rotation_euler[1] = read_track_angle(new_chunk)[0]
@@ -1207,7 +1226,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                     child.rotation_euler.rotate(CONVERSE)
                 child.keyframe_insert(data_path="rotation_euler", index=1, frame=keydata[0])
-        elif KEYFRAME and new_chunk.ID == FOV_TRACK_TAG and child.type == 'CAMERA': # Field of view
+        elif KEYFRAME and new_chunk.ID == FOV_TRACK_TAG and tracking == 'CAMERA': # Field of view
             keyframe_angle = {}
             default_value = child.data.angle
             child.data.angle = read_track_angle(new_chunk)[0]
@@ -1215,7 +1234,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 child.data.lens = (child.data.sensor_width / 2) / math.tan(keydata[1] / 2)
                 child.data.keyframe_insert(data_path="lens", frame=keydata[0])
-        elif KEYFRAME and new_chunk.ID == HOTSPOT_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT': # Hotspot
+        elif KEYFRAME and new_chunk.ID == HOTSPOT_TRACK_TAG and tracking == 'LIGHT' and spotting == 'SPOT': # Hotspot
             keyframe_angle = {}
             cone_angle = math.degrees(child.data.spot_size)
             default_value = cone_angle-(child.data.spot_blend * math.floor(cone_angle))
@@ -1225,7 +1244,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 child.data.spot_blend = 1.0 - (math.degrees(keydata[1]) / cone_angle)
                 child.data.keyframe_insert(data_path="spot_blend", frame=keydata[0])
-        elif KEYFRAME and new_chunk.ID == FALLOFF_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT': # Falloff
+        elif KEYFRAME and new_chunk.ID == FALLOFF_TRACK_TAG and tracking == 'LIGHT' and spotting == 'SPOT': # Falloff
             keyframe_angle = {}
             default_value = math.degrees(child.data.spot_size)
             child.data.spot_size = read_track_angle(new_chunk)[0]
@@ -1245,15 +1264,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
     # FINISHED LOOP
     # There will be a number of objects still not added
     if CreateBlenderObject:
-        putContextMesh(
-            context,
-            contextMesh_vertls,
-            contextMesh_facels,
-            contextMesh_flag,
-            contextMeshMaterials,
-            contextMesh_smooth,
-            WORLD_MATRIX
-        )
+        putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMesh_flag,
+                       contextMeshMaterials, contextMesh_smooth, WORLD_MATRIX)
     # Assign parents to objects
     # check _if_ we need to assign first because doing so recalcs the depsgraph
@@ -1305,7 +1317,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
 # IMPORT #
 ##########
-def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True,
+def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, FILTER=None,
              WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None):
     print("importing 3DS: %r..." % (filepath), end="")
@@ -1349,7 +1361,7 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True,
     imported_objects = [] # Fill this list with objects
     process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN,
-                       IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE)
+                       FILTER, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE)
     # fixme, make unglobal
     object_dictionary.clear()
@@ -1443,16 +1455,12 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True,
     file.close()
-def load(operator, context, filepath="", constrain_size=0.0,
-         convert_unit=False, use_image_search=True,
-         use_world_matrix=False, read_keyframe=True,
-         use_apply_transform=True, global_matrix=None,
-         ):
+def load(operator, context, filepath="", constrain_size=0.0, convert_unit=False,
+         use_image_search=True, object_filter=None, use_world_matrix=False,
+         read_keyframe=True, use_apply_transform=True, global_matrix=None,):
-    load_3ds(filepath, context, CONSTRAIN=constrain_size,
-             UNITS=convert_unit, IMAGE_SEARCH=use_image_search,
-             WORLD_MATRIX=use_world_matrix, KEYFRAME=read_keyframe,
-             APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix,
-             )
+    load_3ds(filepath, context, CONSTRAIN=constrain_size, UNITS=convert_unit,
+             IMAGE_SEARCH=use_image_search, FILTER=object_filter, WORLD_MATRIX=use_world_matrix,
+             KEYFRAME=read_keyframe, APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix,)
     return {'FINISHED'}
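End to end, the new property reaches load() as object_filter and is forwarded to load_3ds() as FILTER. A usage sketch from Blender's Python console (the file path is made up, and the import_scene.autodesk_3ds idname is assumed from the existing io_scene_3ds operator rather than shown in this diff):

import bpy

# Import only meshes and cameras; world/ambient, lights and empties are skipped
# by the CreateWorld/CreateLight/CreateEmpty checks added in this commit.
bpy.ops.import_scene.autodesk_3ds(
    filepath="/tmp/example.3ds",       # made-up path
    object_filter={'MESH', 'CAMERA'},  # the ENUM_FLAG set defined in the first file
    read_keyframe=True,
)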