io_scene_3ds: Added scale factor to 3ds export #104767
@@ -16,7 +16,7 @@ import bpy
 bl_info = {
     "name": "Autodesk 3DS format",
     "author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand",
-    "version": (2, 4, 1),
+    "version": (2, 4, 3),
     "blender": (3, 6, 0),
     "location": "File > Import-Export",
     "description": "3DS Import/Export meshes, UVs, materials, textures, "
@@ -109,6 +109,11 @@ class Export3DS(bpy.types.Operator, ExportHelper):
         description="Export selected objects only",
         default=False,
     )
+    use_hierarchy: bpy.props.BoolProperty(
+        name="Export Hierarchy",
+        description="Export hierarchy chunks",
+        default=False,
+    )
     write_keyframe: BoolProperty(
         name="Write Keyframe",
         description="Write the keyframe data",
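Note: for the new Export Hierarchy option to have any effect, the operator has to forward it to export_3ds.save(), whose signature gains use_hierarchy in a later hunk. The sketch below shows the usual ExportHelper wiring via self.as_keywords(); the exact ignore tuple is illustrative and not copied from this file.

def execute(self, context):
    from . import export_3ds
    # Collect the operator properties (including the new use_hierarchy flag)
    # and pass them straight through to the exporter; the ignore tuple here
    # is only an example of what such operators typically filter out.
    keywords = self.as_keywords(ignore=("filter_glob", "check_existing"))
    return export_3ds.save(self, context, **keywords)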
@@ -87,6 +87,8 @@ MASTERSCALE = 0x0100  # Master scale factor
 OBJECT_MESH = 0x4100  # This lets us know that we are reading a new object
 OBJECT_LIGHT = 0x4600  # This lets us know we are reading a light object
 OBJECT_CAMERA = 0x4700  # This lets us know we are reading a camera object
+OBJECT_HIERARCHY = 0x4F00  # Hierarchy id of the object
+OBJECT_PARENT = 0x4F10  # Parent id of the object

 # >------ Sub defines of LIGHT
 LIGHT_MULTIPLIER = 0x465B  # The light energy factor
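Note: both new IDs follow the regular 3DS chunk layout that the _3ds_chunk writer produces: a 2-byte chunk ID, a 4-byte total size (6-byte header included), then the payload, which here is a single unsigned short carrying the object's hierarchy index or its parent's index. A minimal hand-packed sketch, purely for illustration (the helper name pack_chunk is not part of the patch):

import struct

OBJECT_HIERARCHY = 0x4F00
OBJECT_PARENT = 0x4F10

def pack_chunk(chunk_id, payload=b""):
    # 3DS chunk layout: ushort ID, uint32 total size (header + payload), payload
    return struct.pack("<HI", chunk_id, 6 + len(payload)) + payload

# Parent chunk (parent index 1) nested inside the hierarchy chunk (own index 3),
# mirroring how obj_parent_chunk is added as a subchunk of obj_hierarchy_chunk
# in the export hunks further down.
parent_chunk = pack_chunk(OBJECT_PARENT, struct.pack("<H", 1))
hierarchy_chunk = pack_chunk(OBJECT_HIERARCHY, struct.pack("<H", 3) + parent_chunk)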
@@ -1477,7 +1479,7 @@ def make_ambient_node(world):
 # EXPORT #
 ##########

-def save(operator, context, filepath="", use_selection=False, write_keyframe=False, global_matrix=None):
+def save(operator, context, filepath="", use_selection=False, use_hierarchy=False, write_keyframe=False, global_matrix=None):
     """Save the Blender scene to a 3ds file."""

     # Time the export
@@ -1608,6 +1610,7 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
     scale = {}

     # Give all objects a unique ID and build a dictionary from object name to object id
+    object_id = {}
     name_id = {}

     for ob, data, matrix in mesh_objects:
@@ -1615,6 +1618,7 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
         rotation[ob.name] = ob.rotation_euler.to_quaternion().inverted()
         scale[ob.name] = ob.scale
         name_id[ob.name] = len(name_id)
+        object_id[ob.name] = len(object_id)

     for ob in empty_objects:
         translation[ob.name] = ob.location
@@ -1622,6 +1626,12 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
         scale[ob.name] = ob.scale
         name_id[ob.name] = len(name_id)

+    for ob in light_objects:
+        object_id[ob.name] = len(object_id)
+
+    for ob in camera_objects:
+        object_id[ob.name] = len(object_id)
+
     # Create object chunks for all meshes
     i = 0
     for ob, mesh, matrix in mesh_objects:
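Note: the object_id dictionary above simply numbers objects in visiting order (meshes first, then lights, then cameras), so the parent references written later resolve to these indices. A rough standalone illustration with hypothetical object names:

# Hypothetical scene: two meshes, one light, one camera
mesh_names = ["Body", "Wheel"]
light_names = ["Lamp"]
camera_names = ["Camera"]

object_id = {}
for name in mesh_names + light_names + camera_names:
    object_id[name] = len(object_id)

assert object_id == {"Body": 0, "Wheel": 1, "Lamp": 2, "Camera": 3}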
@@ -1633,20 +1643,32 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
         # Make a mesh chunk out of the mesh
         object_chunk.add_subchunk(make_mesh_chunk(ob, mesh, matrix, materialDict, translation))

-        # Ensure the mesh has no over sized arrays, skip ones that do!
+        # Add hierachy chunk with ID from object_id dictionary
+        if use_hierarchy:
+            obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
+            obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
+
+            # Add parent chunk if object has a parent
+            if ob.parent is not None and (ob.parent.name in object_id):
+                obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
+                obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
+                obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
+            object_chunk.add_subchunk(obj_hierarchy_chunk)
+
+        # ensure the mesh has no over sized arrays - skip ones that do!
         # Otherwise we cant write since the array size wont fit into USHORT
         if object_chunk.validate():
             object_info.add_subchunk(object_chunk)
         else:
             operator.report({'WARNING'}, "Object %r can't be written into a 3DS file")

-        # Export kf object node
+        # Export object node
         if write_keyframe:
             kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id))

         i += i

-    # Create chunks for all empties, only requires a kf object node
+    # Create chunks for all empties - only requires a object node
     if write_keyframe:
         for ob in empty_objects:
             kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id))
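Note: condensed, the per-mesh logic added above always writes the object's own hierarchy index and only nests an OBJECT_PARENT subchunk when the parent is itself an exported object. The same decision expressed as a helper (make_hierarchy_chunk is an illustrative name, not part of the patch; _3ds_chunk and _3ds_ushort are the existing helpers in this file):

def make_hierarchy_chunk(ob, object_id):
    # Always record the object's own hierarchy index...
    hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
    hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
    # ...but only write a parent subchunk when the parent was exported too.
    if ob.parent is not None and ob.parent.name in object_id:
        parent_chunk = _3ds_chunk(OBJECT_PARENT)
        parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
        hierarchy_chunk.add_subchunk(parent_chunk)
    return hierarchy_chunk

The light and camera hunks below follow the same pattern, except that an unparented object gets an explicit ROOT_OBJECT parent id instead of no parent chunk.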
@@ -1654,15 +1676,15 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
     # Create light object chunks
     for ob in light_objects:
         object_chunk = _3ds_chunk(OBJECT)
-        light_chunk = _3ds_chunk(OBJECT_LIGHT)
+        obj_light_chunk = _3ds_chunk(OBJECT_LIGHT)
         color_float_chunk = _3ds_chunk(RGB)
-        energy_factor = _3ds_chunk(LIGHT_MULTIPLIER)
+        light_energy_factor = _3ds_chunk(LIGHT_MULTIPLIER)
         object_chunk.add_variable("light", _3ds_string(sane_name(ob.name)))
-        light_chunk.add_variable("location", _3ds_point_3d(ob.location))
+        obj_light_chunk.add_variable("location", _3ds_point_3d(ob.location))
         color_float_chunk.add_variable("color", _3ds_float_color(ob.data.color))
-        energy_factor.add_variable("energy", _3ds_float(ob.data.energy * 0.001))
-        light_chunk.add_subchunk(color_float_chunk)
-        light_chunk.add_subchunk(energy_factor)
+        light_energy_factor.add_variable("energy", _3ds_float(ob.data.energy * 0.001))
+        obj_light_chunk.add_subchunk(color_float_chunk)
+        obj_light_chunk.add_subchunk(light_energy_factor)

         if ob.data.type == 'SPOT':
             cone_angle = math.degrees(ob.data.spot_size)
@@ -1684,10 +1706,24 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
             if ob.data.use_square:
                 spot_square_chunk = _3ds_chunk(LIGHT_SPOT_RECTANGLE)
                 spotlight_chunk.add_subchunk(spot_square_chunk)
-            light_chunk.add_subchunk(spotlight_chunk)
+            obj_light_chunk.add_subchunk(spotlight_chunk)

-        # Add light to object info
-        object_chunk.add_subchunk(light_chunk)
+        # Add light to object chunk
+        object_chunk.add_subchunk(obj_light_chunk)
+
+        # Add hierachy chunks with ID from object_id dictionary
+        if use_hierarchy:
+            obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
+            obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
+            obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
+            if ob.parent is None or (ob.parent.name not in object_id):
+                obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
+            else:  # Get the parent ID from the object_id dict
+                obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
+            obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
+            object_chunk.add_subchunk(obj_hierarchy_chunk)
+
+        # Add light object and hierarchy chunks to object info
         object_info.add_subchunk(object_chunk)

         # Export light and spotlight target node
@@ -1714,6 +1750,20 @@ def save(operator, context, filepath="", use_selection=False, write_keyframe=Fal
         camera_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6)))
         camera_chunk.add_variable("lens", _3ds_float(ob.data.lens))
         object_chunk.add_subchunk(camera_chunk)
+
+        # Add hierachy chunks with ID from object_id dictionary
+        if use_hierarchy:
+            obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
+            obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
+            obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
+            if ob.parent is None or (ob.parent.name not in object_id):
+                obj_parent_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
+            else:  # Get the parent ID from the object_id dict
+                obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
+            obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
+            object_chunk.add_subchunk(obj_hierarchy_chunk)
+
+        # Add light object and hierarchy chunks to object info
         object_info.add_subchunk(object_chunk)

         # Export camera and target node
@@ -92,6 +92,8 @@ MAT_MAP_BCOL = 0xA368  # Blue mapping
 OBJECT_MESH = 0x4100  # This lets us know that we are reading a new object
 OBJECT_LIGHT = 0x4600  # This lets us know we are reading a light object
 OBJECT_CAMERA = 0x4700  # This lets us know we are reading a camera object
+OBJECT_HIERARCHY = 0x4F00  # This lets us know the hierachy id of the object
+OBJECT_PARENT = 0x4F10  # This lets us know the parent id of the object

 # >------ Sub defines of LIGHT
 LIGHT_SPOTLIGHT = 0x4610  # The target of a spotlight
@@ -322,6 +324,9 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
     contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)


+childs_list = []
+parent_list = []
+
 def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE):

     contextObName = None
@@ -461,6 +466,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
     temp_chunk = Chunk()

     CreateBlenderObject = False
+    CreateCameraObject = False
     CreateLightObject = False
     CreateTrackData = False

@@ -924,6 +930,23 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             contextMatrix = mathutils.Matrix(
                 (data[:3] + [0], data[3:6] + [0], data[6:9] + [0], data[9:] + [1])).transposed()

+        # If hierarchy chunk
+        elif new_chunk.ID == OBJECT_HIERARCHY:
+            child_id = read_short(new_chunk)
+            childs_list.insert(child_id, contextObName)
+            parent_list.insert(child_id, None)
+            if child_id in parent_list:
+                idp = parent_list.index(child_id)
+                parent_list[idp] = contextObName
+        elif new_chunk.ID == OBJECT_PARENT:
+            parent_id = read_short(new_chunk)
+            if parent_id > len(childs_list):
+                parent_list[child_id] = parent_id
+                parent_list.extend([None]*(parent_id-len(parent_list)))
+                parent_list.insert(parent_id, contextObName)
+            elif parent_id < len(childs_list):
+                parent_list[child_id] = childs_list[parent_id]
+
         # If light chunk
         elif contextObName and new_chunk.ID == OBJECT_LIGHT:  # Basic lamp support
             newLamp = bpy.data.lights.new("Lamp", 'POINT')
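Note: on the import side, childs_list and parent_list end up as parallel arrays indexed by hierarchy id: the name of each object and the name of its parent (or None). A rough trace of the common path, where the parent object has already been read (parent_id < len(childs_list)); the object names are hypothetical:

childs_list, parent_list = [], []

# OBJECT_HIERARCHY chunks: "Body" gets id 0, then "Wheel" gets id 1
childs_list.insert(0, "Body");  parent_list.insert(0, None)
childs_list.insert(1, "Wheel"); parent_list.insert(1, None)

# OBJECT_PARENT chunk read while "Wheel" (child_id 1) is current: parent id 0
parent_id = 0
if parent_id < len(childs_list):
    parent_list[1] = childs_list[parent_id]   # "Wheel" is parented to "Body"

assert dict(zip(childs_list, parent_list)) == {"Body": None, "Wheel": "Body"}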
@@ -934,9 +957,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             temp_data = file.read(SZ_3FLOAT)
             contextLamp.location = struct.unpack('<3f', temp_data)
             new_chunk.bytes_read += SZ_3FLOAT
-            contextMatrix = None  # Reset matrix
             CreateBlenderObject = False
             CreateLightObject = True
+            contextMatrix = None  # Reset matrix
         elif CreateLightObject and new_chunk.ID == COLOR_F:  # Light color
             temp_data = file.read(SZ_3FLOAT)
             contextLamp.data.color = struct.unpack('<3f', temp_data)
@@ -973,6 +996,21 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             contextLamp.data.show_cone = True
         elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_RECTANGLE:  # Square
             contextLamp.data.use_square = True
+        elif CreateLightObject and new_chunk.ID == OBJECT_HIERARCHY:
+            child_id = read_short(new_chunk)
+            childs_list.insert(child_id, contextObName)
+            parent_list.insert(child_id, None)
+            if child_id in parent_list:
+                idp = parent_list.index(child_id)
+                parent_list[idp] = contextObName
+        elif CreateLightObject and new_chunk.ID == OBJECT_PARENT:
+            parent_id = read_short(new_chunk)
+            if parent_id > len(childs_list):
+                parent_list[child_id] = parent_id
+                parent_list.extend([None]*(parent_id-len(parent_list)))
+                parent_list.insert(parent_id, contextObName)
+            elif parent_id < len(childs_list):
+                parent_list[child_id] = childs_list[parent_id]

         # If camera chunk
         elif contextObName and new_chunk.ID == OBJECT_CAMERA:  # Basic camera support
@@ -996,8 +1034,24 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             temp_data = file.read(SZ_FLOAT)
             contextCamera.data.lens = float(struct.unpack('<f', temp_data)[0])  # Focus
             new_chunk.bytes_read += SZ_FLOAT
-            contextMatrix = None  # Reset matrix
             CreateBlenderObject = False
+            CreateCameraObject = True
+            contextMatrix = None  # Reset matrix
+        elif CreateCameraObject and new_chunk.ID == OBJECT_HIERARCHY:
+            child_id = read_short(new_chunk)
+            childs_list.insert(child_id, contextObName)
+            parent_list.insert(child_id, None)
+            if child_id in parent_list:
+                idp = parent_list.index(child_id)
+                parent_list[idp] = contextObName
+        elif CreateCameraObject and new_chunk.ID == OBJECT_PARENT:
+            parent_id = read_short(new_chunk)
+            if parent_id > len(childs_list):
+                parent_list[child_id] = parent_id
+                parent_list.extend([None]*(parent_id-len(parent_list)))
+                parent_list.insert(parent_id, contextObName)
+            elif parent_id < len(childs_list):
+                parent_list[child_id] = childs_list[parent_id]

         # start keyframe section
         elif new_chunk.ID == EDITKEYFRAME:
@@ -1296,12 +1350,22 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

         #pivot_list[ind] += pivot_list[parent]  # Not sure this is correct, should parent space matrix be applied before combining?

+    # if parent name
     for par, objs in parent_dictionary.items():
         parent = object_dictionary.get(par)
         for ob in objs:
             if parent is not None:
                 ob.parent = parent

+    # If hierarchy
+    hierarchy = dict(zip(childs_list, parent_list))
+    hierarchy.pop(None, ...)
+    for idt, (child, parent) in enumerate(hierarchy.items()):
+        child_obj = object_dictionary.get(child)
+        parent_obj = object_dictionary.get(parent)
+        if child_obj and parent_obj is not None:
+            child_obj.parent = parent_obj
+
     # fix pivots
     for ind, ob in enumerate(object_list):
         if ob.type == 'MESH':
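Note: dict(zip(childs_list, parent_list)) collapses the two lists into a child-name to parent-name mapping, and hierarchy.pop(None, ...) merely discards a possible None key (Ellipsis is just a throwaway default so no KeyError is raised when the key is absent). A minimal sketch with hypothetical names, using plain strings as stand-ins for the Blender objects that object_dictionary normally holds:

childs_list = ["Body", "Wheel", None]
parent_list = [None, "Body", None]

hierarchy = dict(zip(childs_list, parent_list))  # {"Body": None, "Wheel": "Body", None: None}
hierarchy.pop(None, ...)                         # drop a possible None entry, never raises

object_dictionary = {"Body": "<Object Body>", "Wheel": "<Object Wheel>"}
for child, parent in hierarchy.items():
    child_obj = object_dictionary.get(child)
    parent_obj = object_dictionary.get(parent)
    if child_obj and parent_obj is not None:
        print(child_obj, "->", parent_obj)       # only "Wheel" gets a parent here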
@@ -1312,14 +1376,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             ob.data.transform(pivot_matrix)


-def load_3ds(filepath,
-             context,
-             CONSTRAIN=10.0,
-             IMAGE_SEARCH=True,
-             WORLD_MATRIX=False,
-             KEYFRAME=True,
-             APPLY_MATRIX=True,
-             CONVERSE=None):
+def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None):

     print("importing 3DS: %r..." % (filepath), end="")
