From 7504cb5e96d4e45e2ddf265b821904c8d0a34410 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sat, 22 Jul 2023 17:40:41 +0200 Subject: [PATCH 01/78] io_scene_3ds: Added unit measure convert option --- io_scene_3ds/__init__.py | 7 +++- io_scene_3ds/export_3ds.py | 2 +- io_scene_3ds/import_3ds.py | 70 ++++++++++++++++++-------------------- 3 files changed, 41 insertions(+), 38 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 4369e686f..b67a05eb6 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -55,6 +55,11 @@ class Import3DS(bpy.types.Operator, ImportHelper): soft_min=0.0, soft_max=1000.0, default=10.0, ) + convert_measure: BoolProperty( + name="Convert Measure", + description="Convert from millimeters to meters", + default=False, + ) use_image_search: BoolProperty( name="Image Search", description="Search subdirectories for any associated images " @@ -112,7 +117,7 @@ class Export3DS(bpy.types.Operator, ExportHelper): min=0.0, max=100000.0, soft_min=0.0, soft_max=100000.0, default=1.0, - ) + ) use_selection: BoolProperty( name="Selection Only", description="Export selected objects only", diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index f2f93848a..b1798d251 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1209,7 +1209,7 @@ def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): elif ID == ROT_TRACK_TAG: # Rotation (angle first [radians], followed by axis) quat = ob_rot.to_quaternion().inverted() - track_chunk.add_variable("rotation", _3ds_point_4d((quat.angle, quat.axis.x, quat.axis.y, quat.axis.z))) + track_chunk.add_variable("rotation", _3ds_point_4d((quat.angle, quat.axis,x, quat.axis.y, quat.axis.z))) elif ID == SCL_TRACK_TAG: # Scale vector track_chunk.add_variable("scale", _3ds_point_3d(ob_size)) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 529166b14..4fa28e5a3 100644 --- a/io_scene_3ds/import_3ds.py +++ 
b/io_scene_3ds/import_3ds.py @@ -325,7 +325,8 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of childs_list = [] parent_list = [] -def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE): +def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, + IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE): contextObName = None contextLamp = None @@ -362,15 +363,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI pivot_list = [] # pivots with hierarchy handling trackposition = {} # keep track to position for target calculation - def putContextMesh( - context, - myContextMesh_vertls, - myContextMesh_facels, - myContextMesh_flag, - myContextMeshMaterials, - myContextMesh_smooth, - WORLD_MATRIX, - ): + def putContextMesh(context, myContextMesh_vertls, myContextMesh_facels, myContextMesh_flag, + myContextMeshMaterials, myContextMesh_smooth, WORLD_MATRIX): + bmesh = bpy.data.meshes.new(contextObName) if myContextMesh_facels is None: @@ -431,8 +426,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI imported_objects.append(ob) if myContextMesh_flag: - """Bit 0 (0x1) sets edge CA visible, Bit 1 (0x2) sets edge BC visible and Bit 2 (0x4) sets edge AB visible - In Blender we use sharp edges for those flags""" + """Bit 0 (0x1) sets edge CA visible, Bit 1 (0x2) sets edge BC visible and + Bit 2 (0x4) sets edge AB visible. In Blender we use sharp edges for those flags.""" for f, pl in enumerate(bmesh.polygons): face = myContextMesh_facels[f] faceflag = myContextMesh_flag[f] @@ -541,7 +536,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI 0x40 activates alpha source, 0x80 activates tinting, 0x100 ignores alpha, 0x200 activates RGB tint. Bits 0x80, 0x100, and 0x200 are only used with TEXMAP, TEX2MAP, and SPECMAP chunks. 
0x40, when used with a TEXMAP, TEX2MAP, or SPECMAP chunk must be accompanied with a tint bit, - either 0x100 or 0x200, tintcolor will be processed if colorchunks are present""" + either 0x100 or 0x200, tintcolor will be processed if colorchunks are present.""" tiling = read_short(temp_chunk) if tiling & 0x1: extend = 'decal' @@ -620,7 +615,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI def read_track_data(track_chunk): """Trackflags 0x1, 0x2 and 0x3 are for looping. 0x8, 0x10 and 0x20 - locks the XYZ axes. 0x100, 0x200 and 0x400 unlinks the XYZ axes""" + locks the XYZ axes. 0x100, 0x200 and 0x400 unlinks the XYZ axes.""" tflags = read_short(track_chunk) contextTrack_flag = tflags temp_data = file.read(SZ_U_INT * 2) @@ -687,7 +682,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # is it an object info chunk? elif new_chunk.ID == OBJECTINFO: - process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE) + process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, + IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE) # keep track of how much we read in the main chunk new_chunk.bytes_read += temp_chunk.bytes_read @@ -1105,6 +1101,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI for keydata in keyframe_data.items(): trackposition[keydata[0]] = keydata[1] # Keep track to position for target calculation child.location = apply_constrain(keydata[1]) if hierarchy == ROOT_OBJECT else mathutils.Vector(keydata[1]) + if MEASURE: + child.location = child.location * 0.001 if hierarchy == ROOT_OBJECT: child.location.rotate(CONVERSE) if not contextTrack_flag & 0x100: # Flag 0x100 unlinks X axis @@ -1131,6 +1129,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI scale = mathutils.Vector.Fill(3, (CONSTRAIN * 0.1)) if CONSTRAIN != 0.0 else child.scale 
transformation = mathutils.Matrix.LocRotScale(locate, rotate, scale) child.matrix_world = transformation + if MEASURE: + child.matrix_world = mathutils.Matrix.Scale(0.001,4) @ child.matrix_world if hierarchy == ROOT_OBJECT: child.matrix_world = CONVERSE @ child.matrix_world child.keyframe_insert(data_path="rotation_euler", index=0, frame=keydata[0]) @@ -1305,7 +1305,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # IMPORT # ########## -def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None): +def load_3ds(filepath, context, CONSTRAIN=10.0, MEASURE=False, IMAGE_SEARCH=True, + WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None): print("importing 3DS: %r..." % (filepath), end="") @@ -1335,7 +1336,8 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX= scn = context.scene imported_objects = [] # Fill this list with objects - process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE) + process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN, + IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE) # fixme, make unglobal object_dictionary.clear() @@ -1344,8 +1346,13 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX= if APPLY_MATRIX: for ob in imported_objects: if ob.type == 'MESH': - me = ob.data - me.transform(ob.matrix_local.inverted()) + ob.data.transform(ob.matrix_local.inverted()) + + if MEASURE: + unit_mtx = mathutils.Matrix.Scale(0.001,4) + for ob in imported_objects: + if ob.type == 'MESH': + ob.data.transform(unit_mtx) if CONVERSE and not KEYFRAME: for ob in imported_objects: @@ -1424,25 +1431,16 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX= file.close() -def load(operator, - context, - filepath="", - constrain_size=0.0, - 
use_image_search=True, - use_world_matrix=False, - read_keyframe=True, - use_apply_transform=True, - global_matrix=None, +def load(operator, context, filepath="", constrain_size=0.0, + convert_measure=False, use_image_search=True, + use_world_matrix=False, read_keyframe=True, + use_apply_transform=True, global_matrix=None, ): - load_3ds(filepath, - context, - CONSTRAIN=constrain_size, - IMAGE_SEARCH=use_image_search, - WORLD_MATRIX=use_world_matrix, - KEYFRAME=read_keyframe, - APPLY_MATRIX=use_apply_transform, - CONVERSE=global_matrix, + load_3ds(filepath, context, CONSTRAIN=constrain_size, + MEASURE=convert_measure, IMAGE_SEARCH=use_image_search, + WORLD_MATRIX=use_world_matrix, KEYFRAME=read_keyframe, + APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix, ) return {'FINISHED'} -- 2.30.2 From 0ca031c3173ac83cb3547c480120b54771049bca Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sat, 22 Jul 2023 17:44:39 +0200 Subject: [PATCH 02/78] Fixed mismatched comma --- io_scene_3ds/export_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index b1798d251..f2f93848a 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1209,7 +1209,7 @@ def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): elif ID == ROT_TRACK_TAG: # Rotation (angle first [radians], followed by axis) quat = ob_rot.to_quaternion().inverted() - track_chunk.add_variable("rotation", _3ds_point_4d((quat.angle, quat.axis,x, quat.axis.y, quat.axis.z))) + track_chunk.add_variable("rotation", _3ds_point_4d((quat.angle, quat.axis.x, quat.axis.y, quat.axis.z))) elif ID == SCL_TRACK_TAG: # Scale vector track_chunk.add_variable("scale", _3ds_point_3d(ob_size)) -- 2.30.2 From 77f7f4dbdf715152564d0bbbe3ae4251ea1725d0 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sun, 23 Jul 2023 12:45:01 +0200 Subject: [PATCH 03/78] Export_3ds: Fixed position scaling --- io_scene_3ds/export_3ds.py | 36 
++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index f2f93848a..79d00463e 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1086,7 +1086,6 @@ def make_kfdata(revision, start=0, stop=100, curtime=0): def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): """Make a chunk for track data. Depending on the ID, this will construct a position, rotation, scale, roll, color, fov, hotspot or falloff track.""" - ob_distance = mathutils.Matrix.Diagonal(ob_size) track_chunk = _3ds_chunk(ID) if ID in {POS_TRACK_TAG, ROT_TRACK_TAG, SCL_TRACK_TAG, ROLL_TRACK_TAG} and ob.animation_data and ob.animation_data.action: @@ -1111,7 +1110,7 @@ def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): pos_x = next((tc.evaluate(frame) for tc in pos_track if tc.array_index == 0), ob_pos.x) pos_y = next((tc.evaluate(frame) for tc in pos_track if tc.array_index == 1), ob_pos.y) pos_z = next((tc.evaluate(frame) for tc in pos_track if tc.array_index == 2), ob_pos.z) - pos = ob_distance @ mathutils.Vector((pos_x, pos_y, pos_z)) + pos = ob_size @ mathutils.Vector((pos_x, pos_y, pos_z)) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) track_chunk.add_variable("position", _3ds_point_3d((pos.x, pos.y, pos.z))) @@ -1317,7 +1316,8 @@ def make_object_node(ob, translation, rotation, scale, name_id): obj_morph_smooth.add_variable("angle", _3ds_float(round(ob.data.auto_smooth_angle, 6))) obj_node.add_subchunk(obj_morph_smooth) - # Add track chunks for color, position, rotation and scale + # Add track chunks for position, rotation, size + ob_scale = scale[name] # and collect masterscale if parent is None or (parent.name not in name_id): ob_pos = translation[name] ob_rot = rotation[name] @@ -1328,7 +1328,7 @@ def make_object_node(ob, translation, rotation, scale, name_id): ob_rot = 
rotation[name].to_quaternion().cross(rotation[parent.name].to_quaternion().copy().inverted()).to_euler() ob_size = mathutils.Vector((1.0, 1.0, 1.0)) - obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, ob, ob_pos, ob_rot, ob_scale)) if ob.type in {'MESH', 'EMPTY'}: obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) @@ -1375,7 +1375,7 @@ def make_target_node(ob, translation, rotation, scale, name_id): # Calculate target position ob_pos = translation[name] ob_rot = rotation[name] - ob_size = mathutils.Matrix.Diagonal(scale[name]) + ob_scale = scale[name] target_pos = calc_target(ob_pos, ob_rot.x, ob_rot.z) # Add track chunks for target position @@ -1405,7 +1405,7 @@ def make_target_node(ob, translation, rotation, scale, name_id): rot_target = [fc for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] rot_x = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 0), ob_rot.x) rot_z = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 2), ob_rot.z) - target_distance = ob_size @ mathutils.Vector((loc_x, loc_y, loc_z)) + target_distance = ob_scale @ mathutils.Vector((loc_x, loc_y, loc_z)) target_pos = calc_target(target_distance, rot_x, rot_z) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) @@ -1628,29 +1628,29 @@ def save(operator, context, filepath="", scale_factor=1.0, use_selection=False, name_id = {} for ob, data, matrix in mesh_objects: - translation[ob.name] = mtx_scale @ ob.location.copy() - rotation[ob.name] = ob.rotation_euler.copy() - scale[ob.name] = ob.scale.copy() + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() name_id[ob.name] = len(name_id) object_id[ob.name] = len(object_id) for ob in empty_objects: - translation[ob.name] = mtx_scale @ 
ob.location.copy() - rotation[ob.name] = ob.rotation_euler.copy() - scale[ob.name] = ob.scale.copy() + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() name_id[ob.name] = len(name_id) for ob in light_objects: - translation[ob.name] = mtx_scale @ ob.location.copy() - rotation[ob.name] = ob.rotation_euler.copy() - scale[ob.name] = mtx_scale.copy().to_scale() + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() name_id[ob.name] = len(name_id) object_id[ob.name] = len(object_id) for ob in camera_objects: - translation[ob.name] = mtx_scale @ ob.location.copy() - rotation[ob.name] = ob.rotation_euler.copy() - scale[ob.name] = mtx_scale.copy().to_scale() + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() name_id[ob.name] = len(name_id) object_id[ob.name] = len(object_id) -- 2.30.2 From 1928cdf967ef88997adfb112f1dcd12742e5d22c Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sun, 23 Jul 2023 12:59:10 +0200 Subject: [PATCH 04/78] Export_3ds: Fixed position scaling for animations --- io_scene_3ds/export_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 79d00463e..83e947b52 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1321,7 +1321,7 @@ def make_object_node(ob, translation, rotation, scale, name_id): if parent is None or (parent.name not in name_id): ob_pos = translation[name] ob_rot = rotation[name] - ob_size = scale[name] + ob_size = ob.scale else: # Calculate child position and rotation of the object center, no scale applied ob_pos = translation[name] - translation[parent.name] -- 2.30.2 From a7b20de484536b46c229e001d04d81a30936a184 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Tue, 25 Jul 2023 16:21:08 +0200 Subject: [PATCH 05/78] io_scene_3ds: 
Take scene units into account for import --- io_scene_3ds/__init__.py | 10 +++++----- io_scene_3ds/import_3ds.py | 30 +++++++++++++++++++++--------- 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 0aad653ad..9262f5b0e 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -55,9 +55,9 @@ class Import3DS(bpy.types.Operator, ImportHelper): soft_min=0.0, soft_max=1000.0, default=10.0, ) - convert_measure: BoolProperty( - name="Convert Measure", - description="Convert from millimeters to meters", + convert_unit: BoolProperty( + name="Convert Units", + description="Converts to scene unit length settings", default=False, ) use_image_search: BoolProperty( @@ -149,7 +149,7 @@ class MAX3DS_PT_import_transform(bpy.types.Panel): operator = sfile.active_operator layout.prop(operator, "constrain_size") - layout.prop(operator, "convert_measure") + layout.prop(operator, "convert_unit") layout.prop(operator, "use_apply_transform") layout.prop(operator, "use_world_matrix") layout.prop(operator, "axis_forward") @@ -295,4 +295,4 @@ def unregister(): if __name__ == "__main__": - register() \ No newline at end of file + register() diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 4fa28e5a3..c7dc5d1d6 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1101,8 +1101,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI for keydata in keyframe_data.items(): trackposition[keydata[0]] = keydata[1] # Keep track to position for target calculation child.location = apply_constrain(keydata[1]) if hierarchy == ROOT_OBJECT else mathutils.Vector(keydata[1]) - if MEASURE: - child.location = child.location * 0.001 + if MEASURE != 1.0: + child.location = child.location * MEASURE if hierarchy == ROOT_OBJECT: child.location.rotate(CONVERSE) if not contextTrack_flag & 0x100: # Flag 0x100 unlinks X axis @@ -1129,8 +1129,8 @@ def 
process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI scale = mathutils.Vector.Fill(3, (CONSTRAIN * 0.1)) if CONSTRAIN != 0.0 else child.scale transformation = mathutils.Matrix.LocRotScale(locate, rotate, scale) child.matrix_world = transformation - if MEASURE: - child.matrix_world = mathutils.Matrix.Scale(0.001,4) @ child.matrix_world + if MEASURE != 1.0: + child.matrix_world = mathutils.Matrix.Scale(MEASURE,4) @ child.matrix_world if hierarchy == ROOT_OBJECT: child.matrix_world = CONVERSE @ child.matrix_world child.keyframe_insert(data_path="rotation_euler", index=0, frame=keydata[0]) @@ -1305,7 +1305,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # IMPORT # ########## -def load_3ds(filepath, context, CONSTRAIN=10.0, MEASURE=False, IMAGE_SEARCH=True, +def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None): print("importing 3DS: %r..." 
% (filepath), end="") @@ -1313,6 +1313,7 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, MEASURE=False, IMAGE_SEARCH=True if bpy.ops.object.select_all.poll(): bpy.ops.object.select_all(action='DESELECT') + MEASURE = 1.0 duration = time.time() current_chunk = Chunk() file = open(filepath, 'rb') @@ -1335,6 +1336,17 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, MEASURE=False, IMAGE_SEARCH=True object_matrix.clear() scn = context.scene + if UNITS: + unit_length = sce.unit_settings.length_unit + if unit_length == 'KILOMETERS': + MEASURE = 1000.0 + elif unit_length == 'CENTIMETERS': + MEASURE = 0.01 + elif unit_length == 'MILLIMETERS': + MEASURE = 0.001 + elif unit_length == 'MICROMETERS': + MEASURE = 0.000001 + imported_objects = [] # Fill this list with objects process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE) @@ -1348,8 +1360,8 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, MEASURE=False, IMAGE_SEARCH=True if ob.type == 'MESH': ob.data.transform(ob.matrix_local.inverted()) - if MEASURE: - unit_mtx = mathutils.Matrix.Scale(0.001,4) + if UNITS: + unit_mtx = mathutils.Matrix.Scale(MEASURE,4) for ob in imported_objects: if ob.type == 'MESH': ob.data.transform(unit_mtx) @@ -1432,13 +1444,13 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, MEASURE=False, IMAGE_SEARCH=True def load(operator, context, filepath="", constrain_size=0.0, - convert_measure=False, use_image_search=True, + convert_unit=False, use_image_search=True, use_world_matrix=False, read_keyframe=True, use_apply_transform=True, global_matrix=None, ): load_3ds(filepath, context, CONSTRAIN=constrain_size, - MEASURE=convert_measure, IMAGE_SEARCH=use_image_search, + UNITS=convert_unit, IMAGE_SEARCH=use_image_search, WORLD_MATRIX=use_world_matrix, KEYFRAME=read_keyframe, APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix, ) -- 2.30.2 From 31c673f60720ed333ed143e45b90d4f539c13373 Mon Sep 17 00:00:00 2001 From: 
NRGSille Date: Tue, 25 Jul 2023 17:03:38 +0200 Subject: [PATCH 06/78] io_scene_3ds: Take scene units into account for export --- io_scene_3ds/__init__.py | 6 ++++++ io_scene_3ds/export_3ds.py | 19 ++++++++++++++++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 9262f5b0e..b50a0cb46 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -176,6 +176,11 @@ class Export3DS(bpy.types.Operator, ExportHelper): soft_min=0.0, soft_max=100000.0, default=1.0, ) + unit_convert: BoolProperty( + name="Convert Units", + description="Converts to scene unit length settings", + default=False, + ) use_selection: BoolProperty( name="Selection Only", description="Export selected objects only", @@ -259,6 +264,7 @@ class MAX3DS_PT_export_transform(bpy.types.Panel): operator = sfile.active_operator layout.prop(operator, "scale_factor") + layout.prop(operator, "unit_convert") layout.prop(operator, "axis_forward") layout.prop(operator, "axis_up") diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 83e947b52..23fa8d850 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1489,10 +1489,9 @@ def make_ambient_node(world): # EXPORT # ########## -def save(operator, context, filepath="", scale_factor=1.0, use_selection=False, use_hierarchy=False, write_keyframe=False, global_matrix=None): - +def save(operator, context, filepath="", scale_factor=1.0, convert_unit=False, + use_selection=False, use_hierarchy=False, write_keyframe=False, global_matrix=None): """Save the Blender scene to a 3ds file.""" - mtx_scale = mathutils.Matrix.Scale(scale_factor, 4) # Time the export duration = time.time() @@ -1503,6 +1502,20 @@ def save(operator, context, filepath="", scale_factor=1.0, use_selection=False, depsgraph = context.evaluated_depsgraph_get() world = scene.world + unit_measure = 1.0 + if unit_convert: + unit_length = sce.unit_settings.length_unit + if unit_length == 
'KILOMETERS': + unit_measure = 0.001 + elif unit_length == 'CENTIMETERS': + unit_measure = 100 + elif unit_length == 'MILLIMETERS': + unit_measure = 1000 + elif unit_length == 'MICROMETERS': + unit_measure = 1000000 + + mtx_scale = mathutils.Matrix.Scale((scale_factor * unit_measure),4) + if global_matrix is None: global_matrix = mathutils.Matrix() -- 2.30.2 From 22803a31540a4c9750f6f2dcd4e227aee8b358c4 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Tue, 25 Jul 2023 17:12:25 +0200 Subject: [PATCH 07/78] Export_3ds: Take scene units into account --- io_scene_3ds/export_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 23fa8d850..a383fda8e 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1489,7 +1489,7 @@ def make_ambient_node(world): # EXPORT # ########## -def save(operator, context, filepath="", scale_factor=1.0, convert_unit=False, +def save(operator, context, filepath="", scale_factor=1.0, unit_convert=False, use_selection=False, use_hierarchy=False, write_keyframe=False, global_matrix=None): """Save the Blender scene to a 3ds file.""" -- 2.30.2 From 1c8d5ace0accfcab025adf970b7e0fd1beb37f66 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Tue, 25 Jul 2023 17:17:29 +0200 Subject: [PATCH 08/78] Export_3ds: Added unit measure to masterscale --- io_scene_3ds/export_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index a383fda8e..bbd1c4514 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1538,7 +1538,7 @@ def save(operator, context, filepath="", scale_factor=1.0, unit_convert=False, # Add MASTERSCALE element mscale = _3ds_chunk(MASTERSCALE) - mscale.add_variable("scale", _3ds_float(1.0)) + mscale.add_variable("scale", _3ds_float(unit_measure)) object_info.add_subchunk(mscale) # Init main keyframe data chunk -- 2.30.2 From 
70b45a8858c7117d5ad97fb71b0aee183817c209 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Tue, 25 Jul 2023 17:25:50 +0200 Subject: [PATCH 09/78] Export_3ds: Added unit measure to masterscale --- io_scene_3ds/export_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index bbd1c4514..763391662 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1538,7 +1538,7 @@ def save(operator, context, filepath="", scale_factor=1.0, unit_convert=False, # Add MASTERSCALE element mscale = _3ds_chunk(MASTERSCALE) - mscale.add_variable("scale", _3ds_float(unit_measure)) + mscale.add_variable("scale", _3ds_float(1.0 / unit_measure)) object_info.add_subchunk(mscale) # Init main keyframe data chunk -- 2.30.2 From 3be5db227d933ed4619ac8638ee8293bac35ffc7 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Tue, 25 Jul 2023 17:31:17 +0200 Subject: [PATCH 10/78] Export_3ds: Added unit measure to masterscale --- io_scene_3ds/export_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 763391662..0a955a939 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1538,7 +1538,7 @@ def save(operator, context, filepath="", scale_factor=1.0, unit_convert=False, # Add MASTERSCALE element mscale = _3ds_chunk(MASTERSCALE) - mscale.add_variable("scale", _3ds_float(1.0 / unit_measure)) + mscale.add_variable("scale", _3ds_float((1.0 / unit_measure))) object_info.add_subchunk(mscale) # Init main keyframe data chunk -- 2.30.2 From 00636818a9aa975276a09c8e4fc22011a56fa97e Mon Sep 17 00:00:00 2001 From: NRGSille Date: Tue, 25 Jul 2023 18:02:48 +0200 Subject: [PATCH 11/78] io_scene_3ds: Changed unit convert to apply units --- io_scene_3ds/__init__.py | 8 ++++---- io_scene_3ds/export_3ds.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py 
index b50a0cb46..e04a071d2 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -176,9 +176,9 @@ class Export3DS(bpy.types.Operator, ExportHelper): soft_min=0.0, soft_max=100000.0, default=1.0, ) - unit_convert: BoolProperty( - name="Convert Units", - description="Converts to scene unit length settings", + apply_unit: BoolProperty( + name="Apply Units", + description="Take the scene unit length settings into account", default=False, ) use_selection: BoolProperty( @@ -264,7 +264,7 @@ class MAX3DS_PT_export_transform(bpy.types.Panel): operator = sfile.active_operator layout.prop(operator, "scale_factor") - layout.prop(operator, "unit_convert") + layout.prop(operator, "apply_unit") layout.prop(operator, "axis_forward") layout.prop(operator, "axis_up") diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 0a955a939..e7a20030d 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1489,7 +1489,7 @@ def make_ambient_node(world): # EXPORT # ########## -def save(operator, context, filepath="", scale_factor=1.0, unit_convert=False, +def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use_selection=False, use_hierarchy=False, write_keyframe=False, global_matrix=None): """Save the Blender scene to a 3ds file.""" @@ -1503,7 +1503,7 @@ def save(operator, context, filepath="", scale_factor=1.0, unit_convert=False, world = scene.world unit_measure = 1.0 - if unit_convert: + if apply_unit: unit_length = sce.unit_settings.length_unit if unit_length == 'KILOMETERS': unit_measure = 0.001 @@ -1538,7 +1538,7 @@ def save(operator, context, filepath="", scale_factor=1.0, unit_convert=False, # Add MASTERSCALE element mscale = _3ds_chunk(MASTERSCALE) - mscale.add_variable("scale", _3ds_float((1.0 / unit_measure))) + mscale.add_variable("scale", _3ds_float(1.0)) object_info.add_subchunk(mscale) # Init main keyframe data chunk -- 2.30.2 From 8b1dc3f7b7ce7384e49d0ab32b226268595aba5c Mon Sep 17 00:00:00 2001 
From: NRGSille Date: Tue, 25 Jul 2023 18:49:45 +0200 Subject: [PATCH 12/78] Export_3ds: Removed unit measure from masterscale --- io_scene_3ds/export_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 0378b1afa..e7a20030d 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1538,7 +1538,7 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, # Add MASTERSCALE element mscale = _3ds_chunk(MASTERSCALE) - mscale.add_variable("scale", _3ds_float((1.0 / unit_measure))) + mscale.add_variable("scale", _3ds_float(1.0)) object_info.add_subchunk(mscale) # Init main keyframe data chunk -- 2.30.2 From b7a36d80d50f17b1e1d0fd2d33ee722a36b9d95b Mon Sep 17 00:00:00 2001 From: NRGSille Date: Wed, 26 Jul 2023 01:01:56 +0200 Subject: [PATCH 13/78] io_scene_3ds: Added object filter- to export options --- io_scene_3ds/__init__.py | 25 +++++++++++++++++++------ io_scene_3ds/export_3ds.py | 8 ++++---- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index e04a071d2..ddfff583e 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -18,7 +18,7 @@ import bpy bl_info = { "name": "Autodesk 3DS format", "author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand", - "version": (2, 4, 4), + "version": (2, 4, 5), "blender": (3, 6, 0), "location": "File > Import-Export", "description": "3DS Import/Export meshes, UVs, materials, textures, " @@ -170,14 +170,14 @@ class Export3DS(bpy.types.Operator, ExportHelper): ) scale_factor: FloatProperty( - name="Scale", - description="Scale factor for all objects", + name="Scale Factor", + description="Master scale factor for all objects", min=0.0, max=100000.0, soft_min=0.0, soft_max=100000.0, default=1.0, ) apply_unit: BoolProperty( - name="Apply Units", + name="Scene Units", description="Take the scene unit length settings 
into account", default=False, ) @@ -186,13 +186,23 @@ class Export3DS(bpy.types.Operator, ExportHelper): description="Export selected objects only", default=False, ) + object_filter: bpy.props.EnumProperty( + name="Object Filter", options={'ENUM_FLAG'}, + items=(('MESH',"Mesh".rjust(11),"",'MESH_DATA',0x1), + ('LIGHT',"Light".rjust(12),"",'LIGHT_DATA',0x2), + ('CAMERA',"Camera".rjust(11),"",'CAMERA_DATA',0x4), + ('EMPTY',"Empty".rjust(11),"",'EMPTY_DATA',0x8), + ), + description="Object types to export", + default={'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, + ) use_hierarchy: BoolProperty( name="Export Hierarchy", description="Export hierarchy chunks", default=False, ) write_keyframe: BoolProperty( - name="Write Keyframe", + name="Export Keyframes", description="Write the keyframe data", default=False, ) @@ -238,7 +248,10 @@ class MAX3DS_PT_export_include(bpy.types.Panel): operator = sfile.active_operator layout.prop(operator, "use_selection") - layout.prop(operator, "use_hierarchy") + laysub = layout.column(align=True) + laysub.enabled = (not operator.use_selection) + laysub.prop(operator, "object_filter") + layout.column().prop(operator, "use_hierarchy") layout.prop(operator, "write_keyframe") diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index e7a20030d..21f6dea20 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1489,8 +1489,8 @@ def make_ambient_node(world): # EXPORT # ########## -def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, - use_selection=False, use_hierarchy=False, write_keyframe=False, global_matrix=None): +def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use_selection=False, + object_filter=None, use_hierarchy=False, write_keyframe=False, global_matrix=None): """Save the Blender scene to a 3ds file.""" # Time the export @@ -1504,7 +1504,7 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, unit_measure = 1.0 if apply_unit: - 
unit_length = sce.unit_settings.length_unit + unit_length = scene.unit_settings.length_unit if unit_length == 'KILOMETERS': unit_measure = 0.001 elif unit_length == 'CENTIMETERS': @@ -1566,7 +1566,7 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, if use_selection: objects = [ob for ob in scene.objects if ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)] else: - objects = [ob for ob in scene.objects if ob.visible_get(view_layer=layer)] + objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer)] empty_objects = [ob for ob in objects if ob.type == 'EMPTY'] light_objects = [ob for ob in objects if ob.type == 'LIGHT'] -- 2.30.2 From 8c5950824b789824497734c28c1df68c0f7157a2 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Wed, 26 Jul 2023 09:43:18 +0200 Subject: [PATCH 14/78] io_scene_3ds: Added background color, image and gradient chunks --- io_scene_3ds/__init__.py | 21 ++++++++++----------- io_scene_3ds/export_3ds.py | 29 ++++++++++++++++++++++++++--- 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index ddfff583e..6897217f8 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -18,7 +18,7 @@ import bpy bl_info = { "name": "Autodesk 3DS format", "author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand", - "version": (2, 4, 5), + "version": (2, 4, 6), "blender": (3, 6, 0), "location": "File > Import-Export", "description": "3DS Import/Export meshes, UVs, materials, textures, " @@ -188,13 +188,14 @@ class Export3DS(bpy.types.Operator, ExportHelper): ) object_filter: bpy.props.EnumProperty( name="Object Filter", options={'ENUM_FLAG'}, - items=(('MESH',"Mesh".rjust(11),"",'MESH_DATA',0x1), - ('LIGHT',"Light".rjust(12),"",'LIGHT_DATA',0x2), - ('CAMERA',"Camera".rjust(11),"",'CAMERA_DATA',0x4), - ('EMPTY',"Empty".rjust(11),"",'EMPTY_DATA',0x8), - ), + items=(('WORLD', 
"World".rjust(11), "", 'WORLD_DATA',0x1), + ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2), + ('LIGHT', "Light".rjust(12), "", 'LIGHT_DATA',0x4), + ('CAMERA', "Camera".rjust(11), "", 'CAMERA_DATA',0x8), + ('EMPTY', "Empty".rjust(11), "", 'EMPTY_DATA',0x10), + ), description="Object types to export", - default={'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, + default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, ) use_hierarchy: BoolProperty( name="Export Hierarchy", @@ -248,10 +249,8 @@ class MAX3DS_PT_export_include(bpy.types.Panel): operator = sfile.active_operator layout.prop(operator, "use_selection") - laysub = layout.column(align=True) - laysub.enabled = (not operator.use_selection) - laysub.prop(operator, "object_filter") - layout.column().prop(operator, "use_hierarchy") + layout.column().prop(operator, "object_filter") + layout.prop(operator, "use_hierarchy") layout.prop(operator, "write_keyframe") diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 21f6dea20..fbd6dc5e5 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -30,6 +30,12 @@ KFDATA = 0xB000 # This is the header for all of the keyframe info # >----- sub defines of OBJECTINFO OBJECTINFO = 0x3D3D # Main mesh object chunk before material and object information MESHVERSION = 0x3D3E # This gives the version of the mesh +BITMAP = 0x1100 # The background image name +USE_BITMAP = 0x1101 # The background image flag +SOLIDBACKGND = 0x1200 # The background color (RGB) +USE_SOLIDBGND = 0x1201 # The background color flag +VGRADIENT = 0x1300 # The background gradient colors +USE_VGRADIENT = 0x1301 # The background gradient flag AMBIENTLIGHT = 0x2100 # The color of the ambient light MATERIAL = 45055 # 0xAFFF // This stored the texture info OBJECT = 16384 # 0x4000 // This stores the faces, vertices, etc... 
@@ -1549,13 +1555,30 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use curtime = scene.frame_current kfdata = make_kfdata(revision, start, stop, curtime) - # Add AMBIENT color - if world is not None: + # Add AMBIENT and BACKGROUND color + if world is not None and 'WORLD' in object_filter: ambient_chunk = _3ds_chunk(AMBIENTLIGHT) ambient_light = _3ds_chunk(RGB) ambient_light.add_variable("ambient", _3ds_float_color(world.color)) ambient_chunk.add_subchunk(ambient_light) object_info.add_subchunk(ambient_chunk) + if world.use_nodes: + ntree = world.node_tree.links + background_color = _3ds_chunk(RGB) + background_chunk = _3ds_chunk(SOLIDBACKGND) + background_flag = _3ds_chunk(USE_SOLIDBGND) + bgcol, bgtex, nworld = 'BACKGROUND', 'TEX_IMAGE', 'OUTPUT_WORLD' + bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.to_node.type == nworld), world.color) + bg_image = next((lk.from_node.image.name for lk in ntree if lk.from_node.type == bgtex and lk.to_node.type in {bgcol, nworld}), False) + background_color.add_variable("color", _3ds_float_color(bg_color)) + background_chunk.add_subchunk(background_color) + if bg_image: + background_image = _3ds_chunk(BITMAP) + background_flag = _3ds_chunk(USE_BITMAP) + background_image.add_variable("image", _3ds_string(sane_name(bg_image))) + object_info.add_subchunk(background_image) + object_info.add_subchunk(background_chunk) + object_info.add_subchunk(background_flag) if write_keyframe and world.animation_data: kfdata.add_subchunk(make_ambient_node(world)) @@ -1564,7 +1587,7 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use mesh_objects = [] if use_selection: - objects = [ob for ob in scene.objects if ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)] + objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)] else: objects = [ob for ob in 
scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer)] -- 2.30.2 From 09f12191459f837463307e45acc22f5a3206ed19 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Thu, 27 Jul 2023 16:40:27 +0200 Subject: [PATCH 15/78] io_scene_3ds: Added object filter to import options --- io_scene_3ds/__init__.py | 14 ++- io_scene_3ds/import_3ds.py | 206 +++++++++++++++++++------------------ 2 files changed, 120 insertions(+), 100 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 6897217f8..176722ab4 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -66,6 +66,17 @@ class Import3DS(bpy.types.Operator, ImportHelper): "(Warning, may be slow)", default=True, ) + object_filter: EnumProperty( + name="Object Filter", options={'ENUM_FLAG'}, + items=(('WORLD',"World".rjust(11),"",'WORLD_DATA',0x1), + ('MESH',"Mesh".rjust(11),"",'MESH_DATA',0x2), + ('LIGHT',"Light".rjust(12),"",'LIGHT_DATA',0x4), + ('CAMERA',"Camera".rjust(11),"",'CAMERA_DATA',0x8), + ('EMPTY',"Empty".rjust(11),"",'EMPTY_DATA',0x10), + ), + description="Object types to export", + default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, + ) use_apply_transform: BoolProperty( name="Apply Transform", description="Workaround for object transformations " @@ -124,6 +135,7 @@ class MAX3DS_PT_import_include(bpy.types.Panel): operator = sfile.active_operator layout.prop(operator, "use_image_search") + layout.column().prop(operator, "object_filter") layout.prop(operator, "read_keyframe") @@ -186,7 +198,7 @@ class Export3DS(bpy.types.Operator, ExportHelper): description="Export selected objects only", default=False, ) - object_filter: bpy.props.EnumProperty( + object_filter: EnumProperty( name="Object Filter", options={'ENUM_FLAG'}, items=(('WORLD', "World".rjust(11), "", 'WORLD_DATA',0x1), ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2), diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index c7dc5d1d6..89ad73465 100644 --- 
a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -25,7 +25,6 @@ PRIMARY = 0x4D4D # >----- Main Chunks OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information VERSION = 0x0002 # This gives the version of the .3ds file -AMBIENTLIGHT = 0x2100 # The color of the ambient light EDITKEYFRAME = 0xB000 # This is the header for all of the key frame info # >----- Data Chunks, used for various attributes @@ -38,6 +37,7 @@ PCT_FLOAT = 0x0031 # percentage float MASTERSCALE = 0x0100 # Master scale factor # >----- sub defines of OBJECTINFO +AMBIENTLIGHT = 0x2100 # The color of the ambient light MATERIAL = 0xAFFF # This stored the texture info OBJECT = 0x4000 # This stores the faces, vertices, etc... @@ -131,13 +131,13 @@ OBJECT_SMOOTH = 0x4150 # The objects face smooth groups OBJECT_TRANS_MATRIX = 0x4160 # The objects Matrix # >------ sub defines of EDITKEYFRAME -KFDATA_AMBIENT = 0xB001 # Keyframe ambient node -KFDATA_OBJECT = 0xB002 # Keyframe object node -KFDATA_CAMERA = 0xB003 # Keyframe camera node -KFDATA_TARGET = 0xB004 # Keyframe target node -KFDATA_LIGHT = 0xB005 # Keyframe light node -KFDATA_LTARGET = 0xB006 # Keyframe light target node -KFDATA_SPOTLIGHT = 0xB007 # Keyframe spotlight node +KF_AMBIENT = 0xB001 # Keyframe ambient node +KF_OBJECT = 0xB002 # Keyframe object node +KF_OBJECT_CAMERA = 0xB003 # Keyframe camera node +KF_TARGET_CAMERA = 0xB004 # Keyframe target node +KF_OBJECT_LIGHT = 0xB005 # Keyframe light node +KF_TARGET_LIGHT = 0xB006 # Keyframe light target node +KF_OBJECT_SPOT_LIGHT = 0xB007 # Keyframe spotlight node KFDATA_KFSEG = 0xB008 # Keyframe start and stop KFDATA_CURTIME = 0xB009 # Keyframe current frame KFDATA_KFHDR = 0xB00A # Keyframe node header @@ -326,7 +326,7 @@ childs_list = [] parent_list = [] def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, - IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE): + FILTER, IMAGE_SEARCH, 
WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE): contextObName = None contextLamp = None @@ -470,6 +470,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI CreateLightObject = False CreateTrackData = False + CreateWorld = 'WORLD' in FILTER + CreateMesh = 'MESH' in FILTER + CreateLight = 'LIGHT' in FILTER + CreateCamera = 'CAMERA' in FILTER + CreateEmpty = 'EMPTY' in FILTER + def read_short(temp_chunk): temp_data = file.read(SZ_U_SHORT) temp_chunk.bytes_read += SZ_U_SHORT @@ -666,7 +672,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI print("\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version) # is it an ambient light chunk? - elif new_chunk.ID == AMBIENTLIGHT: + elif CreateWorld and new_chunk.ID == AMBIENTLIGHT: path, filename = os.path.split(file.name) realname, ext = os.path.splitext(filename) world = bpy.data.worlds.new("Ambient: " + realname) @@ -683,7 +689,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # is it an object info chunk? 
elif new_chunk.ID == OBJECTINFO: process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, - IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE) + FILTER, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE) # keep track of how much we read in the main chunk new_chunk.bytes_read += temp_chunk.bytes_read @@ -692,15 +698,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif new_chunk.ID == OBJECT: if CreateBlenderObject: - putContextMesh( - context, - contextMesh_vertls, - contextMesh_facels, - contextMesh_flag, - contextMeshMaterials, - contextMesh_smooth, - WORLD_MATRIX - ) + putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMesh_flag, + contextMeshMaterials, contextMesh_smooth, WORLD_MATRIX) + contextMesh_vertls = [] contextMesh_facels = [] contextMeshMaterials = [] @@ -709,7 +709,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextMeshUV = None contextMatrix = None - CreateBlenderObject = True + CreateBlenderObject = True if CreateMesh else False contextObName, read_str_len = read_string(file) new_chunk.bytes_read += read_str_len @@ -867,13 +867,13 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif new_chunk.ID == OBJECT_MESH: pass - elif new_chunk.ID == OBJECT_VERTICES: + elif CreateMesh and new_chunk.ID == OBJECT_VERTICES: """Worldspace vertex locations""" num_verts = read_short(new_chunk) contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(SZ_3FLOAT * num_verts)) new_chunk.bytes_read += SZ_3FLOAT * num_verts - elif new_chunk.ID == OBJECT_FACES: + elif CreateMesh and new_chunk.ID == OBJECT_FACES: num_faces = read_short(new_chunk) temp_data = file.read(SZ_4U_SHORT * num_faces) new_chunk.bytes_read += SZ_4U_SHORT * num_faces # 4 short ints x 2 bytes each @@ -881,7 +881,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextMesh_flag = 
[contextMesh_facels[i] for i in range(3, (num_faces * 4) + 3, 4)] contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)] - elif new_chunk.ID == OBJECT_MATERIAL: + elif CreateMesh and new_chunk.ID == OBJECT_MATERIAL: material_name, read_str_len = read_string(file) new_chunk.bytes_read += read_str_len # remove 1 null character. num_faces_using_mat = read_short(new_chunk) @@ -891,19 +891,19 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextMeshMaterials.append((material_name, temp_data)) # look up the material in all the materials - elif new_chunk.ID == OBJECT_SMOOTH: + elif CreateMesh and new_chunk.ID == OBJECT_SMOOTH: temp_data = file.read(SZ_U_INT * num_faces) smoothgroup = struct.unpack('<%dI' % (num_faces), temp_data) new_chunk.bytes_read += SZ_U_INT * num_faces contextMesh_smooth = smoothgroup - elif new_chunk.ID == OBJECT_UV: + elif CreateMesh and new_chunk.ID == OBJECT_UV: num_uv = read_short(new_chunk) temp_data = file.read(SZ_2FLOAT * num_uv) new_chunk.bytes_read += SZ_2FLOAT * num_uv contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data) - elif new_chunk.ID == OBJECT_TRANS_MATRIX: + elif CreateMesh and new_chunk.ID == OBJECT_TRANS_MATRIX: # How do we know the matrix size? 
54 == 4x4 48 == 4x3 temp_data = file.read(SZ_4x3MAT) mtx = list(struct.unpack(' Date: Thu, 27 Jul 2023 16:50:58 +0200 Subject: [PATCH 16/78] Import_3ds: Typo / style clean --- io_scene_3ds/import_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 89ad73465..567aa2dbe 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1036,7 +1036,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif new_chunk.ID == OBJECT_NODE_HDR: object_name, read_str_len = read_string(file) new_chunk.bytes_read += read_str_len - new_data = file.read(SZ_U_INT) + temp_data = file.read(SZ_U_INT) new_chunk.bytes_read += SZ_U_INT hierarchy = read_short(new_chunk) child = object_dictionary.get(object_name) -- 2.30.2 From 67f0607b6483a79710de27e4f22c1c1b50efdf62 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Thu, 27 Jul 2023 16:55:51 +0200 Subject: [PATCH 17/78] Import_3ds: Clear trailing space --- io_scene_3ds/import_3ds.py | 1 - 1 file changed, 1 deletion(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 567aa2dbe..3d5e9f945 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -696,7 +696,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # is it an object chunk? 
elif new_chunk.ID == OBJECT: - if CreateBlenderObject: putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMesh_flag, contextMeshMaterials, contextMesh_smooth, WORLD_MATRIX) -- 2.30.2 From c177741cd4e61a71291e3ea0958088196b39f626 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Thu, 27 Jul 2023 17:00:29 +0200 Subject: [PATCH 18/78] Export_3ds: Changed chunk order --- io_scene_3ds/export_3ds.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index fbd6dc5e5..f40659046 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -24,12 +24,12 @@ from bpy_extras import node_shader_utils PRIMARY = 0x4D4D # >----- Main Chunks +OBJECTINFO = 0x3D3D # Main mesh object chunk before material and object information +MESHVERSION = 0x3D3E # This gives the version of the mesh VERSION = 0x0002 # This gives the version of the .3ds file KFDATA = 0xB000 # This is the header for all of the keyframe info # >----- sub defines of OBJECTINFO -OBJECTINFO = 0x3D3D # Main mesh object chunk before material and object information -MESHVERSION = 0x3D3E # This gives the version of the mesh BITMAP = 0x1100 # The background image name USE_BITMAP = 0x1101 # The background image flag SOLIDBACKGND = 0x1200 # The background color (RGB) -- 2.30.2 From 8a4902ea54ed5245fdf7d6f4c30b07ff597960db Mon Sep 17 00:00:00 2001 From: NRGSille Date: Thu, 27 Jul 2023 18:07:55 +0200 Subject: [PATCH 19/78] Import_3ds: Remove None from object list --- io_scene_3ds/import_3ds.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 3d5e9f945..cd6c10f6a 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1267,6 +1267,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextMeshMaterials, contextMesh_smooth, WORLD_MATRIX) # Assign parents to objects + while None in object_list: + 
object_list.remove(None) + # check _if_ we need to assign first because doing so recalcs the depsgraph for ind, ob in enumerate(object_list): parent = object_parent[ind] -- 2.30.2 From f18b8463b6533bb474f43c451bb6f10c126f262b Mon Sep 17 00:00:00 2001 From: NRGSille Date: Thu, 27 Jul 2023 18:15:54 +0200 Subject: [PATCH 20/78] io_scene_3ds: Fixed object filter description --- io_scene_3ds/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 176722ab4..0430aa68f 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -74,7 +74,7 @@ class Import3DS(bpy.types.Operator, ImportHelper): ('CAMERA',"Camera".rjust(11),"",'CAMERA_DATA',0x8), ('EMPTY',"Empty".rjust(11),"",'EMPTY_DATA',0x10), ), - description="Object types to export", + description="Object types to import", default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, ) use_apply_transform: BoolProperty( -- 2.30.2 From 7116a3fa65be44173118b9827dcfc427ace945b0 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Thu, 27 Jul 2023 18:43:50 +0200 Subject: [PATCH 21/78] Import_3ds: Avoid any None in lists --- io_scene_3ds/import_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index cd6c10f6a..e16db52f2 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1049,7 +1049,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI imported_objects.append(child) else: tracking = tracktype = None - if tracktype != 'TARGET' and tracking != 'AMBIENT': + if child is not None and tracktype != 'TARGET' and tracking != 'AMBIENT': object_dict[object_id] = child object_list.append(child) object_parent.append(hierarchy) -- 2.30.2 From 4d4e74437dbed48130835e0693e784fc1987d8ab Mon Sep 17 00:00:00 2001 From: NRGSille Date: Fri, 28 Jul 2023 04:56:19 +0200 Subject: [PATCH 22/78] io_scene_3ds: Some beautify to the UI in file 
browser --- io_scene_3ds/__init__.py | 57 ++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 19 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 0430aa68f..eeddb37cd 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -56,7 +56,7 @@ class Import3DS(bpy.types.Operator, ImportHelper): default=10.0, ) convert_unit: BoolProperty( - name="Convert Units", + name="Scene Units", description="Converts to scene unit length settings", default=False, ) @@ -68,11 +68,11 @@ class Import3DS(bpy.types.Operator, ImportHelper): ) object_filter: EnumProperty( name="Object Filter", options={'ENUM_FLAG'}, - items=(('WORLD',"World".rjust(11),"",'WORLD_DATA',0x1), - ('MESH',"Mesh".rjust(11),"",'MESH_DATA',0x2), - ('LIGHT',"Light".rjust(12),"",'LIGHT_DATA',0x4), - ('CAMERA',"Camera".rjust(11),"",'CAMERA_DATA',0x8), - ('EMPTY',"Empty".rjust(11),"",'EMPTY_DATA',0x10), + items=(('WORLD', "World".rjust(11), "", 'WORLD_DATA', 0x1), + ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2), + ('LIGHT', "Light".rjust(12), "", 'LIGHT_DATA', 0x4), + ('CAMERA', "Camera".rjust(11), "", 'CAMERA_DATA', 0x8), + ('EMPTY', "Empty".rjust(11), "", 'EMPTY_DATA', 0x10), ), description="Object types to import", default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, @@ -84,7 +84,7 @@ class Import3DS(bpy.types.Operator, ImportHelper): default=True, ) read_keyframe: BoolProperty( - name="Read Keyframe", + name="Animation", description="Read the keyframe data", default=True, ) @@ -134,9 +134,13 @@ class MAX3DS_PT_import_include(bpy.types.Panel): sfile = context.space_data operator = sfile.active_operator - layout.prop(operator, "use_image_search") + layrow = layout.row(align=True) + layrow.prop(operator, "use_image_search") + layrow.label(text="", icon='OUTLINER_OB_IMAGE' if operator.use_image_search else 'IMAGE_DATA') layout.column().prop(operator, "object_filter") - layout.prop(operator, "read_keyframe") + layrow = layout.row(align=True) + 
layrow.prop(operator, "read_keyframe") + layrow.label(text="", icon='ANIM' if operator.read_keyframe else 'DECORATE_DRIVER') class MAX3DS_PT_import_transform(bpy.types.Panel): @@ -161,9 +165,15 @@ class MAX3DS_PT_import_transform(bpy.types.Panel): operator = sfile.active_operator layout.prop(operator, "constrain_size") - layout.prop(operator, "convert_unit") - layout.prop(operator, "use_apply_transform") - layout.prop(operator, "use_world_matrix") + layrow = layout.row(align=True) + layrow.prop(operator, "convert_unit") + layrow.label(text="", icon='EMPTY_ARROWS' if operator.convert_unit else 'EMPTY_DATA') + layrow = layout.row(align=True) + layrow.prop(operator, "use_apply_transform") + layrow.label(text="", icon='MESH_CUBE' if operator.use_apply_transform else 'MOD_SOLIDIFY') + layrow = layout.row(align=True) + layrow.prop(operator, "use_world_matrix") + layrow.label(text="", icon='WORLD' if operator.use_world_matrix else 'META_BALL') layout.prop(operator, "axis_forward") layout.prop(operator, "axis_up") @@ -194,7 +204,7 @@ class Export3DS(bpy.types.Operator, ExportHelper): default=False, ) use_selection: BoolProperty( - name="Selection Only", + name="Selection", description="Export selected objects only", default=False, ) @@ -210,12 +220,12 @@ class Export3DS(bpy.types.Operator, ExportHelper): default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, ) use_hierarchy: BoolProperty( - name="Export Hierarchy", + name="Hierarchy", description="Export hierarchy chunks", default=False, ) write_keyframe: BoolProperty( - name="Export Keyframes", + name="Animation", description="Write the keyframe data", default=False, ) @@ -260,10 +270,17 @@ class MAX3DS_PT_export_include(bpy.types.Panel): sfile = context.space_data operator = sfile.active_operator - layout.prop(operator, "use_selection") + layrow = layout.row(align=True) + layrow.prop(operator, "use_selection") + layrow.label(text="", icon='RESTRICT_SELECT_OFF' if operator.use_selection else 'RESTRICT_SELECT_ON') 
layout.column().prop(operator, "object_filter") - layout.prop(operator, "use_hierarchy") - layout.prop(operator, "write_keyframe") + layrow = layout.row(align=True) + layrow.prop(operator, "use_hierarchy") + layrow.label(text="", icon='OUTLINER' if operator.use_hierarchy else 'CON_CHILDOF') + layrow = layout.row(align=True) + layrow.prop(operator, "write_keyframe") + layrow.label(text="", icon='ANIM' if operator.write_keyframe else 'DECORATE_DRIVER') + layout.use_property_split = True class MAX3DS_PT_export_transform(bpy.types.Panel): @@ -288,7 +305,9 @@ class MAX3DS_PT_export_transform(bpy.types.Panel): operator = sfile.active_operator layout.prop(operator, "scale_factor") - layout.prop(operator, "apply_unit") + layrow = layout.row(align=True) + layrow.prop(operator, "apply_unit") + layrow.label(text="", icon='EMPTY_ARROWS' if operator.apply_unit else 'EMPTY_DATA') layout.prop(operator, "axis_forward") layout.prop(operator, "axis_up") -- 2.30.2 From 7fc7bf97bfea0e729881a79f6fa144f921fab26f Mon Sep 17 00:00:00 2001 From: NRGSille Date: Fri, 28 Jul 2023 09:07:46 +0200 Subject: [PATCH 23/78] Import_3ds: Keep None objects in list to preserve hierarchy --- io_scene_3ds/import_3ds.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index abe933a19..c68088f8a 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1054,7 +1054,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI imported_objects.append(child) else: tracking = tracktype = None - if child is not None and tracktype != 'TARGET' and tracking != 'AMBIENT': + if tracktype != 'TARGET' and tracking != 'AMBIENT': object_dict[object_id] = child object_list.append(child) object_parent.append(hierarchy) @@ -1272,14 +1272,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextMeshMaterials, contextMesh_smooth, WORLD_MATRIX) # Assign parents 
to objects - while None in object_list: - object_list.remove(None) - # check _if_ we need to assign first because doing so recalcs the depsgraph for ind, ob in enumerate(object_list): parent = object_parent[ind] if parent == ROOT_OBJECT: - ob.parent = None + if ob is not None: + ob.parent = None elif parent not in object_dict: try: ob.parent = object_list[parent] @@ -1289,7 +1287,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI try: ob.parent = object_dict.get(parent) except: # self to parent exception - ob.parent = None + object_list.remove(ob) #pivot_list[ind] += pivot_list[parent] # Not sure this is correct, should parent space matrix be applied before combining? @@ -1312,7 +1310,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # fix pivots for ind, ob in enumerate(object_list): - if ob.type == 'MESH': + if ob is None: # remove None + object_list.pop(ind) + elif ob.type == 'MESH': pivot = pivot_list[ind] pivot_matrix = object_matrix.get(ob, mathutils.Matrix()) # unlikely to fail pivot_matrix = mathutils.Matrix.Translation(-1 * pivot) -- 2.30.2 From 92bfdea648df16650161ac6c4cbc58a5dd7cabc6 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Fri, 28 Jul 2023 09:33:00 +0200 Subject: [PATCH 24/78] Import_3ds: Keep None in object list to preserve hierarchy --- io_scene_3ds/import_3ds.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index c68088f8a..af09d9316 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1281,13 +1281,13 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif parent not in object_dict: try: ob.parent = object_list[parent] - except: # seems one object is missing, so take previous one - ob.parent = object_list[parent - 1] + except: # seems object is None or not in list + object_list.pop(ind) else: # get parent from node_id number try: 
ob.parent = object_dict.get(parent) - except: # self to parent exception - object_list.remove(ob) + except: # object is None or self to parent exception + object_list.pop(ind) #pivot_list[ind] += pivot_list[parent] # Not sure this is correct, should parent space matrix be applied before combining? @@ -1311,7 +1311,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # fix pivots for ind, ob in enumerate(object_list): if ob is None: # remove None - object_list.pop(ind) + object_list.remove(ob) elif ob.type == 'MESH': pivot = pivot_list[ind] pivot_matrix = object_matrix.get(ob, mathutils.Matrix()) # unlikely to fail -- 2.30.2 From c55c3248d3b2e94a5e31f1acdd18d4b42950e42f Mon Sep 17 00:00:00 2001 From: NRGSille Date: Fri, 28 Jul 2023 18:23:14 +0200 Subject: [PATCH 25/78] Import_3ds: Added background and bitmap import --- io_scene_3ds/import_3ds.py | 61 ++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 5 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index af09d9316..fc2f73ae5 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -676,7 +676,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI if version > 3: print("\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version) - # is it an ambient light chunk? 
+ # If ambient light chunk elif CreateWorld and new_chunk.ID == AMBIENTLIGHT: path, filename = os.path.split(file.name) realname, ext = os.path.splitext(filename) @@ -691,6 +691,42 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI skip_to_end(file, temp_chunk) new_chunk.bytes_read += temp_chunk.bytes_read + # If background chunk + elif CreateWorld and new_chunk.ID == SOLIDBACKGND: + if context.scene.world is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + world = bpy.data.worlds.new("Background: " + realname) + context.scene.world = world + world = context.scene.world + world.use_nodes = True + read_chunk(file, temp_chunk) + if temp_chunk.ID == RGB: + world.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == RGBF: + world.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) + else: skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + + # If bitmap chunk + elif CreateWorld and new_chunk.ID == BITMAP: + bitmap_name, read_str_len = read_string(file) + bitmap = load_image(bitmap_name, dirname, place_holder=False, recursive=image_search, check_existing=True) + if context.scene.world is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + world = bpy.data.worlds.new("Bitmap: " + realname) + context.scene.world = world + world = context.scene.world + world.use_nodes = True + links = world.node_tree.links + nodes = world.node_tree.nodes + bitmapnode = nodes.new(type='ShaderNodeTexImage') + bitmapnode.label = bitmap_name + bitmapnode.location = (-300, 300) + links.new(bitmapnode.outputs['Color'], nodes['Background'].inputs[0]) + new_chunk.bytes_read += read_str_len + # is it an object info chunk? 
elif new_chunk.ID == OBJECTINFO: process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, @@ -1045,10 +1081,23 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI hierarchy = read_short(new_chunk) child = object_dictionary.get(object_name) if child is None: - if CreateWorld and object_name == '$AMBIENT$': + if CreateWorld and tracking == 'AMBIENT': child = context.scene.world child.use_nodes = True - elif CreateEmpty and object_name == '$$$DUMMY': + nodetree = child.node_tree + links = nodetree.links + nodes = nodetree.nodes + worldout = nodes['World Output'] + mixshade = nodes.new(type='ShaderNodeMixShader') + ambinode = nodes.new(type='ShaderNodeEmission') + ambinode.inputs[0].default_value[:3] = child.color + worldout.location = (600, 250) + mixshade.location = (300, 250) + links.new(mixshade.outputs[0], worldout.inputs['Surface']) + links.new(nodes['Background'].outputs[0], mixshade.inputs[1]) + links.new(ambinode.outputs[0], mixshade.inputs[2]) + ambinode.label = object_name if object_name != '$AMBIENT$' else "Ambient" + elif CreateEmpty and tracking == 'OBJECT' and object_name == '$$$DUMMY': child = bpy.data.objects.new(object_name, None) # Create an empty object context.view_layer.active_layer_collection.collection.objects.link(child) imported_objects.append(child) @@ -1083,9 +1132,10 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI pivot_list[len(pivot_list) - 1] = mathutils.Vector(pivot) elif new_chunk.ID == MORPH_SMOOTH and tracking == 'OBJECT': # Smooth angle - child.data.use_auto_smooth = True smooth_angle = read_float(new_chunk) - child.data.auto_smooth_angle = smooth_angle + if child.data is not None: # Check if child is a dummy + child.data.use_auto_smooth = True + child.data.auto_smooth_angle = smooth_angle elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'AMBIENT': # Ambient keyframe_data = {} @@ -1292,6 +1342,7 @@ def process_next_chunk(context, 
file, previous_chunk, imported_objects, CONSTRAI #pivot_list[ind] += pivot_list[parent] # Not sure this is correct, should parent space matrix be applied before combining? # if parent name + parent_dictionary.pop(None, ...) for par, objs in parent_dictionary.items(): parent = object_dictionary.get(par) for ob in objs: -- 2.30.2 From da86b608721d81e70a667a8ea6007daf7a0d1fec Mon Sep 17 00:00:00 2001 From: NRGSille Date: Fri, 28 Jul 2023 19:42:46 +0200 Subject: [PATCH 26/78] Import_3ds: Fixed mismatched letter --- io_scene_3ds/import_3ds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index fc2f73ae5..9b7fe551d 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1407,7 +1407,7 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, scn = context.scene if UNITS: - unit_length = sce.unit_settings.length_unit + unit_length = scn.unit_settings.length_unit if unit_length == 'KILOMETERS': MEASURE = 1000.0 elif unit_length == 'CENTIMETERS': -- 2.30.2 From 820c7a6ec0aca258ce5712e99f3255a3d250f9d6 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sat, 29 Jul 2023 00:45:53 +0200 Subject: [PATCH 27/78] io_scene_3ds: Fixed background and bitmap import --- io_scene_3ds/export_3ds.py | 10 +++--- io_scene_3ds/import_3ds.py | 72 ++++++++++++++++++++------------------ 2 files changed, 44 insertions(+), 38 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index f40659046..156c1dcba 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -37,6 +37,8 @@ USE_SOLIDBGND = 0x1201 # The background color flag VGRADIENT = 0x1300 # The background gradient colors USE_VGRADIENT = 0x1301 # The background gradient flag AMBIENTLIGHT = 0x2100 # The color of the ambient light +LAYER_FOG = 0x2302 # The fog atmosphere settings +USE_LAYER_FOG = 0x2303 # The fog atmosphere flag MATERIAL = 45055 # 0xAFFF // This stored the texture 
info OBJECT = 16384 # 0x4000 // This stores the faces, vertices, etc... @@ -1567,17 +1569,17 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use background_color = _3ds_chunk(RGB) background_chunk = _3ds_chunk(SOLIDBACKGND) background_flag = _3ds_chunk(USE_SOLIDBGND) - bgcol, bgtex, nworld = 'BACKGROUND', 'TEX_IMAGE', 'OUTPUT_WORLD' - bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.to_node.type == nworld), world.color) - bg_image = next((lk.from_node.image.name for lk in ntree if lk.from_node.type == bgtex and lk.to_node.type in {bgcol, nworld}), False) + amcol, bgcol, bgtex, nworld = 'EMISSION', 'BACKGROUND', 'TEX_ENVIRONMENT', 'OUTPUT_WORLD' + bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.to_node.type == bgcol), world.color) + bg_image = next((lk.from_node.image.name for lk in ntree if lk.from_node.type == bgtex and lk.to_node.type in {amcol, bgcol}), False) background_color.add_variable("color", _3ds_float_color(bg_color)) background_chunk.add_subchunk(background_color) + object_info.add_subchunk(background_chunk) if bg_image: background_image = _3ds_chunk(BITMAP) background_flag = _3ds_chunk(USE_BITMAP) background_image.add_variable("image", _3ds_string(sane_name(bg_image))) object_info.add_subchunk(background_image) - object_info.add_subchunk(background_chunk) object_info.add_subchunk(background_flag) if write_keyframe and world.animation_data: kfdata.add_subchunk(make_ambient_node(world)) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 9b7fe551d..363722387 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -44,6 +44,8 @@ USE_SOLIDBGND = 0x1201 # The background color flag VGRADIENT = 0x1300 # The background gradient colors USE_VGRADIENT = 0x1301 # The background gradient flag AMBIENTLIGHT = 0x2100 # The color of the ambient light +LAYER_FOG = 0x2302 # The fog atmosphere settings +USE_LAYER_FOG = 0x2303 # The fog 
atmosphere flag MATERIAL = 0xAFFF # This stored the texture info OBJECT = 0x4000 # This stores the faces, vertices, etc... @@ -335,6 +337,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI FILTER, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE): contextObName = None + contextWorld = None contextLamp = None contextCamera = None contextMaterial = None @@ -680,52 +683,52 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif CreateWorld and new_chunk.ID == AMBIENTLIGHT: path, filename = os.path.split(file.name) realname, ext = os.path.splitext(filename) - world = bpy.data.worlds.new("Ambient: " + realname) - context.scene.world = world + contextWorld = bpy.data.worlds.new("Ambient: " + realname) + context.scene.world = contextWorld read_chunk(file, temp_chunk) if temp_chunk.ID == COLOR_F: - context.scene.world.color[:] = read_float_array(temp_chunk) + contextWorld.color[:] = read_float_array(temp_chunk) elif temp_chunk.ID == LIN_COLOR_F: - context.scene.world.color[:] = read_float_array(temp_chunk) + contextWorld.color[:] = read_float_array(temp_chunk) else: skip_to_end(file, temp_chunk) new_chunk.bytes_read += temp_chunk.bytes_read # If background chunk elif CreateWorld and new_chunk.ID == SOLIDBACKGND: - if context.scene.world is None: + if contextWorld is None: path, filename = os.path.split(file.name) realname, ext = os.path.splitext(filename) - world = bpy.data.worlds.new("Background: " + realname) - context.scene.world = world - world = context.scene.world - world.use_nodes = True - read_chunk(file, temp_chunk) - if temp_chunk.ID == RGB: - world.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) - elif temp_chunk.ID == RGBF: - world.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) - else: skip_to_end(file, temp_chunk) - new_chunk.bytes_read += temp_chunk.bytes_read + contextWorld = 
bpy.data.worlds.new("Background: " + realname) + context.scene.world = contextWorld + else: + contextWorld.use_nodes = True + read_chunk(file, temp_chunk) + if temp_chunk.ID == RGB: + contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == RGBF: + contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) + else: skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read # If bitmap chunk elif CreateWorld and new_chunk.ID == BITMAP: bitmap_name, read_str_len = read_string(file) - bitmap = load_image(bitmap_name, dirname, place_holder=False, recursive=image_search, check_existing=True) - if context.scene.world is None: + if contextWorld is None: path, filename = os.path.split(file.name) realname, ext = os.path.splitext(filename) - world = bpy.data.worlds.new("Bitmap: " + realname) - context.scene.world = world - world = context.scene.world - world.use_nodes = True - links = world.node_tree.links - nodes = world.node_tree.nodes - bitmapnode = nodes.new(type='ShaderNodeTexImage') - bitmapnode.label = bitmap_name - bitmapnode.location = (-300, 300) - links.new(bitmapnode.outputs['Color'], nodes['Background'].inputs[0]) - new_chunk.bytes_read += read_str_len + contextWorld = bpy.data.worlds.new("Bitmap: " + realname) + context.scene.world = contextWorld + else: + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + bitmapnode = nodes.new(type='ShaderNodeTexEnvironment') + bitmapnode.label = bitmap_name + bitmapnode.location = (-300, 300) + bitmapnode.image = load_image(bitmap_name, dirname, place_holder=False, recursive=image_search, check_existing=True) + links.new(bitmapnode.outputs['Color'], nodes['Background'].inputs[0]) + new_chunk.bytes_read += read_str_len # is it an object info chunk? 
elif new_chunk.ID == OBJECTINFO: @@ -1091,8 +1094,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI mixshade = nodes.new(type='ShaderNodeMixShader') ambinode = nodes.new(type='ShaderNodeEmission') ambinode.inputs[0].default_value[:3] = child.color - worldout.location = (600, 250) - mixshade.location = (300, 250) + ambinode.location = (10, 150) + worldout.location = (600, 200) + mixshade.location = (300, 300) links.new(mixshade.outputs[0], worldout.inputs['Surface']) links.new(nodes['Background'].outputs[0], mixshade.inputs[1]) links.new(ambinode.outputs[0], mixshade.inputs[2]) @@ -1141,12 +1145,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI keyframe_data = {} default_data = child.color[:] child.color = read_track_data(new_chunk)[0] - child.node_tree.nodes['Background'].inputs[0].default_value[:3] = child.color + ambinode.inputs[0].default_value[:3] = child.color for keydata in keyframe_data.items(): child.color = keydata[1] child.keyframe_insert(data_path="color", frame=keydata[0]) - child.node_tree.nodes['Background'].inputs[0].default_value[:3] = keydata[1] - child.node_tree.keyframe_insert(data_path="nodes[\"Background\"].inputs[0].default_value", frame=keydata[0]) + ambinode.inputs[0].default_value[:3] = keydata[1] + nodetree.keyframe_insert(data_path="nodes[\"Emission\"].inputs[0].default_value", frame=keydata[0]) contextTrack_flag = False elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'LIGHT': # Color -- 2.30.2 From 90c5f4aead6f659af4c981a61f26ec3bf3cadb55 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sat, 29 Jul 2023 05:33:58 +0200 Subject: [PATCH 28/78] Import_3ds: Added fog atmosphere settings import --- io_scene_3ds/import_3ds.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 363722387..a3c3a01c3 100644 --- a/io_scene_3ds/import_3ds.py +++ 
b/io_scene_3ds/import_3ds.py @@ -704,9 +704,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI else: contextWorld.use_nodes = True read_chunk(file, temp_chunk) - if temp_chunk.ID == RGB: + if temp_chunk.ID == COLOR_F: contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) - elif temp_chunk.ID == RGBF: + elif temp_chunk.ID == LIN_COLOR_F: contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) else: skip_to_end(file, temp_chunk) new_chunk.bytes_read += temp_chunk.bytes_read @@ -730,6 +730,37 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI links.new(bitmapnode.outputs['Color'], nodes['Background'].inputs[0]) new_chunk.bytes_read += read_str_len + # If fog chunk + elif new_chunk.ID == LAYER_FOG: + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("LayerFog: " + realname) + context.scene.world = contextWorld + else: + contextWorld.use_nodes = True + links = newWorld.node_tree.links + nodes = newWorld.node_tree.nodes + context.view_layer.use_pass_mist = False + layerfog = nodes.new(type='ShaderNodeVolumeScatter') + layerfog.label = "Layer Fog" + layerfog.location = (300, 100) + links.new(layerfog.outputs['Volume'], nodes['World Output'].inputs['Volume']) + world.mist_settings.start = read_float(new_chunk) + world.mist_settings.depth = read_float(new_chunk) + layerfog.inputs[1].default_value = read_float(new_chunk) + layerfogflag = read_long(new_chunk) + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + elif new_chunk.ID == USE_LAYER_FOG: 
+ context.view_layer.use_pass_mist = True + # is it an object info chunk? elif new_chunk.ID == OBJECTINFO: process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, -- 2.30.2 From 7d0c49648749eb8bc75f2a6eeb1e864723606636 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sat, 29 Jul 2023 05:38:33 +0200 Subject: [PATCH 29/78] Import_3ds: Added world boolean to fog import --- io_scene_3ds/import_3ds.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index a3c3a01c3..3a997e0c7 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -731,7 +731,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI new_chunk.bytes_read += read_str_len # If fog chunk - elif new_chunk.ID == LAYER_FOG: + elif CreateWorld and new_chunk.ID == LAYER_FOG: if contextWorld is None: path, filename = os.path.split(file.name) realname, ext = os.path.splitext(filename) @@ -758,7 +758,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI else: skip_to_end(file, temp_chunk) new_chunk.bytes_read += temp_chunk.bytes_read - elif new_chunk.ID == USE_LAYER_FOG: + elif CreateWorld and new_chunk.ID == USE_LAYER_FOG: context.view_layer.use_pass_mist = True # is it an object info chunk? 
-- 2.30.2 From 291b2517a9e2559faebecc9316caafd1b58cb69e Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sat, 29 Jul 2023 05:47:17 +0200 Subject: [PATCH 30/78] Import_3ds: Fixed world boolean --- io_scene_3ds/import_3ds.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 3a997e0c7..9b431bcdb 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -739,15 +739,15 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI context.scene.world = contextWorld else: contextWorld.use_nodes = True - links = newWorld.node_tree.links - nodes = newWorld.node_tree.nodes + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes context.view_layer.use_pass_mist = False layerfog = nodes.new(type='ShaderNodeVolumeScatter') layerfog.label = "Layer Fog" layerfog.location = (300, 100) links.new(layerfog.outputs['Volume'], nodes['World Output'].inputs['Volume']) - world.mist_settings.start = read_float(new_chunk) - world.mist_settings.depth = read_float(new_chunk) + contextWorld.mist_settings.start = read_float(new_chunk) + contextWorld.mist_settings.depth = read_float(new_chunk) layerfog.inputs[1].default_value = read_float(new_chunk) layerfogflag = read_long(new_chunk) read_chunk(file, temp_chunk) -- 2.30.2 From 2c7c68ee3b786028db4bfaf0605213b19a6418bc Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sat, 29 Jul 2023 15:46:31 +0200 Subject: [PATCH 31/78] io_scene_3ds: Added fog atmosphere settings --- io_scene_3ds/export_3ds.py | 35 ++++++++++++---- io_scene_3ds/import_3ds.py | 81 ++++++++++++++++++++------------------ 2 files changed, 71 insertions(+), 45 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 156c1dcba..34243e9dd 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1566,21 +1566,42 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use 
object_info.add_subchunk(ambient_chunk) if world.use_nodes: ntree = world.node_tree.links - background_color = _3ds_chunk(RGB) + background_color_chunk = _3ds_chunk(RGB) background_chunk = _3ds_chunk(SOLIDBACKGND) background_flag = _3ds_chunk(USE_SOLIDBGND) - amcol, bgcol, bgtex, nworld = 'EMISSION', 'BACKGROUND', 'TEX_ENVIRONMENT', 'OUTPUT_WORLD' - bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.to_node.type == bgcol), world.color) - bg_image = next((lk.from_node.image.name for lk in ntree if lk.from_node.type == bgtex and lk.to_node.type in {amcol, bgcol}), False) - background_color.add_variable("color", _3ds_float_color(bg_color)) - background_chunk.add_subchunk(background_color) - object_info.add_subchunk(background_chunk) + bgshader = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD' + bgtexture = 'TEX_IMAGE', 'TEX_ENVIRONMENT' + acol, bcol = 'EMISSION', 'BACKGROUND' + bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bcol and lk.to_node.type in bgshader), world.color) + bg_image = next((lk.from_node.image.name for lk in ntree if lk.from_node.type in bgtexture and lk.to_node.type in {acol, bcol}), False) + background_color_chunk.add_variable("color", _3ds_float_color(bg_color)) + background_chunk.add_subchunk(background_color_chunk) if bg_image: background_image = _3ds_chunk(BITMAP) background_flag = _3ds_chunk(USE_BITMAP) background_image.add_variable("image", _3ds_string(sane_name(bg_image))) object_info.add_subchunk(background_image) + object_info.add_subchunk(background_chunk) object_info.add_subchunk(background_flag) + fogshader = next((lk.from_socket.node for lk in ntree if lk.from_socket.identifier and lk.to_socket.identifier == 'Volume'), False) + if fogshader: + fogflag = 0 + if world.mist_settings.falloff == 'QUADRATIC': + fogflag |= 0x1 + if world.mist_settings.falloff == 'INVERSE_QUADRATIC': + fogflag |= 0x2 + fog_chunk = _3ds_chunk(LAYER_FOG) + fog_color_chunk = 
_3ds_chunk(RGB) + use_fog_flag = _3ds_chunk(USE_LAYER_FOG) + fog_color_chunk.add_variable("color", _3ds_float_color(fogshader.inputs[0].default_value[:3])) + fog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start)) + fog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.depth)) + fog_chunk.add_variable("density", _3ds_float(fogshader.inputs[1].default_value)) + fog_chunk.add_variable("flags", _3ds_uint(fogflag)) + fog_chunk.add_subchunk(fog_color_chunk) + object_info.add_subchunk(fog_chunk) + if layer.use_pass_mist: + object_info.add_subchunk(use_fog_flag) if write_keyframe and world.animation_data: kfdata.add_subchunk(make_ambient_node(world)) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 9b431bcdb..bba5e4703 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -701,15 +701,15 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI realname, ext = os.path.splitext(filename) contextWorld = bpy.data.worlds.new("Background: " + realname) context.scene.world = contextWorld + contextWorld.use_nodes = True + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) else: - contextWorld.use_nodes = True - read_chunk(file, temp_chunk) - if temp_chunk.ID == COLOR_F: - contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) - elif temp_chunk.ID == LIN_COLOR_F: - contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) - else: skip_to_end(file, temp_chunk) - new_chunk.bytes_read += temp_chunk.bytes_read + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read # If bitmap chunk elif CreateWorld and new_chunk.ID == BITMAP: 
@@ -719,45 +719,50 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI realname, ext = os.path.splitext(filename) contextWorld = bpy.data.worlds.new("Bitmap: " + realname) context.scene.world = contextWorld - else: - contextWorld.use_nodes = True - links = contextWorld.node_tree.links - nodes = contextWorld.node_tree.nodes - bitmapnode = nodes.new(type='ShaderNodeTexEnvironment') - bitmapnode.label = bitmap_name - bitmapnode.location = (-300, 300) - bitmapnode.image = load_image(bitmap_name, dirname, place_holder=False, recursive=image_search, check_existing=True) - links.new(bitmapnode.outputs['Color'], nodes['Background'].inputs[0]) - new_chunk.bytes_read += read_str_len + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + bitmapnode = nodes.new(type='ShaderNodeTexEnvironment') + bitmapnode.label = bitmap_name + bitmapnode.location = (-300, 300) + bitmapnode.image = load_image(bitmap_name, dirname, place_holder=False, recursive=image_search, check_existing=True) + links.new(bitmapnode.outputs['Color'], nodes['Background'].inputs[0]) + new_chunk.bytes_read += read_str_len # If fog chunk elif CreateWorld and new_chunk.ID == LAYER_FOG: + """Fog options flags are bit 20 (0x100000) for background fogging, + bit 0 (0x1) for bottom falloff, and bit 1 (0x2) for top falloff.""" if contextWorld is None: path, filename = os.path.split(file.name) realname, ext = os.path.splitext(filename) contextWorld = bpy.data.worlds.new("LayerFog: " + realname) context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + layerfog = nodes.new(type='ShaderNodeVolumeScatter') + layerfog.label = "Layer Fog" + layerfog.location = (300, 100) + links.new(layerfog.outputs['Volume'], nodes['World Output'].inputs['Volume']) + context.view_layer.use_pass_mist = False + contextWorld.mist_settings.use_mist = True + 
contextWorld.mist_settings.start = read_float(new_chunk) + contextWorld.mist_settings.depth = read_float(new_chunk) + layerfog.inputs[1].default_value = read_float(new_chunk) + layerfog_flag = read_long(new_chunk) + if layerfog_flag & 0x1: + contextWorld.mist_settings.falloff = 'QUADRATIC' + if layerfog_flag & 0x2: + contextWorld.mist_settings.falloff = 'INVERSE_QUADRATIC' + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) else: - contextWorld.use_nodes = True - links = contextWorld.node_tree.links - nodes = contextWorld.node_tree.nodes - context.view_layer.use_pass_mist = False - layerfog = nodes.new(type='ShaderNodeVolumeScatter') - layerfog.label = "Layer Fog" - layerfog.location = (300, 100) - links.new(layerfog.outputs['Volume'], nodes['World Output'].inputs['Volume']) - contextWorld.mist_settings.start = read_float(new_chunk) - contextWorld.mist_settings.depth = read_float(new_chunk) - layerfog.inputs[1].default_value = read_float(new_chunk) - layerfogflag = read_long(new_chunk) - read_chunk(file, temp_chunk) - if temp_chunk.ID == COLOR_F: - layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) - elif temp_chunk.ID == LIN_COLOR_F: - layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) - else: - skip_to_end(file, temp_chunk) - new_chunk.bytes_read += temp_chunk.bytes_read + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read elif CreateWorld and new_chunk.ID == USE_LAYER_FOG: context.view_layer.use_pass_mist = True -- 2.30.2 From bd9a39823a73e6bfbeb8b2c63034d599e8496054 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sun, 30 Jul 2023 02:26:35 +0200 Subject: [PATCH 32/78] io_scene_3ds: Added cursor location --- io_scene_3ds/__init__.py | 45 +++++++++++++++++++++++++------------- io_scene_3ds/export_3ds.py | 31 
++++++++++++++++---------- io_scene_3ds/import_3ds.py | 27 +++++++++++++---------- 3 files changed, 66 insertions(+), 37 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index eeddb37cd..c0c13aa1b 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -55,7 +55,7 @@ class Import3DS(bpy.types.Operator, ImportHelper): soft_min=0.0, soft_max=1000.0, default=10.0, ) - convert_unit: BoolProperty( + use_scene_unit: BoolProperty( name="Scene Units", description="Converts to scene unit length settings", default=False, @@ -72,7 +72,7 @@ class Import3DS(bpy.types.Operator, ImportHelper): ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2), ('LIGHT', "Light".rjust(12), "", 'LIGHT_DATA', 0x4), ('CAMERA', "Camera".rjust(11), "", 'CAMERA_DATA', 0x8), - ('EMPTY', "Empty".rjust(11), "", 'EMPTY_DATA', 0x10), + ('EMPTY', "Empty".rjust(11), "", 'EMPTY_AXIS', 0x10), ), description="Object types to import", default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, @@ -83,7 +83,7 @@ class Import3DS(bpy.types.Operator, ImportHelper): "importing incorrectly", default=True, ) - read_keyframe: BoolProperty( + use_keyframes: BoolProperty( name="Animation", description="Read the keyframe data", default=True, @@ -93,6 +93,11 @@ class Import3DS(bpy.types.Operator, ImportHelper): description="Transform to matrix world", default=False, ) + use_cursor: BoolProperty( + name="Cursor Origin", + description="Read the 3D cursor location", + default=False, + ) def execute(self, context): from . 
import import_3ds @@ -139,8 +144,11 @@ class MAX3DS_PT_import_include(bpy.types.Panel): layrow.label(text="", icon='OUTLINER_OB_IMAGE' if operator.use_image_search else 'IMAGE_DATA') layout.column().prop(operator, "object_filter") layrow = layout.row(align=True) - layrow.prop(operator, "read_keyframe") - layrow.label(text="", icon='ANIM' if operator.read_keyframe else 'DECORATE_DRIVER') + layrow.prop(operator, "use_keyframes") + layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER') + layrow = layout.row(align=True) + layrow.prop(operator, "use_cursor") + layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR') class MAX3DS_PT_import_transform(bpy.types.Panel): @@ -166,8 +174,8 @@ class MAX3DS_PT_import_transform(bpy.types.Panel): layout.prop(operator, "constrain_size") layrow = layout.row(align=True) - layrow.prop(operator, "convert_unit") - layrow.label(text="", icon='EMPTY_ARROWS' if operator.convert_unit else 'EMPTY_DATA') + layrow.prop(operator, "use_scene_unit") + layrow.label(text="", icon='EMPTY_ARROWS' if operator.use_scene_unit else 'EMPTY_DATA') layrow = layout.row(align=True) layrow.prop(operator, "use_apply_transform") layrow.label(text="", icon='MESH_CUBE' if operator.use_apply_transform else 'MOD_SOLIDIFY') @@ -198,7 +206,7 @@ class Export3DS(bpy.types.Operator, ExportHelper): soft_min=0.0, soft_max=100000.0, default=1.0, ) - apply_unit: BoolProperty( + use_scene_unit: BoolProperty( name="Scene Units", description="Take the scene unit length settings into account", default=False, @@ -214,7 +222,7 @@ class Export3DS(bpy.types.Operator, ExportHelper): ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2), ('LIGHT', "Light".rjust(12), "", 'LIGHT_DATA',0x4), ('CAMERA', "Camera".rjust(11), "", 'CAMERA_DATA',0x8), - ('EMPTY', "Empty".rjust(11), "", 'EMPTY_DATA',0x10), + ('EMPTY', "Empty".rjust(11), "", 'EMPTY_AXIS',0x10), ), description="Object types to export", default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 
'EMPTY'}, @@ -224,11 +232,16 @@ class Export3DS(bpy.types.Operator, ExportHelper): description="Export hierarchy chunks", default=False, ) - write_keyframe: BoolProperty( + use_keyframes: BoolProperty( name="Animation", description="Write the keyframe data", default=False, ) + use_cursor: BoolProperty( + name="Cursor Origin", + description="Save the 3D cursor location", + default=False, + ) def execute(self, context): from . import export_3ds @@ -278,9 +291,11 @@ class MAX3DS_PT_export_include(bpy.types.Panel): layrow.prop(operator, "use_hierarchy") layrow.label(text="", icon='OUTLINER' if operator.use_hierarchy else 'CON_CHILDOF') layrow = layout.row(align=True) - layrow.prop(operator, "write_keyframe") - layrow.label(text="", icon='ANIM' if operator.write_keyframe else 'DECORATE_DRIVER') - layout.use_property_split = True + layrow.prop(operator, "use_keyframes") + layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER') + layrow = layout.row(align=True) + layrow.prop(operator, "use_cursor") + layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR') class MAX3DS_PT_export_transform(bpy.types.Panel): @@ -306,8 +321,8 @@ class MAX3DS_PT_export_transform(bpy.types.Panel): layout.prop(operator, "scale_factor") layrow = layout.row(align=True) - layrow.prop(operator, "apply_unit") - layrow.label(text="", icon='EMPTY_ARROWS' if operator.apply_unit else 'EMPTY_DATA') + layrow.prop(operator, "use_scene_unit") + layrow.label(text="", icon='EMPTY_ARROWS' if operator.use_scene_unit else 'EMPTY_DATA') layout.prop(operator, "axis_forward") layout.prop(operator, "axis_up") diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index d4d53b834..0ffedc299 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -36,6 +36,7 @@ SOLIDBACKGND = 0x1200 # The background color (RGB) USE_SOLIDBGND = 0x1201 # The background color flag VGRADIENT = 0x1300 # The background gradient colors USE_VGRADIENT = 0x1301 
# The background gradient flag +O_CONSTS = 0x1500 # The origin of the 3D cursor AMBIENTLIGHT = 0x2100 # The color of the ambient light LAYER_FOG = 0x2302 # The fog atmosphere settings USE_LAYER_FOG = 0x2303 # The fog atmosphere flag @@ -1497,8 +1498,8 @@ def make_ambient_node(world): # EXPORT # ########## -def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use_selection=False, - object_filter=None, use_hierarchy=False, write_keyframe=False, global_matrix=None): +def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, use_selection=False, + object_filter=None, use_hierarchy=False, use_keyframes=False, global_matrix=None, use_cursor=False): """Save the Blender scene to a 3ds file.""" # Time the export @@ -1511,7 +1512,7 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use world = scene.world unit_measure = 1.0 - if apply_unit: + if use_scene_unit: unit_length = scene.unit_settings.length_unit if unit_length == 'KILOMETERS': unit_measure = 0.001 @@ -1549,21 +1550,29 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use mscale.add_variable("scale", _3ds_float(1.0)) object_info.add_subchunk(mscale) + # Add 3D cursor location + if use_cursor: + cursor_chunk = _3ds_chunk(O_CONSTS) + cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location)) + object_info.add_subchunk(cursor_chunk) + # Init main keyframe data chunk - if write_keyframe: + if use_keyframes: revision = 0x0005 stop = scene.frame_end start = scene.frame_start curtime = scene.frame_current kfdata = make_kfdata(revision, start, stop, curtime) - # Add AMBIENT, BACKGROUND and BITMAP + # Add AMBIENT color if world is not None and 'WORLD' in object_filter: ambient_chunk = _3ds_chunk(AMBIENTLIGHT) ambient_light = _3ds_chunk(RGB) ambient_light.add_variable("ambient", _3ds_float_color(world.color)) ambient_chunk.add_subchunk(ambient_light) object_info.add_subchunk(ambient_chunk) + + # Add 
BACKGROUND and BITMAP if world.use_nodes: ntree = world.node_tree.links background_color_chunk = _3ds_chunk(RGB) @@ -1604,7 +1613,7 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use object_info.add_subchunk(fog_chunk) if layer.use_pass_mist: object_info.add_subchunk(use_fog_flag) - if write_keyframe and world.animation_data: + if use_keyframes and world.animation_data: kfdata.add_subchunk(make_ambient_node(world)) # Make a list of all materials used in the selected meshes (use dictionary, each material is added once) @@ -1746,13 +1755,13 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use operator.report({'WARNING'}, "Object %r can't be written into a 3DS file") # Export object node - if write_keyframe: + if use_keyframes: kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) i += i # Create chunks for all empties - only requires a object node - if write_keyframe: + if use_keyframes: for ob in empty_objects: kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) @@ -1816,7 +1825,7 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use object_info.add_subchunk(object_chunk) # Export light and spotlight target node - if write_keyframe: + if use_keyframes: kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) if ob.data.type == 'SPOT': kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale, name_id)) @@ -1850,7 +1859,7 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use object_info.add_subchunk(object_chunk) # Export camera and target node - if write_keyframe: + if use_keyframes: kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale, name_id)) @@ -1858,7 +1867,7 @@ def save(operator, context, filepath="", scale_factor=1.0, apply_unit=False, use 
primary.add_subchunk(object_info) # Add main keyframe data chunk to primary chunk - if write_keyframe: + if use_keyframes: primary.add_subchunk(kfdata) # The chunk hierarchy is completely built, now check the size diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 12e606c48..4df55c7af 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -43,6 +43,7 @@ SOLIDBACKGND = 0x1200 # The background color (RGB) USE_SOLIDBGND = 0x1201 # The background color flag VGRADIENT = 0x1300 # The background gradient colors USE_VGRADIENT = 0x1301 # The background gradient flag +O_CONSTS = 0x1500 # The origin of the 3D cursor AMBIENTLIGHT = 0x2100 # The color of the ambient light LAYER_FOG = 0x2302 # The fog atmosphere settings USE_LAYER_FOG = 0x2303 # The fog atmosphere flag @@ -333,8 +334,8 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of childs_list = [] parent_list = [] -def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, - FILTER, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE): +def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, FILTER, + IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE, CURSOR): contextObName = None contextWorld = None @@ -679,6 +680,10 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI if version > 3: print("\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version) + # If cursor location + elif CURSOR and new_chunk.ID == O_CONSTS: + context.scene.cursor.location = read_float_array(new_chunk) + # If ambient light chunk elif CreateWorld and new_chunk.ID == AMBIENTLIGHT: path, filename = os.path.split(file.name) @@ -768,8 +773,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # is it an object info chunk? 
elif new_chunk.ID == OBJECTINFO: - process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, - FILTER, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE) + process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, FILTER, + IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE, CURSOR) # keep track of how much we read in the main chunk new_chunk.bytes_read += temp_chunk.bytes_read @@ -1416,7 +1421,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI ########## def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, FILTER=None, - WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None): + WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None, CURSOR=False): print("importing 3DS: %r..." % (filepath), end="") @@ -1458,8 +1463,8 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, MEASURE = 0.000001 imported_objects = [] # Fill this list with objects - process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN, - FILTER, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE) + process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN, FILTER, + IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE, CURSOR) # fixme, make unglobal object_dictionary.clear() @@ -1553,12 +1558,12 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, file.close() -def load(operator, context, filepath="", constrain_size=0.0, convert_unit=False, +def load(operator, context, filepath="", constrain_size=0.0, use_scene_unit=False, use_image_search=True, object_filter=None, use_world_matrix=False, - read_keyframe=True, use_apply_transform=True, global_matrix=None,): + use_keyframes=True, use_apply_transform=True, global_matrix=None, use_cursor=False): - load_3ds(filepath, context, CONSTRAIN=constrain_size, UNITS=convert_unit, + load_3ds(filepath, context, 
CONSTRAIN=constrain_size, UNITS=use_scene_unit, IMAGE_SEARCH=use_image_search, FILTER=object_filter, WORLD_MATRIX=use_world_matrix, - KEYFRAME=read_keyframe, APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix,) + KEYFRAME=use_keyframes, APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix, CURSOR=use_cursor,) return {'FINISHED'} -- 2.30.2 From 197ae2fe1883144c0afcef3d2e5217e221a9eb88 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sun, 30 Jul 2023 16:29:39 +0200 Subject: [PATCH 33/78] io_scene_3ds: Improved import and export of background color and images --- io_scene_3ds/export_3ds.py | 11 ++++++----- io_scene_3ds/import_3ds.py | 8 +++++++- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 0ffedc299..c9a67702c 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1578,11 +1578,12 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, background_color_chunk = _3ds_chunk(RGB) background_chunk = _3ds_chunk(SOLIDBACKGND) background_flag = _3ds_chunk(USE_SOLIDBGND) - bgshader = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD' - bgtexture = 'TEX_IMAGE', 'TEX_ENVIRONMENT' - acol, bcol = 'EMISSION', 'BACKGROUND' - bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bcol and lk.to_node.type in bgshader), world.color) - bg_image = next((lk.from_node.image.name for lk in ntree if lk.from_node.type in bgtexture and lk.to_node.type in {acol, bcol}), False) + bgtype = 'BACKGROUND' + bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD' + bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT' + bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color) + bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in {'MIX', 'MIX_RGB'} and lk.to_node.type == bgtype), bgtype) + bg_image = next((lk.from_node.image.name for 
lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False) background_color_chunk.add_variable("color", _3ds_float_color(bg_color)) background_chunk.add_subchunk(background_color_chunk) if bg_image: diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 8c3b4d54f..5f0971d18 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -735,11 +735,17 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextWorld.use_nodes = True links = contextWorld.node_tree.links nodes = contextWorld.node_tree.nodes + bitmap_mix = nodes.new(type='ShaderNodeMixRGB') bitmapnode = nodes.new(type='ShaderNodeTexEnvironment') + bitmap_mix.label = "Solid Color" bitmapnode.label = bitmap_name + bitmap_mix.location = (-250, 300) bitmapnode.location = (-300, 300) + bitmap_mix.inputs[2].default_value = nodes['Background'].inputs[0].default_value bitmapnode.image = load_image(bitmap_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True) - links.new(bitmapnode.outputs['Color'], nodes['Background'].inputs[0]) + bitmap_mix.inputs[0].default_value = 0.0 if bitmapnode.image is not None else 1.0 + links.new(bitmap_mix.outputs['Color'], nodes['Background'].inputs[0]) + links.new(bitmapnode.outputs['Color'], bitmap_mix.inputs[1]) new_chunk.bytes_read += read_str_len # If fog chunk -- 2.30.2 From f9c66d2c0d2fcbf8d3b3ee063041c842ee8d273f Mon Sep 17 00:00:00 2001 From: NRGSille Date: Sun, 30 Jul 2023 22:58:23 +0200 Subject: [PATCH 34/78] io_scene_3ds: Advanced world bitmap and color keyframe import and export --- io_scene_3ds/export_3ds.py | 29 ++++++++++++++++++++++++++++- io_scene_3ds/import_3ds.py | 29 +++++++++++++++++++++-------- 2 files changed, 49 insertions(+), 9 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index c9a67702c..3cced15fa 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1455,7 +1455,34 @@ def 
make_ambient_node(world): amb_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT)) amb_node.add_subchunk(amb_node_header_chunk) - if world.animation_data.action: + if world.use_nodes and world.node_tree.animation_data.action: + action = world.node_tree.animation_data.action + ambinode = next((nd for nd in world.node_tree.nodes if nd.type in {'RGB', 'EMISSION'}), False) + if ambinode and action.fcurves: + fcurves = action.fcurves + fcurves.update() + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + ambipath = ('nodes[\"RGB\"].outputs[0].default_value' if ambinode.type == 'RGB' else + 'nodes[\"Emission\"].inputs[0].default_value') + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys = nkeys + 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + + for i, frame in enumerate(kframes): + ambient = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == ambipath] + if not ambient: + ambient.append(world.color) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("color", _3ds_float_color(ambient[:3])) + + elif world.animation_data.action: action = world.animation_data.action if action.fcurves: fcurves = action.fcurves diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 5f0971d18..2cce3b0c7 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -709,19 +709,26 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # If background chunk elif CreateWorld and new_chunk.ID == SOLIDBACKGND: + backgroundcolor = mathutils.Color((0.1, 0.1, 0.1)) if contextWorld is None: 
path, filename = os.path.split(file.name) realname, ext = os.path.splitext(filename) contextWorld = bpy.data.worlds.new("Background: " + realname) context.scene.world = contextWorld contextWorld.use_nodes = True + worldnodes = contextWorld.node_tree.nodes + backgroundnode = worldnodes['Background'] read_chunk(file, temp_chunk) if temp_chunk.ID == COLOR_F: - contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) + backgroundcolor = read_float_array(temp_chunk) elif temp_chunk.ID == LIN_COLOR_F: - contextWorld.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_float_array(temp_chunk) + backgroundcolor = read_float_array(temp_chunk) else: skip_to_end(file, temp_chunk) + backgroundmix = next((wn for wn in worldnodes if wn.type in {'MIX', 'MIX_RGB'}), False) + backgroundnode.inputs[0].default_value[:3] = backgroundcolor + if backgroundmix: + backgroundmix.inputs[2].default_value[:3] = backgroundcolor new_chunk.bytes_read += temp_chunk.bytes_read # If bitmap chunk @@ -738,9 +745,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI bitmap_mix = nodes.new(type='ShaderNodeMixRGB') bitmapnode = nodes.new(type='ShaderNodeTexEnvironment') bitmap_mix.label = "Solid Color" - bitmapnode.label = bitmap_name - bitmap_mix.location = (-250, 300) - bitmapnode.location = (-300, 300) + bitmapnode.label = "Bitmap: " + bitmap_name + bitmap_mix.location = (-250, 360) + bitmapnode.location = (-600, 300) bitmap_mix.inputs[2].default_value = nodes['Background'].inputs[0].default_value bitmapnode.image = load_image(bitmap_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True) bitmap_mix.inputs[0].default_value = 0.0 if bitmapnode.image is not None else 1.0 @@ -1143,13 +1150,17 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI worldout = nodes['World Output'] mixshade = nodes.new(type='ShaderNodeMixShader') ambinode = 
nodes.new(type='ShaderNodeEmission') + ambilite = nodes.new(type='ShaderNodeRGB') + ambilite.label = "Ambient Color" ambinode.inputs[0].default_value[:3] = child.color ambinode.location = (10, 150) worldout.location = (600, 200) mixshade.location = (300, 300) + ambilite.location = (-250, 150) links.new(mixshade.outputs[0], worldout.inputs['Surface']) links.new(nodes['Background'].outputs[0], mixshade.inputs[1]) links.new(ambinode.outputs[0], mixshade.inputs[2]) + links.new(ambilite.outputs[0], ambinode.inputs[0]) ambinode.label = object_name if object_name != '$AMBIENT$' else "Ambient" elif CreateEmpty and tracking == 'OBJECT' and object_name == '$$$DUMMY': child = bpy.data.objects.new(object_name, None) # Create an empty object @@ -1195,11 +1206,13 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI keyframe_data = {} default_data = child.color[:] child.color = read_track_data(new_chunk)[0] - ambinode.inputs[0].default_value[:3] = child.color + ambilite.color = child.color + ambinode.inputs[0].default_value[:3] = ambilite.color for keydata in keyframe_data.items(): - child.color = keydata[1] - child.keyframe_insert(data_path="color", frame=keydata[0]) ambinode.inputs[0].default_value[:3] = keydata[1] + child.color = ambilite.outputs[0].default_value[:3] = keydata[1] + child.keyframe_insert(data_path="color", frame=keydata[0]) + nodetree.keyframe_insert(data_path="nodes[\"RGB\"].outputs[0].default_value", frame=keydata[0]) nodetree.keyframe_insert(data_path="nodes[\"Emission\"].inputs[0].default_value", frame=keydata[0]) contextTrack_flag = False -- 2.30.2 From a70ce67693e08853124e5e0b2f80aa67f2fcb37a Mon Sep 17 00:00:00 2001 From: NRGSille Date: Mon, 31 Jul 2023 00:36:28 +0200 Subject: [PATCH 35/78] io_scene_3ds: Added imperial unit measure --- io_scene_3ds/export_3ds.py | 8 +++++++- io_scene_3ds/import_3ds.py | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/io_scene_3ds/export_3ds.py 
b/io_scene_3ds/export_3ds.py index 3cced15fa..73b5339ed 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1541,12 +1541,18 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, unit_measure = 1.0 if use_scene_unit: unit_length = scene.unit_settings.length_unit - if unit_length == 'KILOMETERS': + if unit_length == 'MILES': + unit_measure = 0.000621371 + elif unit_length == 'KILOMETERS': unit_measure = 0.001 + elif unit_length == 'INCHES': + unit_measure = 39.37007874 elif unit_length == 'CENTIMETERS': unit_measure = 100 elif unit_length == 'MILLIMETERS': unit_measure = 1000 + elif unit_length == 'THOU': + unit_measure = 39370.07874 elif unit_length == 'MICROMETERS': unit_measure = 1000000 diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 2cce3b0c7..e9b640ed4 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1475,12 +1475,18 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, if UNITS: unit_length = scn.unit_settings.length_unit - if unit_length == 'KILOMETERS': + if unit_length == 'MILES': + MEASURE = 1609.344 + elif unit_length == 'KILOMETERS': MEASURE = 1000.0 + elif unit_length == 'INCHES': + MEASURE = 0.3048 elif unit_length == 'CENTIMETERS': MEASURE = 0.01 elif unit_length == 'MILLIMETERS': MEASURE = 0.001 + elif unit_length == 'THOU': + MEASURE = 0.0000254 elif unit_length == 'MICROMETERS': MEASURE = 0.000001 -- 2.30.2 From ed8255c7a6f7d87c352660691fe7e94d19ea31ae Mon Sep 17 00:00:00 2001 From: NRGSille Date: Mon, 31 Jul 2023 12:45:10 +0200 Subject: [PATCH 36/78] io_scene_3ds: Added feet to imperial unit measure --- io_scene_3ds/export_3ds.py | 2 ++ io_scene_3ds/import_3ds.py | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 73b5339ed..005d5932c 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1545,6 +1545,8 @@ def 
save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, unit_measure = 0.000621371 elif unit_length == 'KILOMETERS': unit_measure = 0.001 + elif unit_length == 'FEET': + unit_measure = 3.280839895 elif unit_length == 'INCHES': unit_measure = 39.37007874 elif unit_length == 'CENTIMETERS': diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index ad657f493..850396254 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1479,8 +1479,10 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, MEASURE = 1609.344 elif unit_length == 'KILOMETERS': MEASURE = 1000.0 - elif unit_length == 'INCHES': + elif unit_length == 'FEET': MEASURE = 0.3048 + elif unit_length == 'INCHES': + MEASURE = 0.0254 elif unit_length == 'CENTIMETERS': MEASURE = 0.01 elif unit_length == 'MILLIMETERS': -- 2.30.2 From 5ee88e7de55cdc803b1f28a190144ef4d8e830db Mon Sep 17 00:00:00 2001 From: NRGSille Date: Tue, 1 Aug 2023 16:06:32 +0200 Subject: [PATCH 37/78] io_scene_3ds: Added fog and gradient import and export --- io_scene_3ds/export_3ds.py | 79 +++++++++++++++++------- io_scene_3ds/import_3ds.py | 120 ++++++++++++++++++++++++++++++++----- 2 files changed, 162 insertions(+), 37 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 6f8e2e589..2715d5b66 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -38,8 +38,10 @@ VGRADIENT = 0x1300 # The background gradient colors USE_VGRADIENT = 0x1301 # The background gradient flag O_CONSTS = 0x1500 # The origin of the 3D cursor AMBIENTLIGHT = 0x2100 # The color of the ambient light -LAYER_FOG = 0x2302 # The fog atmosphere settings -USE_LAYER_FOG = 0x2303 # The fog atmosphere flag +FOG = 0x2200 # The fog atmosphere settings +USE_FOG = 0x2201 # The fog atmosphere flag +LAYER_FOG = 0x2302 # The fog layer atmosphere settings +USE_LAYER_FOG = 0x2303 # The fog layer atmosphere flag MATERIAL = 45055 # 0xAFFF // This stored 
the texture info OBJECT = 16384 # 0x4000 // This stores the faces, vertices, etc... @@ -1468,7 +1470,7 @@ def make_ambient_node(world): emission = next((lk.from_socket.node for lk in ambilinks if lk.to_node.type in ambioutput), False) ambinode = next((lk.from_socket.node for lk in ambilinks if lk.to_node.type == 'EMISSION'), emission) kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] - ambipath = ('nodes[\"RGB\"].outputs[0].default_value' if ambinode.type == 'RGB' else + ambipath = ('nodes[\"RGB\"].outputs[0].default_value' if ambinode and ambinode.type == 'RGB' else 'nodes[\"Emission\"].inputs[0].default_value') nkeys = len(kframes) if not 0 in kframes: @@ -1615,16 +1617,18 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, # Add BACKGROUND and BITMAP if world.use_nodes: + bgtype = 'BACKGROUND' ntree = world.node_tree.links background_color_chunk = _3ds_chunk(RGB) background_chunk = _3ds_chunk(SOLIDBACKGND) background_flag = _3ds_chunk(USE_SOLIDBGND) - bgtype = 'BACKGROUND' + bgmixer = 'BACKGROUND', 'MIX', 'MIX_RGB' bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD' bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT' bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color) - bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in {'MIX', 'MIX_RGB'} and lk.to_node.type == bgtype), bgtype) + bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype) bg_image = next((lk.from_node.image.name for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False) + gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False) background_color_chunk.add_variable("color", _3ds_float_color(bg_color)) 
background_chunk.add_subchunk(background_color_chunk) if bg_image: @@ -1633,28 +1637,59 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, background_image.add_variable("image", _3ds_string(sane_name(bg_image))) object_info.add_subchunk(background_image) object_info.add_subchunk(background_chunk) + + # Add VGRADIENT chunk + if gradient and len(gradient) >= 3: + gradient_chunk = _3ds_chunk(VGRADIENT) + background_flag = _3ds_chunk(USE_VGRADIENT) + gradient_chunk.add_variable("midpoint", _3ds_float(gradient[1].position)) + gradient_topcolor_chunk = _3ds_chunk(RGB) + gradient_topcolor_chunk.add_variable("color", _3ds_float_color(gradient[2].color[:3])) + gradient_chunk.add_subchunk(gradient_topcolor_chunk) + gradient_midcolor_chunk = _3ds_chunk(RGB) + gradient_midcolor_chunk.add_variable("color", _3ds_float_color(gradient[1].color[:3])) + gradient_chunk.add_subchunk(gradient_midcolor_chunk) + gradient_lowcolor_chunk = _3ds_chunk(RGB) + gradient_lowcolor_chunk.add_variable("color", _3ds_float_color(gradient[0].color[:3])) + gradient_chunk.add_subchunk(gradient_lowcolor_chunk) + object_info.add_subchunk(gradient_chunk) object_info.add_subchunk(background_flag) - # Add LAYER_FOG settings - fogshader = next((lk.from_socket.node for lk in ntree if lk.from_socket.identifier and lk.to_socket.identifier == 'Volume'), False) - if fogshader: - fogflag = 0 - if world.mist_settings.falloff == 'QUADRATIC': - fogflag |= 0x1 - if world.mist_settings.falloff == 'INVERSE_QUADRATIC': - fogflag |= 0x2 - fog_chunk = _3ds_chunk(LAYER_FOG) + # Add FOG + fognode = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_ABSORPTION' and lk.to_socket.node.type in bgshade), False) + if fognode: + fog_chunk = _3ds_chunk(FOG) fog_color_chunk = _3ds_chunk(RGB) - use_fog_flag = _3ds_chunk(USE_LAYER_FOG) - fog_color_chunk.add_variable("color", _3ds_float_color(fogshader.inputs['Color'].default_value[:3])) - fog_chunk.add_variable("lowZ", 
_3ds_float(world.mist_settings.start)) - fog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.depth)) - fog_chunk.add_variable("density", _3ds_float(fogshader.inputs['Density'].default_value)) - fog_chunk.add_variable("flags", _3ds_uint(fogflag)) + use_fog_flag = _3ds_chunk(USE_FOG) + fog_density = fognode.inputs['Density'].default_value * 100 + fog_color_chunk.add_variable("color", _3ds_float_color(fognode.inputs[0].default_value[:3])) + fog_chunk.add_variable("nearplane", _3ds_float(world.mist_settings.start)) + fog_chunk.add_variable("nearfog", _3ds_float(fog_density * 0.5)) + fog_chunk.add_variable("farplane", _3ds_float(world.mist_settings.depth)) + fog_chunk.add_variable("farfog", _3ds_float(fog_density + fog_density * 0.5)) fog_chunk.add_subchunk(fog_color_chunk) object_info.add_subchunk(fog_chunk) - if layer.use_pass_mist: - object_info.add_subchunk(use_fog_flag) + + # Add LAYER FOG + foglayer = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_SCATTER' and lk.to_socket.node.type in bgshade), False) + if foglayer: + layerfog_flag = 0 + if world.mist_settings.falloff == 'QUADRATIC': + layerfog_flag |= 0x1 + if world.mist_settings.falloff == 'INVERSE_QUADRATIC': + layerfog_flag |= 0x2 + layerfog_chunk = _3ds_chunk(LAYER_FOG) + layerfog_color_chunk = _3ds_chunk(RGB) + use_fog_flag = _3ds_chunk(USE_LAYER_FOG) + layerfog_color_chunk.add_variable("color", _3ds_float_color(foglayer.inputs[0].default_value[:3])) + layerfog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start)) + layerfog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.height)) + layerfog_chunk.add_variable("density", _3ds_float(foglayer.inputs[1].default_value)) + layerfog_chunk.add_variable("flags", _3ds_uint(layerfog_flag)) + layerfog_chunk.add_subchunk(layerfog_color_chunk) + object_info.add_subchunk(layerfog_chunk) + if fognode or foglayer and layer.use_pass_mist: + object_info.add_subchunk(use_fog_flag) if use_keyframes and 
world.animation_data: kfdata.add_subchunk(make_ambient_node(world)) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 850396254..42a49bc7f 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -45,8 +45,11 @@ VGRADIENT = 0x1300 # The background gradient colors USE_VGRADIENT = 0x1301 # The background gradient flag O_CONSTS = 0x1500 # The origin of the 3D cursor AMBIENTLIGHT = 0x2100 # The color of the ambient light -LAYER_FOG = 0x2302 # The fog atmosphere settings -USE_LAYER_FOG = 0x2303 # The fog atmosphere flag +FOG = 0x2200 # The fog atmosphere settings +USE_FOG = 0x2201 # The fog atmosphere flag +FOG_BGND = 0x2210 # The fog atmosphere background flag +LAYER_FOG = 0x2302 # The fog layer atmosphere settings +USE_LAYER_FOG = 0x2303 # The fog layer atmosphere flag MATERIAL = 0xAFFF # This stored the texture info OBJECT = 0x4000 # This stores the faces, vertices, etc... @@ -746,16 +749,95 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI bitmapnode = nodes.new(type='ShaderNodeTexEnvironment') bitmap_mix.label = "Solid Color" bitmapnode.label = "Bitmap: " + bitmap_name - bitmap_mix.location = (-250, 360) - bitmapnode.location = (-600, 300) bitmap_mix.inputs[2].default_value = nodes['Background'].inputs[0].default_value bitmapnode.image = load_image(bitmap_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True) - bitmap_mix.inputs[0].default_value = 0.0 if bitmapnode.image is not None else 1.0 + bitmap_mix.inputs[0].default_value = 0.5 if bitmapnode.image is not None else 1.0 + bitmapnode.location = (-600, 360) if bitmapnode.image is not None else (-600, 300) + bitmap_mix.location = (-250, 300) links.new(bitmap_mix.outputs['Color'], nodes['Background'].inputs[0]) links.new(bitmapnode.outputs['Color'], bitmap_mix.inputs[1]) new_chunk.bytes_read += read_str_len - # If fog chunk + # If gradient chunk: + elif CreateWorld and new_chunk.ID == VGRADIENT: + if contextWorld 
is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Gradient: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + gradientnode = nodes.new(type='ShaderNodeValToRGB') + gradientnode.location = (-600, 100) + gradientnode.label = "Gradient" + backgroundmix = next((wn for wn in worldnodes if wn.type in {'MIX', 'MIX_RGB'}), False) + if backgroundmix: + links.new(gradientnode.outputs['Color'], backgroundmix.inputs[2]) + else: + links.new(gradientnode.outputs['Color'], nodes['Background'].inputs[0]) + gradientnode.color_ramp.elements.new(read_float(new_chunk)) + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + gradientnode.color_ramp.elements[2].color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + gradientnode.color_ramp.elements[2].color[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + gradientnode.color_ramp.elements[1].color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + gradientnode.color_ramp.elements[1].color[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + gradientnode.color_ramp.elements[0].color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + gradientnode.color_ramp.elements[0].color[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + + # If fog chunk: + elif CreateWorld and new_chunk.ID == FOG: + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + newWorld = 
bpy.data.worlds.new("LayerFog: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + fognode = nodes.new(type='ShaderNodeVolumeAbsorption') + fognode.label = "Fog" + fognode.location = (300, 60) + volumemix = next((wn for wn in worldnodes if wn.label == 'Volume' and wn.type in {'ADD_SHADER', 'MIX_SHADER'}), False) + if volumemix: + links.new(fognode.outputs['Volume'], volumemix.inputs[1]) + else: + links.new(fognode.outputs[0], nodes['World Output'].inputs[1]) + contextWorld.mist_settings.use_mist = True + contextWorld.mist_settings.start = read_float(new_chunk) + nearfog = read_float(new_chunk) * 0.01 + contextWorld.mist_settings.depth = read_float(new_chunk) + farfog = read_float(new_chunk) * 0.01 + fognode.inputs[1].default_value = (nearfog + farfog) * 0.5 + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + fognode.inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + fognode.inputs[0].default_value[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + elif CreateWorld and new_chunk.ID == FOG_BGND: + pass + + # If layer fog chunk: elif CreateWorld and new_chunk.ID == LAYER_FOG: """Fog options flags are bit 20 (0x100000) for background fogging, bit 0 (0x1) for bottom falloff, and bit 1 (0x2) for top falloff.""" @@ -767,16 +849,23 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextWorld.use_nodes = True links = contextWorld.node_tree.links nodes = contextWorld.node_tree.nodes + mxvolume = nodes.new(type='ShaderNodeMixShader') layerfog = nodes.new(type='ShaderNodeVolumeScatter') layerfog.label = "Layer Fog" - layerfog.location = (300, 100) - links.new(layerfog.outputs['Volume'], nodes['World Output'].inputs['Volume']) + mxvolume.label = "Volume" + layerfog.location = (10, -60) + 
mxvolume.location = (300, 50) + links.new(layerfog.outputs['Volume'], mxvolume.inputs[2]) + links.new(mxvolume.outputs[0], nodes['World Output'].inputs[1]) + fognode = next((wn for wn in worldnodes if wn.type == 'VOLUME_ABSORPTION'), False) + if fognode: + links.new(fognode.outputs['Volume'], mxvolume.inputs[1]) + fognode.location = (10, 60) context.view_layer.use_pass_mist = False contextWorld.mist_settings.use_mist = True contextWorld.mist_settings.start = read_float(new_chunk) - contextWorld.mist_settings.depth = read_float(new_chunk) - contextWorld.mist_settings.height = contextWorld.mist_settings.depth * 0.5 - layerfog.inputs['Density'].default_value = read_float(new_chunk) + contextWorld.mist_settings.height = read_float(new_chunk) + layerfog.inputs[1].default_value = read_float(new_chunk) layerfog_flag = read_long(new_chunk) if layerfog_flag == 0: contextWorld.mist_settings.falloff = 'LINEAR' @@ -1152,11 +1241,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI ambinode = nodes.new(type='ShaderNodeEmission') ambilite = nodes.new(type='ShaderNodeRGB') ambilite.label = "Ambient Color" + mixshade.label = "Surface" ambinode.inputs[0].default_value[:3] = child.color - ambinode.location = (10, 150) - worldout.location = (600, 200) - mixshade.location = (300, 300) - ambilite.location = (-250, 150) + ambinode.location = (10, 180) + worldout.location = (600, 180) + mixshade.location = (300, 280) + ambilite.location = (-250, 100) links.new(mixshade.outputs[0], worldout.inputs['Surface']) links.new(nodes['Background'].outputs[0], mixshade.inputs[1]) links.new(ambinode.outputs[0], mixshade.inputs[2]) -- 2.30.2 From 4655d96488fa6e8417d29d675e8d8e030e9e9599 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Thu, 3 Aug 2023 00:34:17 +0200 Subject: [PATCH 38/78] io_scene_3ds: Added spot aspect and projector --- io_scene_3ds/export_3ds.py | 20 ++++++++++++++++++++ io_scene_3ds/import_3ds.py | 30 ++++++++++++++++++++++++++++++ 2 files changed, 50 
insertions(+) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 2715d5b66..ab7c12ca1 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -110,6 +110,9 @@ LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag LIGHT_SPOT_LSHADOW = 0x4641 # Light spot shadow parameters LIGHT_SPOT_SEE_CONE = 0x4650 # Light spot show cone flag LIGHT_SPOT_RECTANGLE = 0x4651 # Light spot rectangle flag +LIGHT_SPOT_OVERSHOOT = 0x4652 # Light spot overshoot flag +LIGHT_SPOT_PROJECTOR = 0x4653 # Light spot projection bitmap +LIGHT_SPOT_ASPECT = 0x4657 # Light spot aspect ratio # >------ sub defines of CAMERA OBJECT_CAM_RANGES = 0x4720 # The camera range values @@ -1881,6 +1884,23 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, if ob.data.use_square: spot_square_chunk = _3ds_chunk(LIGHT_SPOT_RECTANGLE) spotlight_chunk.add_subchunk(spot_square_chunk) + if ob.scale.x and ob.scale.y != 0.0: + spot_aspect_chunk = _3ds_chunk(LIGHT_SPOT_ASPECT) + spot_aspect_chunk.add_variable("aspect", _3ds_float(round((ob.scale.x / ob.scale.y),4))) + spotlight_chunk.add_subchunk(spot_aspect_chunk) + if ob.data.use_nodes: + links = ob.data.node_tree.links + bptype = 'EMISSION' + bpmix = 'MIX', 'MIX_RGB', 'EMISSION' + bptex = 'TEX_IMAGE', 'TEX_ENVIRONMENT' + bpout = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_LIGHT' + bshade = next((lk.from_node.type for lk in links if lk.from_node.type == bptype and lk.to_node.type in bpout), None) + bpnode = next((lk.from_node.type for lk in links if lk.from_node.type in bpmix and lk.to_node.type == bshade), bshade) + bitmap = next((lk.from_node.image for lk in links if lk.from_node.type in bptex and lk.to_node.type == bpnode), False) + if bitmap and bitmap is not None: + spot_projector_chunk = _3ds_chunk(LIGHT_SPOT_PROJECTOR) + spot_projector_chunk.add_variable("image", _3ds_string(sane_name(bitmap.name))) + spotlight_chunk.add_subchunk(spot_projector_chunk) obj_light_chunk.add_subchunk(spotlight_chunk) 
# Add light to object chunk diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 732a70eee..1d51491d6 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1156,6 +1156,24 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextLamp.data.show_cone = True elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_RECTANGLE: # Square flag contextLamp.data.use_square = True + elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_ASPECT: # Aspect + contextLamp.empty_display_size = read_float(new_chunk) + elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_PROJECTOR: # Projection + contextLamp.data.use_nodes = True + nodes = contextLamp.data.node_tree.nodes + links = contextLamp.data.node_tree.links + gobo_name, read_str_len = read_string(file) + new_chunk.bytes_read += read_str_len + projection = nodes.new(type='ShaderNodeTexImage') + projection.label = gobo_name + projection.location = (-340, 360) + projection.image = load_image(gobo_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True) + emitnode = next((node for node in nodes if node.type == 'EMISSION'), False) + emission = emitnode if emitnode else nodes.new(type='ShaderNodeEmission') + emission.label = "Projector" + emission.location = (0, 300) + links.new(emission.outputs['Emission'], nodes['Light Output'].inputs[0]) + links.new(projection.outputs['Color'], emission.inputs[0]) elif CreateLightObject and new_chunk.ID == OBJECT_HIERARCHY: # Hierarchy child_id = get_hierarchy(new_chunk) elif CreateLightObject and new_chunk.ID == OBJECT_PARENT: @@ -1613,6 +1631,18 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, # Select all new objects for ob in imported_objects: + if ob.type == 'LIGHT' and ob.data.type == 'SPOT': + aspect = ob.empty_display_size + fac = 1.0 + ratio = (fac / aspect) + align = fac - (ratio - aspect if ratio > fac else aspect - ratio) + shift = align + (align / 2.0) 
+ if aspect > 1.0: + ob.scale.x = fac + align + ob.scale.y = fac - align + elif aspect < 1.0: + ob.scale.x = fac - align + ob.scale.y = fac + align ob.select_set(True) if not APPLY_MATRIX: # Reset transform bpy.ops.object.rotation_clear() -- 2.30.2 From 9cef03ab7f01584472577943d304662758c31a35 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Thu, 3 Aug 2023 23:28:16 +0200 Subject: [PATCH 39/78] io_scene_3ds: Added light radius, distance and attenuation --- io_scene_3ds/export_3ds.py | 12 ++++++++++++ io_scene_3ds/import_3ds.py | 7 +++++++ 2 files changed, 19 insertions(+) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index ab7c12ca1..fe090399e 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -104,6 +104,9 @@ OBJECT_PARENT = 0x4F10 # Parent id of the object # >------ Sub defines of LIGHT LIGHT_MULTIPLIER = 0x465B # The light energy factor +LIGHT_INNER_RANGE = 0x4659 # Light inner range value +LIGHT_OUTER_RANGE = 0x465A # Light outer range value +LIGHT_ATTENUATE = 0x4625 # Light attenuation flag LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight LIGHT_SPOT_ROLL = 0x4656 # Light spot roll angle LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag @@ -1851,13 +1854,22 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, obj_light_chunk = _3ds_chunk(OBJECT_LIGHT) color_float_chunk = _3ds_chunk(RGB) light_distance = translation[ob.name] + light_attenuate = _3ds_chunk(LIGHT_ATTENUATE) + light_inner_range = _3ds_chunk(LIGHT_INNER_RANGE) + light_outer_range = _3ds_chunk(LIGHT_OUTER_RANGE) light_energy_factor = _3ds_chunk(LIGHT_MULTIPLIER) object_chunk.add_variable("light", _3ds_string(sane_name(ob.name))) obj_light_chunk.add_variable("location", _3ds_point_3d(light_distance)) color_float_chunk.add_variable("color", _3ds_float_color(ob.data.color)) + light_outer_range.add_variable("distance", _3ds_float(ob.data.cutoff_distance)) + light_inner_range.add_variable("radius", 
_3ds_float(ob.data.shadow_soft_size)) light_energy_factor.add_variable("energy", _3ds_float(ob.data.energy * 0.001)) obj_light_chunk.add_subchunk(color_float_chunk) + obj_light_chunk.add_subchunk(light_outer_range) + obj_light_chunk.add_subchunk(light_inner_range) obj_light_chunk.add_subchunk(light_energy_factor) + if ob.data.use_custom_distance: + obj_light_chunk.add_subchunk(light_attenuate) if ob.data.type == 'SPOT': cone_angle = math.degrees(ob.data.spot_size) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 3f493375c..16fe88548 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -128,6 +128,7 @@ LIGHT_RAY_BIAS = 0x4658 # Light ray bias value LIGHT_INNER_RANGE = 0x4659 # The light inner range LIGHT_OUTER_RANGE = 0x465A # The light outer range LIGHT_MULTIPLIER = 0x465B # The light energy factor +LIGHT_ATTENUATE = 0x4625 # Light attenuation flag LIGHT_AMBIENT_LIGHT = 0x4680 # Light ambient flag # >------ sub defines of CAMERA @@ -1128,8 +1129,14 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextMatrix = None # Reset matrix elif CreateLightObject and new_chunk.ID == COLOR_F: # Color contextLamp.data.color = read_float_array(new_chunk) + elif CreateLightObject and new_chunk.ID == LIGHT_OUTER_RANGE: # Distance + contextLamp.data.cutoff_distance = read_float(new_chunk) + elif CreateLightObject and new_chunk.ID == LIGHT_INNER_RANGE: # Radius + contextLamp.data.shadow_soft_size = read_float(new_chunk) elif CreateLightObject and new_chunk.ID == LIGHT_MULTIPLIER: # Intensity contextLamp.data.energy = (read_float(new_chunk) * 1000) + elif CreateLightObject and new_chunk.ID == LIGHT_ATTENUATE: # Attenuation + contextLamp.data.use_custom_distance = True # If spotlight chunk elif CreateLightObject and new_chunk.ID == LIGHT_SPOTLIGHT: # Spotlight -- 2.30.2 From 27c7ab156e59ca5e71e0bc7ceabccf9b9f231f12 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Fri, 4 Aug 2023 23:44:18 +0200 Subject: 
[PATCH 40/78] io_scene_3ds: Added pivot origin option and fixed camera and light ranges --- io_scene_3ds/__init__.py | 10 +++++- io_scene_3ds/export_3ds.py | 9 +++-- io_scene_3ds/import_3ds.py | 67 ++++++++++++++++++++++---------------- 3 files changed, 55 insertions(+), 31 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index bbf2f1fe2..a9f7f56b8 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -57,7 +57,12 @@ class Import3DS(bpy.types.Operator, ImportHelper): ) use_scene_unit: BoolProperty( name="Scene Units", - description="Converts to scene unit length settings", + description="Convert to scene unit length settings", + default=False, + ) + use_center_pivot: BoolProperty( + name="Pivot Origin", + description="Move all geometry to pivot origin", default=False, ) use_image_search: BoolProperty( @@ -177,6 +182,9 @@ class MAX3DS_PT_import_transform(bpy.types.Panel): layrow.prop(operator, "use_scene_unit") layrow.label(text="", icon='EMPTY_ARROWS' if operator.use_scene_unit else 'EMPTY_DATA') layrow = layout.row(align=True) + layrow.prop(operator, "use_center_pivot") + layrow.label(text="", icon='OVERLAY' if operator.use_center_pivot else 'PIVOT_ACTIVE') + layrow = layout.row(align=True) layrow.prop(operator, "use_apply_transform") layrow.label(text="", icon='MESH_CUBE' if operator.use_apply_transform else 'MOD_SOLIDIFY') layrow = layout.row(align=True) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index fe090399e..503b20ddf 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1858,12 +1858,13 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, light_inner_range = _3ds_chunk(LIGHT_INNER_RANGE) light_outer_range = _3ds_chunk(LIGHT_OUTER_RANGE) light_energy_factor = _3ds_chunk(LIGHT_MULTIPLIER) + light_ratio = ob.data.energy if ob.data.type == 'SUN' else ob.data.energy * 0.001 object_chunk.add_variable("light", 
_3ds_string(sane_name(ob.name))) obj_light_chunk.add_variable("location", _3ds_point_3d(light_distance)) color_float_chunk.add_variable("color", _3ds_float_color(ob.data.color)) light_outer_range.add_variable("distance", _3ds_float(ob.data.cutoff_distance)) - light_inner_range.add_variable("radius", _3ds_float(ob.data.shadow_soft_size)) - light_energy_factor.add_variable("energy", _3ds_float(ob.data.energy * 0.001)) + light_inner_range.add_variable("radius", _3ds_float(ob.data.shadow_soft_size * 100)) + light_energy_factor.add_variable("energy", _3ds_float(light_ratio)) obj_light_chunk.add_subchunk(color_float_chunk) obj_light_chunk.add_subchunk(light_outer_range) obj_light_chunk.add_subchunk(light_inner_range) @@ -1943,6 +1944,7 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, for ob in camera_objects: object_chunk = _3ds_chunk(OBJECT) camera_chunk = _3ds_chunk(OBJECT_CAMERA) + crange_chunk = _3ds_chunk(OBJECT_CAM_RANGES) camera_distance = translation[ob.name] camera_target = calc_target(camera_distance, rotation[ob.name].x, rotation[ob.name].z) object_chunk.add_variable("camera", _3ds_string(sane_name(ob.name))) @@ -1950,6 +1952,9 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, camera_chunk.add_variable("target", _3ds_point_3d(camera_target)) camera_chunk.add_variable("roll", _3ds_float(round(rotation[ob.name].y, 6))) camera_chunk.add_variable("lens", _3ds_float(ob.data.lens)) + crange_chunk.add_variable("clipstart", _3ds_float(ob.data.clip_start * 0.1)) + crange_chunk.add_variable("clipend", _3ds_float(ob.data.clip_end * 0.1)) + camera_chunk.add_subchunk(crange_chunk) object_chunk.add_subchunk(camera_chunk) # Add hierarchy chunks with ID from object_id dictionary diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 16fe88548..4e21198a1 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -890,24 +890,6 @@ def process_next_chunk(context, file,
previous_chunk, imported_objects, CONSTRAI elif CreateWorld and new_chunk.ID in {USE_FOG, USE_LAYER_FOG}: context.view_layer.use_pass_mist = True - # If object chunk - can be material and mesh, light and spot or camera - elif new_chunk.ID == OBJECT: - if CreateBlenderObject: - putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMesh_flag, - contextMeshMaterials, contextMesh_smooth, WORLD_MATRIX) - - contextMesh_vertls = [] - contextMesh_facels = [] - contextMeshMaterials = [] - contextMesh_flag = None - contextMesh_smooth = None - contextMeshUV = None - contextMatrix = None - - CreateBlenderObject = True if CreateMesh else False - contextObName, read_str_len = read_string(file) - new_chunk.bytes_read += read_str_len - # If material chunk elif new_chunk.ID == MATERIAL: contextAlpha = True @@ -1058,6 +1040,25 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif new_chunk.ID == MAT_TEX2_MAP: read_texture(new_chunk, temp_chunk, "Tex", 'TEXTURE') + # If object chunk - can be mesh, light and spot or camera + elif new_chunk.ID == OBJECT: + if CreateBlenderObject: + putContextMesh(context, contextMesh_vertls, contextMesh_facels, contextMesh_flag, + contextMeshMaterials, contextMesh_smooth, WORLD_MATRIX) + + contextMesh_vertls = [] + contextMesh_facels = [] + contextMeshMaterials = [] + contextMesh_flag = None + contextMesh_smooth = None + contextMeshUV = None + contextMatrix = None + + CreateBlenderObject = True if CreateMesh else False + CreateLightObject = CreateCameraObject = False + contextObName, read_str_len = read_string(file) + new_chunk.bytes_read += read_str_len + # If mesh chunk elif new_chunk.ID == OBJECT_MESH: pass @@ -1132,7 +1133,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif CreateLightObject and new_chunk.ID == LIGHT_OUTER_RANGE: # Distance contextLamp.data.cutoff_distance = read_float(new_chunk) elif CreateLightObject and new_chunk.ID == LIGHT_INNER_RANGE: # 
Radius - contextLamp.data.shadow_soft_size = read_float(new_chunk) + contextLamp.data.shadow_soft_size = (read_float(new_chunk) * 0.01) elif CreateLightObject and new_chunk.ID == LIGHT_MULTIPLIER: # Intensity contextLamp.data.energy = (read_float(new_chunk) * 1000) elif CreateLightObject and new_chunk.ID == LIGHT_ATTENUATE: # Attenuation @@ -1207,6 +1208,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextCamera.rotation_euler.z = direction[1] contextCamera.data.lens = read_float(new_chunk) # Focal length contextMatrix = None # Reset matrix + elif CreateCameraObject and new_chunk.ID == OBJECT_CAM_RANGES: # Range + camrange = read_float(new_chunk) + startrange = camrange if camrange >= 0.01 else 0.1 + contextCamera.data.clip_start = startrange * CONSTRAIN + contextCamera.data.clip_end = read_float(new_chunk) * CONSTRAIN elif CreateCameraObject and new_chunk.ID == OBJECT_HIERARCHY: # Hierarchy child_id = get_hierarchy(new_chunk) elif CreateCameraObject and new_chunk.ID == OBJECT_PARENT: @@ -1560,8 +1566,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI # IMPORT # ########## -def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, FILTER=None, - WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None, CURSOR=False): +def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, + FILTER=None, WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, + CONVERSE=None, CURSOR=False, PIVOT=False): print("importing 3DS: %r..." 
% (filepath), end="") @@ -1643,9 +1650,13 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, ob.scale.y = (square / (math.sqrt(pow(aspect,2) + 1.0))) ob.scale.z = 1.0 ob.select_set(True) - if not APPLY_MATRIX: # Reset transform - bpy.ops.object.rotation_clear() - bpy.ops.object.location_clear() + if ob.type == 'MESH': + if PIVOT: + bpy.ops.object.origin_set(type='GEOMETRY_ORIGIN') + if not APPLY_MATRIX: # Reset transform + bpy.ops.object.rotation_clear() + bpy.ops.object.location_clear() + bpy.ops.object.scale_clear() """ if IMPORT_AS_INSTANCE: @@ -1712,11 +1723,11 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, UNITS=False, IMAGE_SEARCH=True, def load(operator, context, filepath="", constrain_size=0.0, use_scene_unit=False, - use_image_search=True, object_filter=None, use_world_matrix=False, - use_keyframes=True, use_apply_transform=True, global_matrix=None, use_cursor=False): + use_image_search=True, object_filter=None, use_world_matrix=False, use_keyframes=True, + use_apply_transform=True, global_matrix=None, use_cursor=False, use_center_pivot=False): load_3ds(filepath, context, CONSTRAIN=constrain_size, UNITS=use_scene_unit, - IMAGE_SEARCH=use_image_search, FILTER=object_filter, WORLD_MATRIX=use_world_matrix, - KEYFRAME=use_keyframes, APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix, CURSOR=use_cursor,) + IMAGE_SEARCH=use_image_search, FILTER=object_filter, WORLD_MATRIX=use_world_matrix, KEYFRAME=use_keyframes, + APPLY_MATRIX=use_apply_transform, CONVERSE=global_matrix, CURSOR=use_cursor, PIVOT=use_center_pivot,) return {'FINISHED'} -- 2.30.2 From 50c8062fca8a7e8ae40b5fa316069e30f0940618 Mon Sep 17 00:00:00 2001 From: NRGSille Date: Tue, 26 Sep 2023 19:33:27 +0200 Subject: [PATCH 41/78] io_scene_3ds: Update for Principled BSDF specularity --- io_scene_3ds/__init__.py | 6 +++--- io_scene_3ds/export_3ds.py | 12 ++++++------ io_scene_3ds/import_3ds.py | 2 ++ 3 files changed, 11 insertions(+), 9 deletions(-) diff --git 
a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index a9f7f56b8..3f4e65421 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -17,9 +17,9 @@ from bpy.props import ( import bpy bl_info = { "name": "Autodesk 3DS format", - "author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand", - "version": (2, 4, 6), - "blender": (3, 6, 0), + "author": "Bob Holcomb, Campbell Barton, Sebastian Schrand", + "version": (2, 4, 7), + "blender": (4, 0, 0), "location": "File > Import-Export", "description": "3DS Import/Export meshes, UVs, materials, textures, " "cameras, lamps & animation", diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 9d77718ea..bc543216c 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -698,7 +698,7 @@ def make_material_chunk(material, image): shading.add_variable("shading", _3ds_ushort(3)) # Phong shading material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, wrap.emission_color[:3])) material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, wrap.base_color[:3])) - material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specular_color[:])) + material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, wrap.specular_tint[:3])) material_chunk.add_subchunk(make_percent_subchunk(MATSHINESS, 1 - wrap.roughness)) material_chunk.add_subchunk(make_percent_subchunk(MATSHIN2, wrap.specular)) material_chunk.add_subchunk(make_percent_subchunk(MATSHIN3, wrap.metallic)) @@ -1211,10 +1211,10 @@ def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): track_chunk.add_variable("fov", _3ds_float(round(math.degrees(fov), 4))) elif ID == HOTSPOT_TRACK_TAG: # Hotspot - beam_angle = math.degrees(ob.data.spot_size) for i, frame in enumerate(kframes): + beamsize = next((fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_size'), ob.data.spot_size) blend = next((fc.evaluate(frame) for fc in fcurves if fc is not None and 
fc.data_path == 'spot_blend'), ob.data.spot_blend) - hot_spot = beam_angle - (blend * math.floor(beam_angle)) + hot_spot = math.degrees(beamsize) - (blend * math.floor(math.degrees(beamsize))) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) track_chunk.add_variable("hotspot", _3ds_float(round(hot_spot, 4))) @@ -1647,14 +1647,14 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT' bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color) bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype) - bg_image = next((lk.from_node.image.name for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False) + bg_image = next((lk.from_node.image for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False) gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False) background_color_chunk.add_variable("color", _3ds_float_color(bg_color)) background_chunk.add_subchunk(background_color_chunk) if bg_image: background_image = _3ds_chunk(BITMAP) background_flag = _3ds_chunk(USE_BITMAP) - background_image.add_variable("image", _3ds_string(sane_name(bg_image))) + background_image.add_variable("image", _3ds_string(sane_name(bg_image.name))) object_info.add_subchunk(background_image) object_info.add_subchunk(background_chunk) @@ -1710,7 +1710,7 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, object_info.add_subchunk(layerfog_chunk) if fognode or foglayer and layer.use_pass_mist: object_info.add_subchunk(use_fog_flag) - if use_keyframes and world.animation_data: + if use_keyframes and world.animation_data or 
world.node_tree.animation_data: kfdata.add_subchunk(make_ambient_node(world)) # Make a list of all materials used in the selected meshes (use dictionary, each material is added once) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 5dc8ca5aa..16964fb00 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -527,6 +527,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextWrapper.metallic = contextMaterial.metallic contextWrapper.roughness = contextMaterial.roughness contextWrapper.specular = contextMaterial.specular_intensity + contextWrapper.specular_tint = contextMaterial.specular_color[:] contextWrapper.emission_color = contextMaterial.line_color[:3] contextWrapper.emission_strength = contextMaterial.line_priority / 100 contextWrapper.alpha = contextMaterial.diffuse_color[3] = contextAlpha @@ -1000,6 +1001,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextWrapper.metallic = contextMaterial.metallic contextWrapper.roughness = contextMaterial.roughness contextWrapper.specular = contextMaterial.specular_intensity + contextWrapper.specular_tint = contextMaterial.specular_color[:] contextWrapper.emission_color = contextMaterial.line_color[:3] contextWrapper.emission_strength = contextMaterial.line_priority / 100 contextWrapper.alpha = contextMaterial.diffuse_color[3] = contextAlpha -- 2.30.2 From 242092e13795de7c70fbc32ebe50e1543c9c295a Mon Sep 17 00:00:00 2001 From: NRGSille Date: Wed, 27 Sep 2023 00:38:44 +0200 Subject: [PATCH 42/78] io_scene_3ds: Move specular color texture to specular tint --- io_scene_3ds/__init__.py | 2 +- io_scene_3ds/export_3ds.py | 10 +++++----- io_scene_3ds/import_3ds.py | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 3f4e65421..c9d20b13f 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -18,7 +18,7 @@ import bpy 
bl_info = { "name": "Autodesk 3DS format", "author": "Bob Holcomb, Campbell Barton, Sebastian Schrand", - "version": (2, 4, 7), + "version": (2, 4, 8), "blender": (4, 0, 0), "location": "File > Import-Export", "description": "3DS Import/Export meshes, UVs, materials, textures, " diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index bc543216c..a19d57b30 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -620,7 +620,7 @@ def make_material_texture_chunk(chunk_id, texslots, pct): if socket == 'Alpha': mapflags |= 0x40 - if texslot.socket_dst.identifier in {'Base Color', 'Specular IOR Level'}: + if texslot.socket_dst.identifier in {'Base Color', 'Specular Tint'}: mapflags |= 0x80 if image.colorspace_settings.name == 'Non-Color' else 0x200 mat_sub_mapflags.add_variable("mapflags", _3ds_ushort(mapflags)) @@ -650,11 +650,11 @@ def make_material_texture_chunk(chunk_id, texslots, pct): mat_sub_angle.add_variable("mapangle", _3ds_float(round(texslot.rotation[2], 6))) mat_sub.add_subchunk(mat_sub_angle) - if texslot.socket_dst.identifier in {'Base Color', 'Specular IOR Level'}: + if texslot.socket_dst.identifier in {'Base Color', 'Specular Tint'}: rgb = _3ds_chunk(MAP_COL1) # Add tint color base = texslot.owner_shader.material.diffuse_color[:3] spec = texslot.owner_shader.material.specular_color[:] - rgb.add_variable("mapcolor", _3ds_rgb_color(spec if texslot.socket_dst.identifier == 'Specular IOR Level' else base)) + rgb.add_variable("mapcolor", _3ds_rgb_color(spec if texslot.socket_dst.identifier == 'Specular Tint' else base)) mat_sub.add_subchunk(rgb) # Store all textures for this mapto in order. 
This at least is what the @@ -724,8 +724,8 @@ def make_material_chunk(material, image): material_chunk.add_subchunk(make_texture_chunk(MAT_DIFFUSEMAP, mxtex, mxpct)) primary_tex = True - if wrap.specular_texture: - spec = [wrap.specular_texture] + if wrap.specular_tint_texture: + spec = [wrap.specular_tint_texture] s_pct = material.specular_intensity matmap = make_material_texture_chunk(MAT_SPECMAP, spec, s_pct) if matmap: diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 16964fb00..5ad730567 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -264,7 +264,7 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2]) links.new(mixer.outputs['Color'], shader.inputs['Base Color']) elif mapto == 'SPECULARITY': - img_wrap = contextWrapper.specular_texture + img_wrap = contextWrapper.specular_tint_texture elif mapto == 'ALPHA': shader.location = (0, -300) img_wrap = contextWrapper.alpha_texture -- 2.30.2 From 0f795e2dabd579ff4170f00a7e8f6559e80ff1b9 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sun, 19 Nov 2023 16:21:56 +0100 Subject: [PATCH 43/78] New Addon: Import Autodesk .max Created a new addon for importing meshes and materials from Autodesk .max files --- io_import_max.py | 1518 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1518 insertions(+) create mode 100644 io_import_max.py diff --git a/io_import_max.py b/io_import_max.py new file mode 100644 index 000000000..3952e874e --- /dev/null +++ b/io_import_max.py @@ -0,0 +1,1518 @@ +# SPDX-FileCopyrightText: 2023 Sebastian Schrand +# +# SPDX-License-Identifier: GPL-2.0-or-later + + +#--- LICENSE --- +# GNU GPL +# Import is based on using information from olefile IO sourcecode +# and the FreeCAD Autodesk 3DS Max importer ImportMAX +# +# olefile (formerly OleFileIO_PL) is copyright (c) 2005-2018 Philippe Lagadec +# (https://www.decalage.info) +# +# ImportMAX is 
copyright (c) 2017-2022 Jens M. Plonka +# (https://www.github.com/jmplonka/Importer3D) + + +bl_info = { + "name": "Import Autodesk MAX (.max)", + "author": "Sebastian Sille, Philippe Lagadec, Jens M. Plonka", + "version": (1, 0, 0), + "blender": (4, 0, 0), + "location": "File > Import", + "description": "Import 3DSMAX meshes & materials", + "warning": "", + "filepath_url": "", + "category": "Import-Export"} + + +################## +# IMPORT MODULES # +################## + +import io, re +import os, sys, zlib +import struct, array +import time, datetime +import math, mathutils +import bpy, bpy_extras +from bpy_extras import node_shader_utils +from bpy_extras.image_utils import load_image +from bpy_extras.io_utils import axis_conversion +from bpy_extras.io_utils import orientation_helper + +@orientation_helper(axis_forward='Y', axis_up='Z') + +### IMPORT OPERATOR ### +class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): + """Import Autodesk MAX""" + bl_idname = "import_autodesk.max" + bl_label = "Import Autodesk MAX (.max)" + + filename_ext = ".max" + filter_glob: bpy.props.StringProperty(default="*.max", options={'HIDDEN'},) + + def execute(self, context): + keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob")) + global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up,).to_4x4() + keywords["global_matrix"] = global_matrix + + return load(self, context, **keywords) + +### REGISTER ### +def menu_func(self, context): + self.layout.operator(Import_max.bl_idname, text="Autodesk MAX (.max)") + +def register(): + bpy.utils.register_class(Import_max) + bpy.types.TOPBAR_MT_file_import.append(menu_func) + +def unregister(): + bpy.types.TOPBAR_MT_file_import.remove(menu_func) + bpy.utils.unregister_class(Import_max) + + +################### +# DATA STRUCTURES # +################### + +MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1' +WORD_CLSID = "00020900-0000-0000-C000-000000000046" + +MAXREGSECT = 
0xFFFFFFFA # (-6) maximum SECT +DIFSECT = 0xFFFFFFFC # (-4) denotes a DIFAT sector in a FAT +FATSECT = 0xFFFFFFFD # (-3) denotes a FAT sector in a FAT +ENDOFCHAIN = 0xFFFFFFFE # (-2) end of a virtual stream chain +FREESECT = 0xFFFFFFFF # (-1) unallocated sector +MAXREGSID = 0xFFFFFFFA # (-6) maximum directory entry ID +NOSTREAM = 0xFFFFFFFF # (-1) unallocated directory entry +UNKNOWN_SIZE = 0x7FFFFFFF +MIN_FILE_SIZE = 1536 + +STGTY_EMPTY = 0 #: empty directory entry +STGTY_STORAGE = 1 #: element is a storage object +STGTY_STREAM = 2 #: element is a stream object +STGTY_LOCKBYTES = 3 #: element is an ILockBytes object +STGTY_PROPERTY = 4 #: element is an IPropertyStorage object +STGTY_ROOT = 5 #: element is a root storage + +VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6; +VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11; +VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17; +VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23; +VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28; +VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64; +VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68; +VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72; +VT_VECTOR=0x1000; + +TYP_NAME = 0x0962 +INVALID_NAME = re.compile('^[0-9].*') +UNPACK_BOX_DATA = struct.Struct('<HIHHBff').unpack_from # NOTE(review): format string lost in extraction — verify against upstream io_import_max.py + + +def is_maxfile(filename): + """Test if the file is a MAX OLE2 container.""" + if hasattr(filename, 'read'): + header = filename.read(len(MAGIC)) + filename.seek(0) + elif isinstance(filename, bytes) and len(filename) >= MIN_FILE_SIZE: + header = filename[:len(MAGIC)] + else: + with open(filename, 'rb') as fp: + header = fp.read(len(MAGIC)) + if header == MAGIC: + return True + else: + return False + + +class MaxStream(io.BytesIO): + + def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize): + unknown_size = False + if size == UNKNOWN_SIZE: + size = len(fat)*sectorsize + unknown_size = True + nb_sectors = (size + (sectorsize-1)) // sectorsize + + data = [] + for i in range(nb_sectors): + try: + fp.seek(offset + sectorsize * sect) + except: + break + sector_data = fp.read(sectorsize) + data.append(sector_data) + try: + 
sect = fat[sect] & 0xFFFFFFFF + except IndexError: + break + data = b"".join(data) + if len(data) >= size: + data = data[:size] + self.size = size + else: + self.size = len(data) + io.BytesIO.__init__(self, data) + + +class MaxFileDirEntry: + STRUCT_DIRENTRY = '<64sHBBIII16sIQQIII' + DIRENTRY_SIZE = 128 + assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE + + def __init__(self, entry, sid, maxfile): + self.sid = sid + self.maxfile = maxfile + self.kids = [] + self.kids_dict = {} + self.used = False + ( + self.name_raw, + self.namelength, + self.entry_type, + self.color, + self.sid_left, + self.sid_right, + self.sid_child, + clsid, + self.dwUserFlags, + self.createTime, + self.modifyTime, + self.isectStart, + self.sizeLow, + self.sizeHigh + ) = struct.unpack(MaxFileDirEntry.STRUCT_DIRENTRY, entry) + + if self.namelength > 64: + self.namelength = 64 + self.name_utf16 = self.name_raw[:(self.namelength - 2)] + self.name = maxfile._decode_utf16_str(self.name_utf16) + # print('DirEntry SID=%d: %s' % (self.sid, repr(self.name))) + if maxfile.sectorsize == 512: + self.size = self.sizeLow + else: + self.size = self.sizeLow + (int(self.sizeHigh) << 32) + self.clsid = _clsid(clsid) + self.is_minifat = False + if self.entry_type in (STGTY_ROOT, STGTY_STREAM) and self.size > 0: + if self.size < maxfile.minisectorcutoff \ + and self.entry_type == STGTY_STREAM: # only streams can be in MiniFAT + self.is_minifat = True + else: + self.is_minifat = False + maxfile._check_duplicate_stream(self.isectStart, self.is_minifat) + self.sect_chain = None + + def build_sect_chain(self, maxfile): + if self.sect_chain: + return + if self.entry_type not in (STGTY_ROOT, STGTY_STREAM) or self.size == 0: + return + self.sect_chain = list() + if self.is_minifat and not maxfile.minifat: + maxfile.loadminifat() + next_sect = self.isectStart + while next_sect != ENDOFCHAIN: + self.sect_chain.append(next_sect) + if self.is_minifat: + next_sect = maxfile.minifat[next_sect] + else: + next_sect = 
maxfile.fat[next_sect] + + def build_storage_tree(self): + if self.sid_child != NOSTREAM: + self.append_kids(self.sid_child) + self.kids.sort() + + def append_kids(self, child_sid): + if child_sid == NOSTREAM: + return + else: + child = self.maxfile._load_direntry(child_sid) + if child.used: + return + child.used = True + self.append_kids(child.sid_left) + name_lower = child.name.lower() + self.kids.append(child) + self.kids_dict[name_lower] = child + self.append_kids(child.sid_right) + child.build_storage_tree() + + def __eq__(self, other): + return self.name == other.name + + def __lt__(self, other): + return self.name < other.name + + def __ne__(self, other): + return not self.__eq__(other) + + def __le__(self, other): + return self.__eq__(other) or self.__lt__(other) + + +class ImportMaxFile: + + def __init__(self, filename=None, write_mode=False, debug=False): + self.write_mode = write_mode + self._filesize = None + self.byte_order = None + self.directory_fp = None + self.direntries = None + self.dll_version = None + self.fat = None + self.first_difat_sector = None + self.first_dir_sector = None + self.first_mini_fat_sector = None + self.fp = None + self.header_clsid = None + self.header_signature = None + self.metadata = None + self.mini_sector_shift = None + self.mini_sector_size = None + self.mini_stream_cutoff_size = None + self.minifat = None + self.minifatsect = None + self.minisectorcutoff = None + self.minisectorsize = None + self.ministream = None + self.minor_version = None + self.nb_sect = None + self.num_difat_sectors = None + self.num_dir_sectors = None + self.num_fat_sectors = None + self.num_mini_fat_sectors = None + self.reserved1 = None + self.reserved2 = None + self.root = None + self.sector_shift = None + self.sector_size = None + self.transaction_signature_number = None + if filename: + self.open(filename, write_mode=write_mode) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def 
_decode_utf16_str(self, utf16_str, errors='replace'): + unicode_str = utf16_str.decode('UTF-16LE', errors) + return unicode_str + + def open(self, filename, write_mode=False): + self.write_mode = write_mode + if hasattr(filename, 'read'): + self.fp = filename + elif isinstance(filename, bytes) and len(filename) >= MIN_FILE_SIZE: + self.fp = io.BytesIO(filename) + else: + if self.write_mode: + mode = 'r+b' + else: + mode = 'rb' + self.fp = open(filename, mode) + filesize=0 + self.fp.seek(0, os.SEEK_END) + try: + filesize = self.fp.tell() + finally: + self.fp.seek(0) + self._filesize = filesize + self._used_streams_fat = [] + self._used_streams_minifat = [] + header = self.fp.read(512) + + fmt_header = '<8s16sHHHHHHLLLLLLLLLL' + header_size = struct.calcsize(fmt_header) + header1 = header[:header_size] + ( + self.header_signature, + self.header_clsid, + self.minor_version, + self.dll_version, + self.byte_order, + self.sector_shift, + self.mini_sector_shift, + self.reserved1, + self.reserved2, + self.num_dir_sectors, + self.num_fat_sectors, + self.first_dir_sector, + self.transaction_signature_number, + self.mini_stream_cutoff_size, + self.first_mini_fat_sector, + self.num_mini_fat_sectors, + self.first_difat_sector, + self.num_difat_sectors + ) = struct.unpack(fmt_header, header1) + + self.sector_size = 2**self.sector_shift + self.mini_sector_size = 2**self.mini_sector_shift + if self.mini_stream_cutoff_size != 0x1000: + self.mini_stream_cutoff_size = 0x1000 + self.nb_sect = ((filesize + self.sector_size-1) // self.sector_size) - 1 + + # file clsid + self.header_clsid = _clsid(header[8:24]) + self.sectorsize = self.sector_size #1 << i16(header, 30) + self.minisectorsize = self.mini_sector_size #1 << i16(header, 32) + self.minisectorcutoff = self.mini_stream_cutoff_size # i32(header, 56) + self._check_duplicate_stream(self.first_dir_sector) + if self.num_mini_fat_sectors: + self._check_duplicate_stream(self.first_mini_fat_sector) + if self.num_difat_sectors: + 
self._check_duplicate_stream(self.first_difat_sector) + + # Load file allocation tables + self.loadfat(header) + self.loaddirectory(self.first_dir_sector) + self.minifatsect = self.first_mini_fat_sector + + def close(self): + self.fp.close() + + def _check_duplicate_stream(self, first_sect, minifat=False): + if minifat: + used_streams = self._used_streams_minifat + else: + if first_sect in (DIFSECT,FATSECT,ENDOFCHAIN,FREESECT): + return + used_streams = self._used_streams_fat + if first_sect in used_streams: + pass + else: + used_streams.append(first_sect) + + def sector_array(self, sect): + ary = array.array('I', sect) + if sys.byteorder == 'big': + ary.byteswap() + return ary + + def loadfat_sect(self, sect): + if isinstance(sect, array.array): + fat1 = sect + else: + fat1 = self.sector_array(sect) + isect = None + for isect in fat1: + isect = isect & 0xFFFFFFFF + if isect == ENDOFCHAIN or isect == FREESECT: + break + sector = self.getsect(isect) + nextfat = self.sector_array(sector) + self.fat = self.fat + nextfat + return isect + + def loadfat(self, header): + sect = header[76:512] + self.fat = array.array('I') + self.loadfat_sect(sect) + if self.num_difat_sectors != 0: + nb_difat_sectors = (self.sectorsize//4) - 1 + nb_difat = (self.num_fat_sectors - 109 + nb_difat_sectors - 1) // nb_difat_sectors + isect_difat = self.first_difat_sector + for i in range(nb_difat): + sector_difat = self.getsect(isect_difat) + difat = self.sector_array(sector_difat) + self.loadfat_sect(difat[:nb_difat_sectors]) + isect_difat = difat[nb_difat_sectors] + if len(self.fat) > self.nb_sect: + self.fat = self.fat[:self.nb_sect] + + def loadminifat(self): + stream_size = self.num_mini_fat_sectors * self.sector_size + nb_minisectors = (self.root.size + self.mini_sector_size - 1) // self.mini_sector_size + used_size = nb_minisectors * 4 + sect = self._open(self.minifatsect, stream_size, force_FAT=True).read() + self.minifat = self.sector_array(sect) + self.minifat = 
self.minifat[:nb_minisectors] + + def getsect(self, sect): + try: + self.fp.seek(self.sectorsize * (sect + 1)) + except: + print('MAX sector index out of range') + sector = self.fp.read(self.sectorsize) + return sector + + def loaddirectory(self, sect): + self.directory_fp = self._open(sect, force_FAT=True) + max_entries = self.directory_fp.size // 128 + self.direntries = [None] * max_entries + root_entry = self._load_direntry(0) + self.root = self.direntries[0] + self.root.build_storage_tree() + + def _load_direntry (self, sid): + if self.direntries[sid] is not None: + return self.direntries[sid] + self.directory_fp.seek(sid * 128) + entry = self.directory_fp.read(128) + self.direntries[sid] = MaxFileDirEntry(entry, sid, self) + return self.direntries[sid] + + def _open(self, start, size = UNKNOWN_SIZE, force_FAT=False): + if size < self.minisectorcutoff and not force_FAT: + if not self.ministream: + self.loadminifat() + size_ministream = self.root.size + self.ministream = self._open(self.root.isectStart, + size_ministream, force_FAT=True) + return MaxStream(fp=self.ministream, sect=start, size=size, + offset=0, sectorsize=self.minisectorsize, + fat=self.minifat, filesize=self.ministream.size) + else: + return MaxStream(fp=self.fp, sect=start, size=size, + offset=self.sectorsize, + sectorsize=self.sectorsize, fat=self.fat, + filesize=self._filesize) + + def _list(self, files, prefix, node, streams=True, storages=False): + prefix = prefix + [node.name] + for entry in node.kids: + if entry.entry_type == STGTY_STORAGE: + if storages: + files.append(prefix[1:] + [entry.name]) + self._list(files, prefix, entry, streams, storages) + elif entry.entry_type == STGTY_STREAM: + if streams: + files.append(prefix[1:] + [entry.name]) + + def listdir(self, streams=True, storages=False): + files = [] + self._list(files, [], self.root, streams, storages) + return files + + def _find(self, filename): + if isinstance(filename, str): + filename = filename.split('/') + node = 
self.root + for name in filename: + for kid in node.kids: + if kid.name.lower() == name.lower(): + break + node = kid + return node.sid + + def openstream(self, filename): + sid = self._find(filename) + entry = self.direntries[sid] + return self._open(entry.isectStart, entry.size) + + def get_type(self, filename): + try: + sid = self._find(filename) + entry = self.direntries[sid] + return entry.entry_type + except: + return False + + def getclsid(self, filename): + sid = self._find(filename) + entry = self.direntries[sid] + return entry.clsid + + def get_size(self, filename): + sid = self._find(filename) + entry = self.direntries[sid] + return entry.size + + def get_rootentry_name(self): + return self.root.name + + def getproperties(self, filename, convert_time=False, no_conversion=None): + if no_conversion == None: + no_conversion = [] + streampath = filename + if not isinstance(streampath, str): + streampath = '/'.join(streampath) + fp = self.openstream(filename) + data = {} + try: + stream = fp.read(28) + clsid = _clsid(stream[8:24]) + stream = fp.read(20) + fmtid = _clsid(stream[:16]) + fp.seek(i32(stream, 16)) + stream = b"****" + fp.read(i32(fp.read(4)) - 4) + num_props = i32(stream, 4) + except BaseException as exc: + return data + + num_props = min(num_props, int(len(stream) / 8)) + for i in range(num_props): + property_id = 0 + try: + property_id = i32(stream, 8 + i*8) + offset = i32(stream, 12 + i*8) + property_type = i32(stream, offset) + if property_type == VT_I2: # 16-bit signed integer + value = i16(stream, offset + 4) + if value >= 32768: + value = value - 65536 + elif property_type == VT_UI2: # 2-byte unsigned integer + value = i16(stream, offset + 4) + elif property_type in (VT_I4, VT_INT, VT_ERROR): + value = i32(stream, offset + 4) + elif property_type in (VT_UI4, VT_UINT): # 4-byte unsigned integer + value = i32(stream, offset + 4) + elif property_type in (VT_BSTR, VT_LPSTR): + count = i32(stream, offset + 4) + value = stream[offset + 8:offset + 
8 + count - 1] + value = value.replace(b'\x00', b'') + elif property_type == VT_BLOB: + count = i32(stream, offset + 4) + value = stream[offset + 8:offset + 8 + count] + elif property_type == VT_LPWSTR: + count = i32(stream, offset + 4) + value = self._decode_utf16_str(stream[offset + 8:offset + 8 + count*2]) + elif property_type == VT_FILETIME: + value = int(i32(stream, offset + 4)) + (int(i32(stream, offset + 8)) << 32) + if convert_time and property_id not in no_conversion: + _FILETIME_null_date = datetime.datetime(1601, 1, 1, 0, 0, 0) + value = _FILETIME_null_date + datetime.timedelta(microseconds=value // 10) + else: + value = value // 10000000 + elif property_type == VT_UI1: # 1-byte unsigned integer + value = i8(stream[offset + 4]) + elif property_type == VT_CLSID: + value = _clsid(stream[offset + 4:offset + 20]) + elif property_type == VT_CF: + count = i32(stream, offset + 4) + value = stream[offset + 8:offset + 8 + count] + elif property_type == VT_BOOL: + value = bool(i16(stream, offset + 4)) + else: + value = None + + data[property_id] = value + except BaseException as exc: + print('Error while parsing property_id:', exc) + return data + + +class MaxChunk(): + + def __init__(self, types, size, level, number): + self.number = number + self.types = types + self.level = level + self.parent = None + self.previous = None + self.next = None + self.size = size + self.unknown = True + self.format = None + self.data = None + self.resolved = False + + def __str__(self): + if (self.unknown == True): + return "%s[%4x] %04X: %s" %("" * self.level, self.number, self.types, ":".join("%02x"%(c) for c in self.data)) + return "%s[%4x] %04X: %s=%s" %("" * self.level, self.number, self.types, self.format, self.data) + + +class ByteArrayChunk(MaxChunk): + + def __init__(self, types, data, level, number): + MaxChunk.__init__(self, types, data, level, number) + + def set(self, data, name, fmt, start, end): + try: + self.data = struct.unpack(fmt, data[start:end]) + self.format 
= name + self.unknown = False + except Exception as exc: + self.data = data + # print('StructError:', exc, name) + + def set_string(self, data): + try: + self.data = data.decode('UTF-16LE') + self.format = "Str16" + self.unknown = False + except: + self.data = data + + def set_le16_string(self, data): + try: + long, offset = get_long(data, 0) + self.data = data[offset:offset + l * 2].decode('utf-16-le') + if (self.data[-1] == b'\0'): + self.data = self.data[0:-1] + self.format = "LStr16" + self.unknown = False + except: + self.data = data + + def set_data(self, data): + if (self.types in [0x0340, 0x4001, 0x0456, 0x0962]): + self.set_string(data) + elif (self.types in [0x2034, 0x2035]): + self.set(data, "ints", '<'+'I'*int(len(data) / 4), 0, len(data)) + elif (self.types in [0x2501, 0x2503, 0x2504, 0x2505, 0x2511]): + self.set(data, "floats", '<'+'f'*int(len(data) / 4), 0, len(data)) + elif (self.types == 0x2510): + self.set(data, "struct", '<'+'f'*int(len(data) / 4 - 1) + 'I', 0, len(data)) + elif (self.types == 0x0100): + self.set(data, "float", ' 3): + return get_rotation(refs[0]) + elif (uid == 0x3A90416731381913): # Rotation Wire + return get_rotation(get_references(pos)[0]) + if (rotation): + mtx = mathutils.Matrix.Rotation(rotation.angle, 4, rotation.axis) + return mtx + + +def get_scale(pos): + mtx = mathutils.Matrix.Identity(4) + if (pos): + uid = get_guid(pos) + if (uid == 0x2010): # Bezier Scale + scale = pos.get_first(0x2501) + if (scale is None): + scale = pos.get_first(0x2505) + pos = scale.data + elif (uid == 0x0000000000442315): # TCB Zoom + scale = pos.get_first(0x2501) + if (scale is None): + scale = pos.get_first(0x2505) + pos = scale.data + elif (uid == 0xFEEE238B118F7C01): # ScaleXYZ + pos = get_point_3d(pos, 1.0) + else: + return mtx + mtx = mathutils.Matrix.Diagonal(pos[:3]).to_4x4() + return mtx + + +def create_matrix(prc): + mtx = mathutils.Matrix.Identity(4) + pos = rot = scl = None + uid = get_guid(prc) + if (uid == 0x2005): # 
Position/Rotation/Scale + pos = get_position(get_references(prc)[0]) + rot = get_rotation(get_references(prc)[1]) + scl = get_scale(get_references(prc)[2]) + elif (uid == 0x9154): # BipSlave Control + biped_sub_anim = get_references(prc)[2] + refs = get_references(biped_sub_anim) + scl = get_scale(get_references(refs[1])[0]) + rot = get_rotation(get_references(refs[2])[0]) + pos = get_position(get_references(refs[3])[0]) + if (pos is not None): + mtx = pos @ mtx + if (rot is not None): + mtx = rot @ mtx + if (scl is not None): + mtx = scl @ mtx + return mtx + + +def get_property(properties, idx): + for child in properties.children: + if (child.types & 0x100E): + if (get_short(child.data, 0)[0] == idx): + return child + return None + + +def get_color(colors, idx): + prop = get_property(colors, idx) + if (prop is not None): + siz = 15 if (len(prop.data) > 23) else 11 + col, offset = get_floats(prop.data, siz, 3) + return (col[0], col[1], col[2]) + return None + + +def get_float(colors, idx): + prop = get_property(colors, idx) + if (prop is not None): + fl, offset = get_float(prop.data, 15) + return fl + return None + + +def get_standard_material(refs): + material = None + try: + if (len(refs) > 2): + colors = refs[2] + parameters = get_references(colors)[0] + material = Material() + material.set('ambient', get_color(parameters, 0x00)) + material.set('diffuse', get_color(parameters, 0x01)) + material.set('specular', get_color(parameters, 0x02)) + material.set('emissive', get_color(parameters, 0x08)) + material.set('shinines', get_float(parameters, 0x0A)) + transparency = refs[4] # ParameterBlock2 + material.set('transparency', get_float(transparency, 0x02)) + except: + pass + return material + + +def get_vray_material(vry): + material = Material() + try: + material.set('diffuse', get_color(vry, 0x01)) + material.set('ambient', get_color(vry, 0x02)) + material.set('specular', get_color(vry, 0x05)) + material.set('emissive', get_color(vry, 0x05)) + 
material.set('shinines', get_float(vry, 0x0B)) + material.set('transparency', get_float(vry, 0x02)) + except: + pass + return material + + +def get_arch_material(ad): + material = Material() + try: + material.set('diffuse', get_color(ad, 0x1A)) + material.set('ambient', get_color(ad, 0x02)) + material.set('specular', get_color(ad, 0x05)) + material.set('emissive', get_color(ad, 0x05)) + material.set('shinines', get_float(ad, 0x0B)) + material.set('transparency', get_float(ad, 0x02)) + except: + pass + return material + + +def adjust_material(obj, mat): + material = None + if (mat is not None): + uid = get_guid(mat) + if (uid == 0x0002): # Standard + refs = get_references(mat) + material = get_standard_material(refs) + elif (uid == 0x0000000000000200): # Multi/Sub-Object + refs = get_references(mat) + material = adjust_material(obj, refs[-1]) + elif (uid == 0x7034695C37BF3F2F): # VRayMtl + refs = get_reference(mat) + material = get_vray_material(refs[1]) + elif (uid == 0x4A16365470B05735): # Arch + refs = get_references(mat) + material = get_arch_material(refs[0]) + if (obj is not None) and (material is not None): + objMaterial = bpy.data.materials.new(get_class_name(mat)) + obj.data.materials.append(objMaterial) + objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) + objMaterial.specular_color[:3] = material.get('specular', (0,0,0)) + objMaterial.roughness = 1.0 - material.get('shinines', 0.6) + + +def create_shape(context, pts, indices, node, key, prc, mat): + name = node.get_first(TYP_NAME).data + shape = bpy.data.meshes.new(name) + if (key is not None): + name = "%s_%d" %(name, key) + mtx = create_matrix(prc) + data = [] + if (pts): + loopstart = [] + looplines = loop = 0 + nbr_faces = len(indices) + for fid in range(nbr_faces): + polyface = indices[fid] + looplines += len(polyface) + shape.vertices.add(len(pts) // 3) + shape.loops.add(looplines) + shape.polygons.add(nbr_faces) + shape.vertices.foreach_set("co", pts) + for vtx in indices: + 
loopstart.append(loop) + data.extend(vtx) + loop += len(vtx) + shape.polygons.foreach_set("loop_start", loopstart) + shape.loops.foreach_set("vertex_index", data) + + if (len(data) > 0): + shape.validate() + shape.update() + obj = bpy.data.objects.new(name, shape) + context.view_layer.active_layer_collection.collection.objects.link(obj) + adjust_material(obj, mat) + return True + return True + + +def calc_point(data): + points = [] + long, offset = get_long(data, 0) + while (offset < len(data)): + val, offset = get_long(data, offset) + flt, offset = get_floats(data, offset, 3) + points.extend(flt) + return points + + +def calc_point_float(data): + points = [] + long, offset = get_long(data, 0) + while (offset < len(data)): + flt, offset = get_floats(data, offset, 3) + points.extend(flt) + return points + + +def get_poly_4p(points): + vertex = {} + for point in points: + ngon = point.points + key = point.fH + if (key not in vertex): + vertex[key] = [] + vertex[key].append(ngon) + return vertex + + +def get_poly_5p(data): + count, offset = get_long(data, 0) + ngons = [] + while count > 0: + pt, offset = get_longs(data, offset, 3) + offset += 8 + ngons.append(pt) + count -= 1 + return ngons + + +def get_poly_6p(data): + count, offset = get_long(data, 0) + polylist = [] + while (offset < len(data)): + long, offset = get_longs(data, offset, 6) + i = 5 + while ((i > 3) and (long[i] < 0)): + i -= 1 + if (i > 2): + polylist.append(long[1:i]) + return polylist + + +def get_poly_data(chunk): + offset = 0 + polylist = [] + data = chunk.data + while (offset < len(data)): + count, offset = get_long(data, offset) + points, offset = get_longs(data, offset, count) + polylist.append(points) + return polylist + + +def get_point_array(values): + verts = [] + if len(values) >= 4: + count, offset = get_long(values, 0) + while (count > 0): + floats, offset = get_floats(values, offset, 3) + verts.extend(floats) + count -= 1 + return verts + + +def calc_point_3d(chunk): + data = 
chunk.data + count, offset = get_long(data, 0) + pointlist = [] + try: + while (offset < len(data)): + pt = Point3d() + long, offset = get_long(data, offset) + pt.points, offset = get_longs(data, offset, long) + pt.flags, offset = get_short(data, offset) + if ((pt.flags & 0x01) != 0): + pt.f1, offset = get_long(data, offset) + if ((pt.flags & 0x08) != 0): + pt.fH, offset = get_short(data, offset) + if ((pt.flags & 0x10) != 0): + pt.f2, offset = get_long(data, offset) + if ((pt.flags & 0x20) != 0): + pt.fA, offset = get_longs(data, offset, 2 * (long - 3)) + if (len(pt.points) > 0): + pointlist.append(pt) + except Exception as exc: + print('ArrayError:\n', "%s: offset = %d\n" %(exc, offset)) + raise exc + return pointlist + + +def create_editable_poly(context, node, msh, mat, mtx): + coords = point3i = point4i = point6i = pointNi = None + name = node.get_first(TYP_NAME).data + poly = msh.get_first(0x08FE) + created = False + if (poly): + for child in poly.children: + if (child.types == 0x0100): + coords = calc_point(child.data) + elif (child.types == 0x0108): + point6i = child.data + elif (child.types == 0x011A): + point4i = calc_point_3d(child) + if (point4i is not None): + vertex = get_poly_4p(point4i) + if (len(vertex) > 0): + for key, ngons in vertex.items(): + created |= create_shape(context, coords, ngons, node, key, mtx, mat) + else: + created = True + elif (point6i is not None): + ngons = get_poly_6p(point6i) + created = create_shape(context, coords, ngons, node, None, mtx, mat) + return created + + +def create_editable_mesh(context, node, msh, mat, mtx): + name = node.get_first(TYP_NAME).data + poly = msh.get_first(0x08FE) + created = False + if (poly): + vertex_chunk = poly.get_first(0x0914) + clsid_chunk = poly.get_first(0x0912) + coords = get_point_array(vertex_chunk.data) + ngons = get_poly_5p(clsid_chunk.data) + created = create_shape(context, coords, ngons, node, None, mtx, mat) + return created + + +def get_matrix_mesh_material(node): + refs = 
get_reference(node) + if (refs): + mtx = refs.get(0, None) + msh = refs.get(1, None) + mat = refs.get(3, None) + lyr = refs.get(6, None) + else: + refs = get_references(node) + mtx = refs[0] + msh = refs[1] + mat = refs[3] + lyr = None + if (len(refs) > 6): + lyr = refs[6] + return mtx, msh, mat, lyr + + +def adjust_matrix(obj, node): + mtx = create_matrix(node).flatten() + plc = mathutils.Matrix(*mtx) + obj.matrix_world = plc + return plc + + +def create_shell(context, node, shell, mat, mtx): + name = node.get_first(TYP_NAME).data + refs = get_references(shell) + msh = refs[-1] + created = create_editable_mesh(context, node, msh, mtx, mat) + return created + + +def create_skipable(context, node, msh, mat, mtx, skip): + name = node.get_first(TYP_NAME).data + print(" skipping %s '%s'... " %(skip, name)) + return True + + +def create_mesh(context, node, msh, mtx, mat): + created = False + uid = get_guid(msh) + msh.geometry = None + if (uid == 0x0E44F10B3): + created = create_editable_mesh(context, node, msh, mat, mtx) + elif (uid == 0x192F60981BF8338D): + created = create_editable_poly(context, node, msh, mat, mtx) + elif (uid in {0x2032, 0x2033}): + created = create_shell(context, node, msh, mat, mtx) + else: + skip = SKIPPABLE.get(uid) + if (skip is not None): + created = create_skipable(context, node, msh, mat, mtx, skip) + return created, uid + + +def create_object(context, node): + parent = get_node_parent(node) + node.parent = parent + name = get_node_name(node) + mtx, msh, mat, lyr = get_matrix_mesh_material(node) + while ((parent is not None) and (get_guid(parent) != 0x0002)): + name = "%s/%s" %(get_node_name(parent), name) + parent_mtx = parent.matrix + if (parent_mtx): + mtx = mtx.dot(parent_mtx) + parent = get_node_parent(parent) + created, uid = create_mesh(context, node, msh, mtx, mat) + + +def make_scene(context, parent, level=0): + for chunk in parent.children: + if (isinstance(chunk, SceneChunk)): + if ((get_guid(chunk) == 0x0001) and 
(get_super_id(chunk) == 0x0001)): + try: + create_object(context, chunk) + except Exception as exc: + print('ImportError:', exc, chunk) + + +def read_scene(context, maxfile, filename): + global SCENE_LIST + SCENE_LIST = read_chunks(maxfile, 'Scene', filename+'.Scn.bin', containerReader=SceneChunk) + make_scene(context, SCENE_LIST[0], 0) + + +def read(context, filename): + if (is_maxfile(filename)): + maxfile = ImportMaxFile(filename) + prop = maxfile.getproperties('\x05DocumentSummaryInformation', convert_time=True, no_conversion=[10]) + prop = maxfile.getproperties('\x05SummaryInformation', convert_time=True, no_conversion=[10]) + read_class_data(maxfile, filename) + read_config(maxfile, filename) + read_directory(maxfile, filename) + read_class_directory(maxfile, filename) + read_video_postqueue(maxfile, filename) + read_scene(context, maxfile, filename) + else: + print("File seems to be no 3D Studio Max file!") + + +def load(operator, context, filepath="", global_matrix=None): + read(context, filepath) + + return {'FINISHED'} \ No newline at end of file -- 2.30.2 From d927216d830283275b3f82f729344260aa1df12d Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sun, 19 Nov 2023 22:24:56 +0100 Subject: [PATCH 44/78] Import_max: Added code documentation Added class descriptions and code documentation --- io_import_max.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 3952e874e..a82993274 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -206,6 +206,7 @@ def _clsid(clsid): tuple(map(i8, clsid[8:16])))) def is_maxfile (filename): + """Test if file is a MAX OLE2 container.""" if hasattr(filename, 'read'): header = filename.read(len(MAGIC)) filename.seek(0) @@ -221,7 +222,7 @@ def is_maxfile (filename): class MaxStream(io.BytesIO): - + """Returns an instance of the BytesIO class as read-only file object.""" def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize): 
unknown_size = False if size == UNKNOWN_SIZE: @@ -251,6 +252,7 @@ class MaxStream(io.BytesIO): class MaxFileDirEntry: + """Directory Entry for a stream or storage.""" STRUCT_DIRENTRY = '<64sHBBIII16sIQQIII' DIRENTRY_SIZE = 128 assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE @@ -348,7 +350,7 @@ class MaxFileDirEntry: class ImportMaxFile: - + """Representing an interface for importing .max files.""" def __init__(self, filename=None, write_mode=False, debug=False): self.write_mode = write_mode self._filesize = None @@ -691,7 +693,7 @@ class ImportMaxFile: class MaxChunk(): - + """Representing a chunk of a .max file.""" def __init__(self, types, size, level, number): self.number = number self.types = types @@ -712,7 +714,7 @@ class MaxChunk(): class ByteArrayChunk(MaxChunk): - + """A byte array of a .max chunk.""" def __init__(self, types, data, level, number): MaxChunk.__init__(self, types, data, level, number) @@ -761,7 +763,7 @@ class ByteArrayChunk(MaxChunk): class ClassIDChunk(ByteArrayChunk): - + """The class ID subchunk of a .max chunk.""" def __init__(self, types, data, level, number): MaxChunk.__init__(self, types, data, level, number) self.dll = None @@ -777,7 +779,7 @@ class ClassIDChunk(ByteArrayChunk): class DirectoryChunk(ByteArrayChunk): - + """The directory chunk of a .max file.""" def __init__(self, types, data, level, number): MaxChunk.__init__(self, types, data, level, number) @@ -789,7 +791,7 @@ class DirectoryChunk(ByteArrayChunk): class ContainerChunk(MaxChunk): - + """A container chunk in a .max file wich includes byte arrays.""" def __init__(self, types, data, level, number, primitiveReader=ByteArrayChunk): MaxChunk.__init__(self, types, data, level, number) self.primitiveReader = primitiveReader @@ -813,7 +815,7 @@ class ContainerChunk(MaxChunk): class SceneChunk(ContainerChunk): - + """The scene chunk of a .max file wich includes the relevant data for blender.""" def __init__(self, types, data, level, number, 
primitiveReader=ByteArrayChunk): MaxChunk.__init__(self, types, data, level, number) self.primitiveReader = primitiveReader @@ -833,7 +835,7 @@ class SceneChunk(ContainerChunk): class ChunkReader(): - + """The chunk reader class for decoding the byte arrays.""" def __init__(self, name=None): self.name = name @@ -872,7 +874,7 @@ class ChunkReader(): class Point3d(): - + """Representing a three dimensional vector plus pointflag.""" def __init__(self): self.points = None self.flags = 0 @@ -886,7 +888,7 @@ class Point3d(): class Material(): - + """Representing a material chunk of a scene chunk.""" def __init__(self): self.data = {} -- 2.30.2 From ab6139b6a45485bbb202577f2fcd2c73865ced08 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 21 Nov 2023 02:50:18 +0100 Subject: [PATCH 45/78] Import_max: Removed unused definitions Removed unused definitions Some cleanup --- io_import_max.py | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index a82993274..b71b30c1a 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -90,12 +90,12 @@ NOSTREAM = 0xFFFFFFFF # (-1) unallocated directory entry UNKNOWN_SIZE = 0x7FFFFFFF MIN_FILE_SIZE = 1536 -STGTY_EMPTY = 0 #: empty directory entry -STGTY_STORAGE = 1 #: element is a storage object -STGTY_STREAM = 2 #: element is a stream object -STGTY_LOCKBYTES = 3 #: element is an ILockBytes object -STGTY_PROPERTY = 4 #: element is an IPropertyStorage object -STGTY_ROOT = 5 #: element is a root storage +STGTY_EMPTY = 0 # empty directory entry +STGTY_STORAGE = 1 # element is a storage object +STGTY_STREAM = 2 # element is a stream object +STGTY_LOCKBYTES = 3 # element is an ILockBytes object +STGTY_PROPERTY = 4 # element is an IPropertyStorage object +STGTY_ROOT = 5 # element is a root storage VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6; VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11; @@ -226,7 +226,7 @@ class 
MaxStream(io.BytesIO): def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize): unknown_size = False if size == UNKNOWN_SIZE: - size = len(fat)*sectorsize + size = len(fat) * sectorsize unknown_size = True nb_sectors = (size + (sectorsize-1)) // sectorsize @@ -411,7 +411,7 @@ class ImportMaxFile: else: mode = 'rb' self.fp = open(filename, mode) - filesize=0 + filesize = 0 self.fp.seek(0, os.SEEK_END) try: filesize = self.fp.tell() @@ -568,22 +568,6 @@ class ImportMaxFile: sectorsize=self.sectorsize, fat=self.fat, filesize=self._filesize) - def _list(self, files, prefix, node, streams=True, storages=False): - prefix = prefix + [node.name] - for entry in node.kids: - if entry.entry_type == STGTY_STORAGE: - if storages: - files.append(prefix[1:] + [entry.name]) - self._list(files, prefix, entry, streams, storages) - elif entry.entry_type == STGTY_STREAM: - if streams: - files.append(prefix[1:] + [entry.name]) - - def listdir(self, streams=True, storages=False): - files = [] - self._list(files, [], self.root, streams, storages) - return files - def _find(self, filename): if isinstance(filename, str): filename = filename.split('/') @@ -666,7 +650,7 @@ class ImportMaxFile: value = stream[offset + 8:offset + 8 + count] elif property_type == VT_LPWSTR: count = i32(stream, offset + 4) - value = self._decode_utf16_str(stream[offset + 8:offset + 8 + count*2]) + value = self._decode_utf16_str(stream[offset + 8:offset + 8 + count * 2]) elif property_type == VT_FILETIME: value = int(i32(stream, offset + 4)) + (int(i32(stream, offset + 8)) << 32) if convert_time and property_id not in no_conversion: -- 2.30.2 From 049a6fb9329c7b741c1a4194750a381ab14ae9c4 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 21 Nov 2023 03:02:11 +0100 Subject: [PATCH 46/78] Import_max: Removed unused code Removed unused definition --- io_import_max.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index b71b30c1a..c938b11f5 100644 --- 
a/io_import_max.py +++ b/io_import_max.py @@ -1142,14 +1142,6 @@ def get_color(colors, idx): return None -def get_float(colors, idx): - prop = get_property(colors, idx) - if (prop is not None): - fl, offset = get_float(prop.data, 15) - return fl - return None - - def get_standard_material(refs): material = None try: -- 2.30.2 From 8578642860c9108756d4cb519b7912ec36a60635 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 21 Nov 2023 10:26:17 +0100 Subject: [PATCH 47/78] Import_max: Added material float value Added definition for material float value Some cleanup --- io_import_max.py | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index c938b11f5..4cd4963d5 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1,10 +1,6 @@ # SPDX-FileCopyrightText: 2023 Sebastian Schrand # # SPDX-License-Identifier: GPL-2.0-or-later - - -#--- LICENSE --- -# GNU GPL # Import is based on using information from olefile IO sourcecode # and the FreeCAD Autodesk 3DS Max importer ImportMAX # @@ -37,8 +33,6 @@ import struct, array import time, datetime import math, mathutils import bpy, bpy_extras -from bpy_extras import node_shader_utils -from bpy_extras.image_utils import load_image from bpy_extras.io_utils import axis_conversion from bpy_extras.io_utils import orientation_helper @@ -1017,16 +1011,16 @@ def read_video_postqueue(maxfile, filename): def get_point(floatval, default=0.0): uid = get_guid(floatval) if (uid == 0x2007): # Bezier-Float - fl = floatval.get_first(0x7127) - if (fl): + flv = floatval.get_first(0x7127) + if (flv): try: - return fl.get_first(0x2501).data[0] + return flv.get_first(0x2501).data[0] except: print("SyntaxError: %s - assuming 0.0!\n" %(floatval)) return default if (uid == 0x71F11549498702E7): # Float Wire - fl = get_references(floatval)[0] - return get_point(fl) + flv = get_references(floatval)[0] + return get_point(flv) else: return default @@ -1142,6 
+1136,14 @@ def get_color(colors, idx): return None +def get_value(colors, idx): + prop = get_property(colors, idx) + if (prop is not None): + val, offset = get_float(prop.data, 15) + return val + return None + + def get_standard_material(refs): material = None try: @@ -1153,9 +1155,9 @@ def get_standard_material(refs): material.set('diffuse', get_color(parameters, 0x01)) material.set('specular', get_color(parameters, 0x02)) material.set('emissive', get_color(parameters, 0x08)) - material.set('shinines', get_float(parameters, 0x0A)) + material.set('shinines', get_value(parameters, 0x0A)) transparency = refs[4] # ParameterBlock2 - material.set('transparency', get_float(transparency, 0x02)) + material.set('transparency', get_value(transparency, 0x02)) except: pass return material @@ -1168,8 +1170,8 @@ def get_vray_material(vry): material.set('ambient', get_color(vry, 0x02)) material.set('specular', get_color(vry, 0x05)) material.set('emissive', get_color(vry, 0x05)) - material.set('shinines', get_float(vry, 0x0B)) - material.set('transparency', get_float(vry, 0x02)) + material.set('shinines', get_value(vry, 0x0B)) + material.set('transparency', get_value(vry, 0x02)) except: pass return material @@ -1182,8 +1184,8 @@ def get_arch_material(ad): material.set('ambient', get_color(ad, 0x02)) material.set('specular', get_color(ad, 0x05)) material.set('emissive', get_color(ad, 0x05)) - material.set('shinines', get_float(ad, 0x0B)) - material.set('transparency', get_float(ad, 0x02)) + material.set('shinines', get_value(ad, 0x0B)) + material.set('transparency', get_value(ad, 0x02)) except: pass return material @@ -1209,7 +1211,7 @@ def adjust_material(obj, mat): objMaterial = bpy.data.materials.new(get_class_name(mat)) obj.data.materials.append(objMaterial) objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) - objMaterial.specular_color[:3] = material.get('specular', (0,0,0)) + objMaterial.specular_color[:3] = material.get('specular', (1.0,1.0,1.0)) 
objMaterial.roughness = 1.0 - material.get('shinines', 0.6) -- 2.30.2 From 2bcb045779b1b64810b0c0ea9708873804267f79 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 21 Nov 2023 21:29:43 +0100 Subject: [PATCH 48/78] Import_max: Removed unnessecary zeros Removed unnessecary zeros in chunk definition --- io_import_max.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 4cd4963d5..175afc351 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -43,6 +43,7 @@ class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): """Import Autodesk MAX""" bl_idname = "import_autodesk.max" bl_label = "Import Autodesk MAX (.max)" + bl_options = {'PRESET', 'UNDO'} filename_ext = ".max" filter_glob: bpy.props.StringProperty(default="*.max", options={'HIDDEN'},) @@ -825,7 +826,7 @@ class ChunkReader(): long, ofst = get_long(data, ofst) if (short == 0x8B1F): short, ofst = get_long(data, ofst) - if (short == 0x0B000000): + if (short == 0xB000000): data = zlib.decompress(data, zlib.MAX_WBITS|32) print(" reading '%s'..."%self.name, len(data)) while offset < len(data): @@ -1043,9 +1044,9 @@ def get_position(pos): uid = get_guid(pos) if (uid == 0xFFEE238A118F7E02): # Position XYZ position = get_point_3d(pos) - elif (uid == 0x0000000000442312): # TCB Position + elif (uid == 0x442312): # TCB Position position = pos.get_first(0x2503).data - elif (uid == 0x0000000000002008): # Bezier Position + elif (uid == 0x2008): # Bezier Position position = pos.get_first(0x2503).data if (position): mtx = mathutils.Matrix.Translation(position) @@ -1060,10 +1061,10 @@ def get_rotation(pos): if (uid == 0x2012): # Euler XYZ rot = get_point_3d(pos) rotation = mathutils.Euler((rot[2], rot[1], rot[0])).to_quaternion() - elif (uid == 0x0000000000442313): # TCB Rotation + elif (uid == 0x442313): # TCB Rotation rot = pos.get_first(0x2504).data rotation = mathutils.Quaternion((rot[0], rot[1], rot[2], rot[3])) - elif (uid 
== 0x000000004B4B1003): #'Rotation List + elif (uid == 0x4B4B1003): #'Rotation List refs = get_references(pos) if (len(refs) > 3): return get_rotation(refs[0]) @@ -1083,7 +1084,7 @@ def get_scale(pos): if (scale is None): scale = pos.get_first(0x2505) pos = scale.data - elif (uid == 0x0000000000442315): # TCB Zoom + elif (uid == 0x442315): # TCB Zoom scale = pos.get_first(0x2501) if (scale is None): scale = pos.get_first(0x2505) @@ -1198,7 +1199,7 @@ def adjust_material(obj, mat): if (uid == 0x0002): # Standard refs = get_references(mat) material = get_standard_material(refs) - elif (uid == 0x0000000000000200): # Multi/Sub-Object + elif (uid == 0x0200): # Multi/Sub-Object refs = get_references(mat) material = adjust_material(obj, refs[-1]) elif (uid == 0x7034695C37BF3F2F): # VRayMtl @@ -1348,7 +1349,6 @@ def calc_point_3d(chunk): pointlist.append(pt) except Exception as exc: print('ArrayError:\n', "%s: offset = %d\n" %(exc, offset)) - raise exc return pointlist -- 2.30.2 From b0e40301cef0379c34a7960cea071f7666a0d5ac Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 27 Nov 2023 02:41:24 +0100 Subject: [PATCH 49/78] Import_max: Added import options Added import options for scale and transformation Fixed axis conversion --- io_import_max.py | 104 ++++++++++++++++++++++++++++++----------------- 1 file changed, 66 insertions(+), 38 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 175afc351..97f594883 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -48,6 +48,18 @@ class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): filename_ext = ".max" filter_glob: bpy.props.StringProperty(default="*.max", options={'HIDDEN'},) + scale_objects: bpy.props.FloatProperty(name="Scale", + description="Scale factor for all objects", + min=0.0, max=10000.0, + soft_min=0.0, soft_max=10000.0, + default=1.0, + ) + + use_apply_matrix: bpy.props.BoolProperty(name="Apply Matrix", + description="Use matrix to transform the objects", + 
default=True, + ) + def execute(self, context): keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob")) global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up,).to_4x4() @@ -200,6 +212,11 @@ def _clsid(clsid): ((i32(clsid, 0), i16(clsid, 4), i16(clsid, 6)) + tuple(map(i8, clsid[8:16])))) + +############### +# DATA IMPORT # +############### + def is_maxfile (filename): """Test if file is a MAX OLE2 container.""" if hasattr(filename, 'read'): @@ -671,6 +688,10 @@ class ImportMaxFile: return data +################### +# DATA PROCESSING # +################### + class MaxChunk(): """Representing a chunk of a .max file.""" def __init__(self, types, size, level, number): @@ -1120,6 +1141,24 @@ def create_matrix(prc): return mtx +def get_matrix_mesh_material(node): + refs = get_reference(node) + if (refs): + mtx = refs.get(0, None) + msh = refs.get(1, None) + mat = refs.get(3, None) + lyr = refs.get(6, None) + else: + refs = get_references(node) + mtx = refs[0] + msh = refs[1] + mat = refs[3] + lyr = None + if (len(refs) > 6): + lyr = refs[6] + return mtx, msh, mat, lyr + + def get_property(properties, idx): for child in properties.children: if (child.types & 0x100E): @@ -1216,12 +1255,18 @@ def adjust_material(obj, mat): objMaterial.roughness = 1.0 - material.get('shinines', 0.6) -def create_shape(context, pts, indices, node, key, prc, mat): +def adjust_matrix(obj, node): + mtx = create_matrix(node).flatten() + plc = mathutils.Matrix(*mtx) + obj.matrix_world = plc + return plc + + +def create_shape(context, pts, indices, node, key, mtx, mat): name = node.get_first(TYP_NAME).data shape = bpy.data.meshes.new(name) if (key is not None): name = "%s_%d" %(name, key) - mtx = create_matrix(prc) data = [] if (pts): loopstart = [] @@ -1247,6 +1292,7 @@ def create_shape(context, pts, indices, node, key, prc, mat): obj = bpy.data.objects.new(name, shape) context.view_layer.active_layer_collection.collection.objects.link(obj) 
adjust_material(obj, mat) + obj.matrix_world = mtx return True return True @@ -1391,31 +1437,6 @@ def create_editable_mesh(context, node, msh, mat, mtx): return created -def get_matrix_mesh_material(node): - refs = get_reference(node) - if (refs): - mtx = refs.get(0, None) - msh = refs.get(1, None) - mat = refs.get(3, None) - lyr = refs.get(6, None) - else: - refs = get_references(node) - mtx = refs[0] - msh = refs[1] - mat = refs[3] - lyr = None - if (len(refs) > 6): - lyr = refs[6] - return mtx, msh, mat, lyr - - -def adjust_matrix(obj, node): - mtx = create_matrix(node).flatten() - plc = mathutils.Matrix(*mtx) - obj.matrix_world = plc - return plc - - def create_shell(context, node, shell, mat, mtx): name = node.get_first(TYP_NAME).data refs = get_references(shell) @@ -1424,7 +1445,7 @@ def create_shell(context, node, shell, mat, mtx): return created -def create_skipable(context, node, msh, mat, mtx, skip): +def create_skipable(context, node, skip): name = node.get_first(TYP_NAME).data print(" skipping %s '%s'... 
" %(skip, name)) return True @@ -1443,11 +1464,11 @@ def create_mesh(context, node, msh, mtx, mat): else: skip = SKIPPABLE.get(uid) if (skip is not None): - created = create_skipable(context, node, msh, mat, mtx, skip) + created = create_skipable(context, node, skip) return created, uid -def create_object(context, node): +def create_object(context, node, mscale, transform): parent = get_node_parent(node) node.parent = parent name = get_node_name(node) @@ -1458,26 +1479,30 @@ def create_object(context, node): if (parent_mtx): mtx = mtx.dot(parent_mtx) parent = get_node_parent(parent) + if (transform): + mtx = create_matrix(mtx) @ mscale + else: + mtx = mscale created, uid = create_mesh(context, node, msh, mtx, mat) -def make_scene(context, parent, level=0): +def make_scene(context, mscale, transform, parent, level=0): for chunk in parent.children: if (isinstance(chunk, SceneChunk)): if ((get_guid(chunk) == 0x0001) and (get_super_id(chunk) == 0x0001)): try: - create_object(context, chunk) + create_object(context, chunk, mscale, transform) except Exception as exc: print('ImportError:', exc, chunk) -def read_scene(context, maxfile, filename): +def read_scene(context, maxfile, filename, mscale, transform): global SCENE_LIST SCENE_LIST = read_chunks(maxfile, 'Scene', filename+'.Scn.bin', containerReader=SceneChunk) - make_scene(context, SCENE_LIST[0], 0) + make_scene(context, mscale, transform, SCENE_LIST[0], 0) -def read(context, filename): +def read(context, filename, mscale, transform): if (is_maxfile(filename)): maxfile = ImportMaxFile(filename) prop = maxfile.getproperties('\x05DocumentSummaryInformation', convert_time=True, no_conversion=[10]) @@ -1487,12 +1512,15 @@ def read(context, filename): read_directory(maxfile, filename) read_class_directory(maxfile, filename) read_video_postqueue(maxfile, filename) - read_scene(context, maxfile, filename) + read_scene(context, maxfile, filename, mscale, transform) else: print("File seems to be no 3D Studio Max file!") -def 
load(operator, context, filepath="", global_matrix=None): - read(context, filepath) +def load(operator, context, filepath="", scale_objects=1.0, use_apply_matrix=False, global_matrix=None): + mscale = mathutils.Matrix.Scale(scale_objects, 4) + if global_matrix is not None: + mscale = global_matrix @ mscale + read(context, filepath, mscale, transform=use_apply_matrix) return {'FINISHED'} \ No newline at end of file -- 2.30.2 From 718f8f11048bb899dbb40d9b5cd2354ad71e4235 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 27 Nov 2023 11:38:33 +0100 Subject: [PATCH 50/78] Import_max: Updated user interface design Updated UI design to blender standard --- io_import_max.py | 42 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 97f594883..a9d2bceb4 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -14,7 +14,7 @@ bl_info = { "name": "Import Autodesk MAX (.max)", "author": "Sebastian Sille, Philippe Lagadec, Jens M. 
Plonka", - "version": (1, 0, 0), + "version": (1, 1, 0), "blender": (4, 0, 0), "location": "File > Import", "description": "Import 3DSMAX meshes & materials", @@ -42,7 +42,7 @@ from bpy_extras.io_utils import orientation_helper class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): """Import Autodesk MAX""" bl_idname = "import_autodesk.max" - bl_label = "Import Autodesk MAX (.max)" + bl_label = "Import MAX (.max)" bl_options = {'PRESET', 'UNDO'} filename_ext = ".max" @@ -54,10 +54,9 @@ class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): soft_min=0.0, soft_max=10000.0, default=1.0, ) - use_apply_matrix: bpy.props.BoolProperty(name="Apply Matrix", description="Use matrix to transform the objects", - default=True, + default=False, ) def execute(self, context): @@ -67,16 +66,51 @@ class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): return load(self, context, **keywords) + def draw(self, context): + pass + + +class MAX_PT_import_transform(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Transform" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_AUTODESK_OT_max" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "scale_objects") + layrow = layout.row(align=True) + layrow.prop(operator, "use_apply_matrix") + layrow.label(text="", icon='VIEW_ORTHO' if operator.use_apply_matrix else 'MESH_GRID') + layout.prop(operator, "axis_forward") + layout.prop(operator, "axis_up") + + ### REGISTER ### def menu_func(self, context): self.layout.operator(Import_max.bl_idname, text="Autodesk MAX (.max)") def register(): bpy.utils.register_class(Import_max) + 
bpy.utils.register_class(MAX_PT_import_transform) bpy.types.TOPBAR_MT_file_import.append(menu_func) def unregister(): bpy.types.TOPBAR_MT_file_import.remove(menu_func) + bpy.utils.unregister_class(MAX_PT_import_transform) bpy.utils.unregister_class(Import_max) -- 2.30.2 From 364dfe1e206816d4d9856eaa21279c47bf0a848a Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 27 Nov 2023 13:23:30 +0100 Subject: [PATCH 51/78] Import_max: Fixed matrix variable Changed variable for matrix chunk to avoid confusion with real matrix Removed debug keyword --- io_import_max.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index a9d2bceb4..49168d046 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -397,7 +397,7 @@ class MaxFileDirEntry: class ImportMaxFile: """Representing an interface for importing .max files.""" - def __init__(self, filename=None, write_mode=False, debug=False): + def __init__(self, filename=None, write_mode=False): self.write_mode = write_mode self._filesize = None self.byte_order = None @@ -1178,19 +1178,19 @@ def create_matrix(prc): def get_matrix_mesh_material(node): refs = get_reference(node) if (refs): - mtx = refs.get(0, None) + prs = refs.get(0, None) msh = refs.get(1, None) mat = refs.get(3, None) lyr = refs.get(6, None) else: refs = get_references(node) - mtx = refs[0] + prs = refs[0] msh = refs[1] mat = refs[3] lyr = None if (len(refs) > 6): lyr = refs[6] - return mtx, msh, mat, lyr + return prs, msh, mat, lyr def get_property(properties, idx): @@ -1506,15 +1506,15 @@ def create_object(context, node, mscale, transform): parent = get_node_parent(node) node.parent = parent name = get_node_name(node) - mtx, msh, mat, lyr = get_matrix_mesh_material(node) + prs, msh, mat, lyr = get_matrix_mesh_material(node) while ((parent is not None) and (get_guid(parent) != 0x0002)): name = "%s/%s" %(get_node_name(parent), name) parent_mtx = parent.matrix if (parent_mtx): - mtx = 
mtx.dot(parent_mtx) + prs = prs.dot(parent_mtx) parent = get_node_parent(parent) if (transform): - mtx = create_matrix(mtx) @ mscale + mtx = create_matrix(prs) @ mscale else: mtx = mscale created, uid = create_mesh(context, node, msh, mtx, mat) -- 2.30.2 From ebb9cfac139f15d29c78d102242a37aa17d76858 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 27 Nov 2023 13:56:52 +0100 Subject: [PATCH 52/78] Import_max: Fixed keyword order Fixed mismatched keyword order --- io_import_max.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_import_max.py b/io_import_max.py index 49168d046..e8cbe8bfb 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1475,7 +1475,7 @@ def create_shell(context, node, shell, mat, mtx): name = node.get_first(TYP_NAME).data refs = get_references(shell) msh = refs[-1] - created = create_editable_mesh(context, node, msh, mtx, mat) + created = create_editable_mesh(context, node, msh, mat, mtx) return created -- 2.30.2 From 59a70e946b07b196169956c8f990766cbfac28af Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 27 Nov 2023 15:25:49 +0100 Subject: [PATCH 53/78] Import_max: Removed write mode Removed write mode from data import --- io_import_max.py | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index e8cbe8bfb..58d723dc1 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -397,8 +397,7 @@ class MaxFileDirEntry: class ImportMaxFile: """Representing an interface for importing .max files.""" - def __init__(self, filename=None, write_mode=False): - self.write_mode = write_mode + def __init__(self, filename=None): self._filesize = None self.byte_order = None self.directory_fp = None @@ -433,7 +432,7 @@ class ImportMaxFile: self.sector_size = None self.transaction_signature_number = None if filename: - self.open(filename, write_mode=write_mode) + self.open(filename) def __enter__(self): return self @@ -445,18 +444,13 @@ class 
ImportMaxFile: unicode_str = utf16_str.decode('UTF-16LE', errors) return unicode_str - def open(self, filename, write_mode=False): - self.write_mode = write_mode + def open(self, filename): if hasattr(filename, 'read'): self.fp = filename elif isinstance(filename, bytes) and len(filename) >= MIN_FILE_SIZE: self.fp = io.BytesIO(filename) else: - if self.write_mode: - mode = 'r+b' - else: - mode = 'rb' - self.fp = open(filename, mode) + self.fp = open(filename, 'rb') filesize = 0 self.fp.seek(0, os.SEEK_END) try: @@ -467,7 +461,6 @@ class ImportMaxFile: self._used_streams_fat = [] self._used_streams_minifat = [] header = self.fp.read(512) - fmt_header = '<8s16sHHHHHHLLLLLLLLLL' header_size = struct.calcsize(fmt_header) header1 = header[:header_size] @@ -578,7 +571,7 @@ class ImportMaxFile: try: self.fp.seek(self.sectorsize * (sect + 1)) except: - print('MAX sector index out of range') + print('IndexError: MAX sector index out of range') sector = self.fp.read(self.sectorsize) return sector @@ -610,9 +603,8 @@ class ImportMaxFile: fat=self.minifat, filesize=self.ministream.size) else: return MaxStream(fp=self.fp, sect=start, size=size, - offset=self.sectorsize, - sectorsize=self.sectorsize, fat=self.fat, - filesize=self._filesize) + offset=self.sectorsize, sectorsize=self.sectorsize, + fat=self.fat, filesize=self._filesize) def _find(self, filename): if isinstance(filename, str): -- 2.30.2 From 62edd34edfd7c48110ced17fab26144e4b9e4631 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 27 Nov 2023 15:59:54 +0100 Subject: [PATCH 54/78] Import_max: Changed definition order Changed definition order for better overview --- io_import_max.py | 72 ++++++++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 58d723dc1..e5324d3a9 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1342,6 +1342,42 @@ def calc_point_float(data): return points +def calc_point_3d(chunk): + data = 
chunk.data + count, offset = get_long(data, 0) + pointlist = [] + try: + while (offset < len(data)): + pt = Point3d() + long, offset = get_long(data, offset) + pt.points, offset = get_longs(data, offset, long) + pt.flags, offset = get_short(data, offset) + if ((pt.flags & 0x01) != 0): + pt.f1, offset = get_long(data, offset) + if ((pt.flags & 0x08) != 0): + pt.fH, offset = get_short(data, offset) + if ((pt.flags & 0x10) != 0): + pt.f2, offset = get_long(data, offset) + if ((pt.flags & 0x20) != 0): + pt.fA, offset = get_longs(data, offset, 2 * (long - 3)) + if (len(pt.points) > 0): + pointlist.append(pt) + except Exception as exc: + print('ArrayError:\n', "%s: offset = %d\n" %(exc, offset)) + return pointlist + + +def get_point_array(values): + verts = [] + if len(values) >= 4: + count, offset = get_long(values, 0) + while (count > 0): + floats, offset = get_floats(values, offset, 3) + verts.extend(floats) + count -= 1 + return verts + + def get_poly_4p(points): vertex = {} for point in points: @@ -1388,42 +1424,6 @@ def get_poly_data(chunk): return polylist -def get_point_array(values): - verts = [] - if len(values) >= 4: - count, offset = get_long(values, 0) - while (count > 0): - floats, offset = get_floats(values, offset, 3) - verts.extend(floats) - count -= 1 - return verts - - -def calc_point_3d(chunk): - data = chunk.data - count, offset = get_long(data, 0) - pointlist = [] - try: - while (offset < len(data)): - pt = Point3d() - long, offset = get_long(data, offset) - pt.points, offset = get_longs(data, offset, long) - pt.flags, offset = get_short(data, offset) - if ((pt.flags & 0x01) != 0): - pt.f1, offset = get_long(data, offset) - if ((pt.flags & 0x08) != 0): - pt.fH, offset = get_short(data, offset) - if ((pt.flags & 0x10) != 0): - pt.f2, offset = get_long(data, offset) - if ((pt.flags & 0x20) != 0): - pt.fA, offset = get_longs(data, offset, 2 * (long - 3)) - if (len(pt.points) > 0): - pointlist.append(pt) - except Exception as exc: - 
print('ArrayError:\n', "%s: offset = %d\n" %(exc, offset)) - return pointlist - - def create_editable_poly(context, node, msh, mat, mtx): coords = point3i = point4i = point6i = pointNi = None name = node.get_first(TYP_NAME).data -- 2.30.2 From a6c89df2581002054b5a7f68b976265b4db87da1 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 27 Nov 2023 20:43:42 +0100 Subject: [PATCH 55/78] Import_max: Added additional import options Changed definition order for better overview --- io_import_max.py | 120 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 90 insertions(+), 30 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index e5324d3a9..6b398d5e4 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -14,7 +14,7 @@ bl_info = { "name": "Import Autodesk MAX (.max)", "author": "Sebastian Sille, Philippe Lagadec, Jens M. Plonka", - "version": (1, 1, 0), + "version": (1, 1, 2), "blender": (4, 0, 0), "location": "File > Import", "description": "Import 3DSMAX meshes & materials", @@ -54,6 +54,14 @@ class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): soft_min=0.0, soft_max=10000.0, default=1.0, ) + use_material: bpy.props.BoolProperty(name="Materials", + description="Import the materials of the objects", + default=True, + ) + use_uv_mesh: bpy.props.BoolProperty(name="UV Mesh", + description="Import texture coordinates as mesh objects", + default=False, + ) use_apply_matrix: bpy.props.BoolProperty(name="Apply Matrix", description="Use matrix to transform the objects", default=False, @@ -70,6 +78,35 @@ class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): pass +class MAX_PT_import_include(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Include" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_AUTODESK_OT_max" + + def draw(self, 
context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = True + + sfile = context.space_data + operator = sfile.active_operator + + layrow = layout.row(align=True) + layrow.prop(operator, "use_material") + layrow.label(text="", icon='MATERIAL' if operator.use_material else 'SHADING_TEXTURE') + layrow = layout.row(align=True) + layrow.prop(operator, "use_uv_mesh") + layrow.label(text="", icon='UV' if operator.use_uv_mesh else 'GROUP_UVS') + + class MAX_PT_import_transform(bpy.types.Panel): bl_space_type = 'FILE_BROWSER' bl_region_type = 'TOOL_PROPS' @@ -105,12 +142,14 @@ def menu_func(self, context): def register(): bpy.utils.register_class(Import_max) + bpy.utils.register_class(MAX_PT_import_include) bpy.utils.register_class(MAX_PT_import_transform) bpy.types.TOPBAR_MT_file_import.append(menu_func) def unregister(): bpy.types.TOPBAR_MT_file_import.remove(menu_func) bpy.utils.unregister_class(MAX_PT_import_transform) + bpy.utils.unregister_class(MAX_PT_import_include) bpy.utils.unregister_class(Import_max) @@ -1205,7 +1244,8 @@ def get_color(colors, idx): def get_value(colors, idx): prop = get_property(colors, idx) if (prop is not None): - val, offset = get_float(prop.data, 15) + siz = 15 if (len(prop.data) > 23) else 6 + val, offset = get_float(prop.data, siz) return val return None @@ -1288,7 +1328,7 @@ def adjust_matrix(obj, node): return plc -def create_shape(context, pts, indices, node, key, mtx, mat): +def create_shape(context, pts, indices, node, key, mtx, mat, umt): name = node.get_first(TYP_NAME).data shape = bpy.data.meshes.new(name) if (key is not None): @@ -1297,13 +1337,13 @@ def create_shape(context, pts, indices, node, key, mtx, mat): if (pts): loopstart = [] looplines = loop = 0 - nbr_faces = len(indices) - for fid in range(nbr_faces): + nb_faces = len(indices) + for fid in range(nb_faces): polyface = indices[fid] looplines += len(polyface) shape.vertices.add(len(pts) // 3) shape.loops.add(looplines) - 
shape.polygons.add(nbr_faces) + shape.polygons.add(nb_faces) shape.vertices.foreach_set("co", pts) for vtx in indices: loopstart.append(loop) @@ -1317,8 +1357,9 @@ def create_shape(context, pts, indices, node, key, mtx, mat): shape.update() obj = bpy.data.objects.new(name, shape) context.view_layer.active_layer_collection.collection.objects.link(obj) - adjust_material(obj, mat) obj.matrix_world = mtx + if (umt): + adjust_material(obj, mat) return True return True @@ -1424,11 +1465,14 @@ def get_poly_data(chunk): return polylist -def create_editable_poly(context, node, msh, mat, mtx): - coords = point3i = point4i = point6i = pointNi = None +def create_editable_poly(context, node, msh, mat, mtx, umt, uvm): + coords = point4i = point6i = pointNi = None name = node.get_first(TYP_NAME).data poly = msh.get_first(0x08FE) created = False + lidx = [] + lcrd = [] + lply = [] if (poly): for child in poly.children: if (child.types == 0x0100): @@ -1437,20 +1481,34 @@ def create_editable_poly(context, node, msh, mat, mtx): point6i = child.data elif (child.types == 0x011A): point4i = calc_point_3d(child) + elif (child.types == 0x0310): + pointNi = child.data + elif (child.types == 0x0124): + lidx.append(get_long(child.data, 0)[0]) + elif (child.types == 0x0128): + lcrd.append(calc_point_float(child.data)) + elif (child.types == 0x012B): + lply.append(get_poly_data(child)) if (point4i is not None): vertex = get_poly_4p(point4i) if (len(vertex) > 0): for key, ngons in vertex.items(): - created |= create_shape(context, coords, ngons, node, key, mtx, mat) + created |= create_shape(context, coords, ngons, node, key, mtx, mat, umt) else: created = True elif (point6i is not None): ngons = get_poly_6p(point6i) - created = create_shape(context, coords, ngons, node, None, mtx, mat) + created = create_shape(context, coords, ngons, node, None, mtx, mat, umt) + elif (pointNi is not None): + ngons = get_poly_5p(pointNi) + created = create_shape(context, coords, ngons, node, None, mtx, mat, 
umt) + if (uvm and len(lidx) > 0): + for i in range(len(lidx)): + created |= create_shape(context, lcrd[i], lply[i], node, lidx[i], mtx, mat, umt) return created -def create_editable_mesh(context, node, msh, mat, mtx): +def create_editable_mesh(context, node, msh, mat, mtx, umt): name = node.get_first(TYP_NAME).data poly = msh.get_first(0x08FE) created = False @@ -1459,15 +1517,15 @@ def create_editable_mesh(context, node, msh, mat, mtx): clsid_chunk = poly.get_first(0x0912) coords = get_point_array(vertex_chunk.data) ngons = get_poly_5p(clsid_chunk.data) - created = create_shape(context, coords, ngons, node, None, mtx, mat) + created = create_shape(context, coords, ngons, node, None, mtx, mat, umt) return created -def create_shell(context, node, shell, mat, mtx): +def create_shell(context, node, shell, mat, mtx, umt): name = node.get_first(TYP_NAME).data refs = get_references(shell) msh = refs[-1] - created = create_editable_mesh(context, node, msh, mat, mtx) + created = create_editable_mesh(context, node, msh, mat, mtx, umt) return created @@ -1477,16 +1535,16 @@ def create_skipable(context, node, skip): return True -def create_mesh(context, node, msh, mtx, mat): +def create_mesh(context, node, msh, mtx, mat, umt, uvm): created = False uid = get_guid(msh) msh.geometry = None if (uid == 0x0E44F10B3): - created = create_editable_mesh(context, node, msh, mat, mtx) + created = create_editable_mesh(context, node, msh, mat, mtx, umt) elif (uid == 0x192F60981BF8338D): - created = create_editable_poly(context, node, msh, mat, mtx) + created = create_editable_poly(context, node, msh, mat, mtx, umt, uvm) elif (uid in {0x2032, 0x2033}): - created = create_shell(context, node, msh, mat, mtx) + created = create_shell(context, node, msh, mat, mtx, umt) else: skip = SKIPPABLE.get(uid) if (skip is not None): @@ -1494,7 +1552,7 @@ def create_mesh(context, node, msh, mtx, mat): return created, uid -def create_object(context, node, mscale, transform): +def create_object(context, 
node, mscale, usemat, uvmesh, transform): parent = get_node_parent(node) node.parent = parent name = get_node_name(node) @@ -1509,26 +1567,26 @@ def create_object(context, node, mscale, transform): mtx = create_matrix(prs) @ mscale else: mtx = mscale - created, uid = create_mesh(context, node, msh, mtx, mat) + created, uid = create_mesh(context, node, msh, mtx, mat, usemat, uvmesh) -def make_scene(context, mscale, transform, parent, level=0): +def make_scene(context, mscale, usemat, uvmesh, transform, parent, level=0): for chunk in parent.children: if (isinstance(chunk, SceneChunk)): if ((get_guid(chunk) == 0x0001) and (get_super_id(chunk) == 0x0001)): try: - create_object(context, chunk, mscale, transform) + create_object(context, chunk, mscale, usemat, uvmesh, transform) except Exception as exc: print('ImportError:', exc, chunk) -def read_scene(context, maxfile, filename, mscale, transform): +def read_scene(context, maxfile, filename, mscale, usemat, uvmesh, transform): global SCENE_LIST SCENE_LIST = read_chunks(maxfile, 'Scene', filename+'.Scn.bin', containerReader=SceneChunk) - make_scene(context, mscale, transform, SCENE_LIST[0], 0) + make_scene(context, mscale, usemat, uvmesh, transform, SCENE_LIST[0], 0) -def read(context, filename, mscale, transform): +def read(context, filename, mscale, usemat, uvmesh, transform): if (is_maxfile(filename)): maxfile = ImportMaxFile(filename) prop = maxfile.getproperties('\x05DocumentSummaryInformation', convert_time=True, no_conversion=[10]) @@ -1538,15 +1596,17 @@ def read(context, filename, mscale, transform): read_directory(maxfile, filename) read_class_directory(maxfile, filename) read_video_postqueue(maxfile, filename) - read_scene(context, maxfile, filename, mscale, transform) + read_scene(context, maxfile, filename, mscale, usemat, uvmesh, transform) else: print("File seems to be no 3D Studio Max file!") -def load(operator, context, filepath="", scale_objects=1.0, use_apply_matrix=False, global_matrix=None): +def 
load(operator, context, filepath="", scale_objects=1.0, use_material=True, + use_uv_mesh=False, use_apply_matrix=False, global_matrix=None): mscale = mathutils.Matrix.Scale(scale_objects, 4) if global_matrix is not None: - mscale = global_matrix @ mscale - read(context, filepath, mscale, transform=use_apply_matrix) + mscale = global_matrix @ mscale + + read(context, filepath, mscale, usemat=use_material, uvmesh=use_uv_mesh, transform=use_apply_matrix) return {'FINISHED'} \ No newline at end of file -- 2.30.2 From 4cb429054f859824f2cc83d54562ea095cfe10d8 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 28 Nov 2023 01:30:55 +0100 Subject: [PATCH 56/78] Import_max: Fixed vray material import Fixed vray material import --- io_import_max.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 6b398d5e4..59583f06a 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1244,7 +1244,7 @@ def get_color(colors, idx): def get_value(colors, idx): prop = get_property(colors, idx) if (prop is not None): - siz = 15 if (len(prop.data) > 23) else 6 + siz = 15 if (len(prop.data) > 19) else 6 val, offset = get_float(prop.data, siz) return val return None @@ -1273,10 +1273,9 @@ def get_vray_material(vry): material = Material() try: material.set('diffuse', get_color(vry, 0x01)) - material.set('ambient', get_color(vry, 0x02)) - material.set('specular', get_color(vry, 0x05)) - material.set('emissive', get_color(vry, 0x05)) - material.set('shinines', get_value(vry, 0x0B)) + material.set('specular', get_color(vry, 0x02)) + material.set('reflect', get_value(vry, 0x04)) + material.set('shinines', get_value(vry, 0x0A)) material.set('transparency', get_value(vry, 0x02)) except: pass @@ -1287,9 +1286,8 @@ def get_arch_material(ad): material = Material() try: material.set('diffuse', get_color(ad, 0x1A)) - material.set('ambient', get_color(ad, 0x02)) - material.set('specular', get_color(ad, 0x05)) - 
material.set('emissive', get_color(ad, 0x05)) + material.set('specular', get_color(ad, 0x02)) + material.set('reflect', get_color(ad, 0x05)) material.set('shinines', get_value(ad, 0x0B)) material.set('transparency', get_value(ad, 0x02)) except: -- 2.30.2 From 93da464a5527e4d8f0eb8707d31ab28d4fbb40e2 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 28 Nov 2023 02:04:59 +0100 Subject: [PATCH 57/78] Import_max: Fixed arch material import Fixed arch material import --- io_import_max.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 59583f06a..6daf59c8b 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1287,7 +1287,7 @@ def get_arch_material(ad): try: material.set('diffuse', get_color(ad, 0x1A)) material.set('specular', get_color(ad, 0x02)) - material.set('reflect', get_color(ad, 0x05)) + material.set('reflect', get_value(ad, 0x05)) material.set('shinines', get_value(ad, 0x0B)) material.set('transparency', get_value(ad, 0x02)) except: @@ -1316,6 +1316,7 @@ def adjust_material(obj, mat): obj.data.materials.append(objMaterial) objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) objMaterial.specular_color[:3] = material.get('specular', (1.0,1.0,1.0)) + objMaterial.specular_intensity = material.get('reflect', 0.5) objMaterial.roughness = 1.0 - material.get('shinines', 0.6) @@ -1603,8 +1604,7 @@ def load(operator, context, filepath="", scale_objects=1.0, use_material=True, use_uv_mesh=False, use_apply_matrix=False, global_matrix=None): mscale = mathutils.Matrix.Scale(scale_objects, 4) if global_matrix is not None: - mscale = global_matrix @ mscale - + mscale = global_matrix @ mscale read(context, filepath, mscale, usemat=use_material, uvmesh=use_uv_mesh, transform=use_apply_matrix) return {'FINISHED'} \ No newline at end of file -- 2.30.2 From 88fc1661f0e883a4a4355c16fba1a80ed0828eda Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 28 Nov 2023 02:10:28 +0100 
Subject: [PATCH 58/78] Import_max: Fixed vray material import Fixed vray material import --- io_import_max.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 6daf59c8b..85089f59c 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1287,8 +1287,8 @@ def get_arch_material(ad): try: material.set('diffuse', get_color(ad, 0x1A)) material.set('specular', get_color(ad, 0x02)) - material.set('reflect', get_value(ad, 0x05)) - material.set('shinines', get_value(ad, 0x0B)) + material.set('reflect', get_value(ad, 0x04)) + material.set('shinines', get_value(ad, 0x0A)) material.set('transparency', get_value(ad, 0x02)) except: pass -- 2.30.2 From 25566f5e9ed25499693a7304da88b9bec2bf9ccc Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 28 Nov 2023 02:27:48 +0100 Subject: [PATCH 59/78] Import_max: Removed property decorate Removed property decorate --- io_import_max.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_import_max.py b/io_import_max.py index 85089f59c..6059950c1 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -94,7 +94,7 @@ class MAX_PT_import_include(bpy.types.Panel): def draw(self, context): layout = self.layout layout.use_property_split = True - layout.use_property_decorate = True + layout.use_property_decorate = False sfile = context.space_data operator = sfile.active_operator -- 2.30.2 From 42345234c6a20a4fec8e5b7b79c8cc8ac90b951d Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Wed, 29 Nov 2023 01:29:04 +0100 Subject: [PATCH 60/78] Import_max: Fixed poly import Check if mesh is a editable poly Some Cleanup --- io_import_max.py | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 6059950c1..10667f29c 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -223,7 +223,8 @@ SKIPPABLE = { def get_valid_name(name): - if (INVALID_NAME.match(name)): return 
"_%s"%(name.encode('utf8')) + if (INVALID_NAME.match(name)): + return "_%s"%(name.encode('utf8')) return "%s"%(name.encode('utf8')) def i8(data): @@ -774,8 +775,8 @@ class MaxChunk(): def __str__(self): if (self.unknown == True): - return "%s[%4x] %04X: %s" %("" * self.level, self.number, self.types, ":".join("%02x"%(c) for c in self.data)) - return "%s[%4x] %04X: %s=%s" %("" * self.level, self.number, self.types, self.format, self.data) + return "%s[%4x] %04X: %s" %(""*self.level, self.number, self.types, ":".join("%02x"%(c) for c in self.data)) + return "%s[%4x] %04X: %s=%s" %(""*self.level, self.number, self.types, self.format, self.data) class ByteArrayChunk(MaxChunk): @@ -840,7 +841,7 @@ class ClassIDChunk(ByteArrayChunk): self.set(data, "struct", ' Date: Wed, 29 Nov 2023 14:24:14 +0100 Subject: [PATCH 61/78] Import_max: Added principled BSDF Added node_shader_utils principled BSDF wrapper Removed parameters wich are incompatible for blender --- io_import_max.py | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 10667f29c..945f22d03 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -35,6 +35,7 @@ import math, mathutils import bpy, bpy_extras from bpy_extras.io_utils import axis_conversion from bpy_extras.io_utils import orientation_helper +from bpy_extras.node_shader_utils import PrincipledBSDFWrapper @orientation_helper(axis_forward='Y', axis_up='Z') @@ -1150,7 +1151,7 @@ def get_rotation(pos): elif (uid == 0x442313): # TCB Rotation rot = pos.get_first(0x2504).data rotation = mathutils.Quaternion((rot[0], rot[1], rot[2], rot[3])) - elif (uid == 0x4B4B1003): #'Rotation List + elif (uid == 0x4B4B1003): # Rotation List refs = get_references(pos) if (len(refs) > 3): return get_rotation(refs[0]) @@ -1244,7 +1245,7 @@ def get_color(colors, idx): def get_value(colors, idx): prop = get_property(colors, idx) if (prop is not None): - siz = 15 if (len(prop.data) > 19) else 6 + siz 
= 15 if (len(prop.data) > 15) else 11 val, offset = get_float(prop.data, siz) return val return None @@ -1262,8 +1263,8 @@ def get_standard_material(refs): material.set('specular', get_color(parameters, 0x02)) material.set('emissive', get_color(parameters, 0x08)) material.set('shinines', get_value(parameters, 0x0A)) - transparency = refs[4] # ParameterBlock2 - material.set('transparency', get_value(transparency, 0x02)) + reflect = refs[4] # ParameterBlock2 + material.set('reflect', get_value(reflect, 0x02)) except: pass return material @@ -1274,9 +1275,6 @@ def get_vray_material(vry): try: material.set('diffuse', get_color(vry, 0x01)) material.set('specular', get_color(vry, 0x02)) - material.set('reflect', get_value(vry, 0x04)) - material.set('shinines', get_value(vry, 0x0A)) - material.set('transparency', get_value(vry, 0x02)) except: pass return material @@ -1287,9 +1285,6 @@ def get_arch_material(ad): try: material.set('diffuse', get_color(ad, 0x1A)) material.set('specular', get_color(ad, 0x02)) - material.set('reflect', get_value(ad, 0x04)) - material.set('shinines', get_value(ad, 0x0A)) - material.set('transparency', get_value(ad, 0x02)) except: pass return material @@ -1314,10 +1309,12 @@ def adjust_material(obj, mat): if (obj is not None) and (material is not None): objMaterial = bpy.data.materials.new(get_class_name(mat)) obj.data.materials.append(objMaterial) - objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) - objMaterial.specular_color[:3] = material.get('specular', (1.0,1.0,1.0)) - objMaterial.specular_intensity = material.get('reflect', 0.5) - objMaterial.roughness = 1.0 - material.get('shinines', 0.6) + matShader = PrincipledBSDFWrapper(objMaterial, is_readonly=False, use_nodes=True) + matShader.base_color = objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) + matShader.specular_tint = objMaterial.specular_color[:3] = material.get('specular', (1.0,1.0,1.0)) + matShader.specular = objMaterial.specular_intensity 
= material.get('reflect', 1.0) + matShader.roughness = objMaterial.roughness = 1.0 - material.get('shinines', 0.6) + matShader.emission_color = material.get('emissive', (0,0,0)) def adjust_matrix(obj, node): -- 2.30.2 From 24d74b9abbf43036b7b430e00ec493dd20f914ba Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Wed, 29 Nov 2023 14:27:36 +0100 Subject: [PATCH 62/78] Import_max: Fixed specularity Changed specularity default --- io_import_max.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_import_max.py b/io_import_max.py index 945f22d03..a08c95463 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1312,7 +1312,7 @@ def adjust_material(obj, mat): matShader = PrincipledBSDFWrapper(objMaterial, is_readonly=False, use_nodes=True) matShader.base_color = objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) matShader.specular_tint = objMaterial.specular_color[:3] = material.get('specular', (1.0,1.0,1.0)) - matShader.specular = objMaterial.specular_intensity = material.get('reflect', 1.0) + matShader.specular = objMaterial.specular_intensity = material.get('reflect', 0.5) matShader.roughness = objMaterial.roughness = 1.0 - material.get('shinines', 0.6) matShader.emission_color = material.get('emissive', (0,0,0)) -- 2.30.2 From 54f35f8a53eda7175f2471fcb8d9a2c21d20f73f Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Wed, 29 Nov 2023 22:27:12 +0100 Subject: [PATCH 63/78] Import_max: Avoid struct error Ensure there are enough bytes to read --- io_import_max.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_import_max.py b/io_import_max.py index a08c95463..8772cb720 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1245,7 +1245,7 @@ def get_color(colors, idx): def get_value(colors, idx): prop = get_property(colors, idx) if (prop is not None): - siz = 15 if (len(prop.data) > 15) else 11 + siz = 15 if (len(prop.data) > 17) else 11 val, offset = get_float(prop.data, siz) return val return None -- 
2.30.2 From 1b60028bef9dd544fcfd0bf9f7d1733ac0009295 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Fri, 1 Dec 2023 20:35:59 +0100 Subject: [PATCH 64/78] Import_max: Fixed material import Fixed material import --- io_import_max.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 8772cb720..d0a34644e 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1236,7 +1236,7 @@ def get_property(properties, idx): def get_color(colors, idx): prop = get_property(colors, idx) if (prop is not None): - siz = 15 if (len(prop.data) > 23) else 11 + siz = len(prop.data) - 12 col, offset = get_floats(prop.data, siz, 3) return (col[0], col[1], col[2]) return None @@ -1245,7 +1245,7 @@ def get_color(colors, idx): def get_value(colors, idx): prop = get_property(colors, idx) if (prop is not None): - siz = 15 if (len(prop.data) > 17) else 11 + siz = len(prop.data) - 4 val, offset = get_float(prop.data, siz) return val return None @@ -1262,9 +1262,10 @@ def get_standard_material(refs): material.set('diffuse', get_color(parameters, 0x01)) material.set('specular', get_color(parameters, 0x02)) material.set('emissive', get_color(parameters, 0x08)) - material.set('shinines', get_value(parameters, 0x0A)) + material.set('shinines', get_value(parameters, 0x0B)) reflect = refs[4] # ParameterBlock2 - material.set('reflect', get_value(reflect, 0x02)) + material.set('glossines', get_value(parablock, 0x02)) + material.set('metallic', get_value(parablock, 0x05)) except: pass return material @@ -1275,6 +1276,11 @@ def get_vray_material(vry): try: material.set('diffuse', get_color(vry, 0x01)) material.set('specular', get_color(vry, 0x02)) + material.set('shinines', get_value(vry, 0x03)) + material.set('ior', get_value(vry, 0x09)) + material.set('emissive', get_color(vry, 0x17)) + material.set('glossines', get_value(vry, 0x18)) + material.set('metallic', get_value(vry, 0x19)) except: pass return material @@ -1312,9 
+1318,11 @@ def adjust_material(obj, mat): matShader = PrincipledBSDFWrapper(objMaterial, is_readonly=False, use_nodes=True) matShader.base_color = objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) matShader.specular_tint = objMaterial.specular_color[:3] = material.get('specular', (1.0,1.0,1.0)) - matShader.specular = objMaterial.specular_intensity = material.get('reflect', 0.5) + matShader.specular = objMaterial.specular_intensity = material.get('glossines', 0.5) matShader.roughness = objMaterial.roughness = 1.0 - material.get('shinines', 0.6) + matShader.metallic = objMaterial.metallic = material.get('metallic', 0) matShader.emission_color = material.get('emissive', (0,0,0)) + matShader.ior = material.get('ior', 1.45) def adjust_matrix(obj, node): @@ -1602,6 +1610,7 @@ def load(operator, context, filepath="", scale_objects=1.0, use_material=True, mscale = mathutils.Matrix.Scale(scale_objects, 4) if global_matrix is not None: mscale = global_matrix @ mscale + read(context, filepath, mscale, usemat=use_material, uvmesh=use_uv_mesh, transform=use_apply_matrix) return {'FINISHED'} \ No newline at end of file -- 2.30.2 From c6eeea33bef2f62ee14b7fd6ba4bb0816cedc985 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Fri, 1 Dec 2023 20:50:16 +0100 Subject: [PATCH 65/78] Import_max: Fixed material import Fixed variable --- io_import_max.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_import_max.py b/io_import_max.py index d0a34644e..0651950a9 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1263,7 +1263,7 @@ def get_standard_material(refs): material.set('specular', get_color(parameters, 0x02)) material.set('emissive', get_color(parameters, 0x08)) material.set('shinines', get_value(parameters, 0x0B)) - reflect = refs[4] # ParameterBlock2 + parablock = refs[4] # ParameterBlock2 material.set('glossines', get_value(parablock, 0x02)) material.set('metallic', get_value(parablock, 0x05)) except: -- 2.30.2 From 
4db63f4d4d672fced090b47766cb54f4e86384f4 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Fri, 1 Dec 2023 22:42:12 +0100 Subject: [PATCH 66/78] Cleanup: Import_max Changed blender version to last compatible version Cleanup unused code --- io_import_max.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 0651950a9..d5dc4d4ea 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -15,7 +15,7 @@ bl_info = { "name": "Import Autodesk MAX (.max)", "author": "Sebastian Sille, Philippe Lagadec, Jens M. Plonka", "version": (1, 1, 2), - "blender": (4, 0, 0), + "blender": (3, 6, 0), "location": "File > Import", "description": "Import 3DSMAX meshes & materials", "warning": "", @@ -451,7 +451,6 @@ class ImportMaxFile: self.fp = None self.header_clsid = None self.header_signature = None - self.metadata = None self.mini_sector_shift = None self.mini_sector_size = None self.mini_stream_cutoff_size = None @@ -684,9 +683,7 @@ class ImportMaxFile: def get_rootentry_name(self): return self.root.name - def getproperties(self, filename, convert_time=False, no_conversion=None): - if no_conversion == None: - no_conversion = [] + def getproperties(self, filename): streampath = filename if not isinstance(streampath, str): streampath = '/'.join(streampath) @@ -732,7 +729,7 @@ class ImportMaxFile: value = self._decode_utf16_str(stream[offset + 8:offset + 8 + count * 2]) elif property_type == VT_FILETIME: value = int(i32(stream, offset + 4)) + (int(i32(stream, offset + 8)) << 32) - if convert_time and property_id not in no_conversion: + if property_id != 10: _FILETIME_null_date = datetime.datetime(1601, 1, 1, 0, 0, 0) value = _FILETIME_null_date + datetime.timedelta(microseconds=value // 10) else: @@ -1290,7 +1287,6 @@ def get_arch_material(ad): material = Material() try: material.set('diffuse', get_color(ad, 0x1A)) - material.set('specular', get_color(ad, 0x02)) except: pass return material @@ -1593,8 +1589,8 @@ 
def read_scene(context, maxfile, filename, mscale, usemat, uvmesh, transform): def read(context, filename, mscale, usemat, uvmesh, transform): if (is_maxfile(filename)): maxfile = ImportMaxFile(filename) - prop = maxfile.getproperties('\x05DocumentSummaryInformation', convert_time=True, no_conversion=[10]) - prop = maxfile.getproperties('\x05SummaryInformation', convert_time=True, no_conversion=[10]) + prop = maxfile.getproperties('\x05DocumentSummaryInformation') + prop = maxfile.getproperties('\x05SummaryInformation') read_class_data(maxfile, filename) read_config(maxfile, filename) read_directory(maxfile, filename) -- 2.30.2 From f62018a92d3bf967afd863c63258d2a04ffd848d Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sat, 2 Dec 2023 12:35:50 +0100 Subject: [PATCH 67/78] Import_max:Added chunk definitions Added chunk definitions --- io_import_max.py | 62 ++++++++++++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index d5dc4d4ea..7d214afb7 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -161,15 +161,14 @@ def unregister(): MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1' WORD_CLSID = "00020900-0000-0000-C000-000000000046" +MIN_FILE_SIZE = 1536 +UNKNOWN_SIZE = 0x7FFFFFFF +MAXFILE_SIZE = 0x7FFFFFFFFFFFFFFF MAXREGSECT = 0xFFFFFFFA # (-6) maximum SECT DIFSECT = 0xFFFFFFFC # (-4) denotes a DIFAT sector in a FAT FATSECT = 0xFFFFFFFD # (-3) denotes a FAT sector in a FAT ENDOFCHAIN = 0xFFFFFFFE # (-2) end of a virtual stream chain FREESECT = 0xFFFFFFFF # (-1) unallocated sector -MAXREGSID = 0xFFFFFFFA # (-6) maximum directory entry ID -NOSTREAM = 0xFFFFFFFF # (-1) unallocated directory entry -UNKNOWN_SIZE = 0x7FFFFFFF -MIN_FILE_SIZE = 1536 STGTY_EMPTY = 0 # empty directory entry STGTY_STORAGE = 1 # element is a storage object @@ -192,12 +191,15 @@ TYP_NAME = 0x0962 INVALID_NAME = re.compile('^[0-9].*') UNPACK_BOX_DATA = struct.Struct(' 3): return get_rotation(refs[0]) - elif 
(uid == 0x3A90416731381913): # Rotation Wire + elif (uid == MATRIX_ROT): # Rotation Wire return get_rotation(get_references(pos)[0]) if (rotation): mtx = mathutils.Matrix.Rotation(rotation.angle, 4, rotation.axis) @@ -1173,7 +1182,7 @@ def get_scale(pos): if (scale is None): scale = pos.get_first(0x2505) pos = scale.data - elif (uid == 0xFEEE238B118F7C01): # ScaleXYZ + elif (uid == MATRIX_SCL): # ScaleXYZ pos = get_point_3d(pos, 1.0) else: return mtx @@ -1287,6 +1296,7 @@ def get_arch_material(ad): material = Material() try: material.set('diffuse', get_color(ad, 0x1A)) + material.set('specular', get_color(ad, 0x02)) except: pass return material @@ -1302,10 +1312,10 @@ def adjust_material(obj, mat): elif (uid == 0x0200): # Multi/Sub-Object refs = get_references(mat) material = adjust_material(obj, refs[-1]) - elif (uid == 0x7034695C37BF3F2F): # VRayMtl + elif (uid == VRAY_MTL): # VRayMtl refs = get_reference(mat) material = get_vray_material(refs[1]) - elif (uid == 0x4A16365470B05735): # Arch + elif (uid == ARCH_MTL): # Arch refs = get_references(mat) material = get_arch_material(refs[0]) if (obj is not None) and (material is not None): @@ -1539,9 +1549,9 @@ def create_mesh(context, node, msh, mtx, mat, umt, uvm): created = False uid = get_guid(msh) msh.geometry = None - if (uid == 0x0E44F10B3): + if (uid == EDIT_MESH): created = create_editable_mesh(context, node, msh, mat, mtx, umt) - elif (uid == 0x192F60981BF8338D): + elif (uid == EDIT_POLY): created = create_editable_poly(context, node, msh, mat, mtx, umt, uvm) elif (uid in {0x2032, 0x2033}): created = create_shell(context, node, msh, mat, mtx, umt, uvm) @@ -1557,7 +1567,7 @@ def create_object(context, node, mscale, usemat, uvmesh, transform): node.parent = parent name = get_node_name(node) prs, msh, mat, lyr = get_matrix_mesh_material(node) - while ((parent is not None) and (get_guid(parent) != 0x0002)): + while ((parent is not None) and (get_guid(parent) != 0x02)): name = "%s/%s" %(get_node_name(parent), 
name) parent_mtx = parent.matrix if (parent_mtx): @@ -1573,7 +1583,7 @@ def create_object(context, node, mscale, usemat, uvmesh, transform): def make_scene(context, mscale, usemat, uvmesh, transform, parent, level=0): for chunk in parent.children: if (isinstance(chunk, SceneChunk)): - if ((get_guid(chunk) == 0x0001) and (get_super_id(chunk) == 0x0001)): + if ((get_guid(chunk) == 0x01) and (get_super_id(chunk) == 0x01)): try: create_object(context, chunk, mscale, usemat, uvmesh, transform) except Exception as exc: -- 2.30.2 From 28c6317302da40f80adf570ebe4c356d758a1a5b Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sat, 2 Dec 2023 13:13:43 +0100 Subject: [PATCH 68/78] Cleanup: Import_max Cleanup tabs and spaces --- io_import_max.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 7d214afb7..1ce8dbe47 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -389,7 +389,7 @@ class MaxFileDirEntry: self.is_minifat = False if self.entry_type in (STGTY_ROOT, STGTY_STREAM) and self.size > 0: if self.size < maxfile.minisectorcutoff \ - and self.entry_type == STGTY_STREAM: # only streams can be in MiniFAT + and self.entry_type == STGTY_STREAM: # only streams can be in MiniFAT self.is_minifat = True else: self.is_minifat = False @@ -563,7 +563,7 @@ class ImportMaxFile: if minifat: used_streams = self._used_streams_minifat else: - if first_sect in (DIFSECT,FATSECT,ENDOFCHAIN,FREESECT): + if first_sect in (DIFSECT, FATSECT, ENDOFCHAIN, FREESECT): return used_streams = self._used_streams_fat if first_sect in used_streams: @@ -597,7 +597,7 @@ class ImportMaxFile: self.fat = array.array('I') self.loadfat_sect(sect) if self.num_difat_sectors != 0: - nb_difat_sectors = (self.sectorsize//4) - 1 + nb_difat_sectors = (self.sectorsize // 4) - 1 nb_difat = (self.num_fat_sectors - 109 + nb_difat_sectors - 1) // nb_difat_sectors isect_difat = self.first_difat_sector for i in range(nb_difat): @@ -1177,7 +1177,7 @@ def 
get_scale(pos): if (scale is None): scale = pos.get_first(0x2505) pos = scale.data - elif (uid == 0x442315): # TCB Zoom + elif (uid == 0x442315): # TCB Zoom scale = pos.get_first(0x2501) if (scale is None): scale = pos.get_first(0x2505) -- 2.30.2 From f50fc1533ecfb0de69bd40888b920a9ec2c33bc5 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sun, 3 Dec 2023 23:10:15 +0100 Subject: [PATCH 69/78] Import_max: Removed unused code Added additional arch material parameter Removed unused code --- io_import_max.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 1ce8dbe47..770e9d823 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -620,7 +620,7 @@ class ImportMaxFile: try: self.fp.seek(self.sectorsize * (sect + 1)) except: - print('IndexError: MAX sector index out of range') + print('IndexError: Sector index out of range') sector = self.fp.read(self.sectorsize) return sector @@ -693,9 +693,6 @@ class ImportMaxFile: return self.root.name def getproperties(self, filename): - streampath = filename - if not isinstance(streampath, str): - streampath = '/'.join(streampath) fp = self.openstream(filename) data = {} try: @@ -1296,7 +1293,8 @@ def get_arch_material(ad): material = Material() try: material.set('diffuse', get_color(ad, 0x1A)) - material.set('specular', get_color(ad, 0x02)) + material.set('specular', get_color(ad, 0x05)) + material.set('shinines', get_value(ad, 0x0B)) except: pass return material -- 2.30.2 From 876fe8f278c3a74aa4934eff26319a194055edf6 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 4 Dec 2023 20:15:48 +0100 Subject: [PATCH 70/78] Import_max: Update for newer max files Fixed crash with files from latest version Removed unnessecary code Removed unused imports --- io_import_max.py | 86 ++---------------------------------------------- 1 file changed, 3 insertions(+), 83 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 770e9d823..4a0f2ceef 100644 --- 
a/io_import_max.py +++ b/io_import_max.py @@ -30,7 +30,6 @@ bl_info = { import io, re import os, sys, zlib import struct, array -import time, datetime import math, mathutils import bpy, bpy_extras from bpy_extras.io_utils import axis_conversion @@ -170,23 +169,9 @@ FATSECT = 0xFFFFFFFD # (-3) denotes a FAT sector in a FAT ENDOFCHAIN = 0xFFFFFFFE # (-2) end of a virtual stream chain FREESECT = 0xFFFFFFFF # (-1) unallocated sector -STGTY_EMPTY = 0 # empty directory entry -STGTY_STORAGE = 1 # element is a storage object STGTY_STREAM = 2 # element is a stream object -STGTY_LOCKBYTES = 3 # element is an ILockBytes object -STGTY_PROPERTY = 4 # element is an IPropertyStorage object STGTY_ROOT = 5 # element is a root storage -VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6; -VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11; -VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17; -VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23; -VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28; -VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64; -VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68; -VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72; -VT_VECTOR=0x1000; - TYP_NAME = 0x0962 INVALID_NAME = re.compile('^[0-9].*') UNPACK_BOX_DATA = struct.Struct('= 32768: - value = value - 65536 - elif property_type == VT_UI2: # 2-byte unsigned integer - value = i16(stream, offset + 4) - elif property_type in (VT_I4, VT_INT, VT_ERROR): - value = i32(stream, offset + 4) - elif property_type in (VT_UI4, VT_UINT): # 4-byte unsigned integer - value = i32(stream, offset + 4) - elif property_type in (VT_BSTR, VT_LPSTR): - count = i32(stream, offset + 4) - value = stream[offset + 8:offset + 8 + count - 1] - value = value.replace(b'\x00', b'') - elif property_type == VT_BLOB: - count = i32(stream, offset + 4) - value = stream[offset + 8:offset + 8 + count] - elif property_type == VT_LPWSTR: - 
count = i32(stream, offset + 4) - value = self._decode_utf16_str(stream[offset + 8:offset + 8 + count * 2]) - elif property_type == VT_FILETIME: - value = int(i32(stream, offset + 4)) + (int(i32(stream, offset + 8)) << 32) - if property_id != 10: - _FILETIME_null_date = datetime.datetime(1601, 1, 1, 0, 0, 0) - value = _FILETIME_null_date + datetime.timedelta(microseconds=value // 10) - else: - value = value // 10000000 - elif property_type == VT_UI1: # 1-byte unsigned integer - value = i8(stream[offset + 4]) - elif property_type == VT_CLSID: - value = _clsid(stream[offset + 4:offset + 20]) - elif property_type == VT_CF: - count = i32(stream, offset + 4) - value = stream[offset + 8:offset + 8 + count] - elif property_type == VT_BOOL: - value = bool(i16(stream, offset + 4)) - else: - value = None - - data[property_id] = value - except BaseException as exc: - print('Error while parsing property_id:', exc) - return data - ################### # DATA PROCESSING # @@ -917,7 +837,7 @@ class ChunkReader(): long, ofst = get_long(data, ofst) if (short == 0x8B1F): short, ofst = get_long(data, ofst) - if (short == 0xB000000): + if (short in (0xB000000, 0xa040000)): data = zlib.decompress(data, zlib.MAX_WBITS|32) print(" reading '%s'..." 
%self.name, len(data)) while offset < len(data): @@ -1597,8 +1517,6 @@ def read_scene(context, maxfile, filename, mscale, usemat, uvmesh, transform): def read(context, filename, mscale, usemat, uvmesh, transform): if (is_maxfile(filename)): maxfile = ImportMaxFile(filename) - prop = maxfile.getproperties('\x05DocumentSummaryInformation') - prop = maxfile.getproperties('\x05SummaryInformation') read_class_data(maxfile, filename) read_config(maxfile, filename) read_directory(maxfile, filename) @@ -1611,10 +1529,12 @@ def read(context, filename, mscale, usemat, uvmesh, transform): def load(operator, context, filepath="", scale_objects=1.0, use_material=True, use_uv_mesh=False, use_apply_matrix=False, global_matrix=None): + context.window.cursor_set('WAIT') mscale = mathutils.Matrix.Scale(scale_objects, 4) if global_matrix is not None: mscale = global_matrix @ mscale read(context, filepath, mscale, usemat=use_material, uvmesh=use_uv_mesh, transform=use_apply_matrix) + context.window.cursor_set('DEFAULT') return {'FINISHED'} \ No newline at end of file -- 2.30.2 From d763da40bd68e6e8245d584204c47ef3e0cbb9d2 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 4 Dec 2023 22:21:01 +0100 Subject: [PATCH 71/78] Import_max: Removed unused code Removed unused code --- io_import_max.py | 26 ++------------------------ 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 4a0f2ceef..a4f315590 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -168,9 +168,8 @@ DIFSECT = 0xFFFFFFFC # (-4) denotes a DIFAT sector in a FAT FATSECT = 0xFFFFFFFD # (-3) denotes a FAT sector in a FAT ENDOFCHAIN = 0xFFFFFFFE # (-2) end of a virtual stream chain FREESECT = 0xFFFFFFFF # (-1) unallocated sector - -STGTY_STREAM = 2 # element is a stream object -STGTY_ROOT = 5 # element is a root storage +MAX_STREAM = 2 # element is a stream object +ROOT_STORE = 5 # element is a root storage TYP_NAME = 0x0962 INVALID_NAME = re.compile('^[0-9].*') @@ 
-656,27 +655,6 @@ class ImportMaxFile: entry = self.direntries[sid] return self._open(entry.isectStart, entry.size) - def get_type(self, filename): - try: - sid = self._find(filename) - entry = self.direntries[sid] - return entry.entry_type - except: - return False - - def getclsid(self, filename): - sid = self._find(filename) - entry = self.direntries[sid] - return entry.clsid - - def get_size(self, filename): - sid = self._find(filename) - entry = self.direntries[sid] - return entry.size - - def get_rootentry_name(self): - return self.root.name - ################### # DATA PROCESSING # -- 2.30.2 From 642eea7cb8eddc5190677aa2e7759c9ee22680f5 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 4 Dec 2023 22:23:14 +0100 Subject: [PATCH 72/78] Cleanup: Import_max Fixed variables and cleanup --- io_import_max.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index a4f315590..46579a396 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -371,9 +371,9 @@ class MaxFileDirEntry: self.size = self.sizeLow + (int(self.sizeHigh) << 32) self.clsid = _clsid(clsid) self.is_minifat = False - if self.entry_type in (STGTY_ROOT, STGTY_STREAM) and self.size > 0: + if self.entry_type in (ROOT_STORE, MAX_STREAM) and self.size > 0: if self.size < maxfile.minisectorcutoff \ - and self.entry_type == STGTY_STREAM: # only streams can be in MiniFAT + and self.entry_type == MAX_STREAM: # only streams can be in MiniFAT self.is_minifat = True else: self.is_minifat = False @@ -383,7 +383,7 @@ class MaxFileDirEntry: def build_sect_chain(self, maxfile): if self.sect_chain: return - if self.entry_type not in (STGTY_ROOT, STGTY_STREAM) or self.size == 0: + if self.entry_type not in (ROOT_STORE, MAX_STREAM) or self.size == 0: return self.sect_chain = list() if self.is_minifat and not maxfile.minifat: -- 2.30.2 From a6e1722429d2d2af2c6fa2b15929051f9b15321c Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 4 Dec 2023 
23:26:11 +0100 Subject: [PATCH 73/78] Import_max: Removed unused boolean Removed unused boolean --- io_import_max.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 46579a396..c87038e59 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -304,10 +304,8 @@ def is_maxfile (filename): class MaxStream(io.BytesIO): """Returns an instance of the BytesIO class as read-only file object.""" def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize): - unknown_size = False if size == UNKNOWN_SIZE: size = len(fat) * sectorsize - unknown_size = True nb_sectors = (size + (sectorsize-1)) // sectorsize data = [] @@ -1178,7 +1176,7 @@ def get_vray_material(vry): material.set('diffuse', get_color(vry, 0x01)) material.set('specular', get_color(vry, 0x02)) material.set('shinines', get_value(vry, 0x03)) - material.set('ior', get_value(vry, 0x09)) + material.set('refraction', get_value(vry, 0x09)) material.set('emissive', get_color(vry, 0x17)) material.set('glossines', get_value(vry, 0x18)) material.set('metallic', get_value(vry, 0x19)) @@ -1224,7 +1222,7 @@ def adjust_material(obj, mat): matShader.roughness = objMaterial.roughness = 1.0 - material.get('shinines', 0.6) matShader.metallic = objMaterial.metallic = material.get('metallic', 0) matShader.emission_color = material.get('emissive', (0,0,0)) - matShader.ior = material.get('ior', 1.45) + matShader.ior = material.get('refraction', 1.45) def adjust_matrix(obj, node): -- 2.30.2 From 19c67a2af6f37e6b9bcc276af9c07d8b71b0bbc7 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 5 Dec 2023 01:18:24 +0100 Subject: [PATCH 74/78] Import_max: Added corona material Added material properties for corona renderer --- io_import_max.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/io_import_max.py b/io_import_max.py index c87038e59..6e93e534a 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -1150,6 +1150,16 @@ def 
get_value(colors, idx): return None +def get_parameter(colors, fmt): + if (fmt == 0x1): + siz = len(colors.data) - 12 + para, offset = get_floats(colors.data, siz, 3) + else: + siz = len(colors.data) - 4 + para, offset = get_float(colors.data, siz) + return para + + def get_standard_material(refs): material = None try: @@ -1185,6 +1195,19 @@ def get_vray_material(vry): return material +def get_corona_material(mtl): + material = Material() + try: + cor = mtl.children + material.set('diffuse', get_parameter(cor[3], 0x1)) + material.set('specular', get_parameter(cor[4], 0x1)) + material.set('emissive', get_parameter(cor[8], 0x1)) + material.set('glossines', get_parameter(cor[9], 0x2)) + except: + pass + return material + + def get_arch_material(ad): material = Material() try: @@ -1209,6 +1232,9 @@ def adjust_material(obj, mat): elif (uid == VRAY_MTL): # VRayMtl refs = get_reference(mat) material = get_vray_material(refs[1]) + elif (uid == CORO_MTL): # Corona + refs = get_references(mat) + material = get_corona_material(refs[0]) elif (uid == ARCH_MTL): # Arch refs = get_references(mat) material = get_arch_material(refs[0]) -- 2.30.2 From c56dcef5a336f51ec5eb3b5935465e96cc41af2c Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Tue, 5 Dec 2023 02:16:52 +0100 Subject: [PATCH 75/78] Cleanup: Import_max Cleanup code and style --- io_import_max.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 6e93e534a..59c42a8bc 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -813,7 +813,7 @@ class ChunkReader(): long, ofst = get_long(data, ofst) if (short == 0x8B1F): short, ofst = get_long(data, ofst) - if (short in (0xB000000, 0xa040000)): + if (short in (0xB000000, 0xA040000)): data = zlib.decompress(data, zlib.MAX_WBITS|32) print(" reading '%s'..." 
%self.name, len(data)) while offset < len(data): @@ -1232,7 +1232,7 @@ def adjust_material(obj, mat): elif (uid == VRAY_MTL): # VRayMtl refs = get_reference(mat) material = get_vray_material(refs[1]) - elif (uid == CORO_MTL): # Corona + elif (uid == CORO_MTL): # CoronaMtl refs = get_references(mat) material = get_corona_material(refs[0]) elif (uid == ARCH_MTL): # Arch -- 2.30.2 From 0a51bb4054c60a4d370dde6171680c8d14c4be94 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Wed, 6 Dec 2023 23:07:17 +0100 Subject: [PATCH 76/78] Import_max: Removed unused code Code cleanup --- io_import_max.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/io_import_max.py b/io_import_max.py index 59c42a8bc..37681b511 100644 --- a/io_import_max.py +++ b/io_import_max.py @@ -671,7 +671,6 @@ class MaxChunk(): self.unknown = True self.format = None self.data = None - self.resolved = False def __str__(self): if (self.unknown == True): @@ -701,17 +700,6 @@ class ByteArrayChunk(MaxChunk): except: self.data = data - def set_le16_string(self, data): - try: - long, offset = get_long(data, 0) - self.data = data[offset:offset + l * 2].decode('utf-16-le') - if (self.data[-1] == b'\0'): - self.data = self.data[0:-1] - self.format = "LStr16" - self.unknown = False - except: - self.data = data - def set_data(self, data): if (self.types in [0x0340, 0x4001, 0x0456, 0x0962]): self.set_string(data) @@ -1500,7 +1488,7 @@ def create_object(context, node, mscale, usemat, uvmesh, transform): created, uid = create_mesh(context, node, msh, mtx, mat, usemat, uvmesh) -def make_scene(context, mscale, usemat, uvmesh, transform, parent, level=0): +def make_scene(context, mscale, usemat, uvmesh, transform, parent): for chunk in parent.children: if (isinstance(chunk, SceneChunk)): if ((get_guid(chunk) == 0x01) and (get_super_id(chunk) == 0x01)): @@ -1513,7 +1501,7 @@ def make_scene(context, mscale, usemat, uvmesh, transform, parent, level=0): def read_scene(context, maxfile, 
filename, mscale, usemat, uvmesh, transform): global SCENE_LIST SCENE_LIST = read_chunks(maxfile, 'Scene', filename+'.Scn.bin', containerReader=SceneChunk) - make_scene(context, mscale, usemat, uvmesh, transform, SCENE_LIST[0], 0) + make_scene(context, mscale, usemat, uvmesh, transform, SCENE_LIST[0]) def read(context, filename, mscale, usemat, uvmesh, transform): -- 2.30.2 From 6226e30ab81811cd41789a1fe2182e296b49a5da Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Thu, 7 Dec 2023 16:56:12 +0100 Subject: [PATCH 77/78] Delete: io_import_max Remove to package --- io_import_max.py | 1530 ---------------------------------------------- 1 file changed, 1530 deletions(-) delete mode 100644 io_import_max.py diff --git a/io_import_max.py b/io_import_max.py deleted file mode 100644 index 37681b511..000000000 --- a/io_import_max.py +++ /dev/null @@ -1,1530 +0,0 @@ -# SPDX-FileCopyrightText: 2023 Sebastian Schrand -# -# SPDX-License-Identifier: GPL-2.0-or-later -# Import is based on using information from olefile IO sourcecode -# and the FreeCAD Autodesk 3DS Max importer ImportMAX -# -# olefile (formerly OleFileIO_PL) is copyright (c) 2005-2018 Philippe Lagadec -# (https://www.decalage.info) -# -# ImportMAX is copyright (c) 2017-2022 Jens M. Plonka -# (https://www.github.com/jmplonka/Importer3D) - - -bl_info = { - "name": "Import Autodesk MAX (.max)", - "author": "Sebastian Sille, Philippe Lagadec, Jens M. 
Plonka", - "version": (1, 1, 2), - "blender": (3, 6, 0), - "location": "File > Import", - "description": "Import 3DSMAX meshes & materials", - "warning": "", - "filepath_url": "", - "category": "Import-Export"} - - -################## -# IMPORT MODULES # -################## - -import io, re -import os, sys, zlib -import struct, array -import math, mathutils -import bpy, bpy_extras -from bpy_extras.io_utils import axis_conversion -from bpy_extras.io_utils import orientation_helper -from bpy_extras.node_shader_utils import PrincipledBSDFWrapper - -@orientation_helper(axis_forward='Y', axis_up='Z') - -### IMPORT OPERATOR ### -class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): - """Import Autodesk MAX""" - bl_idname = "import_autodesk.max" - bl_label = "Import MAX (.max)" - bl_options = {'PRESET', 'UNDO'} - - filename_ext = ".max" - filter_glob: bpy.props.StringProperty(default="*.max", options={'HIDDEN'},) - - scale_objects: bpy.props.FloatProperty(name="Scale", - description="Scale factor for all objects", - min=0.0, max=10000.0, - soft_min=0.0, soft_max=10000.0, - default=1.0, - ) - use_material: bpy.props.BoolProperty(name="Materials", - description="Import the materials of the objects", - default=True, - ) - use_uv_mesh: bpy.props.BoolProperty(name="UV Mesh", - description="Import texture coordinates as mesh objects", - default=False, - ) - use_apply_matrix: bpy.props.BoolProperty(name="Apply Matrix", - description="Use matrix to transform the objects", - default=False, - ) - - def execute(self, context): - keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob")) - global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up,).to_4x4() - keywords["global_matrix"] = global_matrix - - return load(self, context, **keywords) - - def draw(self, context): - pass - - -class MAX_PT_import_include(bpy.types.Panel): - bl_space_type = 'FILE_BROWSER' - bl_region_type = 'TOOL_PROPS' - bl_label = "Include" - 
bl_parent_id = "FILE_PT_operator" - - @classmethod - def poll(cls, context): - sfile = context.space_data - operator = sfile.active_operator - - return operator.bl_idname == "IMPORT_AUTODESK_OT_max" - - def draw(self, context): - layout = self.layout - layout.use_property_split = True - layout.use_property_decorate = False - - sfile = context.space_data - operator = sfile.active_operator - - layrow = layout.row(align=True) - layrow.prop(operator, "use_material") - layrow.label(text="", icon='MATERIAL' if operator.use_material else 'SHADING_TEXTURE') - layrow = layout.row(align=True) - layrow.prop(operator, "use_uv_mesh") - layrow.label(text="", icon='UV' if operator.use_uv_mesh else 'GROUP_UVS') - - -class MAX_PT_import_transform(bpy.types.Panel): - bl_space_type = 'FILE_BROWSER' - bl_region_type = 'TOOL_PROPS' - bl_label = "Transform" - bl_parent_id = "FILE_PT_operator" - - @classmethod - def poll(cls, context): - sfile = context.space_data - operator = sfile.active_operator - - return operator.bl_idname == "IMPORT_AUTODESK_OT_max" - - def draw(self, context): - layout = self.layout - layout.use_property_split = True - layout.use_property_decorate = False - - sfile = context.space_data - operator = sfile.active_operator - - layout.prop(operator, "scale_objects") - layrow = layout.row(align=True) - layrow.prop(operator, "use_apply_matrix") - layrow.label(text="", icon='VIEW_ORTHO' if operator.use_apply_matrix else 'MESH_GRID') - layout.prop(operator, "axis_forward") - layout.prop(operator, "axis_up") - - -### REGISTER ### -def menu_func(self, context): - self.layout.operator(Import_max.bl_idname, text="Autodesk MAX (.max)") - -def register(): - bpy.utils.register_class(Import_max) - bpy.utils.register_class(MAX_PT_import_include) - bpy.utils.register_class(MAX_PT_import_transform) - bpy.types.TOPBAR_MT_file_import.append(menu_func) - -def unregister(): - bpy.types.TOPBAR_MT_file_import.remove(menu_func) - bpy.utils.unregister_class(MAX_PT_import_transform) - 
bpy.utils.unregister_class(MAX_PT_import_include) - bpy.utils.unregister_class(Import_max) - - -################### -# DATA STRUCTURES # -################### - -MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1' -WORD_CLSID = "00020900-0000-0000-C000-000000000046" - -MIN_FILE_SIZE = 1536 -UNKNOWN_SIZE = 0x7FFFFFFF -MAXFILE_SIZE = 0x7FFFFFFFFFFFFFFF -MAXREGSECT = 0xFFFFFFFA # (-6) maximum SECT -DIFSECT = 0xFFFFFFFC # (-4) denotes a DIFAT sector in a FAT -FATSECT = 0xFFFFFFFD # (-3) denotes a FAT sector in a FAT -ENDOFCHAIN = 0xFFFFFFFE # (-2) end of a virtual stream chain -FREESECT = 0xFFFFFFFF # (-1) unallocated sector -MAX_STREAM = 2 # element is a stream object -ROOT_STORE = 5 # element is a root storage - -TYP_NAME = 0x0962 -INVALID_NAME = re.compile('^[0-9].*') -UNPACK_BOX_DATA = struct.Struct('= MIN_FILE_SIZE: - header = filename[:len(MAGIC)] - else: - with open(filename, 'rb') as fp: - header = fp.read(len(MAGIC)) - if header == MAGIC: - return True - else: - return False - - -class MaxStream(io.BytesIO): - """Returns an instance of the BytesIO class as read-only file object.""" - def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize): - if size == UNKNOWN_SIZE: - size = len(fat) * sectorsize - nb_sectors = (size + (sectorsize-1)) // sectorsize - - data = [] - for i in range(nb_sectors): - try: - fp.seek(offset + sectorsize * sect) - except: - break - sector_data = fp.read(sectorsize) - data.append(sector_data) - try: - sect = fat[sect] & FREESECT - except IndexError: - break - data = b"".join(data) - if len(data) >= size: - data = data[:size] - self.size = size - else: - self.size = len(data) - io.BytesIO.__init__(self, data) - - -class MaxFileDirEntry: - """Directory Entry for a stream or storage.""" - STRUCT_DIRENTRY = '<64sHBBIII16sIQQIII' - DIRENTRY_SIZE = 128 - assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE - - def __init__(self, entry, sid, maxfile): - self.sid = sid - self.maxfile = maxfile - self.kids = [] - self.kids_dict = {} - 
self.used = False - ( - self.name_raw, - self.namelength, - self.entry_type, - self.color, - self.sid_left, - self.sid_right, - self.sid_child, - clsid, - self.dwUserFlags, - self.createTime, - self.modifyTime, - self.isectStart, - self.sizeLow, - self.sizeHigh - ) = struct.unpack(MaxFileDirEntry.STRUCT_DIRENTRY, entry) - - if self.namelength > 64: - self.namelength = 64 - self.name_utf16 = self.name_raw[:(self.namelength - 2)] - self.name = maxfile._decode_utf16_str(self.name_utf16) - # print('DirEntry SID=%d: %s' % (self.sid, repr(self.name))) - if maxfile.sectorsize == 512: - self.size = self.sizeLow - else: - self.size = self.sizeLow + (int(self.sizeHigh) << 32) - self.clsid = _clsid(clsid) - self.is_minifat = False - if self.entry_type in (ROOT_STORE, MAX_STREAM) and self.size > 0: - if self.size < maxfile.minisectorcutoff \ - and self.entry_type == MAX_STREAM: # only streams can be in MiniFAT - self.is_minifat = True - else: - self.is_minifat = False - maxfile._check_duplicate_stream(self.isectStart, self.is_minifat) - self.sect_chain = None - - def build_sect_chain(self, maxfile): - if self.sect_chain: - return - if self.entry_type not in (ROOT_STORE, MAX_STREAM) or self.size == 0: - return - self.sect_chain = list() - if self.is_minifat and not maxfile.minifat: - maxfile.loadminifat() - next_sect = self.isectStart - while next_sect != ENDOFCHAIN: - self.sect_chain.append(next_sect) - if self.is_minifat: - next_sect = maxfile.minifat[next_sect] - else: - next_sect = maxfile.fat[next_sect] - - def build_storage_tree(self): - if self.sid_child != FREESECT: - self.append_kids(self.sid_child) - self.kids.sort() - - def append_kids(self, child_sid): - if child_sid == FREESECT: - return - else: - child = self.maxfile._load_direntry(child_sid) - if child.used: - return - child.used = True - self.append_kids(child.sid_left) - name_lower = child.name.lower() - self.kids.append(child) - self.kids_dict[name_lower] = child - self.append_kids(child.sid_right) - 
child.build_storage_tree() - - def __eq__(self, other): - return self.name == other.name - - def __lt__(self, other): - return self.name < other.name - - def __ne__(self, other): - return not self.__eq__(other) - - def __le__(self, other): - return self.__eq__(other) or self.__lt__(other) - - -class ImportMaxFile: - """Representing an interface for importing .max files.""" - def __init__(self, filename=None): - self._filesize = None - self.byte_order = None - self.directory_fp = None - self.direntries = None - self.dll_version = None - self.fat = None - self.first_difat_sector = None - self.first_dir_sector = None - self.first_mini_fat_sector = None - self.fp = None - self.header_clsid = None - self.header_signature = None - self.mini_sector_shift = None - self.mini_sector_size = None - self.mini_stream_cutoff_size = None - self.minifat = None - self.minifatsect = None - self.minisectorcutoff = None - self.minisectorsize = None - self.ministream = None - self.minor_version = None - self.nb_sect = None - self.num_difat_sectors = None - self.num_dir_sectors = None - self.num_fat_sectors = None - self.num_mini_fat_sectors = None - self.reserved1 = None - self.reserved2 = None - self.root = None - self.sector_shift = None - self.sector_size = None - self.transaction_signature_number = None - if filename: - self.open(filename) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - def _decode_utf16_str(self, utf16_str, errors='replace'): - unicode_str = utf16_str.decode('UTF-16LE', errors) - return unicode_str - - def open(self, filename): - if hasattr(filename, 'read'): - self.fp = filename - elif isinstance(filename, bytes) and len(filename) >= MIN_FILE_SIZE: - self.fp = io.BytesIO(filename) - else: - self.fp = open(filename, 'rb') - filesize = 0 - self.fp.seek(0, os.SEEK_END) - try: - filesize = self.fp.tell() - finally: - self.fp.seek(0) - self._filesize = filesize - self._used_streams_fat = [] - self._used_streams_minifat = [] - 
header = self.fp.read(512) - fmt_header = '<8s16sHHHHHHLLLLLLLLLL' - header_size = struct.calcsize(fmt_header) - header1 = header[:header_size] - ( - self.header_signature, - self.header_clsid, - self.minor_version, - self.dll_version, - self.byte_order, - self.sector_shift, - self.mini_sector_shift, - self.reserved1, - self.reserved2, - self.num_dir_sectors, - self.num_fat_sectors, - self.first_dir_sector, - self.transaction_signature_number, - self.mini_stream_cutoff_size, - self.first_mini_fat_sector, - self.num_mini_fat_sectors, - self.first_difat_sector, - self.num_difat_sectors - ) = struct.unpack(fmt_header, header1) - - self.sector_size = 2**self.sector_shift - self.mini_sector_size = 2**self.mini_sector_shift - if self.mini_stream_cutoff_size != 0x1000: - self.mini_stream_cutoff_size = 0x1000 - self.nb_sect = ((filesize + self.sector_size-1) // self.sector_size) - 1 - - # file clsid - self.header_clsid = _clsid(header[8:24]) - self.sectorsize = self.sector_size #1 << i16(header, 30) - self.minisectorsize = self.mini_sector_size #1 << i16(header, 32) - self.minisectorcutoff = self.mini_stream_cutoff_size # i32(header, 56) - self._check_duplicate_stream(self.first_dir_sector) - if self.num_mini_fat_sectors: - self._check_duplicate_stream(self.first_mini_fat_sector) - if self.num_difat_sectors: - self._check_duplicate_stream(self.first_difat_sector) - - # Load file allocation tables - self.loadfat(header) - self.loaddirectory(self.first_dir_sector) - self.minifatsect = self.first_mini_fat_sector - - def close(self): - self.fp.close() - - def _check_duplicate_stream(self, first_sect, minifat=False): - if minifat: - used_streams = self._used_streams_minifat - else: - if first_sect in (DIFSECT, FATSECT, ENDOFCHAIN, FREESECT): - return - used_streams = self._used_streams_fat - if first_sect in used_streams: - pass - else: - used_streams.append(first_sect) - - def sector_array(self, sect): - ary = array.array('I', sect) - if sys.byteorder == 'big': - 
ary.byteswap() - return ary - - def loadfat_sect(self, sect): - if isinstance(sect, array.array): - fat1 = sect - else: - fat1 = self.sector_array(sect) - isect = None - for isect in fat1: - isect = isect & FREESECT - if isect == ENDOFCHAIN or isect == FREESECT: - break - sector = self.getsect(isect) - nextfat = self.sector_array(sector) - self.fat = self.fat + nextfat - return isect - - def loadfat(self, header): - sect = header[76:512] - self.fat = array.array('I') - self.loadfat_sect(sect) - if self.num_difat_sectors != 0: - nb_difat_sectors = (self.sectorsize // 4) - 1 - nb_difat = (self.num_fat_sectors - 109 + nb_difat_sectors - 1) // nb_difat_sectors - isect_difat = self.first_difat_sector - for i in range(nb_difat): - sector_difat = self.getsect(isect_difat) - difat = self.sector_array(sector_difat) - self.loadfat_sect(difat[:nb_difat_sectors]) - isect_difat = difat[nb_difat_sectors] - if len(self.fat) > self.nb_sect: - self.fat = self.fat[:self.nb_sect] - - def loadminifat(self): - stream_size = self.num_mini_fat_sectors * self.sector_size - nb_minisectors = (self.root.size + self.mini_sector_size - 1) // self.mini_sector_size - used_size = nb_minisectors * 4 - sect = self._open(self.minifatsect, stream_size, force_FAT=True).read() - self.minifat = self.sector_array(sect) - self.minifat = self.minifat[:nb_minisectors] - - def getsect(self, sect): - try: - self.fp.seek(self.sectorsize * (sect + 1)) - except: - print('IndexError: Sector index out of range') - sector = self.fp.read(self.sectorsize) - return sector - - def loaddirectory(self, sect): - self.directory_fp = self._open(sect, force_FAT=True) - max_entries = self.directory_fp.size // 128 - self.direntries = [None] * max_entries - root_entry = self._load_direntry(0) - self.root = self.direntries[0] - self.root.build_storage_tree() - - def _load_direntry (self, sid): - if self.direntries[sid] is not None: - return self.direntries[sid] - self.directory_fp.seek(sid * 128) - entry = 
self.directory_fp.read(128) - self.direntries[sid] = MaxFileDirEntry(entry, sid, self) - return self.direntries[sid] - - def _open(self, start, size = UNKNOWN_SIZE, force_FAT=False): - if size < self.minisectorcutoff and not force_FAT: - if not self.ministream: - self.loadminifat() - size_ministream = self.root.size - self.ministream = self._open(self.root.isectStart, - size_ministream, force_FAT=True) - return MaxStream(fp=self.ministream, sect=start, size=size, - offset=0, sectorsize=self.minisectorsize, - fat=self.minifat, filesize=self.ministream.size) - else: - return MaxStream(fp=self.fp, sect=start, size=size, - offset=self.sectorsize, sectorsize=self.sectorsize, - fat=self.fat, filesize=self._filesize) - - def _find(self, filename): - if isinstance(filename, str): - filename = filename.split('/') - node = self.root - for name in filename: - for kid in node.kids: - if kid.name.lower() == name.lower(): - break - node = kid - return node.sid - - def openstream(self, filename): - sid = self._find(filename) - entry = self.direntries[sid] - return self._open(entry.isectStart, entry.size) - - -################### -# DATA PROCESSING # -################### - -class MaxChunk(): - """Representing a chunk of a .max file.""" - def __init__(self, types, size, level, number): - self.number = number - self.types = types - self.level = level - self.parent = None - self.previous = None - self.next = None - self.size = size - self.unknown = True - self.format = None - self.data = None - - def __str__(self): - if (self.unknown == True): - return "%s[%4x] %04X: %s" %(""*self.level, self.number, self.types, ":".join("%02x"%(c) for c in self.data)) - return "%s[%4x] %04X: %s=%s" %(""*self.level, self.number, self.types, self.format, self.data) - - -class ByteArrayChunk(MaxChunk): - """A byte array of a .max chunk.""" - def __init__(self, types, data, level, number): - MaxChunk.__init__(self, types, data, level, number) - - def set(self, data, name, fmt, start, end): - try: - 
self.data = struct.unpack(fmt, data[start:end]) - self.format = name - self.unknown = False - except Exception as exc: - self.data = data - # print('StructError:', exc, name) - - def set_string(self, data): - try: - self.data = data.decode('UTF-16LE') - self.format = "Str16" - self.unknown = False - except: - self.data = data - - def set_data(self, data): - if (self.types in [0x0340, 0x4001, 0x0456, 0x0962]): - self.set_string(data) - elif (self.types in [0x2034, 0x2035]): - self.set(data, "ints", '<'+'I'*int(len(data) / 4), 0, len(data)) - elif (self.types in [0x2501, 0x2503, 0x2504, 0x2505, 0x2511]): - self.set(data, "floats", '<'+'f'*int(len(data) / 4), 0, len(data)) - elif (self.types == 0x2510): - self.set(data, "struct", '<'+'f'*int(len(data) / 4 - 1) + 'I', 0, len(data)) - elif (self.types == 0x0100): - self.set(data, "float", ' 3): - return get_rotation(refs[0]) - elif (uid == MATRIX_ROT): # Rotation Wire - return get_rotation(get_references(pos)[0]) - if (rotation): - mtx = mathutils.Matrix.Rotation(rotation.angle, 4, rotation.axis) - return mtx - - -def get_scale(pos): - mtx = mathutils.Matrix.Identity(4) - if (pos): - uid = get_guid(pos) - if (uid == 0x2010): # Bezier Scale - scale = pos.get_first(0x2501) - if (scale is None): - scale = pos.get_first(0x2505) - pos = scale.data - elif (uid == 0x442315): # TCB Zoom - scale = pos.get_first(0x2501) - if (scale is None): - scale = pos.get_first(0x2505) - pos = scale.data - elif (uid == MATRIX_SCL): # ScaleXYZ - pos = get_point_3d(pos, 1.0) - else: - return mtx - mtx = mathutils.Matrix.Diagonal(pos[:3]).to_4x4() - return mtx - - -def create_matrix(prc): - mtx = mathutils.Matrix.Identity(4) - pos = rot = scl = None - uid = get_guid(prc) - if (uid == 0x2005): # Position/Rotation/Scale - pos = get_position(get_references(prc)[0]) - rot = get_rotation(get_references(prc)[1]) - scl = get_scale(get_references(prc)[2]) - elif (uid == 0x9154): # BipSlave Control - biped_sub_anim = get_references(prc)[2] - refs = 
get_references(biped_sub_anim) - scl = get_scale(get_references(refs[1])[0]) - rot = get_rotation(get_references(refs[2])[0]) - pos = get_position(get_references(refs[3])[0]) - if (pos is not None): - mtx = pos @ mtx - if (rot is not None): - mtx = rot @ mtx - if (scl is not None): - mtx = scl @ mtx - return mtx - - -def get_matrix_mesh_material(node): - refs = get_reference(node) - if (refs): - prs = refs.get(0, None) - msh = refs.get(1, None) - mat = refs.get(3, None) - lyr = refs.get(6, None) - else: - refs = get_references(node) - prs = refs[0] - msh = refs[1] - mat = refs[3] - lyr = None - if (len(refs) > 6): - lyr = refs[6] - return prs, msh, mat, lyr - - -def get_property(properties, idx): - for child in properties.children: - if (child.types & 0x100E): - if (get_short(child.data, 0)[0] == idx): - return child - return None - - -def get_color(colors, idx): - prop = get_property(colors, idx) - if (prop is not None): - siz = len(prop.data) - 12 - col, offset = get_floats(prop.data, siz, 3) - return (col[0], col[1], col[2]) - return None - - -def get_value(colors, idx): - prop = get_property(colors, idx) - if (prop is not None): - siz = len(prop.data) - 4 - val, offset = get_float(prop.data, siz) - return val - return None - - -def get_parameter(colors, fmt): - if (fmt == 0x1): - siz = len(colors.data) - 12 - para, offset = get_floats(colors.data, siz, 3) - else: - siz = len(colors.data) - 4 - para, offset = get_float(colors.data, siz) - return para - - -def get_standard_material(refs): - material = None - try: - if (len(refs) > 2): - colors = refs[2] - parameters = get_references(colors)[0] - material = Material() - material.set('ambient', get_color(parameters, 0x00)) - material.set('diffuse', get_color(parameters, 0x01)) - material.set('specular', get_color(parameters, 0x02)) - material.set('emissive', get_color(parameters, 0x08)) - material.set('shinines', get_value(parameters, 0x0B)) - parablock = refs[4] # ParameterBlock2 - material.set('glossines', 
get_value(parablock, 0x02)) - material.set('metallic', get_value(parablock, 0x05)) - except: - pass - return material - - -def get_vray_material(vry): - material = Material() - try: - material.set('diffuse', get_color(vry, 0x01)) - material.set('specular', get_color(vry, 0x02)) - material.set('shinines', get_value(vry, 0x03)) - material.set('refraction', get_value(vry, 0x09)) - material.set('emissive', get_color(vry, 0x17)) - material.set('glossines', get_value(vry, 0x18)) - material.set('metallic', get_value(vry, 0x19)) - except: - pass - return material - - -def get_corona_material(mtl): - material = Material() - try: - cor = mtl.children - material.set('diffuse', get_parameter(cor[3], 0x1)) - material.set('specular', get_parameter(cor[4], 0x1)) - material.set('emissive', get_parameter(cor[8], 0x1)) - material.set('glossines', get_parameter(cor[9], 0x2)) - except: - pass - return material - - -def get_arch_material(ad): - material = Material() - try: - material.set('diffuse', get_color(ad, 0x1A)) - material.set('specular', get_color(ad, 0x05)) - material.set('shinines', get_value(ad, 0x0B)) - except: - pass - return material - - -def adjust_material(obj, mat): - material = None - if (mat is not None): - uid = get_guid(mat) - if (uid == 0x0002): # Standard - refs = get_references(mat) - material = get_standard_material(refs) - elif (uid == 0x0200): # Multi/Sub-Object - refs = get_references(mat) - material = adjust_material(obj, refs[-1]) - elif (uid == VRAY_MTL): # VRayMtl - refs = get_reference(mat) - material = get_vray_material(refs[1]) - elif (uid == CORO_MTL): # CoronaMtl - refs = get_references(mat) - material = get_corona_material(refs[0]) - elif (uid == ARCH_MTL): # Arch - refs = get_references(mat) - material = get_arch_material(refs[0]) - if (obj is not None) and (material is not None): - objMaterial = bpy.data.materials.new(get_class_name(mat)) - obj.data.materials.append(objMaterial) - matShader = PrincipledBSDFWrapper(objMaterial, is_readonly=False, 
use_nodes=True) - matShader.base_color = objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) - matShader.specular_tint = objMaterial.specular_color[:3] = material.get('specular', (1.0,1.0,1.0)) - matShader.specular = objMaterial.specular_intensity = material.get('glossines', 0.5) - matShader.roughness = objMaterial.roughness = 1.0 - material.get('shinines', 0.6) - matShader.metallic = objMaterial.metallic = material.get('metallic', 0) - matShader.emission_color = material.get('emissive', (0,0,0)) - matShader.ior = material.get('refraction', 1.45) - - -def adjust_matrix(obj, node): - mtx = create_matrix(node).flatten() - plc = mathutils.Matrix(*mtx) - obj.matrix_world = plc - return plc - - -def create_shape(context, pts, indices, node, key, mtx, mat, umt): - name = node.get_first(TYP_NAME).data - shape = bpy.data.meshes.new(name) - if (key is not None): - name = "%s_%d" %(name, key) - data = [] - if (pts): - loopstart = [] - looplines = loop = 0 - nb_faces = len(indices) - for fid in range(nb_faces): - polyface = indices[fid] - looplines += len(polyface) - shape.vertices.add(len(pts) // 3) - shape.loops.add(looplines) - shape.polygons.add(nb_faces) - shape.vertices.foreach_set("co", pts) - for vtx in indices: - loopstart.append(loop) - data.extend(vtx) - loop += len(vtx) - shape.polygons.foreach_set("loop_start", loopstart) - shape.loops.foreach_set("vertex_index", data) - - if (len(data) > 0): - shape.validate() - shape.update() - obj = bpy.data.objects.new(name, shape) - context.view_layer.active_layer_collection.collection.objects.link(obj) - obj.matrix_world = mtx - if (umt): - adjust_material(obj, mat) - return True - return True - - -def calc_point(data): - points = [] - long, offset = get_long(data, 0) - while (offset < len(data)): - val, offset = get_long(data, offset) - flt, offset = get_floats(data, offset, 3) - points.extend(flt) - return points - - -def calc_point_float(data): - points = [] - long, offset = get_long(data, 0) - while 
(offset < len(data)): - flt, offset = get_floats(data, offset, 3) - points.extend(flt) - return points - - -def calc_point_3d(chunk): - data = chunk.data - count, offset = get_long(data, 0) - pointlist = [] - try: - while (offset < len(data)): - pt = Point3d() - long, offset = get_long(data, offset) - pt.points, offset = get_longs(data, offset, long) - pt.flags, offset = get_short(data, offset) - if ((pt.flags & 0x01) != 0): - pt.f1, offset = get_long(data, offset) - if ((pt.flags & 0x08) != 0): - pt.fH, offset = get_short(data, offset) - if ((pt.flags & 0x10) != 0): - pt.f2, offset = get_long(data, offset) - if ((pt.flags & 0x20) != 0): - pt.fA, offset = get_longs(data, offset, 2 * (long - 3)) - if (len(pt.points) > 0): - pointlist.append(pt) - except Exception as exc: - print('ArrayError:\n', "%s: offset = %d\n" %(exc, offset)) - return pointlist - - -def get_point_array(values): - verts = [] - if len(values) >= 4: - count, offset = get_long(values, 0) - while (count > 0): - floats, offset = get_floats(values, offset, 3) - verts.extend(floats) - count -= 1 - return verts - - -def get_poly_4p(points): - vertex = {} - for point in points: - ngon = point.points - key = point.fH - if (key not in vertex): - vertex[key] = [] - vertex[key].append(ngon) - return vertex - - -def get_poly_5p(data): - count, offset = get_long(data, 0) - ngons = [] - while count > 0: - pt, offset = get_longs(data, offset, 3) - offset += 8 - ngons.append(pt) - count -= 1 - return ngons - - -def get_poly_6p(data): - count, offset = get_long(data, 0) - polylist = [] - while (offset < len(data)): - long, offset = get_longs(data, offset, 6) - i = 5 - while ((i > 3) and (long[i] < 0)): - i -= 1 - if (i > 2): - polylist.append(long[1:i]) - return polylist - - -def get_poly_data(chunk): - offset = 0 - polylist = [] - data = chunk.data - while (offset < len(data)): - count, offset = get_long(data, offset) - points, offset = get_longs(data, offset, count) - polylist.append(points) - return polylist - 
- -def create_editable_poly(context, node, msh, mat, mtx, umt, uvm): - coords = point4i = point6i = pointNi = None - poly = msh.get_first(0x08FE) - created = False - lidx = [] - lcrd = [] - lply = [] - if (poly): - for child in poly.children: - if (child.types == 0x0100): - coords = calc_point(child.data) - elif (child.types == 0x0108): - point6i = child.data - elif (child.types == 0x011A): - point4i = calc_point_3d(child) - elif (child.types == 0x0310): - pointNi = child.data - elif (child.types == 0x0124): - lidx.append(get_long(child.data, 0)[0]) - elif (child.types == 0x0128): - lcrd.append(calc_point_float(child.data)) - elif (child.types == 0x012B): - lply.append(get_poly_data(child)) - if (point4i is not None): - vertex = get_poly_4p(point4i) - if (len(vertex) > 0): - for key, ngons in vertex.items(): - created |= create_shape(context, coords, ngons, node, key, mtx, mat, umt) - else: - created = True - elif (point6i is not None): - ngons = get_poly_6p(point6i) - created = create_shape(context, coords, ngons, node, None, mtx, mat, umt) - elif (pointNi is not None): - ngons = get_poly_5p(pointNi) - created = create_shape(context, coords, ngons, node, None, mtx, mat, umt) - if (uvm and len(lidx) > 0): - for i in range(len(lidx)): - created |= create_shape(context, lcrd[i], lply[i], node, lidx[i], mtx, mat, umt) - return created - - -def create_editable_mesh(context, node, msh, mat, mtx, umt): - poly = msh.get_first(0x08FE) - created = False - if (poly): - vertex_chunk = poly.get_first(0x0914) - clsid_chunk = poly.get_first(0x0912) - coords = get_point_array(vertex_chunk.data) - ngons = get_poly_5p(clsid_chunk.data) - created = create_shape(context, coords, ngons, node, None, mtx, mat, umt) - return created - - -def create_shell(context, node, shell, mat, mtx, umt, uvm): - refs = get_references(shell) - msh = refs[-1] - if (get_class_name(msh) == "'Editable Poly'"): - created = create_editable_poly(context, node, msh, mat, mtx, umt, uvm) - else: - created = 
create_editable_mesh(context, node, msh, mat, mtx, umt) - return created - - -def create_skipable(context, node, skip): - name = node.get_first(TYP_NAME).data - print(" skipping %s '%s'... " %(skip, name)) - return True - - -def create_mesh(context, node, msh, mtx, mat, umt, uvm): - created = False - uid = get_guid(msh) - msh.geometry = None - if (uid == EDIT_MESH): - created = create_editable_mesh(context, node, msh, mat, mtx, umt) - elif (uid == EDIT_POLY): - created = create_editable_poly(context, node, msh, mat, mtx, umt, uvm) - elif (uid in {0x2032, 0x2033}): - created = create_shell(context, node, msh, mat, mtx, umt, uvm) - else: - skip = SKIPPABLE.get(uid) - if (skip is not None): - created = create_skipable(context, node, skip) - return created, uid - - -def create_object(context, node, mscale, usemat, uvmesh, transform): - parent = get_node_parent(node) - node.parent = parent - name = get_node_name(node) - prs, msh, mat, lyr = get_matrix_mesh_material(node) - while ((parent is not None) and (get_guid(parent) != 0x02)): - name = "%s/%s" %(get_node_name(parent), name) - parent_mtx = parent.matrix - if (parent_mtx): - prs = prs.dot(parent_mtx) - parent = get_node_parent(parent) - if (transform): - mtx = create_matrix(prs) @ mscale - else: - mtx = mscale - created, uid = create_mesh(context, node, msh, mtx, mat, usemat, uvmesh) - - -def make_scene(context, mscale, usemat, uvmesh, transform, parent): - for chunk in parent.children: - if (isinstance(chunk, SceneChunk)): - if ((get_guid(chunk) == 0x01) and (get_super_id(chunk) == 0x01)): - try: - create_object(context, chunk, mscale, usemat, uvmesh, transform) - except Exception as exc: - print('ImportError:', exc, chunk) - - -def read_scene(context, maxfile, filename, mscale, usemat, uvmesh, transform): - global SCENE_LIST - SCENE_LIST = read_chunks(maxfile, 'Scene', filename+'.Scn.bin', containerReader=SceneChunk) - make_scene(context, mscale, usemat, uvmesh, transform, SCENE_LIST[0]) - - -def read(context, 
filename, mscale, usemat, uvmesh, transform): - if (is_maxfile(filename)): - maxfile = ImportMaxFile(filename) - read_class_data(maxfile, filename) - read_config(maxfile, filename) - read_directory(maxfile, filename) - read_class_directory(maxfile, filename) - read_video_postqueue(maxfile, filename) - read_scene(context, maxfile, filename, mscale, usemat, uvmesh, transform) - else: - print("File seems to be no 3D Studio Max file!") - - -def load(operator, context, filepath="", scale_objects=1.0, use_material=True, - use_uv_mesh=False, use_apply_matrix=False, global_matrix=None): - context.window.cursor_set('WAIT') - mscale = mathutils.Matrix.Scale(scale_objects, 4) - if global_matrix is not None: - mscale = global_matrix @ mscale - - read(context, filepath, mscale, usemat=use_material, uvmesh=use_uv_mesh, transform=use_apply_matrix) - context.window.cursor_set('DEFAULT') - - return {'FINISHED'} \ No newline at end of file -- 2.30.2 From 21397d7a3f765af30c203fdbab6f7633feaa622a Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Thu, 7 Dec 2023 16:57:08 +0100 Subject: [PATCH 78/78] Uploaded: io_scene_max Uploaded package --- io_scene_max/__init__.py | 156 ++++ io_scene_max/import_max.py | 1422 ++++++++++++++++++++++++++++++++++++ 2 files changed, 1578 insertions(+) create mode 100644 io_scene_max/__init__.py create mode 100644 io_scene_max/import_max.py diff --git a/io_scene_max/__init__.py b/io_scene_max/__init__.py new file mode 100644 index 000000000..51af2e1ea --- /dev/null +++ b/io_scene_max/__init__.py @@ -0,0 +1,156 @@ +# SPDX-FileCopyrightText: 2023 Sebastian Schrand +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import bpy +from bpy_extras.io_utils import ( + ImportHelper, + orientation_helper, + axis_conversion, + ) +from bpy.props import ( + BoolProperty, + FloatProperty, + StringProperty, + ) + +bl_info = { + "name": "Import Autodesk MAX (.max)", + "author": "Sebastian Sille, Philippe Lagadec, Jens M. 
Plonka", + "version": (1, 1, 2), + "blender": (3, 6, 0), + "location": "File > Import", + "description": "Import 3DSMAX meshes & materials", + "warning": "", + "filepath_url": "", + "category": "Import-Export"} + +if "bpy" in locals(): + import importlib + if "import_max" in locals(): + importlib.reload(import_max) + + +@orientation_helper(axis_forward='Y', axis_up='Z') +class Import_max(bpy.types.Operator, ImportHelper): + """Import Autodesk MAX""" + bl_idname = "import_autodesk.max" + bl_label = "Import MAX (.max)" + bl_options = {'PRESET', 'UNDO'} + + filename_ext = ".max" + filter_glob: StringProperty(default="*.max", options={'HIDDEN'},) + + scale_objects: FloatProperty( + name="Scale", + description="Scale factor for all objects", + min=0.0, max=10000.0, + soft_min=0.0, soft_max=10000.0, + default=1.0, + ) + use_material: BoolProperty( + name="Materials", + description="Import the materials of the objects", + default=True, + ) + use_uv_mesh: BoolProperty( + name="UV Mesh", + description="Import texture coordinates as mesh objects", + default=False, + ) + use_apply_matrix: BoolProperty( + name="Apply Matrix", + description="Use matrix to transform the objects", + default=False, + ) + + def execute(self, context): + from . 
import import_max + keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob")) + global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up,).to_4x4() + keywords["global_matrix"] = global_matrix + + return import_max.load(self, context, **keywords) + + def draw(self, context): + pass + + +class MAX_PT_import_include(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Include" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_AUTODESK_OT_max" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + sfile = context.space_data + operator = sfile.active_operator + + layrow = layout.row(align=True) + layrow.prop(operator, "use_material") + layrow.label(text="", icon='MATERIAL' if operator.use_material else 'SHADING_TEXTURE') + layrow = layout.row(align=True) + layrow.prop(operator, "use_uv_mesh") + layrow.label(text="", icon='UV' if operator.use_uv_mesh else 'GROUP_UVS') + + +class MAX_PT_import_transform(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Transform" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_AUTODESK_OT_max" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "scale_objects") + layrow = layout.row(align=True) + layrow.prop(operator, "use_apply_matrix") + layrow.label(text="", icon='VIEW_ORTHO' if operator.use_apply_matrix else 'MESH_GRID') + layout.prop(operator, "axis_forward") + layout.prop(operator, "axis_up") + 
+ +def menu_func(self, context): + self.layout.operator(Import_max.bl_idname, text="Autodesk MAX (.max)") + + +def register(): + bpy.utils.register_class(Import_max) + bpy.utils.register_class(MAX_PT_import_include) + bpy.utils.register_class(MAX_PT_import_transform) + bpy.types.TOPBAR_MT_file_import.append(menu_func) + + +def unregister(): + bpy.types.TOPBAR_MT_file_import.remove(menu_func) + bpy.utils.unregister_class(MAX_PT_import_transform) + bpy.utils.unregister_class(MAX_PT_import_include) + bpy.utils.unregister_class(Import_max) + + +if __name__ == "__main__": + register() diff --git a/io_scene_max/import_max.py b/io_scene_max/import_max.py new file mode 100644 index 000000000..03702286c --- /dev/null +++ b/io_scene_max/import_max.py @@ -0,0 +1,1422 @@ +# SPDX-FileCopyrightText: 2023 Sebastian Schrand +# +# SPDX-License-Identifier: GPL-2.0-or-later +# Import is based on using information from olefile IO sourcecode +# and the FreeCAD Autodesk 3DS Max importer ImportMAX +# +# olefile (formerly OleFileIO_PL) is copyright (c) 2005-2018 Philippe Lagadec +# (https://www.decalage.info) +# +# ImportMAX is copyright (c) 2017-2022 Jens M. 
Plonka +# (https://www.github.com/jmplonka/Importer3D) + +import io +import os +import re +import sys +import bpy +import math +import zlib +import array +import struct +import mathutils +from bpy_extras.node_shader_utils import PrincipledBSDFWrapper + + +################### +# DATA STRUCTURES # +################### + +MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1' +WORD_CLSID = "00020900-0000-0000-C000-000000000046" + +MIN_FILE_SIZE = 1536 +UNKNOWN_SIZE = 0x7FFFFFFF +MAXFILE_SIZE = 0x7FFFFFFFFFFFFFFF +MAXREGSECT = 0xFFFFFFFA # (-6) maximum SECT +DIFSECT = 0xFFFFFFFC # (-4) denotes a DIFAT sector in a FAT +FATSECT = 0xFFFFFFFD # (-3) denotes a FAT sector in a FAT +ENDOFCHAIN = 0xFFFFFFFE # (-2) end of a virtual stream chain +FREESECT = 0xFFFFFFFF # (-1) unallocated sector +MAX_STREAM = 2 # element is a stream object +ROOT_STORE = 5 # element is a root storage + +TYP_NAME = 0x0962 +INVALID_NAME = re.compile('^[0-9].*') +UNPACK_BOX_DATA = struct.Struct('= MIN_FILE_SIZE: + header = filename[:len(MAGIC)] + else: + with open(filename, 'rb') as fp: + header = fp.read(len(MAGIC)) + if header == MAGIC: + return True + else: + return False + + +class MaxStream(io.BytesIO): + """Returns an instance of the BytesIO class as read-only file object.""" + def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize): + if size == UNKNOWN_SIZE: + size = len(fat) * sectorsize + nb_sectors = (size + (sectorsize-1)) // sectorsize + + data = [] + for i in range(nb_sectors): + try: + fp.seek(offset + sectorsize * sect) + except: + break + sector_data = fp.read(sectorsize) + data.append(sector_data) + try: + sect = fat[sect] & FREESECT + except IndexError: + break + data = b"".join(data) + if len(data) >= size: + data = data[:size] + self.size = size + else: + self.size = len(data) + io.BytesIO.__init__(self, data) + + +class MaxFileDirEntry: + """Directory Entry for a stream or storage.""" + STRUCT_DIRENTRY = '<64sHBBIII16sIQQIII' + DIRENTRY_SIZE = 128 + assert 
struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE + + def __init__(self, entry, sid, maxfile): + self.sid = sid + self.maxfile = maxfile + self.kids = [] + self.kids_dict = {} + self.used = False + ( + self.name_raw, + self.namelength, + self.entry_type, + self.color, + self.sid_left, + self.sid_right, + self.sid_child, + clsid, + self.dwUserFlags, + self.createTime, + self.modifyTime, + self.isectStart, + self.sizeLow, + self.sizeHigh + ) = struct.unpack(MaxFileDirEntry.STRUCT_DIRENTRY, entry) + + if self.namelength > 64: + self.namelength = 64 + self.name_utf16 = self.name_raw[:(self.namelength - 2)] + self.name = maxfile._decode_utf16_str(self.name_utf16) + # print('DirEntry SID=%d: %s' % (self.sid, repr(self.name))) + if maxfile.sectorsize == 512: + self.size = self.sizeLow + else: + self.size = self.sizeLow + (int(self.sizeHigh) << 32) + self.clsid = _clsid(clsid) + self.is_minifat = False + if self.entry_type in (ROOT_STORE, MAX_STREAM) and self.size > 0: + if self.size < maxfile.minisectorcutoff \ + and self.entry_type == MAX_STREAM: # only streams can be in MiniFAT + self.is_minifat = True + else: + self.is_minifat = False + maxfile._check_duplicate_stream(self.isectStart, self.is_minifat) + self.sect_chain = None + + def build_sect_chain(self, maxfile): + if self.sect_chain: + return + if self.entry_type not in (ROOT_STORE, MAX_STREAM) or self.size == 0: + return + self.sect_chain = list() + if self.is_minifat and not maxfile.minifat: + maxfile.loadminifat() + next_sect = self.isectStart + while next_sect != ENDOFCHAIN: + self.sect_chain.append(next_sect) + if self.is_minifat: + next_sect = maxfile.minifat[next_sect] + else: + next_sect = maxfile.fat[next_sect] + + def build_storage_tree(self): + if self.sid_child != FREESECT: + self.append_kids(self.sid_child) + self.kids.sort() + + def append_kids(self, child_sid): + if child_sid == FREESECT: + return + else: + child = self.maxfile._load_direntry(child_sid) + if child.used: + return + child.used = True + 
self.append_kids(child.sid_left) + name_lower = child.name.lower() + self.kids.append(child) + self.kids_dict[name_lower] = child + self.append_kids(child.sid_right) + child.build_storage_tree() + + def __eq__(self, other): + return self.name == other.name + + def __lt__(self, other): + return self.name < other.name + + def __ne__(self, other): + return not self.__eq__(other) + + def __le__(self, other): + return self.__eq__(other) or self.__lt__(other) + + +class ImportMaxFile: + """Representing an interface for importing .max files.""" + def __init__(self, filename=None): + self._filesize = None + self.byte_order = None + self.directory_fp = None + self.direntries = None + self.dll_version = None + self.fat = None + self.first_difat_sector = None + self.first_dir_sector = None + self.first_mini_fat_sector = None + self.fp = None + self.header_clsid = None + self.header_signature = None + self.mini_sector_shift = None + self.mini_sector_size = None + self.mini_stream_cutoff_size = None + self.minifat = None + self.minifatsect = None + self.minisectorcutoff = None + self.minisectorsize = None + self.ministream = None + self.minor_version = None + self.nb_sect = None + self.num_difat_sectors = None + self.num_dir_sectors = None + self.num_fat_sectors = None + self.num_mini_fat_sectors = None + self.reserved1 = None + self.reserved2 = None + self.root = None + self.sector_shift = None + self.sector_size = None + self.transaction_signature_number = None + if filename: + self.open(filename) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def _decode_utf16_str(self, utf16_str, errors='replace'): + unicode_str = utf16_str.decode('UTF-16LE', errors) + return unicode_str + + def open(self, filename): + if hasattr(filename, 'read'): + self.fp = filename + elif isinstance(filename, bytes) and len(filename) >= MIN_FILE_SIZE: + self.fp = io.BytesIO(filename) + else: + self.fp = open(filename, 'rb') + filesize = 0 + self.fp.seek(0, 
os.SEEK_END) + try: + filesize = self.fp.tell() + finally: + self.fp.seek(0) + self._filesize = filesize + self._used_streams_fat = [] + self._used_streams_minifat = [] + header = self.fp.read(512) + fmt_header = '<8s16sHHHHHHLLLLLLLLLL' + header_size = struct.calcsize(fmt_header) + header1 = header[:header_size] + ( + self.header_signature, + self.header_clsid, + self.minor_version, + self.dll_version, + self.byte_order, + self.sector_shift, + self.mini_sector_shift, + self.reserved1, + self.reserved2, + self.num_dir_sectors, + self.num_fat_sectors, + self.first_dir_sector, + self.transaction_signature_number, + self.mini_stream_cutoff_size, + self.first_mini_fat_sector, + self.num_mini_fat_sectors, + self.first_difat_sector, + self.num_difat_sectors + ) = struct.unpack(fmt_header, header1) + + self.sector_size = 2**self.sector_shift + self.mini_sector_size = 2**self.mini_sector_shift + if self.mini_stream_cutoff_size != 0x1000: + self.mini_stream_cutoff_size = 0x1000 + self.nb_sect = ((filesize + self.sector_size-1) // self.sector_size) - 1 + + # file clsid + self.header_clsid = _clsid(header[8:24]) + self.sectorsize = self.sector_size # i16(header, 30) + self.minisectorsize = self.mini_sector_size # i16(header, 32) + self.minisectorcutoff = self.mini_stream_cutoff_size # i32(header, 56) + self._check_duplicate_stream(self.first_dir_sector) + if self.num_mini_fat_sectors: + self._check_duplicate_stream(self.first_mini_fat_sector) + if self.num_difat_sectors: + self._check_duplicate_stream(self.first_difat_sector) + + # Load file allocation tables + self.loadfat(header) + self.loaddirectory(self.first_dir_sector) + self.minifatsect = self.first_mini_fat_sector + + def close(self): + self.fp.close() + + def _check_duplicate_stream(self, first_sect, minifat=False): + if minifat: + used_streams = self._used_streams_minifat + else: + if first_sect in (DIFSECT, FATSECT, ENDOFCHAIN, FREESECT): + return + used_streams = self._used_streams_fat + if first_sect in 
used_streams: + pass + else: + used_streams.append(first_sect) + + def sector_array(self, sect): + ary = array.array('I', sect) + if sys.byteorder == 'big': + ary.byteswap() + return ary + + def loadfat_sect(self, sect): + if isinstance(sect, array.array): + fat1 = sect + else: + fat1 = self.sector_array(sect) + isect = None + for isect in fat1: + isect = isect & FREESECT + if isect == ENDOFCHAIN or isect == FREESECT: + break + sector = self.getsect(isect) + nextfat = self.sector_array(sector) + self.fat = self.fat + nextfat + return isect + + def loadfat(self, header): + sect = header[76:512] + self.fat = array.array('I') + self.loadfat_sect(sect) + if self.num_difat_sectors != 0: + nb_difat_sectors = (self.sectorsize // 4) - 1 + nb_difat = (self.num_fat_sectors - 109 + nb_difat_sectors - 1) // nb_difat_sectors + isect_difat = self.first_difat_sector + for i in range(nb_difat): + sector_difat = self.getsect(isect_difat) + difat = self.sector_array(sector_difat) + self.loadfat_sect(difat[:nb_difat_sectors]) + isect_difat = difat[nb_difat_sectors] + if len(self.fat) > self.nb_sect: + self.fat = self.fat[:self.nb_sect] + + def loadminifat(self): + stream_size = self.num_mini_fat_sectors * self.sector_size + nb_minisectors = (self.root.size + self.mini_sector_size - 1) // self.mini_sector_size + used_size = nb_minisectors * 4 + sect = self._open(self.minifatsect, stream_size, force_FAT=True).read() + self.minifat = self.sector_array(sect) + self.minifat = self.minifat[:nb_minisectors] + + def getsect(self, sect): + try: + self.fp.seek(self.sectorsize * (sect + 1)) + except: + print('IndexError: Sector index out of range') + sector = self.fp.read(self.sectorsize) + return sector + + def loaddirectory(self, sect): + self.directory_fp = self._open(sect, force_FAT=True) + max_entries = self.directory_fp.size // 128 + self.direntries = [None] * max_entries + root_entry = self._load_direntry(0) + self.root = self.direntries[0] + self.root.build_storage_tree() + + def 
_load_direntry(self, sid): + if self.direntries[sid] is not None: + return self.direntries[sid] + self.directory_fp.seek(sid * 128) + entry = self.directory_fp.read(128) + self.direntries[sid] = MaxFileDirEntry(entry, sid, self) + return self.direntries[sid] + + def _open(self, start, size=UNKNOWN_SIZE, force_FAT=False): + if size < self.minisectorcutoff and not force_FAT: + if not self.ministream: + self.loadminifat() + size_ministream = self.root.size + self.ministream = self._open(self.root.isectStart, + size_ministream, force_FAT=True) + return MaxStream(fp=self.ministream, sect=start, size=size, + offset=0, sectorsize=self.minisectorsize, + fat=self.minifat, filesize=self.ministream.size) + else: + return MaxStream(fp=self.fp, sect=start, size=size, + offset=self.sectorsize, sectorsize=self.sectorsize, + fat=self.fat, filesize=self._filesize) + + def _find(self, filename): + if isinstance(filename, str): + filename = filename.split('/') + node = self.root + for name in filename: + for kid in node.kids: + if kid.name.lower() == name.lower(): + break + node = kid + return node.sid + + def openstream(self, filename): + sid = self._find(filename) + entry = self.direntries[sid] + return self._open(entry.isectStart, entry.size) + + +################### +# DATA PROCESSING # +################### + +class MaxChunk(): + """Representing a chunk of a .max file.""" + def __init__(self, types, size, level, number): + self.number = number + self.types = types + self.level = level + self.parent = None + self.previous = None + self.next = None + self.size = size + self.unknown = True + self.format = None + self.data = None + + def __str__(self): + if (self.unknown): + return "%s[%4x]%04X:%s" % ("" * self.level, self.number, self.types, + ":".join("%02x" % (c) for c in self.data)) + return "%s[%4x]%04X:%s=%s" % ("" * self.level, self.number, self.types, + self.format, self.data) + + +class ByteArrayChunk(MaxChunk): + """A byte array of a .max chunk.""" + def __init__(self, 
types, data, level, number): + MaxChunk.__init__(self, types, data, level, number) + + def set(self, data, name, fmt, start, end): + try: + self.data = struct.unpack(fmt, data[start:end]) + self.format = name + self.unknown = False + except Exception as exc: + self.data = data + # print('StructError:', exc, name) + + def set_string(self, data): + try: + self.data = data.decode('UTF-16LE') + self.format = "Str16" + self.unknown = False + except: + self.data = data + + def set_data(self, data): + if (self.types in [0x0340, 0x4001, 0x0456, 0x0962]): + self.set_string(data) + elif (self.types in [0x2034, 0x2035]): + self.set(data, "ints", '<' + 'I' * int(len(data) / 4), 0, len(data)) + elif (self.types in [0x2501, 0x2503, 0x2504, 0x2505, 0x2511]): + self.set(data, "floats", '<' + 'f' * int(len(data) / 4), 0, len(data)) + elif (self.types == 0x2510): + self.set(data, "struct", '<' + 'f' * int(len(data) / 4 - 1) + 'I', 0, len(data)) + elif (self.types == 0x0100): + self.set(data, "float", ' 3): + return get_rotation(refs[0]) + elif (uid == MATRIX_ROT): # Rotation Wire + return get_rotation(get_references(pos)[0]) + if (rotation): + mtx = mathutils.Matrix.Rotation(rotation.angle, 4, rotation.axis) + return mtx + + +def get_scale(pos): + mtx = mathutils.Matrix.Identity(4) + if (pos): + uid = get_guid(pos) + if (uid == 0x2010): # Bezier Scale + scale = pos.get_first(0x2501) + if (scale is None): + scale = pos.get_first(0x2505) + pos = scale.data + elif (uid == 0x442315): # TCB Zoom + scale = pos.get_first(0x2501) + if (scale is None): + scale = pos.get_first(0x2505) + pos = scale.data + elif (uid == MATRIX_SCL): # ScaleXYZ + pos = get_point_3d(pos, 1.0) + else: + return mtx + mtx = mathutils.Matrix.Diagonal(pos[:3]).to_4x4() + return mtx + + +def create_matrix(prc): + mtx = mathutils.Matrix.Identity(4) + pos = rot = scl = None + uid = get_guid(prc) + if (uid == 0x2005): # Position/Rotation/Scale + pos = get_position(get_references(prc)[0]) + rot = 
get_rotation(get_references(prc)[1]) + scl = get_scale(get_references(prc)[2]) + elif (uid == 0x9154): # BipSlave Control + biped_sub_anim = get_references(prc)[2] + refs = get_references(biped_sub_anim) + scl = get_scale(get_references(refs[1])[0]) + rot = get_rotation(get_references(refs[2])[0]) + pos = get_position(get_references(refs[3])[0]) + if (pos is not None): + mtx = pos @ mtx + if (rot is not None): + mtx = rot @ mtx + if (scl is not None): + mtx = scl @ mtx + return mtx + + +def get_matrix_mesh_material(node): + refs = get_reference(node) + if (refs): + prs = refs.get(0, None) + msh = refs.get(1, None) + mat = refs.get(3, None) + lyr = refs.get(6, None) + else: + refs = get_references(node) + prs = refs[0] + msh = refs[1] + mat = refs[3] + lyr = None + if (len(refs) > 6): + lyr = refs[6] + return prs, msh, mat, lyr + + +def get_property(properties, idx): + for child in properties.children: + if (child.types & 0x100E): + if (get_short(child.data, 0)[0] == idx): + return child + return None + + +def get_color(colors, idx): + prop = get_property(colors, idx) + if (prop is not None): + siz = len(prop.data) - 12 + col, offset = get_floats(prop.data, siz, 3) + return (col[0], col[1], col[2]) + return None + + +def get_value(colors, idx): + prop = get_property(colors, idx) + if (prop is not None): + siz = len(prop.data) - 4 + val, offset = get_float(prop.data, siz) + return val + return None + + +def get_parameter(colors, fmt): + if (fmt == 0x1): + siz = len(colors.data) - 12 + para, offset = get_floats(colors.data, siz, 3) + else: + siz = len(colors.data) - 4 + para, offset = get_float(colors.data, siz) + return para + + +def get_standard_material(refs): + material = None + try: + if (len(refs) > 2): + colors = refs[2] + parameters = get_references(colors)[0] + material = Material() + material.set('ambient', get_color(parameters, 0x00)) + material.set('diffuse', get_color(parameters, 0x01)) + material.set('specular', get_color(parameters, 0x02)) + 
material.set('emissive', get_color(parameters, 0x08)) + material.set('shinines', get_value(parameters, 0x0B)) + parablock = refs[4] # ParameterBlock2 + material.set('glossines', get_value(parablock, 0x02)) + material.set('metallic', get_value(parablock, 0x05)) + except: + pass + return material + + +def get_vray_material(vry): + material = Material() + try: + material.set('diffuse', get_color(vry, 0x01)) + material.set('specular', get_color(vry, 0x02)) + material.set('shinines', get_value(vry, 0x03)) + material.set('refraction', get_value(vry, 0x09)) + material.set('emissive', get_color(vry, 0x17)) + material.set('glossines', get_value(vry, 0x18)) + material.set('metallic', get_value(vry, 0x19)) + except: + pass + return material + + +def get_corona_material(mtl): + material = Material() + try: + cor = mtl.children + material.set('diffuse', get_parameter(cor[3], 0x1)) + material.set('specular', get_parameter(cor[4], 0x1)) + material.set('emissive', get_parameter(cor[8], 0x1)) + material.set('glossines', get_parameter(cor[9], 0x2)) + except: + pass + return material + + +def get_arch_material(ad): + material = Material() + try: + material.set('diffuse', get_color(ad, 0x1A)) + material.set('specular', get_color(ad, 0x05)) + material.set('shinines', get_value(ad, 0x0B)) + except: + pass + return material + + +def adjust_material(obj, mat): + material = None + if (mat is not None): + uid = get_guid(mat) + if (uid == 0x0002): # Standard + refs = get_references(mat) + material = get_standard_material(refs) + elif (uid == 0x0200): # Multi/Sub-Object + refs = get_references(mat) + material = adjust_material(obj, refs[-1]) + elif (uid == VRAY_MTL): # VRayMtl + refs = get_reference(mat) + material = get_vray_material(refs[1]) + elif (uid == CORO_MTL): # CoronaMtl + refs = get_references(mat) + material = get_corona_material(refs[0]) + elif (uid == ARCH_MTL): # Arch + refs = get_references(mat) + material = get_arch_material(refs[0]) + if (obj is not None) and (material is 
not None): + objMaterial = bpy.data.materials.new(get_cls_name(mat)) + obj.data.materials.append(objMaterial) + matShader = PrincipledBSDFWrapper(objMaterial, is_readonly=False, use_nodes=True) + matShader.base_color = objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8, 0.8, 0.8)) + matShader.specular_tint = objMaterial.specular_color[:3] = material.get('specular', (1, 1, 1)) + matShader.specular = objMaterial.specular_intensity = material.get('glossines', 0.5) + matShader.roughness = objMaterial.roughness = 1.0 - material.get('shinines', 0.6) + matShader.metallic = objMaterial.metallic = material.get('metallic', 0) + matShader.emission_color = material.get('emissive', (0, 0, 0)) + matShader.ior = material.get('refraction', 1.45) + + +def adjust_matrix(obj, node): + mtx = create_matrix(node).flatten() + plc = mathutils.Matrix(*mtx) + obj.matrix_world = plc + return plc + + +def create_shape(context, pts, indices, node, key, mtx, mat, umt): + name = node.get_first(TYP_NAME).data + shape = bpy.data.meshes.new(name) + if (key is not None): + name = "%s_%d" % (name, key) + data = [] + if (pts): + loopstart = [] + looplines = loop = 0 + nb_faces = len(indices) + for fid in range(nb_faces): + polyface = indices[fid] + looplines += len(polyface) + shape.vertices.add(len(pts) // 3) + shape.loops.add(looplines) + shape.polygons.add(nb_faces) + shape.vertices.foreach_set("co", pts) + for vtx in indices: + loopstart.append(loop) + data.extend(vtx) + loop += len(vtx) + shape.polygons.foreach_set("loop_start", loopstart) + shape.loops.foreach_set("vertex_index", data) + + if (len(data) > 0): + shape.validate() + shape.update() + obj = bpy.data.objects.new(name, shape) + context.view_layer.active_layer_collection.collection.objects.link(obj) + obj.matrix_world = mtx + if (umt): + adjust_material(obj, mat) + return True + return True + + +def calc_point(data): + points = [] + long, offset = get_long(data, 0) + while (offset < len(data)): + val, offset = get_long(data, 
offset) + flt, offset = get_floats(data, offset, 3) + points.extend(flt) + return points + + +def calc_point_float(data): + points = [] + long, offset = get_long(data, 0) + while (offset < len(data)): + flt, offset = get_floats(data, offset, 3) + points.extend(flt) + return points + + +def calc_point_3d(chunk): + data = chunk.data + count, offset = get_long(data, 0) + pointlist = [] + try: + while (offset < len(data)): + pt = Point3d() + long, offset = get_long(data, offset) + pt.points, offset = get_longs(data, offset, long) + pt.flags, offset = get_short(data, offset) + if ((pt.flags & 0x01) != 0): + pt.f1, offset = get_long(data, offset) + if ((pt.flags & 0x08) != 0): + pt.fH, offset = get_short(data, offset) + if ((pt.flags & 0x10) != 0): + pt.f2, offset = get_long(data, offset) + if ((pt.flags & 0x20) != 0): + pt.fA, offset = get_longs(data, offset, 2 * (long - 3)) + if (len(pt.points) > 0): + pointlist.append(pt) + except Exception as exc: + print('ArrayError:\n', "%s: offset = %d\n" % (exc, offset)) + return pointlist + + +def get_point_array(values): + verts = [] + if len(values) >= 4: + count, offset = get_long(values, 0) + while (count > 0): + floats, offset = get_floats(values, offset, 3) + verts.extend(floats) + count -= 1 + return verts + + +def get_poly_4p(points): + vertex = {} + for point in points: + ngon = point.points + key = point.fH + if (key not in vertex): + vertex[key] = [] + vertex[key].append(ngon) + return vertex + + +def get_poly_5p(data): + count, offset = get_long(data, 0) + ngons = [] + while count > 0: + pt, offset = get_longs(data, offset, 3) + offset += 8 + ngons.append(pt) + count -= 1 + return ngons + + +def get_poly_6p(data): + count, offset = get_long(data, 0) + polylist = [] + while (offset < len(data)): + long, offset = get_longs(data, offset, 6) + i = 5 + while ((i > 3) and (long[i] < 0)): + i -= 1 + if (i > 2): + polylist.append(long[1:i]) + return polylist + + +def get_poly_data(chunk): + offset = 0 + polylist = [] + data 
= chunk.data + while (offset < len(data)): + count, offset = get_long(data, offset) + points, offset = get_longs(data, offset, count) + polylist.append(points) + return polylist + + +def create_editable_poly(context, node, msh, mat, mtx, umt, uvm): + coords = point4i = point6i = pointNi = None + poly = msh.get_first(0x08FE) + created = False + lidx = [] + lcrd = [] + lply = [] + if (poly): + for child in poly.children: + if (child.types == 0x0100): + coords = calc_point(child.data) + elif (child.types == 0x0108): + point6i = child.data + elif (child.types == 0x011A): + point4i = calc_point_3d(child) + elif (child.types == 0x0310): + pointNi = child.data + elif (child.types == 0x0124): + lidx.append(get_long(child.data, 0)[0]) + elif (child.types == 0x0128): + lcrd.append(calc_point_float(child.data)) + elif (child.types == 0x012B): + lply.append(get_poly_data(child)) + if (point4i is not None): + vertex = get_poly_4p(point4i) + if (len(vertex) > 0): + for key, ngons in vertex.items(): + created |= create_shape(context, coords, ngons, + node, key, mtx, mat, umt) + else: + created = True + elif (point6i is not None): + ngons = get_poly_6p(point6i) + created = create_shape(context, coords, ngons, node, + None, mtx, mat, umt) + elif (pointNi is not None): + ngons = get_poly_5p(pointNi) + created = create_shape(context, coords, ngons, node, + None, mtx, mat, umt) + if (uvm and len(lidx) > 0): + for i in range(len(lidx)): + created |= create_shape(context, lcrd[i], lply[i], + node, lidx[i], mtx, mat, umt) + return created + + +def create_editable_mesh(context, node, msh, mat, mtx, umt): + poly = msh.get_first(0x08FE) + created = False + if (poly): + vertex_chunk = poly.get_first(0x0914) + clsid_chunk = poly.get_first(0x0912) + coords = get_point_array(vertex_chunk.data) + ngons = get_poly_5p(clsid_chunk.data) + created = create_shape(context, coords, ngons, node, None, mtx, mat, umt) + return created + + +def create_shell(context, node, shell, mat, mtx, umt, uvm): + refs 
= get_references(shell) + msh = refs[-1] + if (get_cls_name(msh) == "'Editable Poly'"): + created = create_editable_poly(context, node, msh, mat, mtx, umt, uvm) + else: + created = create_editable_mesh(context, node, msh, mat, mtx, umt) + return created + + +def create_skipable(context, node, skip): + name = node.get_first(TYP_NAME).data + print(" skipping %s '%s'... " % (skip, name)) + return True + + +def create_mesh(context, node, msh, mtx, mat, umt, uvm): + created = False + uid = get_guid(msh) + msh.geometry = None + if (uid == EDIT_MESH): + created = create_editable_mesh(context, node, msh, mat, mtx, umt) + elif (uid == EDIT_POLY): + created = create_editable_poly(context, node, msh, mat, mtx, umt, uvm) + elif (uid in {0x2032, 0x2033}): + created = create_shell(context, node, msh, mat, mtx, umt, uvm) + else: + skip = SKIPPABLE.get(uid) + if (skip is not None): + created = create_skipable(context, node, skip) + return created, uid + + +def create_object(context, node, mscale, usemat, uvmesh, transform): + parent = get_node_parent(node) + node.parent = parent + prs, msh, mat, lyr = get_matrix_mesh_material(node) + while ((parent is not None) and (get_guid(parent) != 0x02)): + parent_mtx = parent.matrix + if (parent_mtx): + prs = prs.dot(parent_mtx) + parent = get_node_parent(parent) + if (transform): + mtx = create_matrix(prs) @ mscale + else: + mtx = mscale + created, uid = create_mesh(context, node, msh, mtx, mat, usemat, uvmesh) + + +def make_scene(context, mscale, usemat, uvmesh, transform, parent): + for chunk in parent.children: + if (isinstance(chunk, SceneChunk)): + if ((get_guid(chunk) == 0x01) and (get_super_id(chunk) == 0x01)): + try: + create_object(context, chunk, mscale, usemat, uvmesh, transform) + except Exception as exc: + print('ImportError:', exc, chunk) + + +def read_scene(context, maxfile, filename, mscale, usemat, uvmesh, transform): + global SCENE_LIST + SCENE_LIST = read_chunks(maxfile, 'Scene', filename+'.Scn.bin', conReader=SceneChunk) 
+ make_scene(context, mscale, usemat, uvmesh, transform, SCENE_LIST[0]) + + +def read(context, filename, mscale, usemat, uvmesh, transform): + if (is_maxfile(filename)): + maxfile = ImportMaxFile(filename) + read_class_data(maxfile, filename) + read_config(maxfile, filename) + read_directory(maxfile, filename) + read_class_directory(maxfile, filename) + read_video_postqueue(maxfile, filename) + read_scene(context, maxfile, filename, mscale, usemat, uvmesh, transform) + else: + print("File seems to be no 3D Studio Max file!") + + +def load(operator, context, filepath="", scale_objects=1.0, use_material=True, + use_uv_mesh=False, use_apply_matrix=False, global_matrix=None): + context.window.cursor_set('WAIT') + mscale = mathutils.Matrix.Scale(scale_objects, 4) + if global_matrix is not None: + mscale = global_matrix @ mscale + + read(context, filepath, mscale, usemat=use_material, uvmesh=use_uv_mesh, transform=use_apply_matrix) + context.window.cursor_set('DEFAULT') + + return {'FINISHED'} -- 2.30.2