From a3760ee0536ab8abdb26c4d4d5adef228006b048 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sun, 26 Mar 2023 18:31:09 +0200 Subject: [PATCH 01/14] io_scene_3ds: Updated version changed version Signed-off-by: Sebastian Sille --- io_scene_3ds/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 3912ac1..9430ca4 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -32,7 +32,7 @@ import bpy bl_info = { "name": "Autodesk 3DS format", "author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand", - "version": (2, 2, 0), + "version": (2, 3, 1), "blender": (3, 0, 0), "location": "File > Import", "description": "Import 3DS, meshes, uvs, materials, textures, " -- 2.30.2 From 3a11dae2437a3dfb7866d578e17548b2ca799763 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sun, 26 Mar 2023 19:05:22 +0200 Subject: [PATCH 02/14] io_scene_3ds: Added face and point flags added point array chunk exporting sharp edge flags some cleanup Signed-off-by: Sebastian Sille --- io_scene_3ds/export_3ds.py | 57 ++++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 17 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 71c55fc..e7dda76 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -108,6 +108,7 @@ OBJECT_CAM_RANGES = 0x4720 # The camera range values # >------ sub defines of OBJECT_MESH OBJECT_VERTICES = 0x4110 # The objects vertices +OBJECT_VERTFLAGS = 0x4111 # The objects vertex flags OBJECT_FACES = 0x4120 # The objects faces OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color OBJECT_UV = 0x4140 # The UV texture coordinates @@ -327,10 +328,11 @@ class _3ds_rgb_color(object): class _3ds_face(object): """Class representing a face for a 3ds file.""" - __slots__ = ("vindex", ) + __slots__ = ("vindex", "flag") - def __init__(self, vindex): + def __init__(self, vindex, flag): self.vindex = vindex + self.flag = flag def get_size(self): return 4 * SZ_SHORT @@ -339,11 +341,11 @@ class _3ds_face(object): # catch this problem def write(self, file): - # The last zero is only used by 3d studio - file.write(struct.pack("<4H", self.vindex[0], self.vindex[1], self.vindex[2], 0)) + # The last short is used for face flags + file.write(struct.pack("<4H", self.vindex[0], self.vindex[1], self.vindex[2], self.flag)) def __str__(self): - return "[%d %d %d]" % (self.vindex[0], self.vindex[1], self.vindex[2]) + return "[%d %d %d %d]" % (self.vindex[0], self.vindex[1], self.vindex[2], self.flag) class _3ds_array(object): @@ -763,17 +765,17 @@ def make_material_chunk(material, image): class tri_wrapper(object): """Class representing a triangle. 
- Used when converting faces to triangles""" - __slots__ = "vertex_index", "ma", "image", "faceuvs", "offset", "group" + __slots__ = "vertex_index", "ma", "image", "faceuvs", "offset", "flag", "group" - def __init__(self, vindex=(0, 0, 0), ma=None, image=None, faceuvs=None, group=0): + def __init__(self, vindex=(0, 0, 0), ma=None, image=None, faceuvs=None, flag=0, group=0): self.vertex_index = vindex self.ma = ma self.image = image self.faceuvs = faceuvs self.offset = [0, 0, 0] # offset indices + self.flag = flag self.group = group @@ -789,7 +791,7 @@ def extract_triangles(mesh): img = None for i, face in enumerate(mesh.loop_triangles): f_v = face.vertices - + v1, v2, v3 = f_v[0], f_v[1], f_v[2] uf = mesh.uv_layers.active.data if do_uv else None if do_uv: @@ -798,13 +800,37 @@ def extract_triangles(mesh): img = get_uv_image(ma) if uf else None if img is not None: img = img.name + uv1, uv2, uv3 = f_uv[0], f_uv[1], f_uv[2] + + """Flag 0x1 sets CA edge visible, Flag 0x2 sets BC edge visible, Flag 0x4 sets AB edge visible + Flag 0x8 indicates a U axis texture wrap and Flag 0x10 indicates a V axis texture wrap + In Blender we use the edge CA, BC, and AB flags for sharp edges flags""" + a_b = mesh.edges[mesh.loops[face.loops[0]].edge_index] + b_c = mesh.edges[mesh.loops[face.loops[1]].edge_index] + c_a = mesh.edges[mesh.loops[face.loops[2]].edge_index] + + if v3 == 0: + a_b, b_c, c_a = c_a, a_b, b_c + + faceflag = 0 + if c_a.use_edge_sharp: + faceflag = faceflag + 0x1 + if b_c.use_edge_sharp: + faceflag = faceflag + 0x2 + if a_b.use_edge_sharp: + faceflag = faceflag + 0x4 smoothgroup = polygroup[face.polygon_index] - if len(f_v) == 3: - new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img) + if len(f_v)==3: + if v3 == 0: + v1, v2, v3 = v3, v1, v2 + if do_uv: + uv1, uv2, uv3 = uv3, uv1, uv2 + new_tri = tri_wrapper((v1, v2, v3), face.material_index, img) if (do_uv): - new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2]) + new_tri.faceuvs = uv_key(uv1), uv_key(uv2), uv_key(uv3) + new_tri.flag = faceflag new_tri.group = smoothgroup if face.use_smooth else 0 tri_list.append(new_tri) @@ -813,7 +839,6 @@ def extract_triangles(mesh): def remove_face_uv(verts, tri_list): """Remove face UV coordinates from a list of triangles. - Since 3ds files only support one pair of uv coordinates for each vertex, face uv coordinates need to be converted to vertex uv coordinates. 
That means that vertices need to be duplicated when there are multiple uv coordinates per vertex.""" @@ -896,8 +921,7 @@ def make_faces_chunk(tri_list, mesh, materialDict): # Gather materials used in this mesh - mat/image pairs unique_mats = {} for i, tri in enumerate(tri_list): - - face_list.add(_3ds_face(tri.vertex_index)) + face_list.add(_3ds_face(tri.vertex_index, tri.flag)) if materials: ma = materials[tri.ma] @@ -927,7 +951,6 @@ def make_faces_chunk(tri_list, mesh, materialDict): face_chunk.add_subchunk(obj_material_chunk) else: - obj_material_faces = [] obj_material_names = [] for m in materials: @@ -937,7 +960,7 @@ def make_faces_chunk(tri_list, mesh, materialDict): n_materials = len(obj_material_names) for i, tri in enumerate(tri_list): - face_list.add(_3ds_face(tri.vertex_index)) + face_list.add(_3ds_face(tri.vertex_index, tri.flag)) if (tri.ma < n_materials): obj_material_faces[tri.ma].add(_3ds_ushort(i)) -- 2.30.2 From cce6fce4bc4e44377e5fbb8ebf1c208be546153d Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sun, 26 Mar 2023 19:15:20 +0200 Subject: [PATCH 03/14] io_scene_3ds: Added face and point flags fixed edge flag loop to match with point loop some cleanup Signed-off-by: Sebastian Sille --- io_scene_3ds/import_3ds.py | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index d333618..833c43a 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -400,15 +400,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE for fidx in faces: bmesh.polygons[fidx].material_index = mat_idx -# if uv_faces and img: -# for fidx in faces: -# bmesh.polygons[fidx].material_index = mat_idx -# # TODO: How to restore this? 
-# # uv_faces[fidx].image = img -# else: -# for fidx in faces: -# bmesh.polygons[fidx].material_index = mat_idx - if uv_faces: uvl = bmesh.uv_layers.active.data[:] for fidx, pl in enumerate(bmesh.polygons): @@ -433,13 +424,16 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE imported_objects.append(ob) if myContextMesh_flag: - # Bit 0 (0x1) sets edge CA visible, Bit 1 (0x2) sets edge BC visible and Bit 2 (0x4) sets the edge AB visible + # Bit 0 (0x1) sets edge CA visible, Bit 1 (0x2) sets edge BC visible and Bit 2 (0x4) sets edge AB visible # In Blender we use sharp edges for those flags - for lt, tri in enumerate(bmesh.loop_triangles): - faceflag = myContextMesh_flag[lt] - edge_ca = bmesh.edges[bmesh.loops[tri.loops[2]].edge_index] - edge_bc = bmesh.edges[bmesh.loops[tri.loops[1]].edge_index] - edge_ab = bmesh.edges[bmesh.loops[tri.loops[0]].edge_index] + for f, pl in enumerate(bmesh.polygons): + face = myContextMesh_facels[f] + faceflag = myContextMesh_flag[f] + edge_ab = bmesh.edges[bmesh.loops[pl.loop_start].edge_index] + edge_bc = bmesh.edges[bmesh.loops[pl.loop_start + 1].edge_index] + edge_ca = bmesh.edges[bmesh.loops[pl.loop_start + 2].edge_index] + if face[2] == 0: + edge_ab, edge_bc, edge_ca = edge_ca, edge_ab, edge_bc if faceflag == 1: edge_ca.use_edge_sharp = True elif faceflag == 2: @@ -455,8 +449,10 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE elif faceflag == 6: edge_bc.use_edge_sharp = True edge_ab.use_edge_sharp = True - elif faceflag >= 7: - pass + elif faceflag == 7: + edge_bc.use_edge_sharp = True + edge_ab.use_edge_sharp = True + edge_ca.use_edge_sharp = True if myContextMesh_smooth: for f, pl in enumerate(bmesh.polygons): -- 2.30.2 From 122e83dbecc47dd1c06de6114e788a3a515bd6f6 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sat, 13 May 2023 20:44:29 +0200 Subject: [PATCH 04/14] io_scene_3ds: Update for Blender 3.x --- io_scene_3ds/__init__.py | 44 +++++++++++----------------------------- 1 file changed, 12 insertions(+), 32 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 9430ca4..643214c 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -1,20 +1,4 @@ -# ##### BEGIN GPL LICENSE BLOCK ##### -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-# -# ##### END GPL LICENSE BLOCK ##### +# SPDX-License-Identifier: GPL-2.0-or-later from bpy_extras.io_utils import ( ImportHelper, @@ -34,12 +18,12 @@ bl_info = { "author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand", "version": (2, 3, 1), "blender": (3, 0, 0), - "location": "File > Import", - "description": "Import 3DS, meshes, uvs, materials, textures, " - "cameras & lamps", - "warning": "Images must be in file folder", - "doc_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/" - "Scripts/Import-Export/Autodesk_3DS", + "location": "File > Import-Export", + "description": "3DS Import/Export meshes, UVs, materials, textures, " + "cameras, lamps & animation", + "warning": "Images must be in file folder, " + "filenames are limited to DOS 8.3 format", + "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/scene_3ds.html", "category": "Import-Export", } @@ -81,12 +65,16 @@ class Import3DS(bpy.types.Operator, ImportHelper): "importing incorrectly", default=True, ) - read_keyframe: bpy.props.BoolProperty( name="Read Keyframe", description="Read the keyframe data", default=True, ) + use_world_matrix: bpy.props.BoolProperty( + name="World Space", + description="Transform to matrix world", + default=False, + ) def execute(self, context): from . import import_3ds @@ -162,14 +150,6 @@ def unregister(): bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) bpy.types.TOPBAR_MT_file_export.remove(menu_func_export) -# NOTES: -# why add 1 extra vertex? and remove it when done? - -# "Answer - eekadoodle - would need to re-order UV's without this since face -# order isnt always what we give blender, BMesh will solve :D" -# -# disabled scaling to size, this requires exposing bb (easy) and understanding -# how it works (needs some time) - if __name__ == "__main__": register() -- 2.30.2 From e46a44f9e493e30c73ea92b38cf2584a93dfd144 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sat, 13 May 2023 20:48:20 +0200 Subject: [PATCH 05/14] Export_3ds: Update for Blender 3.x Added chunks Fixed bugs Cleanup --- io_scene_3ds/export_3ds.py | 331 ++++++++++++++++++------------------- 1 file changed, 160 insertions(+), 171 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 5505322..8859b60 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1,23 +1,5 @@ -# ##### BEGIN GPL LICENSE BLOCK ##### -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-# -# ##### END GPL LICENSE BLOCK ##### - -# Script copyright (C) Bob Holcomb -# Contributors: Campbell Barton, Bob Holcomb, Richard Lärkäng, Damien McGinnes, Mark Stijnman, Sebastian Sille +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright 2005 Bob Holcomb """ Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information @@ -25,25 +7,26 @@ from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode. """ import bpy +import time import math import struct import mathutils import bpy_extras from bpy_extras import node_shader_utils -###################################################### -# Data Structures -###################################################### +################### +# Data Structures # +################### # Some of the chunks that we will export -# ----- Primary Chunk, at the beginning of each file +# >----- Primary Chunk, at the beginning of each file PRIMARY = 0x4D4D -# ------ Main Chunks +# >----- Main Chunks VERSION = 0x0002 # This gives the version of the .3ds file KFDATA = 0xB000 # This is the header for all of the key frame info -# ------ sub defines of OBJECTINFO +# >----- sub defines of OBJECTINFO OBJECTINFO = 0x3D3D # Main mesh object chunk before the material and object information MESHVERSION = 0x3D3E # This gives the version of the mesh AMBIENTLIGHT = 0x2100 # The color of the ambient light @@ -59,9 +42,16 @@ MATSHINESS = 0xA040 # Specular intensity of the object/material (percent) MATSHIN2 = 0xA041 # Reflection of the object/material (percent) MATSHIN3 = 0xA042 # metallic/mirror of the object/material (percent) MATTRANS = 0xA050 # Transparency value (100-OpacityValue) (percent) +MATSELFILLUM = 0xA080 # # Material self illumination flag MATSELFILPCT = 0xA084 # Self illumination strength (percent) +MATWIRE = 0xA085 # Material wireframe rendered flag +MATFACEMAP = 0xA088 # Face mapped textures flag +MATPHONGSOFT = 0xA08C # Phong soften material flag +MATWIREABS = 0xA08E # Wire size in units flag +MATWIRESIZE = 0xA087 # Rendered wire size in pixels MATSHADING = 0xA100 # Material shading method +# >------ sub defines of MAT_MAP MAT_DIFFUSEMAP = 0xA200 # This is a header for a new diffuse texture MAT_SPECMAP = 0xA204 # head for specularity map MAT_OPACMAP = 0xA210 # head for opacity map @@ -71,9 +61,7 @@ MAT_BUMP_PERCENT = 0xA252 # Normalmap strength (percent) MAT_TEX2MAP = 0xA33A # head for secondary texture MAT_SHINMAP = 0xA33C # head for roughness map MAT_SELFIMAP = 0xA33D # head for emission map - -# >------ sub defines of MAT_MAP -MATMAPFILE = 0xA300 # This holds the file name of a texture +MAT_MAP_FILE = 0xA300 # This holds the file name of a texture MAT_MAP_TILING = 0xa351 # 2nd bit (from LSB) is mirror UV flag MAT_MAP_TEXBLUR = 0xA353 # Texture blurring factor MAT_MAP_USCALE = 0xA354 # U axis scaling @@ -87,10 +75,12 @@ MAP_RCOL = 0xA364 # Red tint MAP_GCOL = 0xA366 # Green tint MAP_BCOL = 0xA368 # Blue tint -RGB = 0x0010 # RGB float -RGB1 = 0x0011 # RGB Color1 -RGB2 = 0x0012 # RGB Color2 +RGB = 0x0010 # RGB float Color1 +RGB1 = 0x0011 # RGB int Color1 +RGBI = 0x0012 # RGB int Color2 +RGBF = 0x0013 # RGB float Color2 PCT = 0x0030 # Percent chunk +PCTF = 0x0031 # Percent float MASTERSCALE = 0x0100 # Master scale factor # >------ sub defines of OBJECT @@ -101,7 +91,10 @@ OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object # >------ Sub defines of LIGHT LIGHT_MULTIPLIER = 0x465B # The light energy factor LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight -LIGHT_SPOTROLL = 0x4656 # The roll 
angle of the spot +LIGHT_SPOT_ROLL = 0x4656 # Light spot roll angle +LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag +LIGHT_SPOT_SEE_CONE = 0x4650 # Light spot show cone flag +LIGHT_SPOT_RECTANGLE = 0x4651 # Light spot rectangle flag # >------ sub defines of CAMERA OBJECT_CAM_RANGES = 0x4720 # The camera range values @@ -116,19 +109,34 @@ OBJECT_SMOOTH = 0x4150 # The objects smooth groups OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix # >------ sub defines of KFDATA -KFDATA_KFHDR = 0xB00A -KFDATA_KFSEG = 0xB008 -KFDATA_KFCURTIME = 0xB009 -KFDATA_OBJECT_NODE_TAG = 0xB002 +AMBIENT_NODE_TAG = 0xB001 # Ambient node tag +OBJECT_NODE_TAG = 0xB002 # Object tree tag +CAMERA_NODE_TAG = 0xB003 # Camera object tag +TARGET_NODE_TAG = 0xB004 # Camera target tag +LIGHT_NODE_TAG = 0xB005 # Light object tag +LTARGET_NODE_TAG = 0xB006 # Light target tag +SPOT_NODE_TAG = 0xB007 # Spotlight tag +KFDATA_KFSEG = 0xB008 # Frame start & end +KFDATA_KFCURTIME = 0xB009 # Frame current +KFDATA_KFHDR = 0xB00A # Keyframe header # >------ sub defines of OBJECT_NODE_TAG -OBJECT_NODE_ID = 0xB030 -OBJECT_NODE_HDR = 0xB010 -OBJECT_PIVOT = 0xB013 -OBJECT_INSTANCE_NAME = 0xB011 -POS_TRACK_TAG = 0xB020 -ROT_TRACK_TAG = 0xB021 -SCL_TRACK_TAG = 0xB022 +OBJECT_NODE_ID = 0xB030 # Object hierachy ID +OBJECT_NODE_HDR = 0xB010 # Hierachy tree header +OBJECT_INSTANCE_NAME = 0xB011 # Object instance name +OBJECT_PIVOT = 0xB013 # Object pivot position +OBJECT_BOUNDBOX = 0xB014 # Object boundbox +OBJECT_MORPH_SMOOTH = 0xB015 # Object smooth angle +POS_TRACK_TAG = 0xB020 # Position transform tag +ROT_TRACK_TAG = 0xB021 # Rotation transform tag +SCL_TRACK_TAG = 0xB022 # Scale transform tag +FOV_TRACK_TAG = 0xB023 # Field of view tag +ROLL_TRACK_TAG = 0xB024 # Roll transform tag +COL_TRACK_TAG = 0xB025 # Color transform tag +HOTSPOT_TRACK_TAG = 0xB027 # Hotspot transform tag +FALLOFF_TRACK_TAG = 0xB028 # Falloff transform tag + +ROOT_OBJECT = 0xFFFF # Root object # So 3ds max can open files, limit names to 12 in length @@ -136,7 +144,6 @@ SCL_TRACK_TAG = 0xB022 name_unique = [] # stores str, ascii only name_mapping = {} # stores {orig: byte} mapping - def sane_name(name): name_fixed = name_mapping.get(name) if name_fixed is not None: @@ -147,7 +154,7 @@ def sane_name(name): i = 0 while new_name in name_unique: - new_name = new_name_clean + ".%.3d" % i + new_name = new_name_clean + '.%.3d' % i i += 1 # note, appending the 'str' version. @@ -159,13 +166,11 @@ def sane_name(name): def uv_key(uv): return round(uv[0], 6), round(uv[1], 6) - # size defines: SZ_SHORT = 2 SZ_INT = 4 SZ_FLOAT = 4 - class _3ds_ushort(object): """Class representing a short (2-byte integer) for a 3ds file. 
*** This looks like an unsigned short H is unsigned from the struct docs - Cam***""" @@ -178,7 +183,7 @@ class _3ds_ushort(object): return SZ_SHORT def write(self, file): - file.write(struct.pack("= ma_ls_len: f.material_index = 0 - # ob_derived_eval.to_mesh_clear() - - #if free: - # free_derived_objects(ob) # Make material chunks for all materials used in the meshes: for ma_image in materialDict.values(): object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1])) + # Collect translation for transformation matrix + translation = {} + # Give all objects a unique ID and build a dictionary from object name to object id: - translation = {} # collect translation for transformation matrix - #name_to_id = {} + # name_to_id = {} + for ob, data, matrix in mesh_objects: translation[ob.name] = ob.location - #name_to_id[ob.name]= len(name_to_id) - """ - #for ob in empty_objects: - # name_to_id[ob.name]= len(name_to_id) - """ + # name_to_id[ob.name]= len(name_to_id) + + for ob in empty_objects: + translation[ob.name] = ob.location + # name_to_id[ob.name]= len(name_to_id) # Create object chunks for all meshes: i = 0 @@ -1356,10 +1344,6 @@ def save(operator, kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) ''' - # if not blender_mesh.users: - # bpy.data.meshes.remove(blender_mesh) - #blender_mesh.vertices = None - i += i # Create chunks for all empties: @@ -1379,7 +1363,7 @@ def save(operator, object_chunk.add_variable("light", _3ds_string(sane_name(ob.name))) light_chunk.add_variable("location", _3ds_point_3d(ob.location)) color_float_chunk.add_variable("color", _3ds_float_color(ob.data.color)) - energy_factor.add_variable("energy", _3ds_float(ob.data.energy * .001)) + energy_factor.add_variable("energy", _3ds_float(ob.data.energy * 0.001)) light_chunk.add_subchunk(color_float_chunk) light_chunk.add_subchunk(energy_factor) @@ -1391,12 +1375,18 @@ def save(operator, pos_y = ob.location[1] + (ob.location[0] * math.tan(math.radians(90) - ob.rotation_euler[2])) pos_z = hypo * math.tan(math.radians(90) - ob.rotation_euler[0]) spotlight_chunk = _3ds_chunk(LIGHT_SPOTLIGHT) - spot_roll_chunk = _3ds_chunk(LIGHT_SPOTROLL) + spot_roll_chunk = _3ds_chunk(LIGHT_SPOT_ROLL) spotlight_chunk.add_variable("target", _3ds_point_3d((pos_x, pos_y, pos_z))) spotlight_chunk.add_variable("hotspot", _3ds_float(round(hotspot, 4))) spotlight_chunk.add_variable("angle", _3ds_float(round(cone_angle, 4))) spot_roll_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6))) spotlight_chunk.add_subchunk(spot_roll_chunk) + if ob.data.show_cone: + spot_cone_chunk = _3ds_chunk(LIGHT_SPOT_SEE_CONE) + spotlight_chunk.add_subchunk(spot_cone_chunk) + if ob.data.use_square: + spot_square_chunk = _3ds_chunk(LIGHT_SPOT_RECTANGLE) + spotlight_chunk.add_subchunk(spot_square_chunk) light_chunk.add_subchunk(spotlight_chunk) # Add light to object info @@ -1428,9 +1418,9 @@ def save(operator, ''' # At this point, the chunk hierarchy is completely built. 
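
For reference, the chunk classes above are meant to be used exactly the way save() does at this point: build the tree, let get_size() compute every chunk length recursively, then let write() serialize header, variables and subchunks in one pass. A minimal sketch of that flow, using only classes and constants defined in this file (the output path is hypothetical):

    primary = _3ds_chunk(PRIMARY)
    version_chunk = _3ds_chunk(VERSION)
    version_chunk.add_variable("version", _3ds_uint(3))  # 3DS file format version
    primary.add_subchunk(version_chunk)
    primary.get_size()                        # recursively sizes the whole hierarchy
    with open("example.3ds", 'wb') as out:    # hypothetical output file
        primary.write(out)                    # writes ID, size, variables, then subchunks
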
- # Check the size: primary.get_size() + # Open the file for writing: file = open(filepath, 'wb') @@ -1445,7 +1435,6 @@ def save(operator, name_mapping.clear() # Debugging only: report the exporting time: - # Blender.Window.WaitCursor(0) print("3ds export time: %.2f" % (time.time() - duration)) # Debugging only: dump the chunk hierarchy: -- 2.30.2 From 984e1054d8090ac80d9f26cb6f13a00f8f6f945c Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sat, 13 May 2023 20:51:08 +0200 Subject: [PATCH 06/14] Import_3ds: Update for Blender3.x Added animation keyframe import Added chunks Fixed bugs Cleanup --- io_scene_3ds/import_3ds.py | 921 ++++++++++++++++++++++--------------- 1 file changed, 550 insertions(+), 371 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 833c43a..4976cd5 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1,24 +1,5 @@ -# ##### BEGIN GPL LICENSE BLOCK ##### -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# ##### END GPL LICENSE BLOCK ##### - -# Script copyright (C) Bob Holcomb -# Contributors: Bob Holcomb, Richard L?rk?ng, Damien McGinnes, Sebastian Sille -# Campbell Barton, Mario Lapin, Dominique Lorre, Andreas Atteneder +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright 2005 Bob Holcomb import os import time @@ -26,34 +7,40 @@ import struct import bpy import math import mathutils +from bpy_extras.image_utils import load_image from bpy_extras.node_shader_utils import PrincipledBSDFWrapper BOUNDS_3DS = [] -###################################################### -# Data Structures -###################################################### +################### +# Data Structures # +################### # Some of the chunks that we will see -# ----- Primary Chunk, at the beginning of each file +# >----- Primary Chunk, at the beginning of each file PRIMARY = 0x4D4D -# ------ Main Chunks +# >----- Main Chunks OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information VERSION = 0x0002 # This gives the version of the .3ds file +AMBIENTLIGHT = 0x2100 # The color of the ambient light EDITKEYFRAME = 0xB000 # This is the header for all of the key frame info -# ------ Data Chunks, used for various attributes -PERCENTAGE_SHORT = 0x30 -PERCENTAGE_FLOAT = 0x31 +# >----- Data Chunks, used for various attributes +COLOR_F = 0x0010 # color defined as 3 floats +COLOR_24 = 0x0011 # color defined as 3 bytes +LIN_COLOR_24 = 0x0012 # linear byte color +LIN_COLOR_F = 0x0013 # linear float color +PCT_SHORT = 0x30 # percentage short +PCT_FLOAT = 0x31 # percentage float +MASTERSCALE = 0x0100 # Master scale factor -# ------ sub defines of OBJECTINFO +# >----- sub defines of OBJECTINFO MATERIAL = 0xAFFF # This stored the texture info OBJECT = 0x4000 # This stores the faces, vertices, etc... 
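
All of the IDs listed here are matched against the same 6-byte header that starts every chunk: a 2-byte ID followed by a 4-byte length that includes the header itself, which is what lets the importer skip chunks it does not recognize. A minimal sketch of that header parse, independent of the Chunk helper class defined further down in this file:

    import struct

    def read_chunk_header(f):
        chunk_id, chunk_len = struct.unpack('<HI', f.read(6))
        return chunk_id, chunk_len    # chunk_len counts the 6 header bytes as well

    def skip_chunk(f, chunk_len):
        f.seek(chunk_len - 6, 1)      # jump over the payload of an unrecognized chunk
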
# >------ sub defines of MATERIAL -# ------ sub defines of MATERIAL_BLOCK MAT_NAME = 0xA000 # This holds the material name MAT_AMBIENT = 0xA010 # Ambient color of the object/material MAT_DIFFUSE = 0xA020 # This holds the color of the object/material @@ -62,11 +49,23 @@ MAT_SHINESS = 0xA040 # Roughness of the object/material (percent) MAT_SHIN2 = 0xA041 # Shininess of the object/material (percent) MAT_SHIN3 = 0xA042 # Reflection of the object/material (percent) MAT_TRANSPARENCY = 0xA050 # Transparency value of material (percent) -MAT_SELF_ILLUM = 0xA080 # Self Illumination value of material +MAT_XPFALL = 0xA052 # Transparency falloff value +MAT_REFBLUR = 0xA053 # Reflection blurring value +MAT_SELF_ILLUM = 0xA080 # # Material self illumination flag +MAT_TWO_SIDE = 0xA081 # Material is two sided flag +MAT_DECAL = 0xA082 # Material mapping is decaled flag +MAT_ADDITIVE = 0xA083 # Material has additive transparency flag MAT_SELF_ILPCT = 0xA084 # Self illumination strength (percent) -MAT_WIRE = 0xA085 # Only render's wireframe +MAT_WIRE = 0xA085 # Material wireframe rendered flag +MAT_FACEMAP = 0xA088 # Face mapped textures flag +MAT_PHONGSOFT = 0xA08C # Phong soften material flag +MAT_WIREABS = 0xA08E # Wire size in units flag +MAT_WIRESIZE = 0xA087 # Rendered wire size in pixels MAT_SHADING = 0xA100 # Material shading method +MAT_USE_XPFALL = 0xA240 # Transparency falloff flag +MAT_USE_REFBLUR = 0xA250 # Reflection blurring flag +# >------ sub defines of MATERIAL_MAP MAT_TEXTURE_MAP = 0xA200 # This is a header for a new texture map MAT_SPECULAR_MAP = 0xA204 # This is a header for a new specular map MAT_OPACITY_MAP = 0xA210 # This is a header for a new opacity map @@ -77,46 +76,44 @@ MAT_TEX2_MAP = 0xA33A # This is a header for a secondary texture MAT_SHIN_MAP = 0xA33C # This is a header for a new roughness map MAT_SELFI_MAP = 0xA33D # This is a header for a new emission map MAT_MAP_FILEPATH = 0xA300 # This holds the file name of the texture - -MAT_MAP_TILING = 0xa351 # 2nd bit (from LSB) is mirror UV flag -MAT_MAP_USCALE = 0xA354 # U axis scaling -MAT_MAP_VSCALE = 0xA356 # V axis scaling +MAT_MAP_TILING = 0xA351 # 2nd bit (from LSB) is mirror UV flag +MAT_MAP_USCALE = 0xA354 # U axis scaling +MAT_MAP_VSCALE = 0xA356 # V axis scaling MAT_MAP_UOFFSET = 0xA358 # U axis offset MAT_MAP_VOFFSET = 0xA35A # V axis offset -MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad +MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad MAT_MAP_COL1 = 0xA360 # Map Color1 MAT_MAP_COL2 = 0xA362 # Map Color2 MAT_MAP_RCOL = 0xA364 # Red mapping MAT_MAP_GCOL = 0xA366 # Green mapping MAT_MAP_BCOL = 0xA368 # Blue mapping -MAT_FLOAT_COLOR = 0x0010 # color defined as 3 floats -MAT_24BIT_COLOR = 0x0011 # color defined as 3 bytes # >------ sub defines of OBJECT OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object -OBJECT_LIGHT = 0x4600 # This lets un know we are reading a light object -OBJECT_LIGHT_SPOT = 0x4610 # The light is a spotloght. -OBJECT_LIGHT_OFF = 0x4620 # The light off. 
-OBJECT_LIGHT_ATTENUATE = 0x4625 -OBJECT_LIGHT_RAYSHADE = 0x4627 -OBJECT_LIGHT_SHADOWED = 0x4630 -OBJECT_LIGHT_LOCAL_SHADOW = 0x4640 -OBJECT_LIGHT_LOCAL_SHADOW2 = 0x4641 -OBJECT_LIGHT_SEE_CONE = 0x4650 -OBJECT_LIGHT_SPOT_RECTANGULAR = 0x4651 -OBJECT_LIGHT_SPOT_OVERSHOOT = 0x4652 -OBJECT_LIGHT_SPOT_PROJECTOR = 0x4653 -OBJECT_LIGHT_EXCLUDE = 0x4654 -OBJECT_LIGHT_RANGE = 0x4655 -OBJECT_LIGHT_ROLL = 0x4656 -OBJECT_LIGHT_SPOT_ASPECT = 0x4657 -OBJECT_LIGHT_RAY_BIAS = 0x4658 -OBJECT_LIGHT_INNER_RANGE = 0x4659 -OBJECT_LIGHT_OUTER_RANGE = 0x465A -OBJECT_LIGHT_MULTIPLIER = 0x465B -OBJECT_LIGHT_AMBIENT_LIGHT = 0x4680 +OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object +OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object -OBJECT_CAMERA = 0x4700 # This lets un know we are reading a camera object +# >------ Sub defines of LIGHT +LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight +LIGHT_OFF = 0x4620 # The light is off +LIGHT_ATTENUATE = 0x4625 # Light attenuate flag +LIGHT_RAYSHADE = 0x4627 # Light rayshading flag +LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag +LIGHT_LOCAL_SHADOW = 0x4640 # Light shadow values 1 +LIGHT_LOCAL_SHADOW2 = 0x4641 # Light shadow values 2 +LIGHT_SPOT_SEE_CONE = 0x4650 # Light spot cone flag +LIGHT_SPOT_RECTANGLE = 0x4651 # Light spot rectangle flag +LIGHT_SPOT_OVERSHOOT = 0x4652 # Light spot overshoot flag +LIGHT_SPOT_PROJECTOR = 0x4653 # Light spot bitmap name +LIGHT_EXCLUDE = 0x4654 # Light excluded objects +LIGHT_RANGE = 0x4655 # Light range +LIGHT_SPOT_ROLL = 0x4656 # The roll angle of the spot +LIGHT_SPOT_ASPECT = 0x4657 # Light spot aspect flag +LIGHT_RAY_BIAS = 0x4658 # Light ray bias value +LIGHT_INNER_RANGE = 0x4659 # The light inner range +LIGHT_OUTER_RANGE = 0x465A # The light outer range +LIGHT_MULTIPLIER = 0x465B # The light energy factor +LIGHT_AMBIENT_LIGHT = 0x4680 # Light ambient flag # >------ sub defines of CAMERA OBJECT_CAM_RANGES = 0x4720 # The camera range values @@ -125,40 +122,42 @@ OBJECT_CAM_RANGES = 0x4720 # The camera range values OBJECT_VERTICES = 0x4110 # The objects vertices OBJECT_VERTFLAGS = 0x4111 # The objects vertex flags OBJECT_FACES = 0x4120 # The objects faces -OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color -OBJECT_UV = 0x4140 # The UV texture coordinates -OBJECT_SMOOTH = 0x4150 # The Object smooth groups -OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix +OBJECT_MATERIAL = 0x4130 # The objects face material +OBJECT_UV = 0x4140 # The vertex UV texture coordinates +OBJECT_SMOOTH = 0x4150 # The objects face smooth groups +OBJECT_TRANS_MATRIX = 0x4160 # The objects Matrix # >------ sub defines of EDITKEYFRAME -KFDATA_AMBIENT = 0xB001 -KFDATA_OBJECT = 0xB002 -KFDATA_CAMERA = 0xB003 -KFDATA_TARGET = 0xB004 -KFDATA_LIGHT = 0xB005 -KFDATA_L_TARGET = 0xB006 -KFDATA_SPOTLIGHT = 0xB007 -KFDATA_KFSEG = 0xB008 -# KFDATA_CURTIME = 0xB009 -# KFDATA_KFHDR = 0xB00A +KFDATA_AMBIENT = 0xB001 # Keyframe ambient node +KFDATA_OBJECT = 0xB002 # Keyframe object node +KFDATA_CAMERA = 0xB003 # Keyframe camera node +KFDATA_TARGET = 0xB004 # Keyframe target node +KFDATA_LIGHT = 0xB005 # Keyframe light node +KFDATA_LTARGET = 0xB006 # Keyframe light target node +KFDATA_SPOTLIGHT = 0xB007 # Keyframe spotlight node +KFDATA_KFSEG = 0xB008 # Keyframe start and stop +KFDATA_CURTIME = 0xB009 # Keyframe current frame +KFDATA_KFHDR = 0xB00A # Keyframe node header + # >------ sub defines of KEYFRAME_NODE -OBJECT_NODE_HDR = 0xB010 -OBJECT_INSTANCE_NAME = 0xB011 -# 
OBJECT_PRESCALE = 0xB012 -OBJECT_PIVOT = 0xB013 -# OBJECT_BOUNDBOX = 0xB014 -# MORPH_SMOOTH = 0xB015 -POS_TRACK_TAG = 0xB020 -ROT_TRACK_TAG = 0xB021 -SCL_TRACK_TAG = 0xB022 -FOV_TRACK_TAG = 0xB023 -ROLL_TRACK_TAG = 0xB024 -COL_TRACK_TAG = 0xB025 -# MORPH_TRACK_TAG = 0xB026 -# HOTSPOT_TRACK_TAG = 0xB027 -# FALLOFF_TRACK_TAG = 0xB028 -# HIDE_TRACK_TAG = 0xB029 -# OBJECT_NODE_ID = 0xB030 +OBJECT_NODE_HDR = 0xB010 # Keyframe object node header +OBJECT_INSTANCE_NAME = 0xB011 # Keyframe object name for dummy objects +OBJECT_PRESCALE = 0xB012 # Keyframe object prescale +OBJECT_PIVOT = 0xB013 # Keyframe object pivot position +OBJECT_BOUNDBOX = 0xB014 # Keyframe object boundbox +MORPH_SMOOTH = 0xB015 # Auto smooth angle for keyframe mesh objects +POS_TRACK_TAG = 0xB020 # Keyframe object position track +ROT_TRACK_TAG = 0xB021 # Keyframe object rotation track +SCL_TRACK_TAG = 0xB022 # Keyframe object scale track +FOV_TRACK_TAG = 0xB023 # Keyframe camera field of view track +ROLL_TRACK_TAG = 0xB024 # Keyframe camera roll track +COL_TRACK_TAG = 0xB025 # Keyframe light color track +MORPH_TRACK_TAG = 0xB026 # Keyframe object morph smooth track +HOTSPOT_TRACK_TAG = 0xB027 # Keyframe spotlight hotspot track +FALLOFF_TRACK_TAG = 0xB028 # Keyframe spotlight falloff track +HIDE_TRACK_TAG = 0xB029 # Keyframe object hide track +OBJECT_NODE_ID = 0xB030 # Keyframe object node id +PARENT_NAME = 0x80F0 # Object parent name tree (dot seperated) ROOT_OBJECT = 0xFFFF @@ -176,7 +175,7 @@ class Chunk: "bytes_read", ) # we don't read in the bytes_read, we compute that - binary_format = " abs(location[1] - target[1]): + foc = math.copysign(math.sqrt(pow(pos[0],2)+pow(pos[1],2)),pos[0]) + dia = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[0]) + pitch = math.radians(90)-math.copysign(math.acos(foc/dia), pos[2]) + if location[0] > target[0]: + tilt = math.copysign(pitch, pos[0]) + pan = math.radians(90)+math.atan(pos[1]/foc) + else: + tilt = -1*(math.copysign(pitch, pos[0])) + pan = -1*(math.radians(90)-math.atan(pos[1]/foc)) + elif abs(location[1] - target[1]) > abs(location[0] - target[0]): + foc = math.copysign(math.sqrt(pow(pos[1],2)+pow(pos[0],2)),pos[1]) + dia = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[1]) + pitch = math.radians(90)-math.copysign(math.acos(foc/dia), pos[2]) + if location[1] > target[1]: + tilt = math.copysign(pitch, pos[1]) + pan = math.radians(90)+math.acos(pos[0]/foc) + else: + tilt = -1*(math.copysign(pitch, pos[1])) + pan = -1*(math.radians(90)-math.acos(pos[0]/foc)) + direction = tilt, pan + return direction + + def read_track_data(temp_chunk): + """Trackflags 0x1, 0x2 and 0x3 are for looping. 0x8, 0x10 and 0x20 + locks the XYZ axes. 0x100, 0x200 and 0x400 unlinks the XYZ axes""" + new_chunk.bytes_read += SZ_U_SHORT + temp_data = file.read(SZ_U_SHORT) + tflags = struct.unpack(' 3: - print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version) + print("\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version) + + # is it an ambient light chunk? 
+ elif new_chunk.ID == AMBIENTLIGHT: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + world = bpy.data.worlds.new("Ambient: " + realname) + world.light_settings.use_ambient_occlusion = True + context.scene.world = world + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + context.scene.world.color[:] = read_float_color(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + context.scene.world.color[:] = read_float_color(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read # is it an object info chunk? elif new_chunk.ID == OBJECTINFO: - process_next_chunk(context, file, new_chunk, imported_objects, IMAGE_SEARCH, KEYFRAME) + process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE) # keep track of how much we read in the main chunk new_chunk.bytes_read += temp_chunk.bytes_read @@ -597,6 +699,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE contextMesh_flag, contextMeshMaterials, contextMesh_smooth, + WORLD_MATRIX ) contextMesh_vertls = [] contextMesh_facels = [] @@ -604,7 +707,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE contextMesh_flag = None contextMesh_smooth = None contextMeshUV = None - # Reset matrix contextMatrix = None CreateBlenderObject = True @@ -618,18 +720,17 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE elif new_chunk.ID == MAT_NAME: material_name, read_str_len = read_string(file) - # plus one for the null character that ended the string new_chunk.bytes_read += read_str_len - contextMaterial.name = material_name.rstrip() # remove trailing whitespace + contextMaterial.name = material_name.rstrip() # remove trailing whitespace MATDICT[material_name] = contextMaterial elif new_chunk.ID == MAT_AMBIENT: read_chunk(file, temp_chunk) - # only available color is emission color - if temp_chunk.ID == MAT_FLOAT_COLOR: + # to not loose this data, ambient color is stored in line color + if temp_chunk.ID == COLOR_F: contextMaterial.line_color[:3] = read_float_color(temp_chunk) - elif temp_chunk.ID == MAT_24BIT_COLOR: + elif temp_chunk.ID == COLOR_24: contextMaterial.line_color[:3] = read_byte_color(temp_chunk) else: skip_to_end(file, temp_chunk) @@ -637,9 +738,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE elif new_chunk.ID == MAT_DIFFUSE: read_chunk(file, temp_chunk) - if temp_chunk.ID == MAT_FLOAT_COLOR: + if temp_chunk.ID == COLOR_F: contextMaterial.diffuse_color[:3] = read_float_color(temp_chunk) - elif temp_chunk.ID == MAT_24BIT_COLOR: + elif temp_chunk.ID == COLOR_24: contextMaterial.diffuse_color[:3] = read_byte_color(temp_chunk) else: skip_to_end(file, temp_chunk) @@ -647,10 +748,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE elif new_chunk.ID == MAT_SPECULAR: read_chunk(file, temp_chunk) - # Specular color is available - if temp_chunk.ID == MAT_FLOAT_COLOR: + if temp_chunk.ID == COLOR_F: contextMaterial.specular_color = read_float_color(temp_chunk) - elif temp_chunk.ID == MAT_24BIT_COLOR: + elif temp_chunk.ID == COLOR_24: contextMaterial.specular_color = read_byte_color(temp_chunk) else: skip_to_end(file, temp_chunk) @@ -658,64 +758,64 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE elif new_chunk.ID == MAT_SHINESS: read_chunk(file, temp_chunk) - if temp_chunk.ID == PERCENTAGE_SHORT: + if 
temp_chunk.ID == PCT_SHORT: temp_data = file.read(SZ_U_SHORT) temp_chunk.bytes_read += SZ_U_SHORT contextMaterial.roughness = 1 - (float(struct.unpack(' Date: Mon, 22 May 2023 21:28:41 +0200 Subject: [PATCH 07/14] Import_3ds: Completed code improvement Final code improvement --- io_scene_3ds/import_3ds.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 4976cd5..ce55232 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -2,10 +2,10 @@ # Copyright 2005 Bob Holcomb import os -import time -import struct import bpy +import time import math +import struct import mathutils from bpy_extras.image_utils import load_image from bpy_extras.node_shader_utils import PrincipledBSDFWrapper @@ -293,16 +293,12 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of img_wrap.rotation[2] = angle if extend == 'mirror': - # 3DS mirror flag can be emulated by these settings (at least so it seems) - # TODO: bring back mirror - pass - # texture.repeat_x = texture.repeat_y = 2 - # texture.use_mirror_x = texture.use_mirror_y = True + img_wrap.extension = 'MIRROR' elif extend == 'decal': - # 3DS' decal mode maps best to Blenders EXTEND img_wrap.extension = 'EXTEND' elif extend == 'noWrap': img_wrap.extension = 'CLIP' + if alpha == 'alpha': for link in links: if link.from_node.type == 'TEX_IMAGE' and link.to_node.type == 'MIX_RGB': @@ -386,6 +382,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI for v1, v2, v3 in myContextMesh_facels: eekadoodle_faces.extend((v3, v1, v2) if v3 == 0 else (v1, v2, v3)) bmesh.polygons.foreach_set("loop_start", range(0, nbr_faces * 3, 3)) + bmesh.polygons.foreach_set("loop_total", (3,) * nbr_faces) bmesh.loops.foreach_set("vertex_index", eekadoodle_faces) if bmesh.polygons and contextMeshUV: @@ -1447,4 +1444,4 @@ def load(operator, CONVERSE=global_matrix, ) - return {'FINISHED'} + return {'FINISHED'} \ No newline at end of file -- 2.30.2 From 62ff785a55268b2d673d67948f0f504de145e0ab Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 22 May 2023 21:32:40 +0200 Subject: [PATCH 08/14] Export_3ds: Completed code improvement Final code improvement --- io_scene_3ds/export_3ds.py | 709 ++++++++++++++++++++++++++----------- 1 file changed, 508 insertions(+), 201 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 8859b60..428b655 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -121,6 +121,7 @@ KFDATA_KFCURTIME = 0xB009 # Frame current KFDATA_KFHDR = 0xB00A # Keyframe header # >------ sub defines of OBJECT_NODE_TAG +PARENT_NAME = 0x80F0 # Object parent name tree OBJECT_NODE_ID = 0xB030 # Object hierachy ID OBJECT_NODE_HDR = 0xB010 # Hierachy tree header OBJECT_INSTANCE_NAME = 0xB011 # Object instance name @@ -149,7 +150,7 @@ def sane_name(name): if name_fixed is not None: return name_fixed - # strip non ascii chars + # Strip non ascii chars new_name_clean = new_name = name.encode("ASCII", "replace").decode("ASCII")[:12] i = 0 @@ -157,7 +158,7 @@ def sane_name(name): new_name = new_name_clean + '.%.3d' % i i += 1 - # note, appending the 'str' version. 
+ # Note, appending the 'str' version name_unique.append(new_name) name_mapping[name] = new_name = new_name.encode("ASCII", "replace") return new_name @@ -166,14 +167,13 @@ def sane_name(name): def uv_key(uv): return round(uv[0], 6), round(uv[1], 6) -# size defines: +# Size defines SZ_SHORT = 2 SZ_INT = 4 SZ_FLOAT = 4 class _3ds_ushort(object): - """Class representing a short (2-byte integer) for a 3ds file. - *** This looks like an unsigned short H is unsigned from the struct docs - Cam***""" + """Class representing a short (2-byte integer) for a 3ds file.""" __slots__ = ("value", ) def __init__(self, val=0): @@ -239,7 +239,7 @@ class _3ds_string(object): file.write(struct.pack(binary_format, self.value)) def __str__(self): - return str(self.value) + return str((self.value).decode("ASCII")) class _3ds_point_3d(object): @@ -260,15 +260,15 @@ class _3ds_point_3d(object): # Used for writing a track -''' class _3ds_point_4d(object): """Class representing a four-dimensional point for a 3ds file, for instance a quaternion.""" - __slots__ = "w","x","y","z" - def __init__(self, point=(0.0,0.0,0.0,0.0)): + __slots__ = "w", "x", "y", "z" + + def __init__(self, point): self.w, self.x, self.y, self.z = point def get_size(self): - return 4*SZ_FLOAT + return 4 * SZ_FLOAT def write(self,file): data=struct.pack('<4f', self.w, self.x, self.y, self.z) @@ -276,7 +276,6 @@ class _3ds_point_4d(object): def __str__(self): return '(%f, %f, %f, %f)' % (self.w, self.x, self.y, self.z) -''' class _3ds_point_uv(object): @@ -308,7 +307,7 @@ class _3ds_float_color(object): return 3 * SZ_FLOAT def write(self, file): - file.write(struct.pack('3f', self.r, self.g, self.b)) + file.write(struct.pack('<3f', self.r, self.g, self.b)) def __str__(self): return '{%f, %f, %f}' % (self.r, self.g, self.b) @@ -342,9 +341,7 @@ class _3ds_face(object): def get_size(self): return 4 * SZ_SHORT - # no need to validate every face vert. the oversized array will - # catch this problem - + # No need to validate every face vert, the oversized array will catch this problem def write(self, file): # The last short is used for face flags file.write(struct.pack('<4H', self.vindex[0], self.vindex[1], self.vindex[2], self.flag)) @@ -355,15 +352,15 @@ class _3ds_face(object): class _3ds_array(object): """Class representing an array of variables for a 3ds file. - Consists of a _3ds_ushort to indicate the number of items, followed by the items themselves. - """ + Consists of a _3ds_ushort to indicate the number of items, followed by the items themselves.""" + __slots__ = "values", "size" def __init__(self): self.values = [] self.size = SZ_SHORT - # add an item: + # Add an item def add(self, item): self.values.append(item) self.size += item.get_size() @@ -380,14 +377,13 @@ class _3ds_array(object): value.write(file) # To not overwhelm the output in a dump, a _3ds_array only - # outputs the number of items, not all of the actual items. + # outputs the number of items, not all of the actual items def __str__(self): return '(%d items)' % len(self.values) class _3ds_named_variable(object): """Convenience class for named variables.""" - __slots__ = "value", "name" def __init__(self, name, val=None): @@ -412,11 +408,11 @@ class _3ds_named_variable(object): self.value) -# the chunk class +# The chunk class class _3ds_chunk(object): """Class representing a chunk in a 3ds file. - Chunks contain zero or more variables, followed by zero or more subchunks. 
- """ + Chunks contain zero or more variables, followed by zero or more subchunks.""" + __slots__ = "ID", "size", "variables", "subchunks" def __init__(self, chunk_id=0): @@ -464,7 +460,7 @@ class _3ds_chunk(object): """Write the chunk to a file. Uses the write function of the variables and the subchunks to do the actual work.""" - # write header + # Write header self.ID.write(file) self.size.write(file) for variable in self.variables: @@ -485,9 +481,9 @@ class _3ds_chunk(object): subchunk.dump(indent + 1) -########## -# EXPORT # -########## +############# +# MATERIALS # +############# def get_material_image(material): """ Get images from paint slots.""" @@ -518,7 +514,7 @@ def make_material_subchunk(chunk_id, color): col1 = _3ds_chunk(RGB1) col1.add_variable("color1", _3ds_rgb_color(color)) mat_sub.add_subchunk(col1) - # optional: + # Optional # col2 = _3ds_chunk(RGBI) # col2.add_variable("color2", _3ds_rgb_color(color)) # mat_sub.add_subchunk(col2) @@ -531,7 +527,7 @@ def make_percent_subchunk(chunk_id, percent): pcti = _3ds_chunk(PCT) pcti.add_variable("percent", _3ds_ushort(int(round(percent * 100, 0)))) pct_sub.add_subchunk(pcti) - # optional: + # Optional # pctf = _3ds_chunk(PCTF) # pctf.add_variable("pctfloat", _3ds_float(round(percent, 6))) # pct_sub.add_subchunk(pctf) @@ -583,14 +579,13 @@ def make_material_texture_chunk(chunk_id, texslots, pct): 0x40 activates alpha source, 0x80 activates tinting, 0x100 ignores alpha, 0x200 activates RGB tint. Bits 0x80, 0x100, and 0x200 are only used with TEXMAP, TEX2MAP, and SPECMAP chunks. 0x40, when used with a TEXMAP, TEX2MAP, or SPECMAP chunk must be accompanied with a tint bit, - either 0x100 or 0x200, tintcolor will be processed if colorchunks are present""" + either 0x100 or 0x200, tintcolor will be processed if a tintflag is present""" mapflags = 0 - - # no perfect mapping for mirror modes - 3DS only has uniform mirror w. repeat=2 if texslot.extension == 'EXTEND': mapflags |= 0x1 - + if texslot.extension == 'MIRROR': + mapflags |= 0x2 if texslot.extension == 'CLIP': mapflags |= 0x10 @@ -633,9 +628,8 @@ def make_material_texture_chunk(chunk_id, texslots, pct): rgb.add_variable("mapcolor", _3ds_rgb_color(spec if texslot.socket_dst.identifier == 'Specular' else base)) mat_sub.add_subchunk(rgb) - # store all textures for this mapto in order. This at least is what - # the 3DS exporter did so far, afaik most readers will just skip - # over 2nd textures. + # Store all textures for this mapto in order. This at least is what the + # 3DS exporter did so far, afaik most readers will just skip over 2nd textures for slot in texslots: if slot.image is not None: add_texslot(slot) @@ -686,9 +680,9 @@ def make_material_chunk(material, image): primary_tex = False if wrap.base_color_texture: - d_pct = 0.7 + sum(wrap.base_color[:]) * 0.1 color = [wrap.base_color_texture] - matmap = make_material_texture_chunk(MAT_DIFFUSEMAP, color, d_pct) + c_pct = 0.7 + sum(wrap.base_color[:]) * 0.1 + matmap = make_material_texture_chunk(MAT_DIFFUSEMAP, color, c_pct) if matmap: material_chunk.add_subchunk(matmap) primary_tex = True @@ -730,13 +724,13 @@ def make_material_chunk(material, image): material_chunk.add_subchunk(matmap) if wrap.emission_color_texture: - e_pct = wrap.emission_strength emission = [wrap.emission_color_texture] + e_pct = wrap.emission_strength matmap = make_material_texture_chunk(MAT_SELFIMAP, emission, e_pct) if matmap: material_chunk.add_subchunk(matmap) - # make sure no textures are lost. 
Everything that doesn't fit + # Make sure no textures are lost. Everything that doesn't fit # into a channel is exported as secondary texture diffuse = [] @@ -763,7 +757,7 @@ def make_material_chunk(material, image): material_chunk.add_subchunk(make_percent_subchunk(MATTRANS, 1 - material.diffuse_color[3])) material_chunk.add_subchunk(shading) - slots = [get_material_image(material)] # can be None + slots = [get_material_image(material)] # Can be None if image: material_chunk.add_subchunk(make_texture_chunk(MAT_DIFFUSEMAP, slots)) @@ -771,6 +765,10 @@ def make_material_chunk(material, image): return material_chunk +############# +# MESH DATA # +############# + class tri_wrapper(object): """Class representing a triangle. Used when converting faces to triangles""" @@ -782,7 +780,7 @@ class tri_wrapper(object): self.ma = ma self.image = image self.faceuvs = faceuvs - self.offset = [0, 0, 0] # offset indices + self.offset = [0, 0, 0] # Offset indices self.flag = flag self.group = group @@ -850,19 +848,17 @@ def remove_face_uv(verts, tri_list): need to be converted to vertex uv coordinates. That means that vertices need to be duplicated when there are multiple uv coordinates per vertex.""" - # initialize a list of UniqueLists, one per vertex: - # uv_list = [UniqueList() for i in xrange(len(verts))] + # Initialize a list of UniqueLists, one per vertex unique_uvs = [{} for i in range(len(verts))] - # for each face uv coordinate, add it to the UniqueList of the vertex + # For each face uv coordinate, add it to the UniqueList of the vertex for tri in tri_list: for i in range(3): - # store the index into the UniqueList for future reference: + # Store the index into the UniqueList for future reference # offset.append(uv_list[tri.vertex_index[i]].add(_3ds_point_uv(tri.faceuvs[i]))) context_uv_vert = unique_uvs[tri.vertex_index[i]] uvkey = tri.faceuvs[i] - offset_index__uv_3ds = context_uv_vert.get(uvkey) if not offset_index__uv_3ds: @@ -870,11 +866,9 @@ def remove_face_uv(verts, tri_list): tri.offset[i] = offset_index__uv_3ds[0] - # At this point, each vertex has a UniqueList containing every uv coordinate that is associated with it - # only once. 
- + # At this point each vertex has a UniqueList containing every uv coord associated with it only once # Now we need to duplicate every vertex as many times as it has uv coordinates and make sure the - # faces refer to the new face indices: + # faces refer to the new face indices vert_index = 0 vert_array = _3ds_array() uv_array = _3ds_array() @@ -885,22 +879,20 @@ def remove_face_uv(verts, tri_list): pt = _3ds_point_3d(vert.co) # reuse, should be ok uvmap = [None] * len(unique_uvs[i]) for ii, uv_3ds in unique_uvs[i].values(): - # add a vertex duplicate to the vertex_array for every uv associated with this vertex: + # Add a vertex duplicate to the vertex_array for every uv associated with this vertex vert_array.add(pt) - # add the uv coordinate to the uv array: - # This for loop does not give uv's ordered by ii, so we create a new map - # and add the uv's later + # Add the uv coordinate to the uv array, this for loop does not give + # uv's ordered by ii, so we create a new map and add the uv's later # uv_array.add(uv_3ds) uvmap[ii] = uv_3ds - # Add the uv's in the correct order + # Add uv's in the correct order and add coordinates to the uv array for uv_3ds in uvmap: - # add the uv coordinate to the uv array: uv_array.add(uv_3ds) vert_index += len(unique_uvs[i]) - # Make sure the triangle vertex indices now refer to the new vertex list: + # Make sure the triangle vertex indices now refer to the new vertex list for tri in tri_list: for i in range(3): tri.offset[i] += index_list[tri.vertex_index[i]] @@ -1004,34 +996,32 @@ def make_uv_chunk(uv_array): def make_mesh_chunk(ob, mesh, matrix, materialDict, translation): """Make a chunk out of a Blender mesh.""" - # Extract the triangles from the mesh: + # Extract the triangles from the mesh tri_list = extract_triangles(mesh) if mesh.uv_layers: - # Remove the face UVs and convert it to vertex UV: + # Remove the face UVs and convert it to vertex UV vert_array, uv_array, tri_list = remove_face_uv(mesh.vertices, tri_list) else: - # Add the vertices to the vertex array: + # Add the vertices to the vertex array vert_array = _3ds_array() for vert in mesh.vertices: vert_array.add(_3ds_point_3d(vert.co)) - # no UV at all: + # No UV at all uv_array = None - # create the chunk: + # Create the chunk mesh_chunk = _3ds_chunk(OBJECT_MESH) - # add vertex chunk: + # Add vertex and faces chunk mesh_chunk.add_subchunk(make_vert_chunk(vert_array)) - - # add faces chunk: mesh_chunk.add_subchunk(make_faces_chunk(tri_list, mesh, materialDict)) - # if available, add uv chunk: + # If available, add uv chunk if uv_array: mesh_chunk.add_subchunk(make_uv_chunk(uv_array)) - # create transformation matrix chunk + # Create transformation matrix chunk matrix_chunk = _3ds_chunk(OBJECT_TRANS_MATRIX) obj_matrix = matrix.transposed().to_3x3() @@ -1059,17 +1049,18 @@ def make_mesh_chunk(ob, mesh, matrix, materialDict, translation): return mesh_chunk -''' # COMMENTED OUT FOR 2.42 RELEASE!! 
CRASHES 3DS MAX -def make_kfdata(start=0, stop=0, curtime=0): +################# +# KEYFRAME DATA # +################# + +def make_kfdata(revision, start=0, stop=100, curtime=0): """Make the basic keyframe data chunk""" kfdata = _3ds_chunk(KFDATA) kfhdr = _3ds_chunk(KFDATA_KFHDR) - kfhdr.add_variable("revision", _3ds_ushort(0)) - # Not really sure what filename is used for, but it seems it is usually used - # to identify the program that generated the .3ds: - kfhdr.add_variable("filename", _3ds_string("Blender")) - kfhdr.add_variable("animlen", _3ds_uint(stop-start)) + kfhdr.add_variable("revision", _3ds_ushort(revision)) + kfhdr.add_variable("filename", _3ds_string(b'Blender')) + kfhdr.add_variable("animlen", _3ds_uint(stop - start)) kfseg = _3ds_chunk(KFDATA_KFSEG) kfseg.add_variable("start", _3ds_uint(start)) @@ -1083,107 +1074,397 @@ def make_kfdata(start=0, stop=0, curtime=0): kfdata.add_subchunk(kfcurtime) return kfdata -def make_track_chunk(ID, obj): - """Make a chunk for track data. - Depending on the ID, this will construct a position, rotation or scale track.""" +def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): + """Make a chunk for track data. Depending on the ID, this will + construct a position, rotation, scale, roll, color or fov track.""" track_chunk = _3ds_chunk(ID) - track_chunk.add_variable("track_flags", _3ds_ushort()) - track_chunk.add_variable("unknown", _3ds_uint()) - track_chunk.add_variable("unknown", _3ds_uint()) - track_chunk.add_variable("nkeys", _3ds_uint(1)) - # Next section should be repeated for every keyframe, but for now, animation is not actually supported. - track_chunk.add_variable("tcb_frame", _3ds_uint(0)) - track_chunk.add_variable("tcb_flags", _3ds_ushort()) - if obj.type=='Empty': - if ID==POS_TRACK_TAG: - # position vector: - track_chunk.add_variable("position", _3ds_point_3d(obj.getLocation())) - elif ID==ROT_TRACK_TAG: - # rotation (quaternion, angle first, followed by axis): - q = obj.getEuler().to_quaternion() # XXX, todo! 
- track_chunk.add_variable("rotation", _3ds_point_4d((q.angle, q.axis[0], q.axis[1], q.axis[2]))) - elif ID==SCL_TRACK_TAG: - # scale vector: - track_chunk.add_variable("scale", _3ds_point_3d(obj.getSize())) + + if ID in {POS_TRACK_TAG, ROT_TRACK_TAG, SCL_TRACK_TAG, ROLL_TRACK_TAG} and ob.animation_data and ob.animation_data.action: + action = ob.animation_data.action + if action.fcurves: + fcurves = action.fcurves + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys = nkeys + 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + + if ID == POS_TRACK_TAG: # Position + for i, frame in enumerate(kframes): + position = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'location'] + if not position: + position.append(ob_pos) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("position", _3ds_point_3d(position)) + + elif ID == ROT_TRACK_TAG: # Rotation + for i, frame in enumerate(kframes): + rotation = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] + if not rotation: + rotation.append(ob_rot) + quat = mathutils.Euler(rotation).to_quaternion() + axis_angle = quat.angle, quat.axis[0], quat.axis[1], quat.axis[2] + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("rotation", _3ds_point_4d(axis_angle)) + + elif ID == SCL_TRACK_TAG: # Scale + for i, frame in enumerate(kframes): + size = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'scale'] + if not size: + size.append(ob_size) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("scale", _3ds_point_3d(size)) + + elif ID == ROLL_TRACK_TAG: # Roll + for i, frame in enumerate(kframes): + roll = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] + if not roll: + roll.append(ob_rot) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("roll", _3ds_float(round(math.degrees(roll[1]), 4))) + + elif ID in {COL_TRACK_TAG, FOV_TRACK_TAG, HOTSPOT_TRACK_TAG, FALLOFF_TRACK_TAG} and ob.data.animation_data and ob.data.animation_data.action: + action = ob.data.animation_data.action + if action.fcurves: + fcurves = action.fcurves + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys = nkeys + 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + + if ID == COL_TRACK_TAG: # Color + for i, frame in enumerate(kframes): + color = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'color'] + if not color: + 
color.append(ob.data.color[:3]) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("color", _3ds_float_color(color)) + + elif ID == FOV_TRACK_TAG: # Field of view + for i, frame in enumerate(kframes): + lens = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'lens'] + if not lens: + lens.append(ob.data.lens) + fov = 2 * math.atan(ob.data.sensor_width/(2*lens[0])) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("fov", _3ds_float(round(math.degrees(fov), 4))) + + elif ID == HOTSPOT_TRACK_TAG: # Hotspot + beam_angle = math.degrees(ob.data.spot_size) + for i, frame in enumerate(kframes): + blend = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_blend'] + if not blend: + blend.append(ob.data.spot_blend) + hot_spot = beam_angle-(blend[0]*math.floor(beam_angle)) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("hotspot", _3ds_float(round(hot_spot, 4))) + + elif ID == FALLOFF_TRACK_TAG: # Falloff + for i, frame in enumerate(kframes): + fall_off = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_size'] + if not fall_off: + fall_off.append(ob.data.spot_size) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("falloff", _3ds_float(round(math.degrees(fall_off[0]), 4))) + else: - # meshes have their transformations applied before - # exporting, so write identity transforms here: - if ID==POS_TRACK_TAG: - # position vector: - track_chunk.add_variable("position", _3ds_point_3d((0.0,0.0,0.0))) - elif ID==ROT_TRACK_TAG: - # rotation (quaternion, angle first, followed by axis): - track_chunk.add_variable("rotation", _3ds_point_4d((0.0, 1.0, 0.0, 0.0))) - elif ID==SCL_TRACK_TAG: - # scale vector: - track_chunk.add_variable("scale", _3ds_point_3d((1.0, 1.0, 1.0))) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) # Based on observation default flag is 0x40 + track_chunk.add_variable("frame_start", _3ds_uint(0)) + track_chunk.add_variable("frame_total", _3ds_uint(0)) + track_chunk.add_variable("nkeys", _3ds_uint(1)) + # Next section should be repeated for every keyframe, with no animation only one tag is needed + track_chunk.add_variable("tcb_frame", _3ds_uint(0)) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + + # New method simply inserts the parameters + if ID == POS_TRACK_TAG: # Position vector + track_chunk.add_variable("position", _3ds_point_3d(ob_pos)) + + elif ID == ROT_TRACK_TAG: # Rotation (angle first [radians], followed by axis) + track_chunk.add_variable("rotation", _3ds_point_4d((ob_rot.angle, ob_rot.axis[0], ob_rot.axis[1], ob_rot.axis[2]))) + + elif ID == SCL_TRACK_TAG: # Scale vector + track_chunk.add_variable("scale", _3ds_point_3d(ob_size)) + + elif ID == ROLL_TRACK_TAG: # Roll angle + track_chunk.add_variable("roll", _3ds_float(round(math.degrees(ob.rotation_euler[1]), 4))) + + elif ID == COL_TRACK_TAG: # Color values + track_chunk.add_variable("color", _3ds_float_color(ob.data.color)) + + elif ID == FOV_TRACK_TAG: # Field of view + track_chunk.add_variable("fov", _3ds_float(round(math.degrees(ob.data.angle), 4))) + + elif ID == HOTSPOT_TRACK_TAG: # Hotspot + beam_angle = math.degrees(ob.data.spot_size) + 
track_chunk.add_variable("hotspot", _3ds_float(round(beam_angle-(ob.data.spot_blend*math.floor(beam_angle)), 4))) + + elif ID == FALLOFF_TRACK_TAG: # Falloff + track_chunk.add_variable("falloff", _3ds_float(round(math.degrees(ob.data.spot_size), 4))) return track_chunk -def make_kf_obj_node(obj, name_to_id): - """Make a node chunk for a Blender object. - Takes the Blender object as a parameter. Object id's are taken from the dictionary name_to_id. - Blender Empty objects are converted to dummy nodes.""" - name = obj.name - # main object node chunk: - kf_obj_node = _3ds_chunk(OBJECT_NODE_TAG) - # chunk for the object id: - obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID) - # object id is from the name_to_id dictionary: - obj_id_chunk.add_variable("node_id", _3ds_ushort(name_to_id[name])) +def make_object_node(ob, translation, rotation, scale): + """Make a node chunk for a Blender object. Takes Blender object as parameter. + Blender Empty objects are converted to dummy nodes.""" - # object node header: + name = ob.name + if ob.type == 'CAMERA': + obj_node = _3ds_chunk(CAMERA_NODE_TAG) + elif ob.type == 'LIGHT': + obj_node = _3ds_chunk(LIGHT_NODE_TAG) + if ob.data.type == 'SPOT': + obj_node = _3ds_chunk(SPOT_NODE_TAG) + else: # Main object node chunk + obj_node = _3ds_chunk(OBJECT_NODE_TAG) + + # Object node header with object name obj_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR) - # object name: - if obj.type == 'Empty': - # Empties are called "$$$DUMMY" and use the OBJECT_INSTANCE_NAME chunk - # for their name (see below): - obj_node_header_chunk.add_variable("name", _3ds_string("$$$DUMMY")) - else: - # Add the name: + parent = ob.parent + + if ob.type == 'EMPTY': # Forcing to use the real name for empties + # Empties called $$$DUMMY and use OBJECT_INSTANCE_NAME chunk as name + obj_node_header_chunk.add_variable("name", _3ds_string(b"$$$DUMMY")) + obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0x4000)) + obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + + else: # Add flag variables - Based on observation flags1 is usually 0x0040 and 0x4000 for empty objects obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name))) - # Add Flag variables (not sure what they do): - obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0)) - obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0x0040)) + # Flags2 defines bit 0x01 for display path, bit 0x02 use autosmooth, bit 0x04 object frozen, + # bit 0x10 for motion blur, bit 0x20 for material morph and bit 0x40 for mesh morph + if ob.type == 'MESH' and ob.data.use_auto_smooth: + obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0x02)) + else: + obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + obj_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT)) + + ''' + # COMMENTED OUT FOR 2.42 RELEASE!! 
CRASHES 3DS MAX # Check parent-child relationships: - parent = obj.parent - if (parent is None) or (parent.name not in name_to_id): - # If no parent, or the parents name is not in the name_to_id dictionary, - # parent id becomes -1: + if parent is None or parent.name not in name_to_id: + # If no parent, or parents name is not in dictionary, ID becomes -1: obj_node_header_chunk.add_variable("parent", _3ds_ushort(-1)) - else: - # Get the parent's id from the name_to_id dictionary: + else: # Get the parent's ID from the name_to_id dictionary: obj_node_header_chunk.add_variable("parent", _3ds_ushort(name_to_id[parent.name])) + ''' - # Add pivot chunk: - obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT) - obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(obj.getLocation())) - kf_obj_node.add_subchunk(obj_pivot_chunk) + # Add subchunk for node header + obj_node.add_subchunk(obj_node_header_chunk) - # add subchunks for object id and node header: - kf_obj_node.add_subchunk(obj_id_chunk) - kf_obj_node.add_subchunk(obj_node_header_chunk) - - # Empty objects need to have an extra chunk for the instance name: - if obj.type == 'Empty': + # Empty objects need to have an extra chunk for the instance name + if ob.type == 'EMPTY': # Will use a real object name for empties for now obj_instance_name_chunk = _3ds_chunk(OBJECT_INSTANCE_NAME) obj_instance_name_chunk.add_variable("name", _3ds_string(sane_name(name))) - kf_obj_node.add_subchunk(obj_instance_name_chunk) + obj_node.add_subchunk(obj_instance_name_chunk) - # Add track chunks for position, rotation and scale: - kf_obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, obj)) - kf_obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, obj)) - kf_obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, obj)) + if ob.type in {'MESH', 'EMPTY'}: # Add a pivot point at the object center + pivot_pos = (translation[name]) + obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT) + obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(pivot_pos)) + obj_node.add_subchunk(obj_pivot_chunk) - return kf_obj_node -''' + # Create a bounding box from quadrant diagonal + obj_boundbox = _3ds_chunk(OBJECT_BOUNDBOX) + obj_boundbox.add_variable("min", _3ds_point_3d(ob.bound_box[0])) + obj_boundbox.add_variable("max", _3ds_point_3d(ob.bound_box[6])) + obj_node.add_subchunk(obj_boundbox) + # Add smooth angle if autosmooth is used + if ob.type == 'MESH' and ob.data.use_auto_smooth: + obj_morph_smooth = _3ds_chunk(OBJECT_MORPH_SMOOTH) + obj_morph_smooth.add_variable("angle", _3ds_float(round(ob.data.auto_smooth_angle, 6))) + obj_node.add_subchunk(obj_morph_smooth) + + # Add track chunks for color, position, rotation and scale + if parent is None: + ob_pos = translation[name] + ob_rot = rotation[name] + ob_size = scale[name] + + else: # Calculate child position and rotation of the object center, no scale applied + ob_pos = translation[name] - translation[parent.name] + ob_rot = rotation[name].cross(rotation[parent.name].copy().inverted()) + ob_size = (1.0, 1.0, 1.0) + + obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + + if ob.type in {'MESH', 'EMPTY'}: + obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + if ob.type =='CAMERA': + obj_node.add_subchunk(make_track_chunk(FOV_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(ROLL_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + if ob.type =='LIGHT': + 
obj_node.add_subchunk(make_track_chunk(COL_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + if ob.type == 'LIGHT' and ob.data.type == 'SPOT': + obj_node.add_subchunk(make_track_chunk(HOTSPOT_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(FALLOFF_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(ROLL_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + + return obj_node + + +def make_target_node(ob, translation, rotation, scale): + """Make a target chunk for light and camera objects""" + + name = ob.name + if ob.type == 'CAMERA': #Add camera target + tar_node = _3ds_chunk(TARGET_NODE_TAG) + elif ob.type == 'LIGHT': # Add spot target + tar_node = _3ds_chunk(LTARGET_NODE_TAG) + + # Object node header with object name + tar_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR) + # Targets get the same name as the object, flags1 is usually 0x0010 and parent ROOT_OBJECT + tar_node_header_chunk.add_variable("name", _3ds_string(sane_name(name))) + tar_node_header_chunk.add_variable("flags1", _3ds_ushort(0x0010)) + tar_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + tar_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT)) + + # Add subchunk for node header + tar_node.add_subchunk(tar_node_header_chunk) + + # Calculate target position + ob_pos = translation[name] + ob_rot = rotation[name].to_euler() + ob_size = scale[name] + + diagonal = math.copysign(math.sqrt(pow(ob_pos[0],2)+pow(ob_pos[1],2)), ob_pos[1]) + target_x = ob_pos[0]+(ob_pos[1]*math.tan(ob_rot[2])) + target_y = ob_pos[1]+(ob_pos[0]*math.tan(math.radians(90)-ob_rot[2])) + target_z = -1*diagonal*math.tan(math.radians(90)-ob_rot[0]) + + # Add track chunks for target position + track_chunk = _3ds_chunk(POS_TRACK_TAG) + + if ob.animation_data and ob.animation_data.action: + action = ob.animation_data.action + if action.fcurves: + fcurves = action.fcurves + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys = nkeys + 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + + for i, frame in enumerate(kframes): + target_pos = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'location'] + target_rot = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] + if not target_pos: + target_pos.append(ob_pos) + if not target_rot: + target_rot.insert(0, ob_rot.x) + target_rot.insert(1, ob_rot.y) + target_rot.insert(2, ob_rot.z) + diagonal = math.copysign(math.sqrt(pow(target_pos[0],2)+pow(target_pos[1],2)), target_pos[1]) + target_x = target_pos[0]+(target_pos[1]*math.tan(target_rot[2])) + target_y = target_pos[1]+(target_pos[0]*math.tan(math.radians(90)-target_rot[2])) + target_z = -1*diagonal*math.tan(math.radians(90)-target_rot[0]) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("position", _3ds_point_3d((target_x, target_y, target_z))) + + else: # Track header + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) # Based on observation default flag is 0x40 + track_chunk.add_variable("frame_start", _3ds_uint(0)) + track_chunk.add_variable("frame_total", 
_3ds_uint(0)) + track_chunk.add_variable("nkeys", _3ds_uint(1)) + # Keyframe header + track_chunk.add_variable("tcb_frame", _3ds_uint(0)) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("position", _3ds_point_3d((target_x, target_y, target_z))) + + tar_node.add_subchunk(track_chunk) + + return tar_node + + +def make_ambient_node(world): + amb_color = world.color + amb_node = _3ds_chunk(AMBIENT_NODE_TAG) + track_chunk = _3ds_chunk(COL_TRACK_TAG) + + # Object node header, name is "$AMBIENT$" for ambient nodes + amb_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR) + amb_node_header_chunk.add_variable("name", _3ds_string(b"$AMBIENT$")) + amb_node_header_chunk.add_variable("flags1", _3ds_ushort(0x4000)) # Flags1 0x4000 for empty objects + amb_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + amb_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT)) + amb_node.add_subchunk(amb_node_header_chunk) + + if world.animation_data.action: + action = world.animation_data.action + if action.fcurves: + fcurves = action.fcurves + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys = nkeys + 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + + for i, frame in enumerate(kframes): + ambient = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'color'] + if not ambient: + ambient.append(world.color) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("color", _3ds_float_color(ambient)) + + else: # Track header + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(0)) + track_chunk.add_variable("frame_total", _3ds_uint(0)) + track_chunk.add_variable("nkeys", _3ds_uint(1)) + # Keyframe header + track_chunk.add_variable("tcb_frame", _3ds_uint(0)) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("color", _3ds_float_color(amb_color)) + + amb_node.add_subchunk(track_chunk) + + return amb_node + + +########## +# EXPORT # +########## def save(operator, context, filepath="", - use_selection=True, + use_selection=False, + write_keyframe=False, global_matrix=None, ): @@ -1194,6 +1475,7 @@ def save(operator, scene = context.scene layer = context.view_layer depsgraph = context.evaluated_depsgraph_get() + world = scene.world if global_matrix is None: global_matrix = mathutils.Matrix() @@ -1201,15 +1483,15 @@ def save(operator, if bpy.ops.object.mode_set.poll(): bpy.ops.object.mode_set(mode='OBJECT') - # Initialize the main chunk (primary): + # Initialize the main chunk (primary) primary = _3ds_chunk(PRIMARY) - # Add version chunk: + # Add version chunk version_chunk = _3ds_chunk(VERSION) version_chunk.add_variable("version", _3ds_uint(3)) primary.add_subchunk(version_chunk) - # Init main object info chunk: + # Init main object info chunk object_info = _3ds_chunk(OBJECTINFO) mesh_version = _3ds_chunk(MESHVERSION) mesh_version.add_variable("mesh", _3ds_uint(3)) @@ -1220,21 +1502,25 @@ def save(operator, mscale.add_variable("scale", _3ds_float(1)) object_info.add_subchunk(mscale) + # Init main keyframe data 
chunk + if write_keyframe: + revision = 0x0005 + stop = scene.frame_end + start = scene.frame_start + curtime = scene.frame_current + kfdata = make_kfdata(revision, start, stop, curtime) + # Add AMBIENT color - if scene.world is not None: + if world is not None: ambient_chunk = _3ds_chunk(AMBIENTLIGHT) ambient_light = _3ds_chunk(RGB) ambient_light.add_variable("ambient", _3ds_float_color(scene.world.color)) ambient_chunk.add_subchunk(ambient_light) object_info.add_subchunk(ambient_chunk) + if write_keyframe and world.animation_data: + kfdata.add_subchunk(make_ambient_node(world)) - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - # init main key frame data chunk: - kfdata = make_kfdata() - ''' - - # Make a list of all materials used in the selected meshes (use a dictionary, - # each material is added once): + # Make a list of all materials used in the selected meshes (use dictionary, each material is added once) materialDict = {} mesh_objects = [] @@ -1248,7 +1534,7 @@ def save(operator, camera_objects = [ob for ob in objects if ob.type == 'CAMERA'] for ob in objects: - # get derived objects + # Get derived objects derived_dict = bpy_extras.io_utils.create_derived_objects(depsgraph, [ob]) derived = derived_dict.get(ob) @@ -1271,7 +1557,7 @@ def save(operator, ma_ls = data.materials ma_ls_len = len(ma_ls) - # get material/image tuples. + # Get material/image tuples if data.uv_layers: if not ma_ls: ma = ma_name = None @@ -1283,7 +1569,7 @@ def save(operator, ma_index = f.material_index = 0 ma = ma_ls[ma_index] ma_name = None if ma is None else ma.name - # else there already set to none + # Else there already set to none img = get_uv_image(ma) img_name = None if img is None else img.name @@ -1292,7 +1578,7 @@ def save(operator, else: for ma in ma_ls: - if ma: # material may be None so check its not. + if ma: # Material may be None so check its not materialDict.setdefault((ma.name, None), (ma, None)) # Why 0 Why! 
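# Illustration only, not part of the patch: a minimal sketch of the byte layout that
# the new make_kfdata(revision, start, stop, curtime) call above produces when the
# write_keyframe option is enabled. The tag values are the standard 3ds ones and match
# the KFDATA_* constants listed for import_3ds.py later in this series; the
# NUL-terminated "Blender" name and the presence of both start and stop inside the
# KFSEG payload are assumptions, since those parts of the hunk are not shown here.
import struct

KFDATA, KFDATA_KFHDR, KFDATA_KFSEG, KFDATA_CURTIME = 0xB000, 0xB00A, 0xB008, 0xB009

def chunk(tag, payload):
    # Every 3ds chunk is a 2-byte tag plus a 4-byte length that counts the 6-byte header.
    return struct.pack('<HI', tag, 6 + len(payload)) + payload

def sketch_kfdata(revision=0x0005, start=0, stop=100, curtime=0):
    kfhdr = chunk(KFDATA_KFHDR, struct.pack('<H', revision)
                  + b'Blender\x00' + struct.pack('<I', stop - start))
    kfseg = chunk(KFDATA_KFSEG, struct.pack('<2I', start, stop))
    kfcur = chunk(KFDATA_CURTIME, struct.pack('<I', curtime))
    return chunk(KFDATA, kfhdr + kfseg + kfcur)

print(len(sketch_kfdata()))  # 50 bytes: 6 + (6+14) + (6+8) + (6+4)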
@@ -1301,62 +1587,68 @@ def save(operator, f.material_index = 0 - # Make material chunks for all materials used in the meshes: + # Make material chunks for all materials used in the meshes for ma_image in materialDict.values(): object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1])) # Collect translation for transformation matrix translation = {} + rotation = {} + scale = {} - # Give all objects a unique ID and build a dictionary from object name to object id: + # Give all objects a unique ID and build a dictionary from object name to object id # name_to_id = {} for ob, data, matrix in mesh_objects: translation[ob.name] = ob.location + rotation[ob.name] = ob.rotation_euler.to_quaternion().inverted() + scale[ob.name] = ob.scale # name_to_id[ob.name]= len(name_to_id) for ob in empty_objects: translation[ob.name] = ob.location + rotation[ob.name] = ob.rotation_euler.to_quaternion().inverted() + scale[ob.name] = ob.scale # name_to_id[ob.name]= len(name_to_id) - # Create object chunks for all meshes: + # Create object chunks for all meshes i = 0 for ob, mesh, matrix in mesh_objects: - # create a new object chunk + # Create a new object chunk object_chunk = _3ds_chunk(OBJECT) - # set the object name + # Set the object name object_chunk.add_variable("name", _3ds_string(sane_name(ob.name))) - # make a mesh chunk out of the mesh: + # Make a mesh chunk out of the mesh object_chunk.add_subchunk(make_mesh_chunk(ob, mesh, matrix, materialDict, translation)) - # ensure the mesh has no over sized arrays - # skip ones that do!, otherwise we cant write since the array size wont - # fit into USHORT. + # Ensure the mesh has no over sized arrays, skip ones that do! + # Otherwise we cant write since the array size wont fit into USHORT if object_chunk.validate(): object_info.add_subchunk(object_chunk) else: operator.report({'WARNING'}, "Object %r can't be written into a 3DS file") - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - # make a kf object node for the object: - kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) - ''' + # Export kf object node + if write_keyframe: + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale)) i += i - # Create chunks for all empties: - ''' # COMMENTED OUT FOR 2.42 RELEASE!! 
CRASHES 3DS MAX - for ob in empty_objects: - # Empties only require a kf object node: - kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) - pass - ''' + # Create chunks for all empties, only requires a kf object node + if write_keyframe: + for ob in empty_objects: + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale)) # Create light object chunks for ob in light_objects: object_chunk = _3ds_chunk(OBJECT) + translation[ob.name] = ob.location + rotation[ob.name] = ob.rotation_euler.to_quaternion() + scale[ob.name] = ob.scale + + # Add light data subchunks light_chunk = _3ds_chunk(OBJECT_LIGHT) color_float_chunk = _3ds_chunk(RGB) energy_factor = _3ds_chunk(LIGHT_MULTIPLIER) @@ -1393,9 +1685,20 @@ def save(operator, object_chunk.add_subchunk(light_chunk) object_info.add_subchunk(object_chunk) + # Export light and spotlight target node + if write_keyframe: + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale)) + if ob.data.type == 'SPOT': + kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale)) + # Create camera object chunks for ob in camera_objects: object_chunk = _3ds_chunk(OBJECT) + translation[ob.name] = ob.location + rotation[ob.name] = ob.rotation_euler.to_quaternion() + scale[ob.name] = ob.scale + + # Add camera data subchunks camera_chunk = _3ds_chunk(OBJECT_CAMERA) diagonal = math.copysign(math.sqrt(pow(ob.location[0], 2) + pow(ob.location[1], 2)), ob.location[1]) focus_x = ob.location[0] + (ob.location[1] * math.tan(ob.rotation_euler[2])) @@ -1409,35 +1712,39 @@ def save(operator, object_chunk.add_subchunk(camera_chunk) object_info.add_subchunk(object_chunk) - # Add main object info chunk to primary chunk: + # Export camera and target node + if write_keyframe: + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale)) + kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale)) + + # Add main object info chunk to primary chunk primary.add_subchunk(object_info) - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - # Add main keyframe data chunk to primary chunk: - primary.add_subchunk(kfdata) - ''' + # Add main keyframe data chunk to primary chunk + if write_keyframe: + primary.add_subchunk(kfdata) - # At this point, the chunk hierarchy is completely built. 
- # Check the size: + # At this point, the chunk hierarchy is completely built + # Check the size primary.get_size() - # Open the file for writing: + # Open the file for writing file = open(filepath, 'wb') - # Recursively write the chunks to file: + # Recursively write the chunks to file primary.write(file) - # Close the file: + # Close the file file.close() # Clear name mapping vars, could make locals too del name_unique[:] name_mapping.clear() - # Debugging only: report the exporting time: + # Debugging only: report the exporting time print("3ds export time: %.2f" % (time.time() - duration)) - # Debugging only: dump the chunk hierarchy: + # Debugging only: dump the chunk hierarchy # primary.dump() - return {'FINISHED'} + return {'FINISHED'} \ No newline at end of file -- 2.30.2 From 595c392be8c3ecc1e03b09da4251a273c9bce4cc Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 22 May 2023 21:36:46 +0200 Subject: [PATCH 09/14] io_scene_3ds: Completed code improvement Final code improvement --- io_scene_3ds/__init__.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 643214c..04f4121 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -16,8 +16,8 @@ import bpy bl_info = { "name": "Autodesk 3DS format", "author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand", - "version": (2, 3, 1), - "blender": (3, 0, 0), + "version": (2, 4, 1), + "blender": (3, 6, 0), "location": "File > Import-Export", "description": "3DS Import/Export meshes, UVs, materials, textures, " "cameras, lamps & animation", @@ -65,12 +65,12 @@ class Import3DS(bpy.types.Operator, ImportHelper): "importing incorrectly", default=True, ) - read_keyframe: bpy.props.BoolProperty( + read_keyframe: BoolProperty( name="Read Keyframe", description="Read the keyframe data", default=True, ) - use_world_matrix: bpy.props.BoolProperty( + use_world_matrix: BoolProperty( name="World Space", description="Transform to matrix world", default=False, @@ -109,6 +109,11 @@ class Export3DS(bpy.types.Operator, ExportHelper): description="Export selected objects only", default=False, ) + write_keyframe: BoolProperty( + name="Write Keyframe", + description="Write the keyframe data", + default=False, + ) def execute(self, context): from . 
import export_3ds @@ -152,4 +157,4 @@ def unregister(): if __name__ == "__main__": - register() + register() \ No newline at end of file -- 2.30.2 From 5ca5eb4cc45553219d32406bd7d5019fb5c96c26 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Mon, 22 May 2023 21:38:27 +0200 Subject: [PATCH 10/14] io_scene_3ds: Updated version Updated script and blender version --- io_scene_3ds/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 04f4121..d23684f 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -16,8 +16,8 @@ import bpy bl_info = { "name": "Autodesk 3DS format", "author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand", - "version": (2, 4, 1), - "blender": (3, 6, 0), + "version": (2, 3, 6), + "blender": (3, 6, 1), "location": "File > Import-Export", "description": "3DS Import/Export meshes, UVs, materials, textures, " "cameras, lamps & animation", -- 2.30.2 From 75de2974e8b6f701615f27b52da10861f7b7a2c0 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sat, 7 Oct 2023 20:11:07 +0200 Subject: [PATCH 11/14] io_scene_3ds: Update for Blender 4.x --- io_scene_3ds/__init__.py | 230 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 219 insertions(+), 11 deletions(-) diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index d23684f..16c60da 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -38,35 +38,56 @@ if "bpy" in locals(): @orientation_helper(axis_forward='Y', axis_up='Z') class Import3DS(bpy.types.Operator, ImportHelper): """Import from 3DS file format (.3ds)""" - bl_idname = "import_scene.autodesk_3ds" + bl_idname = "import_scene.max3ds" bl_label = 'Import 3DS' - bl_options = {'UNDO'} + bl_options = {'PRESET', 'UNDO'} filename_ext = ".3ds" filter_glob: StringProperty(default="*.3ds", options={'HIDDEN'}) constrain_size: FloatProperty( - name="Size Constraint", + name="Constrain Size", description="Scale the model by 10 until it reaches the " "size constraint (0 to disable)", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0, ) + use_scene_unit: BoolProperty( + name="Scene Units", + description="Convert to scene unit length settings", + default=False, + ) + use_center_pivot: BoolProperty( + name="Pivot Origin", + description="Move all geometry to pivot origin", + default=False, + ) use_image_search: BoolProperty( name="Image Search", description="Search subdirectories for any associated images " "(Warning, may be slow)", default=True, ) + object_filter: EnumProperty( + name="Object Filter", options={'ENUM_FLAG'}, + items=(('WORLD', "World".rjust(11), "", 'WORLD_DATA', 0x1), + ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2), + ('LIGHT', "Light".rjust(12), "", 'LIGHT_DATA', 0x4), + ('CAMERA', "Camera".rjust(11), "", 'CAMERA_DATA', 0x8), + ('EMPTY', "Empty".rjust(11), "", 'EMPTY_AXIS', 0x10), + ), + description="Object types to import", + default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, + ) use_apply_transform: BoolProperty( name="Apply Transform", description="Workaround for object transformations " "importing incorrectly", default=True, ) - read_keyframe: BoolProperty( - name="Read Keyframe", + use_keyframes: BoolProperty( + name="Animation", description="Read the keyframe data", default=True, ) @@ -75,6 +96,11 @@ class Import3DS(bpy.types.Operator, ImportHelper): description="Transform to matrix world", default=False, ) + use_cursor: BoolProperty( + name="Cursor Origin", + description="Read the 3D 
cursor location", + default=False, + ) def execute(self, context): from . import import_3ds @@ -91,12 +117,87 @@ class Import3DS(bpy.types.Operator, ImportHelper): return import_3ds.load(self, context, **keywords) + def draw(self, context): + pass + + +class MAX3DS_PT_import_include(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Include" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_SCENE_OT_max3ds" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = True + + sfile = context.space_data + operator = sfile.active_operator + + layrow = layout.row(align=True) + layrow.prop(operator, "use_image_search") + layrow.label(text="", icon='OUTLINER_OB_IMAGE' if operator.use_image_search else 'IMAGE_DATA') + layout.column().prop(operator, "object_filter") + layrow = layout.row(align=True) + layrow.prop(operator, "use_keyframes") + layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER') + layrow = layout.row(align=True) + layrow.prop(operator, "use_cursor") + layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR') + + +class MAX3DS_PT_import_transform(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Transform" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_SCENE_OT_max3ds" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "constrain_size") + layrow = layout.row(align=True) + layrow.prop(operator, "use_scene_unit") + layrow.label(text="", icon='EMPTY_ARROWS' if operator.use_scene_unit else 'EMPTY_DATA') + layrow = layout.row(align=True) + layrow.prop(operator, "use_center_pivot") + layrow.label(text="", icon='OVERLAY' if operator.use_center_pivot else 'PIVOT_ACTIVE') + layrow = layout.row(align=True) + layrow.prop(operator, "use_apply_transform") + layrow.label(text="", icon='MESH_CUBE' if operator.use_apply_transform else 'MOD_SOLIDIFY') + layrow = layout.row(align=True) + layrow.prop(operator, "use_world_matrix") + layrow.label(text="", icon='WORLD' if operator.use_world_matrix else 'META_BALL') + layout.prop(operator, "axis_forward") + layout.prop(operator, "axis_up") + @orientation_helper(axis_forward='Y', axis_up='Z') class Export3DS(bpy.types.Operator, ExportHelper): """Export to 3DS file format (.3ds)""" - bl_idname = "export_scene.autodesk_3ds" + bl_idname = "export_scene.max3ds" bl_label = 'Export 3DS' + bl_options = {'PRESET', 'UNDO'} filename_ext = ".3ds" filter_glob: StringProperty( @@ -104,16 +205,49 @@ class Export3DS(bpy.types.Operator, ExportHelper): options={'HIDDEN'}, ) + scale_factor: FloatProperty( + name="Scale Factor", + description="Master scale factor for all objects", + min=0.0, max=100000.0, + soft_min=0.0, soft_max=100000.0, + default=1.0, + ) + use_scene_unit: BoolProperty( + name="Scene Units", + description="Take the scene unit length settings into account", + default=False, + ) use_selection: BoolProperty( - name="Selection Only", + name="Selection", description="Export selected objects only", default=False, ) - 
write_keyframe: BoolProperty( - name="Write Keyframe", + object_filter: EnumProperty( + name="Object Filter", options={'ENUM_FLAG'}, + items=(('WORLD', "World".rjust(11), "", 'WORLD_DATA',0x1), + ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2), + ('LIGHT', "Light".rjust(12), "", 'LIGHT_DATA',0x4), + ('CAMERA', "Camera".rjust(11), "", 'CAMERA_DATA',0x8), + ('EMPTY', "Empty".rjust(11), "", 'EMPTY_AXIS',0x10), + ), + description="Object types to export", + default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, + ) + use_hierarchy: BoolProperty( + name="Hierarchy", + description="Export hierarchy chunks", + default=False, + ) + use_keyframes: BoolProperty( + name="Animation", description="Write the keyframe data", default=False, ) + use_cursor: BoolProperty( + name="Cursor Origin", + description="Save the 3D cursor location", + default=False, + ) def execute(self, context): from . import export_3ds @@ -130,6 +264,74 @@ class Export3DS(bpy.types.Operator, ExportHelper): return export_3ds.save(self, context, **keywords) + def draw(self, context): + pass + + +class MAX3DS_PT_export_include(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Include" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "EXPORT_SCENE_OT_max3ds" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = True + + sfile = context.space_data + operator = sfile.active_operator + + layrow = layout.row(align=True) + layrow.prop(operator, "use_selection") + layrow.label(text="", icon='RESTRICT_SELECT_OFF' if operator.use_selection else 'RESTRICT_SELECT_ON') + layout.column().prop(operator, "object_filter") + layrow = layout.row(align=True) + layrow.prop(operator, "use_hierarchy") + layrow.label(text="", icon='OUTLINER' if operator.use_hierarchy else 'CON_CHILDOF') + layrow = layout.row(align=True) + layrow.prop(operator, "use_keyframes") + layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER') + layrow = layout.row(align=True) + layrow.prop(operator, "use_cursor") + layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR') + + +class MAX3DS_PT_export_transform(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Transform" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "EXPORT_SCENE_OT_max3ds" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "scale_factor") + layrow = layout.row(align=True) + layrow.prop(operator, "use_scene_unit") + layrow.label(text="", icon='EMPTY_ARROWS' if operator.use_scene_unit else 'EMPTY_DATA') + layout.prop(operator, "axis_forward") + layout.prop(operator, "axis_up") + # Add to a menu def menu_func_export(self, context): @@ -142,16 +344,22 @@ def menu_func_import(self, context): def register(): bpy.utils.register_class(Import3DS) + bpy.utils.register_class(MAX3DS_PT_import_include) + bpy.utils.register_class(MAX3DS_PT_import_transform) bpy.utils.register_class(Export3DS) - + bpy.utils.register_class(MAX3DS_PT_export_include) + bpy.utils.register_class(MAX3DS_PT_export_transform) 
bpy.types.TOPBAR_MT_file_import.append(menu_func_import) bpy.types.TOPBAR_MT_file_export.append(menu_func_export) def unregister(): bpy.utils.unregister_class(Import3DS) + bpy.utils.unregister_class(MAX3DS_PT_import_include) + bpy.utils.unregister_class(MAX3DS_PT_import_transform) bpy.utils.unregister_class(Export3DS) - + bpy.utils.unregister_class(MAX3DS_PT_export_include) + bpy.utils.unregister_class(MAX3DS_PT_export_transform) bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) bpy.types.TOPBAR_MT_file_export.remove(menu_func_export) -- 2.30.2 From 1a81a4e589e2cb473563f8c3db063cc6542fde45 Mon Sep 17 00:00:00 2001 From: Sebastian Sille Date: Sat, 7 Oct 2023 20:16:43 +0200 Subject: [PATCH 12/14] Import_3ds: Update for Blender 4.x --- io_scene_3ds/import_3ds.py | 1246 ++++++++++++++++++++++-------------- 1 file changed, 765 insertions(+), 481 deletions(-) diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index ce55232..9a4d40f 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -24,7 +24,6 @@ PRIMARY = 0x4D4D # >----- Main Chunks OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information VERSION = 0x0002 # This gives the version of the .3ds file -AMBIENTLIGHT = 0x2100 # The color of the ambient light EDITKEYFRAME = 0xB000 # This is the header for all of the key frame info # >----- Data Chunks, used for various attributes @@ -32,11 +31,24 @@ COLOR_F = 0x0010 # color defined as 3 floats COLOR_24 = 0x0011 # color defined as 3 bytes LIN_COLOR_24 = 0x0012 # linear byte color LIN_COLOR_F = 0x0013 # linear float color -PCT_SHORT = 0x30 # percentage short -PCT_FLOAT = 0x31 # percentage float +PCT_SHORT = 0x0030 # percentage short +PCT_FLOAT = 0x0031 # percentage float MASTERSCALE = 0x0100 # Master scale factor # >----- sub defines of OBJECTINFO +BITMAP = 0x1100 # The background image name +USE_BITMAP = 0x1101 # The background image flag +SOLIDBACKGND = 0x1200 # The background color (RGB) +USE_SOLIDBGND = 0x1201 # The background color flag +VGRADIENT = 0x1300 # The background gradient colors +USE_VGRADIENT = 0x1301 # The background gradient flag +O_CONSTS = 0x1500 # The origin of the 3D cursor +AMBIENTLIGHT = 0x2100 # The color of the ambient light +FOG = 0x2200 # The fog atmosphere settings +USE_FOG = 0x2201 # The fog atmosphere flag +FOG_BGND = 0x2210 # The fog atmosphere background flag +LAYER_FOG = 0x2302 # The fog layer atmosphere settings +USE_LAYER_FOG = 0x2303 # The fog layer atmosphere flag MATERIAL = 0xAFFF # This stored the texture info OBJECT = 0x4000 # This stores the faces, vertices, etc... 
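# Illustration only, not part of the patch: a minimal sketch of the chunk walk that
# import_3ds.py builds on. Every 3ds chunk is a 2-byte ID followed by a 4-byte length
# that includes the 6-byte header itself, so unknown chunks can simply be stepped over
# (cf. the skip_to_end() helper shown further below). The function name and the
# in-memory bytes interface are illustrative assumptions, not the add-on's own API.
import struct

def walk_chunks(data, offset=0, end=None):
    # Yield (chunk_id, payload_offset, payload_size) for each sibling chunk in data[offset:end].
    end = len(data) if end is None else end
    while offset + 6 <= end:
        chunk_id, length = struct.unpack_from('<HI', data, offset)
        yield chunk_id, offset + 6, length - 6
        offset += length  # the length counts the header, so this lands on the next sibling

# Pure container chunks such as PRIMARY (0x4D4D) and OBJECTINFO (0x3D3D) hold nothing
# but subchunks, so their payload range can be fed straight back into walk_chunks().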
@@ -92,6 +104,8 @@ MAT_MAP_BCOL = 0xA368 # Blue mapping OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object +OBJECT_HIERARCHY = 0x4F00 # This lets us know the hierachy id of the object +OBJECT_PARENT = 0x4F10 # This lets us know the parent id of the object # >------ Sub defines of LIGHT LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight @@ -113,6 +127,7 @@ LIGHT_RAY_BIAS = 0x4658 # Light ray bias value LIGHT_INNER_RANGE = 0x4659 # The light inner range LIGHT_OUTER_RANGE = 0x465A # The light outer range LIGHT_MULTIPLIER = 0x465B # The light energy factor +LIGHT_ATTENUATE = 0x4625 # Light attenuation flag LIGHT_AMBIENT_LIGHT = 0x4680 # Light ambient flag # >------ sub defines of CAMERA @@ -128,13 +143,13 @@ OBJECT_SMOOTH = 0x4150 # The objects face smooth groups OBJECT_TRANS_MATRIX = 0x4160 # The objects Matrix # >------ sub defines of EDITKEYFRAME -KFDATA_AMBIENT = 0xB001 # Keyframe ambient node -KFDATA_OBJECT = 0xB002 # Keyframe object node -KFDATA_CAMERA = 0xB003 # Keyframe camera node -KFDATA_TARGET = 0xB004 # Keyframe target node -KFDATA_LIGHT = 0xB005 # Keyframe light node -KFDATA_LTARGET = 0xB006 # Keyframe light target node -KFDATA_SPOTLIGHT = 0xB007 # Keyframe spotlight node +KF_AMBIENT = 0xB001 # Keyframe ambient node +KF_OBJECT = 0xB002 # Keyframe object node +KF_OBJECT_CAMERA = 0xB003 # Keyframe camera node +KF_TARGET_CAMERA = 0xB004 # Keyframe target node +KF_OBJECT_LIGHT = 0xB005 # Keyframe light node +KF_TARGET_LIGHT = 0xB006 # Keyframe light target node +KF_OBJECT_SPOT_LIGHT = 0xB007 # Keyframe spotlight node KFDATA_KFSEG = 0xB008 # Keyframe start and stop KFDATA_CURTIME = 0xB009 # Keyframe current frame KFDATA_KFHDR = 0xB00A # Keyframe node header @@ -165,6 +180,7 @@ global scn scn = None object_dictionary = {} +parent_dictionary = {} object_matrix = {} @@ -216,17 +232,6 @@ def read_string(file): return str(b''.join(s), "utf-8", "replace"), len(s) + 1 -########## -# IMPORT # -########## - -def process_next_object_chunk(file, previous_chunk): - new_chunk = Chunk() - - while (previous_chunk.bytes_read < previous_chunk.length): - # read the next chunk - read_chunk(file, new_chunk) - def skip_to_end(file, skip_chunk): buffer_size = skip_chunk.length - skip_chunk.bytes_read binary_format = '%ic' % buffer_size @@ -234,6 +239,10 @@ def skip_to_end(file, skip_chunk): skip_chunk.bytes_read += buffer_size +############# +# MATERIALS # +############# + def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tintcolor, mapto): shader = contextWrapper.node_principled_bsdf nodetree = contextWrapper.material.node_tree @@ -253,19 +262,21 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of img_wrap = contextWrapper.base_color_texture links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2]) links.new(mixer.outputs['Color'], shader.inputs['Base Color']) - elif mapto == 'SPECULARITY': - img_wrap = contextWrapper.specular_texture - elif mapto == 'ALPHA': - shader.location = (0, -300) - img_wrap = contextWrapper.alpha_texture - elif mapto == 'METALLIC': - shader.location = (300, 300) - img_wrap = contextWrapper.metallic_texture elif mapto == 'ROUGHNESS': - shader.location = (300, 0) img_wrap = contextWrapper.roughness_texture + elif mapto == 'METALLIC': + shader.location = (300,300) + img_wrap = contextWrapper.metallic_texture + elif mapto == 
'SPECULARITY': + shader.location = (300,0) + img_wrap = contextWrapper.specular_tint_texture + elif mapto == 'ALPHA': + shader.location = (-300,0) + img_wrap = contextWrapper.alpha_texture + img_wrap.use_alpha = False + links.new(img_wrap.node_image.outputs['Color'], img_wrap.socket_dst) elif mapto == 'EMISSION': - shader.location = (-300, -600) + shader.location = (0,-900) img_wrap = contextWrapper.emission_color_texture elif mapto == 'NORMAL': shader.location = (300, 300) @@ -300,10 +311,12 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of img_wrap.extension = 'CLIP' if alpha == 'alpha': + own_node = img_wrap.node_image + contextWrapper.material.blend_method = 'HASHED' + links.new(own_node.outputs['Alpha'], img_wrap.socket_dst) for link in links: if link.from_node.type == 'TEX_IMAGE' and link.to_node.type == 'MIX_RGB': tex = link.from_node.image.name - own_node = img_wrap.node_image own_map = img_wrap.node_mapping if tex == image.name: links.new(link.from_node.outputs['Alpha'], img_wrap.socket_dst) @@ -313,20 +326,28 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of if imgs.name[-3:].isdigit(): if not imgs.users: bpy.data.images.remove(imgs) - else: - links.new(img_wrap.node_image.outputs['Alpha'], img_wrap.socket_dst) - contextWrapper.material.blend_method = 'HASHED' shader.location = (300, 300) contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader) -def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE): +############# +# MESH DATA # +############# + +childs_list = [] +parent_list = [] + +def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, FILTER, + IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE, CURSOR): contextObName = None + contextWorld = None contextLamp = None contextCamera = None contextMaterial = None + contextAlpha = None + contextColor = None contextWrapper = None contextMatrix = None contextMesh_vertls = None @@ -335,6 +356,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI contextMeshMaterials = [] contextMesh_smooth = None contextMeshUV = None + contextTrack_flag = False # TEXTURE_DICT = {} MATDICT = {} @@ -353,36 +375,28 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI object_list = [] # for hierarchy object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent pivot_list = [] # pivots with hierarchy handling - track_flags = [] # keyframe track flags trackposition = {} # keep track to position for target calculation - def putContextMesh( - context, - myContextMesh_vertls, - myContextMesh_facels, - myContextMesh_flag, - myContextMeshMaterials, - myContextMesh_smooth, - WORLD_MATRIX, - ): + def putContextMesh(context, ContextMesh_vertls, ContextMesh_facels, ContextMesh_flag, + ContextMeshMaterials, ContextMesh_smooth, WORLD_MATRIX): + bmesh = bpy.data.meshes.new(contextObName) - if myContextMesh_facels is None: - myContextMesh_facels = [] + if ContextMesh_facels is None: + ContextMesh_facels = [] - if myContextMesh_vertls: + if ContextMesh_vertls: - bmesh.vertices.add(len(myContextMesh_vertls) // 3) - bmesh.vertices.foreach_set("co", myContextMesh_vertls) + bmesh.vertices.add(len(ContextMesh_vertls) // 3) + bmesh.vertices.foreach_set("co", ContextMesh_vertls) - nbr_faces = len(myContextMesh_facels) + nbr_faces = len(ContextMesh_facels) bmesh.polygons.add(nbr_faces) 
bmesh.loops.add(nbr_faces * 3) eekadoodle_faces = [] - for v1, v2, v3 in myContextMesh_facels: + for v1, v2, v3 in ContextMesh_facels: eekadoodle_faces.extend((v3, v1, v2) if v3 == 0 else (v1, v2, v3)) bmesh.polygons.foreach_set("loop_start", range(0, nbr_faces * 3, 3)) - bmesh.polygons.foreach_set("loop_total", (3,) * nbr_faces) bmesh.loops.foreach_set("vertex_index", eekadoodle_faces) if bmesh.polygons and contextMeshUV: @@ -391,7 +405,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI else: uv_faces = None - for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials): + for mat_idx, (matName, faces) in enumerate(ContextMeshMaterials): if matName is None: bmat = None else: @@ -405,7 +419,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI if uv_faces: uvl = bmesh.uv_layers.active.data[:] for fidx, pl in enumerate(bmesh.polygons): - face = myContextMesh_facels[fidx] + face = ContextMesh_facels[fidx] v1, v2, v3 = face # eekadoodle @@ -425,12 +439,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI context.view_layer.active_layer_collection.collection.objects.link(ob) imported_objects.append(ob) - if myContextMesh_flag: - """Bit 0 (0x1) sets edge CA visible, Bit 1 (0x2) sets edge BC visible and Bit 2 (0x4) sets edge AB visible - In Blender we use sharp edges for those flags""" + if ContextMesh_flag: + """Bit 0 (0x1) sets edge CA visible, Bit 1 (0x2) sets edge BC visible and + Bit 2 (0x4) sets edge AB visible. In Blender we use sharp edges for those flags.""" for f, pl in enumerate(bmesh.polygons): - face = myContextMesh_facels[f] - faceflag = myContextMesh_flag[f] + face = ContextMesh_facels[f] + faceflag = ContextMesh_flag[f] edge_ab = bmesh.edges[bmesh.loops[pl.loop_start].edge_index] edge_bc = bmesh.edges[bmesh.loops[pl.loop_start + 1].edge_index] edge_ca = bmesh.edges[bmesh.loops[pl.loop_start + 2].edge_index] @@ -443,11 +457,16 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI if faceflag & 0x4: edge_ab.use_edge_sharp = True - if myContextMesh_smooth: + if ContextMesh_smooth: for f, pl in enumerate(bmesh.polygons): - smoothface = myContextMesh_smooth[f] + smoothface = ContextMesh_smooth[f] if smoothface > 0: bmesh.polygons[f].use_smooth = True + else: + bmesh.polygons[f].use_smooth = False + else: + for poly in bmesh.polygons: + poly.use_smooth = False if contextMatrix: if WORLD_MATRIX: @@ -461,23 +480,35 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI temp_chunk = Chunk() CreateBlenderObject = False + CreateCameraObject = False CreateLightObject = False CreateTrackData = False - def read_float_color(temp_chunk): - temp_data = file.read(SZ_3FLOAT) - temp_chunk.bytes_read += SZ_3FLOAT - return [float(col) for col in struct.unpack('<3f', temp_data)] + CreateWorld = 'WORLD' in FILTER + CreateMesh = 'MESH' in FILTER + CreateLight = 'LIGHT' in FILTER + CreateCamera = 'CAMERA' in FILTER + CreateEmpty = 'EMPTY' in FILTER + + def read_short(temp_chunk): + temp_data = file.read(SZ_U_SHORT) + temp_chunk.bytes_read += SZ_U_SHORT + return struct.unpack(' abs(location[1] - target[1]): - foc = math.copysign(math.sqrt(pow(pos[0],2)+pow(pos[1],2)),pos[0]) - dia = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[0]) - pitch = math.radians(90)-math.copysign(math.acos(foc/dia), pos[2]) - if location[0] > target[0]: - tilt = math.copysign(pitch, pos[0]) - pan = math.radians(90)+math.atan(pos[1]/foc) - else: - tilt = 
-1*(math.copysign(pitch, pos[0])) - pan = -1*(math.radians(90)-math.atan(pos[1]/foc)) - elif abs(location[1] - target[1]) > abs(location[0] - target[0]): - foc = math.copysign(math.sqrt(pow(pos[1],2)+pow(pos[0],2)),pos[1]) - dia = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[1]) - pitch = math.radians(90)-math.copysign(math.acos(foc/dia), pos[2]) - if location[1] > target[1]: - tilt = math.copysign(pitch, pos[1]) - pan = math.radians(90)+math.acos(pos[0]/foc) - else: - tilt = -1*(math.copysign(pitch, pos[1])) - pan = -1*(math.radians(90)-math.acos(pos[0]/foc)) - direction = tilt, pan - return direction + def get_hierarchy(tree_chunk): + child_id = read_short(tree_chunk) + childs_list.insert(child_id, contextObName) + parent_list.insert(child_id, None) + if child_id in parent_list: + idp = parent_list.index(child_id) + parent_list[idp] = contextObName + return child_id - def read_track_data(temp_chunk): + def get_parent(tree_chunk, child_id=-1): + parent_id = read_short(tree_chunk) + if parent_id > len(childs_list): + parent_list[child_id] = parent_id + parent_list.extend([None] * (parent_id - len(parent_list))) + parent_list.insert(parent_id, contextObName) + elif parent_id < len(childs_list): + parent_list[child_id] = childs_list[parent_id] + + def calc_target(loca, target): + pan = tilt = 0.0 + plane = loca + target + angle = math.radians(90) # Target triangulation + check_sign = abs(loca.y) < abs(target.y) + check_axes = abs(loca.x - target.x) > abs(loca.y - target.y) + plane_y = plane.y if check_sign else -1 * plane.y + sign_xy = plane.x if check_axes else plane.y + axis_xy = plane_y if check_axes else plane.x + hyp = math.sqrt(pow(plane.x,2) + pow(plane.y,2)) + dia = math.sqrt(pow(hyp,2) + pow(plane.z,2)) + yaw = math.atan2(math.copysign(hyp, sign_xy), axis_xy) + bow = math.acos(hyp / dia) + turn = angle - yaw if check_sign else angle + yaw + tilt = angle - bow if loca.z > target.z else angle + bow + pan = yaw if check_axes else turn + return tilt, pan + + def read_track_data(track_chunk): """Trackflags 0x1, 0x2 and 0x3 are for looping. 0x8, 0x10 and 0x20 - locks the XYZ axes. 0x100, 0x200 and 0x400 unlinks the XYZ axes""" - new_chunk.bytes_read += SZ_U_SHORT - temp_data = file.read(SZ_U_SHORT) - tflags = struct.unpack(' 3: print("\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version) - # is it an ambient light chunk? - elif new_chunk.ID == AMBIENTLIGHT: - path, filename = os.path.split(file.name) - realname, ext = os.path.splitext(filename) - world = bpy.data.worlds.new("Ambient: " + realname) - world.light_settings.use_ambient_occlusion = True - context.scene.world = world - read_chunk(file, temp_chunk) - if temp_chunk.ID == COLOR_F: - context.scene.world.color[:] = read_float_color(temp_chunk) - elif temp_chunk.ID == LIN_COLOR_F: - context.scene.world.color[:] = read_float_color(temp_chunk) - else: - skip_to_end(file, temp_chunk) - new_chunk.bytes_read += temp_chunk.bytes_read - - # is it an object info chunk? + # The main object info chunk elif new_chunk.ID == OBJECTINFO: - process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE) + process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, FILTER, + IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE, CURSOR) # keep track of how much we read in the main chunk new_chunk.bytes_read += temp_chunk.bytes_read - # is it an object chunk? 
- elif new_chunk.ID == OBJECT: + # If cursor location + elif CURSOR and new_chunk.ID == O_CONSTS: + context.scene.cursor.location = read_float_array(new_chunk) - if CreateBlenderObject: - putContextMesh( - context, - contextMesh_vertls, - contextMesh_facels, - contextMesh_flag, - contextMeshMaterials, - contextMesh_smooth, - WORLD_MATRIX - ) - contextMesh_vertls = [] - contextMesh_facels = [] - contextMeshMaterials = [] - contextMesh_flag = None - contextMesh_smooth = None - contextMeshUV = None - contextMatrix = None + # If ambient light chunk + elif CreateWorld and new_chunk.ID == AMBIENTLIGHT: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Ambient: " + realname) + context.scene.world = contextWorld + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + contextWorld.color[:] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + contextWorld.color[:] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read - CreateBlenderObject = True - contextObName, read_str_len = read_string(file) + # If background chunk + elif CreateWorld and new_chunk.ID == SOLIDBACKGND: + backgroundcolor = mathutils.Color((0.1, 0.1, 0.1)) + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Background: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + worldnodes = contextWorld.node_tree.nodes + backgroundnode = worldnodes['Background'] + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + backgroundcolor = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + backgroundcolor = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + backgroundmix = next((wn for wn in worldnodes if wn.type in {'MIX', 'MIX_RGB'}), False) + backgroundnode.inputs[0].default_value[:3] = backgroundcolor + if backgroundmix: + backgroundmix.inputs[2].default_value[:3] = backgroundcolor + new_chunk.bytes_read += temp_chunk.bytes_read + + # If bitmap chunk + elif CreateWorld and new_chunk.ID == BITMAP: + bitmap_name, read_str_len = read_string(file) + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Bitmap: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + bitmap_mix = nodes.new(type='ShaderNodeMixRGB') + bitmapnode = nodes.new(type='ShaderNodeTexEnvironment') + bitmap_mix.label = "Solid Color" + bitmapnode.label = "Bitmap: " + bitmap_name + bitmap_mix.inputs[2].default_value = nodes['Background'].inputs[0].default_value + bitmapnode.image = load_image(bitmap_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True) + bitmap_mix.inputs[0].default_value = 0.5 if bitmapnode.image is not None else 1.0 + bitmapnode.location = (-600, 360) if bitmapnode.image is not None else (-600, 300) + bitmap_mix.location = (-250, 300) + gradientnode = next((wn for wn in nodes if wn.type == 'VALTORGB'), False) + links.new(bitmap_mix.outputs['Color'], nodes['Background'].inputs[0]) + links.new(bitmapnode.outputs['Color'], bitmap_mix.inputs[1]) + if gradientnode: + links.new(bitmapnode.outputs['Color'], gradientnode.inputs[0]) new_chunk.bytes_read += read_str_len - # is it a material chunk? 
+ # If gradient chunk: + elif CreateWorld and new_chunk.ID == VGRADIENT: + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Gradient: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + gradientnode = nodes.new(type='ShaderNodeValToRGB') + gradientnode.location = (-600, 100) + gradientnode.label = "Gradient" + backgroundmix = next((wn for wn in worldnodes if wn.type in {'MIX', 'MIX_RGB'}), False) + bitmapnode = next((wn for wn in nodes if wn.type in {'TEX_IMAGE', 'TEX_ENVIRONMENT'}), False) + if backgroundmix: + links.new(gradientnode.outputs['Color'], backgroundmix.inputs[2]) + else: + links.new(gradientnode.outputs['Color'], nodes['Background'].inputs[0]) + if bitmapnode: + links.new(bitmapnode.outputs['Color'], gradientnode.inputs[0]) + gradientnode.color_ramp.elements.new(read_float(new_chunk)) + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + gradientnode.color_ramp.elements[2].color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + gradientnode.color_ramp.elements[2].color[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + gradientnode.color_ramp.elements[1].color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + gradientnode.color_ramp.elements[1].color[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + gradientnode.color_ramp.elements[0].color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + gradientnode.color_ramp.elements[0].color[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + + # If fog chunk: + elif CreateWorld and new_chunk.ID == FOG: + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Fog: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + fognode = nodes.new(type='ShaderNodeVolumeAbsorption') + fognode.label = "Fog" + fognode.location = (10, 60) + volumemix = next((wn for wn in worldnodes if wn.label == 'Volume' and wn.type in {'ADD_SHADER', 'MIX_SHADER'}), False) + if volumemix: + links.new(fognode.outputs['Volume'], volumemix.inputs[1]) + else: + links.new(fognode.outputs[0], nodes['World Output'].inputs[1]) + contextWorld.mist_settings.use_mist = True + contextWorld.mist_settings.start = read_float(new_chunk) + nearfog = read_float(new_chunk) * 0.01 + contextWorld.mist_settings.depth = read_float(new_chunk) + farfog = read_float(new_chunk) * 0.01 + fognode.inputs[1].default_value = (nearfog + farfog) * 0.5 + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + fognode.inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + fognode.inputs[0].default_value[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + elif CreateWorld and new_chunk.ID == FOG_BGND: + pass + + # If layer fog chunk: + elif CreateWorld 
and new_chunk.ID == LAYER_FOG: + """Fog options flags are bit 20 (0x100000) for background fogging, + bit 0 (0x1) for bottom falloff, and bit 1 (0x2) for top falloff.""" + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("LayerFog: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + mxvolume = nodes.new(type='ShaderNodeMixShader') + layerfog = nodes.new(type='ShaderNodeVolumeScatter') + layerfog.label = "Layer Fog" + mxvolume.label = "Volume" + layerfog.location = (10, -60) + mxvolume.location = (300, 50) + nodes['World Output'].location = (600, 200) + links.new(layerfog.outputs['Volume'], mxvolume.inputs[2]) + links.new(mxvolume.outputs[0], nodes['World Output'].inputs[1]) + fognode = next((wn for wn in worldnodes if wn.type == 'VOLUME_ABSORPTION'), False) + if fognode: + links.new(fognode.outputs['Volume'], mxvolume.inputs[1]) + context.view_layer.use_pass_mist = False + contextWorld.mist_settings.use_mist = True + contextWorld.mist_settings.start = read_float(new_chunk) + contextWorld.mist_settings.height = read_float(new_chunk) + layerfog.inputs[1].default_value = read_float(new_chunk) + layerfog_flag = read_long(new_chunk) + if layerfog_flag == 0: + contextWorld.mist_settings.falloff = 'LINEAR' + if layerfog_flag & 0x1: + contextWorld.mist_settings.falloff = 'QUADRATIC' + if layerfog_flag & 0x2: + contextWorld.mist_settings.falloff = 'INVERSE_QUADRATIC' + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + elif CreateWorld and new_chunk.ID in {USE_FOG, USE_LAYER_FOG}: + context.view_layer.use_pass_mist = True + + # If material chunk elif new_chunk.ID == MATERIAL: + contextAlpha = True + contextColor = mathutils.Color((0.8, 0.8, 0.8)) contextMaterial = bpy.data.materials.new('Material') contextWrapper = PrincipledBSDFWrapper(contextMaterial, is_readonly=False, use_nodes=False) @@ -726,7 +906,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI read_chunk(file, temp_chunk) # to not loose this data, ambient color is stored in line color if temp_chunk.ID == COLOR_F: - contextMaterial.line_color[:3] = read_float_color(temp_chunk) + contextMaterial.line_color[:3] = read_float_array(temp_chunk) elif temp_chunk.ID == COLOR_24: contextMaterial.line_color[:3] = read_byte_color(temp_chunk) else: @@ -736,9 +916,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif new_chunk.ID == MAT_DIFFUSE: read_chunk(file, temp_chunk) if temp_chunk.ID == COLOR_F: - contextMaterial.diffuse_color[:3] = read_float_color(temp_chunk) + contextColor = mathutils.Color(read_float_array(temp_chunk)) + contextMaterial.diffuse_color[:3] = contextColor elif temp_chunk.ID == COLOR_24: - contextMaterial.diffuse_color[:3] = read_byte_color(temp_chunk) + contextColor = mathutils.Color(read_byte_color(temp_chunk)) + contextMaterial.diffuse_color[:3] = contextColor else: skip_to_end(file, temp_chunk) new_chunk.bytes_read += temp_chunk.bytes_read @@ -746,7 +928,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif new_chunk.ID == MAT_SPECULAR: 
read_chunk(file, temp_chunk) if temp_chunk.ID == COLOR_F: - contextMaterial.specular_color = read_float_color(temp_chunk) + contextMaterial.specular_color = read_float_array(temp_chunk) elif temp_chunk.ID == COLOR_24: contextMaterial.specular_color = read_byte_color(temp_chunk) else: @@ -756,76 +938,69 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif new_chunk.ID == MAT_SHINESS: read_chunk(file, temp_chunk) if temp_chunk.ID == PCT_SHORT: - temp_data = file.read(SZ_U_SHORT) - temp_chunk.bytes_read += SZ_U_SHORT - contextMaterial.roughness = 1 - (float(struct.unpack('= 2: contextWrapper.use_nodes = True + contextWrapper.base_color = contextColor[:] + contextWrapper.metallic = contextMaterial.metallic + contextWrapper.roughness = contextMaterial.roughness + contextWrapper.specular = contextMaterial.specular_intensity + contextWrapper.specular_tint = contextMaterial.specular_color[:] contextWrapper.emission_color = contextMaterial.line_color[:3] contextWrapper.emission_strength = contextMaterial.line_priority / 100 - contextWrapper.base_color = contextMaterial.diffuse_color[:3] - contextWrapper.specular = contextMaterial.specular_intensity - contextWrapper.roughness = contextMaterial.roughness - contextWrapper.metallic = contextMaterial.metallic - contextWrapper.alpha = contextMaterial.diffuse_color[3] + contextWrapper.alpha = contextMaterial.diffuse_color[3] = contextAlpha contextWrapper.use_nodes = False if shading >= 3: contextWrapper.use_nodes = True @@ -848,13 +1023,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI elif new_chunk.ID == MAT_BUMP_PERCENT: read_chunk(file, temp_chunk) if temp_chunk.ID == PCT_SHORT: - temp_data = file.read(SZ_U_SHORT) - temp_chunk.bytes_read += SZ_U_SHORT - contextWrapper.normalmap_strength = (float(struct.unpack('= 0.01 else 0.1 + contextCamera.data.clip_start = startrange * CONSTRAIN + contextCamera.data.clip_end = read_float(new_chunk) * CONSTRAIN + elif CreateCameraObject and new_chunk.ID == OBJECT_HIERARCHY: # Hierarchy + child_id = get_hierarchy(new_chunk) + elif CreateCameraObject and new_chunk.ID == OBJECT_PARENT: + get_parent(new_chunk, child_id) # start keyframe section elif new_chunk.ID == EDITKEYFRAME: pass elif KEYFRAME and new_chunk.ID == KFDATA_KFSEG: - temp_data = file.read(SZ_U_INT) - start = struct.unpack(' Date: Sat, 7 Oct 2023 20:21:20 +0200 Subject: [PATCH 13/14] Update io_scene_3ds/export_3ds.py --- io_scene_3ds/export_3ds.py | 719 +++++++++++++++++++++++++------------ 1 file changed, 495 insertions(+), 224 deletions(-) diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 428b655..bc639e7 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -23,13 +23,24 @@ from bpy_extras import node_shader_utils PRIMARY = 0x4D4D # >----- Main Chunks +OBJECTINFO = 0x3D3D # Main mesh object chunk before material and object information +MESHVERSION = 0x3D3E # This gives the version of the mesh VERSION = 0x0002 # This gives the version of the .3ds file -KFDATA = 0xB000 # This is the header for all of the key frame info +KFDATA = 0xB000 # This is the header for all of the keyframe info # >----- sub defines of OBJECTINFO -OBJECTINFO = 0x3D3D # Main mesh object chunk before the material and object information -MESHVERSION = 0x3D3E # This gives the version of the mesh +BITMAP = 0x1100 # The background image name +USE_BITMAP = 0x1101 # The background image flag +SOLIDBACKGND = 0x1200 # The background color (RGB) +USE_SOLIDBGND = 0x1201 # The 
background color flag +VGRADIENT = 0x1300 # The background gradient colors +USE_VGRADIENT = 0x1301 # The background gradient flag +O_CONSTS = 0x1500 # The origin of the 3D cursor AMBIENTLIGHT = 0x2100 # The color of the ambient light +FOG = 0x2200 # The fog atmosphere settings +USE_FOG = 0x2201 # The fog atmosphere flag +LAYER_FOG = 0x2302 # The fog layer atmosphere settings +USE_LAYER_FOG = 0x2303 # The fog layer atmosphere flag MATERIAL = 45055 # 0xAFFF // This stored the texture info OBJECT = 16384 # 0x4000 // This stores the faces, vertices, etc... @@ -68,7 +79,7 @@ MAT_MAP_USCALE = 0xA354 # U axis scaling MAT_MAP_VSCALE = 0xA356 # V axis scaling MAT_MAP_UOFFSET = 0xA358 # U axis offset MAT_MAP_VOFFSET = 0xA35A # V axis offset -MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad +MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad MAP_COL1 = 0xA360 # Tint Color1 MAP_COL2 = 0xA362 # Tint Color2 MAP_RCOL = 0xA364 # Red tint @@ -87,14 +98,23 @@ MASTERSCALE = 0x0100 # Master scale factor OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object +OBJECT_HIERARCHY = 0x4F00 # Hierarchy id of the object +OBJECT_PARENT = 0x4F10 # Parent id of the object # >------ Sub defines of LIGHT LIGHT_MULTIPLIER = 0x465B # The light energy factor +LIGHT_INNER_RANGE = 0x4659 # Light inner range value +LIGHT_OUTER_RANGE = 0x465A # Light outer range value +LIGHT_ATTENUATE = 0x4625 # Light attenuation flag LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight LIGHT_SPOT_ROLL = 0x4656 # Light spot roll angle LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag +LIGHT_SPOT_LSHADOW = 0x4641 # Light spot shadow parameters LIGHT_SPOT_SEE_CONE = 0x4650 # Light spot show cone flag LIGHT_SPOT_RECTANGLE = 0x4651 # Light spot rectangle flag +LIGHT_SPOT_OVERSHOOT = 0x4652 # Light spot overshoot flag +LIGHT_SPOT_PROJECTOR = 0x4653 # Light spot projection bitmap +LIGHT_SPOT_ASPECT = 0x4657 # Light spot aspect ratio # >------ sub defines of CAMERA OBJECT_CAM_RANGES = 0x4720 # The camera range values @@ -121,10 +141,10 @@ KFDATA_KFCURTIME = 0xB009 # Frame current KFDATA_KFHDR = 0xB00A # Keyframe header # >------ sub defines of OBJECT_NODE_TAG -PARENT_NAME = 0x80F0 # Object parent name tree OBJECT_NODE_ID = 0xB030 # Object hierachy ID OBJECT_NODE_HDR = 0xB010 # Hierachy tree header OBJECT_INSTANCE_NAME = 0xB011 # Object instance name +OBJECT_PARENT_NAME = 0x80F0 # Object parent name OBJECT_PIVOT = 0xB013 # Object pivot position OBJECT_BOUNDBOX = 0xB014 # Object boundbox OBJECT_MORPH_SMOOTH = 0xB015 # Object smooth angle @@ -353,7 +373,6 @@ class _3ds_face(object): class _3ds_array(object): """Class representing an array of variables for a 3ds file. Consists of a _3ds_ushort to indicate the number of items, followed by the items themselves.""" - __slots__ = "values", "size" def __init__(self): @@ -412,7 +431,6 @@ class _3ds_named_variable(object): class _3ds_chunk(object): """Class representing a chunk in a 3ds file. Chunks contain zero or more variables, followed by zero or more subchunks.""" - __slots__ = "ID", "size", "variables", "subchunks" def __init__(self, chunk_id=0): @@ -424,7 +442,6 @@ class _3ds_chunk(object): def add_variable(self, name, var): """Add a named variable. 
The name is mostly for debugging purposes.""" - self.variables.append(_3ds_named_variable(name, var)) def add_subchunk(self, chunk): @@ -433,8 +450,7 @@ class _3ds_chunk(object): def get_size(self): """Calculate the size of the chunk and return it. - The sizes of the variables and subchunks are used to determine this chunk\'s size.""" - + The sizes of the variables and subchunks are used to determine this chunk's size.""" tmpsize = self.ID.get_size() + self.size.get_size() for variable in self.variables: tmpsize += variable.get_size() @@ -534,30 +550,41 @@ def make_percent_subchunk(chunk_id, percent): return pct_sub -def make_texture_chunk(chunk_id, images): +def make_texture_chunk(chunk_id, teximages, pct): """Make Material Map texture chunk.""" # Add texture percentage value (100 = 1.0) - mat_sub = make_percent_subchunk(chunk_id, 1) + mat_sub = make_percent_subchunk(chunk_id, pct) has_entry = False - def add_image(img): - filename = bpy.path.basename(image.filepath) + def add_image(img, extension): + filename = bpy.path.basename(img.filepath) mat_sub_file = _3ds_chunk(MAT_MAP_FILE) + mat_sub_tiling = _3ds_chunk(MAT_MAP_TILING) mat_sub_file.add_variable("image", _3ds_string(sane_name(filename))) mat_sub.add_subchunk(mat_sub_file) - for image in images: - add_image(image) + tiling = 0 + if extension == 'EXTEND': # decal flag + tiling |= 0x1 + if extension == 'MIRROR': # mirror flag + tiling |= 0x2 + if extension == 'CLIP': # no wrap + tiling |= 0x10 + + mat_sub_tiling.add_variable("tiling", _3ds_ushort(tiling)) + mat_sub.add_subchunk(mat_sub_tiling) + + for tex in teximages: + extend = tex.extension + add_image(tex.image, extend) has_entry = True return mat_sub if has_entry else None def make_material_texture_chunk(chunk_id, texslots, pct): - """Make Material Map texture chunk given a seq. of `MaterialTextureSlot`'s - Paint slots are optionally used as image source if no nodes are - used. No additional filtering for mapping modes is done, all - slots are written "as is".""" + """Make Material Map texture chunk given a seq. of MaterialTextureSlot's + Paint slots are optionally used as image source if no nodes are used.""" # Add texture percentage value mat_sub = make_percent_subchunk(chunk_id, pct) @@ -565,6 +592,7 @@ def make_material_texture_chunk(chunk_id, texslots, pct): def add_texslot(texslot): image = texslot.image + socket = None filename = bpy.path.basename(image.filepath) mat_sub_file = _3ds_chunk(MAT_MAP_FILE) @@ -579,7 +607,7 @@ def make_material_texture_chunk(chunk_id, texslots, pct): 0x40 activates alpha source, 0x80 activates tinting, 0x100 ignores alpha, 0x200 activates RGB tint. Bits 0x80, 0x100, and 0x200 are only used with TEXMAP, TEX2MAP, and SPECMAP chunks. 
0x40, when used with a TEXMAP, TEX2MAP, or SPECMAP chunk must be accompanied with a tint bit, - either 0x100 or 0x200, tintcolor will be processed if a tintflag is present""" + either 0x100 or 0x200, tintcolor will be processed if a tintflag was created.""" mapflags = 0 if texslot.extension == 'EXTEND': @@ -591,8 +619,8 @@ def make_material_texture_chunk(chunk_id, texslots, pct): if socket == 'Alpha': mapflags |= 0x40 - if texslot.socket_dst.identifier in {'Base Color', 'Specular'}: - mapflags |= 0x80 if image.colorspace_settings.name=='Non-Color' else 0x200 + if texslot.socket_dst.identifier in {'Base Color', 'Specular Tint'}: + mapflags |= 0x80 if image.colorspace_settings.name == 'Non-Color' else 0x200 mat_sub_mapflags.add_variable("mapflags", _3ds_ushort(mapflags)) mat_sub.add_subchunk(mat_sub_mapflags) @@ -621,11 +649,11 @@ def make_material_texture_chunk(chunk_id, texslots, pct): mat_sub_angle.add_variable("mapangle", _3ds_float(round(texslot.rotation[2], 6))) mat_sub.add_subchunk(mat_sub_angle) - if texslot.socket_dst.identifier in {'Base Color', 'Specular'}: + if texslot.socket_dst.identifier in {'Base Color', 'Specular Tint'}: rgb = _3ds_chunk(MAP_COL1) # Add tint color base = texslot.owner_shader.material.diffuse_color[:3] spec = texslot.owner_shader.material.specular_color[:] - rgb.add_variable("mapcolor", _3ds_rgb_color(spec if texslot.socket_dst.identifier == 'Specular' else base)) + rgb.add_variable("mapcolor", _3ds_rgb_color(spec if texslot.socket_dst.identifier == 'Specular Tint' else base)) mat_sub.add_subchunk(rgb) # Store all textures for this mapto in order. This at least is what the @@ -661,7 +689,7 @@ def make_material_chunk(material, image): material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, (0.8, 0.8, 0.8))) material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1.0, 1.0, 1.0))) material_chunk.add_subchunk(make_percent_subchunk(MATSHINESS, 0.8)) - material_chunk.add_subchunk(make_percent_subchunk(MATSHIN2, 1)) + material_chunk.add_subchunk(make_percent_subchunk(MATSHIN2, 0.5)) material_chunk.add_subchunk(shading) elif material and material.use_nodes: @@ -669,7 +697,7 @@ def make_material_chunk(material, image): shading.add_variable("shading", _3ds_ushort(3)) # Phong shading material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, wrap.emission_color[:3])) material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, wrap.base_color[:3])) - material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specular_color[:])) + material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, wrap.specular_tint[:3])) material_chunk.add_subchunk(make_percent_subchunk(MATSHINESS, 1 - wrap.roughness)) material_chunk.add_subchunk(make_percent_subchunk(MATSHIN2, wrap.specular)) material_chunk.add_subchunk(make_percent_subchunk(MATSHIN3, wrap.metallic)) @@ -678,6 +706,10 @@ def make_material_chunk(material, image): material_chunk.add_subchunk(shading) primary_tex = False + mtype = 'MIX', 'MIX_RGB' + mtlks = material.node_tree.links + mxtex = [lk.from_node for lk in mtlks if lk.from_node.type == 'TEX_IMAGE' and lk.to_socket.identifier in {'Color2', 'B_Color'}] + mxpct = next((lk.from_node.inputs[0].default_value for lk in mtlks if lk.from_node.type in mtype and lk.to_node.type == 'BSDF_PRINCIPLED'), 0.5) if wrap.base_color_texture: color = [wrap.base_color_texture] @@ -687,8 +719,12 @@ def make_material_chunk(material, image): material_chunk.add_subchunk(matmap) primary_tex = True - if wrap.specular_texture: - spec = [wrap.specular_texture] 
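The texture map flag bits documented above are plain OR-combinable masks written into the tiling/mapflags subchunk. A small hypothetical helper (not part of the patch) that decodes a stored value back into labels, using only the bits named in the comments above:

    # hypothetical debugging helper for the tiling/mapflags bits
    MAPFLAG_NAMES = {0x1: "decal", 0x2: "mirror", 0x10: "no wrap",
                     0x40: "alpha source", 0x80: "tint",
                     0x100: "ignore alpha", 0x200: "RGB tint"}

    def describe_mapflags(flags):
        return [name for bit, name in MAPFLAG_NAMES.items() if flags & bit]

    # e.g. describe_mapflags(0x42) -> ['mirror', 'alpha source']
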
+ if mxtex and not primary_tex: + material_chunk.add_subchunk(make_texture_chunk(MAT_DIFFUSEMAP, mxtex, mxpct)) + primary_tex = True + + if wrap.specular_tint_texture: + spec = [wrap.specular_tint_texture] s_pct = material.specular_intensity matmap = make_material_texture_chunk(MAT_SPECMAP, spec, s_pct) if matmap: @@ -732,19 +768,12 @@ def make_material_chunk(material, image): # Make sure no textures are lost. Everything that doesn't fit # into a channel is exported as secondary texture - diffuse = [] - - for link in wrap.material.node_tree.links: - if link.from_node.type == 'TEX_IMAGE' and link.to_node.type in {'MIX', 'MIX_RGB'}: - diffuse = [link.from_node.image] - - if diffuse: - if not primary_tex: - matmap = make_texture_chunk(MAT_DIFFUSEMAP, diffuse) - else: - matmap = make_texture_chunk(MAT_TEX2MAP, diffuse) - if matmap: - material_chunk.add_subchunk(matmap) + for link in mtlks: + mxsecondary = link.from_node if link.from_node.type == 'TEX_IMAGE' and link.to_socket.identifier in {'Color1', 'A_Color'} else False + if mxsecondary: + matmap = make_texture_chunk(MAT_TEX2MAP, [mxsecondary], 1 - mxpct) + if primary_tex and matmap: + material_chunk.add_subchunk(matmap) else: shading.add_variable("shading", _3ds_ushort(2)) # Gouraud shading @@ -810,7 +839,7 @@ def extract_triangles(mesh): """Flag 0x1 sets CA edge visible, Flag 0x2 sets BC edge visible, Flag 0x4 sets AB edge visible Flag 0x8 indicates a U axis texture wrap seam and Flag 0x10 indicates a V axis texture wrap seam - In Blender we use the edge CA, BC, and AB flags for sharp edges flags""" + In Blender we use the edge CA, BC, and AB flags for sharp edges flags.""" a_b = mesh.edges[mesh.loops[face.loops[0]].edge_index] b_c = mesh.edges[mesh.loops[face.loops[1]].edge_index] c_a = mesh.edges[mesh.loops[face.loops[2]].edge_index] @@ -848,7 +877,7 @@ def remove_face_uv(verts, tri_list): need to be converted to vertex uv coordinates. 
That means that vertices need to be duplicated when there are multiple uv coordinates per vertex.""" - # Initialize a list of UniqueLists, one per vertex + # Initialize a list of UniqueUVs, one per vertex unique_uvs = [{} for i in range(len(verts))] # For each face uv coordinate, add it to the UniqueList of the vertex @@ -883,7 +912,6 @@ def remove_face_uv(verts, tri_list): vert_array.add(pt) # Add the uv coordinate to the uv array, this for loop does not give # uv's ordered by ii, so we create a new map and add the uv's later - # uv_array.add(uv_3ds) uvmap[ii] = uv_3ds # Add uv's in the correct order and add coordinates to the uv array @@ -940,7 +968,6 @@ def make_faces_chunk(tri_list, mesh, materialDict): unique_mats[ma, img] = _3ds_string(sane_name(name_str)), context_face_array context_face_array.add(_3ds_ushort(i)) - # obj_material_faces[tri.ma].add(_3ds_ushort(i)) face_chunk.add_variable("faces", face_list) for ma_name, ma_faces in unique_mats.values(): @@ -1025,7 +1052,7 @@ def make_mesh_chunk(ob, mesh, matrix, materialDict, translation): matrix_chunk = _3ds_chunk(OBJECT_TRANS_MATRIX) obj_matrix = matrix.transposed().to_3x3() - if ob.parent is None or ob.parent.name not in translation: + if ob.parent is None or (ob.parent.name not in translation): obj_translate = matrix.to_translation() else: # Calculate child matrix translation relative to parent @@ -1049,12 +1076,25 @@ def make_mesh_chunk(ob, mesh, matrix, materialDict, translation): return mesh_chunk +def calc_target(posi, tilt=0.0, pan=0.0): + """Calculate target position for cameras and spotlights.""" + adjacent = math.radians(90) + turn = 0.0 if abs(pan) < adjacent else -0.0 + lean = 0.0 if abs(tilt) > adjacent else -0.0 + diagonal = math.sqrt(pow(posi.x ,2) + pow(posi.y ,2)) + target_x = math.copysign(posi.x + (posi.y * math.tan(pan)), pan) + target_y = math.copysign(posi.y + (posi.x * math.tan(adjacent - pan)), turn) + target_z = math.copysign(posi.z + diagonal * math.tan(adjacent - tilt), lean) + + return target_x, target_y, target_z + + ################# # KEYFRAME DATA # ################# def make_kfdata(revision, start=0, stop=100, curtime=0): - """Make the basic keyframe data chunk""" + """Make the basic keyframe data chunk.""" kfdata = _3ds_chunk(KFDATA) kfhdr = _3ds_chunk(KFDATA_KFHDR) @@ -1074,20 +1114,22 @@ def make_kfdata(revision, start=0, stop=100, curtime=0): kfdata.add_subchunk(kfcurtime) return kfdata + def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): - """Make a chunk for track data. Depending on the ID, this will - construct a position, rotation, scale, roll, color or fov track.""" + """Make a chunk for track data. 
Depending on the ID, this will construct + a position, rotation, scale, roll, color, fov, hotspot or falloff track.""" track_chunk = _3ds_chunk(ID) if ID in {POS_TRACK_TAG, ROT_TRACK_TAG, SCL_TRACK_TAG, ROLL_TRACK_TAG} and ob.animation_data and ob.animation_data.action: action = ob.animation_data.action if action.fcurves: fcurves = action.fcurves + fcurves.update() kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] nkeys = len(kframes) if not 0 in kframes: kframes.append(0) - nkeys = nkeys + 1 + nkeys += 1 kframes = sorted(set(kframes)) track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) @@ -1096,51 +1138,54 @@ def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): if ID == POS_TRACK_TAG: # Position for i, frame in enumerate(kframes): - position = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'location'] - if not position: - position.append(ob_pos) + pos_track = [fc for fc in fcurves if fc is not None and fc.data_path == 'location'] + pos_x = next((tc.evaluate(frame) for tc in pos_track if tc.array_index == 0), ob_pos.x) + pos_y = next((tc.evaluate(frame) for tc in pos_track if tc.array_index == 1), ob_pos.y) + pos_z = next((tc.evaluate(frame) for tc in pos_track if tc.array_index == 2), ob_pos.z) + pos = ob_size @ mathutils.Vector((pos_x, pos_y, pos_z)) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) - track_chunk.add_variable("position", _3ds_point_3d(position)) + track_chunk.add_variable("position", _3ds_point_3d((pos.x, pos.y, pos.z))) elif ID == ROT_TRACK_TAG: # Rotation for i, frame in enumerate(kframes): - rotation = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] - if not rotation: - rotation.append(ob_rot) - quat = mathutils.Euler(rotation).to_quaternion() - axis_angle = quat.angle, quat.axis[0], quat.axis[1], quat.axis[2] + rot_track = [fc for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] + rot_x = next((tc.evaluate(frame) for tc in rot_track if tc.array_index == 0), ob_rot.x) + rot_y = next((tc.evaluate(frame) for tc in rot_track if tc.array_index == 1), ob_rot.y) + rot_z = next((tc.evaluate(frame) for tc in rot_track if tc.array_index == 2), ob_rot.z) + quat = mathutils.Euler((rot_x, rot_y, rot_z)).to_quaternion().inverted() track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) - track_chunk.add_variable("rotation", _3ds_point_4d(axis_angle)) + track_chunk.add_variable("rotation", _3ds_point_4d((quat.angle, quat.axis.x, quat.axis.y, quat.axis.z))) elif ID == SCL_TRACK_TAG: # Scale for i, frame in enumerate(kframes): - size = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'scale'] - if not size: - size.append(ob_size) + scale_track = [fc for fc in fcurves if fc is not None and fc.data_path == 'scale'] + size_x = next((tc.evaluate(frame) for tc in scale_track if tc.array_index == 0), ob_size.x) + size_y = next((tc.evaluate(frame) for tc in scale_track if tc.array_index == 1), ob_size.y) + size_z = next((tc.evaluate(frame) for tc in scale_track if tc.array_index == 2), ob_size.z) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) - track_chunk.add_variable("scale", _3ds_point_3d(size)) + track_chunk.add_variable("scale", _3ds_point_3d((size_x, 
size_y, size_z))) elif ID == ROLL_TRACK_TAG: # Roll for i, frame in enumerate(kframes): - roll = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] - if not roll: - roll.append(ob_rot) + roll_track = [fc for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] + roll = next((tc.evaluate(frame) for tc in roll_track if tc.array_index == 1), ob_rot.y) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) - track_chunk.add_variable("roll", _3ds_float(round(math.degrees(roll[1]), 4))) + track_chunk.add_variable("roll", _3ds_float(round(math.degrees(roll), 4))) elif ID in {COL_TRACK_TAG, FOV_TRACK_TAG, HOTSPOT_TRACK_TAG, FALLOFF_TRACK_TAG} and ob.data.animation_data and ob.data.animation_data.action: action = ob.data.animation_data.action if action.fcurves: fcurves = action.fcurves + fcurves.update() kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] nkeys = len(kframes) if not 0 in kframes: kframes.append(0) - nkeys = nkeys + 1 + nkeys += 1 kframes = sorted(set(kframes)) track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) @@ -1151,40 +1196,34 @@ def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): for i, frame in enumerate(kframes): color = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'color'] if not color: - color.append(ob.data.color[:3]) + color = ob.data.color[:3] track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) track_chunk.add_variable("color", _3ds_float_color(color)) elif ID == FOV_TRACK_TAG: # Field of view for i, frame in enumerate(kframes): - lens = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'lens'] - if not lens: - lens.append(ob.data.lens) - fov = 2 * math.atan(ob.data.sensor_width/(2*lens[0])) + lens = next((fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'lens'), ob.data.lens) + fov = 2 * math.atan(ob.data.sensor_width / (2 * lens)) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) track_chunk.add_variable("fov", _3ds_float(round(math.degrees(fov), 4))) elif ID == HOTSPOT_TRACK_TAG: # Hotspot - beam_angle = math.degrees(ob.data.spot_size) for i, frame in enumerate(kframes): - blend = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_blend'] - if not blend: - blend.append(ob.data.spot_blend) - hot_spot = beam_angle-(blend[0]*math.floor(beam_angle)) + beamsize = next((fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_size'), ob.data.spot_size) + blend = next((fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_blend'), ob.data.spot_blend) + hot_spot = math.degrees(beamsize) - (blend * math.floor(math.degrees(beamsize))) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) track_chunk.add_variable("hotspot", _3ds_float(round(hot_spot, 4))) elif ID == FALLOFF_TRACK_TAG: # Falloff for i, frame in enumerate(kframes): - fall_off = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_size'] - if not fall_off: - fall_off.append(ob.data.spot_size) + fall_off = next((fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_size'), 
ob.data.spot_size) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) - track_chunk.add_variable("falloff", _3ds_float(round(math.degrees(fall_off[0]), 4))) + track_chunk.add_variable("falloff", _3ds_float(round(math.degrees(fall_off), 4))) else: track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) # Based on observation default flag is 0x40 @@ -1200,23 +1239,24 @@ def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): track_chunk.add_variable("position", _3ds_point_3d(ob_pos)) elif ID == ROT_TRACK_TAG: # Rotation (angle first [radians], followed by axis) - track_chunk.add_variable("rotation", _3ds_point_4d((ob_rot.angle, ob_rot.axis[0], ob_rot.axis[1], ob_rot.axis[2]))) - + quat = ob_rot.to_quaternion().inverted() + track_chunk.add_variable("rotation", _3ds_point_4d((quat.angle, quat.axis.x, quat.axis.y, quat.axis.z))) + elif ID == SCL_TRACK_TAG: # Scale vector track_chunk.add_variable("scale", _3ds_point_3d(ob_size)) elif ID == ROLL_TRACK_TAG: # Roll angle - track_chunk.add_variable("roll", _3ds_float(round(math.degrees(ob.rotation_euler[1]), 4))) + track_chunk.add_variable("roll", _3ds_float(round(math.degrees(ob_rot.y), 4))) elif ID == COL_TRACK_TAG: # Color values - track_chunk.add_variable("color", _3ds_float_color(ob.data.color)) + track_chunk.add_variable("color", _3ds_float_color(ob.data.color[:3])) elif ID == FOV_TRACK_TAG: # Field of view track_chunk.add_variable("fov", _3ds_float(round(math.degrees(ob.data.angle), 4))) elif ID == HOTSPOT_TRACK_TAG: # Hotspot beam_angle = math.degrees(ob.data.spot_size) - track_chunk.add_variable("hotspot", _3ds_float(round(beam_angle-(ob.data.spot_blend*math.floor(beam_angle)), 4))) + track_chunk.add_variable("hotspot", _3ds_float(round(beam_angle - (ob.data.spot_blend * math.floor(beam_angle)), 4))) elif ID == FALLOFF_TRACK_TAG: # Falloff track_chunk.add_variable("falloff", _3ds_float(round(math.degrees(ob.data.spot_size), 4))) @@ -1224,7 +1264,7 @@ def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): return track_chunk -def make_object_node(ob, translation, rotation, scale): +def make_object_node(ob, translation, rotation, scale, name_id): """Make a node chunk for a Blender object. Takes Blender object as parameter. 
Blender Empty objects are converted to dummy nodes.""" @@ -1238,6 +1278,11 @@ def make_object_node(ob, translation, rotation, scale): else: # Main object node chunk obj_node = _3ds_chunk(OBJECT_NODE_TAG) + # Chunk for the object ID from name_id dictionary: + obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID) + obj_id_chunk.add_variable("node_id", _3ds_ushort(name_id[name])) + obj_node.add_subchunk(obj_id_chunk) + # Object node header with object name obj_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR) parent = ob.parent @@ -1252,8 +1297,8 @@ def make_object_node(ob, translation, rotation, scale): obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name))) obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0x0040)) - # Flags2 defines bit 0x01 for display path, bit 0x02 use autosmooth, bit 0x04 object frozen, - # bit 0x10 for motion blur, bit 0x20 for material morph and bit 0x40 for mesh morph + """Flags2 defines 0x01 for display path, 0x02 use autosmooth, 0x04 object frozen, + 0x10 for motion blur, 0x20 for material morph and bit 0x40 for mesh morph.""" if ob.type == 'MESH' and ob.data.use_auto_smooth: obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0x02)) else: @@ -1263,16 +1308,22 @@ def make_object_node(ob, translation, rotation, scale): ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX # Check parent-child relationships: - if parent is None or parent.name not in name_to_id: + if parent is None or parent.name not in name_id: # If no parent, or parents name is not in dictionary, ID becomes -1: obj_node_header_chunk.add_variable("parent", _3ds_ushort(-1)) - else: # Get the parent's ID from the name_to_id dictionary: - obj_node_header_chunk.add_variable("parent", _3ds_ushort(name_to_id[parent.name])) + else: # Get the parent's ID from the name_id dictionary: + obj_node_header_chunk.add_variable("parent", _3ds_ushort(name_id[parent.name])) ''' # Add subchunk for node header obj_node.add_subchunk(obj_node_header_chunk) + # Alternatively use PARENT_NAME chunk for hierachy + if parent is not None and (parent.name in name_id): + obj_parent_name_chunk = _3ds_chunk(OBJECT_PARENT_NAME) + obj_parent_name_chunk.add_variable("parent", _3ds_string(sane_name(parent.name))) + obj_node.add_subchunk(obj_parent_name_chunk) + # Empty objects need to have an extra chunk for the instance name if ob.type == 'EMPTY': # Will use a real object name for empties for now obj_instance_name_chunk = _3ds_chunk(OBJECT_INSTANCE_NAME) @@ -1297,18 +1348,19 @@ def make_object_node(ob, translation, rotation, scale): obj_morph_smooth.add_variable("angle", _3ds_float(round(ob.data.auto_smooth_angle, 6))) obj_node.add_subchunk(obj_morph_smooth) - # Add track chunks for color, position, rotation and scale - if parent is None: + # Add track chunks for position, rotation, size + ob_scale = scale[name] # and collect masterscale + if parent is None or (parent.name not in name_id): ob_pos = translation[name] ob_rot = rotation[name] - ob_size = scale[name] + ob_size = ob.scale else: # Calculate child position and rotation of the object center, no scale applied ob_pos = translation[name] - translation[parent.name] - ob_rot = rotation[name].cross(rotation[parent.name].copy().inverted()) - ob_size = (1.0, 1.0, 1.0) + ob_rot = rotation[name].to_quaternion().cross(rotation[parent.name].to_quaternion().copy().inverted()).to_euler() + ob_size = mathutils.Vector((1.0, 1.0, 1.0)) - obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, 
ob, ob_pos, ob_rot, ob_scale)) if ob.type in {'MESH', 'EMPTY'}: obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) @@ -1326,15 +1378,21 @@ def make_object_node(ob, translation, rotation, scale): return obj_node -def make_target_node(ob, translation, rotation, scale): - """Make a target chunk for light and camera objects""" +def make_target_node(ob, translation, rotation, scale, name_id): + """Make a target chunk for light and camera objects.""" name = ob.name - if ob.type == 'CAMERA': #Add camera target + name_id["ø " + name] = len(name_id) + if ob.type == 'CAMERA': # Add camera target tar_node = _3ds_chunk(TARGET_NODE_TAG) elif ob.type == 'LIGHT': # Add spot target tar_node = _3ds_chunk(LTARGET_NODE_TAG) + # Chunk for the object ID from name_id dictionary: + tar_id_chunk = _3ds_chunk(OBJECT_NODE_ID) + tar_id_chunk.add_variable("node_id", _3ds_ushort(name_id[name])) + tar_node.add_subchunk(tar_id_chunk) + # Object node header with object name tar_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR) # Targets get the same name as the object, flags1 is usually 0x0010 and parent ROOT_OBJECT @@ -1348,14 +1406,10 @@ def make_target_node(ob, translation, rotation, scale): # Calculate target position ob_pos = translation[name] - ob_rot = rotation[name].to_euler() - ob_size = scale[name] - - diagonal = math.copysign(math.sqrt(pow(ob_pos[0],2)+pow(ob_pos[1],2)), ob_pos[1]) - target_x = ob_pos[0]+(ob_pos[1]*math.tan(ob_rot[2])) - target_y = ob_pos[1]+(ob_pos[0]*math.tan(math.radians(90)-ob_rot[2])) - target_z = -1*diagonal*math.tan(math.radians(90)-ob_rot[0]) - + ob_rot = rotation[name] + ob_scale = scale[name] + target_pos = calc_target(ob_pos, ob_rot.x, ob_rot.z) + # Add track chunks for target position track_chunk = _3ds_chunk(POS_TRACK_TAG) @@ -1363,11 +1417,12 @@ def make_target_node(ob, translation, rotation, scale): action = ob.animation_data.action if action.fcurves: fcurves = action.fcurves + fcurves.update() kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] nkeys = len(kframes) if not 0 in kframes: kframes.append(0) - nkeys = nkeys + 1 + nkeys += 1 kframes = sorted(set(kframes)) track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) @@ -1375,21 +1430,18 @@ def make_target_node(ob, translation, rotation, scale): track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) for i, frame in enumerate(kframes): - target_pos = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'location'] - target_rot = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] - if not target_pos: - target_pos.append(ob_pos) - if not target_rot: - target_rot.insert(0, ob_rot.x) - target_rot.insert(1, ob_rot.y) - target_rot.insert(2, ob_rot.z) - diagonal = math.copysign(math.sqrt(pow(target_pos[0],2)+pow(target_pos[1],2)), target_pos[1]) - target_x = target_pos[0]+(target_pos[1]*math.tan(target_rot[2])) - target_y = target_pos[1]+(target_pos[0]*math.tan(math.radians(90)-target_rot[2])) - target_z = -1*diagonal*math.tan(math.radians(90)-target_rot[0]) + loc_target = [fc for fc in fcurves if fc is not None and fc.data_path == 'location'] + loc_x = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 0), ob_pos.x) + loc_y = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 1), ob_pos.y) + loc_z = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 2), ob_pos.z) + rot_target 
= [fc for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] + rot_x = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 0), ob_rot.x) + rot_z = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 2), ob_rot.z) + target_distance = ob_scale @ mathutils.Vector((loc_x, loc_y, loc_z)) + target_pos = calc_target(target_distance, rot_x, rot_z) track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) - track_chunk.add_variable("position", _3ds_point_3d((target_x, target_y, target_z))) + track_chunk.add_variable("position", _3ds_point_3d(target_pos)) else: # Track header track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) # Based on observation default flag is 0x40 @@ -1399,18 +1451,25 @@ def make_target_node(ob, translation, rotation, scale): # Keyframe header track_chunk.add_variable("tcb_frame", _3ds_uint(0)) track_chunk.add_variable("tcb_flags", _3ds_ushort()) - track_chunk.add_variable("position", _3ds_point_3d((target_x, target_y, target_z))) - + track_chunk.add_variable("position", _3ds_point_3d(target_pos)) + tar_node.add_subchunk(track_chunk) - + return tar_node def make_ambient_node(world): - amb_color = world.color + """Make an ambient node for the world color, if the color is animated.""" + + amb_color = world.color[:3] amb_node = _3ds_chunk(AMBIENT_NODE_TAG) track_chunk = _3ds_chunk(COL_TRACK_TAG) + # Chunk for the ambient ID is ROOT_OBJECT + amb_id_chunk = _3ds_chunk(OBJECT_NODE_ID) + amb_id_chunk.add_variable("node_id", _3ds_ushort(ROOT_OBJECT)) + amb_node.add_subchunk(amb_id_chunk) + # Object node header, name is "$AMBIENT$" for ambient nodes amb_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR) amb_node_header_chunk.add_variable("name", _3ds_string(b"$AMBIENT$")) @@ -1418,12 +1477,20 @@ def make_ambient_node(world): amb_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) amb_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT)) amb_node.add_subchunk(amb_node_header_chunk) - - if world.animation_data.action: - action = world.animation_data.action - if action.fcurves: + + if world.use_nodes and world.node_tree.animation_data.action: + ambioutput = 'EMISSION' ,'MIX_SHADER', 'WORLD_OUTPUT' + action = world.node_tree.animation_data.action + links = world.node_tree.links + ambilinks = [lk for lk in links if lk.from_node.type in {'EMISSION', 'RGB'} and lk.to_node.type in ambioutput] + if ambilinks and action.fcurves: fcurves = action.fcurves + fcurves.update() + emission = next((lk.from_socket.node for lk in ambilinks if lk.to_node.type in ambioutput), False) + ambinode = next((lk.from_socket.node for lk in ambilinks if lk.to_node.type == 'EMISSION'), emission) kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + ambipath = ('nodes[\"RGB\"].outputs[0].default_value' if ambinode and ambinode.type == 'RGB' else + 'nodes[\"Emission\"].inputs[0].default_value') nkeys = len(kframes) if not 0 in kframes: kframes.append(0) @@ -1434,14 +1501,38 @@ def make_ambient_node(world): track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + for i, frame in enumerate(kframes): + ambient = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == ambipath] + if not ambient: + ambient = amb_color + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("color", 
_3ds_float_color(ambient[:3])) + + elif world.animation_data.action: + action = world.animation_data.action + if action.fcurves: + fcurves = action.fcurves + fcurves.update() + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys += 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + for i, frame in enumerate(kframes): ambient = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'color'] if not ambient: - ambient.append(world.color) + ambient = amb_color track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) track_chunk.add_variable("tcb_flags", _3ds_ushort()) track_chunk.add_variable("color", _3ds_float_color(ambient)) - + else: # Track header track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) track_chunk.add_variable("frame_start", _3ds_uint(0)) @@ -1451,9 +1542,9 @@ def make_ambient_node(world): track_chunk.add_variable("tcb_frame", _3ds_uint(0)) track_chunk.add_variable("tcb_flags", _3ds_ushort()) track_chunk.add_variable("color", _3ds_float_color(amb_color)) - + amb_node.add_subchunk(track_chunk) - + return amb_node @@ -1461,22 +1552,41 @@ def make_ambient_node(world): # EXPORT # ########## -def save(operator, - context, filepath="", - use_selection=False, - write_keyframe=False, - global_matrix=None, - ): - +def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, use_selection=False, + object_filter=None, use_hierarchy=False, use_keyframes=False, global_matrix=None, use_cursor=False): """Save the Blender scene to a 3ds file.""" + # Time the export duration = time.time() + context.window.cursor_set('WAIT') scene = context.scene layer = context.view_layer depsgraph = context.evaluated_depsgraph_get() world = scene.world + unit_measure = 1.0 + if use_scene_unit: + unit_length = scene.unit_settings.length_unit + if unit_length == 'MILES': + unit_measure = 0.000621371 + elif unit_length == 'KILOMETERS': + unit_measure = 0.001 + elif unit_length == 'FEET': + unit_measure = 3.280839895 + elif unit_length == 'INCHES': + unit_measure = 39.37007874 + elif unit_length == 'CENTIMETERS': + unit_measure = 100 + elif unit_length == 'MILLIMETERS': + unit_measure = 1000 + elif unit_length == 'THOU': + unit_measure = 39370.07874 + elif unit_length == 'MICROMETERS': + unit_measure = 1000000 + + mtx_scale = mathutils.Matrix.Scale((scale_factor * unit_measure),4) + if global_matrix is None: global_matrix = mathutils.Matrix() @@ -1497,37 +1607,22 @@ def save(operator, mesh_version.add_variable("mesh", _3ds_uint(3)) object_info.add_subchunk(mesh_version) - # Add MASTERSCALE element - mscale = _3ds_chunk(MASTERSCALE) - mscale.add_variable("scale", _3ds_float(1)) - object_info.add_subchunk(mscale) - # Init main keyframe data chunk - if write_keyframe: + if use_keyframes: revision = 0x0005 stop = scene.frame_end start = scene.frame_start curtime = scene.frame_current kfdata = make_kfdata(revision, start, stop, curtime) - # Add AMBIENT color - if world is not None: - ambient_chunk = _3ds_chunk(AMBIENTLIGHT) - ambient_light = _3ds_chunk(RGB) - ambient_light.add_variable("ambient", _3ds_float_color(scene.world.color)) - ambient_chunk.add_subchunk(ambient_light) - 
object_info.add_subchunk(ambient_chunk) - if write_keyframe and world.animation_data: - kfdata.add_subchunk(make_ambient_node(world)) - # Make a list of all materials used in the selected meshes (use dictionary, each material is added once) materialDict = {} mesh_objects = [] if use_selection: - objects = [ob for ob in scene.objects if ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)] + objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)] else: - objects = [ob for ob in scene.objects if ob.visible_get(view_layer=layer)] + objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer)] empty_objects = [ob for ob in objects if ob.type == 'EMPTY'] light_objects = [ob for ob in objects if ob.type == 'LIGHT'] @@ -1553,6 +1648,7 @@ def save(operator, if data: matrix = global_matrix @ mtx data.transform(matrix) + data.transform(mtx_scale) mesh_objects.append((ob_derived, data, matrix)) ma_ls = data.materials ma_ls_len = len(ma_ls) @@ -1587,34 +1683,146 @@ def save(operator, f.material_index = 0 - # Make material chunks for all materials used in the meshes + # Make MATERIAL chunks for all materials used in the meshes for ma_image in materialDict.values(): object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1])) + # Add MASTERSCALE element + mscale = _3ds_chunk(MASTERSCALE) + mscale.add_variable("scale", _3ds_float(1.0)) + object_info.add_subchunk(mscale) + + # Add 3D cursor location + if use_cursor: + cursor_chunk = _3ds_chunk(O_CONSTS) + cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location)) + object_info.add_subchunk(cursor_chunk) + + # Add AMBIENT color + if world is not None and 'WORLD' in object_filter: + ambient_chunk = _3ds_chunk(AMBIENTLIGHT) + ambient_light = _3ds_chunk(RGB) + ambient_light.add_variable("ambient", _3ds_float_color(world.color)) + ambient_chunk.add_subchunk(ambient_light) + object_info.add_subchunk(ambient_chunk) + + # Add BACKGROUND and BITMAP + if world.use_nodes: + bgtype = 'BACKGROUND' + ntree = world.node_tree.links + background_color_chunk = _3ds_chunk(RGB) + background_chunk = _3ds_chunk(SOLIDBACKGND) + background_flag = _3ds_chunk(USE_SOLIDBGND) + bgmixer = 'BACKGROUND', 'MIX', 'MIX_RGB' + bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD' + bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT' + bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color) + bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype) + bg_image = next((lk.from_node.image for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False) + gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False) + background_color_chunk.add_variable("color", _3ds_float_color(bg_color)) + background_chunk.add_subchunk(background_color_chunk) + if bg_image and bg_image is not None: + background_image = _3ds_chunk(BITMAP) + background_flag = _3ds_chunk(USE_BITMAP) + background_image.add_variable("image", _3ds_string(sane_name(bg_image.name))) + object_info.add_subchunk(background_image) + object_info.add_subchunk(background_chunk) + + # Add VGRADIENT chunk + if gradient and len(gradient) >= 3: + gradient_chunk = _3ds_chunk(VGRADIENT) + background_flag = _3ds_chunk(USE_VGRADIENT) + 
gradient_chunk.add_variable("midpoint", _3ds_float(gradient[1].position)) + gradient_topcolor_chunk = _3ds_chunk(RGB) + gradient_topcolor_chunk.add_variable("color", _3ds_float_color(gradient[2].color[:3])) + gradient_chunk.add_subchunk(gradient_topcolor_chunk) + gradient_midcolor_chunk = _3ds_chunk(RGB) + gradient_midcolor_chunk.add_variable("color", _3ds_float_color(gradient[1].color[:3])) + gradient_chunk.add_subchunk(gradient_midcolor_chunk) + gradient_lowcolor_chunk = _3ds_chunk(RGB) + gradient_lowcolor_chunk.add_variable("color", _3ds_float_color(gradient[0].color[:3])) + gradient_chunk.add_subchunk(gradient_lowcolor_chunk) + object_info.add_subchunk(gradient_chunk) + object_info.add_subchunk(background_flag) + + # Add FOG + fognode = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_ABSORPTION' and lk.to_socket.node.type in bgshade), False) + if fognode: + fog_chunk = _3ds_chunk(FOG) + fog_color_chunk = _3ds_chunk(RGB) + use_fog_flag = _3ds_chunk(USE_FOG) + fog_density = fognode.inputs['Density'].default_value * 100 + fog_color_chunk.add_variable("color", _3ds_float_color(fognode.inputs[0].default_value[:3])) + fog_chunk.add_variable("nearplane", _3ds_float(world.mist_settings.start)) + fog_chunk.add_variable("nearfog", _3ds_float(fog_density * 0.5)) + fog_chunk.add_variable("farplane", _3ds_float(world.mist_settings.depth)) + fog_chunk.add_variable("farfog", _3ds_float(fog_density + fog_density * 0.5)) + fog_chunk.add_subchunk(fog_color_chunk) + object_info.add_subchunk(fog_chunk) + + # Add LAYER FOG + foglayer = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_SCATTER' and lk.to_socket.node.type in bgshade), False) + if foglayer: + layerfog_flag = 0 + if world.mist_settings.falloff == 'QUADRATIC': + layerfog_flag |= 0x1 + if world.mist_settings.falloff == 'INVERSE_QUADRATIC': + layerfog_flag |= 0x2 + layerfog_chunk = _3ds_chunk(LAYER_FOG) + layerfog_color_chunk = _3ds_chunk(RGB) + use_fog_flag = _3ds_chunk(USE_LAYER_FOG) + layerfog_color_chunk.add_variable("color", _3ds_float_color(foglayer.inputs[0].default_value[:3])) + layerfog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start)) + layerfog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.height)) + layerfog_chunk.add_variable("density", _3ds_float(foglayer.inputs[1].default_value)) + layerfog_chunk.add_variable("flags", _3ds_uint(layerfog_flag)) + layerfog_chunk.add_subchunk(layerfog_color_chunk) + object_info.add_subchunk(layerfog_chunk) + if fognode or foglayer and layer.use_pass_mist: + object_info.add_subchunk(use_fog_flag) + if use_keyframes and world.animation_data or (world.node_tree and world.node_tree.animation_data): + kfdata.add_subchunk(make_ambient_node(world)) + # Collect translation for transformation matrix translation = {} rotation = {} scale = {} # Give all objects a unique ID and build a dictionary from object name to object id - # name_to_id = {} + object_id = {} + name_id = {} for ob, data, matrix in mesh_objects: - translation[ob.name] = ob.location - rotation[ob.name] = ob.rotation_euler.to_quaternion().inverted() - scale[ob.name] = ob.scale - # name_to_id[ob.name]= len(name_to_id) + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() + name_id[ob.name] = len(name_id) + object_id[ob.name] = len(object_id) for ob in empty_objects: - translation[ob.name] = ob.location - rotation[ob.name] = ob.rotation_euler.to_quaternion().inverted() - scale[ob.name] = 
ob.scale - # name_to_id[ob.name]= len(name_to_id) + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() + name_id[ob.name] = len(name_id) + + for ob in light_objects: + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() + name_id[ob.name] = len(name_id) + object_id[ob.name] = len(object_id) + + for ob in camera_objects: + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() + name_id[ob.name] = len(name_id) + object_id[ob.name] = len(object_id) # Create object chunks for all meshes i = 0 for ob, mesh, matrix in mesh_objects: - # Create a new object chunk object_chunk = _3ds_chunk(OBJECT) # Set the object name @@ -1623,109 +1831,171 @@ def save(operator, # Make a mesh chunk out of the mesh object_chunk.add_subchunk(make_mesh_chunk(ob, mesh, matrix, materialDict, translation)) - # Ensure the mesh has no over sized arrays, skip ones that do! + # Add hierachy chunk with ID from object_id dictionary + if use_hierarchy: + obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY) + obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name])) + + # Add parent chunk if object has a parent + if ob.parent is not None and (ob.parent.name in object_id): + obj_parent_chunk = _3ds_chunk(OBJECT_PARENT) + obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name])) + obj_hierarchy_chunk.add_subchunk(obj_parent_chunk) + object_chunk.add_subchunk(obj_hierarchy_chunk) + + # ensure the mesh has no over sized arrays - skip ones that do! # Otherwise we cant write since the array size wont fit into USHORT if object_chunk.validate(): object_info.add_subchunk(object_chunk) else: operator.report({'WARNING'}, "Object %r can't be written into a 3DS file") - # Export kf object node - if write_keyframe: - kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale)) + # Export object node + if use_keyframes: + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) i += i - # Create chunks for all empties, only requires a kf object node - if write_keyframe: + # Create chunks for all empties - only requires a object node + if use_keyframes: for ob in empty_objects: - kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale)) + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) # Create light object chunks for ob in light_objects: object_chunk = _3ds_chunk(OBJECT) - translation[ob.name] = ob.location - rotation[ob.name] = ob.rotation_euler.to_quaternion() - scale[ob.name] = ob.scale - - # Add light data subchunks - light_chunk = _3ds_chunk(OBJECT_LIGHT) + obj_light_chunk = _3ds_chunk(OBJECT_LIGHT) color_float_chunk = _3ds_chunk(RGB) - energy_factor = _3ds_chunk(LIGHT_MULTIPLIER) + light_distance = translation[ob.name] + light_attenuate = _3ds_chunk(LIGHT_ATTENUATE) + light_inner_range = _3ds_chunk(LIGHT_INNER_RANGE) + light_outer_range = _3ds_chunk(LIGHT_OUTER_RANGE) + light_energy_factor = _3ds_chunk(LIGHT_MULTIPLIER) + light_ratio = ob.data.energy if ob.data.type == 'SUN' else ob.data.energy * 0.001 object_chunk.add_variable("light", _3ds_string(sane_name(ob.name))) - light_chunk.add_variable("location", _3ds_point_3d(ob.location)) + obj_light_chunk.add_variable("location", _3ds_point_3d(light_distance)) color_float_chunk.add_variable("color", _3ds_float_color(ob.data.color)) - 
energy_factor.add_variable("energy", _3ds_float(ob.data.energy * 0.001)) - light_chunk.add_subchunk(color_float_chunk) - light_chunk.add_subchunk(energy_factor) + light_outer_range.add_variable("distance", _3ds_float(ob.data.cutoff_distance)) + light_inner_range.add_variable("radius", _3ds_float(ob.data.shadow_soft_size * 100)) + light_energy_factor.add_variable("energy", _3ds_float(light_ratio)) + obj_light_chunk.add_subchunk(color_float_chunk) + obj_light_chunk.add_subchunk(light_outer_range) + obj_light_chunk.add_subchunk(light_inner_range) + obj_light_chunk.add_subchunk(light_energy_factor) + if ob.data.use_custom_distance: + obj_light_chunk.add_subchunk(light_attenuate) if ob.data.type == 'SPOT': cone_angle = math.degrees(ob.data.spot_size) - hotspot = cone_angle - (ob.data.spot_blend * math.floor(cone_angle)) - hypo = math.copysign(math.sqrt(pow(ob.location[0], 2) + pow(ob.location[1], 2)), ob.location[1]) - pos_x = ob.location[0] + (ob.location[1] * math.tan(ob.rotation_euler[2])) - pos_y = ob.location[1] + (ob.location[0] * math.tan(math.radians(90) - ob.rotation_euler[2])) - pos_z = hypo * math.tan(math.radians(90) - ob.rotation_euler[0]) + hot_spot = cone_angle - (ob.data.spot_blend * math.floor(cone_angle)) + spot_pos = calc_target(light_distance, rotation[ob.name].x, rotation[ob.name].z) spotlight_chunk = _3ds_chunk(LIGHT_SPOTLIGHT) spot_roll_chunk = _3ds_chunk(LIGHT_SPOT_ROLL) - spotlight_chunk.add_variable("target", _3ds_point_3d((pos_x, pos_y, pos_z))) - spotlight_chunk.add_variable("hotspot", _3ds_float(round(hotspot, 4))) + spotlight_chunk.add_variable("target", _3ds_point_3d(spot_pos)) + spotlight_chunk.add_variable("hotspot", _3ds_float(round(hot_spot, 4))) spotlight_chunk.add_variable("angle", _3ds_float(round(cone_angle, 4))) - spot_roll_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6))) + spot_roll_chunk.add_variable("roll", _3ds_float(round(rotation[ob.name].y, 6))) spotlight_chunk.add_subchunk(spot_roll_chunk) + if ob.data.use_shadow: + spot_shadow_flag = _3ds_chunk(LIGHT_SPOT_SHADOWED) + spot_shadow_chunk = _3ds_chunk(LIGHT_SPOT_LSHADOW) + spot_shadow_chunk.add_variable("bias", _3ds_float(round(ob.data.shadow_buffer_bias,4))) + spot_shadow_chunk.add_variable("filter", _3ds_float(round((ob.data.shadow_buffer_clip_start * 10),4))) + spot_shadow_chunk.add_variable("buffer", _3ds_ushort(0x200)) + spotlight_chunk.add_subchunk(spot_shadow_flag) + spotlight_chunk.add_subchunk(spot_shadow_chunk) if ob.data.show_cone: spot_cone_chunk = _3ds_chunk(LIGHT_SPOT_SEE_CONE) spotlight_chunk.add_subchunk(spot_cone_chunk) if ob.data.use_square: spot_square_chunk = _3ds_chunk(LIGHT_SPOT_RECTANGLE) spotlight_chunk.add_subchunk(spot_square_chunk) - light_chunk.add_subchunk(spotlight_chunk) + if ob.scale.x and ob.scale.y != 0.0: + spot_aspect_chunk = _3ds_chunk(LIGHT_SPOT_ASPECT) + spot_aspect_chunk.add_variable("aspect", _3ds_float(round((ob.scale.x / ob.scale.y),4))) + spotlight_chunk.add_subchunk(spot_aspect_chunk) + if ob.data.use_nodes: + links = ob.data.node_tree.links + bptype = 'EMISSION' + bpmix = 'MIX', 'MIX_RGB', 'EMISSION' + bptex = 'TEX_IMAGE', 'TEX_ENVIRONMENT' + bpout = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_LIGHT' + bshade = next((lk.from_node.type for lk in links if lk.from_node.type == bptype and lk.to_node.type in bpout), None) + bpnode = next((lk.from_node.type for lk in links if lk.from_node.type in bpmix and lk.to_node.type == bshade), bshade) + bitmap = next((lk.from_node.image for lk in links if lk.from_node.type in bptex and lk.to_node.type == 
bpnode), False)
+            if bitmap and bitmap is not None:
+                spot_projector_chunk = _3ds_chunk(LIGHT_SPOT_PROJECTOR)
+                spot_projector_chunk.add_variable("image", _3ds_string(sane_name(bitmap.name)))
+                spotlight_chunk.add_subchunk(spot_projector_chunk)
+            obj_light_chunk.add_subchunk(spotlight_chunk)
 
-        # Add light to object info
-        object_chunk.add_subchunk(light_chunk)
+        # Add light to object chunk
+        object_chunk.add_subchunk(obj_light_chunk)
+
+        # Add hierarchy chunks with ID from object_id dictionary
+        if use_hierarchy:
+            obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
+            obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
+            obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
+            if ob.parent is not None and (ob.parent.name in object_id):
+                obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
+                obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name]))
+                obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
+            object_chunk.add_subchunk(obj_hierarchy_chunk)
+
+        # Add light object and hierarchy chunks to object info
         object_info.add_subchunk(object_chunk)
 
         # Export light and spotlight target node
-        if write_keyframe:
-            kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale))
+        if use_keyframes:
+            kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id))
             if ob.data.type == 'SPOT':
-                kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale))
+                kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale, name_id))
 
     # Create camera object chunks
     for ob in camera_objects:
         object_chunk = _3ds_chunk(OBJECT)
-        translation[ob.name] = ob.location
-        rotation[ob.name] = ob.rotation_euler.to_quaternion()
-        scale[ob.name] = ob.scale
-
-        # Add camera data subchunks
         camera_chunk = _3ds_chunk(OBJECT_CAMERA)
-        diagonal = math.copysign(math.sqrt(pow(ob.location[0], 2) + pow(ob.location[1], 2)), ob.location[1])
-        focus_x = ob.location[0] + (ob.location[1] * math.tan(ob.rotation_euler[2]))
-        focus_y = ob.location[1] + (ob.location[0] * math.tan(math.radians(90) - ob.rotation_euler[2]))
-        focus_z = diagonal * math.tan(math.radians(90) - ob.rotation_euler[0])
+        crange_chunk = _3ds_chunk(OBJECT_CAM_RANGES)
+        camera_distance = translation[ob.name]
+        camera_target = calc_target(camera_distance, rotation[ob.name].x, rotation[ob.name].z)
         object_chunk.add_variable("camera", _3ds_string(sane_name(ob.name)))
-        camera_chunk.add_variable("location", _3ds_point_3d(ob.location))
-        camera_chunk.add_variable("target", _3ds_point_3d((focus_x, focus_y, focus_z)))
-        camera_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6)))
+        camera_chunk.add_variable("location", _3ds_point_3d(camera_distance))
+        camera_chunk.add_variable("target", _3ds_point_3d(camera_target))
+        camera_chunk.add_variable("roll", _3ds_float(round(rotation[ob.name].y, 6)))
         camera_chunk.add_variable("lens", _3ds_float(ob.data.lens))
+        crange_chunk.add_variable("clipstart", _3ds_float(ob.data.clip_start * 0.1))
+        crange_chunk.add_variable("clipend", _3ds_float(ob.data.clip_end * 0.1))
+        camera_chunk.add_subchunk(crange_chunk)
         object_chunk.add_subchunk(camera_chunk)
+
+        # Add hierarchy chunks with ID from object_id dictionary
+        if use_hierarchy:
+            obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY)
+            obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
+            obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name]))
+            if ob.parent is not None and (ob.parent.name in object_id):
+                obj_parent_chunk = _3ds_chunk(OBJECT_PARENT)
+                obj_parent_chunk.add_variable("parent", 
_3ds_ushort(object_id[ob.parent.name]))
+                obj_hierarchy_chunk.add_subchunk(obj_parent_chunk)
+            object_chunk.add_subchunk(obj_hierarchy_chunk)
+
+        # Add camera object and hierarchy chunks to object info
         object_info.add_subchunk(object_chunk)
 
         # Export camera and target node
-        if write_keyframe:
-            kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale))
-            kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale))
+        if use_keyframes:
+            kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id))
+            kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale, name_id))
 
     # Add main object info chunk to primary chunk
     primary.add_subchunk(object_info)
 
     # Add main keyframe data chunk to primary chunk
-    if write_keyframe:
+    if use_keyframes:
         primary.add_subchunk(kfdata)
 
-    # At this point, the chunk hierarchy is completely built
-    # Check the size
+    # The chunk hierarchy is completely built, now check the size
     primary.get_size()
 
     # Open the file for writing
@@ -1742,6 +2012,7 @@ def save(operator,
     name_mapping.clear()
 
     # Debugging only: report the exporting time
+    context.window.cursor_set('DEFAULT')
    print("3ds export time: %.2f" % (time.time() - duration))
 
     # Debugging only: dump the chunk hierarchy
-- 
2.30.2


From 304174134589baf25efe9c8dfdacf26f7f84f127 Mon Sep 17 00:00:00 2001
From: Sebastian Sille
Date: Sat, 18 Nov 2023 15:03:18 +0100
Subject: [PATCH 14/14] New addon: Import Autodesk .max

Created a new addon for importing materials and meshes from Autodesk .max files.
Blender does not provide any option to import .max files and a lot of people asked for it.
---
 io_import_max.py | 1518 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1518 insertions(+)
 create mode 100644 io_import_max.py

diff --git a/io_import_max.py b/io_import_max.py
new file mode 100644
index 0000000..3952e87
--- /dev/null
+++ b/io_import_max.py
@@ -0,0 +1,1518 @@
+# SPDX-FileCopyrightText: 2023 Sebastian Schrand
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+
+#--- LICENSE ---
+# GNU GPL
+# Import is based on information from the olefile IO source code
+# and the FreeCAD Autodesk 3DS Max importer ImportMAX
+#
+# olefile (formerly OleFileIO_PL) is copyright (c) 2005-2018 Philippe Lagadec
+# (https://www.decalage.info)
+#
+# ImportMAX is copyright (c) 2017-2022 Jens M. Plonka
+# (https://www.github.com/jmplonka/Importer3D)
+
+
+bl_info = {
+    "name": "Import Autodesk MAX (.max)",
+    "author": "Sebastian Sille, Philippe Lagadec, Jens M. 
Plonka", + "version": (1, 0, 0), + "blender": (4, 0, 0), + "location": "File > Import", + "description": "Import 3DSMAX meshes & materials", + "warning": "", + "filepath_url": "", + "category": "Import-Export"} + + +################## +# IMPORT MODULES # +################## + +import io, re +import os, sys, zlib +import struct, array +import time, datetime +import math, mathutils +import bpy, bpy_extras +from bpy_extras import node_shader_utils +from bpy_extras.image_utils import load_image +from bpy_extras.io_utils import axis_conversion +from bpy_extras.io_utils import orientation_helper + +@orientation_helper(axis_forward='Y', axis_up='Z') + +### IMPORT OPERATOR ### +class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): + """Import Autodesk MAX""" + bl_idname = "import_autodesk.max" + bl_label = "Import Autodesk MAX (.max)" + + filename_ext = ".max" + filter_glob: bpy.props.StringProperty(default="*.max", options={'HIDDEN'},) + + def execute(self, context): + keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob")) + global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up,).to_4x4() + keywords["global_matrix"] = global_matrix + + return load(self, context, **keywords) + +### REGISTER ### +def menu_func(self, context): + self.layout.operator(Import_max.bl_idname, text="Autodesk MAX (.max)") + +def register(): + bpy.utils.register_class(Import_max) + bpy.types.TOPBAR_MT_file_import.append(menu_func) + +def unregister(): + bpy.types.TOPBAR_MT_file_import.remove(menu_func) + bpy.utils.unregister_class(Import_max) + + +################### +# DATA STRUCTURES # +################### + +MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1' +WORD_CLSID = "00020900-0000-0000-C000-000000000046" + +MAXREGSECT = 0xFFFFFFFA # (-6) maximum SECT +DIFSECT = 0xFFFFFFFC # (-4) denotes a DIFAT sector in a FAT +FATSECT = 0xFFFFFFFD # (-3) denotes a FAT sector in a FAT +ENDOFCHAIN = 0xFFFFFFFE # (-2) end of a virtual stream chain +FREESECT = 0xFFFFFFFF # (-1) unallocated sector +MAXREGSID = 0xFFFFFFFA # (-6) maximum directory entry ID +NOSTREAM = 0xFFFFFFFF # (-1) unallocated directory entry +UNKNOWN_SIZE = 0x7FFFFFFF +MIN_FILE_SIZE = 1536 + +STGTY_EMPTY = 0 #: empty directory entry +STGTY_STORAGE = 1 #: element is a storage object +STGTY_STREAM = 2 #: element is a stream object +STGTY_LOCKBYTES = 3 #: element is an ILockBytes object +STGTY_PROPERTY = 4 #: element is an IPropertyStorage object +STGTY_ROOT = 5 #: element is a root storage + +VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6; +VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11; +VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17; +VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23; +VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28; +VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64; +VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68; +VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72; +VT_VECTOR=0x1000; + +TYP_NAME = 0x0962 +INVALID_NAME = re.compile('^[0-9].*') +UNPACK_BOX_DATA = struct.Struct('= MIN_FILE_SIZE: + header = filename[:len(MAGIC)] + else: + with open(filename, 'rb') as fp: + header = fp.read(len(MAGIC)) + if header == MAGIC: + return True + else: + return False + + +class MaxStream(io.BytesIO): + + def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize): + unknown_size = False + if size == UNKNOWN_SIZE: + size = len(fat)*sectorsize + unknown_size = 
True + nb_sectors = (size + (sectorsize-1)) // sectorsize + + data = [] + for i in range(nb_sectors): + try: + fp.seek(offset + sectorsize * sect) + except: + break + sector_data = fp.read(sectorsize) + data.append(sector_data) + try: + sect = fat[sect] & 0xFFFFFFFF + except IndexError: + break + data = b"".join(data) + if len(data) >= size: + data = data[:size] + self.size = size + else: + self.size = len(data) + io.BytesIO.__init__(self, data) + + +class MaxFileDirEntry: + STRUCT_DIRENTRY = '<64sHBBIII16sIQQIII' + DIRENTRY_SIZE = 128 + assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE + + def __init__(self, entry, sid, maxfile): + self.sid = sid + self.maxfile = maxfile + self.kids = [] + self.kids_dict = {} + self.used = False + ( + self.name_raw, + self.namelength, + self.entry_type, + self.color, + self.sid_left, + self.sid_right, + self.sid_child, + clsid, + self.dwUserFlags, + self.createTime, + self.modifyTime, + self.isectStart, + self.sizeLow, + self.sizeHigh + ) = struct.unpack(MaxFileDirEntry.STRUCT_DIRENTRY, entry) + + if self.namelength > 64: + self.namelength = 64 + self.name_utf16 = self.name_raw[:(self.namelength - 2)] + self.name = maxfile._decode_utf16_str(self.name_utf16) + # print('DirEntry SID=%d: %s' % (self.sid, repr(self.name))) + if maxfile.sectorsize == 512: + self.size = self.sizeLow + else: + self.size = self.sizeLow + (int(self.sizeHigh) << 32) + self.clsid = _clsid(clsid) + self.is_minifat = False + if self.entry_type in (STGTY_ROOT, STGTY_STREAM) and self.size > 0: + if self.size < maxfile.minisectorcutoff \ + and self.entry_type == STGTY_STREAM: # only streams can be in MiniFAT + self.is_minifat = True + else: + self.is_minifat = False + maxfile._check_duplicate_stream(self.isectStart, self.is_minifat) + self.sect_chain = None + + def build_sect_chain(self, maxfile): + if self.sect_chain: + return + if self.entry_type not in (STGTY_ROOT, STGTY_STREAM) or self.size == 0: + return + self.sect_chain = list() + if self.is_minifat and not maxfile.minifat: + maxfile.loadminifat() + next_sect = self.isectStart + while next_sect != ENDOFCHAIN: + self.sect_chain.append(next_sect) + if self.is_minifat: + next_sect = maxfile.minifat[next_sect] + else: + next_sect = maxfile.fat[next_sect] + + def build_storage_tree(self): + if self.sid_child != NOSTREAM: + self.append_kids(self.sid_child) + self.kids.sort() + + def append_kids(self, child_sid): + if child_sid == NOSTREAM: + return + else: + child = self.maxfile._load_direntry(child_sid) + if child.used: + return + child.used = True + self.append_kids(child.sid_left) + name_lower = child.name.lower() + self.kids.append(child) + self.kids_dict[name_lower] = child + self.append_kids(child.sid_right) + child.build_storage_tree() + + def __eq__(self, other): + return self.name == other.name + + def __lt__(self, other): + return self.name < other.name + + def __ne__(self, other): + return not self.__eq__(other) + + def __le__(self, other): + return self.__eq__(other) or self.__lt__(other) + + +class ImportMaxFile: + + def __init__(self, filename=None, write_mode=False, debug=False): + self.write_mode = write_mode + self._filesize = None + self.byte_order = None + self.directory_fp = None + self.direntries = None + self.dll_version = None + self.fat = None + self.first_difat_sector = None + self.first_dir_sector = None + self.first_mini_fat_sector = None + self.fp = None + self.header_clsid = None + self.header_signature = None + self.metadata = None + self.mini_sector_shift = None + self.mini_sector_size = None + 
self.mini_stream_cutoff_size = None + self.minifat = None + self.minifatsect = None + self.minisectorcutoff = None + self.minisectorsize = None + self.ministream = None + self.minor_version = None + self.nb_sect = None + self.num_difat_sectors = None + self.num_dir_sectors = None + self.num_fat_sectors = None + self.num_mini_fat_sectors = None + self.reserved1 = None + self.reserved2 = None + self.root = None + self.sector_shift = None + self.sector_size = None + self.transaction_signature_number = None + if filename: + self.open(filename, write_mode=write_mode) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def _decode_utf16_str(self, utf16_str, errors='replace'): + unicode_str = utf16_str.decode('UTF-16LE', errors) + return unicode_str + + def open(self, filename, write_mode=False): + self.write_mode = write_mode + if hasattr(filename, 'read'): + self.fp = filename + elif isinstance(filename, bytes) and len(filename) >= MIN_FILE_SIZE: + self.fp = io.BytesIO(filename) + else: + if self.write_mode: + mode = 'r+b' + else: + mode = 'rb' + self.fp = open(filename, mode) + filesize=0 + self.fp.seek(0, os.SEEK_END) + try: + filesize = self.fp.tell() + finally: + self.fp.seek(0) + self._filesize = filesize + self._used_streams_fat = [] + self._used_streams_minifat = [] + header = self.fp.read(512) + + fmt_header = '<8s16sHHHHHHLLLLLLLLLL' + header_size = struct.calcsize(fmt_header) + header1 = header[:header_size] + ( + self.header_signature, + self.header_clsid, + self.minor_version, + self.dll_version, + self.byte_order, + self.sector_shift, + self.mini_sector_shift, + self.reserved1, + self.reserved2, + self.num_dir_sectors, + self.num_fat_sectors, + self.first_dir_sector, + self.transaction_signature_number, + self.mini_stream_cutoff_size, + self.first_mini_fat_sector, + self.num_mini_fat_sectors, + self.first_difat_sector, + self.num_difat_sectors + ) = struct.unpack(fmt_header, header1) + + self.sector_size = 2**self.sector_shift + self.mini_sector_size = 2**self.mini_sector_shift + if self.mini_stream_cutoff_size != 0x1000: + self.mini_stream_cutoff_size = 0x1000 + self.nb_sect = ((filesize + self.sector_size-1) // self.sector_size) - 1 + + # file clsid + self.header_clsid = _clsid(header[8:24]) + self.sectorsize = self.sector_size #1 << i16(header, 30) + self.minisectorsize = self.mini_sector_size #1 << i16(header, 32) + self.minisectorcutoff = self.mini_stream_cutoff_size # i32(header, 56) + self._check_duplicate_stream(self.first_dir_sector) + if self.num_mini_fat_sectors: + self._check_duplicate_stream(self.first_mini_fat_sector) + if self.num_difat_sectors: + self._check_duplicate_stream(self.first_difat_sector) + + # Load file allocation tables + self.loadfat(header) + self.loaddirectory(self.first_dir_sector) + self.minifatsect = self.first_mini_fat_sector + + def close(self): + self.fp.close() + + def _check_duplicate_stream(self, first_sect, minifat=False): + if minifat: + used_streams = self._used_streams_minifat + else: + if first_sect in (DIFSECT,FATSECT,ENDOFCHAIN,FREESECT): + return + used_streams = self._used_streams_fat + if first_sect in used_streams: + pass + else: + used_streams.append(first_sect) + + def sector_array(self, sect): + ary = array.array('I', sect) + if sys.byteorder == 'big': + ary.byteswap() + return ary + + def loadfat_sect(self, sect): + if isinstance(sect, array.array): + fat1 = sect + else: + fat1 = self.sector_array(sect) + isect = None + for isect in fat1: + isect = isect & 0xFFFFFFFF + if isect == 
ENDOFCHAIN or isect == FREESECT: + break + sector = self.getsect(isect) + nextfat = self.sector_array(sector) + self.fat = self.fat + nextfat + return isect + + def loadfat(self, header): + sect = header[76:512] + self.fat = array.array('I') + self.loadfat_sect(sect) + if self.num_difat_sectors != 0: + nb_difat_sectors = (self.sectorsize//4) - 1 + nb_difat = (self.num_fat_sectors - 109 + nb_difat_sectors - 1) // nb_difat_sectors + isect_difat = self.first_difat_sector + for i in range(nb_difat): + sector_difat = self.getsect(isect_difat) + difat = self.sector_array(sector_difat) + self.loadfat_sect(difat[:nb_difat_sectors]) + isect_difat = difat[nb_difat_sectors] + if len(self.fat) > self.nb_sect: + self.fat = self.fat[:self.nb_sect] + + def loadminifat(self): + stream_size = self.num_mini_fat_sectors * self.sector_size + nb_minisectors = (self.root.size + self.mini_sector_size - 1) // self.mini_sector_size + used_size = nb_minisectors * 4 + sect = self._open(self.minifatsect, stream_size, force_FAT=True).read() + self.minifat = self.sector_array(sect) + self.minifat = self.minifat[:nb_minisectors] + + def getsect(self, sect): + try: + self.fp.seek(self.sectorsize * (sect + 1)) + except: + print('MAX sector index out of range') + sector = self.fp.read(self.sectorsize) + return sector + + def loaddirectory(self, sect): + self.directory_fp = self._open(sect, force_FAT=True) + max_entries = self.directory_fp.size // 128 + self.direntries = [None] * max_entries + root_entry = self._load_direntry(0) + self.root = self.direntries[0] + self.root.build_storage_tree() + + def _load_direntry (self, sid): + if self.direntries[sid] is not None: + return self.direntries[sid] + self.directory_fp.seek(sid * 128) + entry = self.directory_fp.read(128) + self.direntries[sid] = MaxFileDirEntry(entry, sid, self) + return self.direntries[sid] + + def _open(self, start, size = UNKNOWN_SIZE, force_FAT=False): + if size < self.minisectorcutoff and not force_FAT: + if not self.ministream: + self.loadminifat() + size_ministream = self.root.size + self.ministream = self._open(self.root.isectStart, + size_ministream, force_FAT=True) + return MaxStream(fp=self.ministream, sect=start, size=size, + offset=0, sectorsize=self.minisectorsize, + fat=self.minifat, filesize=self.ministream.size) + else: + return MaxStream(fp=self.fp, sect=start, size=size, + offset=self.sectorsize, + sectorsize=self.sectorsize, fat=self.fat, + filesize=self._filesize) + + def _list(self, files, prefix, node, streams=True, storages=False): + prefix = prefix + [node.name] + for entry in node.kids: + if entry.entry_type == STGTY_STORAGE: + if storages: + files.append(prefix[1:] + [entry.name]) + self._list(files, prefix, entry, streams, storages) + elif entry.entry_type == STGTY_STREAM: + if streams: + files.append(prefix[1:] + [entry.name]) + + def listdir(self, streams=True, storages=False): + files = [] + self._list(files, [], self.root, streams, storages) + return files + + def _find(self, filename): + if isinstance(filename, str): + filename = filename.split('/') + node = self.root + for name in filename: + for kid in node.kids: + if kid.name.lower() == name.lower(): + break + node = kid + return node.sid + + def openstream(self, filename): + sid = self._find(filename) + entry = self.direntries[sid] + return self._open(entry.isectStart, entry.size) + + def get_type(self, filename): + try: + sid = self._find(filename) + entry = self.direntries[sid] + return entry.entry_type + except: + return False + + def getclsid(self, filename): + sid = 
self._find(filename) + entry = self.direntries[sid] + return entry.clsid + + def get_size(self, filename): + sid = self._find(filename) + entry = self.direntries[sid] + return entry.size + + def get_rootentry_name(self): + return self.root.name + + def getproperties(self, filename, convert_time=False, no_conversion=None): + if no_conversion == None: + no_conversion = [] + streampath = filename + if not isinstance(streampath, str): + streampath = '/'.join(streampath) + fp = self.openstream(filename) + data = {} + try: + stream = fp.read(28) + clsid = _clsid(stream[8:24]) + stream = fp.read(20) + fmtid = _clsid(stream[:16]) + fp.seek(i32(stream, 16)) + stream = b"****" + fp.read(i32(fp.read(4)) - 4) + num_props = i32(stream, 4) + except BaseException as exc: + return data + + num_props = min(num_props, int(len(stream) / 8)) + for i in range(num_props): + property_id = 0 + try: + property_id = i32(stream, 8 + i*8) + offset = i32(stream, 12 + i*8) + property_type = i32(stream, offset) + if property_type == VT_I2: # 16-bit signed integer + value = i16(stream, offset + 4) + if value >= 32768: + value = value - 65536 + elif property_type == VT_UI2: # 2-byte unsigned integer + value = i16(stream, offset + 4) + elif property_type in (VT_I4, VT_INT, VT_ERROR): + value = i32(stream, offset + 4) + elif property_type in (VT_UI4, VT_UINT): # 4-byte unsigned integer + value = i32(stream, offset + 4) + elif property_type in (VT_BSTR, VT_LPSTR): + count = i32(stream, offset + 4) + value = stream[offset + 8:offset + 8 + count - 1] + value = value.replace(b'\x00', b'') + elif property_type == VT_BLOB: + count = i32(stream, offset + 4) + value = stream[offset + 8:offset + 8 + count] + elif property_type == VT_LPWSTR: + count = i32(stream, offset + 4) + value = self._decode_utf16_str(stream[offset + 8:offset + 8 + count*2]) + elif property_type == VT_FILETIME: + value = int(i32(stream, offset + 4)) + (int(i32(stream, offset + 8)) << 32) + if convert_time and property_id not in no_conversion: + _FILETIME_null_date = datetime.datetime(1601, 1, 1, 0, 0, 0) + value = _FILETIME_null_date + datetime.timedelta(microseconds=value // 10) + else: + value = value // 10000000 + elif property_type == VT_UI1: # 1-byte unsigned integer + value = i8(stream[offset + 4]) + elif property_type == VT_CLSID: + value = _clsid(stream[offset + 4:offset + 20]) + elif property_type == VT_CF: + count = i32(stream, offset + 4) + value = stream[offset + 8:offset + 8 + count] + elif property_type == VT_BOOL: + value = bool(i16(stream, offset + 4)) + else: + value = None + + data[property_id] = value + except BaseException as exc: + print('Error while parsing property_id:', exc) + return data + + +class MaxChunk(): + + def __init__(self, types, size, level, number): + self.number = number + self.types = types + self.level = level + self.parent = None + self.previous = None + self.next = None + self.size = size + self.unknown = True + self.format = None + self.data = None + self.resolved = False + + def __str__(self): + if (self.unknown == True): + return "%s[%4x] %04X: %s" %("" * self.level, self.number, self.types, ":".join("%02x"%(c) for c in self.data)) + return "%s[%4x] %04X: %s=%s" %("" * self.level, self.number, self.types, self.format, self.data) + + +class ByteArrayChunk(MaxChunk): + + def __init__(self, types, data, level, number): + MaxChunk.__init__(self, types, data, level, number) + + def set(self, data, name, fmt, start, end): + try: + self.data = struct.unpack(fmt, data[start:end]) + self.format = name + self.unknown = False + 
except Exception as exc: + self.data = data + # print('StructError:', exc, name) + + def set_string(self, data): + try: + self.data = data.decode('UTF-16LE') + self.format = "Str16" + self.unknown = False + except: + self.data = data + + def set_le16_string(self, data): + try: + long, offset = get_long(data, 0) + self.data = data[offset:offset + l * 2].decode('utf-16-le') + if (self.data[-1] == b'\0'): + self.data = self.data[0:-1] + self.format = "LStr16" + self.unknown = False + except: + self.data = data + + def set_data(self, data): + if (self.types in [0x0340, 0x4001, 0x0456, 0x0962]): + self.set_string(data) + elif (self.types in [0x2034, 0x2035]): + self.set(data, "ints", '<'+'I'*int(len(data) / 4), 0, len(data)) + elif (self.types in [0x2501, 0x2503, 0x2504, 0x2505, 0x2511]): + self.set(data, "floats", '<'+'f'*int(len(data) / 4), 0, len(data)) + elif (self.types == 0x2510): + self.set(data, "struct", '<'+'f'*int(len(data) / 4 - 1) + 'I', 0, len(data)) + elif (self.types == 0x0100): + self.set(data, "float", ' 3): + return get_rotation(refs[0]) + elif (uid == 0x3A90416731381913): # Rotation Wire + return get_rotation(get_references(pos)[0]) + if (rotation): + mtx = mathutils.Matrix.Rotation(rotation.angle, 4, rotation.axis) + return mtx + + +def get_scale(pos): + mtx = mathutils.Matrix.Identity(4) + if (pos): + uid = get_guid(pos) + if (uid == 0x2010): # Bezier Scale + scale = pos.get_first(0x2501) + if (scale is None): + scale = pos.get_first(0x2505) + pos = scale.data + elif (uid == 0x0000000000442315): # TCB Zoom + scale = pos.get_first(0x2501) + if (scale is None): + scale = pos.get_first(0x2505) + pos = scale.data + elif (uid == 0xFEEE238B118F7C01): # ScaleXYZ + pos = get_point_3d(pos, 1.0) + else: + return mtx + mtx = mathutils.Matrix.Diagonal(pos[:3]).to_4x4() + return mtx + + +def create_matrix(prc): + mtx = mathutils.Matrix.Identity(4) + pos = rot = scl = None + uid = get_guid(prc) + if (uid == 0x2005): # Position/Rotation/Scale + pos = get_position(get_references(prc)[0]) + rot = get_rotation(get_references(prc)[1]) + scl = get_scale(get_references(prc)[2]) + elif (uid == 0x9154): # BipSlave Control + biped_sub_anim = get_references(prc)[2] + refs = get_references(biped_sub_anim) + scl = get_scale(get_references(refs[1])[0]) + rot = get_rotation(get_references(refs[2])[0]) + pos = get_position(get_references(refs[3])[0]) + if (pos is not None): + mtx = pos @ mtx + if (rot is not None): + mtx = rot @ mtx + if (scl is not None): + mtx = scl @ mtx + return mtx + + +def get_property(properties, idx): + for child in properties.children: + if (child.types & 0x100E): + if (get_short(child.data, 0)[0] == idx): + return child + return None + + +def get_color(colors, idx): + prop = get_property(colors, idx) + if (prop is not None): + siz = 15 if (len(prop.data) > 23) else 11 + col, offset = get_floats(prop.data, siz, 3) + return (col[0], col[1], col[2]) + return None + + +def get_float(colors, idx): + prop = get_property(colors, idx) + if (prop is not None): + fl, offset = get_float(prop.data, 15) + return fl + return None + + +def get_standard_material(refs): + material = None + try: + if (len(refs) > 2): + colors = refs[2] + parameters = get_references(colors)[0] + material = Material() + material.set('ambient', get_color(parameters, 0x00)) + material.set('diffuse', get_color(parameters, 0x01)) + material.set('specular', get_color(parameters, 0x02)) + material.set('emissive', get_color(parameters, 0x08)) + material.set('shinines', get_float(parameters, 0x0A)) + transparency = 
refs[4] # ParameterBlock2 + material.set('transparency', get_float(transparency, 0x02)) + except: + pass + return material + + +def get_vray_material(vry): + material = Material() + try: + material.set('diffuse', get_color(vry, 0x01)) + material.set('ambient', get_color(vry, 0x02)) + material.set('specular', get_color(vry, 0x05)) + material.set('emissive', get_color(vry, 0x05)) + material.set('shinines', get_float(vry, 0x0B)) + material.set('transparency', get_float(vry, 0x02)) + except: + pass + return material + + +def get_arch_material(ad): + material = Material() + try: + material.set('diffuse', get_color(ad, 0x1A)) + material.set('ambient', get_color(ad, 0x02)) + material.set('specular', get_color(ad, 0x05)) + material.set('emissive', get_color(ad, 0x05)) + material.set('shinines', get_float(ad, 0x0B)) + material.set('transparency', get_float(ad, 0x02)) + except: + pass + return material + + +def adjust_material(obj, mat): + material = None + if (mat is not None): + uid = get_guid(mat) + if (uid == 0x0002): # Standard + refs = get_references(mat) + material = get_standard_material(refs) + elif (uid == 0x0000000000000200): # Multi/Sub-Object + refs = get_references(mat) + material = adjust_material(obj, refs[-1]) + elif (uid == 0x7034695C37BF3F2F): # VRayMtl + refs = get_reference(mat) + material = get_vray_material(refs[1]) + elif (uid == 0x4A16365470B05735): # Arch + refs = get_references(mat) + material = get_arch_material(refs[0]) + if (obj is not None) and (material is not None): + objMaterial = bpy.data.materials.new(get_class_name(mat)) + obj.data.materials.append(objMaterial) + objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) + objMaterial.specular_color[:3] = material.get('specular', (0,0,0)) + objMaterial.roughness = 1.0 - material.get('shinines', 0.6) + + +def create_shape(context, pts, indices, node, key, prc, mat): + name = node.get_first(TYP_NAME).data + shape = bpy.data.meshes.new(name) + if (key is not None): + name = "%s_%d" %(name, key) + mtx = create_matrix(prc) + data = [] + if (pts): + loopstart = [] + looplines = loop = 0 + nbr_faces = len(indices) + for fid in range(nbr_faces): + polyface = indices[fid] + looplines += len(polyface) + shape.vertices.add(len(pts) // 3) + shape.loops.add(looplines) + shape.polygons.add(nbr_faces) + shape.vertices.foreach_set("co", pts) + for vtx in indices: + loopstart.append(loop) + data.extend(vtx) + loop += len(vtx) + shape.polygons.foreach_set("loop_start", loopstart) + shape.loops.foreach_set("vertex_index", data) + + if (len(data) > 0): + shape.validate() + shape.update() + obj = bpy.data.objects.new(name, shape) + context.view_layer.active_layer_collection.collection.objects.link(obj) + adjust_material(obj, mat) + return True + return True + + +def calc_point(data): + points = [] + long, offset = get_long(data, 0) + while (offset < len(data)): + val, offset = get_long(data, offset) + flt, offset = get_floats(data, offset, 3) + points.extend(flt) + return points + + +def calc_point_float(data): + points = [] + long, offset = get_long(data, 0) + while (offset < len(data)): + flt, offset = get_floats(data, offset, 3) + points.extend(flt) + return points + + +def get_poly_4p(points): + vertex = {} + for point in points: + ngon = point.points + key = point.fH + if (key not in vertex): + vertex[key] = [] + vertex[key].append(ngon) + return vertex + + +def get_poly_5p(data): + count, offset = get_long(data, 0) + ngons = [] + while count > 0: + pt, offset = get_longs(data, offset, 3) + offset += 8 + 
ngons.append(pt) + count -= 1 + return ngons + + +def get_poly_6p(data): + count, offset = get_long(data, 0) + polylist = [] + while (offset < len(data)): + long, offset = get_longs(data, offset, 6) + i = 5 + while ((i > 3) and (long[i] < 0)): + i -= 1 + if (i > 2): + polylist.append(long[1:i]) + return polylist + + +def get_poly_data(chunk): + offset = 0 + polylist = [] + data = chunk.data + while (offset < len(data)): + count, offset = get_long(data, offset) + points, offset = get_longs(data, offset, count) + polylist.append(points) + return polylist + + +def get_point_array(values): + verts = [] + if len(values) >= 4: + count, offset = get_long(values, 0) + while (count > 0): + floats, offset = get_floats(values, offset, 3) + verts.extend(floats) + count -= 1 + return verts + + +def calc_point_3d(chunk): + data = chunk.data + count, offset = get_long(data, 0) + pointlist = [] + try: + while (offset < len(data)): + pt = Point3d() + long, offset = get_long(data, offset) + pt.points, offset = get_longs(data, offset, long) + pt.flags, offset = get_short(data, offset) + if ((pt.flags & 0x01) != 0): + pt.f1, offset = get_long(data, offset) + if ((pt.flags & 0x08) != 0): + pt.fH, offset = get_short(data, offset) + if ((pt.flags & 0x10) != 0): + pt.f2, offset = get_long(data, offset) + if ((pt.flags & 0x20) != 0): + pt.fA, offset = get_longs(data, offset, 2 * (long - 3)) + if (len(pt.points) > 0): + pointlist.append(pt) + except Exception as exc: + print('ArrayError:\n', "%s: offset = %d\n" %(exc, offset)) + raise exc + return pointlist + + +def create_editable_poly(context, node, msh, mat, mtx): + coords = point3i = point4i = point6i = pointNi = None + name = node.get_first(TYP_NAME).data + poly = msh.get_first(0x08FE) + created = False + if (poly): + for child in poly.children: + if (child.types == 0x0100): + coords = calc_point(child.data) + elif (child.types == 0x0108): + point6i = child.data + elif (child.types == 0x011A): + point4i = calc_point_3d(child) + if (point4i is not None): + vertex = get_poly_4p(point4i) + if (len(vertex) > 0): + for key, ngons in vertex.items(): + created |= create_shape(context, coords, ngons, node, key, mtx, mat) + else: + created = True + elif (point6i is not None): + ngons = get_poly_6p(point6i) + created = create_shape(context, coords, ngons, node, None, mtx, mat) + return created + + +def create_editable_mesh(context, node, msh, mat, mtx): + name = node.get_first(TYP_NAME).data + poly = msh.get_first(0x08FE) + created = False + if (poly): + vertex_chunk = poly.get_first(0x0914) + clsid_chunk = poly.get_first(0x0912) + coords = get_point_array(vertex_chunk.data) + ngons = get_poly_5p(clsid_chunk.data) + created = create_shape(context, coords, ngons, node, None, mtx, mat) + return created + + +def get_matrix_mesh_material(node): + refs = get_reference(node) + if (refs): + mtx = refs.get(0, None) + msh = refs.get(1, None) + mat = refs.get(3, None) + lyr = refs.get(6, None) + else: + refs = get_references(node) + mtx = refs[0] + msh = refs[1] + mat = refs[3] + lyr = None + if (len(refs) > 6): + lyr = refs[6] + return mtx, msh, mat, lyr + + +def adjust_matrix(obj, node): + mtx = create_matrix(node).flatten() + plc = mathutils.Matrix(*mtx) + obj.matrix_world = plc + return plc + + +def create_shell(context, node, shell, mat, mtx): + name = node.get_first(TYP_NAME).data + refs = get_references(shell) + msh = refs[-1] + created = create_editable_mesh(context, node, msh, mtx, mat) + return created + + +def create_skipable(context, node, msh, mat, mtx, skip): + name 
= node.get_first(TYP_NAME).data + print(" skipping %s '%s'... " %(skip, name)) + return True + + +def create_mesh(context, node, msh, mtx, mat): + created = False + uid = get_guid(msh) + msh.geometry = None + if (uid == 0x0E44F10B3): + created = create_editable_mesh(context, node, msh, mat, mtx) + elif (uid == 0x192F60981BF8338D): + created = create_editable_poly(context, node, msh, mat, mtx) + elif (uid in {0x2032, 0x2033}): + created = create_shell(context, node, msh, mat, mtx) + else: + skip = SKIPPABLE.get(uid) + if (skip is not None): + created = create_skipable(context, node, msh, mat, mtx, skip) + return created, uid + + +def create_object(context, node): + parent = get_node_parent(node) + node.parent = parent + name = get_node_name(node) + mtx, msh, mat, lyr = get_matrix_mesh_material(node) + while ((parent is not None) and (get_guid(parent) != 0x0002)): + name = "%s/%s" %(get_node_name(parent), name) + parent_mtx = parent.matrix + if (parent_mtx): + mtx = mtx.dot(parent_mtx) + parent = get_node_parent(parent) + created, uid = create_mesh(context, node, msh, mtx, mat) + + +def make_scene(context, parent, level=0): + for chunk in parent.children: + if (isinstance(chunk, SceneChunk)): + if ((get_guid(chunk) == 0x0001) and (get_super_id(chunk) == 0x0001)): + try: + create_object(context, chunk) + except Exception as exc: + print('ImportError:', exc, chunk) + + +def read_scene(context, maxfile, filename): + global SCENE_LIST + SCENE_LIST = read_chunks(maxfile, 'Scene', filename+'.Scn.bin', containerReader=SceneChunk) + make_scene(context, SCENE_LIST[0], 0) + + +def read(context, filename): + if (is_maxfile(filename)): + maxfile = ImportMaxFile(filename) + prop = maxfile.getproperties('\x05DocumentSummaryInformation', convert_time=True, no_conversion=[10]) + prop = maxfile.getproperties('\x05SummaryInformation', convert_time=True, no_conversion=[10]) + read_class_data(maxfile, filename) + read_config(maxfile, filename) + read_directory(maxfile, filename) + read_class_directory(maxfile, filename) + read_video_postqueue(maxfile, filename) + read_scene(context, maxfile, filename) + else: + print("File seems to be no 3D Studio Max file!") + + +def load(operator, context, filepath="", global_matrix=None): + read(context, filepath) + + return {'FINISHED'} \ No newline at end of file -- 2.30.2
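
A quick way to sanity-check the container parsing added by this patch is to exercise the ImportMaxFile class directly from Blender's Python console. The snippet below is only an illustrative sketch and not part of the patch: it assumes io_import_max.py is installed and enabled as an add-on (so the module is importable and bpy is available), and the sample file path is hypothetical.

import io_import_max

SAMPLE = "/tmp/example.max"  # hypothetical sample file

# Validate the compound-file signature before parsing
if io_import_max.is_maxfile(SAMPLE):
    maxfile = io_import_max.ImportMaxFile(SAMPLE)
    print("Root entry:", maxfile.get_rootentry_name())
    # List every stream in the container with its size in bytes
    for path in maxfile.listdir(streams=True, storages=False):
        name = "/".join(path)
        print("%10d  %s" % (maxfile.get_size(name), name))
    # Document metadata, read the same way read() does before parsing the scene
    props = maxfile.getproperties('\x05SummaryInformation', convert_time=True, no_conversion=[10])
    print(props)
    maxfile.close()

For a full import through the add-on itself, the registered operator can also be invoked directly, e.g. bpy.ops.import_autodesk.max(filepath=SAMPLE).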