diff --git a/io_import_max.py b/io_import_max.py new file mode 100644 index 0000000..3952e87 --- /dev/null +++ b/io_import_max.py @@ -0,0 +1,1518 @@ +# SPDX-FileCopyrightText: 2023 Sebastian Schrand +# +# SPDX-License-Identifier: GPL-2.0-or-later + + +#--- LICENSE --- +# GNU GPL +# Import is based on using information from olefile IO sourcecode +# and the FreeCAD Autodesk 3DS Max importer ImportMAX +# +# olefile (formerly OleFileIO_PL) is copyright (c) 2005-2018 Philippe Lagadec +# (https://www.decalage.info) +# +# ImportMAX is copyright (c) 2017-2022 Jens M. Plonka +# (https://www.github.com/jmplonka/Importer3D) + + +bl_info = { + "name": "Import Autodesk MAX (.max)", + "author": "Sebastian Sille, Philippe Lagadec, Jens M. Plonka", + "version": (1, 0, 0), + "blender": (4, 0, 0), + "location": "File > Import", + "description": "Import 3DSMAX meshes & materials", + "warning": "", + "filepath_url": "", + "category": "Import-Export"} + + +################## +# IMPORT MODULES # +################## + +import io, re +import os, sys, zlib +import struct, array +import time, datetime +import math, mathutils +import bpy, bpy_extras +from bpy_extras import node_shader_utils +from bpy_extras.image_utils import load_image +from bpy_extras.io_utils import axis_conversion +from bpy_extras.io_utils import orientation_helper + +@orientation_helper(axis_forward='Y', axis_up='Z') + +### IMPORT OPERATOR ### +class Import_max(bpy.types.Operator, bpy_extras.io_utils.ImportHelper): + """Import Autodesk MAX""" + bl_idname = "import_autodesk.max" + bl_label = "Import Autodesk MAX (.max)" + + filename_ext = ".max" + filter_glob: bpy.props.StringProperty(default="*.max", options={'HIDDEN'},) + + def execute(self, context): + keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob")) + global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up,).to_4x4() + keywords["global_matrix"] = global_matrix + + return load(self, context, **keywords) + +### REGISTER ### +def menu_func(self, context): + self.layout.operator(Import_max.bl_idname, text="Autodesk MAX (.max)") + +def register(): + bpy.utils.register_class(Import_max) + bpy.types.TOPBAR_MT_file_import.append(menu_func) + +def unregister(): + bpy.types.TOPBAR_MT_file_import.remove(menu_func) + bpy.utils.unregister_class(Import_max) + + +################### +# DATA STRUCTURES # +################### + +MAGIC = b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1' +WORD_CLSID = "00020900-0000-0000-C000-000000000046" + +MAXREGSECT = 0xFFFFFFFA # (-6) maximum SECT +DIFSECT = 0xFFFFFFFC # (-4) denotes a DIFAT sector in a FAT +FATSECT = 0xFFFFFFFD # (-3) denotes a FAT sector in a FAT +ENDOFCHAIN = 0xFFFFFFFE # (-2) end of a virtual stream chain +FREESECT = 0xFFFFFFFF # (-1) unallocated sector +MAXREGSID = 0xFFFFFFFA # (-6) maximum directory entry ID +NOSTREAM = 0xFFFFFFFF # (-1) unallocated directory entry +UNKNOWN_SIZE = 0x7FFFFFFF +MIN_FILE_SIZE = 1536 + +STGTY_EMPTY = 0 #: empty directory entry +STGTY_STORAGE = 1 #: element is a storage object +STGTY_STREAM = 2 #: element is a stream object +STGTY_LOCKBYTES = 3 #: element is an ILockBytes object +STGTY_PROPERTY = 4 #: element is an IPropertyStorage object +STGTY_ROOT = 5 #: element is a root storage + +VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6; +VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11; +VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17; +VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23; +VT_VOID=24; VT_HRESULT=25; 
VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28; +VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64; +VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68; +VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72; +VT_VECTOR=0x1000; + +TYP_NAME = 0x0962 +INVALID_NAME = re.compile('^[0-9].*') +UNPACK_BOX_DATA = struct.Struct('= MIN_FILE_SIZE: + header = filename[:len(MAGIC)] + else: + with open(filename, 'rb') as fp: + header = fp.read(len(MAGIC)) + if header == MAGIC: + return True + else: + return False + + +class MaxStream(io.BytesIO): + + def __init__(self, fp, sect, size, offset, sectorsize, fat, filesize): + unknown_size = False + if size == UNKNOWN_SIZE: + size = len(fat)*sectorsize + unknown_size = True + nb_sectors = (size + (sectorsize-1)) // sectorsize + + data = [] + for i in range(nb_sectors): + try: + fp.seek(offset + sectorsize * sect) + except: + break + sector_data = fp.read(sectorsize) + data.append(sector_data) + try: + sect = fat[sect] & 0xFFFFFFFF + except IndexError: + break + data = b"".join(data) + if len(data) >= size: + data = data[:size] + self.size = size + else: + self.size = len(data) + io.BytesIO.__init__(self, data) + + +class MaxFileDirEntry: + STRUCT_DIRENTRY = '<64sHBBIII16sIQQIII' + DIRENTRY_SIZE = 128 + assert struct.calcsize(STRUCT_DIRENTRY) == DIRENTRY_SIZE + + def __init__(self, entry, sid, maxfile): + self.sid = sid + self.maxfile = maxfile + self.kids = [] + self.kids_dict = {} + self.used = False + ( + self.name_raw, + self.namelength, + self.entry_type, + self.color, + self.sid_left, + self.sid_right, + self.sid_child, + clsid, + self.dwUserFlags, + self.createTime, + self.modifyTime, + self.isectStart, + self.sizeLow, + self.sizeHigh + ) = struct.unpack(MaxFileDirEntry.STRUCT_DIRENTRY, entry) + + if self.namelength > 64: + self.namelength = 64 + self.name_utf16 = self.name_raw[:(self.namelength - 2)] + self.name = maxfile._decode_utf16_str(self.name_utf16) + # print('DirEntry SID=%d: %s' % (self.sid, repr(self.name))) + if maxfile.sectorsize == 512: + self.size = self.sizeLow + else: + self.size = self.sizeLow + (int(self.sizeHigh) << 32) + self.clsid = _clsid(clsid) + self.is_minifat = False + if self.entry_type in (STGTY_ROOT, STGTY_STREAM) and self.size > 0: + if self.size < maxfile.minisectorcutoff \ + and self.entry_type == STGTY_STREAM: # only streams can be in MiniFAT + self.is_minifat = True + else: + self.is_minifat = False + maxfile._check_duplicate_stream(self.isectStart, self.is_minifat) + self.sect_chain = None + + def build_sect_chain(self, maxfile): + if self.sect_chain: + return + if self.entry_type not in (STGTY_ROOT, STGTY_STREAM) or self.size == 0: + return + self.sect_chain = list() + if self.is_minifat and not maxfile.minifat: + maxfile.loadminifat() + next_sect = self.isectStart + while next_sect != ENDOFCHAIN: + self.sect_chain.append(next_sect) + if self.is_minifat: + next_sect = maxfile.minifat[next_sect] + else: + next_sect = maxfile.fat[next_sect] + + def build_storage_tree(self): + if self.sid_child != NOSTREAM: + self.append_kids(self.sid_child) + self.kids.sort() + + def append_kids(self, child_sid): + if child_sid == NOSTREAM: + return + else: + child = self.maxfile._load_direntry(child_sid) + if child.used: + return + child.used = True + self.append_kids(child.sid_left) + name_lower = child.name.lower() + self.kids.append(child) + self.kids_dict[name_lower] = child + self.append_kids(child.sid_right) + child.build_storage_tree() + + def __eq__(self, other): + return self.name == other.name + + 
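+ # The comparison methods below are what build_storage_tree() relies on when it
+ # sorts the kids list: sibling directory entries, stored on disk as a
+ # red-black tree (hence the 'color' field), are flattened here into a simple
+ # name-sorted list.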
def __lt__(self, other): + return self.name < other.name + + def __ne__(self, other): + return not self.__eq__(other) + + def __le__(self, other): + return self.__eq__(other) or self.__lt__(other) + + +class ImportMaxFile: + + def __init__(self, filename=None, write_mode=False, debug=False): + self.write_mode = write_mode + self._filesize = None + self.byte_order = None + self.directory_fp = None + self.direntries = None + self.dll_version = None + self.fat = None + self.first_difat_sector = None + self.first_dir_sector = None + self.first_mini_fat_sector = None + self.fp = None + self.header_clsid = None + self.header_signature = None + self.metadata = None + self.mini_sector_shift = None + self.mini_sector_size = None + self.mini_stream_cutoff_size = None + self.minifat = None + self.minifatsect = None + self.minisectorcutoff = None + self.minisectorsize = None + self.ministream = None + self.minor_version = None + self.nb_sect = None + self.num_difat_sectors = None + self.num_dir_sectors = None + self.num_fat_sectors = None + self.num_mini_fat_sectors = None + self.reserved1 = None + self.reserved2 = None + self.root = None + self.sector_shift = None + self.sector_size = None + self.transaction_signature_number = None + if filename: + self.open(filename, write_mode=write_mode) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def _decode_utf16_str(self, utf16_str, errors='replace'): + unicode_str = utf16_str.decode('UTF-16LE', errors) + return unicode_str + + def open(self, filename, write_mode=False): + self.write_mode = write_mode + if hasattr(filename, 'read'): + self.fp = filename + elif isinstance(filename, bytes) and len(filename) >= MIN_FILE_SIZE: + self.fp = io.BytesIO(filename) + else: + if self.write_mode: + mode = 'r+b' + else: + mode = 'rb' + self.fp = open(filename, mode) + filesize=0 + self.fp.seek(0, os.SEEK_END) + try: + filesize = self.fp.tell() + finally: + self.fp.seek(0) + self._filesize = filesize + self._used_streams_fat = [] + self._used_streams_minifat = [] + header = self.fp.read(512) + + fmt_header = '<8s16sHHHHHHLLLLLLLLLL' + header_size = struct.calcsize(fmt_header) + header1 = header[:header_size] + ( + self.header_signature, + self.header_clsid, + self.minor_version, + self.dll_version, + self.byte_order, + self.sector_shift, + self.mini_sector_shift, + self.reserved1, + self.reserved2, + self.num_dir_sectors, + self.num_fat_sectors, + self.first_dir_sector, + self.transaction_signature_number, + self.mini_stream_cutoff_size, + self.first_mini_fat_sector, + self.num_mini_fat_sectors, + self.first_difat_sector, + self.num_difat_sectors + ) = struct.unpack(fmt_header, header1) + + self.sector_size = 2**self.sector_shift + self.mini_sector_size = 2**self.mini_sector_shift + if self.mini_stream_cutoff_size != 0x1000: + self.mini_stream_cutoff_size = 0x1000 + self.nb_sect = ((filesize + self.sector_size-1) // self.sector_size) - 1 + + # file clsid + self.header_clsid = _clsid(header[8:24]) + self.sectorsize = self.sector_size #1 << i16(header, 30) + self.minisectorsize = self.mini_sector_size #1 << i16(header, 32) + self.minisectorcutoff = self.mini_stream_cutoff_size # i32(header, 56) + self._check_duplicate_stream(self.first_dir_sector) + if self.num_mini_fat_sectors: + self._check_duplicate_stream(self.first_mini_fat_sector) + if self.num_difat_sectors: + self._check_duplicate_stream(self.first_difat_sector) + + # Load file allocation tables + self.loadfat(header) + self.loaddirectory(self.first_dir_sector) + 
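+ # At this point the compound file is indexed: the FAT was rebuilt from the
+ # 109 DIFAT entries in the 512-byte header (plus any extra DIFAT sectors) and
+ # the directory tree has been parsed. The MiniFAT itself is loaded lazily by
+ # loadminifat() when the first stream below the mini-stream cutoff is opened.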
self.minifatsect = self.first_mini_fat_sector + + def close(self): + self.fp.close() + + def _check_duplicate_stream(self, first_sect, minifat=False): + if minifat: + used_streams = self._used_streams_minifat + else: + if first_sect in (DIFSECT,FATSECT,ENDOFCHAIN,FREESECT): + return + used_streams = self._used_streams_fat + if first_sect in used_streams: + pass + else: + used_streams.append(first_sect) + + def sector_array(self, sect): + ary = array.array('I', sect) + if sys.byteorder == 'big': + ary.byteswap() + return ary + + def loadfat_sect(self, sect): + if isinstance(sect, array.array): + fat1 = sect + else: + fat1 = self.sector_array(sect) + isect = None + for isect in fat1: + isect = isect & 0xFFFFFFFF + if isect == ENDOFCHAIN or isect == FREESECT: + break + sector = self.getsect(isect) + nextfat = self.sector_array(sector) + self.fat = self.fat + nextfat + return isect + + def loadfat(self, header): + sect = header[76:512] + self.fat = array.array('I') + self.loadfat_sect(sect) + if self.num_difat_sectors != 0: + nb_difat_sectors = (self.sectorsize//4) - 1 + nb_difat = (self.num_fat_sectors - 109 + nb_difat_sectors - 1) // nb_difat_sectors + isect_difat = self.first_difat_sector + for i in range(nb_difat): + sector_difat = self.getsect(isect_difat) + difat = self.sector_array(sector_difat) + self.loadfat_sect(difat[:nb_difat_sectors]) + isect_difat = difat[nb_difat_sectors] + if len(self.fat) > self.nb_sect: + self.fat = self.fat[:self.nb_sect] + + def loadminifat(self): + stream_size = self.num_mini_fat_sectors * self.sector_size + nb_minisectors = (self.root.size + self.mini_sector_size - 1) // self.mini_sector_size + used_size = nb_minisectors * 4 + sect = self._open(self.minifatsect, stream_size, force_FAT=True).read() + self.minifat = self.sector_array(sect) + self.minifat = self.minifat[:nb_minisectors] + + def getsect(self, sect): + try: + self.fp.seek(self.sectorsize * (sect + 1)) + except: + print('MAX sector index out of range') + sector = self.fp.read(self.sectorsize) + return sector + + def loaddirectory(self, sect): + self.directory_fp = self._open(sect, force_FAT=True) + max_entries = self.directory_fp.size // 128 + self.direntries = [None] * max_entries + root_entry = self._load_direntry(0) + self.root = self.direntries[0] + self.root.build_storage_tree() + + def _load_direntry (self, sid): + if self.direntries[sid] is not None: + return self.direntries[sid] + self.directory_fp.seek(sid * 128) + entry = self.directory_fp.read(128) + self.direntries[sid] = MaxFileDirEntry(entry, sid, self) + return self.direntries[sid] + + def _open(self, start, size = UNKNOWN_SIZE, force_FAT=False): + if size < self.minisectorcutoff and not force_FAT: + if not self.ministream: + self.loadminifat() + size_ministream = self.root.size + self.ministream = self._open(self.root.isectStart, + size_ministream, force_FAT=True) + return MaxStream(fp=self.ministream, sect=start, size=size, + offset=0, sectorsize=self.minisectorsize, + fat=self.minifat, filesize=self.ministream.size) + else: + return MaxStream(fp=self.fp, sect=start, size=size, + offset=self.sectorsize, + sectorsize=self.sectorsize, fat=self.fat, + filesize=self._filesize) + + def _list(self, files, prefix, node, streams=True, storages=False): + prefix = prefix + [node.name] + for entry in node.kids: + if entry.entry_type == STGTY_STORAGE: + if storages: + files.append(prefix[1:] + [entry.name]) + self._list(files, prefix, entry, streams, storages) + elif entry.entry_type == STGTY_STREAM: + if streams: + files.append(prefix[1:] 
+ [entry.name]) + + def listdir(self, streams=True, storages=False): + files = [] + self._list(files, [], self.root, streams, storages) + return files + + def _find(self, filename): + if isinstance(filename, str): + filename = filename.split('/') + node = self.root + for name in filename: + for kid in node.kids: + if kid.name.lower() == name.lower(): + break + node = kid + return node.sid + + def openstream(self, filename): + sid = self._find(filename) + entry = self.direntries[sid] + return self._open(entry.isectStart, entry.size) + + def get_type(self, filename): + try: + sid = self._find(filename) + entry = self.direntries[sid] + return entry.entry_type + except: + return False + + def getclsid(self, filename): + sid = self._find(filename) + entry = self.direntries[sid] + return entry.clsid + + def get_size(self, filename): + sid = self._find(filename) + entry = self.direntries[sid] + return entry.size + + def get_rootentry_name(self): + return self.root.name + + def getproperties(self, filename, convert_time=False, no_conversion=None): + if no_conversion == None: + no_conversion = [] + streampath = filename + if not isinstance(streampath, str): + streampath = '/'.join(streampath) + fp = self.openstream(filename) + data = {} + try: + stream = fp.read(28) + clsid = _clsid(stream[8:24]) + stream = fp.read(20) + fmtid = _clsid(stream[:16]) + fp.seek(i32(stream, 16)) + stream = b"****" + fp.read(i32(fp.read(4)) - 4) + num_props = i32(stream, 4) + except BaseException as exc: + return data + + num_props = min(num_props, int(len(stream) / 8)) + for i in range(num_props): + property_id = 0 + try: + property_id = i32(stream, 8 + i*8) + offset = i32(stream, 12 + i*8) + property_type = i32(stream, offset) + if property_type == VT_I2: # 16-bit signed integer + value = i16(stream, offset + 4) + if value >= 32768: + value = value - 65536 + elif property_type == VT_UI2: # 2-byte unsigned integer + value = i16(stream, offset + 4) + elif property_type in (VT_I4, VT_INT, VT_ERROR): + value = i32(stream, offset + 4) + elif property_type in (VT_UI4, VT_UINT): # 4-byte unsigned integer + value = i32(stream, offset + 4) + elif property_type in (VT_BSTR, VT_LPSTR): + count = i32(stream, offset + 4) + value = stream[offset + 8:offset + 8 + count - 1] + value = value.replace(b'\x00', b'') + elif property_type == VT_BLOB: + count = i32(stream, offset + 4) + value = stream[offset + 8:offset + 8 + count] + elif property_type == VT_LPWSTR: + count = i32(stream, offset + 4) + value = self._decode_utf16_str(stream[offset + 8:offset + 8 + count*2]) + elif property_type == VT_FILETIME: + value = int(i32(stream, offset + 4)) + (int(i32(stream, offset + 8)) << 32) + if convert_time and property_id not in no_conversion: + _FILETIME_null_date = datetime.datetime(1601, 1, 1, 0, 0, 0) + value = _FILETIME_null_date + datetime.timedelta(microseconds=value // 10) + else: + value = value // 10000000 + elif property_type == VT_UI1: # 1-byte unsigned integer + value = i8(stream[offset + 4]) + elif property_type == VT_CLSID: + value = _clsid(stream[offset + 4:offset + 20]) + elif property_type == VT_CF: + count = i32(stream, offset + 4) + value = stream[offset + 8:offset + 8 + count] + elif property_type == VT_BOOL: + value = bool(i16(stream, offset + 4)) + else: + value = None + + data[property_id] = value + except BaseException as exc: + print('Error while parsing property_id:', exc) + return data + + +class MaxChunk(): + + def __init__(self, types, size, level, number): + self.number = number + self.types = types + self.level = 
level + self.parent = None + self.previous = None + self.next = None + self.size = size + self.unknown = True + self.format = None + self.data = None + self.resolved = False + + def __str__(self): + if (self.unknown == True): + return "%s[%4x] %04X: %s" %("" * self.level, self.number, self.types, ":".join("%02x"%(c) for c in self.data)) + return "%s[%4x] %04X: %s=%s" %("" * self.level, self.number, self.types, self.format, self.data) + + +class ByteArrayChunk(MaxChunk): + + def __init__(self, types, data, level, number): + MaxChunk.__init__(self, types, data, level, number) + + def set(self, data, name, fmt, start, end): + try: + self.data = struct.unpack(fmt, data[start:end]) + self.format = name + self.unknown = False + except Exception as exc: + self.data = data + # print('StructError:', exc, name) + + def set_string(self, data): + try: + self.data = data.decode('UTF-16LE') + self.format = "Str16" + self.unknown = False + except: + self.data = data + + def set_le16_string(self, data): + try: + long, offset = get_long(data, 0) + self.data = data[offset:offset + l * 2].decode('utf-16-le') + if (self.data[-1] == b'\0'): + self.data = self.data[0:-1] + self.format = "LStr16" + self.unknown = False + except: + self.data = data + + def set_data(self, data): + if (self.types in [0x0340, 0x4001, 0x0456, 0x0962]): + self.set_string(data) + elif (self.types in [0x2034, 0x2035]): + self.set(data, "ints", '<'+'I'*int(len(data) / 4), 0, len(data)) + elif (self.types in [0x2501, 0x2503, 0x2504, 0x2505, 0x2511]): + self.set(data, "floats", '<'+'f'*int(len(data) / 4), 0, len(data)) + elif (self.types == 0x2510): + self.set(data, "struct", '<'+'f'*int(len(data) / 4 - 1) + 'I', 0, len(data)) + elif (self.types == 0x0100): + self.set(data, "float", ' 3): + return get_rotation(refs[0]) + elif (uid == 0x3A90416731381913): # Rotation Wire + return get_rotation(get_references(pos)[0]) + if (rotation): + mtx = mathutils.Matrix.Rotation(rotation.angle, 4, rotation.axis) + return mtx + + +def get_scale(pos): + mtx = mathutils.Matrix.Identity(4) + if (pos): + uid = get_guid(pos) + if (uid == 0x2010): # Bezier Scale + scale = pos.get_first(0x2501) + if (scale is None): + scale = pos.get_first(0x2505) + pos = scale.data + elif (uid == 0x0000000000442315): # TCB Zoom + scale = pos.get_first(0x2501) + if (scale is None): + scale = pos.get_first(0x2505) + pos = scale.data + elif (uid == 0xFEEE238B118F7C01): # ScaleXYZ + pos = get_point_3d(pos, 1.0) + else: + return mtx + mtx = mathutils.Matrix.Diagonal(pos[:3]).to_4x4() + return mtx + + +def create_matrix(prc): + mtx = mathutils.Matrix.Identity(4) + pos = rot = scl = None + uid = get_guid(prc) + if (uid == 0x2005): # Position/Rotation/Scale + pos = get_position(get_references(prc)[0]) + rot = get_rotation(get_references(prc)[1]) + scl = get_scale(get_references(prc)[2]) + elif (uid == 0x9154): # BipSlave Control + biped_sub_anim = get_references(prc)[2] + refs = get_references(biped_sub_anim) + scl = get_scale(get_references(refs[1])[0]) + rot = get_rotation(get_references(refs[2])[0]) + pos = get_position(get_references(refs[3])[0]) + if (pos is not None): + mtx = pos @ mtx + if (rot is not None): + mtx = rot @ mtx + if (scl is not None): + mtx = scl @ mtx + return mtx + + +def get_property(properties, idx): + for child in properties.children: + if (child.types & 0x100E): + if (get_short(child.data, 0)[0] == idx): + return child + return None + + +def get_color(colors, idx): + prop = get_property(colors, idx) + if (prop is not None): + siz = 15 if (len(prop.data) > 
23) else 11 + col, offset = get_floats(prop.data, siz, 3) + return (col[0], col[1], col[2]) + return None + + +def get_float(colors, idx): + prop = get_property(colors, idx) + if (prop is not None): + fl, offset = get_float(prop.data, 15) + return fl + return None + + +def get_standard_material(refs): + material = None + try: + if (len(refs) > 2): + colors = refs[2] + parameters = get_references(colors)[0] + material = Material() + material.set('ambient', get_color(parameters, 0x00)) + material.set('diffuse', get_color(parameters, 0x01)) + material.set('specular', get_color(parameters, 0x02)) + material.set('emissive', get_color(parameters, 0x08)) + material.set('shinines', get_float(parameters, 0x0A)) + transparency = refs[4] # ParameterBlock2 + material.set('transparency', get_float(transparency, 0x02)) + except: + pass + return material + + +def get_vray_material(vry): + material = Material() + try: + material.set('diffuse', get_color(vry, 0x01)) + material.set('ambient', get_color(vry, 0x02)) + material.set('specular', get_color(vry, 0x05)) + material.set('emissive', get_color(vry, 0x05)) + material.set('shinines', get_float(vry, 0x0B)) + material.set('transparency', get_float(vry, 0x02)) + except: + pass + return material + + +def get_arch_material(ad): + material = Material() + try: + material.set('diffuse', get_color(ad, 0x1A)) + material.set('ambient', get_color(ad, 0x02)) + material.set('specular', get_color(ad, 0x05)) + material.set('emissive', get_color(ad, 0x05)) + material.set('shinines', get_float(ad, 0x0B)) + material.set('transparency', get_float(ad, 0x02)) + except: + pass + return material + + +def adjust_material(obj, mat): + material = None + if (mat is not None): + uid = get_guid(mat) + if (uid == 0x0002): # Standard + refs = get_references(mat) + material = get_standard_material(refs) + elif (uid == 0x0000000000000200): # Multi/Sub-Object + refs = get_references(mat) + material = adjust_material(obj, refs[-1]) + elif (uid == 0x7034695C37BF3F2F): # VRayMtl + refs = get_reference(mat) + material = get_vray_material(refs[1]) + elif (uid == 0x4A16365470B05735): # Arch + refs = get_references(mat) + material = get_arch_material(refs[0]) + if (obj is not None) and (material is not None): + objMaterial = bpy.data.materials.new(get_class_name(mat)) + obj.data.materials.append(objMaterial) + objMaterial.diffuse_color[:3] = material.get('diffuse', (0.8,0.8,0.8)) + objMaterial.specular_color[:3] = material.get('specular', (0,0,0)) + objMaterial.roughness = 1.0 - material.get('shinines', 0.6) + + +def create_shape(context, pts, indices, node, key, prc, mat): + name = node.get_first(TYP_NAME).data + shape = bpy.data.meshes.new(name) + if (key is not None): + name = "%s_%d" %(name, key) + mtx = create_matrix(prc) + data = [] + if (pts): + loopstart = [] + looplines = loop = 0 + nbr_faces = len(indices) + for fid in range(nbr_faces): + polyface = indices[fid] + looplines += len(polyface) + shape.vertices.add(len(pts) // 3) + shape.loops.add(looplines) + shape.polygons.add(nbr_faces) + shape.vertices.foreach_set("co", pts) + for vtx in indices: + loopstart.append(loop) + data.extend(vtx) + loop += len(vtx) + shape.polygons.foreach_set("loop_start", loopstart) + shape.loops.foreach_set("vertex_index", data) + + if (len(data) > 0): + shape.validate() + shape.update() + obj = bpy.data.objects.new(name, shape) + context.view_layer.active_layer_collection.collection.objects.link(obj) + adjust_material(obj, mat) + return True + return True + + +def calc_point(data): + points = [] + long, 
offset = get_long(data, 0) + while (offset < len(data)): + val, offset = get_long(data, offset) + flt, offset = get_floats(data, offset, 3) + points.extend(flt) + return points + + +def calc_point_float(data): + points = [] + long, offset = get_long(data, 0) + while (offset < len(data)): + flt, offset = get_floats(data, offset, 3) + points.extend(flt) + return points + + +def get_poly_4p(points): + vertex = {} + for point in points: + ngon = point.points + key = point.fH + if (key not in vertex): + vertex[key] = [] + vertex[key].append(ngon) + return vertex + + +def get_poly_5p(data): + count, offset = get_long(data, 0) + ngons = [] + while count > 0: + pt, offset = get_longs(data, offset, 3) + offset += 8 + ngons.append(pt) + count -= 1 + return ngons + + +def get_poly_6p(data): + count, offset = get_long(data, 0) + polylist = [] + while (offset < len(data)): + long, offset = get_longs(data, offset, 6) + i = 5 + while ((i > 3) and (long[i] < 0)): + i -= 1 + if (i > 2): + polylist.append(long[1:i]) + return polylist + + +def get_poly_data(chunk): + offset = 0 + polylist = [] + data = chunk.data + while (offset < len(data)): + count, offset = get_long(data, offset) + points, offset = get_longs(data, offset, count) + polylist.append(points) + return polylist + + +def get_point_array(values): + verts = [] + if len(values) >= 4: + count, offset = get_long(values, 0) + while (count > 0): + floats, offset = get_floats(values, offset, 3) + verts.extend(floats) + count -= 1 + return verts + + +def calc_point_3d(chunk): + data = chunk.data + count, offset = get_long(data, 0) + pointlist = [] + try: + while (offset < len(data)): + pt = Point3d() + long, offset = get_long(data, offset) + pt.points, offset = get_longs(data, offset, long) + pt.flags, offset = get_short(data, offset) + if ((pt.flags & 0x01) != 0): + pt.f1, offset = get_long(data, offset) + if ((pt.flags & 0x08) != 0): + pt.fH, offset = get_short(data, offset) + if ((pt.flags & 0x10) != 0): + pt.f2, offset = get_long(data, offset) + if ((pt.flags & 0x20) != 0): + pt.fA, offset = get_longs(data, offset, 2 * (long - 3)) + if (len(pt.points) > 0): + pointlist.append(pt) + except Exception as exc: + print('ArrayError:\n', "%s: offset = %d\n" %(exc, offset)) + raise exc + return pointlist + + +def create_editable_poly(context, node, msh, mat, mtx): + coords = point3i = point4i = point6i = pointNi = None + name = node.get_first(TYP_NAME).data + poly = msh.get_first(0x08FE) + created = False + if (poly): + for child in poly.children: + if (child.types == 0x0100): + coords = calc_point(child.data) + elif (child.types == 0x0108): + point6i = child.data + elif (child.types == 0x011A): + point4i = calc_point_3d(child) + if (point4i is not None): + vertex = get_poly_4p(point4i) + if (len(vertex) > 0): + for key, ngons in vertex.items(): + created |= create_shape(context, coords, ngons, node, key, mtx, mat) + else: + created = True + elif (point6i is not None): + ngons = get_poly_6p(point6i) + created = create_shape(context, coords, ngons, node, None, mtx, mat) + return created + + +def create_editable_mesh(context, node, msh, mat, mtx): + name = node.get_first(TYP_NAME).data + poly = msh.get_first(0x08FE) + created = False + if (poly): + vertex_chunk = poly.get_first(0x0914) + clsid_chunk = poly.get_first(0x0912) + coords = get_point_array(vertex_chunk.data) + ngons = get_poly_5p(clsid_chunk.data) + created = create_shape(context, coords, ngons, node, None, mtx, mat) + return created + + +def get_matrix_mesh_material(node): + refs = 
get_reference(node) + if (refs): + mtx = refs.get(0, None) + msh = refs.get(1, None) + mat = refs.get(3, None) + lyr = refs.get(6, None) + else: + refs = get_references(node) + mtx = refs[0] + msh = refs[1] + mat = refs[3] + lyr = None + if (len(refs) > 6): + lyr = refs[6] + return mtx, msh, mat, lyr + + +def adjust_matrix(obj, node): + mtx = create_matrix(node).flatten() + plc = mathutils.Matrix(*mtx) + obj.matrix_world = plc + return plc + + +def create_shell(context, node, shell, mat, mtx): + name = node.get_first(TYP_NAME).data + refs = get_references(shell) + msh = refs[-1] + created = create_editable_mesh(context, node, msh, mtx, mat) + return created + + +def create_skipable(context, node, msh, mat, mtx, skip): + name = node.get_first(TYP_NAME).data + print(" skipping %s '%s'... " %(skip, name)) + return True + + +def create_mesh(context, node, msh, mtx, mat): + created = False + uid = get_guid(msh) + msh.geometry = None + if (uid == 0x0E44F10B3): + created = create_editable_mesh(context, node, msh, mat, mtx) + elif (uid == 0x192F60981BF8338D): + created = create_editable_poly(context, node, msh, mat, mtx) + elif (uid in {0x2032, 0x2033}): + created = create_shell(context, node, msh, mat, mtx) + else: + skip = SKIPPABLE.get(uid) + if (skip is not None): + created = create_skipable(context, node, msh, mat, mtx, skip) + return created, uid + + +def create_object(context, node): + parent = get_node_parent(node) + node.parent = parent + name = get_node_name(node) + mtx, msh, mat, lyr = get_matrix_mesh_material(node) + while ((parent is not None) and (get_guid(parent) != 0x0002)): + name = "%s/%s" %(get_node_name(parent), name) + parent_mtx = parent.matrix + if (parent_mtx): + mtx = mtx.dot(parent_mtx) + parent = get_node_parent(parent) + created, uid = create_mesh(context, node, msh, mtx, mat) + + +def make_scene(context, parent, level=0): + for chunk in parent.children: + if (isinstance(chunk, SceneChunk)): + if ((get_guid(chunk) == 0x0001) and (get_super_id(chunk) == 0x0001)): + try: + create_object(context, chunk) + except Exception as exc: + print('ImportError:', exc, chunk) + + +def read_scene(context, maxfile, filename): + global SCENE_LIST + SCENE_LIST = read_chunks(maxfile, 'Scene', filename+'.Scn.bin', containerReader=SceneChunk) + make_scene(context, SCENE_LIST[0], 0) + + +def read(context, filename): + if (is_maxfile(filename)): + maxfile = ImportMaxFile(filename) + prop = maxfile.getproperties('\x05DocumentSummaryInformation', convert_time=True, no_conversion=[10]) + prop = maxfile.getproperties('\x05SummaryInformation', convert_time=True, no_conversion=[10]) + read_class_data(maxfile, filename) + read_config(maxfile, filename) + read_directory(maxfile, filename) + read_class_directory(maxfile, filename) + read_video_postqueue(maxfile, filename) + read_scene(context, maxfile, filename) + else: + print("File seems to be no 3D Studio Max file!") + + +def load(operator, context, filepath="", global_matrix=None): + read(context, filepath) + + return {'FINISHED'} \ No newline at end of file diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py index 9430ca4..16c60da 100644 --- a/io_scene_3ds/__init__.py +++ b/io_scene_3ds/__init__.py @@ -1,20 +1,4 @@ -# ##### BEGIN GPL LICENSE BLOCK ##### -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# ##### END GPL LICENSE BLOCK ##### +# SPDX-License-Identifier: GPL-2.0-or-later from bpy_extras.io_utils import ( ImportHelper, @@ -32,14 +16,14 @@ import bpy bl_info = { "name": "Autodesk 3DS format", "author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand", - "version": (2, 3, 1), - "blender": (3, 0, 0), - "location": "File > Import", - "description": "Import 3DS, meshes, uvs, materials, textures, " - "cameras & lamps", - "warning": "Images must be in file folder", - "doc_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/" - "Scripts/Import-Export/Autodesk_3DS", + "version": (2, 3, 6), + "blender": (3, 6, 1), + "location": "File > Import-Export", + "description": "3DS Import/Export meshes, UVs, materials, textures, " + "cameras, lamps & animation", + "warning": "Images must be in file folder, " + "filenames are limited to DOS 8.3 format", + "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/scene_3ds.html", "category": "Import-Export", } @@ -54,39 +38,69 @@ if "bpy" in locals(): @orientation_helper(axis_forward='Y', axis_up='Z') class Import3DS(bpy.types.Operator, ImportHelper): """Import from 3DS file format (.3ds)""" - bl_idname = "import_scene.autodesk_3ds" + bl_idname = "import_scene.max3ds" bl_label = 'Import 3DS' - bl_options = {'UNDO'} + bl_options = {'PRESET', 'UNDO'} filename_ext = ".3ds" filter_glob: StringProperty(default="*.3ds", options={'HIDDEN'}) constrain_size: FloatProperty( - name="Size Constraint", + name="Constrain Size", description="Scale the model by 10 until it reaches the " "size constraint (0 to disable)", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0, ) + use_scene_unit: BoolProperty( + name="Scene Units", + description="Convert to scene unit length settings", + default=False, + ) + use_center_pivot: BoolProperty( + name="Pivot Origin", + description="Move all geometry to pivot origin", + default=False, + ) use_image_search: BoolProperty( name="Image Search", description="Search subdirectories for any associated images " "(Warning, may be slow)", default=True, ) + object_filter: EnumProperty( + name="Object Filter", options={'ENUM_FLAG'}, + items=(('WORLD', "World".rjust(11), "", 'WORLD_DATA', 0x1), + ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2), + ('LIGHT', "Light".rjust(12), "", 'LIGHT_DATA', 0x4), + ('CAMERA', "Camera".rjust(11), "", 'CAMERA_DATA', 0x8), + ('EMPTY', "Empty".rjust(11), "", 'EMPTY_AXIS', 0x10), + ), + description="Object types to import", + default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, + ) use_apply_transform: BoolProperty( name="Apply Transform", description="Workaround for object transformations " "importing incorrectly", default=True, ) - - read_keyframe: bpy.props.BoolProperty( - name="Read Keyframe", + use_keyframes: BoolProperty( + name="Animation", description="Read the keyframe data", default=True, ) + use_world_matrix: BoolProperty( + name="World Space", + description="Transform to matrix world", + default=False, + ) + use_cursor: BoolProperty( + name="Cursor Origin", + description="Read the 3D cursor 
location", + default=False, + ) def execute(self, context): from . import import_3ds @@ -103,12 +117,87 @@ class Import3DS(bpy.types.Operator, ImportHelper): return import_3ds.load(self, context, **keywords) + def draw(self, context): + pass + + +class MAX3DS_PT_import_include(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Include" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_SCENE_OT_max3ds" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = True + + sfile = context.space_data + operator = sfile.active_operator + + layrow = layout.row(align=True) + layrow.prop(operator, "use_image_search") + layrow.label(text="", icon='OUTLINER_OB_IMAGE' if operator.use_image_search else 'IMAGE_DATA') + layout.column().prop(operator, "object_filter") + layrow = layout.row(align=True) + layrow.prop(operator, "use_keyframes") + layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER') + layrow = layout.row(align=True) + layrow.prop(operator, "use_cursor") + layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR') + + +class MAX3DS_PT_import_transform(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Transform" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "IMPORT_SCENE_OT_max3ds" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "constrain_size") + layrow = layout.row(align=True) + layrow.prop(operator, "use_scene_unit") + layrow.label(text="", icon='EMPTY_ARROWS' if operator.use_scene_unit else 'EMPTY_DATA') + layrow = layout.row(align=True) + layrow.prop(operator, "use_center_pivot") + layrow.label(text="", icon='OVERLAY' if operator.use_center_pivot else 'PIVOT_ACTIVE') + layrow = layout.row(align=True) + layrow.prop(operator, "use_apply_transform") + layrow.label(text="", icon='MESH_CUBE' if operator.use_apply_transform else 'MOD_SOLIDIFY') + layrow = layout.row(align=True) + layrow.prop(operator, "use_world_matrix") + layrow.label(text="", icon='WORLD' if operator.use_world_matrix else 'META_BALL') + layout.prop(operator, "axis_forward") + layout.prop(operator, "axis_up") + @orientation_helper(axis_forward='Y', axis_up='Z') class Export3DS(bpy.types.Operator, ExportHelper): """Export to 3DS file format (.3ds)""" - bl_idname = "export_scene.autodesk_3ds" + bl_idname = "export_scene.max3ds" bl_label = 'Export 3DS' + bl_options = {'PRESET', 'UNDO'} filename_ext = ".3ds" filter_glob: StringProperty( @@ -116,11 +205,49 @@ class Export3DS(bpy.types.Operator, ExportHelper): options={'HIDDEN'}, ) + scale_factor: FloatProperty( + name="Scale Factor", + description="Master scale factor for all objects", + min=0.0, max=100000.0, + soft_min=0.0, soft_max=100000.0, + default=1.0, + ) + use_scene_unit: BoolProperty( + name="Scene Units", + description="Take the scene unit length settings into account", + default=False, + ) use_selection: BoolProperty( - name="Selection Only", + name="Selection", description="Export selected objects only", default=False, ) + object_filter: 
EnumProperty( + name="Object Filter", options={'ENUM_FLAG'}, + items=(('WORLD', "World".rjust(11), "", 'WORLD_DATA',0x1), + ('MESH', "Mesh".rjust(11), "", 'MESH_DATA', 0x2), + ('LIGHT', "Light".rjust(12), "", 'LIGHT_DATA',0x4), + ('CAMERA', "Camera".rjust(11), "", 'CAMERA_DATA',0x8), + ('EMPTY', "Empty".rjust(11), "", 'EMPTY_AXIS',0x10), + ), + description="Object types to export", + default={'WORLD', 'MESH', 'LIGHT', 'CAMERA', 'EMPTY'}, + ) + use_hierarchy: BoolProperty( + name="Hierarchy", + description="Export hierarchy chunks", + default=False, + ) + use_keyframes: BoolProperty( + name="Animation", + description="Write the keyframe data", + default=False, + ) + use_cursor: BoolProperty( + name="Cursor Origin", + description="Save the 3D cursor location", + default=False, + ) def execute(self, context): from . import export_3ds @@ -137,6 +264,74 @@ class Export3DS(bpy.types.Operator, ExportHelper): return export_3ds.save(self, context, **keywords) + def draw(self, context): + pass + + +class MAX3DS_PT_export_include(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Include" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "EXPORT_SCENE_OT_max3ds" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = True + + sfile = context.space_data + operator = sfile.active_operator + + layrow = layout.row(align=True) + layrow.prop(operator, "use_selection") + layrow.label(text="", icon='RESTRICT_SELECT_OFF' if operator.use_selection else 'RESTRICT_SELECT_ON') + layout.column().prop(operator, "object_filter") + layrow = layout.row(align=True) + layrow.prop(operator, "use_hierarchy") + layrow.label(text="", icon='OUTLINER' if operator.use_hierarchy else 'CON_CHILDOF') + layrow = layout.row(align=True) + layrow.prop(operator, "use_keyframes") + layrow.label(text="", icon='ANIM' if operator.use_keyframes else 'DECORATE_DRIVER') + layrow = layout.row(align=True) + layrow.prop(operator, "use_cursor") + layrow.label(text="", icon='PIVOT_CURSOR' if operator.use_cursor else 'CURSOR') + + +class MAX3DS_PT_export_transform(bpy.types.Panel): + bl_space_type = 'FILE_BROWSER' + bl_region_type = 'TOOL_PROPS' + bl_label = "Transform" + bl_parent_id = "FILE_PT_operator" + + @classmethod + def poll(cls, context): + sfile = context.space_data + operator = sfile.active_operator + + return operator.bl_idname == "EXPORT_SCENE_OT_max3ds" + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + sfile = context.space_data + operator = sfile.active_operator + + layout.prop(operator, "scale_factor") + layrow = layout.row(align=True) + layrow.prop(operator, "use_scene_unit") + layrow.label(text="", icon='EMPTY_ARROWS' if operator.use_scene_unit else 'EMPTY_DATA') + layout.prop(operator, "axis_forward") + layout.prop(operator, "axis_up") + # Add to a menu def menu_func_export(self, context): @@ -149,27 +344,25 @@ def menu_func_import(self, context): def register(): bpy.utils.register_class(Import3DS) + bpy.utils.register_class(MAX3DS_PT_import_include) + bpy.utils.register_class(MAX3DS_PT_import_transform) bpy.utils.register_class(Export3DS) - + bpy.utils.register_class(MAX3DS_PT_export_include) + bpy.utils.register_class(MAX3DS_PT_export_transform) bpy.types.TOPBAR_MT_file_import.append(menu_func_import) 
bpy.types.TOPBAR_MT_file_export.append(menu_func_export) def unregister(): bpy.utils.unregister_class(Import3DS) + bpy.utils.unregister_class(MAX3DS_PT_import_include) + bpy.utils.unregister_class(MAX3DS_PT_import_transform) bpy.utils.unregister_class(Export3DS) - + bpy.utils.unregister_class(MAX3DS_PT_export_include) + bpy.utils.unregister_class(MAX3DS_PT_export_transform) bpy.types.TOPBAR_MT_file_import.remove(menu_func_import) bpy.types.TOPBAR_MT_file_export.remove(menu_func_export) -# NOTES: -# why add 1 extra vertex? and remove it when done? - -# "Answer - eekadoodle - would need to re-order UV's without this since face -# order isnt always what we give blender, BMesh will solve :D" -# -# disabled scaling to size, this requires exposing bb (easy) and understanding -# how it works (needs some time) - if __name__ == "__main__": - register() + register() \ No newline at end of file diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py index 5505322..bc639e7 100644 --- a/io_scene_3ds/export_3ds.py +++ b/io_scene_3ds/export_3ds.py @@ -1,23 +1,5 @@ -# ##### BEGIN GPL LICENSE BLOCK ##### -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# ##### END GPL LICENSE BLOCK ##### - -# Script copyright (C) Bob Holcomb -# Contributors: Campbell Barton, Bob Holcomb, Richard Lärkäng, Damien McGinnes, Mark Stijnman, Sebastian Sille +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright 2005 Bob Holcomb """ Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information @@ -25,28 +7,40 @@ from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode. 
""" import bpy +import time import math import struct import mathutils import bpy_extras from bpy_extras import node_shader_utils -###################################################### -# Data Structures -###################################################### +################### +# Data Structures # +################### # Some of the chunks that we will export -# ----- Primary Chunk, at the beginning of each file +# >----- Primary Chunk, at the beginning of each file PRIMARY = 0x4D4D -# ------ Main Chunks -VERSION = 0x0002 # This gives the version of the .3ds file -KFDATA = 0xB000 # This is the header for all of the key frame info - -# ------ sub defines of OBJECTINFO -OBJECTINFO = 0x3D3D # Main mesh object chunk before the material and object information +# >----- Main Chunks +OBJECTINFO = 0x3D3D # Main mesh object chunk before material and object information MESHVERSION = 0x3D3E # This gives the version of the mesh +VERSION = 0x0002 # This gives the version of the .3ds file +KFDATA = 0xB000 # This is the header for all of the keyframe info + +# >----- sub defines of OBJECTINFO +BITMAP = 0x1100 # The background image name +USE_BITMAP = 0x1101 # The background image flag +SOLIDBACKGND = 0x1200 # The background color (RGB) +USE_SOLIDBGND = 0x1201 # The background color flag +VGRADIENT = 0x1300 # The background gradient colors +USE_VGRADIENT = 0x1301 # The background gradient flag +O_CONSTS = 0x1500 # The origin of the 3D cursor AMBIENTLIGHT = 0x2100 # The color of the ambient light +FOG = 0x2200 # The fog atmosphere settings +USE_FOG = 0x2201 # The fog atmosphere flag +LAYER_FOG = 0x2302 # The fog layer atmosphere settings +USE_LAYER_FOG = 0x2303 # The fog layer atmosphere flag MATERIAL = 45055 # 0xAFFF // This stored the texture info OBJECT = 16384 # 0x4000 // This stores the faces, vertices, etc... 
@@ -59,9 +53,16 @@ MATSHINESS = 0xA040 # Specular intensity of the object/material (percent) MATSHIN2 = 0xA041 # Reflection of the object/material (percent) MATSHIN3 = 0xA042 # metallic/mirror of the object/material (percent) MATTRANS = 0xA050 # Transparency value (100-OpacityValue) (percent) +MATSELFILLUM = 0xA080 # # Material self illumination flag MATSELFILPCT = 0xA084 # Self illumination strength (percent) +MATWIRE = 0xA085 # Material wireframe rendered flag +MATFACEMAP = 0xA088 # Face mapped textures flag +MATPHONGSOFT = 0xA08C # Phong soften material flag +MATWIREABS = 0xA08E # Wire size in units flag +MATWIRESIZE = 0xA087 # Rendered wire size in pixels MATSHADING = 0xA100 # Material shading method +# >------ sub defines of MAT_MAP MAT_DIFFUSEMAP = 0xA200 # This is a header for a new diffuse texture MAT_SPECMAP = 0xA204 # head for specularity map MAT_OPACMAP = 0xA210 # head for opacity map @@ -71,37 +72,49 @@ MAT_BUMP_PERCENT = 0xA252 # Normalmap strength (percent) MAT_TEX2MAP = 0xA33A # head for secondary texture MAT_SHINMAP = 0xA33C # head for roughness map MAT_SELFIMAP = 0xA33D # head for emission map - -# >------ sub defines of MAT_MAP -MATMAPFILE = 0xA300 # This holds the file name of a texture +MAT_MAP_FILE = 0xA300 # This holds the file name of a texture MAT_MAP_TILING = 0xa351 # 2nd bit (from LSB) is mirror UV flag MAT_MAP_TEXBLUR = 0xA353 # Texture blurring factor MAT_MAP_USCALE = 0xA354 # U axis scaling MAT_MAP_VSCALE = 0xA356 # V axis scaling MAT_MAP_UOFFSET = 0xA358 # U axis offset MAT_MAP_VOFFSET = 0xA35A # V axis offset -MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad +MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad MAP_COL1 = 0xA360 # Tint Color1 MAP_COL2 = 0xA362 # Tint Color2 MAP_RCOL = 0xA364 # Red tint MAP_GCOL = 0xA366 # Green tint MAP_BCOL = 0xA368 # Blue tint -RGB = 0x0010 # RGB float -RGB1 = 0x0011 # RGB Color1 -RGB2 = 0x0012 # RGB Color2 +RGB = 0x0010 # RGB float Color1 +RGB1 = 0x0011 # RGB int Color1 +RGBI = 0x0012 # RGB int Color2 +RGBF = 0x0013 # RGB float Color2 PCT = 0x0030 # Percent chunk +PCTF = 0x0031 # Percent float MASTERSCALE = 0x0100 # Master scale factor # >------ sub defines of OBJECT OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object +OBJECT_HIERARCHY = 0x4F00 # Hierarchy id of the object +OBJECT_PARENT = 0x4F10 # Parent id of the object # >------ Sub defines of LIGHT LIGHT_MULTIPLIER = 0x465B # The light energy factor +LIGHT_INNER_RANGE = 0x4659 # Light inner range value +LIGHT_OUTER_RANGE = 0x465A # Light outer range value +LIGHT_ATTENUATE = 0x4625 # Light attenuation flag LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight -LIGHT_SPOTROLL = 0x4656 # The roll angle of the spot +LIGHT_SPOT_ROLL = 0x4656 # Light spot roll angle +LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag +LIGHT_SPOT_LSHADOW = 0x4641 # Light spot shadow parameters +LIGHT_SPOT_SEE_CONE = 0x4650 # Light spot show cone flag +LIGHT_SPOT_RECTANGLE = 0x4651 # Light spot rectangle flag +LIGHT_SPOT_OVERSHOOT = 0x4652 # Light spot overshoot flag +LIGHT_SPOT_PROJECTOR = 0x4653 # Light spot projection bitmap +LIGHT_SPOT_ASPECT = 0x4657 # Light spot aspect ratio # >------ sub defines of CAMERA OBJECT_CAM_RANGES = 0x4720 # The camera range values @@ -116,19 +129,35 @@ OBJECT_SMOOTH = 0x4150 # The objects smooth groups OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix # >------ sub defines of 
KFDATA -KFDATA_KFHDR = 0xB00A -KFDATA_KFSEG = 0xB008 -KFDATA_KFCURTIME = 0xB009 -KFDATA_OBJECT_NODE_TAG = 0xB002 +AMBIENT_NODE_TAG = 0xB001 # Ambient node tag +OBJECT_NODE_TAG = 0xB002 # Object tree tag +CAMERA_NODE_TAG = 0xB003 # Camera object tag +TARGET_NODE_TAG = 0xB004 # Camera target tag +LIGHT_NODE_TAG = 0xB005 # Light object tag +LTARGET_NODE_TAG = 0xB006 # Light target tag +SPOT_NODE_TAG = 0xB007 # Spotlight tag +KFDATA_KFSEG = 0xB008 # Frame start & end +KFDATA_KFCURTIME = 0xB009 # Frame current +KFDATA_KFHDR = 0xB00A # Keyframe header # >------ sub defines of OBJECT_NODE_TAG -OBJECT_NODE_ID = 0xB030 -OBJECT_NODE_HDR = 0xB010 -OBJECT_PIVOT = 0xB013 -OBJECT_INSTANCE_NAME = 0xB011 -POS_TRACK_TAG = 0xB020 -ROT_TRACK_TAG = 0xB021 -SCL_TRACK_TAG = 0xB022 +OBJECT_NODE_ID = 0xB030 # Object hierachy ID +OBJECT_NODE_HDR = 0xB010 # Hierachy tree header +OBJECT_INSTANCE_NAME = 0xB011 # Object instance name +OBJECT_PARENT_NAME = 0x80F0 # Object parent name +OBJECT_PIVOT = 0xB013 # Object pivot position +OBJECT_BOUNDBOX = 0xB014 # Object boundbox +OBJECT_MORPH_SMOOTH = 0xB015 # Object smooth angle +POS_TRACK_TAG = 0xB020 # Position transform tag +ROT_TRACK_TAG = 0xB021 # Rotation transform tag +SCL_TRACK_TAG = 0xB022 # Scale transform tag +FOV_TRACK_TAG = 0xB023 # Field of view tag +ROLL_TRACK_TAG = 0xB024 # Roll transform tag +COL_TRACK_TAG = 0xB025 # Color transform tag +HOTSPOT_TRACK_TAG = 0xB027 # Hotspot transform tag +FALLOFF_TRACK_TAG = 0xB028 # Falloff transform tag + +ROOT_OBJECT = 0xFFFF # Root object # So 3ds max can open files, limit names to 12 in length @@ -136,21 +165,20 @@ SCL_TRACK_TAG = 0xB022 name_unique = [] # stores str, ascii only name_mapping = {} # stores {orig: byte} mapping - def sane_name(name): name_fixed = name_mapping.get(name) if name_fixed is not None: return name_fixed - # strip non ascii chars + # Strip non ascii chars new_name_clean = new_name = name.encode("ASCII", "replace").decode("ASCII")[:12] i = 0 while new_name in name_unique: - new_name = new_name_clean + ".%.3d" % i + new_name = new_name_clean + '.%.3d' % i i += 1 - # note, appending the 'str' version. + # Note, appending the 'str' version name_unique.append(new_name) name_mapping[name] = new_name = new_name.encode("ASCII", "replace") return new_name @@ -159,16 +187,13 @@ def sane_name(name): def uv_key(uv): return round(uv[0], 6), round(uv[1], 6) - -# size defines: +# Size defines SZ_SHORT = 2 SZ_INT = 4 SZ_FLOAT = 4 - class _3ds_ushort(object): - """Class representing a short (2-byte integer) for a 3ds file. 
- *** This looks like an unsigned short H is unsigned from the struct docs - Cam***""" + """Class representing a short (2-byte integer) for a 3ds file.""" __slots__ = ("value", ) def __init__(self, val=0): @@ -178,7 +203,7 @@ class _3ds_ushort(object): return SZ_SHORT def write(self, file): - file.write(struct.pack(" adjacent else -0.0 + diagonal = math.sqrt(pow(posi.x ,2) + pow(posi.y ,2)) + target_x = math.copysign(posi.x + (posi.y * math.tan(pan)), pan) + target_y = math.copysign(posi.y + (posi.x * math.tan(adjacent - pan)), turn) + target_z = math.copysign(posi.z + diagonal * math.tan(adjacent - tilt), lean) + + return target_x, target_y, target_z + + +################# +# KEYFRAME DATA # +################# + +def make_kfdata(revision, start=0, stop=100, curtime=0): + """Make the basic keyframe data chunk.""" kfdata = _3ds_chunk(KFDATA) kfhdr = _3ds_chunk(KFDATA_KFHDR) - kfhdr.add_variable("revision", _3ds_ushort(0)) - # Not really sure what filename is used for, but it seems it is usually used - # to identify the program that generated the .3ds: - kfhdr.add_variable("filename", _3ds_string("Blender")) - kfhdr.add_variable("animlen", _3ds_uint(stop-start)) + kfhdr.add_variable("revision", _3ds_ushort(revision)) + kfhdr.add_variable("filename", _3ds_string(b'Blender')) + kfhdr.add_variable("animlen", _3ds_uint(stop - start)) kfseg = _3ds_chunk(KFDATA_KFSEG) kfseg.add_variable("start", _3ds_uint(start)) @@ -1088,120 +1114,478 @@ def make_kfdata(start=0, stop=0, curtime=0): kfdata.add_subchunk(kfcurtime) return kfdata -def make_track_chunk(ID, obj): - """Make a chunk for track data. - Depending on the ID, this will construct a position, rotation or scale track.""" +def make_track_chunk(ID, ob, ob_pos, ob_rot, ob_size): + """Make a chunk for track data. Depending on the ID, this will construct + a position, rotation, scale, roll, color, fov, hotspot or falloff track.""" track_chunk = _3ds_chunk(ID) - track_chunk.add_variable("track_flags", _3ds_ushort()) - track_chunk.add_variable("unknown", _3ds_uint()) - track_chunk.add_variable("unknown", _3ds_uint()) - track_chunk.add_variable("nkeys", _3ds_uint(1)) - # Next section should be repeated for every keyframe, but for now, animation is not actually supported. - track_chunk.add_variable("tcb_frame", _3ds_uint(0)) - track_chunk.add_variable("tcb_flags", _3ds_ushort()) - if obj.type=='Empty': - if ID==POS_TRACK_TAG: - # position vector: - track_chunk.add_variable("position", _3ds_point_3d(obj.getLocation())) - elif ID==ROT_TRACK_TAG: - # rotation (quaternion, angle first, followed by axis): - q = obj.getEuler().to_quaternion() # XXX, todo! 
- track_chunk.add_variable("rotation", _3ds_point_4d((q.angle, q.axis[0], q.axis[1], q.axis[2]))) - elif ID==SCL_TRACK_TAG: - # scale vector: - track_chunk.add_variable("scale", _3ds_point_3d(obj.getSize())) + + if ID in {POS_TRACK_TAG, ROT_TRACK_TAG, SCL_TRACK_TAG, ROLL_TRACK_TAG} and ob.animation_data and ob.animation_data.action: + action = ob.animation_data.action + if action.fcurves: + fcurves = action.fcurves + fcurves.update() + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys += 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + + if ID == POS_TRACK_TAG: # Position + for i, frame in enumerate(kframes): + pos_track = [fc for fc in fcurves if fc is not None and fc.data_path == 'location'] + pos_x = next((tc.evaluate(frame) for tc in pos_track if tc.array_index == 0), ob_pos.x) + pos_y = next((tc.evaluate(frame) for tc in pos_track if tc.array_index == 1), ob_pos.y) + pos_z = next((tc.evaluate(frame) for tc in pos_track if tc.array_index == 2), ob_pos.z) + pos = ob_size @ mathutils.Vector((pos_x, pos_y, pos_z)) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("position", _3ds_point_3d((pos.x, pos.y, pos.z))) + + elif ID == ROT_TRACK_TAG: # Rotation + for i, frame in enumerate(kframes): + rot_track = [fc for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] + rot_x = next((tc.evaluate(frame) for tc in rot_track if tc.array_index == 0), ob_rot.x) + rot_y = next((tc.evaluate(frame) for tc in rot_track if tc.array_index == 1), ob_rot.y) + rot_z = next((tc.evaluate(frame) for tc in rot_track if tc.array_index == 2), ob_rot.z) + quat = mathutils.Euler((rot_x, rot_y, rot_z)).to_quaternion().inverted() + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("rotation", _3ds_point_4d((quat.angle, quat.axis.x, quat.axis.y, quat.axis.z))) + + elif ID == SCL_TRACK_TAG: # Scale + for i, frame in enumerate(kframes): + scale_track = [fc for fc in fcurves if fc is not None and fc.data_path == 'scale'] + size_x = next((tc.evaluate(frame) for tc in scale_track if tc.array_index == 0), ob_size.x) + size_y = next((tc.evaluate(frame) for tc in scale_track if tc.array_index == 1), ob_size.y) + size_z = next((tc.evaluate(frame) for tc in scale_track if tc.array_index == 2), ob_size.z) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("scale", _3ds_point_3d((size_x, size_y, size_z))) + + elif ID == ROLL_TRACK_TAG: # Roll + for i, frame in enumerate(kframes): + roll_track = [fc for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] + roll = next((tc.evaluate(frame) for tc in roll_track if tc.array_index == 1), ob_rot.y) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("roll", _3ds_float(round(math.degrees(roll), 4))) + + elif ID in {COL_TRACK_TAG, FOV_TRACK_TAG, HOTSPOT_TRACK_TAG, FALLOFF_TRACK_TAG} and ob.data.animation_data and 
ob.data.animation_data.action: + action = ob.data.animation_data.action + if action.fcurves: + fcurves = action.fcurves + fcurves.update() + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys += 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + + if ID == COL_TRACK_TAG: # Color + for i, frame in enumerate(kframes): + color = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'color'] + if not color: + color = ob.data.color[:3] + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("color", _3ds_float_color(color)) + + elif ID == FOV_TRACK_TAG: # Field of view + for i, frame in enumerate(kframes): + lens = next((fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'lens'), ob.data.lens) + fov = 2 * math.atan(ob.data.sensor_width / (2 * lens)) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("fov", _3ds_float(round(math.degrees(fov), 4))) + + elif ID == HOTSPOT_TRACK_TAG: # Hotspot + for i, frame in enumerate(kframes): + beamsize = next((fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_size'), ob.data.spot_size) + blend = next((fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_blend'), ob.data.spot_blend) + hot_spot = math.degrees(beamsize) - (blend * math.floor(math.degrees(beamsize))) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("hotspot", _3ds_float(round(hot_spot, 4))) + + elif ID == FALLOFF_TRACK_TAG: # Falloff + for i, frame in enumerate(kframes): + fall_off = next((fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'spot_size'), ob.data.spot_size) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("falloff", _3ds_float(round(math.degrees(fall_off), 4))) + else: - # meshes have their transformations applied before - # exporting, so write identity transforms here: - if ID==POS_TRACK_TAG: - # position vector: - track_chunk.add_variable("position", _3ds_point_3d((0.0,0.0,0.0))) - elif ID==ROT_TRACK_TAG: - # rotation (quaternion, angle first, followed by axis): - track_chunk.add_variable("rotation", _3ds_point_4d((0.0, 1.0, 0.0, 0.0))) - elif ID==SCL_TRACK_TAG: - # scale vector: - track_chunk.add_variable("scale", _3ds_point_3d((1.0, 1.0, 1.0))) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) # Based on observation default flag is 0x40 + track_chunk.add_variable("frame_start", _3ds_uint(0)) + track_chunk.add_variable("frame_total", _3ds_uint(0)) + track_chunk.add_variable("nkeys", _3ds_uint(1)) + # Next section should be repeated for every keyframe, with no animation only one tag is needed + track_chunk.add_variable("tcb_frame", _3ds_uint(0)) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + + # New method simply inserts the parameters + if ID == POS_TRACK_TAG: # Position vector + 
track_chunk.add_variable("position", _3ds_point_3d(ob_pos)) + + elif ID == ROT_TRACK_TAG: # Rotation (angle first [radians], followed by axis) + quat = ob_rot.to_quaternion().inverted() + track_chunk.add_variable("rotation", _3ds_point_4d((quat.angle, quat.axis.x, quat.axis.y, quat.axis.z))) + + elif ID == SCL_TRACK_TAG: # Scale vector + track_chunk.add_variable("scale", _3ds_point_3d(ob_size)) + + elif ID == ROLL_TRACK_TAG: # Roll angle + track_chunk.add_variable("roll", _3ds_float(round(math.degrees(ob_rot.y), 4))) + + elif ID == COL_TRACK_TAG: # Color values + track_chunk.add_variable("color", _3ds_float_color(ob.data.color[:3])) + + elif ID == FOV_TRACK_TAG: # Field of view + track_chunk.add_variable("fov", _3ds_float(round(math.degrees(ob.data.angle), 4))) + + elif ID == HOTSPOT_TRACK_TAG: # Hotspot + beam_angle = math.degrees(ob.data.spot_size) + track_chunk.add_variable("hotspot", _3ds_float(round(beam_angle - (ob.data.spot_blend * math.floor(beam_angle)), 4))) + + elif ID == FALLOFF_TRACK_TAG: # Falloff + track_chunk.add_variable("falloff", _3ds_float(round(math.degrees(ob.data.spot_size), 4))) return track_chunk -def make_kf_obj_node(obj, name_to_id): - """Make a node chunk for a Blender object. - Takes the Blender object as a parameter. Object id's are taken from the dictionary name_to_id. - Blender Empty objects are converted to dummy nodes.""" +def make_object_node(ob, translation, rotation, scale, name_id): + """Make a node chunk for a Blender object. Takes Blender object as parameter. + Blender Empty objects are converted to dummy nodes.""" - name = obj.name - # main object node chunk: - kf_obj_node = _3ds_chunk(KFDATA_OBJECT_NODE_TAG) - # chunk for the object id: + name = ob.name + if ob.type == 'CAMERA': + obj_node = _3ds_chunk(CAMERA_NODE_TAG) + elif ob.type == 'LIGHT': + obj_node = _3ds_chunk(LIGHT_NODE_TAG) + if ob.data.type == 'SPOT': + obj_node = _3ds_chunk(SPOT_NODE_TAG) + else: # Main object node chunk + obj_node = _3ds_chunk(OBJECT_NODE_TAG) + + # Chunk for the object ID from name_id dictionary: obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID) - # object id is from the name_to_id dictionary: - obj_id_chunk.add_variable("node_id", _3ds_ushort(name_to_id[name])) + obj_id_chunk.add_variable("node_id", _3ds_ushort(name_id[name])) + obj_node.add_subchunk(obj_id_chunk) - # object node header: + # Object node header with object name obj_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR) - # object name: - if obj.type == 'Empty': - # Empties are called "$$$DUMMY" and use the OBJECT_INSTANCE_NAME chunk - # for their name (see below): - obj_node_header_chunk.add_variable("name", _3ds_string("$$$DUMMY")) - else: - # Add the name: + parent = ob.parent + + if ob.type == 'EMPTY': # Forcing to use the real name for empties + # Empties called $$$DUMMY and use OBJECT_INSTANCE_NAME chunk as name + obj_node_header_chunk.add_variable("name", _3ds_string(b"$$$DUMMY")) + obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0x4000)) + obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + + else: # Add flag variables - Based on observation flags1 is usually 0x0040 and 0x4000 for empty objects obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name))) - # Add Flag variables (not sure what they do): - obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0)) - obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0x0040)) + """Flags2 defines 0x01 for display path, 0x02 use autosmooth, 0x04 object frozen, + 
0x10 for motion blur, 0x20 for material morph and bit 0x40 for mesh morph.""" + if ob.type == 'MESH' and ob.data.use_auto_smooth: + obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0x02)) + else: + obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + obj_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT)) + + ''' + # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX # Check parent-child relationships: - parent = obj.parent - if (parent is None) or (parent.name not in name_to_id): - # If no parent, or the parents name is not in the name_to_id dictionary, - # parent id becomes -1: + if parent is None or parent.name not in name_id: + # If no parent, or parents name is not in dictionary, ID becomes -1: obj_node_header_chunk.add_variable("parent", _3ds_ushort(-1)) - else: - # Get the parent's id from the name_to_id dictionary: - obj_node_header_chunk.add_variable("parent", _3ds_ushort(name_to_id[parent.name])) + else: # Get the parent's ID from the name_id dictionary: + obj_node_header_chunk.add_variable("parent", _3ds_ushort(name_id[parent.name])) + ''' - # Add pivot chunk: - obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT) - obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(obj.getLocation())) - kf_obj_node.add_subchunk(obj_pivot_chunk) + # Add subchunk for node header + obj_node.add_subchunk(obj_node_header_chunk) - # add subchunks for object id and node header: - kf_obj_node.add_subchunk(obj_id_chunk) - kf_obj_node.add_subchunk(obj_node_header_chunk) + # Alternatively use PARENT_NAME chunk for hierachy + if parent is not None and (parent.name in name_id): + obj_parent_name_chunk = _3ds_chunk(OBJECT_PARENT_NAME) + obj_parent_name_chunk.add_variable("parent", _3ds_string(sane_name(parent.name))) + obj_node.add_subchunk(obj_parent_name_chunk) - # Empty objects need to have an extra chunk for the instance name: - if obj.type == 'Empty': + # Empty objects need to have an extra chunk for the instance name + if ob.type == 'EMPTY': # Will use a real object name for empties for now obj_instance_name_chunk = _3ds_chunk(OBJECT_INSTANCE_NAME) obj_instance_name_chunk.add_variable("name", _3ds_string(sane_name(name))) - kf_obj_node.add_subchunk(obj_instance_name_chunk) + obj_node.add_subchunk(obj_instance_name_chunk) - # Add track chunks for position, rotation and scale: - kf_obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, obj)) - kf_obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, obj)) - kf_obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, obj)) + if ob.type in {'MESH', 'EMPTY'}: # Add a pivot point at the object center + pivot_pos = (translation[name]) + obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT) + obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(pivot_pos)) + obj_node.add_subchunk(obj_pivot_chunk) - return kf_obj_node -''' + # Create a bounding box from quadrant diagonal + obj_boundbox = _3ds_chunk(OBJECT_BOUNDBOX) + obj_boundbox.add_variable("min", _3ds_point_3d(ob.bound_box[0])) + obj_boundbox.add_variable("max", _3ds_point_3d(ob.bound_box[6])) + obj_node.add_subchunk(obj_boundbox) + + # Add smooth angle if autosmooth is used + if ob.type == 'MESH' and ob.data.use_auto_smooth: + obj_morph_smooth = _3ds_chunk(OBJECT_MORPH_SMOOTH) + obj_morph_smooth.add_variable("angle", _3ds_float(round(ob.data.auto_smooth_angle, 6))) + obj_node.add_subchunk(obj_morph_smooth) + + # Add track chunks for position, rotation, size + ob_scale = scale[name] # and collect masterscale + if parent is None or (parent.name not in name_id): + ob_pos = translation[name] + ob_rot = 
rotation[name] + ob_size = ob.scale + + else: # Calculate child position and rotation of the object center, no scale applied + ob_pos = translation[name] - translation[parent.name] + ob_rot = rotation[name].to_quaternion().cross(rotation[parent.name].to_quaternion().copy().inverted()).to_euler() + ob_size = mathutils.Vector((1.0, 1.0, 1.0)) + + obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, ob, ob_pos, ob_rot, ob_scale)) + + if ob.type in {'MESH', 'EMPTY'}: + obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + if ob.type =='CAMERA': + obj_node.add_subchunk(make_track_chunk(FOV_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(ROLL_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + if ob.type =='LIGHT': + obj_node.add_subchunk(make_track_chunk(COL_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + if ob.type == 'LIGHT' and ob.data.type == 'SPOT': + obj_node.add_subchunk(make_track_chunk(HOTSPOT_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(FALLOFF_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + obj_node.add_subchunk(make_track_chunk(ROLL_TRACK_TAG, ob, ob_pos, ob_rot, ob_size)) + + return obj_node -def save(operator, - context, filepath="", - use_selection=True, - global_matrix=None, - ): +def make_target_node(ob, translation, rotation, scale, name_id): + """Make a target chunk for light and camera objects.""" - import time - #from bpy_extras.io_utils import create_derived_objects, free_derived_objects + name = ob.name + name_id["ø " + name] = len(name_id) + if ob.type == 'CAMERA': # Add camera target + tar_node = _3ds_chunk(TARGET_NODE_TAG) + elif ob.type == 'LIGHT': # Add spot target + tar_node = _3ds_chunk(LTARGET_NODE_TAG) + # Chunk for the object ID from name_id dictionary: + tar_id_chunk = _3ds_chunk(OBJECT_NODE_ID) + tar_id_chunk.add_variable("node_id", _3ds_ushort(name_id[name])) + tar_node.add_subchunk(tar_id_chunk) + + # Object node header with object name + tar_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR) + # Targets get the same name as the object, flags1 is usually 0x0010 and parent ROOT_OBJECT + tar_node_header_chunk.add_variable("name", _3ds_string(sane_name(name))) + tar_node_header_chunk.add_variable("flags1", _3ds_ushort(0x0010)) + tar_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + tar_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT)) + + # Add subchunk for node header + tar_node.add_subchunk(tar_node_header_chunk) + + # Calculate target position + ob_pos = translation[name] + ob_rot = rotation[name] + ob_scale = scale[name] + target_pos = calc_target(ob_pos, ob_rot.x, ob_rot.z) + + # Add track chunks for target position + track_chunk = _3ds_chunk(POS_TRACK_TAG) + + if ob.animation_data and ob.animation_data.action: + action = ob.animation_data.action + if action.fcurves: + fcurves = action.fcurves + fcurves.update() + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys += 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + + for i, frame in enumerate(kframes): + loc_target = [fc for fc in fcurves if fc is not 
None and fc.data_path == 'location'] + loc_x = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 0), ob_pos.x) + loc_y = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 1), ob_pos.y) + loc_z = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 2), ob_pos.z) + rot_target = [fc for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler'] + rot_x = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 0), ob_rot.x) + rot_z = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 2), ob_rot.z) + target_distance = ob_scale @ mathutils.Vector((loc_x, loc_y, loc_z)) + target_pos = calc_target(target_distance, rot_x, rot_z) + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("position", _3ds_point_3d(target_pos)) + + else: # Track header + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) # Based on observation default flag is 0x40 + track_chunk.add_variable("frame_start", _3ds_uint(0)) + track_chunk.add_variable("frame_total", _3ds_uint(0)) + track_chunk.add_variable("nkeys", _3ds_uint(1)) + # Keyframe header + track_chunk.add_variable("tcb_frame", _3ds_uint(0)) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("position", _3ds_point_3d(target_pos)) + + tar_node.add_subchunk(track_chunk) + + return tar_node + + +def make_ambient_node(world): + """Make an ambient node for the world color, if the color is animated.""" + + amb_color = world.color[:3] + amb_node = _3ds_chunk(AMBIENT_NODE_TAG) + track_chunk = _3ds_chunk(COL_TRACK_TAG) + + # Chunk for the ambient ID is ROOT_OBJECT + amb_id_chunk = _3ds_chunk(OBJECT_NODE_ID) + amb_id_chunk.add_variable("node_id", _3ds_ushort(ROOT_OBJECT)) + amb_node.add_subchunk(amb_id_chunk) + + # Object node header, name is "$AMBIENT$" for ambient nodes + amb_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR) + amb_node_header_chunk.add_variable("name", _3ds_string(b"$AMBIENT$")) + amb_node_header_chunk.add_variable("flags1", _3ds_ushort(0x4000)) # Flags1 0x4000 for empty objects + amb_node_header_chunk.add_variable("flags2", _3ds_ushort(0)) + amb_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT)) + amb_node.add_subchunk(amb_node_header_chunk) + + if world.use_nodes and world.node_tree.animation_data.action: + ambioutput = 'EMISSION' ,'MIX_SHADER', 'WORLD_OUTPUT' + action = world.node_tree.animation_data.action + links = world.node_tree.links + ambilinks = [lk for lk in links if lk.from_node.type in {'EMISSION', 'RGB'} and lk.to_node.type in ambioutput] + if ambilinks and action.fcurves: + fcurves = action.fcurves + fcurves.update() + emission = next((lk.from_socket.node for lk in ambilinks if lk.to_node.type in ambioutput), False) + ambinode = next((lk.from_socket.node for lk in ambilinks if lk.to_node.type == 'EMISSION'), emission) + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + ambipath = ('nodes[\"RGB\"].outputs[0].default_value' if ambinode and ambinode.type == 'RGB' else + 'nodes[\"Emission\"].inputs[0].default_value') + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys = nkeys + 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", 
_3ds_uint(nkeys)) + + for i, frame in enumerate(kframes): + ambient = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == ambipath] + if not ambient: + ambient = amb_color + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("color", _3ds_float_color(ambient[:3])) + + elif world.animation_data.action: + action = world.animation_data.action + if action.fcurves: + fcurves = action.fcurves + fcurves.update() + kframes = [kf.co[0] for kf in [fc for fc in fcurves if fc is not None][0].keyframe_points] + nkeys = len(kframes) + if not 0 in kframes: + kframes.append(0) + nkeys += 1 + kframes = sorted(set(kframes)) + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(int(action.frame_start))) + track_chunk.add_variable("frame_total", _3ds_uint(int(action.frame_end))) + track_chunk.add_variable("nkeys", _3ds_uint(nkeys)) + + for i, frame in enumerate(kframes): + ambient = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'color'] + if not ambient: + ambient = amb_color + track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame))) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("color", _3ds_float_color(ambient)) + + else: # Track header + track_chunk.add_variable("track_flags", _3ds_ushort(0x40)) + track_chunk.add_variable("frame_start", _3ds_uint(0)) + track_chunk.add_variable("frame_total", _3ds_uint(0)) + track_chunk.add_variable("nkeys", _3ds_uint(1)) + # Keyframe header + track_chunk.add_variable("tcb_frame", _3ds_uint(0)) + track_chunk.add_variable("tcb_flags", _3ds_ushort()) + track_chunk.add_variable("color", _3ds_float_color(amb_color)) + + amb_node.add_subchunk(track_chunk) + + return amb_node + + +########## +# EXPORT # +########## + +def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False, use_selection=False, + object_filter=None, use_hierarchy=False, use_keyframes=False, global_matrix=None, use_cursor=False): """Save the Blender scene to a 3ds file.""" # Time the export duration = time.time() - # Blender.Window.WaitCursor(1) + context.window.cursor_set('WAIT') + + scene = context.scene + layer = context.view_layer + depsgraph = context.evaluated_depsgraph_get() + world = scene.world + + unit_measure = 1.0 + if use_scene_unit: + unit_length = scene.unit_settings.length_unit + if unit_length == 'MILES': + unit_measure = 0.000621371 + elif unit_length == 'KILOMETERS': + unit_measure = 0.001 + elif unit_length == 'FEET': + unit_measure = 3.280839895 + elif unit_length == 'INCHES': + unit_measure = 39.37007874 + elif unit_length == 'CENTIMETERS': + unit_measure = 100 + elif unit_length == 'MILLIMETERS': + unit_measure = 1000 + elif unit_length == 'THOU': + unit_measure = 39370.07874 + elif unit_length == 'MICROMETERS': + unit_measure = 1000000 + + mtx_scale = mathutils.Matrix.Scale((scale_factor * unit_measure),4) if global_matrix is None: global_matrix = mathutils.Matrix() @@ -1209,57 +1593,43 @@ def save(operator, if bpy.ops.object.mode_set.poll(): bpy.ops.object.mode_set(mode='OBJECT') - scene = context.scene - layer = context.view_layer - depsgraph = context.evaluated_depsgraph_get() - - # Initialize the main chunk (primary): + # Initialize the main chunk (primary) primary = _3ds_chunk(PRIMARY) - # Add version chunk: + + # Add version chunk version_chunk = _3ds_chunk(VERSION) version_chunk.add_variable("version", _3ds_uint(3)) 
primary.add_subchunk(version_chunk) - # Init main object info chunk: + # Init main object info chunk object_info = _3ds_chunk(OBJECTINFO) mesh_version = _3ds_chunk(MESHVERSION) mesh_version.add_variable("mesh", _3ds_uint(3)) object_info.add_subchunk(mesh_version) - # Add MASTERSCALE element - mscale = _3ds_chunk(MASTERSCALE) - mscale.add_variable("scale", _3ds_float(1)) - object_info.add_subchunk(mscale) + # Init main keyframe data chunk + if use_keyframes: + revision = 0x0005 + stop = scene.frame_end + start = scene.frame_start + curtime = scene.frame_current + kfdata = make_kfdata(revision, start, stop, curtime) - # Add AMBIENT color - if scene.world is not None: - ambient_chunk = _3ds_chunk(AMBIENTLIGHT) - ambient_light = _3ds_chunk(RGB) - ambient_light.add_variable("ambient", _3ds_float_color(scene.world.color)) - ambient_chunk.add_subchunk(ambient_light) - object_info.add_subchunk(ambient_chunk) - - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - # init main key frame data chunk: - kfdata = make_kfdata() - ''' - - # Make a list of all materials used in the selected meshes (use a dictionary, - # each material is added once): + # Make a list of all materials used in the selected meshes (use dictionary, each material is added once) materialDict = {} mesh_objects = [] if use_selection: - objects = [ob for ob in scene.objects if not ob.hide_viewport and ob.select_get(view_layer=layer)] + objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)] else: - objects = [ob for ob in scene.objects if not ob.hide_viewport] + objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer)] + empty_objects = [ob for ob in objects if ob.type == 'EMPTY'] light_objects = [ob for ob in objects if ob.type == 'LIGHT'] camera_objects = [ob for ob in objects if ob.type == 'CAMERA'] for ob in objects: - # get derived objects - #free, derived = create_derived_objects(scene, ob) + # Get derived objects derived_dict = bpy_extras.io_utils.create_derived_objects(depsgraph, [ob]) derived = derived_dict.get(ob) @@ -1278,11 +1648,12 @@ def save(operator, if data: matrix = global_matrix @ mtx data.transform(matrix) + data.transform(mtx_scale) mesh_objects.append((ob_derived, data, matrix)) ma_ls = data.materials ma_ls_len = len(ma_ls) - # get material/image tuples. + # Get material/image tuples if data.uv_layers: if not ma_ls: ma = ma_name = None @@ -1294,7 +1665,7 @@ def save(operator, ma_index = f.material_index = 0 ma = ma_ls[ma_index] ma_name = None if ma is None else ma.name - # else there already set to none + # Else there already set to none img = get_uv_image(ma) img_name = None if img is None else img.name @@ -1303,7 +1674,7 @@ def save(operator, else: for ma in ma_ls: - if ma: # material may be None so check its not. + if ma: # Material may be None so check its not materialDict.setdefault((ma.name, None), (ma, None)) # Why 0 Why! 
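# --- Editorial sketch, not part of the patch ---------------------------------
# The loop above deduplicates materials by keying a dictionary on the
# (material name, image name) pair, so each unique combination is written as
# exactly one MATERIAL chunk no matter how many meshes reference it. A minimal
# standalone model of that bookkeeping (names such as "Steel" and "rust.png"
# are hypothetical):
material_dict = {}

def register_material(ma_name, img_name, ma, img):
    # setdefault() keeps the first value stored for a key, so repeated uses of
    # the same material/image combination do not add another entry.
    material_dict.setdefault((ma_name, img_name), (ma, img))

register_material("Steel", None, "ma_steel", None)
register_material("Steel", None, "ma_steel", None)            # duplicate, ignored
register_material("Steel", "rust.png", "ma_steel", "im_rust") # new combination
assert len(material_dict) == 2
# ------------------------------------------------------------------------------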
@@ -1311,144 +1682,340 @@ def save(operator, if f.material_index >= ma_ls_len: f.material_index = 0 - # ob_derived_eval.to_mesh_clear() - #if free: - # free_derived_objects(ob) - - # Make material chunks for all materials used in the meshes: + # Make MATERIAL chunks for all materials used in the meshes for ma_image in materialDict.values(): object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1])) - # Give all objects a unique ID and build a dictionary from object name to object id: - translation = {} # collect translation for transformation matrix - #name_to_id = {} - for ob, data, matrix in mesh_objects: - translation[ob.name] = ob.location - #name_to_id[ob.name]= len(name_to_id) - """ - #for ob in empty_objects: - # name_to_id[ob.name]= len(name_to_id) - """ + # Add MASTERSCALE element + mscale = _3ds_chunk(MASTERSCALE) + mscale.add_variable("scale", _3ds_float(1.0)) + object_info.add_subchunk(mscale) - # Create object chunks for all meshes: + # Add 3D cursor location + if use_cursor: + cursor_chunk = _3ds_chunk(O_CONSTS) + cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location)) + object_info.add_subchunk(cursor_chunk) + + # Add AMBIENT color + if world is not None and 'WORLD' in object_filter: + ambient_chunk = _3ds_chunk(AMBIENTLIGHT) + ambient_light = _3ds_chunk(RGB) + ambient_light.add_variable("ambient", _3ds_float_color(world.color)) + ambient_chunk.add_subchunk(ambient_light) + object_info.add_subchunk(ambient_chunk) + + # Add BACKGROUND and BITMAP + if world.use_nodes: + bgtype = 'BACKGROUND' + ntree = world.node_tree.links + background_color_chunk = _3ds_chunk(RGB) + background_chunk = _3ds_chunk(SOLIDBACKGND) + background_flag = _3ds_chunk(USE_SOLIDBGND) + bgmixer = 'BACKGROUND', 'MIX', 'MIX_RGB' + bgshade = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_WORLD' + bg_tex = 'TEX_IMAGE', 'TEX_ENVIRONMENT' + bg_color = next((lk.from_node.inputs[0].default_value[:3] for lk in ntree if lk.from_node.type == bgtype and lk.to_node.type in bgshade), world.color) + bg_mixer = next((lk.from_node.type for lk in ntree if lk.from_node.type in bgmixer and lk.to_node.type == bgtype), bgtype) + bg_image = next((lk.from_node.image for lk in ntree if lk.from_node.type in bg_tex and lk.to_node.type == bg_mixer), False) + gradient = next((lk.from_node.color_ramp.elements for lk in ntree if lk.from_node.type == 'VALTORGB' and lk.to_node.type in bgmixer), False) + background_color_chunk.add_variable("color", _3ds_float_color(bg_color)) + background_chunk.add_subchunk(background_color_chunk) + if bg_image and bg_image is not None: + background_image = _3ds_chunk(BITMAP) + background_flag = _3ds_chunk(USE_BITMAP) + background_image.add_variable("image", _3ds_string(sane_name(bg_image.name))) + object_info.add_subchunk(background_image) + object_info.add_subchunk(background_chunk) + + # Add VGRADIENT chunk + if gradient and len(gradient) >= 3: + gradient_chunk = _3ds_chunk(VGRADIENT) + background_flag = _3ds_chunk(USE_VGRADIENT) + gradient_chunk.add_variable("midpoint", _3ds_float(gradient[1].position)) + gradient_topcolor_chunk = _3ds_chunk(RGB) + gradient_topcolor_chunk.add_variable("color", _3ds_float_color(gradient[2].color[:3])) + gradient_chunk.add_subchunk(gradient_topcolor_chunk) + gradient_midcolor_chunk = _3ds_chunk(RGB) + gradient_midcolor_chunk.add_variable("color", _3ds_float_color(gradient[1].color[:3])) + gradient_chunk.add_subchunk(gradient_midcolor_chunk) + gradient_lowcolor_chunk = _3ds_chunk(RGB) + gradient_lowcolor_chunk.add_variable("color", 
_3ds_float_color(gradient[0].color[:3])) + gradient_chunk.add_subchunk(gradient_lowcolor_chunk) + object_info.add_subchunk(gradient_chunk) + object_info.add_subchunk(background_flag) + + # Add FOG + fognode = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_ABSORPTION' and lk.to_socket.node.type in bgshade), False) + if fognode: + fog_chunk = _3ds_chunk(FOG) + fog_color_chunk = _3ds_chunk(RGB) + use_fog_flag = _3ds_chunk(USE_FOG) + fog_density = fognode.inputs['Density'].default_value * 100 + fog_color_chunk.add_variable("color", _3ds_float_color(fognode.inputs[0].default_value[:3])) + fog_chunk.add_variable("nearplane", _3ds_float(world.mist_settings.start)) + fog_chunk.add_variable("nearfog", _3ds_float(fog_density * 0.5)) + fog_chunk.add_variable("farplane", _3ds_float(world.mist_settings.depth)) + fog_chunk.add_variable("farfog", _3ds_float(fog_density + fog_density * 0.5)) + fog_chunk.add_subchunk(fog_color_chunk) + object_info.add_subchunk(fog_chunk) + + # Add LAYER FOG + foglayer = next((lk.from_socket.node for lk in ntree if lk.from_socket.node.type == 'VOLUME_SCATTER' and lk.to_socket.node.type in bgshade), False) + if foglayer: + layerfog_flag = 0 + if world.mist_settings.falloff == 'QUADRATIC': + layerfog_flag |= 0x1 + if world.mist_settings.falloff == 'INVERSE_QUADRATIC': + layerfog_flag |= 0x2 + layerfog_chunk = _3ds_chunk(LAYER_FOG) + layerfog_color_chunk = _3ds_chunk(RGB) + use_fog_flag = _3ds_chunk(USE_LAYER_FOG) + layerfog_color_chunk.add_variable("color", _3ds_float_color(foglayer.inputs[0].default_value[:3])) + layerfog_chunk.add_variable("lowZ", _3ds_float(world.mist_settings.start)) + layerfog_chunk.add_variable("highZ", _3ds_float(world.mist_settings.height)) + layerfog_chunk.add_variable("density", _3ds_float(foglayer.inputs[1].default_value)) + layerfog_chunk.add_variable("flags", _3ds_uint(layerfog_flag)) + layerfog_chunk.add_subchunk(layerfog_color_chunk) + object_info.add_subchunk(layerfog_chunk) + if fognode or foglayer and layer.use_pass_mist: + object_info.add_subchunk(use_fog_flag) + if use_keyframes and world.animation_data or (world.node_tree and world.node_tree.animation_data): + kfdata.add_subchunk(make_ambient_node(world)) + + # Collect translation for transformation matrix + translation = {} + rotation = {} + scale = {} + + # Give all objects a unique ID and build a dictionary from object name to object id + object_id = {} + name_id = {} + + for ob, data, matrix in mesh_objects: + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() + name_id[ob.name] = len(name_id) + object_id[ob.name] = len(object_id) + + for ob in empty_objects: + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() + name_id[ob.name] = len(name_id) + + for ob in light_objects: + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() + name_id[ob.name] = len(name_id) + object_id[ob.name] = len(object_id) + + for ob in camera_objects: + translation[ob.name] = mtx_scale @ ob.location + rotation[ob.name] = ob.rotation_euler + scale[ob.name] = mtx_scale.copy() + name_id[ob.name] = len(name_id) + object_id[ob.name] = len(object_id) + + # Create object chunks for all meshes i = 0 for ob, mesh, matrix in mesh_objects: - # create a new object chunk object_chunk = _3ds_chunk(OBJECT) - # set the object name + # Set the object name 
object_chunk.add_variable("name", _3ds_string(sane_name(ob.name))) - # make a mesh chunk out of the mesh: + # Make a mesh chunk out of the mesh object_chunk.add_subchunk(make_mesh_chunk(ob, mesh, matrix, materialDict, translation)) - # ensure the mesh has no over sized arrays - # skip ones that do!, otherwise we cant write since the array size wont - # fit into USHORT. + # Add hierachy chunk with ID from object_id dictionary + if use_hierarchy: + obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY) + obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name])) + + # Add parent chunk if object has a parent + if ob.parent is not None and (ob.parent.name in object_id): + obj_parent_chunk = _3ds_chunk(OBJECT_PARENT) + obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name])) + obj_hierarchy_chunk.add_subchunk(obj_parent_chunk) + object_chunk.add_subchunk(obj_hierarchy_chunk) + + # ensure the mesh has no over sized arrays - skip ones that do! + # Otherwise we cant write since the array size wont fit into USHORT if object_chunk.validate(): object_info.add_subchunk(object_chunk) else: operator.report({'WARNING'}, "Object %r can't be written into a 3DS file") - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - # make a kf object node for the object: - kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) - ''' - - # if not blender_mesh.users: - # bpy.data.meshes.remove(blender_mesh) - #blender_mesh.vertices = None + # Export object node + if use_keyframes: + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) i += i - # Create chunks for all empties: - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - for ob in empty_objects: - # Empties only require a kf object node: - kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) - pass - ''' + # Create chunks for all empties - only requires a object node + if use_keyframes: + for ob in empty_objects: + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) # Create light object chunks for ob in light_objects: object_chunk = _3ds_chunk(OBJECT) - light_chunk = _3ds_chunk(OBJECT_LIGHT) + obj_light_chunk = _3ds_chunk(OBJECT_LIGHT) color_float_chunk = _3ds_chunk(RGB) - energy_factor = _3ds_chunk(LIGHT_MULTIPLIER) + light_distance = translation[ob.name] + light_attenuate = _3ds_chunk(LIGHT_ATTENUATE) + light_inner_range = _3ds_chunk(LIGHT_INNER_RANGE) + light_outer_range = _3ds_chunk(LIGHT_OUTER_RANGE) + light_energy_factor = _3ds_chunk(LIGHT_MULTIPLIER) + light_ratio = ob.data.energy if ob.data.type == 'SUN' else ob.data.energy * 0.001 object_chunk.add_variable("light", _3ds_string(sane_name(ob.name))) - light_chunk.add_variable("location", _3ds_point_3d(ob.location)) + obj_light_chunk.add_variable("location", _3ds_point_3d(light_distance)) color_float_chunk.add_variable("color", _3ds_float_color(ob.data.color)) - energy_factor.add_variable("energy", _3ds_float(ob.data.energy * .001)) - light_chunk.add_subchunk(color_float_chunk) - light_chunk.add_subchunk(energy_factor) + light_outer_range.add_variable("distance", _3ds_float(ob.data.cutoff_distance)) + light_inner_range.add_variable("radius", _3ds_float(ob.data.shadow_soft_size * 100)) + light_energy_factor.add_variable("energy", _3ds_float(light_ratio)) + obj_light_chunk.add_subchunk(color_float_chunk) + obj_light_chunk.add_subchunk(light_outer_range) + obj_light_chunk.add_subchunk(light_inner_range) + obj_light_chunk.add_subchunk(light_energy_factor) + if ob.data.use_custom_distance: + 
obj_light_chunk.add_subchunk(light_attenuate) if ob.data.type == 'SPOT': cone_angle = math.degrees(ob.data.spot_size) - hotspot = cone_angle - (ob.data.spot_blend * math.floor(cone_angle)) - hypo = math.copysign(math.sqrt(pow(ob.location[0], 2) + pow(ob.location[1], 2)), ob.location[1]) - pos_x = ob.location[0] + (ob.location[1] * math.tan(ob.rotation_euler[2])) - pos_y = ob.location[1] + (ob.location[0] * math.tan(math.radians(90) - ob.rotation_euler[2])) - pos_z = hypo * math.tan(math.radians(90) - ob.rotation_euler[0]) + hot_spot = cone_angle - (ob.data.spot_blend * math.floor(cone_angle)) + spot_pos = calc_target(light_distance, rotation[ob.name].x, rotation[ob.name].z) spotlight_chunk = _3ds_chunk(LIGHT_SPOTLIGHT) - spot_roll_chunk = _3ds_chunk(LIGHT_SPOTROLL) - spotlight_chunk.add_variable("target", _3ds_point_3d((pos_x, pos_y, pos_z))) - spotlight_chunk.add_variable("hotspot", _3ds_float(round(hotspot, 4))) + spot_roll_chunk = _3ds_chunk(LIGHT_SPOT_ROLL) + spotlight_chunk.add_variable("target", _3ds_point_3d(spot_pos)) + spotlight_chunk.add_variable("hotspot", _3ds_float(round(hot_spot, 4))) spotlight_chunk.add_variable("angle", _3ds_float(round(cone_angle, 4))) - spot_roll_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6))) + spot_roll_chunk.add_variable("roll", _3ds_float(round(rotation[ob.name].y, 6))) spotlight_chunk.add_subchunk(spot_roll_chunk) - light_chunk.add_subchunk(spotlight_chunk) + if ob.data.use_shadow: + spot_shadow_flag = _3ds_chunk(LIGHT_SPOT_SHADOWED) + spot_shadow_chunk = _3ds_chunk(LIGHT_SPOT_LSHADOW) + spot_shadow_chunk.add_variable("bias", _3ds_float(round(ob.data.shadow_buffer_bias,4))) + spot_shadow_chunk.add_variable("filter", _3ds_float(round((ob.data.shadow_buffer_clip_start * 10),4))) + spot_shadow_chunk.add_variable("buffer", _3ds_ushort(0x200)) + spotlight_chunk.add_subchunk(spot_shadow_flag) + spotlight_chunk.add_subchunk(spot_shadow_chunk) + if ob.data.show_cone: + spot_cone_chunk = _3ds_chunk(LIGHT_SPOT_SEE_CONE) + spotlight_chunk.add_subchunk(spot_cone_chunk) + if ob.data.use_square: + spot_square_chunk = _3ds_chunk(LIGHT_SPOT_RECTANGLE) + spotlight_chunk.add_subchunk(spot_square_chunk) + if ob.scale.x and ob.scale.y != 0.0: + spot_aspect_chunk = _3ds_chunk(LIGHT_SPOT_ASPECT) + spot_aspect_chunk.add_variable("aspect", _3ds_float(round((ob.scale.x / ob.scale.y),4))) + spotlight_chunk.add_subchunk(spot_aspect_chunk) + if ob.data.use_nodes: + links = ob.data.node_tree.links + bptype = 'EMISSION' + bpmix = 'MIX', 'MIX_RGB', 'EMISSION' + bptex = 'TEX_IMAGE', 'TEX_ENVIRONMENT' + bpout = 'ADD_SHADER', 'MIX_SHADER', 'OUTPUT_LIGHT' + bshade = next((lk.from_node.type for lk in links if lk.from_node.type == bptype and lk.to_node.type in bpout), None) + bpnode = next((lk.from_node.type for lk in links if lk.from_node.type in bpmix and lk.to_node.type == bshade), bshade) + bitmap = next((lk.from_node.image for lk in links if lk.from_node.type in bptex and lk.to_node.type == bpnode), False) + if bitmap and bitmap is not None: + spot_projector_chunk = _3ds_chunk(LIGHT_SPOT_PROJECTOR) + spot_projector_chunk.add_variable("image", _3ds_string(sane_name(bitmap.name))) + spotlight_chunk.add_subchunk(spot_projector_chunk) + obj_light_chunk.add_subchunk(spotlight_chunk) - # Add light to object info - object_chunk.add_subchunk(light_chunk) + # Add light to object chunk + object_chunk.add_subchunk(obj_light_chunk) + + # Add hierachy chunks with ID from object_id dictionary + if use_hierarchy: + obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY) + 
obj_parent_chunk = _3ds_chunk(OBJECT_PARENT) + obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name])) + if ob.parent is not None and (ob.parent.name in object_id): + obj_parent_chunk = _3ds_chunk(OBJECT_PARENT) + obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name])) + obj_hierarchy_chunk.add_subchunk(obj_parent_chunk) + object_chunk.add_subchunk(obj_hierarchy_chunk) + + # Add light object and hierarchy chunks to object info object_info.add_subchunk(object_chunk) + # Export light and spotlight target node + if use_keyframes: + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) + if ob.data.type == 'SPOT': + kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale, name_id)) + # Create camera object chunks for ob in camera_objects: object_chunk = _3ds_chunk(OBJECT) camera_chunk = _3ds_chunk(OBJECT_CAMERA) - diagonal = math.copysign(math.sqrt(pow(ob.location[0], 2) + pow(ob.location[1], 2)), ob.location[1]) - focus_x = ob.location[0] + (ob.location[1] * math.tan(ob.rotation_euler[2])) - focus_y = ob.location[1] + (ob.location[0] * math.tan(math.radians(90) - ob.rotation_euler[2])) - focus_z = diagonal * math.tan(math.radians(90) - ob.rotation_euler[0]) + crange_chunk = _3ds_chunk(OBJECT_CAM_RANGES) + camera_distance = translation[ob.name] + camera_target = calc_target(camera_distance, rotation[ob.name].x, rotation[ob.name].z) object_chunk.add_variable("camera", _3ds_string(sane_name(ob.name))) - camera_chunk.add_variable("location", _3ds_point_3d(ob.location)) - camera_chunk.add_variable("target", _3ds_point_3d((focus_x, focus_y, focus_z))) - camera_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6))) + camera_chunk.add_variable("location", _3ds_point_3d(camera_distance)) + camera_chunk.add_variable("target", _3ds_point_3d(camera_target)) + camera_chunk.add_variable("roll", _3ds_float(round(rotation[ob.name].y, 6))) camera_chunk.add_variable("lens", _3ds_float(ob.data.lens)) + crange_chunk.add_variable("clipstart", _3ds_float(ob.data.clip_start * 0.1)) + crange_chunk.add_variable("clipend", _3ds_float(ob.data.clip_end * 0.1)) + camera_chunk.add_subchunk(crange_chunk) object_chunk.add_subchunk(camera_chunk) + + # Add hierachy chunks with ID from object_id dictionary + if use_hierarchy: + obj_hierarchy_chunk = _3ds_chunk(OBJECT_HIERARCHY) + obj_parent_chunk = _3ds_chunk(OBJECT_PARENT) + obj_hierarchy_chunk.add_variable("hierarchy", _3ds_ushort(object_id[ob.name])) + if ob.parent is not None and (ob.parent.name in object_id): + obj_parent_chunk = _3ds_chunk(OBJECT_PARENT) + obj_parent_chunk.add_variable("parent", _3ds_ushort(object_id[ob.parent.name])) + obj_hierarchy_chunk.add_subchunk(obj_parent_chunk) + object_chunk.add_subchunk(obj_hierarchy_chunk) + + # Add light object and hierarchy chunks to object info object_info.add_subchunk(object_chunk) - # Add main object info chunk to primary chunk: + # Export camera and target node + if use_keyframes: + kfdata.add_subchunk(make_object_node(ob, translation, rotation, scale, name_id)) + kfdata.add_subchunk(make_target_node(ob, translation, rotation, scale, name_id)) + + # Add main object info chunk to primary chunk primary.add_subchunk(object_info) - ''' # COMMENTED OUT FOR 2.42 RELEASE!! 
CRASHES 3DS MAX - # Add main keyframe data chunk to primary chunk: - primary.add_subchunk(kfdata) - ''' + # Add main keyframe data chunk to primary chunk + if use_keyframes: + primary.add_subchunk(kfdata) - # At this point, the chunk hierarchy is completely built. - - # Check the size: + # The chunk hierarchy is completely built, now check the size primary.get_size() - # Open the file for writing: + + # Open the file for writing file = open(filepath, 'wb') - # Recursively write the chunks to file: + # Recursively write the chunks to file primary.write(file) - # Close the file: + # Close the file file.close() # Clear name mapping vars, could make locals too del name_unique[:] name_mapping.clear() - # Debugging only: report the exporting time: - # Blender.Window.WaitCursor(0) + # Debugging only: report the exporting time + context.window.cursor_set('DEFAULT') print("3ds export time: %.2f" % (time.time() - duration)) - # Debugging only: dump the chunk hierarchy: + # Debugging only: dump the chunk hierarchy # primary.dump() - return {'FINISHED'} + return {'FINISHED'} \ No newline at end of file diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py index 833c43a..9a4d40f 100644 --- a/io_scene_3ds/import_3ds.py +++ b/io_scene_3ds/import_3ds.py @@ -1,59 +1,58 @@ -# ##### BEGIN GPL LICENSE BLOCK ##### -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-# -# ##### END GPL LICENSE BLOCK ##### - -# Script copyright (C) Bob Holcomb -# Contributors: Bob Holcomb, Richard L?rk?ng, Damien McGinnes, Sebastian Sille -# Campbell Barton, Mario Lapin, Dominique Lorre, Andreas Atteneder +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright 2005 Bob Holcomb import os -import time -import struct import bpy +import time import math +import struct import mathutils +from bpy_extras.image_utils import load_image from bpy_extras.node_shader_utils import PrincipledBSDFWrapper BOUNDS_3DS = [] -###################################################### -# Data Structures -###################################################### +################### +# Data Structures # +################### # Some of the chunks that we will see -# ----- Primary Chunk, at the beginning of each file +# >----- Primary Chunk, at the beginning of each file PRIMARY = 0x4D4D -# ------ Main Chunks +# >----- Main Chunks OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information VERSION = 0x0002 # This gives the version of the .3ds file EDITKEYFRAME = 0xB000 # This is the header for all of the key frame info -# ------ Data Chunks, used for various attributes -PERCENTAGE_SHORT = 0x30 -PERCENTAGE_FLOAT = 0x31 +# >----- Data Chunks, used for various attributes +COLOR_F = 0x0010 # color defined as 3 floats +COLOR_24 = 0x0011 # color defined as 3 bytes +LIN_COLOR_24 = 0x0012 # linear byte color +LIN_COLOR_F = 0x0013 # linear float color +PCT_SHORT = 0x0030 # percentage short +PCT_FLOAT = 0x0031 # percentage float +MASTERSCALE = 0x0100 # Master scale factor -# ------ sub defines of OBJECTINFO +# >----- sub defines of OBJECTINFO +BITMAP = 0x1100 # The background image name +USE_BITMAP = 0x1101 # The background image flag +SOLIDBACKGND = 0x1200 # The background color (RGB) +USE_SOLIDBGND = 0x1201 # The background color flag +VGRADIENT = 0x1300 # The background gradient colors +USE_VGRADIENT = 0x1301 # The background gradient flag +O_CONSTS = 0x1500 # The origin of the 3D cursor +AMBIENTLIGHT = 0x2100 # The color of the ambient light +FOG = 0x2200 # The fog atmosphere settings +USE_FOG = 0x2201 # The fog atmosphere flag +FOG_BGND = 0x2210 # The fog atmosphere background flag +LAYER_FOG = 0x2302 # The fog layer atmosphere settings +USE_LAYER_FOG = 0x2303 # The fog layer atmosphere flag MATERIAL = 0xAFFF # This stored the texture info OBJECT = 0x4000 # This stores the faces, vertices, etc... 
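# --- Editorial sketch, not part of the patch ---------------------------------
# The IDs above and below are matched against the 6-byte header that starts
# every 3ds chunk: a little-endian unsigned short chunk ID followed by an
# unsigned int chunk length (which, as the format is commonly documented,
# counts the header itself). A minimal standalone reader for that header:
import io
import struct

def read_chunk_header(fp):
    data = fp.read(6)
    if len(data) < 6:
        return None, 0
    chunk_id, chunk_length = struct.unpack('<HI', data)
    return chunk_id, chunk_length

# Example: a PRIMARY (0x4D4D) chunk claiming 20 bytes in total.
cid, clen = read_chunk_header(io.BytesIO(struct.pack('<HI', 0x4D4D, 20)))
assert cid == 0x4D4D and clen == 20
# ------------------------------------------------------------------------------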
# >------ sub defines of MATERIAL -# ------ sub defines of MATERIAL_BLOCK MAT_NAME = 0xA000 # This holds the material name MAT_AMBIENT = 0xA010 # Ambient color of the object/material MAT_DIFFUSE = 0xA020 # This holds the color of the object/material @@ -62,11 +61,23 @@ MAT_SHINESS = 0xA040 # Roughness of the object/material (percent) MAT_SHIN2 = 0xA041 # Shininess of the object/material (percent) MAT_SHIN3 = 0xA042 # Reflection of the object/material (percent) MAT_TRANSPARENCY = 0xA050 # Transparency value of material (percent) -MAT_SELF_ILLUM = 0xA080 # Self Illumination value of material +MAT_XPFALL = 0xA052 # Transparency falloff value +MAT_REFBLUR = 0xA053 # Reflection blurring value +MAT_SELF_ILLUM = 0xA080 # # Material self illumination flag +MAT_TWO_SIDE = 0xA081 # Material is two sided flag +MAT_DECAL = 0xA082 # Material mapping is decaled flag +MAT_ADDITIVE = 0xA083 # Material has additive transparency flag MAT_SELF_ILPCT = 0xA084 # Self illumination strength (percent) -MAT_WIRE = 0xA085 # Only render's wireframe +MAT_WIRE = 0xA085 # Material wireframe rendered flag +MAT_FACEMAP = 0xA088 # Face mapped textures flag +MAT_PHONGSOFT = 0xA08C # Phong soften material flag +MAT_WIREABS = 0xA08E # Wire size in units flag +MAT_WIRESIZE = 0xA087 # Rendered wire size in pixels MAT_SHADING = 0xA100 # Material shading method +MAT_USE_XPFALL = 0xA240 # Transparency falloff flag +MAT_USE_REFBLUR = 0xA250 # Reflection blurring flag +# >------ sub defines of MATERIAL_MAP MAT_TEXTURE_MAP = 0xA200 # This is a header for a new texture map MAT_SPECULAR_MAP = 0xA204 # This is a header for a new specular map MAT_OPACITY_MAP = 0xA210 # This is a header for a new opacity map @@ -77,46 +88,47 @@ MAT_TEX2_MAP = 0xA33A # This is a header for a secondary texture MAT_SHIN_MAP = 0xA33C # This is a header for a new roughness map MAT_SELFI_MAP = 0xA33D # This is a header for a new emission map MAT_MAP_FILEPATH = 0xA300 # This holds the file name of the texture - -MAT_MAP_TILING = 0xa351 # 2nd bit (from LSB) is mirror UV flag -MAT_MAP_USCALE = 0xA354 # U axis scaling -MAT_MAP_VSCALE = 0xA356 # V axis scaling +MAT_MAP_TILING = 0xA351 # 2nd bit (from LSB) is mirror UV flag +MAT_MAP_USCALE = 0xA354 # U axis scaling +MAT_MAP_VSCALE = 0xA356 # V axis scaling MAT_MAP_UOFFSET = 0xA358 # U axis offset MAT_MAP_VOFFSET = 0xA35A # V axis offset -MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad +MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad MAT_MAP_COL1 = 0xA360 # Map Color1 MAT_MAP_COL2 = 0xA362 # Map Color2 MAT_MAP_RCOL = 0xA364 # Red mapping MAT_MAP_GCOL = 0xA366 # Green mapping MAT_MAP_BCOL = 0xA368 # Blue mapping -MAT_FLOAT_COLOR = 0x0010 # color defined as 3 floats -MAT_24BIT_COLOR = 0x0011 # color defined as 3 bytes # >------ sub defines of OBJECT OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object -OBJECT_LIGHT = 0x4600 # This lets un know we are reading a light object -OBJECT_LIGHT_SPOT = 0x4610 # The light is a spotloght. -OBJECT_LIGHT_OFF = 0x4620 # The light off. 
-OBJECT_LIGHT_ATTENUATE = 0x4625 -OBJECT_LIGHT_RAYSHADE = 0x4627 -OBJECT_LIGHT_SHADOWED = 0x4630 -OBJECT_LIGHT_LOCAL_SHADOW = 0x4640 -OBJECT_LIGHT_LOCAL_SHADOW2 = 0x4641 -OBJECT_LIGHT_SEE_CONE = 0x4650 -OBJECT_LIGHT_SPOT_RECTANGULAR = 0x4651 -OBJECT_LIGHT_SPOT_OVERSHOOT = 0x4652 -OBJECT_LIGHT_SPOT_PROJECTOR = 0x4653 -OBJECT_LIGHT_EXCLUDE = 0x4654 -OBJECT_LIGHT_RANGE = 0x4655 -OBJECT_LIGHT_ROLL = 0x4656 -OBJECT_LIGHT_SPOT_ASPECT = 0x4657 -OBJECT_LIGHT_RAY_BIAS = 0x4658 -OBJECT_LIGHT_INNER_RANGE = 0x4659 -OBJECT_LIGHT_OUTER_RANGE = 0x465A -OBJECT_LIGHT_MULTIPLIER = 0x465B -OBJECT_LIGHT_AMBIENT_LIGHT = 0x4680 +OBJECT_LIGHT = 0x4600 # This lets us know we are reading a light object +OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object +OBJECT_HIERARCHY = 0x4F00 # This lets us know the hierachy id of the object +OBJECT_PARENT = 0x4F10 # This lets us know the parent id of the object -OBJECT_CAMERA = 0x4700 # This lets un know we are reading a camera object +# >------ Sub defines of LIGHT +LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight +LIGHT_OFF = 0x4620 # The light is off +LIGHT_ATTENUATE = 0x4625 # Light attenuate flag +LIGHT_RAYSHADE = 0x4627 # Light rayshading flag +LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag +LIGHT_LOCAL_SHADOW = 0x4640 # Light shadow values 1 +LIGHT_LOCAL_SHADOW2 = 0x4641 # Light shadow values 2 +LIGHT_SPOT_SEE_CONE = 0x4650 # Light spot cone flag +LIGHT_SPOT_RECTANGLE = 0x4651 # Light spot rectangle flag +LIGHT_SPOT_OVERSHOOT = 0x4652 # Light spot overshoot flag +LIGHT_SPOT_PROJECTOR = 0x4653 # Light spot bitmap name +LIGHT_EXCLUDE = 0x4654 # Light excluded objects +LIGHT_RANGE = 0x4655 # Light range +LIGHT_SPOT_ROLL = 0x4656 # The roll angle of the spot +LIGHT_SPOT_ASPECT = 0x4657 # Light spot aspect flag +LIGHT_RAY_BIAS = 0x4658 # Light ray bias value +LIGHT_INNER_RANGE = 0x4659 # The light inner range +LIGHT_OUTER_RANGE = 0x465A # The light outer range +LIGHT_MULTIPLIER = 0x465B # The light energy factor +LIGHT_ATTENUATE = 0x4625 # Light attenuation flag +LIGHT_AMBIENT_LIGHT = 0x4680 # Light ambient flag # >------ sub defines of CAMERA OBJECT_CAM_RANGES = 0x4720 # The camera range values @@ -125,40 +137,42 @@ OBJECT_CAM_RANGES = 0x4720 # The camera range values OBJECT_VERTICES = 0x4110 # The objects vertices OBJECT_VERTFLAGS = 0x4111 # The objects vertex flags OBJECT_FACES = 0x4120 # The objects faces -OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color -OBJECT_UV = 0x4140 # The UV texture coordinates -OBJECT_SMOOTH = 0x4150 # The Object smooth groups -OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix +OBJECT_MATERIAL = 0x4130 # The objects face material +OBJECT_UV = 0x4140 # The vertex UV texture coordinates +OBJECT_SMOOTH = 0x4150 # The objects face smooth groups +OBJECT_TRANS_MATRIX = 0x4160 # The objects Matrix # >------ sub defines of EDITKEYFRAME -KFDATA_AMBIENT = 0xB001 -KFDATA_OBJECT = 0xB002 -KFDATA_CAMERA = 0xB003 -KFDATA_TARGET = 0xB004 -KFDATA_LIGHT = 0xB005 -KFDATA_L_TARGET = 0xB006 -KFDATA_SPOTLIGHT = 0xB007 -KFDATA_KFSEG = 0xB008 -# KFDATA_CURTIME = 0xB009 -# KFDATA_KFHDR = 0xB00A +KF_AMBIENT = 0xB001 # Keyframe ambient node +KF_OBJECT = 0xB002 # Keyframe object node +KF_OBJECT_CAMERA = 0xB003 # Keyframe camera node +KF_TARGET_CAMERA = 0xB004 # Keyframe target node +KF_OBJECT_LIGHT = 0xB005 # Keyframe light node +KF_TARGET_LIGHT = 0xB006 # Keyframe light target node +KF_OBJECT_SPOT_LIGHT = 0xB007 # Keyframe spotlight node +KFDATA_KFSEG = 0xB008 # Keyframe start and stop 
+KFDATA_CURTIME = 0xB009 # Keyframe current frame +KFDATA_KFHDR = 0xB00A # Keyframe node header + # >------ sub defines of KEYFRAME_NODE -OBJECT_NODE_HDR = 0xB010 -OBJECT_INSTANCE_NAME = 0xB011 -# OBJECT_PRESCALE = 0xB012 -OBJECT_PIVOT = 0xB013 -# OBJECT_BOUNDBOX = 0xB014 -# MORPH_SMOOTH = 0xB015 -POS_TRACK_TAG = 0xB020 -ROT_TRACK_TAG = 0xB021 -SCL_TRACK_TAG = 0xB022 -FOV_TRACK_TAG = 0xB023 -ROLL_TRACK_TAG = 0xB024 -COL_TRACK_TAG = 0xB025 -# MORPH_TRACK_TAG = 0xB026 -# HOTSPOT_TRACK_TAG = 0xB027 -# FALLOFF_TRACK_TAG = 0xB028 -# HIDE_TRACK_TAG = 0xB029 -# OBJECT_NODE_ID = 0xB030 +OBJECT_NODE_HDR = 0xB010 # Keyframe object node header +OBJECT_INSTANCE_NAME = 0xB011 # Keyframe object name for dummy objects +OBJECT_PRESCALE = 0xB012 # Keyframe object prescale +OBJECT_PIVOT = 0xB013 # Keyframe object pivot position +OBJECT_BOUNDBOX = 0xB014 # Keyframe object boundbox +MORPH_SMOOTH = 0xB015 # Auto smooth angle for keyframe mesh objects +POS_TRACK_TAG = 0xB020 # Keyframe object position track +ROT_TRACK_TAG = 0xB021 # Keyframe object rotation track +SCL_TRACK_TAG = 0xB022 # Keyframe object scale track +FOV_TRACK_TAG = 0xB023 # Keyframe camera field of view track +ROLL_TRACK_TAG = 0xB024 # Keyframe camera roll track +COL_TRACK_TAG = 0xB025 # Keyframe light color track +MORPH_TRACK_TAG = 0xB026 # Keyframe object morph smooth track +HOTSPOT_TRACK_TAG = 0xB027 # Keyframe spotlight hotspot track +FALLOFF_TRACK_TAG = 0xB028 # Keyframe spotlight falloff track +HIDE_TRACK_TAG = 0xB029 # Keyframe object hide track +OBJECT_NODE_ID = 0xB030 # Keyframe object node id +PARENT_NAME = 0x80F0 # Object parent name tree (dot seperated) ROOT_OBJECT = 0xFFFF @@ -166,6 +180,7 @@ global scn scn = None object_dictionary = {} +parent_dictionary = {} object_matrix = {} @@ -176,7 +191,7 @@ class Chunk: "bytes_read", ) # we don't read in the bytes_read, we compute that - binary_format = " 0: bmesh.polygons[f].use_smooth = True + else: + bmesh.polygons[f].use_smooth = False + else: + for poly in bmesh.polygons: + poly.use_smooth = False if contextMatrix: - ob.matrix_local = contextMatrix + if WORLD_MATRIX: + ob.matrix_world = contextMatrix + else: + ob.matrix_local = contextMatrix object_matrix[ob] = contextMatrix.copy() # a spare chunk @@ -469,23 +480,35 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE temp_chunk = Chunk() CreateBlenderObject = False - CreateLightObject = False CreateCameraObject = False + CreateLightObject = False + CreateTrackData = False - def read_float_color(temp_chunk): - temp_data = file.read(SZ_3FLOAT) - temp_chunk.bytes_read += SZ_3FLOAT - return [float(col) for col in struct.unpack('<3f', temp_data)] + CreateWorld = 'WORLD' in FILTER + CreateMesh = 'MESH' in FILTER + CreateLight = 'LIGHT' in FILTER + CreateCamera = 'CAMERA' in FILTER + CreateEmpty = 'EMPTY' in FILTER + + def read_short(temp_chunk): + temp_data = file.read(SZ_U_SHORT) + temp_chunk.bytes_read += SZ_U_SHORT + return struct.unpack(' len(childs_list): + parent_list[child_id] = parent_id + parent_list.extend([None] * (parent_id - len(parent_list))) + parent_list.insert(parent_id, contextObName) + elif parent_id < len(childs_list): + parent_list[child_id] = childs_list[parent_id] + + def calc_target(loca, target): + pan = tilt = 0.0 + plane = loca + target + angle = math.radians(90) # Target triangulation + check_sign = abs(loca.y) < abs(target.y) + check_axes = abs(loca.x - target.x) > abs(loca.y - target.y) + plane_y = plane.y if check_sign else -1 * plane.y + sign_xy = plane.x if check_axes else 
plane.y + axis_xy = plane_y if check_axes else plane.x + hyp = math.sqrt(pow(plane.x,2) + pow(plane.y,2)) + dia = math.sqrt(pow(hyp,2) + pow(plane.z,2)) + yaw = math.atan2(math.copysign(hyp, sign_xy), axis_xy) + bow = math.acos(hyp / dia) + turn = angle - yaw if check_sign else angle + yaw + tilt = angle - bow if loca.z > target.z else angle + bow + pan = yaw if check_axes else turn + return tilt, pan + + def read_track_data(track_chunk): + """Trackflags 0x1, 0x2 and 0x3 are for looping. 0x8, 0x10 and 0x20 + locks the XYZ axes. 0x100, 0x200 and 0x400 unlinks the XYZ axes.""" + tflags = read_short(track_chunk) + contextTrack_flag = tflags + temp_data = file.read(SZ_U_INT * 2) + track_chunk.bytes_read += SZ_U_INT * 2 + nkeys = read_long(track_chunk) + for i in range(nkeys): + nframe = read_long(track_chunk) + nflags = read_short(track_chunk) + for f in range(bin(nflags).count('1')): + temp_data = file.read(SZ_FLOAT) # Check for spline terms + track_chunk.bytes_read += SZ_FLOAT + trackdata = read_float_array(track_chunk) + keyframe_data[nframe] = trackdata + return keyframe_data + + def read_track_angle(track_chunk): + temp_data = file.read(SZ_U_SHORT * 5) + track_chunk.bytes_read += SZ_U_SHORT * 5 + nkeys = read_long(track_chunk) + for i in range(nkeys): + nframe = read_long(track_chunk) + nflags = read_short(track_chunk) + for f in range(bin(nflags).count('1')): + temp_data = file.read(SZ_FLOAT) # Check for spline terms + track_chunk.bytes_read += SZ_FLOAT + angle = read_float(track_chunk) + keyframe_angle[nframe] = math.radians(angle) + return keyframe_angle + dirname = os.path.dirname(file.name) # loop through all the data for this chunk (previous chunk) and see what it is while (previous_chunk.bytes_read < previous_chunk.length): read_chunk(file, new_chunk) - # is it a Version chunk? + # Check the Version chunk if new_chunk.ID == VERSION: # read in the version of the file - temp_data = file.read(struct.calcsize('I')) + temp_data = file.read(SZ_U_INT) version = struct.unpack(' 3: - print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version) + print("\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version) - # is it an object info chunk? + # The main object info chunk elif new_chunk.ID == OBJECTINFO: - process_next_chunk(context, file, new_chunk, imported_objects, IMAGE_SEARCH, KEYFRAME) + process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, FILTER, + IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE, MEASURE, CURSOR) # keep track of how much we read in the main chunk new_chunk.bytes_read += temp_chunk.bytes_read - # is it an object chunk? 
- elif new_chunk.ID == OBJECT: + # If cursor location + elif CURSOR and new_chunk.ID == O_CONSTS: + context.scene.cursor.location = read_float_array(new_chunk) - if CreateBlenderObject: - putContextMesh( - context, - contextMesh_vertls, - contextMesh_facels, - contextMesh_flag, - contextMeshMaterials, - contextMesh_smooth, - ) - contextMesh_vertls = [] - contextMesh_facels = [] - contextMeshMaterials = [] - contextMesh_flag = None - contextMesh_smooth = None - contextMeshUV = None - # Reset matrix - contextMatrix = None + # If ambient light chunk + elif CreateWorld and new_chunk.ID == AMBIENTLIGHT: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Ambient: " + realname) + context.scene.world = contextWorld + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + contextWorld.color[:] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + contextWorld.color[:] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read - CreateBlenderObject = True - contextObName, read_str_len = read_string(file) + # If background chunk + elif CreateWorld and new_chunk.ID == SOLIDBACKGND: + backgroundcolor = mathutils.Color((0.1, 0.1, 0.1)) + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Background: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + worldnodes = contextWorld.node_tree.nodes + backgroundnode = worldnodes['Background'] + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + backgroundcolor = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + backgroundcolor = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + backgroundmix = next((wn for wn in worldnodes if wn.type in {'MIX', 'MIX_RGB'}), False) + backgroundnode.inputs[0].default_value[:3] = backgroundcolor + if backgroundmix: + backgroundmix.inputs[2].default_value[:3] = backgroundcolor + new_chunk.bytes_read += temp_chunk.bytes_read + + # If bitmap chunk + elif CreateWorld and new_chunk.ID == BITMAP: + bitmap_name, read_str_len = read_string(file) + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Bitmap: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + bitmap_mix = nodes.new(type='ShaderNodeMixRGB') + bitmapnode = nodes.new(type='ShaderNodeTexEnvironment') + bitmap_mix.label = "Solid Color" + bitmapnode.label = "Bitmap: " + bitmap_name + bitmap_mix.inputs[2].default_value = nodes['Background'].inputs[0].default_value + bitmapnode.image = load_image(bitmap_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True) + bitmap_mix.inputs[0].default_value = 0.5 if bitmapnode.image is not None else 1.0 + bitmapnode.location = (-600, 360) if bitmapnode.image is not None else (-600, 300) + bitmap_mix.location = (-250, 300) + gradientnode = next((wn for wn in nodes if wn.type == 'VALTORGB'), False) + links.new(bitmap_mix.outputs['Color'], nodes['Background'].inputs[0]) + links.new(bitmapnode.outputs['Color'], bitmap_mix.inputs[1]) + if gradientnode: + links.new(bitmapnode.outputs['Color'], gradientnode.inputs[0]) new_chunk.bytes_read += read_str_len - # is it a material chunk? 
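# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the patch): the
# AMBIENTLIGHT / SOLIDBACKGND / BITMAP handlers above lazily create a single
# World datablock, enable nodes, and wire an optional Environment Texture
# through a MixRGB node into the stock 'Background' node. The helper below
# shows just that wiring in isolation; it must run inside Blender, and the
# function name and arguments are hypothetical. The real handlers also reuse
# an already-created world, place the nodes, and hook a gradient color ramp
# when one exists.
import bpy
from bpy_extras.image_utils import load_image

def build_world(name, solid_color, image_name=None, image_dir=""):
    world = bpy.data.worlds.new(name)
    world.use_nodes = True
    nodes, links = world.node_tree.nodes, world.node_tree.links
    background = nodes['Background']
    background.inputs[0].default_value[:3] = solid_color
    if image_name:
        mix = nodes.new(type='ShaderNodeMixRGB')
        env = nodes.new(type='ShaderNodeTexEnvironment')
        env.image = load_image(image_name, image_dir, place_holder=False, check_existing=True)
        mix.inputs[0].default_value = 0.5 if env.image else 1.0  # blend only if the bitmap loaded
        mix.inputs[2].default_value[:3] = solid_color             # solid color as second mix input
        links.new(env.outputs['Color'], mix.inputs[1])
        links.new(mix.outputs['Color'], background.inputs[0])
    return world

# context.scene.world = build_world("Background: example", (0.1, 0.1, 0.1))
# ---------------------------------------------------------------------------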
+ # If gradient chunk: + elif CreateWorld and new_chunk.ID == VGRADIENT: + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Gradient: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + gradientnode = nodes.new(type='ShaderNodeValToRGB') + gradientnode.location = (-600, 100) + gradientnode.label = "Gradient" + backgroundmix = next((wn for wn in worldnodes if wn.type in {'MIX', 'MIX_RGB'}), False) + bitmapnode = next((wn for wn in nodes if wn.type in {'TEX_IMAGE', 'TEX_ENVIRONMENT'}), False) + if backgroundmix: + links.new(gradientnode.outputs['Color'], backgroundmix.inputs[2]) + else: + links.new(gradientnode.outputs['Color'], nodes['Background'].inputs[0]) + if bitmapnode: + links.new(bitmapnode.outputs['Color'], gradientnode.inputs[0]) + gradientnode.color_ramp.elements.new(read_float(new_chunk)) + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + gradientnode.color_ramp.elements[2].color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + gradientnode.color_ramp.elements[2].color[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + gradientnode.color_ramp.elements[1].color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + gradientnode.color_ramp.elements[1].color[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + gradientnode.color_ramp.elements[0].color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + gradientnode.color_ramp.elements[0].color[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + + # If fog chunk: + elif CreateWorld and new_chunk.ID == FOG: + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("Fog: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + fognode = nodes.new(type='ShaderNodeVolumeAbsorption') + fognode.label = "Fog" + fognode.location = (10, 60) + volumemix = next((wn for wn in worldnodes if wn.label == 'Volume' and wn.type in {'ADD_SHADER', 'MIX_SHADER'}), False) + if volumemix: + links.new(fognode.outputs['Volume'], volumemix.inputs[1]) + else: + links.new(fognode.outputs[0], nodes['World Output'].inputs[1]) + contextWorld.mist_settings.use_mist = True + contextWorld.mist_settings.start = read_float(new_chunk) + nearfog = read_float(new_chunk) * 0.01 + contextWorld.mist_settings.depth = read_float(new_chunk) + farfog = read_float(new_chunk) * 0.01 + fognode.inputs[1].default_value = (nearfog + farfog) * 0.5 + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + fognode.inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + fognode.inputs[0].default_value[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + elif CreateWorld and new_chunk.ID == FOG_BGND: + pass + + # If layer fog chunk: + elif CreateWorld 
and new_chunk.ID == LAYER_FOG: + """Fog options flags are bit 20 (0x100000) for background fogging, + bit 0 (0x1) for bottom falloff, and bit 1 (0x2) for top falloff.""" + if contextWorld is None: + path, filename = os.path.split(file.name) + realname, ext = os.path.splitext(filename) + contextWorld = bpy.data.worlds.new("LayerFog: " + realname) + context.scene.world = contextWorld + contextWorld.use_nodes = True + links = contextWorld.node_tree.links + nodes = contextWorld.node_tree.nodes + mxvolume = nodes.new(type='ShaderNodeMixShader') + layerfog = nodes.new(type='ShaderNodeVolumeScatter') + layerfog.label = "Layer Fog" + mxvolume.label = "Volume" + layerfog.location = (10, -60) + mxvolume.location = (300, 50) + nodes['World Output'].location = (600, 200) + links.new(layerfog.outputs['Volume'], mxvolume.inputs[2]) + links.new(mxvolume.outputs[0], nodes['World Output'].inputs[1]) + fognode = next((wn for wn in worldnodes if wn.type == 'VOLUME_ABSORPTION'), False) + if fognode: + links.new(fognode.outputs['Volume'], mxvolume.inputs[1]) + context.view_layer.use_pass_mist = False + contextWorld.mist_settings.use_mist = True + contextWorld.mist_settings.start = read_float(new_chunk) + contextWorld.mist_settings.height = read_float(new_chunk) + layerfog.inputs[1].default_value = read_float(new_chunk) + layerfog_flag = read_long(new_chunk) + if layerfog_flag == 0: + contextWorld.mist_settings.falloff = 'LINEAR' + if layerfog_flag & 0x1: + contextWorld.mist_settings.falloff = 'QUADRATIC' + if layerfog_flag & 0x2: + contextWorld.mist_settings.falloff = 'INVERSE_QUADRATIC' + read_chunk(file, temp_chunk) + if temp_chunk.ID == COLOR_F: + layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == LIN_COLOR_F: + layerfog.inputs[0].default_value[:3] = read_float_array(temp_chunk) + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + elif CreateWorld and new_chunk.ID in {USE_FOG, USE_LAYER_FOG}: + context.view_layer.use_pass_mist = True + + # If material chunk elif new_chunk.ID == MATERIAL: + contextAlpha = True + contextColor = mathutils.Color((0.8, 0.8, 0.8)) contextMaterial = bpy.data.materials.new('Material') contextWrapper = PrincipledBSDFWrapper(contextMaterial, is_readonly=False, use_nodes=False) elif new_chunk.ID == MAT_NAME: material_name, read_str_len = read_string(file) - # plus one for the null character that ended the string new_chunk.bytes_read += read_str_len - contextMaterial.name = material_name.rstrip() # remove trailing whitespace + contextMaterial.name = material_name.rstrip() # remove trailing whitespace MATDICT[material_name] = contextMaterial elif new_chunk.ID == MAT_AMBIENT: read_chunk(file, temp_chunk) - # only available color is emission color - if temp_chunk.ID == MAT_FLOAT_COLOR: - contextMaterial.line_color[:3] = read_float_color(temp_chunk) - elif temp_chunk.ID == MAT_24BIT_COLOR: + # to not loose this data, ambient color is stored in line color + if temp_chunk.ID == COLOR_F: + contextMaterial.line_color[:3] = read_float_array(temp_chunk) + elif temp_chunk.ID == COLOR_24: contextMaterial.line_color[:3] = read_byte_color(temp_chunk) else: skip_to_end(file, temp_chunk) @@ -637,20 +915,21 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE elif new_chunk.ID == MAT_DIFFUSE: read_chunk(file, temp_chunk) - if temp_chunk.ID == MAT_FLOAT_COLOR: - contextMaterial.diffuse_color[:3] = read_float_color(temp_chunk) - elif temp_chunk.ID == MAT_24BIT_COLOR: - 
contextMaterial.diffuse_color[:3] = read_byte_color(temp_chunk) + if temp_chunk.ID == COLOR_F: + contextColor = mathutils.Color(read_float_array(temp_chunk)) + contextMaterial.diffuse_color[:3] = contextColor + elif temp_chunk.ID == COLOR_24: + contextColor = mathutils.Color(read_byte_color(temp_chunk)) + contextMaterial.diffuse_color[:3] = contextColor else: skip_to_end(file, temp_chunk) new_chunk.bytes_read += temp_chunk.bytes_read elif new_chunk.ID == MAT_SPECULAR: read_chunk(file, temp_chunk) - # Specular color is available - if temp_chunk.ID == MAT_FLOAT_COLOR: - contextMaterial.specular_color = read_float_color(temp_chunk) - elif temp_chunk.ID == MAT_24BIT_COLOR: + if temp_chunk.ID == COLOR_F: + contextMaterial.specular_color = read_float_array(temp_chunk) + elif temp_chunk.ID == COLOR_24: contextMaterial.specular_color = read_byte_color(temp_chunk) else: skip_to_end(file, temp_chunk) @@ -658,424 +937,567 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, IMAGE_SE elif new_chunk.ID == MAT_SHINESS: read_chunk(file, temp_chunk) - if temp_chunk.ID == PERCENTAGE_SHORT: - temp_data = file.read(SZ_U_SHORT) - temp_chunk.bytes_read += SZ_U_SHORT - contextMaterial.roughness = 1 - (float(struct.unpack('= 2: contextWrapper.use_nodes = True + contextWrapper.base_color = contextColor[:] + contextWrapper.metallic = contextMaterial.metallic + contextWrapper.roughness = contextMaterial.roughness + contextWrapper.specular = contextMaterial.specular_intensity + contextWrapper.specular_tint = contextMaterial.specular_color[:] contextWrapper.emission_color = contextMaterial.line_color[:3] contextWrapper.emission_strength = contextMaterial.line_priority / 100 - contextWrapper.base_color = contextMaterial.diffuse_color[:3] - contextWrapper.specular = contextMaterial.specular_intensity - contextWrapper.roughness = contextMaterial.roughness - contextWrapper.metallic = contextMaterial.metallic - contextWrapper.alpha = contextMaterial.diffuse_color[3] + contextWrapper.alpha = contextMaterial.diffuse_color[3] = contextAlpha contextWrapper.use_nodes = False if shading >= 3: contextWrapper.use_nodes = True elif new_chunk.ID == MAT_TEXTURE_MAP: - read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR") + read_texture(new_chunk, temp_chunk, "Diffuse", 'COLOR') elif new_chunk.ID == MAT_SPECULAR_MAP: - read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY") + read_texture(new_chunk, temp_chunk, "Specular", 'SPECULARITY') elif new_chunk.ID == MAT_OPACITY_MAP: - contextMaterial.blend_method = 'BLEND' - read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA") + read_texture(new_chunk, temp_chunk, "Opacity", 'ALPHA') elif new_chunk.ID == MAT_REFLECTION_MAP: - read_texture(new_chunk, temp_chunk, "Reflect", "METALLIC") + read_texture(new_chunk, temp_chunk, "Reflect", 'METALLIC') elif new_chunk.ID == MAT_BUMP_MAP: - read_texture(new_chunk, temp_chunk, "Bump", "NORMAL") + read_texture(new_chunk, temp_chunk, "Bump", 'NORMAL') elif new_chunk.ID == MAT_BUMP_PERCENT: read_chunk(file, temp_chunk) - if temp_chunk.ID == PERCENTAGE_SHORT: - temp_data = file.read(SZ_U_SHORT) - temp_chunk.bytes_read += SZ_U_SHORT - contextWrapper.normalmap_strength = (float(struct.unpack('= 0.01 else 0.1 + contextCamera.data.clip_start = startrange * CONSTRAIN + contextCamera.data.clip_end = read_float(new_chunk) * CONSTRAIN + elif CreateCameraObject and new_chunk.ID == OBJECT_HIERARCHY: # Hierarchy + child_id = get_hierarchy(new_chunk) + elif CreateCameraObject and new_chunk.ID == OBJECT_PARENT: + get_parent(new_chunk, 
child_id) + # start keyframe section elif new_chunk.ID == EDITKEYFRAME: pass - elif new_chunk.ID == KFDATA_KFSEG: - temp_data = file.read(struct.calcsize('I')) - start = struct.unpack('
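# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the patch): once the
# keyframe section has filled a dictionary such as keyframe_data
# ({frame: value}), those values end up as Blender keyframes. A minimal,
# hypothetical way to write a parsed position track onto an object is shown
# below; the actual importer resolves objects through object_dictionary and
# may transform the values (e.g. by the conversion matrix) before keyframing.
import bpy

def apply_position_track(ob, keyframe_data):
    """Insert one location keyframe per parsed track key (frame -> (x, y, z))."""
    for frame, location in sorted(keyframe_data.items()):
        ob.location = location
        ob.keyframe_insert(data_path="location", frame=frame)

# apply_position_track(bpy.context.object, {0: (0.0, 0.0, 0.0), 24: (1.0, 2.0, 3.0)})
# ---------------------------------------------------------------------------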