Node Wrangler: Improved accuracy on Align Nodes operator #104551
@@ -18,14 +18,14 @@ from bpy_extras import node_shader_utils
 ###################

 # Some of the chunks that we will export
-# ----- Primary Chunk, at the beginning of each file
+# >----- Primary Chunk, at the beginning of each file
 PRIMARY = 0x4D4D

-# ------ Main Chunks
+# >----- Main Chunks
 VERSION = 0x0002  # This gives the version of the .3ds file
 KFDATA = 0xB000  # This is the header for all of the key frame info

-# ------ sub defines of OBJECTINFO
+# >----- sub defines of OBJECTINFO
 OBJECTINFO = 0x3D3D  # Main mesh object chunk before the material and object information
 MESHVERSION = 0x3D3E  # This gives the version of the mesh
 AMBIENTLIGHT = 0x2100  # The color of the ambient light
@@ -41,9 +41,16 @@ MATSHINESS = 0xA040  # Specular intensity of the object/material (percent)
 MATSHIN2 = 0xA041  # Reflection of the object/material (percent)
 MATSHIN3 = 0xA042  # metallic/mirror of the object/material (percent)
 MATTRANS = 0xA050  # Transparency value (100-OpacityValue) (percent)
 MATSELFILLUM = 0xA080  # Material self illumination flag
+MATSELFILPCT = 0xA084  # Self illumination strength (percent)
+MATWIRE = 0xA085  # Material wireframe rendered flag
+MATFACEMAP = 0xA088  # Face mapped textures flag
+MATPHONGSOFT = 0xA08C  # Phong soften material flag
+MATWIREABS = 0xA08E  # Wire size in units flag
+MATWIRESIZE = 0xA087  # Rendered wire size in pixels
+MATSHADING = 0xA100  # Material shading method

 # >------ sub defines of MAT_MAP
 MAT_DIFFUSEMAP = 0xA200  # This is a header for a new diffuse texture
 MAT_SPECMAP = 0xA204  # head for specularity map
 MAT_OPACMAP = 0xA210  # head for opacity map
@@ -53,9 +60,7 @@ MAT_BUMP_PERCENT = 0xA252  # Normalmap strength (percent)
 MAT_TEX2MAP = 0xA33A  # head for secondary texture
 MAT_SHINMAP = 0xA33C  # head for roughness map
 MAT_SELFIMAP = 0xA33D  # head for emission map

 # >------ sub defines of MAT_MAP
-MATMAPFILE = 0xA300  # This holds the file name of a texture
+MAT_MAP_FILE = 0xA300  # This holds the file name of a texture
 MAT_MAP_TILING = 0xa351  # 2nd bit (from LSB) is mirror UV flag
 MAT_MAP_TEXBLUR = 0xA353  # Texture blurring factor
 MAT_MAP_USCALE = 0xA354  # U axis scaling
@@ -103,12 +108,6 @@ OBJECT_SMOOTH = 0x4150  # The objects smooth groups
 OBJECT_TRANS_MATRIX = 0x4160  # The Object Matrix

-# >------ sub defines of KFDATA
-KFDATA_KFHDR = 0xB00A
-KFDATA_KFSEG = 0xB008
-KFDATA_KFCURTIME = 0xB009
-KFDATA_OBJECT_NODE_TAG = 0xB002
-
 # >------ sub defines of OBJECT_NODE_TAG
 AMBIENT_NODE_TAG = 0xB001  # Ambient node tag
 OBJECT_NODE_TAG = 0xB002  # Object tree tag
 CAMERA_NODE_TAG = 0xB003  # Camera object tag
@@ -116,6 +115,11 @@ TARGET_NODE_TAG = 0xB004  # Camera target tag
 LIGHT_NODE_TAG = 0xB005  # Light object tag
+LTARGET_NODE_TAG = 0xB006  # Light target tag
 SPOT_NODE_TAG = 0xB007  # Spotlight tag
+KFDATA_KFSEG = 0xB008  # Frame start & end
+KFDATA_KFCURTIME = 0xB009  # Frame current
+KFDATA_KFHDR = 0xB00A  # Keyframe header

 # >------ sub defines of OBJECT_NODE_TAG
+OBJECT_NODE_ID = 0xB030  # Object hierarchy ID
 OBJECT_NODE_HDR = 0xB010  # Hierarchy tree header
 OBJECT_INSTANCE_NAME = 0xB011  # Object instance name
@@ -149,7 +153,7 @@ def sane_name(name):
     i = 0

     while new_name in name_unique:
-        new_name = new_name_clean + ".%.3d" % i
+        new_name = new_name_clean + '.%.3d' % i
         i += 1

     # note, appending the 'str' version.
@@ -178,7 +182,7 @@ class _3ds_ushort(object):
         return SZ_SHORT

     def write(self, file):
-        file.write(struct.pack("<H", self.value))
+        file.write(struct.pack('<H', self.value))

     def __str__(self):
         return str(self.value)
@@ -195,7 +199,7 @@ class _3ds_uint(object):
         return SZ_INT

     def write(self, file):
-        file.write(struct.pack("<I", self.value))
+        file.write(struct.pack('<I', self.value))

     def __str__(self):
         return str(self.value)
@@ -212,7 +216,7 @@ class _3ds_float(object):
         return SZ_FLOAT

     def write(self, file):
-        file.write(struct.pack("<f", self.value))
+        file.write(struct.pack('<f', self.value))

     def __str__(self):
         return str(self.value)
@@ -230,7 +234,7 @@ class _3ds_string(object):
         return (len(self.value) + 1)

     def write(self, file):
-        binary_format = "<%ds" % (len(self.value) + 1)
+        binary_format = '<%ds' % (len(self.value) + 1)
         file.write(struct.pack(binary_format, self.value))

     def __str__(self):
@@ -258,19 +262,19 @@ class _3ds_point_3d(object):
 '''
 class _3ds_point_4d(object):
     """Class representing a four-dimensional point for a 3ds file, for instance a quaternion."""
-    __slots__ = "x","y","z","w"
+    __slots__ = "w","x","y","z"
     def __init__(self, point=(0.0,0.0,0.0,0.0)):
-        self.x, self.y, self.z, self.w = point
+        self.w, self.x, self.y, self.z = point

     def get_size(self):
         return 4*SZ_FLOAT

     def write(self,file):
-        data=struct.pack('<4f', self.x, self.y, self.z, self.w)
+        data=struct.pack('<4f', self.w, self.x, self.y, self.z)
         file.write(data)

     def __str__(self):
-        return '(%f, %f, %f, %f)' % (self.x, self.y, self.z, self.w)
+        return '(%f, %f, %f, %f)' % (self.w, self.x, self.y, self.z)
 '''
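For context on the reordering above (the class sits in a commented-out block, but the same w-first layout is used elsewhere): mathutils quaternions are indexed (w, x, y, z), which the reordered __slots__ now mirrors. A minimal sketch of packing one with the same struct layout:

    import struct
    from mathutils import Quaternion

    q = Quaternion((0.0, 0.0, 1.0), 1.5708)    # axis, angle; stored as (w, x, y, z)
    packed = struct.pack('<4f', q.w, q.x, q.y, q.z)
    w, x, y, z = struct.unpack('<4f', packed)  # round-trips in the new component order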
@@ -342,15 +346,14 @@ class _3ds_face(object):

     def write(self, file):
         # The last short is used for face flags
-        file.write(struct.pack("<4H", self.vindex[0], self.vindex[1], self.vindex[2], self.flag))
+        file.write(struct.pack('<4H', self.vindex[0], self.vindex[1], self.vindex[2], self.flag))

     def __str__(self):
-        return "[%d %d %d %d]" % (self.vindex[0], self.vindex[1], self.vindex[2], self.flag)
+        return '[%d %d %d %d]' % (self.vindex[0], self.vindex[1], self.vindex[2], self.flag)


 class _3ds_array(object):
     """Class representing an array of variables for a 3ds file.

     Consists of a _3ds_ushort to indicate the number of items, followed by the items themselves.
     """
     __slots__ = "values", "size"
@@ -411,7 +414,6 @@ class _3ds_named_variable(object):
 # the chunk class
 class _3ds_chunk(object):
     """Class representing a chunk in a 3ds file.

     Chunks contain zero or more variables, followed by zero or more subchunks.
     """
     __slots__ = "ID", "size", "variables", "subchunks"
@@ -424,8 +426,8 @@ class _3ds_chunk(object):

     def add_variable(self, name, var):
         """Add a named variable.

         The name is mostly for debugging purposes."""
         self.variables.append(_3ds_named_variable(name, var))

     def add_subchunk(self, chunk):
@@ -434,8 +436,8 @@ class _3ds_chunk(object):

     def get_size(self):
         """Calculate the size of the chunk and return it.

         The sizes of the variables and subchunks are used to determine this chunk\'s size."""
         tmpsize = self.ID.get_size() + self.size.get_size()
         for variable in self.variables:
             tmpsize += variable.get_size()
@@ -459,8 +461,8 @@ class _3ds_chunk(object):

     def write(self, file):
         """Write the chunk to a file.

         Uses the write function of the variables and the subchunks to do the actual work."""
         # write header
         self.ID.write(file)
         self.size.write(file)
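A usage sketch of the chunk API above, assuming (as in the full exporter) that get_size() stores the computed total into each chunk's size field before write() streams the header and contents. PRIMARY and VERSION are the chunk IDs defined at the top of the file; the output path is hypothetical:

    primary = _3ds_chunk(PRIMARY)
    version_chunk = _3ds_chunk(VERSION)
    version_chunk.add_variable("version", _3ds_uint(3))
    primary.add_subchunk(version_chunk)

    primary.get_size()                   # recursively computes and fills in the sizes
    with open("minimal.3ds", "wb") as f:
        primary.write(f)                 # writes ID, size, then variables/subchunks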
@@ -471,12 +473,11 @@ class _3ds_chunk(object):

     def dump(self, indent=0):
         """Write the chunk to a file.

         Dump is used for debugging purposes, to dump the contents of a chunk to the standard output.
         Uses the dump function of the named variables and the subchunks to do the actual work."""
         print(indent * " ",
-              "ID=%r" % hex(self.ID.value),
-              "size=%r" % self.get_size())
+              'ID=%r' % hex(self.ID.value),
+              'size=%r' % self.get_size())
         for variable in self.variables:
             variable.dump(indent + 1)
         for subchunk in self.subchunks:
@@ -501,17 +502,16 @@ def get_material_image(material):
 def get_uv_image(ma):
     """ Get image from material wrapper."""
     if ma and ma.use_nodes:
-        ma_wrap = node_shader_utils.PrincipledBSDFWrapper(ma)
-        ma_tex = ma_wrap.base_color_texture
-        if ma_tex and ma_tex.image is not None:
-            return ma_tex.image
+        mat_wrap = node_shader_utils.PrincipledBSDFWrapper(ma)
+        mat_tex = mat_wrap.base_color_texture
+        if mat_tex and mat_tex.image is not None:
+            return mat_tex.image
     else:
         return get_material_image(ma)


 def make_material_subchunk(chunk_id, color):
     """Make a material subchunk.

     Used for color subchunks, such as diffuse color or ambient color subchunks."""
     mat_sub = _3ds_chunk(chunk_id)
     col1 = _3ds_chunk(RGB1)
@@ -536,20 +536,20 @@ def make_percent_subchunk(chunk_id, percent):
 def make_texture_chunk(chunk_id, images):
     """Make Material Map texture chunk."""
     # Add texture percentage value (100 = 1.0)
-    ma_sub = make_percent_subchunk(chunk_id, 1)
+    mat_sub = make_percent_subchunk(chunk_id, 1)
     has_entry = False

     def add_image(img):
         filename = bpy.path.basename(image.filepath)
-        ma_sub_file = _3ds_chunk(MATMAPFILE)
-        ma_sub_file.add_variable("image", _3ds_string(sane_name(filename)))
-        ma_sub.add_subchunk(ma_sub_file)
+        mat_sub_file = _3ds_chunk(MAT_MAP_FILE)
+        mat_sub_file.add_variable("image", _3ds_string(sane_name(filename)))
+        mat_sub.add_subchunk(mat_sub_file)

     for image in images:
         add_image(image)
         has_entry = True

-    return ma_sub if has_entry else None
+    return mat_sub if has_entry else None


 def make_material_texture_chunk(chunk_id, texslots, pct):
@@ -565,35 +565,29 @@ def make_material_texture_chunk(chunk_id, texslots, pct):
         image = texslot.image

         filename = bpy.path.basename(image.filepath)
-        mat_sub_file = _3ds_chunk(MATMAPFILE)
+        mat_sub_file = _3ds_chunk(MAT_MAP_FILE)
         mat_sub_file.add_variable("mapfile", _3ds_string(sane_name(filename)))
         mat_sub.add_subchunk(mat_sub_file)
         for link in texslot.socket_dst.links:
             socket = link.from_socket.identifier

-        maptile = 0
+        mat_sub_mapflags = _3ds_chunk(MAT_MAP_TILING)
+        mapflags = 0

         # no perfect mapping for mirror modes - 3DS only has uniform mirror w. repeat=2
-        if texslot.extension == 'EXTEND':
-            maptile |= 0x1
-        # CLIP maps to 3DS' decal flag
-        elif texslot.extension == 'CLIP':
-            maptile |= 0x10
-
-        mat_sub_tile = _3ds_chunk(MAT_MAP_TILING)
-        mat_sub_tile.add_variable("tiling", _3ds_ushort(maptile))
-        mat_sub.add_subchunk(mat_sub_tile)
+        if texslot.extension == 'EXTEND':  # decal flag
+            mapflags |= 0x1
+        if texslot.extension == 'CLIP':  # no wrap
+            mapflags |= 0x10

         if socket == 'Alpha':
-            mat_sub_alpha = _3ds_chunk(MAP_TILING)
-            alphaflag |= 0x40  # summed area sampling 0x20
-            mat_sub_alpha.add_variable("alpha", _3ds_ushort(alphaflag))
-            mat_sub.add_subchunk(mat_sub_alpha)
+            mapflags |= 0x40  # summed area sampling 0x20
         if texslot.socket_dst.identifier in {'Base Color', 'Specular'}:
-            mat_sub_tint = _3ds_chunk(MAP_TILING)  # RGB tint 0x200
-            tint |= 0x80 if texslot.image.colorspace_settings.name == 'Non-Color' else 0x200
-            mat_sub_tint.add_variable("tint", _3ds_ushort(tint))
-            mat_sub.add_subchunk(mat_sub_tint)
+            mapflags |= 0x80 if image.colorspace_settings.name == 'Non-Color' else 0x200  # RGB tint

+        mat_sub_mapflags.add_variable("mapflags", _3ds_ushort(mapflags))
+        mat_sub.add_subchunk(mat_sub_mapflags)

         mat_sub_texblur = _3ds_chunk(MAT_MAP_TEXBLUR)  # Based on observation this is usually 1.0
         mat_sub_texblur.add_variable("maptexblur", _3ds_float(1.0))
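The rewrite above collapses the separate tiling/alpha/tint subchunks into a single MAT_MAP_TILING bitfield. A sketch of how the bits combine, using the flag values from the diff (the slot state here is hypothetical):

    mapflags = 0
    extension, socket, colorspace = 'CLIP', 'Alpha', 'Non-Color'  # hypothetical slot

    if extension == 'EXTEND':
        mapflags |= 0x1        # decal
    if extension == 'CLIP':
        mapflags |= 0x10       # no wrap
    if socket == 'Alpha':
        mapflags |= 0x40       # use alpha (summed area sampling would be 0x20)
    mapflags |= 0x80 if colorspace == 'Non-Color' else 0x200  # tint bits
    print(hex(mapflags))       # -> 0xd0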
@@ -1089,7 +1083,6 @@ def make_kfdata(start=0, stop=0, curtime=0):

 def make_track_chunk(ID, obj):
     """Make a chunk for track data.

     Depending on the ID, this will construct a position, rotation or scale track."""
     track_chunk = _3ds_chunk(ID)
     track_chunk.add_variable("track_flags", _3ds_ushort())
@@ -1127,13 +1120,12 @@ def make_track_chunk(ID, obj):

 def make_kf_obj_node(obj, name_to_id):
     """Make a node chunk for a Blender object.

     Takes the Blender object as a parameter. Object id's are taken from the dictionary name_to_id.
     Blender Empty objects are converted to dummy nodes."""
     name = obj.name
     # main object node chunk:
-    kf_obj_node = _3ds_chunk(KFDATA_OBJECT_NODE_TAG)
+    kf_obj_node = _3ds_chunk(OBJECT_NODE_TAG)
     # chunk for the object id:
     obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID)
     # object id is from the name_to_id dictionary:
@@ -7,6 +7,7 @@ import struct
 import bpy
+import math
 import mathutils
 from bpy_extras.image_utils import load_image
 from bpy_extras.node_shader_utils import PrincipledBSDFWrapper

 BOUNDS_3DS = []
@@ -17,29 +18,29 @@ BOUNDS_3DS = []
 ###################

 # Some of the chunks that we will see
-# ----- Primary Chunk, at the beginning of each file
+# >----- Primary Chunk, at the beginning of each file
 PRIMARY = 0x4D4D

-# ------ Main Chunks
+# >----- Main Chunks
 OBJECTINFO = 0x3D3D  # This gives the version of the mesh and is found right before the material and object information
 VERSION = 0x0002  # This gives the version of the .3ds file
 AMBIENTLIGHT = 0x2100  # The color of the ambient light
 EDITKEYFRAME = 0xB000  # This is the header for all of the key frame info

-# ------ Data Chunks, used for various attributes
+# >----- Data Chunks, used for various attributes
 COLOR_F = 0x0010  # color defined as 3 floats
 COLOR_24 = 0x0011  # color defined as 3 bytes
 LIN_COLOR_24 = 0x0012  # linear byte color
 LIN_COLOR_F = 0x0013  # linear float color
 PCT_SHORT = 0x30  # percentage short
 PCT_FLOAT = 0x31  # percentage float
 MASTERSCALE = 0x0100  # Master scale factor

-# ------ sub defines of OBJECTINFO
+# >----- sub defines of OBJECTINFO
 MATERIAL = 0xAFFF  # This stored the texture info
 OBJECT = 0x4000  # This stores the faces, vertices, etc...

-# >------ sub defines of MATERIAL
+# ------ sub defines of MATERIAL_BLOCK
 MAT_NAME = 0xA000  # This holds the material name
 MAT_AMBIENT = 0xA010  # Ambient color of the object/material
 MAT_DIFFUSE = 0xA020  # This holds the color of the object/material
@@ -48,10 +49,23 @@ MAT_SHINESS = 0xA040  # Roughness of the object/material (percent)
 MAT_SHIN2 = 0xA041  # Shininess of the object/material (percent)
 MAT_SHIN3 = 0xA042  # Reflection of the object/material (percent)
 MAT_TRANSPARENCY = 0xA050  # Transparency value of material (percent)
-MAT_SELF_ILLUM = 0xA080  # Self Illumination value of material
+MAT_XPFALL = 0xA052  # Transparency falloff value
+MAT_REFBLUR = 0xA053  # Reflection blurring value
+MAT_SELF_ILLUM = 0xA080  # Material self illumination flag
+MAT_TWO_SIDE = 0xA081  # Material is two sided flag
+MAT_DECAL = 0xA082  # Material mapping is decaled flag
+MAT_ADDITIVE = 0xA083  # Material has additive transparency flag
+MAT_SELF_ILPCT = 0xA084  # Self illumination strength (percent)
-MAT_WIRE = 0xA085  # Only render's wireframe
+MAT_WIRE = 0xA085  # Material wireframe rendered flag
+MAT_FACEMAP = 0xA088  # Face mapped textures flag
+MAT_PHONGSOFT = 0xA08C  # Phong soften material flag
+MAT_WIREABS = 0xA08E  # Wire size in units flag
+MAT_WIRESIZE = 0xA087  # Rendered wire size in pixels
+MAT_SHADING = 0xA100  # Material shading method
+MAT_USE_XPFALL = 0xA240  # Transparency falloff flag
+MAT_USE_REFBLUR = 0xA250  # Reflection blurring flag

 # >------ sub defines of MATERIAL_MAP
 MAT_TEXTURE_MAP = 0xA200  # This is a header for a new texture map
 MAT_SPECULAR_MAP = 0xA204  # This is a header for a new specular map
 MAT_OPACITY_MAP = 0xA210  # This is a header for a new opacity map
@@ -79,7 +93,7 @@ OBJECT_MESH = 0x4100  # This lets us know that we are reading a new object
 OBJECT_LIGHT = 0x4600  # This lets us know we are reading a light object
 OBJECT_CAMERA = 0x4700  # This lets us know we are reading a camera object

-#>------ Sub defines of LIGHT
+# >------ Sub defines of LIGHT
 LIGHT_SPOTLIGHT = 0x4610  # The target of a spotlight
 LIGHT_OFF = 0x4620  # The light is off
 LIGHT_ATTENUATE = 0x4625  # Light attenuate flag
@@ -143,6 +157,7 @@ HOTSPOT_TRACK_TAG = 0xB027  # Keyframe spotlight hotspot track
 FALLOFF_TRACK_TAG = 0xB028  # Keyframe spotlight falloff track
 HIDE_TRACK_TAG = 0xB029  # Keyframe object hide track
+OBJECT_NODE_ID = 0xB030  # Keyframe object node id
 PARENT_NAME = 0x80F0  # Object parent name tree (dot separated)

 ROOT_OBJECT = 0xFFFF
@@ -160,7 +175,7 @@ class Chunk:
         "bytes_read",
     )
     # we don't read in the bytes_read, we compute that
-    binary_format = "<HI"
+    binary_format = '<HI'

     def __init__(self):
         self.ID = 0
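binary_format = '<HI' describes the 6-byte header every 3ds chunk starts with: a little-endian ushort ID followed by a uint length, where the length counts the header itself. A minimal reader sketch:

    import struct

    def read_chunk_header(file):
        data = file.read(struct.calcsize('<HI'))   # 6 bytes
        chunk_id, chunk_length = struct.unpack('<HI', data)
        return chunk_id, chunk_length              # length includes these 6 bytes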
@@ -214,7 +229,7 @@ def process_next_object_chunk(file, previous_chunk):

 def skip_to_end(file, skip_chunk):
     buffer_size = skip_chunk.length - skip_chunk.bytes_read
-    binary_format = "%ic" % buffer_size
+    binary_format = '%ic' % buffer_size
     file.read(struct.calcsize(binary_format))
     skip_chunk.bytes_read += buffer_size
@@ -304,13 +319,13 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
             bpy.data.images.remove(imgs)
     else:
         links.new(img_wrap.node_image.outputs['Alpha'], img_wrap.socket_dst)
         contextWrapper.material.blend_method = 'HASHED'

     shader.location = (300, 300)
     contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)


-def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME):
-    from bpy_extras.image_utils import load_image
+def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE):

     contextObName = None
     contextLamp = None
@@ -342,6 +357,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
     object_list = []  # for hierarchy
     object_parent = []  # index of parent in hierarchy, 0xFFFF = no parent
     pivot_list = []  # pivots with hierarchy handling
+    track_flags = []  # keyframe track flags
+    trackposition = {}  # keep track to position for target calculation

     def putContextMesh(
             context,
@@ -448,7 +465,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

     CreateBlenderObject = False
     CreateLightObject = False
     CreateCameraObject = False
     CreateTrackData = False

     def read_float_color(temp_chunk):
@@ -547,9 +563,47 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 add_texture_to_material(img, contextWrapper, pct, extend, alpha, (uscale, vscale, 1),
                                         (uoffset, voffset, 0), angle, tintcolor, mapto)

+    def apply_constrain(vec):
+        consize = mathutils.Vector(vec) * (CONSTRAIN * 0.1) if CONSTRAIN != 0.0 else mathutils.Vector(vec)
+        return consize
+
+    def calc_target(location, target):
+        pan = 0.0
+        tilt = 0.0
+        pos = location + target  # Target triangulation
+        if abs(location[0] - target[0]) > abs(location[1] - target[1]):
+            foc = math.copysign(math.sqrt(pow(pos[0],2)+pow(pos[1],2)),pos[0])
+            dia = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[0])
+            pitch = math.radians(90)-math.copysign(math.acos(foc/dia), pos[2])
+            if location[0] > target[0]:
+                tilt = math.copysign(pitch, pos[0])
+                pan = math.radians(90)+math.atan(pos[1]/foc)
+            else:
+                tilt = -1*(math.copysign(pitch, pos[0]))
+                pan = -1*(math.radians(90)-math.atan(pos[1]/foc))
+        elif abs(location[1] - target[1]) > abs(location[0] - target[0]):
+            foc = math.copysign(math.sqrt(pow(pos[1],2)+pow(pos[0],2)),pos[1])
+            dia = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[1])
+            pitch = math.radians(90)-math.copysign(math.acos(foc/dia), pos[2])
+            if location[1] > target[1]:
+                tilt = math.copysign(pitch, pos[1])
+                pan = math.radians(90)+math.acos(pos[0]/foc)
+            else:
+                tilt = -1*(math.copysign(pitch, pos[1]))
+                pan = -1*(math.radians(90)-math.acos(pos[0]/foc))
+        direction = tilt, pan
+        return direction
+
     def read_track_data(temp_chunk):
-        new_chunk.bytes_read += SZ_U_SHORT * 5
-        temp_data = file.read(SZ_U_SHORT * 5)
+        """Trackflags 0x1, 0x2 and 0x3 are for looping. 0x8, 0x10 and 0x20
+        locks the XYZ axes. 0x100, 0x200 and 0x400 unlinks the XYZ axes"""
+        temp_data = file.read(SZ_U_SHORT)
+        tflags = struct.unpack('<H', temp_data)[0]
+        new_chunk.bytes_read += SZ_U_SHORT
+        track_flags.append(tflags)
+        temp_data = file.read(SZ_U_INT * 2)
+        new_chunk.bytes_read += SZ_U_INT * 2
         temp_data = file.read(SZ_U_INT)
         nkeys = struct.unpack('<I', temp_data)[0]
         new_chunk.bytes_read += SZ_U_INT
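calc_target converts an object position plus the target vector read from the file into the tilt (X) and pan (Z) Euler angles that the spotlight and camera branches further down assign. A usage sketch with made-up coordinates:

    import mathutils

    location = mathutils.Vector((1.0, -5.0, 2.0))  # hypothetical camera position
    target = mathutils.Vector((4.0, 5.0, 0.0))     # offset read from the target chunk
    tilt, pan = calc_target(location, target)
    # The importer stores these as rotation_euler[0] and rotation_euler[2];
    # roll (rotation_euler[1]) comes from its own chunk.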
@@ -562,8 +616,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             temp_data = file.read(SZ_U_SHORT)
             nflags = struct.unpack('<H', temp_data)[0]
             new_chunk.bytes_read += SZ_U_SHORT
-            if nflags > 0:  # Check for spline terms
-                temp_data = file.read(SZ_FLOAT)
+            for f in range(bin(nflags).count('1')):
+                temp_data = file.read(SZ_FLOAT)  # Check for spline terms
                 new_chunk.bytes_read += SZ_FLOAT
             temp_data = file.read(SZ_3FLOAT)
             data = struct.unpack('<3f', temp_data)
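The corrected loop consumes one spline float per set bit in the key flags instead of a single conditional read. The counting idiom, with a hypothetical flag word:

    nflags = 0b10101                    # hypothetical key flags: three spline terms
    n_terms = bin(nflags).count('1')    # -> 3
    # the reader then pulls n_terms floats, SZ_FLOAT bytes each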
@@ -586,8 +640,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             temp_data = file.read(SZ_U_SHORT)
             nflags = struct.unpack('<H', temp_data)[0]
             new_chunk.bytes_read += SZ_U_SHORT
-            if nflags > 0:  # Check for spline terms
-                temp_data = file.read(SZ_FLOAT)
+            for f in range(bin(nflags).count('1')):
+                temp_data = file.read(SZ_FLOAT)  # Check for spline terms
                 new_chunk.bytes_read += SZ_FLOAT
             temp_data = file.read(SZ_FLOAT)
             angle = struct.unpack('<f', temp_data)[0]
@@ -609,7 +663,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
         new_chunk.bytes_read += 4  # read the 4 bytes for the version number
         # this loader works with version 3 and below, but may not with 4 and above
         if version > 3:
-            print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version)
+            print("\tNon-Fatal Error: Version greater than 3, may not load correctly: ", version)

         # is it an ambient light chunk?
         elif new_chunk.ID == AMBIENTLIGHT:
@@ -629,7 +683,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

         # is it an object info chunk?
         elif new_chunk.ID == OBJECTINFO:
-            process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
+            process_next_chunk(context, file, new_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE)

             # keep track of how much we read in the main chunk
             new_chunk.bytes_read += temp_chunk.bytes_read
@@ -711,7 +765,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 elif temp_chunk.ID == PCT_FLOAT:
                     temp_data = file.read(SZ_FLOAT)
                     temp_chunk.bytes_read += SZ_FLOAT
-                    contextMaterial.roughness = 1 - float(struct.unpack('f', temp_data)[0])
+                    contextMaterial.roughness = 1 - float(struct.unpack('<f', temp_data)[0])
                 new_chunk.bytes_read += temp_chunk.bytes_read

             elif new_chunk.ID == MAT_SHIN2:
@@ -723,7 +777,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 elif temp_chunk.ID == PCT_FLOAT:
                     temp_data = file.read(SZ_FLOAT)
                     temp_chunk.bytes_read += SZ_FLOAT
-                    contextMaterial.specular_intensity = float(struct.unpack('f', temp_data)[0])
+                    contextMaterial.specular_intensity = float(struct.unpack('<f', temp_data)[0])
                 new_chunk.bytes_read += temp_chunk.bytes_read

             elif new_chunk.ID == MAT_SHIN3:
@@ -735,7 +789,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 elif temp_chunk.ID == PCT_FLOAT:
                     temp_data = file.read(SZ_FLOAT)
                     temp_chunk.bytes_read += SZ_FLOAT
-                    contextMaterial.metallic = float(struct.unpack('f', temp_data)[0])
+                    contextMaterial.metallic = float(struct.unpack('<f', temp_data)[0])
                 new_chunk.bytes_read += temp_chunk.bytes_read

             elif new_chunk.ID == MAT_TRANSPARENCY:
@@ -747,7 +801,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 elif temp_chunk.ID == PCT_FLOAT:
                     temp_data = file.read(SZ_FLOAT)
                     temp_chunk.bytes_read += SZ_FLOAT
-                    contextMaterial.diffuse_color[3] = 1 - float(struct.unpack('f', temp_data)[0])
+                    contextMaterial.diffuse_color[3] = 1 - float(struct.unpack('<f', temp_data)[0])
                 else:
                     skip_to_end(file, temp_chunk)
                 new_chunk.bytes_read += temp_chunk.bytes_read
@@ -757,11 +811,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 if temp_chunk.ID == PCT_SHORT:
                     temp_data = file.read(SZ_U_SHORT)
                     temp_chunk.bytes_read += SZ_U_SHORT
-                    contextMaterial.line_priority = int(struct.unpack('H', temp_data)[0])
+                    contextMaterial.line_priority = int(struct.unpack('<H', temp_data)[0])
                 elif temp_chunk.ID == PCT_FLOAT:
                     temp_data = file.read(SZ_FLOAT)
                     temp_chunk.bytes_read += SZ_FLOAT
-                    contextMaterial.line_priority = (float(struct.unpack('f', temp_data)[0]) * 100)
+                    contextMaterial.line_priority = (float(struct.unpack('<f', temp_data)[0]) * 100)
                 new_chunk.bytes_read += temp_chunk.bytes_read

             elif new_chunk.ID == MAT_SHADING:
@@ -780,20 +834,19 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 contextWrapper.use_nodes = True

             elif new_chunk.ID == MAT_TEXTURE_MAP:
-                read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")
+                read_texture(new_chunk, temp_chunk, "Diffuse", 'COLOR')

             elif new_chunk.ID == MAT_SPECULAR_MAP:
-                read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")
+                read_texture(new_chunk, temp_chunk, "Specular", 'SPECULARITY')

             elif new_chunk.ID == MAT_OPACITY_MAP:
                 contextMaterial.blend_method = 'BLEND'
-                read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")
+                read_texture(new_chunk, temp_chunk, "Opacity", 'ALPHA')

             elif new_chunk.ID == MAT_REFLECTION_MAP:
-                read_texture(new_chunk, temp_chunk, "Reflect", "METALLIC")
+                read_texture(new_chunk, temp_chunk, "Reflect", 'METALLIC')

             elif new_chunk.ID == MAT_BUMP_MAP:
-                read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")
+                read_texture(new_chunk, temp_chunk, "Bump", 'NORMAL')

             elif new_chunk.ID == MAT_BUMP_PERCENT:
                 read_chunk(file, temp_chunk)
@@ -804,19 +857,19 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 elif temp_chunk.ID == PCT_FLOAT:
                     temp_data = file.read(SZ_FLOAT)
                     temp_chunk.bytes_read += SZ_FLOAT
-                    contextWrapper.normalmap_strength = float(struct.unpack('f', temp_data)[0])
+                    contextWrapper.normalmap_strength = float(struct.unpack('<f', temp_data)[0])
                 else:
                     skip_to_end(file, temp_chunk)
                 new_chunk.bytes_read += temp_chunk.bytes_read

             elif new_chunk.ID == MAT_SHIN_MAP:
-                read_texture(new_chunk, temp_chunk, "Shininess", "ROUGHNESS")
+                read_texture(new_chunk, temp_chunk, "Shininess", 'ROUGHNESS')

             elif new_chunk.ID == MAT_SELFI_MAP:
-                read_texture(new_chunk, temp_chunk, "Emit", "EMISSION")
+                read_texture(new_chunk, temp_chunk, "Emit", 'EMISSION')

             elif new_chunk.ID == MAT_TEX2_MAP:
-                read_texture(new_chunk, temp_chunk, "Tex", "TEXTURE")
+                read_texture(new_chunk, temp_chunk, "Tex", 'TEXTURE')

             # If mesh chunk
             elif new_chunk.ID == OBJECT_MESH:
@@ -848,7 +901,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 new_chunk.bytes_read += SZ_U_SHORT
                 temp_data = file.read(SZ_U_SHORT * num_faces_using_mat)
                 new_chunk.bytes_read += SZ_U_SHORT * num_faces_using_mat
-                temp_data = struct.unpack("<%dH" % (num_faces_using_mat), temp_data)
+                temp_data = struct.unpack('<%dH' % (num_faces_using_mat), temp_data)
                 contextMeshMaterials.append((material_name, temp_data))
                 # look up the material in all the materials
@@ -874,6 +927,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 contextMatrix = mathutils.Matrix(
                     (data[:3] + [0], data[3:6] + [0], data[6:9] + [0], data[9:] + [1])).transposed()

+            # If light chunk
             elif contextObName and new_chunk.ID == OBJECT_LIGHT:  # Basic lamp support
                 newLamp = bpy.data.lights.new("Lamp", 'POINT')
                 contextLamp = bpy.data.objects.new(contextObName, newLamp)
@@ -892,31 +946,29 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 new_chunk.bytes_read += SZ_3FLOAT
             elif CreateLightObject and new_chunk.ID == LIGHT_MULTIPLIER:  # Intensity
                 temp_data = file.read(SZ_FLOAT)
-                contextLamp.data.energy = (float(struct.unpack('f', temp_data)[0]) * 1000)
+                contextLamp.data.energy = (float(struct.unpack('<f', temp_data)[0]) * 1000)
                 new_chunk.bytes_read += SZ_FLOAT

             # If spotlight chunk
             elif CreateLightObject and new_chunk.ID == LIGHT_SPOTLIGHT:  # Spotlight
                 temp_data = file.read(SZ_3FLOAT)
                 contextLamp.data.type = 'SPOT'
                 spot = mathutils.Vector(struct.unpack('<3f', temp_data))
-                aim = contextLamp.location + spot
-                hypo = math.copysign(math.sqrt(pow(aim[1], 2) + pow(aim[0], 2)), aim[1])
-                track = math.copysign(math.sqrt(pow(hypo, 2) + pow(spot[2], 2)), aim[1])
-                angle = math.radians(90) - math.copysign(math.acos(hypo / track), aim[2])
-                contextLamp.rotation_euler[0] = -1 * math.copysign(angle, aim[1])
-                contextLamp.rotation_euler[2] = -1 * (math.radians(90) - math.acos(aim[0] / hypo))
+                aim = calc_target(contextLamp.location, spot)  # Target
+                contextLamp.rotation_euler[0] = aim[0]
+                contextLamp.rotation_euler[2] = aim[1]
                 new_chunk.bytes_read += SZ_3FLOAT
                 temp_data = file.read(SZ_FLOAT)  # Hotspot
-                hotspot = float(struct.unpack('f', temp_data)[0])
+                hotspot = float(struct.unpack('<f', temp_data)[0])
                 new_chunk.bytes_read += SZ_FLOAT
                 temp_data = file.read(SZ_FLOAT)  # Beam angle
-                beam_angle = float(struct.unpack('f', temp_data)[0])
+                beam_angle = float(struct.unpack('<f', temp_data)[0])
                 contextLamp.data.spot_size = math.radians(beam_angle)
                 contextLamp.data.spot_blend = 1.0 - (hotspot / beam_angle)
                 new_chunk.bytes_read += SZ_FLOAT
             elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_ROLL:  # Roll
                 temp_data = file.read(SZ_FLOAT)
-                contextLamp.rotation_euler[1] = float(struct.unpack('f', temp_data)[0])
+                contextLamp.rotation_euler[1] = float(struct.unpack('<f', temp_data)[0])
                 new_chunk.bytes_read += SZ_FLOAT
             elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_SHADOWED:  # Shadow
                 contextLamp.data.use_shadow = True
@@ -925,7 +977,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_RECTANGLE:  # Square
                 contextLamp.data.use_square = True

-            elif contextObName and new_chunk.ID == OBJECT_CAMERA and CreateCameraObject is False:  # Basic camera support
+            # If camera chunk
+            elif contextObName and new_chunk.ID == OBJECT_CAMERA:  # Basic camera support
                 camera = bpy.data.cameras.new("Camera")
                 contextCamera = bpy.data.objects.new(contextObName, camera)
                 context.view_layer.active_layer_collection.collection.objects.link(contextCamera)
@@ -935,30 +988,23 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 contextCamera.location = struct.unpack('<3f', temp_data)
                 new_chunk.bytes_read += SZ_3FLOAT
                 temp_data = file.read(SZ_3FLOAT)
-                target = mathutils.Vector(struct.unpack('<3f', temp_data))
-                cam = contextCamera.location + target
-                focus = math.copysign(math.sqrt(pow(cam[1], 2) + pow(cam[0], 2)), cam[1])
+                focus = mathutils.Vector(struct.unpack('<3f', temp_data))
+                direction = calc_target(contextCamera.location, focus)  # Target
                 new_chunk.bytes_read += SZ_3FLOAT
-                temp_data = file.read(SZ_FLOAT)  # triangulating camera angles
-                direction = math.copysign(math.sqrt(pow(focus, 2) + pow(target[2], 2)), cam[1])
-                pitch = math.radians(90)-math.copysign(math.acos(focus/direction), cam[2])
-                if contextCamera.location[1] > target[1]:
-                    contextCamera.rotation_euler[0] = math.copysign(pitch, cam[1])
-                    contextCamera.rotation_euler[2] = math.radians(180)-math.copysign(math.atan(cam[0]/focus), cam[0])
-                else:
-                    contextCamera.rotation_euler[0] = -1*(math.copysign(pitch, cam[1]))
-                    contextCamera.rotation_euler[2] = -1*(math.radians(90)-math.acos(cam[0]/focus))
-                contextCamera.rotation_euler[1] = float(struct.unpack('f', temp_data)[0])  # Roll
+                temp_data = file.read(SZ_FLOAT)
+                contextCamera.rotation_euler[0] = direction[0]
+                contextCamera.rotation_euler[1] = float(struct.unpack('<f', temp_data)[0])  # Roll
+                contextCamera.rotation_euler[2] = direction[1]
                 new_chunk.bytes_read += SZ_FLOAT
                 temp_data = file.read(SZ_FLOAT)
-                contextCamera.data.lens = float(struct.unpack('f', temp_data)[0])  # Focus
+                contextCamera.data.lens = float(struct.unpack('<f', temp_data)[0])  # Focus
                 new_chunk.bytes_read += SZ_FLOAT
                 contextMatrix = None  # Reset matrix
                 CreateBlenderObject = False
                 CreateCameraObject = True

             # start keyframe section
             elif new_chunk.ID == EDITKEYFRAME:
-                pass
+                trackposition = {}

             elif KEYFRAME and new_chunk.ID == KFDATA_KFSEG:
                 temp_data = file.read(SZ_U_INT)
@@ -1038,7 +1084,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 temp_data = file.read(SZ_FLOAT)
                 smooth_angle = struct.unpack('<f', temp_data)[0]
                 new_chunk.bytes_read += SZ_FLOAT
-                child.data.auto_smooth_angle = math.radians(smooth_angle)
+                child.data.auto_smooth_angle = smooth_angle

             elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and colortrack == 'AMBIENT':  # Ambient
                 keyframe_data = {}
@@ -1047,6 +1093,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 for keydata in keyframe_data.items():
                     child.node_tree.nodes['Background'].inputs[0].default_value[:3] = keydata[1]
                     child.node_tree.keyframe_insert(data_path="nodes[\"Background\"].inputs[0].default_value", frame=keydata[0])
+                track_flags.clear()

             elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and colortrack == 'LIGHT':  # Color
                 keyframe_data = {}
@@ -1055,6 +1102,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 for keydata in keyframe_data.items():
                     child.data.color = keydata[1]
                     child.data.keyframe_insert(data_path="color", frame=keydata[0])
+                track_flags.clear()

             elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking == 'OBJECT':  # Translation
                 keyframe_data = {}
@@ -1063,47 +1111,66 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 if child.type in {'LIGHT', 'CAMERA'}:
                     trackposition[0] = child.location
                     CreateTrackData = True
+                if track_flags[0] & 0x8:  # Flag 0x8 locks X axis
+                    child.lock_location[0] = True
+                if track_flags[0] & 0x10:  # Flag 0x10 locks Y axis
+                    child.lock_location[1] = True
+                if track_flags[0] & 0x20:  # Flag 0x20 locks Z axis
+                    child.lock_location[2] = True
                 for keydata in keyframe_data.items():
+                    trackposition[keydata[0]] = keydata[1]  # Keep track to position for target calculation
-                    child.location = mathutils.Vector(keydata[1]) * (CONSTRAIN * 0.1) if hierarchy == ROOT_OBJECT and CONSTRAIN != 0.0 else keydata[1]
-                    child.keyframe_insert(data_path="location", frame=keydata[0])
+                    child.location = apply_constrain(keydata[1]) if hierarchy == ROOT_OBJECT else mathutils.Vector(keydata[1])
+                    if hierarchy == ROOT_OBJECT:
+                        child.location.rotate(CONVERSE)
+                    if not track_flags[0] & 0x100:  # Flag 0x100 unlinks X axis
+                        child.keyframe_insert(data_path="location", index=0, frame=keydata[0])
+                    if not track_flags[0] & 0x200:  # Flag 0x200 unlinks Y axis
+                        child.keyframe_insert(data_path="location", index=1, frame=keydata[0])
+                    if not track_flags[0] & 0x400:  # Flag 0x400 unlinks Z axis
+                        child.keyframe_insert(data_path="location", index=2, frame=keydata[0])
+                track_flags.clear()

             elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking == 'TARGET':  # Target position
                 keyframe_data = {}
-                target = read_track_data(temp_chunk)[0]
-                pos = child.location + mathutils.Vector(target)  # Target triangulation
-                foc = math.copysign(math.sqrt(pow(pos[1],2)+pow(pos[0],2)),pos[1])
-                hyp = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[1])
-                tilt = math.radians(90)-math.copysign(math.acos(foc/hyp), pos[2])
-                if child.location[0] > target[1]:
-                    child.rotation_euler[0] = math.copysign(tilt, pos[1])
-                    child.rotation_euler[2] = math.radians(180)-math.copysign(math.atan(pos[0]/foc), pos[0])
-                else:
-                    child.rotation_euler[0] = -1*(math.copysign(tilt, pos[1]))
-                    child.rotation_euler[2] = -1*(math.radians(90)-math.acos(pos[0]/foc))
+                location = child.location
+                target = mathutils.Vector(read_track_data(temp_chunk)[0])
+                direction = calc_target(location, target)
+                child.rotation_euler[0] = direction[0]
+                child.rotation_euler[2] = direction[1]
                 for keydata in keyframe_data.items():
-                    target = keydata[1]
-                    pos = mathutils.Vector(trackposition[keydata[0]]) + mathutils.Vector(target)
-                    foc = math.copysign(math.sqrt(pow(pos[1],2)+pow(pos[0],2)),pos[1])
-                    hyp = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[1])
-                    tilt = math.radians(90)-math.copysign(math.acos(foc/hyp), pos[2])
-                    if trackposition[keydata[0]][1] > target[1]:
-                        child.rotation_euler[0] = math.copysign(tilt, pos[1])
-                        child.rotation_euler[2] = math.radians(180)-math.copysign(math.atan(pos[0]/foc), pos[0])
-                    else:
-                        child.rotation_euler[0] = -1*(math.copysign(tilt, pos[1]))
-                        child.rotation_euler[2] = -1*(math.radians(90)-math.acos(pos[0]/foc))
-                    child.keyframe_insert(data_path="rotation_euler", frame=keydata[0])
+                    track = trackposition.get(keydata[0], child.location)
+                    locate = mathutils.Vector(track)
+                    target = mathutils.Vector(keydata[1])
+                    direction = calc_target(locate, target)
+                    rotate = mathutils.Euler((direction[0], 0.0, direction[1]), 'XYZ').to_matrix()
+                    scale = mathutils.Vector.Fill(3, (CONSTRAIN * 0.1)) if CONSTRAIN != 0.0 else child.scale
+                    transformation = mathutils.Matrix.LocRotScale(locate, rotate, scale)
+                    child.matrix_world = transformation
+                    if hierarchy == ROOT_OBJECT:
+                        child.matrix_world = CONVERSE @ child.matrix_world
+                    child.keyframe_insert(data_path="rotation_euler", index=0, frame=keydata[0])
+                    child.keyframe_insert(data_path="rotation_euler", index=2, frame=keydata[0])
                 track_flags.clear()

             elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and tracking == 'OBJECT':  # Rotation
                 keyframe_rotation = {}
-                new_chunk.bytes_read += SZ_U_SHORT * 5
-                temp_data = file.read(SZ_U_SHORT * 5)
+                temp_data = file.read(SZ_U_SHORT)
+                tflags = struct.unpack('<H', temp_data)[0]
+                new_chunk.bytes_read += SZ_U_SHORT
+                temp_data = file.read(SZ_U_INT * 2)
+                new_chunk.bytes_read += SZ_U_INT * 2
                 temp_data = file.read(SZ_U_INT)
                 nkeys = struct.unpack('<I', temp_data)[0]
                 new_chunk.bytes_read += SZ_U_INT
                 if nkeys == 0:
                     keyframe_rotation[0] = child.rotation_axis_angle[:]
+                if tflags & 0x8:  # Flag 0x8 locks X axis
+                    child.lock_rotation[0] = True
+                if tflags & 0x10:  # Flag 0x10 locks Y axis
+                    child.lock_rotation[1] = True
+                if tflags & 0x20:  # Flag 0x20 locks Z axis
+                    child.lock_rotation[2] = True
                 for i in range(nkeys):
                     temp_data = file.read(SZ_U_INT)
                     nframe = struct.unpack('<I', temp_data)[0]
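The target-track branch above now rebuilds the whole world matrix with Matrix.LocRotScale (available in recent mathutils versions) rather than poking individual Euler channels. A standalone sketch with hypothetical values:

    import mathutils

    locate = mathutils.Vector((1.0, 2.0, 3.0))
    rotate = mathutils.Euler((0.3, 0.0, 1.2), 'XYZ').to_matrix()   # tilt/pan as a 3x3
    scale = mathutils.Vector((1.0, 1.0, 1.0))
    world = mathutils.Matrix.LocRotScale(locate, rotate, scale)    # composed 4x4 transform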
@@ -1111,11 +1178,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                     temp_data = file.read(SZ_U_SHORT)
                     nflags = struct.unpack('<H', temp_data)[0]
                     new_chunk.bytes_read += SZ_U_SHORT
-                    if nflags > 0:  # Check for spline term values
-                        temp_data = file.read(SZ_FLOAT)
+                    for f in range(bin(nflags).count('1')):
+                        temp_data = file.read(SZ_FLOAT)  # Check for spline term values
                         new_chunk.bytes_read += SZ_FLOAT
                     temp_data = file.read(SZ_4FLOAT)
-                    rotation = struct.unpack("<4f", temp_data)
+                    rotation = struct.unpack('<4f', temp_data)
                     new_chunk.bytes_read += SZ_4FLOAT
                     keyframe_rotation[nframe] = rotation
                 rad, axis_x, axis_y, axis_z = keyframe_rotation[0]
@@ -1123,15 +1190,34 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 for keydata in keyframe_rotation.items():
                     rad, axis_x, axis_y, axis_z = keydata[1]
                     child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler()
-                    child.keyframe_insert(data_path="rotation_euler", frame=keydata[0])
+                    if hierarchy == ROOT_OBJECT:
+                        child.rotation_euler.rotate(CONVERSE)
+                    if not tflags & 0x100:  # Flag 0x100 unlinks X axis
+                        child.keyframe_insert(data_path="rotation_euler", index=0, frame=keydata[0])
+                    if not tflags & 0x200:  # Flag 0x200 unlinks Y axis
+                        child.keyframe_insert(data_path="rotation_euler", index=1, frame=keydata[0])
+                    if not tflags & 0x400:  # Flag 0x400 unlinks Z axis
+                        child.keyframe_insert(data_path="rotation_euler", index=2, frame=keydata[0])

             elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and tracking == 'OBJECT':  # Scale
                 keyframe_data = {}
                 default_data = child.scale[:]
                 child.scale = read_track_data(temp_chunk)[0]
+                if track_flags[0] & 0x8:  # Flag 0x8 locks X axis
+                    child.lock_scale[0] = True
+                if track_flags[0] & 0x10:  # Flag 0x10 locks Y axis
+                    child.lock_scale[1] = True
+                if track_flags[0] & 0x20:  # Flag 0x20 locks Z axis
+                    child.lock_scale[2] = True
                 for keydata in keyframe_data.items():
-                    child.scale = mathutils.Vector(keydata[1]) * (CONSTRAIN * 0.1) if hierarchy == ROOT_OBJECT and CONSTRAIN != 0.0 else keydata[1]
-                    child.keyframe_insert(data_path="scale", frame=keydata[0])
+                    child.scale = apply_constrain(keydata[1]) if hierarchy == ROOT_OBJECT else mathutils.Vector(keydata[1])
+                    if not track_flags[0] & 0x100:  # Flag 0x100 unlinks X axis
+                        child.keyframe_insert(data_path="scale", index=0, frame=keydata[0])
+                    if not track_flags[0] & 0x200:  # Flag 0x200 unlinks Y axis
+                        child.keyframe_insert(data_path="scale", index=1, frame=keydata[0])
+                    if not track_flags[0] & 0x400:  # Flag 0x400 unlinks Z axis
+                        child.keyframe_insert(data_path="scale", index=2, frame=keydata[0])
+                track_flags.clear()

             elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracking == 'OBJECT':  # Roll angle
                 keyframe_angle = {}
@@ -1139,6 +1225,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                 child.rotation_euler[1] = read_track_angle(temp_chunk)[0]
                 for keydata in keyframe_angle.items():
                     child.rotation_euler[1] = keydata[1]
+                    if hierarchy == ROOT_OBJECT:
+                        child.rotation_euler.rotate(CONVERSE)
                     child.keyframe_insert(data_path="rotation_euler", index=1, frame=keydata[0])

             elif KEYFRAME and new_chunk.ID == FOV_TRACK_TAG and child.type == 'CAMERA':  # Field of view
@@ -1149,7 +1237,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                     child.data.lens = (child.data.sensor_width/2)/math.tan(keydata[1]/2)
                     child.data.keyframe_insert(data_path="lens", frame=keydata[0])

-            elif new_chunk.ID == HOTSPOT_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT':  # Hotspot
+            elif KEYFRAME and new_chunk.ID == HOTSPOT_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT':  # Hotspot
                 keyframe_angle = {}
                 cone_angle = math.degrees(child.data.spot_size)
                 default_value = cone_angle-(child.data.spot_blend*math.floor(cone_angle))
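The FOV keys above are converted with lens = (sensor_width / 2) / tan(fov / 2). A quick check with Blender's default 36 mm sensor and a hypothetical key shows a ~39.6 degree field of view mapping to the familiar 50 mm lens:

    import math

    sensor_width = 36.0                             # default camera sensor width (mm)
    fov = math.radians(39.6)                        # hypothetical field-of-view key
    lens = (sensor_width / 2) / math.tan(fov / 2)   # -> ~50.0 mm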
@@ -1159,7 +1247,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
                     child.data.spot_blend = 1.0 - (math.degrees(keydata[1])/cone_angle)
                     child.data.keyframe_insert(data_path="spot_blend", frame=keydata[0])

-            elif new_chunk.ID == FALLOFF_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT':  # Falloff
+            elif KEYFRAME and new_chunk.ID == FALLOFF_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT':  # Falloff
                 keyframe_angle = {}
                 default_value = math.degrees(child.data.spot_size)
                 child.data.spot_size = read_track_angle(temp_chunk)[0]
@@ -1169,7 +1257,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI

         else:
             buffer_size = new_chunk.length - new_chunk.bytes_read
-            binary_format = "%ic" % buffer_size
+            binary_format = '%ic' % buffer_size
             temp_data = file.read(struct.calcsize(binary_format))
             new_chunk.bytes_read += buffer_size
@@ -1179,9 +1267,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
     # FINISHED LOOP
     # There will be a number of objects still not added
     if CreateBlenderObject:
-        if CreateLightObject or CreateCameraObject:
-            pass
-        else:
-            putContextMesh(
+        putContextMesh(
             context,
             contextMesh_vertls,
@@ -1201,16 +1286,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
             ob.parent = None
         elif parent not in object_dict:
             if ob.parent != object_list[parent]:
                 if ob == object_list[parent]:
-                    print(' warning: Cannot assign self to parent ', ob)
+                    print("\tWarning: Cannot assign self to parent ", ob)
                 else:
                     ob.parent = object_list[parent]
         else:
-            if ob.parent != object_dict[parent]:
-                if ob == object_dict[parent]:
-                    print(' warning: Cannot assign self to parent ', ob)
-                else:
-                    ob.parent = object_dict[parent]
+            if ob.parent != object_dict[parent]:
+                ob.parent = object_dict.get(parent)

         # pivot_list[ind] += pivot_list[parent]  # Not sure this is correct, should parent space matrix be applied before combining?
@@ -1231,7 +1312,7 @@ def load_3ds(filepath,
              WORLD_MATRIX=False,
              KEYFRAME=True,
              APPLY_MATRIX=True,
-             global_matrix=None):
+             CONVERSE=None):

     print("importing 3DS: %r..." % (filepath), end="")
@@ -1245,7 +1326,7 @@ def load_3ds(filepath,
     # here we go!
     read_chunk(file, current_chunk)
     if current_chunk.ID != PRIMARY:
-        print('\tFatal Error: Not a valid 3ds file: %r' % filepath)
+        print("\tFatal Error: Not a valid 3ds file: %r" % filepath)
         file.close()
         return
@@ -1261,7 +1342,7 @@ def load_3ds(filepath,
     scn = context.scene

     imported_objects = []  # Fill this list with objects
-    process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME)
+    process_next_chunk(context, file, current_chunk, imported_objects, CONSTRAIN, IMAGE_SEARCH, WORLD_MATRIX, KEYFRAME, CONVERSE)

     # fixme, make unglobal
     object_dictionary.clear()
@@ -1274,10 +1355,10 @@ def load_3ds(filepath,
             me.transform(ob.matrix_local.inverted())

     # print(imported_objects)
-    if global_matrix:
+    if CONVERSE and not KEYFRAME:
         for ob in imported_objects:
             if ob.type == 'MESH' and ob.parent is None:
-                ob.matrix_world = ob.matrix_world @ global_matrix
+                ob.location.rotate(CONVERSE)
+                ob.rotation_euler.rotate(CONVERSE)

     for ob in imported_objects:
         ob.select_set(True)
@@ -1363,7 +1444,7 @@ def load(operator,
                  WORLD_MATRIX=use_world_matrix,
                  KEYFRAME=read_keyframe,
                  APPLY_MATRIX=use_apply_transform,
-                 global_matrix=global_matrix,
+                 CONVERSE=global_matrix,
                  )

     return {'FINISHED'}
@@ -931,26 +931,6 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
     me.edges.foreach_get("vertices", t_ev)
     me.loops.foreach_get("edge_index", t_lei)

-    # Polygons might not be in the same order as loops. To export per-loop and per-polygon data in a matching order,
-    # one must be set into the order of the other. Since there are fewer polygons than loops and there are usually
-    # more geometry layers exported that are per-loop than per-polygon, it's more efficient to re-order polygons and
-    # per-polygon data.
-    perm_polygons_to_loop_order = None
-    # t_ls indicates the ordering of polygons compared to loops. When t_ls is sorted, polygons and loops are in the same
-    # order. Since each loop must be assigned to exactly one polygon for the mesh to be valid, every value in t_ls must
-    # be unique, so t_ls will be monotonically increasing when sorted.
-    # t_ls is expected to be in the same order as loops in most cases since exiting Edit mode will sort t_ls, so do an
-    # initial check for any element being smaller than the previous element to determine if sorting is required.
-    sort_polygon_data = np.any(t_ls[1:] < t_ls[:-1])
-    if sort_polygon_data:
-        # t_ls is not sorted, so get the indices that would sort t_ls using argsort, these will be re-used to sort
-        # per-polygon data.
-        # Using 'stable' for radix sort, which performs much better with partially ordered data and slightly worse with
-        # completely random data, compared to the default of 'quicksort' for introsort.
-        perm_polygons_to_loop_order = np.argsort(t_ls, kind='stable')
-        # Sort t_ls into the same order as loops.
-        t_ls = t_ls[perm_polygons_to_loop_order]
-
     # Add "fake" faces for loose edges. Each "fake" face consists of two loops creating a new 2-sided polygon.
     if scene_data.settings.use_mesh_edges:
         bl_edge_is_loose_dtype = bool
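For reference, the trick the removed block performed: argsort over the loop-start array yields a permutation that puts polygons, and any per-polygon array, into loop order, and 'stable' selects a radix sort that is cheap on the mostly-sorted data this sees. A toy sketch:

    import numpy as np

    t_ls = np.array([0, 9, 3, 6])               # loop starts, polygons out of order
    perm = np.argsort(t_ls, kind='stable')
    t_ls = t_ls[perm]                           # -> [0, 3, 6, 9], matches loop order
    material_index = np.array([2, 0, 1, 1])     # hypothetical per-polygon data
    material_index = material_index[perm]       # reordered the same way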
@@ -1051,8 +1031,6 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
         if smooth_type == 'FACE':
             t_ps = np.empty(len(me.polygons), dtype=poly_use_smooth_dtype)
             me.polygons.foreach_get("use_smooth", t_ps)
-            if sort_polygon_data:
-                t_ps = t_ps[perm_polygons_to_loop_order]
             _map = b"ByPolygon"
         else:  # EDGE
             _map = b"ByEdge"
@@ -1071,17 +1049,14 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
                 # Get the 'use_smooth' attribute of all polygons.
                 p_use_smooth_mask = np.empty(mesh_poly_nbr, dtype=poly_use_smooth_dtype)
                 me.polygons.foreach_get('use_smooth', p_use_smooth_mask)
-                if sort_polygon_data:
-                    p_use_smooth_mask = p_use_smooth_mask[perm_polygons_to_loop_order]
                 # Invert to get all flat shaded polygons.
                 p_flat_mask = np.invert(p_use_smooth_mask, out=p_use_smooth_mask)
                 # Convert flat shaded polygons to flat shaded loops by repeating each element by the number of sides of
                 # that polygon.
-                # Polygon sides can be calculated from the element-wise difference of sorted loop starts appended by the
-                # number of loops. Alternatively, polygon sides can be retrieved directly from the 'loop_total'
-                # attribute of polygons, but that might need to be sorted, and we already have t_ls which is sorted loop
-                # starts. It tends to be quicker to calculate from t_ls when above around 10_000 polygons even when the
-                # 'loop_total' array wouldn't need sorting.
+                # Polygon sides can be calculated from the element-wise difference of loop starts appended by the number
+                # of loops. Alternatively, polygon sides can be retrieved directly from the 'loop_total' attribute of
+                # polygons, but since we already have t_ls, it tends to be quicker to calculate from t_ls when above
+                # around 10_000 polygons.
                 polygon_sides = np.diff(mesh_t_ls_view, append=mesh_loop_nbr)
                 p_flat_loop_mask = np.repeat(p_flat_mask, polygon_sides)
                 # Convert flat shaded loops to flat shaded (sharp) edge indices.
@@ -1442,8 +1417,6 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
             fbx_pm_dtype = np.int32
             t_pm = np.empty(len(me.polygons), dtype=bl_pm_dtype)
             me.polygons.foreach_get("material_index", t_pm)
-            if sort_polygon_data:
-                t_pm = t_pm[perm_polygons_to_loop_order]

             # We have to validate mat indices, and map them to FBX indices.
             # Note a mat might not be in me_fbxmaterials_idx (e.g. node mats are ignored).
@@ -1474,7 +1447,6 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
             elem_data_single_string(lay_ma, b"MappingInformationType", b"AllSame")
             elem_data_single_string(lay_ma, b"ReferenceInformationType", b"IndexToDirect")
             elem_data_single_int32_array(lay_ma, b"Materials", [0])
-    del perm_polygons_to_loop_order

     # And the "layer TOC"...
@@ -372,7 +372,7 @@ def blen_read_custom_properties(fbx_obj, blen_obj, settings):
 def blen_read_object_transform_do(transform_data):
     # This is a nightmare. FBX SDK uses Maya way to compute the transformation matrix of a node - utterly simple:
     #
-    #     WorldTransform = ParentWorldTransform @ T @ Roff @ Rp @ Rpre @ R @ Rpost @ Rp-1 @ Soff @ Sp @ S @ Sp-1
+    #     WorldTransform = ParentWorldTransform @ T @ Roff @ Rp @ Rpre @ R @ Rpost-1 @ Rp-1 @ Soff @ Sp @ S @ Sp-1
     #
     # Where all those terms are 4 x 4 matrices that contain:
     #     WorldTransform: Transformation matrix of the node in global space.
@@ -382,7 +382,7 @@ def blen_read_object_transform_do(transform_data):
     #     Rp: Rotation pivot
     #     Rpre: Pre-rotation
     #     R: Rotation
-    #     Rpost: Post-rotation
+    #     Rpost-1: Inverse of the post-rotation (FBX 2011 documentation incorrectly specifies this without inversion)
     #     Rp-1: Inverse of the rotation pivot
     #     Soff: Scaling offset
     #     Sp: Scaling pivot
@@ -402,14 +402,15 @@ def blen_read_object_transform_do(transform_data):
     #     S: Scaling
     #     OT: Geometric transform translation
     #     OR: Geometric transform rotation
-    #     OS: Geometric transform translation
+    #     OS: Geometric transform scale
     #
     # Notes:
     #     Geometric transformations ***are not inherited***: ParentWorldTransform does not contain the OT, OR, OS
     #     of WorldTransform's parent node.
+    #     The R matrix takes into account the rotation order. Other rotation matrices are always 'XYZ' order.
     #
-    # Taken from http://download.autodesk.com/us/fbx/20112/FBX_SDK_HELP/
-    #            index.html?url=WS1a9193826455f5ff1f92379812724681e696651.htm,topicNumber=d0e7429
+    # Taken from https://help.autodesk.com/view/FBX/2020/ENU/
+    #            ?guid=FBX_Developer_Help_nodes_and_scene_graph_fbx_nodes_computing_transformation_matrix_html

     # translation
     lcl_translation = Matrix.Translation(transform_data.loc)
@@ -418,9 +419,9 @@ def blen_read_object_transform_do(transform_data):
     # rotation
     to_rot = lambda rot, rot_ord: Euler(convert_deg_to_rad_iter(rot), rot_ord).to_matrix().to_4x4()
     lcl_rot = to_rot(transform_data.rot, transform_data.rot_ord) @ transform_data.rot_alt_mat
-    pre_rot = to_rot(transform_data.pre_rot, transform_data.rot_ord)
-    pst_rot = to_rot(transform_data.pst_rot, transform_data.rot_ord)
-    geom_rot = to_rot(transform_data.geom_rot, transform_data.rot_ord)
+    pre_rot = to_rot(transform_data.pre_rot, 'XYZ')
+    pst_rot = to_rot(transform_data.pst_rot, 'XYZ')
+    geom_rot = to_rot(transform_data.geom_rot, 'XYZ')

     rot_ofs = Matrix.Translation(transform_data.rot_ofs)
     rot_piv = Matrix.Translation(transform_data.rot_piv)
@@ -439,7 +440,7 @@ def blen_read_object_transform_do(transform_data):
         rot_piv @
         pre_rot @
         lcl_rot @
-        pst_rot @
+        pst_rot.inverted_safe() @
         rot_piv.inverted_safe() @
         sca_ofs @
         sca_piv @
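The corrected rotation terms are easy to mirror in mathutils: pre- and post-rotations are fixed-'XYZ' Eulers and the post-rotation enters inverted, per the fixed formula above. A sketch with hypothetical angles (the local rotation's own rotation order is simplified away here):

    import math
    from mathutils import Euler

    def rot4(xyz_deg):
        return Euler([math.radians(a) for a in xyz_deg], 'XYZ').to_matrix().to_4x4()

    pre_rot, lcl_rot, pst_rot = rot4((90, 0, 0)), rot4((0, 45, 0)), rot4((0, 0, 30))
    rotation = pre_rot @ lcl_rot @ pst_rot.inverted_safe()   # Rpre @ R @ Rpost^-1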
@@ -21,10 +21,13 @@ class NODE_OT_GLTF_SETTINGS(bpy.types.Operator):
     @classmethod
     def poll(cls, context):
         space = context.space_data
-        return space.type == "NODE_EDITOR" \
-            and context.object and context.object.active_material \
-            and context.object.active_material.use_nodes is True \
+        return (
+            space is not None
+            and space.type == "NODE_EDITOR"
+            and context.object and context.object.active_material
+            and context.object.active_material.use_nodes is True
             and bpy.context.preferences.addons['io_scene_gltf2'].preferences.settings_node_ui is True
+        )

     def execute(self, context):
         gltf_settings_node_name = get_gltf_node_name()
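The key change in poll is the space is not None guard: context.space_data can be None in some contexts where Blender polls operators outside an editor, which is the assumption behind the fix, and the old chained expression would raise there. The minimal pattern:

    @classmethod
    def poll(cls, context):
        space = context.space_data
        # Guard before touching space.type; space_data may be None.
        return space is not None and space.type == "NODE_EDITOR"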