Pose Library: Update to use the asset shelf (when enabled) #104546

Merged
Julian Eisel merged 33 commits from asset-shelf into main 2023-08-04 15:00:21 +02:00
7 changed files with 255 additions and 304 deletions
Showing only changes of commit 21bdb84e38

View File

@@ -69,7 +69,7 @@ MAT_MAP_USCALE = 0xA354 # U axis scaling
MAT_MAP_VSCALE = 0xA356 # V axis scaling
MAT_MAP_UOFFSET = 0xA358 # U axis offset
MAT_MAP_VOFFSET = 0xA35A # V axis offset
MAT_MAP_ANG = 0xA35C # UV rotation around the z-axis in rad
MAP_COL1 = 0xA360 # Tint Color1
MAP_COL2 = 0xA362 # Tint Color2
MAP_RCOL = 0xA364 # Red tint
@@ -96,6 +96,7 @@ LIGHT_MULTIPLIER = 0x465B # The light energy factor
LIGHT_SPOTLIGHT = 0x4610 # The target of a spotlight
LIGHT_SPOT_ROLL = 0x4656 # Light spot roll angle
LIGHT_SPOT_SHADOWED = 0x4630 # Light spot shadow flag
LIGHT_SPOT_LSHADOW = 0x4641 # Light spot shadow parameters
LIGHT_SPOT_SEE_CONE = 0x4650 # Light spot show cone flag
LIGHT_SPOT_RECTANGLE = 0x4651 # Light spot rectangle flag
@@ -1361,10 +1362,10 @@ def make_target_node(ob, translation, rotation, scale, name_id):
ob_rot = rotation[name]
ob_size = scale[name]
diagonal = math.copysign(math.sqrt(pow(ob_pos[0],2) + pow(ob_pos[1],2)), ob_pos[1])
target_x = ob_pos[0] + (ob_pos[1] * math.tan(ob_rot[2]))
target_y = ob_pos[1] + (ob_pos[0] * math.tan(math.radians(90) - ob_rot[2]))
target_z = -1 * diagonal * math.tan(math.radians(90) - ob_rot[0])
diagonal = math.copysign(math.sqrt(pow(ob_pos.x, 2) + pow(ob_pos.y, 2)), ob_pos.y)
target_x = -1 * math.copysign(ob_pos.x + (ob_pos.y * math.tan(ob_rot.z)), ob_rot.x)
target_y = -1 * math.copysign(ob_pos.y + (ob_pos.x * math.tan(math.radians(90) - ob_rot.z)), ob_rot.z)
target_z = -1 * math.copysign(diagonal * math.tan(math.radians(90) - ob_rot.x), ob_pos.z)
# Add track chunks for target position
track_chunk = _3ds_chunk(POS_TRACK_TAG)
@@ -1387,16 +1388,16 @@ def make_target_node(ob, translation, rotation, scale, name_id):
for i, frame in enumerate(kframes):
loc_target = [fc for fc in fcurves if fc is not None and fc.data_path == 'location']
locate_x = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 0), ob_pos.x)
locate_y = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 1), ob_pos.y)
locate_z = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 2), ob_pos.z)
loc_x = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 0), ob_pos.x)
loc_y = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 1), ob_pos.y)
loc_z = next((tc.evaluate(frame) for tc in loc_target if tc.array_index == 2), ob_pos.z)
rot_target = [fc for fc in fcurves if fc is not None and fc.data_path == 'rotation_euler']
rotate_x = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 0), ob_rot.x)
rotate_z = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 2), ob_rot.z)
diagonal = math.copysign(math.sqrt(pow(locate_x, 2) + pow(locate_y, 2)), locate_y)
target_x = locate_x + (locate_y * math.tan(rotate_z))
target_y = locate_y + (locate_x * math.tan(math.radians(90) - rotate_z))
target_z = -1 * diagonal * math.tan(math.radians(90) - rotate_x)
rot_x = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 0), ob_rot.x)
rot_z = next((tc.evaluate(frame) for tc in rot_target if tc.array_index == 2), ob_rot.z)
diagonal = math.copysign(math.sqrt(pow(loc_x, 2) + pow(loc_y, 2)), loc_y)
target_x = -1 * math.copysign(loc_x + (loc_y * math.tan(rot_z)), rot_x)
target_y = -1 * math.copysign(loc_y + (loc_x * math.tan(math.radians(90) - rot_z)), rot_z)
target_z = -1 * math.copysign(diagonal * math.tan(math.radians(90) - rot_x), loc_z)
track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame)))
track_chunk.add_variable("tcb_flags", _3ds_ushort())
track_chunk.add_variable("position", _3ds_point_3d((target_x, target_y, target_z)))
@@ -1419,7 +1420,7 @@ def make_target_node(ob, translation, rotation, scale, name_id):
def make_ambient_node(world):
"""Make an ambient node for the world color, if the color is animated."""
amb_color = world.color
amb_color = world.color[:3]
amb_node = _3ds_chunk(AMBIENT_NODE_TAG)
track_chunk = _3ds_chunk(COL_TRACK_TAG)
@@ -1455,7 +1456,7 @@ def make_ambient_node(world):
for i, frame in enumerate(kframes):
ambient = [fc.evaluate(frame) for fc in fcurves if fc is not None and fc.data_path == 'color']
if not ambient:
ambient.append(world.color)
ambient = amb_color
track_chunk.add_variable("tcb_frame", _3ds_uint(int(frame)))
track_chunk.add_variable("tcb_flags", _3ds_ushort())
track_chunk.add_variable("color", _3ds_float_color(ambient))
@@ -1697,17 +1698,25 @@ def save(operator, context, filepath="", use_selection=False, use_hierarchy=Fals
if ob.data.type == 'SPOT':
cone_angle = math.degrees(ob.data.spot_size)
hotspot = cone_angle - (ob.data.spot_blend * math.floor(cone_angle))
hypo = math.copysign(math.sqrt(pow(ob.location[0], 2) + pow(ob.location[1], 2)), ob.location[1])
pos_x = ob.location[0] + (ob.location[1] * math.tan(ob.rotation_euler[2]))
pos_y = ob.location[1] + (ob.location[0] * math.tan(math.radians(90) - ob.rotation_euler[2]))
pos_z = hypo * math.tan(math.radians(90) - ob.rotation_euler[0])
hypo = math.copysign(math.sqrt(pow(ob.location.x, 2) + pow(ob.location.y, 2)), ob.location.y)
pos_x = -1 * math.copysign(ob.location.x + (ob.location.y * math.tan(ob.rotation_euler.z)), ob.rotation_euler.x)
pos_y = -1 * math.copysign(ob.location.y + (ob.location.x * math.tan(math.radians(90) - ob.rotation_euler.z)), ob.rotation_euler.z)
pos_z = -1 * math.copysign(hypo * math.tan(math.radians(90) - ob.rotation_euler.x), ob.location.z)
spotlight_chunk = _3ds_chunk(LIGHT_SPOTLIGHT)
spot_roll_chunk = _3ds_chunk(LIGHT_SPOT_ROLL)
spotlight_chunk.add_variable("target", _3ds_point_3d((pos_x, pos_y, pos_z)))
spotlight_chunk.add_variable("hotspot", _3ds_float(round(hotspot, 4)))
spotlight_chunk.add_variable("angle", _3ds_float(round(cone_angle, 4)))
spot_roll_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6)))
spot_roll_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler.y, 6)))
spotlight_chunk.add_subchunk(spot_roll_chunk)
if ob.data.use_shadow:
spot_shadow_flag = _3ds_chunk(LIGHT_SPOT_SHADOWED)
spot_shadow_chunk = _3ds_chunk(LIGHT_SPOT_LSHADOW)
spot_shadow_chunk.add_variable("bias", _3ds_float(round(ob.data.shadow_buffer_bias,4)))
spot_shadow_chunk.add_variable("filter", _3ds_float(round((ob.data.shadow_buffer_clip_start * 10),4)))
spot_shadow_chunk.add_variable("buffer", _3ds_ushort(0x200))
spotlight_chunk.add_subchunk(spot_shadow_flag)
spotlight_chunk.add_subchunk(spot_shadow_chunk)
if ob.data.show_cone:
spot_cone_chunk = _3ds_chunk(LIGHT_SPOT_SEE_CONE)
spotlight_chunk.add_subchunk(spot_cone_chunk)
@@ -1744,14 +1753,14 @@ def save(operator, context, filepath="", use_selection=False, use_hierarchy=Fals
for ob in camera_objects:
object_chunk = _3ds_chunk(OBJECT)
camera_chunk = _3ds_chunk(OBJECT_CAMERA)
diagonal = math.copysign(math.sqrt(pow(ob.location[0], 2) + pow(ob.location[1], 2)), ob.location[1])
focus_x = ob.location[0] + (ob.location[1] * math.tan(ob.rotation_euler[2]))
focus_y = ob.location[1] + (ob.location[0] * math.tan(math.radians(90) - ob.rotation_euler[2]))
focus_z = diagonal * math.tan(math.radians(90) - ob.rotation_euler[0])
diagonal = math.copysign(math.sqrt(pow(ob.location.x, 2) + pow(ob.location.y, 2)), ob.location.y)
focus_x = -1 * math.copysign(ob.location.x + (ob.location.y * math.tan(ob.rotation_euler.z)), ob.rotation_euler.x)
focus_y = -1 * math.copysign(ob.location.y + (ob.location.x * math.tan(math.radians(90) - ob.rotation_euler.z)), ob.rotation_euler.z)
focus_z = -1 * math.copysign(diagonal * math.tan(math.radians(90) - ob.rotation_euler.x), ob.location.z)
object_chunk.add_variable("camera", _3ds_string(sane_name(ob.name)))
camera_chunk.add_variable("location", _3ds_point_3d(ob.location))
camera_chunk.add_variable("target", _3ds_point_3d((focus_x, focus_y, focus_z)))
camera_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler[1], 6)))
camera_chunk.add_variable("roll", _3ds_float(round(ob.rotation_euler.y, 6)))
camera_chunk.add_variable("lens", _3ds_float(ob.data.lens))
object_chunk.add_subchunk(camera_chunk)

View File

@@ -220,10 +220,6 @@ def read_string(file):
return str(b''.join(s), "utf-8", "replace"), len(s) + 1
##########
# IMPORT #
##########
def skip_to_end(file, skip_chunk):
buffer_size = skip_chunk.length - skip_chunk.bytes_read
binary_format = '%ic' % buffer_size
@@ -231,6 +227,10 @@ def skip_to_end(file, skip_chunk):
skip_chunk.bytes_read += buffer_size
#############
# MATERIALS #
#############
def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tintcolor, mapto):
shader = contextWrapper.node_principled_bsdf
nodetree = contextWrapper.material.node_tree
@@ -318,6 +318,10 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)
#############
# MESH DATA #
#############
childs_list = []
parent_list = []
@@ -335,6 +339,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
contextMeshMaterials = []
contextMesh_smooth = None
contextMeshUV = None
contextTrack_flag = False
# TEXTURE_DICT = {}
MATDICT = {}
@@ -353,7 +358,6 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
object_list = [] # for hierarchy
object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent
pivot_list = [] # pivots with hierarchy handling
track_flags = [] # keyframe track flags
trackposition = {} # keep track to position for target calculation
def putContextMesh(
@@ -469,20 +473,25 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
CreateLightObject = False
CreateTrackData = False
def read_float_color(temp_chunk):
temp_data = file.read(SZ_3FLOAT)
temp_chunk.bytes_read += SZ_3FLOAT
return [float(col) for col in struct.unpack('<3f', temp_data)]
def read_short(temp_chunk):
temp_data = file.read(SZ_U_SHORT)
temp_chunk.bytes_read += SZ_U_SHORT
return struct.unpack('<H', temp_data)[0]
def read_long(temp_chunk):
temp_data = file.read(SZ_U_INT)
temp_chunk.bytes_read += SZ_U_INT
return struct.unpack('<I', temp_data)[0]
def read_float(temp_chunk):
temp_data = file.read(SZ_FLOAT)
temp_chunk.bytes_read += SZ_FLOAT
return struct.unpack('<f', temp_data)[0]
def read_short(temp_chunk):
temp_data = file.read(SZ_U_SHORT)
temp_chunk.bytes_read += SZ_U_SHORT
return struct.unpack('<H', temp_data)[0]
def read_float_array(temp_chunk):
temp_data = file.read(SZ_3FLOAT)
temp_chunk.bytes_read += SZ_3FLOAT
return [float(val) for val in struct.unpack('<3f', temp_data)]
def read_byte_color(temp_chunk):
temp_data = file.read(struct.calcsize('3B'))
@@ -570,90 +579,90 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
consize = mathutils.Vector(vec) * convector if CONSTRAIN != 0.0 else mathutils.Vector(vec)
return consize
def calc_target(location, target):
def get_hierarchy(tree_chunk):
child_id = read_short(tree_chunk)
childs_list.insert(child_id, contextObName)
parent_list.insert(child_id, None)
if child_id in parent_list:
idp = parent_list.index(child_id)
parent_list[idp] = contextObName
return child_id
def get_parent(tree_chunk, child_id):
parent_id = read_short(tree_chunk)
if parent_id > len(childs_list):
parent_list[child_id] = parent_id
parent_list.extend([None] * (parent_id - len(parent_list)))
parent_list.insert(parent_id, contextObName)
elif parent_id < len(childs_list):
parent_list[child_id] = childs_list[parent_id]
def calc_target(loc, target):
pan = 0.0
tilt = 0.0
pos = location + target # Target triangulation
if abs(location[0] - target[0]) > abs(location[1] - target[1]):
foc = math.copysign(math.sqrt(pow(pos[0],2) + pow(pos[1],2)), pos[0])
dia = math.copysign(math.sqrt(pow(foc,2) + pow(target[2],2)), pos[0])
pitch = math.radians(90) - math.copysign(math.acos(foc / dia), pos[2])
if location[0] > target[0]:
tilt = math.copysign(pitch, pos[0])
pan = math.radians(90) + math.atan(pos[1] / foc)
pos = loc + target # Target triangulation
if abs(loc.x - target.x) > abs(loc.y - target.y):
foc = math.copysign(math.sqrt(pow(pos.x,2) + pow(pos.y,2)), pos.x)
dia = math.copysign(math.sqrt(pow(foc,2) + pow(target.z,2)), pos.x)
pitch = math.radians(90) - math.copysign(math.acos(foc / dia), loc.z)
if loc.x > target.x:
tilt = math.copysign(pitch, pos.x)
pan = math.radians(90) + math.atan(pos.y / foc)
else:
tilt = -1 * (math.copysign(pitch, pos[0]))
pan = -1 * (math.radians(90) - math.atan(pos[1] / foc))
if abs(location[1]) < abs(target[1]):
tilt = -1 * (math.copysign(pitch, pos.x))
pan = -1 * (math.radians(90) - math.atan(pos.y / foc))
if abs(loc.x) < abs(target.x):
tilt = -1 * tilt
pan = -1 * pan
elif abs(location[1] - target[1]) > abs(location[0] - target[0]):
foc = math.copysign(math.sqrt(pow(pos[1],2) + pow(pos[0],2)), pos[1])
dia = math.copysign(math.sqrt(pow(foc,2) + pow(target[2],2)), pos[1])
pitch = math.radians(90) - math.copysign(math.acos(foc / dia), pos[2])
if location[1] > target[1]:
tilt = math.copysign(pitch, pos[1])
pan = math.radians(90) + math.acos(pos[0] / foc)
elif abs(loc.y - target.y) > abs(loc.x - target.x):
foc = math.copysign(math.sqrt(pow(pos.y,2) + pow(pos.x,2)), pos.y)
dia = math.copysign(math.sqrt(pow(foc,2) + pow(target.z,2)), pos.y)
pitch = math.radians(90) - math.copysign(math.acos(foc / dia), loc.z)
if loc.y > target.y:
tilt = math.copysign(pitch, pos.y)
pan = math.radians(90) + math.acos(pos.x / foc)
else:
tilt = -1 * (math.copysign(pitch, pos[1]))
pan = -1 * (math.radians(90) - math.acos(pos[0] / foc))
if abs(location[0]) < abs(target[0]):
tilt = -1 * (math.copysign(pitch, pos.y))
pan = -1 * (math.radians(90) - math.acos(pos.x / foc))
if abs(loc.y) < abs(target.y):
tilt = -1 * tilt
pan = -1 * pan
direction = tilt, pan
return direction
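# Note (illustration only, not part of the diff above): calc_target() turns the imported
# 3DS target point into tilt (X) and pan (Z) Euler angles, because Blender cameras and
# spot lamps have no native target object on plain import. For comparison, a minimal
# atan2/acos sketch of the same look-at conversion for a -Z facing object with XYZ Euler
# and no roll, using Blender's mathutils as the add-on does; it does not reproduce the
# add-on's copysign-based quadrant handling.
import math
from mathutils import Vector

def aim_at(location, target):
    d = Vector(target) - Vector(location)  # direction from the object towards the target
    if d.length == 0.0:
        return 0.0, 0.0                    # object and target coincide, nothing to aim
    tilt = math.acos(-d.z / d.length)      # X rotation: angle away from looking straight down
    pan = math.atan2(-d.x, d.y)            # Z rotation: heading in the XY plane
    return tilt, pan                       # assign to rotation_euler[0] and rotation_euler[2]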
def read_track_data(temp_chunk):
def read_track_data(track_chunk):
"""Trackflags 0x1, 0x2 and 0x3 are for looping. 0x8, 0x10 and 0x20
locks the XYZ axes. 0x100, 0x200 and 0x400 unlinks the XYZ axes"""
temp_data = file.read(SZ_U_SHORT)
tflags = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += SZ_U_SHORT
track_flags.append(tflags)
tflags = read_short(track_chunk)
contextTrack_flag = tflags
temp_data = file.read(SZ_U_INT * 2)
new_chunk.bytes_read += SZ_U_INT * 2
temp_data = file.read(SZ_U_INT)
nkeys = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += SZ_U_INT
track_chunk.bytes_read += SZ_U_INT * 2
nkeys = read_long(track_chunk)
if nkeys == 0:
keyframe_data[0] = default_data
for i in range(nkeys):
temp_data = file.read(SZ_U_INT)
nframe = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += SZ_U_INT
temp_data = file.read(SZ_U_SHORT)
nflags = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += SZ_U_SHORT
nframe = read_long(track_chunk)
nflags = read_short(track_chunk)
for f in range(bin(nflags).count('1')):
temp_data = file.read(SZ_FLOAT) # Check for spline terms
new_chunk.bytes_read += SZ_FLOAT
temp_data = file.read(SZ_3FLOAT)
data = struct.unpack('<3f', temp_data)
new_chunk.bytes_read += SZ_3FLOAT
keyframe_data[nframe] = data
track_chunk.bytes_read += SZ_FLOAT
trackdata = read_float_array(track_chunk)
keyframe_data[nframe] = trackdata
return keyframe_data
def read_track_angle(temp_chunk):
def read_track_angle(track_chunk):
temp_data = file.read(SZ_U_SHORT * 5)
new_chunk.bytes_read += SZ_U_SHORT * 5
temp_data = file.read(SZ_U_INT)
nkeys = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += SZ_U_INT
track_chunk.bytes_read += SZ_U_SHORT * 5
nkeys = read_long(track_chunk)
if nkeys == 0:
keyframe_angle[0] = default_value
for i in range(nkeys):
temp_data = file.read(SZ_U_INT)
nframe = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += SZ_U_INT
temp_data = file.read(SZ_U_SHORT)
nflags = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += SZ_U_SHORT
nframe = read_long(track_chunk)
nflags = read_short(track_chunk)
for f in range(bin(nflags).count('1')):
temp_data = file.read(SZ_FLOAT) # Check for spline terms
new_chunk.bytes_read += SZ_FLOAT
temp_data = file.read(SZ_FLOAT)
angle = struct.unpack('<f', temp_data)[0]
new_chunk.bytes_read += SZ_FLOAT
track_chunk.bytes_read += SZ_FLOAT
angle = read_float(track_chunk)
keyframe_angle[nframe] = math.radians(angle)
return keyframe_angle
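# Note (illustration only, not part of the diff): the track-flag docstring in read_track_data()
# above documents the bits that later drive contextTrack_flag — 0x8/0x10/0x20 lock the X/Y/Z
# axes and 0x100/0x200/0x400 unlink them. A minimal sketch of that decoding, mirroring the
# lock_location / keyframe_insert checks further down in process_next_chunk():
LOCK_BITS = (0x8, 0x10, 0x20)          # per-axis lock flags
UNLINK_BITS = (0x100, 0x200, 0x400)    # per-axis "unlinked" flags

def locked_axes(tflags):
    """Axis indices (0=X, 1=Y, 2=Z) whose location should be locked."""
    return [axis for axis, bit in enumerate(LOCK_BITS) if tflags & bit]

def linked_axes(tflags):
    """Axis indices that should still receive keyframes."""
    return [axis for axis, bit in enumerate(UNLINK_BITS) if not tflags & bit]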
@@ -681,9 +690,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
context.scene.world = world
read_chunk(file, temp_chunk)
if temp_chunk.ID == COLOR_F:
context.scene.world.color[:] = read_float_color(temp_chunk)
context.scene.world.color[:] = read_float_array(temp_chunk)
elif temp_chunk.ID == LIN_COLOR_F:
context.scene.world.color[:] = read_float_color(temp_chunk)
context.scene.world.color[:] = read_float_array(temp_chunk)
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
@@ -736,7 +745,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
read_chunk(file, temp_chunk)
# to not lose this data, ambient color is stored in line color
if temp_chunk.ID == COLOR_F:
contextMaterial.line_color[:3] = read_float_color(temp_chunk)
contextMaterial.line_color[:3] = read_float_array(temp_chunk)
elif temp_chunk.ID == COLOR_24:
contextMaterial.line_color[:3] = read_byte_color(temp_chunk)
else:
@@ -746,7 +755,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_DIFFUSE:
read_chunk(file, temp_chunk)
if temp_chunk.ID == COLOR_F:
contextMaterial.diffuse_color[:3] = read_float_color(temp_chunk)
contextMaterial.diffuse_color[:3] = read_float_array(temp_chunk)
elif temp_chunk.ID == COLOR_24:
contextMaterial.diffuse_color[:3] = read_byte_color(temp_chunk)
else:
@@ -756,7 +765,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_SPECULAR:
read_chunk(file, temp_chunk)
if temp_chunk.ID == COLOR_F:
contextMaterial.specular_color = read_float_color(temp_chunk)
contextMaterial.specular_color = read_float_array(temp_chunk)
elif temp_chunk.ID == COLOR_24:
contextMaterial.specular_color = read_byte_color(temp_chunk)
else:
@@ -766,49 +775,39 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_SHINESS:
read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT:
temp_data = file.read(SZ_U_SHORT)
temp_chunk.bytes_read += SZ_U_SHORT
contextMaterial.roughness = 1 - (float(struct.unpack('<H', temp_data)[0]) / 100)
contextMaterial.roughness = 1 - (float(read_short(temp_chunk) / 100))
elif temp_chunk.ID == PCT_FLOAT:
temp_data = file.read(SZ_FLOAT)
temp_chunk.bytes_read += SZ_FLOAT
contextMaterial.roughness = 1 - float(struct.unpack('<f', temp_data)[0])
contextMaterial.roughness = 1.0 - float(read_float(temp_chunk))
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
elif new_chunk.ID == MAT_SHIN2:
read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT:
temp_data = file.read(SZ_U_SHORT)
temp_chunk.bytes_read += SZ_U_SHORT
contextMaterial.specular_intensity = (float(struct.unpack('<H', temp_data)[0]) / 100)
contextMaterial.specular_intensity = float(read_short(temp_chunk) / 100)
elif temp_chunk.ID == PCT_FLOAT:
temp_data = file.read(SZ_FLOAT)
temp_chunk.bytes_read += SZ_FLOAT
contextMaterial.specular_intensity = float(struct.unpack('<f', temp_data)[0])
contextMaterial.specular_intensity = float(read_float(temp_chunk))
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
elif new_chunk.ID == MAT_SHIN3:
read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT:
temp_data = file.read(SZ_U_SHORT)
temp_chunk.bytes_read += SZ_U_SHORT
contextMaterial.metallic = (float(struct.unpack('<H', temp_data)[0]) / 100)
contextMaterial.metallic = float(read_short(temp_chunk) / 100)
elif temp_chunk.ID == PCT_FLOAT:
temp_data = file.read(SZ_FLOAT)
temp_chunk.bytes_read += SZ_FLOAT
contextMaterial.metallic = float(struct.unpack('<f', temp_data)[0])
contextMaterial.metallic = float(read_float(temp_chunk))
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
elif new_chunk.ID == MAT_TRANSPARENCY:
read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT:
temp_data = file.read(SZ_U_SHORT)
temp_chunk.bytes_read += SZ_U_SHORT
contextMaterial.diffuse_color[3] = 1 - (float(struct.unpack('<H', temp_data)[0]) / 100)
contextMaterial.diffuse_color[3] = 1 - (float(read_short(temp_chunk) / 100))
elif temp_chunk.ID == PCT_FLOAT:
temp_data = file.read(SZ_FLOAT)
temp_chunk.bytes_read += SZ_FLOAT
contextMaterial.diffuse_color[3] = 1 - float(struct.unpack('<f', temp_data)[0])
contextMaterial.diffuse_color[3] = 1.0 - float(read_float(temp_chunk))
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
@@ -816,13 +815,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_SELF_ILPCT:
read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT:
temp_data = file.read(SZ_U_SHORT)
temp_chunk.bytes_read += SZ_U_SHORT
contextMaterial.line_priority = int(struct.unpack('<H', temp_data)[0])
contextMaterial.line_priority = int(read_short(temp_chunk))
elif temp_chunk.ID == PCT_FLOAT:
temp_data = file.read(SZ_FLOAT)
temp_chunk.bytes_read += SZ_FLOAT
contextMaterial.line_priority = (float(struct.unpack('<f', temp_data)[0]) * 100)
contextMaterial.line_priority = (float(read_float(temp_chunk)) * 100)
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
elif new_chunk.ID == MAT_SHADING:
@@ -858,13 +855,9 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MAT_BUMP_PERCENT:
read_chunk(file, temp_chunk)
if temp_chunk.ID == PCT_SHORT:
temp_data = file.read(SZ_U_SHORT)
temp_chunk.bytes_read += SZ_U_SHORT
contextWrapper.normalmap_strength = (float(struct.unpack('<H', temp_data)[0]) / 100)
contextWrapper.normalmap_strength = (float(read_short(temp_chunk) / 100))
elif temp_chunk.ID == PCT_FLOAT:
temp_data = file.read(SZ_FLOAT)
temp_chunk.bytes_read += SZ_FLOAT
contextWrapper.normalmap_strength = float(struct.unpack('<f', temp_data)[0])
contextWrapper.normalmap_strength = float(read_float(temp_chunk))
else:
skip_to_end(file, temp_chunk)
new_chunk.bytes_read += temp_chunk.bytes_read
@@ -884,16 +877,12 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == OBJECT_VERTICES:
"""Worldspace vertex locations"""
temp_data = file.read(SZ_U_SHORT)
num_verts = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += 2
num_verts = read_short(new_chunk)
contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(SZ_3FLOAT * num_verts))
new_chunk.bytes_read += SZ_3FLOAT * num_verts
elif new_chunk.ID == OBJECT_FACES:
temp_data = file.read(SZ_U_SHORT)
num_faces = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += 2
num_faces = read_short(new_chunk)
temp_data = file.read(SZ_4U_SHORT * num_faces)
new_chunk.bytes_read += SZ_4U_SHORT * num_faces # 4 short ints x 2 bytes each
contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data)
@@ -903,9 +892,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == OBJECT_MATERIAL:
material_name, read_str_len = read_string(file)
new_chunk.bytes_read += read_str_len # remove 1 null character.
temp_data = file.read(SZ_U_SHORT)
num_faces_using_mat = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += SZ_U_SHORT
num_faces_using_mat = read_short(new_chunk)
temp_data = file.read(SZ_U_SHORT * num_faces_using_mat)
new_chunk.bytes_read += SZ_U_SHORT * num_faces_using_mat
temp_data = struct.unpack('<%dH' % (num_faces_using_mat), temp_data)
@@ -919,9 +906,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
contextMesh_smooth = smoothgroup
elif new_chunk.ID == OBJECT_UV:
temp_data = file.read(SZ_U_SHORT)
num_uv = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += 2
num_uv = read_short(new_chunk)
temp_data = file.read(SZ_2FLOAT * num_uv)
new_chunk.bytes_read += SZ_2FLOAT * num_uv
contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)
@@ -929,27 +914,16 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == OBJECT_TRANS_MATRIX:
# How do we know the matrix size? 54 == 4x4 48 == 4x3
temp_data = file.read(SZ_4x3MAT)
data = list(struct.unpack('<ffffffffffff', temp_data))
mtx = list(struct.unpack('<ffffffffffff', temp_data))
new_chunk.bytes_read += SZ_4x3MAT
contextMatrix = mathutils.Matrix(
(data[:3] + [0], data[3:6] + [0], data[6:9] + [0], data[9:] + [1])).transposed()
(mtx[:3] + [0], mtx[3:6] + [0], mtx[6:9] + [0], mtx[9:] + [1])).transposed()
# If hierarchy chunk
elif new_chunk.ID == OBJECT_HIERARCHY:
child_id = read_short(new_chunk)
childs_list.insert(child_id, contextObName)
parent_list.insert(child_id, None)
if child_id in parent_list:
idp = parent_list.index(child_id)
parent_list[idp] = contextObName
child_id = get_hierarchy(new_chunk)
elif new_chunk.ID == OBJECT_PARENT:
parent_id = read_short(new_chunk)
if parent_id > len(childs_list):
parent_list[child_id] = parent_id
parent_list.extend([None]*(parent_id-len(parent_list)))
parent_list.insert(parent_id, contextObName)
elif parent_id < len(childs_list):
parent_list[child_id] = childs_list[parent_id]
get_parent(new_chunk, child_id)
# If light chunk
elif contextObName and new_chunk.ID == OBJECT_LIGHT: # Basic lamp support
@@ -958,63 +932,45 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
context.view_layer.active_layer_collection.collection.objects.link(contextLamp)
imported_objects.append(contextLamp)
object_dictionary[contextObName] = contextLamp
temp_data = file.read(SZ_3FLOAT)
contextLamp.location = struct.unpack('<3f', temp_data)
new_chunk.bytes_read += SZ_3FLOAT
contextLamp.location = read_float_array(new_chunk) # Position
CreateBlenderObject = False
CreateLightObject = True
contextMatrix = None # Reset matrix
elif CreateLightObject and new_chunk.ID == COLOR_F: # Light color
temp_data = file.read(SZ_3FLOAT)
contextLamp.data.color = struct.unpack('<3f', temp_data)
new_chunk.bytes_read += SZ_3FLOAT
contextMatrix = None # Reset matrix
elif CreateLightObject and new_chunk.ID == RGB: # Color
contextLamp.data.color = read_float_array(new_chunk)
elif CreateLightObject and new_chunk.ID == LIGHT_MULTIPLIER: # Intensity
temp_data = file.read(SZ_FLOAT)
contextLamp.data.energy = (float(struct.unpack('<f', temp_data)[0]) * 1000)
new_chunk.bytes_read += SZ_FLOAT
contextLamp.data.energy = (read_float(new_chunk) * 1000)
# If spotlight chunk
elif CreateLightObject and new_chunk.ID == LIGHT_SPOTLIGHT: # Spotlight
temp_data = file.read(SZ_3FLOAT)
contextLamp.data.type = 'SPOT'
spot = mathutils.Vector(struct.unpack('<3f', temp_data))
contextLamp.data.use_shadow = False
spot = mathutils.Vector(read_float_array(new_chunk)) # Spot location
aim = calc_target(contextLamp.location, spot) # Target
contextLamp.rotation_euler[0] = aim[0]
contextLamp.rotation_euler[2] = aim[1]
new_chunk.bytes_read += SZ_3FLOAT
temp_data = file.read(SZ_FLOAT) # Hotspot
hotspot = float(struct.unpack('<f', temp_data)[0])
new_chunk.bytes_read += SZ_FLOAT
temp_data = file.read(SZ_FLOAT) # Beam angle
beam_angle = float(struct.unpack('<f', temp_data)[0])
hotspot = read_float(new_chunk) # Hotspot
beam_angle = read_float(new_chunk) # Beam angle
contextLamp.data.spot_size = math.radians(beam_angle)
contextLamp.data.spot_blend = 1.0 - (hotspot / beam_angle)
new_chunk.bytes_read += SZ_FLOAT
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_ROLL: # Roll
temp_data = file.read(SZ_FLOAT)
contextLamp.rotation_euler[1] = float(struct.unpack('<f', temp_data)[0])
new_chunk.bytes_read += SZ_FLOAT
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_SHADOWED: # Shadow
contextLamp.rotation_euler[1] = read_float(new_chunk)
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_SHADOWED: # Shadow flag
contextLamp.data.use_shadow = True
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_SEE_CONE: # Cone
elif CreateLightObject and new_chunk.ID == LIGHT_LOCAL_SHADOW2: # Shadow parameters
contextLamp.data.shadow_buffer_bias = read_float(new_chunk)
contextLamp.data.shadow_buffer_clip_start = (read_float(new_chunk) * 0.1)
temp_data = file.read(SZ_U_SHORT)
new_chunk.bytes_read += SZ_U_SHORT
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_SEE_CONE: # Cone flag
contextLamp.data.show_cone = True
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_RECTANGLE: # Square
elif CreateLightObject and new_chunk.ID == LIGHT_SPOT_RECTANGLE: # Square flag
contextLamp.data.use_square = True
elif CreateLightObject and new_chunk.ID == OBJECT_HIERARCHY:
child_id = read_short(new_chunk)
childs_list.insert(child_id, contextObName)
parent_list.insert(child_id, None)
if child_id in parent_list:
idp = parent_list.index(child_id)
parent_list[idp] = contextObName
elif CreateLightObject and new_chunk.ID == OBJECT_HIERARCHY: # Hierarchy
child_id = get_hierarchy(new_chunk)
elif CreateLightObject and new_chunk.ID == OBJECT_PARENT:
parent_id = read_short(new_chunk)
if parent_id > len(childs_list):
parent_list[child_id] = parent_id
parent_list.extend([None]*(parent_id-len(parent_list)))
parent_list.insert(parent_id, contextObName)
elif parent_id < len(childs_list):
parent_list[child_id] = childs_list[parent_id]
get_parent(new_chunk, child_id)
# If camera chunk
elif contextObName and new_chunk.ID == OBJECT_CAMERA: # Basic camera support
@@ -1023,58 +979,33 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
context.view_layer.active_layer_collection.collection.objects.link(contextCamera)
imported_objects.append(contextCamera)
object_dictionary[contextObName] = contextCamera
temp_data = file.read(SZ_3FLOAT)
contextCamera.location = struct.unpack('<3f', temp_data)
new_chunk.bytes_read += SZ_3FLOAT
temp_data = file.read(SZ_3FLOAT)
focus = mathutils.Vector(struct.unpack('<3f', temp_data))
contextCamera.location = read_float_array(new_chunk) # Position
focus = mathutils.Vector(read_float_array(new_chunk))
direction = calc_target(contextCamera.location, focus) # Target
new_chunk.bytes_read += SZ_3FLOAT
temp_data = file.read(SZ_FLOAT)
contextCamera.rotation_euler[0] = direction[0]
contextCamera.rotation_euler[1] = float(struct.unpack('<f', temp_data)[0]) # Roll
contextCamera.rotation_euler[1] = read_float(new_chunk) # Roll
contextCamera.rotation_euler[2] = direction[1]
new_chunk.bytes_read += SZ_FLOAT
temp_data = file.read(SZ_FLOAT)
contextCamera.data.lens = float(struct.unpack('<f', temp_data)[0]) # Focus
new_chunk.bytes_read += SZ_FLOAT
contextCamera.data.lens = read_float(new_chunk) # Focal length
CreateBlenderObject = False
CreateCameraObject = True
contextMatrix = None # Reset matrix
elif CreateCameraObject and new_chunk.ID == OBJECT_HIERARCHY:
child_id = read_short(new_chunk)
childs_list.insert(child_id, contextObName)
parent_list.insert(child_id, None)
if child_id in parent_list:
idp = parent_list.index(child_id)
parent_list[idp] = contextObName
elif CreateCameraObject and new_chunk.ID == OBJECT_HIERARCHY: # Hierarchy
child_id = get_hierarchy(new_chunk)
elif CreateCameraObject and new_chunk.ID == OBJECT_PARENT:
parent_id = read_short(new_chunk)
if parent_id > len(childs_list):
parent_list[child_id] = parent_id
parent_list.extend([None]*(parent_id-len(parent_list)))
parent_list.insert(parent_id, contextObName)
elif parent_id < len(childs_list):
parent_list[child_id] = childs_list[parent_id]
get_parent(new_chunk, child_id)
# start keyframe section
elif new_chunk.ID == EDITKEYFRAME:
pass
elif KEYFRAME and new_chunk.ID == KFDATA_KFSEG:
temp_data = file.read(SZ_U_INT)
start = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += 4
start = read_long(new_chunk)
context.scene.frame_start = start
temp_data = file.read(SZ_U_INT)
stop = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += 4
stop = read_long(new_chunk)
context.scene.frame_end = stop
elif KEYFRAME and new_chunk.ID == KFDATA_CURTIME:
temp_data = file.read(SZ_U_INT)
current = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += 4
current = read_long(new_chunk)
context.scene.frame_current = current
# including these here means their OB_NODE_HDR are scanned
@@ -1089,18 +1020,14 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
child = None
elif new_chunk.ID == OBJECT_NODE_ID:
temp_data = file.read(SZ_U_SHORT)
object_id = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += 2
object_id = read_short(new_chunk)
elif new_chunk.ID == OBJECT_NODE_HDR:
object_name, read_str_len = read_string(file)
new_chunk.bytes_read += read_str_len
temp_data = file.read(SZ_U_SHORT * 2)
new_chunk.bytes_read += 4
temp_data = file.read(SZ_U_SHORT)
hierarchy = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += 2
hierarchy = read_short(new_chunk)
child = object_dictionary.get(object_name)
colortrack = 'LIGHT'
if child is None:
@@ -1138,66 +1065,62 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
new_chunk.bytes_read += read_str_len
elif new_chunk.ID == OBJECT_PIVOT: # Pivot
temp_data = file.read(SZ_3FLOAT)
pivot = struct.unpack('<3f', temp_data)
new_chunk.bytes_read += SZ_3FLOAT
pivot = read_float_array(new_chunk)
pivot_list[len(pivot_list) - 1] = mathutils.Vector(pivot)
elif new_chunk.ID == MORPH_SMOOTH and child.type == 'MESH': # Smooth angle
child.data.use_auto_smooth = True
temp_data = file.read(SZ_FLOAT)
smooth_angle = struct.unpack('<f', temp_data)[0]
new_chunk.bytes_read += SZ_FLOAT
smooth_angle = read_float(new_chunk)
child.data.auto_smooth_angle = smooth_angle
elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and colortrack == 'AMBIENT': # Ambient
keyframe_data = {}
default_data = child.color[:]
child.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_track_data(temp_chunk)[0]
child.node_tree.nodes['Background'].inputs[0].default_value[:3] = read_track_data(new_chunk)[0]
for keydata in keyframe_data.items():
child.node_tree.nodes['Background'].inputs[0].default_value[:3] = keydata[1]
child.node_tree.keyframe_insert(data_path="nodes[\"Background\"].inputs[0].default_value", frame=keydata[0])
track_flags.clear()
contextTrack_flag = False
elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and colortrack == 'LIGHT': # Color
keyframe_data = {}
default_data = child.data.color[:]
child.data.color = read_track_data(temp_chunk)[0]
child.data.color = read_track_data(new_chunk)[0]
for keydata in keyframe_data.items():
child.data.color = keydata[1]
child.data.keyframe_insert(data_path="color", frame=keydata[0])
track_flags.clear()
contextTrack_flag = False
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking == 'OBJECT': # Translation
keyframe_data = {}
default_data = child.location[:]
child.location = read_track_data(temp_chunk)[0]
child.location = read_track_data(new_chunk)[0]
if child.type in {'LIGHT', 'CAMERA'}:
trackposition[0] = child.location
CreateTrackData = True
if track_flags[0] & 0x8: # Flag 0x8 locks X axis
if contextTrack_flag & 0x8: # Flag 0x8 locks X axis
child.lock_location[0] = True
if track_flags[0] & 0x10: # Flag 0x10 locks Y axis
if contextTrack_flag & 0x10: # Flag 0x10 locks Y axis
child.lock_location[1] = True
if track_flags[0] & 0x20: # Flag 0x20 locks Z axis
if contextTrack_flag & 0x20: # Flag 0x20 locks Z axis
child.lock_location[2] = True
for keydata in keyframe_data.items():
trackposition[keydata[0]] = keydata[1] # Keep track to position for target calculation
child.location = apply_constrain(keydata[1]) if hierarchy == ROOT_OBJECT else mathutils.Vector(keydata[1])
if hierarchy == ROOT_OBJECT:
child.location.rotate(CONVERSE)
if not track_flags[0] & 0x100: # Flag 0x100 unlinks X axis
if not contextTrack_flag & 0x100: # Flag 0x100 unlinks X axis
child.keyframe_insert(data_path="location", index=0, frame=keydata[0])
if not track_flags[0] & 0x200: # Flag 0x200 unlinks Y axis
if not contextTrack_flag & 0x200: # Flag 0x200 unlinks Y axis
child.keyframe_insert(data_path="location", index=1, frame=keydata[0])
if not track_flags[0] & 0x400: # Flag 0x400 unlinks Z axis
if not contextTrack_flag & 0x400: # Flag 0x400 unlinks Z axis
child.keyframe_insert(data_path="location", index=2, frame=keydata[0])
track_flags.clear()
contextTrack_flag = False
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracking == 'TARGET': # Target position
keyframe_data = {}
location = child.location
target = mathutils.Vector(read_track_data(temp_chunk)[0])
target = mathutils.Vector(read_track_data(new_chunk)[0])
direction = calc_target(location, target)
child.rotation_euler[0] = direction[0]
child.rotation_euler[2] = direction[1]
@@ -1214,18 +1137,14 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
child.matrix_world = CONVERSE @ child.matrix_world
child.keyframe_insert(data_path="rotation_euler", index=0, frame=keydata[0])
child.keyframe_insert(data_path="rotation_euler", index=2, frame=keydata[0])
track_flags.clear()
contextTrack_flag = False
elif KEYFRAME and new_chunk.ID == ROT_TRACK_TAG and tracking == 'OBJECT': # Rotation
keyframe_rotation = {}
temp_data = file.read(SZ_U_SHORT)
tflags = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += SZ_U_SHORT
tflags = read_short(new_chunk)
temp_data = file.read(SZ_U_INT * 2)
new_chunk.bytes_read += SZ_U_INT * 2
temp_data = file.read(SZ_U_INT)
nkeys = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += SZ_U_INT
nkeys = read_long(new_chunk)
if nkeys == 0:
keyframe_rotation[0] = child.rotation_axis_angle[:]
if tflags & 0x8: # Flag 0x8 locks X axis
@@ -1234,13 +1153,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
child.lock_rotation[1] = True
if tflags & 0x20: # Flag 0x20 locks Z axis
child.lock_rotation[2] = True
if nkeys == 0:
keyframe_rotation[0] = child.rotation_axis_angle[:]
for i in range(nkeys):
temp_data = file.read(SZ_U_INT)
nframe = struct.unpack('<I', temp_data)[0]
new_chunk.bytes_read += SZ_U_INT
temp_data = file.read(SZ_U_SHORT)
nflags = struct.unpack('<H', temp_data)[0]
new_chunk.bytes_read += SZ_U_SHORT
nframe = read_long(new_chunk)
nflags = read_short(new_chunk)
for f in range(bin(nflags).count('1')):
temp_data = file.read(SZ_FLOAT) # Check for spline term values
new_chunk.bytes_read += SZ_FLOAT
@@ -1265,27 +1182,27 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == SCL_TRACK_TAG and tracking == 'OBJECT': # Scale
keyframe_data = {}
default_data = child.scale[:]
child.scale = read_track_data(temp_chunk)[0]
if track_flags[0] & 0x8: # Flag 0x8 locks X axis
child.scale = read_track_data(new_chunk)[0]
if contextTrack_flag & 0x8: # Flag 0x8 locks X axis
child.lock_scale[0] = True
if track_flags[0] & 0x10: # Flag 0x10 locks Y axis
if contextTrack_flag & 0x10: # Flag 0x10 locks Y axis
child.lock_scale[1] = True
if track_flags[0] & 0x20: # Flag 0x20 locks Z axis
if contextTrack_flag & 0x20: # Flag 0x20 locks Z axis
child.lock_scale[2] = True
for keydata in keyframe_data.items():
child.scale = apply_constrain(keydata[1]) if hierarchy == ROOT_OBJECT else mathutils.Vector(keydata[1])
if not track_flags[0] & 0x100: # Flag 0x100 unlinks X axis
if not contextTrack_flag & 0x100: # Flag 0x100 unlinks X axis
child.keyframe_insert(data_path="scale", index=0, frame=keydata[0])
if not track_flags[0] & 0x200: # Flag 0x200 unlinks Y axis
if not contextTrack_flag & 0x200: # Flag 0x200 unlinks Y axis
child.keyframe_insert(data_path="scale", index=1, frame=keydata[0])
if not track_flags[0] & 0x400: # Flag 0x400 unlinks Z axis
if not contextTrack_flag & 0x400: # Flag 0x400 unlinks Z axis
child.keyframe_insert(data_path="scale", index=2, frame=keydata[0])
track_flags.clear()
contextTrack_flag = False
elif KEYFRAME and new_chunk.ID == ROLL_TRACK_TAG and tracking == 'OBJECT': # Roll angle
keyframe_angle = {}
default_value = child.rotation_euler[1]
child.rotation_euler[1] = read_track_angle(temp_chunk)[0]
child.rotation_euler[1] = read_track_angle(new_chunk)[0]
for keydata in keyframe_angle.items():
child.rotation_euler[1] = keydata[1]
if hierarchy == ROOT_OBJECT:
@@ -1295,7 +1212,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == FOV_TRACK_TAG and child.type == 'CAMERA': # Field of view
keyframe_angle = {}
default_value = child.data.angle
child.data.angle = read_track_angle(temp_chunk)[0]
child.data.angle = read_track_angle(new_chunk)[0]
for keydata in keyframe_angle.items():
child.data.lens = (child.data.sensor_width / 2) / math.tan(keydata[1] / 2)
child.data.keyframe_insert(data_path="lens", frame=keydata[0])
@@ -1304,8 +1221,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
keyframe_angle = {}
cone_angle = math.degrees(child.data.spot_size)
default_value = cone_angle-(child.data.spot_blend * math.floor(cone_angle))
hot_spot = math.degrees(read_track_angle(temp_chunk)[0])
child.data.spot_blend = 1.0 - (hot_spot/cone_angle)
hot_spot = math.degrees(read_track_angle(new_chunk)[0])
child.data.spot_blend = 1.0 - (hot_spot / cone_angle)
for keydata in keyframe_angle.items():
child.data.spot_blend = 1.0 - (math.degrees(keydata[1]) / cone_angle)
child.data.keyframe_insert(data_path="spot_blend", frame=keydata[0])
@@ -1313,7 +1230,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif KEYFRAME and new_chunk.ID == FALLOFF_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT': # Falloff
keyframe_angle = {}
default_value = math.degrees(child.data.spot_size)
child.data.spot_size = read_track_angle(temp_chunk)[0]
child.data.spot_size = read_track_angle(new_chunk)[0]
for keydata in keyframe_angle.items():
child.data.spot_size = keydata[1]
child.data.keyframe_insert(data_path="spot_size", frame=keydata[0])
@@ -1345,14 +1262,17 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
for ind, ob in enumerate(object_list):
parent = object_parent[ind]
if parent == ROOT_OBJECT:
if ob.parent is not None:
ob.parent = None
ob.parent = None
elif parent not in object_dict:
if ob.parent != object_list[parent]:
try:
ob.parent = object_list[parent]
else:
if ob.parent != object_dict[parent]:
except: # seems one object is missing, so take previous one
ob.parent = object_list[parent - 1]
else: # get parent from node_id number
try:
ob.parent = object_dict.get(parent)
except: # self to parent exception
ob.parent = None
#pivot_list[ind] += pivot_list[parent] # Not sure this is correct, should parent space matrix be applied before combining?
@@ -1383,6 +1303,10 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
ob.data.transform(pivot_matrix)
##########
# IMPORT #
##########
def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX=False, KEYFRAME=True, APPLY_MATRIX=True, CONVERSE=None):
print("importing 3DS: %r..." % (filepath), end="")
@@ -1390,7 +1314,7 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX=
if bpy.ops.object.select_all.poll():
bpy.ops.object.select_all(action='DESELECT')
time1 = time.time()
duration = time.time()
current_chunk = Chunk()
file = open(filepath, 'rb')
@@ -1466,7 +1390,7 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX=
axis_min = [1000000000] * 3
axis_max = [-1000000000] * 3
global_clamp_size = CONSTRAIN
global_clamp_size = CONSTRAIN * 10000
if global_clamp_size != 0.0:
# Get all object bounds
for ob in imported_objects:
@@ -1486,14 +1410,19 @@ def load_3ds(filepath, context, CONSTRAIN=10.0, IMAGE_SEARCH=True, WORLD_MATRIX=
while global_clamp_size < max_axis * scale:
scale = scale / 10.0
scale_mat = mathutils.Matrix.Scale(scale, 4)
mtx_scale = mathutils.Matrix.Scale(scale, 4)
for obj in imported_objects:
if obj.parent is None:
obj.matrix_world = scale_mat @ obj.matrix_world
obj.matrix_world = mtx_scale @ obj.matrix_world
for screen in bpy.data.screens:
for area in screen.areas:
if area.type == 'VIEW_3D':
area.spaces[0].clip_start = scale * 0.1
area.spaces[0].clip_end = scale * 10000
# Select all new objects.
print(" done in %.4f sec." % (time.time() - time1))
print(" done in %.4f sec." % (time.time() - duration))
file.close()

View File

@@ -6,7 +6,6 @@ bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
"version": (4, 0, 5),
"version": (3, 6, 27),
'blender': (3, 5, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',

View File

@@ -145,6 +145,19 @@ def merge_tracks_perform(merged_tracks, animations, export_settings):
else:
new_animations = animations
# If some strips have the same channel animations, we already ignored some.
# But if the channels were exactly the same, we already picked the sampler index,
# so animation.samplers now holds a mix of sampler objects and sampler indices.
# So convert it back to a list of sampler objects only.
# This can lead to unused samplers... but keep them, as the data is not exported properly anyway.
for anim in new_animations:
new_samplers = []
for s in anim.samplers:
if type(s) == int:
new_samplers.append(anim.samplers[s])
else:
new_samplers.append(s)
anim.samplers = new_samplers
return new_animations
def bake_animation(obj_uuid: str, animation_key: str, export_settings, mode=None):

View File

@@ -40,7 +40,8 @@ def gather_action_armature_sampled(armature_uuid: str, blender_action: typing.Op
# To allow reuse of samplers in one animation : This will be done later, when we know all channels are here
export_user_extensions('gather_animation_hook', export_settings, animation, blender_action, blender_object)
export_user_extensions('gather_animation_hook', export_settings, animation, blender_action, blender_object) # For compatibility with older versions
export_user_extensions('animation_action_armature_sampled', export_settings, animation, blender_object, blender_action, cache_key)
return animation

View File

@@ -27,7 +27,7 @@ def gather_action_object_sampled(object_uuid: str, blender_action: typing.Option
return None
blender_object = export_settings['vtree'].nodes[object_uuid].blender_object
export_user_extensions('animation_gather_object_sampled', export_settings, blender_object, blender_action)
export_user_extensions('animation_action_object_sampled', export_settings, animation, blender_object, blender_action, cache_key)
return animation

View File

@@ -27,7 +27,7 @@ def gather_action_sk_sampled(object_uuid: str, blender_action: typing.Optional[b
return None
blender_object = export_settings['vtree'].nodes[object_uuid].blender_object
export_user_extensions('animation_action_sk_sampled', export_settings, blender_object, blender_action, cache_key)
export_user_extensions('animation_action_sk_sampled', export_settings, animation, blender_object, blender_action, cache_key)
return animation