New Addon: Import Autodesk .max #105013
@@ -320,8 +320,8 @@ def get_shadeless_node(dest_node_tree):
     output_node = node_tree.nodes.new('NodeGroupOutput')
     input_node = node_tree.nodes.new('NodeGroupInput')

-    node_tree.outputs.new('NodeSocketShader', 'Shader')
-    node_tree.inputs.new('NodeSocketColor', 'Color')
+    node_tree.interface.new_socket('Shader', in_out='OUTPUT', socket_type='NodeSocketShader')
+    node_tree.interface.new_socket('Color', in_out='INPUT', socket_type='NodeSocketColor')

     # This could be faster as a transparent shader, but then no ambient occlusion
     diffuse_shader = node_tree.nodes.new('ShaderNodeBsdfDiffuse')
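Note on the hunk above: `node_tree.outputs.new()` and `node_tree.inputs.new()` belong to the pre-4.0 node group API; from Blender 4.0 on, group sockets are created through `node_tree.interface.new_socket()`. A minimal sketch of a version-tolerant helper (the helper name and version check are illustrative, not part of this patch):

```python
import bpy

def new_group_socket(node_tree, name, in_out, socket_type):
    """Create a node group socket on both Blender 3.x and 4.x (illustrative helper)."""
    if bpy.app.version >= (4, 0, 0):
        # 4.0+: sockets are defined on the node tree interface.
        return node_tree.interface.new_socket(name, in_out=in_out, socket_type=socket_type)
    # 3.x: inputs and outputs are separate collections, with (type, name) argument order.
    collection = node_tree.outputs if in_out == 'OUTPUT' else node_tree.inputs
    return collection.new(socket_type, name)
```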
@@ -1079,7 +1079,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
         if self.shader in {'PRINCIPLED', 'SHADELESS'}:
             node_tree.links.new(core_shader.inputs[0], tex_image.outputs['Color'])
         elif self.shader == 'EMISSION':
-            node_tree.links.new(core_shader.inputs['Emission'], tex_image.outputs['Color'])
+            node_tree.links.new(core_shader.inputs['Emission Color'], tex_image.outputs['Color'])

         if self.use_transparency:
             if self.shader in {'PRINCIPLED', 'EMISSION'}:
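The 'Emission' → 'Emission Color' rename above matches the Blender 4.0 Principled BSDF socket names. If the add-on needed to keep working on 3.x as well, the socket could be looked up by whichever name exists; a hedged sketch, not part of this patch:

```python
def emission_color_input(principled_node):
    """Return the emission color input of a Principled BSDF across Blender versions (illustrative)."""
    # Blender 4.0 renamed the socket from 'Emission' to 'Emission Color'.
    return principled_node.inputs.get('Emission Color') or principled_node.inputs.get('Emission')

# Usage sketch:
# node_tree.links.new(emission_color_input(core_shader), tex_image.outputs['Color'])
```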
@@ -1608,6 +1608,86 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
     mesh_version.add_variable("mesh", _3ds_uint(3))
     object_info.add_subchunk(mesh_version)

+    # Init main keyframe data chunk
+    if use_keyframes:
+        revision = 0x0005
+        stop = scene.frame_end
+        start = scene.frame_start
+        curtime = scene.frame_current
+        kfdata = make_kfdata(revision, start, stop, curtime)
+
+    # Make a list of all materials used in the selected meshes (use dictionary, each material is added once)
+    materialDict = {}
+    mesh_objects = []
+
+    if use_selection:
+        objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)]
+    else:
+        objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer)]
+
+    empty_objects = [ob for ob in objects if ob.type == 'EMPTY']
+    light_objects = [ob for ob in objects if ob.type == 'LIGHT']
+    camera_objects = [ob for ob in objects if ob.type == 'CAMERA']
+
+    for ob in objects:
+        # Get derived objects
+        derived_dict = bpy_extras.io_utils.create_derived_objects(depsgraph, [ob])
+        derived = derived_dict.get(ob)
+
+        if derived is None:
+            continue
+
+        for ob_derived, mtx in derived:
+            if ob.type not in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META'}:
+                continue
+
+            try:
+                data = ob_derived.to_mesh()
+            except:
+                data = None
+
+            if data:
+                matrix = global_matrix @ mtx
+                data.transform(matrix)
+                data.transform(mtx_scale)
+                mesh_objects.append((ob_derived, data, matrix))
+                ma_ls = data.materials
+                ma_ls_len = len(ma_ls)
+
+                # Get material/image tuples
+                if data.uv_layers:
+                    if not ma_ls:
+                        ma = ma_name = None
+
+                    for f, uf in zip(data.polygons, data.uv_layers.active.data):
+                        if ma_ls:
+                            ma_index = f.material_index
+                            if ma_index >= ma_ls_len:
+                                ma_index = f.material_index = 0
+                            ma = ma_ls[ma_index]
+                            ma_name = None if ma is None else ma.name
+                        # Else there already set to none
+
+                        img = get_uv_image(ma)
+                        img_name = None if img is None else img.name
+
+                        materialDict.setdefault((ma_name, img_name), (ma, img))
+
+                else:
+                    for ma in ma_ls:
+                        if ma: # Material may be None so check its not
+                            materialDict.setdefault((ma.name, None), (ma, None))
+
+                    # Why 0 Why!
+                    for f in data.polygons:
+                        if f.material_index >= ma_ls_len:
+                            f.material_index = 0
+
+
+    # Make MATERIAL chunks for all materials used in the meshes
+    for ma_image in materialDict.values():
+        object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1]))
+
     # Add MASTERSCALE element
     mscale = _3ds_chunk(MASTERSCALE)
     mscale.add_variable("scale", _3ds_float(1.0))
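In the material-gathering block added above, `materialDict` is keyed on the `(material name, image name)` pair, so each material/texture combination produces exactly one MATERIAL chunk no matter how many faces reference it. A standalone illustration of that `setdefault()` de-duplication (the sample data is made up):

```python
# Illustrative only: how repeated (material, image) pairs collapse to unique entries.
faces = [("Metal", "rust.png"), ("Metal", "rust.png"), ("Metal", None), ("Glass", None)]

material_dict = {}
for ma_name, img_name in faces:
    # setdefault() keeps the first value stored under a key and ignores later duplicates.
    material_dict.setdefault((ma_name, img_name), (ma_name, img_name))

print(list(material_dict))  # [('Metal', 'rust.png'), ('Metal', None), ('Glass', None)]
```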
@@ -1619,14 +1699,6 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
         cursor_chunk.add_variable("cursor", _3ds_point_3d(scene.cursor.location))
         object_info.add_subchunk(cursor_chunk)

-    # Init main keyframe data chunk
-    if use_keyframes:
-        revision = 0x0005
-        stop = scene.frame_end
-        start = scene.frame_start
-        curtime = scene.frame_current
-        kfdata = make_kfdata(revision, start, stop, curtime)
-
     # Add AMBIENT color
     if world is not None and 'WORLD' in object_filter:
         ambient_chunk = _3ds_chunk(AMBIENTLIGHT)
@@ -1710,81 +1782,9 @@ def save(operator, context, filepath="", scale_factor=1.0, use_scene_unit=False,
             object_info.add_subchunk(layerfog_chunk)
         if fognode or foglayer and layer.use_pass_mist:
             object_info.add_subchunk(use_fog_flag)
-        if use_keyframes and world.animation_data or world.node_tree.animation_data:
+        if use_keyframes and world.animation_data or (world.node_tree and world.node_tree.animation_data):
             kfdata.add_subchunk(make_ambient_node(world))

-    # Make a list of all materials used in the selected meshes (use dictionary, each material is added once)
-    materialDict = {}
-    mesh_objects = []
-
-    if use_selection:
-        objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer) and ob.select_get(view_layer=layer)]
-    else:
-        objects = [ob for ob in scene.objects if ob.type in object_filter and ob.visible_get(view_layer=layer)]
-
-    empty_objects = [ob for ob in objects if ob.type == 'EMPTY']
-    light_objects = [ob for ob in objects if ob.type == 'LIGHT']
-    camera_objects = [ob for ob in objects if ob.type == 'CAMERA']
-
-    for ob in objects:
-        # Get derived objects
-        derived_dict = bpy_extras.io_utils.create_derived_objects(depsgraph, [ob])
-        derived = derived_dict.get(ob)
-
-        if derived is None:
-            continue
-
-        for ob_derived, mtx in derived:
-            if ob.type not in {'MESH', 'CURVE', 'SURFACE', 'FONT', 'META'}:
-                continue
-
-            try:
-                data = ob_derived.to_mesh()
-            except:
-                data = None
-
-            if data:
-                matrix = global_matrix @ mtx
-                data.transform(matrix)
-                data.transform(mtx_scale)
-                mesh_objects.append((ob_derived, data, matrix))
-                ma_ls = data.materials
-                ma_ls_len = len(ma_ls)
-
-                # Get material/image tuples
-                if data.uv_layers:
-                    if not ma_ls:
-                        ma = ma_name = None
-
-                    for f, uf in zip(data.polygons, data.uv_layers.active.data):
-                        if ma_ls:
-                            ma_index = f.material_index
-                            if ma_index >= ma_ls_len:
-                                ma_index = f.material_index = 0
-                            ma = ma_ls[ma_index]
-                            ma_name = None if ma is None else ma.name
-                        # Else there already set to none
-
-                        img = get_uv_image(ma)
-                        img_name = None if img is None else img.name
-
-                        materialDict.setdefault((ma_name, img_name), (ma, img))
-
-                else:
-                    for ma in ma_ls:
-                        if ma: # Material may be None so check its not
-                            materialDict.setdefault((ma.name, None), (ma, None))
-
-                    # Why 0 Why!
-                    for f in data.polygons:
-                        if f.material_index >= ma_ls_len:
-                            f.material_index = 0
-
-
-    # Make material chunks for all materials used in the meshes
-    for ma_image in materialDict.values():
-        object_info.add_subchunk(make_material_chunk(ma_image[0], ma_image[1]))
-
     # Collect translation for transformation matrix
     translation = {}
     rotation = {}
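The reworked world-animation check above adds a `None` guard: a world that never had a node tree has `node_tree` set to `None`, so reading `.animation_data` from it directly would raise an `AttributeError`. A tiny standalone illustration of the short-circuit (plain Python stand-ins, not Blender objects):

```python
# Illustrative: why the extra guard matters when the world has no node tree.
world_node_tree = None  # e.g. a world whose node tree was never created

# Unguarded access would raise:  world_node_tree.animation_data  -> AttributeError
# The guarded form short-circuits to False instead:
has_tree_anim = bool(world_node_tree and world_node_tree.animation_data)
print(has_tree_anim)  # False
```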
@@ -269,10 +269,10 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
         shader.location = (300,300)
         img_wrap = contextWrapper.metallic_texture
     elif mapto == 'SPECULARITY':
-        shader.location = (0,-300)
+        shader.location = (300,0)
         img_wrap = contextWrapper.specular_tint_texture
     elif mapto == 'ALPHA':
-        shader.location = (300,300)
+        shader.location = (-300,0)
         img_wrap = contextWrapper.alpha_texture
     elif mapto == 'EMISSION':
         shader.location = (0,-900)
@@ -5,7 +5,7 @@
 bl_info = {
     "name": "FBX format",
     "author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
-    "version": (5, 8, 3),
+    "version": (5, 8, 6),
     "blender": (3, 6, 0),
     "location": "File > Import-Export",
     "description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",
@@ -250,9 +250,8 @@ class FBXElem:
             for elem in self.elems:
                 offset = elem._calc_offsets(offset, (elem is elem_last))
             offset += _BLOCK_SENTINEL_LENGTH
-        elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
-            if not is_last:
-                offset += _BLOCK_SENTINEL_LENGTH
+        elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
+            offset += _BLOCK_SENTINEL_LENGTH

         return offset

@@ -282,9 +281,8 @@ class FBXElem:
                 assert(elem.id != b'')
                 elem._write(write, tell, (elem is elem_last))
             write(_BLOCK_SENTINEL_DATA)
-        elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
-            if not is_last:
-                write(_BLOCK_SENTINEL_DATA)
+        elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
+            write(_BLOCK_SENTINEL_DATA)


 def _write_timedate_hack(elem_root):
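The two FBXElem hunks above rewrite the same rule in `_calc_offsets()` and `_write()`. Previously the block sentinel was skipped for the last sibling in both the "no properties" case and the `_ELEMS_ID_ALWAYS_BLOCK_SENTINEL` case; after the change only the "no properties" case still depends on `is_last`, while IDs listed in `_ELEMS_ID_ALWAYS_BLOCK_SENTINEL` get the sentinel unconditionally. A small standalone sketch of the new predicate (not the add-on's code):

```python
def writes_sentinel(has_props, is_last, always_sentinel_id):
    """Illustrative predicate mirroring the new elif condition (for elements without children)."""
    return (not has_props and not is_last) or always_sentinel_id

# The case whose behaviour changes: an always-sentinel element that is the last
# sibling now gets the sentinel (the old condition skipped it when is_last was True).
print(writes_sentinel(has_props=True, is_last=True, always_sentinel_id=True))    # True
# A property-less last element still gets no sentinel, as before.
print(writes_sentinel(has_props=False, is_last=True, always_sentinel_id=False))  # False
```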
@@ -1810,18 +1810,16 @@ def fbx_data_armature_elements(root, arm_obj, scene_data):
         elem_data_single_int32(fbx_skin, b"Version", FBX_DEFORMER_SKIN_VERSION)
         elem_data_single_float64(fbx_skin, b"Link_DeformAcuracy", 50.0) # Only vague idea what it is...

-        # Pre-process vertex weights (also to check vertices assigned to more than four bones).
+        # Pre-process vertex weights so that the vertices only need to be iterated once.
         ob = ob_obj.bdata
         bo_vg_idx = {bo_obj.bdata.name: ob.vertex_groups[bo_obj.bdata.name].index
                      for bo_obj in clusters.keys() if bo_obj.bdata.name in ob.vertex_groups}
         valid_idxs = set(bo_vg_idx.values())
         vgroups = {vg.index: {} for vg in ob.vertex_groups}
-        verts_vgroups = (sorted(((vg.group, vg.weight) for vg in v.groups if vg.weight and vg.group in valid_idxs),
-                                key=lambda e: e[1], reverse=True)
-                         for v in me.vertices)
-        for idx, vgs in enumerate(verts_vgroups):
-            for vg_idx, w in vgs:
-                vgroups[vg_idx][idx] = w
+        for idx, v in enumerate(me.vertices):
+            for vg in v.groups:
+                if (w := vg.weight) and (vg_idx := vg.group) in valid_idxs:
+                    vgroups[vg_idx][idx] = w

         for bo_obj, clstr_key in clusters.items():
             bo = bo_obj.bdata
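The rewrite above replaces the per-vertex `sorted(...)` generator with a single pass over `me.vertices`, filling `vgroups` (a dict of `{vertex index: weight}` per vertex group) directly via assignment expressions. A standalone sketch of the same bucketing with made-up data, to show the resulting shape (stand-in classes, not Blender types):

```python
class VGEntry:
    """Stand-in for a vertex's group element with .group and .weight."""
    def __init__(self, group, weight):
        self.group, self.weight = group, weight

vertices = [
    [VGEntry(0, 0.8), VGEntry(1, 0.2)],   # vertex 0
    [VGEntry(1, 0.0), VGEntry(2, 1.0)],   # vertex 1: zero weight and unmatched group are skipped
]
valid_idxs = {0, 1}                        # vertex groups that map to an exported bone
vgroups = {0: {}, 1: {}, 2: {}}

for idx, groups in enumerate(vertices):
    for vg in groups:
        if (w := vg.weight) and (vg_idx := vg.group) in valid_idxs:
            vgroups[vg_idx][idx] = w

print(vgroups)  # {0: {0: 0.8}, 1: {0: 0.2}, 2: {}}
```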
@@ -2784,7 +2784,9 @@ class FbxImportHelperNode:
             for i, w in combined_weights.items():
                 indices.append(i)
                 if len(w) > 1:
-                    weights.append(sum(w) / len(w))
+                    # Add ignored child weights to the current bone's weight.
+                    # XXX - Weights that sum to more than 1.0 get clamped to 1.0 when set in the vertex group.
+                    weights.append(sum(w))
                 else:
                     weights.append(w[0])

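The weight change above switches from averaging duplicate entries to summing them, relying on the clamping called out in the new XXX comment: weights above 1.0 are capped when they are assigned to the vertex group. A quick numeric illustration (values made up, clamp modelled with `min()`):

```python
w = [0.6, 0.7]                   # weights gathered for one vertex: current bone + an ignored child bone
old_value = sum(w) / len(w)      # 0.65 -- previous behaviour: average the entries
new_value = min(sum(w), 1.0)     # 1.0  -- sum them; the vertex group clamps to 1.0 on assignment
print(old_value, new_value)
```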
@@ -5,7 +5,7 @@
 bl_info = {
     'name': 'glTF 2.0 format',
     'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
-    "version": (4, 1, 1),
+    "version": (4, 1, 2),
     'blender': (4, 0, 0),
     'location': 'File > Import-Export',
     'description': 'Import-Export as glTF 2.0',
@@ -135,6 +135,7 @@ class BlenderNode():
             bpy.data.collections.new(BLENDER_GLTF_SPECIAL_COLLECTION)
             bpy.data.scenes[gltf.blender_scene].collection.children.link(bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION])
             bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_viewport = True
+            bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].hide_render = True

         # Create an icosphere, and assign it to the collection
         bpy.ops.mesh.primitive_ico_sphere_add(radius=1, enter_editmode=False, align='WORLD', location=(0, 0, 0), scale=(1, 1, 1))
@@ -99,9 +99,6 @@ class VIEW3D_MT_Pose(Menu):
         layout.operator("pose.quaternions_flip")
         layout.operator_context = 'INVOKE_AREA'
         layout.separator()
-        layout.operator("armature.armature_layers", text="Change Armature Layers...")
-        layout.operator("pose.bone_layers", text="Change Bone Layers...")
-        layout.separator()
         layout.menu("VIEW3D_MT_pose_showhide")
         layout.menu("VIEW3D_MT_bone_options_toggle", text="Bone Settings")
