New Addon: Import Autodesk .max #105013
@ -418,7 +418,6 @@ class add_mesh_bolt(Operator, AddObjectHelper):
(context.active_object.data is not None) and ('Bolt' in context.active_object.data.keys()) and \
(self.change == True):
obj = context.active_object
use_auto_smooth = bool(obj.data.use_auto_smooth) # Copy value, do not take a reference
use_smooth = bool(obj.data.polygons[0].use_smooth) # Copy value, do not take a reference

mesh = createMesh.Create_New_Mesh(self, context)
@ -430,7 +429,6 @@ class add_mesh_bolt(Operator, AddObjectHelper):
bm.free()

# Preserve flat/smooth choice. New mesh is flat by default
obj.data.use_auto_smooth = use_auto_smooth
if use_smooth:
bpy.ops.object.shade_smooth()
else:
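These hunks keep the user's flat/smooth shading across a rebuild of the bolt mesh: the shading state is copied by value before the new mesh is created (a reference would go stale once obj.data is replaced) and reapplied afterwards. A minimal sketch of that copy-then-reapply pattern, with rebuild_mesh() as a hypothetical stand-in for createMesh.Create_New_Mesh:

    import bpy

    def rebuild_preserving_shading(obj, rebuild_mesh):
        # Copy by value: obj.data is about to be replaced.
        use_smooth = bool(obj.data.polygons[0].use_smooth) if obj.data.polygons else False

        obj.data = rebuild_mesh()  # newly created meshes are flat-shaded by default

        if use_smooth:
            bpy.context.view_layer.objects.active = obj
            obj.select_set(True)
            bpy.ops.object.shade_smooth()
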
@ -150,9 +150,6 @@ def createMeshObject(context, verts, edges, faces, name):
# Make a mesh from a list of verts/edges/faces.
mesh.from_pydata(verts, edges, faces)

# Set mesh to use auto smoothing:
mesh.use_auto_smooth = True

# Update mesh geometry after adding stuff.
mesh.update()

@ -6,7 +6,7 @@ bl_info = {
"name": "Grease Pencil Tools",
"description": "Extra tools for Grease Pencil",
"author": "Samuel Bernou, Antonio Vazquez, Daniel Martinez Lara, Matias Mendiola",
"version": (1, 8, 1),
"version": (1, 8, 2),
"blender": (3, 0, 0),
"location": "Sidebar > Grease Pencil > Grease Pencil Tools",
"warning": "",

@ -49,10 +49,10 @@ def get_reduced_area_coord(context):

## minus tool leftbar + sidebar right
regs = context.area.regions
toolbar = regs[2]
sidebar = regs[3]
header = regs[0]
tool_header = regs[1]
toolbar = next((r for r in regs if r.type == 'TOOLS'), None)
sidebar = next((r for r in regs if r.type == 'UI'), None)
header = next((r for r in regs if r.type == 'HEADER'), None)
tool_header = next((r for r in regs if r.type == 'TOOL_HEADER'), None)
up_margin = down_margin = 0
if tool_header.alignment == 'TOP':
up_margin += tool_header.height
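The rewrite above stops indexing context.area.regions by position (the order is not guaranteed) and instead looks each region up by its type. A small sketch of the same lookup, assuming it runs with a valid context.area:

    def get_region(context, region_type):
        # region_type is one of 'TOOLS', 'UI', 'HEADER', 'TOOL_HEADER', 'WINDOW', ...
        # Returns None when the region does not exist in the current area.
        return next((r for r in context.area.regions if r.type == region_type), None)

    sidebar = get_region(context, 'UI')
    sidebar_width = sidebar.width if sidebar else 0
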
@ -5,8 +5,8 @@
bl_info = {
"name": "Import Images as Planes",
"author": "Florian Meyer (tstscr), mont29, matali, Ted Schundler (SpkyElctrc), mrbimax",
"version": (3, 5, 0),
"blender": (2, 91, 0),
"version": (3, 5, 1),
"blender": (4, 0, 0),
"location": "File > Import > Images as Planes or Add > Image > Images as Planes",
"description": "Imports images and creates planes with the appropriate aspect ratio. "
"The images are mapped to the planes.",
@ -25,7 +25,10 @@ from math import pi

import bpy
from bpy.types import Operator
from bpy.app.translations import pgettext_tip as tip_
from bpy.app.translations import (
pgettext_tip as tip_,
contexts as i18n_contexts
)
from mathutils import Vector

from bpy.props import (
@ -151,6 +154,9 @@ def load_images(filenames, directory, force_reload=False, frame_start=1, find_se
file_iter = zip(filenames, repeat(1), repeat(1))

for filename, offset, frames in file_iter:
if not os.path.isfile(bpy.path.abspath(os.path.join(directory, filename))):
continue

image = load_image(filename, directory, check_existing=True, force_reload=force_reload)

# Size is unavailable for sequences, so we grab it early
@ -731,7 +737,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
('HASHED', "Hashed","Use noise to dither the binary visibility (works well with multi-samples)"),
('OPAQUE', "Opaque","Render surface without transparency"),
)
blend_method: EnumProperty(name="Blend Mode", items=BLEND_METHODS, default='BLEND', description="Blend Mode for Transparent Faces")
blend_method: EnumProperty(
name="Blend Mode", items=BLEND_METHODS, default='BLEND',
description="Blend Mode for Transparent Faces", translation_context=i18n_contexts.id_material)

SHADOW_METHODS = (
('CLIP', "Clip","Use the alpha threshold to clip the visibility (binary visibility)"),
@ -739,7 +747,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
('OPAQUE',"Opaque","Material will cast shadows without transparency"),
('NONE',"None","Material will cast no shadow"),
)
shadow_method: EnumProperty(name="Shadow Mode", items=SHADOW_METHODS, default='CLIP', description="Shadow mapping method")
shadow_method: EnumProperty(
name="Shadow Mode", items=SHADOW_METHODS, default='CLIP',
description="Shadow mapping method", translation_context=i18n_contexts.id_material)

use_backface_culling: BoolProperty(
name="Backface Culling", default=False,
@ -923,11 +933,11 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
if context.active_object and context.active_object.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')

self.import_images(context)
ret_code = self.import_images(context)

context.preferences.edit.use_enter_edit_mode = editmode

return {'FINISHED'}
return ret_code

def import_images(self, context):

@ -939,6 +949,10 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
find_sequences=self.image_sequence
))

if not images:
self.report({'WARNING'}, "Please select at least an image.")
return {'CANCELLED'}

# Create individual planes
planes = [self.single_image_spec_to_plane(context, img_spec) for img_spec in images]

@ -962,6 +976,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):

# all done!
self.report({'INFO'}, tip_("Added {} Image Plane(s)").format(len(planes)))
return {'FINISHED'}

# operate on a single image
def single_image_spec_to_plane(self, context, img_spec):

@ -17,11 +17,12 @@ def create_and_link_mesh(name, faces, face_nors, points, global_matrix):
mesh.from_pydata(points, [], faces)

if face_nors:
# Note: we store 'temp' normals in loops, since validate() may alter final mesh,
# we can only set custom lnors *after* calling it.
mesh.create_normals_split()
# Write imported normals to a temporary attribute so they are interpolated by #mesh.validate().
# It's important to validate before calling #mesh.normals_split_custom_set() which expects a
# valid mesh.
lnors = tuple(chain(*chain(*zip(face_nors, face_nors, face_nors))))
mesh.loops.foreach_set("normal", lnors)
mesh.attributes.new("temp_custom_normals", 'FLOAT_VECTOR', 'CORNER')
mesh.attributes["temp_custom_normals"].data.foreach_set("vector", lnors)

mesh.transform(global_matrix)

@ -30,13 +31,12 @@ def create_and_link_mesh(name, faces, face_nors, points, global_matrix):

if face_nors:
clnors = array.array('f', [0.0] * (len(mesh.loops) * 3))
mesh.loops.foreach_get("normal", clnors)
mesh.attributes["temp_custom_normals"].data.foreach_get("vector", clnors)

mesh.polygons.foreach_set("use_smooth", [True] * len(mesh.polygons))

mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))
mesh.use_auto_smooth = True
mesh.free_normals_split()
mesh.attributes.remove(mesh.attributes["temp_custom_normals"])

mesh.update()

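Both stl_utils hunks replace the removed create_normals_split()/loop "normal" writes with a temporary corner-domain attribute: the imported normals are stashed in "temp_custom_normals" so mesh.validate() keeps them consistent with any topology fixes, then read back, handed to normals_split_custom_set(), and the attribute is deleted. A condensed sketch of that round trip, assuming loop_normals is a flat sequence of per-corner normal components:

    import array

    def apply_custom_normals(mesh, loop_normals):
        # Stash the normals in a temporary per-corner attribute before validation.
        mesh.attributes.new("temp_custom_normals", 'FLOAT_VECTOR', 'CORNER')
        mesh.attributes["temp_custom_normals"].data.foreach_set("vector", loop_normals)

        mesh.validate(clean_customdata=False)

        # Read the (possibly reordered) normals back and set them as custom split normals.
        clnors = array.array('f', [0.0] * (len(mesh.loops) * 3))
        mesh.attributes["temp_custom_normals"].data.foreach_get("vector", clnors)
        mesh.normals_split_custom_set(tuple(zip(*(iter(clnors),) * 3)))

        mesh.attributes.remove(mesh.attributes["temp_custom_normals"])
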
@ -5,7 +5,7 @@
bl_info = {
"name": "UV Layout",
"author": "Campbell Barton, Matt Ebb",
"version": (1, 1, 6),
"version": (1, 2, 0),
"blender": (3, 0, 0),
"location": "UV Editor > UV > Export UV Layout",
"description": "Export the UV layout as a 2D graphic",
@ -30,6 +30,8 @@ if "bpy" in locals():
import os
import bpy

from bpy.app.translations import contexts as i18n_contexts

from bpy.props import (
StringProperty,
BoolProperty,
@ -54,10 +56,24 @@ class ExportUVLayout(bpy.types.Operator):
description="Export all UVs in this mesh (not just visible ones)",
default=False,
)
export_tiles: EnumProperty(
name="Export Tiles",
items=(
('NONE', "None",
"Export only UVs in the [0, 1] range"),
('UDIM', "UDIM",
"Export tiles in the UDIM numbering scheme: 1001 + u-tile + 10*v-tile"),
('UV', "UVTILE",
"Export tiles in the UVTILE numbering scheme: u(u-tile + 1)_v(v-tile + 1)"),
),
description="Choose whether to export only the [0, 1] range, or all UV tiles",
default='NONE',
)
modified: BoolProperty(
name="Modified",
description="Exports UVs from the modified mesh",
default=False,
translation_context=i18n_contexts.id_mesh,
)
mode: EnumProperty(
items=(
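The two numbering schemes in the new export_tiles option map a (u, v) tile index to a filename suffix: UDIM is 1001 + u-tile + 10 * v-tile, UVTILE is u{u+1}_v{v+1}. A short worked example (tile (1, 2), i.e. second column, third row, becomes UDIM 1022 and UVTILE u2_v3):

    def udim_suffix(tile):
        # UDIM numbering: 1001 + u-tile + 10 * v-tile, four digits.
        return f"{1001 + tile[0] + tile[1] * 10:04}"

    def uvtile_suffix(tile):
        # UVTILE numbering: 1-based u/v indices.
        return f"u{tile[0] + 1}_v{tile[1] + 1}"

    assert udim_suffix((0, 0)) == "1001" and udim_suffix((1, 2)) == "1022"
    assert uvtile_suffix((1, 2)) == "u2_v3"
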
@ -73,6 +89,7 @@ class ExportUVLayout(bpy.types.Operator):
default='PNG',
)
size: IntVectorProperty(
name="Size",
size=2,
default=(1024, 1024),
min=8, max=32768,
@ -123,9 +140,6 @@ class ExportUVLayout(bpy.types.Operator):
if is_editmode:
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)

filepath = self.filepath
filepath = bpy.path.ensure_ext(filepath, "." + self.mode.lower())

meshes = list(self.iter_meshes_to_export(context))
polygon_data = list(self.iter_polygon_data_to_draw(context, meshes))
different_colors = set(color for _, color in polygon_data)
@ -135,8 +149,35 @@ class ExportUVLayout(bpy.types.Operator):
obj_eval = obj.evaluated_get(depsgraph)
obj_eval.to_mesh_clear()

tiles = self.tiles_to_export(polygon_data)
export = self.get_exporter()
export(filepath, polygon_data, different_colors, self.size[0], self.size[1], self.opacity)
dirname, filename = os.path.split(self.filepath)

# Strip UDIM or UV numbering, and extension
import re
name_regex = r"^(.*?)"
udim_regex = r"(?:\.[0-9]{4})?"
uv_regex = r"(?:\.u[0-9]+_v[0-9]+)?"
ext_regex = r"(?:\.png|\.eps|\.svg)?$"
if self.export_tiles == 'NONE':
match = re.match(name_regex + ext_regex, filename)
elif self.export_tiles == 'UDIM':
match = re.match(name_regex + udim_regex + ext_regex, filename)
elif self.export_tiles == 'UV':
match = re.match(name_regex + uv_regex + ext_regex, filename)
if match:
filename = match.groups()[0]

for tile in sorted(tiles):
filepath = os.path.join(dirname, filename)
if self.export_tiles == 'UDIM':
filepath += f".{1001 + tile[0] + tile[1] * 10:04}"
elif self.export_tiles == 'UV':
filepath += f".u{tile[0] + 1}_v{tile[1] + 1}"
filepath = bpy.path.ensure_ext(filepath, "." + self.mode.lower())

export(filepath, tile, polygon_data, different_colors,
self.size[0], self.size[1], self.opacity)

if is_editmode:
bpy.ops.object.mode_set(mode='EDIT', toggle=False)
@ -161,6 +202,30 @@ class ExportUVLayout(bpy.types.Operator):
continue
yield obj

def tiles_to_export(self, polygon_data):
"""Get a set of tiles containing UVs.
This assumes there is no UV edge crossing an otherwise empty tile.
"""
if self.export_tiles == 'NONE':
return {(0, 0)}

from math import floor
tiles = set()
for poly in polygon_data:
for uv in poly[0]:
# Ignore UVs at corners - precisely touching the right or upper edge
# of a tile should not load its right/upper neighbor as well.
# From intern/cycles/scene/attribute.cpp
u, v = uv[0], uv[1]
x, y = floor(u), floor(v)
if x > 0 and u < x + 1e-6:
x -= 1
if y > 0 and v < y + 1e-6:
y -= 1
if x >= 0 and y >= 0:
tiles.add((x, y))
return tiles

@staticmethod
def currently_image_image_editor(context):
return isinstance(context.space_data, bpy.types.SpaceImageEditor)

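tiles_to_export() above assigns each UV to a tile with floor(), but a coordinate sitting exactly on a tile's right or upper edge is pulled back into the lower tile, so that u == 1.0, for example, does not also claim tile (1, 0). A tiny worked example of that corner handling:

    from math import floor

    def uv_tile(u, v, eps=1e-6):
        x, y = floor(u), floor(v)
        # Values exactly on the right/upper edge stay in the lower tile.
        if x > 0 and u < x + eps:
            x -= 1
        if y > 0 and v < y + eps:
            y -= 1
        return x, y

    assert uv_tile(0.5, 0.5) == (0, 0)
    assert uv_tile(1.0, 0.25) == (0, 0)   # exactly on the edge
    assert uv_tile(1.25, 2.0) == (1, 1)
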
@ -5,19 +5,19 @@
|
||||
import bpy
|
||||
|
||||
|
||||
def export(filepath, face_data, colors, width, height, opacity):
|
||||
def export(filepath, tile, face_data, colors, width, height, opacity):
|
||||
with open(filepath, 'w', encoding='utf-8') as file:
|
||||
for text in get_file_parts(face_data, colors, width, height, opacity):
|
||||
for text in get_file_parts(tile, face_data, colors, width, height, opacity):
|
||||
file.write(text)
|
||||
|
||||
|
||||
def get_file_parts(face_data, colors, width, height, opacity):
|
||||
def get_file_parts(tile, face_data, colors, width, height, opacity):
|
||||
yield from header(width, height)
|
||||
if opacity > 0.0:
|
||||
name_by_color = {}
|
||||
yield from prepare_colors(colors, name_by_color)
|
||||
yield from draw_colored_polygons(face_data, name_by_color, width, height)
|
||||
yield from draw_lines(face_data, width, height)
|
||||
yield from draw_colored_polygons(tile, face_data, name_by_color, width, height)
|
||||
yield from draw_lines(tile, face_data, width, height)
|
||||
yield from footer()
|
||||
|
||||
|
||||
@ -53,24 +53,24 @@ def prepare_colors(colors, out_name_by_color):
|
||||
yield "} def\n"
|
||||
|
||||
|
||||
def draw_colored_polygons(face_data, name_by_color, width, height):
|
||||
def draw_colored_polygons(tile, face_data, name_by_color, width, height):
|
||||
for uvs, color in face_data:
|
||||
yield from draw_polygon_path(uvs, width, height)
|
||||
yield from draw_polygon_path(tile, uvs, width, height)
|
||||
yield "closepath\n"
|
||||
yield "%s\n" % name_by_color[color]
|
||||
|
||||
|
||||
def draw_lines(face_data, width, height):
|
||||
def draw_lines(tile, face_data, width, height):
|
||||
for uvs, _ in face_data:
|
||||
yield from draw_polygon_path(uvs, width, height)
|
||||
yield from draw_polygon_path(tile, uvs, width, height)
|
||||
yield "closepath\n"
|
||||
yield "stroke\n"
|
||||
|
||||
|
||||
def draw_polygon_path(uvs, width, height):
|
||||
def draw_polygon_path(tile, uvs, width, height):
|
||||
yield "newpath\n"
|
||||
for j, uv in enumerate(uvs):
|
||||
uv_scale = (uv[0] * width, uv[1] * height)
|
||||
uv_scale = ((uv[0] - tile[0]) * width, (uv[1] - tile[1]) * height)
|
||||
if j == 0:
|
||||
yield "%.5f %.5f moveto\n" % uv_scale
|
||||
else:
|
||||
|
@ -15,14 +15,14 @@ except ImportError:
|
||||
oiio = None
|
||||
|
||||
|
||||
def export(filepath, face_data, colors, width, height, opacity):
|
||||
def export(filepath, tile, face_data, colors, width, height, opacity):
|
||||
offscreen = gpu.types.GPUOffScreen(width, height)
|
||||
offscreen.bind()
|
||||
|
||||
try:
|
||||
fb = gpu.state.active_framebuffer_get()
|
||||
fb.clear(color=(0.0, 0.0, 0.0, 0.0))
|
||||
draw_image(face_data, opacity)
|
||||
draw_image(tile, face_data, opacity)
|
||||
|
||||
pixel_data = fb.read_color(0, 0, width, height, 4, 0, 'UBYTE')
|
||||
pixel_data.dimensions = width * height * 4
|
||||
@ -32,11 +32,11 @@ def export(filepath, face_data, colors, width, height, opacity):
|
||||
offscreen.free()
|
||||
|
||||
|
||||
def draw_image(face_data, opacity):
|
||||
def draw_image(tile, face_data, opacity):
|
||||
gpu.state.blend_set('ALPHA')
|
||||
|
||||
with gpu.matrix.push_pop():
|
||||
gpu.matrix.load_matrix(get_normalize_uvs_matrix())
|
||||
gpu.matrix.load_matrix(get_normalize_uvs_matrix(tile))
|
||||
gpu.matrix.load_projection_matrix(Matrix.Identity(4))
|
||||
|
||||
draw_background_colors(face_data, opacity)
|
||||
@ -45,11 +45,11 @@ def draw_image(face_data, opacity):
gpu.state.blend_set('NONE')


def get_normalize_uvs_matrix():
def get_normalize_uvs_matrix(tile):
'''matrix maps x and y coordinates from [0, 1] to [-1, 1]'''
matrix = Matrix.Identity(4)
matrix.col[3][0] = -1
matrix.col[3][1] = -1
matrix.col[3][0] = -1 - (tile[0]) * 2
matrix.col[3][1] = -1 - (tile[1]) * 2
matrix[0][0] = 2
matrix[1][1] = 2

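get_normalize_uvs_matrix() now also shifts by the tile origin, so UVs inside tile (tx, ty), i.e. the [tx, tx+1] x [ty, ty+1] square, land in the GPU's [-1, 1] range; the translation is -1 - 2*t because the coordinate is scaled by 2 first. A quick check of the mapping, mirroring the mathutils calls above:

    from mathutils import Matrix, Vector

    def normalize_uvs_matrix(tile):
        # Scale by 2 and translate so [tile, tile + 1] maps onto [-1, 1].
        matrix = Matrix.Identity(4)
        matrix.col[3][0] = -1 - tile[0] * 2
        matrix.col[3][1] = -1 - tile[1] * 2
        matrix[0][0] = 2
        matrix[1][1] = 2
        return matrix

    m = normalize_uvs_matrix((1, 2))
    lo = m @ Vector((1.0, 2.0, 0.0, 1.0))   # tile's lower-left corner
    hi = m @ Vector((2.0, 3.0, 0.0, 1.0))   # tile's upper-right corner
    assert (lo.x, lo.y) == (-1.0, -1.0) and (hi.x, hi.y) == (1.0, 1.0)
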
@ -7,15 +7,15 @@ from os.path import basename
|
||||
from xml.sax.saxutils import escape
|
||||
|
||||
|
||||
def export(filepath, face_data, colors, width, height, opacity):
|
||||
def export(filepath, tile, face_data, colors, width, height, opacity):
|
||||
with open(filepath, 'w', encoding='utf-8') as file:
|
||||
for text in get_file_parts(face_data, colors, width, height, opacity):
|
||||
for text in get_file_parts(tile, face_data, colors, width, height, opacity):
|
||||
file.write(text)
|
||||
|
||||
|
||||
def get_file_parts(face_data, colors, width, height, opacity):
|
||||
def get_file_parts(tile, face_data, colors, width, height, opacity):
|
||||
yield from header(width, height)
|
||||
yield from draw_polygons(face_data, width, height, opacity)
|
||||
yield from draw_polygons(tile, face_data, width, height, opacity)
|
||||
yield from footer()
|
||||
|
||||
|
||||
@ -29,7 +29,7 @@ def header(width, height):
|
||||
yield f'<desc>{escape(desc)}</desc>\n'
|
||||
|
||||
|
||||
def draw_polygons(face_data, width, height, opacity):
|
||||
def draw_polygons(tile, face_data, width, height, opacity):
|
||||
for uvs, color in face_data:
|
||||
fill = f'fill="{get_color_string(color)}"'
|
||||
|
||||
@ -39,7 +39,7 @@ def draw_polygons(face_data, width, height, opacity):
|
||||
yield ' points="'
|
||||
|
||||
for uv in uvs:
|
||||
x, y = uv[0], 1.0 - uv[1]
|
||||
x, y = uv[0] - tile[0], 1.0 - uv[1] + tile[1]
|
||||
yield f'{x*width:.3f},{y*height:.3f} '
|
||||
yield '" />\n'
|
||||
|
||||
|
@ -18,8 +18,8 @@ import bpy
bl_info = {
"name": "Autodesk 3DS format",
"author": "Bob Holcomb, Campbell Barton, Sebastian Schrand",
"version": (2, 4, 8),
"blender": (4, 0, 0),
"version": (2, 4, 9),
"blender": (4, 1, 0),
"location": "File > Import-Export",
"description": "3DS Import/Export meshes, UVs, materials, textures, "
"cameras, lamps & animation",

@ -1298,11 +1298,8 @@ def make_object_node(ob, translation, rotation, scale, name_id):
|
||||
obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name)))
|
||||
obj_node_header_chunk.add_variable("flags1", _3ds_ushort(0x0040))
|
||||
|
||||
"""Flags2 defines 0x01 for display path, 0x02 use autosmooth, 0x04 object frozen,
|
||||
"""Flags2 defines 0x01 for display path, 0x04 object frozen,
|
||||
0x10 for motion blur, 0x20 for material morph and bit 0x40 for mesh morph."""
|
||||
if ob.type == 'MESH' and ob.data.use_auto_smooth:
|
||||
obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0x02))
|
||||
else:
|
||||
obj_node_header_chunk.add_variable("flags2", _3ds_ushort(0))
|
||||
obj_node_header_chunk.add_variable("parent", _3ds_ushort(ROOT_OBJECT))
|
||||
|
||||
@ -1343,12 +1340,6 @@ def make_object_node(ob, translation, rotation, scale, name_id):
|
||||
obj_boundbox.add_variable("max", _3ds_point_3d(ob.bound_box[6]))
|
||||
obj_node.add_subchunk(obj_boundbox)
|
||||
|
||||
# Add smooth angle if autosmooth is used
|
||||
if ob.type == 'MESH' and ob.data.use_auto_smooth:
|
||||
obj_morph_smooth = _3ds_chunk(OBJECT_MORPH_SMOOTH)
|
||||
obj_morph_smooth.add_variable("angle", _3ds_float(round(ob.data.auto_smooth_angle, 6)))
|
||||
obj_node.add_subchunk(obj_morph_smooth)
|
||||
|
||||
# Add track chunks for position, rotation, size
|
||||
ob_scale = scale[name] # and collect masterscale
|
||||
if parent is None or (parent.name not in name_id):
|
||||
|
@ -244,7 +244,7 @@ def skip_to_end(file, skip_chunk):
|
||||
# MATERIALS #
|
||||
#############
|
||||
|
||||
def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tintcolor, mapto):
|
||||
def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tint1, tint2, mapto):
|
||||
shader = contextWrapper.node_principled_bsdf
|
||||
nodetree = contextWrapper.material.node_tree
|
||||
shader.location = (-300, 0)
|
||||
@ -256,13 +256,16 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
|
||||
mixer.label = "Mixer"
|
||||
mixer.inputs[0].default_value = pct / 100
|
||||
mixer.inputs[1].default_value = (
|
||||
tintcolor[:3] + [1] if tintcolor else
|
||||
shader.inputs['Base Color'].default_value[:]
|
||||
)
|
||||
tint1[:3] + [1] if tint1 else shader.inputs['Base Color'].default_value[:])
|
||||
contextWrapper._grid_to_location(1, 2, dst_node=mixer, ref_node=shader)
|
||||
img_wrap = contextWrapper.base_color_texture
|
||||
links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
|
||||
links.new(mixer.outputs['Color'], shader.inputs['Base Color'])
|
||||
if tint2 is not None:
|
||||
img_wrap.colorspace_name = 'Non-Color'
|
||||
mixer.inputs[2].default_value = tint2[:3] + [1]
|
||||
links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[0])
|
||||
else:
|
||||
links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
|
||||
elif mapto == 'ROUGHNESS':
|
||||
img_wrap = contextWrapper.roughness_texture
|
||||
elif mapto == 'METALLIC':
|
||||
@ -312,10 +315,12 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
|
||||
img_wrap.extension = 'CLIP'
|
||||
|
||||
if alpha == 'alpha':
|
||||
own_node = img_wrap.node_image
|
||||
contextWrapper.material.blend_method = 'HASHED'
|
||||
links.new(own_node.outputs['Alpha'], img_wrap.socket_dst)
|
||||
for link in links:
|
||||
if link.from_node.type == 'TEX_IMAGE' and link.to_node.type == 'MIX_RGB':
|
||||
tex = link.from_node.image.name
|
||||
own_node = img_wrap.node_image
|
||||
own_map = img_wrap.node_mapping
|
||||
if tex == image.name:
|
||||
links.new(link.from_node.outputs['Alpha'], img_wrap.socket_dst)
|
||||
@ -325,9 +330,6 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
|
||||
if imgs.name[-3:].isdigit():
|
||||
if not imgs.users:
|
||||
bpy.data.images.remove(imgs)
|
||||
else:
|
||||
links.new(img_wrap.node_image.outputs['Alpha'], img_wrap.socket_dst)
|
||||
contextWrapper.material.blend_method = 'HASHED'
|
||||
|
||||
shader.location = (300, 300)
|
||||
contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)
|
||||
@ -520,7 +522,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
|
||||
def read_texture(new_chunk, temp_chunk, name, mapto):
|
||||
uscale, vscale, uoffset, voffset, angle = 1.0, 1.0, 0.0, 0.0, 0.0
|
||||
contextWrapper.use_nodes = True
|
||||
tintcolor = None
|
||||
tint1 = tint2 = None
|
||||
extend = 'wrap'
|
||||
alpha = False
|
||||
pct = 70
|
||||
@ -544,14 +546,8 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
|
||||
img = load_image(texture_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True)
|
||||
temp_chunk.bytes_read += read_str_len # plus one for the null character that gets removed
|
||||
|
||||
elif temp_chunk.ID == MAT_MAP_USCALE:
|
||||
uscale = read_float(temp_chunk)
|
||||
elif temp_chunk.ID == MAT_MAP_VSCALE:
|
||||
vscale = read_float(temp_chunk)
|
||||
elif temp_chunk.ID == MAT_MAP_UOFFSET:
|
||||
uoffset = read_float(temp_chunk)
|
||||
elif temp_chunk.ID == MAT_MAP_VOFFSET:
|
||||
voffset = read_float(temp_chunk)
|
||||
elif temp_chunk.ID == MAT_BUMP_PERCENT:
|
||||
contextWrapper.normalmap_strength = (float(read_short(temp_chunk) / 100))
|
||||
|
||||
elif temp_chunk.ID == MAT_MAP_TILING:
|
||||
"""Control bit flags, where 0x1 activates decaling, 0x2 activates mirror,
|
||||
@ -580,11 +576,20 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
|
||||
if tiling & 0x200:
|
||||
tint = 'RGBtint'
|
||||
|
||||
elif temp_chunk.ID == MAT_MAP_USCALE:
|
||||
uscale = read_float(temp_chunk)
|
||||
elif temp_chunk.ID == MAT_MAP_VSCALE:
|
||||
vscale = read_float(temp_chunk)
|
||||
elif temp_chunk.ID == MAT_MAP_UOFFSET:
|
||||
uoffset = read_float(temp_chunk)
|
||||
elif temp_chunk.ID == MAT_MAP_VOFFSET:
|
||||
voffset = read_float(temp_chunk)
|
||||
elif temp_chunk.ID == MAT_MAP_ANG:
|
||||
angle = read_float(temp_chunk)
|
||||
|
||||
elif temp_chunk.ID == MAT_MAP_COL1:
|
||||
tintcolor = read_byte_color(temp_chunk)
|
||||
tint1 = read_byte_color(temp_chunk)
|
||||
elif temp_chunk.ID == MAT_MAP_COL2:
|
||||
tint2 = read_byte_color(temp_chunk)
|
||||
|
||||
skip_to_end(file, temp_chunk)
|
||||
new_chunk.bytes_read += temp_chunk.bytes_read
|
||||
@ -592,7 +597,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
|
||||
# add the map to the material in the right channel
|
||||
if img:
|
||||
add_texture_to_material(img, contextWrapper, pct, extend, alpha, (uscale, vscale, 1),
|
||||
(uoffset, voffset, 0), angle, tintcolor, mapto)
|
||||
(uoffset, voffset, 0), angle, tint1, tint2, mapto)
|
||||
|
||||
def apply_constrain(vec):
|
||||
convector = mathutils.Vector.Fill(3, (CONSTRAIN * 0.1))
|
||||
@ -1326,8 +1331,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif new_chunk.ID == MORPH_SMOOTH and tracking == 'OBJECT': # Smooth angle
smooth_angle = read_float(new_chunk)
if child.data is not None: # Check if child is a dummy
child.data.use_auto_smooth = True
child.data.auto_smooth_angle = smooth_angle
child.data.set_sharp_from_angle(smooth_angle)

elif KEYFRAME and new_chunk.ID == COL_TRACK_TAG and tracking == 'AMBIENT': # Ambient
keyframe_data = {}
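The import_3ds hunk above adapts the MORPH_SMOOTH handling to Blender 4.1, where Mesh.use_auto_smooth and auto_smooth_angle no longer exist: the smoothing angle read from the chunk is now turned into sharp edges with Mesh.set_sharp_from_angle(). A small compatibility sketch (not part of the add-on) showing the old call next to the new one for any imported mesh datablock:

    def apply_smooth_angle(mesh, smooth_angle):
        if hasattr(mesh, "use_auto_smooth"):
            # Blender <= 4.0 stored an auto-smooth toggle plus angle on the mesh.
            mesh.use_auto_smooth = True
            mesh.auto_smooth_angle = smooth_angle
        else:
            # Blender 4.1+: derive sharp edges from the angle instead.
            mesh.set_sharp_from_angle(smooth_angle)
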
@ -1383,6 +1387,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
|
||||
elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'TARGET': # Target position
|
||||
keyframe_data = {}
|
||||
location = child.location
|
||||
keyframe_data[0] = trackposition[0]
|
||||
target = mathutils.Vector(read_track_data(new_chunk)[0])
|
||||
direction = calc_target(location, target)
|
||||
child.rotation_euler.x = direction[0]
|
||||
|
@ -5,8 +5,8 @@
bl_info = {
"name": "FBX format",
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier, @Mysteryem",
"version": (5, 8, 7),
"blender": (3, 6, 0),
"version": (5, 8, 8),
"blender": (4, 0, 0),
"location": "File > Import-Export",
"description": "FBX IO meshes, UVs, vertex colors, materials, textures, cameras, lamps and actions",
"warning": "",

@ -1158,7 +1158,6 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
|
||||
# NOTE: this is not supported by importer currently.
|
||||
# XXX Official docs says normals should use IndexToDirect,
|
||||
# but this does not seem well supported by apps currently...
|
||||
me.calc_normals_split()
|
||||
|
||||
ln_bl_dtype = np.single
|
||||
ln_fbx_dtype = np.float64
|
||||
@ -1258,8 +1257,6 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
|
||||
# del t_lnw
|
||||
me.free_tangents()
|
||||
|
||||
me.free_normals_split()
|
||||
|
||||
# Write VertexColor Layers.
|
||||
colors_type = scene_data.settings.colors_type
|
||||
vcolnumber = 0 if colors_type == 'NONE' else len(me.color_attributes)
|
||||
|
@ -1653,8 +1653,6 @@ def blen_read_geom_layer_smooth(fbx_obj, mesh):
|
||||
1, fbx_item_size, layer_id,
|
||||
xform=np.logical_not, # in FBX, 0 (False) is sharp, but in Blender True is sharp.
|
||||
)
|
||||
# We only set sharp edges here, not face smoothing itself...
|
||||
mesh.use_auto_smooth = True
|
||||
return False
|
||||
elif fbx_layer_mapping == b'ByPolygon':
|
||||
blen_data = MESH_ATTRIBUTE_SHARP_FACE.ensure(mesh.attributes).data
|
||||
@ -1737,23 +1735,23 @@ def blen_read_geom_layer_normal(fbx_obj, mesh, xform=None):
|
||||
bl_norm_dtype = np.single
|
||||
item_size = 3
|
||||
# try loops, then polygons, then vertices.
|
||||
tries = ((mesh.loops, "Loops", False, blen_read_geom_array_mapped_polyloop),
|
||||
tries = ((mesh.attributes["temp_custom_normals"].data, "Loops", False, blen_read_geom_array_mapped_polyloop),
|
||||
(mesh.polygons, "Polygons", True, blen_read_geom_array_mapped_polygon),
|
||||
(mesh.vertices, "Vertices", True, blen_read_geom_array_mapped_vert))
|
||||
for blen_data, blen_data_type, is_fake, func in tries:
|
||||
bdata = np.zeros((len(blen_data), item_size), dtype=bl_norm_dtype) if is_fake else blen_data
|
||||
if func(mesh, bdata, "normal", bl_norm_dtype,
|
||||
if func(mesh, bdata, "vector", bl_norm_dtype,
|
||||
fbx_layer_data, fbx_layer_index, fbx_layer_mapping, fbx_layer_ref, 3, item_size, layer_id, xform, True):
|
||||
if blen_data_type == "Polygons":
|
||||
# To expand to per-loop normals, repeat each per-polygon normal by the number of loops of each polygon.
|
||||
poly_loop_totals = np.empty(len(mesh.polygons), dtype=np.uintc)
|
||||
mesh.polygons.foreach_get("loop_total", poly_loop_totals)
|
||||
loop_normals = np.repeat(bdata, poly_loop_totals, axis=0)
|
||||
mesh.loops.foreach_set("normal", loop_normals.ravel())
|
||||
mesh.attributes["temp_custom_normals"].data.foreach_set("normal", loop_normals.ravel())
|
||||
elif blen_data_type == "Vertices":
|
||||
# We have to copy vnors to lnors! Far from elegant, but simple.
|
||||
loop_vertex_indices = MESH_ATTRIBUTE_CORNER_VERT.to_ndarray(mesh.attributes)
|
||||
mesh.loops.foreach_set("normal", bdata[loop_vertex_indices].ravel())
|
||||
mesh.attributes["temp_custom_normals"].data.foreach_set("normal", bdata[loop_vertex_indices].ravel())
|
||||
return True
|
||||
|
||||
blen_read_geom_array_error_mapping("normal", fbx_layer_mapping)
|
||||
@ -1877,7 +1875,7 @@ def blen_read_geom(fbx_tmpl, fbx_obj, settings):
|
||||
if settings.use_custom_normals:
|
||||
# Note: we store 'temp' normals in loops, since validate() may alter final mesh,
|
||||
# we can only set custom lnors *after* calling it.
|
||||
mesh.create_normals_split()
|
||||
mesh.attributes.new("temp_custom_normals", 'FLOAT_VECTOR', 'CORNER')
|
||||
if geom_mat_no is None:
|
||||
ok_normals = blen_read_geom_layer_normal(fbx_obj, mesh)
|
||||
else:
|
||||
@ -1889,7 +1887,7 @@ def blen_read_geom(fbx_tmpl, fbx_obj, settings):
|
||||
if ok_normals:
|
||||
bl_nors_dtype = np.single
|
||||
clnors = np.empty(len(mesh.loops) * 3, dtype=bl_nors_dtype)
|
||||
mesh.loops.foreach_get("normal", clnors)
|
||||
mesh.attributes["temp_custom_normals"].data.foreach_get("vector", clnors)
|
||||
|
||||
if not ok_smooth:
|
||||
sharp_face = MESH_ATTRIBUTE_SHARP_FACE.get(attributes)
|
||||
@ -1900,10 +1898,8 @@ def blen_read_geom(fbx_tmpl, fbx_obj, settings):
|
||||
# Iterating clnors into a nested tuple first is faster than passing clnors.reshape(-1, 3) directly into
|
||||
# normals_split_custom_set. We use clnors.data since it is a memoryview, which is faster to iterate than clnors.
|
||||
mesh.normals_split_custom_set(tuple(zip(*(iter(clnors.data),) * 3)))
|
||||
mesh.use_auto_smooth = True
|
||||
|
||||
if settings.use_custom_normals:
|
||||
mesh.free_normals_split()
|
||||
mesh.attributes.remove(mesh.attributes["temp_custom_normals"])
|
||||
|
||||
if not ok_smooth:
|
||||
sharp_face = MESH_ATTRIBUTE_SHARP_FACE.get(attributes)
|
||||
@ -3470,31 +3466,56 @@ def load(operator, context, filepath="",
|
||||
def _():
|
||||
fbx_tmpl = fbx_template_get((b'Geometry', b'KFbxShape'))
|
||||
|
||||
# - FBX | - Blender equivalent
|
||||
# Mesh | `Mesh`
|
||||
# BlendShape | `Key`
|
||||
# BlendShapeChannel | `ShapeKey`, but without its `.data`.
|
||||
# Shape | `ShapeKey.data`, but also includes normals and the values are relative to the base Mesh
|
||||
# | instead of being absolute. The data is sparse, so each Shape has an "Indexes" array too.
|
||||
# | FBX 2020 introduced 'Modern Style' Shapes that also support tangents, binormals, vertex
|
||||
# | colors and UVs, and can be absolute values instead of relative, but 'Modern Style' Shapes
|
||||
# | are not currently supported.
|
||||
#
|
||||
# The FBX connections between Shapes and Meshes form multiple many-many relationships:
|
||||
# Mesh >-< BlendShape >-< BlendShapeChannel >-< Shape
|
||||
# In practice, the relationships are almost never many-many and are more typically 1-many or 1-1:
|
||||
# Mesh --- BlendShape:
|
||||
# usually 1-1 and the FBX SDK might enforce that each BlendShape is connected to at most one Mesh.
|
||||
# BlendShape --< BlendShapeChannel:
|
||||
# usually 1-many.
|
||||
# BlendShapeChannel --- or uncommonly --< Shape:
|
||||
# usually 1-1, but 1-many is a documented feature.
|
||||
|
||||
def connections_gen(c_src_uuid, fbx_id, fbx_type):
|
||||
"""Helper to reduce duplicate code"""
|
||||
# Rarely, an imported FBX file will have duplicate connections. For Shape Key related connections, FBX
|
||||
# appears to ignore the duplicates, or overwrite the existing duplicates such that the end result is the
|
||||
# same as ignoring them, so keep a set of the seen connections and ignore any duplicates.
|
||||
seen_connections = set()
|
||||
for c_dst_uuid, ctype in fbx_connection_map.get(c_src_uuid, ()):
|
||||
if ctype.props[0] != b'OO':
|
||||
# 'Object-Object' connections only.
|
||||
continue
|
||||
fbx_data, bl_data = fbx_table_nodes.get(c_dst_uuid, (None, None))
|
||||
if fbx_data is None or fbx_data.id != fbx_id or fbx_data.props[2] != fbx_type:
|
||||
# Either `c_dst_uuid` doesn't exist, or it has a different id or type.
|
||||
continue
|
||||
connection_key = (c_src_uuid, c_dst_uuid)
|
||||
if connection_key in seen_connections:
|
||||
# The connection is a duplicate, skip it.
|
||||
continue
|
||||
seen_connections.add(connection_key)
|
||||
yield c_dst_uuid, fbx_data, bl_data
|
||||
|
||||
mesh_to_shapes = {}
|
||||
for s_uuid, s_item in fbx_table_nodes.items():
|
||||
fbx_sdata, bl_sdata = s_item = fbx_table_nodes.get(s_uuid, (None, None))
|
||||
for s_uuid, (fbx_sdata, _bl_sdata) in fbx_table_nodes.items():
|
||||
if fbx_sdata is None or fbx_sdata.id != b'Geometry' or fbx_sdata.props[2] != b'Shape':
|
||||
continue
|
||||
|
||||
# shape -> blendshapechannel -> blendshape -> mesh.
|
||||
for bc_uuid, bc_ctype in fbx_connection_map.get(s_uuid, ()):
|
||||
if bc_ctype.props[0] != b'OO':
|
||||
continue
|
||||
fbx_bcdata, _bl_bcdata = fbx_table_nodes.get(bc_uuid, (None, None))
|
||||
if fbx_bcdata is None or fbx_bcdata.id != b'Deformer' or fbx_bcdata.props[2] != b'BlendShapeChannel':
|
||||
continue
|
||||
for bs_uuid, bs_ctype in fbx_connection_map.get(bc_uuid, ()):
|
||||
if bs_ctype.props[0] != b'OO':
|
||||
continue
|
||||
fbx_bsdata, _bl_bsdata = fbx_table_nodes.get(bs_uuid, (None, None))
|
||||
if fbx_bsdata is None or fbx_bsdata.id != b'Deformer' or fbx_bsdata.props[2] != b'BlendShape':
|
||||
continue
|
||||
for m_uuid, m_ctype in fbx_connection_map.get(bs_uuid, ()):
|
||||
if m_ctype.props[0] != b'OO':
|
||||
continue
|
||||
fbx_mdata, bl_mdata = fbx_table_nodes.get(m_uuid, (None, None))
|
||||
if fbx_mdata is None or fbx_mdata.id != b'Geometry' or fbx_mdata.props[2] != b'Mesh':
|
||||
continue
|
||||
for bc_uuid, fbx_bcdata, _bl_bcdata in connections_gen(s_uuid, b'Deformer', b'BlendShapeChannel'):
|
||||
for bs_uuid, _fbx_bsdata, _bl_bsdata in connections_gen(bc_uuid, b'Deformer', b'BlendShape'):
|
||||
for m_uuid, _fbx_mdata, bl_mdata in connections_gen(bs_uuid, b'Geometry', b'Mesh'):
|
||||
# Blenmeshes are assumed already created at that time!
|
||||
assert(isinstance(bl_mdata, bpy.types.Mesh))
|
||||
# Group shapes by mesh so that each mesh only needs to be processed once for all of its shape
|
||||
|
@ -5,8 +5,8 @@
bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
"version": (4, 1, 2),
'blender': (4, 0, 0),
"version": (4, 1, 13),
'blender': (4, 1, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',
'warning': '',
@ -144,13 +144,10 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
|
||||
'Most efficient and portable, but more difficult to edit later'),
|
||||
('GLTF_SEPARATE', 'glTF Separate (.gltf + .bin + textures)',
|
||||
'Exports multiple files, with separate JSON, binary and texture data. '
|
||||
'Easiest to edit later'),
|
||||
('GLTF_EMBEDDED', 'glTF Embedded (.gltf)',
|
||||
'Exports a single file, with all data packed in JSON. '
|
||||
'Less efficient than binary, but easier to edit later')),
|
||||
'Easiest to edit later')),
|
||||
description=(
|
||||
'Output format and embedding options. Binary is most efficient, '
|
||||
'but JSON (embedded or separate) may be easier to edit later'
|
||||
'Output format. Binary is most efficient, '
|
||||
'but JSON may be easier to edit later'
|
||||
),
|
||||
default='GLB', #Warning => If you change the default, need to change the default filter too
|
||||
update=on_export_format_changed,
|
||||
@ -174,13 +171,13 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
|
||||
export_image_format: EnumProperty(
|
||||
name='Images',
|
||||
items=(('AUTO', 'Automatic',
|
||||
'Save PNGs as PNGs, JPEGs as JPEGs, WEBPs as WEBPs. '
|
||||
'If neither one, use PNG'),
|
||||
'Save PNGs as PNGs, JPEGs as JPEGs, WebPs as WebPs. '
|
||||
'For other formats, use PNG'),
|
||||
('JPEG', 'JPEG Format (.jpg)',
|
||||
'Save images as JPEGs. (Images that need alpha are saved as PNGs though.) '
|
||||
'Be aware of a possible loss in quality'),
|
||||
('WEBP', 'Webp Format',
|
||||
'Save images as WEBPs as main image (no fallback)'),
|
||||
('WEBP', 'WebP Format',
|
||||
'Save images as WebPs as main image (no fallback)'),
|
||||
('NONE', 'None',
|
||||
'Don\'t export images'),
|
||||
),
|
||||
@ -192,18 +189,18 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
|
||||
)
|
||||
|
||||
export_image_add_webp: BoolProperty(
|
||||
name='Create Webp',
|
||||
name='Create WebP',
|
||||
description=(
|
||||
"Creates webp textures for every textures. "
|
||||
"For already webp textures, nothing happen"
|
||||
"Creates WebP textures for every texture. "
|
||||
"For already WebP textures, nothing happens"
|
||||
),
|
||||
default=False
|
||||
)
|
||||
|
||||
export_image_webp_fallback: BoolProperty(
|
||||
name='Webp fallback',
|
||||
name='WebP fallback',
|
||||
description=(
|
||||
"For all webp textures, create a PNG fallback texture"
|
||||
"For all WebP textures, create a PNG fallback texture"
|
||||
),
|
||||
default=False
|
||||
)
|
||||
@ -476,6 +473,21 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
|
||||
default=False
|
||||
)
|
||||
|
||||
export_hierarchy_flatten_objs: BoolProperty(
|
||||
name='Flatten Object Hierarchy',
|
||||
description='Flatten Object Hierarchy. Useful in case of non decomposable transformation matrix',
|
||||
default=False
|
||||
)
|
||||
|
||||
export_armature_object_remove: BoolProperty(
|
||||
name='Remove Armature Object',
|
||||
description=(
|
||||
'Remove Armature object if possible.'
|
||||
'If Armature has multiple root bones, object will not be removed'
|
||||
),
|
||||
default=False
|
||||
)
|
||||
|
||||
export_optimize_animation_size: BoolProperty(
|
||||
name='Optimize Animation Size',
|
||||
description=(
|
||||
@ -641,7 +653,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
|
||||
|
||||
export_try_sparse_sk: BoolProperty(
|
||||
name='Use Sparse Accessor if better',
|
||||
description='Try using Sparce Accessor if it save space',
|
||||
description='Try using Sparse Accessor if it saves space',
|
||||
default=True
|
||||
)
|
||||
|
||||
@ -653,9 +665,9 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
|
||||
|
||||
export_gpu_instances: BoolProperty(
|
||||
name='GPU Instances',
|
||||
description='Export using EXT_mesh_gpu_instancing.'
|
||||
'Limited to children of a same Empty. '
|
||||
'multiple Materials might be omitted',
|
||||
description='Export using EXT_mesh_gpu_instancing. '
|
||||
'Limited to children of a given Empty. '
|
||||
'Multiple materials might be omitted',
|
||||
default=False
|
||||
)
|
||||
|
||||
@ -821,6 +833,8 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
|
||||
export_settings['gltf_animations'] = self.export_animations
|
||||
export_settings['gltf_def_bones'] = self.export_def_bones
|
||||
export_settings['gltf_flatten_bones_hierarchy'] = self.export_hierarchy_flatten_bones
|
||||
export_settings['gltf_flatten_obj_hierarchy'] = self.export_hierarchy_flatten_objs
|
||||
export_settings['gltf_armature_object_remove'] = self.export_armature_object_remove
|
||||
if self.export_animations:
|
||||
export_settings['gltf_frame_range'] = self.export_frame_range
|
||||
export_settings['gltf_force_sampling'] = self.export_force_sampling
|
||||
@ -1054,6 +1068,7 @@ class GLTF_PT_export_data_scene(bpy.types.Panel):
|
||||
sfile = context.space_data
|
||||
operator = sfile.active_operator
|
||||
layout.prop(operator, 'export_gpu_instances')
|
||||
layout.prop(operator, 'export_hierarchy_flatten_objs')
|
||||
|
||||
class GLTF_PT_export_data_mesh(bpy.types.Panel):
|
||||
bl_space_type = 'FILE_BROWSER'
|
||||
@ -1279,6 +1294,8 @@ class GLTF_PT_export_data_armature(bpy.types.Panel):
|
||||
if operator.export_force_sampling is False and operator.export_def_bones is True:
|
||||
layout.label(text="Export only deformation bones is not possible when not sampling animation")
|
||||
row = layout.row()
|
||||
row.prop(operator, 'export_armature_object_remove')
|
||||
row = layout.row()
|
||||
row.prop(operator, 'export_hierarchy_flatten_bones')
|
||||
|
||||
class GLTF_PT_export_data_compression(bpy.types.Panel):
|
||||
@ -1648,7 +1665,7 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
|
||||
items=(
|
||||
("BLENDER", "Blender (best for import/export round trip)",
|
||||
"Good for re-importing glTFs exported from Blender, "
|
||||
"and re-exporting glTFs to glTFs after Blender editing"
|
||||
"and re-exporting glTFs to glTFs after Blender editing. "
|
||||
"Bone tips are placed on their local +Y axis (in glTF space)"),
|
||||
("TEMPERANCE", "Temperance (average)",
|
||||
"Decent all-around strategy. "
|
||||
@ -1674,10 +1691,10 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
|
||||
)
|
||||
|
||||
import_webp_texture: BoolProperty(
|
||||
name='Import Webp textures',
|
||||
name='Import WebP textures',
|
||||
description=(
|
||||
"If a texture exists in webp format,"
|
||||
"loads the webp texture instead of the fallback png/jpg one"
|
||||
"If a texture exists in WebP format, "
|
||||
"loads the WebP texture instead of the fallback PNG/JPEG one"
|
||||
),
|
||||
default=False,
|
||||
)
|
||||
|
@ -8,6 +8,10 @@ import bpy
|
||||
def get_gltf_node_old_name():
|
||||
return "glTF Settings"
|
||||
|
||||
# Old group name
|
||||
def get_gltf_old_group_node_name():
|
||||
return "glTF Metallic Roughness"
|
||||
|
||||
def get_gltf_node_name():
|
||||
return "glTF Material Output"
|
||||
|
||||
|
@ -34,6 +34,11 @@ def gather_actions_animations(export_settings):
|
||||
|
||||
# Do not manage not exported objects
|
||||
if vtree.nodes[obj_uuid].node is None:
|
||||
if export_settings["gltf_armature_object_remove"] is True:
|
||||
# Manage armature object, as this is the object that has the animation
|
||||
if not vtree.nodes[obj_uuid].blender_object:
|
||||
continue
|
||||
else:
|
||||
continue
|
||||
|
||||
animations_, merged_tracks = gather_action_animations(obj_uuid, merged_tracks, len(animations), export_settings)
|
||||
@ -63,6 +68,11 @@ def prepare_actions_range(export_settings):
|
||||
|
||||
# Do not manage not exported objects
|
||||
if vtree.nodes[obj_uuid].node is None:
|
||||
if export_settings["gltf_armature_object_remove"] is True:
|
||||
# Manage armature object, as this is the object that has the animation
|
||||
if not vtree.nodes[obj_uuid].blender_object:
|
||||
continue
|
||||
else:
|
||||
continue
|
||||
|
||||
if obj_uuid not in export_settings['ranges']:
|
||||
@ -168,6 +178,11 @@ def prepare_actions_range(export_settings):
|
||||
|
||||
# Do not manage not exported objects
|
||||
if vtree.nodes[obj_uuid].node is None:
|
||||
if export_settings['gltf_armature_object_remove'] is True:
|
||||
# Manage armature object, as this is the object that has the animation
|
||||
if not vtree.nodes[obj_uuid].blender_object:
|
||||
continue
|
||||
else:
|
||||
continue
|
||||
|
||||
blender_actions = __get_blender_actions(obj_uuid, export_settings)
|
||||
|
@ -35,6 +35,11 @@ def gather_scene_animations(export_settings):
|
||||
|
||||
# Do not manage not exported objects
|
||||
if vtree.nodes[obj_uuid].node is None:
|
||||
if export_settings['gltf_armature_object_remove'] is True:
|
||||
# Manage armature object, as this is the object that has the animation
|
||||
if not vtree.nodes[obj_uuid].blender_object:
|
||||
continue
|
||||
else:
|
||||
continue
|
||||
|
||||
blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object
|
||||
|
@ -22,6 +22,11 @@ def gather_tracks_animations(export_settings):

# Do not manage not exported objects
if vtree.nodes[obj_uuid].node is None:
if export_settings['gltf_armature_object_remove'] is True:
# Manage armature object, as this is the object that has the animation
if not vtree.nodes[obj_uuid].blender_object:
continue
else:
continue

animations_, merged_tracks = gather_track_animations(obj_uuid, merged_tracks, len(animations), export_settings)

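The same five-line guard is added to each gathering loop above: objects filtered out of the export are normally skipped, but with gltf_armature_object_remove enabled the armature object has no exported node while still owning the animation data, so it is only skipped when it has no Blender object at all. A sketch of that check folded into one hypothetical helper (not part of the exporter):

    def skip_not_exported(vtree, obj_uuid, export_settings):
        # Exported normally: never skip.
        if vtree.nodes[obj_uuid].node is not None:
            return False
        # "Remove Armature Object": the armature has no node, but its animations
        # still need to be gathered from the Blender object.
        if export_settings['gltf_armature_object_remove']:
            return not vtree.nodes[obj_uuid].blender_object
        return True
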
@ -119,6 +119,10 @@ def get_cache_data(path: str,
|
||||
# Bone has a parent, but in export, after filter, is at root of armature
|
||||
matrix = blender_bone.matrix.copy()
|
||||
|
||||
# Because there is no armature object, we need to apply the TRS of armature to the root bone
|
||||
if export_settings['gltf_armature_object_remove'] is True:
|
||||
matrix = matrix @ blender_obj.matrix_world
|
||||
|
||||
if blender_obj.animation_data and blender_obj.animation_data.action \
|
||||
and export_settings['gltf_animation_mode'] in ["ACTIVE_ACTIONS", "ACTIONS"]:
|
||||
if blender_bone.name not in data[obj_uuid][blender_obj.animation_data.action.name]['bone'].keys():
|
||||
|
@ -85,9 +85,6 @@ def __create_buffer(exporter, export_settings):
|
||||
buffer = bytes()
|
||||
if export_settings['gltf_format'] == 'GLB':
|
||||
buffer = exporter.finalize_buffer(export_settings['gltf_filedirectory'], is_glb=True)
|
||||
else:
|
||||
if export_settings['gltf_format'] == 'GLTF_EMBEDDED':
|
||||
exporter.finalize_buffer(export_settings['gltf_filedirectory'])
|
||||
else:
|
||||
exporter.finalize_buffer(export_settings['gltf_filedirectory'],
|
||||
export_settings['gltf_binaryfilename'])
|
||||
|
@ -9,6 +9,7 @@ from ...io.exp.gltf2_io_user_extensions import export_user_extensions
|
||||
from ..com.gltf2_blender_extras import generate_extras
|
||||
from .gltf2_blender_gather_cache import cached
|
||||
from . import gltf2_blender_gather_nodes
|
||||
from . import gltf2_blender_gather_joints
|
||||
from . import gltf2_blender_gather_tree
|
||||
from .animation.sampled.object.gltf2_blender_gather_object_keyframes import get_cache_data
|
||||
from .animation.gltf2_blender_gather_animations import gather_animations
|
||||
@ -52,6 +53,8 @@ def __gather_scene(blender_scene, export_settings):
|
||||
vtree = gltf2_blender_gather_tree.VExportTree(export_settings)
|
||||
vtree.construct(blender_scene)
|
||||
vtree.search_missing_armature() # In case armature are no parented correctly
|
||||
vtree.bake_armature_bone_list() # Used in case we remove the armature
|
||||
vtree.check_if_we_can_remove_armature() # Check if we can remove the armatures objects
|
||||
|
||||
export_user_extensions('vtree_before_filter_hook', export_settings, vtree)
|
||||
|
||||
@ -59,6 +62,8 @@ def __gather_scene(blender_scene, export_settings):
|
||||
vtree.filter()
|
||||
if export_settings['gltf_flatten_bones_hierarchy'] is True:
|
||||
vtree.break_bone_hierarchy()
|
||||
if export_settings['gltf_flatten_obj_hierarchy'] is True:
|
||||
vtree.break_obj_hierarchy()
|
||||
|
||||
vtree.variants_reset_to_original()
|
||||
|
||||
@ -66,11 +71,41 @@ def __gather_scene(blender_scene, export_settings):
|
||||
|
||||
export_settings['vtree'] = vtree
|
||||
|
||||
|
||||
|
||||
|
||||
# If we don't remove armature object, we can't have bones directly at root of scene
|
||||
# So looping only on root nodes, as they are all nodes, not bones
|
||||
if export_settings['gltf_armature_object_remove'] is False:
|
||||
for r in [vtree.nodes[r] for r in vtree.roots]:
|
||||
node = gltf2_blender_gather_nodes.gather_node(
|
||||
r, export_settings)
|
||||
if node is not None:
|
||||
scene.nodes.append(node)
|
||||
else:
|
||||
# If we remove armature objects, we can have bone at root of scene
|
||||
armature_root_joints = {}
|
||||
for r in [vtree.nodes[r] for r in vtree.roots]:
|
||||
# Classic Object/node case
|
||||
if r.blender_type != gltf2_blender_gather_tree.VExportNode.BONE:
|
||||
node = gltf2_blender_gather_nodes.gather_node(
|
||||
r, export_settings)
|
||||
if node is not None:
|
||||
scene.nodes.append(node)
|
||||
else:
|
||||
# We can have bone are root of scene because we remove the armature object
|
||||
# and the armature was at root of scene
|
||||
node = gltf2_blender_gather_joints.gather_joint_vnode(
|
||||
r.uuid, export_settings)
|
||||
if node is not None:
|
||||
scene.nodes.append(node)
|
||||
if r.armature not in armature_root_joints.keys():
|
||||
armature_root_joints[r.armature] = []
|
||||
armature_root_joints[r.armature].append(node)
|
||||
|
||||
# Manage objects parented to bones, now we go through all root objects
|
||||
for k, v in armature_root_joints.items():
|
||||
gltf2_blender_gather_nodes.get_objects_parented_to_bones(k, v, export_settings)
|
||||
|
||||
vtree.add_neutral_bones()
|
||||
|
||||
|
@ -48,10 +48,14 @@ def gather_joint_vnode(vnode, export_settings):
|
||||
:return: a glTF2 node (acting as a joint)
|
||||
"""
|
||||
vtree = export_settings['vtree']
|
||||
blender_object = vtree.nodes[vnode].blender_object
|
||||
blender_bone = vtree.nodes[vnode].blender_bone
|
||||
|
||||
|
||||
if export_settings['gltf_armature_object_remove'] is True:
|
||||
if vtree.nodes[vnode].parent_uuid is not None:
|
||||
mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world
|
||||
else:
|
||||
mat = vtree.nodes[vnode].matrix_world
|
||||
else:
|
||||
mat = vtree.nodes[vtree.nodes[vnode].parent_uuid].matrix_world.inverted_safe() @ vtree.nodes[vnode].matrix_world
|
||||
|
||||
trans, rot, sca = mat.decompose()
|
||||
|
@ -21,6 +21,7 @@ from . import gltf2_blender_gather_lights
|
||||
from .gltf2_blender_gather_tree import VExportNode
|
||||
|
||||
def gather_node(vnode, export_settings):
|
||||
|
||||
blender_object = vnode.blender_object
|
||||
|
||||
skin = gather_skin(vnode.uuid, export_settings)
|
||||
@ -29,7 +30,7 @@ def gather_node(vnode, export_settings):
|
||||
|
||||
node = gltf2_io.Node(
|
||||
camera=__gather_camera(blender_object, export_settings),
|
||||
children=__gather_children(vnode, blender_object, export_settings),
|
||||
children=__gather_children(vnode, export_settings),
|
||||
extensions=__gather_extensions(blender_object, export_settings),
|
||||
extras=__gather_extras(blender_object, export_settings),
|
||||
matrix=__gather_matrix(blender_object, export_settings),
|
||||
@ -61,53 +62,70 @@ def __gather_camera(blender_object, export_settings):
|
||||
return gltf2_blender_gather_cameras.gather_camera(blender_object.data, export_settings)
|
||||
|
||||
|
||||
def __gather_children(vnode, blender_object, export_settings):
|
||||
def __gather_children(vnode, export_settings):
|
||||
children = []
|
||||
|
||||
vtree = export_settings['vtree']
|
||||
|
||||
|
||||
armature_object_uuid = None
|
||||
|
||||
# Standard Children / Collection
|
||||
if export_settings['gltf_armature_object_remove'] is False:
|
||||
for c in [vtree.nodes[c] for c in vnode.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE]:
|
||||
node = gather_node(c, export_settings)
|
||||
if node is not None:
|
||||
children.append(node)
|
||||
else:
|
||||
root_joints = []
|
||||
for c in [vtree.nodes[c] for c in vnode.children]:
|
||||
if c.blender_type != gltf2_blender_gather_tree.VExportNode.BONE:
|
||||
node = gather_node(c, export_settings)
|
||||
if node is not None:
|
||||
children.append(node)
|
||||
else:
|
||||
# We come here because armature was remove, and bone can be a child of any object
|
||||
joint = gltf2_blender_gather_joints.gather_joint_vnode(c.uuid, export_settings)
|
||||
children.append(joint)
|
||||
armature_object_uuid = c.armature
|
||||
root_joints.append(joint)
|
||||
|
||||
# Now got all bone children (that are root joints), we can get object parented to bones
|
||||
|
||||
# Armature --> Retrieve Blender bones
|
||||
# This can't happen if we remove the Armature Object
|
||||
if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE:
|
||||
armature_object_uuid = vnode.uuid
|
||||
root_joints = []
|
||||
|
||||
all_armature_children = vnode.children
|
||||
root_bones_uuid = [c for c in all_armature_children if export_settings['vtree'].nodes[c].blender_type == VExportNode.BONE]
|
||||
root_bones_uuid = export_settings['vtree'].get_root_bones_uuid(vnode.uuid)
|
||||
for bone_uuid in root_bones_uuid:
|
||||
joint = gltf2_blender_gather_joints.gather_joint_vnode(bone_uuid, export_settings)
|
||||
children.append(joint)
|
||||
root_joints.append(joint)
|
||||
|
||||
|
||||
if vnode.blender_type == gltf2_blender_gather_tree.VExportNode.ARMATURE \
|
||||
or armature_object_uuid is not None:
|
||||
|
||||
# Object parented to bones
|
||||
get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings)
|
||||
|
||||
return children
|
||||
|
||||
def get_objects_parented_to_bones(armature_object_uuid, root_joints, export_settings):
|
||||
vtree = export_settings['vtree']
|
||||
direct_bone_children = []
|
||||
for n in [vtree.nodes[i] for i in vtree.get_all_bones(vnode.uuid)]:
|
||||
for n in [vtree.nodes[i] for i in vtree.get_all_bones(armature_object_uuid)]:
|
||||
direct_bone_children.extend([c for c in n.children if vtree.nodes[c].blender_type != gltf2_blender_gather_tree.VExportNode.BONE])
|
||||
|
||||
|
||||
def find_parent_joint(joints, name):
|
||||
for joint in joints:
|
||||
if joint.name == name:
|
||||
return joint
|
||||
parent_joint = find_parent_joint(joint.children, name)
|
||||
if parent_joint:
|
||||
return parent_joint
|
||||
return None
|
||||
|
||||
for child in direct_bone_children: # List of object that are parented to bones
|
||||
# find parent joint
|
||||
parent_joint = find_parent_joint(root_joints, vtree.nodes[child].blender_object.parent_bone)
|
||||
parent_joint = __find_parent_joint(root_joints, vtree.nodes[child].blender_object.parent_bone)
|
||||
if not parent_joint:
|
||||
continue
|
||||
child_node = gather_node(vtree.nodes[child], export_settings)
|
||||
if child_node is None:
|
||||
continue
|
||||
blender_bone = blender_object.pose.bones[parent_joint.name]
|
||||
|
||||
mat = vtree.nodes[vtree.nodes[child].parent_bone_uuid].matrix_world.inverted_safe() @ vtree.nodes[child].matrix_world
|
||||
loc, rot_quat, scale = mat.decompose()
|
||||
@ -131,7 +149,15 @@ def __gather_children(vnode, blender_object, export_settings):
|
||||
|
||||
parent_joint.children.append(child_node)
|
||||
|
||||
return children
|
||||
|
||||
def __find_parent_joint(joints, name):
|
||||
for joint in joints:
|
||||
if joint.name == name:
|
||||
return joint
|
||||
parent_joint = __find_parent_joint(joint.children, name)
|
||||
if parent_joint:
|
||||
return parent_joint
|
||||
return None
|
||||
|
||||
|
||||
def __gather_extensions(blender_object, export_settings):
|
||||
|
@ -64,8 +64,6 @@ class PrimitiveCreator:
|
||||
self.blender_object = self.export_settings['vtree'].nodes[self.uuid_for_skined_data].blender_object
|
||||
|
||||
self.use_normals = self.export_settings['gltf_normals']
|
||||
if self.use_normals:
|
||||
self.blender_mesh.calc_normals_split()
|
||||
|
||||
self.use_tangents = False
|
||||
if self.use_normals and self.export_settings['gltf_tangents']:
|
||||
@ -776,7 +774,6 @@ class PrimitiveCreator:
|
||||
self.normals = np.array(self.normals, dtype=np.float32)
|
||||
else:
|
||||
self.normals = np.empty(len(self.blender_mesh.loops) * 3, dtype=np.float32)
|
||||
self.blender_mesh.calc_normals_split()
|
||||
self.blender_mesh.loops.foreach_get('normal', self.normals)
|
||||
|
||||
self.normals = self.normals.reshape(len(self.blender_mesh.loops), 3)
|
||||
|
@ -7,16 +7,27 @@ from ...io.com import gltf2_io
|
||||
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
|
||||
from ...io.com.gltf2_io_constants import TextureFilter, TextureWrap
|
||||
from .gltf2_blender_gather_cache import cached
|
||||
from .gltf2_blender_get import (
|
||||
previous_node,
|
||||
previous_socket,
|
||||
get_const_from_socket,
|
||||
)
|
||||
from .material.gltf2_blender_search_node_tree import previous_node, previous_socket, get_const_from_socket, NodeSocket
|
||||
|
||||
|
||||
@cached
|
||||
def gather_sampler(blender_shader_node: bpy.types.Node, export_settings):
|
||||
wrap_s, wrap_t = __gather_wrap(blender_shader_node, export_settings)
|
||||
def gather_sampler(blender_shader_node: bpy.types.Node, group_path_str, export_settings):
|
||||
# reconstruct group_path from group_path_str
|
||||
sep_item = "##~~gltf-sep~~##"
|
||||
sep_inside_item = "##~~gltf-inside-sep~~##"
|
||||
group_path = []
|
||||
tab = group_path_str.split(sep_item)
|
||||
if len(tab) > 0:
|
||||
group_path.append(bpy.data.materials[tab[0]])
|
||||
for idx, i in enumerate(tab[1:]):
|
||||
subtab = i.split(sep_inside_item)
|
||||
if idx == 0:
|
||||
group_path.append(bpy.data.materials[tab[0]].node_tree.nodes[subtab[1]])
|
||||
else:
|
||||
group_path.append(bpy.data.node_groups[subtab[0]].nodes[subtab[1]])
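
For readers puzzling over the string splitting above: the cached gather function needs a hashable key, so the group path (the material, then one hop per nested node group) is flattened into a single string with sentinel separators and rebuilt here. A standalone sketch of that round trip, with illustrative helper names; only the separator strings are taken from the patch:

```python
# Hypothetical encode/decode pair for group_path_str; not the add-on's API.
SEP_ITEM = "##~~gltf-sep~~##"
SEP_INSIDE = "##~~gltf-inside-sep~~##"

def encode_group_path(material_name, steps):
    # steps: list of (node_tree_name, node_name) pairs, outermost group first
    return SEP_ITEM.join([material_name] + [tree + SEP_INSIDE + node for tree, node in steps])

def decode_group_path(group_path_str):
    material_name, *items = group_path_str.split(SEP_ITEM)
    return material_name, [tuple(item.split(SEP_INSIDE)) for item in items]

# Round trip:
s = encode_group_path("MyMaterial", [("MyMaterial", "Group"), ("MyNodeGroup", "Image Texture")])
assert decode_group_path(s) == ("MyMaterial", [("MyMaterial", "Group"), ("MyNodeGroup", "Image Texture")])
```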
|
||||
|
||||
|
||||
wrap_s, wrap_t = __gather_wrap(blender_shader_node, group_path, export_settings)
|
||||
|
||||
sampler = gltf2_io.Sampler(
|
||||
extensions=__gather_extensions(blender_shader_node, export_settings),
|
||||
@ -80,7 +91,7 @@ def __gather_name(blender_shader_node, export_settings):
|
||||
return None
|
||||
|
||||
|
||||
def __gather_wrap(blender_shader_node, export_settings):
|
||||
def __gather_wrap(blender_shader_node, group_path, export_settings):
|
||||
# First gather from the Texture node
|
||||
if blender_shader_node.extension == 'EXTEND':
|
||||
wrap_s = TextureWrap.ClampToEdge
|
||||
@ -98,7 +109,7 @@ def __gather_wrap(blender_shader_node, export_settings):
|
||||
# But still works for old files
|
||||
# Still needed for heterogeneous sampler, like MIRROR x REPEAT, for example
|
||||
# Take manual wrapping into account
|
||||
result = detect_manual_uv_wrapping(blender_shader_node)
|
||||
result = detect_manual_uv_wrapping(blender_shader_node, group_path)
|
||||
if result:
|
||||
if result['wrap_s'] is not None: wrap_s = result['wrap_s']
|
||||
if result['wrap_t'] is not None: wrap_t = result['wrap_t']
|
||||
@ -110,7 +121,7 @@ def __gather_wrap(blender_shader_node, export_settings):
|
||||
return wrap_s, wrap_t
|
||||
|
||||
|
||||
def detect_manual_uv_wrapping(blender_shader_node):
|
||||
def detect_manual_uv_wrapping(blender_shader_node, group_path):
|
||||
# Detects UV wrapping done using math nodes. This is for emulating wrap
|
||||
# modes Blender doesn't support. It looks like
|
||||
#
|
||||
@ -124,38 +135,38 @@ def detect_manual_uv_wrapping(blender_shader_node):
|
||||
# mode in each direction (or None), and next_socket.
|
||||
result = {}
|
||||
|
||||
comb = previous_node(blender_shader_node.inputs['Vector'])
|
||||
if comb is None or comb.type != 'COMBXYZ': return None
|
||||
comb = previous_node(NodeSocket(blender_shader_node.inputs['Vector'], group_path))
|
||||
if comb.node is None or comb.node.type != 'COMBXYZ': return None
|
||||
|
||||
for soc in ['X', 'Y']:
|
||||
node = previous_node(comb.inputs[soc])
|
||||
if node is None: return None
|
||||
node = previous_node(NodeSocket(comb.node.inputs[soc], comb.group_path))
|
||||
if node.node is None: return None
|
||||
|
||||
if node.type == 'SEPXYZ':
|
||||
if node.node.type == 'SEPXYZ':
|
||||
# Passed through without change
|
||||
wrap = None
|
||||
prev_socket = previous_socket(comb.inputs[soc])
|
||||
elif node.type == 'MATH':
|
||||
prev_socket = previous_socket(NodeSocket(comb.node.inputs[soc], comb.group_path))
|
||||
elif node.node.type == 'MATH':
|
||||
# Math node applies a manual wrap
|
||||
if (node.operation == 'PINGPONG' and
|
||||
get_const_from_socket(node.inputs[1], kind='VALUE') == 1.0): # scale = 1
|
||||
if (node.node.operation == 'PINGPONG' and
|
||||
get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind='VALUE') == 1.0): # scale = 1
|
||||
wrap = TextureWrap.MirroredRepeat
|
||||
elif (node.operation == 'WRAP' and
|
||||
get_const_from_socket(node.inputs[1], kind='VALUE') == 0.0 and # min = 0
|
||||
get_const_from_socket(node.inputs[2], kind='VALUE') == 1.0): # max = 1
|
||||
elif (node.node.operation == 'WRAP' and
|
||||
get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind='VALUE') == 0.0 and # min = 0
|
||||
get_const_from_socket(NodeSocket(node.node.inputs[2], node.group_path), kind='VALUE') == 1.0): # max = 1
|
||||
wrap = TextureWrap.Repeat
|
||||
else:
|
||||
return None
|
||||
|
||||
prev_socket = previous_socket(node.inputs[0])
|
||||
prev_socket = previous_socket(NodeSocket(node.node.inputs[0], node.group_path))
|
||||
else:
|
||||
return None
|
||||
|
||||
if prev_socket is None: return None
|
||||
prev_node = prev_socket.node
|
||||
if prev_socket.socket is None: return None
|
||||
prev_node = prev_socket.socket.node
|
||||
if prev_node.type != 'SEPXYZ': return None
|
||||
# Make sure X goes to X, etc.
|
||||
if prev_socket.name != soc: return None
|
||||
if prev_socket.socket.name != soc: return None
|
||||
# Make sure both attach to the same SeparateXYZ node
|
||||
if soc == 'X':
|
||||
sep = prev_node
|
||||
@ -164,5 +175,5 @@ def detect_manual_uv_wrapping(blender_shader_node):
|
||||
|
||||
result['wrap_s' if soc == 'X' else 'wrap_t'] = wrap
|
||||
|
||||
result['next_socket'] = sep.inputs[0]
|
||||
result['next_socket'] = NodeSocket(sep.inputs[0], prev_socket.group_path)
|
||||
return result
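
To summarize the detection rules above (a gloss, not new behavior): a SeparateXYZ -> CombineXYZ pass-through means "no manual wrap", a Math node in PINGPONG mode with scale 1.0 emulates mirrored repeat, and a Math node in WRAP mode with min 0.0 / max 1.0 emulates plain repeat. A tiny standalone sketch of that decision table, using the standard glTF wrap enum values:

```python
# Standalone illustration of the manual-wrap decision table used above.
MIRRORED_REPEAT = 33648  # glTF TextureWrap.MirroredRepeat
REPEAT = 10497           # glTF TextureWrap.Repeat

def manual_wrap_mode(operation, const_inputs):
    # operation: Math node operation; const_inputs: its constant input values
    if operation == 'PINGPONG' and const_inputs.get('scale') == 1.0:
        return MIRRORED_REPEAT
    if operation == 'WRAP' and const_inputs.get('min') == 0.0 and const_inputs.get('max') == 1.0:
        return REPEAT
    return None  # anything else is not recognized as a manual wrap

assert manual_wrap_mode('PINGPONG', {'scale': 1.0}) == MIRRORED_REPEAT
assert manual_wrap_mode('WRAP', {'min': 0.0, 'max': 1.0}) == REPEAT
assert manual_wrap_mode('MULTIPLY', {}) is None
```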
|
||||
|
@ -13,6 +13,7 @@ from ...io.com import gltf2_io_constants
|
||||
from ...io.exp import gltf2_io_binary_data
|
||||
from ..com.gltf2_blender_default import BLENDER_GLTF_SPECIAL_COLLECTION
|
||||
from . import gltf2_blender_gather_accessors
|
||||
from .gltf2_blender_gather_joints import gather_joint_vnode
|
||||
|
||||
class VExportNode:
|
||||
|
||||
@ -76,7 +77,7 @@ class VExportNode:
|
||||
def recursive_display(self, tree, mode):
|
||||
if mode == "simple":
|
||||
for c in self.children:
|
||||
print(self.blender_object.name, "/", self.blender_bone.name if self.blender_bone else "", "-->", tree.nodes[c].blender_object.name, "/", tree.nodes[c].blender_bone.name if tree.nodes[c].blender_bone else "" )
|
||||
print(tree.nodes[c].uuid, self.blender_object.name, "/", self.blender_bone.name if self.blender_bone else "", "-->", tree.nodes[c].blender_object.name, "/", tree.nodes[c].blender_bone.name if tree.nodes[c].blender_bone else "" )
|
||||
tree.nodes[c].recursive_display(tree, mode)
|
||||
|
||||
class VExportTree:
|
||||
@ -278,7 +279,8 @@ class VExportTree:
|
||||
def get_all_objects(self):
|
||||
return [n.uuid for n in self.nodes.values() if n.blender_type != VExportNode.BONE]
|
||||
|
||||
def get_all_bones(self, uuid): #For armatue Only
|
||||
def get_all_bones(self, uuid): #For armature only
|
||||
if not hasattr(self.nodes[uuid], "all_bones"):
|
||||
if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
|
||||
def recursive_get_all_bones(uuid):
|
||||
total = []
|
||||
@ -292,9 +294,25 @@ class VExportTree:
|
||||
tot = []
|
||||
for c_uuid in self.nodes[uuid].children:
|
||||
tot.extend(recursive_get_all_bones(c_uuid))
|
||||
return tot
|
||||
self.nodes[uuid].all_bones = tot
|
||||
return tot # Not really needed to return, we are just baking it before export really starts
|
||||
else:
|
||||
self.nodes[uuid].all_bones = []
|
||||
return []
|
||||
else:
|
||||
return self.nodes[uuid].all_bones
|
||||
|
||||
def get_root_bones_uuid(self, uuid): #For armature only
|
||||
if not hasattr(self.nodes[uuid], "root_bones_uuid"):
|
||||
if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
|
||||
all_armature_children = self.nodes[uuid].children
|
||||
self.nodes[uuid].root_bones_uuid = [c for c in all_armature_children if self.nodes[c].blender_type == VExportNode.BONE]
|
||||
return self.nodes[uuid].root_bones_uuid # Not really needed to return, we are just baking it before export really starts
|
||||
else:
|
||||
self.nodes[uuid].root_bones_uuid = []
|
||||
return []
|
||||
else:
|
||||
return self.nodes[uuid].root_bones_uuid
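
Side note on the pattern shared by get_all_bones and get_root_bones_uuid: the result is computed once, stored on the vnode as a plain attribute, and read back on every later call, which is what lets bake_armature_bone_list pre-warm the data before armature objects are dropped. A generic, self-contained sketch of that hasattr-based lazy cache (names are illustrative):

```python
# Minimal sketch of the "bake once, read back later" caching used above.
class Node:
    def __init__(self, children):
        self.children = children  # plain data, no Blender types needed here

def get_descendants(nodes, uuid):
    node = nodes[uuid]
    if not hasattr(node, "descendants"):      # first call: compute and bake
        total = []
        for child in node.children:
            total.append(child)
            total.extend(get_descendants(nodes, child))
        node.descendants = total
    return node.descendants                   # later calls: cached value

nodes = {"a": Node(["b"]), "b": Node(["c"]), "c": Node([])}
assert get_descendants(nodes, "a") == ["b", "c"]
assert get_descendants(nodes, "a") is nodes["a"].descendants  # baked on the node
```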
|
||||
|
||||
def get_all_node_of_type(self, node_type):
|
||||
return [n.uuid for n in self.nodes.values() if n.blender_type == node_type]
|
||||
@ -302,10 +320,9 @@ class VExportTree:
|
||||
def display(self, mode):
|
||||
if mode == "simple":
|
||||
for n in self.roots:
|
||||
print("Root", self.nodes[n].blender_object.name, "/", self.nodes[n].blender_bone.name if self.nodes[n].blender_bone else "" )
|
||||
print(self.nodes[n].uuid, "Root", self.nodes[n].blender_object.name, "/", self.nodes[n].blender_bone.name if self.nodes[n].blender_bone else "" )
|
||||
self.nodes[n].recursive_display(self, mode)
|
||||
|
||||
|
||||
def filter_tag(self):
|
||||
roots = self.roots.copy()
|
||||
for r in roots:
|
||||
@ -322,7 +339,6 @@ class VExportTree:
|
||||
self.filter_perform()
|
||||
self.remove_filtered_nodes()
|
||||
|
||||
|
||||
def recursive_filter_tag(self, uuid, parent_keep_tag):
|
||||
# parent_keep_tag is for collection instance
|
||||
# some properties (selection, visibility, renderability)
|
||||
@ -442,9 +458,19 @@ class VExportTree:
|
||||
bpy.data.collections[BLENDER_GLTF_SPECIAL_COLLECTION].objects:
|
||||
return False
|
||||
|
||||
if self.export_settings['gltf_armature_object_remove'] is True:
|
||||
# If we remove the Armature object
|
||||
if self.nodes[uuid].blender_type == VExportNode.ARMATURE:
|
||||
self.nodes[uuid].arma_exported = True
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def remove_filtered_nodes(self):
|
||||
if self.export_settings['gltf_armature_object_remove'] is True:
|
||||
# If we remove the Armature object
|
||||
self.nodes = {k:n for (k, n) in self.nodes.items() if n.keep_tag is True or (n.keep_tag is False and n.blender_type == VExportNode.ARMATURE)}
|
||||
else:
|
||||
self.nodes = {k:n for (k, n) in self.nodes.items() if n.keep_tag is True}
|
||||
|
||||
def search_missing_armature(self):
|
||||
@ -454,6 +480,14 @@ class VExportTree:
|
||||
n.armature = candidates[0].uuid
|
||||
del n.armature_needed
|
||||
|
||||
def bake_armature_bone_list(self):
|
||||
# Used to store data in armature vnode
|
||||
# If armature is removed from export
|
||||
# Data is still available, even if the armature is not exported (so bones are re-parented)
|
||||
for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]:
|
||||
self.get_all_bones(n.uuid)
|
||||
self.get_root_bones_uuid(n.uuid)
|
||||
|
||||
def add_neutral_bones(self):
|
||||
added_armatures = []
|
||||
for n in [n for n in self.nodes.values() if \
|
||||
@ -521,6 +555,9 @@ class VExportTree:
|
||||
from .gltf2_blender_gather_skins import gather_skin
|
||||
skins = []
|
||||
for n in [n for n in self.nodes.values() if n.blender_type == VExportNode.ARMATURE]:
|
||||
if self.export_settings['gltf_armature_object_remove'] is True:
|
||||
if hasattr(n, "arma_exported") is False:
|
||||
continue
|
||||
if len([m for m in self.nodes.values() if m.keep_tag is True and m.blender_type == VExportNode.OBJECT and m.armature == n.uuid]) == 0:
|
||||
skin = gather_skin(n.uuid, self.export_settings)
|
||||
skins.append(skin)
|
||||
@ -552,3 +589,25 @@ class VExportTree:
|
||||
self.nodes[self.nodes[bone].parent_uuid].children.remove(bone)
|
||||
self.nodes[bone].parent_uuid = arma
|
||||
self.nodes[arma].children.append(bone)
|
||||
|
||||
def break_obj_hierarchy(self):
|
||||
# Can be useful when the matrix is not decomposable
|
||||
# TODO: if we get real collection one day, we probably need to adapt this code
|
||||
for obj in self.get_all_objects():
|
||||
if self.nodes[obj].armature is not None and self.nodes[obj].parent_uuid == self.nodes[obj].armature:
|
||||
continue # Keep skinned meshes as children of the armature
|
||||
if self.nodes[obj].parent_uuid is not None:
|
||||
self.nodes[self.nodes[obj].parent_uuid].children.remove(obj)
|
||||
self.nodes[obj].parent_uuid = None
|
||||
self.roots.append(obj)
|
||||
|
||||
def check_if_we_can_remove_armature(self):
|
||||
# If user requested to remove armature, we need to check if it is possible
|
||||
# If is impossible to remove it if armature has multiple root bones. (glTF validator error)
|
||||
# Currently, we manage it at export level, not at each armature level
|
||||
for arma_uuid in [n for n in self.nodes.keys() if self.nodes[n].blender_type == VExportNode.ARMATURE]:
|
||||
if len(self.get_root_bones_uuid(arma_uuid)) > 1:
|
||||
# We can't remove armature
|
||||
self.export_settings['gltf_armature_object_remove'] = False
|
||||
print("WARNING: We can't remove armature object because some armatures have multiple root bones.")
|
||||
break
|
||||
|
@ -3,12 +3,6 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
import bpy
|
||||
from mathutils import Vector, Matrix
|
||||
from ...blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf
|
||||
from ...io.com import gltf2_io_debug
|
||||
from ..com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name
|
||||
from .material import gltf2_blender_search_node_tree
|
||||
|
||||
|
||||
def get_animation_target(action_group: bpy.types.ActionGroup):
|
||||
return action_group.channels[0].data_path.split('.')[-1]
|
||||
@ -31,292 +25,3 @@ def get_object_from_datapath(blender_object, data_path: str):
|
||||
# path_attr = data_path
|
||||
|
||||
return prop
|
||||
|
||||
|
||||
def get_node_socket(blender_material, type, name):
|
||||
"""
|
||||
For a given material input name, retrieve the corresponding node tree socket for a given node type.
|
||||
|
||||
:param blender_material: a blender material for which to get the socket
|
||||
:return: a blender NodeSocket for a given type
|
||||
"""
|
||||
nodes = [n for n in blender_material.node_tree.nodes if isinstance(n, type) and not n.mute]
|
||||
nodes = [node for node in nodes if check_if_is_linked_to_active_output(node.outputs[0])]
|
||||
inputs = sum([[input for input in node.inputs if input.name == name] for node in nodes], [])
|
||||
if inputs:
|
||||
return inputs[0]
|
||||
return None
|
||||
|
||||
|
||||
def get_socket(blender_material: bpy.types.Material, name: str, volume=False):
|
||||
"""
|
||||
For a given material input name, retrieve the corresponding node tree socket.
|
||||
|
||||
:param blender_material: a blender material for which to get the socket
|
||||
:param name: the name of the socket
|
||||
:return: a blender NodeSocket
|
||||
"""
|
||||
if blender_material.node_tree and blender_material.use_nodes:
|
||||
#i = [input for input in blender_material.node_tree.inputs]
|
||||
#o = [output for output in blender_material.node_tree.outputs]
|
||||
if name == "Emissive":
|
||||
# Check for a dedicated Emission node first, it must supersede the newer built-in one
|
||||
# because the newer one is always present in all Principled BSDF materials.
|
||||
emissive_socket = get_node_socket(blender_material, bpy.types.ShaderNodeEmission, "Color")
|
||||
if emissive_socket:
|
||||
return emissive_socket
|
||||
# If a dedicated Emission node was not found, fall back to the Principled BSDF Emission Color socket.
|
||||
name = "Emission Color"
|
||||
type = bpy.types.ShaderNodeBsdfPrincipled
|
||||
elif name == "Background":
|
||||
type = bpy.types.ShaderNodeBackground
|
||||
name = "Color"
|
||||
else:
|
||||
if volume is False:
|
||||
type = bpy.types.ShaderNodeBsdfPrincipled
|
||||
else:
|
||||
type = bpy.types.ShaderNodeVolumeAbsorption
|
||||
|
||||
return get_node_socket(blender_material, type, name)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_socket_old(blender_material: bpy.types.Material, name: str):
|
||||
"""
|
||||
For a given material input name, retrieve the corresponding node tree socket in the special glTF node group.
|
||||
|
||||
:param blender_material: a blender material for which to get the socket
|
||||
:param name: the name of the socket
|
||||
:return: a blender NodeSocket
|
||||
"""
|
||||
gltf_node_group_names = [get_gltf_node_name().lower(), get_gltf_node_old_name().lower()]
|
||||
if blender_material.node_tree and blender_material.use_nodes:
|
||||
# Some weird node groups with missing datablock can have no node_tree, so checking n.node_tree (See #1797)
|
||||
nodes = [n for n in blender_material.node_tree.nodes if \
|
||||
isinstance(n, bpy.types.ShaderNodeGroup) and \
|
||||
n.node_tree is not None and
|
||||
(n.node_tree.name.startswith('glTF Metallic Roughness') or n.node_tree.name.lower() in gltf_node_group_names)]
|
||||
inputs = sum([[input for input in node.inputs if input.name == name] for node in nodes], [])
|
||||
if inputs:
|
||||
return inputs[0]
|
||||
|
||||
return None
|
||||
|
||||
def check_if_is_linked_to_active_output(shader_socket):
|
||||
for link in shader_socket.links:
|
||||
if isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial) and link.to_node.is_active_output is True:
|
||||
return True
|
||||
|
||||
if len(link.to_node.outputs) > 0: # ignore non active output, not having output sockets
|
||||
ret = check_if_is_linked_to_active_output(link.to_node.outputs[0]) # recursive until find an output material node
|
||||
if ret is True:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def find_shader_image_from_shader_socket(shader_socket, max_hops=10):
|
||||
"""Find any ShaderNodeTexImage in the path from the socket."""
|
||||
if shader_socket is None:
|
||||
return None
|
||||
|
||||
if max_hops <= 0:
|
||||
return None
|
||||
|
||||
for link in shader_socket.links:
|
||||
if isinstance(link.from_node, bpy.types.ShaderNodeTexImage):
|
||||
return link.from_node
|
||||
|
||||
for socket in link.from_node.inputs.values():
|
||||
image = find_shader_image_from_shader_socket(shader_socket=socket, max_hops=max_hops - 1)
|
||||
if image is not None:
|
||||
return image
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_texture_transform_from_mapping_node(mapping_node):
|
||||
if mapping_node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]:
|
||||
gltf2_io_debug.print_console("WARNING",
|
||||
"Skipping exporting texture transform because it had type " +
|
||||
mapping_node.vector_type + "; recommend using POINT instead"
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
rotation_0, rotation_1 = mapping_node.inputs['Rotation'].default_value[0], mapping_node.inputs['Rotation'].default_value[1]
|
||||
if rotation_0 or rotation_1:
|
||||
# TODO: can we handle this?
|
||||
gltf2_io_debug.print_console("WARNING",
|
||||
"Skipping exporting texture transform because it had non-zero "
|
||||
"rotations in the X/Y direction; only a Z rotation can be exported!"
|
||||
)
|
||||
return None
|
||||
|
||||
mapping_transform = {}
|
||||
mapping_transform["offset"] = [mapping_node.inputs['Location'].default_value[0], mapping_node.inputs['Location'].default_value[1]]
|
||||
mapping_transform["rotation"] = mapping_node.inputs['Rotation'].default_value[2]
|
||||
mapping_transform["scale"] = [mapping_node.inputs['Scale'].default_value[0], mapping_node.inputs['Scale'].default_value[1]]
|
||||
|
||||
if mapping_node.vector_type == "TEXTURE":
|
||||
# This means use the inverse of the TRS transform.
|
||||
def inverted(mapping_transform):
|
||||
offset = mapping_transform["offset"]
|
||||
rotation = mapping_transform["rotation"]
|
||||
scale = mapping_transform["scale"]
|
||||
|
||||
# Inverse of a TRS is not always a TRS. This function will be right
|
||||
# at least when the following don't occur.
|
||||
if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5:
|
||||
return None
|
||||
if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5:
|
||||
return None
|
||||
|
||||
new_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector((-offset[0], -offset[1], 1))
|
||||
new_offset[0] /= scale[0]; new_offset[1] /= scale[1]
|
||||
return {
|
||||
"offset": new_offset[0:2],
|
||||
"rotation": -rotation,
|
||||
"scale": [1/scale[0], 1/scale[1]],
|
||||
}
|
||||
|
||||
mapping_transform = inverted(mapping_transform)
|
||||
if mapping_transform is None:
|
||||
gltf2_io_debug.print_console("WARNING",
|
||||
"Skipping exporting texture transform with type TEXTURE because "
|
||||
"we couldn't convert it to TRS; recommend using POINT instead"
|
||||
)
|
||||
return None
|
||||
|
||||
elif mapping_node.vector_type == "VECTOR":
|
||||
# Vectors don't get translated
|
||||
mapping_transform["offset"] = [0, 0]
|
||||
|
||||
texture_transform = texture_transform_blender_to_gltf(mapping_transform)
|
||||
|
||||
if all([component == 0 for component in texture_transform["offset"]]):
|
||||
del(texture_transform["offset"])
|
||||
if all([component == 1 for component in texture_transform["scale"]]):
|
||||
del(texture_transform["scale"])
|
||||
if texture_transform["rotation"] == 0:
|
||||
del(texture_transform["rotation"])
|
||||
|
||||
if len(texture_transform) == 0:
|
||||
return None
|
||||
|
||||
return texture_transform
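
For reference, the inverted() helper above leans on the fact that the inverse of a 2D translate-rotate-scale is itself expressible as a TRS only in special cases (zero rotation or uniform scale, exactly what the early-outs check). Under those assumptions, the formulas it implements are:

```latex
% Inverse of a 2D texture TRS (o = offset, \theta = rotation, s = scale):
(T_o\,R_\theta\,S_s)^{-1} = S_{1/s}\,R_{-\theta}\,T_{-o}
\;\Longrightarrow\;
o' = S_{1/s}\,R_{-\theta}\,(-o), \qquad \theta' = -\theta, \qquad s' = \bigl(1/s_x,\; 1/s_y\bigr)
```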
|
||||
|
||||
|
||||
def get_node(data_path):
|
||||
"""Return Blender node on a given Blender data path."""
|
||||
if data_path is None:
|
||||
return None
|
||||
|
||||
index = data_path.find("[\"")
|
||||
if (index == -1):
|
||||
return None
|
||||
|
||||
node_name = data_path[(index + 2):]
|
||||
|
||||
index = node_name.find("\"")
|
||||
if (index == -1):
|
||||
return None
|
||||
|
||||
return node_name[:(index)]
|
||||
|
||||
|
||||
def get_factor_from_socket(socket, kind):
|
||||
"""
|
||||
For baseColorFactor, metallicFactor, etc.
|
||||
Get a constant value from a socket, or a constant value
|
||||
from a MULTIPLY node just before the socket.
|
||||
kind is either 'RGB' or 'VALUE'.
|
||||
"""
|
||||
fac = get_const_from_socket(socket, kind)
|
||||
if fac is not None:
|
||||
return fac
|
||||
|
||||
node = previous_node(socket)
|
||||
if node is not None:
|
||||
x1, x2 = None, None
|
||||
if kind == 'RGB':
|
||||
if node.type == 'MIX' and node.data_type == "RGBA" and node.blend_type == 'MULTIPLY':
|
||||
# TODO: handle factor in inputs[0]?
|
||||
x1 = get_const_from_socket(node.inputs[6], kind)
|
||||
x2 = get_const_from_socket(node.inputs[7], kind)
|
||||
if kind == 'VALUE':
|
||||
if node.type == 'MATH' and node.operation == 'MULTIPLY':
|
||||
x1 = get_const_from_socket(node.inputs[0], kind)
|
||||
x2 = get_const_from_socket(node.inputs[1], kind)
|
||||
if x1 is not None and x2 is None: return x1
|
||||
if x2 is not None and x1 is None: return x2
|
||||
|
||||
return None
|
||||
|
||||
def get_const_from_default_value_socket(socket, kind):
|
||||
if kind == 'RGB':
|
||||
if socket.type != 'RGBA': return None
|
||||
return list(socket.default_value)[:3]
|
||||
if kind == 'VALUE':
|
||||
if socket.type != 'VALUE': return None
|
||||
return socket.default_value
|
||||
return None
|
||||
|
||||
|
||||
def get_const_from_socket(socket, kind):
|
||||
if not socket.is_linked:
|
||||
if kind == 'RGB':
|
||||
if socket.type != 'RGBA': return None
|
||||
return list(socket.default_value)[:3]
|
||||
if kind == 'VALUE':
|
||||
if socket.type != 'VALUE': return None
|
||||
return socket.default_value
|
||||
|
||||
# Handle connection to a constant RGB/Value node
|
||||
prev_node = previous_node(socket)
|
||||
if prev_node is not None:
|
||||
if kind == 'RGB' and prev_node.type == 'RGB':
|
||||
return list(prev_node.outputs[0].default_value)[:3]
|
||||
if kind == 'VALUE' and prev_node.type == 'VALUE':
|
||||
return prev_node.outputs[0].default_value
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def previous_socket(socket):
|
||||
while True:
|
||||
if not socket.is_linked:
|
||||
return None
|
||||
|
||||
from_socket = socket.links[0].from_socket
|
||||
|
||||
# Skip over reroute nodes
|
||||
if from_socket.node.type == 'REROUTE':
|
||||
socket = from_socket.node.inputs[0]
|
||||
continue
|
||||
|
||||
return from_socket
|
||||
|
||||
|
||||
def previous_node(socket):
|
||||
prev_socket = previous_socket(socket)
|
||||
if prev_socket is not None:
|
||||
return prev_socket.node
|
||||
return None
|
||||
|
||||
def get_tex_from_socket(socket):
|
||||
result = gltf2_blender_search_node_tree.from_socket(
|
||||
socket,
|
||||
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeTexImage))
|
||||
if not result:
|
||||
return None
|
||||
if result[0].shader_node.image is None:
|
||||
return None
|
||||
return result[0]
|
||||
|
||||
def has_image_node_from_socket(socket):
|
||||
return get_tex_from_socket(socket) is not None
|
||||
|
||||
def image_tex_is_valid_from_socket(socket):
|
||||
res = get_tex_from_socket(socket)
|
||||
return res is not None and res.shader_node.image is not None and res.shader_node.image.channels != 0
|
||||
|
@ -117,7 +117,7 @@ class GlTF2Exporter:
|
||||
f.write(self.__buffer.to_bytes())
|
||||
uri = buffer_name
|
||||
else:
|
||||
uri = self.__buffer.to_embed_string()
|
||||
pass # This is no longer possible; we don't export embedded buffers
|
||||
|
||||
buffer = gltf2_io.Buffer(
|
||||
byte_length=self.__buffer.byte_length,
|
||||
@ -320,6 +320,20 @@ class GlTF2Exporter:
|
||||
len_ = len([i for i in self.nodes_idx_to_remove if i < skin.skeleton])
|
||||
skin.skeleton = skin.skeleton - len_
|
||||
|
||||
# Remove animation channels that were targeting a node that will be removed
|
||||
new_animation_list = []
|
||||
for animation in self.__gltf.animations:
|
||||
new_channel_list = []
|
||||
for channel in animation.channels:
|
||||
if channel.target.node not in self.nodes_idx_to_remove:
|
||||
new_channel_list.append(channel)
|
||||
animation.channels = new_channel_list
|
||||
if len(animation.channels) > 0:
|
||||
new_animation_list.append(animation)
|
||||
self.__gltf.animations = new_animation_list
|
||||
|
||||
#TODO: remove unused animation accessors?
|
||||
|
||||
# And now really remove nodes
|
||||
self.__gltf.nodes = [node for idx, node in enumerate(self.__gltf.nodes) if idx not in self.nodes_idx_to_remove]
|
||||
|
||||
|
@ -4,8 +4,8 @@
|
||||
|
||||
import bpy
|
||||
from .....io.com.gltf2_io_extensions import Extension
|
||||
from ....exp import gltf2_blender_get
|
||||
from ...material import gltf2_blender_gather_texture_info
|
||||
from ..gltf2_blender_search_node_tree import has_image_node_from_socket, get_socket, get_factor_from_socket
|
||||
|
||||
def export_clearcoat(blender_material, export_settings):
|
||||
clearcoat_enabled = False
|
||||
@ -15,15 +15,15 @@ def export_clearcoat(blender_material, export_settings):
|
||||
clearcoat_extension = {}
|
||||
clearcoat_roughness_slots = ()
|
||||
|
||||
clearcoat_socket = gltf2_blender_get.get_socket(blender_material, 'Coat Weight')
|
||||
clearcoat_roughness_socket = gltf2_blender_get.get_socket(blender_material, 'Coat Roughness')
|
||||
clearcoat_normal_socket = gltf2_blender_get.get_socket(blender_material, 'Coat Normal')
|
||||
clearcoat_socket = get_socket(blender_material, 'Coat Weight')
|
||||
clearcoat_roughness_socket = get_socket(blender_material, 'Coat Roughness')
|
||||
clearcoat_normal_socket = get_socket(blender_material, 'Coat Normal')
|
||||
|
||||
if isinstance(clearcoat_socket, bpy.types.NodeSocket) and not clearcoat_socket.is_linked:
|
||||
clearcoat_extension['clearcoatFactor'] = clearcoat_socket.default_value
|
||||
if isinstance(clearcoat_socket.socket, bpy.types.NodeSocket) and not clearcoat_socket.socket.is_linked:
|
||||
clearcoat_extension['clearcoatFactor'] = clearcoat_socket.socket.default_value
|
||||
clearcoat_enabled = clearcoat_extension['clearcoatFactor'] > 0
|
||||
elif gltf2_blender_get.has_image_node_from_socket(clearcoat_socket):
|
||||
fac = gltf2_blender_get.get_factor_from_socket(clearcoat_socket, kind='VALUE')
|
||||
elif has_image_node_from_socket(clearcoat_socket, export_settings):
|
||||
fac = get_factor_from_socket(clearcoat_socket, kind='VALUE')
|
||||
# default value in glTF is 0.0, but if there is a texture without factor, use 1
|
||||
clearcoat_extension['clearcoatFactor'] = fac if fac != None else 1.0
|
||||
has_clearcoat_texture = True
|
||||
@ -32,10 +32,10 @@ def export_clearcoat(blender_material, export_settings):
|
||||
if not clearcoat_enabled:
|
||||
return None, {}
|
||||
|
||||
if isinstance(clearcoat_roughness_socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.is_linked:
|
||||
clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.default_value
|
||||
elif gltf2_blender_get.has_image_node_from_socket(clearcoat_roughness_socket):
|
||||
fac = gltf2_blender_get.get_factor_from_socket(clearcoat_roughness_socket, kind='VALUE')
|
||||
if isinstance(clearcoat_roughness_socket.socket, bpy.types.NodeSocket) and not clearcoat_roughness_socket.socket.is_linked:
|
||||
clearcoat_extension['clearcoatRoughnessFactor'] = clearcoat_roughness_socket.socket.default_value
|
||||
elif has_image_node_from_socket(clearcoat_roughness_socket, export_settings):
|
||||
fac = get_factor_from_socket(clearcoat_roughness_socket, kind='VALUE')
|
||||
# default value in glTF is 0.0, but if there is a texture without factor, use 1
|
||||
clearcoat_extension['clearcoatRoughnessFactor'] = fac if fac != None else 1.0
|
||||
has_clearcoat_roughness_texture = True
|
||||
@ -71,7 +71,7 @@ def export_clearcoat(blender_material, export_settings):
|
||||
clearcoat_extension['clearcoatRoughnessTexture'] = clearcoat_roughness_texture
|
||||
uvmap_infos.update({'clearcoatRoughnessTexture': uvmap_info})
|
||||
|
||||
if gltf2_blender_get.has_image_node_from_socket(clearcoat_normal_socket):
|
||||
if has_image_node_from_socket(clearcoat_normal_socket, export_settings):
|
||||
clearcoat_normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class(
|
||||
clearcoat_normal_socket,
|
||||
(clearcoat_normal_socket,),
|
||||
|
@ -4,20 +4,26 @@
|
||||
|
||||
import bpy
|
||||
from .....io.com.gltf2_io_extensions import Extension
|
||||
from ....exp import gltf2_blender_get
|
||||
from ...material import gltf2_blender_gather_texture_info
|
||||
from ..gltf2_blender_search_node_tree import \
|
||||
get_const_from_default_value_socket, \
|
||||
get_socket, \
|
||||
get_factor_from_socket, \
|
||||
get_const_from_socket, \
|
||||
NodeSocket, \
|
||||
get_socket_from_gltf_material_node
|
||||
|
||||
def export_emission_factor(blender_material, export_settings):
|
||||
emissive_socket = gltf2_blender_get.get_socket(blender_material, "Emissive")
|
||||
if emissive_socket is None:
|
||||
emissive_socket = gltf2_blender_get.get_socket_old(blender_material, "EmissiveFactor")
|
||||
if isinstance(emissive_socket, bpy.types.NodeSocket):
|
||||
emissive_socket = get_socket(blender_material, "Emissive")
|
||||
if emissive_socket.socket is None:
|
||||
emissive_socket = get_socket_from_gltf_material_node(blender_material, "EmissiveFactor")
|
||||
if isinstance(emissive_socket.socket, bpy.types.NodeSocket):
|
||||
if export_settings['gltf_image_format'] != "NONE":
|
||||
factor = gltf2_blender_get.get_factor_from_socket(emissive_socket, kind='RGB')
|
||||
factor = get_factor_from_socket(emissive_socket, kind='RGB')
|
||||
else:
|
||||
factor = gltf2_blender_get.get_const_from_default_value_socket(emissive_socket, kind='RGB')
|
||||
factor = get_const_from_default_value_socket(emissive_socket, kind='RGB')
|
||||
|
||||
if factor is None and emissive_socket.is_linked:
|
||||
if factor is None and emissive_socket.socket.is_linked:
|
||||
# In glTF, the default emissiveFactor is all zeros, so if an emission texture is connected,
|
||||
# we have to manually set it to all ones.
|
||||
factor = [1.0, 1.0, 1.0]
|
||||
@ -26,12 +32,12 @@ def export_emission_factor(blender_material, export_settings):
|
||||
|
||||
# Handle Emission Strength
|
||||
strength_socket = None
|
||||
if emissive_socket.node.type == 'EMISSION':
|
||||
strength_socket = emissive_socket.node.inputs['Strength']
|
||||
elif 'Emission Strength' in emissive_socket.node.inputs:
|
||||
strength_socket = emissive_socket.node.inputs['Emission Strength']
|
||||
if emissive_socket.socket.node.type == 'EMISSION':
|
||||
strength_socket = emissive_socket.socket.node.inputs['Strength']
|
||||
elif 'Emission Strength' in emissive_socket.socket.node.inputs:
|
||||
strength_socket = emissive_socket.socket.node.inputs['Emission Strength']
|
||||
strength = (
|
||||
gltf2_blender_get.get_const_from_socket(strength_socket, kind='VALUE')
|
||||
get_const_from_socket(NodeSocket(strength_socket, emissive_socket.group_path), kind='VALUE')
|
||||
if strength_socket is not None
|
||||
else None
|
||||
)
|
||||
@ -49,9 +55,9 @@ def export_emission_factor(blender_material, export_settings):
|
||||
return None
|
||||
|
||||
def export_emission_texture(blender_material, export_settings):
|
||||
emissive = gltf2_blender_get.get_socket(blender_material, "Emissive")
|
||||
if emissive is None:
|
||||
emissive = gltf2_blender_get.get_socket_old(blender_material, "Emissive")
|
||||
emissive = get_socket(blender_material, "Emissive")
|
||||
if emissive.socket is None:
|
||||
emissive = get_socket_from_gltf_material_node(blender_material, "Emissive")
|
||||
emissive_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(emissive, (emissive,), (), export_settings)
|
||||
return emissive_texture, {'emissiveTexture': uvmap_info}
|
||||
|
||||
|
@ -4,20 +4,20 @@
|
||||
|
||||
from .....io.com.gltf2_io_extensions import Extension
|
||||
from .....io.com.gltf2_io_constants import GLTF_IOR
|
||||
from ....exp import gltf2_blender_get
|
||||
from ..gltf2_blender_search_node_tree import get_socket
|
||||
|
||||
def export_ior(blender_material, extensions, export_settings):
|
||||
ior_socket = gltf2_blender_get.get_socket(blender_material, 'IOR')
|
||||
ior_socket = get_socket(blender_material, 'IOR')
|
||||
|
||||
if not ior_socket:
|
||||
if not ior_socket.socket:
|
||||
return None
|
||||
|
||||
# We don't handle the case where the socket is linked; always check the default value
|
||||
if ior_socket.is_linked:
|
||||
if ior_socket.socket.is_linked:
|
||||
# TODOExt: add warning?
|
||||
return None
|
||||
|
||||
if ior_socket.default_value == GLTF_IOR:
|
||||
if ior_socket.socket.default_value == GLTF_IOR:
|
||||
return None
|
||||
|
||||
# Export only if the following extensions are exported:
|
||||
@ -31,6 +31,6 @@ def export_ior(blender_material, extensions, export_settings):
|
||||
return None
|
||||
|
||||
ior_extension = {}
|
||||
ior_extension['ior'] = ior_socket.default_value
|
||||
ior_extension['ior'] = ior_socket.socket.default_value
|
||||
|
||||
return Extension('KHR_materials_ior', ior_extension, False)
|
||||
|
@ -4,47 +4,48 @@
|
||||
|
||||
import bpy
|
||||
from .....io.com.gltf2_io_extensions import Extension
|
||||
from ....exp import gltf2_blender_get
|
||||
from ...material import gltf2_blender_gather_texture_info
|
||||
from ..gltf2_blender_search_node_tree import \
|
||||
has_image_node_from_socket, \
|
||||
get_socket, \
|
||||
get_factor_from_socket
|
||||
|
||||
|
||||
def export_sheen(blender_material, export_settings):
|
||||
sheen_extension = {}
|
||||
|
||||
sheenTint_socket = gltf2_blender_get.get_socket(blender_material, "Sheen Tint")
|
||||
sheenRoughness_socket = gltf2_blender_get.get_socket(blender_material, "Sheen Roughness")
|
||||
sheen_socket = gltf2_blender_get.get_socket(blender_material, "Sheen Weight")
|
||||
sheenTint_socket = get_socket(blender_material, "Sheen Tint")
|
||||
sheenRoughness_socket = get_socket(blender_material, "Sheen Roughness")
|
||||
sheen_socket = get_socket(blender_material, "Sheen Weight")
|
||||
|
||||
if sheenTint_socket is None or sheenRoughness_socket is None or sheen_socket is None:
|
||||
if sheenTint_socket.socket is None or sheenRoughness_socket.socket is None or sheen_socket.socket is None:
|
||||
return None, {}
|
||||
|
||||
if sheen_socket.is_linked is False and sheen_socket.default_value == 0.0:
|
||||
if sheen_socket.socket.is_linked is False and sheen_socket.socket.default_value == 0.0:
|
||||
return None, {}
|
||||
|
||||
uvmap_infos = {}
|
||||
|
||||
#TODOExt : What to do if sheen_socket is linked? or is not between 0 and 1?
|
||||
|
||||
sheenTint_non_linked = isinstance(sheenTint_socket, bpy.types.NodeSocket) and not sheenTint_socket.is_linked
|
||||
sheenRoughness_non_linked = isinstance(sheenRoughness_socket, bpy.types.NodeSocket) and not sheenRoughness_socket.is_linked
|
||||
sheenTint_non_linked = isinstance(sheenTint_socket.socket, bpy.types.NodeSocket) and not sheenTint_socket.socket.is_linked
|
||||
sheenRoughness_non_linked = isinstance(sheenRoughness_socket.socket, bpy.types.NodeSocket) and not sheenRoughness_socket.socket.is_linked
|
||||
|
||||
|
||||
use_actives_uvmaps = []
|
||||
|
||||
if sheenTint_non_linked is True:
|
||||
color = sheenTint_socket.default_value[:3]
|
||||
color = sheenTint_socket.socket.default_value[:3]
|
||||
if color != (0.0, 0.0, 0.0):
|
||||
sheen_extension['sheenColorFactor'] = color
|
||||
else:
|
||||
# Factor
|
||||
fac = gltf2_blender_get.get_factor_from_socket(sheenTint_socket, kind='RGB')
|
||||
fac = get_factor_from_socket(sheenTint_socket, kind='RGB')
|
||||
if fac is None:
|
||||
fac = [1.0, 1.0, 1.0] # Default is 0.0/0.0/0.0, so we need to set it to 1 if no factor
|
||||
if fac is not None and fac != [0.0, 0.0, 0.0]:
|
||||
sheen_extension['sheenColorFactor'] = fac
|
||||
|
||||
# Texture
|
||||
if gltf2_blender_get.has_image_node_from_socket(sheenTint_socket):
|
||||
if has_image_node_from_socket(sheenTint_socket, export_settings):
|
||||
original_sheenColor_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
|
||||
sheenTint_socket,
|
||||
(sheenTint_socket,),
|
||||
@ -55,19 +56,19 @@ def export_sheen(blender_material, export_settings):
|
||||
uvmap_infos.update({'sheenColorTexture': uvmap_info})
|
||||
|
||||
if sheenRoughness_non_linked is True:
|
||||
fac = sheenRoughness_socket.default_value
|
||||
fac = sheenRoughness_socket.socket.default_value
|
||||
if fac != 0.0:
|
||||
sheen_extension['sheenRoughnessFactor'] = fac
|
||||
else:
|
||||
# Factor
|
||||
fac = gltf2_blender_get.get_factor_from_socket(sheenRoughness_socket, kind='VALUE')
|
||||
fac = get_factor_from_socket(sheenRoughness_socket, kind='VALUE')
|
||||
if fac is None:
|
||||
fac = 1.0 # Default is 0.0 so we need to set it to 1.0 if no factor
|
||||
if fac is not None and fac != 0.0:
|
||||
sheen_extension['sheenRoughnessFactor'] = fac
|
||||
|
||||
# Texture
|
||||
if gltf2_blender_get.has_image_node_from_socket(sheenRoughness_socket):
|
||||
if has_image_node_from_socket(sheenRoughness_socket, export_settings):
|
||||
original_sheenRoughness_texture, uvmap_info , _ = gltf2_blender_gather_texture_info.gather_texture_info(
|
||||
sheenRoughness_socket,
|
||||
(sheenRoughness_socket,),
|
||||
|
@ -4,68 +4,96 @@
|
||||
|
||||
import bpy
|
||||
from .....io.com.gltf2_io_extensions import Extension
|
||||
from ....exp import gltf2_blender_get
|
||||
from ...material.gltf2_blender_gather_texture_info import gather_texture_info
|
||||
from ..gltf2_blender_search_node_tree import \
|
||||
has_image_node_from_socket, \
|
||||
get_socket, \
|
||||
get_factor_from_socket
|
||||
|
||||
|
||||
def export_specular(blender_material, export_settings):
|
||||
specular_extension = {}
|
||||
extensions_needed = False
|
||||
|
||||
specular_socket = gltf2_blender_get.get_socket(blender_material, 'Specular IOR Level')
|
||||
speculartint_socket = gltf2_blender_get.get_socket(blender_material, 'Specular Tint')
|
||||
specular_socket = get_socket(blender_material, 'Specular IOR Level')
|
||||
speculartint_socket = get_socket(blender_material, 'Specular Tint')
|
||||
|
||||
if specular_socket is None or speculartint_socket is None:
|
||||
if specular_socket.socket is None or speculartint_socket.socket is None:
|
||||
return None, {}
|
||||
|
||||
uvmap_infos = {}
|
||||
|
||||
specular_non_linked = isinstance(specular_socket, bpy.types.NodeSocket) and not specular_socket.is_linked
|
||||
specularcolor_non_linked = isinstance(speculartint_socket, bpy.types.NodeSocket) and not speculartint_socket.is_linked
|
||||
specular_non_linked = isinstance(specular_socket.socket, bpy.types.NodeSocket) and not specular_socket.socket.is_linked
|
||||
specularcolor_non_linked = isinstance(speculartint_socket.socket, bpy.types.NodeSocket) and not speculartint_socket.socket.is_linked
|
||||
|
||||
if specular_non_linked is True:
|
||||
fac = specular_socket.default_value
|
||||
if fac != 1.0:
|
||||
fac = specular_socket.socket.default_value
|
||||
fac = fac * 2.0
|
||||
if fac < 1.0:
|
||||
specular_extension['specularFactor'] = fac
|
||||
if fac == 0.0:
|
||||
return None, {}
|
||||
extensions_needed = True
|
||||
elif fac > 1.0:
|
||||
# glTF specularFactor should be <= 1.0, so we multiply specularColorFactor by specularFactor and set specularFactor to 1.0 (default value)
|
||||
extensions_needed = True
|
||||
else:
|
||||
pass # If fac == 1.0, no need to export specularFactor, the default value is 1.0
|
||||
|
||||
else:
|
||||
# Factor
|
||||
fac = gltf2_blender_get.get_factor_from_socket(specular_socket, kind='VALUE')
|
||||
fac = get_factor_from_socket(specular_socket, kind='VALUE')
|
||||
if fac is not None and fac != 1.0:
|
||||
fac = fac * 2.0 if fac is not None else None
|
||||
if fac is not None and fac < 1.0:
|
||||
specular_extension['specularFactor'] = fac
|
||||
|
||||
if fac == 0.0:
|
||||
return None, {}
|
||||
extensions_needed = True
|
||||
elif fac is not None and fac > 1.0:
|
||||
# glTF specularFactor should be <= 1.0, so we multiply specularColorFactor by specularFactor and set specularFactor to 1.0 (default value)
|
||||
extensions_needed = True
|
||||
|
||||
# Texture
|
||||
if gltf2_blender_get.has_image_node_from_socket(specular_socket):
|
||||
original_specular_texture, uvmap_info, _ = gather_texture_info(
|
||||
if has_image_node_from_socket(specular_socket, export_settings):
|
||||
specular_texture, uvmap_info, _ = gather_texture_info(
|
||||
specular_socket,
|
||||
(specular_socket,),
|
||||
(),
|
||||
export_settings,
|
||||
)
|
||||
specular_extension['specularTexture'] = original_specular_texture
|
||||
specular_extension['specularTexture'] = specular_texture
|
||||
uvmap_infos.update({'specularTexture': uvmap_info})
|
||||
extensions_needed = True
|
||||
|
||||
if specularcolor_non_linked is True:
|
||||
color = speculartint_socket.default_value[:3]
|
||||
color = speculartint_socket.socket.default_value[:3]
|
||||
if fac is not None and fac > 1.0:
|
||||
color = (color[0] * fac, color[1] * fac, color[2] * fac)
|
||||
specular_extension['specularColorFactor'] = color if color != (1.0, 1.0, 1.0) else None
|
||||
if color != (1.0, 1.0, 1.0):
|
||||
specular_extension['specularColorFactor'] = color
|
||||
extensions_needed = True
|
||||
|
||||
else:
|
||||
# Factor
|
||||
fac = gltf2_blender_get.get_factor_from_socket(speculartint_socket, kind='RGB')
|
||||
if fac is not None and fac != (1.0, 1.0, 1.0):
|
||||
specular_extension['specularColorFactor'] = fac
|
||||
fac_color = get_factor_from_socket(speculartint_socket, kind='RGB')
|
||||
if fac_color is not None and fac is not None and fac > 1.0:
|
||||
fac_color = (fac_color[0] * fac, fac_color[1] * fac, fac_color[2] * fac)
|
||||
elif fac_color is None and fac is not None and fac > 1.0:
|
||||
fac_color = (fac, fac, fac)
|
||||
specular_extension['specularColorFactor'] = fac_color if fac_color != (1.0, 1.0, 1.0) else None
|
||||
if fac_color != (1.0, 1.0, 1.0):
|
||||
extensions_needed = True
|
||||
|
||||
# Texture
|
||||
if gltf2_blender_get.has_image_node_from_socket(speculartint_socket):
|
||||
original_specularcolor_texture, uvmap_info, _ = gather_texture_info(
|
||||
if has_image_node_from_socket(speculartint_socket, export_settings):
|
||||
specularcolor_texture, uvmap_info, _ = gather_texture_info(
|
||||
speculartint_socket,
|
||||
(speculartint_socket,),
|
||||
(),
|
||||
export_settings,
|
||||
)
|
||||
specular_extension['specularColorTexture'] = original_specularcolor_texture
|
||||
specular_extension['specularColorTexture'] = specularcolor_texture
|
||||
uvmap_infos.update({'specularColorTexture': uvmap_info})
|
||||
extensions_needed = True
|
||||
|
||||
if extensions_needed is False:
|
||||
return None, {}
|
||||
|
||||
return Extension('KHR_materials_specular', specular_extension, False), uvmap_infos
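
As I read the new specular export: Blender's 'Specular IOR Level' (default 0.5) is doubled to get a glTF-style factor; anything above 1.0 cannot go into specularFactor (the extension caps it at 1.0), so the excess is folded into specularColorFactor instead, and a factor of exactly 0 drops the extension. A rough standalone sketch of that scalar mapping, my own simplification rather than the add-on's API:

```python
# Illustrative mapping from Blender's Specular IOR Level to KHR_materials_specular factors.
def blender_specular_to_gltf(specular_ior_level, color=(1.0, 1.0, 1.0)):
    fac = specular_ior_level * 2.0
    if fac == 0.0:
        return None                                # no specular extension at all
    if fac <= 1.0:
        return {"specularFactor": fac, "specularColorFactor": color}
    # fac > 1.0: clamp the scalar and push the excess into the color factor
    return {"specularFactor": 1.0,
            "specularColorFactor": tuple(c * fac for c in color)}

assert blender_specular_to_gltf(0.5) == {"specularFactor": 1.0, "specularColorFactor": (1.0, 1.0, 1.0)}
assert blender_specular_to_gltf(0.25)["specularFactor"] == 0.5
assert blender_specular_to_gltf(1.0)["specularColorFactor"] == (2.0, 2.0, 2.0)
```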
|
||||
|
@ -4,8 +4,11 @@
|
||||
|
||||
import bpy
|
||||
from .....io.com.gltf2_io_extensions import Extension
|
||||
from ....exp import gltf2_blender_get
|
||||
from ...material import gltf2_blender_gather_texture_info
|
||||
from ..gltf2_blender_search_node_tree import \
|
||||
has_image_node_from_socket, \
|
||||
get_socket, \
|
||||
get_factor_from_socket
|
||||
|
||||
def export_transmission(blender_material, export_settings):
|
||||
transmission_enabled = False
|
||||
@ -14,13 +17,13 @@ def export_transmission(blender_material, export_settings):
|
||||
transmission_extension = {}
|
||||
transmission_slots = ()
|
||||
|
||||
transmission_socket = gltf2_blender_get.get_socket(blender_material, 'Transmission Weight')
|
||||
transmission_socket = get_socket(blender_material, 'Transmission Weight')
|
||||
|
||||
if isinstance(transmission_socket, bpy.types.NodeSocket) and not transmission_socket.is_linked:
|
||||
transmission_extension['transmissionFactor'] = transmission_socket.default_value
|
||||
if isinstance(transmission_socket.socket, bpy.types.NodeSocket) and not transmission_socket.socket.is_linked:
|
||||
transmission_extension['transmissionFactor'] = transmission_socket.socket.default_value
|
||||
transmission_enabled = transmission_extension['transmissionFactor'] > 0
|
||||
elif gltf2_blender_get.has_image_node_from_socket(transmission_socket):
|
||||
fac = gltf2_blender_get.get_factor_from_socket(transmission_socket, kind='VALUE')
|
||||
elif has_image_node_from_socket(transmission_socket, export_settings):
|
||||
fac = get_factor_from_socket(transmission_socket, kind='VALUE')
|
||||
transmission_extension['transmissionFactor'] = fac if fac is not None else 1.0
|
||||
has_transmission_texture = True
|
||||
transmission_enabled = True
|
||||
|
@ -4,8 +4,13 @@
|
||||
|
||||
import bpy
|
||||
from .....io.com.gltf2_io_extensions import Extension
|
||||
from ....exp import gltf2_blender_get
|
||||
from ...material import gltf2_blender_gather_texture_info
|
||||
from ..gltf2_blender_search_node_tree import \
|
||||
has_image_node_from_socket, \
|
||||
get_const_from_default_value_socket, \
|
||||
get_socket_from_gltf_material_node, \
|
||||
get_socket, \
|
||||
get_factor_from_socket
|
||||
|
||||
|
||||
def export_volume(blender_material, export_settings):
|
||||
@ -13,10 +18,10 @@ def export_volume(blender_material, export_settings):
|
||||
|
||||
# If no transmission --> No volume
|
||||
transmission_enabled = False
|
||||
transmission_socket = gltf2_blender_get.get_socket(blender_material, 'Transmission Weight')
|
||||
if isinstance(transmission_socket, bpy.types.NodeSocket) and not transmission_socket.is_linked:
|
||||
transmission_enabled = transmission_socket.default_value > 0
|
||||
elif gltf2_blender_get.has_image_node_from_socket(transmission_socket):
|
||||
transmission_socket = get_socket(blender_material, 'Transmission Weight')
|
||||
if isinstance(transmission_socket.socket, bpy.types.NodeSocket) and not transmission_socket.socket.is_linked:
|
||||
transmission_enabled = transmission_socket.socket.default_value > 0
|
||||
elif has_image_node_from_socket(transmission_socket, export_settings):
|
||||
transmission_enabled = True
|
||||
|
||||
if transmission_enabled is False:
|
||||
@ -27,43 +32,43 @@ def export_volume(blender_material, export_settings):
|
||||
thickness_slots = ()
|
||||
uvmap_info = {}
|
||||
|
||||
thicknesss_socket = gltf2_blender_get.get_socket_old(blender_material, 'Thickness')
|
||||
if thicknesss_socket is None:
|
||||
thickness_socket = get_socket_from_gltf_material_node(blender_material, 'Thickness')
|
||||
if thickness_socket.socket is None:
|
||||
# If no thickness (here because there is no glTF Material Output node), no volume extension export
|
||||
return None, {}
|
||||
|
||||
density_socket = gltf2_blender_get.get_socket(blender_material, 'Density', volume=True)
|
||||
attenuation_color_socket = gltf2_blender_get.get_socket(blender_material, 'Color', volume=True)
|
||||
density_socket = get_socket(blender_material, 'Density', volume=True)
|
||||
attenuation_color_socket = get_socket(blender_material, 'Color', volume=True)
|
||||
# Even if density or attenuation are not set, we export volume extension
|
||||
|
||||
if isinstance(attenuation_color_socket, bpy.types.NodeSocket):
|
||||
rgb = gltf2_blender_get.get_const_from_default_value_socket(attenuation_color_socket, kind='RGB')
|
||||
if isinstance(attenuation_color_socket.socket, bpy.types.NodeSocket):
|
||||
rgb = get_const_from_default_value_socket(attenuation_color_socket, kind='RGB')
|
||||
volume_extension['attenuationColor'] = rgb
|
||||
|
||||
if isinstance(density_socket, bpy.types.NodeSocket):
|
||||
density = gltf2_blender_get.get_const_from_default_value_socket(density_socket, kind='VALUE')
|
||||
if isinstance(density_socket.socket, bpy.types.NodeSocket):
|
||||
density = get_const_from_default_value_socket(density_socket, kind='VALUE')
|
||||
volume_extension['attenuationDistance'] = 1.0 / density if density != 0 else None # infinity (Using None as glTF default)
|
||||
|
||||
|
||||
if isinstance(thicknesss_socket, bpy.types.NodeSocket) and not thicknesss_socket.is_linked:
|
||||
val = thicknesss_socket.default_value
|
||||
if isinstance(thickness_socket.socket, bpy.types.NodeSocket) and not thickness_socket.socket.is_linked:
|
||||
val = thickness_socket.socket.default_value
|
||||
if val == 0.0:
|
||||
# If no thickness, no volume extension export
|
||||
return None, {}
|
||||
volume_extension['thicknessFactor'] = val
|
||||
elif gltf2_blender_get.has_image_node_from_socket(thicknesss_socket):
|
||||
fac = gltf2_blender_get.get_factor_from_socket(thicknesss_socket, kind='VALUE')
|
||||
elif has_image_node_from_socket(thickness_socket, export_settings):
|
||||
fac = get_factor_from_socket(thickness_socket, kind='VALUE')
|
||||
# default value in glTF is 0.0, but if there is a texture without factor, use 1
|
||||
volume_extension['thicknessFactor'] = fac if fac != None else 1.0
|
||||
has_thickness_texture = True
|
||||
|
||||
# Pack thickness channel (R).
|
||||
# Pack thickness channel (G).
|
||||
if has_thickness_texture:
|
||||
thickness_slots = (thicknesss_socket,)
|
||||
thickness_slots = (thickness_socket,)
|
||||
|
||||
if len(thickness_slots) > 0:
|
||||
combined_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_texture_info(
|
||||
thicknesss_socket,
|
||||
thickness_socket,
|
||||
thickness_slots,
|
||||
(),
|
||||
export_settings,
|
||||
|
@ -13,7 +13,7 @@ from ....io.com import gltf2_io_debug
|
||||
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
|
||||
from ..gltf2_blender_gather_cache import cached
|
||||
from .extensions.gltf2_blender_image import Channel, ExportImage, FillImage
|
||||
from ..gltf2_blender_get import get_tex_from_socket
|
||||
from .gltf2_blender_search_node_tree import get_texture_node_from_socket, NodeSocket
|
||||
|
||||
@cached
|
||||
def gather_image(
|
||||
@ -59,7 +59,7 @@ def gather_image(
|
||||
|
||||
export_user_extensions('gather_image_hook', export_settings, image, blender_shader_sockets)
|
||||
|
||||
# We also return image_data, as it can be used to generate same file with another extension for webp management
|
||||
# We also return image_data, as it can be used to generate same file with another extension for WebP management
|
||||
return image, image_data, factor
|
||||
|
||||
def __gather_original_uri(original_uri, export_settings):
|
||||
@ -114,11 +114,11 @@ def __gather_extras(sockets, export_settings):
|
||||
def __gather_mime_type(sockets, export_image, export_settings):
|
||||
# force PNG or WebP if an Alpha channel is present, so we can export alpha
|
||||
for socket in sockets:
|
||||
if socket.name == "Alpha":
|
||||
if socket.socket.name == "Alpha":
|
||||
if export_settings["gltf_image_format"] == "WEBP":
|
||||
return "image/webp"
|
||||
else:
|
||||
# If we keep image as is (no channel composition), we need to keep original format (for webp)
|
||||
# If we keep image as is (no channel composition), we need to keep original format (for WebP)
|
||||
image = export_image.blender_image()
|
||||
if image is not None and __is_blender_image_a_webp(image):
|
||||
return "image/webp"
|
||||
@ -191,7 +191,7 @@ def __get_image_data(sockets, default_sockets, export_settings) -> ExportImage:
|
||||
# For shared resources, such as images, we just store the portion of data that is needed in the glTF property
|
||||
# in a helper class. During generation of the glTF in the exporter these will then be combined to actual binary
|
||||
# resources.
|
||||
results = [get_tex_from_socket(socket) for socket in sockets]
|
||||
results = [get_texture_node_from_socket(socket, export_settings) for socket in sockets]
|
||||
|
||||
# Check if we need a simple mapping or more complex calculation
|
||||
# There is currently no complex calculation for any textures
|
||||
@ -222,7 +222,7 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
|
||||
|
||||
else:
|
||||
# rudimentarily try to follow the node tree to find the correct image data.
|
||||
src_chan = Channel.R
|
||||
src_chan = None
|
||||
for elem in result.path:
|
||||
if isinstance(elem.from_node, bpy.types.ShaderNodeSeparateColor):
|
||||
src_chan = {
|
||||
@ -233,26 +233,55 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
|
||||
if elem.from_socket.name == 'Alpha':
|
||||
src_chan = Channel.A
|
||||
|
||||
|
||||
if src_chan is None:
|
||||
# No SeparateColor node found, so take the specification channel that is needed
|
||||
# So export is correct if user plug the texture directly to the socket
|
||||
if socket.socket.name == 'Metallic':
|
||||
src_chan = Channel.B
|
||||
elif socket.socket.name == 'Roughness':
|
||||
src_chan = Channel.G
|
||||
elif socket.socket.name == 'Occlusion':
|
||||
src_chan = Channel.R
|
||||
elif socket.socket.name == 'Alpha':
|
||||
src_chan = Channel.A
|
||||
elif socket.socket.name == 'Coat Weight':
|
||||
src_chan = Channel.R
|
||||
elif socket.socket.name == 'Coat Roughness':
|
||||
src_chan = Channel.G
|
||||
elif socket.socket.name == 'Thickness': # For KHR_materials_volume
|
||||
src_chan = Channel.G
|
||||
|
||||
if src_chan is None:
|
||||
# Seems we can't find the channel
|
||||
# We are in a case where the user plugged a texture into a Color socket, but we may have used the alpha channel
|
||||
if socket.socket.name in ["Alpha", "Specular IOR Level", "Sheen Roughness"]:
|
||||
src_chan = Channel.A
|
||||
|
||||
if src_chan is None:
|
||||
# We definitely can't find the channel, so keep the first channel even if this is wrong
|
||||
src_chan = Channel.R
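
The fallback chain above amounts to a per-socket default channel table, consulted only when no SeparateColor node pins the source channel explicitly. A compact sketch of the same rules (socket names as in the Principled BSDF, plus the glTF-specific Thickness input):

```python
# Default source channel per socket name when nothing in the node tree selects one.
DEFAULT_SRC_CHANNEL = {
    'Metallic': 'B',
    'Roughness': 'G',
    'Occlusion': 'R',
    'Alpha': 'A',
    'Coat Weight': 'R',
    'Coat Roughness': 'G',
    'Thickness': 'G',             # KHR_materials_volume
    'Specular IOR Level': 'A',    # second-level fallback in the code above
    'Sheen Roughness': 'A',       # second-level fallback in the code above
}

def default_src_channel(socket_name):
    return DEFAULT_SRC_CHANNEL.get(socket_name, 'R')   # last resort is R

assert default_src_channel('Roughness') == 'G'
assert default_src_channel('Base Color') == 'R'
```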
|
||||
|
||||
dst_chan = None
|
||||
|
||||
# some sockets need channel rewriting (gltf pbr defines fixed channels for some attributes)
|
||||
if socket.name == 'Metallic':
|
||||
if socket.socket.name == 'Metallic':
|
||||
dst_chan = Channel.B
|
||||
elif socket.name == 'Roughness':
|
||||
elif socket.socket.name == 'Roughness':
|
||||
dst_chan = Channel.G
|
||||
elif socket.name == 'Occlusion':
|
||||
elif socket.socket.name == 'Occlusion':
|
||||
dst_chan = Channel.R
|
||||
elif socket.name == 'Alpha':
|
||||
elif socket.socket.name == 'Alpha':
|
||||
dst_chan = Channel.A
|
||||
elif socket.name == 'Coat Weight':
|
||||
elif socket.socket.name == 'Coat Weight':
|
||||
dst_chan = Channel.R
|
||||
elif socket.name == 'Coat Roughness':
|
||||
elif socket.socket.name == 'Coat Roughness':
|
||||
dst_chan = Channel.G
|
||||
elif socket.name == 'Thickness': # For KHR_materials_volume
|
||||
elif socket.socket.name == 'Thickness': # For KHR_materials_volume
|
||||
dst_chan = Channel.G
|
||||
elif socket.name == "Specular IOR Level": # For KHR_material_specular
|
||||
elif socket.socket.name == "Specular IOR Level": # For KHR_material_specular
|
||||
dst_chan = Channel.A
|
||||
elif socket.name == "Sheen Roughness": # For KHR_materials_sheen
|
||||
elif socket.socket.name == "Sheen Roughness": # For KHR_materials_sheen
|
||||
dst_chan = Channel.A
|
||||
|
||||
if dst_chan is not None:
|
||||
@ -260,12 +289,12 @@ def __get_image_data_mapping(sockets, default_sockets, results, export_settings)
|
||||
|
||||
# Since metal/roughness are always used together, make sure
|
||||
# the other channel is filled.
|
||||
if socket.name == 'Metallic' and not composed_image.is_filled(Channel.G):
|
||||
if socket.socket.name == 'Metallic' and not composed_image.is_filled(Channel.G):
|
||||
if default_roughness is not None:
|
||||
composed_image.fill_with(Channel.G, default_roughness)
|
||||
else:
|
||||
composed_image.fill_white(Channel.G)
|
||||
elif socket.name == 'Roughness' and not composed_image.is_filled(Channel.B):
|
||||
elif socket.socket.name == 'Roughness' and not composed_image.is_filled(Channel.B):
|
||||
if default_metallic is not None:
|
||||
composed_image.fill_with(Channel.B, default_metallic)
|
||||
else:
|
||||
|
@ -10,7 +10,6 @@ from ....io.com.gltf2_io_extensions import Extension
|
||||
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
|
||||
from ....io.com.gltf2_io_debug import print_console
|
||||
from ...com.gltf2_blender_extras import generate_extras
|
||||
from ...exp import gltf2_blender_get
|
||||
from ..gltf2_blender_gather_cache import cached, cached_by_key
|
||||
from . import gltf2_blender_gather_materials_unlit
|
||||
from . import gltf2_blender_gather_texture_info
|
||||
@ -23,6 +22,11 @@ from .extensions.gltf2_blender_gather_materials_specular import export_specular
|
||||
from .extensions.gltf2_blender_gather_materials_transmission import export_transmission
|
||||
from .extensions.gltf2_blender_gather_materials_clearcoat import export_clearcoat
|
||||
from .extensions.gltf2_blender_gather_materials_ior import export_ior
|
||||
from .gltf2_blender_search_node_tree import \
|
||||
has_image_node_from_socket, \
|
||||
get_socket_from_gltf_material_node, \
|
||||
get_socket, \
|
||||
get_node_socket
|
||||
|
||||
@cached
|
||||
def get_material_cache_key(blender_material, export_settings):
|
||||
@ -90,7 +94,7 @@ def gather_material(blender_material, export_settings):
|
||||
# If emissive is set, from an emissive node (not PBR)
|
||||
# We need to set manually default values for
|
||||
# pbr_metallic_roughness.baseColor
|
||||
if material.emissive_factor is not None and gltf2_blender_get.get_node_socket(blender_material, bpy.types.ShaderNodeBsdfPrincipled, "Base Color") is None:
|
||||
if material.emissive_factor is not None and get_node_socket(blender_material, bpy.types.ShaderNodeBsdfPrincipled, "Base Color").socket is None:
|
||||
material.pbr_metallic_roughness = gltf2_blender_gather_materials_pbr_metallic_roughness.get_default_pbr_for_emissive_node()
|
||||
|
||||
export_user_extensions('gather_material_hook', export_settings, material, blender_material)
|
||||
@ -143,12 +147,6 @@ def __gather_double_sided(blender_material, extensions, export_settings):
|
||||
|
||||
if not blender_material.use_backface_culling:
|
||||
return True
|
||||
|
||||
old_double_sided_socket = gltf2_blender_get.get_socket_old(blender_material, "DoubleSided")
|
||||
if old_double_sided_socket is not None and\
|
||||
not old_double_sided_socket.is_linked and\
|
||||
old_double_sided_socket.default_value > 0.5:
|
||||
return True
|
||||
return None
|
||||
|
||||
|
||||
@ -222,9 +220,7 @@ def __gather_name(blender_material, export_settings):
|
||||
|
||||
|
||||
def __gather_normal_texture(blender_material, export_settings):
|
||||
normal = gltf2_blender_get.get_socket(blender_material, "Normal")
|
||||
if normal is None:
|
||||
normal = gltf2_blender_get.get_socket_old(blender_material, "Normal")
|
||||
normal = get_socket(blender_material, "Normal")
|
||||
normal_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_normal_texture_info_class(
|
||||
normal,
|
||||
(normal,),
|
||||
@ -236,35 +232,37 @@ def __gather_orm_texture(blender_material, export_settings):
# Check for the presence of Occlusion, Roughness, Metallic sharing a single image.
# If not fully shared, return None, so the images will be cached and processed separately.

occlusion = gltf2_blender_get.get_socket(blender_material, "Occlusion")
|
||||
if occlusion is None or not gltf2_blender_get.has_image_node_from_socket(occlusion):
|
||||
occlusion = gltf2_blender_get.get_socket_old(blender_material, "Occlusion")
|
||||
if occlusion is None or not gltf2_blender_get.has_image_node_from_socket(occlusion):
|
||||
occlusion = get_socket(blender_material, "Occlusion")
|
||||
if occlusion.socket is None or not has_image_node_from_socket(occlusion, export_settings):
|
||||
occlusion = get_socket_from_gltf_material_node(blender_material, "Occlusion")
|
||||
if occlusion.socket is None or not has_image_node_from_socket(occlusion, export_settings):
|
||||
return None, None
|
||||
|
||||
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic")
|
||||
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness")
|
||||
metallic_socket = get_socket(blender_material, "Metallic")
|
||||
roughness_socket = get_socket(blender_material, "Roughness")
|
||||
|
||||
hasMetal = metallic_socket is not None and gltf2_blender_get.has_image_node_from_socket(metallic_socket)
|
||||
hasRough = roughness_socket is not None and gltf2_blender_get.has_image_node_from_socket(roughness_socket)
|
||||
hasMetal = metallic_socket.socket is not None and has_image_node_from_socket(metallic_socket, export_settings)
|
||||
hasRough = roughness_socket.socket is not None and has_image_node_from_socket(roughness_socket, export_settings)
|
||||
|
||||
default_sockets = ()
# Warning: for the default sockets, do not use the NodeSocket wrapper object, because it will break the cache.
# Use the Blender socket object directly.
if not hasMetal and not hasRough:
metallic_roughness = gltf2_blender_get.get_socket_old(blender_material, "MetallicRoughness")
|
||||
if metallic_roughness is None or not gltf2_blender_get.has_image_node_from_socket(metallic_roughness):
|
||||
metallic_roughness = get_socket_from_gltf_material_node(blender_material, "MetallicRoughness")
|
||||
if metallic_roughness.socket is None or not has_image_node_from_socket(metallic_roughness, export_settings):
|
||||
return None, default_sockets
|
||||
result = (occlusion, metallic_roughness)
|
||||
elif not hasMetal:
|
||||
result = (occlusion, roughness_socket)
|
||||
default_sockets = (metallic_socket,)
|
||||
default_sockets = (metallic_socket.socket,)
|
||||
elif not hasRough:
|
||||
result = (occlusion, metallic_socket)
|
||||
default_sockets = (roughness_socket,)
|
||||
default_sockets = (roughness_socket.socket,)
|
||||
else:
|
||||
result = (occlusion, roughness_socket, metallic_socket)
|
||||
default_sockets = ()
|
||||
|
||||
if not gltf2_blender_gather_texture_info.check_same_size_images(result):
|
||||
if not gltf2_blender_gather_texture_info.check_same_size_images(result, export_settings):
|
||||
print_console("INFO",
|
||||
"Occlusion and metal-roughness texture will be exported separately "
|
||||
"(use same-sized images if you want them combined)")
|
||||
@ -278,9 +276,9 @@ def __gather_orm_texture(blender_material, export_settings):
|
||||
return result, default_sockets
|
||||
|
||||
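For context on the same-size check above: the packed ORM image uses the fixed channel layout defined by the glTF 2.0 specification, which is also why the three inputs can only be combined when their images have identical dimensions. Schematically:

# glTF ORM packing (per the glTF 2.0 specification):
#   R -> occlusion
#   G -> roughness
#   B -> metallic
# Occlusion, roughness and metallic are merged into one image only when all
# source images share the same size; otherwise they are exported separately.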
def __gather_occlusion_texture(blender_material, orm_texture, default_sockets, export_settings):
|
||||
occlusion = gltf2_blender_get.get_socket(blender_material, "Occlusion")
|
||||
if occlusion is None:
|
||||
occlusion = gltf2_blender_get.get_socket_old(blender_material, "Occlusion")
|
||||
occlusion = get_socket(blender_material, "Occlusion")
|
||||
if occlusion.socket is None:
|
||||
occlusion = get_socket_from_gltf_material_node(blender_material, "Occlusion")
|
||||
occlusion_texture, uvmap_info, _ = gltf2_blender_gather_texture_info.gather_material_occlusion_texture_info_class(
|
||||
occlusion,
|
||||
orm_texture or (occlusion,),
|
||||
|
@ -4,13 +4,18 @@
|
||||
|
||||
|
||||
import bpy
|
||||
|
||||
from ....io.com import gltf2_io
|
||||
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
|
||||
from ...exp import gltf2_blender_get
|
||||
from ..gltf2_blender_gather_cache import cached
|
||||
from ..gltf2_blender_get import image_tex_is_valid_from_socket
|
||||
from .gltf2_blender_search_node_tree import get_vertex_color_info
|
||||
from .gltf2_blender_gather_texture_info import gather_texture_info
|
||||
from .gltf2_blender_search_node_tree import \
|
||||
get_socket_from_gltf_material_node, \
|
||||
has_image_node_from_socket, \
|
||||
get_const_from_default_value_socket, \
|
||||
get_socket, \
|
||||
get_factor_from_socket
|
||||
|
||||
@cached
|
||||
def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings):
|
||||
@ -49,23 +54,23 @@ def __gather_base_color_factor(blender_material, export_settings):
|
||||
|
||||
rgb, alpha = None, None
|
||||
|
||||
alpha_socket = gltf2_blender_get.get_socket(blender_material, "Alpha")
|
||||
if isinstance(alpha_socket, bpy.types.NodeSocket):
|
||||
alpha_socket = get_socket(blender_material, "Alpha")
|
||||
if isinstance(alpha_socket.socket, bpy.types.NodeSocket):
|
||||
if export_settings['gltf_image_format'] != "NONE":
|
||||
alpha = gltf2_blender_get.get_factor_from_socket(alpha_socket, kind='VALUE')
|
||||
alpha = get_factor_from_socket(alpha_socket, kind='VALUE')
|
||||
else:
|
||||
alpha = gltf2_blender_get.get_const_from_default_value_socket(alpha_socket, kind='VALUE')
|
||||
alpha = get_const_from_default_value_socket(alpha_socket, kind='VALUE')
|
||||
|
||||
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Base Color")
|
||||
base_color_socket = get_socket(blender_material, "Base Color")
|
||||
if base_color_socket.socket is None:
|
||||
base_color_socket = get_socket(blender_material, "BaseColor")
|
||||
if base_color_socket is None:
|
||||
base_color_socket = gltf2_blender_get.get_socket(blender_material, "BaseColor")
|
||||
if base_color_socket is None:
|
||||
base_color_socket = gltf2_blender_get.get_socket_old(blender_material, "BaseColorFactor")
|
||||
if isinstance(base_color_socket, bpy.types.NodeSocket):
|
||||
base_color_socket = get_socket_from_gltf_material_node(blender_material, "BaseColorFactor")
|
||||
if isinstance(base_color_socket.socket, bpy.types.NodeSocket):
|
||||
if export_settings['gltf_image_format'] != "NONE":
|
||||
rgb = gltf2_blender_get.get_factor_from_socket(base_color_socket, kind='RGB')
|
||||
rgb = get_factor_from_socket(base_color_socket, kind='RGB')
|
||||
else:
|
||||
rgb = gltf2_blender_get.get_const_from_default_value_socket(base_color_socket, kind='RGB')
|
||||
rgb = get_const_from_default_value_socket(base_color_socket, kind='RGB')
|
||||
|
||||
if rgb is None: rgb = [1.0, 1.0, 1.0]
|
||||
if alpha is None: alpha = 1.0
|
||||
@ -80,18 +85,18 @@ def __gather_base_color_factor(blender_material, export_settings):
|
||||
|
||||
|
||||
def __gather_base_color_texture(blender_material, export_settings):
|
||||
base_color_socket = gltf2_blender_get.get_socket(blender_material, "Base Color")
|
||||
base_color_socket = get_socket(blender_material, "Base Color")
|
||||
if base_color_socket.socket is None:
|
||||
base_color_socket = get_socket(blender_material, "BaseColor")
|
||||
if base_color_socket is None:
|
||||
base_color_socket = gltf2_blender_get.get_socket(blender_material, "BaseColor")
|
||||
if base_color_socket is None:
|
||||
base_color_socket = gltf2_blender_get.get_socket_old(blender_material, "BaseColor")
|
||||
base_color_socket = get_socket_from_gltf_material_node(blender_material, "BaseColor")
|
||||
|
||||
alpha_socket = gltf2_blender_get.get_socket(blender_material, "Alpha")
|
||||
alpha_socket = get_socket(blender_material, "Alpha")
|
||||
|
||||
# Keep sockets that have a texture: color and/or alpha
inputs = tuple(
socket for socket in [base_color_socket, alpha_socket]
if socket is not None and image_tex_is_valid_from_socket(socket)
if socket.socket is not None and has_image_node_from_socket(socket, export_settings)
)
if not inputs:
|
||||
return None, {}, {"uv_info": {}, "vc_info": {}}, None
|
||||
@ -113,34 +118,35 @@ def __gather_metallic_factor(blender_material, export_settings):
|
||||
if not blender_material.use_nodes:
|
||||
return blender_material.metallic
|
||||
|
||||
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic")
|
||||
metallic_socket = get_socket(blender_material, "Metallic")
|
||||
if metallic_socket is None:
|
||||
metallic_socket = gltf2_blender_get.get_socket_old(blender_material, "MetallicFactor")
|
||||
if isinstance(metallic_socket, bpy.types.NodeSocket):
|
||||
fac = gltf2_blender_get.get_factor_from_socket(metallic_socket, kind='VALUE')
|
||||
metallic_socket = get_socket_from_gltf_material_node(blender_material, "MetallicFactor")
|
||||
if isinstance(metallic_socket.socket, bpy.types.NodeSocket):
|
||||
fac = get_factor_from_socket(metallic_socket, kind='VALUE')
|
||||
return fac if fac != 1 else None
|
||||
return None
|
||||
|
||||
|
||||
def __gather_metallic_roughness_texture(blender_material, orm_texture, export_settings):
|
||||
metallic_socket = gltf2_blender_get.get_socket(blender_material, "Metallic")
|
||||
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness")
|
||||
metallic_socket = get_socket(blender_material, "Metallic")
|
||||
roughness_socket = get_socket(blender_material, "Roughness")
|
||||
|
||||
hasMetal = metallic_socket is not None and image_tex_is_valid_from_socket(metallic_socket)
|
||||
hasRough = roughness_socket is not None and image_tex_is_valid_from_socket(roughness_socket)
|
||||
hasMetal = metallic_socket.socket is not None and has_image_node_from_socket(metallic_socket, export_settings)
|
||||
hasRough = roughness_socket.socket is not None and has_image_node_from_socket(roughness_socket, export_settings)
|
||||
|
||||
default_sockets = ()
# Warning: for the default sockets, do not use the NodeSocket wrapper object, because it will break the cache.
# Use the Blender socket object directly.
if not hasMetal and not hasRough:
metallic_roughness = gltf2_blender_get.get_socket_old(blender_material, "MetallicRoughness")
|
||||
if metallic_roughness is None or not image_tex_is_valid_from_socket(metallic_roughness):
|
||||
metallic_roughness = get_socket_from_gltf_material_node(blender_material, "MetallicRoughness")
|
||||
if metallic_roughness is None or not has_image_node_from_socket(metallic_roughness, export_settings):
|
||||
return None, {}, None
|
||||
texture_input = (metallic_roughness,)
|
||||
elif not hasMetal:
|
||||
texture_input = (roughness_socket,)
|
||||
default_sockets = (metallic_socket,)
|
||||
default_sockets = (metallic_socket.socket,)
|
||||
elif not hasRough:
|
||||
texture_input = (metallic_socket,)
|
||||
default_sockets = (roughness_socket,)
|
||||
default_sockets = (roughness_socket.socket,)
|
||||
else:
|
||||
texture_input = (metallic_socket, roughness_socket)
|
||||
default_sockets = ()
|
||||
@ -158,11 +164,11 @@ def __gather_roughness_factor(blender_material, export_settings):
|
||||
if not blender_material.use_nodes:
|
||||
return blender_material.roughness
|
||||
|
||||
roughness_socket = gltf2_blender_get.get_socket(blender_material, "Roughness")
|
||||
roughness_socket = get_socket(blender_material, "Roughness")
|
||||
if roughness_socket is None:
|
||||
roughness_socket = gltf2_blender_get.get_socket_old(blender_material, "RoughnessFactor")
|
||||
if isinstance(roughness_socket, bpy.types.NodeSocket):
|
||||
fac = gltf2_blender_get.get_factor_from_socket(roughness_socket, kind='VALUE')
|
||||
roughness_socket = get_socket_from_gltf_material_node(blender_material, "RoughnessFactor")
|
||||
if isinstance(roughness_socket.socket, bpy.types.NodeSocket):
|
||||
fac = get_factor_from_socket(roughness_socket, kind='VALUE')
|
||||
return fac if fac != 1 else None
|
||||
return None
|
||||
|
||||
|
@ -2,10 +2,14 @@
|
||||
#
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
from ....io.com.gltf2_io_extensions import Extension
|
||||
from ...exp import gltf2_blender_get
|
||||
from . import gltf2_blender_gather_texture_info
|
||||
from .gltf2_blender_search_node_tree import get_vertex_color_info
|
||||
from .gltf2_blender_search_node_tree import \
|
||||
get_socket, \
|
||||
NodeSocket, \
|
||||
previous_socket, \
|
||||
previous_node, \
|
||||
get_factor_from_socket
|
||||
|
||||
def detect_shadeless_material(blender_material, export_settings):
|
||||
"""Detect if this material is "shadeless" ie. should be exported
|
||||
@ -15,8 +19,8 @@ def detect_shadeless_material(blender_material, export_settings):
|
||||
if not blender_material.use_nodes: return None
|
||||
|
||||
# Old Background node detection (unlikely to happen)
|
||||
bg_socket = gltf2_blender_get.get_socket(blender_material, "Background")
|
||||
if bg_socket is not None:
|
||||
bg_socket = get_socket(blender_material, "Background")
|
||||
if bg_socket.socket is not None:
|
||||
return {'rgb_socket': bg_socket}
|
||||
|
||||
# Look for
|
||||
@ -27,6 +31,7 @@ def detect_shadeless_material(blender_material, export_settings):
|
||||
|
||||
info = {}
|
||||
|
||||
#TODOSNode this can be a function call
|
||||
for node in blender_material.node_tree.nodes:
|
||||
if node.type == 'OUTPUT_MATERIAL' and node.is_active_output:
|
||||
socket = node.inputs[0]
|
||||
@ -34,6 +39,8 @@ def detect_shadeless_material(blender_material, export_settings):
|
||||
else:
|
||||
return None
|
||||
|
||||
socket = NodeSocket(socket, [blender_material])
|
||||
|
||||
# Be careful not to misidentify a lightpath trick as mix-alpha.
|
||||
result = __detect_lightpath_trick(socket)
|
||||
if result is not None:
|
||||
@ -49,10 +56,10 @@ def detect_shadeless_material(blender_material, export_settings):
|
||||
socket = result['next_socket']
|
||||
|
||||
# Check if a color socket, or connected to a color socket
|
||||
if socket.type != 'RGBA':
|
||||
from_socket = gltf2_blender_get.previous_socket(socket)
|
||||
if from_socket is None: return None
|
||||
if from_socket.type != 'RGBA': return None
|
||||
if socket.socket.type != 'RGBA':
|
||||
from_socket = previous_socket(socket)
|
||||
if from_socket.socket is None: return None
|
||||
if from_socket.socket.type != 'RGBA': return None
|
||||
|
||||
info['rgb_socket'] = socket
|
||||
return info
|
||||
@ -68,13 +75,13 @@ def __detect_mix_alpha(socket):
|
||||
#
|
||||
# Returns None if not detected. Otherwise, a dict containing alpha_socket
|
||||
# and next_socket.
|
||||
prev = gltf2_blender_get.previous_node(socket)
|
||||
if prev is None or prev.type != 'MIX_SHADER': return None
|
||||
in1 = gltf2_blender_get.previous_node(prev.inputs[1])
|
||||
if in1 is None or in1.type != 'BSDF_TRANSPARENT': return None
|
||||
prev = previous_node(socket)
|
||||
if prev.node is None or prev.node.type != 'MIX_SHADER': return None
|
||||
in1 = previous_node(NodeSocket(prev.node.inputs[1], prev.group_path))
|
||||
if in1.node is None or in1.node.type != 'BSDF_TRANSPARENT': return None
|
||||
return {
|
||||
'alpha_socket': prev.inputs[0],
|
||||
'next_socket': prev.inputs[2],
|
||||
'alpha_socket': NodeSocket(prev.node.inputs[0], prev.group_path),
|
||||
'next_socket': NodeSocket(prev.node.inputs[2], prev.group_path),
|
||||
}
|
||||
|
||||
|
||||
@ -90,17 +97,17 @@ def __detect_lightpath_trick(socket):
|
||||
# The Emission node can be omitted.
|
||||
# Returns None if not detected. Otherwise, a dict containing
|
||||
# next_socket.
|
||||
prev = gltf2_blender_get.previous_node(socket)
|
||||
if prev is None or prev.type != 'MIX_SHADER': return None
|
||||
in0 = gltf2_blender_get.previous_socket(prev.inputs[0])
|
||||
if in0 is None or in0.node.type != 'LIGHT_PATH': return None
|
||||
if in0.name != 'Is Camera Ray': return None
|
||||
next_socket = prev.inputs[2]
|
||||
prev = previous_node(socket)
|
||||
if prev.node is None or prev.node.type != 'MIX_SHADER': return None
|
||||
in0 = previous_socket(NodeSocket(prev.node.inputs[0], prev.group_path))
|
||||
if in0.socket is None or in0.socket.node.type != 'LIGHT_PATH': return None
|
||||
if in0.socket.name != 'Is Camera Ray': return None
|
||||
next_socket = NodeSocket(prev.node.inputs[2], prev.group_path)
|
||||
|
||||
# Detect emission
|
||||
prev = gltf2_blender_get.previous_node(next_socket)
|
||||
if prev is not None and prev.type == 'EMISSION':
|
||||
next_socket = prev.inputs[0]
|
||||
prev = previous_node(next_socket)
|
||||
if prev.node is not None and prev.node.type == 'EMISSION':
|
||||
next_socket = NodeSocket(prev.node.inputs[0], prev.group_path)
|
||||
|
||||
return {'next_socket': next_socket}
|
||||
|
||||
@ -109,9 +116,9 @@ def gather_base_color_factor(info, export_settings):
|
||||
rgb, alpha = None, None
|
||||
|
||||
if 'rgb_socket' in info:
|
||||
rgb = gltf2_blender_get.get_factor_from_socket(info['rgb_socket'], kind='RGB')
|
||||
rgb = get_factor_from_socket(info['rgb_socket'], kind='RGB')
|
||||
if 'alpha_socket' in info:
|
||||
alpha = gltf2_blender_get.get_factor_from_socket(info['alpha_socket'], kind='VALUE')
|
||||
alpha = get_factor_from_socket(info['alpha_socket'], kind='VALUE')
|
||||
|
||||
if rgb is None: rgb = [1.0, 1.0, 1.0]
|
||||
if alpha is None: alpha = 1.0
|
||||
@ -122,8 +129,8 @@ def gather_base_color_factor(info, export_settings):
|
||||
|
||||
|
||||
def gather_base_color_texture(info, export_settings):
|
||||
sockets = (info.get('rgb_socket'), info.get('alpha_socket'))
|
||||
sockets = tuple(s for s in sockets if s is not None)
|
||||
sockets = (info.get('rgb_socket', NodeSocket(None, None)), info.get('alpha_socket', NodeSocket(None, None)))
|
||||
sockets = tuple(s for s in sockets if s.socket is not None)
|
||||
if sockets:
|
||||
# NOTE: separate RGB and Alpha textures will not get combined
|
||||
# because gather_image determines how to pack images based on the
|
||||
|
@ -4,21 +4,23 @@
|
||||
|
||||
import typing
|
||||
import bpy
|
||||
from ....io.com import gltf2_io_debug
|
||||
|
||||
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
|
||||
from ....io.com.gltf2_io_extensions import Extension
|
||||
from ....io.exp.gltf2_io_image_data import ImageData
|
||||
from ....io.exp.gltf2_io_binary_data import BinaryData
|
||||
from ....io.com import gltf2_io_debug
|
||||
from ....io.com import gltf2_io
|
||||
from ..gltf2_blender_gather_cache import cached
|
||||
from ..gltf2_blender_gather_sampler import gather_sampler
|
||||
from ..gltf2_blender_get import get_tex_from_socket
|
||||
from ..gltf2_blender_gather_cache import cached
|
||||
from .gltf2_blender_search_node_tree import get_texture_node_from_socket, NodeSocket
|
||||
from . import gltf2_blender_gather_image
|
||||
|
||||
|
||||
@cached
|
||||
def gather_texture(
|
||||
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
|
||||
default_sockets: typing.Tuple[bpy.types.NodeSocket],
|
||||
default_sockets,
|
||||
export_settings):
|
||||
"""
|
||||
Gather texture sampling information and image channels from a blender shader texture attached to a shader socket.
|
||||
@ -70,7 +72,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
|
||||
|
||||
ext_webp = {}
|
||||
|
||||
# If user want to keep original textures, and these textures are webp, we need to remove source from
|
||||
# If user want to keep original textures, and these textures are WebP, we need to remove source from
|
||||
# gltf2_io.Texture, and populate extension
|
||||
if export_settings['gltf_keep_original_textures'] is True \
|
||||
and source is not None \
|
||||
@ -79,19 +81,19 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
|
||||
remove_source = True
|
||||
required = True
|
||||
|
||||
# If user want to export in webp format (so without fallback in png/jpg)
|
||||
# If user want to export in WebP format (so without fallback in png/jpg)
|
||||
if export_settings['gltf_image_format'] == "WEBP":
|
||||
# We create all image without fallback
|
||||
ext_webp["source"] = source
|
||||
remove_source = True
|
||||
required = True
|
||||
|
||||
# If user doesn't want to export in webp format, but want webp too. Texture is not webp
|
||||
# If user doesn't want to export in WebP format, but want WebP too. Texture is not WebP
|
||||
if export_settings['gltf_image_format'] != "WEBP" \
|
||||
and export_settings['gltf_add_webp'] \
|
||||
and source is not None \
|
||||
and source.mime_type != "image/webp":
|
||||
# We need here to create some webp textures
|
||||
# We need here to create some WebP textures
|
||||
|
||||
new_mime_type = "image/webp"
|
||||
new_data, _ = image_data.encode(new_mime_type, export_settings)
|
||||
@ -116,7 +118,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
|
||||
ext_webp["source"] = webp_image
|
||||
|
||||
|
||||
# If user doesn't want to export in webp format, but want webp too. Texture is webp
|
||||
# If user doesn't want to export in WebP format, but want WebP too. Texture is WebP
|
||||
if export_settings['gltf_image_format'] != "WEBP" \
|
||||
and source is not None \
|
||||
and source.mime_type == "image/webp":
|
||||
@ -127,7 +129,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
|
||||
remove_source = True
|
||||
required = True
|
||||
|
||||
# If user doesn't want to export in webp format, but want webp too as fallback. Texture is webp
|
||||
# If user doesn't want to export in webp format, but want WebP too as fallback. Texture is WebP
|
||||
if export_settings['gltf_image_format'] != "WEBP" \
|
||||
and webp_image is not None \
|
||||
and export_settings['gltf_webp_fallback'] is True:
|
||||
@ -164,14 +166,33 @@ def __gather_name(blender_shader_sockets, export_settings):
|
||||
|
||||
|
||||
def __gather_sampler(blender_shader_sockets, export_settings):
|
||||
shader_nodes = [get_tex_from_socket(socket) for socket in blender_shader_sockets]
|
||||
shader_nodes = [get_texture_node_from_socket(socket, export_settings) for socket in blender_shader_sockets]
|
||||
if len(shader_nodes) > 1:
|
||||
gltf2_io_debug.print_console("WARNING",
|
||||
"More than one shader node tex image used for a texture. "
|
||||
"The resulting glTF sampler will behave like the first shader node tex image.")
|
||||
first_valid_shader_node = next(filter(lambda x: x is not None, shader_nodes)).shader_node
first_valid_shader_node = next(filter(lambda x: x is not None, shader_nodes))

# group_path can't be a list, so transform it to str

sep_item = "##~~gltf-sep~~##"
sep_inside_item = "##~~gltf-inside-sep~~##"
group_path_str = ""
if len(first_valid_shader_node.group_path) > 0:
group_path_str += first_valid_shader_node.group_path[0].name
if len(first_valid_shader_node.group_path) > 1:
for idx, i in enumerate(first_valid_shader_node.group_path[1:]):
group_path_str += sep_item
if idx == 0:
group_path_str += first_valid_shader_node.group_path[0].name
else:
group_path_str += i.id_data.name
group_path_str += sep_inside_item
group_path_str += i.name

return gather_sampler(
first_valid_shader_node,
first_valid_shader_node.shader_node,
group_path_str,
export_settings)

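To see what the serialized group path looks like, here is a small illustrative round of the same algorithm in plain Python; only sep_item and sep_inside_item match the patch, all names are made up:

sep_item = "##~~gltf-sep~~##"
sep_inside_item = "##~~gltf-inside-sep~~##"

material_name = "MyMaterial"
# (node.id_data.name, node.name) pairs for a texture nested two node groups deep.
groups = [("MyMaterial", "Outer Group"), ("Outer Group Tree", "Inner Group")]

group_path_str = material_name
for idx, (tree_name, node_name) in enumerate(groups):
    group_path_str += sep_item
    group_path_str += material_name if idx == 0 else tree_name
    group_path_str += sep_inside_item
    group_path_str += node_name

print(group_path_str)
# MyMaterial##~~gltf-sep~~##MyMaterial##~~gltf-inside-sep~~##Outer Group##~~gltf-sep~~##Outer Group Tree##~~gltf-inside-sep~~##Inner Group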
@ -209,7 +230,7 @@ def __gather_source(blender_shader_sockets, default_sockets, export_settings):
|
||||
|
||||
png_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings)
|
||||
|
||||
# We inverted the png & webp image, to have the png as main source
|
||||
# We inverted the png & WebP image, to have the png as main source
|
||||
return png_image, source, image_data, factor
|
||||
return source, None, image_data, factor
|
||||
|
||||
|
@ -7,12 +7,17 @@ import typing
|
||||
from ....io.com import gltf2_io
|
||||
from ....io.com.gltf2_io_extensions import Extension
|
||||
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
|
||||
from ...exp import gltf2_blender_get
|
||||
from ..gltf2_blender_get import previous_node, get_tex_from_socket
|
||||
from ..gltf2_blender_gather_sampler import detect_manual_uv_wrapping
|
||||
from ..gltf2_blender_gather_cache import cached
|
||||
from . import gltf2_blender_gather_texture
|
||||
from . import gltf2_blender_search_node_tree
|
||||
from .gltf2_blender_search_node_tree import \
|
||||
get_texture_node_from_socket, \
|
||||
from_socket, \
|
||||
FilterByType, \
|
||||
previous_node, \
|
||||
get_const_from_socket, \
|
||||
NodeSocket, \
|
||||
get_texture_transform_from_mapping_node
|
||||
|
||||
# blender_shader_sockets determine the texture and primary_socket determines
|
||||
# the textransform and UVMap. Ex: when combining an ORM texture, for
|
||||
@ -37,7 +42,7 @@ def gather_material_occlusion_texture_info_class(primary_socket, blender_shader_
|
||||
def __gather_texture_info_helper(
|
||||
primary_socket: bpy.types.NodeSocket,
|
||||
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
|
||||
default_sockets: typing.Tuple[bpy.types.NodeSocket],
|
||||
default_sockets,
|
||||
kind: str,
|
||||
filter_type: str,
|
||||
export_settings):
|
||||
@ -77,7 +82,7 @@ def __gather_texture_info_helper(
|
||||
def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, export_settings):
|
||||
if primary_socket is None:
|
||||
return False
|
||||
if get_tex_from_socket(primary_socket) is None:
|
||||
if get_texture_node_from_socket(primary_socket, export_settings) is None:
|
||||
return False
|
||||
if not blender_shader_sockets:
|
||||
return False
|
||||
@ -85,12 +90,12 @@ def __filter_texture_info(primary_socket, blender_shader_sockets, filter_type, e
|
||||
return False
|
||||
if filter_type == "ALL":
|
||||
# Check that all sockets link to texture
|
||||
if any([get_tex_from_socket(socket) is None for socket in blender_shader_sockets]):
|
||||
if any([get_texture_node_from_socket(socket, export_settings) is None for socket in blender_shader_sockets]):
|
||||
# sockets do not lead to a texture --> discard
|
||||
return False
|
||||
elif filter_type == "ANY":
|
||||
# Check that at least one socket link to texture
|
||||
if all([get_tex_from_socket(socket) is None for socket in blender_shader_sockets]):
|
||||
if all([get_texture_node_from_socket(socket, export_settings) is None for socket in blender_shader_sockets]):
|
||||
return False
|
||||
elif filter_type == "NONE":
|
||||
# No check
|
||||
@ -112,9 +117,9 @@ def __gather_extras(blender_shader_sockets, export_settings):
|
||||
|
||||
# MaterialNormalTextureInfo only
|
||||
def __gather_normal_scale(primary_socket, export_settings):
|
||||
result = gltf2_blender_search_node_tree.from_socket(
|
||||
result = from_socket(
|
||||
primary_socket,
|
||||
gltf2_blender_search_node_tree.FilterByType(bpy.types.ShaderNodeNormalMap))
|
||||
FilterByType(bpy.types.ShaderNodeNormalMap))
|
||||
if not result:
|
||||
return None
|
||||
strengthInput = result[0].shader_node.inputs['Strength']
|
||||
@ -127,11 +132,11 @@ def __gather_normal_scale(primary_socket, export_settings):
|
||||
def __gather_occlusion_strength(primary_socket, export_settings):
|
||||
# Look for a MixRGB node that mixes with pure white in front of
|
||||
# primary_socket. The mix factor gives the occlusion strength.
|
||||
node = gltf2_blender_get.previous_node(primary_socket)
|
||||
if node and node.type == 'MIX' and node.blend_type == 'MIX':
|
||||
fac = gltf2_blender_get.get_const_from_socket(node.inputs['Factor'], kind='VALUE')
|
||||
col1 = gltf2_blender_get.get_const_from_socket(node.inputs[6], kind='RGB')
|
||||
col2 = gltf2_blender_get.get_const_from_socket(node.inputs[7], kind='RGB')
|
||||
node = previous_node(primary_socket)
|
||||
if node and node.node.type == 'MIX' and node.node.blend_type == 'MIX':
|
||||
fac = get_const_from_socket(NodeSocket(node.node.inputs['Factor'], node.group_path), kind='VALUE')
|
||||
col1 = get_const_from_socket(NodeSocket(node.node.inputs[6], node.group_path), kind='RGB')
|
||||
col2 = get_const_from_socket(NodeSocket(node.node.inputs[7], node.group_path), kind='RGB')
|
||||
if fac is not None:
|
||||
if col1 == [1.0, 1.0, 1.0] and col2 is None:
|
||||
return fac
|
||||
@ -153,31 +158,32 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
|
||||
#
|
||||
# The [UV Wrapping] is for wrap modes like MIRROR that use nodes,
|
||||
# [Mapping] is for KHR_texture_transform, and [UV Map] is for texCoord.
|
||||
blender_shader_node = get_tex_from_socket(primary_socket).shader_node
|
||||
result_tex = get_texture_node_from_socket(primary_socket, export_settings)
|
||||
blender_shader_node = result_tex.shader_node
|
||||
|
||||
# Skip over UV wrapping stuff (it goes in the sampler)
|
||||
result = detect_manual_uv_wrapping(blender_shader_node)
|
||||
result = detect_manual_uv_wrapping(blender_shader_node, result_tex.group_path)
|
||||
if result:
|
||||
node = previous_node(result['next_socket'])
|
||||
else:
|
||||
node = previous_node(blender_shader_node.inputs['Vector'])
|
||||
node = previous_node(NodeSocket(blender_shader_node.inputs['Vector'], result_tex.group_path))
|
||||
|
||||
texture_transform = None
|
||||
if node and node.type == 'MAPPING':
|
||||
texture_transform = gltf2_blender_get.get_texture_transform_from_mapping_node(node)
|
||||
node = previous_node(node.inputs['Vector'])
|
||||
if node.node and node.node.type == 'MAPPING':
|
||||
texture_transform = get_texture_transform_from_mapping_node(node)
|
||||
node = previous_node(NodeSocket(node.node.inputs['Vector'], node.group_path))
|
||||
|
||||
uvmap_info = {}
|
||||
|
||||
if node and node.type == 'UVMAP' and node.uv_map:
|
||||
if node.node and node.node.type == 'UVMAP' and node.node.uv_map:
|
||||
uvmap_info['type'] = "Fixed"
|
||||
uvmap_info['value'] = node.uv_map
|
||||
uvmap_info['value'] = node.node.uv_map
|
||||
|
||||
elif node and node.type == 'ATTRIBUTE' \
|
||||
and node.attribute_type == "GEOMETRY" \
|
||||
and node.attribute_name:
|
||||
elif node and node.node and node.node.type == 'ATTRIBUTE' \
|
||||
and node.node.attribute_type == "GEOMETRY" \
|
||||
and node.node.attribute_name:
|
||||
uvmap_info['type'] = 'Attribute'
|
||||
uvmap_info['value'] = node.attribute_name
|
||||
uvmap_info['value'] = node.node.attribute_name
|
||||
|
||||
else:
|
||||
uvmap_info['type'] = 'Active'
|
||||
@ -187,6 +193,7 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
|
||||
|
||||
def check_same_size_images(
|
||||
blender_shader_sockets: typing.Tuple[bpy.types.NodeSocket],
|
||||
export_settings
|
||||
) -> bool:
|
||||
"""Check that all sockets leads to images of the same size."""
|
||||
if not blender_shader_sockets or not all(blender_shader_sockets):
|
||||
@ -194,7 +201,7 @@ def check_same_size_images(
|
||||
|
||||
sizes = set()
|
||||
for socket in blender_shader_sockets:
|
||||
tex = get_tex_from_socket(socket)
|
||||
tex = get_texture_node_from_socket(socket, export_settings)
|
||||
if tex is None:
|
||||
return False
|
||||
size = tex.shader_node.image.size
|
||||
|
@ -7,6 +7,11 @@
|
||||
#
|
||||
|
||||
import bpy
|
||||
from mathutils import Vector, Matrix
|
||||
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
|
||||
from ...com.gltf2_blender_material_helpers import get_gltf_node_name, get_gltf_node_old_name, get_gltf_old_group_node_name
|
||||
from ....blender.com.gltf2_blender_conversion import texture_transform_blender_to_gltf
|
||||
from io_scene_gltf2.io.com import gltf2_io_debug
|
||||
import typing
|
||||
|
||||
|
||||
@ -48,13 +53,14 @@ class FilterByType(Filter):
|
||||
|
||||
|
||||
class NodeTreeSearchResult:
|
||||
def __init__(self, shader_node: bpy.types.Node, path: typing.List[bpy.types.NodeLink]):
|
||||
def __init__(self, shader_node: bpy.types.Node, path: typing.List[bpy.types.NodeLink], group_path: typing.List[bpy.types.Node]):
|
||||
self.shader_node = shader_node
|
||||
self.path = path
|
||||
self.group_path = group_path
|
||||
|
||||
|
||||
# TODO: cache these searches
|
||||
def from_socket(start_socket: bpy.types.NodeSocket,
|
||||
def from_socket(start_socket: NodeTreeSearchResult,
|
||||
shader_node_filter: typing.Union[Filter, typing.Callable]) -> typing.List[NodeTreeSearchResult]:
|
||||
"""
|
||||
Find shader nodes where the filter expression is true.
|
||||
@ -66,18 +72,39 @@ def from_socket(start_socket: bpy.types.NodeSocket,
|
||||
# hide implementation (especially the search path)
|
||||
def __search_from_socket(start_socket: bpy.types.NodeSocket,
|
||||
shader_node_filter: typing.Union[Filter, typing.Callable],
|
||||
search_path: typing.List[bpy.types.NodeLink]) -> typing.List[NodeTreeSearchResult]:
|
||||
search_path: typing.List[bpy.types.NodeLink],
|
||||
group_path: typing.List[bpy.types.Node]) -> typing.List[NodeTreeSearchResult]:
|
||||
results = []
|
||||
|
||||
for link in start_socket.links:
|
||||
# follow the link to a shader node
|
||||
linked_node = link.from_node
|
||||
|
||||
if linked_node.type == "GROUP":
|
||||
group_output_node = [node for node in linked_node.node_tree.nodes if node.type == "GROUP_OUTPUT"][0]
|
||||
socket = [sock for sock in group_output_node.inputs if sock.name == link.from_socket.name][0]
|
||||
group_path.append(linked_node)
|
||||
linked_results = __search_from_socket(socket, shader_node_filter, search_path + [link], group_path.copy())
|
||||
if linked_results:
|
||||
# add the link to the current path
|
||||
search_path.append(link)
|
||||
results += linked_results
|
||||
continue
|
||||
|
||||
if linked_node.type == "GROUP_INPUT":
|
||||
socket = [sock for sock in group_path[-1].inputs if sock.name == link.from_socket.name][0]
|
||||
linked_results = __search_from_socket(socket, shader_node_filter, search_path + [link], group_path[:-1])
|
||||
if linked_results:
|
||||
# add the link to the current path
|
||||
search_path.append(link)
|
||||
results += linked_results
|
||||
continue
|
||||
|
||||
# check if the node matches the filter
|
||||
if shader_node_filter(linked_node):
|
||||
results.append(NodeTreeSearchResult(linked_node, search_path + [link]))
|
||||
results.append(NodeTreeSearchResult(linked_node, search_path + [link], group_path))
|
||||
# traverse into inputs of the node
|
||||
for input_socket in linked_node.inputs:
|
||||
linked_results = __search_from_socket(input_socket, shader_node_filter, search_path + [link])
|
||||
linked_results = __search_from_socket(input_socket, shader_node_filter, search_path + [link], group_path.copy())
|
||||
if linked_results:
|
||||
# add the link to the current path
|
||||
search_path.append(link)
|
||||
@ -85,10 +112,330 @@ def from_socket(start_socket: bpy.types.NodeSocket,
|
||||
|
||||
return results
|
||||
|
||||
if start_socket is None:
if start_socket.socket is None:
return []

return __search_from_socket(start_socket, shader_node_filter, [])
return __search_from_socket(start_socket.socket, shader_node_filter, [], start_socket.group_path)

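A hedged usage sketch of the search entry point above, run inside Blender; the material name is hypothetical and the module-level names are assumed to be in scope:

import bpy
# Assumes from_socket, FilterByType and NodeSocket from this module are in scope.

mat = bpy.data.materials["Material"]          # hypothetical node-based material
principled = next((n for n in mat.node_tree.nodes if n.type == 'BSDF_PRINCIPLED'), None)
if principled is not None:
    start = NodeSocket(principled.inputs['Base Color'], [mat])
    for res in from_socket(start, FilterByType(bpy.types.ShaderNodeTexImage)):
        # res.shader_node is the Image Texture node; res.group_path records the group nesting.
        print(res.shader_node.name, [p.name for p in res.group_path])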
@cached
|
||||
def get_texture_node_from_socket(socket, export_settings):
|
||||
result = from_socket(
|
||||
socket,
|
||||
FilterByType(bpy.types.ShaderNodeTexImage))
|
||||
if not result:
|
||||
return None
|
||||
if result[0].shader_node.image is None:
|
||||
return None
|
||||
return result[0]
|
||||
|
||||
def has_image_node_from_socket(socket, export_settings):
|
||||
result = get_texture_node_from_socket(socket, export_settings)
|
||||
return result is not None
|
||||
|
||||
# Return the default value of a socket, even if this socket is linked
def get_const_from_default_value_socket(socket, kind):
if kind == 'RGB':
if socket.socket.type != 'RGBA': return None
return list(socket.socket.default_value)[:3]
if kind == 'VALUE':
if socket.socket.type != 'VALUE': return None
return socket.socket.default_value
return None

#TODOSNode : @cached? If yes, we need to use the id of the node tree, as it is probably not fully hashable
# Not caching it for now. If we run into performance issues, we will revisit this later
def get_material_nodes(node_tree: bpy.types.NodeTree, group_path, type):
"""
|
||||
For a given tree, recursively return all nodes including node groups.
|
||||
"""
|
||||
|
||||
nodes = []
|
||||
for node in [n for n in node_tree.nodes if isinstance(n, type) and not n.mute]:
|
||||
nodes.append((node, group_path.copy()))
|
||||
|
||||
# Some weird node groups with missing datablock can have no node_tree, so checking n.node_tree (See #1797)
|
||||
for node in [n for n in node_tree.nodes if n.type == "GROUP" and n.node_tree is not None and not n.mute and n.node_tree.name != get_gltf_old_group_node_name()]: # Do not enter the olf glTF node group
|
||||
new_group_path = group_path.copy()
|
||||
new_group_path.append(node)
|
||||
nodes.extend(get_material_nodes(node.node_tree, new_group_path, type))
|
||||
|
||||
return nodes
|
||||
|
||||
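For illustration, this is how the helper can be used to list every Principled BSDF in a material, including ones nested in groups; a sketch only, with a hypothetical material name:

import bpy
# Assumes get_material_nodes from this module is in scope.

mat = bpy.data.materials["Material"]          # hypothetical material
if mat.use_nodes:
    hits = get_material_nodes(mat.node_tree, [mat], bpy.types.ShaderNodeBsdfPrincipled)
    for node, group_path in hits:
        # group_path starts with the material and lists every group node entered on the way.
        print(node.name, " > ".join(p.name for p in group_path))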
def get_socket_from_gltf_material_node(blender_material: bpy.types.Material, name: str):
|
||||
"""
|
||||
For a given material input name, retrieve the corresponding node tree socket in the special glTF node group.
|
||||
|
||||
:param blender_material: a blender material for which to get the socket
|
||||
:param name: the name of the socket
|
||||
:return: a blender NodeSocket
|
||||
"""
|
||||
gltf_node_group_names = [get_gltf_node_name().lower(), get_gltf_node_old_name().lower()]
|
||||
if blender_material.node_tree and blender_material.use_nodes:
|
||||
nodes = get_material_nodes(blender_material.node_tree, [blender_material], bpy.types.ShaderNodeGroup)
|
||||
# Some weird node groups with missing datablock can have no node_tree, so checking n.node_tree (See #1797)
|
||||
nodes = [n for n in nodes if n[0].node_tree is not None and ( n[0].node_tree.name.lower().startswith(get_gltf_old_group_node_name()) or n[0].node_tree.name.lower() in gltf_node_group_names)]
|
||||
inputs = sum([[(input, node[1]) for input in node[0].inputs if input.name == name] for node in nodes], [])
|
||||
if inputs:
|
||||
return NodeSocket(inputs[0][0], inputs[0][1])
|
||||
|
||||
return NodeSocket(None, None)
|
||||
|
||||
class NodeSocket:
|
||||
def __init__(self, socket, group_path):
|
||||
self.socket = socket
|
||||
self.group_path = group_path
|
||||
|
||||
class ShNode:
|
||||
def __init__(self, node, group_path):
|
||||
self.node = node
|
||||
self.group_path = group_path
|
||||
|
||||
def get_node_socket(blender_material, type, name):
|
||||
"""
|
||||
For a given material input name, retrieve the corresponding node tree socket for a given node type.
|
||||
|
||||
:param blender_material: a blender material for which to get the socket
|
||||
:return: a blender NodeSocket for a given type
|
||||
"""
|
||||
nodes = get_material_nodes(blender_material.node_tree, [blender_material], type)
|
||||
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
|
||||
nodes = [node for node in nodes if check_if_is_linked_to_active_output(node[0].outputs[0], node[1])]
|
||||
inputs = sum([[(input, node[1]) for input in node[0].inputs if input.name == name] for node in nodes], [])
|
||||
if inputs:
|
||||
return NodeSocket(inputs[0][0], inputs[0][1])
|
||||
return NodeSocket(None, None)
|
||||
|
||||
|
||||
def get_socket(blender_material: bpy.types.Material, name: str, volume=False):
|
||||
"""
|
||||
For a given material input name, retrieve the corresponding node tree socket.
|
||||
|
||||
:param blender_material: a blender material for which to get the socket
|
||||
:param name: the name of the socket
|
||||
:return: a blender NodeSocket
|
||||
"""
|
||||
if blender_material.node_tree and blender_material.use_nodes:
|
||||
#i = [input for input in blender_material.node_tree.inputs]
|
||||
#o = [output for output in blender_material.node_tree.outputs]
|
||||
if name == "Emissive":
|
||||
# Check for a dedicated Emission node first, it must supersede the newer built-in one
|
||||
# because the newer one is always present in all Principled BSDF materials.
|
||||
emissive_socket = get_node_socket(blender_material, bpy.types.ShaderNodeEmission, "Color")
|
||||
if emissive_socket.socket is not None:
|
||||
return emissive_socket
|
||||
# If a dedicated Emission node was not found, fall back to the Principled BSDF Emission socket.
|
||||
name = "Emission Color"
|
||||
type = bpy.types.ShaderNodeBsdfPrincipled
|
||||
elif name == "Background":
|
||||
type = bpy.types.ShaderNodeBackground
|
||||
name = "Color"
|
||||
else:
|
||||
if volume is False:
|
||||
type = bpy.types.ShaderNodeBsdfPrincipled
|
||||
else:
|
||||
type = bpy.types.ShaderNodeVolumeAbsorption
|
||||
|
||||
return get_node_socket(blender_material, type, name)
|
||||
|
||||
return NodeSocket(None, None)
|
||||
|
||||
def get_factor_from_socket(socket, kind):
|
||||
"""
|
||||
For baseColorFactor, metallicFactor, etc.
|
||||
Get a constant value from a socket, or a constant value
|
||||
from a MULTIPLY node just before the socket.
|
||||
kind is either 'RGB' or 'VALUE'.
|
||||
"""
|
||||
fac = get_const_from_socket(socket, kind)
|
||||
if fac is not None:
|
||||
return fac
|
||||
|
||||
node = previous_node(socket)
|
||||
if node.node is not None:
|
||||
x1, x2 = None, None
|
||||
if kind == 'RGB':
|
||||
if node.node.type == 'MIX' and node.node.data_type == "RGBA" and node.node.blend_type == 'MULTIPLY':
|
||||
# TODO: handle factor in inputs[0]?
|
||||
x1 = get_const_from_socket(NodeSocket(node.node.inputs[6], node.group_path), kind)
|
||||
x2 = get_const_from_socket(NodeSocket(node.node.inputs[7], node.group_path), kind)
|
||||
if kind == 'VALUE':
|
||||
if node.node.type == 'MATH' and node.node.operation == 'MULTIPLY':
|
||||
x1 = get_const_from_socket(NodeSocket(node.node.inputs[0], node.group_path), kind)
|
||||
x2 = get_const_from_socket(NodeSocket(node.node.inputs[1], node.group_path), kind)
|
||||
if x1 is not None and x2 is None: return x1
|
||||
if x2 is not None and x1 is None: return x2
|
||||
|
||||
return None
|
||||
|
||||
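A short usage sketch for the factor helpers above; illustrative only, assuming the functions in this module and a node-based material named "Material":

import bpy
# Assumes get_socket and get_factor_from_socket from this module are in scope.

mat = bpy.data.materials["Material"]          # hypothetical node-based material
base_color = get_socket(mat, "Base Color")
if base_color.socket is not None:
    # Constant value of the socket, or the constant side of a MULTIPLY node feeding it.
    rgb = get_factor_from_socket(base_color, kind='RGB')
    alpha = get_factor_from_socket(get_socket(mat, "Alpha"), kind='VALUE')
    print(rgb, alpha)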
def get_const_from_socket(socket, kind):
|
||||
if not socket.socket.is_linked:
|
||||
if kind == 'RGB':
|
||||
if socket.socket.type != 'RGBA': return None
|
||||
return list(socket.socket.default_value)[:3]
|
||||
if kind == 'VALUE':
|
||||
if socket.socket.type != 'VALUE': return None
|
||||
return socket.socket.default_value
|
||||
|
||||
# Handle connection to a constant RGB/Value node
|
||||
prev_node = previous_node(socket)
|
||||
if prev_node.node is not None:
|
||||
if kind == 'RGB' and prev_node.node.type == 'RGB':
|
||||
return list(prev_node.node.outputs[0].default_value)[:3]
|
||||
if kind == 'VALUE' and prev_node.node.type == 'VALUE':
|
||||
return prev_node.node.outputs[0].default_value
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def previous_socket(socket: NodeSocket):
|
||||
soc = socket.socket
|
||||
group_path = socket.group_path.copy()
|
||||
while True:
|
||||
if not soc.is_linked:
|
||||
return NodeSocket(None, None)
|
||||
|
||||
from_socket = soc.links[0].from_socket
|
||||
|
||||
# If we are entering a node group (from outputs)
|
||||
if from_socket.node.type == "GROUP":
|
||||
socket_name = from_socket.name
|
||||
sockets = [n for n in from_socket.node.node_tree.nodes if n.type == "GROUP_OUTPUT"][0].inputs
|
||||
socket = [s for s in sockets if s.name == socket_name][0]
|
||||
group_path.append(from_socket.node)
|
||||
soc = socket
|
||||
continue
|
||||
|
||||
# If we are exiting a node group (from inputs)
|
||||
if from_socket.node.type == "GROUP_INPUT":
|
||||
socket_name = from_socket.name
|
||||
sockets = group_path[-1].inputs
|
||||
socket = [s for s in sockets if s.name == socket_name][0]
|
||||
group_path = group_path[:-1]
|
||||
soc = socket
|
||||
continue
|
||||
|
||||
# Skip over reroute nodes
|
||||
if from_socket.node.type == 'REROUTE':
|
||||
soc = from_socket.node.inputs[0]
|
||||
continue
|
||||
|
||||
return NodeSocket(from_socket, group_path)
|
||||
|
||||
|
||||
def previous_node(socket: NodeSocket):
|
||||
prev_socket = previous_socket(socket)
|
||||
if prev_socket.socket is not None:
|
||||
return ShNode(prev_socket.socket.node, prev_socket.group_path)
|
||||
return ShNode(None, None)
|
||||
|
||||
def get_texture_transform_from_mapping_node(mapping_node):
|
||||
if mapping_node.node.vector_type not in ["TEXTURE", "POINT", "VECTOR"]:
|
||||
gltf2_io_debug.print_console("WARNING",
|
||||
"Skipping exporting texture transform because it had type " +
|
||||
mapping_node.node.vector_type + "; recommend using POINT instead"
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
rotation_0, rotation_1 = mapping_node.node.inputs['Rotation'].default_value[0], mapping_node.node.inputs['Rotation'].default_value[1]
|
||||
if rotation_0 or rotation_1:
|
||||
# TODO: can we handle this?
|
||||
gltf2_io_debug.print_console("WARNING",
|
||||
"Skipping exporting texture transform because it had non-zero "
|
||||
"rotations in the X/Y direction; only a Z rotation can be exported!"
|
||||
)
|
||||
return None
|
||||
|
||||
mapping_transform = {}
|
||||
mapping_transform["offset"] = [mapping_node.node.inputs['Location'].default_value[0], mapping_node.node.inputs['Location'].default_value[1]]
|
||||
mapping_transform["rotation"] = mapping_node.node.inputs['Rotation'].default_value[2]
|
||||
mapping_transform["scale"] = [mapping_node.node.inputs['Scale'].default_value[0], mapping_node.node.inputs['Scale'].default_value[1]]
|
||||
|
||||
if mapping_node.node.vector_type == "TEXTURE":
|
||||
# This means use the inverse of the TRS transform.
|
||||
def inverted(mapping_transform):
|
||||
offset = mapping_transform["offset"]
|
||||
rotation = mapping_transform["rotation"]
|
||||
scale = mapping_transform["scale"]
|
||||
|
||||
# The inverse of a TRS transform is not always itself a TRS. This function is correct
# at least when neither of the following cases occurs.
if abs(rotation) > 1e-5 and abs(scale[0] - scale[1]) > 1e-5:
return None
if abs(scale[0]) < 1e-5 or abs(scale[1]) < 1e-5:
return None
|
||||
new_offset = Matrix.Rotation(-rotation, 3, 'Z') @ Vector((-offset[0], -offset[1], 1))
|
||||
new_offset[0] /= scale[0]; new_offset[1] /= scale[1]
|
||||
return {
|
||||
"offset": new_offset[0:2],
|
||||
"rotation": -rotation,
|
||||
"scale": [1/scale[0], 1/scale[1]],
|
||||
}
|
||||
|
||||
mapping_transform = inverted(mapping_transform)
|
||||
if mapping_transform is None:
|
||||
gltf2_io_debug.print_console("WARNING",
|
||||
"Skipping exporting texture transform with type TEXTURE because "
|
||||
"we couldn't convert it to TRS; recommend using POINT instead"
|
||||
)
|
||||
return None
|
||||
|
||||
elif mapping_node.node.vector_type == "VECTOR":
|
||||
# Vectors don't get translated
|
||||
mapping_transform["offset"] = [0, 0]
|
||||
|
||||
texture_transform = texture_transform_blender_to_gltf(mapping_transform)
|
||||
|
||||
if all([component == 0 for component in texture_transform["offset"]]):
|
||||
del(texture_transform["offset"])
|
||||
if all([component == 1 for component in texture_transform["scale"]]):
|
||||
del(texture_transform["scale"])
|
||||
if texture_transform["rotation"] == 0:
|
||||
del(texture_transform["rotation"])
|
||||
|
||||
if len(texture_transform) == 0:
|
||||
return None
|
||||
|
||||
return texture_transform
|
||||
|
||||
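As a sanity check on the closed-form inverse used above, here is an illustrative plain-Python verification; it assumes the POINT convention scale, then rotate, then translate, and uses uniform scale so the inverse stays a TRS:

from math import cos, sin

def apply_trs(p, offset, rotation, scale):
    # Assumed POINT convention: scale, then rotate around Z, then translate.
    x, y = p[0] * scale[0], p[1] * scale[1]
    x, y = x * cos(rotation) - y * sin(rotation), x * sin(rotation) + y * cos(rotation)
    return (x + offset[0], y + offset[1])

def inverse_trs(offset, rotation, scale):
    # Mirrors inverted() above: offset' = R(-r) @ (-offset) / scale, rotation' = -r, scale' = 1/scale.
    ox = -offset[0] * cos(-rotation) + offset[1] * sin(-rotation)
    oy = -offset[0] * sin(-rotation) - offset[1] * cos(-rotation)
    return (ox / scale[0], oy / scale[1]), -rotation, (1.0 / scale[0], 1.0 / scale[1])

offset, rotation, scale = (0.25, -0.5), 0.6, (2.0, 2.0)   # uniform scale
inv_o, inv_r, inv_s = inverse_trs(offset, rotation, scale)
p = (0.3, 0.7)
q = apply_trs(apply_trs(p, offset, rotation, scale), inv_o, inv_r, inv_s)
assert all(abs(a - b) < 1e-9 for a, b in zip(p, q))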
def check_if_is_linked_to_active_output(shader_socket, group_path):
|
||||
for link in shader_socket.links:
|
||||
|
||||
# If we are entering a node group
|
||||
if link.to_node.type == "GROUP":
|
||||
socket_name = link.to_socket.name
|
||||
sockets = [n for n in link.to_node.node_tree.nodes if n.type == "GROUP_INPUT"][0].outputs
|
||||
socket = [s for s in sockets if s.name == socket_name][0]
|
||||
group_path.append(link.to_node)
|
||||
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
|
||||
ret = check_if_is_linked_to_active_output(socket, group_path) # recursive until find an output material node
|
||||
if ret is True:
|
||||
return True
|
||||
continue
|
||||
|
||||
# If we are exiting a node group
|
||||
if link.to_node.type == "GROUP_OUTPUT":
|
||||
socket_name = link.to_socket.name
|
||||
sockets = group_path[-1].outputs
|
||||
socket = [s for s in sockets if s.name == socket_name][0]
|
||||
group_path = group_path[:-1]
|
||||
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
|
||||
ret = check_if_is_linked_to_active_output(socket, group_path) # recursive until find an output material node
|
||||
if ret is True:
|
||||
return True
|
||||
continue
|
||||
|
||||
if isinstance(link.to_node, bpy.types.ShaderNodeOutputMaterial) and link.to_node.is_active_output is True:
|
||||
return True
|
||||
|
||||
if len(link.to_node.outputs) > 0: # ignore non active output, not having output sockets
|
||||
#TODOSNode : Why checking outputs[0] ? What about alpha for texture node, that is outputs[1] ????
|
||||
ret = check_if_is_linked_to_active_output(link.to_node.outputs[0], group_path) # recursive until find an output material node
|
||||
if ret is True:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_vertex_color_info(primary_socket, sockets, export_settings):
|
||||
return {"color": None, "alpha": None} #TODO, placeholder for now
|
||||
|
@ -39,7 +39,7 @@ def specular(mh, location_specular,
|
||||
x_specularcolor, y_specularcolor = location_specular_tint
|
||||
|
||||
if tex_specular_info is None:
|
||||
specular_socket.default_value = specular_factor
|
||||
specular_socket.default_value = specular_factor / 2.0
|
||||
else:
|
||||
# Mix specular factor
|
||||
if specular_factor != 1.0:
|
||||
@ -51,7 +51,7 @@ def specular(mh, location_specular,
|
||||
mh.node_tree.links.new(specular_socket, node.outputs[0])
|
||||
# Inputs
|
||||
specular_socket = node.inputs[0]
|
||||
node.inputs[1].default_value = specular_factor
|
||||
node.inputs[1].default_value = specular_factor / 2.0
|
||||
x_specular -= 200
|
||||
|
||||
texture(
|
||||
|
@ -61,10 +61,9 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
|
||||
|
||||
# Use a class here, to be able to pass data by reference to hook (to be able to change them inside hook)
|
||||
class IMPORT_mesh_options:
|
||||
def __init__(self, skinning: bool = True, skin_into_bind_pose: bool = True, use_auto_smooth: bool = True):
|
||||
def __init__(self, skinning: bool = True, skin_into_bind_pose: bool = True):
|
||||
self.skinning = skinning
|
||||
self.skin_into_bind_pose = skin_into_bind_pose
|
||||
self.use_auto_smooth = use_auto_smooth
|
||||
|
||||
mesh_options = IMPORT_mesh_options()
|
||||
import_user_extensions('gather_import_mesh_options', gltf, mesh_options, pymesh, skin_idx)
|
||||
@ -479,9 +478,7 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
|
||||
mesh.update(calc_edges_loose=has_loose_edges)
|
||||
|
||||
if has_normals:
|
||||
mesh.create_normals_split()
|
||||
mesh.normals_split_custom_set_from_vertices(vert_normals)
|
||||
mesh.use_auto_smooth = mesh_options.use_auto_smooth
|
||||
|
||||
|
||||
def points_edges_tris(mode, indices):
|
||||
|
@ -50,9 +50,6 @@ def pbr_metallic_roughness(mh: MaterialHelper):
|
||||
# This value may be overridden later if IOR extension is set on file
|
||||
pbr_node.inputs['IOR'].default_value = GLTF_IOR
|
||||
|
||||
pbr_node.inputs['Specular IOR Level'].default_value = 0.0 # Will be overridden by KHR_materials_specular if set
|
||||
pbr_node.inputs['Specular Tint'].default_value = [0.0]*3 + [1.0] # Will be overridden by KHR_materials_specular if set
|
||||
|
||||
if mh.pymat.occlusion_texture is not None:
|
||||
if mh.settings_node is None:
|
||||
mh.settings_node = make_settings_node(mh)
|
||||
|
@ -41,7 +41,7 @@ def texture(
|
||||
if forced_image is None:
|
||||
|
||||
if mh.gltf.import_settings['import_webp_texture'] is True:
|
||||
# Get the webp image if there is one
|
||||
# Get the WebP image if there is one
|
||||
if pytexture.extensions \
|
||||
and 'EXT_texture_webp' in pytexture.extensions \
|
||||
and pytexture.extensions['EXT_texture_webp']['source'] is not None:
|
||||
|
@ -47,8 +47,5 @@ class Buffer:
|
||||
def to_bytes(self):
|
||||
return self.__data
|
||||
|
||||
def to_embed_string(self):
|
||||
return 'data:application/octet-stream;base64,' + base64.b64encode(self.__data).decode('ascii')
|
||||
|
||||
def clear(self):
|
||||
self.__data = b""
|
||||
|
@ -819,10 +819,6 @@ def export(file,
|
||||
|
||||
# --- Write IndexedFaceSet Attributes (same as IndexedTriangleSet)
|
||||
fw('solid="%s"\n' % bool_as_str(material and material.use_backface_culling))
|
||||
if is_smooth:
|
||||
# use Auto-Smooth angle, if enabled. Otherwise make
|
||||
# the mesh perfectly smooth by creaseAngle > pi.
|
||||
fw(ident_step + 'creaseAngle="%.4f"\n' % (mesh.auto_smooth_angle if mesh.use_auto_smooth else 4.0))
|
||||
|
||||
if use_normals:
|
||||
# currently not optional, could be made so:
|
||||
|
@ -3013,8 +3013,7 @@ def importShape_ProcessObject(
# solid=false, we don't support it yet.
creaseAngle = geom.getFieldAsFloat('creaseAngle', None, ancestry)
if creaseAngle is not None:
bpydata.auto_smooth_angle = creaseAngle
bpydata.use_auto_smooth = True
bpydata.set_sharp_from_angle(creaseAngle)
else:
bpydata.polygons.foreach_set("use_smooth", [False] * len(bpydata.polygons))
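
The same replacement recurs throughout this patch, so a standalone sketch of the old and new API side by side may help reviewers (Blender 4.1+, hypothetical mesh):

```python
import math
import bpy

mesh = bpy.data.meshes["ExampleMesh"]  # hypothetical mesh
angle = math.radians(30.0)

# Blender <= 4.0 (attributes removed in 4.1):
#   mesh.use_auto_smooth = True
#   mesh.auto_smooth_angle = angle

# Blender 4.1+: tag edges whose faces meet at more than `angle` as sharp.
mesh.set_sharp_from_angle(angle)
```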
@ -677,8 +677,7 @@ def mu_set_auto_smooth(self, angle, affect, set_smooth_shading):

#bpy.ops.object.shade_smooth()

object.data.use_auto_smooth = 1
object.data.auto_smooth_angle = angle # 35 degrees as radians
object.data.set_sharp_from_angle(angle) # 35 degrees as radians

objects_affected += 1
@ -171,13 +171,6 @@ class VIEW3D_MT_materialutilities_specials(bpy.types.Menu):
text = "Join by material",
icon = "OBJECT_DATAMODE")

layout.separator()

op = layout.operator(MATERIAL_OT_materialutilities_auto_smooth_angle.bl_idname,
text = "Set Auto Smooth",
icon = "SHADING_SOLID")
op.affect = mu_prefs.set_smooth_affect
op.angle = mu_prefs.auto_smooth_angle

class VIEW3D_MT_materialutilities_main(bpy.types.Menu):
"""Main menu for Material Utilities"""

@ -68,20 +68,6 @@ class VIEW3D_MT_materialutilities_preferences(AddonPreferences):
default = 0
)

set_smooth_affect: EnumProperty(
name = "Set Auto Smooth Affect",
description = "Which objects to affect",
items = mu_affect_enums,
default = 'SELECTED'
)
auto_smooth_angle: FloatProperty(
name = "Auto Smooth Angle",
description = "Maximum angle between face normals that will be considered as smooth",
subtype = 'ANGLE',
min = 0,
max = radians(180),
default = radians(35)
)

def draw(self, context):
layout = self.layout

@ -105,11 +91,6 @@ class VIEW3D_MT_materialutilities_preferences(AddonPreferences):
c.row().prop(self, "link_to", expand = False)
c.row().prop(self, "link_to_affect", expand = False)

d = box.box()
d.label(text = "Set Auto Smooth")
d.row().prop(self, "auto_smooth_angle", expand = False)
d.row().prop(self, "set_smooth_affect", expand = False)

box = layout.box()
box.label(text = "Miscellaneous")
@ -802,7 +802,6 @@ def tessellate_patch(props):
n2 = n2[masked_faces][:,None,:]
else:
if normals_mode == 'CUSTOM':
me0.calc_normals_split()
normals_split = [0]*len(me0.loops)*3
vertex_indexes = [0]*len(me0.loops)
me0.loops.foreach_get('normal', normals_split)
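
A small sketch (Blender 4.1+, hypothetical mesh) of why the `calc_normals_split()` call can simply be dropped — loop normals are now computed on demand when they are read:

```python
import bpy

me = bpy.data.meshes["ExampleMesh"]  # hypothetical mesh

# In 4.1 there is no calc_normals_split(); reading the loop normals
# triggers their evaluation.
normals_split = [0.0] * (len(me.loops) * 3)
me.loops.foreach_get('normal', normals_split)
```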
@ -338,8 +338,7 @@ def CreateBevel(context, CurrentObject):

bpy.ops.object.shade_smooth()

context.object.data.use_auto_smooth = True
context.object.data.auto_smooth_angle = 1.0471975
context.object.data.set_sharp_from_angle(1.0471975)

# Restore the active object
context.view_layer.objects.active = SavActive
@ -108,12 +108,6 @@ class PovDataButtonsPanel(properties_data_mesh.MeshButtonsPanel):
# We cannot inherit from RNA classes (like e.g. properties_data_mesh.DATA_PT_vertex_groups).
# Complex py/bpy/rna interactions (with metaclass and all) simply do not allow it to work.
# So we simply have to explicitly copy here the interesting bits. ;)
class DATA_PT_POV_normals(PovDataButtonsPanel, Panel):
bl_label = properties_data_mesh.DATA_PT_normals.bl_label

draw = properties_data_mesh.DATA_PT_normals.draw


class DATA_PT_POV_texture_space(PovDataButtonsPanel, Panel):
bl_label = properties_data_mesh.DATA_PT_texture_space.bl_label
bl_options = properties_data_mesh.DATA_PT_texture_space.bl_options

@ -1066,7 +1060,6 @@ class VIEW_WT_POV_blobcube_add(WorkSpaceTool):
classes = (
# ObjectButtonsPanel,
# PovDataButtonsPanel,
DATA_PT_POV_normals,
DATA_PT_POV_texture_space,
DATA_PT_POV_vertex_groups,
DATA_PT_POV_shape_keys,
@ -180,7 +180,6 @@ def pov_cylinder_define(context, op, ob, radius, loc, loc_cap):
ob.name = ob.data.name = "PovCylinder"
ob.pov.cylinder_radius = radius
ob.pov.cylinder_location_cap = vec
ob.data.use_auto_smooth = True
ob.pov.object_as = "CYLINDER"
ob.update_tag() # as prop set via python not updated in depsgraph

@ -326,7 +325,6 @@ def pov_sphere_define(context, op, ob, loc):
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.hide(unselected=False)
bpy.ops.object.mode_set(mode="OBJECT")
ob.data.use_auto_smooth = True
bpy.ops.object.shade_smooth()
ob.pov.object_as = "SPHERE"
ob.update_tag() # as prop set via python not updated in depsgraph

@ -471,7 +469,6 @@ def pov_cone_define(context, op, ob):
ob.pov.cone_height = height
ob.pov.cone_base_z = zb
ob.pov.cone_cap_z = zc
ob.data.use_auto_smooth = True
bpy.ops.object.shade_smooth()
ob.pov.object_as = "CONE"
ob.update_tag() # as prop set via python not updated in depsgraph

@ -659,9 +656,7 @@ def pov_torus_define(context, op, ob):
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.hide(unselected=False)
bpy.ops.object.mode_set(mode="OBJECT")
ob.data.use_auto_smooth = True
ob.data.auto_smooth_angle = 0.6
bpy.ops.object.shade_smooth()
ob.data.set_sharp_from_angle(0.6)
ob.pov.object_as = "TORUS"
ob.update_tag() # as prop set via python not updated in depsgraph

@ -171,8 +171,7 @@ def pov_superellipsoid_define(context, op, ob):
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.hide(unselected=False)
bpy.ops.object.mode_set(mode="OBJECT")
ob.data.auto_smooth_angle = 1.3
bpy.ops.object.shade_smooth()
ob.data.set_sharp_from_angle(1.3)
ob.pov.object_as = "SUPERELLIPSOID"
ob.update_tag() # as prop set via python not updated in depsgraph

@ -1051,8 +1050,7 @@ def pov_parametric_define(context, op, ob):
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.hide(unselected=False)
bpy.ops.object.mode_set(mode="OBJECT")
ob.data.auto_smooth_angle = 0.6
bpy.ops.object.shade_smooth()
ob.data.set_sharp_from_angle(0.6)
ob.pov.object_as = "PARAMETRIC"
ob.update_tag() # as prop set via python not updated in depsgraph
return{'FINISHED'}

@ -1180,8 +1178,6 @@ class POV_OT_polygon_to_circle_add(Operator):
bpy.ops.object.mode_set(mode="EDIT")
bpy.ops.mesh.hide(unselected=False)
bpy.ops.object.mode_set(mode="OBJECT")
#ob.data.auto_smooth_angle = 0.1
#bpy.ops.object.shade_smooth()
ob.pov.object_as = "POLYCIRCLE"
ob.update_tag() # as prop set via python not updated in depsgraph
return {"FINISHED"}
@ -89,10 +89,10 @@ class ActionSlot(PropertyGroup, ActionSlotBase):

target_space: EnumProperty(
name="Transform Space",
items=[("WORLD", "World Space", "World Space"),
("POSE", "Pose Space", "Pose Space"),
("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent"),
("LOCAL", "Local Space", "Local Space")],
items=[("WORLD", "World Space", "World Space", 0),
# ("POSE", "Pose Space", "Pose Space", 1),
# ("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent", 2),
("LOCAL", "Local Space", "Local Space", 3)],
default="LOCAL"
)
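
Keeping the removed items' integer IDs matters here; a sketch (class and property names are illustrative, not from this patch) of why explicit numbers are used when enum items are dropped:

```python
import bpy
from bpy.props import EnumProperty

class ExampleSlot(bpy.types.PropertyGroup):
    # With explicit numbers, "LOCAL" keeps its stored value (3) even though
    # the items in between were removed; without them, items are numbered by
    # position and files saved with the old enum would silently remap.
    target_space: EnumProperty(
        name="Transform Space",
        items=[("WORLD", "World Space", "World Space", 0),
               ("LOCAL", "Local Space", "Local Space", 3)],
        default="LOCAL",
    )
```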
@ -153,9 +153,10 @@ def pVisRotExec(bone, active, context):

def pVisScaExec(bone, active, context):
obj_bone = bone.id_data
bone.scale = getmat(bone, active, context,
not obj_bone.data.bones[bone.name].use_inherit_scale)\
.to_scale()
bone.scale = getmat(
bone, active, context,
obj_bone.data.bones[bone.name].inherit_scale not in {'NONE', 'NONE_LEGACY'}
).to_scale()


def pDrwExec(bone, active, context):
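
A sketch (armature and bone names are hypothetical) of the property this hunk migrates: the boolean `use_inherit_scale`, which this patch stops using, is expressed through the `inherit_scale` enum, where both 'NONE' and 'NONE_LEGACY' mean scale is not inherited:

```python
import bpy

arm = bpy.data.armatures["ExampleArmature"]  # hypothetical armature
bone = arm.bones["ExampleBone"]              # hypothetical bone

# Old API: scale_inherited = bone.use_inherit_scale
# Current API: inherit_scale is an enum ('FULL', 'NONE', 'NONE_LEGACY', ...).
scale_inherited = bone.inherit_scale not in {'NONE', 'NONE_LEGACY'}
```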
@ -156,12 +156,12 @@ def draw_callback_view():

if data_euler or data_quat:
cursor = bpy.context.scene.cursor.location.copy()
derived_matrices = []
for key, quat in data_quat.values():
derived_matrices = dict()
for key, quat in data_quat.items():
matrix = quat.to_matrix().to_4x4()
matrix.translation = cursor
derived_matrices[key] = matrix
for key, eul in data_euler.values():
for key, eul in data_euler.items():
matrix = eul.to_matrix().to_4x4()
matrix.translation = cursor
derived_matrices[key] = matrix
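
The container and iteration change above fixes a real bug; a tiny sketch of the difference (the data is made up):

```python
from mathutils import Quaternion

data_quat = {"q_example": Quaternion((1.0, 0.0, 0.0, 0.0))}

# Old code: `for key, quat in data_quat.values()` tries to unpack each
# Quaternion into (key, quat) and raises ValueError, and appending to a
# list loses the name. Iterating items() keeps the key with its value.
derived_matrices = dict()
for key, quat in data_quat.items():
    derived_matrices[key] = quat.to_matrix().to_4x4()
```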
@ -180,21 +180,6 @@ class VIEW3D_OT_selecteditVertsEdgesFaces(Operator):
return {'FINISHED'}


# ********** Normals / Auto Smooth Menu **********
# Thanks to marvin.k.breuer for the Autosmooth part of the menu

def menu_func(self, context):
layout = self.layout
obj = context.object
obj_data = context.active_object.data
layout.separator()
layout.prop(obj_data, "use_auto_smooth", text="Normals: Auto Smooth")

# Auto Smooth Angle - two tab spaces to align it with the rest of the menu
layout.prop(obj_data, "auto_smooth_angle",
text=" Auto Smooth Angle")


# List The Classes #

classes = (

@ -215,7 +200,6 @@ def register():
for cls in classes:
bpy.utils.register_class(cls)

bpy.types.VIEW3D_MT_edit_mesh_normals.append(menu_func)

# Unregister Classes & Hotkeys #
def unregister():

@ -223,7 +207,6 @@ def unregister():
for cls in reversed(classes):
bpy.utils.unregister_class(cls)

bpy.types.VIEW3D_MT_edit_mesh_normals.remove(menu_func)

if __name__ == "__main__":
register()
@ -6,6 +6,7 @@

import bpy
from bpy.props import FloatProperty, FloatVectorProperty
from bpy.app.translations import pgettext_iface as iface_
import gpu
from gpu_extras.batch import batch_for_shader
from mathutils import Vector

@ -248,8 +249,8 @@ class SUNPOS_OT_ShowHdr(bpy.types.Operator):
self.initial_azimuth = context.scene.sun_pos_properties.hdr_azimuth

context.workspace.status_text_set(
"Enter/LMB: confirm, Esc/RMB: cancel,"
" MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure")
iface_("Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, "
"mouse wheel: zoom, Ctrl + mouse wheel: set exposure"))

self._handle = bpy.types.SpaceView3D.draw_handler_add(
draw_callback_px, (self, context), 'WINDOW', 'POST_PIXEL'
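
A sketch (function name and operator context assumed) of the pattern introduced here — strings passed to `status_text_set()` are not translated automatically, so they are wrapped in `pgettext_iface` so the i18n tooling can pick them up and show them in the user's interface language:

```python
import bpy
from bpy.app.translations import pgettext_iface as iface_

def show_modal_help(context):
    # Wrapping the literal makes it translatable at runtime.
    context.workspace.status_text_set(
        iface_("Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, "
               "mouse wheel: zoom, Ctrl + mouse wheel: set exposure"))
```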
@ -416,6 +416,19 @@ translations_tuple = (
("fr_FR", "Année",
(False, ())),
),
(("*", "Unknown projection"),
(("scripts/addons/sun_position/hdr.py:181",),
()),
("fr_FR", "Projection inconnue",
(False, ())),
),
(("*", "Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure"),
(("scripts/addons/sun_position/hdr.py:252",),
()),
("fr_FR", "Entrée/ClicG : Confirmer, Échap/ClicD : Annuler, ClicM : défiler, "
"molette : zoom, Ctrl + molette : exposition",
(False, ())),
),
(("*", "Could not find 3D View"),
(("scripts/addons/sun_position/hdr.py:263",),
()),

@ -428,12 +441,6 @@ translations_tuple = (
("fr_FR", "Veuillez utiliser un nœud de texture d’environnement",
(False, ())),
),
(("*", "Unknown projection"),
(("scripts/addons/sun_position/hdr.py:181",),
()),
("fr_FR", "Projection inconnue",
(False, ())),
),
(("*", "Show options and info:"),
(("scripts/addons/sun_position/properties.py:297",),
()),
@ -344,7 +344,9 @@ class UI_OT_i18n_addon_translation_export(Operator):
if not lng.use:
print("Skipping {} language ({}).".format(lng.name, lng.uid))
continue
uid = utils_i18n.find_best_isocode_matches(lng.uid, trans.trans.keys())
translation_keys = {k for k in trans.trans.keys()
if k != self.settings.PARSER_TEMPLATE_ID}
uid = utils_i18n.find_best_isocode_matches(lng.uid, translation_keys)
if uid:
uids.append(uid[0])

@ -357,8 +359,8 @@
if not os.path.isfile(path):
continue
msgs = utils_i18n.I18nMessages(kind='PO', src=path, settings=self.settings)
msgs.update(trans.msgs[self.settings.PARSER_TEMPLATE_ID])
trans.msgs[uid] = msgs
msgs.update(trans.trans[self.settings.PARSER_TEMPLATE_ID])
trans.trans[uid] = msgs

trans.write(kind='PO', langs=set(uids))