WIP: MaterialX addon #104594
@ -3,7 +3,7 @@
|
||||
bl_info = {
|
||||
"name": "Add Camera Rigs",
|
||||
"author": "Wayne Dixon, Brian Raschko, Kris Wittig, Damien Picard, Flavio Perez",
|
||||
"version": (1, 5, 0),
|
||||
"version": (1, 5, 1),
|
||||
"blender": (3, 3, 0),
|
||||
"location": "View3D > Add > Camera > Dolly or Crane Rig",
|
||||
"description": "Adds a Camera Rig with UI",
|
||||
|
@ -82,7 +82,9 @@ class ADD_CAMERA_RIGS_OT_set_dof_bone(Operator, CameraRigMixin):
|
||||
rig, cam = get_rig_and_cam(context.active_object)
|
||||
|
||||
cam.data.dof.focus_object = rig
|
||||
cam.data.dof.focus_subtarget = 'Aim_shape_rotation-MCH'
|
||||
cam.data.dof.focus_subtarget = (
|
||||
'Center-MCH' if rig["rig_id"].lower() == '2d_rig'
|
||||
else 'Aim_shape_rotation-MCH')
|
||||
|
||||
return {'FINISHED'}
|
||||
|
||||
|
@ -809,7 +809,7 @@ def generateRocks(context, scaleX, skewX, scaleY, skewY, scaleZ, skewZ,
|
||||
rocks = []
|
||||
|
||||
for i in range(numOfRocks):
|
||||
# todo: enable different random values for each (x,y,z) corrdinate for
|
||||
# todo: enable different random values for each (x,y,z) coordinate for
|
||||
# each vertex. This will add additional randomness to the shape of the
|
||||
# generated rocks.
|
||||
# *** todo completed 4/19/2011 ***
|
||||
|
@ -76,7 +76,7 @@ except:
|
||||
# a continuous distribution curve but instead acts as a piecewise finction.
|
||||
# This linearly scales the output on one side to fit the bounds.
|
||||
#
|
||||
# Example output historgrams:
|
||||
# Example output histograms:
|
||||
#
|
||||
# Upper skewed: Lower skewed:
|
||||
# | ▄ | _
|
||||
@ -92,7 +92,7 @@ except:
|
||||
# | _▄_ ▄███████████████▄_ | _▄███████████████▄▄_
|
||||
# ------------------------- -----------------------
|
||||
# |mu |mu
|
||||
# Historgrams were generated in R (http://www.r-project.org/) based on the
|
||||
# Histograms were generated in R (http://www.r-project.org/) based on the
|
||||
# calculations below and manually duplicated here.
|
||||
#
|
||||
# param: mu - mu is the mean of the distribution.
|
||||
|
@ -99,7 +99,7 @@ def supertoroid(R, r, u, v, n1, n2):
|
||||
# x = (cos(theta) ** n1)*(R + r * (cos(phi) ** n2))
|
||||
# y = (sin(theta) ** n1)*(R + r * (cos(phi) ** n2))
|
||||
# z = (r * sin(phi) ** n2)
|
||||
# with theta and phi rangeing from 0 to 2pi
|
||||
# with theta and phi ranging from 0 to 2pi
|
||||
|
||||
for i in range(u):
|
||||
s = power(sin(i * a), n1)
|
||||
|
@ -41,8 +41,6 @@ from amaranth.node_editor import (
|
||||
simplify_nodes,
|
||||
node_stats,
|
||||
normal_node,
|
||||
switch_material,
|
||||
node_shader_extra,
|
||||
)
|
||||
|
||||
from amaranth.render import (
|
||||
@ -74,7 +72,7 @@ from amaranth.misc import (
|
||||
bl_info = {
|
||||
"name": "Amaranth Toolset",
|
||||
"author": "Pablo Vazquez, Bassam Kurdali, Sergey Sharybin, Lukas Tönne, Cesar Saez, CansecoGPC",
|
||||
"version": (1, 0, 14),
|
||||
"version": (1, 0, 17),
|
||||
"blender": (3, 2, 0),
|
||||
"location": "Everywhere!",
|
||||
"description": "A collection of tools and settings to improve productivity",
|
||||
|
@ -16,7 +16,7 @@ Find it on the User Preferences, Editing.
|
||||
"""
|
||||
|
||||
import bpy
|
||||
from bpy.types import Operator
|
||||
from bpy.types import Operator, Panel
|
||||
from bpy.props import BoolProperty
|
||||
|
||||
KEYMAPS = list()
|
||||
@ -129,40 +129,35 @@ class AMTH_SCREEN_OT_frame_jump(Operator):
|
||||
return {"FINISHED"}
|
||||
|
||||
|
||||
def ui_userpreferences_edit(self, context):
|
||||
get_addon = "amaranth" in context.preferences.addons.keys()
|
||||
if not get_addon:
|
||||
return
|
||||
class AMTH_USERPREF_PT_animation(Panel):
|
||||
bl_space_type = 'PREFERENCES'
|
||||
bl_region_type = 'WINDOW'
|
||||
bl_label = "Jump Keyframes"
|
||||
bl_parent_id = "USERPREF_PT_animation_keyframes"
|
||||
|
||||
def draw(self, context):
|
||||
preferences = context.preferences.addons["amaranth"].preferences
|
||||
|
||||
col = self.layout.column()
|
||||
split = col.split(factor=0.21)
|
||||
split.prop(preferences, "frames_jump",
|
||||
text="Frames to Jump")
|
||||
|
||||
|
||||
def label(self, context):
|
||||
get_addon = "amaranth" in context.preferences.addons.keys()
|
||||
if not get_addon:
|
||||
return
|
||||
|
||||
layout = self.layout
|
||||
|
||||
if context.preferences.addons["amaranth"].preferences.use_timeline_extra_info:
|
||||
row = layout.row(align=True)
|
||||
col = layout.column()
|
||||
row = col.row()
|
||||
row.label(text="Frames to Jump")
|
||||
row.prop(preferences, "frames_jump", text="")
|
||||
|
||||
col = layout.column()
|
||||
row = col.row()
|
||||
row.label(text="Jump Operators")
|
||||
row.operator(AMTH_SCREEN_OT_keyframe_jump_inbetween.bl_idname,
|
||||
icon="PREV_KEYFRAME", text="").backwards = True
|
||||
icon="PREV_KEYFRAME", text="Jump to Previous").backwards = True
|
||||
row.operator(AMTH_SCREEN_OT_keyframe_jump_inbetween.bl_idname,
|
||||
icon="NEXT_KEYFRAME", text="").backwards = False
|
||||
icon="NEXT_KEYFRAME", text="Jump to Next").backwards = False
|
||||
|
||||
|
||||
def register():
|
||||
bpy.utils.register_class(AMTH_USERPREF_PT_animation)
|
||||
bpy.utils.register_class(AMTH_SCREEN_OT_frame_jump)
|
||||
bpy.utils.register_class(AMTH_SCREEN_OT_keyframe_jump_inbetween)
|
||||
bpy.types.USERPREF_PT_animation_timeline.append(ui_userpreferences_edit)
|
||||
bpy.types.USERPREF_PT_animation_timeline.append(label)
|
||||
|
||||
# register keyboard shortcuts
|
||||
wm = bpy.context.window_manager
|
||||
@ -189,9 +184,10 @@ def register():
|
||||
|
||||
|
||||
def unregister():
|
||||
bpy.utils.unregister_class(AMTH_USERPREF_PT_animation)
|
||||
bpy.utils.unregister_class(AMTH_SCREEN_OT_frame_jump)
|
||||
bpy.utils.unregister_class(AMTH_SCREEN_OT_keyframe_jump_inbetween)
|
||||
bpy.types.USERPREF_PT_animation_timeline.remove(ui_userpreferences_edit)
|
||||
|
||||
for km, kmi in KEYMAPS:
|
||||
km.keymap_items.remove(kmi)
|
||||
KEYMAPS.clear()
|
||||
|
@ -2,6 +2,41 @@
|
||||
|
||||
import bpy
|
||||
|
||||
class AMTH_VIEW3D_PT_wire_toggle(bpy.types.Panel):
|
||||
bl_space_type = 'VIEW_3D'
|
||||
bl_region_type = 'UI'
|
||||
bl_category = "View"
|
||||
bl_label = "Wireframes "
|
||||
bl_parent_id = "VIEW3D_PT_view3d_properties"
|
||||
|
||||
def draw(self, context):
|
||||
layout = self.layout
|
||||
|
||||
layout.use_property_split = True
|
||||
layout.use_property_decorate = False # No animation.
|
||||
|
||||
scene = context.scene
|
||||
|
||||
row = layout.row(align=True)
|
||||
row.operator(AMTH_OBJECT_OT_wire_toggle.bl_idname,
|
||||
icon="MOD_WIREFRAME", text="Display").clear = False
|
||||
row.operator(AMTH_OBJECT_OT_wire_toggle.bl_idname,
|
||||
icon="X", text="Clear").clear = True
|
||||
|
||||
layout.separator()
|
||||
|
||||
col = layout.column(heading="Display", align=True)
|
||||
col.prop(scene, "amth_wire_toggle_edges_all")
|
||||
col.prop(scene, "amth_wire_toggle_optimal")
|
||||
|
||||
col = layout.column(heading="Apply to", align=True)
|
||||
sub = col.row(align=True)
|
||||
sub.active = not scene.amth_wire_toggle_scene_all
|
||||
sub.prop(scene, "amth_wire_toggle_is_selected")
|
||||
sub = col.row(align=True)
|
||||
sub.active = not scene.amth_wire_toggle_is_selected
|
||||
sub.prop(scene, "amth_wire_toggle_scene_all")
|
||||
|
||||
|
||||
# FEATURE: Toggle Wire Display
|
||||
class AMTH_OBJECT_OT_wire_toggle(bpy.types.Operator):
|
||||
@ -49,31 +84,6 @@ class AMTH_OBJECT_OT_wire_toggle(bpy.types.Operator):
|
||||
return {"FINISHED"}
|
||||
|
||||
|
||||
def ui_object_wire_toggle(self, context):
|
||||
|
||||
scene = context.scene
|
||||
|
||||
self.layout.separator()
|
||||
col = self.layout.column(align=True)
|
||||
col.label(text="Wireframes:")
|
||||
row = col.row(align=True)
|
||||
row.operator(AMTH_OBJECT_OT_wire_toggle.bl_idname,
|
||||
icon="MOD_WIREFRAME", text="Display").clear = False
|
||||
row.operator(AMTH_OBJECT_OT_wire_toggle.bl_idname,
|
||||
icon="X", text="Clear").clear = True
|
||||
col.separator()
|
||||
row = col.row(align=True)
|
||||
row.prop(scene, "amth_wire_toggle_edges_all")
|
||||
row.prop(scene, "amth_wire_toggle_optimal")
|
||||
row = col.row(align=True)
|
||||
sub = row.row(align=True)
|
||||
sub.active = not scene.amth_wire_toggle_scene_all
|
||||
sub.prop(scene, "amth_wire_toggle_is_selected")
|
||||
sub = row.row(align=True)
|
||||
sub.active = not scene.amth_wire_toggle_is_selected
|
||||
sub.prop(scene, "amth_wire_toggle_scene_all")
|
||||
|
||||
|
||||
def init_properties():
|
||||
scene = bpy.types.Scene
|
||||
scene.amth_wire_toggle_scene_all = bpy.props.BoolProperty(
|
||||
@ -90,7 +100,7 @@ def init_properties():
|
||||
description="Draw all the edges even on coplanar faces")
|
||||
scene.amth_wire_toggle_optimal = bpy.props.BoolProperty(
|
||||
default=False,
|
||||
name="Subsurf Optimal Display",
|
||||
name="Optimal Display Subdivision",
|
||||
description="Skip drawing/rendering of interior subdivided edges "
|
||||
"on meshes with Subdivision Surface modifier")
|
||||
|
||||
@ -109,14 +119,21 @@ def clear_properties():
|
||||
|
||||
# //FEATURE: Toggle Wire Display
|
||||
|
||||
classes = (
|
||||
AMTH_VIEW3D_PT_wire_toggle,
|
||||
AMTH_OBJECT_OT_wire_toggle
|
||||
)
|
||||
|
||||
def register():
|
||||
init_properties()
|
||||
bpy.utils.register_class(AMTH_OBJECT_OT_wire_toggle)
|
||||
bpy.types.VIEW3D_PT_view3d_properties.append(ui_object_wire_toggle)
|
||||
|
||||
from bpy.utils import register_class
|
||||
for cls in classes:
|
||||
register_class(cls)
|
||||
|
||||
def unregister():
|
||||
bpy.utils.unregister_class(AMTH_OBJECT_OT_wire_toggle)
|
||||
bpy.types.VIEW3D_PT_view3d_properties.remove(ui_object_wire_toggle)
|
||||
clear_properties()
|
||||
|
||||
from bpy.utils import unregister_class
|
||||
for cls in classes:
|
||||
unregister_class(cls)
|
||||
|
@ -32,7 +32,7 @@ class AMTH_NODE_OT_show_active_node_image(bpy.types.Operator):
|
||||
|
||||
@classmethod
|
||||
def poll(cls, context):
|
||||
return context.active_node is not None
|
||||
return context.space_data == 'NODE_EDITOR' and context.active_node is not None
|
||||
|
||||
def execute(self, context):
|
||||
return {'FINISHED'}
|
||||
|
@ -1,28 +0,0 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
import bpy
|
||||
|
||||
|
||||
# FEATURE: Shader Nodes Extra Info
|
||||
def node_shader_extra(self, context):
|
||||
if context.space_data.tree_type == 'ShaderNodeTree':
|
||||
ob = context.active_object
|
||||
snode = context.space_data
|
||||
layout = self.layout
|
||||
|
||||
if ob and snode.shader_type == 'OBJECT':
|
||||
if ob.type == 'LAMP':
|
||||
layout.label(text="%s" % ob.name,
|
||||
icon="LAMP_%s" % ob.data.type)
|
||||
else:
|
||||
layout.label(text="%s" % ob.name,
|
||||
icon="OUTLINER_DATA_%s" % ob.type)
|
||||
|
||||
# // FEATURE: Shader Nodes Extra Info
|
||||
|
||||
|
||||
def register():
|
||||
bpy.types.NODE_HT_header.append(node_shader_extra)
|
||||
|
||||
|
||||
def unregister():
|
||||
bpy.types.NODE_HT_header.remove(node_shader_extra)
|
@ -1,56 +0,0 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
"""
|
||||
Material Selector
|
||||
|
||||
Quickly switch materials in the active mesh without going to the Properties editor
|
||||
|
||||
Based on 'Afeitadora's work on Elysiun
|
||||
http://www.elysiun.com/forum/showthread.php?290097-Dynamic-Object-Dropdown-List&p=2361851#post2361851
|
||||
|
||||
"""
|
||||
|
||||
import bpy
|
||||
|
||||
def ui_node_editor_material_select(self, context):
|
||||
|
||||
act_ob = context.active_object
|
||||
|
||||
if act_ob and context.active_object.type in {'MESH', 'CURVE', 'SURFACE', 'META'} and \
|
||||
context.space_data.tree_type == 'ShaderNodeTree' and \
|
||||
context.space_data.shader_type == 'OBJECT':
|
||||
|
||||
if act_ob.active_material:
|
||||
mat_name = act_ob.active_material.name
|
||||
else:
|
||||
mat_name = "No Material"
|
||||
|
||||
self.layout.operator_menu_enum("material.menu_select",
|
||||
"material_select",
|
||||
text=mat_name,
|
||||
icon="MATERIAL")
|
||||
|
||||
class AMNodeEditorMaterialSelect(bpy.types.Operator):
|
||||
bl_idname = "material.menu_select"
|
||||
bl_label = "Select Material"
|
||||
bl_description = "Switch to another material in this mesh"
|
||||
|
||||
def avail_materials(self,context):
|
||||
items = [(str(i),x.name,x.name, "MATERIAL", i) for i,x in enumerate(bpy.context.active_object.material_slots)]
|
||||
return items
|
||||
material_select: bpy.props.EnumProperty(items = avail_materials, name = "Available Materials")
|
||||
|
||||
@classmethod
|
||||
def poll(cls, context):
|
||||
return context.active_object
|
||||
|
||||
def execute(self,context):
|
||||
bpy.context.active_object.active_material_index = int(self.material_select)
|
||||
return {'FINISHED'}
|
||||
|
||||
def register():
|
||||
bpy.utils.register_class(AMNodeEditorMaterialSelect)
|
||||
bpy.types.NODE_HT_header.append(ui_node_editor_material_select)
|
||||
|
||||
def unregister():
|
||||
bpy.utils.unregister_class(AMNodeEditorMaterialSelect)
|
||||
bpy.types.NODE_HT_header.remove(ui_node_editor_material_select)
|
@ -18,7 +18,7 @@ class AMTH_VIEW3D_OT_render_border_camera(bpy.types.Operator):
|
||||
|
||||
@classmethod
|
||||
def poll(cls, context):
|
||||
return context.space_data.region_3d.view_perspective == "CAMERA"
|
||||
return context.space_data == 'VIEW_3D' and context.space_data.region_3d.view_perspective == "CAMERA"
|
||||
|
||||
def execute(self, context):
|
||||
render = context.scene.render
|
||||
|
@ -660,7 +660,7 @@ def draw_ant_water(self, context):
|
||||
col.prop(self, "water_level")
|
||||
|
||||
|
||||
# Store propereties
|
||||
# Store properties
|
||||
def store_properties(operator, ob):
|
||||
ob.ant_landscape.ant_terrain_name = operator.ant_terrain_name
|
||||
ob.ant_landscape.at_cursor = operator.at_cursor
|
||||
|
@ -134,7 +134,7 @@ class SvgExport(bpy.types.Operator, ExportHelper):
|
||||
svg_view = ('' if self.unit_name == '-' else 'width="{0:.3f}{2}" height="{1:.3f}{2}" ')+'viewBox="0 0 {0:.3f} {1:.3f}">\n'
|
||||
f.write('''<?xml version="1.0" standalone="no"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" '''+svg_view.format(self.bounds[0], self.bounds[1], self.unit_name))
|
||||
<svg xmlns="http://www.w3.org/2000/svg" fill-rule="evenodd" '''+svg_view.format(self.bounds[0], self.bounds[1], self.unit_name))
|
||||
for obj in curves:
|
||||
f.write(self.serialize_object(obj))
|
||||
f.write('</svg>')
|
||||
|
@ -5,8 +5,8 @@ bl_info = {
|
||||
"name": "Icon Viewer",
|
||||
"description": "Click an icon to copy its name to the clipboard",
|
||||
"author": "roaoao",
|
||||
"version": (1, 4, 0),
|
||||
"blender": (2, 80, 0),
|
||||
"version": (1, 4, 1),
|
||||
"blender": (3, 4, 0),
|
||||
"location": "Text Editor > Dev Tab > Icon Viewer",
|
||||
"doc_url": "{BLENDER_MANUAL_URL}/addons/development/icon_viewer.html",
|
||||
"category": "Development",
|
||||
@ -30,7 +30,7 @@ HISTORY = []
|
||||
|
||||
def ui_scale():
|
||||
prefs = bpy.context.preferences.system
|
||||
return prefs.dpi * prefs.pixel_size / DPI
|
||||
return prefs.dpi / DPI
|
||||
|
||||
|
||||
def prefs():
|
||||
|
@ -4,7 +4,7 @@ bl_info = {
|
||||
"name": "Grease Pencil Tools",
|
||||
"description": "Extra tools for Grease Pencil",
|
||||
"author": "Samuel Bernou, Antonio Vazquez, Daniel Martinez Lara, Matias Mendiola",
|
||||
"version": (1, 6, 1),
|
||||
"version": (1, 6, 2),
|
||||
"blender": (3, 0, 0),
|
||||
"location": "Sidebar > Grease Pencil > Grease Pencil Tools",
|
||||
"warning": "",
|
||||
|
@ -10,7 +10,8 @@ class GP_OT_camera_flip_x(bpy.types.Operator):
|
||||
|
||||
@classmethod
|
||||
def poll(cls, context):
|
||||
return context.space_data.region_3d.view_perspective == 'CAMERA'
|
||||
return context.area.type == 'VIEW_3D' \
|
||||
and context.space_data.region_3d.view_perspective == 'CAMERA'
|
||||
|
||||
def execute(self, context):
|
||||
context.scene.camera.scale.x *= -1
|
||||
|
@ -283,7 +283,8 @@ class RC_OT_Set_rotation(bpy.types.Operator):
|
||||
|
||||
@classmethod
|
||||
def poll(cls, context):
|
||||
return context.space_data.region_3d.view_perspective == 'CAMERA'
|
||||
return context.area.type == 'VIEW_3D' \
|
||||
and context.space_data.region_3d.view_perspective == 'CAMERA'
|
||||
|
||||
def execute(self, context):
|
||||
cam_ob = context.scene.camera
|
||||
@ -306,7 +307,9 @@ class RC_OT_Reset_rotation(bpy.types.Operator):
|
||||
|
||||
@classmethod
|
||||
def poll(cls, context):
|
||||
return context.space_data.region_3d.view_perspective == 'CAMERA' and context.scene.camera.get('stored_rotation')
|
||||
return context.area.type == 'VIEW_3D' \
|
||||
and context.space_data.region_3d.view_perspective == 'CAMERA' \
|
||||
and context.scene.camera.get('stored_rotation')
|
||||
|
||||
def execute(self, context):
|
||||
cam_ob = context.scene.camera
|
||||
|
@ -48,7 +48,7 @@ def draw_callback_px(self, context):
|
||||
self.batch_timeline.draw(shader)
|
||||
|
||||
# Display keyframes
|
||||
if self.use_hud_keyframes:
|
||||
if self.use_hud_keyframes and self.batch_keyframes:
|
||||
if self.keyframe_aspect == 'LINE':
|
||||
gpu.state.line_width_set(3.0)
|
||||
shader.bind()
|
||||
@ -302,8 +302,8 @@ class GPTS_OT_time_scrub(bpy.types.Operator):
|
||||
else:
|
||||
ui_key_pos = self.pos[:-2]
|
||||
|
||||
|
||||
# keyframe display
|
||||
self.batch_keyframes = None # init if there are no keyframe to draw
|
||||
if ui_key_pos:
|
||||
if self.keyframe_aspect == 'LINE':
|
||||
key_lines = []
|
||||
# Slice off position of start/end frame added last (in list for snapping)
|
||||
|
@ -101,7 +101,7 @@ def read_bvh(context, file_path, rotate_mode='XYZ', global_scale=1.0):
|
||||
|
||||
# Separate into a list of lists, each line a list of words.
|
||||
file_lines = file.readlines()
|
||||
# Non standard carrage returns?
|
||||
# Non standard carriage returns?
|
||||
if len(file_lines) == 1:
|
||||
file_lines = file_lines[0].split('\r')
|
||||
|
||||
@ -742,8 +742,8 @@ def _update_scene_fps(context, report, bvh_frame_time):
|
||||
|
||||
if scene.render.fps != new_fps or scene.render.fps_base != 1.0:
|
||||
print("\tupdating scene FPS (was %f) to BVH FPS (%f)" % (scene_fps, new_fps))
|
||||
scene.render.fps = new_fps
|
||||
scene.render.fps_base = 1.0
|
||||
scene.render.fps = int(round(new_fps))
|
||||
scene.render.fps_base = scene.render.fps / new_fps
|
||||
|
||||
|
||||
def _update_scene_duration(
|
||||
|
@ -77,7 +77,7 @@ def testi(objekti, texture_info, index_mat_name, uv_MODE_mat, mat_index):
|
||||
|
||||
def readtexturefolder(objekti, mat_list, texturelist, is_new, udim_textures, udim_len): #read textures from texture file
|
||||
|
||||
# Let's check are we UVSet or MATERIAL modee
|
||||
# Let's check are we UVSet or MATERIAL mode
|
||||
create_nodes = False
|
||||
for ind, index_mat in enumerate(objekti.material_slots):
|
||||
|
||||
@ -638,7 +638,7 @@ def createExtraNodes(act_material, node, type):
|
||||
|
||||
def matlab(objekti,mat_list,texturelist,is_new):
|
||||
|
||||
# FBX Materials: remove all nodes and create princibles node
|
||||
# FBX Materials: remove all nodes and create principle node
|
||||
|
||||
if(is_new):
|
||||
RemoveFbxNodes(objekti)
|
||||
|
@ -132,7 +132,7 @@ def updatetextures(objekti): # Update 3DC textures
|
||||
|
||||
def readtexturefolder(objekti, mat_list, texturelist, is_new, udim_textures): #read textures from texture file
|
||||
|
||||
# Let's check are we UVSet or MATERIAL modee
|
||||
# Let's check are we UVSet or MATERIAL mode
|
||||
create_nodes = False
|
||||
for ind, index_mat in enumerate(objekti.material_slots):
|
||||
|
||||
@ -801,7 +801,7 @@ def matlab(objekti,mat_list,texturelist,is_new):
|
||||
|
||||
print('Welcome facture matlab function')
|
||||
|
||||
''' FBX Materials: remove all nodes and create princibles node'''
|
||||
''' FBX Materials: remove all nodes and create principle node'''
|
||||
if(is_new):
|
||||
RemoveFbxNodes(objekti)
|
||||
|
||||
|
@ -161,7 +161,7 @@ def SVGParseTransform(transform):
|
||||
|
||||
proc = SVGTransforms.get(func)
|
||||
if proc is None:
|
||||
raise Exception('Unknown trasnform function: ' + func)
|
||||
raise Exception('Unknown transform function: ' + func)
|
||||
|
||||
m = m @ proc(params)
|
||||
|
||||
@ -484,7 +484,7 @@ class SVGPathParser:
|
||||
"""
|
||||
|
||||
__slots__ = ('_data', # Path data supplird
|
||||
'_point', # Current point coorfinate
|
||||
'_point', # Current point coordinate
|
||||
'_handle', # Last handle coordinate
|
||||
'_splines', # List of all splies created during parsing
|
||||
'_spline', # Currently handling spline
|
||||
@ -870,6 +870,14 @@ class SVGPathParser:
|
||||
cv = self._spline['points'][0]
|
||||
self._point = (cv['x'], cv['y'])
|
||||
|
||||
def _pathCloseImplicitly(self):
|
||||
"""
|
||||
Close path implicitly without changing current point coordinate
|
||||
"""
|
||||
|
||||
if self._spline:
|
||||
self._spline['closed'] = True
|
||||
|
||||
def parse(self):
|
||||
"""
|
||||
Execute parser
|
||||
@ -884,14 +892,17 @@ class SVGPathParser:
|
||||
if cmd is None:
|
||||
raise Exception('Unknown path command: {0}' . format(code))
|
||||
|
||||
if cmd in {'Z', 'z'}:
|
||||
if code in {'Z', 'z'}:
|
||||
closed = True
|
||||
else:
|
||||
closed = False
|
||||
|
||||
if code in {'M', 'm'} and self._use_fill and not closed:
|
||||
self._pathCloseImplicitly() # Ensure closed before MoveTo path command
|
||||
|
||||
cmd(code)
|
||||
if self._use_fill and not closed:
|
||||
self._pathClose('z')
|
||||
self._pathCloseImplicitly() # Ensure closed at the end of parsing
|
||||
|
||||
def getSplines(self):
|
||||
"""
|
||||
|
@ -5,7 +5,7 @@ bl_info = {
|
||||
"author": "Florian Meyer (tstscr), mont29, matali, Ted Schundler (SpkyElctrc), mrbimax",
|
||||
"version": (3, 5, 0),
|
||||
"blender": (2, 91, 0),
|
||||
"location": "File > Import > Images as Planes or Add > Mesh > Images as Planes",
|
||||
"location": "File > Import > Images as Planes or Add > Image > Images as Planes",
|
||||
"description": "Imports images and creates planes with the appropriate aspect ratio. "
|
||||
"The images are mapped to the planes.",
|
||||
"warning": "",
|
||||
|
@ -3,9 +3,9 @@
|
||||
bl_info = {
|
||||
"name": "UV Layout",
|
||||
"author": "Campbell Barton, Matt Ebb",
|
||||
"version": (1, 1, 3),
|
||||
"version": (1, 1, 5),
|
||||
"blender": (3, 0, 0),
|
||||
"location": "Image-Window > UVs > Export UV Layout",
|
||||
"location": "UV Editor > UV > Export UV Layout",
|
||||
"description": "Export the UV layout as a 2D graphic",
|
||||
"warning": "",
|
||||
"doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/mesh_uv_layout.html",
|
||||
|
@ -25,7 +25,7 @@ def export(filepath, face_data, colors, width, height, opacity):
|
||||
|
||||
|
||||
def draw_image(face_data, opacity):
|
||||
gpu.state.blend_set('ALPHA_PREMULT')
|
||||
gpu.state.blend_set('ALPHA')
|
||||
|
||||
with gpu.matrix.push_pop():
|
||||
gpu.matrix.load_matrix(get_normalize_uvs_matrix())
|
||||
@ -80,15 +80,12 @@ def draw_lines(face_data):
|
||||
coords.append((start[0], start[1]))
|
||||
coords.append((end[0], end[1]))
|
||||
|
||||
# Use '2D_UNIFORM_COLOR' in the `batch_for_shader` so we don't need to
|
||||
# convert the coordinates to 3D as in the case of
|
||||
# '3D_POLYLINE_UNIFORM_COLOR'.
|
||||
batch = batch_for_shader(gpu.shader.from_builtin('2D_UNIFORM_COLOR'), 'LINES', {"pos": coords})
|
||||
shader = gpu.shader.from_builtin('3D_POLYLINE_UNIFORM_COLOR')
|
||||
shader.bind()
|
||||
shader = gpu.shader.from_builtin('POLYLINE_UNIFORM_COLOR')
|
||||
shader.uniform_float("viewportSize", gpu.state.viewport_get()[2:])
|
||||
shader.uniform_float("lineWidth", 0.5)
|
||||
shader.uniform_float("color", (0, 0, 0, 1))
|
||||
shader.uniform_float("lineWidth", 1.0)
|
||||
shader.uniform_float("color", (0.0, 0.0, 0.0, 1.0))
|
||||
|
||||
batch = batch_for_shader(shader, 'LINES', {"pos": coords})
|
||||
batch.draw(shader)
|
||||
|
||||
|
||||
|
@ -3,8 +3,8 @@
|
||||
bl_info = {
|
||||
"name": "FBX format",
|
||||
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier",
|
||||
"version": (4, 36, 3),
|
||||
"blender": (3, 2, 0),
|
||||
"version": (4, 37, 1),
|
||||
"blender": (3, 4, 0),
|
||||
"location": "File > Import-Export",
|
||||
"description": "FBX IO meshes, UV's, vertex colors, materials, textures, cameras, lamps and actions",
|
||||
"warning": "",
|
||||
@ -89,6 +89,15 @@ class ImportFBX(bpy.types.Operator, ImportHelper):
|
||||
description="Import custom normals, if available (otherwise Blender will recompute them)",
|
||||
default=True,
|
||||
)
|
||||
colors_type: EnumProperty(
|
||||
name="Vertex Colors",
|
||||
items=(('NONE', "None", "Do not import color attributes"),
|
||||
('SRGB', "sRGB", "Expect file colors in sRGB color space"),
|
||||
('LINEAR', "Linear", "Expect file colors in linear color space"),
|
||||
),
|
||||
description="Import vertex color attributes",
|
||||
default='SRGB',
|
||||
)
|
||||
|
||||
use_image_search: BoolProperty(
|
||||
name="Image Search",
|
||||
@ -230,6 +239,7 @@ class FBX_PT_import_include(bpy.types.Panel):
|
||||
sub.enabled = operator.use_custom_props
|
||||
sub.prop(operator, "use_custom_props_enum_as_string")
|
||||
layout.prop(operator, "use_image_search")
|
||||
layout.prop(operator, "colors_type")
|
||||
|
||||
|
||||
class FBX_PT_import_transform(bpy.types.Panel):
|
||||
@ -463,6 +473,15 @@ class ExportFBX(bpy.types.Operator, ExportHelper):
|
||||
"(prefer 'Normals Only' option if your target importer understand split normals)",
|
||||
default='OFF',
|
||||
)
|
||||
colors_type: EnumProperty(
|
||||
name="Vertex Colors",
|
||||
items=(('NONE', "None", "Do not export color attributes"),
|
||||
('SRGB', "sRGB", "Export colors in sRGB color space"),
|
||||
('LINEAR', "Linear", "Export colors in linear color space"),
|
||||
),
|
||||
description="Export vertex color attributes",
|
||||
default='SRGB',
|
||||
)
|
||||
use_subsurf: BoolProperty(
|
||||
name="Export Subdivision Surface",
|
||||
description="Export the last Catmull-Rom subdivision modifier as FBX subdivision "
|
||||
@ -767,6 +786,7 @@ class FBX_PT_export_geometry(bpy.types.Panel):
|
||||
sub = layout.row()
|
||||
#~ sub.enabled = operator.mesh_smooth_type in {'OFF'}
|
||||
sub.prop(operator, "use_tspace")
|
||||
layout.prop(operator, "colors_type")
|
||||
|
||||
|
||||
class FBX_PT_export_armature(bpy.types.Panel):
|
||||
|
@ -1111,14 +1111,20 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
|
||||
me.free_normals_split()
|
||||
|
||||
# Write VertexColor Layers.
|
||||
vcolnumber = len(me.vertex_colors)
|
||||
colors_type = scene_data.settings.colors_type
|
||||
vcolnumber = 0 if colors_type == 'NONE' else len(me.color_attributes)
|
||||
if vcolnumber:
|
||||
def _coltuples_gen(raw_cols):
|
||||
return zip(*(iter(raw_cols),) * 4)
|
||||
|
||||
t_lc = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * len(me.loops) * 4
|
||||
for colindex, collayer in enumerate(me.vertex_colors):
|
||||
collayer.data.foreach_get("color", t_lc)
|
||||
color_prop_name = "color_srgb" if colors_type == 'SRGB' else "color"
|
||||
|
||||
for colindex, collayer in enumerate(me.color_attributes):
|
||||
is_point = collayer.domain == "POINT"
|
||||
vcollen = len(me.vertices if is_point else me.loops)
|
||||
t_lc = array.array(data_types.ARRAY_FLOAT64, (0.0,)) * vcollen * 4
|
||||
collayer.data.foreach_get(color_prop_name, t_lc)
|
||||
|
||||
lay_vcol = elem_data_single_int32(geom, b"LayerElementColor", colindex)
|
||||
elem_data_single_int32(lay_vcol, b"Version", FBX_GEOMETRY_VCOLOR_VERSION)
|
||||
elem_data_single_string_unicode(lay_vcol, b"Name", collayer.name)
|
||||
@ -1129,7 +1135,14 @@ def fbx_data_mesh_elements(root, me_obj, scene_data, done_meshes):
|
||||
elem_data_single_float64_array(lay_vcol, b"Colors", chain(*col2idx)) # Flatten again...
|
||||
|
||||
col2idx = {col: idx for idx, col in enumerate(col2idx)}
|
||||
elem_data_single_int32_array(lay_vcol, b"ColorIndex", (col2idx[c] for c in _coltuples_gen(t_lc)))
|
||||
col_indices = list(col2idx[c] for c in _coltuples_gen(t_lc))
|
||||
if is_point:
|
||||
# for "point" domain colors, we could directly emit them
|
||||
# with a "ByVertex" mapping type, but some software does not
|
||||
# properly understand that. So expand to full "ByPolygonVertex"
|
||||
# index map.
|
||||
col_indices = list((col_indices[c.vertex_index] for c in me.loops))
|
||||
elem_data_single_int32_array(lay_vcol, b"ColorIndex", col_indices)
|
||||
del col2idx
|
||||
del t_lc
|
||||
del _coltuples_gen
|
||||
@ -2829,6 +2842,7 @@ def fbx_header_elements(root, scene_data, time=None):
|
||||
if similar_values(fps, ref_fps):
|
||||
fbx_fps = ref_fps
|
||||
fbx_fps_mode = fps_mode
|
||||
break
|
||||
elem_props_set(props, "p_enum", b"TimeMode", fbx_fps_mode)
|
||||
elem_props_set(props, "p_timestamp", b"TimeSpanStart", 0)
|
||||
elem_props_set(props, "p_timestamp", b"TimeSpanStop", FBX_KTIME)
|
||||
@ -3021,6 +3035,7 @@ def save_single(operator, scene, depsgraph, filepath="",
|
||||
use_custom_props=False,
|
||||
bake_space_transform=False,
|
||||
armature_nodetype='NULL',
|
||||
colors_type='SRGB',
|
||||
**kwargs
|
||||
):
|
||||
|
||||
@ -3088,7 +3103,7 @@ def save_single(operator, scene, depsgraph, filepath="",
|
||||
add_leaf_bones, bone_correction_matrix, bone_correction_matrix_inv,
|
||||
bake_anim, bake_anim_use_all_bones, bake_anim_use_nla_strips, bake_anim_use_all_actions,
|
||||
bake_anim_step, bake_anim_simplify_factor, bake_anim_force_startend_keying,
|
||||
False, media_settings, use_custom_props,
|
||||
False, media_settings, use_custom_props, colors_type,
|
||||
)
|
||||
|
||||
import bpy_extras.io_utils
|
||||
@ -3155,6 +3170,7 @@ def defaults_unity3d():
|
||||
"use_mesh_modifiers_render": True,
|
||||
"use_mesh_edges": False,
|
||||
"mesh_smooth_type": 'FACE',
|
||||
"colors_type": 'SRGB',
|
||||
"use_subsurf": False,
|
||||
"use_tspace": False, # XXX Why? Unity is expected to support tspace import...
|
||||
"use_triangles": False,
|
||||
|
@ -1220,7 +1220,7 @@ FBXExportSettings = namedtuple("FBXExportSettings", (
|
||||
"bone_correction_matrix", "bone_correction_matrix_inv",
|
||||
"bake_anim", "bake_anim_use_all_bones", "bake_anim_use_nla_strips", "bake_anim_use_all_actions",
|
||||
"bake_anim_step", "bake_anim_simplify_factor", "bake_anim_force_startend_keying",
|
||||
"use_metadata", "media_settings", "use_custom_props",
|
||||
"use_metadata", "media_settings", "use_custom_props", "colors_type",
|
||||
))
|
||||
|
||||
# Helper container gathering some data we need multiple times:
|
||||
@ -1249,5 +1249,5 @@ FBXImportSettings = namedtuple("FBXImportSettings", (
|
||||
"use_custom_props", "use_custom_props_enum_as_string",
|
||||
"nodal_material_wrap_map", "image_cache",
|
||||
"ignore_leaf_bones", "force_connect_children", "automatic_bone_orientation", "bone_correction_matrix",
|
||||
"use_prepost_rot",
|
||||
"use_prepost_rot", "colors_type",
|
||||
))
|
||||
|
@ -1061,7 +1061,12 @@ def blen_read_geom_layer_uv(fbx_obj, mesh):
|
||||
)
|
||||
|
||||
|
||||
def blen_read_geom_layer_color(fbx_obj, mesh):
|
||||
def blen_read_geom_layer_color(fbx_obj, mesh, colors_type):
|
||||
if colors_type == 'NONE':
|
||||
return
|
||||
use_srgb = colors_type == 'SRGB'
|
||||
layer_type = 'BYTE_COLOR' if use_srgb else 'FLOAT_COLOR'
|
||||
color_prop_name = "color_srgb" if use_srgb else "color"
|
||||
# almost same as UV's
|
||||
for layer_id in (b'LayerElementColor',):
|
||||
for fbx_layer in elem_find_iter(fbx_obj, layer_id):
|
||||
@ -1074,8 +1079,7 @@ def blen_read_geom_layer_color(fbx_obj, mesh):
|
||||
fbx_layer_data = elem_prop_first(elem_find_first(fbx_layer, b'Colors'))
|
||||
fbx_layer_index = elem_prop_first(elem_find_first(fbx_layer, b'ColorIndex'))
|
||||
|
||||
# Always init our new layers with full white opaque color.
|
||||
color_lay = mesh.vertex_colors.new(name=fbx_layer_name, do_init=False)
|
||||
color_lay = mesh.color_attributes.new(name=fbx_layer_name, type=layer_type, domain='CORNER')
|
||||
|
||||
if color_lay is None:
|
||||
print("Failed to add {%r %r} vertex color layer to %r (probably too many of them?)"
|
||||
@ -1090,7 +1094,7 @@ def blen_read_geom_layer_color(fbx_obj, mesh):
|
||||
continue
|
||||
|
||||
blen_read_geom_array_mapped_polyloop(
|
||||
mesh, blen_data, "color",
|
||||
mesh, blen_data, color_prop_name,
|
||||
fbx_layer_data, fbx_layer_index,
|
||||
fbx_layer_mapping, fbx_layer_ref,
|
||||
4, 4, layer_id,
|
||||
@ -1289,7 +1293,7 @@ def blen_read_geom(fbx_tmpl, fbx_obj, settings):
|
||||
|
||||
blen_read_geom_layer_material(fbx_obj, mesh)
|
||||
blen_read_geom_layer_uv(fbx_obj, mesh)
|
||||
blen_read_geom_layer_color(fbx_obj, mesh)
|
||||
blen_read_geom_layer_color(fbx_obj, mesh, settings.colors_type)
|
||||
|
||||
if fbx_edges:
|
||||
# edges in fact index the polygons (NOT the vertices)
|
||||
@ -2365,7 +2369,8 @@ def load(operator, context, filepath="",
|
||||
automatic_bone_orientation=False,
|
||||
primary_bone_axis='Y',
|
||||
secondary_bone_axis='X',
|
||||
use_prepost_rot=True):
|
||||
use_prepost_rot=True,
|
||||
colors_type='SRGB'):
|
||||
|
||||
global fbx_elem_nil
|
||||
fbx_elem_nil = FBXElem('', (), (), ())
|
||||
@ -2504,7 +2509,7 @@ def load(operator, context, filepath="",
|
||||
use_custom_props, use_custom_props_enum_as_string,
|
||||
nodal_material_wrap_map, image_cache,
|
||||
ignore_leaf_bones, force_connect_children, automatic_bone_orientation, bone_correction_matrix,
|
||||
use_prepost_rot,
|
||||
use_prepost_rot, colors_type,
|
||||
)
|
||||
|
||||
# #### And now, the "real" data.
|
||||
|
@ -4,7 +4,7 @@
|
||||
bl_info = {
|
||||
'name': 'glTF 2.0 format',
|
||||
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
|
||||
"version": (3, 4, 10),
|
||||
"version": (3, 5, 0),
|
||||
'blender': (3, 3, 0),
|
||||
'location': 'File > Import-Export',
|
||||
'description': 'Import-Export as glTF 2.0',
|
||||
@ -98,7 +98,21 @@ def on_export_format_changed(self, context):
|
||||
)
|
||||
|
||||
|
||||
class ExportGLTF2_Base:
|
||||
class ConvertGLTF2_Base:
|
||||
"""Base class containing options that should be exposed during both import and export."""
|
||||
|
||||
convert_lighting_mode: EnumProperty(
|
||||
name='Lighting Mode',
|
||||
items=(
|
||||
('SPEC', 'Standard', 'Physically-based glTF lighting units (cd, lx, nt)'),
|
||||
('COMPAT', 'Unitless', 'Non-physical, unitless lighting. Useful when exposure controls are not available'),
|
||||
('RAW', 'Raw (Deprecated)', 'Blender lighting strengths with no conversion'),
|
||||
),
|
||||
description='Optional backwards compatibility for non-standard render engines. Applies to lights',# TODO: and emissive materials',
|
||||
default='SPEC'
|
||||
)
|
||||
|
||||
class ExportGLTF2_Base(ConvertGLTF2_Base):
|
||||
# TODO: refactor to avoid boilerplate
|
||||
|
||||
def __init__(self):
|
||||
@ -273,6 +287,12 @@ class ExportGLTF2_Base:
|
||||
default=True
|
||||
)
|
||||
|
||||
export_attributes: BoolProperty(
|
||||
name='Attributes',
|
||||
description='Export Attributes',
|
||||
default=False
|
||||
)
|
||||
|
||||
use_mesh_edges: BoolProperty(
|
||||
name='Loose Edges',
|
||||
description=(
|
||||
@ -313,6 +333,12 @@ class ExportGLTF2_Base:
|
||||
default=False
|
||||
)
|
||||
|
||||
use_active_collection_with_nested: BoolProperty(
|
||||
name='Include Nested Collections',
|
||||
description='Include active collection and nested collections',
|
||||
default=True
|
||||
)
|
||||
|
||||
use_active_collection: BoolProperty(
|
||||
name='Active Collection',
|
||||
description='Export objects in the active collection only',
|
||||
@ -394,7 +420,7 @@ class ExportGLTF2_Base:
|
||||
default=False
|
||||
)
|
||||
|
||||
optimize_animation_size: BoolProperty(
|
||||
export_optimize_animation_size: BoolProperty(
|
||||
name='Optimize Animation Size',
|
||||
description=(
|
||||
"Reduce exported file-size by removing duplicate keyframes"
|
||||
@ -412,6 +438,15 @@ class ExportGLTF2_Base:
|
||||
default=True
|
||||
)
|
||||
|
||||
export_reset_pose_bones: BoolProperty(
|
||||
name='Reset pose bones between actions',
|
||||
description=(
|
||||
"Reset pose bones between each action exported. "
|
||||
"This is needed when some bones are not keyed on some animations"
|
||||
),
|
||||
default=True
|
||||
)
|
||||
|
||||
export_current_frame: BoolProperty(
|
||||
name='Use Current Frame',
|
||||
description='Export the scene in the current animation frame',
|
||||
@ -506,6 +541,7 @@ class ExportGLTF2_Base:
|
||||
'use_selection',
|
||||
'use_visible',
|
||||
'use_renderable',
|
||||
'use_active_collection_with_nested',
|
||||
'use_active_collection',
|
||||
'use_mesh_edges',
|
||||
'use_mesh_vertices',
|
||||
@ -563,13 +599,19 @@ class ExportGLTF2_Base:
|
||||
|
||||
export_settings['gltf_materials'] = self.export_materials
|
||||
export_settings['gltf_colors'] = self.export_colors
|
||||
export_settings['gltf_attributes'] = self.export_attributes
|
||||
export_settings['gltf_cameras'] = self.export_cameras
|
||||
|
||||
export_settings['gltf_original_specular'] = self.export_original_specular
|
||||
|
||||
export_settings['gltf_visible'] = self.use_visible
|
||||
export_settings['gltf_renderable'] = self.use_renderable
|
||||
|
||||
export_settings['gltf_active_collection'] = self.use_active_collection
|
||||
if self.use_active_collection:
|
||||
export_settings['gltf_active_collection_with_nested'] = self.use_active_collection_with_nested
|
||||
else:
|
||||
export_settings['gltf_active_collection_with_nested'] = False
|
||||
export_settings['gltf_active_scene'] = self.use_active_scene
|
||||
|
||||
export_settings['gltf_selected'] = self.use_selection
|
||||
@ -587,14 +629,16 @@ class ExportGLTF2_Base:
|
||||
export_settings['gltf_def_bones'] = False
|
||||
export_settings['gltf_nla_strips'] = self.export_nla_strips
|
||||
export_settings['gltf_nla_strips_merged_animation_name'] = self.export_nla_strips_merged_animation_name
|
||||
export_settings['gltf_optimize_animation'] = self.optimize_animation_size
|
||||
export_settings['gltf_optimize_animation'] = self.export_optimize_animation_size
|
||||
export_settings['gltf_export_anim_single_armature'] = self.export_anim_single_armature
|
||||
export_settings['gltf_export_reset_pose_bones'] = self.export_reset_pose_bones
|
||||
else:
|
||||
export_settings['gltf_frame_range'] = False
|
||||
export_settings['gltf_move_keyframes'] = False
|
||||
export_settings['gltf_force_sampling'] = False
|
||||
export_settings['gltf_optimize_animation'] = False
|
||||
export_settings['gltf_export_anim_single_armature'] = False
|
||||
export_settings['gltf_export_reset_pose_bones'] = False
|
||||
export_settings['gltf_skins'] = self.export_skins
|
||||
if self.export_skins:
|
||||
export_settings['gltf_all_vertex_influences'] = self.export_all_influences
|
||||
@ -613,6 +657,7 @@ class ExportGLTF2_Base:
|
||||
export_settings['gltf_morph_tangent'] = False
|
||||
|
||||
export_settings['gltf_lights'] = self.export_lights
|
||||
export_settings['gltf_lighting_mode'] = self.convert_lighting_mode
|
||||
|
||||
export_settings['gltf_binary'] = bytearray()
|
||||
export_settings['gltf_binaryfilename'] = (
|
||||
@ -710,6 +755,8 @@ class GLTF_PT_export_include(bpy.types.Panel):
|
||||
col.prop(operator, 'use_visible')
|
||||
col.prop(operator, 'use_renderable')
|
||||
col.prop(operator, 'use_active_collection')
|
||||
if operator.use_active_collection:
|
||||
col.prop(operator, 'use_active_collection_with_nested')
|
||||
col.prop(operator, 'use_active_scene')
|
||||
|
||||
col = layout.column(heading = "Data", align = True)
|
||||
@ -746,7 +793,7 @@ class GLTF_PT_export_transform(bpy.types.Panel):
|
||||
class GLTF_PT_export_geometry(bpy.types.Panel):
|
||||
bl_space_type = 'FILE_BROWSER'
|
||||
bl_region_type = 'TOOL_PROPS'
|
||||
bl_label = "Geometry"
|
||||
bl_label = "Data"
|
||||
bl_parent_id = "FILE_PT_operator"
|
||||
bl_options = {'DEFAULT_CLOSED'}
|
||||
|
||||
@ -788,6 +835,7 @@ class GLTF_PT_export_geometry_mesh(bpy.types.Panel):
|
||||
col.active = operator.export_normals
|
||||
col.prop(operator, 'export_tangents')
|
||||
layout.prop(operator, 'export_colors')
|
||||
layout.prop(operator, 'export_attributes')
|
||||
|
||||
col = layout.column()
|
||||
col.prop(operator, 'use_mesh_edges')
|
||||
@ -843,6 +891,28 @@ class GLTF_PT_export_geometry_original_pbr(bpy.types.Panel):
|
||||
|
||||
layout.prop(operator, 'export_original_specular')
|
||||
|
||||
class GLTF_PT_export_geometry_lighting(bpy.types.Panel):
|
||||
bl_space_type = 'FILE_BROWSER'
|
||||
bl_region_type = 'TOOL_PROPS'
|
||||
bl_label = "Lighting"
|
||||
bl_parent_id = "GLTF_PT_export_geometry"
|
||||
bl_options = {'DEFAULT_CLOSED'}
|
||||
|
||||
@classmethod
|
||||
def poll(cls, context):
|
||||
sfile = context.space_data
|
||||
operator = sfile.active_operator
|
||||
return operator.bl_idname == "EXPORT_SCENE_OT_gltf"
|
||||
|
||||
def draw(self, context):
|
||||
layout = self.layout
|
||||
layout.use_property_split = True
|
||||
layout.use_property_decorate = False # No animation.
|
||||
|
||||
sfile = context.space_data
|
||||
operator = sfile.active_operator
|
||||
|
||||
layout.prop(operator, 'convert_lighting_mode')
|
||||
|
||||
class GLTF_PT_export_geometry_compression(bpy.types.Panel):
|
||||
bl_space_type = 'FILE_BROWSER'
|
||||
@ -946,8 +1016,9 @@ class GLTF_PT_export_animation_export(bpy.types.Panel):
|
||||
layout.prop(operator, 'export_nla_strips')
|
||||
if operator.export_nla_strips is False:
|
||||
layout.prop(operator, 'export_nla_strips_merged_animation_name')
|
||||
layout.prop(operator, 'optimize_animation_size')
|
||||
layout.prop(operator, 'export_optimize_animation_size')
|
||||
layout.prop(operator, 'export_anim_single_armature')
|
||||
layout.prop(operator, 'export_reset_pose_bones')
|
||||
|
||||
|
||||
class GLTF_PT_export_animation_shapekeys(bpy.types.Panel):
|
||||
@ -1072,7 +1143,7 @@ def menu_func_export(self, context):
|
||||
self.layout.operator(ExportGLTF2.bl_idname, text='glTF 2.0 (.glb/.gltf)')
|
||||
|
||||
|
||||
class ImportGLTF2(Operator, ImportHelper):
|
||||
class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
|
||||
"""Load a glTF 2.0 file"""
|
||||
bl_idname = 'import_scene.gltf'
|
||||
bl_label = 'Import glTF 2.0'
|
||||
@ -1155,6 +1226,7 @@ class ImportGLTF2(Operator, ImportHelper):
|
||||
layout.prop(self, 'import_shading')
|
||||
layout.prop(self, 'guess_original_bind_pose')
|
||||
layout.prop(self, 'bone_heuristic')
|
||||
layout.prop(self, 'convert_lighting_mode')
|
||||
|
||||
def invoke(self, context, event):
|
||||
import sys
|
||||
@ -1286,6 +1358,7 @@ classes = (
|
||||
GLTF_PT_export_geometry_mesh,
|
||||
GLTF_PT_export_geometry_material,
|
||||
GLTF_PT_export_geometry_original_pbr,
|
||||
GLTF_PT_export_geometry_lighting,
|
||||
GLTF_PT_export_geometry_compression,
|
||||
GLTF_PT_export_animation,
|
||||
GLTF_PT_export_animation_export,
|
||||
|
@ -2,6 +2,11 @@
|
||||
# Copyright 2018-2021 The glTF-Blender-IO authors.
|
||||
|
||||
from math import sin, cos
|
||||
import numpy as np
|
||||
from io_scene_gltf2.io.com import gltf2_io_constants
|
||||
|
||||
PBR_WATTS_TO_LUMENS = 683
|
||||
# Industry convention, biological peak at 555nm, scientific standard as part of SI candela definition.
|
||||
|
||||
def texture_transform_blender_to_gltf(mapping_transform):
|
||||
"""
|
||||
@ -48,3 +53,55 @@ def get_target(property):
|
||||
"scale": "scale",
|
||||
"value": "weights"
|
||||
}.get(property)
|
||||
|
||||
def get_component_type(attribute_component_type):
|
||||
return {
|
||||
"INT8": gltf2_io_constants.ComponentType.Float,
|
||||
"BYTE_COLOR": gltf2_io_constants.ComponentType.UnsignedShort,
|
||||
"FLOAT2": gltf2_io_constants.ComponentType.Float,
|
||||
"FLOAT_COLOR": gltf2_io_constants.ComponentType.Float,
|
||||
"FLOAT_VECTOR": gltf2_io_constants.ComponentType.Float,
|
||||
"FLOAT_VECTOR_4": gltf2_io_constants.ComponentType.Float,
|
||||
"INT": gltf2_io_constants.ComponentType.Float, # No signed Int in glTF accessor
|
||||
"FLOAT": gltf2_io_constants.ComponentType.Float,
|
||||
"BOOLEAN": gltf2_io_constants.ComponentType.Float
|
||||
}.get(attribute_component_type)
|
||||
|
||||
def get_data_type(attribute_component_type):
|
||||
return {
|
||||
"INT8": gltf2_io_constants.DataType.Scalar,
|
||||
"BYTE_COLOR": gltf2_io_constants.DataType.Vec4,
|
||||
"FLOAT2": gltf2_io_constants.DataType.Vec2,
|
||||
"FLOAT_COLOR": gltf2_io_constants.DataType.Vec4,
|
||||
"FLOAT_VECTOR": gltf2_io_constants.DataType.Vec3,
|
||||
"FLOAT_VECTOR_4": gltf2_io_constants.DataType.Vec4,
|
||||
"INT": gltf2_io_constants.DataType.Scalar,
|
||||
"FLOAT": gltf2_io_constants.DataType.Scalar,
|
||||
"BOOLEAN": gltf2_io_constants.DataType.Scalar,
|
||||
}.get(attribute_component_type)
|
||||
|
||||
def get_data_length(attribute_component_type):
|
||||
return {
|
||||
"INT8": 1,
|
||||
"BYTE_COLOR": 4,
|
||||
"FLOAT2": 2,
|
||||
"FLOAT_COLOR": 4,
|
||||
"FLOAT_VECTOR": 3,
|
||||
"FLOAT_VECTOR_4": 4,
|
||||
"INT": 1,
|
||||
"FLOAT": 1,
|
||||
"BOOLEAN": 1
|
||||
}.get(attribute_component_type)
|
||||
|
||||
def get_numpy_type(attribute_component_type):
|
||||
return {
|
||||
"INT8": np.float32,
|
||||
"BYTE_COLOR": np.float32,
|
||||
"FLOAT2": np.float32,
|
||||
"FLOAT_COLOR": np.float32,
|
||||
"FLOAT_VECTOR": np.float32,
|
||||
"FLOAT_VECTOR_4": np.float32,
|
||||
"INT": np.float32, #signed integer are not supported by glTF
|
||||
"FLOAT": np.float32,
|
||||
"BOOLEAN": np.float32
|
||||
}.get(attribute_component_type)
|
||||
|
@ -15,20 +15,33 @@ def get_target_object_path(data_path: str) -> str:
|
||||
return ""
|
||||
return path_split[0]
|
||||
|
||||
def get_rotation_modes(target_property: str) -> str:
|
||||
def get_rotation_modes(target_property: str):
|
||||
"""Retrieve rotation modes based on target_property"""
|
||||
if target_property == "rotation_euler":
|
||||
return True, False, ["XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX"]
|
||||
return True, ["XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX"]
|
||||
elif target_property == "delta_rotation_euler":
|
||||
return True, True, ["XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX"]
|
||||
return True, ["XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX"]
|
||||
elif target_property == "rotation_quaternion":
|
||||
return True, False, ["QUATERNION"]
|
||||
return True, ["QUATERNION"]
|
||||
elif target_property == "delta_rotation_quaternion":
|
||||
return True, True, ["QUATERNION"]
|
||||
return True, ["QUATERNION"]
|
||||
elif target_property in ["rotation_axis_angle"]:
|
||||
return True, False, ["AXIS_ANGLE"]
|
||||
return True, ["AXIS_ANGLE"]
|
||||
else:
|
||||
return False, False, []
|
||||
return False, []
|
||||
|
||||
def is_location(target_property):
|
||||
return "location" in target_property
|
||||
|
||||
def is_rotation(target_property):
|
||||
return "rotation" in target_property
|
||||
|
||||
def is_scale(target_property):
|
||||
return "scale" in target_property
|
||||
|
||||
def get_delta_modes(target_property: str) -> str:
|
||||
"""Retrieve location based on target_property"""
|
||||
return target_property.startswith("delta_")
|
||||
|
||||
def is_bone_anim_channel(data_path: str) -> bool:
|
||||
return data_path[:10] == "pose.bones"
|
@ -4,3 +4,11 @@
|
||||
BLENDER_IOR = 1.45
|
||||
BLENDER_SPECULAR = 0.5
|
||||
BLENDER_SPECULAR_TINT = 0.0
|
||||
|
||||
|
||||
SPECIAL_ATTRIBUTES = {
|
||||
".select_vert",
|
||||
".select_edge",
|
||||
".select_poly",
|
||||
"material_index"
|
||||
}
|
||||
|
@ -17,6 +17,7 @@ APPLY = 'gltf_apply'
|
||||
SELECTED = 'gltf_selected'
|
||||
VISIBLE = 'gltf_visible'
|
||||
RENDERABLE = 'gltf_renderable'
|
||||
ACTIVE_COLLECTION_WITH_NESTED = 'gltf_active_collection_with_nested'
|
||||
ACTIVE_COLLECTION = 'gltf_active_collection'
|
||||
SKINS = 'gltf_skins'
|
||||
DEF_BONES_ONLY = 'gltf_def_bones'
|
||||
|
@ -1,618 +0,0 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# Copyright 2018-2021 The glTF-Blender-IO authors.
|
||||
|
||||
import numpy as np
|
||||
from mathutils import Vector
|
||||
|
||||
from . import gltf2_blender_export_keys
|
||||
from ...io.com.gltf2_io_debug import print_console
|
||||
from io_scene_gltf2.blender.exp import gltf2_blender_gather_skins
|
||||
|
||||
|
||||
def extract_primitives(blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings):
|
||||
"""Extract primitives from a mesh."""
|
||||
print_console('INFO', 'Extracting primitive: ' + blender_mesh.name)
|
||||
|
||||
blender_object = None
|
||||
if uuid_for_skined_data:
|
||||
blender_object = export_settings['vtree'].nodes[uuid_for_skined_data].blender_object
|
||||
|
||||
use_normals = export_settings[gltf2_blender_export_keys.NORMALS]
|
||||
if use_normals:
|
||||
blender_mesh.calc_normals_split()
|
||||
|
||||
use_tangents = False
|
||||
if use_normals and export_settings[gltf2_blender_export_keys.TANGENTS]:
|
||||
if blender_mesh.uv_layers.active and len(blender_mesh.uv_layers) > 0:
|
||||
try:
|
||||
blender_mesh.calc_tangents()
|
||||
use_tangents = True
|
||||
except Exception:
|
||||
print_console('WARNING', 'Could not calculate tangents. Please try to triangulate the mesh first.')
|
||||
|
||||
tex_coord_max = 0
|
||||
if export_settings[gltf2_blender_export_keys.TEX_COORDS]:
|
||||
if blender_mesh.uv_layers.active:
|
||||
tex_coord_max = len(blender_mesh.uv_layers)
|
||||
|
||||
color_max = 0
|
||||
if export_settings[gltf2_blender_export_keys.COLORS]:
|
||||
color_max = len(blender_mesh.vertex_colors)
|
||||
|
||||
colors_attributes = []
|
||||
rendered_color_idx = blender_mesh.attributes.render_color_index
|
||||
|
||||
if color_max > 0:
|
||||
colors_attributes.append(rendered_color_idx)
|
||||
# Then find other ones
|
||||
colors_attributes.extend([
|
||||
i for i in range(len(blender_mesh.color_attributes)) if i != rendered_color_idx \
|
||||
and blender_mesh.vertex_colors.find(blender_mesh.color_attributes[i].name) != -1
|
||||
])
|
||||
|
||||
|
||||
armature = None
|
||||
skin = None
|
||||
if blender_vertex_groups and export_settings[gltf2_blender_export_keys.SKINS]:
|
||||
if modifiers is not None:
|
||||
modifiers_dict = {m.type: m for m in modifiers}
|
||||
if "ARMATURE" in modifiers_dict:
|
||||
modifier = modifiers_dict["ARMATURE"]
|
||||
armature = modifier.object
|
||||
|
||||
# Skin must be ignored if the object is parented to a bone of the armature
|
||||
# (This creates an infinite recursive error)
|
||||
# So ignoring skin in that case
|
||||
is_child_of_arma = (
|
||||
armature and
|
||||
blender_object and
|
||||
blender_object.parent_type == "BONE" and
|
||||
blender_object.parent.name == armature.name
|
||||
)
|
||||
if is_child_of_arma:
|
||||
armature = None
|
||||
|
||||
if armature:
|
||||
skin = gltf2_blender_gather_skins.gather_skin(export_settings['vtree'].nodes[uuid_for_skined_data].armature, export_settings)
|
||||
if not skin:
|
||||
armature = None
|
||||
|
||||
use_morph_normals = use_normals and export_settings[gltf2_blender_export_keys.MORPH_NORMAL]
|
||||
use_morph_tangents = use_morph_normals and use_tangents and export_settings[gltf2_blender_export_keys.MORPH_TANGENT]
|
||||
|
||||
key_blocks = []
|
||||
if blender_mesh.shape_keys and export_settings[gltf2_blender_export_keys.MORPH]:
|
||||
key_blocks = [
|
||||
key_block
|
||||
for key_block in blender_mesh.shape_keys.key_blocks
|
||||
if not (key_block == key_block.relative_key or key_block.mute)
|
||||
]
|
||||
|
||||
use_materials = export_settings[gltf2_blender_export_keys.MATERIALS]
|
||||
|
||||
# Fetch vert positions and bone data (joint,weights)
|
||||
|
||||
locs, morph_locs = __get_positions(blender_mesh, key_blocks, armature, blender_object, export_settings)
|
||||
if skin:
|
||||
vert_bones, num_joint_sets, need_neutral_bone = __get_bone_data(blender_mesh, skin, blender_vertex_groups)
|
||||
if need_neutral_bone is True:
|
||||
# Need to create a fake joint at root of armature
|
||||
# In order to assign not assigned vertices to it
|
||||
# But for now, this is not yet possible, we need to wait the armature node is created
|
||||
# Just store this, to be used later
|
||||
armature_uuid = export_settings['vtree'].nodes[uuid_for_skined_data].armature
|
||||
export_settings['vtree'].nodes[armature_uuid].need_neutral_bone = True
|
||||
|
||||
# In Blender there is both per-vert data, like position, and also per-loop
|
||||
# (loop=corner-of-poly) data, like normals or UVs. glTF only has per-vert
|
||||
# data, so we need to split Blender verts up into potentially-multiple glTF
|
||||
# verts.
|
||||
#
|
||||
# First, we'll collect a "dot" for every loop: a struct that stores all the
|
||||
# attributes at that loop, namely the vertex index (which determines all
|
||||
# per-vert data), and all the per-loop data like UVs, etc.
|
||||
#
|
||||
# Each unique dot will become one unique glTF vert.
|
||||
|
||||
# List all fields the dot struct needs.
|
||||
dot_fields = [('vertex_index', np.uint32)]
|
||||
if use_normals:
|
||||
dot_fields += [('nx', np.float32), ('ny', np.float32), ('nz', np.float32)]
|
||||
if use_tangents:
|
||||
dot_fields += [('tx', np.float32), ('ty', np.float32), ('tz', np.float32), ('tw', np.float32)]
|
||||
for uv_i in range(tex_coord_max):
|
||||
dot_fields += [('uv%dx' % uv_i, np.float32), ('uv%dy' % uv_i, np.float32)]
|
||||
for col_i, _ in enumerate(colors_attributes):
|
||||
dot_fields += [
|
||||
('color%dr' % col_i, np.float32),
|
||||
('color%dg' % col_i, np.float32),
|
||||
('color%db' % col_i, np.float32),
|
||||
('color%da' % col_i, np.float32),
|
||||
]
|
||||
if use_morph_normals:
|
||||
for morph_i, _ in enumerate(key_blocks):
|
||||
dot_fields += [
|
||||
('morph%dnx' % morph_i, np.float32),
|
||||
('morph%dny' % morph_i, np.float32),
|
||||
('morph%dnz' % morph_i, np.float32),
|
||||
]
|
||||
|
||||
dots = np.empty(len(blender_mesh.loops), dtype=np.dtype(dot_fields))
|
||||
|
||||
vidxs = np.empty(len(blender_mesh.loops))
|
||||
blender_mesh.loops.foreach_get('vertex_index', vidxs)
|
||||
dots['vertex_index'] = vidxs
|
||||
del vidxs
|
||||
|
||||
if use_normals:
|
||||
kbs = key_blocks if use_morph_normals else []
|
||||
normals, morph_normals = __get_normals(
|
||||
blender_mesh, kbs, armature, blender_object, export_settings
|
||||
)
|
||||
dots['nx'] = normals[:, 0]
|
||||
dots['ny'] = normals[:, 1]
|
||||
dots['nz'] = normals[:, 2]
|
||||
del normals
|
||||
for morph_i, ns in enumerate(morph_normals):
|
||||
dots['morph%dnx' % morph_i] = ns[:, 0]
|
||||
dots['morph%dny' % morph_i] = ns[:, 1]
|
||||
dots['morph%dnz' % morph_i] = ns[:, 2]
|
||||
del morph_normals
|
||||
|
||||
if use_tangents:
|
||||
tangents = __get_tangents(blender_mesh, armature, blender_object, export_settings)
|
||||
dots['tx'] = tangents[:, 0]
|
||||
dots['ty'] = tangents[:, 1]
|
||||
dots['tz'] = tangents[:, 2]
|
||||
del tangents
|
||||
signs = __get_bitangent_signs(blender_mesh, armature, blender_object, export_settings)
|
||||
dots['tw'] = signs
|
||||
del signs
|
||||
|
||||
for uv_i in range(tex_coord_max):
|
||||
uvs = __get_uvs(blender_mesh, uv_i)
|
||||
dots['uv%dx' % uv_i] = uvs[:, 0]
|
||||
dots['uv%dy' % uv_i] = uvs[:, 1]
|
||||
del uvs
|
||||
|
||||
colors_types = []
|
||||
for col_i, blender_col_i in enumerate(colors_attributes):
|
||||
colors, colors_type, domain = __get_colors(blender_mesh, col_i, blender_col_i)
|
||||
if domain == "POINT":
|
||||
colors = colors[dots['vertex_index']]
|
||||
colors_types.append(colors_type)
|
||||
dots['color%dr' % col_i] = colors[:, 0]
|
||||
dots['color%dg' % col_i] = colors[:, 1]
|
||||
dots['color%db' % col_i] = colors[:, 2]
|
||||
dots['color%da' % col_i] = colors[:, 3]
|
||||
del colors
|
||||
|
||||
# Calculate triangles and sort them into primitives.
|
||||
|
||||
blender_mesh.calc_loop_triangles()
|
||||
loop_indices = np.empty(len(blender_mesh.loop_triangles) * 3, dtype=np.uint32)
|
||||
blender_mesh.loop_triangles.foreach_get('loops', loop_indices)
|
||||
|
||||
prim_indices = {} # maps material index to TRIANGLES-style indices into dots
|
||||
|
||||
if use_materials == "NONE": # Only for None. For placeholder and export, keep primitives
|
||||
# Put all vertices into one primitive
|
||||
prim_indices[-1] = loop_indices
|
||||
|
||||
else:
|
||||
# Bucket by material index.
|
||||
|
||||
tri_material_idxs = np.empty(len(blender_mesh.loop_triangles), dtype=np.uint32)
|
||||
blender_mesh.loop_triangles.foreach_get('material_index', tri_material_idxs)
|
||||
loop_material_idxs = np.repeat(tri_material_idxs, 3) # material index for every loop
|
||||
unique_material_idxs = np.unique(tri_material_idxs)
|
||||
del tri_material_idxs
|
||||
|
||||
for material_idx in unique_material_idxs:
|
||||
prim_indices[material_idx] = loop_indices[loop_material_idxs == material_idx]
|
||||
|
||||
# Create all the primitives.
|
||||
|
||||
primitives = []
|
||||
|
||||
for material_idx, dot_indices in prim_indices.items():
|
||||
# Extract just dots used by this primitive, deduplicate them, and
|
||||
# calculate indices into this deduplicated list.
|
||||
prim_dots = dots[dot_indices]
|
||||
prim_dots, indices = np.unique(prim_dots, return_inverse=True)
|
||||
|
||||
if len(prim_dots) == 0:
|
||||
continue
|
||||
|
||||
# Now just move all the data for prim_dots into attribute arrays
|
||||
|
||||
attributes = {}
|
||||
|
||||
blender_idxs = prim_dots['vertex_index']
|
||||
|
||||
attributes['POSITION'] = locs[blender_idxs]
|
||||
|
||||
for morph_i, vs in enumerate(morph_locs):
|
||||
attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]
|
||||
|
||||
if use_normals:
|
||||
normals = np.empty((len(prim_dots), 3), dtype=np.float32)
|
||||
normals[:, 0] = prim_dots['nx']
|
||||
normals[:, 1] = prim_dots['ny']
|
||||
normals[:, 2] = prim_dots['nz']
|
||||
attributes['NORMAL'] = normals
|
||||
|
||||
if use_tangents:
|
||||
tangents = np.empty((len(prim_dots), 4), dtype=np.float32)
|
||||
tangents[:, 0] = prim_dots['tx']
|
||||
tangents[:, 1] = prim_dots['ty']
|
||||
tangents[:, 2] = prim_dots['tz']
|
||||
tangents[:, 3] = prim_dots['tw']
|
||||
attributes['TANGENT'] = tangents
|
||||
|
||||
if use_morph_normals:
|
||||
for morph_i, _ in enumerate(key_blocks):
|
||||
ns = np.empty((len(prim_dots), 3), dtype=np.float32)
|
||||
ns[:, 0] = prim_dots['morph%dnx' % morph_i]
|
||||
ns[:, 1] = prim_dots['morph%dny' % morph_i]
|
||||
ns[:, 2] = prim_dots['morph%dnz' % morph_i]
|
||||
attributes['MORPH_NORMAL_%d' % morph_i] = ns
|
||||
|
||||
if use_morph_tangents:
|
||||
attributes['MORPH_TANGENT_%d' % morph_i] = __calc_morph_tangents(normals, ns, tangents)
|
||||
|
||||
for tex_coord_i in range(tex_coord_max):
|
||||
uvs = np.empty((len(prim_dots), 2), dtype=np.float32)
|
||||
uvs[:, 0] = prim_dots['uv%dx' % tex_coord_i]
|
||||
uvs[:, 1] = prim_dots['uv%dy' % tex_coord_i]
|
||||
attributes['TEXCOORD_%d' % tex_coord_i] = uvs
|
||||
|
||||
for color_i, _ in enumerate(colors_attributes):
|
||||
colors = np.empty((len(prim_dots), 4), dtype=np.float32)
|
||||
colors[:, 0] = prim_dots['color%dr' % color_i]
|
||||
colors[:, 1] = prim_dots['color%dg' % color_i]
|
||||
colors[:, 2] = prim_dots['color%db' % color_i]
|
||||
colors[:, 3] = prim_dots['color%da' % color_i]
|
||||
attributes['COLOR_%d' % color_i] = {}
|
||||
attributes['COLOR_%d' % color_i]["data"] = colors
|
||||
|
||||
attributes['COLOR_%d' % color_i]["norm"] = colors_types[color_i] == "BYTE_COLOR"
|
||||
|
||||
if skin:
|
||||
joints = [[] for _ in range(num_joint_sets)]
|
||||
weights = [[] for _ in range(num_joint_sets)]
|
||||
|
||||
for vi in blender_idxs:
|
||||
bones = vert_bones[vi]
|
||||
for j in range(0, 4 * num_joint_sets):
|
||||
if j < len(bones):
|
||||
joint, weight = bones[j]
|
||||
else:
|
||||
joint, weight = 0, 0.0
|
||||
joints[j//4].append(joint)
|
||||
weights[j//4].append(weight)
|
||||
|
||||
for i, (js, ws) in enumerate(zip(joints, weights)):
|
||||
attributes['JOINTS_%d' % i] = js
|
||||
attributes['WEIGHTS_%d' % i] = ws
|
||||
|
||||
primitives.append({
|
||||
'attributes': attributes,
|
||||
'indices': indices,
|
||||
'material': material_idx,
|
||||
})
|
||||
|
||||
if export_settings['gltf_loose_edges']:
|
||||
# Find loose edges
|
||||
loose_edges = [e for e in blender_mesh.edges if e.is_loose]
|
||||
blender_idxs = [vi for e in loose_edges for vi in e.vertices]
|
||||
|
||||
if blender_idxs:
|
||||
# Export one glTF vert per unique Blender vert in a loose edge
|
||||
blender_idxs = np.array(blender_idxs, dtype=np.uint32)
|
||||
blender_idxs, indices = np.unique(blender_idxs, return_inverse=True)
|
||||
|
||||
attributes = {}
|
||||
|
||||
attributes['POSITION'] = locs[blender_idxs]
|
||||
|
||||
for morph_i, vs in enumerate(morph_locs):
|
||||
attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]
|
||||
|
||||
if skin:
|
||||
joints = [[] for _ in range(num_joint_sets)]
|
||||
weights = [[] for _ in range(num_joint_sets)]
|
||||
|
||||
for vi in blender_idxs:
|
||||
bones = vert_bones[vi]
|
||||
for j in range(0, 4 * num_joint_sets):
|
||||
if j < len(bones):
|
||||
joint, weight = bones[j]
|
||||
else:
|
||||
joint, weight = 0, 0.0
|
||||
joints[j//4].append(joint)
|
||||
weights[j//4].append(weight)
|
||||
|
||||
for i, (js, ws) in enumerate(zip(joints, weights)):
|
||||
attributes['JOINTS_%d' % i] = js
|
||||
attributes['WEIGHTS_%d' % i] = ws
|
||||
|
||||
primitives.append({
|
||||
'attributes': attributes,
|
||||
'indices': indices,
|
||||
'mode': 1, # LINES
|
||||
'material': 0,
|
||||
})
|
||||
|
||||
if export_settings['gltf_loose_points']:
|
||||
# Find loose points
|
||||
verts_in_edge = set(vi for e in blender_mesh.edges for vi in e.vertices)
|
||||
blender_idxs = [
|
||||
vi for vi, _ in enumerate(blender_mesh.vertices)
|
||||
if vi not in verts_in_edge
|
||||
]
|
||||
|
||||
if blender_idxs:
|
||||
blender_idxs = np.array(blender_idxs, dtype=np.uint32)
|
||||
|
||||
attributes = {}
|
||||
|
||||
attributes['POSITION'] = locs[blender_idxs]
|
||||
|
||||
for morph_i, vs in enumerate(morph_locs):
|
||||
attributes['MORPH_POSITION_%d' % morph_i] = vs[blender_idxs]
|
||||
|
||||
if skin:
|
||||
joints = [[] for _ in range(num_joint_sets)]
|
||||
weights = [[] for _ in range(num_joint_sets)]
|
||||
|
||||
for vi in blender_idxs:
|
||||
bones = vert_bones[vi]
|
||||
for j in range(0, 4 * num_joint_sets):
|
||||
if j < len(bones):
|
||||
joint, weight = bones[j]
|
||||
else:
|
||||
joint, weight = 0, 0.0
|
||||
joints[j//4].append(joint)
|
||||
weights[j//4].append(weight)
|
||||
|
||||
for i, (js, ws) in enumerate(zip(joints, weights)):
|
||||
attributes['JOINTS_%d' % i] = js
|
||||
attributes['WEIGHTS_%d' % i] = ws
|
||||
|
||||
primitives.append({
|
||||
'attributes': attributes,
|
||||
'mode': 0, # POINTS
|
||||
'material': 0,
|
||||
})
|
||||
|
||||
print_console('INFO', 'Primitives created: %d' % len(primitives))
|
||||
|
||||
return primitives
|
||||
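Note on the deduplication step above: every exported loop ("dot") is packed into one NumPy structured record, so np.unique can collapse identical corners and hand back the index buffer in the same call. A minimal sketch of the idea, with a made-up two-field layout (vertex index plus one UV pair) purely for illustration:

import numpy as np

# Hypothetical minimal "dot" layout, just for illustration.
dot_fields = [('vertex_index', np.uint32), ('uv0x', np.float32), ('uv0y', np.float32)]
dots = np.array(
    [(0, 0.0, 0.0), (1, 1.0, 0.0), (0, 0.0, 0.0), (2, 0.5, 1.0)],
    dtype=np.dtype(dot_fields))

# Identical records collapse; 'indices' rebuilds the original TRIANGLES order.
unique_dots, indices = np.unique(dots, return_inverse=True)
# len(unique_dots) == 3, indices == [0, 1, 0, 2], unique_dots[indices] == dots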
|
||||
|
||||
def __get_positions(blender_mesh, key_blocks, armature, blender_object, export_settings):
|
||||
locs = np.empty(len(blender_mesh.vertices) * 3, dtype=np.float32)
|
||||
source = key_blocks[0].relative_key.data if key_blocks else blender_mesh.vertices
|
||||
source.foreach_get('co', locs)
|
||||
locs = locs.reshape(len(blender_mesh.vertices), 3)
|
||||
|
||||
morph_locs = []
|
||||
for key_block in key_blocks:
|
||||
vs = np.empty(len(blender_mesh.vertices) * 3, dtype=np.float32)
|
||||
key_block.data.foreach_get('co', vs)
|
||||
vs = vs.reshape(len(blender_mesh.vertices), 3)
|
||||
morph_locs.append(vs)
|
||||
|
||||
# Transform for skinning
|
||||
if armature and blender_object:
|
||||
# apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world
|
||||
# loc_transform = armature.matrix_world @ apply_matrix
|
||||
|
||||
loc_transform = blender_object.matrix_world
|
||||
locs[:] = __apply_mat_to_all(loc_transform, locs)
|
||||
for vs in morph_locs:
|
||||
vs[:] = __apply_mat_to_all(loc_transform, vs)
|
||||
|
||||
# glTF stores deltas in morph targets
|
||||
for vs in morph_locs:
|
||||
vs -= locs
|
||||
|
||||
if export_settings[gltf2_blender_export_keys.YUP]:
|
||||
__zup2yup(locs)
|
||||
for vs in morph_locs:
|
||||
__zup2yup(vs)
|
||||
|
||||
return locs, morph_locs
|
||||
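For reference, a small sketch (with made-up numbers) of what __get_positions produces: glTF morph targets are stored as deltas against the base positions, and both arrays then go through the same Z-up to Y-up swizzle:

import numpy as np

locs = np.array([[0.0, 1.0, 2.0]], dtype=np.float32)       # base vertex, Blender Z-up
key_locs = np.array([[0.0, 1.0, 3.0]], dtype=np.float32)   # same vertex in a shape key

morph_delta = key_locs - locs                               # glTF wants the delta: [[0, 0, 1]]

# (x, y, z) -> (x, z, -y), matching __zup2yup below
for arr in (locs, morph_delta):
    arr[:, [1, 2]] = arr[:, [2, 1]]
    arr[:, 2] *= -1
# locs -> [[0, 2, -1]], morph_delta -> [[0, 1, 0]]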
|
||||
|
||||
def __get_normals(blender_mesh, key_blocks, armature, blender_object, export_settings):
|
||||
"""Get normal for each loop."""
|
||||
if key_blocks:
|
||||
normals = key_blocks[0].relative_key.normals_split_get()
|
||||
normals = np.array(normals, dtype=np.float32)
|
||||
else:
|
||||
normals = np.empty(len(blender_mesh.loops) * 3, dtype=np.float32)
|
||||
blender_mesh.calc_normals_split()
|
||||
blender_mesh.loops.foreach_get('normal', normals)
|
||||
|
||||
normals = normals.reshape(len(blender_mesh.loops), 3)
|
||||
|
||||
morph_normals = []
|
||||
for key_block in key_blocks:
|
||||
ns = np.array(key_block.normals_split_get(), dtype=np.float32)
|
||||
ns = ns.reshape(len(blender_mesh.loops), 3)
|
||||
morph_normals.append(ns)
|
||||
|
||||
# Transform for skinning
|
||||
if armature and blender_object:
|
||||
apply_matrix = (armature.matrix_world.inverted_safe() @ blender_object.matrix_world)
|
||||
apply_matrix = apply_matrix.to_3x3().inverted_safe().transposed()
|
||||
normal_transform = armature.matrix_world.to_3x3() @ apply_matrix
|
||||
|
||||
normals[:] = __apply_mat_to_all(normal_transform, normals)
|
||||
__normalize_vecs(normals)
|
||||
for ns in morph_normals:
|
||||
ns[:] = __apply_mat_to_all(normal_transform, ns)
|
||||
__normalize_vecs(ns)
|
||||
|
||||
for ns in [normals, *morph_normals]:
|
||||
# Replace zero normals with the unit UP vector.
|
||||
# Seems to happen sometimes with degenerate tris?
|
||||
is_zero = ~ns.any(axis=1)
|
||||
ns[is_zero, 2] = 1
|
||||
|
||||
# glTF stores deltas in morph targets
|
||||
for ns in morph_normals:
|
||||
ns -= normals
|
||||
|
||||
if export_settings[gltf2_blender_export_keys.YUP]:
|
||||
__zup2yup(normals)
|
||||
for ns in morph_normals:
|
||||
__zup2yup(ns)
|
||||
|
||||
return normals, morph_normals
|
||||
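A tiny illustration of the zero-normal guard above (values are made up): rows that are entirely zero get +Z before the Y-up conversion, so later normalization never divides by zero:

import numpy as np

normals = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=np.float32)
is_zero = ~normals.any(axis=1)   # True only for the all-zero row
normals[is_zero, 2] = 1          # -> [[0, 0, 1], [0, 1, 0]]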
|
||||
|
||||
def __get_tangents(blender_mesh, armature, blender_object, export_settings):
|
||||
"""Get an array of the tangent for each loop."""
|
||||
tangents = np.empty(len(blender_mesh.loops) * 3, dtype=np.float32)
|
||||
blender_mesh.loops.foreach_get('tangent', tangents)
|
||||
tangents = tangents.reshape(len(blender_mesh.loops), 3)
|
||||
|
||||
# Transform for skinning
|
||||
if armature and blender_object:
|
||||
apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world
|
||||
tangent_transform = apply_matrix.to_quaternion().to_matrix()
|
||||
tangents = __apply_mat_to_all(tangent_transform, tangents)
|
||||
__normalize_vecs(tangents)
|
||||
|
||||
if export_settings[gltf2_blender_export_keys.YUP]:
|
||||
__zup2yup(tangents)
|
||||
|
||||
return tangents
|
||||
|
||||
|
||||
def __get_bitangent_signs(blender_mesh, armature, blender_object, export_settings):
|
||||
signs = np.empty(len(blender_mesh.loops), dtype=np.float32)
|
||||
blender_mesh.loops.foreach_get('bitangent_sign', signs)
|
||||
|
||||
# Transform for skinning
|
||||
if armature and blender_object:
|
||||
# Bitangent signs should flip when handedness changes
|
||||
# TODO: confirm
|
||||
apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world
|
||||
tangent_transform = apply_matrix.to_quaternion().to_matrix()
|
||||
flipped = tangent_transform.determinant() < 0
|
||||
if flipped:
|
||||
signs *= -1
|
||||
|
||||
# No change for Zup -> Yup
|
||||
|
||||
return signs
|
||||
|
||||
|
||||
def __calc_morph_tangents(normals, morph_normal_deltas, tangents):
|
||||
# TODO: check if this works
|
||||
morph_tangent_deltas = np.empty((len(normals), 3), dtype=np.float32)
|
||||
|
||||
for i in range(len(normals)):
|
||||
n = Vector(normals[i])
|
||||
morph_n = n + Vector(morph_normal_deltas[i]) # convert back to non-delta
|
||||
t = Vector(tangents[i, :3])
|
||||
|
||||
rotation = morph_n.rotation_difference(n)
|
||||
|
||||
t_morph = Vector(t)
|
||||
t_morph.rotate(rotation)
|
||||
morph_tangent_deltas[i] = t_morph - t # back to delta
|
||||
|
||||
return morph_tangent_deltas
|
||||
|
||||
|
||||
def __get_uvs(blender_mesh, uv_i):
|
||||
layer = blender_mesh.uv_layers[uv_i]
|
||||
uvs = np.empty(len(blender_mesh.loops) * 2, dtype=np.float32)
|
||||
layer.data.foreach_get('uv', uvs)
|
||||
uvs = uvs.reshape(len(blender_mesh.loops), 2)
|
||||
|
||||
# Blender UV space -> glTF UV space
|
||||
# u,v -> u,1-v
|
||||
uvs[:, 1] *= -1
|
||||
uvs[:, 1] += 1
|
||||
|
||||
return uvs
|
||||
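Worked example of the UV conversion above (made-up coordinates): Blender's UV origin is bottom-left while glTF's is top-left, so only v is flipped, kept vectorized as *= -1 followed by += 1:

import numpy as np

uvs = np.array([[0.25, 0.8], [0.5, 0.0]], dtype=np.float32)
uvs[:, 1] *= -1
uvs[:, 1] += 1
# -> [[0.25, 0.2], [0.5, 1.0]]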
|
||||
|
||||
def __get_colors(blender_mesh, color_i, blender_color_i):
|
||||
if blender_mesh.color_attributes[blender_color_i].domain == "POINT":
|
||||
colors = np.empty(len(blender_mesh.vertices) * 4, dtype=np.float32) #POINT
|
||||
else:
|
||||
colors = np.empty(len(blender_mesh.loops) * 4, dtype=np.float32) #CORNER
|
||||
blender_mesh.color_attributes[blender_color_i].data.foreach_get('color', colors)
|
||||
colors = colors.reshape(-1, 4)
|
||||
# colors are already linear, no need to switch color space
|
||||
return colors, blender_mesh.color_attributes[blender_color_i].data_type, blender_mesh.color_attributes[blender_color_i].domain
|
||||
|
||||
|
||||
def __get_bone_data(blender_mesh, skin, blender_vertex_groups):
|
||||
|
||||
need_neutral_bone = False
|
||||
min_influence = 0.0001
|
||||
|
||||
joint_name_to_index = {joint.name: index for index, joint in enumerate(skin.joints)}
|
||||
group_to_joint = [joint_name_to_index.get(g.name) for g in blender_vertex_groups]
|
||||
|
||||
# List of (joint, weight) pairs for each vert
|
||||
vert_bones = []
|
||||
max_num_influences = 0
|
||||
|
||||
for vertex in blender_mesh.vertices:
|
||||
bones = []
|
||||
if vertex.groups:
|
||||
for group_element in vertex.groups:
|
||||
weight = group_element.weight
|
||||
if weight <= min_influence:
|
||||
continue
|
||||
try:
|
||||
joint = group_to_joint[group_element.group]
|
||||
except Exception:
|
||||
continue
|
||||
if joint is None:
|
||||
continue
|
||||
bones.append((joint, weight))
|
||||
bones.sort(key=lambda x: x[1], reverse=True)
|
||||
if not bones:
|
||||
# Not assigned to any bone
|
||||
bones = ((len(skin.joints), 1.0),) # Assign to a joint that will be created later
|
||||
need_neutral_bone = True
|
||||
vert_bones.append(bones)
|
||||
if len(bones) > max_num_influences:
|
||||
max_num_influences = len(bones)
|
||||
|
||||
# How many joint sets do we need? 1 set = 4 influences
|
||||
num_joint_sets = (max_num_influences + 3) // 4
|
||||
|
||||
return vert_bones, num_joint_sets, need_neutral_bone
|
||||
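To make the joint-set packing above concrete, here is a sketch with a hypothetical vertex carrying 6 influences; it needs (6 + 3) // 4 == 2 sets, and the second set is padded with zero-weight joint 0, exactly like the per-primitive loops earlier in this file:

bones = [(3, 0.4), (7, 0.3), (1, 0.1), (9, 0.1), (2, 0.05), (5, 0.05)]  # (joint, weight), sorted by weight
num_joint_sets = (len(bones) + 3) // 4   # -> 2

joints = [[] for _ in range(num_joint_sets)]
weights = [[] for _ in range(num_joint_sets)]
for j in range(4 * num_joint_sets):
    joint, weight = bones[j] if j < len(bones) else (0, 0.0)  # pad missing slots
    joints[j // 4].append(joint)
    weights[j // 4].append(weight)

# JOINTS_0 == [3, 7, 1, 9],  WEIGHTS_0 == [0.4, 0.3, 0.1, 0.1]
# JOINTS_1 == [2, 5, 0, 0],  WEIGHTS_1 == [0.05, 0.05, 0.0, 0.0]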
|
||||
|
||||
def __zup2yup(array):
|
||||
# x,y,z -> x,z,-y
|
||||
array[:, [1,2]] = array[:, [2,1]] # x,z,y
|
||||
array[:, 2] *= -1 # x,z,-y
|
||||
|
||||
|
||||
def __apply_mat_to_all(matrix, vectors):
|
||||
"""Given matrix m and vectors [v1,v2,...], computes [m@v1,m@v2,...]"""
|
||||
# Linear part
|
||||
m = matrix.to_3x3() if len(matrix) == 4 else matrix
|
||||
res = np.matmul(vectors, np.array(m.transposed()))
|
||||
# Translation part
|
||||
if len(matrix) == 4:
|
||||
res += np.array(matrix.translation)
|
||||
return res
|
||||
|
||||
|
||||
def __normalize_vecs(vectors):
|
||||
norms = np.linalg.norm(vectors, axis=1, keepdims=True)
|
||||
np.divide(vectors, norms, out=vectors, where=norms != 0)
|
@ -4,7 +4,7 @@
|
||||
import bpy
|
||||
import typing
|
||||
|
||||
from ..com.gltf2_blender_data_path import get_target_object_path, get_target_property_name, get_rotation_modes
|
||||
from ..com.gltf2_blender_data_path import get_target_object_path, get_target_property_name, get_rotation_modes, get_delta_modes, is_location, is_rotation, is_scale
|
||||
from io_scene_gltf2.io.com import gltf2_io
|
||||
from io_scene_gltf2.io.com import gltf2_io_debug
|
||||
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
|
||||
@ -19,6 +19,55 @@ from io_scene_gltf2.blender.exp.gltf2_blender_gather_tree import VExportNode
|
||||
from . import gltf2_blender_export_keys
|
||||
|
||||
|
||||
def gather_channels_baked(obj_uuid, frame_range, export_settings):
|
||||
channels = []
|
||||
|
||||
# If no animation in file, no need to bake
|
||||
if len(bpy.data.actions) == 0:
|
||||
return None
|
||||
|
||||
blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object
|
||||
|
||||
if frame_range is None:
|
||||
start_frame = min([v[0] for v in [a.frame_range for a in bpy.data.actions]])
|
||||
end_frame = max([v[1] for v in [a.frame_range for a in bpy.data.actions]])
|
||||
else:
|
||||
if blender_obj.animation_data and blender_obj.animation_data.action:
|
||||
# Coming from an object parented to a bone, and the object is also animated, so use the action's frame range
|
||||
start_frame, end_frame = blender_obj.animation_data.action.frame_range[0], blender_obj.animation_data.action.frame_range[1]
|
||||
else:
|
||||
# Coming from an object parented to a bone, and the object is not animated, so use the range from the armature
|
||||
start_frame, end_frame = frame_range
|
||||
|
||||
# use action if exists, else obj_uuid
|
||||
# When an object needs forced baking, there are 2 situations:
|
||||
# - Non-animated object, but there is a selection, so we need to bake
|
||||
# - Object parented to a bone. We need to bake because of inverse transforms on non-default TRS armatures
|
||||
# In this last case, there are 2 situations:
|
||||
# - Object is also animated, so use the action name as key for caching
|
||||
# - Object is not animated, so use obj_uuid as key for caching, like for non animated object (case 1)
|
||||
|
||||
key_action = blender_obj.animation_data.action.name if blender_obj.animation_data and blender_obj.animation_data.action else obj_uuid
|
||||
|
||||
for p in ["location", "rotation_quaternion", "scale"]:
|
||||
channel = gather_animation_channel(
|
||||
obj_uuid,
|
||||
(),
|
||||
export_settings,
|
||||
None,
|
||||
p,
|
||||
start_frame,
|
||||
end_frame,
|
||||
False,
|
||||
key_action, # Use obj uuid as action name for caching (or action name if case of object parented to bone and animated)
|
||||
None,
|
||||
False #If Object is not animated, don't keep animation for this channel
|
||||
)
|
||||
if channel is not None:
|
||||
channels.append(channel)
|
||||
|
||||
return channels if len(channels) > 0 else None
|
||||
|
||||
@cached
|
||||
def gather_animation_channels(obj_uuid: int,
|
||||
blender_action: bpy.types.Action,
|
||||
@ -118,6 +167,21 @@ def gather_animation_channels(obj_uuid: int,
|
||||
if channel is not None:
|
||||
channels.append(channel)
|
||||
|
||||
# When an object is parented to a bone and the rest pose is used (not the current frame),
|
||||
# if parenting is not done with the same TRS as the rest pose, this can lead to inconsistencies.
|
||||
# So we need to bake the object animation too, to be sure that the correct TRS animation is used.
|
||||
# Here, we want to add these channels to the same action as the armature.
|
||||
if export_settings['gltf_selected'] is False and export_settings['gltf_current_frame'] is False:
|
||||
|
||||
children_obj_parent_to_bones = []
|
||||
for bone_uuid in bones_uuid:
|
||||
children_obj_parent_to_bones.extend([child for child in export_settings['vtree'].nodes[bone_uuid].children if export_settings['vtree'].nodes[child].blender_type not in [VExportNode.BONE, VExportNode.ARMATURE]])
|
||||
for child_uuid in children_obj_parent_to_bones:
|
||||
|
||||
channels_baked = gather_channels_baked(child_uuid, (bake_range_start, bake_range_end), export_settings)
|
||||
if channels_baked is not None:
|
||||
channels.extend(channels_baked)
|
||||
|
||||
else:
|
||||
done_paths = []
|
||||
for channel_group in __get_channel_groups(blender_action, blender_object, export_settings):
|
||||
@ -364,6 +428,8 @@ def __get_channel_groups(blender_action: bpy.types.Action, blender_object: bpy.t
|
||||
targets = {}
|
||||
multiple_rotation_mode_detected = False
|
||||
delta_rotation_detection = [False, False] # Normal / Delta
|
||||
delta_location_detection = [False, False] # Normal / Delta
|
||||
delta_scale_detection = [False, False] # Normal / Delta
|
||||
for fcurve in blender_action.fcurves:
|
||||
# In some invalid files, a channel doesn't have any keyframes ... such a channel needs to be ignored
|
||||
if len(fcurve.keyframe_points) == 0:
|
||||
@ -405,17 +471,17 @@ def __get_channel_groups(blender_action: bpy.types.Action, blender_object: bpy.t
|
||||
|
||||
# Detect that the object or bone is not keyed multiple times for euler and quaternion
|
||||
# Keep only the current rotation mode used by object
|
||||
rotation, delta, rotation_modes = get_rotation_modes(target_property)
|
||||
rotation, rotation_modes = get_rotation_modes(target_property)
|
||||
delta = get_delta_modes(target_property)
|
||||
|
||||
# Delta rotation management
|
||||
if is_rotation(target_property):
|
||||
if delta is False:
|
||||
if delta_rotation_detection[1] is True: # normal rotation coming, but delta is already present
|
||||
multiple_rotation_mode_detected = True
|
||||
continue
|
||||
delta_rotation_detection[0] = True
|
||||
else:
|
||||
if delta_rotation_detection[0] is True: # delta rotation coming, but normal is already present
|
||||
multiple_rotation_mode_detected = True
|
||||
continue
|
||||
delta_rotation_detection[1] = True
|
||||
|
||||
@ -424,6 +490,28 @@ def __get_channel_groups(blender_action: bpy.types.Action, blender_object: bpy.t
|
||||
multiple_rotation_mode_detected = True
|
||||
continue
|
||||
|
||||
# Delta location management
|
||||
if is_location(target_property):
|
||||
if delta is False:
|
||||
if delta_location_detection[1] is True: # normal location coming, but delta is already present
|
||||
continue
|
||||
delta_location_detection[0] = True
|
||||
else:
|
||||
if delta_location_detection[0] is True: # delta location coming, but normal is already present
|
||||
continue
|
||||
delta_location_detection[1] = True
|
||||
|
||||
# Delta scale management
|
||||
if is_scale(target_property):
|
||||
if delta is False:
|
||||
if delta_scale_detection[1] is True: # normal scale coming, but delta is already present
|
||||
continue
|
||||
delta_scale_detection[0] = True
|
||||
else:
|
||||
if delta_scale_detection[0] is True: # delta scale coming, but normal is already present
|
||||
continue
|
||||
delta_scale_detection[1] = True
|
||||
|
||||
# group channels by target object and affected property of the target
|
||||
target_properties = targets.get(target, {})
|
||||
channels = target_properties.get(target_property, [])
|
||||
@ -466,7 +554,8 @@ def __gather_armature_object_channel_groups(blender_action: bpy.types.Action, bl
|
||||
|
||||
# Detect that the armature is not keyed multiple times for euler and quaternion
|
||||
# Keep only the current rotation mode used by bone
|
||||
rotation, delta, rotation_modes = get_rotation_modes(target_property)
|
||||
rotation, rotation_modes = get_rotation_modes(target_property)
|
||||
delta = get_delta_modes(target_property)
|
||||
|
||||
# Delta rotation management
|
||||
if delta is False:
|
||||
|
@ -45,6 +45,7 @@ class Keyframe:
|
||||
length = {
|
||||
"delta_location": 3,
|
||||
"delta_rotation_euler": 3,
|
||||
"delta_scale": 3,
|
||||
"location": 3,
|
||||
"rotation_axis_angle": 4,
|
||||
"rotation_euler": 3,
|
||||
@ -367,7 +368,10 @@ def gather_keyframes(blender_obj_uuid: str,
|
||||
"rotation_axis_angle": [rot.to_axis_angle()[1], rot.to_axis_angle()[0][0], rot.to_axis_angle()[0][1], rot.to_axis_angle()[0][2]],
|
||||
"rotation_euler": rot.to_euler(),
|
||||
"rotation_quaternion": rot,
|
||||
"scale": sca
|
||||
"scale": sca,
|
||||
"delta_location": trans,
|
||||
"delta_rotation_euler": rot.to_euler(),
|
||||
"delta_scale": sca
|
||||
}[target]
|
||||
else:
|
||||
key.value = get_sk_driver_values(driver_obj_uuid, frame, channels, export_settings)
|
||||
|
@ -11,37 +11,9 @@ from ..com.gltf2_blender_extras import generate_extras
|
||||
from io_scene_gltf2.io.exp.gltf2_io_user_extensions import export_user_extensions
|
||||
from io_scene_gltf2.blender.exp.gltf2_blender_gather_tree import VExportNode
|
||||
from ..com.gltf2_blender_data_path import is_bone_anim_channel
|
||||
from mathutils import Matrix
|
||||
|
||||
|
||||
def __gather_channels_baked(obj_uuid, export_settings):
|
||||
channels = []
|
||||
|
||||
# If no animation in file, no need to bake
|
||||
if len(bpy.data.actions) == 0:
|
||||
return None
|
||||
|
||||
start_frame = min([v[0] for v in [a.frame_range for a in bpy.data.actions]])
|
||||
end_frame = max([v[1] for v in [a.frame_range for a in bpy.data.actions]])
|
||||
|
||||
for p in ["location", "rotation_quaternion", "scale"]:
|
||||
channel = gltf2_blender_gather_animation_channels.gather_animation_channel(
|
||||
obj_uuid,
|
||||
(),
|
||||
export_settings,
|
||||
None,
|
||||
p,
|
||||
start_frame,
|
||||
end_frame,
|
||||
False,
|
||||
obj_uuid, # Use obj uuid as action name for caching
|
||||
None,
|
||||
False #If Object is not animated, don't keep animation for this channel
|
||||
)
|
||||
if channel is not None:
|
||||
channels.append(channel)
|
||||
|
||||
return channels if len(channels) > 0 else None
|
||||
|
||||
def gather_animations( obj_uuid: int,
|
||||
tracks: typing.Dict[str, typing.List[int]],
|
||||
offset: int,
|
||||
@ -69,7 +41,7 @@ def gather_animations( obj_uuid: int,
|
||||
# We also have to check if this is a skinned mesh, because we don't have to force animation baking in this case
|
||||
# (skinned meshes TRS must be ignored, says glTF specification)
|
||||
if export_settings['vtree'].nodes[obj_uuid].skin is None:
|
||||
channels = __gather_channels_baked(obj_uuid, export_settings)
|
||||
channels = gltf2_blender_gather_animation_channels.gather_channels_baked(obj_uuid, None, export_settings)
|
||||
if channels is not None:
|
||||
animation = gltf2_io.Animation(
|
||||
channels=channels,
|
||||
@ -90,8 +62,14 @@ def gather_animations( obj_uuid: int,
|
||||
|
||||
|
||||
current_action = None
|
||||
current_world_matrix = None
|
||||
if blender_object.animation_data and blender_object.animation_data.action:
|
||||
# There is an active action. Storing it, to be able to restore after switching all actions during export
|
||||
current_action = blender_object.animation_data.action
|
||||
elif len(blender_actions) != 0 and blender_object.animation_data is not None and blender_object.animation_data.action is None:
|
||||
# No current action set, storing world matrix of object
|
||||
current_world_matrix = blender_object.matrix_world.copy()
|
||||
|
||||
# Remove any solo (starred) NLA track. Restored after export
|
||||
solo_track = None
|
||||
if blender_object.animation_data:
|
||||
@ -110,6 +88,8 @@ def gather_animations( obj_uuid: int,
|
||||
current_use_nla = blender_object.animation_data.use_nla
|
||||
blender_object.animation_data.use_nla = False
|
||||
|
||||
export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, False)
|
||||
|
||||
# Export all collected actions.
|
||||
for blender_action, track_name, on_type in blender_actions:
|
||||
|
||||
@ -120,7 +100,10 @@ def gather_animations( obj_uuid: int,
|
||||
if blender_object.animation_data.is_property_readonly('action'):
|
||||
blender_object.animation_data.use_tweak_mode = False
|
||||
try:
|
||||
__reset_bone_matrix(blender_object, export_settings)
|
||||
export_user_extensions('pre_animation_switch_hook', export_settings, blender_object, blender_action, track_name, on_type)
|
||||
blender_object.animation_data.action = blender_action
|
||||
export_user_extensions('post_animation_switch_hook', export_settings, blender_object, blender_action, track_name, on_type)
|
||||
except:
|
||||
error = "Action is readonly. Please check NLA editor"
|
||||
print_console("WARNING", "Animation '{}' could not be exported. Cause: {}".format(blender_action.name, error))
|
||||
@ -146,15 +129,22 @@ def gather_animations( obj_uuid: int,
|
||||
if blender_object.animation_data.action is not None:
|
||||
if current_action is None:
|
||||
# remove last exported action
|
||||
__reset_bone_matrix(blender_object, export_settings)
|
||||
blender_object.animation_data.action = None
|
||||
elif blender_object.animation_data.action.name != current_action.name:
|
||||
# Restore action that was active at start of exporting
|
||||
__reset_bone_matrix(blender_object, export_settings)
|
||||
blender_object.animation_data.action = current_action
|
||||
if solo_track is not None:
|
||||
solo_track.is_solo = True
|
||||
blender_object.animation_data.use_tweak_mode = restore_tweak_mode
|
||||
blender_object.animation_data.use_nla = current_use_nla
|
||||
|
||||
if current_world_matrix is not None:
|
||||
blender_object.matrix_world = current_world_matrix
|
||||
|
||||
export_user_extensions('animation_switch_loop_hook', export_settings, blender_object, True)
|
||||
|
||||
return animations, tracks
|
||||
|
||||
|
||||
@ -338,7 +328,21 @@ def __get_blender_actions(blender_object: bpy.types.Object,
|
||||
blender_tracks[act.name] = None
|
||||
action_on_type[act.name] = "OBJECT"
|
||||
|
||||
export_user_extensions('gather_actions_hook', export_settings, blender_object, blender_actions, blender_tracks, action_on_type)
|
||||
# Use a class to get parameters, to be able to modify them
|
||||
class GatherActionHookParameters:
|
||||
def __init__(self, blender_actions, blender_tracks, action_on_type):
|
||||
self.blender_actions = blender_actions
|
||||
self.blender_tracks = blender_tracks
|
||||
self.action_on_type = action_on_type
|
||||
|
||||
gatheractionhookparams = GatherActionHookParameters(blender_actions, blender_tracks, action_on_type)
|
||||
|
||||
export_user_extensions('gather_actions_hook', export_settings, blender_object, gatheractionhookparams)
|
||||
|
||||
# Get params back from hooks
|
||||
blender_actions = gatheractionhookparams.blender_actions
|
||||
blender_tracks = gatheractionhookparams.blender_tracks
|
||||
action_on_type = gatheractionhookparams.action_on_type
|
||||
|
||||
# Remove duplicate actions.
|
||||
blender_actions = list(set(blender_actions))
|
||||
@ -353,3 +357,19 @@ def __is_armature_action(blender_action) -> bool:
|
||||
if is_bone_anim_channel(fcurve.data_path):
|
||||
return True
|
||||
return False
|
||||
|
||||
def __reset_bone_matrix(blender_object, export_settings) -> None:
|
||||
if export_settings['gltf_export_reset_pose_bones'] is False:
|
||||
return
|
||||
|
||||
# Only for armatures
|
||||
if blender_object.type != "ARMATURE":
|
||||
return
|
||||
|
||||
# Remove current action if any
|
||||
if blender_object.animation_data and blender_object.animation_data.action:
|
||||
blender_object.animation_data.action = None
|
||||
|
||||
# Reset bone TRS to avoid keeping non-keyed values when a future action is set
|
||||
for bone in blender_object.pose.bones:
|
||||
bone.matrix_basis = Matrix()
|
||||
|
@ -89,6 +89,7 @@ def objectcache(func):
|
||||
if cache_key_args[0] not in func.__objectcache.keys():
|
||||
result = func(*args)
|
||||
func.__objectcache = result
|
||||
# Here are the keys used: result[obj_uuid][action_name][frame]
|
||||
return result[cache_key_args[0]][cache_key_args[1]][cache_key_args[4]]
|
||||
# object is in cache, but not this action
|
||||
# We need to keep other actions
|
||||
|
@ -58,8 +58,16 @@ def __gather_orthographic(blender_camera, export_settings):
|
||||
znear=None
|
||||
)
|
||||
|
||||
orthographic.xmag = blender_camera.ortho_scale
|
||||
orthographic.ymag = blender_camera.ortho_scale
|
||||
_render = bpy.context.scene.render
|
||||
scene_x = _render.resolution_x * _render.pixel_aspect_x
|
||||
scene_y = _render.resolution_y * _render.pixel_aspect_y
|
||||
scene_square = max(scene_x, scene_y)
|
||||
del _render
|
||||
|
||||
# `Camera().ortho_scale` (and also FOV, FTR) maps to the larger of image width and image height. This is the box shown from the camera view when the `.show_sensor = True` checkbox is enabled.
|
||||
|
||||
orthographic.xmag = blender_camera.ortho_scale * (scene_x / scene_square) / 2
|
||||
orthographic.ymag = blender_camera.ortho_scale * (scene_y / scene_square) / 2
|
||||
|
||||
orthographic.znear = blender_camera.clip_start
|
||||
orthographic.zfar = blender_camera.clip_end
|
||||
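Quick sanity check of the xmag/ymag change above, with assumed numbers (ortho_scale of 10, a 1920x1080 render, square pixels); ortho_scale covers the larger image dimension, and the glTF values are half-extents:

ortho_scale = 10.0
scene_x, scene_y = 1920 * 1.0, 1080 * 1.0      # resolution * pixel aspect
scene_square = max(scene_x, scene_y)           # 1920, the side ortho_scale maps to

xmag = ortho_scale * (scene_x / scene_square) / 2   # 5.0 (half-width)
ymag = ortho_scale * (scene_y / scene_square) / 2   # 2.8125 (half-height)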
@ -79,9 +87,11 @@ def __gather_perspective(blender_camera, export_settings):
|
||||
znear=None
|
||||
)
|
||||
|
||||
width = bpy.context.scene.render.pixel_aspect_x * bpy.context.scene.render.resolution_x
|
||||
height = bpy.context.scene.render.pixel_aspect_y * bpy.context.scene.render.resolution_y
|
||||
_render = bpy.context.scene.render
|
||||
width = _render.pixel_aspect_x * _render.resolution_x
|
||||
height = _render.pixel_aspect_y * _render.resolution_y
|
||||
perspective.aspect_ratio = width / height
|
||||
del _render
|
||||
|
||||
if width >= height:
|
||||
if blender_camera.sensor_fit != 'VERTICAL':
|
||||
|
@ -8,6 +8,10 @@ from io_scene_gltf2.blender.com.gltf2_blender_data_path import get_target_object
|
||||
@skdriverdiscovercache
|
||||
def get_sk_drivers(blender_armature_uuid, export_settings):
|
||||
|
||||
# If no SK are exported --> No driver animation to export
|
||||
if export_settings['gltf_morph'] is False:
|
||||
return tuple([])
|
||||
|
||||
blender_armature = export_settings['vtree'].nodes[blender_armature_uuid].blender_object
|
||||
|
||||
drivers = []
|
||||
|
@ -41,7 +41,7 @@ def gather_image(
|
||||
# In case we can't retrieve image (for example packed images, with original moved)
|
||||
# We don't create an invalid image without a uri
|
||||
factor_uri = None
|
||||
if uri is None: return None
|
||||
if uri is None: return None, None
|
||||
|
||||
buffer_view, factor_buffer_view = __gather_buffer_view(image_data, mime_type, name, export_settings)
|
||||
|
||||
|
@ -7,6 +7,7 @@ from typing import Optional, List, Dict, Any
|
||||
|
||||
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
|
||||
from ..com.gltf2_blender_extras import generate_extras
|
||||
from ..com.gltf2_blender_conversion import PBR_WATTS_TO_LUMENS
|
||||
|
||||
from io_scene_gltf2.io.com import gltf2_io_lights_punctual
|
||||
from io_scene_gltf2.io.com import gltf2_io_debug
|
||||
@ -50,7 +51,7 @@ def __gather_color(blender_lamp, export_settings) -> Optional[List[float]]:
|
||||
return list(blender_lamp.color)
|
||||
|
||||
|
||||
def __gather_intensity(blender_lamp, _) -> Optional[float]:
|
||||
def __gather_intensity(blender_lamp, export_settings) -> Optional[float]:
|
||||
emission_node = __get_cycles_emission_node(blender_lamp)
|
||||
if emission_node is not None:
|
||||
if blender_lamp.type != 'SUN':
|
||||
@ -68,9 +69,26 @@ def __gather_intensity(blender_lamp, _) -> Optional[float]:
|
||||
emission_strength = blender_lamp.energy
|
||||
else:
|
||||
emission_strength = emission_node.inputs["Strength"].default_value
|
||||
else:
|
||||
emission_strength = blender_lamp.energy
|
||||
if export_settings['gltf_lighting_mode'] == 'RAW':
|
||||
return emission_strength
|
||||
|
||||
return blender_lamp.energy
|
||||
else:
|
||||
# Assume at this point that the computed strength is still in the appropriate watt-related SI unit; if everything up to here was done on a physical basis, it should be.
|
||||
if blender_lamp.type == 'SUN': # W/m^2 in Blender to lm/m^2 for GLTF/KHR_lights_punctual.
|
||||
emission_luminous = emission_strength
|
||||
else:
|
||||
# Other than directional, only point and spot lamps are supported by GLTF.
|
||||
# In Blender, points are omnidirectional W, and spots are specified as if they're points.
|
||||
# Point and spot should both be lm/r^2 in GLTF.
|
||||
emission_luminous = emission_strength / (4*math.pi)
|
||||
if export_settings['gltf_lighting_mode'] == 'SPEC':
|
||||
emission_luminous *= PBR_WATTS_TO_LUMENS
|
||||
elif export_settings['gltf_lighting_mode'] == 'COMPAT':
|
||||
pass # Just so we have an exhaustive tree to catch bugged values.
|
||||
else:
|
||||
raise ValueError(export_settings['gltf_lighting_mode'])
|
||||
return emission_luminous
|
||||
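For reviewers, a worked example of the unit handling above, assuming PBR_WATTS_TO_LUMENS is the usual 683 lm/W luminous-efficacy constant (the exact value lives in gltf2_blender_conversion):

import math

PBR_WATTS_TO_LUMENS = 683          # assumed value, see gltf2_blender_conversion
energy = 100.0                     # Blender point lamp, radiometric watts

emission_luminous = energy / (4 * math.pi)    # points radiate over the full sphere -> W/sr
emission_luminous *= PBR_WATTS_TO_LUMENS      # 'SPEC' mode -> roughly 5435 lm/sr (candela)
# 'COMPAT' mode skips the 683 factor; 'RAW' returns the strength unchanged.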
|
||||
|
||||
def __gather_spot(blender_lamp, export_settings) -> Optional[gltf2_io_lights_punctual.LightSpot]:
|
||||
|
@ -71,6 +71,9 @@ def __gather_base_color_factor(blender_material, export_settings):
|
||||
if rgb is None: rgb = [1.0, 1.0, 1.0]
|
||||
if alpha is None: alpha = 1.0
|
||||
|
||||
# Need to clamp between 0.0 and 1.0: Blender color can be outside this range
|
||||
rgb = [max(min(c, 1.0), 0.0) for c in rgb]
|
||||
|
||||
rgba = [*rgb, alpha]
|
||||
|
||||
if rgba == [1, 1, 1, 1]: return None
|
||||
|
@ -29,6 +29,8 @@ def export_sheen(blender_material, export_settings):
|
||||
else:
|
||||
# Factor
|
||||
fac = gltf2_blender_get.get_factor_from_socket(sheenColor_socket, kind='RGB')
|
||||
if fac is None:
|
||||
fac = [1.0, 1.0, 1.0] # Default is 0.0/0.0/0.0, so we need to set it to 1 if no factor
|
||||
if fac is not None and fac != [0.0, 0.0, 0.0]:
|
||||
sheen_extension['sheenColorFactor'] = fac
|
||||
|
||||
@ -51,6 +53,8 @@ def export_sheen(blender_material, export_settings):
|
||||
else:
|
||||
# Factor
|
||||
fac = gltf2_blender_get.get_factor_from_socket(sheenRoughness_socket, kind='VALUE')
|
||||
if fac is None:
|
||||
fac = 1.0 # Default is 0.0 so we need to set it to 1.0 if no factor
|
||||
if fac is not None and fac != 0.0:
|
||||
sheen_extension['sheenRoughnessFactor'] = fac
|
||||
|
||||
|
@ -120,9 +120,13 @@ def export_specular(blender_material, export_settings):
|
||||
return np.array([c[0] / l, c[1] / l, c[2] / l])
|
||||
|
||||
f0_from_ior = ((ior - 1)/(ior + 1))**2
|
||||
if f0_from_ior == 0:
|
||||
specular_color = [1.0, 1.0, 1.0]
|
||||
else:
|
||||
tint_strength = (1 - specular_tint) + normalize(base_color) * specular_tint
|
||||
specular_color = (1 - transmission) * (1 / f0_from_ior) * 0.08 * specular * tint_strength + transmission * tint_strength
|
||||
specular_extension['specularColorFactor'] = list(specular_color)
|
||||
specular_color = list(specular_color)
|
||||
specular_extension['specularColorFactor'] = specular_color
|
||||
else:
|
||||
if specular_not_linked and specular == BLENDER_SPECULAR and specular_tint_not_linked and specular_tint == BLENDER_SPECULAR_TINT:
|
||||
return None, None
|
||||
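Side note on the f0_from_ior term above: it is the Fresnel reflectance at normal incidence. With an illustrative IOR of 1.5 it comes out at the familiar 4%, and the 0.08 * specular scaling then cancels out for Blender's default specular of 0.5:

ior = 1.5                                     # illustrative dielectric IOR
f0_from_ior = ((ior - 1) / (ior + 1)) ** 2    # (0.5 / 2.5) ** 2 == 0.04

specular = 0.5                                # Blender's Principled default
scale = (1 / f0_from_ior) * 0.08 * specular   # == 1.0, so tint_strength passes through unscaled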
|
@ -25,7 +25,10 @@ from io_scene_gltf2.blender.exp import gltf2_blender_gather_tree
|
||||
def gather_node(vnode, export_settings):
|
||||
blender_object = vnode.blender_object
|
||||
|
||||
skin = __gather_skin(vnode, blender_object, export_settings)
|
||||
skin = gather_skin(vnode.uuid, export_settings)
|
||||
if skin is not None:
|
||||
vnode.skin = skin
|
||||
|
||||
node = gltf2_io.Node(
|
||||
camera=__gather_camera(blender_object, export_settings),
|
||||
children=__gather_children(vnode, blender_object, export_settings),
|
||||
@ -50,9 +53,6 @@ def gather_node(vnode, export_settings):
|
||||
|
||||
vnode.node = node
|
||||
|
||||
if node.skin is not None:
|
||||
vnode.skin = skin
|
||||
|
||||
return node
|
||||
|
||||
|
||||
@ -314,9 +314,15 @@ def __gather_trans_rot_scale(vnode, export_settings):
|
||||
trans, rot, sca = vnode.matrix_world.decompose()
|
||||
else:
|
||||
# calculate local matrix
|
||||
if export_settings['vtree'].nodes[vnode.parent_uuid].skin is None:
|
||||
trans, rot, sca = (export_settings['vtree'].nodes[vnode.parent_uuid].matrix_world.inverted_safe() @ vnode.matrix_world).decompose()
|
||||
|
||||
|
||||
else:
|
||||
# But ... if parent has skin, the parent TRS are not taken into account, so don't get local from parent, but from armature
|
||||
# It also depends on whether the skinned mesh is parented to the armature or not
|
||||
if export_settings['vtree'].nodes[vnode.parent_uuid].parent_uuid is not None and export_settings['vtree'].nodes[export_settings['vtree'].nodes[vnode.parent_uuid].parent_uuid].blender_type == VExportNode.ARMATURE:
|
||||
trans, rot, sca = (export_settings['vtree'].nodes[export_settings['vtree'].nodes[vnode.parent_uuid].armature].matrix_world.inverted_safe() @ vnode.matrix_world).decompose()
|
||||
else:
|
||||
trans, rot, sca = vnode.matrix_world.decompose()
|
||||
|
||||
# make sure the rotation is normalized
|
||||
rot.normalize()
|
||||
@ -352,7 +358,8 @@ def __gather_trans_rot_scale(vnode, export_settings):
|
||||
scale = [sca[0], sca[1], sca[2]]
|
||||
return translation, rotation, scale
|
||||
|
||||
def __gather_skin(vnode, blender_object, export_settings):
|
||||
def gather_skin(vnode, export_settings):
|
||||
blender_object = export_settings['vtree'].nodes[vnode].blender_object
|
||||
modifiers = {m.type: m for m in blender_object.modifiers}
|
||||
if "ARMATURE" not in modifiers or modifiers["ARMATURE"].object is None:
|
||||
return None
|
||||
@ -379,7 +386,7 @@ def __gather_skin(vnode, blender_object, export_settings):
|
||||
return None
|
||||
|
||||
# Skins and meshes must be in the same glTF node, which is different from how blender handles armatures
|
||||
return gltf2_blender_gather_skins.gather_skin(vnode.armature, export_settings)
|
||||
return gltf2_blender_gather_skins.gather_skin(export_settings['vtree'].nodes[vnode].armature, export_settings)
|
||||
|
||||
|
||||
def __gather_weights(blender_object, export_settings):
|
||||
|
@ -10,32 +10,34 @@ from io_scene_gltf2.io.com import gltf2_io_debug
|
||||
from io_scene_gltf2.io.exp import gltf2_io_binary_data
|
||||
|
||||
|
||||
|
||||
def gather_primitive_attributes(blender_primitive, export_settings):
|
||||
"""
|
||||
Gathers the attributes, such as POSITION, NORMAL, TANGENT from a blender primitive.
|
||||
Gathers the attributes, such as POSITION, NORMAL, TANGENT, and all custom attributes from a blender primitive
|
||||
|
||||
:return: a dictionary of attributes
|
||||
"""
|
||||
attributes = {}
|
||||
attributes.update(__gather_position(blender_primitive, export_settings))
|
||||
attributes.update(__gather_normal(blender_primitive, export_settings))
|
||||
attributes.update(__gather_tangent(blender_primitive, export_settings))
|
||||
attributes.update(__gather_texcoord(blender_primitive, export_settings))
|
||||
attributes.update(__gather_colors(blender_primitive, export_settings))
|
||||
attributes.update(__gather_skins(blender_primitive, export_settings))
|
||||
|
||||
# loop on each attribute extracted
|
||||
# for skinning, all linked attributes (WEIGHTS_ and JOINTS_) need to be calculated
|
||||
# in one shot (because of normalization), so we need to check that it is called only once.
|
||||
|
||||
skin_done = False
|
||||
|
||||
for attribute in blender_primitive["attributes"]:
|
||||
if (attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_")) and skin_done is True:
|
||||
continue
|
||||
if attribute.startswith("MORPH_"):
|
||||
continue # Target for morphs will be managed later
|
||||
attributes.update(__gather_attribute(blender_primitive, attribute, export_settings))
|
||||
if (attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_")):
|
||||
skin_done = True
|
||||
|
||||
return attributes
|
||||
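The skin_done flag above exists because weight normalization has to happen across every WEIGHTS_* set at once; a per-set pass would renormalize each group of 4 separately. A small sketch with made-up weights:

import numpy as np

weights_0 = np.array([[0.4, 0.3, 0.1, 0.1]], dtype=np.float32)    # first set of 4 influences
weights_1 = np.array([[0.05, 0.05, 0.0, 0.0]], dtype=np.float32)  # overflow set

total = (weights_0.sum(axis=1) + weights_1.sum(axis=1)).reshape(-1, 1)  # sum over *all* sets
weights_0 /= total
weights_1 /= total   # both sets together still sum to 1.0 per vertex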
|
||||
|
||||
def array_to_accessor(array, component_type, data_type, include_max_and_min=False):
|
||||
dtype = gltf2_io_constants.ComponentType.to_numpy_dtype(component_type)
|
||||
num_elems = gltf2_io_constants.DataType.num_elements(data_type)
|
||||
|
||||
if type(array) is not np.ndarray:
|
||||
array = np.array(array, dtype=dtype)
|
||||
array = array.reshape(len(array) // num_elems, num_elems)
|
||||
|
||||
assert array.dtype == dtype
|
||||
assert array.shape[1] == num_elems
|
||||
|
||||
amax = None
|
||||
amin = None
|
||||
@ -58,109 +60,6 @@ def array_to_accessor(array, component_type, data_type, include_max_and_min=Fals
|
||||
type=data_type,
|
||||
)
|
||||
|
||||
|
||||
def __gather_position(blender_primitive, export_settings):
|
||||
position = blender_primitive["attributes"]["POSITION"]
|
||||
return {
|
||||
"POSITION": array_to_accessor(
|
||||
position,
|
||||
component_type=gltf2_io_constants.ComponentType.Float,
|
||||
data_type=gltf2_io_constants.DataType.Vec3,
|
||||
include_max_and_min=True
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
def __gather_normal(blender_primitive, export_settings):
|
||||
if not export_settings[gltf2_blender_export_keys.NORMALS]:
|
||||
return {}
|
||||
if 'NORMAL' not in blender_primitive["attributes"]:
|
||||
return {}
|
||||
normal = blender_primitive["attributes"]['NORMAL']
|
||||
return {
|
||||
"NORMAL": array_to_accessor(
|
||||
normal,
|
||||
component_type=gltf2_io_constants.ComponentType.Float,
|
||||
data_type=gltf2_io_constants.DataType.Vec3,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
def __gather_tangent(blender_primitive, export_settings):
|
||||
if not export_settings[gltf2_blender_export_keys.TANGENTS]:
|
||||
return {}
|
||||
if 'TANGENT' not in blender_primitive["attributes"]:
|
||||
return {}
|
||||
tangent = blender_primitive["attributes"]['TANGENT']
|
||||
return {
|
||||
"TANGENT": array_to_accessor(
|
||||
tangent,
|
||||
component_type=gltf2_io_constants.ComponentType.Float,
|
||||
data_type=gltf2_io_constants.DataType.Vec4,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
def __gather_texcoord(blender_primitive, export_settings):
|
||||
attributes = {}
|
||||
if export_settings[gltf2_blender_export_keys.TEX_COORDS]:
|
||||
tex_coord_index = 0
|
||||
tex_coord_id = 'TEXCOORD_' + str(tex_coord_index)
|
||||
while blender_primitive["attributes"].get(tex_coord_id) is not None:
|
||||
tex_coord = blender_primitive["attributes"][tex_coord_id]
|
||||
attributes[tex_coord_id] = array_to_accessor(
|
||||
tex_coord,
|
||||
component_type=gltf2_io_constants.ComponentType.Float,
|
||||
data_type=gltf2_io_constants.DataType.Vec2,
|
||||
)
|
||||
tex_coord_index += 1
|
||||
tex_coord_id = 'TEXCOORD_' + str(tex_coord_index)
|
||||
return attributes
|
||||
|
||||
|
||||
def __gather_colors(blender_primitive, export_settings):
|
||||
attributes = {}
|
||||
if export_settings[gltf2_blender_export_keys.COLORS]:
|
||||
color_index = 0
|
||||
color_id = 'COLOR_' + str(color_index)
|
||||
while blender_primitive["attributes"].get(color_id) is not None:
|
||||
colors = blender_primitive["attributes"][color_id]["data"]
|
||||
|
||||
if type(colors) is not np.ndarray:
|
||||
colors = np.array(colors, dtype=np.float32)
|
||||
colors = colors.reshape(len(colors) // 4, 4)
|
||||
|
||||
if blender_primitive["attributes"][color_id]["norm"] is True:
|
||||
comp_type = gltf2_io_constants.ComponentType.UnsignedShort
|
||||
|
||||
# Convert to normalized ushorts
|
||||
colors *= 65535
|
||||
colors += 0.5 # bias for rounding
|
||||
colors = colors.astype(np.uint16)
|
||||
|
||||
else:
|
||||
comp_type = gltf2_io_constants.ComponentType.Float
|
||||
|
||||
attributes[color_id] = gltf2_io.Accessor(
|
||||
buffer_view=gltf2_io_binary_data.BinaryData(colors.tobytes(), gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER),
|
||||
byte_offset=None,
|
||||
component_type=comp_type,
|
||||
count=len(colors),
|
||||
extensions=None,
|
||||
extras=None,
|
||||
max=None,
|
||||
min=None,
|
||||
name=None,
|
||||
normalized=blender_primitive["attributes"][color_id]["norm"],
|
||||
sparse=None,
|
||||
type=gltf2_io_constants.DataType.Vec4,
|
||||
)
|
||||
|
||||
color_index += 1
|
||||
color_id = 'COLOR_' + str(color_index)
|
||||
return attributes
|
||||
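Small illustration of the BYTE_COLOR path above (made-up values): float colors are quantized to unsigned shorts with a +0.5 bias so the truncating cast rounds to nearest, and the accessor is marked normalized so readers divide by 65535 again:

import numpy as np

colors = np.array([[1.0, 0.5, 0.0, 1.0]], dtype=np.float32)
colors = colors * 65535
colors += 0.5                    # bias so the cast below rounds to nearest
colors = colors.astype(np.uint16)
# -> [[65535, 32768, 0, 65535]], stored with normalized=True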
|
||||
|
||||
def __gather_skins(blender_primitive, export_settings):
|
||||
attributes = {}
|
||||
|
||||
@ -208,8 +107,10 @@ def __gather_skins(blender_primitive, export_settings):
|
||||
component_type = gltf2_io_constants.ComponentType.UnsignedShort
|
||||
if max(internal_joint) < 256:
|
||||
component_type = gltf2_io_constants.ComponentType.UnsignedByte
|
||||
joints = np.array(internal_joint, dtype= gltf2_io_constants.ComponentType.to_numpy_dtype(component_type))
|
||||
joints = joints.reshape(-1, 4)
|
||||
joint = array_to_accessor(
|
||||
internal_joint,
|
||||
joints,
|
||||
component_type,
|
||||
data_type=gltf2_io_constants.DataType.Vec4,
|
||||
)
|
||||
@ -226,6 +127,7 @@ def __gather_skins(blender_primitive, export_settings):
|
||||
# Normalize weights so they sum to 1
|
||||
weight_total = weight_total.reshape(-1, 1)
|
||||
for s in range(0, max_bone_set_index+1):
|
||||
weight_id = 'WEIGHTS_' + str(s)
|
||||
weight_arrs[s] /= weight_total
|
||||
|
||||
weight = array_to_accessor(
|
||||
@ -236,3 +138,48 @@ def __gather_skins(blender_primitive, export_settings):
|
||||
attributes[weight_id] = weight
|
||||
|
||||
return attributes
|
||||
|
||||
|
||||
def __gather_attribute(blender_primitive, attribute, export_settings):
|
||||
data = blender_primitive["attributes"][attribute]
|
||||
|
||||
|
||||
include_max_and_mins = {
|
||||
"POSITION": True
|
||||
}
|
||||
|
||||
if (attribute.startswith("_") or attribute.startswith("COLOR_")) and blender_primitive["attributes"][attribute]['component_type'] == gltf2_io_constants.ComponentType.UnsignedShort:
|
||||
# Byte Color vertex color, need to normalize
|
||||
|
||||
data['data'] *= 65535
|
||||
data['data'] += 0.5 # bias for rounding
|
||||
data['data'] = data['data'].astype(np.uint16)
|
||||
|
||||
return { attribute : gltf2_io.Accessor(
|
||||
buffer_view=gltf2_io_binary_data.BinaryData(data['data'].tobytes(), gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER),
|
||||
byte_offset=None,
|
||||
component_type=data['component_type'],
|
||||
count=len(data['data']),
|
||||
extensions=None,
|
||||
extras=None,
|
||||
max=None,
|
||||
min=None,
|
||||
name=None,
|
||||
normalized=True,
|
||||
sparse=None,
|
||||
type=data['data_type'],
|
||||
)
|
||||
}
|
||||
|
||||
elif attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_"):
|
||||
return __gather_skins(blender_primitive, export_settings)
|
||||
|
||||
else:
|
||||
return {
|
||||
attribute: array_to_accessor(
|
||||
data['data'],
|
||||
component_type=data['component_type'],
|
||||
data_type=data['data_type'],
|
||||
include_max_and_min=include_max_and_mins.get(attribute, False)
|
||||
)
|
||||
}
|
||||
|
@ -8,7 +8,7 @@ import numpy as np
|
||||
from .gltf2_blender_export_keys import NORMALS, MORPH_NORMAL, TANGENTS, MORPH_TANGENT, MORPH
|
||||
|
||||
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached, cached_by_key
|
||||
from io_scene_gltf2.blender.exp import gltf2_blender_extract
|
||||
from io_scene_gltf2.blender.exp import gltf2_blender_gather_primitives_extract
|
||||
from io_scene_gltf2.blender.exp import gltf2_blender_gather_accessors
|
||||
from io_scene_gltf2.blender.exp import gltf2_blender_gather_primitive_attributes
|
||||
from io_scene_gltf2.blender.exp import gltf2_blender_gather_materials
|
||||
@ -112,7 +112,7 @@ def __gather_cache_primitives(
|
||||
"""
|
||||
primitives = []
|
||||
|
||||
blender_primitives = gltf2_blender_extract.extract_primitives(
|
||||
blender_primitives = gltf2_blender_gather_primitives_extract.extract_primitives(
|
||||
blender_mesh, uuid_for_skined_data, vertex_groups, modifiers, export_settings)
|
||||
|
||||
for internal_primitive in blender_primitives:
|
||||
@ -184,7 +184,7 @@ def __gather_targets(blender_primitive, blender_mesh, modifiers, export_settings
|
||||
|
||||
if blender_primitive["attributes"].get(target_position_id) is not None:
|
||||
target = {}
|
||||
internal_target_position = blender_primitive["attributes"][target_position_id]
|
||||
internal_target_position = blender_primitive["attributes"][target_position_id]["data"]
|
||||
target["POSITION"] = gltf2_blender_gather_primitive_attributes.array_to_accessor(
|
||||
internal_target_position,
|
||||
component_type=gltf2_io_constants.ComponentType.Float,
|
||||
@ -196,7 +196,7 @@ def __gather_targets(blender_primitive, blender_mesh, modifiers, export_settings
|
||||
and export_settings[MORPH_NORMAL] \
|
||||
and blender_primitive["attributes"].get(target_normal_id) is not None:
|
||||
|
||||
internal_target_normal = blender_primitive["attributes"][target_normal_id]
|
||||
internal_target_normal = blender_primitive["attributes"][target_normal_id]["data"]
|
||||
target['NORMAL'] = gltf2_blender_gather_primitive_attributes.array_to_accessor(
|
||||
internal_target_normal,
|
||||
component_type=gltf2_io_constants.ComponentType.Float,
|
||||
@ -206,7 +206,7 @@ def __gather_targets(blender_primitive, blender_mesh, modifiers, export_settings
|
||||
if export_settings[TANGENTS] \
|
||||
and export_settings[MORPH_TANGENT] \
|
||||
and blender_primitive["attributes"].get(target_tangent_id) is not None:
|
||||
internal_target_tangent = blender_primitive["attributes"][target_tangent_id]
|
||||
internal_target_tangent = blender_primitive["attributes"][target_tangent_id]["data"]
|
||||
target['TANGENT'] = gltf2_blender_gather_primitive_attributes.array_to_accessor(
|
||||
internal_target_tangent,
|
||||
component_type=gltf2_io_constants.ComponentType.Float,
|
||||
|
@ -0,0 +1,875 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# Copyright 2018-2021 The glTF-Blender-IO authors.
|
||||
|
||||
import numpy as np
|
||||
from mathutils import Vector
|
||||
|
||||
from . import gltf2_blender_export_keys
|
||||
from ...io.com.gltf2_io_debug import print_console
|
||||
from io_scene_gltf2.blender.exp import gltf2_blender_gather_skins
|
||||
from io_scene_gltf2.io.com import gltf2_io_constants
|
||||
from io_scene_gltf2.blender.com import gltf2_blender_conversion
|
||||
from io_scene_gltf2.blender.com import gltf2_blender_default
|
||||
|
||||
|
||||
def extract_primitives(blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings):
|
||||
"""Extract primitives from a mesh."""
|
||||
print_console('INFO', 'Extracting primitive: ' + blender_mesh.name)
|
||||
|
||||
primitive_creator = PrimitiveCreator(blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings)
|
||||
primitive_creator.prepare_data()
|
||||
primitive_creator.define_attributes()
|
||||
primitive_creator.create_dots_data_structure()
|
||||
primitive_creator.populate_dots_data()
|
||||
primitive_creator.primitive_split()
|
||||
return primitive_creator.primitive_creation()
|
||||
|
||||
class PrimitiveCreator:
|
||||
def __init__(self, blender_mesh, uuid_for_skined_data, blender_vertex_groups, modifiers, export_settings):
|
||||
self.blender_mesh = blender_mesh
|
||||
self.uuid_for_skined_data = uuid_for_skined_data
|
||||
self.blender_vertex_groups = blender_vertex_groups
|
||||
self.modifiers = modifiers
|
||||
self.export_settings = export_settings
|
||||
|
||||
@classmethod
|
||||
def apply_mat_to_all(cls, matrix, vectors):
|
||||
"""Given matrix m and vectors [v1,v2,...], computes [m@v1,m@v2,...]"""
|
||||
# Linear part
|
||||
m = matrix.to_3x3() if len(matrix) == 4 else matrix
|
||||
res = np.matmul(vectors, np.array(m.transposed()))
|
||||
# Translation part
|
||||
if len(matrix) == 4:
|
||||
res += np.array(matrix.translation)
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
def normalize_vecs(cls, vectors):
|
||||
norms = np.linalg.norm(vectors, axis=1, keepdims=True)
|
||||
np.divide(vectors, norms, out=vectors, where=norms != 0)
|
||||
|
||||
@classmethod
|
||||
def zup2yup(cls, array):
|
||||
# x,y,z -> x,z,-y
|
||||
array[:, [1,2]] = array[:, [2,1]] # x,z,y
|
||||
array[:, 2] *= -1 # x,z,-y
|
||||
|
||||
def prepare_data(self):
|
||||
self.blender_object = None
|
||||
if self.uuid_for_skined_data:
|
||||
self.blender_object = self.export_settings['vtree'].nodes[self.uuid_for_skined_data].blender_object
|
||||
|
||||
self.use_normals = self.export_settings[gltf2_blender_export_keys.NORMALS]
|
||||
if self.use_normals:
|
||||
self.blender_mesh.calc_normals_split()
|
||||
|
||||
self.use_tangents = False
|
||||
if self.use_normals and self.export_settings[gltf2_blender_export_keys.TANGENTS]:
|
||||
if self.blender_mesh.uv_layers.active and len(self.blender_mesh.uv_layers) > 0:
|
||||
try:
|
||||
self.blender_mesh.calc_tangents()
|
||||
self.use_tangents = True
|
||||
except Exception:
|
||||
print_console('WARNING', 'Could not calculate tangents. Please try to triangulate the mesh first.')
|
||||
|
||||
self.tex_coord_max = 0
|
||||
if self.export_settings[gltf2_blender_export_keys.TEX_COORDS]:
|
||||
if self.blender_mesh.uv_layers.active:
|
||||
self.tex_coord_max = len(self.blender_mesh.uv_layers)
|
||||
|
||||
self.use_morph_normals = self.use_normals and self.export_settings[gltf2_blender_export_keys.MORPH_NORMAL]
|
||||
self.use_morph_tangents = self.use_morph_normals and self.use_tangents and self.export_settings[gltf2_blender_export_keys.MORPH_TANGENT]
|
||||
|
||||
self.use_materials = self.export_settings[gltf2_blender_export_keys.MATERIALS]
|
||||
|
||||
self.blender_attributes = []
|
||||
|
||||
# Check if we have to export skin
|
||||
self.armature = None
|
||||
self.skin = None
|
||||
if self.blender_vertex_groups and self.export_settings[gltf2_blender_export_keys.SKINS]:
|
||||
if self.modifiers is not None:
|
||||
modifiers_dict = {m.type: m for m in self.modifiers}
|
||||
if "ARMATURE" in modifiers_dict:
|
||||
modifier = modifiers_dict["ARMATURE"]
|
||||
self.armature = modifier.object
|
||||
|
||||
# Skin must be ignored if the object is parented to a bone of the armature
|
||||
# (This would create infinite recursion)
|
||||
# So ignoring skin in that case
|
||||
is_child_of_arma = (
|
||||
self.armature and
|
||||
self.blender_object and
|
||||
self.blender_object.parent_type == "BONE" and
|
||||
self.blender_object.parent.name == self.armature.name
|
||||
)
|
||||
if is_child_of_arma:
|
||||
self.armature = None
|
||||
|
||||
if self.armature:
|
||||
self.skin = gltf2_blender_gather_skins.gather_skin(self.export_settings['vtree'].nodes[self.uuid_for_skined_data].armature, self.export_settings)
|
||||
if not self.skin:
|
||||
self.armature = None
|
||||
|
||||
self.key_blocks = []
|
||||
if self.export_settings[gltf2_blender_export_keys.APPLY] is False and self.blender_mesh.shape_keys and self.export_settings[gltf2_blender_export_keys.MORPH]:
|
||||
self.key_blocks = [
|
||||
key_block
|
||||
for key_block in self.blender_mesh.shape_keys.key_blocks
|
||||
if not (key_block == key_block.relative_key or key_block.mute)
|
||||
]
|
||||
|
||||
# Fetch vert positions and bone data (joint,weights)
|
||||
|
||||
self.locs = None
|
||||
self.morph_locs = None
|
||||
self.__get_positions()
|
||||
|
||||
if self.skin:
|
||||
self.__get_bone_data()
|
||||
if self.need_neutral_bone is True:
|
||||
# Need to create a fake joint at the root of the armature
# in order to assign otherwise unassigned vertices to it.
# This is not possible yet; we have to wait until the armature node is created.
# Just store the flag here, to be used later.
|
||||
armature_uuid = self.export_settings['vtree'].nodes[self.uuid_for_skined_data].armature
|
||||
self.export_settings['vtree'].nodes[armature_uuid].need_neutral_bone = True
|
||||
|
||||
def define_attributes(self):
|
||||
# Manage attributes + COLOR_0
|
||||
for blender_attribute_index, blender_attribute in enumerate(self.blender_mesh.attributes):
|
||||
|
||||
# Exclude special attributes (used internally by Blender)
|
||||
if blender_attribute.name in gltf2_blender_default.SPECIAL_ATTRIBUTES:
|
||||
continue
|
||||
|
||||
attr = {}
|
||||
attr['blender_attribute_index'] = blender_attribute_index
|
||||
attr['blender_name'] = blender_attribute.name
|
||||
attr['blender_domain'] = blender_attribute.domain
|
||||
attr['blender_data_type'] = blender_attribute.data_type
|
||||
|
||||
# For now, we don't export edge data, because I need to find how to
|
||||
# get from edge data to dots data
|
||||
if attr['blender_domain'] == "EDGE":
|
||||
continue
|
||||
|
||||
# Some types are not exportable (e.g. String)
|
||||
if gltf2_blender_conversion.get_component_type(blender_attribute.data_type) is None or \
|
||||
gltf2_blender_conversion.get_data_type(blender_attribute.data_type) is None:
|
||||
|
||||
continue
|
||||
|
||||
if self.blender_mesh.color_attributes.find(blender_attribute.name) == self.blender_mesh.color_attributes.render_color_index \
|
||||
and self.blender_mesh.color_attributes.render_color_index != -1:
|
||||
|
||||
if self.export_settings[gltf2_blender_export_keys.COLORS] is False:
|
||||
continue
|
||||
attr['gltf_attribute_name'] = 'COLOR_0'
|
||||
attr['get'] = self.get_function()
|
||||
|
||||
else:
|
||||
attr['gltf_attribute_name'] = '_' + blender_attribute.name.upper()
|
||||
attr['get'] = self.get_function()
|
||||
if self.export_settings['gltf_attributes'] is False:
|
||||
continue
|
||||
|
||||
self.blender_attributes.append(attr)
|
||||
|
||||
# Manage POSITION
|
||||
attr = {}
|
||||
attr['blender_data_type'] = 'FLOAT_VECTOR'
|
||||
attr['blender_domain'] = 'POINT'
|
||||
attr['gltf_attribute_name'] = 'POSITION'
|
||||
attr['set'] = self.set_function()
|
||||
attr['skip_getting_to_dots'] = True
|
||||
self.blender_attributes.append(attr)
|
||||
|
||||
# Manage uvs TEX_COORD_x
|
||||
for tex_coord_i in range(self.tex_coord_max):
|
||||
attr = {}
|
||||
attr['blender_data_type'] = 'FLOAT2'
|
||||
attr['blender_domain'] = 'CORNER'
|
||||
attr['gltf_attribute_name'] = 'TEXCOORD_' + str(tex_coord_i)
|
||||
attr['get'] = self.get_function()
|
||||
self.blender_attributes.append(attr)
|
||||
|
||||
# Manage NORMALS
|
||||
if self.use_normals:
|
||||
attr = {}
|
||||
attr['blender_data_type'] = 'FLOAT_VECTOR'
|
||||
attr['blender_domain'] = 'CORNER'
|
||||
attr['gltf_attribute_name'] = 'NORMAL'
|
||||
attr['gltf_attribute_name_morph'] = 'MORPH_NORMAL_'
|
||||
attr['get'] = self.get_function()
|
||||
self.blender_attributes.append(attr)
|
||||
|
||||
# Manage TANGENT
|
||||
if self.use_tangents:
|
||||
attr = {}
|
||||
attr['blender_data_type'] = 'FLOAT_VECTOR_4'
|
||||
attr['blender_domain'] = 'CORNER'
|
||||
attr['gltf_attribute_name'] = 'TANGENT'
|
||||
attr['get'] = self.get_function()
|
||||
self.blender_attributes.append(attr)
|
||||
|
||||
# Manage MORPH_POSITION_x
|
||||
for morph_i, vs in enumerate(self.morph_locs):
|
||||
attr = {}
|
||||
attr['blender_attribute_index'] = morph_i
|
||||
attr['blender_data_type'] = 'FLOAT_VECTOR'
|
||||
attr['blender_domain'] = 'POINT'
|
||||
attr['gltf_attribute_name'] = 'MORPH_POSITION_' + str(morph_i)
|
||||
attr['skip_getting_to_dots'] = True
|
||||
attr['set'] = self.set_function()
|
||||
self.blender_attributes.append(attr)
|
||||
|
||||
# Manage MORPH_NORMAL_x
|
||||
if self.use_morph_normals:
|
||||
attr = {}
|
||||
attr['blender_attribute_index'] = morph_i
|
||||
attr['blender_data_type'] = 'FLOAT_VECTOR'
|
||||
attr['blender_domain'] = 'CORNER'
|
||||
attr['gltf_attribute_name'] = 'MORPH_NORMAL_' + str(morph_i)
|
||||
# No get function is set here, because the data is derived from NORMAL
|
||||
self.blender_attributes.append(attr)
|
||||
|
||||
# Manage MORPH_TANGENT_x
# This is a particular case: the following data must already be calculated
# - NORMAL
# - MORPH_NORMAL
# - TANGENT
# So this block needs to come AFTER the 3 attributes above.
|
||||
if self.use_morph_tangents:
|
||||
attr = {}
|
||||
attr['blender_attribute_index'] = morph_i
|
||||
attr['blender_data_type'] = 'FLOAT_VECTOR'
|
||||
attr['blender_domain'] = 'CORNER'
|
||||
attr['gltf_attribute_name'] = 'MORPH_TANGENT_' + str(morph_i)
|
||||
attr['gltf_attribute_name_normal'] = "NORMAL"
|
||||
attr['gltf_attribute_name_morph_normal'] = "MORPH_NORMAL_" + str(morph_i)
|
||||
attr['gltf_attribute_name_tangent'] = "TANGENT"
|
||||
attr['skip_getting_to_dots'] = True
|
||||
attr['set'] = self.set_function()
|
||||
self.blender_attributes.append(attr)
|
||||
|
||||
for attr in self.blender_attributes:
|
||||
attr['len'] = gltf2_blender_conversion.get_data_length(attr['blender_data_type'])
|
||||
attr['type'] = gltf2_blender_conversion.get_numpy_type(attr['blender_data_type'])
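# For example, a 'FLOAT_VECTOR' attribute presumably maps to len 3 with a float32
# numpy type, and a 'FLOAT2' UV-style attribute to len 2 (the exact mapping is
# defined in gltf2_blender_conversion).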
|
||||
|
||||
def create_dots_data_structure(self):
|
||||
# Now that we have all the attributes that are going to be exported, create the numpy array that will store them
|
||||
dot_fields = [('vertex_index', np.uint32)]
|
||||
if self.export_settings['gltf_loose_edges']:
|
||||
dot_fields_edges = [('vertex_index', np.uint32)]
|
||||
if self.export_settings['gltf_loose_points']:
|
||||
dot_fields_points = [('vertex_index', np.uint32)]
|
||||
for attr in self.blender_attributes:
|
||||
if 'skip_getting_to_dots' in attr:
|
||||
continue
|
||||
for i in range(attr['len']):
|
||||
dot_fields.append((attr['gltf_attribute_name'] + str(i), attr['type']))
|
||||
if attr['blender_domain'] != 'POINT':
|
||||
continue
|
||||
if self.export_settings['gltf_loose_edges']:
|
||||
dot_fields_edges.append((attr['gltf_attribute_name'] + str(i), attr['type']))
|
||||
if self.export_settings['gltf_loose_points']:
|
||||
dot_fields_points.append((attr['gltf_attribute_name'] + str(i), attr['type']))
|
||||
|
||||
# In Blender there is both per-vert data, like position, and also per-loop
# (loop=corner-of-poly) data, like normals or UVs. glTF only has per-vert
# data, so we need to split Blender verts up into potentially-multiple glTF
# verts.
#
# First, we'll collect a "dot" for every loop: a struct that stores all the
# attributes at that loop, namely the vertex index (which determines all
# per-vert data), and all the per-loop data like UVs, etc.
#
# Each unique dot will become one unique glTF vert.
|
||||
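# For illustration only (assuming a mesh with normals and a single UV layer),
# the dtype built above could look roughly like:
#   [('vertex_index', np.uint32),
#    ('TEXCOORD_00', np.float32), ('TEXCOORD_01', np.float32),
#    ('NORMAL0', np.float32), ('NORMAL1', np.float32), ('NORMAL2', np.float32)]
# i.e. one scalar field per component of every exported per-loop attribute.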
|
||||
self.dots = np.empty(len(self.blender_mesh.loops), dtype=np.dtype(dot_fields))
|
||||
|
||||
# Find loose edges
|
||||
if self.export_settings['gltf_loose_edges']:
|
||||
loose_edges = [e for e in self.blender_mesh.edges if e.is_loose]
|
||||
self.blender_idxs_edges = [vi for e in loose_edges for vi in e.vertices]
|
||||
self.blender_idxs_edges = np.array(self.blender_idxs_edges, dtype=np.uint32)
|
||||
|
||||
self.dots_edges = np.empty(len(self.blender_idxs_edges), dtype=np.dtype(dot_fields_edges))
|
||||
self.dots_edges['vertex_index'] = self.blender_idxs_edges
|
||||
|
||||
# Find loose points
|
||||
if self.export_settings['gltf_loose_points']:
|
||||
verts_in_edge = set(vi for e in self.blender_mesh.edges for vi in e.vertices)
|
||||
self.blender_idxs_points = [
|
||||
vi for vi, _ in enumerate(self.blender_mesh.vertices)
|
||||
if vi not in verts_in_edge
|
||||
]
|
||||
self.blender_idxs_points = np.array(self.blender_idxs_points, dtype=np.uint32)
|
||||
|
||||
self.dots_points = np.empty(len(self.blender_idxs_points), dtype=np.dtype(dot_fields_points))
|
||||
self.dots_points['vertex_index'] = self.blender_idxs_points
|
||||
|
||||
|
||||
def populate_dots_data(self):
|
||||
vidxs = np.empty(len(self.blender_mesh.loops))
|
||||
self.blender_mesh.loops.foreach_get('vertex_index', vidxs)
|
||||
self.dots['vertex_index'] = vidxs
|
||||
del vidxs
|
||||
|
||||
for attr in self.blender_attributes:
|
||||
if 'skip_getting_to_dots' in attr:
|
||||
continue
|
||||
if 'get' not in attr:
|
||||
continue
|
||||
attr['get'](attr)
|
||||
|
||||
def primitive_split(self):
|
||||
# Calculate triangles and sort them into primitives.
|
||||
|
||||
self.blender_mesh.calc_loop_triangles()
|
||||
loop_indices = np.empty(len(self.blender_mesh.loop_triangles) * 3, dtype=np.uint32)
|
||||
self.blender_mesh.loop_triangles.foreach_get('loops', loop_indices)
|
||||
|
||||
self.prim_indices = {} # maps material index to TRIANGLES-style indices into dots
|
||||
|
||||
if self.use_materials == "NONE": # Only for None. For placeholder and export, keep primitives
|
||||
# Put all vertices into one primitive
|
||||
self.prim_indices[-1] = loop_indices
|
||||
|
||||
else:
|
||||
# Bucket by material index.
|
||||
|
||||
tri_material_idxs = np.empty(len(self.blender_mesh.loop_triangles), dtype=np.uint32)
|
||||
self.blender_mesh.loop_triangles.foreach_get('material_index', tri_material_idxs)
|
||||
loop_material_idxs = np.repeat(tri_material_idxs, 3) # material index for every loop
|
||||
unique_material_idxs = np.unique(tri_material_idxs)
|
||||
del tri_material_idxs
|
||||
|
||||
for material_idx in unique_material_idxs:
|
||||
self.prim_indices[material_idx] = loop_indices[loop_material_idxs == material_idx]
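# e.g. a mesh whose triangles use material slots 0 and 2 ends up with
# prim_indices keys {0, 2}, each holding the TRIANGLES-style loop indices
# for that material.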
|
||||
|
||||
def primitive_creation(self):
|
||||
primitives = []
|
||||
|
||||
for material_idx, dot_indices in self.prim_indices.items():
|
||||
# Extract just dots used by this primitive, deduplicate them, and
|
||||
# calculate indices into this deduplicated list.
|
||||
self.prim_dots = self.dots[dot_indices]
|
||||
self.prim_dots, indices = np.unique(self.prim_dots, return_inverse=True)
|
||||
|
||||
if len(self.prim_dots) == 0:
|
||||
continue
|
||||
|
||||
# Now just move all the data for prim_dots into attribute arrays
|
||||
|
||||
self.attributes = {}
|
||||
|
||||
self.blender_idxs = self.prim_dots['vertex_index']
|
||||
|
||||
for attr in self.blender_attributes:
|
||||
if 'set' in attr:
|
||||
attr['set'](attr)
|
||||
else: # Regular case
|
||||
self.__set_regular_attribute(attr)
|
||||
|
||||
if self.skin:
|
||||
joints = [[] for _ in range(self.num_joint_sets)]
|
||||
weights = [[] for _ in range(self.num_joint_sets)]
|
||||
|
||||
for vi in self.blender_idxs:
|
||||
bones = self.vert_bones[vi]
|
||||
for j in range(0, 4 * self.num_joint_sets):
|
||||
if j < len(bones):
|
||||
joint, weight = bones[j]
|
||||
else:
|
||||
joint, weight = 0, 0.0
|
||||
joints[j//4].append(joint)
|
||||
weights[j//4].append(weight)
|
||||
|
||||
for i, (js, ws) in enumerate(zip(joints, weights)):
|
||||
self.attributes['JOINTS_%d' % i] = js
|
||||
self.attributes['WEIGHTS_%d' % i] = ws
|
||||
|
||||
primitives.append({
|
||||
'attributes': self.attributes,
|
||||
'indices': indices,
|
||||
'material': material_idx
|
||||
})
|
||||
|
||||
if self.export_settings['gltf_loose_edges']:
|
||||
|
||||
if self.blender_idxs_edges.shape[0] > 0:
|
||||
# Export one glTF vert per unique Blender vert in a loose edge
|
||||
self.blender_idxs = self.blender_idxs_edges
|
||||
dots_edges, indices = np.unique(self.dots_edges, return_inverse=True)
|
||||
self.blender_idxs = np.unique(self.blender_idxs_edges)
|
||||
|
||||
self.attributes = {}
|
||||
|
||||
for attr in self.blender_attributes:
|
||||
if attr['blender_domain'] != 'POINT':
|
||||
continue
|
||||
if 'set' in attr:
|
||||
attr['set'](attr)
|
||||
else:
|
||||
res = np.empty((len(dots_edges), attr['len']), dtype=attr['type'])
|
||||
for i in range(attr['len']):
|
||||
res[:, i] = dots_edges[attr['gltf_attribute_name'] + str(i)]
|
||||
self.attributes[attr['gltf_attribute_name']] = {}
|
||||
self.attributes[attr['gltf_attribute_name']]["data"] = res
|
||||
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_blender_conversion.get_component_type(attr['blender_data_type'])
|
||||
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_blender_conversion.get_data_type(attr['blender_data_type'])
|
||||
|
||||
|
||||
if self.skin:
|
||||
joints = [[] for _ in range(self.num_joint_sets)]
|
||||
weights = [[] for _ in range(self.num_joint_sets)]
|
||||
|
||||
for vi in self.blender_idxs:
|
||||
bones = self.vert_bones[vi]
|
||||
for j in range(0, 4 * self.num_joint_sets):
|
||||
if j < len(bones):
|
||||
joint, weight = bones[j]
|
||||
else:
|
||||
joint, weight = 0, 0.0
|
||||
joints[j//4].append(joint)
|
||||
weights[j//4].append(weight)
|
||||
|
||||
for i, (js, ws) in enumerate(zip(joints, weights)):
|
||||
self.attributes['JOINTS_%d' % i] = js
|
||||
self.attributes['WEIGHTS_%d' % i] = ws
|
||||
|
||||
primitives.append({
|
||||
'attributes': self.attributes,
|
||||
'indices': indices,
|
||||
'mode': 1, # LINES
|
||||
'material': 0
|
||||
})
|
||||
|
||||
if self.export_settings['gltf_loose_points']:
|
||||
|
||||
if self.blender_idxs_points.shape[0] > 0:
|
||||
self.blender_idxs = self.blender_idxs_points
|
||||
|
||||
self.attributes = {}
|
||||
|
||||
for attr in self.blender_attributes:
|
||||
if attr['blender_domain'] != 'POINT':
|
||||
continue
|
||||
if 'set' in attr:
|
||||
attr['set'](attr)
|
||||
else:
|
||||
res = np.empty((len(self.blender_idxs), attr['len']), dtype=attr['type'])
|
||||
for i in range(attr['len']):
|
||||
res[:, i] = self.dots_points[attr['gltf_attribute_name'] + str(i)]
|
||||
self.attributes[attr['gltf_attribute_name']] = {}
|
||||
self.attributes[attr['gltf_attribute_name']]["data"] = res
|
||||
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_blender_conversion.get_component_type(attr['blender_data_type'])
|
||||
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_blender_conversion.get_data_type(attr['blender_data_type'])
|
||||
|
||||
|
||||
if self.skin:
|
||||
joints = [[] for _ in range(self.num_joint_sets)]
|
||||
weights = [[] for _ in range(self.num_joint_sets)]
|
||||
|
||||
for vi in self.blender_idxs:
|
||||
bones = self.vert_bones[vi]
|
||||
for j in range(0, 4 * self.num_joint_sets):
|
||||
if j < len(bones):
|
||||
joint, weight = bones[j]
|
||||
else:
|
||||
joint, weight = 0, 0.0
|
||||
joints[j//4].append(joint)
|
||||
weights[j//4].append(weight)
|
||||
|
||||
for i, (js, ws) in enumerate(zip(joints, weights)):
|
||||
self.attributes['JOINTS_%d' % i] = js
|
||||
self.attributes['WEIGHTS_%d' % i] = ws
|
||||
|
||||
primitives.append({
|
||||
'attributes': self.attributes,
|
||||
'mode': 0, # POINTS
|
||||
'material': 0
|
||||
})
|
||||
|
||||
print_console('INFO', 'Primitives created: %d' % len(primitives))
|
||||
|
||||
return primitives
|
||||
|
||||
################################## Get ##################################################
|
||||
|
||||
def __get_positions(self):
|
||||
self.locs = np.empty(len(self.blender_mesh.vertices) * 3, dtype=np.float32)
|
||||
source = self.key_blocks[0].relative_key.data if self.key_blocks else self.blender_mesh.vertices
|
||||
source.foreach_get('co', self.locs)
|
||||
self.locs = self.locs.reshape(len(self.blender_mesh.vertices), 3)
|
||||
|
||||
self.morph_locs = []
|
||||
for key_block in self.key_blocks:
|
||||
vs = np.empty(len(self.blender_mesh.vertices) * 3, dtype=np.float32)
|
||||
key_block.data.foreach_get('co', vs)
|
||||
vs = vs.reshape(len(self.blender_mesh.vertices), 3)
|
||||
self.morph_locs.append(vs)
|
||||
|
||||
# Transform for skinning
|
||||
if self.armature and self.blender_object:
|
||||
# apply_matrix = armature.matrix_world.inverted_safe() @ blender_object.matrix_world
|
||||
# loc_transform = armature.matrix_world @ apply_matrix
|
||||
|
||||
loc_transform = self.blender_object.matrix_world
|
||||
self.locs[:] = PrimitiveCreator.apply_mat_to_all(loc_transform, self.locs)
|
||||
for vs in self.morph_locs:
|
||||
vs[:] = PrimitiveCreator.apply_mat_to_all(loc_transform, vs)
|
||||
|
||||
# glTF stores deltas in morph targets
|
||||
for vs in self.morph_locs:
|
||||
vs -= self.locs
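# e.g. a shape key vertex at (1.1, 2.0, 3.0) whose basis position is (1.0, 2.0, 3.0)
# is stored in the morph target as the delta (0.1, 0.0, 0.0).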
|
||||
|
||||
if self.export_settings[gltf2_blender_export_keys.YUP]:
|
||||
PrimitiveCreator.zup2yup(self.locs)
|
||||
for vs in self.morph_locs:
|
||||
PrimitiveCreator.zup2yup(vs)
|
||||
|
||||
def get_function(self):
|
||||
|
||||
def getting_function(attr):
|
||||
if attr['gltf_attribute_name'] == "COLOR_0":
|
||||
self.__get_color_attribute(attr)
|
||||
elif attr['gltf_attribute_name'].startswith("_"):
|
||||
self.__get_layer_attribute(attr)
|
||||
elif attr['gltf_attribute_name'].startswith("TEXCOORD_"):
|
||||
self.__get_uvs_attribute(int(attr['gltf_attribute_name'].split("_")[-1]), attr)
|
||||
elif attr['gltf_attribute_name'] == "NORMAL":
|
||||
self.__get_normal_attribute(attr)
|
||||
elif attr['gltf_attribute_name'] == "TANGENT":
|
||||
self.__get_tangent_attribute(attr)
|
||||
|
||||
return getting_function
|
||||
|
||||
|
||||
def __get_color_attribute(self, attr):
|
||||
blender_color_idx = self.blender_mesh.color_attributes.render_color_index
|
||||
|
||||
if attr['blender_domain'] == "POINT":
|
||||
colors = np.empty(len(self.blender_mesh.vertices) * 4, dtype=np.float32)
|
||||
elif attr['blender_domain'] == "CORNER":
|
||||
colors = np.empty(len(self.blender_mesh.loops) * 4, dtype=np.float32)
|
||||
self.blender_mesh.color_attributes[blender_color_idx].data.foreach_get('color', colors)
|
||||
if attr['blender_domain'] == "POINT":
|
||||
colors = colors.reshape(-1, 4)
|
||||
colors = colors[self.dots['vertex_index']]
|
||||
elif attr['blender_domain'] == "CORNER":
|
||||
colors = colors.reshape(-1, 4)
|
||||
# colors are already linear, no need to switch color space
|
||||
self.dots[attr['gltf_attribute_name'] + '0'] = colors[:, 0]
|
||||
self.dots[attr['gltf_attribute_name'] + '1'] = colors[:, 1]
|
||||
self.dots[attr['gltf_attribute_name'] + '2'] = colors[:, 2]
|
||||
self.dots[attr['gltf_attribute_name'] + '3'] = colors[:, 3]
|
||||
del colors
|
||||
|
||||
|
||||
def __get_layer_attribute(self, attr):
|
||||
if attr['blender_domain'] in ['CORNER']:
|
||||
data = np.empty(len(self.blender_mesh.loops) * attr['len'], dtype=attr['type'])
|
||||
elif attr['blender_domain'] in ['POINT']:
|
||||
data = np.empty(len(self.blender_mesh.vertices) * attr['len'], dtype=attr['type'])
|
||||
elif attr['blender_domain'] in ['EDGE']:
|
||||
data = np.empty(len(self.blender_mesh.edges) * attr['len'], dtype=attr['type'])
|
||||
elif attr['blender_domain'] in ['FACE']:
|
||||
data = np.empty(len(self.blender_mesh.polygons) * attr['len'], dtype=attr['type'])
|
||||
else:
|
||||
print_console("ERROR", "domain not known")
|
||||
|
||||
if attr['blender_data_type'] == "BYTE_COLOR":
|
||||
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('color', data)
|
||||
data = data.reshape(-1, attr['len'])
|
||||
elif attr['blender_data_type'] == "INT8":
|
||||
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data)
|
||||
data = data.reshape(-1, attr['len'])
|
||||
elif attr['blender_data_type'] == "FLOAT2":
|
||||
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('vector', data)
|
||||
data = data.reshape(-1, attr['len'])
|
||||
elif attr['blender_data_type'] == "BOOLEAN":
|
||||
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data)
|
||||
data = data.reshape(-1, attr['len'])
|
||||
elif attr['blender_data_type'] == "STRING":
|
||||
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data)
|
||||
data = data.reshape(-1, attr['len'])
|
||||
elif attr['blender_data_type'] == "FLOAT_COLOR":
|
||||
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('color', data)
|
||||
data = data.reshape(-1, attr['len'])
|
||||
elif attr['blender_data_type'] == "FLOAT_VECTOR":
|
||||
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('vector', data)
|
||||
data = data.reshape(-1, attr['len'])
|
||||
elif attr['blender_data_type'] == "FLOAT_VECTOR_4": # Specific case for tangent
|
||||
pass
|
||||
elif attr['blender_data_type'] == "INT":
|
||||
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data)
|
||||
data = data.reshape(-1, attr['len'])
|
||||
elif attr['blender_data_type'] == "FLOAT":
|
||||
self.blender_mesh.attributes[attr['blender_attribute_index']].data.foreach_get('value', data)
|
||||
data = data.reshape(-1, attr['len'])
|
||||
else:
|
||||
print_console('ERROR',"blender type not found " + attr['blender_data_type'])
|
||||
|
||||
if attr['blender_domain'] in ['CORNER']:
|
||||
for i in range(attr['len']):
|
||||
self.dots[attr['gltf_attribute_name'] + str(i)] = data[:, i]
|
||||
elif attr['blender_domain'] in ['POINT']:
|
||||
if attr['len'] > 1:
|
||||
data = data.reshape(-1, attr['len'])
|
||||
data_dots = data[self.dots['vertex_index']]
|
||||
if self.export_settings['gltf_loose_edges']:
|
||||
data_dots_edges = data[self.dots_edges['vertex_index']]
|
||||
if self.export_settings['gltf_loose_points']:
|
||||
data_dots_points = data[self.dots_points['vertex_index']]
|
||||
for i in range(attr['len']):
|
||||
self.dots[attr['gltf_attribute_name'] + str(i)] = data_dots[:, i]
|
||||
if self.export_settings['gltf_loose_edges']:
|
||||
self.dots_edges[attr['gltf_attribute_name'] + str(i)] = data_dots_edges[:, i]
|
||||
if self.export_settings['gltf_loose_points']:
|
||||
self.dots_points[attr['gltf_attribute_name'] + str(i)] = data_dots_points[:, i]
|
||||
elif attr['blender_domain'] in ['EDGE']:
|
||||
# No edge attribute exports
|
||||
pass
|
||||
elif attr['blender_domain'] in ['FACE']:
|
||||
if attr['len'] > 1:
|
||||
data = data.reshape(-1, attr['len'])
|
||||
# data contains one value per face (len(faces) entries)
# We need to dispatch these per-face values to the corresponding dots (loops)
|
||||
data_attr = np.empty(self.dots.shape[0] * attr['len'], dtype=attr['type'])
|
||||
data_attr = data_attr.reshape(-1, attr['len'])
|
||||
for idx, poly in enumerate(self.blender_mesh.polygons):
|
||||
data_attr[list(poly.loop_indices)] = data[idx]
|
||||
data_attr = data_attr.reshape(-1, attr['len'])
|
||||
for i in range(attr['len']):
|
||||
self.dots[attr['gltf_attribute_name'] + str(i)] = data_attr[:, i]
|
||||
|
||||
else:
|
||||
print_console("ERROR", "domain not known")
|
||||
|
||||
def __get_uvs_attribute(self, blender_uv_idx, attr):
|
||||
layer = self.blender_mesh.uv_layers[blender_uv_idx]
|
||||
uvs = np.empty(len(self.blender_mesh.loops) * 2, dtype=np.float32)
|
||||
layer.data.foreach_get('uv', uvs)
|
||||
uvs = uvs.reshape(len(self.blender_mesh.loops), 2)
|
||||
|
||||
# Blender UV space -> glTF UV space
|
||||
# u,v -> u,1-v
|
||||
uvs[:, 1] *= -1
|
||||
uvs[:, 1] += 1
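# e.g. Blender UV (0.25, 0.0) maps to glTF UV (0.25, 1.0), since glTF's V axis points down.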
|
||||
|
||||
self.dots[attr['gltf_attribute_name'] + '0'] = uvs[:, 0]
|
||||
self.dots[attr['gltf_attribute_name'] + '1'] = uvs[:, 1]
|
||||
del uvs
|
||||
|
||||
def __get_normals(self):
|
||||
"""Get normal for each loop."""
|
||||
key_blocks = self.key_blocks if self.use_morph_normals else []
|
||||
if key_blocks:
|
||||
self.normals = key_blocks[0].relative_key.normals_split_get()
|
||||
self.normals = np.array(self.normals, dtype=np.float32)
|
||||
else:
|
||||
self.normals = np.empty(len(self.blender_mesh.loops) * 3, dtype=np.float32)
|
||||
self.blender_mesh.calc_normals_split()
|
||||
self.blender_mesh.loops.foreach_get('normal', self.normals)
|
||||
|
||||
self.normals = self.normals.reshape(len(self.blender_mesh.loops), 3)
|
||||
|
||||
self.morph_normals = []
|
||||
for key_block in key_blocks:
|
||||
ns = np.array(key_block.normals_split_get(), dtype=np.float32)
|
||||
ns = ns.reshape(len(self.blender_mesh.loops), 3)
|
||||
self.morph_normals.append(ns)
|
||||
|
||||
# Transform for skinning
|
||||
if self.armature and self.blender_object:
|
||||
apply_matrix = (self.armature.matrix_world.inverted_safe() @ self.blender_object.matrix_world)
|
||||
apply_matrix = apply_matrix.to_3x3().inverted_safe().transposed()
|
||||
normal_transform = self.armature.matrix_world.to_3x3() @ apply_matrix
|
||||
|
||||
self.normals[:] = PrimitiveCreator.apply_mat_to_all(normal_transform, self.normals)
|
||||
PrimitiveCreator.normalize_vecs(self.normals)
|
||||
for ns in self.morph_normals:
|
||||
ns[:] = PrimitiveCreator.apply_mat_to_all(normal_transform, ns)
|
||||
PrimitiveCreator.normalize_vecs(ns)
|
||||
|
||||
for ns in [self.normals, *self.morph_normals]:
|
||||
# Replace zero normals with the unit UP vector.
|
||||
# Seems to happen sometimes with degenerate tris?
|
||||
is_zero = ~ns.any(axis=1)
|
||||
ns[is_zero, 2] = 1
|
||||
|
||||
# glTF stores deltas in morph targets
|
||||
for ns in self.morph_normals:
|
||||
ns -= self.normals
|
||||
|
||||
if self.export_settings[gltf2_blender_export_keys.YUP]:
|
||||
PrimitiveCreator.zup2yup(self.normals)
|
||||
for ns in self.morph_normals:
|
||||
PrimitiveCreator.zup2yup(ns)
|
||||
|
||||
def __get_normal_attribute(self, attr):
|
||||
self.__get_normals()
|
||||
self.dots[attr['gltf_attribute_name'] + "0"] = self.normals[:, 0]
|
||||
self.dots[attr['gltf_attribute_name'] + "1"] = self.normals[:, 1]
|
||||
self.dots[attr['gltf_attribute_name'] + "2"] = self.normals[:, 2]
|
||||
|
||||
if self.use_morph_normals:
|
||||
for morph_i, ns in enumerate(self.morph_normals):
|
||||
self.dots[attr['gltf_attribute_name_morph'] + str(morph_i) + "0"] = ns[:, 0]
|
||||
self.dots[attr['gltf_attribute_name_morph'] + str(morph_i) + "1"] = ns[:, 1]
|
||||
self.dots[attr['gltf_attribute_name_morph'] + str(morph_i) + "2"] = ns[:, 2]
|
||||
del self.normals
|
||||
del self.morph_normals
|
||||
|
||||
def __get_tangent_attribute(self, attr):
|
||||
self.__get_tangents()
|
||||
self.dots[attr['gltf_attribute_name'] + "0"] = self.tangents[:, 0]
|
||||
self.dots[attr['gltf_attribute_name'] + "1"] = self.tangents[:, 1]
|
||||
self.dots[attr['gltf_attribute_name'] + "2"] = self.tangents[:, 2]
|
||||
del self.tangents
|
||||
self.__get_bitangent_signs()
|
||||
self.dots[attr['gltf_attribute_name'] + "3"] = self.signs
|
||||
del self.signs
|
||||
|
||||
def __get_tangents(self):
|
||||
"""Get an array of the tangent for each loop."""
|
||||
self.tangents = np.empty(len(self.blender_mesh.loops) * 3, dtype=np.float32)
|
||||
self.blender_mesh.loops.foreach_get('tangent', self.tangents)
|
||||
self.tangents = self.tangents.reshape(len(self.blender_mesh.loops), 3)
|
||||
|
||||
# Transform for skinning
|
||||
if self.armature and self.blender_object:
|
||||
apply_matrix = self.armature.matrix_world.inverted_safe() @ self.blender_object.matrix_world
|
||||
tangent_transform = apply_matrix.to_quaternion().to_matrix()
|
||||
self.tangents = PrimitiveCreator.apply_mat_to_all(tangent_transform, self.tangents)
|
||||
PrimitiveCreator.normalize_vecs(self.tangents)
|
||||
|
||||
if self.export_settings[gltf2_blender_export_keys.YUP]:
|
||||
PrimitiveCreator.zup2yup(self.tangents)
|
||||
|
||||
|
||||
def __get_bitangent_signs(self):
|
||||
self.signs = np.empty(len(self.blender_mesh.loops), dtype=np.float32)
|
||||
self.blender_mesh.loops.foreach_get('bitangent_sign', self.signs)
|
||||
|
||||
# Transform for skinning
|
||||
if self.armature and self.blender_object:
|
||||
# Bitangent signs should flip when handedness changes
|
||||
# TODO: confirm
|
||||
apply_matrix = self.armature.matrix_world.inverted_safe() @ self.blender_object.matrix_world
|
||||
tangent_transform = apply_matrix.to_quaternion().to_matrix()
|
||||
flipped = tangent_transform.determinant() < 0
|
||||
if flipped:
|
||||
self.signs *= -1
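# Note (informal): the intent seems to be flipping the signs when the combined
# transform changes handedness (e.g. a negative scale), but a matrix derived from
# to_quaternion() always has determinant +1, hence the TODO above.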
|
||||
|
||||
# No change for Zup -> Yup
|
||||
|
||||
|
||||
def __get_bone_data(self):
|
||||
|
||||
self.need_neutral_bone = False
|
||||
min_influence = 0.0001
|
||||
|
||||
joint_name_to_index = {joint.name: index for index, joint in enumerate(self.skin.joints)}
|
||||
group_to_joint = [joint_name_to_index.get(g.name) for g in self.blender_vertex_groups]
|
||||
|
||||
# List of (joint, weight) pairs for each vert
|
||||
self.vert_bones = []
|
||||
max_num_influences = 0
|
||||
|
||||
for vertex in self.blender_mesh.vertices:
|
||||
bones = []
|
||||
if vertex.groups:
|
||||
for group_element in vertex.groups:
|
||||
weight = group_element.weight
|
||||
if weight <= min_influence:
|
||||
continue
|
||||
try:
|
||||
joint = group_to_joint[group_element.group]
|
||||
except Exception:
|
||||
continue
|
||||
if joint is None:
|
||||
continue
|
||||
bones.append((joint, weight))
|
||||
bones.sort(key=lambda x: x[1], reverse=True)
|
||||
if not bones:
|
||||
# Not assigned to any bone
|
||||
bones = ((len(self.skin.joints), 1.0),) # Assign to a joint that will be created later
|
||||
self.need_neutral_bone = True
|
||||
self.vert_bones.append(bones)
|
||||
if len(bones) > max_num_influences:
|
||||
max_num_influences = len(bones)
|
||||
|
||||
# How many joint sets do we need? 1 set = 4 influences
|
||||
self.num_joint_sets = (max_num_influences + 3) // 4
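# e.g. a vertex with 5 influences yields 2 joint sets, exported as
# JOINTS_0/WEIGHTS_0 and JOINTS_1/WEIGHTS_1, with unused slots padded
# with joint 0 and weight 0.0 (see primitive_creation above).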
|
||||
|
||||
##################################### Set ###################################
|
||||
def set_function(self):
|
||||
|
||||
def setting_function(attr):
|
||||
if attr['gltf_attribute_name'] == "POSITION":
|
||||
self.__set_positions_attribute(attr)
|
||||
elif attr['gltf_attribute_name'].startswith("MORPH_POSITION_"):
|
||||
self.__set_morph_locs_attribute(attr)
|
||||
elif attr['gltf_attribute_name'].startswith("MORPH_TANGENT_"):
|
||||
self.__set_morph_tangent_attribute(attr)
|
||||
|
||||
return setting_function
|
||||
|
||||
def __set_positions_attribute(self, attr):
|
||||
self.attributes[attr['gltf_attribute_name']] = {}
|
||||
self.attributes[attr['gltf_attribute_name']]["data"] = self.locs[self.blender_idxs]
|
||||
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec3
|
||||
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float
|
||||
|
||||
|
||||
def __set_morph_locs_attribute(self, attr):
|
||||
self.attributes[attr['gltf_attribute_name']] = {}
|
||||
self.attributes[attr['gltf_attribute_name']]["data"] = self.morph_locs[attr['blender_attribute_index']][self.blender_idxs]
|
||||
|
||||
def __set_morph_tangent_attribute(self, attr):
|
||||
# Morph tangents come after the 3 attributes above, so those are already calculated
|
||||
self.normals = self.attributes[attr['gltf_attribute_name_normal']]["data"]
|
||||
self.morph_normals = self.attributes[attr['gltf_attribute_name_morph_normal']]["data"]
|
||||
self.tangents = self.attributes[attr['gltf_attribute_name_tangent']]["data"]
|
||||
|
||||
self.__calc_morph_tangents()
|
||||
self.attributes[attr['gltf_attribute_name']] = {}
|
||||
self.attributes[attr['gltf_attribute_name']]["data"] = self.morph_tangents
|
||||
|
||||
def __calc_morph_tangents(self):
|
||||
# TODO: check if this works
|
||||
self.morph_tangents = np.empty((len(self.normals), 3), dtype=np.float32)
|
||||
|
||||
for i in range(len(self.normals)):
|
||||
n = Vector(self.normals[i])
|
||||
morph_n = n + Vector(self.morph_normals[i]) # convert back to non-delta
|
||||
t = Vector(self.tangents[i, :3])
|
||||
|
||||
rotation = morph_n.rotation_difference(n)
|
||||
|
||||
t_morph = Vector(t)
|
||||
t_morph.rotate(rotation)
|
||||
self.morph_tangents[i] = t_morph - t # back to delta
|
||||
|
||||
def __set_regular_attribute(self, attr):
|
||||
res = np.empty((len(self.prim_dots), attr['len']), dtype=attr['type'])
|
||||
for i in range(attr['len']):
|
||||
res[:, i] = self.prim_dots[attr['gltf_attribute_name'] + str(i)]
|
||||
self.attributes[attr['gltf_attribute_name']] = {}
|
||||
self.attributes[attr['gltf_attribute_name']]["data"] = res
|
||||
if attr['gltf_attribute_name'] == "NORMAL":
|
||||
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float
|
||||
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec3
|
||||
elif attr['gltf_attribute_name'] == "TANGENT":
|
||||
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float
|
||||
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec4
|
||||
elif attr['gltf_attribute_name'].startswith('TEXCOORD_'):
|
||||
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_io_constants.ComponentType.Float
|
||||
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_io_constants.DataType.Vec2
|
||||
else:
|
||||
self.attributes[attr['gltf_attribute_name']]["component_type"] = gltf2_blender_conversion.get_component_type(attr['blender_data_type'])
|
||||
self.attributes[attr['gltf_attribute_name']]["data_type"] = gltf2_blender_conversion.get_data_type(attr['blender_data_type'])
|
@ -166,8 +166,11 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
|
||||
use_active_uvmap = True
|
||||
if node and node.type == 'UVMAP' and node.uv_map:
|
||||
# Try to gather map index.
|
||||
for blender_mesh in bpy.data.meshes:
|
||||
i = blender_mesh.uv_layers.find(node.uv_map)
|
||||
node_tree = node.id_data
|
||||
for mesh in bpy.data.meshes:
|
||||
for material in mesh.materials:
|
||||
if material.node_tree == node_tree:
|
||||
i = mesh.uv_layers.find(node.uv_map)
|
||||
if i >= 0:
|
||||
texcoord_idx = i
|
||||
use_active_uvmap = False
|
||||
|
@ -11,7 +11,6 @@ from mathutils import Quaternion, Matrix
|
||||
from io_scene_gltf2.io.com import gltf2_io
|
||||
from io_scene_gltf2.io.imp.gltf2_io_binary import BinaryData
|
||||
from io_scene_gltf2.io.com import gltf2_io_constants
|
||||
from .gltf2_blender_gather_primitive_attributes import array_to_accessor
|
||||
from io_scene_gltf2.io.exp import gltf2_io_binary_data
|
||||
from io_scene_gltf2.blender.exp import gltf2_blender_gather_accessors
|
||||
|
||||
@ -103,7 +102,8 @@ class VExportTree:
|
||||
blender_children.setdefault(bobj, [])
|
||||
blender_children.setdefault(bparent, []).append(bobj)
|
||||
|
||||
for blender_object in [obj.original for obj in depsgraph.scene_eval.objects if obj.parent is None]:
|
||||
scene_eval = blender_scene.evaluated_get(depsgraph=depsgraph)
|
||||
for blender_object in [obj.original for obj in scene_eval.objects if obj.parent is None]:
|
||||
self.recursive_node_traverse(blender_object, None, None, Matrix.Identity(4), blender_children)
|
||||
|
||||
def recursive_node_traverse(self, blender_object, blender_bone, parent_uuid, parent_coll_matrix_world, blender_children, armature_uuid=None, dupli_world_matrix=None):
|
||||
@ -289,6 +289,7 @@ class VExportTree:
|
||||
self.filter_tag()
|
||||
export_user_extensions('gather_tree_filter_tag_hook', self.export_settings, self)
|
||||
self.filter_perform()
|
||||
self.remove_filtered_nodes()
|
||||
|
||||
|
||||
def recursive_filter_tag(self, uuid, parent_keep_tag):
|
||||
@ -392,13 +393,21 @@ class VExportTree:
|
||||
if all([c.hide_render for c in self.nodes[uuid].blender_object.users_collection]):
|
||||
return False
|
||||
|
||||
if self.export_settings[gltf2_blender_export_keys.ACTIVE_COLLECTION]:
|
||||
if self.export_settings[gltf2_blender_export_keys.ACTIVE_COLLECTION] and not self.export_settings[gltf2_blender_export_keys.ACTIVE_COLLECTION_WITH_NESTED]:
|
||||
found = any(x == self.nodes[uuid].blender_object for x in bpy.context.collection.objects)
|
||||
if not found:
|
||||
return False
|
||||
|
||||
if self.export_settings[gltf2_blender_export_keys.ACTIVE_COLLECTION] and self.export_settings[gltf2_blender_export_keys.ACTIVE_COLLECTION_WITH_NESTED]:
|
||||
found = any(x == self.nodes[uuid].blender_object for x in bpy.context.collection.all_objects)
|
||||
if not found:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def remove_filtered_nodes(self):
|
||||
self.nodes = {k:n for (k, n) in self.nodes.items() if n.keep_tag is True}
|
||||
|
||||
def search_missing_armature(self):
|
||||
for n in [n for n in self.nodes.values() if hasattr(n, "armature_needed") is True]:
|
||||
candidates = [i for i in self.nodes.values() if i.blender_type == VExportNode.ARMATURE and i.blender_object.name == n.armature_needed]
|
||||
@ -407,9 +416,14 @@ class VExportTree:
|
||||
del n.armature_needed
|
||||
|
||||
def add_neutral_bones(self):
|
||||
added_armatures = []
|
||||
for n in [n for n in self.nodes.values() if n.armature is not None and n.blender_type == VExportNode.OBJECT and hasattr(self.nodes[n.armature], "need_neutral_bone")]: # all skinned mesh objects where a neutral bone is needed
|
||||
# First add a new node
|
||||
|
||||
if n.armature not in added_armatures:
|
||||
|
||||
added_armatures.append(n.armature) # Make sure not to insert the neutral bone twice
|
||||
|
||||
# First add a new node
|
||||
axis_basis_change = Matrix.Identity(4)
|
||||
if self.export_settings[gltf2_blender_export_keys.YUP]:
|
||||
axis_basis_change = Matrix(((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 1.0, 0.0), (0.0, -1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)))
|
||||
@ -438,6 +452,7 @@ class VExportTree:
|
||||
)
|
||||
# Add it to child list of armature
|
||||
self.nodes[n.armature].node.children.append(neutral_bone)
|
||||
|
||||
# Add it to joint list
|
||||
n.node.skin.joints.append(neutral_bone)
|
||||
|
||||
|
@ -28,7 +28,7 @@ class BlenderCamera():
|
||||
if pycamera.type == "orthographic":
|
||||
cam.type = "ORTHO"
|
||||
|
||||
# TODO: xmag/ymag
|
||||
cam.ortho_scale = max(pycamera.orthographic.xmag, pycamera.orthographic.ymag) * 2
|
||||
|
||||
cam.clip_start = pycamera.orthographic.znear
|
||||
cam.clip_end = pycamera.orthographic.zfar
|
||||
|
@ -6,6 +6,7 @@ from mathutils import Vector, Quaternion, Matrix
|
||||
from .gltf2_blender_scene import BlenderScene
|
||||
from ..com.gltf2_blender_ui import gltf2_KHR_materials_variants_variant, gltf2_KHR_materials_variants_primitive, gltf2_KHR_materials_variants_default_material
|
||||
from .gltf2_blender_material import BlenderMaterial
|
||||
from io_scene_gltf2.io.imp.gltf2_io_user_extensions import import_user_extensions
|
||||
|
||||
|
||||
class BlenderGlTF():
|
||||
@ -16,6 +17,9 @@ class BlenderGlTF():
|
||||
@staticmethod
|
||||
def create(gltf):
|
||||
"""Create glTF main method, with optional profiling"""
|
||||
|
||||
import_user_extensions('gather_import_gltf_before_hook', gltf)
|
||||
|
||||
profile = bpy.app.debug_value == 102
|
||||
if profile:
|
||||
import cProfile, pstats, io
|
||||
@ -158,22 +162,36 @@ class BlenderGlTF():
|
||||
mesh.shapekey_names = []
|
||||
used_names = set(['Basis']) #Be sure to not use 'Basis' name at import, this is a reserved name
|
||||
|
||||
# Some invalid glTF files has empty primitive tab
|
||||
if len(mesh.primitives) > 0:
|
||||
for sk, target in enumerate(mesh.primitives[0].targets or []):
|
||||
if 'POSITION' not in target:
|
||||
# Look for primitive with morph targets
|
||||
for prim in (mesh.primitives or []):
|
||||
if not prim.targets:
|
||||
continue
|
||||
|
||||
for sk, _ in enumerate(prim.targets):
|
||||
# Skip shape key for target that doesn't morph POSITION
|
||||
morphs_position = any(
|
||||
(prim.targets and 'POSITION' in prim.targets[sk])
|
||||
for prim in mesh.primitives
|
||||
)
|
||||
if not morphs_position:
|
||||
mesh.shapekey_names.append(None)
|
||||
continue
|
||||
|
||||
# Check if glTF file has some extras with targetNames. Otherwise
|
||||
# use the name of the POSITION accessor on the first primitive.
|
||||
shapekey_name = None
|
||||
if mesh.extras is not None:
|
||||
if 'targetNames' in mesh.extras and sk < len(mesh.extras['targetNames']):
|
||||
shapekey_name = mesh.extras['targetNames'][sk]
|
||||
|
||||
# Try to use name from extras.targetNames
|
||||
try:
|
||||
shapekey_name = str(mesh.extras['targetNames'][sk])
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Try to get name from first primitive's POSITION accessor
|
||||
if shapekey_name is None:
|
||||
if gltf.data.accessors[target['POSITION']].name is not None:
|
||||
shapekey_name = gltf.data.accessors[target['POSITION']].name
|
||||
try:
|
||||
shapekey_name = gltf.data.accessors[mesh.primitives[0].targets[sk]['POSITION']].name
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if shapekey_name is None:
|
||||
shapekey_name = "target_" + str(sk)
|
||||
|
||||
@ -182,6 +200,8 @@ class BlenderGlTF():
|
||||
|
||||
mesh.shapekey_names.append(shapekey_name)
|
||||
|
||||
break
|
||||
|
||||
# Manage KHR_materials_variants
|
||||
BlenderGlTF.manage_material_variants(gltf)
|
||||
|
||||
|
@ -6,6 +6,7 @@ from math import pi
|
||||
|
||||
from ..com.gltf2_blender_extras import set_extras
|
||||
from io_scene_gltf2.io.imp.gltf2_io_user_extensions import import_user_extensions
|
||||
from ..com.gltf2_blender_conversion import PBR_WATTS_TO_LUMENS
|
||||
|
||||
|
||||
class BlenderLight():
|
||||
@ -21,7 +22,7 @@ class BlenderLight():
|
||||
import_user_extensions('gather_import_light_before_hook', gltf, vnode, pylight)
|
||||
|
||||
if pylight['type'] == "directional":
|
||||
light = BlenderLight.create_directional(gltf, light_id)
|
||||
light = BlenderLight.create_directional(gltf, light_id) # ...Why not pass the pylight?
|
||||
elif pylight['type'] == "point":
|
||||
light = BlenderLight.create_point(gltf, light_id)
|
||||
elif pylight['type'] == "spot":
|
||||
@ -30,9 +31,6 @@ class BlenderLight():
|
||||
if 'color' in pylight.keys():
|
||||
light.color = pylight['color']
|
||||
|
||||
if 'intensity' in pylight.keys():
|
||||
light.energy = pylight['intensity']
|
||||
|
||||
# TODO range
|
||||
|
||||
set_extras(light, pylight.get('extras'))
|
||||
@ -44,11 +42,33 @@ class BlenderLight():
|
||||
pylight = gltf.data.extensions['KHR_lights_punctual']['lights'][light_id]
|
||||
|
||||
if 'name' not in pylight.keys():
|
||||
pylight['name'] = "Sun"
|
||||
pylight['name'] = "Sun" # Uh... Is it okay to mutate the import data?
|
||||
|
||||
sun = bpy.data.lights.new(name=pylight['name'], type="SUN")
|
||||
|
||||
if 'intensity' in pylight.keys():
|
||||
if gltf.import_settings['convert_lighting_mode'] == 'SPEC':
|
||||
sun.energy = pylight['intensity'] / PBR_WATTS_TO_LUMENS
|
||||
elif gltf.import_settings['convert_lighting_mode'] == 'COMPAT':
|
||||
sun.energy = pylight['intensity']
|
||||
elif gltf.import_settings['convert_lighting_mode'] == 'RAW':
|
||||
sun.energy = pylight['intensity']
|
||||
else:
|
||||
raise ValueError(gltf.import_settings['convert_lighting_mode'])
|
||||
|
||||
return sun
|
||||
|
||||
@staticmethod
|
||||
def _calc_energy_pointlike(gltf, pylight):
|
||||
if gltf.import_settings['convert_lighting_mode'] == 'SPEC':
|
||||
return pylight['intensity'] / PBR_WATTS_TO_LUMENS * 4 * pi
|
||||
elif gltf.import_settings['convert_lighting_mode'] == 'COMPAT':
|
||||
return pylight['intensity'] * 4 * pi
|
||||
elif gltf.import_settings['convert_lighting_mode'] == 'RAW':
|
||||
return pylight['intensity']
|
||||
else:
|
||||
raise ValueError(gltf.import_settings['convert_lighting_mode'])
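# Worked example (assuming the usual PBR_WATTS_TO_LUMENS value of 683): a glTF
# point light of intensity 683 imported in 'SPEC' mode becomes 683 / 683 * 4 * pi
# ~= 12.57 W in Blender, while 'RAW' keeps the value 683 as-is.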
|
||||
|
||||
@staticmethod
|
||||
def create_point(gltf, light_id):
|
||||
pylight = gltf.data.extensions['KHR_lights_punctual']['lights'][light_id]
|
||||
@ -57,6 +77,10 @@ class BlenderLight():
|
||||
pylight['name'] = "Point"
|
||||
|
||||
point = bpy.data.lights.new(name=pylight['name'], type="POINT")
|
||||
|
||||
if 'intensity' in pylight.keys():
|
||||
point.energy = BlenderLight._calc_energy_pointlike(gltf, pylight)
|
||||
|
||||
return point
|
||||
|
||||
@staticmethod
|
||||
@ -79,4 +103,7 @@ class BlenderLight():
|
||||
else:
|
||||
spot.spot_blend = 1.0
|
||||
|
||||
if 'intensity' in pylight.keys():
|
||||
spot.energy = BlenderLight._calc_energy_pointlike(gltf, pylight)
|
||||
|
||||
return spot
|
||||
|
@ -98,11 +98,7 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
|
||||
while i < COLOR_MAX and ('COLOR_%d' % i) in prim.attributes: i += 1
|
||||
num_cols = max(i, num_cols)
|
||||
|
||||
num_shapekeys = 0
|
||||
if len(pymesh.primitives) > 0: # Empty primitive tab is not allowed, but some invalid files...
|
||||
for morph_i, _ in enumerate(pymesh.primitives[0].targets or []):
|
||||
if pymesh.shapekey_names[morph_i] is not None:
|
||||
num_shapekeys += 1
|
||||
num_shapekeys = sum(sk_name is not None for sk_name in pymesh.shapekey_names)
|
||||
|
||||
# -------------
|
||||
# We'll process all the primitives gathering arrays to feed into the
|
||||
@ -190,12 +186,17 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
|
||||
vert_joints[i] = np.concatenate((vert_joints[i], js))
|
||||
vert_weights[i] = np.concatenate((vert_weights[i], ws))
|
||||
|
||||
for morph_i, target in enumerate(prim.targets or []):
|
||||
if pymesh.shapekey_names[morph_i] is None:
|
||||
sk_i = 0
|
||||
for sk, sk_name in enumerate(pymesh.shapekey_names):
|
||||
if sk_name is None:
|
||||
continue
|
||||
morph_vs = BinaryData.decode_accessor(gltf, target['POSITION'], cache=True)
|
||||
if prim.targets and 'POSITION' in prim.targets[sk]:
|
||||
morph_vs = BinaryData.decode_accessor(gltf, prim.targets[sk]['POSITION'], cache=True)
|
||||
morph_vs = morph_vs[unique_indices]
|
||||
sk_vert_locs[morph_i] = np.concatenate((sk_vert_locs[morph_i], morph_vs))
|
||||
else:
|
||||
morph_vs = np.zeros((len(unique_indices), 3), dtype=np.float32)
|
||||
sk_vert_locs[sk_i] = np.concatenate((sk_vert_locs[sk_i], morph_vs))
|
||||
sk_i += 1
|
||||
|
||||
# inv_indices are the indices into the verts just for this prim;
|
||||
# calculate indices into the overall verts array
|
||||
@ -304,6 +305,10 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
|
||||
|
||||
mesh.color_attributes[layer.name].data.foreach_set('color', squish(loop_cols[col_i]))
|
||||
|
||||
# Make sure the first Vertex Color Attribute is the rendered one
|
||||
if num_cols > 0:
|
||||
mesh.color_attributes.render_color_index = 0
|
||||
|
||||
# Skinning
|
||||
# TODO: this is slow :/
|
||||
if num_joint_sets and mesh_options.skinning:
|
||||
|
@ -440,7 +440,7 @@ def base_color(
|
||||
# Vertex Color
|
||||
if mh.vertex_color:
|
||||
node = mh.node_tree.nodes.new('ShaderNodeVertexColor')
|
||||
node.layer_name = 'Col'
|
||||
# Do not set the layer name, so rendered one will be used (At import => The first one)
|
||||
node.location = x - 250, y - 240
|
||||
# Outputs
|
||||
mh.node_tree.links.new(vcolor_color_socket, node.outputs['Color'])
|
||||
|
@ -1,12 +1,12 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# Copyright 2018-2021 The glTF-Blender-IO authors.
|
||||
|
||||
def import_user_extensions(hook_name, gltf_importer, *args):
|
||||
for extension in gltf_importer.import_user_extensions:
|
||||
def import_user_extensions(hook_name, gltf, *args):
|
||||
for extension in gltf.import_user_extensions:
|
||||
hook = getattr(extension, hook_name, None)
|
||||
if hook is not None:
|
||||
try:
|
||||
hook(*args, gltf_importer)
|
||||
hook(*args, gltf)
|
||||
except Exception as e:
|
||||
print(hook_name, "fails on", extension)
|
||||
print(str(e))
|
||||
|
@ -266,7 +266,7 @@ def export(file,
|
||||
# store files to copy
|
||||
copy_set = set()
|
||||
|
||||
# store names of newly cerated meshes, so we dont overlap
|
||||
# store names of newly created meshes, so we dont overlap
|
||||
mesh_name_set = set()
|
||||
|
||||
fw = file.write
|
||||
|
@ -2,8 +2,8 @@
|
||||
|
||||
__author__ = "Nutti <nutti.metro@gmail.com>"
|
||||
__status__ = "production"
|
||||
__version__ = "6.6"
|
||||
__date__ = "22 Apr 2022"
|
||||
__version__ = "6.7"
|
||||
__date__ = "22 Sep 2022"
|
||||
|
||||
|
||||
bl_info = {
|
||||
@ -12,8 +12,8 @@ bl_info = {
|
||||
"Keith (Wahooney) Boshoff, McBuff, MaxRobinot, "
|
||||
"Alexander Milovsky, Dusan Stevanovic, MatthiasThDs, "
|
||||
"theCryingMan, PratikBorhade302",
|
||||
"version": (6, 6, 0),
|
||||
"blender": (2, 80, 0),
|
||||
"version": (6, 7, 0),
|
||||
"blender": (3, 4, 0),
|
||||
"location": "See Add-ons Preferences",
|
||||
"description": "UV Toolset. See Add-ons Preferences for details",
|
||||
"warning": "",
|
||||
|
@ -1,14 +0,0 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
__author__ = "Nutti <nutti.metro@gmail.com>"
|
||||
__status__ = "production"
|
||||
__version__ = "6.6"
|
||||
__date__ = "22 Apr 2022"
|
||||
|
||||
if "bpy" in locals():
|
||||
import importlib
|
||||
importlib.reload(bglx)
|
||||
else:
|
||||
from . import bglx
|
||||
|
||||
import bpy
|
@ -1,288 +0,0 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
from threading import Lock
|
||||
|
||||
import bgl
|
||||
from bgl import Buffer as Buffer
|
||||
import gpu
|
||||
from gpu_extras.batch import batch_for_shader
|
||||
|
||||
GL_LINES = 0
|
||||
GL_LINE_STRIP = 1
|
||||
GL_LINE_LOOP = 2
|
||||
GL_TRIANGLES = 5
|
||||
GL_TRIANGLE_FAN = 6
|
||||
GL_QUADS = 4
|
||||
|
||||
class InternalData:
|
||||
__inst = None
|
||||
__lock = Lock()
|
||||
|
||||
def __init__(self):
|
||||
raise NotImplementedError("Not allowed to call constructor")
|
||||
|
||||
@classmethod
|
||||
def __internal_new(cls):
|
||||
inst = super().__new__(cls)
|
||||
inst.color = [1.0, 1.0, 1.0, 1.0]
|
||||
inst.line_width = 1.0
|
||||
|
||||
return inst
|
||||
|
||||
@classmethod
|
||||
def get_instance(cls):
|
||||
if not cls.__inst:
|
||||
with cls.__lock:
|
||||
if not cls.__inst:
|
||||
cls.__inst = cls.__internal_new()
|
||||
|
||||
return cls.__inst
|
||||
|
||||
def init(self):
|
||||
self.clear()
|
||||
|
||||
def set_prim_mode(self, mode):
|
||||
self.prim_mode = mode
|
||||
|
||||
def set_dims(self, dims):
|
||||
self.dims = dims
|
||||
|
||||
def add_vert(self, v):
|
||||
self.verts.append(v)
|
||||
|
||||
def add_tex_coord(self, uv):
|
||||
self.tex_coords.append(uv)
|
||||
|
||||
def set_color(self, c):
|
||||
self.color = c
|
||||
|
||||
def set_line_width(self, width):
|
||||
self.line_width = width
|
||||
|
||||
def clear(self):
|
||||
self.prim_mode = None
|
||||
self.verts = []
|
||||
self.dims = None
|
||||
self.tex_coords = []
|
||||
|
||||
def get_verts(self):
|
||||
return self.verts
|
||||
|
||||
def get_dims(self):
|
||||
return self.dims
|
||||
|
||||
def get_prim_mode(self):
|
||||
return self.prim_mode
|
||||
|
||||
def get_color(self):
|
||||
return self.color
|
||||
|
||||
def get_line_width(self):
|
||||
return self.line_width
|
||||
|
||||
def get_tex_coords(self):
|
||||
return self.tex_coords
|
||||
|
||||
|
||||
def glLineWidth(width):
|
||||
inst = InternalData.get_instance()
|
||||
inst.set_line_width(width)
|
||||
|
||||
|
||||
def glColor3f(r, g, b):
|
||||
inst = InternalData.get_instance()
|
||||
inst.set_color([r, g, b, 1.0])
|
||||
|
||||
|
||||
def glColor4f(r, g, b, a):
|
||||
inst = InternalData.get_instance()
|
||||
inst.set_color([r, g, b, a])
|
||||
|
||||
|
||||
def glRecti(x0, y0, x1, y1):
|
||||
glBegin(GL_QUADS)
|
||||
glVertex2f(x0, y0)
|
||||
glVertex2f(x0, y1)
|
||||
glVertex2f(x1, y1)
|
||||
glVertex2f(x1, y0)
|
||||
glEnd()
|
||||
|
||||
|
||||
def glBegin(mode):
|
||||
inst = InternalData.get_instance()
|
||||
inst.init()
|
||||
inst.set_prim_mode(mode)
|
||||
|
||||
|
||||
def _get_transparency_shader():
|
||||
vertex_shader = '''
|
||||
uniform mat4 modelViewMatrix;
|
||||
uniform mat4 projectionMatrix;
|
||||
|
||||
in vec2 pos;
|
||||
in vec2 texCoord;
|
||||
out vec2 uvInterp;
|
||||
|
||||
void main()
|
||||
{
|
||||
uvInterp = texCoord;
|
||||
gl_Position = projectionMatrix * modelViewMatrix * vec4(pos.xy, 0.0, 1.0);
|
||||
gl_Position.z = 1.0;
|
||||
}
|
||||
'''
|
||||
|
||||
fragment_shader = '''
|
||||
uniform sampler2D image;
|
||||
uniform vec4 color;
|
||||
|
||||
in vec2 uvInterp;
|
||||
out vec4 fragColor;
|
||||
|
||||
void main()
|
||||
{
|
||||
fragColor = texture(image, uvInterp);
|
||||
fragColor.a = color.a;
|
||||
}
|
||||
'''
|
||||
|
||||
return vertex_shader, fragment_shader
|
||||
|
||||
|
||||
def glEnd():
|
||||
inst = InternalData.get_instance()
|
||||
|
||||
color = inst.get_color()
|
||||
coords = inst.get_verts()
|
||||
tex_coords = inst.get_tex_coords()
|
||||
if inst.get_dims() == 2:
|
||||
if len(tex_coords) == 0:
|
||||
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
|
||||
else:
|
||||
#shader = gpu.shader.from_builtin('2D_IMAGE')
|
||||
vert_shader, frag_shader = _get_transparency_shader()
|
||||
shader = gpu.types.GPUShader(vert_shader, frag_shader)
|
||||
elif inst.get_dims() == 3:
|
||||
if len(tex_coords) == 0:
|
||||
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
|
||||
else:
|
||||
raise NotImplemented("Texture is not supported in get_dims() == 3")
|
||||
else:
|
||||
raise NotImplemented("get_dims() != 2")
|
||||
|
||||
if len(tex_coords) == 0:
|
||||
data = {
|
||||
"pos": coords,
|
||||
}
|
||||
else:
|
||||
data = {
|
||||
"pos": coords,
|
||||
"texCoord": tex_coords
|
||||
}
|
||||
|
||||
if inst.get_prim_mode() == GL_LINES:
|
||||
indices = []
|
||||
for i in range(0, len(coords), 2):
|
||||
indices.append([i, i + 1])
|
||||
batch = batch_for_shader(shader, 'LINES', data, indices=indices)
|
||||
|
||||
elif inst.get_prim_mode() == GL_LINE_STRIP:
|
||||
batch = batch_for_shader(shader, 'LINE_STRIP', data)
|
||||
|
||||
|
||||
elif inst.get_prim_mode() == GL_LINE_LOOP:
|
||||
data["pos"].append(data["pos"][0])
|
||||
batch = batch_for_shader(shader, 'LINE_STRIP', data)
|
||||
|
||||
elif inst.get_prim_mode() == GL_TRIANGLES:
|
||||
indices = []
|
||||
for i in range(0, len(coords), 3):
|
||||
indices.append([i, i + 1, i + 2])
|
||||
batch = batch_for_shader(shader, 'TRIS', data, indices=indices)
|
||||
|
||||
elif inst.get_prim_mode() == GL_TRIANGLE_FAN:
|
||||
indices = []
|
||||
for i in range(1, len(coords) - 1):
|
||||
indices.append([0, i, i + 1])
|
||||
batch = batch_for_shader(shader, 'TRIS', data, indices=indices)
|
||||
|
||||
elif inst.get_prim_mode() == GL_QUADS:
|
||||
indices = []
|
||||
for i in range(0, len(coords), 4):
|
||||
indices.extend([[i, i + 1, i + 2], [i + 2, i + 3, i]])
|
||||
batch = batch_for_shader(shader, 'TRIS', data, indices=indices)
|
||||
else:
|
||||
raise NotImplemented("get_prim_mode() != (GL_LINES|GL_TRIANGLES|GL_QUADS)")
|
||||
|
||||
shader.bind()
|
||||
if len(tex_coords) != 0:
|
||||
shader.uniform_float("modelViewMatrix", gpu.matrix.get_model_view_matrix())
|
||||
shader.uniform_float("projectionMatrix", gpu.matrix.get_projection_matrix())
|
||||
shader.uniform_int("image", 0)
|
||||
shader.uniform_float("color", color)
|
||||
batch.draw(shader)
|
||||
|
||||
inst.clear()
|
||||
|
||||
|
||||
def glVertex2f(x, y):
|
||||
inst = InternalData.get_instance()
|
||||
inst.add_vert([x, y])
|
||||
inst.set_dims(2)
|
||||
|
||||
|
||||
def glVertex3f(x, y, z):
|
||||
inst = InternalData.get_instance()
|
||||
inst.add_vert([x, y, z])
|
||||
inst.set_dims(3)
|
||||
|
||||
|
||||
def glTexCoord2f(u, v):
|
||||
inst = InternalData.get_instance()
|
||||
inst.add_tex_coord([u, v])
|
||||
|
||||
|
||||
GL_BLEND = bgl.GL_BLEND
|
||||
GL_LINE_SMOOTH = bgl.GL_LINE_SMOOTH
|
||||
GL_INT = bgl.GL_INT
|
||||
GL_SCISSOR_BOX = bgl.GL_SCISSOR_BOX
|
||||
GL_TEXTURE_2D = bgl.GL_TEXTURE_2D
|
||||
GL_TEXTURE0 = bgl.GL_TEXTURE0
|
||||
GL_DEPTH_TEST = bgl.GL_DEPTH_TEST
|
||||
|
||||
GL_TEXTURE_MIN_FILTER = 0
|
||||
GL_TEXTURE_MAG_FILTER = 0
|
||||
GL_LINEAR = 0
|
||||
GL_TEXTURE_ENV = 0
|
||||
GL_TEXTURE_ENV_MODE = 0
|
||||
GL_MODULATE = 0
|
||||
|
||||
def glEnable(cap):
|
||||
bgl.glEnable(cap)
|
||||
|
||||
|
||||
def glDisable(cap):
|
||||
bgl.glDisable(cap)
|
||||
|
||||
|
||||
def glScissor(x, y, width, height):
|
||||
bgl.glScissor(x, y, width, height)
|
||||
|
||||
|
||||
def glGetIntegerv(pname, params):
|
||||
bgl.glGetIntegerv(pname, params)
|
||||
|
||||
|
||||
def glActiveTexture(texture):
|
||||
bgl.glActiveTexture(texture)
|
||||
|
||||
|
||||
def glBindTexture(target, texture):
|
||||
bgl.glBindTexture(target, texture)
|
||||
|
||||
|
||||
def glTexParameteri(target, pname, param):
|
||||
pass
|
||||
|
||||
|
||||
def glTexEnvi(target, pname, param):
|
||||
pass
|
@ -2,8 +2,8 @@
|
||||
|
||||
__author__ = "Nutti <nutti.metro@gmail.com>"
|
||||
__status__ = "production"
|
||||
__version__ = "6.6"
|
||||
__date__ = "22 Apr 2022"
|
||||
__version__ = "6.7"
|
||||
__date__ = "9 Sep 2022"
|
||||
|
||||
if "bpy" in locals():
|
||||
import importlib
|
||||
|
@ -2,8 +2,8 @@
|
||||
|
||||
__author__ = "Nutti <nutti.metro@gmail.com>"
|
||||
__status__ = "production"
|
||||
__version__ = "6.6"
|
||||
__date__ = "22 Apr 2022"
|
||||
__version__ = "6.7"
|
||||
__date__ = "9 Sep 2022"
|
||||
|
||||
from collections import namedtuple
|
||||
from math import sin, cos
|
||||
@ -24,11 +24,8 @@ from ..utils.bl_class_registry import BlClassRegistry
|
||||
from ..utils.property_class_registry import PropertyClassRegistry
|
||||
from ..utils import compatibility as compat
|
||||
|
||||
if compat.check_version(2, 80, 0) >= 0:
|
||||
from ..lib import bglx as bgl
|
||||
else:
|
||||
import bgl
|
||||
|
||||
import gpu
|
||||
from gpu_extras.batch import batch_for_shader
|
||||
|
||||
_Rect = namedtuple('Rect', 'x0 y0 x1 y1')
|
||||
_Rect2 = namedtuple('Rect2', 'x y width height')
|
||||
@ -220,7 +217,7 @@ class _Properties:
|
||||
)
|
||||
scene.muv_texture_projection_adjust_window = BoolProperty(
|
||||
name="Adjust Window",
|
||||
description="Scale of renderered texture is fitted to window",
|
||||
description="Scale of rendered texture is fitted to window",
|
||||
default=True
|
||||
)
|
||||
scene.muv_texture_projection_apply_tex_aspect = BoolProperty(
|
||||
@ -334,35 +331,19 @@ class MUV_OT_TextureProjection(bpy.types.Operator):
|
||||
]
|
||||
|
||||
# OpenGL configuration
|
||||
if compat.check_version(2, 80, 0) >= 0:
|
||||
bgl.glEnable(bgl.GL_BLEND)
|
||||
bgl.glEnable(bgl.GL_TEXTURE_2D)
|
||||
bgl.glActiveTexture(bgl.GL_TEXTURE0)
|
||||
if img.bindcode:
|
||||
bind = img.bindcode
|
||||
bgl.glBindTexture(bgl.GL_TEXTURE_2D, bind)
|
||||
else:
|
||||
bgl.glEnable(bgl.GL_BLEND)
|
||||
bgl.glEnable(bgl.GL_TEXTURE_2D)
|
||||
if img.bindcode:
|
||||
bind = img.bindcode[0]
|
||||
bgl.glBindTexture(bgl.GL_TEXTURE_2D, bind)
|
||||
bgl.glTexParameteri(bgl.GL_TEXTURE_2D,
|
||||
bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_LINEAR)
|
||||
bgl.glTexParameteri(bgl.GL_TEXTURE_2D,
|
||||
bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_LINEAR)
|
||||
bgl.glTexEnvi(
|
||||
bgl.GL_TEXTURE_ENV, bgl.GL_TEXTURE_ENV_MODE,
|
||||
bgl.GL_MODULATE)
|
||||
|
||||
# render texture
|
||||
bgl.glBegin(bgl.GL_QUADS)
|
||||
bgl.glColor4f(1.0, 1.0, 1.0,
|
||||
sc.muv_texture_projection_tex_transparency)
|
||||
for (v1, v2), (u, v) in zip(positions, tex_coords):
|
||||
bgl.glTexCoord2f(u, v)
|
||||
bgl.glVertex2f(v1, v2)
|
||||
bgl.glEnd()
|
||||
shader = gpu.shader.from_builtin('IMAGE_COLOR')
|
||||
batch = batch_for_shader(
|
||||
shader, 'TRI_FAN',
|
||||
{"pos": positions, "texCoord": tex_coords},
|
||||
)
|
||||
|
||||
gpu.state.blend_set('ALPHA')
|
||||
shader.bind()
|
||||
shader.uniform_sampler("image", gpu.texture.from_image(img))
|
||||
shader.uniform_float("color", (1.0, 1.0, 1.0, sc.muv_texture_projection_tex_transparency))
|
||||
batch.draw(shader)
|
||||
del batch
|
||||
|
||||
def invoke(self, context, _):
|
||||
if not MUV_OT_TextureProjection.is_running(context):
|
||||
|
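For reference, the replacement pattern used in the hunk above swaps the legacy bgl immediate-mode calls (glBegin/glTexCoord2f/glVertex2f/glEnd) for a gpu-module batch. A minimal sketch of drawing a textured, tinted quad in a draw callback, assuming an already loaded bpy.types.Image named img and four 2D region-space positions; the helper name draw_textured_quad is illustrative only, not part of the addon:

import gpu
from gpu_extras.batch import batch_for_shader

def draw_textured_quad(img, positions, alpha=1.0):
    # Built-in image shader; "pos" and "texCoord" are per-vertex attributes.
    shader = gpu.shader.from_builtin('IMAGE_COLOR')
    tex_coords = [(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)]
    batch = batch_for_shader(shader, 'TRI_FAN',
                             {"pos": positions, "texCoord": tex_coords})
    gpu.state.blend_set('ALPHA')
    shader.bind()
    shader.uniform_sampler("image", gpu.texture.from_image(img))
    shader.uniform_float("color", (1.0, 1.0, 1.0, alpha))
    batch.draw(shader)
    gpu.state.blend_set('NONE')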
@ -246,7 +246,7 @@ class MUV_OT_TextureWrap_Set(bpy.types.Operator):
|
||||
cv0, cv1, ov)
|
||||
info["vert_vdiff"] = x - common_verts[0]["vert"].co
|
||||
|
||||
# calclulate factor
|
||||
# calculate factor
|
||||
fact_h = -info["vert_hdiff"].length / \
|
||||
ref_info["vert_hdiff"].length
|
||||
fact_v = info["vert_vdiff"].length / \
|
||||
|
@ -2,8 +2,8 @@
|
||||
|
||||
__author__ = "Nutti <nutti.metro@gmail.com>"
|
||||
__status__ = "production"
|
||||
__version__ = "6.6"
|
||||
__date__ = "22 Apr 2022"
|
||||
__version__ = "6.7"
|
||||
__date__ = "9 Sep 2022"
|
||||
|
||||
from enum import IntEnum
|
||||
import math
|
||||
@ -18,10 +18,8 @@ from ..utils.bl_class_registry import BlClassRegistry
|
||||
from ..utils.property_class_registry import PropertyClassRegistry
|
||||
from ..utils import compatibility as compat
|
||||
|
||||
if compat.check_version(2, 80, 0) >= 0:
|
||||
from ..lib import bglx as bgl
|
||||
else:
|
||||
import bgl
|
||||
import gpu
|
||||
from gpu_extras.batch import batch_for_shader
|
||||
|
||||
|
||||
MAX_VALUE = 100000.0
|
||||
@ -634,28 +632,6 @@ class MUV_OT_UVBoundingBox(bpy.types.Operator):
|
||||
context.window_manager.event_timer_remove(cls.__timer)
|
||||
cls.__timer = None
|
||||
|
||||
@classmethod
|
||||
def __draw_ctrl_point(cls, context, pos):
|
||||
"""
|
||||
Draw control point
|
||||
"""
|
||||
user_prefs = compat.get_user_preferences(context)
|
||||
prefs = user_prefs.addons["magic_uv"].preferences
|
||||
cp_size = prefs.uv_bounding_box_cp_size
|
||||
offset = cp_size / 2
|
||||
verts = [
|
||||
[pos.x - offset, pos.y - offset],
|
||||
[pos.x - offset, pos.y + offset],
|
||||
[pos.x + offset, pos.y + offset],
|
||||
[pos.x + offset, pos.y - offset]
|
||||
]
|
||||
bgl.glEnable(bgl.GL_BLEND)
|
||||
bgl.glBegin(bgl.GL_QUADS)
|
||||
bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
|
||||
for (x, y) in verts:
|
||||
bgl.glVertex2f(x, y)
|
||||
bgl.glEnd()
|
||||
|
||||
@classmethod
|
||||
def draw_bb(cls, _, context):
|
||||
"""
|
||||
@ -669,10 +645,22 @@ class MUV_OT_UVBoundingBox(bpy.types.Operator):
|
||||
if not _is_valid_context(context):
|
||||
return
|
||||
|
||||
for cp in props.ctrl_points:
|
||||
cls.__draw_ctrl_point(
|
||||
context, mathutils.Vector(
|
||||
context.region.view2d.view_to_region(cp.x, cp.y)))
|
||||
user_prefs = compat.get_user_preferences(context)
|
||||
prefs = user_prefs.addons["magic_uv"].preferences
|
||||
cp_size = prefs.uv_bounding_box_cp_size
|
||||
|
||||
gpu.state.program_point_size_set(False)
|
||||
gpu.state.point_size_set(cp_size)
|
||||
gpu.state.blend_set('ALPHA')
|
||||
|
||||
shader = gpu.shader.from_builtin("UNIFORM_COLOR")
|
||||
shader.bind()
|
||||
shader.uniform_float("color", (1.0, 1.0, 1.0, 1.0))
|
||||
|
||||
points = [mathutils.Vector(context.region.view2d.view_to_region(cp.x, cp.y)) for cp in props.ctrl_points]
|
||||
batch = batch_for_shader(shader, 'POINTS', {"pos": points})
|
||||
batch.draw(shader)
|
||||
del batch
|
||||
|
||||
def __get_uv_info(self, context):
|
||||
"""
|
||||
|
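The control-point change above follows the same migration: instead of emitting one GL_QUADS fan per point, all control points go into a single 'POINTS' batch with a fixed point size. A minimal sketch, assuming points is a list of 2D region-space coordinates; draw_points is an illustrative helper name:

import gpu
from gpu_extras.batch import batch_for_shader

def draw_points(points, size, color=(1.0, 1.0, 1.0, 1.0)):
    # Use a fixed point size rather than per-vertex gl_PointSize.
    gpu.state.program_point_size_set(False)
    gpu.state.point_size_set(size)
    gpu.state.blend_set('ALPHA')
    shader = gpu.shader.from_builtin('UNIFORM_COLOR')
    shader.bind()
    shader.uniform_float("color", color)
    batch = batch_for_shader(shader, 'POINTS', {"pos": points})
    batch.draw(shader)
    gpu.state.point_size_set(1.0)
    gpu.state.blend_set('NONE')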
@ -2,8 +2,8 @@
|
||||
|
||||
__author__ = "Nutti <nutti.metro@gmail.com>"
|
||||
__status__ = "production"
|
||||
__version__ = "6.6"
|
||||
__date__ = "22 Apr 2022"
|
||||
__version__ = "6.7"
|
||||
__date__ = "9 Sep 2022"
|
||||
|
||||
import random
|
||||
from math import fabs
|
||||
@ -17,10 +17,8 @@ from ..utils.bl_class_registry import BlClassRegistry
|
||||
from ..utils.property_class_registry import PropertyClassRegistry
|
||||
from ..utils import compatibility as compat
|
||||
|
||||
if compat.check_version(2, 80, 0) >= 0:
|
||||
from ..lib import bglx as bgl
|
||||
else:
|
||||
import bgl
|
||||
import gpu
|
||||
from gpu_extras.batch import batch_for_shader
|
||||
|
||||
|
||||
def _is_valid_context(context):
|
||||
@ -234,41 +232,40 @@ class MUV_OT_UVInspection_Render(bpy.types.Operator):
|
||||
return
|
||||
|
||||
# OpenGL configuration.
|
||||
bgl.glEnable(bgl.GL_BLEND)
|
||||
bgl.glEnable(bgl.GL_DEPTH_TEST)
|
||||
gpu.state.blend_set('ALPHA')
|
||||
gpu.state.depth_test_set('LESS_EQUAL')
|
||||
|
||||
shader = gpu.shader.from_builtin("UNIFORM_COLOR")
|
||||
shader.bind()
|
||||
|
||||
# Render faces whose UV is overlapped.
|
||||
if sc.muv_uv_inspection_show_overlapped:
|
||||
color = prefs.uv_inspection_overlapped_color_for_v3d
|
||||
shader.uniform_float("color", prefs.uv_inspection_overlapped_color_for_v3d)
|
||||
|
||||
for obj, findices in props.overlapped_info_for_v3d.items():
|
||||
world_mat = obj.matrix_world
|
||||
bm = bmesh.from_edit_mesh(obj.data)
|
||||
|
||||
for fidx in findices:
|
||||
bgl.glBegin(bgl.GL_TRIANGLE_FAN)
|
||||
bgl.glColor4f(color[0], color[1], color[2], color[3])
|
||||
for l in bm.faces[fidx].loops:
|
||||
co = compat.matmul(world_mat, l.vert.co)
|
||||
bgl.glVertex3f(co[0], co[1], co[2])
|
||||
bgl.glEnd()
|
||||
coords = [compat.matmul(world_mat, l.vert.co) for l in bm.faces[fidx].loops]
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": coords})
|
||||
batch.draw(shader)
|
||||
|
||||
# Render faces whose UV is flipped.
|
||||
if sc.muv_uv_inspection_show_flipped:
|
||||
color = prefs.uv_inspection_flipped_color_for_v3d
|
||||
shader.uniform_float("color", prefs.uv_inspection_flipped_color_for_v3d)
|
||||
|
||||
for obj, findices in props.filpped_info_for_v3d.items():
|
||||
world_mat = obj.matrix_world
|
||||
bm = bmesh.from_edit_mesh(obj.data)
|
||||
|
||||
for fidx in findices:
|
||||
bgl.glBegin(bgl.GL_TRIANGLE_FAN)
|
||||
bgl.glColor4f(color[0], color[1], color[2], color[3])
|
||||
for l in bm.faces[fidx].loops:
|
||||
co = compat.matmul(world_mat, l.vert.co)
|
||||
bgl.glVertex3f(co[0], co[1], co[2])
|
||||
bgl.glEnd()
|
||||
coords = [compat.matmul(world_mat, l.vert.co) for l in bm.faces[fidx].loops]
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": coords})
|
||||
batch.draw(shader)
|
||||
|
||||
bgl.glDisable(bgl.GL_DEPTH_TEST)
|
||||
bgl.glDisable(bgl.GL_BLEND)
|
||||
gpu.state.depth_test_set('NONE')
|
||||
gpu.state.blend_set('NONE')
|
||||
|
||||
@staticmethod
|
||||
def draw(_, context):
|
||||
@ -281,53 +278,46 @@ class MUV_OT_UVInspection_Render(bpy.types.Operator):
|
||||
return
|
||||
|
||||
# OpenGL configuration
|
||||
bgl.glEnable(bgl.GL_BLEND)
|
||||
gpu.state.blend_set('ALPHA')
|
||||
|
||||
shader = gpu.shader.from_builtin("UNIFORM_COLOR")
|
||||
shader.bind()
|
||||
|
||||
# render overlapped UV
|
||||
if sc.muv_uv_inspection_show_overlapped:
|
||||
color = prefs.uv_inspection_overlapped_color
|
||||
shader.uniform_float("color", prefs.uv_inspection_overlapped_color)
|
||||
|
||||
for info in props.overlapped_info:
|
||||
if sc.muv_uv_inspection_show_mode == 'PART':
|
||||
for poly in info["polygons"]:
|
||||
bgl.glBegin(bgl.GL_TRIANGLE_FAN)
|
||||
bgl.glColor4f(color[0], color[1], color[2], color[3])
|
||||
for uv in poly:
|
||||
x, y = context.region.view2d.view_to_region(
|
||||
uv.x, uv.y, clip=False)
|
||||
bgl.glVertex2f(x, y)
|
||||
bgl.glEnd()
|
||||
coords = [context.region.view2d.view_to_region(uv.x, uv.y, clip=False) for uv in poly]
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": coords})
|
||||
batch.draw(shader)
|
||||
|
||||
elif sc.muv_uv_inspection_show_mode == 'FACE':
|
||||
bgl.glBegin(bgl.GL_TRIANGLE_FAN)
|
||||
bgl.glColor4f(color[0], color[1], color[2], color[3])
|
||||
for uv in info["subject_uvs"]:
|
||||
x, y = context.region.view2d.view_to_region(
|
||||
uv.x, uv.y, clip=False)
|
||||
bgl.glVertex2f(x, y)
|
||||
bgl.glEnd()
|
||||
coords = [
|
||||
context.region.view2d.view_to_region(
|
||||
uv.x, uv.y, clip=False) for uv in info["subject_uvs"]]
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": coords})
|
||||
batch.draw(shader)
|
||||
|
||||
# render flipped UV
|
||||
if sc.muv_uv_inspection_show_flipped:
|
||||
color = prefs.uv_inspection_flipped_color
|
||||
shader.uniform_float("color", prefs.uv_inspection_flipped_color)
|
||||
|
||||
for info in props.flipped_info:
|
||||
if sc.muv_uv_inspection_show_mode == 'PART':
|
||||
for poly in info["polygons"]:
|
||||
bgl.glBegin(bgl.GL_TRIANGLE_FAN)
|
||||
bgl.glColor4f(color[0], color[1], color[2], color[3])
|
||||
for uv in poly:
|
||||
x, y = context.region.view2d.view_to_region(
|
||||
uv.x, uv.y, clip=False)
|
||||
bgl.glVertex2f(x, y)
|
||||
bgl.glEnd()
|
||||
elif sc.muv_uv_inspection_show_mode == 'FACE':
|
||||
bgl.glBegin(bgl.GL_TRIANGLE_FAN)
|
||||
bgl.glColor4f(color[0], color[1], color[2], color[3])
|
||||
for uv in info["uvs"]:
|
||||
x, y = context.region.view2d.view_to_region(
|
||||
uv.x, uv.y, clip=False)
|
||||
bgl.glVertex2f(x, y)
|
||||
bgl.glEnd()
|
||||
coords = [context.region.view2d.view_to_region(uv.x, uv.y, clip=False) for uv in poly]
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": coords})
|
||||
batch.draw(shader)
|
||||
|
||||
bgl.glDisable(bgl.GL_BLEND)
|
||||
elif sc.muv_uv_inspection_show_mode == 'FACE':
|
||||
coords = [context.region.view2d.view_to_region(uv.x, uv.y, clip=False) for uv in info["uvs"]]
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": coords})
|
||||
batch.draw(shader)
|
||||
|
||||
gpu.state.blend_set('NONE')
|
||||
|
||||
def invoke(self, context, _):
|
||||
if not MUV_OT_UVInspection_Render.is_running(context):
|
||||
|
@ -2,8 +2,8 @@
|
||||
|
||||
__author__ = "Nutti <nutti.metro@gmail.com>"
|
||||
__status__ = "production"
|
||||
__version__ = "6.6"
|
||||
__date__ = "22 Apr 2022"
|
||||
__version__ = "6.7"
|
||||
__date__ = "9 Sep 2022"
|
||||
|
||||
from math import pi, cos, tan, sin
|
||||
|
||||
@ -25,11 +25,8 @@ from ..utils.bl_class_registry import BlClassRegistry
|
||||
from ..utils.property_class_registry import PropertyClassRegistry
|
||||
from ..utils import compatibility as compat
|
||||
|
||||
|
||||
if compat.check_version(2, 80, 0) >= 0:
|
||||
from ..lib import bglx as bgl
|
||||
else:
|
||||
import bgl
|
||||
import gpu
|
||||
from gpu_extras.batch import batch_for_shader
|
||||
|
||||
|
||||
def _is_valid_context(context):
|
||||
@ -215,21 +212,26 @@ class MUV_OT_UVSculpt(bpy.types.Operator):
|
||||
theta = 2 * pi / num_segment
|
||||
fact_t = tan(theta)
|
||||
fact_r = cos(theta)
|
||||
color = prefs.uv_sculpt_brush_color
|
||||
|
||||
bgl.glBegin(bgl.GL_LINE_STRIP)
|
||||
bgl.glColor4f(color[0], color[1], color[2], color[3])
|
||||
shader = gpu.shader.from_builtin("UNIFORM_COLOR")
|
||||
shader.bind()
|
||||
shader.uniform_float("color", prefs.uv_sculpt_brush_color)
|
||||
|
||||
x = sc.muv_uv_sculpt_radius * cos(0.0)
|
||||
y = sc.muv_uv_sculpt_radius * sin(0.0)
|
||||
coords = []
|
||||
for _ in range(num_segment):
|
||||
bgl.glVertex2f(x + obj.current_mco.x, y + obj.current_mco.y)
|
||||
coords.append([x + obj.current_mco.x, y + obj.current_mco.y])
|
||||
tx = -y
|
||||
ty = x
|
||||
x = x + tx * fact_t
|
||||
y = y + ty * fact_t
|
||||
x = x * fact_r
|
||||
y = y * fact_r
|
||||
bgl.glEnd()
|
||||
|
||||
batch = batch_for_shader(shader, 'LINE_STRIP', {"pos": coords})
|
||||
batch.draw(shader)
|
||||
del batch
|
||||
|
||||
def __init__(self):
|
||||
self.__loop_info = {} # { Object: loop_info }
|
||||
|
@ -2,11 +2,10 @@
|
||||
|
||||
__author__ = "Nutti <nutti.metro@gmail.com>"
|
||||
__status__ = "production"
|
||||
__version__ = "6.6"
|
||||
__date__ = "22 Apr 2022"
|
||||
__version__ = "6.7"
|
||||
__date__ = "22 Sep 2022"
|
||||
|
||||
import bpy
|
||||
import bgl
|
||||
import blf
|
||||
|
||||
|
||||
@ -128,10 +127,7 @@ def icon(icon):
|
||||
|
||||
|
||||
def set_blf_font_color(font_id, r, g, b, a):
|
||||
if check_version(2, 80, 0) >= 0:
|
||||
blf.color(font_id, r, g, b, a)
|
||||
else:
|
||||
bgl.glColor4f(r, g, b, a)
|
||||
|
||||
|
||||
def set_blf_blur(font_id, radius):
|
||||
|
@ -272,7 +272,7 @@ def mu_select_by_material_name(self, find_material_name, extend_selection = Fals
|
||||
if active_object.type == 'MESH':
|
||||
# if not extending the selection, deselect all first
|
||||
# (Without this, edges/faces were still selected
|
||||
# while the faces were deselcted)
|
||||
# while the faces were deselected)
|
||||
if not extend_selection:
|
||||
bpy.ops.mesh.select_all(action = 'DESELECT')
|
||||
|
||||
|
@ -720,7 +720,7 @@ class MATERIAL_OT_materialutilities_auto_smooth_angle(bpy.types.Operator):
|
||||
set_smooth_shading: BoolProperty(
|
||||
name = "Set Smooth",
|
||||
description = "Set Smooth shading for the affected objects\n"
|
||||
"This overrides the currenth smooth/flat shading that might be set to different parts of the object",
|
||||
"This overrides the current smooth/flat shading that might be set to different parts of the object",
|
||||
default = True
|
||||
)
|
||||
|
||||
|
@ -305,7 +305,7 @@ def register():
|
||||
Scene.measureit_debug = BoolProperty(name="Debug",
|
||||
description="Display information for debugging"
|
||||
" (expand/collapse for enabling or disabling)"
|
||||
" this information is only renderered for "
|
||||
" this information is only rendered for "
|
||||
"selected objects",
|
||||
default=False)
|
||||
Scene.measureit_debug_select = BoolProperty(name="Selected",
|
||||
|
@ -232,7 +232,7 @@ class MeasureitProperties(PropertyGroup):
|
||||
default=15, min=6, max=500)
|
||||
|
||||
glarc_full: BoolProperty(name="arcfull",
|
||||
description="Create full circunference",
|
||||
description="Create full circumference",
|
||||
default=False)
|
||||
glarc_extrad: BoolProperty(name="arcextrad",
|
||||
description="Adapt radio length to arc line",
|
||||
@ -554,7 +554,7 @@ def add_item(box, idx, segment):
|
||||
if segment.gltype == 1:
|
||||
row.prop(segment, 'glorto', text="Orthogonal")
|
||||
row.prop(segment, 'glocwarning', text="Warning")
|
||||
# ortogonal (only segments)
|
||||
# orthogonal (only segments)
|
||||
if segment.gltype == 1:
|
||||
if segment.glorto != "99":
|
||||
row = box.row(align=True)
|
||||
|
@ -2560,7 +2560,7 @@ class MESH_OT_SURFSK_add_surface(Operator):
|
||||
loop_segments_lengths = []
|
||||
|
||||
for st in range(len(pts_on_strokes_with_proportions_U)):
|
||||
# When on the first stroke, add the segment from the selection to the dirst stroke
|
||||
# When on the first stroke, add the segment from the selection to the first stroke
|
||||
if st == 0:
|
||||
loop_segments_lengths.append(
|
||||
((self.main_object.matrix_world @ verts_ordered_U[lp].co) -
|
||||
|
@ -559,7 +559,7 @@ def PointInside(v, a, points):
|
||||
|
||||
|
||||
def SignedArea(polygon, points):
|
||||
"""Return the area of the polgon, positive if CCW, negative if CW.
|
||||
"""Return the area of the polygon, positive if CCW, negative if CW.
|
||||
|
||||
Args:
|
||||
polygon: list of vertex indices
|
||||
|
@ -3942,7 +3942,7 @@ class GStretch(Operator):
|
||||
conversion_max: IntProperty(
|
||||
name="Max Vertices",
|
||||
description="Maximum number of vertices strokes will "
|
||||
"have, when they are converted to geomtery",
|
||||
"have, when they are converted to geometry",
|
||||
default=32,
|
||||
min=3,
|
||||
soft_max=500,
|
||||
@ -3951,7 +3951,7 @@ class GStretch(Operator):
|
||||
conversion_min: IntProperty(
|
||||
name="Min Vertices",
|
||||
description="Minimum number of vertices strokes will "
|
||||
"have, when they are converted to geomtery",
|
||||
"have, when they are converted to geometry",
|
||||
default=8,
|
||||
min=3,
|
||||
soft_max=500,
|
||||
@ -5000,7 +5000,7 @@ class LoopToolsProps(PropertyGroup):
|
||||
gstretch_conversion_max: IntProperty(
|
||||
name="Max Vertices",
|
||||
description="Maximum number of vertices strokes will "
|
||||
"have, when they are converted to geomtery",
|
||||
"have, when they are converted to geometry",
|
||||
default=32,
|
||||
min=3,
|
||||
soft_max=500,
|
||||
@ -5009,7 +5009,7 @@ class LoopToolsProps(PropertyGroup):
|
||||
gstretch_conversion_min: IntProperty(
|
||||
name="Min Vertices",
|
||||
description="Minimum number of vertices strokes will "
|
||||
"have, when they are converted to geomtery",
|
||||
"have, when they are converted to geometry",
|
||||
default=8,
|
||||
min=3,
|
||||
soft_max=500,
|
||||
|
@ -71,7 +71,7 @@ def tool_line():
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Tool Registraion
|
||||
# Tool Registration
|
||||
|
||||
|
||||
def get_tool_list(space_type, context_mode):
|
||||
@ -149,7 +149,7 @@ def unregister_keymaps():
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Addon Registraion
|
||||
# Addon Registration
|
||||
|
||||
classes = (
|
||||
preferences.SnapUtilitiesPreferences,
|
||||
|
@ -152,7 +152,7 @@ def get_adj_faces(edges):
|
||||
co_adj = 0
|
||||
for f in e.link_faces:
|
||||
# Search an adjacent face.
|
||||
# Selected face has precedance.
|
||||
# Selected face has precedence.
|
||||
if not f.hide and f.normal != ZERO_VEC:
|
||||
adj_exist = True
|
||||
adj_f = f
|
||||
|
node_wrangler.py
@ -3,8 +3,8 @@
|
||||
bl_info = {
|
||||
"name": "Node Wrangler",
|
||||
"author": "Bartek Skorupa, Greg Zaal, Sebastian Koenig, Christian Brinkmann, Florian Meyer",
|
||||
"version": (3, 41),
|
||||
"blender": (2, 93, 0),
|
||||
"version": (3, 43),
|
||||
"blender": (3, 4, 0),
|
||||
"location": "Node Editor Toolbar or Shift-W",
|
||||
"description": "Various tools to enhance and speed up node-based workflow",
|
||||
"warning": "",
|
||||
@ -12,7 +12,7 @@ bl_info = {
|
||||
"category": "Node",
|
||||
}
|
||||
|
||||
import bpy, blf, bgl
|
||||
import bpy
|
||||
import gpu
|
||||
from bpy.types import Operator, Panel, Menu
|
||||
from bpy.props import (
|
||||
@ -264,9 +264,14 @@ def force_update(context):
|
||||
context.space_data.node_tree.update_tag()
|
||||
|
||||
|
||||
def dpifac():
|
||||
def dpi_fac():
|
||||
prefs = bpy.context.preferences.system
|
||||
return prefs.dpi * prefs.pixel_size / 72
|
||||
return prefs.dpi / 72
|
||||
|
||||
|
||||
def prefs_line_width():
|
||||
prefs = bpy.context.preferences.system
|
||||
return prefs.pixel_size
|
||||
|
||||
|
||||
def node_mid_pt(node, axis):
|
||||
@ -342,8 +347,8 @@ def node_at_pos(nodes, context, event):
|
||||
for node in nodes:
|
||||
skipnode = False
|
||||
if node.type != 'FRAME': # no point trying to link to a frame node
|
||||
dimx = node.dimensions.x/dpifac()
|
||||
dimy = node.dimensions.y/dpifac()
|
||||
dimx = node.dimensions.x / dpi_fac()
|
||||
dimy = node.dimensions.y / dpi_fac()
|
||||
locx, locy = abs_node_location(node)
|
||||
|
||||
if not skipnode:
|
||||
@ -362,8 +367,8 @@ def node_at_pos(nodes, context, event):
|
||||
for node in nodes:
|
||||
if node.type != 'FRAME' and skipnode == False:
|
||||
locx, locy = abs_node_location(node)
|
||||
dimx = node.dimensions.x/dpifac()
|
||||
dimy = node.dimensions.y/dpifac()
|
||||
dimx = node.dimensions.x / dpi_fac()
|
||||
dimy = node.dimensions.y / dpi_fac()
|
||||
if (locx <= x <= locx + dimx) and \
|
||||
(locy - dimy <= y <= locy):
|
||||
nodes_under_mouse.append(node)
|
||||
@ -390,7 +395,9 @@ def store_mouse_cursor(context, event):
|
||||
space.cursor_location = tree.view_center
|
||||
|
||||
def draw_line(x1, y1, x2, y2, size, colour=(1.0, 1.0, 1.0, 0.7)):
|
||||
shader = gpu.shader.from_builtin('2D_SMOOTH_COLOR')
|
||||
shader = gpu.shader.from_builtin('POLYLINE_SMOOTH_COLOR')
|
||||
shader.uniform_float("viewportSize", gpu.state.viewport_get()[2:])
|
||||
shader.uniform_float("lineWidth", size * prefs_line_width())
|
||||
|
||||
vertices = ((x1, y1), (x2, y2))
|
||||
vertex_colors = ((colour[0]+(1.0-colour[0])/4,
|
||||
@ -400,34 +407,31 @@ def draw_line(x1, y1, x2, y2, size, colour=(1.0, 1.0, 1.0, 0.7)):
|
||||
colour)
|
||||
|
||||
batch = batch_for_shader(shader, 'LINE_STRIP', {"pos": vertices, "color": vertex_colors})
|
||||
bgl.glLineWidth(size * dpifac())
|
||||
|
||||
shader.bind()
|
||||
batch.draw(shader)
|
||||
|
||||
|
||||
def draw_circle_2d_filled(shader, mx, my, radius, colour=(1.0, 1.0, 1.0, 0.7)):
|
||||
radius = radius * dpifac()
|
||||
def draw_circle_2d_filled(mx, my, radius, colour=(1.0, 1.0, 1.0, 0.7)):
|
||||
radius = radius * prefs_line_width()
|
||||
sides = 12
|
||||
vertices = [(radius * cos(i * 2 * pi / sides) + mx,
|
||||
radius * sin(i * 2 * pi / sides) + my)
|
||||
for i in range(sides + 1)]
|
||||
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": vertices})
|
||||
shader.bind()
|
||||
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
|
||||
shader.uniform_float("color", colour)
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": vertices})
|
||||
batch.draw(shader)
|
||||
|
||||
|
||||
def draw_rounded_node_border(shader, node, radius=8, colour=(1.0, 1.0, 1.0, 0.7)):
|
||||
def draw_rounded_node_border(node, radius=8, colour=(1.0, 1.0, 1.0, 0.7)):
|
||||
area_width = bpy.context.area.width
|
||||
sides = 16
|
||||
radius = radius*dpifac()
|
||||
radius *= prefs_line_width()
|
||||
|
||||
nlocx, nlocy = abs_node_location(node)
|
||||
|
||||
nlocx = (nlocx+1)*dpifac()
|
||||
nlocy = (nlocy+1)*dpifac()
|
||||
nlocx = (nlocx+1) * dpi_fac()
|
||||
nlocy = (nlocy+1) * dpi_fac()
|
||||
ndimx = node.dimensions.x
|
||||
ndimy = node.dimensions.y
|
||||
|
||||
@ -441,6 +445,9 @@ def draw_rounded_node_border(shader, node, radius=8, colour=(1.0, 1.0, 1.0, 0.7)
|
||||
ndimy = 0
|
||||
radius += 6
|
||||
|
||||
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
|
||||
shader.uniform_float("color", colour)
|
||||
|
||||
# Top left corner
|
||||
mx, my = bpy.context.region.view2d.view_to_region(nlocx, nlocy, clip=False)
|
||||
vertices = [(mx,my)]
|
||||
@ -450,9 +457,8 @@ def draw_rounded_node_border(shader, node, radius=8, colour=(1.0, 1.0, 1.0, 0.7)
|
||||
cosine = radius * cos(i * 2 * pi / sides) + mx
|
||||
sine = radius * sin(i * 2 * pi / sides) + my
|
||||
vertices.append((cosine,sine))
|
||||
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": vertices})
|
||||
shader.bind()
|
||||
shader.uniform_float("color", colour)
|
||||
batch.draw(shader)
|
||||
|
||||
# Top right corner
|
||||
@ -464,9 +470,8 @@ def draw_rounded_node_border(shader, node, radius=8, colour=(1.0, 1.0, 1.0, 0.7)
|
||||
cosine = radius * cos(i * 2 * pi / sides) + mx
|
||||
sine = radius * sin(i * 2 * pi / sides) + my
|
||||
vertices.append((cosine,sine))
|
||||
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": vertices})
|
||||
shader.bind()
|
||||
shader.uniform_float("color", colour)
|
||||
batch.draw(shader)
|
||||
|
||||
# Bottom left corner
|
||||
@ -478,9 +483,8 @@ def draw_rounded_node_border(shader, node, radius=8, colour=(1.0, 1.0, 1.0, 0.7)
|
||||
cosine = radius * cos(i * 2 * pi / sides) + mx
|
||||
sine = radius * sin(i * 2 * pi / sides) + my
|
||||
vertices.append((cosine,sine))
|
||||
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": vertices})
|
||||
shader.bind()
|
||||
shader.uniform_float("color", colour)
|
||||
batch.draw(shader)
|
||||
|
||||
# Bottom right corner
|
||||
@ -492,9 +496,8 @@ def draw_rounded_node_border(shader, node, radius=8, colour=(1.0, 1.0, 1.0, 0.7)
|
||||
cosine = radius * cos(i * 2 * pi / sides) + mx
|
||||
sine = radius * sin(i * 2 * pi / sides) + my
|
||||
vertices.append((cosine,sine))
|
||||
|
||||
batch = batch_for_shader(shader, 'TRI_FAN', {"pos": vertices})
|
||||
shader.bind()
|
||||
shader.uniform_float("color", colour)
|
||||
batch.draw(shader)
|
||||
|
||||
# prepare drawing all edges in one batch
|
||||
@ -546,22 +549,14 @@ def draw_rounded_node_border(shader, node, radius=8, colour=(1.0, 1.0, 1.0, 0.7)
|
||||
# now draw all edges in one batch
|
||||
if len(vertices) != 0:
|
||||
batch = batch_for_shader(shader, 'TRIS', {"pos": vertices}, indices=indices)
|
||||
shader.bind()
|
||||
shader.uniform_float("color", colour)
|
||||
batch.draw(shader)
|
||||
|
||||
def draw_callback_nodeoutline(self, context, mode):
|
||||
if self.mouse_path:
|
||||
|
||||
bgl.glLineWidth(1)
|
||||
bgl.glEnable(bgl.GL_BLEND)
|
||||
bgl.glEnable(bgl.GL_LINE_SMOOTH)
|
||||
bgl.glHint(bgl.GL_LINE_SMOOTH_HINT, bgl.GL_NICEST)
|
||||
gpu.state.blend_set('ALPHA')
|
||||
|
||||
nodes, links = get_nodes_links(context)
|
||||
|
||||
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
|
||||
|
||||
if mode == "LINK":
|
||||
col_outer = (1.0, 0.2, 0.2, 0.4)
|
||||
col_inner = (0.0, 0.0, 0.0, 0.5)
|
||||
@ -588,24 +583,24 @@ def draw_callback_nodeoutline(self, context, mode):
|
||||
col_inner = (0.0, 0.0, 0.0, 0.5)
|
||||
col_circle_inner = (0.2, 0.2, 0.2, 1.0)
|
||||
|
||||
draw_rounded_node_border(shader, n1, radius=6, colour=col_outer) # outline
|
||||
draw_rounded_node_border(shader, n1, radius=5, colour=col_inner) # inner
|
||||
draw_rounded_node_border(shader, n2, radius=6, colour=col_outer) # outline
|
||||
draw_rounded_node_border(shader, n2, radius=5, colour=col_inner) # inner
|
||||
draw_rounded_node_border(n1, radius=6, colour=col_outer) # outline
|
||||
draw_rounded_node_border(n1, radius=5, colour=col_inner) # inner
|
||||
draw_rounded_node_border(n2, radius=6, colour=col_outer) # outline
|
||||
draw_rounded_node_border(n2, radius=5, colour=col_inner) # inner
|
||||
|
||||
draw_line(m1x, m1y, m2x, m2y, 5, col_outer) # line outline
|
||||
draw_line(m1x, m1y, m2x, m2y, 2, col_inner) # line inner
|
||||
|
||||
# circle outline
|
||||
draw_circle_2d_filled(shader, m1x, m1y, 7, col_outer)
|
||||
draw_circle_2d_filled(shader, m2x, m2y, 7, col_outer)
|
||||
draw_circle_2d_filled(m1x, m1y, 7, col_outer)
|
||||
draw_circle_2d_filled(m2x, m2y, 7, col_outer)
|
||||
|
||||
# circle inner
|
||||
draw_circle_2d_filled(shader, m1x, m1y, 5, col_circle_inner)
|
||||
draw_circle_2d_filled(shader, m2x, m2y, 5, col_circle_inner)
|
||||
draw_circle_2d_filled(m1x, m1y, 5, col_circle_inner)
|
||||
draw_circle_2d_filled(m2x, m2y, 5, col_circle_inner)
|
||||
|
||||
gpu.state.blend_set('NONE')
|
||||
|
||||
bgl.glDisable(bgl.GL_BLEND)
|
||||
bgl.glDisable(bgl.GL_LINE_SMOOTH)
|
||||
def get_active_tree(context):
|
||||
tree = context.space_data.node_tree
|
||||
path = []
|
||||
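The draw_line() change above relies on the POLYLINE_SMOOTH_COLOR built-in shader, which takes the line width and viewport size as uniforms in place of the removed bgl.glLineWidth() call. A minimal sketch of that usage, assuming two 2D region-space endpoints; draw_smooth_line is an illustrative name:

import gpu
from gpu_extras.batch import batch_for_shader

def draw_smooth_line(p1, p2, width, color=(1.0, 1.0, 1.0, 0.7)):
    shader = gpu.shader.from_builtin('POLYLINE_SMOOTH_COLOR')
    batch = batch_for_shader(shader, 'LINE_STRIP',
                             {"pos": (p1, p2), "color": (color, color)})
    shader.bind()
    # Polyline shaders need the viewport size and line width as uniforms.
    shader.uniform_float("viewportSize", gpu.state.viewport_get()[2:])
    shader.uniform_float("lineWidth", width)
    gpu.state.blend_set('ALPHA')
    batch.draw(shader)
    gpu.state.blend_set('NONE')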
@ -832,11 +827,13 @@ def nw_check(context):
|
||||
space = context.space_data
|
||||
valid_trees = ["ShaderNodeTree", "CompositorNodeTree", "TextureNodeTree", "GeometryNodeTree"]
|
||||
|
||||
valid = False
|
||||
if space.type == 'NODE_EDITOR' and space.node_tree is not None and space.tree_type in valid_trees:
|
||||
valid = True
|
||||
if (space.type == 'NODE_EDITOR'
|
||||
and space.node_tree is not None
|
||||
and space.node_tree.library is None
|
||||
and space.tree_type in valid_trees):
|
||||
return True
|
||||
|
||||
return valid
|
||||
return False
|
||||
|
||||
class NWBase:
|
||||
@classmethod
|
||||
@ -2630,7 +2627,7 @@ class NWAddTextureSetup(Operator, NWBase):
|
||||
nodes.active = image_texture_node
|
||||
links.new(image_texture_node.outputs[0], target_input)
|
||||
|
||||
# The mapping setup following this will connect to the firrst input of this image texture.
|
||||
# The mapping setup following this will connect to the first input of this image texture.
|
||||
target_input = image_texture_node.inputs[0]
|
||||
|
||||
node.select = False
|
||||
@ -3419,7 +3416,7 @@ class NWAddSequence(Operator, NWBase, ImportHelper):
|
||||
self.report({'ERROR'}, "No file chosen")
|
||||
return {'CANCELLED'}
|
||||
elif files[0].name and (not filename or not path.exists(directory+filename)):
|
||||
# User has selected multiple files without an active one, or the active one is non-existant
|
||||
# User has selected multiple files without an active one, or the active one is non-existent
|
||||
filename = files[0].name
|
||||
|
||||
if not path.exists(directory+filename):
|
||||
|
@ -4,8 +4,8 @@ bl_info = {
|
||||
"name": "Carver",
|
||||
"author": "Pixivore, Cedric LEPILLER, Ted Milker, Clarkx",
|
||||
"description": "Multiple tools to carve or to create objects",
|
||||
"version": (1, 2, 0),
|
||||
"blender": (2, 80, 0),
|
||||
"version": (1, 2, 1),
|
||||
"blender": (3, 4, 0),
|
||||
"location": "3D View > Ctrl/Shift/x",
|
||||
"warning": "",
|
||||
"doc_url": "{BLENDER_MANUAL_URL}/addons/object/carver.html",
|
||||
|
@ -1,7 +1,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
import bpy
|
||||
import bgl
|
||||
import blf
|
||||
import bpy_extras
|
||||
import numpy as np
|
||||
@ -444,7 +443,7 @@ def draw_callback_px(self, context):
|
||||
mat = ob.matrix_world
|
||||
|
||||
# 50% alpha, 2 pixel width line
|
||||
bgl.glEnable(bgl.GL_BLEND)
|
||||
gpu.state.blend_set('ALPHA')
|
||||
|
||||
bbox = [mat @ Vector(b) for b in ob.bound_box]
|
||||
objBBDiagonal = objDiagonal(self.CurrentSelection[0])
|
||||
@ -497,5 +496,4 @@ def draw_callback_px(self, context):
|
||||
self.ProfileBrush.rotation_mode = 'XYZ'
|
||||
|
||||
# Opengl defaults
|
||||
bgl.glLineWidth(1)
|
||||
bgl.glDisable(bgl.GL_BLEND)
|
||||
gpu.state.blend_set('NONE')
|
||||
|
@ -1,7 +1,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
import bpy
|
||||
import bgl
|
||||
import gpu
|
||||
from gpu_extras.batch import batch_for_shader
|
||||
import math
|
||||
@ -918,25 +917,23 @@ def mini_grid(self, context, color):
|
||||
|
||||
def draw_shader(self, color, alpha, type, coords, size=1, indices=None):
|
||||
""" Create a batch for a draw type """
|
||||
bgl.glEnable(bgl.GL_BLEND)
|
||||
bgl.glEnable(bgl.GL_LINE_SMOOTH)
|
||||
gpu.state.blend_set('ALPHA')
|
||||
if type =='POINTS':
|
||||
bgl.glPointSize(size)
|
||||
gpu.state.program_point_size_set(False)
|
||||
gpu.state.point_size_set(size)
|
||||
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
|
||||
else:
|
||||
bgl.glLineWidth(size)
|
||||
shader = gpu.shader.from_builtin('POLYLINE_UNIFORM_COLOR')
|
||||
shader.uniform_float("viewportSize", gpu.state.viewport_get()[2:])
|
||||
shader.uniform_float("lineWidth", 1.0)
|
||||
|
||||
try:
|
||||
if len(coords[0])>2:
|
||||
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
|
||||
else:
|
||||
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
|
||||
batch = batch_for_shader(shader, type, {"pos": coords}, indices=indices)
|
||||
shader.bind()
|
||||
shader.uniform_float("color", (color[0], color[1], color[2], alpha))
|
||||
batch = batch_for_shader(shader, type, {"pos": coords}, indices=indices)
|
||||
batch.draw(shader)
|
||||
bgl.glLineWidth(1)
|
||||
bgl.glPointSize(1)
|
||||
bgl.glDisable(bgl.GL_LINE_SMOOTH)
|
||||
bgl.glDisable(bgl.GL_BLEND)
|
||||
except:
|
||||
exc_type, exc_value, exc_traceback = sys.exc_info()
|
||||
self.report({'ERROR'}, str(exc_value))
|
||||
|
||||
gpu.state.point_size_set(1.0)
|
||||
gpu.state.blend_set('NONE')
|
||||
|
@ -5,7 +5,10 @@
|
||||
|
||||
import bpy
|
||||
|
||||
from bpy.app.translations import pgettext_tip as tip_
|
||||
from bpy.app.translations import (
|
||||
pgettext_tip as tip_,
|
||||
pgettext_data as data_,
|
||||
)
|
||||
|
||||
|
||||
def image_get(mat):
|
||||
@ -73,7 +76,7 @@ def write_mesh(context, report_cb):
|
||||
name = os.path.basename(bpy.data.filepath)
|
||||
name = os.path.splitext(name)[0]
|
||||
else:
|
||||
name = "untitled"
|
||||
name = data_("untitled")
|
||||
|
||||
# add object name
|
||||
name += f"-{bpy.path.clean_name(obj.name)}"
|
||||
|
@ -164,11 +164,11 @@ class MESH_OT_print3d_check_solid(Operator):
|
||||
)
|
||||
|
||||
info.append(
|
||||
(tip_("Non Manifold Edge: {}").format(
|
||||
(tip_("Non Manifold Edges: {}").format(
|
||||
len(edges_non_manifold)),
|
||||
(bmesh.types.BMEdge,
|
||||
edges_non_manifold)))
|
||||
info.append((tip_("Bad Contig. Edges: {}").format(len(edges_non_contig)), (bmesh.types.BMEdge, edges_non_contig)))
|
||||
info.append((tip_("Bad Contiguous Edges: {}").format(len(edges_non_contig)), (bmesh.types.BMEdge, edges_non_contig)))
|
||||
|
||||
bm.free()
|
||||
|
||||
@ -786,7 +786,7 @@ class MESH_OT_print3d_align_to_xy(Operator):
|
||||
|
||||
if len(skip_invalid) > 0:
|
||||
for name in skip_invalid:
|
||||
print(tip_("Align to XY: Skipping object {}. No faces selected.").format(name))
|
||||
print(tip_("Align to XY: Skipping object {}. No faces selected").format(name))
|
||||
if len(skip_invalid) == 1:
|
||||
self.report({'WARNING'}, tip_("Skipping object {}. No faces selected").format(skip_invalid[0]))
|
||||
else:
|
||||
|
@ -98,7 +98,6 @@ class PoseActionCreator:
|
||||
"""Store the current pose into the given action."""
|
||||
self._store_bone_pose_parameters(dst_action)
|
||||
self._store_animated_parameters(dst_action)
|
||||
self._store_parameters_from_callback(dst_action)
|
||||
|
||||
def _store_bone_pose_parameters(self, dst_action: Action) -> None:
|
||||
"""Store loc/rot/scale/bbone values in the Action."""
|
||||
@ -146,13 +145,6 @@ class PoseActionCreator:
|
||||
dst_fcurve.keyframe_points.insert(self.params.src_frame_nr, value=value)
|
||||
dst_fcurve.update()
|
||||
|
||||
def _store_parameters_from_callback(self, dst_action: Action) -> None:
|
||||
"""Store extra parameters in the pose based on arbitrary callbacks.
|
||||
|
||||
Not implemented yet, needs a proper design & some user stories.
|
||||
"""
|
||||
pass
|
||||
|
||||
def _store_location(self, dst_action: Action, bone_name: str) -> None:
|
||||
"""Store bone location."""
|
||||
self._store_bone_array(dst_action, bone_name, "location", 3)
|
||||
|
@ -125,7 +125,7 @@ def set_axis(mode_pl):
|
||||
mode_pl: Taper Axis Selector variable as input
|
||||
|
||||
Returns:
|
||||
3 Integer Indicies.
|
||||
3 Integer Indices.
|
||||
"""
|
||||
|
||||
order = {
|
||||
|
@ -12,7 +12,7 @@
|
||||
"""This file contains all the Message Strings.
|
||||
|
||||
Note:
|
||||
These strings are called by various programmes in PDT,
|
||||
These strings are called by various programs in PDT,
|
||||
they can be set to suit individual User requirements.
|
||||
|
||||
Args:
|
||||
|
@ -180,7 +180,7 @@ def export_meta(file, metas, tab_write, DEF_MAT_NAME):
|
||||
try:
|
||||
one_material = elems[1].data.materials[
|
||||
0
|
||||
] # lame! - blender can't do enything else.
|
||||
] # lame! - blender can't do anything else.
|
||||
except BaseException as e:
|
||||
print(e.__doc__)
|
||||
print('An exception occurred: {}'.format(e))
|
||||
|
@ -469,7 +469,7 @@ class PovrayColorImageNode(Node, nodes_properties.ObjectNodeTree):
|
||||
("0", "Planar", "Default planar mapping"),
|
||||
("1", "Spherical", "Spherical mapping"),
|
||||
("2", "Cylindrical", "Cylindrical mapping"),
|
||||
("5", "Torroidal", "Torus or donut shaped mapping"),
|
||||
("5", "Toroidal", "Torus or donut shaped mapping"),
|
||||
),
|
||||
default="0",
|
||||
)
|
||||
@ -556,7 +556,7 @@ class PovrayBumpMapNode(Node, nodes_properties.ObjectNodeTree):
|
||||
("0", "Planar", "Default planar mapping"),
|
||||
("1", "Spherical", "Spherical mapping"),
|
||||
("2", "Cylindrical", "Cylindrical mapping"),
|
||||
("5", "Torroidal", "Torus or donut shaped mapping"),
|
||||
("5", "Toroidal", "Torus or donut shaped mapping"),
|
||||
),
|
||||
default="0",
|
||||
)
|
||||
@ -636,7 +636,7 @@ class PovrayImagePatternNode(Node, nodes_properties.ObjectNodeTree):
|
||||
("0", "Planar", "Default planar mapping"),
|
||||
("1", "Spherical", "Spherical mapping"),
|
||||
("2", "Cylindrical", "Cylindrical mapping"),
|
||||
("5", "Torroidal", "Torus or donut shaped mapping"),
|
||||
("5", "Toroidal", "Torus or donut shaped mapping"),
|
||||
),
|
||||
default="0",
|
||||
)
|
||||
|
@ -190,6 +190,8 @@ def register():
|
||||
bpy.utils.register_class(VIEW3D_PT_ui_animation_render)
|
||||
|
||||
wm = bpy.context.window_manager
|
||||
|
||||
if wm.keyconfigs.addon:
|
||||
km = wm.keyconfigs.addon.keymaps.new(name='Screen', space_type='EMPTY')
|
||||
km.keymap_items.new('render.render_screen', 'F12', 'PRESS', shift=True, ctrl=True)
|
||||
|
||||
|
@ -166,6 +166,7 @@ class RigifyPreferences(AddonPreferences):
|
||||
|
||||
def register_feature_sets(self, register):
|
||||
"""Call register or unregister of external feature sets"""
|
||||
self.refresh_installed_feature_sets()
|
||||
for set_name in feature_set_list.get_enabled_modules_names():
|
||||
feature_set_list.call_register_function(set_name, register)
|
||||
|
||||
|
@ -5,19 +5,26 @@ import sys
|
||||
import traceback
|
||||
import collections
|
||||
|
||||
from typing import Optional, TYPE_CHECKING, Collection, List
|
||||
from bpy.types import PoseBone, Bone
|
||||
|
||||
from .utils.errors import MetarigError, RaiseErrorMixin
|
||||
from .utils.naming import random_id
|
||||
from .utils.metaclass import SingletonPluginMetaclass
|
||||
from .utils.rig import list_bone_names_depth_first_sorted, get_rigify_type
|
||||
from .utils.misc import clone_parameters, assign_parameters
|
||||
from .utils.rig import list_bone_names_depth_first_sorted, get_rigify_type, get_rigify_params
|
||||
from .utils.misc import clone_parameters, assign_parameters, ArmatureObject
|
||||
|
||||
from . import base_rig
|
||||
|
||||
from itertools import count
|
||||
|
||||
#=============================================
|
||||
if TYPE_CHECKING:
|
||||
from .rig_ui_template import ScriptGenerator
|
||||
|
||||
|
||||
##############################################
|
||||
# Generator Plugin
|
||||
#=============================================
|
||||
##############################################
|
||||
|
||||
|
||||
class GeneratorPlugin(base_rig.GenerateCallbackHost, metaclass=SingletonPluginMetaclass):
|
||||
@ -39,68 +46,68 @@ class GeneratorPlugin(base_rig.GenerateCallbackHost, metaclass=SingletonPluginMe
|
||||
|
||||
priority = 0
|
||||
|
||||
def __init__(self, generator):
|
||||
def __init__(self, generator: 'BaseGenerator'):
|
||||
self.generator = generator
|
||||
self.obj = generator.obj
|
||||
|
||||
def register_new_bone(self, new_name, old_name=None):
|
||||
def register_new_bone(self, new_name: str, old_name: Optional[str] = None):
|
||||
self.generator.bone_owners[new_name] = None
|
||||
if old_name:
|
||||
self.generator.derived_bones[old_name].add(new_name)
|
||||
|
||||
|
||||
#=============================================
|
||||
##############################################
|
||||
# Rig Substitution Mechanism
|
||||
#=============================================
|
||||
##############################################
|
||||
|
||||
|
||||
class SubstitutionRig(RaiseErrorMixin):
|
||||
"""A proxy rig that replaces itself with one or more different rigs."""
|
||||
|
||||
def __init__(self, generator, pose_bone):
|
||||
def __init__(self, generator: 'BaseGenerator', pose_bone: PoseBone):
|
||||
self.generator = generator
|
||||
|
||||
self.obj = generator.obj
|
||||
self.base_bone = pose_bone.name
|
||||
self.params = pose_bone.rigify_parameters
|
||||
self.params = get_rigify_params(pose_bone)
|
||||
self.params_copy = clone_parameters(self.params)
|
||||
|
||||
def substitute(self):
|
||||
# return [rig1, rig2...]
|
||||
raise NotImplementedException()
|
||||
raise NotImplementedError
|
||||
|
||||
# Utility methods
|
||||
def register_new_bone(self, new_name, old_name=None):
|
||||
def register_new_bone(self, new_name: str, old_name: Optional[str] = None):
|
||||
pass
|
||||
|
||||
def get_params(self, bone_name):
|
||||
return self.obj.pose.bones[bone_name].rigify_parameters
|
||||
def get_params(self, bone_name: str):
|
||||
return get_rigify_params(self.obj.pose.bones[bone_name])
|
||||
|
||||
def assign_params(self, bone_name, param_dict=None, **params):
|
||||
def assign_params(self, bone_name: str, param_dict=None, **params):
|
||||
assign_parameters(self.get_params(bone_name), param_dict, **params)
|
||||
|
||||
def instantiate_rig(self, rig_class, bone_name):
|
||||
def instantiate_rig(self, rig_class: str | type, bone_name: str):
|
||||
if isinstance(rig_class, str):
|
||||
rig_class = self.generator.find_rig_class(rig_class)
|
||||
|
||||
return self.generator.instantiate_rig(rig_class, self.obj.pose.bones[bone_name])
|
||||
|
||||
|
||||
#=============================================
|
||||
##############################################
|
||||
# Legacy Rig Wrapper
|
||||
#=============================================
|
||||
##############################################
|
||||
|
||||
|
||||
class LegacyRig(base_rig.BaseRig):
|
||||
"""Wrapper around legacy style rigs without a common base class"""
|
||||
|
||||
def __init__(self, generator, pose_bone, wrapped_class):
|
||||
def __init__(self, generator: 'BaseGenerator', pose_bone: PoseBone, wrapped_class: type):
|
||||
self.wrapped_rig = None
|
||||
self.wrapped_class = wrapped_class
|
||||
|
||||
super().__init__(generator, pose_bone)
|
||||
|
||||
def find_org_bones(self, pose_bone):
|
||||
def find_org_bones(self, pose_bone: PoseBone):
|
||||
bone_name = pose_bone.name
|
||||
|
||||
if not self.wrapped_rig:
|
||||
@ -163,15 +170,41 @@ class LegacyRig(base_rig.BaseRig):
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
|
||||
#=============================================
|
||||
##############################################
|
||||
# Base Generate Engine
|
||||
#=============================================
|
||||
##############################################
|
||||
|
||||
|
||||
class BaseGenerator:
|
||||
"""Base class for the main generator object. Contains rig and plugin management code."""
|
||||
|
||||
instance = None
|
||||
instance: Optional['BaseGenerator'] = None # static
|
||||
|
||||
context: bpy.types.Context
|
||||
scene: bpy.types.Scene
|
||||
view_layer: bpy.types.ViewLayer
|
||||
layer_collection: bpy.types.LayerCollection
|
||||
collection: bpy.types.Collection
|
||||
|
||||
metarig: ArmatureObject
|
||||
obj: ArmatureObject
|
||||
|
||||
script: 'ScriptGenerator'
|
||||
|
||||
rig_list: List[base_rig.BaseRig]
|
||||
root_rigs: List[base_rig.BaseRig]
|
||||
|
||||
bone_owners: dict[str, Optional[base_rig.BaseRig]]
|
||||
derived_bones: dict[str, set[str]]
|
||||
|
||||
stage: Optional[str]
|
||||
rig_id: str
|
||||
|
||||
widget_collection: bpy.types.Collection
|
||||
use_mirror_widgets: bool
|
||||
old_widget_table: dict[str, bpy.types.Object]
|
||||
new_widget_table: dict[str, bpy.types.Object]
|
||||
widget_mirror_mesh: dict[str, bpy.types.Mesh]
|
||||
|
||||
def __init__(self, context, metarig):
|
||||
self.context = context
|
||||
@ -180,7 +213,6 @@ class BaseGenerator:
|
||||
self.layer_collection = context.layer_collection
|
||||
self.collection = self.layer_collection.collection
|
||||
self.metarig = metarig
|
||||
self.obj = None
|
||||
|
||||
# List of all rig instances
|
||||
self.rig_list = []
|
||||
@ -210,18 +242,16 @@ class BaseGenerator:
|
||||
# Table of renamed ORG bones
|
||||
self.org_rename_table = dict()
|
||||
|
||||
|
||||
def disable_auto_parent(self, bone_name):
|
||||
def disable_auto_parent(self, bone_name: str):
|
||||
"""Prevent automatically parenting the bone to root if parentless."""
|
||||
self.noparent_bones.add(bone_name)
|
||||
|
||||
|
||||
def find_derived_bones(self, bone_name, *, by_owner=False, recursive=True):
|
||||
def find_derived_bones(self, bone_name: str, *, by_owner=False, recursive=True) -> set[str]:
|
||||
"""Find which bones were copied from the specified one."""
|
||||
if by_owner:
|
||||
owner = self.bone_owners.get(bone_name, None)
|
||||
if not owner:
|
||||
return {}
|
||||
return set()
|
||||
|
||||
table = owner.rigify_derived_bones
|
||||
else:
|
||||
@ -231,7 +261,7 @@ class BaseGenerator:
|
||||
result = set()
|
||||
|
||||
def rec(name):
|
||||
for child in table.get(name, {}):
|
||||
for child in table.get(name, []):
|
||||
result.add(child)
|
||||
rec(child)
|
||||
|
||||
@ -239,16 +269,15 @@ class BaseGenerator:
|
||||
|
||||
return result
|
||||
else:
|
||||
return set(table.get(bone_name, {}))
|
||||
return set(table.get(bone_name, []))
|
||||
|
||||
|
||||
def set_layer_group_priority(self, bone_name, layers, priority):
|
||||
def set_layer_group_priority(self, bone_name: str,
|
||||
layers: Collection[bool], priority: float):
|
||||
for i, val in enumerate(layers):
|
||||
if val:
|
||||
self.layer_group_priorities[bone_name][i] = priority
|
||||
|
||||
|
||||
def rename_org_bone(self, old_name, new_name):
|
||||
def rename_org_bone(self, old_name: str, new_name: str) -> str:
|
||||
assert self.stage == 'instantiate'
|
||||
assert old_name == self.org_rename_table.get(old_name, None)
|
||||
assert old_name not in self.bone_owners
|
||||
@ -261,8 +290,8 @@ class BaseGenerator:
|
||||
self.org_rename_table[old_name] = new_name
|
||||
return new_name
|
||||
|
||||
|
||||
def __run_object_stage(self, method_name):
|
||||
def __run_object_stage(self, method_name: str):
|
||||
"""Run a generation stage in Object mode."""
|
||||
assert(self.context.active_object == self.obj)
|
||||
assert(self.obj.mode == 'OBJECT')
|
||||
num_bones = len(self.obj.data.bones)
|
||||
@ -287,8 +316,8 @@ class BaseGenerator:
|
||||
assert(self.obj.mode == 'OBJECT')
|
||||
assert(num_bones == len(self.obj.data.bones))
|
||||
|
||||
|
||||
def __run_edit_stage(self, method_name):
|
||||
def __run_edit_stage(self, method_name: str):
|
||||
"""Run a generation stage in Edit mode."""
|
||||
assert(self.context.active_object == self.obj)
|
||||
assert(self.obj.mode == 'EDIT')
|
||||
num_bones = len(self.obj.data.edit_bones)
|
||||
@ -313,15 +342,12 @@ class BaseGenerator:
|
||||
assert(self.obj.mode == 'EDIT')
|
||||
assert(num_bones == len(self.obj.data.edit_bones))
|
||||
|
||||
|
||||
def invoke_initialize(self):
|
||||
self.__run_object_stage('initialize')
|
||||
|
||||
|
||||
def invoke_prepare_bones(self):
|
||||
self.__run_edit_stage('prepare_bones')
|
||||
|
||||
|
||||
def __auto_register_bones(self, bones, rig, plugin=None):
|
||||
"""Find bones just added and not registered by this rig."""
|
||||
for bone in bones:
|
||||
@ -332,10 +358,10 @@ class BaseGenerator:
|
||||
rig.rigify_new_bones[name] = None
|
||||
|
||||
if not isinstance(rig, LegacyRig):
|
||||
print("WARNING: rig %s didn't register bone %s\n" % (self.describe_rig(rig), name))
|
||||
print(f"WARNING: rig {self.describe_rig(rig)} "
|
||||
f"didn't register bone {name}\n")
|
||||
else:
|
||||
print("WARNING: plugin %s didn't register bone %s\n" % (plugin, name))
|
||||
|
||||
print(f"WARNING: plugin {plugin} didn't register bone {name}\n")
|
||||
|
||||
def invoke_generate_bones(self):
|
||||
assert(self.context.active_object == self.obj)
|
||||
@ -363,36 +389,28 @@ class BaseGenerator:
|
||||
|
||||
self.__auto_register_bones(self.obj.data.edit_bones, None, plugin=self.plugin_list[i])
|
||||
|
||||
|
||||
def invoke_parent_bones(self):
|
||||
self.__run_edit_stage('parent_bones')
|
||||
|
||||
|
||||
def invoke_configure_bones(self):
|
||||
self.__run_object_stage('configure_bones')
|
||||
|
||||
|
||||
def invoke_preapply_bones(self):
|
||||
self.__run_object_stage('preapply_bones')
|
||||
|
||||
|
||||
def invoke_apply_bones(self):
|
||||
self.__run_edit_stage('apply_bones')
|
||||
|
||||
|
||||
def invoke_rig_bones(self):
|
||||
self.__run_object_stage('rig_bones')
|
||||
|
||||
|
||||
def invoke_generate_widgets(self):
|
||||
self.__run_object_stage('generate_widgets')
|
||||
|
||||
|
||||
def invoke_finalize(self):
|
||||
self.__run_object_stage('finalize')
|
||||
|
||||
|
||||
def instantiate_rig(self, rig_class, pose_bone):
|
||||
def instantiate_rig(self, rig_class: type, pose_bone: PoseBone) -> base_rig.BaseRig:
|
||||
assert not issubclass(rig_class, SubstitutionRig)
|
||||
|
||||
if issubclass(rig_class, base_rig.BaseRig):
|
||||
@ -400,12 +418,14 @@ class BaseGenerator:
|
||||
else:
|
||||
return LegacyRig(self, pose_bone, rig_class)
|
||||
|
||||
def find_rig_class(self, rig_type: str) -> type:
|
||||
raise NotImplementedError
|
||||
|
||||
def instantiate_rig_by_type(self, rig_type, pose_bone):
|
||||
def instantiate_rig_by_type(self, rig_type: str, pose_bone: PoseBone):
|
||||
return self.instantiate_rig(self.find_rig_class(rig_type), pose_bone)
|
||||
|
||||
|
||||
def describe_rig(self, rig):
|
||||
# noinspection PyMethodMayBeStatic
|
||||
def describe_rig(self, rig: base_rig.BaseRig) -> str:
|
||||
base_bone = rig.base_bone
|
||||
|
||||
if isinstance(rig, LegacyRig):
|
||||
@ -413,7 +433,6 @@ class BaseGenerator:
|
||||
|
||||
return "%s (%s)" % (rig.__class__, base_bone)
|
||||
|
||||
|
||||
def __create_rigs(self, bone_name, halt_on_missing):
|
||||
"""Recursively walk bones and create rig instances."""
|
||||
|
||||
@ -440,12 +459,14 @@ class BaseGenerator:
|
||||
if org_name in self.bone_owners:
|
||||
old_rig = self.describe_rig(self.bone_owners[org_name])
|
||||
new_rig = self.describe_rig(rig)
|
||||
print("CONFLICT: bone %s is claimed by rigs %s and %s\n" % (org_name, old_rig, new_rig))
|
||||
print(f"CONFLICT: bone {org_name} is claimed by rigs "
|
||||
f"{old_rig} and {new_rig}\n")
|
||||
|
||||
self.bone_owners[org_name] = rig
|
||||
|
||||
except ImportError:
|
||||
message = "Rig Type Missing: python module for type '%s' not found (bone: %s)" % (rig_type, bone_name)
|
||||
message = f"Rig Type Missing: python module for type '{rig_type}' "\
|
||||
f"not found (bone: {bone_name})"
|
||||
if halt_on_missing:
|
||||
raise MetarigError(message)
|
||||
else:
|
||||
@ -453,8 +474,8 @@ class BaseGenerator:
|
||||
print('print_exc():')
|
||||
traceback.print_exc(file=sys.stdout)
|
||||
|
||||
|
||||
def __build_rig_tree_rec(self, bone, current_rig, handled):
|
||||
def __build_rig_tree_rec(self, bone: Bone, current_rig: Optional[base_rig.BaseRig],
|
||||
handled: dict[base_rig.BaseRig, str]):
|
||||
"""Recursively walk bones and connect rig instances into a tree."""
|
||||
|
||||
rig = self.bone_owners.get(bone.name)
|
||||
@ -474,8 +495,8 @@ class BaseGenerator:
|
||||
handled[rig] = bone.name
|
||||
|
||||
elif rig.rigify_parent is not current_rig:
|
||||
raise MetarigError("CONFLICT: bone %s owned by rig %s has different parent rig from %s\n" %
|
||||
(bone.name, rig.base_bone, handled[rig]))
|
||||
raise MetarigError("CONFLICT: bone {bone.name} owned by rig {rig.base_bone} "
|
||||
f"has different parent rig from {handled[rig]}")
|
||||
|
||||
current_rig = rig
|
||||
else:
|
||||
@ -487,7 +508,6 @@ class BaseGenerator:
|
||||
for child in bone.children:
|
||||
self.__build_rig_tree_rec(child, current_rig, handled)
|
||||
|
||||
|
||||
def instantiate_rig_tree(self, halt_on_missing=False):
|
||||
"""Create rig instances and connect them into a tree."""
|
||||
|
||||
|
@ -2,17 +2,24 @@
|
||||
|
||||
import collections
|
||||
|
||||
from bpy.types import PoseBone
|
||||
from typing import TYPE_CHECKING, Any, Callable, Optional
|
||||
|
||||
from .utils.errors import RaiseErrorMixin
|
||||
from .utils.bones import BoneDict, BoneUtilityMixin
|
||||
from .utils.mechanism import MechanismUtilityMixin
|
||||
from .utils.metaclass import BaseStagedClass
|
||||
from .utils.misc import ArmatureObject
|
||||
from .utils.rig import get_rigify_params
|
||||
|
||||
# Only export certain symbols via 'from base_rig import *'
|
||||
__all__ = ['BaseRig', 'stage']
|
||||
if TYPE_CHECKING:
|
||||
from .base_generate import BaseGenerator
|
||||
from .rig_ui_template import ScriptGenerator
|
||||
|
||||
#=============================================
|
||||
|
||||
##############################################
|
||||
# Base Rig
|
||||
#=============================================
|
||||
##############################################
|
||||
|
||||
class GenerateCallbackHost(BaseStagedClass, define_stages=True):
|
||||
"""
|
||||
@ -138,6 +145,21 @@ class GenerateCallbackHost(BaseStagedClass, define_stages=True):
|
||||
|
||||
|
||||
class BaseRig(GenerateCallbackHost, RaiseErrorMixin, BoneUtilityMixin, MechanismUtilityMixin):
|
||||
generator: 'BaseGenerator'
|
||||
|
||||
obj: ArmatureObject
|
||||
script: 'ScriptGenerator'
|
||||
base_bone: str
|
||||
params: Any
|
||||
bones: BoneDict
|
||||
|
||||
rigify_parent: Optional['BaseRig']
|
||||
rigify_children: list['BaseRig']
|
||||
rigify_org_bones: set[str]
|
||||
rigify_child_bones: set[str]
|
||||
rigify_new_bones: dict[str, Optional[str]]
|
||||
rigify_derived_bones: dict[str, set[str]]
|
||||
|
||||
"""
|
||||
Base class for all rigs.
|
||||
|
||||
@ -151,13 +173,13 @@ class BaseRig(GenerateCallbackHost, RaiseErrorMixin, BoneUtilityMixin, Mechanism
|
||||
and the common generator object. The generation process is also
|
||||
split into multiple stages.
|
||||
"""
|
||||
def __init__(self, generator, pose_bone):
|
||||
def __init__(self, generator: 'BaseGenerator', pose_bone: PoseBone):
|
||||
self.generator = generator
|
||||
|
||||
self.obj = generator.obj
|
||||
self.script = generator.script
|
||||
self.base_bone = pose_bone.name
|
||||
self.params = pose_bone.rigify_parameters
|
||||
self.params = get_rigify_params(pose_bone)
|
||||
|
||||
# Collection of bone names for use in implementing the rig
|
||||
self.bones = BoneDict(
|
||||
@ -194,7 +216,7 @@ class BaseRig(GenerateCallbackHost, RaiseErrorMixin, BoneUtilityMixin, Mechanism
|
||||
###########################################################
|
||||
# Bone ownership
|
||||
|
||||
def find_org_bones(self, pose_bone):
|
||||
def find_org_bones(self, pose_bone: PoseBone) -> str | list[str] | BoneDict:
|
||||
"""
|
||||
Select bones directly owned by the rig. Returning the
|
||||
same bone from multiple rigs is an error.
|
||||
@ -234,9 +256,9 @@ class BaseRig(GenerateCallbackHost, RaiseErrorMixin, BoneUtilityMixin, Mechanism
|
||||
"""
|
||||
|
||||
|
||||
#=============================================
|
||||
##############################################
|
||||
# Rig Utility
|
||||
#=============================================
|
||||
##############################################
|
||||
|
||||
|
||||
class RigUtility(BoneUtilityMixin, MechanismUtilityMixin):
|
||||
@ -270,13 +292,21 @@ class RigComponent(LazyRigComponent):
|
||||
self.enable_component()
|
||||
|
||||
|
||||
#=============================================
|
||||
##############################################
|
||||
# Rig Stage Decorators
|
||||
#=============================================
|
||||
##############################################
|
||||
|
||||
# Generate @stage.<...> decorators for all valid stages.
|
||||
@GenerateCallbackHost.stage_decorator_container
|
||||
class stage:
|
||||
pass
|
||||
|
||||
# Generate @stage.<...> decorators for all valid stages
|
||||
for name, decorator in GenerateCallbackHost.make_stage_decorators():
|
||||
setattr(stage, name, decorator)
|
||||
# Declare stages for auto-completion - doesn't affect execution.
|
||||
initialize: Callable
|
||||
prepare_bones: Callable
|
||||
generate_bones: Callable
|
||||
parent_bones: Callable
|
||||
configure_bones: Callable
|
||||
preapply_bones: Callable
|
||||
apply_bones: Callable
|
||||
rig_bones: Callable
|
||||
generate_widgets: Callable
|
||||
finalize: Callable
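# Illustrative sketch, not part of this patch: a minimal rig built on the BaseRig
# class and the @stage decorators declared above. The bone names are hypothetical,
# and copy_bone()/set_bone_parent() are assumed to be the helpers provided by
# BoneUtilityMixin; real rig implementations live under rigify/rigs/.
class ExampleRig(BaseRig):
    def find_org_bones(self, pose_bone):
        # Claim only the bone this rig was instantiated on.
        return pose_bone.name

    @stage.generate_bones
    def make_control_bone(self):
        # Runs in edit mode; self.bones.org is assumed to hold find_org_bones()'s result.
        self.bones.ctrl = self.copy_bone(self.bones.org, 'example_ctrl')

    @stage.parent_bones
    def parent_control_bone(self):
        self.set_bone_parent(self.bones.ctrl, self.bones.org)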
|
||||
|
@ -7,43 +7,48 @@ import time
|
||||
from .utils.errors import MetarigError
|
||||
from .utils.bones import new_bone
|
||||
from .utils.layers import ORG_LAYER, MCH_LAYER, DEF_LAYER, ROOT_LAYER
|
||||
from .utils.naming import ORG_PREFIX, MCH_PREFIX, DEF_PREFIX, ROOT_NAME, make_original_name, change_name_side, get_name_side, Side
|
||||
from .utils.naming import (ORG_PREFIX, MCH_PREFIX, DEF_PREFIX, ROOT_NAME, make_original_name,
|
||||
change_name_side, get_name_side, Side)
|
||||
from .utils.widgets import WGT_PREFIX
|
||||
from .utils.widgets_special import create_root_widget
|
||||
from .utils.mechanism import refresh_all_drivers
|
||||
from .utils.misc import gamma_correct, select_object
|
||||
from .utils.collections import ensure_collection, list_layer_collections, filter_layer_collections_by_object
|
||||
from .utils.rig import get_rigify_type
|
||||
from .utils.misc import gamma_correct, select_object, ArmatureObject, verify_armature_obj
|
||||
from .utils.collections import (ensure_collection, list_layer_collections,
|
||||
filter_layer_collections_by_object)
|
||||
from .utils.rig import get_rigify_type, get_rigify_layers
|
||||
|
||||
from . import base_generate
|
||||
from . import rig_ui_template
|
||||
from . import rig_lists
|
||||
|
||||
|
||||
RIG_MODULE = "rigs"
|
||||
|
||||
|
||||
class Timer:
|
||||
def __init__(self):
|
||||
self.timez = time.time()
|
||||
self.time_val = time.time()
|
||||
|
||||
def tick(self, string):
|
||||
t = time.time()
|
||||
print(string + "%.3f" % (t - self.timez))
|
||||
self.timez = t
|
||||
print(string + "%.3f" % (t - self.time_val))
|
||||
self.time_val = t
|
||||
|
||||
|
||||
class Generator(base_generate.BaseGenerator):
|
||||
usable_collections: list[bpy.types.LayerCollection]
|
||||
action_layers: ActionLayerBuilder
|
||||
|
||||
def __init__(self, context, metarig):
|
||||
super().__init__(context, metarig)
|
||||
|
||||
self.id_store = context.window_manager
|
||||
|
||||
|
||||
def find_rig_class(self, rig_type):
|
||||
rig_module = rig_lists.rigs[rig_type]["module"]
|
||||
|
||||
return rig_module.Rig
|
||||
|
||||
|
||||
def __switch_to_usable_collection(self, obj, fallback=False):
|
||||
collections = filter_layer_collections_by_object(self.usable_collections, obj)
|
||||
|
||||
@ -54,8 +59,7 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
self.collection = self.layer_collection.collection
|
||||
|
||||
|
||||
def ensure_rig_object(self) -> bpy.types.Object:
|
||||
def ensure_rig_object(self) -> ArmatureObject:
|
||||
"""Check if the generated rig already exists, so we can
|
||||
regenerate in the same object. If not, create a new
|
||||
object to generate the rig in.
|
||||
@ -63,10 +67,14 @@ class Generator(base_generate.BaseGenerator):
|
||||
print("Fetch rig.")
|
||||
meta_data = self.metarig.data
|
||||
|
||||
target_rig = meta_data.rigify_target_rig
|
||||
target_rig: ArmatureObject = meta_data.rigify_target_rig
|
||||
|
||||
if not target_rig:
|
||||
if meta_data.rigify_rig_basename:
|
||||
rig_new_name = meta_data.rigify_rig_basename
|
||||
# noinspection PyUnresolvedReferences
|
||||
rig_basename = meta_data.rigify_rig_basename
|
||||
|
||||
if rig_basename:
|
||||
rig_new_name = rig_basename
|
||||
elif "metarig" in self.metarig.name:
|
||||
rig_new_name = self.metarig.name.replace("metarig", "rig")
|
||||
elif "META" in self.metarig.name:
|
||||
@ -74,7 +82,8 @@ class Generator(base_generate.BaseGenerator):
|
||||
else:
|
||||
rig_new_name = "RIG-" + self.metarig.name
|
||||
|
||||
target_rig = bpy.data.objects.new(rig_new_name, bpy.data.armatures.new(rig_new_name))
|
||||
arm = bpy.data.armatures.new(rig_new_name)
|
||||
target_rig = verify_armature_obj(bpy.data.objects.new(rig_new_name, arm))
|
||||
target_rig.display_type = 'WIRE'
|
||||
|
||||
# If the object is already added to the scene, switch to its collection
|
||||
@ -94,8 +103,7 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
return target_rig
|
||||
|
||||
|
||||
def __unhide_rig_object(self, obj):
|
||||
def __unhide_rig_object(self, obj: bpy.types.Object):
|
||||
# Ensure the object is visible and selectable
|
||||
obj.hide_set(False, view_layer=self.view_layer)
|
||||
obj.hide_viewport = False
|
||||
@ -111,13 +119,13 @@ class Generator(base_generate.BaseGenerator):
|
||||
if self.layer_collection not in self.usable_collections:
|
||||
raise Exception('Could not generate: Could not find a usable collection.')
|
||||
|
||||
|
||||
def __find_legacy_collection(self) -> bpy.types.Collection:
|
||||
"""For backwards comp, matching by name to find a legacy collection.
|
||||
(For before there was a Widget Collection PointerProperty)
|
||||
"""
|
||||
wgts_group_name = "WGTS_" + self.obj.name
|
||||
old_collection = bpy.data.collections.get(wgts_group_name)
|
||||
# noinspection SpellCheckingInspection
|
||||
widgets_group_name = "WGTS_" + self.obj.name
|
||||
old_collection = bpy.data.collections.get(widgets_group_name)
|
||||
|
||||
if old_collection and old_collection.library:
|
||||
old_collection = None
|
||||
@ -126,13 +134,14 @@ class Generator(base_generate.BaseGenerator):
|
||||
# Update the old 'Widgets' collection
|
||||
legacy_collection = bpy.data.collections.get('Widgets')
|
||||
|
||||
if legacy_collection and wgts_group_name in legacy_collection.objects and not legacy_collection.library:
|
||||
legacy_collection.name = wgts_group_name
|
||||
if legacy_collection and widgets_group_name in legacy_collection.objects\
|
||||
and not legacy_collection.library:
|
||||
legacy_collection.name = widgets_group_name
|
||||
old_collection = legacy_collection
|
||||
|
||||
if old_collection:
|
||||
# Rename the collection
|
||||
old_collection.name = wgts_group_name
|
||||
old_collection.name = widgets_group_name
|
||||
|
||||
return old_collection
|
||||
|
||||
@ -142,11 +151,14 @@ class Generator(base_generate.BaseGenerator):
|
||||
if not self.widget_collection:
|
||||
self.widget_collection = self.__find_legacy_collection()
|
||||
if not self.widget_collection:
|
||||
wgts_group_name = "WGTS_" + self.obj.name.replace("RIG-", "")
|
||||
self.widget_collection = ensure_collection(self.context, wgts_group_name, hidden=True)
|
||||
# noinspection SpellCheckingInspection
|
||||
widgets_group_name = "WGTS_" + self.obj.name.replace("RIG-", "")
|
||||
self.widget_collection = ensure_collection(
|
||||
self.context, widgets_group_name, hidden=True)
|
||||
|
||||
self.metarig.data.rigify_widgets_collection = self.widget_collection
|
||||
|
||||
# noinspection PyUnresolvedReferences
|
||||
self.use_mirror_widgets = self.metarig.data.rigify_mirror_widgets
|
||||
|
||||
# Build tables for existing widgets
|
||||
@ -154,6 +166,7 @@ class Generator(base_generate.BaseGenerator):
|
||||
self.new_widget_table = {}
|
||||
self.widget_mirror_mesh = {}
|
||||
|
||||
# noinspection PyUnresolvedReferences
|
||||
if self.metarig.data.rigify_force_widget_update:
|
||||
# Remove widgets if force update is set
|
||||
for obj in list(self.widget_collection.objects):
|
||||
@ -176,16 +189,17 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
# If the mesh name is the same as the object, rename it too
|
||||
if widget.data.name == old_data_name:
|
||||
widget.data.name = change_name_side(widget.name, get_name_side(widget.data.name))
|
||||
widget.data.name = change_name_side(
|
||||
widget.name, get_name_side(widget.data.name))
|
||||
|
||||
# Find meshes for mirroring
|
||||
if self.use_mirror_widgets:
|
||||
for bone_name, widget in self.old_widget_table.items():
|
||||
mid_name = change_name_side(bone_name, Side.MIDDLE)
|
||||
if bone_name != mid_name:
|
||||
assert isinstance(widget.data, bpy.types.Mesh)
|
||||
self.widget_mirror_mesh[mid_name] = widget.data
|
||||
|
||||
|
||||
def __duplicate_rig(self):
|
||||
obj = self.obj
|
||||
metarig = self.metarig
|
||||
@ -203,7 +217,7 @@ class Generator(base_generate.BaseGenerator):
|
||||
bpy.ops.object.duplicate()
|
||||
|
||||
# Rename org bones in the temporary object
|
||||
temp_obj = context.view_layer.objects.active
|
||||
temp_obj = verify_armature_obj(context.view_layer.objects.active)
|
||||
|
||||
assert temp_obj and temp_obj != metarig
|
||||
|
||||
@ -230,8 +244,8 @@ class Generator(base_generate.BaseGenerator):
|
||||
for track in obj.animation_data.nla_tracks:
|
||||
obj.animation_data.nla_tracks.remove(track)
|
||||
|
||||
|
||||
def __freeze_driver_vars(self, obj):
|
||||
@staticmethod
|
||||
def __freeze_driver_vars(obj: bpy.types.Object):
|
||||
if obj.animation_data:
|
||||
# Freeze drivers referring to custom properties
|
||||
for d in obj.animation_data.drivers:
|
||||
@ -239,13 +253,12 @@ class Generator(base_generate.BaseGenerator):
|
||||
for tar in var.targets:
|
||||
# If a custom property
|
||||
if var.type == 'SINGLE_PROP' \
|
||||
and re.match(r'^pose.bones\["[^"\]]*"\]\["[^"\]]*"\]$', tar.data_path):
|
||||
and re.match(r'^pose.bones\["[^"\]]*"]\["[^"\]]*"]$',
|
||||
tar.data_path):
|
||||
tar.data_path = "RIGIFY-" + tar.data_path
|
||||
|
||||
|
||||
def __rename_org_bones(self, obj):
|
||||
#----------------------------------
|
||||
# Make a list of the original bones so we can keep track of them.
|
||||
def __rename_org_bones(self, obj: ArmatureObject):
|
||||
# Make a list of the original bones, so we can keep track of them.
|
||||
original_bones = [bone.name for bone in obj.data.bones]
|
||||
|
||||
# Add the ORG_PREFIX to the original bones.
|
||||
@ -267,7 +280,6 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
self.original_bones = original_bones
|
||||
|
||||
|
||||
def __create_root_bone(self):
|
||||
obj = self.obj
|
||||
metarig = self.metarig
|
||||
@ -289,7 +301,6 @@ class Generator(base_generate.BaseGenerator):
|
||||
self.bone_owners[root_bone] = None
|
||||
self.noparent_bones.add(root_bone)
|
||||
|
||||
|
||||
def __parent_bones_to_root(self):
|
||||
eb = self.obj.data.edit_bones
|
||||
|
||||
@ -301,7 +312,6 @@ class Generator(base_generate.BaseGenerator):
|
||||
bone.use_connect = False
|
||||
bone.parent = eb[self.root_bone]
|
||||
|
||||
|
||||
def __lock_transforms(self):
|
||||
# Lock transforms on all non-control bones
|
||||
r = re.compile("[A-Z][A-Z][A-Z]-")
|
||||
@ -312,15 +322,14 @@ class Generator(base_generate.BaseGenerator):
|
||||
pb.lock_rotation_w = True
|
||||
pb.lock_scale = (True, True, True)
|
||||
|
||||
|
||||
def __assign_layers(self):
|
||||
pbones = self.obj.pose.bones
|
||||
pose_bones = self.obj.pose.bones
|
||||
|
||||
pbones[self.root_bone].bone.layers = ROOT_LAYER
|
||||
pose_bones[self.root_bone].bone.layers = ROOT_LAYER
|
||||
|
||||
# Every bone that has a name starting with "DEF-" make deforming. All the
|
||||
# others make non-deforming.
|
||||
for pbone in pbones:
|
||||
for pbone in pose_bones:
|
||||
bone = pbone.bone
|
||||
name = bone.name
|
||||
layers = None
|
||||
@ -345,7 +354,6 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
bone.bbone_x = bone.bbone_z = bone.length * 0.05
|
||||
|
||||
|
||||
def __restore_driver_vars(self):
|
||||
obj = self.obj
|
||||
|
||||
@ -355,16 +363,15 @@ class Generator(base_generate.BaseGenerator):
|
||||
for v in d.driver.variables:
|
||||
for tar in v.targets:
|
||||
if tar.data_path.startswith("RIGIFY-"):
|
||||
temp, bone, prop = tuple([x.strip('"]') for x in tar.data_path.split('["')])
|
||||
if bone in obj.data.bones \
|
||||
and prop in obj.pose.bones[bone].keys():
|
||||
temp, bone, prop = tuple(
|
||||
[x.strip('"]') for x in tar.data_path.split('["')])
|
||||
if bone in obj.data.bones and prop in obj.pose.bones[bone].keys():
|
||||
tar.data_path = tar.data_path[7:]
|
||||
else:
|
||||
org_name = make_original_name(bone)
|
||||
org_name = self.org_rename_table.get(org_name, org_name)
|
||||
tar.data_path = 'pose.bones["%s"]["%s"]' % (org_name, prop)
|
||||
|
||||
|
||||
def __assign_widgets(self):
|
||||
obj_table = {obj.name: obj for obj in self.scene.objects}
|
||||
|
||||
@ -382,10 +389,9 @@ class Generator(base_generate.BaseGenerator):
|
||||
if wgt_name in obj_table:
|
||||
bone.custom_shape = obj_table[wgt_name]
|
||||
|
||||
|
||||
def __compute_visible_layers(self):
|
||||
# Reveal all the layers with control bones on them
|
||||
vis_layers = [False for n in range(0, 32)]
|
||||
vis_layers = [False for _ in range(0, 32)]
|
||||
|
||||
for bone in self.obj.data.bones:
|
||||
for i in range(0, 32):
|
||||
@ -396,20 +402,18 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
self.obj.data.layers = vis_layers
|
||||
|
||||
|
||||
def generate(self):
|
||||
context = self.context
|
||||
metarig = self.metarig
|
||||
scene = self.scene
|
||||
id_store = self.id_store
|
||||
view_layer = self.view_layer
|
||||
t = Timer()
|
||||
|
||||
self.usable_collections = list_layer_collections(view_layer.layer_collection, selectable=True)
|
||||
self.usable_collections = list_layer_collections(
|
||||
view_layer.layer_collection, selectable=True)
|
||||
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
# Create/find the rig object and set it up
|
||||
self.obj = obj = self.ensure_rig_object()
|
||||
|
||||
@ -426,19 +430,21 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
select_object(context, obj, deselect_all=True)
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
# Create Widget Collection
|
||||
self.ensure_widget_collection()
|
||||
|
||||
t.tick("Create main WGTS: ")
|
||||
t.tick("Create widgets collection: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
# Get parented objects to restore later
|
||||
childs = {} # {object: bone}
|
||||
for child in obj.children:
|
||||
childs[child] = child.parent_bone
|
||||
|
||||
#------------------------------------------
|
||||
child_parent_bones = {} # {object: bone}
|
||||
|
||||
for child in obj.children:
|
||||
child_parent_bones[child] = child.parent_bone
|
||||
|
||||
###########################################
|
||||
# Copy bones from metarig to obj (adds ORG_PREFIX)
|
||||
self.__duplicate_rig()
|
||||
|
||||
@ -446,34 +452,34 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
t.tick("Duplicate rig: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
# Put the rig_name in the armature custom properties
|
||||
obj.data["rig_id"] = self.rig_id
|
||||
|
||||
self.script = rig_ui_template.ScriptGenerator(self)
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
self.instantiate_rig_tree()
|
||||
|
||||
t.tick("Instantiate rigs: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
self.invoke_initialize()
|
||||
|
||||
t.tick("Initialize rigs: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='EDIT')
|
||||
|
||||
self.invoke_prepare_bones()
|
||||
|
||||
t.tick("Prepare bones: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
bpy.ops.object.mode_set(mode='EDIT')
|
||||
|
||||
@ -483,7 +489,7 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
t.tick("Generate bones: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
bpy.ops.object.mode_set(mode='EDIT')
|
||||
|
||||
@ -493,35 +499,35 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
t.tick("Parent bones: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
self.invoke_configure_bones()
|
||||
|
||||
t.tick("Configure bones: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
self.invoke_preapply_bones()
|
||||
|
||||
t.tick("Preapply bones: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='EDIT')
|
||||
|
||||
self.invoke_apply_bones()
|
||||
|
||||
t.tick("Apply bones: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
self.invoke_rig_bones()
|
||||
|
||||
t.tick("Rig bones: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
self.invoke_generate_widgets()
|
||||
@ -531,7 +537,7 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
t.tick("Generate widgets: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
self.__lock_transforms()
|
||||
@ -541,14 +547,14 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
t.tick("Assign layers: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
self.invoke_finalize()
|
||||
|
||||
t.tick("Finalize: ")
|
||||
|
||||
#------------------------------------------
|
||||
###########################################
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
self.__assign_widgets()
|
||||
@ -561,13 +567,14 @@ class Generator(base_generate.BaseGenerator):
|
||||
|
||||
t.tick("The rest: ")
|
||||
|
||||
#----------------------------------
|
||||
# Deconfigure
|
||||
###########################################
|
||||
# Restore state
|
||||
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
obj.data.pose_position = 'POSE'
|
||||
|
||||
# Restore parent to bones
|
||||
for child, sub_parent in childs.items():
|
||||
for child, sub_parent in child_parent_bones.items():
|
||||
if sub_parent in obj.pose.bones:
|
||||
mat = child.matrix_world.copy()
|
||||
child.parent_bone = sub_parent
|
||||
@ -576,15 +583,18 @@ class Generator(base_generate.BaseGenerator):
|
||||
# Clear any transient errors in drivers
|
||||
refresh_all_drivers()
|
||||
|
||||
#----------------------------------
|
||||
###########################################
|
||||
# Execute the finalize script
|
||||
|
||||
if metarig.data.rigify_finalize_script:
|
||||
# noinspection PyUnresolvedReferences
|
||||
finalize_script = metarig.data.rigify_finalize_script
|
||||
|
||||
if finalize_script:
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
exec(metarig.data.rigify_finalize_script.as_string(), {})
|
||||
exec(finalize_script.as_string(), {})
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
|
||||
#----------------------------------
|
||||
###########################################
|
||||
# Restore active collection
|
||||
view_layer.active_layer_collection = self.layer_collection
|
||||
|
||||
@ -620,26 +630,24 @@ def generate_rig(context, metarig):
|
||||
base_generate.BaseGenerator.instance = None
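# Usage sketch (illustrative, not part of this patch): regenerating the rig for the
# active metarig object boils down to one generate_rig() call; the actual Generate
# operator wraps this with additional validation and error reporting.
def example_regenerate_active_metarig(context):
    metarig = context.view_layer.objects.active
    assert metarig and metarig.type == 'ARMATURE'
    generate_rig(context, metarig)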
|
||||
|
||||
|
||||
def create_selection_set_for_rig_layer(
|
||||
rig: bpy.types.Object,
|
||||
set_name: str,
|
||||
layer_idx: int
|
||||
) -> None:
|
||||
def create_selection_set_for_rig_layer(rig: ArmatureObject, set_name: str, layer_idx: int) -> None:
|
||||
"""Create a single selection set on a rig.
|
||||
|
||||
The set will contain all bones on the rig layer with the given index.
|
||||
"""
|
||||
selset = rig.selection_sets.add()
|
||||
selset.name = set_name
|
||||
# noinspection PyUnresolvedReferences
|
||||
sel_set = rig.selection_sets.add()
|
||||
sel_set.name = set_name
|
||||
|
||||
for b in rig.pose.bones:
|
||||
if not b.bone.layers[layer_idx] or b.name in selset.bone_ids:
|
||||
if not b.bone.layers[layer_idx] or b.name in sel_set.bone_ids:
|
||||
continue
|
||||
|
||||
bone_id = selset.bone_ids.add()
|
||||
bone_id = sel_set.bone_ids.add()
|
||||
bone_id.name = b.name
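# Illustrative call (hypothetical set name and layer index): given a generated rig
# object, collect every bone on rig layer 5 into a "Fingers" selection set.
def example_make_finger_selection_set(rig: ArmatureObject):
    create_selection_set_for_rig_layer(rig, "Fingers", 5)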
|
||||
|
||||
def create_selection_sets(obj, metarig):
|
||||
|
||||
def create_selection_sets(obj: ArmatureObject, metarig: ArmatureObject):
|
||||
"""Create selection sets if the Selection Sets addon is enabled.
|
||||
|
||||
Whether a selection set for a rig layer is created is controlled in the
|
||||
@ -650,17 +658,20 @@ def create_selection_sets(obj, metarig):
|
||||
and 'bone_selection_sets' not in bpy.context.preferences.addons:
|
||||
return
|
||||
|
||||
# noinspection PyUnresolvedReferences
|
||||
obj.selection_sets.clear()
|
||||
|
||||
for i, name in enumerate(metarig.data.rigify_layers.keys()):
|
||||
if name == '' or not metarig.data.rigify_layers[i].selset:
|
||||
rigify_layers = get_rigify_layers(metarig.data)
|
||||
|
||||
for i, layer in enumerate(rigify_layers):
|
||||
if layer.name == '' or not layer.selset:
|
||||
continue
|
||||
|
||||
create_selection_set_for_rig_layer(obj, name, i)
|
||||
create_selection_set_for_rig_layer(obj, layer.name, i)
|
||||
|
||||
|
||||
# noinspection PyDefaultArgument
|
||||
def create_bone_groups(obj, metarig, priorities={}):
|
||||
|
||||
bpy.ops.object.mode_set(mode='OBJECT')
|
||||
pb = obj.pose.bones
|
||||
layers = metarig.data.rigify_layers
|
||||
@ -668,10 +679,10 @@ def create_bone_groups(obj, metarig, priorities={}):
|
||||
dummy = {}
|
||||
|
||||
# Create BGs
|
||||
for l in layers:
|
||||
if l.group == 0:
|
||||
for layer in layers:
|
||||
if layer.group == 0:
|
||||
continue
|
||||
g_id = l.group - 1
|
||||
g_id = layer.group - 1
|
||||
name = groups[g_id].name
|
||||
if name not in obj.pose.bone_groups.keys():
|
||||
bg = obj.pose.bone_groups.new(name=name)
|
||||
@ -682,9 +693,9 @@ def create_bone_groups(obj, metarig, priorities={}):
|
||||
|
||||
for b in pb:
|
||||
try:
|
||||
prios = priorities.get(b.name, dummy)
|
||||
bone_priorities = priorities.get(b.name, dummy)
|
||||
enabled = [i for i, v in enumerate(b.bone.layers) if v]
|
||||
layer_index = max(enabled, key=lambda i: prios.get(i, 0))
|
||||
layer_index = max(enabled, key=lambda i: bone_priorities.get(i, 0))
|
||||
except ValueError:
|
||||
continue
|
||||
if layer_index > len(layers) - 1: # bone is on reserved layers
|
||||
@ -703,18 +714,3 @@ def get_xy_spread(bones):
|
||||
y_max = max((y_max, abs(b.head[1]), abs(b.tail[1])))
|
||||
|
||||
return max((x_max, y_max))
|
||||
|
||||
|
||||
def param_matches_type(param_name, rig_type):
|
||||
""" Returns True if the parameter name is consistent with the rig type.
|
||||
"""
|
||||
if param_name.rsplit(".", 1)[0] == rig_type:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def param_name(param_name, rig_type):
|
||||
""" Get the actual parameter name, sans-rig-type.
|
||||
"""
|
||||
return param_name[len(rig_type) + 1:]
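# Illustrative behaviour of the two helpers above, using a hypothetical rig type
# and parameter name:
#   param_matches_type("limbs.super_limb.segments", "limbs.super_limb")  -> True
#   param_name("limbs.super_limb.segments", "limbs.super_limb")          -> "segments"
assert param_name("limbs.super_limb.segments", "limbs.super_limb") == "segments"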
|
||||
|
@ -3,6 +3,7 @@
|
||||
import bpy
|
||||
|
||||
from collections import OrderedDict
|
||||
from typing import Union, Optional, Any
|
||||
|
||||
from .utils.animation import SCRIPT_REGISTER_BAKE, SCRIPT_UTILITIES_BAKE
|
||||
|
||||
@ -10,7 +11,9 @@ from . import base_generate
|
||||
|
||||
from rna_prop_ui import rna_idprop_quote_path
|
||||
|
||||
from .utils.rig import get_rigify_layers
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
UI_IMPORTS = [
|
||||
'import bpy',
|
||||
'import math',
|
||||
@ -23,6 +26,7 @@ UI_IMPORTS = [
|
||||
'from rna_prop_ui import rna_idprop_quote_path',
|
||||
]
|
||||
|
||||
|
||||
UI_BASE_UTILITIES = '''
|
||||
rig_id = "%s"
|
||||
|
||||
@ -44,7 +48,7 @@ def perpendicular_vector(v):
|
||||
else:
|
||||
tv = Vector((0,1,0))
|
||||
|
||||
# Use cross prouct to generate a vector perpendicular to
|
||||
# Use cross product to generate a vector perpendicular to
|
||||
# both tv and (more importantly) v.
|
||||
return v.cross(tv)
|
||||
|
||||
@ -76,7 +80,7 @@ def find_min_range(f,start_angle,delta=pi/8):
|
||||
|
||||
def ternarySearch(f, left, right, absolutePrecision):
|
||||
"""
|
||||
Find minimum of unimodal function f() within [left, right]
|
||||
Find minimum of uni-modal function f() within [left, right]
|
||||
To find the maximum, revert the if/else statement or revert the comparison.
|
||||
"""
|
||||
while True:
|
||||
@ -93,6 +97,7 @@ def ternarySearch(f, left, right, absolutePrecision):
|
||||
right = rightThird
|
||||
'''
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
UTILITIES_FUNC_COMMON_IKFK = ['''
|
||||
#########################################
|
||||
## "Visual Transform" helper functions ##
|
||||
@ -292,6 +297,7 @@ def parse_bone_names(names_string):
|
||||
|
||||
''']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
UTILITIES_FUNC_OLD_ARM_FKIK = ['''
|
||||
######################
|
||||
## IK Arm functions ##
|
||||
@ -409,6 +415,7 @@ def ik2fk_arm(obj, fk, ik):
|
||||
correct_scale(view_layer, uarmi, uarm.matrix)
|
||||
''']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
UTILITIES_FUNC_OLD_LEG_FKIK = ['''
|
||||
######################
|
||||
## IK Leg functions ##
|
||||
@ -551,6 +558,7 @@ def ik2fk_leg(obj, fk, ik):
|
||||
correct_scale(view_layer, thighi, thigh.matrix)
|
||||
''']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
UTILITIES_FUNC_OLD_POLE = ['''
|
||||
################################
|
||||
## IK Rotation-Pole functions ##
|
||||
@ -606,8 +614,8 @@ def rotPoleToggle(rig, limb_type, controls, ik_ctrl, fk_ctrl, parent, pole):
|
||||
'foot_ik': ik_ctrl[2], 'mfoot_ik': ik_ctrl[2]}
|
||||
kwargs2 = {'thigh_fk': controls[1], 'shin_fk': controls[2], 'foot_fk': controls[3],
|
||||
'mfoot_fk': controls[7], 'thigh_ik': controls[0], 'shin_ik': ik_ctrl[1],
|
||||
'foot_ik': controls[6], 'pole': pole, 'footroll': controls[5], 'mfoot_ik': ik_ctrl[2],
|
||||
'main_parent': parent}
|
||||
'foot_ik': controls[6], 'pole': pole, 'footroll': controls[5],
|
||||
'mfoot_ik': ik_ctrl[2], 'main_parent': parent}
|
||||
|
||||
func1(**kwargs1)
|
||||
rig.pose.bones[parent]['pole_vector'] = new_pole_vector_value
|
||||
@ -616,8 +624,10 @@ def rotPoleToggle(rig, limb_type, controls, ik_ctrl, fk_ctrl, parent, pole):
|
||||
bpy.ops.pose.select_all(action='DESELECT')
|
||||
''']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
REGISTER_OP_OLD_ARM_FKIK = ['Rigify_Arm_FK2IK', 'Rigify_Arm_IK2FK']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
UTILITIES_OP_OLD_ARM_FKIK = ['''
|
||||
##################################
|
||||
## IK/FK Arm snapping operators ##
|
||||
@ -643,7 +653,8 @@ class Rigify_Arm_FK2IK(bpy.types.Operator):
|
||||
return (context.active_object != None and context.mode == 'POSE')
|
||||
|
||||
def execute(self, context):
|
||||
fk2ik_arm(context.active_object, fk=[self.uarm_fk, self.farm_fk, self.hand_fk], ik=[self.uarm_ik, self.farm_ik, self.hand_ik])
|
||||
fk2ik_arm(context.active_object, fk=[self.uarm_fk, self.farm_fk, self.hand_fk],
|
||||
ik=[self.uarm_ik, self.farm_ik, self.hand_ik])
|
||||
return {'FINISHED'}
|
||||
|
||||
|
||||
@ -670,12 +681,15 @@ class Rigify_Arm_IK2FK(bpy.types.Operator):
|
||||
return (context.active_object != None and context.mode == 'POSE')
|
||||
|
||||
def execute(self, context):
|
||||
ik2fk_arm(context.active_object, fk=[self.uarm_fk, self.farm_fk, self.hand_fk], ik=[self.uarm_ik, self.farm_ik, self.hand_ik, self.pole, self.main_parent])
|
||||
ik2fk_arm(context.active_object, fk=[self.uarm_fk, self.farm_fk, self.hand_fk],
|
||||
ik=[self.uarm_ik, self.farm_ik, self.hand_ik, self.pole, self.main_parent])
|
||||
return {'FINISHED'}
|
||||
''']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
REGISTER_OP_OLD_LEG_FKIK = ['Rigify_Leg_FK2IK', 'Rigify_Leg_IK2FK']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
UTILITIES_OP_OLD_LEG_FKIK = ['''
|
||||
##################################
|
||||
## IK/FK Leg snapping operators ##
|
||||
@ -703,7 +717,9 @@ class Rigify_Leg_FK2IK(bpy.types.Operator):
|
||||
return (context.active_object != None and context.mode == 'POSE')
|
||||
|
||||
def execute(self, context):
|
||||
fk2ik_leg(context.active_object, fk=[self.thigh_fk, self.shin_fk, self.foot_fk, self.mfoot_fk], ik=[self.thigh_ik, self.shin_ik, self.foot_ik, self.mfoot_ik])
|
||||
fk2ik_leg(context.active_object,
|
||||
fk=[self.thigh_fk, self.shin_fk, self.foot_fk, self.mfoot_fk],
|
||||
ik=[self.thigh_ik, self.shin_ik, self.foot_ik, self.mfoot_ik])
|
||||
return {'FINISHED'}
|
||||
|
||||
|
||||
@ -732,7 +748,10 @@ class Rigify_Leg_IK2FK(bpy.types.Operator):
|
||||
return (context.active_object != None and context.mode == 'POSE')
|
||||
|
||||
def execute(self, context):
|
||||
ik2fk_leg(context.active_object, fk=[self.thigh_fk, self.shin_fk, self.mfoot_fk, self.foot_fk], ik=[self.thigh_ik, self.shin_ik, self.foot_ik, self.footroll, self.pole, self.mfoot_ik, self.main_parent])
|
||||
ik2fk_leg(context.active_object,
|
||||
fk=[self.thigh_fk, self.shin_fk, self.mfoot_fk, self.foot_fk],
|
||||
ik=[self.thigh_ik, self.shin_ik, self.foot_ik, self.footroll, self.pole,
|
||||
self.mfoot_ik, self.main_parent])
|
||||
return {'FINISHED'}
|
||||
''']
|
||||
|
||||
@ -763,7 +782,8 @@ class Rigify_Rot2PoleSwitch(bpy.types.Operator):
|
||||
bpy.ops.pose.select_all(action='DESELECT')
|
||||
rig.pose.bones[self.bone_name].bone.select = True
|
||||
|
||||
rotPoleToggle(rig, self.limb_type, self.controls, self.ik_ctrl, self.fk_ctrl, self.parent, self.pole)
|
||||
rotPoleToggle(rig, self.limb_type, self.controls, self.ik_ctrl, self.fk_ctrl,
|
||||
self.parent, self.pole)
|
||||
return {'FINISHED'}
|
||||
''']
|
||||
|
||||
@ -787,9 +807,9 @@ UTILITIES_RIG_OLD_LEG = [
|
||||
*UTILITIES_OP_OLD_POLE,
|
||||
]
|
||||
|
||||
##############################
|
||||
## Default set of utilities ##
|
||||
##############################
|
||||
############################
|
||||
# Default set of utilities #
|
||||
############################
|
||||
|
||||
UI_REGISTER = [
|
||||
'RigUI',
|
||||
@ -799,6 +819,7 @@ UI_REGISTER = [
|
||||
UI_UTILITIES = [
|
||||
]
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
UI_SLIDERS = '''
|
||||
###################
|
||||
## Rig UI Panels ##
|
||||
@ -847,6 +868,7 @@ class RigUI(bpy.types.Panel):
|
||||
|
||||
UI_REGISTER_BAKE_SETTINGS = ['RigBakeSettings']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
UI_BAKE_SETTINGS = '''
|
||||
class RigBakeSettings(bpy.types.Panel):
|
||||
bl_space_type = 'VIEW_3D'
|
||||
@ -863,10 +885,12 @@ class RigBakeSettings(bpy.types.Panel):
|
||||
RigifyBakeKeyframesMixin.draw_common_bake_ui(context, self.layout)
|
||||
'''
|
||||
|
||||
|
||||
def layers_ui(layers, layout):
|
||||
""" Turn a list of booleans + a list of names into a layer UI.
|
||||
"""
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
code = '''
|
||||
class RigLayers(bpy.types.Panel):
|
||||
bl_space_type = 'VIEW_3D'
|
||||
@ -899,11 +923,12 @@ class RigLayers(bpy.types.Panel):
|
||||
for key in keys:
|
||||
code += "\n row = col.row()\n"
|
||||
i = 0
|
||||
for l in rows[key]:
|
||||
for layer in rows[key]:
|
||||
if i > 3:
|
||||
code += "\n row = col.row()\n"
|
||||
i = 0
|
||||
code += " row.prop(context.active_object.data, 'layers', index=%s, toggle=True, text='%s')\n" % (str(l[1]), l[0])
|
||||
code += f" row.prop(context.active_object.data, 'layers', "\
|
||||
f"index={layer[1]}, toggle=True, text='{layer[0]}')\n"
|
||||
i += 1
|
||||
|
||||
# Root layer
|
||||
@ -912,18 +937,20 @@ class RigLayers(bpy.types.Panel):
|
||||
code += "\n row = col.row()"
|
||||
code += "\n row.separator()\n"
|
||||
code += "\n row = col.row()\n"
|
||||
code += " row.prop(context.active_object.data, 'layers', index=28, toggle=True, text='Root')\n"
|
||||
code += " row.prop(context.active_object.data, 'layers', "\
|
||||
"index=28, toggle=True, text='Root')\n"
|
||||
|
||||
return code
|
||||
|
||||
|
||||
def quote_parameters(positional, named):
|
||||
def quote_parameters(positional: list[Any], named: dict[str, Any]):
|
||||
"""Quote the given positional and named parameters as a code string."""
|
||||
positional_list = [repr(v) for v in positional]
|
||||
named_list = ["%s=%r" % (k, v) for k, v in named.items()]
|
||||
return ', '.join(positional_list + named_list)
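# Illustrative result (hypothetical property and parameters): quote_parameters()
# renders Python values into the argument string embedded in the generated UI code.
#   quote_parameters(['IK_FK'], {'text': 'IK-FK', 'slider': True})
#   -> "'IK_FK', text='IK-FK', slider=True"
assert quote_parameters(['IK_FK'], {'text': 'IK-FK', 'slider': True}) == \
    "'IK_FK', text='IK-FK', slider=True"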
|
||||
|
||||
def indent_lines(lines, indent=4):
|
||||
|
||||
def indent_lines(lines: list[str], indent=4):
|
||||
if indent > 0:
|
||||
prefix = ' ' * indent
|
||||
return [prefix + line for line in lines]
|
||||
@ -934,7 +961,13 @@ def indent_lines(lines, indent=4):
|
||||
class PanelLayout(object):
|
||||
"""Utility class that builds code for creating a layout."""
|
||||
|
||||
def __init__(self, parent, index=0):
|
||||
parent: Optional['PanelLayout']
|
||||
script: 'ScriptGenerator'
|
||||
|
||||
header: list[str]
|
||||
items: list[Union[str, 'PanelLayout']]
|
||||
|
||||
def __init__(self, parent: Union['PanelLayout', 'ScriptGenerator'], index=0):
|
||||
if isinstance(parent, PanelLayout):
|
||||
self.parent = parent
|
||||
self.script = parent.script
|
||||
@ -959,7 +992,7 @@ class PanelLayout(object):
|
||||
if self.parent:
|
||||
self.parent.clear_empty()
|
||||
|
||||
def get_lines(self):
|
||||
def get_lines(self) -> list[str]:
|
||||
lines = []
|
||||
|
||||
for item in self.items:
|
||||
@ -976,7 +1009,7 @@ class PanelLayout(object):
|
||||
def wrap_lines(self, lines):
|
||||
return self.header + indent_lines(lines, self.indent)
|
||||
|
||||
def add_line(self, line):
|
||||
def add_line(self, line: str):
|
||||
assert isinstance(line, str)
|
||||
|
||||
self.items.append(line)
|
||||
@ -988,14 +1021,16 @@ class PanelLayout(object):
|
||||
"""This panel contains operators that need the common Bake settings."""
|
||||
self.parent.use_bake_settings()
|
||||
|
||||
def custom_prop(self, bone_name, prop_name, **params):
|
||||
def custom_prop(self, bone_name: str, prop_name: str, **params):
|
||||
"""Add a custom property input field to the panel."""
|
||||
param_str = quote_parameters([rna_idprop_quote_path(prop_name)], params)
|
||||
self.add_line(
|
||||
"%s.prop(pose_bones[%r], %s)" % (self.layout, bone_name, param_str)
|
||||
)
|
||||
|
||||
def operator(self, operator_name, *, properties=None, **params):
|
||||
def operator(self, operator_name: str, *,
|
||||
properties: Optional[dict[str, Any]] = None,
|
||||
**params):
|
||||
"""Add an operator call button to the panel."""
|
||||
name = operator_name.format_map(self.script.format_args)
|
||||
param_str = quote_parameters([name], params)
|
||||
@ -1007,10 +1042,10 @@ class PanelLayout(object):
|
||||
else:
|
||||
self.add_line(call_str)
|
||||
|
||||
def add_nested_layout(self, name, params):
|
||||
def add_nested_layout(self, method_name: str, params: dict[str, Any]) -> 'PanelLayout':
|
||||
param_str = quote_parameters([], params)
|
||||
sub_panel = PanelLayout(self, self.index + 1)
|
||||
sub_panel.header.append('%s = %s.%s(%s)' % (sub_panel.layout, self.layout, name, param_str))
|
||||
sub_panel.header.append(f'{sub_panel.layout} = {self.layout}.{method_name}({param_str})')
|
||||
self.items.append(sub_panel)
|
||||
return sub_panel
|
||||
|
||||
@ -1030,7 +1065,9 @@ class PanelLayout(object):
|
||||
class BoneSetPanelLayout(PanelLayout):
|
||||
"""Panel restricted to a certain set of bones."""
|
||||
|
||||
def __init__(self, rig_panel, bones):
|
||||
parent: 'RigPanelLayout'
|
||||
|
||||
def __init__(self, rig_panel: 'RigPanelLayout', bones: frozenset[str]):
|
||||
assert isinstance(bones, frozenset)
|
||||
super().__init__(rig_panel)
|
||||
self.bones = bones
|
||||
@ -1059,10 +1096,10 @@ class BoneSetPanelLayout(PanelLayout):
|
||||
class RigPanelLayout(PanelLayout):
|
||||
"""Panel owned by a certain rig."""
|
||||
|
||||
def __init__(self, script, rig):
|
||||
def __init__(self, script: 'ScriptGenerator', _rig):
|
||||
super().__init__(script)
|
||||
self.bones = set()
|
||||
self.subpanels = OrderedDict()
|
||||
self.sub_panels = OrderedDict()
|
||||
|
||||
def wrap_lines(self, lines):
|
||||
header = ["if is_selected(%r):" % (set(self.bones))]
|
||||
@ -1072,11 +1109,11 @@ class RigPanelLayout(PanelLayout):
|
||||
def panel_with_selected_check(self, control_names):
|
||||
selected_set = frozenset(control_names)
|
||||
|
||||
if selected_set in self.subpanels:
|
||||
return self.subpanels[selected_set]
|
||||
if selected_set in self.sub_panels:
|
||||
return self.sub_panels[selected_set]
|
||||
else:
|
||||
panel = BoneSetPanelLayout(self, selected_set)
|
||||
self.subpanels[selected_set] = panel
|
||||
self.sub_panels[selected_set] = panel
|
||||
self.items.append(panel)
|
||||
return panel
|
||||
|
||||
@ -1086,6 +1123,8 @@ class ScriptGenerator(base_generate.GeneratorPlugin):
|
||||
|
||||
priority = -100
|
||||
|
||||
format_args: dict[str, str]
|
||||
|
||||
def __init__(self, generator):
|
||||
super().__init__(generator)
|
||||
|
||||
@ -1114,23 +1153,23 @@ class ScriptGenerator(base_generate.GeneratorPlugin):
|
||||
return panel.panel_with_selected_check(control_names)
|
||||
|
||||
# Raw output
|
||||
def add_panel_code(self, str_list):
|
||||
def add_panel_code(self, str_list: list[str]):
|
||||
"""Add raw code to the panel."""
|
||||
self.ui_scripts += str_list
|
||||
|
||||
def add_imports(self, str_list):
|
||||
def add_imports(self, str_list: list[str]):
|
||||
self.ui_imports += str_list
|
||||
|
||||
def add_utilities(self, str_list):
|
||||
def add_utilities(self, str_list: list[str]):
|
||||
self.ui_utilities += str_list
|
||||
|
||||
def register_classes(self, str_list):
|
||||
def register_classes(self, str_list: list[str]):
|
||||
self.ui_register += str_list
|
||||
|
||||
def register_driver_functions(self, str_list):
|
||||
def register_driver_functions(self, str_list: list[str]):
|
||||
self.ui_register_drivers += str_list
|
||||
|
||||
def register_property(self, name, definition):
|
||||
def register_property(self, name: str, definition):
|
||||
self.ui_register_props.append((name, definition))
|
||||
|
||||
def initialize(self):
|
||||
@ -1145,13 +1184,16 @@ class ScriptGenerator(base_generate.GeneratorPlugin):
|
||||
vis_layers = self.obj.data.layers
|
||||
|
||||
# Ensure the collection of layer names exists
|
||||
for i in range(1 + len(metarig.data.rigify_layers), 29):
|
||||
metarig.data.rigify_layers.add()
|
||||
rigify_layers = get_rigify_layers(metarig.data)
|
||||
|
||||
for i in range(1 + len(rigify_layers), 29):
|
||||
# noinspection PyUnresolvedReferences
|
||||
rigify_layers.add()
|
||||
|
||||
# Create list of layer name/row pairs
|
||||
layer_layout = []
|
||||
for l in metarig.data.rigify_layers:
|
||||
layer_layout += [(l.name, l.row)]
|
||||
for layer in rigify_layers:
|
||||
layer_layout += [(layer.name, layer.row)]
|
||||
|
||||
# Generate the UI script
|
||||
script = metarig.data.rigify_rig_ui
|
||||
@ -1201,8 +1243,8 @@ class ScriptGenerator(base_generate.GeneratorPlugin):
|
||||
script.write(" bpy.app.driver_namespace['"+s+"'] = "+s+"\n")
|
||||
|
||||
ui_register_props = OrderedDict.fromkeys(self.ui_register_props)
|
||||
for s in ui_register_props:
|
||||
script.write(" bpy.types.%s = %s\n " % (*s,))
|
||||
for classname, text in ui_register_props:
|
||||
script.write(f" bpy.types.{classname} = {text}\n ")
|
||||
|
||||
script.write("\ndef unregister():\n")
|
||||
|
||||
|
@ -14,7 +14,7 @@ from .utils.errors import MetarigError
|
||||
from .utils.rig import write_metarig
|
||||
from .utils.widgets import write_widget
|
||||
from .utils.naming import unique_name
|
||||
from .utils.rig import upgradeMetarigTypes, outdated_types
|
||||
from .utils.rig import upgrade_metarig_types, outdated_types
|
||||
|
||||
from .rigs.utils import get_limb_generated_names
|
||||
|
||||
@ -825,7 +825,7 @@ class UpgradeMetarigTypes(bpy.types.Operator):
|
||||
def execute(self, context):
|
||||
for obj in bpy.data.objects:
|
||||
if type(obj.data) == bpy.types.Armature:
|
||||
upgradeMetarigTypes(obj)
|
||||
upgrade_metarig_types(obj)
|
||||
return {'FINISHED'}
|
||||
class Sample(bpy.types.Operator):
|
||||
"""Create a sample metarig to be modified before generating the final rig"""
|
||||
|
@ -26,7 +26,7 @@ from .widgets_basic import create_sphere_widget, create_limb_widget, create_bone
|
||||
from .widgets_special import create_compass_widget, create_root_widget
|
||||
from .widgets_special import create_neck_bend_widget, create_neck_tweak_widget
|
||||
|
||||
from .rig import RIG_DIR, METARIG_DIR, TEMPLATE_DIR, outdated_types, upgradeMetarigTypes
|
||||
from .rig import RIG_DIR, METARIG_DIR, TEMPLATE_DIR, outdated_types, upgrade_metarig_types
|
||||
from .rig import write_metarig, get_resource
|
||||
from .rig import connected_children_names, has_connected_children
|
||||
|
||||
|
@ -1,18 +1,23 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
# noinspection PyUnresolvedReferences
|
||||
import bpy
|
||||
|
||||
# noinspection PyUnresolvedReferences
|
||||
import math
|
||||
import json
|
||||
|
||||
# noinspection PyUnresolvedReferences
|
||||
from mathutils import Matrix, Vector
|
||||
|
||||
from typing import Callable, Any, Collection, Iterator
|
||||
from bpy.types import Action, bpy_struct, FCurve
|
||||
|
||||
import json
|
||||
|
||||
rig_id = None
|
||||
|
||||
#=============================================
|
||||
# Keyframing functions
|
||||
#=============================================
|
||||
|
||||
##############################################
|
||||
# Keyframing functions
|
||||
##############################################
|
||||
|
||||
def get_keyed_frames_in_range(context, rig):
|
||||
action = find_action(rig)
|
||||
@ -34,11 +39,11 @@ def bones_in_frame(f, rig, *args):
|
||||
"""
|
||||
|
||||
if rig.animation_data and rig.animation_data.action:
|
||||
fcus = rig.animation_data.action.fcurves
|
||||
fcurves = rig.animation_data.action.fcurves
|
||||
else:
|
||||
return False
|
||||
|
||||
for fc in fcus:
|
||||
for fc in fcurves:
|
||||
animated_frames = [kp.co[0] for kp in fc.keyframe_points]
|
||||
for bone in args:
|
||||
if bone in fc.data_path.split('"') and f in animated_frames:
|
||||
@ -68,10 +73,12 @@ def overwrite_prop_animation(rig, bone, prop_name, value, frames):
|
||||
if kp.co[0] in frames:
|
||||
kp.co[1] = value
|
||||
|
||||
|
||||
################################################################
|
||||
# Utilities for inserting keyframes and/or setting transforms ##
|
||||
################################################################
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
SCRIPT_UTILITIES_KEYING = ['''
|
||||
######################
|
||||
## Keyframing tools ##
|
||||
@ -118,7 +125,8 @@ def get_4d_rotlock(bone):
|
||||
else:
|
||||
return [all(bone.lock_rotation)] * 4
|
||||
|
||||
def keyframe_transform_properties(obj, bone_name, keyflags, *, ignore_locks=False, no_loc=False, no_rot=False, no_scale=False):
|
||||
def keyframe_transform_properties(obj, bone_name, keyflags, *,
|
||||
ignore_locks=False, no_loc=False, no_rot=False, no_scale=False):
|
||||
"Keyframe transformation properties, taking flags and mode into account, and avoiding keying locked channels."
|
||||
bone = obj.pose.bones[bone_name]
|
||||
|
||||
@ -155,7 +163,8 @@ def get_constraint_target_matrix(con):
|
||||
if target.type == 'ARMATURE' and con.subtarget:
|
||||
if con.subtarget in target.pose.bones:
|
||||
bone = target.pose.bones[con.subtarget]
|
||||
return target.convert_space(pose_bone=bone, matrix=bone.matrix, from_space='POSE', to_space=con.target_space)
|
||||
return target.convert_space(
|
||||
pose_bone=bone, matrix=bone.matrix, from_space='POSE', to_space=con.target_space)
|
||||
else:
|
||||
return target.convert_space(matrix=target.matrix_world, from_space='WORLD', to_space=con.target_space)
|
||||
return Matrix.Identity(4)
|
||||
@ -224,8 +233,10 @@ def get_transform_matrix(obj, bone_name, *, space='POSE', with_constraints=True)
|
||||
def get_chain_transform_matrices(obj, bone_names, **options):
|
||||
return [get_transform_matrix(obj, name, **options) for name in bone_names]
|
||||
|
||||
def set_transform_from_matrix(obj, bone_name, matrix, *, space='POSE', undo_copy_scale=False, ignore_locks=False, no_loc=False, no_rot=False, no_scale=False, keyflags=None):
|
||||
"Apply the matrix to the transformation of the bone, taking locked channels, mode and certain constraints into account, and optionally keyframe it."
|
||||
def set_transform_from_matrix(obj, bone_name, matrix, *, space='POSE', undo_copy_scale=False,
|
||||
ignore_locks=False, no_loc=False, no_rot=False, no_scale=False, keyflags=None):
|
||||
"""Apply the matrix to the transformation of the bone, taking locked channels, mode and certain
|
||||
constraints into account, and optionally keyframe it."""
|
||||
bone = obj.pose.bones[bone_name]
|
||||
|
||||
def restore_channels(prop, old_vec, locks, extra_lock):
|
||||
@ -294,6 +305,7 @@ exec(SCRIPT_UTILITIES_KEYING[-1])
|
||||
# Utilities for managing animation curves ##
|
||||
############################################
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
SCRIPT_UTILITIES_CURVES = ['''
|
||||
###########################
|
||||
## Animation curve tools ##
|
||||
@ -433,6 +445,24 @@ class DriverCurveTable(FCurveTable):
|
||||
self.index_curves(self.anim_data.drivers)
|
||||
''']
|
||||
|
||||
AnyCurveSet = None | FCurve | dict | Collection
|
||||
flatten_curve_set: Callable[[AnyCurveSet], Iterator[FCurve]]
|
||||
flatten_curve_key_set: Callable[..., set[float]]
|
||||
get_curve_frame_set: Callable[..., set[float]]
|
||||
set_curve_key_interpolation: Callable[..., None]
|
||||
delete_curve_keys_in_range: Callable[..., None]
|
||||
nla_tweak_to_scene: Callable
|
||||
find_action: Callable[[bpy_struct], Action]
|
||||
clean_action_empty_curves: Callable[[bpy_struct], None]
|
||||
TRANSFORM_PROPS_LOCATION: frozenset[str]
|
||||
TRANSFORM_PROPS_ROTATION: frozenset[str]
TRANSFORM_PROPS_SCALE: frozenset[str]
TRANSFORM_PROPS_ALL: frozenset[str]
|
||||
transform_props_with_locks: Callable[[bool, bool, bool], set[str]]
|
||||
FCurveTable: Any
|
||||
ActionCurveTable: Any
|
||||
DriverCurveTable: Any
|
||||
|
||||
exec(SCRIPT_UTILITIES_CURVES[-1])
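# Illustrative use of one helper injected by the exec() call above (the bare
# annotations exist only for IDE completion): find_action() resolves the Action
# animating a datablock, or None.
def example_is_animated(obj: bpy_struct) -> bool:
    return find_action(obj) is not None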
|
||||
|
||||
################################################
|
||||
@ -441,7 +471,9 @@ exec(SCRIPT_UTILITIES_CURVES[-1])
|
||||
|
||||
_SCRIPT_REGISTER_WM_PROPS = '''
|
||||
bpy.types.WindowManager.rigify_transfer_use_all_keys = bpy.props.BoolProperty(
|
||||
name="Bake All Keyed Frames", description="Bake on every frame that has a key for any of the bones, as opposed to just the relevant ones", default=False
|
||||
name="Bake All Keyed Frames",
|
||||
description="Bake on every frame that has a key for any of the bones, as opposed to just the relevant ones",
|
||||
default=False
|
||||
)
|
||||
bpy.types.WindowManager.rigify_transfer_use_frame_range = bpy.props.BoolProperty(
|
||||
name="Limit Frame Range", description="Only bake keyframes in a certain frame range", default=False
|
||||
@ -461,6 +493,7 @@ del bpy.types.WindowManager.rigify_transfer_start_frame
|
||||
del bpy.types.WindowManager.rigify_transfer_end_frame
|
||||
'''
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
_SCRIPT_UTILITIES_BAKE_OPS = '''
|
||||
class RIGIFY_OT_get_frame_range(bpy.types.Operator):
|
||||
bl_idname = "rigify.get_frame_range" + ('_'+rig_id if rig_id else '')
|
||||
@ -497,6 +530,8 @@ class RIGIFY_OT_get_frame_range(bpy.types.Operator):
|
||||
row.operator(self.bl_idname, icon='TIME', text='')
|
||||
'''
|
||||
|
||||
RIGIFY_OT_get_frame_range: Any
|
||||
|
||||
exec(_SCRIPT_UTILITIES_BAKE_OPS)
|
||||
|
||||
################################################
|
||||
@ -505,6 +540,7 @@ exec(_SCRIPT_UTILITIES_BAKE_OPS)
|
||||
|
||||
SCRIPT_REGISTER_BAKE = ['RIGIFY_OT_get_frame_range']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
SCRIPT_UTILITIES_BAKE = SCRIPT_UTILITIES_KEYING + SCRIPT_UTILITIES_CURVES + ['''
|
||||
##################################
|
||||
# Common bake operator settings ##
|
||||
@ -756,6 +792,10 @@ class RigifySingleUpdateMixin(RigifyOperatorMixinBase):
|
||||
return self.execute(context)
|
||||
''']
|
||||
|
||||
RigifyOperatorMixinBase: Any
|
||||
RigifyBakeKeyframesMixin: Any
|
||||
RigifySingleUpdateMixin: Any
|
||||
|
||||
exec(SCRIPT_UTILITIES_BAKE[-1])
|
||||
|
||||
#####################################
|
||||
@ -764,6 +804,7 @@ exec(SCRIPT_UTILITIES_BAKE[-1])
|
||||
|
||||
SCRIPT_REGISTER_OP_CLEAR_KEYS = ['POSE_OT_rigify_clear_keyframes']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
SCRIPT_UTILITIES_OP_CLEAR_KEYS = ['''
|
||||
#############################
|
||||
## Generic Clear Keyframes ##
|
||||
@ -806,6 +847,8 @@ class POSE_OT_rigify_clear_keyframes(bpy.types.Operator):
|
||||
return {'FINISHED'}
|
||||
''']
|
||||
|
||||
|
||||
# noinspection PyDefaultArgument,PyUnusedLocal
|
||||
def add_clear_keyframes_button(panel, *, bones=[], label='', text=''):
|
||||
panel.use_bake_settings()
|
||||
panel.script.add_utilities(SCRIPT_UTILITIES_OP_CLEAR_KEYS)
|
||||
@ -813,7 +856,8 @@ def add_clear_keyframes_button(panel, *, bones=[], label='', text=''):
|
||||
|
||||
op_props = {'bones': json.dumps(bones)}
|
||||
|
||||
panel.operator('pose.rigify_clear_keyframes_{rig_id}', text=text, icon='CANCEL', properties=op_props)
|
||||
panel.operator('pose.rigify_clear_keyframes_{rig_id}', text=text, icon='CANCEL',
|
||||
properties=op_props)
|
||||
|
||||
|
||||
###################################
|
||||
@ -822,6 +866,7 @@ def add_clear_keyframes_button(panel, *, bones=[], label='', text=''):
|
||||
|
||||
SCRIPT_REGISTER_OP_SNAP = ['POSE_OT_rigify_generic_snap', 'POSE_OT_rigify_generic_snap_bake']
|
||||
|
||||
# noinspection SpellCheckingInspection
|
||||
SCRIPT_UTILITIES_OP_SNAP = ['''
|
||||
#############################
|
||||
## Generic Snap (FK to IK) ##
|
||||
@ -875,11 +920,13 @@ class POSE_OT_rigify_generic_snap_bake(RigifyGenericSnapBase, RigifyBakeKeyframe
|
||||
return self.bake_get_all_bone_curves(self.output_bone_list, props)
|
||||
''']
|
||||
|
||||
def add_fk_ik_snap_buttons(panel, op_single, op_bake, *, label=None, rig_name='', properties=None, clear_bones=None, compact=None):
|
||||
|
||||
def add_fk_ik_snap_buttons(panel, op_single, op_bake, *, label=None, rig_name='', properties=None,
|
||||
clear_bones=None, compact=None):
|
||||
assert label and properties
|
||||
|
||||
if rig_name:
|
||||
label += ' (%s)' % (rig_name)
|
||||
label += ' (%s)' % rig_name
|
||||
|
||||
if compact or not clear_bones:
|
||||
row = panel.row(align=True)
|
||||
@ -895,7 +942,10 @@ def add_fk_ik_snap_buttons(panel, op_single, op_bake, *, label=None, rig_name=''
|
||||
row.operator(op_bake, text='Action', icon='ACTION_TWEAK', properties=properties)
|
||||
add_clear_keyframes_button(row, bones=clear_bones, text='Clear')
|
||||
|
||||
def add_generic_snap(panel, *, output_bones=[], input_bones=[], input_ctrl_bones=[], label='Snap', rig_name='', undo_copy_scale=False, compact=None, clear=True, locks=None, tooltip=None):
|
||||
|
||||
# noinspection PyDefaultArgument
|
||||
def add_generic_snap(panel, *, output_bones=[], input_bones=[], input_ctrl_bones=[], label='Snap',
|
||||
rig_name='', undo_copy_scale=False, compact=None, clear=True, locks=None, tooltip=None):
|
||||
panel.use_bake_settings()
|
||||
panel.script.add_utilities(SCRIPT_UTILITIES_OP_SNAP)
|
||||
panel.script.register_classes(SCRIPT_REGISTER_OP_SNAP)
|
||||
@ -920,12 +970,16 @@ def add_generic_snap(panel, *, output_bones=[], input_bones=[], input_ctrl_bones
|
||||
label=label, rig_name=rig_name, properties=op_props, clear_bones=clear_bones, compact=compact,
|
||||
)
|
||||
|
||||
def add_generic_snap_fk_to_ik(panel, *, fk_bones=[], ik_bones=[], ik_ctrl_bones=[], label='FK->IK', rig_name='', undo_copy_scale=False, compact=None, clear=True):
|
||||
|
||||
# noinspection PyDefaultArgument
|
||||
def add_generic_snap_fk_to_ik(panel, *, fk_bones=[], ik_bones=[], ik_ctrl_bones=[], label='FK->IK',
|
||||
rig_name='', undo_copy_scale=False, compact=None, clear=True):
|
||||
add_generic_snap(
|
||||
panel, output_bones=fk_bones, input_bones=ik_bones, input_ctrl_bones=ik_ctrl_bones,
|
||||
label=label, rig_name=rig_name, undo_copy_scale=undo_copy_scale, compact=compact, clear=clear
|
||||
)
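# Illustrative call (hypothetical bone names): a limb rig's UI panel code could
# expose an FK->IK snap row like this, where `panel` is a PanelLayout created by
# the script generator.
def example_add_arm_snap_buttons(panel):
    add_generic_snap_fk_to_ik(
        panel,
        fk_bones=['upper_arm_fk.L', 'forearm_fk.L', 'hand_fk.L'],
        ik_bones=['upper_arm_ik.L', 'MCH-forearm_ik.L', 'hand_ik.L'],
        ik_ctrl_bones=['hand_ik.L'],
        rig_name='arm.L',
    )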
|
||||
|
||||
|
||||
###############################
|
||||
# Module register/unregister ##
|
||||
###############################
|
||||
@ -937,6 +991,7 @@ def register():
|
||||
|
||||
register_class(RIGIFY_OT_get_frame_range)
|
||||
|
||||
|
||||
def unregister():
|
||||
from bpy.utils import unregister_class
|
||||
|
||||
|
@ -2,15 +2,18 @@
|
||||
|
||||
import bpy
|
||||
import math
|
||||
from mathutils import Vector, Matrix, Color
|
||||
|
||||
from mathutils import Vector, Matrix
|
||||
from typing import Optional, Callable
|
||||
|
||||
from .errors import MetarigError
|
||||
from .naming import get_name, make_derived_name, is_control_bone
|
||||
from .misc import pairwise
|
||||
from .misc import pairwise, ArmatureObject
|
||||
|
||||
#=======================
|
||||
|
||||
########################
|
||||
# Bone collection
|
||||
#=======================
|
||||
########################
|
||||
|
||||
class BoneDict(dict):
|
||||
"""
|
||||
@ -18,23 +21,22 @@ class BoneDict(dict):
|
||||
|
||||
Allows access to contained items as attributes, and only
|
||||
accepts certain types of values.
|
||||
|
||||
@DynamicAttrs
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def __sanitize_attr(key, value):
|
||||
if hasattr(BoneDict, key):
|
||||
raise KeyError("Invalid BoneDict key: %s" % (key))
|
||||
raise KeyError(f"Invalid BoneDict key: {key}")
|
||||
|
||||
if (value is None or
|
||||
isinstance(value, str) or
|
||||
isinstance(value, list) or
|
||||
isinstance(value, BoneDict)):
|
||||
if value is None or isinstance(value, (str, list, BoneDict)):
|
||||
return value
|
||||
|
||||
if isinstance(value, dict):
|
||||
return BoneDict(value)
|
||||
|
||||
raise ValueError("Invalid BoneDict value: %r" % (value))
|
||||
raise ValueError(f"Invalid BoneDict value: {repr(value)}")
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__()
|
||||
@ -71,13 +73,14 @@ class BoneDict(dict):
|
||||
|
||||
return all_bones
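# Illustrative behaviour (hypothetical bone names): BoneDict exposes its items as
# attributes, wraps nested plain dicts, and flatten() gathers every contained name
# into a single list.
_bd_example = BoneDict(org='ORG-hand', ctrl={'master': 'hand_master'})
# _bd_example.ctrl.master  -> 'hand_master'
# _bd_example.flatten()    -> ['ORG-hand', 'hand_master']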
|
||||
|
||||
#=======================
|
||||
|
||||
########################
|
||||
# Bone manipulation
|
||||
#=======================
|
||||
########################
|
||||
#
|
||||
# NOTE: PREFER USING BoneUtilityMixin IN NEW STYLE RIGS!
|
||||
|
||||
def get_bone(obj, bone_name):
|
||||
def get_bone(obj: ArmatureObject, bone_name: Optional[str]):
|
||||
"""Get EditBone or PoseBone by name, depending on the current mode."""
|
||||
if not bone_name:
|
||||
return None
|
||||
@ -87,7 +90,7 @@ def get_bone(obj, bone_name):
|
||||
return bones[bone_name]
|
||||
|
||||
|
||||
def new_bone(obj, bone_name):
|
||||
def new_bone(obj: ArmatureObject, bone_name: str):
|
||||
""" Adds a new bone to the given armature object.
|
||||
Returns the resulting bone's name.
|
||||
"""
|
||||
@ -102,11 +105,13 @@ def new_bone(obj, bone_name):
|
||||
raise MetarigError("Can't add new bone '%s' outside of edit mode" % bone_name)
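# Illustrative edit-mode usage (hypothetical bone name): create a helper bone and
# fetch it again with get_bone(); new_bone() requires edit mode and returns the
# final (possibly de-duplicated) name, which is why the returned name is reused.
def example_add_helper_bone(obj: ArmatureObject) -> str:
    name = new_bone(obj, "MCH-example_helper")
    get_bone(obj, name).length = 0.1
    return name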
|
||||
|
||||
|
||||
def copy_bone(obj, bone_name, assign_name='', *, parent=False, inherit_scale=False, bbone=False, length=None, scale=None):
def copy_bone(obj: ArmatureObject, bone_name: str, assign_name='', *,
parent=False, inherit_scale=False, bbone=False,
length: Optional[float] = None, scale: Optional[float] = None):
""" Makes a copy of the given bone in the given armature object.
Returns the resulting bone's name.
"""
#if bone_name not in obj.data.bones:
if bone_name not in obj.data.edit_bones:
raise MetarigError("copy_bone(): bone '%s' not found, cannot copy it" % bone_name)

@ -116,7 +121,6 @@ def copy_bone(obj, bone_name, assign_name='', *, parent=False, inherit_scale=Fal
# Copy the edit bone
edit_bone_1 = obj.data.edit_bones[bone_name]
edit_bone_2 = obj.data.edit_bones.new(assign_name)
bone_name_1 = bone_name
bone_name_2 = edit_bone_2.name

# Copy edit bone attributes
@ -137,6 +141,7 @@ def copy_bone(obj, bone_name, assign_name='', *, parent=False, inherit_scale=Fal
edit_bone_2.inherit_scale = edit_bone_1.inherit_scale

if bbone:
# noinspection SpellCheckingInspection
for name in ['bbone_segments',
'bbone_easein', 'bbone_easeout',
'bbone_rollin', 'bbone_rollout',
@ -155,7 +160,8 @@ def copy_bone(obj, bone_name, assign_name='', *, parent=False, inherit_scale=Fal
raise MetarigError("Cannot copy bones outside of edit mode")


def copy_bone_properties(obj, bone_name_1, bone_name_2, transforms=True, props=True, widget=True):
def copy_bone_properties(obj: ArmatureObject, bone_name_1: str, bone_name_2: str,
transforms=True, props=True, widget=True):
""" Copy transform and custom properties from bone 1 to bone 2. """
if obj.mode in {'OBJECT', 'POSE'}:
# Get the pose bones
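
A minimal usage sketch of the module-level copy_bone shown above (illustrative; the import path, bone names and MCH- prefix are assumptions, and the armature must already be in edit mode, as the MetarigError branches require):

import bpy
from rigify.utils.bones import copy_bone   # assumed module path

obj = bpy.context.active_object   # assumed: a Rigify armature in edit mode

# Copy an ORG bone into a mechanism bone, keep its parent, halve its length.
mch_name = copy_bone(obj, 'ORG-thigh.L', 'MCH-thigh.L', parent=True, scale=0.5)
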
@ -197,7 +203,7 @@ def _legacy_copy_bone(obj, bone_name, assign_name=''):
return new_name


def flip_bone(obj, bone_name):
def flip_bone(obj: ArmatureObject, bone_name: str):
""" Flips an edit bone.
"""
if bone_name not in obj.data.edit_bones:
@ -214,7 +220,7 @@ def flip_bone(obj, bone_name):
raise MetarigError("Cannot flip bones outside of edit mode")


def flip_bone_chain(obj, bone_names):
def flip_bone_chain(obj: ArmatureObject, bone_names: list[str]):
"""Flips a connected bone chain."""
assert obj.mode == 'EDIT'

@ -242,7 +248,9 @@ def flip_bone_chain(obj, bone_names):
bone.use_connect = True

def put_bone(obj, bone_name, pos, *, matrix=None, length=None, scale=None):
def put_bone(obj: ArmatureObject, bone_name: str, pos: Optional[Vector], *,
matrix: Optional[Matrix] = None,
length: Optional[float] = None, scale: Optional[float] = None):
""" Places a bone at the given position.
"""
if bone_name not in obj.data.edit_bones:
@ -274,13 +282,14 @@ def put_bone(obj, bone_name, pos, *, matrix=None, length=None, scale=None):
raise MetarigError("Cannot 'put' bones outside of edit mode")


def disable_bbones(obj, bone_names):
def disable_bbones(obj: ArmatureObject, bone_names: list[str]):
"""Disables B-Bone segments on the specified bones."""
assert(obj.mode != 'EDIT')
for bone in bone_names:
obj.data.bones[bone].bbone_segments = 1

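
A short hypothetical sketch of the two helpers above; put_bone expects edit mode, while disable_bbones asserts the object is not in edit mode (bone names and import path assumed):

import bpy
from mathutils import Vector
from rigify.utils.bones import put_bone, disable_bbones   # assumed module path

obj = bpy.context.active_object   # assumed: a Rigify armature with these bones

bpy.ops.object.mode_set(mode='EDIT')
# Move the bone so its head sits at the given point, shrinking it as well.
put_bone(obj, 'MCH-pivot', Vector((0.0, 1.0, 0.5)), scale=0.25)

bpy.ops.object.mode_set(mode='OBJECT')
# Outside edit mode: collapse the B-Bone segments of a small chain.
disable_bbones(obj, ['spine.001', 'spine.002'])
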
# noinspection SpellCheckingInspection
def _legacy_make_nonscaling_child(obj, bone_name, location, child_name_postfix=""):
""" Takes the named bone and creates a non-scaling child of it at
the given location. The returned bone (returned by name) is not
@ -345,48 +354,65 @@ def _legacy_make_nonscaling_child(obj, bone_name, location, child_name_postfix="
raise MetarigError("Cannot make nonscaling child outside of edit mode")


#===================================
####################################
# Bone manipulation as rig methods
#===================================
####################################

class BoneUtilityMixin(object):
obj: ArmatureObject
register_new_bone: Callable[[str, Optional[str]], None]

"""
Provides methods for more convenient creation of bones.

Requires self.obj to be the armature object being worked on.
"""
def register_new_bone(self, new_name, old_name=None):
def register_new_bone(self, new_name: str, old_name: Optional[str] = None):
"""Registers creation or renaming of a bone based on old_name"""
pass

def new_bone(self, new_name):
def new_bone(self, new_name: str) -> str:
"""Create a new bone with the specified name."""
name = new_bone(self.obj, new_name)
self.register_new_bone(name)
self.register_new_bone(name, None)
return name

def copy_bone(self, bone_name, new_name='', *, parent=False, inherit_scale=False, bbone=False, length=None, scale=None):
def copy_bone(self, bone_name: str, new_name='', *,
parent=False, inherit_scale=False, bbone=False,
length: Optional[float] = None,
scale: Optional[float] = None) -> str:
"""Copy the bone with the given name, returning the new name."""
name = copy_bone(self.obj, bone_name, new_name, parent=parent, inherit_scale=inherit_scale, bbone=bbone, length=length, scale=scale)
name = copy_bone(self.obj, bone_name, new_name,
parent=parent, inherit_scale=inherit_scale,
bbone=bbone, length=length, scale=scale)
self.register_new_bone(name, bone_name)
return name

def copy_bone_properties(self, src_name, tgt_name, *, props=True, ui_controls=None, **kwargs):
"""Copy pose-mode properties of the bone."""
def copy_bone_properties(self, src_name: str, tgt_name: str, *,
props=True,
ui_controls: list[str] | bool | None = None,
**kwargs):
"""Copy pose-mode properties of the bone. For using ui_controls, self must be a Rig."""

if ui_controls:
from ..base_rig import BaseRig
assert isinstance(self, BaseRig)

if props:
if ui_controls is None and is_control_bone(tgt_name) and hasattr(self, 'script'):
ui_controls = [tgt_name]
elif ui_controls is True:
ui_controls = self.bones.flatten('ctrl')

copy_bone_properties(self.obj, src_name, tgt_name, props=props and not ui_controls, **kwargs)
copy_bone_properties(
self.obj, src_name, tgt_name, props=props and not ui_controls, **kwargs)

if props and ui_controls:
from .mechanism import copy_custom_properties_with_ui
copy_custom_properties_with_ui(self, src_name, tgt_name, ui_controls=ui_controls)

def rename_bone(self, old_name, new_name):
def rename_bone(self, old_name: str, new_name: str) -> str:
"""Rename the bone, returning the actual new name."""
bone = self.get_bone(old_name)
bone.name = new_name
@ -394,15 +420,17 @@ class BoneUtilityMixin(object):
self.register_new_bone(bone.name, old_name)
return bone.name

def get_bone(self, bone_name):
def get_bone(self, bone_name: Optional[str])\
-> Optional[bpy.types.EditBone | bpy.types.PoseBone]:
"""Get EditBone or PoseBone by name, depending on the current mode."""
return get_bone(self.obj, bone_name)

def get_bone_parent(self, bone_name):
def get_bone_parent(self, bone_name: str) -> Optional[str]:
"""Get the name of the parent bone, or None."""
return get_name(self.get_bone(bone_name).parent)

def set_bone_parent(self, bone_name, parent_name, use_connect=False, inherit_scale=None):
def set_bone_parent(self, bone_name: str, parent_name: Optional[str],
use_connect=False, inherit_scale: Optional[str] = None):
"""Set the parent of the bone."""
eb = self.obj.data.edit_bones
bone = eb[bone_name]
@ -412,16 +440,20 @@ class BoneUtilityMixin(object):
bone.inherit_scale = inherit_scale
bone.parent = (eb[parent_name] if parent_name else None)

def parent_bone_chain(self, bone_names, use_connect=None, inherit_scale=None):
def parent_bone_chain(self, bone_names: list[str],
use_connect: Optional[bool] = None,
inherit_scale: Optional[str] = None):
"""Link bones into a chain with parenting. First bone may be None."""
for parent, child in pairwise(bone_names):
self.set_bone_parent(child, parent, use_connect=use_connect, inherit_scale=inherit_scale)
self.set_bone_parent(
child, parent, use_connect=use_connect, inherit_scale=inherit_scale)

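
A hypothetical fragment showing how a rig class typically calls these mixin methods during bone generation; the class name, bone names, stage decorator and the assumption that BaseRig mixes in BoneUtilityMixin are illustrative, not taken from this patch:

from rigify.base_rig import BaseRig, stage   # assumed import path

class Rig(BaseRig):
    @stage.generate_bones
    def make_mch_chain(self):
        org = self.bones.org                  # assumed layout of self.bones
        ctrl = self.copy_bone(org, 'torso', parent=True)
        mch = self.copy_bone(org, 'MCH-torso', scale=0.5)
        self.parent_bone_chain([ctrl, mch], use_connect=False)
        self.bones.ctrl = ctrl
        self.bones.mch = mch
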
#=============================================
##############################################
# B-Bones
#=============================================
##############################################

def connect_bbone_chain_handles(obj, bone_names):
def connect_bbone_chain_handles(obj: ArmatureObject, bone_names: list[str]):
assert obj.mode == 'EDIT'

for prev_name, next_name in pairwise(bone_names):
@ -434,26 +466,28 @@ def connect_bbone_chain_handles(obj, bone_names):
next_bone.bbone_handle_type_start = 'ABSOLUTE'
next_bone.bbone_custom_handle_start = prev_bone

#=============================================
##############################################
# Math
#=============================================
##############################################

def is_same_position(obj, bone_name1, bone_name2):
def is_same_position(obj: ArmatureObject, bone_name1: str, bone_name2: str):
head1 = get_bone(obj, bone_name1).head
head2 = get_bone(obj, bone_name2).head

return (head1 - head2).length < 1e-5


def is_connected_position(obj, bone_name1, bone_name2):
def is_connected_position(obj: ArmatureObject, bone_name1: str, bone_name2: str):
tail1 = get_bone(obj, bone_name1).tail
head2 = get_bone(obj, bone_name2).head

return (tail1 - head2).length < 1e-5

def copy_bone_position(obj, bone_name, target_bone_name, *, length=None, scale=None):
def copy_bone_position(obj: ArmatureObject, bone_name: str, target_bone_name: str, *,
length: Optional[float] = None,
scale: Optional[float] = None):
""" Completely copies the position and orientation of the bone. """
bone1_e = obj.data.edit_bones[bone_name]
bone2_e = obj.data.edit_bones[target_bone_name]
@ -469,7 +503,7 @@ def copy_bone_position(obj, bone_name, target_bone_name, *, length=None, scale=N
bone2_e.length *= scale


def align_bone_orientation(obj, bone_name, target_bone_name):
def align_bone_orientation(obj: ArmatureObject, bone_name: str, target_bone_name: str):
""" Aligns the orientation of bone to target bone. """
bone1_e = obj.data.edit_bones[bone_name]
bone2_e = obj.data.edit_bones[target_bone_name]
@ -480,7 +514,7 @@ def align_bone_orientation(obj, bone_name, target_bone_name):
bone1_e.roll = bone2_e.roll


def set_bone_orientation(obj, bone_name, orientation):
def set_bone_orientation(obj: ArmatureObject, bone_name: str, orientation: str | Matrix):
""" Aligns the orientation of bone to target bone or matrix. """
if isinstance(orientation, str):
align_bone_orientation(obj, bone_name, orientation)
@ -494,7 +528,7 @@ def set_bone_orientation(obj, bone_name, orientation):
bone_e.matrix = matrix

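
As the isinstance check above shows, set_bone_orientation accepts either another bone's name or a matrix; a tiny hypothetical illustration (import path and bone names assumed):

import bpy
from mathutils import Matrix
from rigify.utils.bones import set_bone_orientation   # assumed module path

obj = bpy.context.active_object   # assumed: a Rigify armature in edit mode

set_bone_orientation(obj, 'MCH-aim', 'ctrl_aim')          # copy orientation from a bone
set_bone_orientation(obj, 'MCH-aim', Matrix.Identity(4))  # or assign a 4x4 matrix directly
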
def align_bone_roll(obj, bone1, bone2):
def align_bone_roll(obj: ArmatureObject, bone1: str, bone2: str):
""" Aligns the roll of two bones.
"""
bone1_e = obj.data.edit_bones[bone1]
@ -539,7 +573,7 @@ def align_bone_roll(obj, bone1, bone2):
bone1_e.roll = -roll


def align_bone_x_axis(obj, bone, vec):
def align_bone_x_axis(obj: ArmatureObject, bone: str, vec: Vector):
""" Rolls the bone to align its x-axis as closely as possible to
the given vector.
Must be in edit mode.
@ -564,7 +598,7 @@ def align_bone_x_axis(obj, bone, vec):
bone_e.roll += angle * 2


def align_bone_z_axis(obj, bone, vec):
def align_bone_z_axis(obj: ArmatureObject, bone: str, vec: Vector):
""" Rolls the bone to align its z-axis as closely as possible to
the given vector.
Must be in edit mode.
@ -589,7 +623,7 @@ def align_bone_z_axis(obj, bone, vec):
bone_e.roll += angle * 2


def align_bone_y_axis(obj, bone, vec):
def align_bone_y_axis(obj: ArmatureObject, bone: str, vec: Vector):
""" Matches the bone y-axis to
the given vector.
Must be in edit mode.
@ -597,14 +631,15 @@ def align_bone_y_axis(obj, bone, vec):

bone_e = obj.data.edit_bones[bone]
vec.normalize()

vec = vec * bone_e.length

bone_e.tail = bone_e.head + vec

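
A hypothetical edit-mode sketch of the roll and axis helpers above; the bone names are illustrative and the import path is assumed:

import bpy
from mathutils import Vector
from rigify.utils.bones import align_bone_x_axis, align_bone_y_axis   # assumed module path

obj = bpy.context.active_object   # assumed: an armature currently in edit mode

# Roll the bone so its X axis points as close to world +X as possible.
align_bone_x_axis(obj, 'forearm.L', Vector((1.0, 0.0, 0.0)))

# Re-aim the bone along world +Z; the code above keeps its current length.
align_bone_y_axis(obj, 'hand.L', Vector((0.0, 0.0, 1.0)))
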
def compute_chain_x_axis(obj, bone_names):
def compute_chain_x_axis(obj: ArmatureObject, bone_names: list[str]):
"""
Compute the x axis of all bones to be perpendicular
Compute the X axis of all bones to be perpendicular
to the primary plane in which the bones lie.
"""
eb = obj.data.edit_bones
@ -615,6 +650,7 @@ def compute_chain_x_axis(obj, bone_names):

# Compute normal to the plane defined by the first bone,
# and the end of the last bone in the chain

chain_y_axis = last_bone.tail - first_bone.head
chain_rot_axis = first_bone.y_axis.cross(chain_y_axis)

@ -624,9 +660,9 @@ def compute_chain_x_axis(obj, bone_names):
return chain_rot_axis.normalized()


def align_chain_x_axis(obj, bone_names):
def align_chain_x_axis(obj: ArmatureObject, bone_names: list[str]):
"""
Aligns the x axis of all bones to be perpendicular
Aligns the X axis of all bones to be perpendicular
to the primary plane in which the bones lie.
"""
chain_rot_axis = compute_chain_x_axis(obj, bone_names)
@ -635,7 +671,10 @@ def align_chain_x_axis(obj, bone_names):
align_bone_x_axis(obj, name, chain_rot_axis)

def align_bone_to_axis(obj, bone_name, axis, *, length=None, roll=0, flip=False):
def align_bone_to_axis(obj: ArmatureObject, bone_name: str, axis: str, *,
length: Optional[float] = None,
roll: Optional[float] = 0.0,
flip=False):
"""
Aligns the Y axis of the bone to the global axis (x,y,z,-x,-y,-z),
optionally adjusting length and initially flipping the bone.
@ -664,7 +703,9 @@ def align_bone_to_axis(obj, bone_name, axis, *, length=None, roll=0, flip=False)
bone_e.roll = roll


def set_bone_widget_transform(obj, bone_name, transform_bone, use_size=True, scale=1.0, target_size=False):
def set_bone_widget_transform(obj: ArmatureObject, bone_name: str,
transform_bone: Optional[str], *,
use_size=True, scale=1.0, target_size=False):
assert obj.mode != 'EDIT'

bone = obj.pose.bones[bone_name]
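
A final hypothetical sketch of align_bone_to_axis, which takes the target direction as one of the axis strings listed in its docstring (import path and bone name assumed):

import bpy
from rigify.utils.bones import align_bone_to_axis   # assumed module path

obj = bpy.context.active_object   # assumed: an armature in edit mode

# Point the bone's Y axis straight down world -Z with a fixed length,
# flipping it first so it grows from what used to be its tail.
align_bone_to_axis(obj, 'MCH-drop', '-z', length=0.2, flip=True)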