Pose Library: Update to use the asset shelf (when enabled) #104546

Merged
Julian Eisel merged 33 commits from asset-shelf into main 2023-08-04 15:00:21 +02:00
83 changed files with 1096 additions and 4100 deletions
Showing only changes of commit 2c08baa7f2 - Show all commits

View File

@ -19,7 +19,7 @@ from .achm_room_maker import get_wall_points
import gpu
from gpu_extras.batch import batch_for_shader
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR') if not bpy.app.background else None
shader = gpu.shader.from_builtin('UNIFORM_COLOR') if not bpy.app.background else None
# -------------------------------------------------------------
# Handle all draw routines (OpenGL main entry point)
@ -92,7 +92,7 @@ def draw_main(context):
def draw_text(x_pos, y_pos, display_text, rgba, fsize, right=False):
gap = 12
font_id = 0
blf.size(font_id, fsize, 72)
blf.size(font_id, fsize)
text_width, text_height = blf.dimensions(font_id, display_text)
if right is True:

View File

@ -852,7 +852,7 @@ class MarkerController:
def __init__(self, context):
self.smMap = self.createSMMap(context)
self.shader = gpu.shader.from_builtin('3D_FLAT_COLOR')
self.shader = gpu.shader.from_builtin('FLAT_COLOR')
# self.shader.bind()
MarkerController.drawHandlerRef = \

View File

@ -79,7 +79,7 @@ def draw_bezier_points(self, context, spline, matrix_world, path_color, path_thi
points = get_bezier_points(spline, matrix_world)
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
batch = batch_for_shader(shader, 'POINTS', {"pos": points})
shader.bind()
@ -92,7 +92,7 @@ def draw_points(self, context, spline, matrix_world, path_color, path_thickness)
points = get_points(spline, matrix_world)
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
batch = batch_for_shader(shader, 'POINTS', {"pos": points})
shader.bind()

View File

@ -51,7 +51,7 @@ def draw(self, context, splines, curve_vertcolor, matrix_world):
for spline in splines:
points = get_points(spline, matrix_world)
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
batch = batch_for_shader(shader, 'POINTS', {"pos": points})

View File

@ -60,7 +60,7 @@ def draw(self, context, splines, sequence_color, font_thickness, font_size, matr
first_point_co = Vector((i, 0, 0)) + first_point_co
points = draw_number(r, first_point_co, font_size)
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
batch = batch_for_shader(shader, 'LINES', {"pos": points})

View File

@ -4,7 +4,7 @@ bl_info = {
"name": "Grease Pencil Tools",
"description": "Extra tools for Grease Pencil",
"author": "Samuel Bernou, Antonio Vazquez, Daniel Martinez Lara, Matias Mendiola",
"version": (1, 7, 9),
"version": (1, 8, 0),
"blender": (3, 0, 0),
"location": "Sidebar > Grease Pencil > Grease Pencil Tools",
"warning": "",

View File

@ -235,7 +235,9 @@ def view_cage(obj):
if from_obj:
for o in other_gp:
for _ in range(len(o.grease_pencil_modifiers)):
bpy.ops.object.gpencil_modifier_move_up({'object':o}, modifier='tmp_lattice')
context_override = {'object': o}
with bpy.context.temp_override(**context_override):
bpy.ops.object.gpencil_modifier_move_up(modifier='tmp_lattice')
mod.object = cage
if from_obj:

View File

@ -2,11 +2,11 @@
import bpy
class GP_OT_camera_flip_x(bpy.types.Operator):
bl_idname = "gp.camera_flip_x"
class VIEW3D_OT_camera_flip_x(bpy.types.Operator):
bl_idname = "view3d.camera_flip_x"
bl_label = "Camera Flip X"
bl_description = "Invert active camera scale.x to flip view horizontally"
bl_options = {"REGISTER"}
bl_options = {"REGISTER", "UNDO_GROUPED"}
@classmethod
def poll(cls, context):
@ -18,7 +18,7 @@ class GP_OT_camera_flip_x(bpy.types.Operator):
return {"FINISHED"}
def register():
bpy.utils.register_class(GP_OT_camera_flip_x)
bpy.utils.register_class(VIEW3D_OT_camera_flip_x)
def unregister():
bpy.utils.unregister_class(GP_OT_camera_flip_x)
bpy.utils.unregister_class(VIEW3D_OT_camera_flip_x)

View File

@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-or-later
import bpy
import ssl
import urllib.request
import urllib.parse
import zipfile
@ -73,7 +74,6 @@ class GP_OT_install_brush_pack(bpy.types.Operator):
self._append_brushes(Path(self.temp) / blendname)
def execute(self, context):
import ssl
import tempfile
import os

View File

@ -71,10 +71,10 @@ def draw_callback_px(self, context):
## timer for debug purposes
# blf.position(font_id, 15, 30, 0)
# blf.size(font_id, 20, 72)
# blf.size(font_id, 20.0)
# blf.draw(font_id, "Time " + self.text)
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR') # initiate shader
shader = gpu.shader.from_builtin('UNIFORM_COLOR') # initiate shader
gpu.state.blend_set('ALPHA')
gpu.state.line_width_set(1.0)
@ -198,7 +198,7 @@ def draw_callback_px(self, context):
for icon_name, coord_list in icons.items():
texture = gpu.texture.from_image(self.icon_tex[icon_name])
for coords in coord_list:
shader_tex = gpu.shader.from_builtin('2D_IMAGE')
shader_tex = gpu.shader.from_builtin('IMAGE')
batch_icons = batch_for_shader(
shader_tex, 'TRI_FAN',
{
@ -228,7 +228,7 @@ def draw_callback_px(self, context):
# if i == self.ui_idx:
# ## color = self.active_layer_color # Color active name
# blf.position(font_id, self.text_x+1, self.text_pos[i]-1, 0)
# blf.size(font_id, self.text_size, 72)
# blf.size(font_id, self.text_size)
# blf.color(font_id, *self.active_layer_color)
# blf.draw(font_id, l.info)
if l.hide:
@ -240,7 +240,7 @@ def draw_callback_px(self, context):
color = self.other_layer_color
blf.position(font_id, self.text_x, self.text_pos[i], 0)
blf.size(font_id, self.text_size, 72)
blf.size(font_id, self.text_size)
blf.color(font_id, *color)
display_name = l.info if len(l.info) <= self.text_char_limit else l.info[:self.text_char_limit-3] + '...'
blf.draw(font_id, display_name)
@ -248,7 +248,7 @@ def draw_callback_px(self, context):
## Drag text
if self.dragging and self.drag_text:
blf.position(font_id, self.mouse.x + 5, self.mouse.y + 5, 0)
blf.size(font_id, self.text_size, 72)
blf.size(font_id, self.text_size)
blf.color(font_id, 1.0, 1.0, 1.0, 1.0)
if self.drag_text == 'opacity_level':
blf.draw(font_id, f'{self.gpl[self.ui_idx].opacity:.2f}')
@ -417,7 +417,7 @@ class GPT_OT_viewport_layer_nav_osd(bpy.types.Operator):
Vector((self.left, self.bottom)), Vector((self.right, self.bottom)),
Vector((self.left, self.top)), Vector((self.left, self.bottom)),
Vector((self.right, self.top)), Vector((self.right, self.bottom))]
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
self.batch_lines = batch_for_shader(
shader, 'LINES', {"pos": self.lines[2:]})

View File

@ -6,13 +6,11 @@ import bpy
import math
import mathutils
from bpy_extras.view3d_utils import location_3d_to_region_2d
from bpy.props import BoolProperty, EnumProperty
from time import time
## draw utils
import gpu
import blf
from gpu_extras.batch import batch_for_shader
from gpu_extras.presets import draw_circle_2d
def step_value(value, step):
'''return the step closer to the passed value'''
@ -29,7 +27,7 @@ def draw_callback_px(self, context):
# 50% alpha, 2 pixel width line
if context.area != self.current_area:
return
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
gpu.state.blend_set('ALPHA')
gpu.state.line_width_set(2.0)
@ -61,7 +59,7 @@ def draw_callback_px(self, context):
font_id = 0
## draw text debug infos
blf.position(font_id, 15, 30, 0)
blf.size(font_id, 20, 72)
blf.size(font_id, 20.0)
blf.draw(font_id, f'angle: {math.degrees(self.angle):.1f}')
@ -195,6 +193,8 @@ class RC_OT_RotateCanvas(bpy.types.Operator):
self.hud = prefs.canvas_use_hud
self.use_view_center = prefs.canvas_use_view_center
self.angle = 0.0
## Check if scene camera or local camera exists ?
# if (context.space_data.use_local_camera and context.space_data.camera) or context.scene.camera
self.in_cam = context.region_data.view_perspective == 'CAMERA'
## store ratio for view rotate correction
@ -216,7 +216,10 @@ class RC_OT_RotateCanvas(bpy.types.Operator):
if self.in_cam:
# Get camera from scene
self.cam = bpy.context.scene.camera
if context.space_data.use_local_camera and context.space_data.camera:
self.cam = context.space_data.camera
else:
self.cam = context.scene.camera
#return if one element is locked (else bypass location)
if self.cam.lock_rotation[:] != (False, False, False):

View File

@ -37,7 +37,7 @@ def draw_callback_px(self, context):
# text
font_id = 0
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR') # initiate shader
shader = gpu.shader.from_builtin('UNIFORM_COLOR') # initiate shader
gpu.state.blend_set('ALPHA')
gpu.state.line_width_set(1.0)
@ -77,14 +77,14 @@ def draw_callback_px(self, context):
blf.color(font_id, *self.color_text)
if self.use_hud_frame_current:
blf.position(font_id, self.mouse[0]+10, self.mouse[1]+10, 0)
blf.size(font_id, 30, self.dpi)
blf.size(font_id, 30 * (self.dpi / 72.0))
blf.draw(font_id, f'{self.new_frame:.0f}')
# Display frame offset text
if self.use_hud_frame_offset:
blf.position(font_id, self.mouse[0]+10,
self.mouse[1]+(40*self.ui_scale), 0)
blf.size(font_id, 16, self.dpi)
blf.size(font_id, 16 * (self.dpi / 72.0))
sign = '+' if self.offset > 0 else ''
blf.draw(font_id, f'{sign}{self.offset:.0f}')
@ -291,7 +291,7 @@ class GPTS_OT_time_scrub(bpy.types.Operator):
self.hud_lines += [(0, my), (width, my)]
# Prepare batchs to draw static parts
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR') # initiate shader
shader = gpu.shader.from_builtin('UNIFORM_COLOR') # initiate shader
self.batch_timeline = batch_for_shader(
shader, 'LINES', {"pos": self.hud_lines})

View File

@ -33,10 +33,10 @@ class GP_PT_sidebarPanel(bpy.types.Panel):
# View flip
if context.scene.camera and context.scene.camera.scale.x < 0:
row = layout.row(align=True)
row.operator('gp.camera_flip_x', text = 'Camera Mirror Flip', icon = 'MOD_MIRROR')
row.operator('view3d.camera_flip_x', text = 'Camera Mirror Flip', icon = 'MOD_MIRROR')
row.label(text='', icon='LOOP_BACK')
else:
layout.operator('gp.camera_flip_x', text = 'Camera Mirror Flip', icon = 'MOD_MIRROR')
layout.operator('view3d.camera_flip_x', text = 'Camera Mirror Flip', icon = 'MOD_MIRROR')
def menu_boxdeform_entry(self, context):

View File

@ -21,7 +21,7 @@ import bpy
from . import operator
def menu_func(self, context):
self.layout.operator(operator.DXFExporter.bl_idname, text="AutoCAD DXF")
self.layout.operator(operator.DXFExporter.bl_idname, text="AutoCAD DXF (.dxf)")
classes = (
operator.DXFExporter,

View File

@ -537,7 +537,7 @@ class IMPORT_OT_dxf(bpy.types.Operator):
def menu_func(self, context):
self.layout.operator(IMPORT_OT_dxf.bl_idname, text="AutoCAD DXF")
self.layout.operator(IMPORT_OT_dxf.bl_idname, text="AutoCAD DXF (.dxf)")
def register():

View File

@ -959,7 +959,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
plane.select_set(True)
# all done!
self.report({'INFO'}, "Added {} Image Plane(s)".format(len(planes)))
self.report({'INFO'}, tip_("Added {} Image Plane(s)").format(len(planes)))
# operate on a single image
def single_image_spec_to_plane(self, context, img_spec):

View File

@ -2,12 +2,12 @@
#
#
# Author : Clemens Barth (Blendphys@root-1.de)
# Homepage(Wiki) : http://development.root-1.de/Atomic_Blender.php
# Homepage(Wiki) : https://docs.blender.org/manual/en/dev/addons/import_export/mesh_atomic.html
#
# Start of project : 2011-08-31 by CB
# First publication in Blender : 2011-11-11 by CB
# Fusion of the PDB, XYZ and Panel : 2019-03-22 by CB
# Last modified : 2019-05-17
# Last modified : 2023-05-19
#
# Contributing authors
# ====================

View File

@ -96,8 +96,8 @@ class IMPORT_OT_pdb(Operator, ImportHelper):
name="Bonds", default=False,
description="Show double and triple bonds")
sticks_dist: FloatProperty(
name="", default = 1.1, min=1.0, max=3.0,
description="Distance between sticks measured in stick diameter")
name="", default = 0.8, min=0.0, max=3.0,
description="Distance between sticks (double or tripple bonds) measured in stick diameter")
use_sticks_one_object: BoolProperty(
name="One object", default=False,
description="All sticks are one object")
@ -184,7 +184,10 @@ class IMPORT_OT_pdb(Operator, ImportHelper):
col = row.column()
col.active = self.use_sticks_one_object
col.prop(self, "use_sticks_one_object_nr")
row = box.row()
row.active = self.use_sticks and self.use_sticks_bonds
row.label(text="Distance")
row.prop(self, "sticks_dist")
def execute(self, context):
# Switch to 'OBJECT' mode when in 'EDIT' mode.

View File

@ -556,7 +556,7 @@ def camera_light_source(use_camera,
camera_factor = 15.0
# If chosen a camera is put into the scene.
# If chosen, a camera is put into the scene.
if use_camera == True:
# Assume that the object is put into the global origin. Then, the
@ -850,7 +850,7 @@ def draw_sticks_dupliverts(all_atoms,
i = 0
# What follows is school mathematics! :-) We construct equidistant
# planes, on which the stcik sections (cylinders) are perpendicular on.
# planes, on which the stick sections (cylinders) are perpendicular on.
for stick in stick_list:
dv = stick[2]
@ -1100,6 +1100,7 @@ def draw_sticks_normal(all_atoms,
center,
Stick_diameter,
Stick_sectors,
Stick_dist,
use_sticks_smooth,
use_sticks_one_object,
use_sticks_one_object_nr,
@ -1117,52 +1118,96 @@ def draw_sticks_normal(all_atoms,
list_group = []
list_group_sub = []
counter = 0
for stick in all_sticks:
for i, stick in enumerate(all_sticks):
# We treat here single, double and tripple bonds: stick.number <= 3
for repeat in range(stick.number):
# The vectors of the two atoms
atom1 = all_atoms[stick.atom1-1].location-center
atom2 = all_atoms[stick.atom2-1].location-center
# Location
atom1 = copy(all_atoms[stick.atom1-1].location)-center
atom2 = copy(all_atoms[stick.atom2-1].location)-center
dist = Stick_diameter * Stick_dist
# The two sticks are on the left and right of the middle connection.
if stick.number == 2:
if repeat == 0:
atom1 += (stick.dist * dist)
atom2 += (stick.dist * dist)
if repeat == 1:
atom1 -= (stick.dist * dist)
atom2 -= (stick.dist * dist)
if stick.number == 3:
if repeat == 0:
atom1 += (stick.dist * dist)
atom2 += (stick.dist * dist)
if repeat == 2:
atom1 -= (stick.dist * dist)
atom2 -= (stick.dist * dist)
# Vector pointing along the stick direction
dv = atom1 - atom2
# The normalized vector of this, with lenght 1
n = dv / dv.length
# Starting point of the stick
location = (atom1 + atom2) * 0.5
# The difference of both vectors
v = (atom2 - atom1)
# Angle with respect to the z-axis
angle = v.angle(up_axis, 0)
angle = dv.angle(up_axis, 0)
# Cross-product between v and the z-axis vector. It is the
# vector of rotation.
axis = up_axis.cross(v)
axis = up_axis.cross(dv)
# Calculate Euler angles
euler = Matrix.Rotation(angle, 4, axis).to_euler()
# Create stick
stick = bpy.ops.mesh.primitive_cylinder_add(vertices=Stick_sectors,
stick_obj = bpy.ops.mesh.primitive_cylinder_add(vertices=Stick_sectors,
radius=Stick_diameter,
depth=v.length,
depth=dv.length,
end_fill_type='NGON',
align='WORLD',
enter_editmode=False,
location=location,
rotation=(0, 0, 0))
# Put the stick into the scene ...
stick = bpy.context.view_layer.objects.active
stick_obj = bpy.context.view_layer.objects.active
# ... and rotate the stick.
stick.rotation_euler = euler
stick_obj.rotation_euler = euler
# ... and name
stick.name = "Stick_Cylinder"
if stick.number == 1:
stick_obj.name = "Stick_Cylinder_%04d" %(i)
elif stick.number == 2:
if repeat == 0:
stick_obj.name = "Stick_Cylinder_%04d" %(i) + "_left"
elif repeat == 1:
stick_obj.name = "Stick_Cylinder_%04d" %(i) + "_right"
elif stick.number == 3:
if repeat == 0:
stick_obj.name = "Stick_Cylinder_%04d" %(i) + "_left"
elif repeat == 1:
stick_obj.name = "Stick_Cylinder_%04d" %(i) + "_middle"
elif repeat == 2:
stick_obj.name = "Stick_Cylinder_%04d" %(i) + "_right"
# Never occurs:
else:
stick_obj.name = "Stick_Cylinder"
# Never occurs:
else:
stick_obj.name = "Stick_Cylinder"
counter += 1
# Smooth the cylinder.
if use_sticks_smooth == True:
bpy.ops.object.select_all(action='DESELECT')
stick.select_set(True)
stick_obj.select_set(True)
bpy.ops.object.shade_smooth()
list_group_sub.append(stick)
list_group_sub.append(stick_obj)
if use_sticks_one_object == True:
if counter == use_sticks_one_object_nr:
bpy.ops.object.select_all(action='DESELECT')
for stick in list_group_sub:
stick.select_set(True)
for stick_select in list_group_sub:
stick_select.select_set(True)
bpy.ops.object.join()
list_group.append(bpy.context.view_layer.objects.active)
bpy.ops.object.select_all(action='DESELECT')
@ -1170,7 +1215,7 @@ def draw_sticks_normal(all_atoms,
counter = 0
else:
# Material ...
stick.active_material = stick_material
stick_obj.active_material = stick_material
if use_sticks_one_object == True:
bpy.ops.object.select_all(action='DESELECT')
@ -1531,6 +1576,7 @@ def import_pdb(Ball_type,
object_center_vec,
Stick_diameter,
Stick_sectors,
Stick_dist,
use_sticks_smooth,
use_sticks_one_object,
use_sticks_one_object_nr,

View File

@ -1,282 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Stanford PLY format",
    "author": "Bruce Merry, Campbell Barton, Bastien Montagne, Mikhail Rachinsky",
    "version": (2, 2, 0),
    "blender": (3, 0, 0),
    "location": "File > Import/Export",
    "description": "Import-Export PLY mesh data with UVs and vertex colors",
    "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/mesh_ply.html",
    "support": 'OFFICIAL',
    "category": "Import-Export",
}

# Copyright (C) 2004, 2005: Bruce Merry, bmerry@cs.uct.ac.za
# Contributors: Bruce Merry, Campbell Barton

# Support Blender's "Reload Scripts": re-import submodules when this
# module is executed again while already loaded.
if "bpy" in locals():
    import importlib
    if "export_ply" in locals():
        importlib.reload(export_ply)
    if "import_ply" in locals():
        importlib.reload(import_ply)
import bpy
from bpy.props import (
CollectionProperty,
StringProperty,
BoolProperty,
FloatProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
axis_conversion,
orientation_helper,
)
class ImportPLY(bpy.types.Operator, ImportHelper):
    """Load a PLY geometry file"""
    bl_idname = "import_mesh.ply"
    bl_label = "Import PLY"
    bl_options = {'UNDO'}

    files: CollectionProperty(
        name="File Path",
        description="File path used for importing the PLY file",
        type=bpy.types.OperatorFileListElement,
    )

    # Hide operator properties, rest of this is managed in C. See WM_operator_properties_filesel().
    hide_props_region: BoolProperty(
        name="Hide Operator Properties",
        description="Collapse the region displaying the operator settings",
        default=True,
    )

    directory: StringProperty()

    filename_ext = ".ply"
    filter_glob: StringProperty(default="*.ply", options={'HIDDEN'})

    def execute(self, context):
        """Import every selected file, falling back to the single filepath."""
        import os

        from . import import_ply

        # Show a busy cursor while (potentially large) files are parsed.
        context.window.cursor_set('WAIT')

        selected_paths = []
        for file_entry in self.files:
            selected_paths.append(os.path.join(self.directory, file_entry.name))
        if not selected_paths:
            selected_paths.append(self.filepath)

        for ply_path in selected_paths:
            import_ply.load(self, context, ply_path)

        context.window.cursor_set('DEFAULT')
        return {'FINISHED'}
@orientation_helper(axis_forward='Y', axis_up='Z')
class ExportPLY(bpy.types.Operator, ExportHelper):
    bl_idname = "export_mesh.ply"
    bl_label = "Export PLY"
    bl_description = "Export as a Stanford PLY with normals, vertex colors and texture coordinates"

    filename_ext = ".ply"
    filter_glob: StringProperty(default="*.ply", options={'HIDDEN'})

    use_ascii: BoolProperty(
        name="ASCII",
        description="Export using ASCII file format, otherwise use binary",
    )
    use_selection: BoolProperty(
        name="Selection Only",
        description="Export selected objects only",
        default=False,
    )
    use_mesh_modifiers: BoolProperty(
        name="Apply Modifiers",
        description="Apply Modifiers to the exported mesh",
        default=True,
    )
    use_normals: BoolProperty(
        name="Normals",
        description="Export vertex normals",
        default=True,
    )
    use_uv_coords: BoolProperty(
        name="UVs",
        description="Export the active UV layer (will split edges by seams)",
        default=True,
    )
    use_colors: BoolProperty(
        name="Vertex Colors",
        description="Export the active vertex color layer",
        default=True,
    )
    global_scale: FloatProperty(
        name="Scale",
        min=0.01,
        max=1000.0,
        default=1.0,
    )

    def execute(self, context):
        """Collect operator settings and hand off to export_ply.save()."""
        from mathutils import Matrix

        from . import export_ply

        # Busy cursor while the (potentially slow) export runs.
        context.window.cursor_set('WAIT')

        # Everything except the orientation/scale helpers is forwarded as-is.
        keywords = self.as_keywords(
            ignore=(
                "axis_forward",
                "axis_up",
                "global_scale",
                "check_existing",
                "filter_glob",
            )
        )
        # Fold axis re-orientation and uniform scale into one matrix.
        rotation = axis_conversion(
            to_forward=self.axis_forward,
            to_up=self.axis_up,
        ).to_4x4()
        scale = Matrix.Scale(self.global_scale, 4)
        keywords["global_matrix"] = rotation @ scale

        export_ply.save(context, **keywords)

        context.window.cursor_set('DEFAULT')

        return {'FINISHED'}

    def draw(self, context):
        """Main operator UI; the per-section panels below draw the rest."""
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        active_op = context.space_data.active_operator

        format_col = layout.column(heading="Format")
        format_col.prop(active_op, "use_ascii")
class PLY_PT_export_include(bpy.types.Panel):
    """File-browser sub-panel with the 'Include' export options."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Include"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Only show alongside the PLY export operator's file browser.
        return context.space_data.active_operator.bl_idname == "EXPORT_MESH_OT_ply"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        active_op = context.space_data.active_operator
        layout.prop(active_op, "use_selection")
class PLY_PT_export_transform(bpy.types.Panel):
    """File-browser sub-panel with the 'Transform' export options."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Transform"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Only show alongside the PLY export operator's file browser.
        return context.space_data.active_operator.bl_idname == "EXPORT_MESH_OT_ply"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        active_op = context.space_data.active_operator
        layout.prop(active_op, "axis_forward")
        layout.prop(active_op, "axis_up")
        layout.prop(active_op, "global_scale")
class PLY_PT_export_geometry(bpy.types.Panel):
    """File-browser sub-panel with the 'Geometry' export options."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Geometry"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Only show alongside the PLY export operator's file browser.
        return context.space_data.active_operator.bl_idname == "EXPORT_MESH_OT_ply"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        active_op = context.space_data.active_operator
        layout.prop(active_op, "use_mesh_modifiers")
        layout.prop(active_op, "use_normals")
        layout.prop(active_op, "use_uv_coords")
        layout.prop(active_op, "use_colors")
def menu_func_import(self, context):
    """File > Import menu entry for PLY."""
    layout = self.layout
    layout.operator(ImportPLY.bl_idname, text="Stanford (.ply)")
def menu_func_export(self, context):
    """File > Export menu entry for PLY."""
    layout = self.layout
    layout.operator(ExportPLY.bl_idname, text="Stanford (.ply)")
# All classes registered/unregistered together by register()/unregister().
classes = (
    ImportPLY,
    ExportPLY,
    PLY_PT_export_include,
    PLY_PT_export_transform,
    PLY_PT_export_geometry,
)
def register():
    """Register the add-on's classes and File menu entries."""
    for registrable in classes:
        bpy.utils.register_class(registrable)

    bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
def unregister():
    """Remove the add-on's classes and File menu entries."""
    for registrable in classes:
        bpy.utils.unregister_class(registrable)

    bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
# Allow running the add-on module directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()

View File

@ -1,212 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-or-later
"""
This script exports Stanford PLY files from Blender. It supports normals,
colors, and texture coordinates per face or per vertex.
"""
import bpy
def _write_binary(fw, ply_verts: list, ply_faces: list) -> None:
from struct import pack
# Vertex data
# ---------------------------
for v, normal, uv, color in ply_verts:
fw(pack("<3f", *v.co))
if normal is not None:
fw(pack("<3f", *normal))
if uv is not None:
fw(pack("<2f", *uv))
if color is not None:
fw(pack("<4B", *color))
# Face data
# ---------------------------
for pf in ply_faces:
length = len(pf)
fw(pack(f"<B{length}I", length, *pf))
def _write_ascii(fw, ply_verts: list, ply_faces: list) -> None:
# Vertex data
# ---------------------------
for v, normal, uv, color in ply_verts:
fw(b"%.6f %.6f %.6f" % v.co[:])
if normal is not None:
fw(b" %.6f %.6f %.6f" % normal[:])
if uv is not None:
fw(b" %.6f %.6f" % uv)
if color is not None:
fw(b" %u %u %u %u" % color)
fw(b"\n")
# Face data
# ---------------------------
for pf in ply_faces:
fw(b"%d" % len(pf))
for index in pf:
fw(b" %d" % index)
fw(b"\n")
def save_mesh(filepath, bm, use_ascii, use_normals, use_uv, use_color):
    """Flatten bmesh *bm* into PLY vertex/face tables and write *filepath*."""
    uv_lay = bm.loops.layers.uv.active
    col_lay = bm.loops.layers.color.active

    # Only export layers that actually exist on the mesh.
    use_uv = use_uv and uv_lay is not None
    use_color = use_color and col_lay is not None

    normal = uv = color = None

    ply_faces = []
    ply_verts = []
    ply_vert_map = {}
    ply_vert_id = 0

    for face in bm.faces:
        face_indices = []
        ply_faces.append(face_indices)

        for loop in face.loops:
            vert = loop.vert

            # Identify vertex by pointer unless exporting UVs,
            # in which case id by UV coordinate (will split edges by seams).
            map_id = vert
            if use_uv:
                uv = loop[uv_lay].uv[:]
                map_id = vert, uv

            known_id = ply_vert_map.get(map_id)
            if known_id is not None:
                face_indices.append(known_id)
                continue

            if use_normals:
                normal = vert.normal
            if use_color:
                color = tuple(int(x * 255.0) for x in loop[col_lay])

            ply_verts.append((vert, normal, uv, color))
            ply_vert_map[map_id] = ply_vert_id
            face_indices.append(ply_vert_id)
            ply_vert_id += 1

    with open(filepath, "wb") as file:
        fw = file.write
        file_format = b"ascii" if use_ascii else b"binary_little_endian"

        # Header
        # ---------------------------
        fw(b"ply\n")
        fw(b"format %s 1.0\n" % file_format)
        fw(b"comment Created by Blender %s - www.blender.org\n" % bpy.app.version_string.encode("utf-8"))

        fw(b"element vertex %d\n" % len(ply_verts))
        fw(
            b"property float x\n"
            b"property float y\n"
            b"property float z\n"
        )
        if use_normals:
            fw(
                b"property float nx\n"
                b"property float ny\n"
                b"property float nz\n"
            )
        if use_uv:
            fw(
                b"property float s\n"
                b"property float t\n"
            )
        if use_color:
            fw(
                b"property uchar red\n"
                b"property uchar green\n"
                b"property uchar blue\n"
                b"property uchar alpha\n"
            )

        fw(b"element face %d\n" % len(ply_faces))
        fw(b"property list uchar uint vertex_indices\n")
        fw(b"end_header\n")

        # Geometry
        # ---------------------------
        writer = _write_ascii if use_ascii else _write_binary
        writer(fw, ply_verts, ply_faces)
def save(
    context,
    filepath="",
    use_ascii=False,
    use_selection=False,
    use_mesh_modifiers=True,
    use_normals=True,
    use_uv_coords=True,
    use_colors=True,
    global_matrix=None,
):
    """Export objects from *context* to *filepath* as a single PLY mesh.

    All exported objects are joined into one bmesh, transformed into world
    space, optionally re-oriented/scaled by *global_matrix*, then written
    via save_mesh().

    context            -- Blender context (selection, scene, depsgraph).
    filepath           -- output .ply path.
    use_ascii          -- write ASCII instead of binary little-endian.
    use_selection      -- export only selected objects (else whole scene).
    use_mesh_modifiers -- evaluate modifiers before export.
    use_normals / use_uv_coords / use_colors -- which layers to export.
    global_matrix      -- optional 4x4 applied to the joined mesh.
    """
    import time
    import bmesh

    t = time.time()

    # to_mesh() below requires object mode.
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    if use_selection:
        obs = context.selected_objects
    else:
        obs = context.scene.objects

    depsgraph = context.evaluated_depsgraph_get()
    bm = bmesh.new()

    for ob in obs:
        # Evaluated object has modifiers applied; raw object otherwise.
        if use_mesh_modifiers:
            ob_eval = ob.evaluated_get(depsgraph)
        else:
            ob_eval = ob

        try:
            me = ob_eval.to_mesh()
        except RuntimeError:
            # Object type cannot be converted to a mesh -- skip it.
            continue

        # Bake the object's transform into the vertices, accumulate, then
        # free the temporary evaluated mesh.
        me.transform(ob.matrix_world)
        bm.from_mesh(me)
        ob_eval.to_mesh_clear()

    # Workaround for hardcoded unsigned char limit in other DCCs PLY importers
    if (ngons := [f for f in bm.faces if len(f.verts) > 255]):
        bmesh.ops.triangulate(bm, faces=ngons)

    if global_matrix is not None:
        bm.transform(global_matrix)

    if use_normals:
        bm.normal_update()

    save_mesh(
        filepath,
        bm,
        use_ascii,
        use_normals,
        use_uv_coords,
        use_colors,
    )

    bm.free()

    t_delta = time.time() - t
    print(f"Export completed {filepath!r} in {t_delta:.3f}")

View File

@ -1,435 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-or-later
class ElementSpec:
    """One 'element' declaration from a PLY header (e.g. vertex, face)."""

    __slots__ = (
        "name",
        "count",
        "properties",
    )

    def __init__(self, name, count):
        self.name = name          # element name (bytes)
        self.count = count        # number of records of this element
        self.properties = []      # PropertySpec entries, in file order

    def load(self, format, stream):
        """Read one record: one value per declared property, in order."""
        # An ASCII record occupies a single line; pre-split it into tokens.
        if format == b'ascii':
            stream = stream.readline().split()
        record = []
        for prop in self.properties:
            record.append(prop.load(format, stream))
        return record

    def index(self, name):
        """Return the position of property *name*, or -1 when absent."""
        return next(
            (pos for pos, prop in enumerate(self.properties) if prop.name == name),
            -1,
        )
class PropertySpec:
    """One 'property' declaration from a PLY header."""

    __slots__ = (
        "name",
        "list_type",
        "numeric_type",
    )

    def __init__(self, name, list_type, numeric_type):
        self.name = name                  # property name (bytes)
        self.list_type = list_type        # struct code of the list-count field, None for scalars
        self.numeric_type = numeric_type  # struct code of the value(s)

    def read_format(self, format, count, num_type, stream):
        """Read *count* values of *num_type*.

        ``format`` is b'ascii' (stream is a mutable token list that gets
        consumed) or a struct byte-order prefix such as '<' / '>'
        (stream is a binary file object).
        """
        import struct

        if format == b'ascii':
            if num_type == 's':
                strings = []
                for i in range(count):
                    token = stream[i]
                    # Strings must be quoted; embedded whitespace is unsupported.
                    if not (len(token) >= 2 and token.startswith(b'"') and token.endswith(b'"')):
                        print("Invalid string", token)
                        print("Note: ply_import.py does not handle whitespace in strings")
                        return None
                    strings.append(token[1:-1])
                stream[:count] = []
                return strings

            convert = float if num_type in {'f', 'd'} else int
            values = [convert(token) for token in stream[:count]]
            stream[:count] = []
            return values

        if num_type == 's':
            strings = []
            for _ in range(count):
                # Length-prefixed string: an int, then the bytes (NULL-terminated).
                fmt = format + 'i'
                data = stream.read(struct.calcsize(fmt))
                length = struct.unpack(fmt, data)[0]
                fmt = '%s%is' % (format, length)
                data = stream.read(struct.calcsize(fmt))
                raw = struct.unpack(fmt, data)[0]
                strings.append(raw[:-1])  # strip the NULL
            return strings

        fmt = '%s%i%s' % (format, count, num_type)
        data = stream.read(struct.calcsize(fmt))
        return struct.unpack(fmt, data)

    def load(self, format, stream):
        """Read this property's value for one record (a list when list-typed)."""
        if self.list_type is None:
            return self.read_format(format, 1, self.numeric_type, stream)[0]
        count = int(self.read_format(format, 1, self.list_type, stream)[0])
        return self.read_format(format, count, self.numeric_type, stream)
class ObjectSpec:
    """All element declarations of a PLY file, in header order."""

    __slots__ = ("specs",)

    def __init__(self):
        # A list of element_specs
        self.specs = []

    def load(self, format, stream):
        """Read every record of every element; map element name -> records."""
        records_by_name = {}
        for element in self.specs:
            records_by_name[element.name] = [
                element.load(format, stream) for _ in range(element.count)
            ]
        return records_by_name
def read(filepath):
    """Parse the PLY file at *filepath*.

    Returns a (obj_spec, obj, texture) triple:
      obj_spec -- ObjectSpec describing the header's elements/properties,
      obj      -- dict mapping element name (bytes) to its list of records,
      texture  -- texture file name from a 'comment TextureFile' line (b'' if none).
    On any signature/header error, returns (None, None, None).
    """
    import re

    format = b''
    texture = b''
    version = b'1.0'

    # Header format keyword -> struct byte-order prefix (or the b'ascii'
    # marker handled specially by the spec classes).
    format_specs = {
        b'binary_little_endian': '<',
        b'binary_big_endian': '>',
        b'ascii': b'ascii',
    }
    # PLY type names -> struct format codes.
    type_specs = {
        b'char': 'b',
        b'uchar': 'B',
        b'int8': 'b',
        b'uint8': 'B',
        b'int16': 'h',
        b'uint16': 'H',
        b'short': 'h',
        b'ushort': 'H',
        b'int': 'i',
        b'int32': 'i',
        b'uint': 'I',
        b'uint32': 'I',
        b'float': 'f',
        b'float32': 'f',
        b'float64': 'd',
        b'double': 'd',
        b'string': 's',
    }
    obj_spec = ObjectSpec()
    invalid_ply = (None, None, None)

    with open(filepath, 'rb') as plyf:
        signature = plyf.peek(5)

        if not signature.startswith(b'ply') or not len(signature) >= 5:
            print("Signature line was invalid")
            return invalid_ply

        # Detect "\r" / "\r\n" line endings from the byte right after b'ply'.
        custom_line_sep = None
        if signature[3] != ord(b'\n'):
            if signature[3] != ord(b'\r'):
                print("Unknown line separator")
                return invalid_ply
            if signature[4] == ord(b'\n'):
                custom_line_sep = b"\r\n"
            else:
                custom_line_sep = b"\r"

        # Work around binary file reading only accepting "\n" as line separator.
        plyf_header_line_iterator = lambda plyf: plyf
        if custom_line_sep is not None:
            def _plyf_header_line_iterator(plyf):
                # Peek, split on the custom separator, and only consume
                # (read) the bytes of lines actually yielded, so the body
                # reader continues at the right offset after the header.
                buff = plyf.peek(2**16)
                while len(buff) != 0:
                    read_bytes = 0
                    buff = buff.split(custom_line_sep)
                    for line in buff[:-1]:
                        read_bytes += len(line) + len(custom_line_sep)
                        if line.startswith(b'end_header'):
                            # Since reader code might (will) break iteration at this point,
                            # we have to ensure file is read up to here, yield, and return...
                            plyf.read(read_bytes)
                            yield line
                            return
                        yield line
                    plyf.read(read_bytes)
                    buff = buff[-1] + plyf.peek(2**16)
            plyf_header_line_iterator = _plyf_header_line_iterator

        valid_header = False
        for line in plyf_header_line_iterator(plyf):
            tokens = re.split(br'[ \r\n]+', line)

            if len(tokens) == 0:
                continue
            if tokens[0] == b'end_header':
                valid_header = True
                break
            elif tokens[0] == b'comment':
                if len(tokens) < 2:
                    continue
                elif tokens[1] == b'TextureFile':
                    # NOTE(review): requires >= 4 tokens — presumably allows for
                    # a trailing empty token from the line ending; confirm.
                    if len(tokens) < 4:
                        print("Invalid texture line")
                    else:
                        texture = tokens[2]
                continue
            elif tokens[0] == b'obj_info':
                continue
            elif tokens[0] == b'format':
                if len(tokens) < 3:
                    print("Invalid format line")
                    return invalid_ply
                if tokens[1] not in format_specs:
                    print("Unknown format", tokens[1])
                    return invalid_ply
                # Only PLY version 1.0 is accepted.
                try:
                    version_test = float(tokens[2])
                except Exception as ex:
                    print("Unknown version", ex)
                    version_test = None
                if version_test != float(version):
                    print("Unknown version", tokens[2])
                    return invalid_ply
                del version_test
                format = tokens[1]
            elif tokens[0] == b'element':
                # 'element <name> <count>'
                if len(tokens) < 3:
                    print("Invalid element line")
                    return invalid_ply
                obj_spec.specs.append(ElementSpec(tokens[1], int(tokens[2])))
            elif tokens[0] == b'property':
                # Properties attach to the most recently declared element.
                if not len(obj_spec.specs):
                    print("Property without element")
                    return invalid_ply
                if tokens[1] == b'list':
                    # 'property list <count-type> <value-type> <name>'
                    obj_spec.specs[-1].properties.append(PropertySpec(tokens[4], type_specs[tokens[2]], type_specs[tokens[3]]))
                else:
                    # 'property <type> <name>'
                    obj_spec.specs[-1].properties.append(PropertySpec(tokens[2], None, type_specs[tokens[1]]))
        if not valid_header:
            print("Invalid header ('end_header' line not found!)")
            return invalid_ply

        # Body: read every record of every declared element.
        obj = obj_spec.load(format_specs[format], plyf)

    return obj_spec, obj, texture
def load_ply_mesh(filepath, ply_name):
    """Parse a PLY file and build a ``bpy.types.Mesh`` from its data.

    :arg filepath: path of the ``.ply`` file to read.
    :arg ply_name: name for the new mesh datablock.
    :return: the created mesh, or ``None`` if the file could not be parsed.
    """
    import bpy

    obj_spec, obj, texture = read(filepath)
    # XXX28: use texture
    if obj is None:
        print("Invalid file")
        return

    uvindices = colindices = None
    colmultiply = None

    # TODO import normals
    # noindices = None

    # Resolve each PLY element's property names to column indices so the raw
    # rows can be unpacked positionally below (el.index returns -1 when a
    # property is absent).
    for el in obj_spec.specs:
        if el.name == b'vertex':
            vindices_x, vindices_y, vindices_z = el.index(b'x'), el.index(b'y'), el.index(b'z')
            # noindices = (el.index('nx'), el.index('ny'), el.index('nz'))
            # if -1 in noindices: noindices = None
            uvindices = (el.index(b's'), el.index(b't'))
            # UVs need both s and t; drop them if either is missing.
            if -1 in uvindices:
                uvindices = None
            # ignore alpha if not present
            if el.index(b'alpha') == -1:
                colindices = el.index(b'red'), el.index(b'green'), el.index(b'blue')
            else:
                colindices = el.index(b'red'), el.index(b'green'), el.index(b'blue'), el.index(b'alpha')
            if -1 in colindices:
                if any(idx > -1 for idx in colindices):
                    print("Warning: At least one obligatory color channel is missing, ignoring vertex colors.")
                colindices = None
            else:  # if not a float assume uchar
                # Non-float channels are scaled from 0..255 down to 0.0..1.0.
                colmultiply = [1.0 if el.properties[i].numeric_type in {'f', 'd'} else (1.0 / 255.0) for i in colindices]

        elif el.name == b'face':
            findex = el.index(b'vertex_indices')
        elif el.name == b'tristrips':
            trindex = el.index(b'vertex_indices')
        elif el.name == b'edge':
            eindex1, eindex2 = el.index(b'vertex1'), el.index(b'vertex2')

    mesh_faces = []
    mesh_uvs = []
    mesh_colors = []

    def add_face(vertices, indices, uvindices, colindices):
        # Record one polygon plus its per-corner UVs/colors (flattened in
        # loop order, matching how they are written into the mesh below).
        mesh_faces.append(indices)
        if uvindices:
            mesh_uvs.extend([(vertices[index][uvindices[0]], vertices[index][uvindices[1]]) for index in indices])
        if colindices:
            if len(colindices) == 3:
                # File has no alpha channel: pad with opaque alpha.
                mesh_colors.extend([
                    (
                        vertices[index][colindices[0]] * colmultiply[0],
                        vertices[index][colindices[1]] * colmultiply[1],
                        vertices[index][colindices[2]] * colmultiply[2],
                        1.0,
                    )
                    for index in indices
                ])
            elif len(colindices) == 4:
                mesh_colors.extend([
                    (
                        vertices[index][colindices[0]] * colmultiply[0],
                        vertices[index][colindices[1]] * colmultiply[1],
                        vertices[index][colindices[2]] * colmultiply[2],
                        vertices[index][colindices[3]] * colmultiply[3],
                    )
                    for index in indices
                ])

    if uvindices or colindices:
        # If we have Cols or UVs then we need to check the face order.
        add_face_simple = add_face

        # EVIL EEKADOODLE - face order annoyance.
        # Rotate the indices so vertex index 0 does not sit in the last
        # slot(s) of a tri/quad before handing off to add_face_simple.
        def add_face(vertices, indices, uvindices, colindices):
            if len(indices) == 4:
                if indices[2] == 0 or indices[3] == 0:
                    indices = indices[2], indices[3], indices[0], indices[1]
            elif len(indices) == 3:
                if indices[2] == 0:
                    indices = indices[1], indices[2], indices[0]

            add_face_simple(vertices, indices, uvindices, colindices)

    verts = obj[b'vertex']

    if b'face' in obj:
        for f in obj[b'face']:
            ind = f[findex]
            add_face(verts, ind, uvindices, colindices)

    if b'tristrips' in obj:
        # Expand each triangle strip into individual triangles.
        for t in obj[b'tristrips']:
            ind = t[trindex]
            len_ind = len(ind)
            for j in range(len_ind - 2):
                add_face(verts, (ind[j], ind[j + 1], ind[j + 2]), uvindices, colindices)

    mesh = bpy.data.meshes.new(name=ply_name)

    mesh.vertices.add(len(obj[b'vertex']))
    mesh.vertices.foreach_set("co", [a for v in obj[b'vertex'] for a in (v[vindices_x], v[vindices_y], v[vindices_z])])

    if b'edge' in obj:
        mesh.edges.add(len(obj[b'edge']))
        mesh.edges.foreach_set("vertices", [a for e in obj[b'edge'] for a in (e[eindex1], e[eindex2])])

    if mesh_faces:
        # Build the flat loop (corner) index array and per-polygon loop
        # starts expected by Mesh.loops/Mesh.polygons foreach_set.
        loops_vert_idx = []
        faces_loop_start = []
        lidx = 0
        for f in mesh_faces:
            nbr_vidx = len(f)
            loops_vert_idx.extend(f)
            faces_loop_start.append(lidx)
            lidx += nbr_vidx

        mesh.loops.add(len(loops_vert_idx))
        mesh.polygons.add(len(mesh_faces))

        mesh.loops.foreach_set("vertex_index", loops_vert_idx)
        mesh.polygons.foreach_set("loop_start", faces_loop_start)

        # mesh_uvs/mesh_colors were appended in the same loop order, so a
        # straight positional copy into the new layers is correct.
        if uvindices:
            uv_layer = mesh.uv_layers.new()
            for i, uv in enumerate(uv_layer.data):
                uv.uv = mesh_uvs[i]

        if colindices:
            vcol_lay = mesh.vertex_colors.new()
            for i, col in enumerate(vcol_lay.data):
                col.color[0] = mesh_colors[i][0]
                col.color[1] = mesh_colors[i][1]
                col.color[2] = mesh_colors[i][2]
                col.color[3] = mesh_colors[i][3]

    mesh.update()
    mesh.validate()

    if texture and uvindices:
        pass
        # TODO add support for using texture.

        # import os
        # import sys
        # from bpy_extras.image_utils import load_image

        # encoding = sys.getfilesystemencoding()
        # encoded_texture = texture.decode(encoding=encoding)
        # name = bpy.path.display_name_from_filepath(texture)
        # image = load_image(encoded_texture, os.path.dirname(filepath), recursive=True, place_holder=True)

        # if image:
        #     texture = bpy.data.textures.new(name=name, type='IMAGE')
        #     texture.image = image

        #     material = bpy.data.materials.new(name=name)
        #     material.use_shadeless = True

        #     mtex = material.texture_slots.add()
        #     mtex.texture = texture
        #     mtex.texture_coords = 'UV'
        #     mtex.use_map_color_diffuse = True

        #     mesh.materials.append(material)
        #     for face in mesh.uv_textures[0].data:
        #         face.image = image

    return mesh
def load_ply(filepath):
    """Import *filepath* as a new mesh object linked into the active collection.

    :arg filepath: path of the ``.ply`` file to import.
    :return: ``{'FINISHED'}`` on success, ``{'CANCELLED'}`` when the file
        could not be loaded as a mesh.
    """
    import time
    import bpy

    start_time = time.time()
    display_name = bpy.path.display_name_from_filepath(filepath)

    mesh = load_ply_mesh(filepath, display_name)
    if not mesh:
        return {'CANCELLED'}

    # Deselect everything so only the freshly imported object ends up
    # selected and active.
    for selected_object in bpy.context.selected_objects:
        selected_object.select_set(False)

    imported_object = bpy.data.objects.new(display_name, mesh)
    bpy.context.collection.objects.link(imported_object)
    bpy.context.view_layer.objects.active = imported_object
    imported_object.select_set(True)

    print("\nSuccessfully imported %r in %.3f sec" % (filepath, time.time() - start_time))
    return {'FINISHED'}
def load(operator, context, filepath=""):
    """Operator entry point: delegate the actual import to ``load_ply``.

    ``operator`` and ``context`` are accepted for the operator calling
    convention but are not used here.
    """
    result = load_ply(filepath)
    return result

View File

@ -70,7 +70,7 @@ def draw_background_colors(face_data, opacity):
indices.extend([index + offset for index in triangle] for triangle in triangles)
offset += len(uvs)
shader = gpu.shader.from_builtin('2D_FLAT_COLOR')
shader = gpu.shader.from_builtin('FLAT_COLOR')
batch = batch_for_shader(
shader, 'TRIS',
{"pos": coords, "color": colors},

View File

@ -16,7 +16,7 @@ import bpy
bl_info = {
"name": "Autodesk 3DS format",
"author": "Bob Holcomb, Campbell Barton, Andreas Atteneder, Sebastian Schrand",
"version": (2, 3, 4),
"version": (2, 4, 1),
"blender": (3, 6, 0),
"location": "File > Import-Export",
"description": "3DS Import/Export meshes, UVs, materials, textures, "
@ -65,12 +65,12 @@ class Import3DS(bpy.types.Operator, ImportHelper):
"importing incorrectly",
default=True,
)
read_keyframe: bpy.props.BoolProperty(
read_keyframe: BoolProperty(
name="Read Keyframe",
description="Read the keyframe data",
default=True,
)
use_world_matrix: bpy.props.BoolProperty(
use_world_matrix: BoolProperty(
name="World Space",
description="Transform to matrix world",
default=False,
@ -109,6 +109,11 @@ class Export3DS(bpy.types.Operator, ExportHelper):
description="Export selected objects only",
default=False,
)
write_keyframe: BoolProperty(
name="Write Keyframe",
description="Write the keyframe data",
default=False,
)
def execute(self, context):
from . import export_3ds

File diff suppressed because it is too large Load Diff

View File

@ -165,6 +165,7 @@ global scn
scn = None
object_dictionary = {}
parent_dictionary = {}
object_matrix = {}
@ -568,25 +569,25 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
tilt = 0.0
pos = location + target # Target triangulation
if abs(location[0] - target[0]) > abs(location[1] - target[1]):
foc = math.copysign(math.sqrt(pow(pos[0],2)+pow(pos[1],2)),pos[0])
dia = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[0])
pitch = math.radians(90)-math.copysign(math.acos(foc/dia), pos[2])
foc = math.copysign(math.sqrt(pow(pos[0],2) + pow(pos[1],2)),pos[0])
dia = math.copysign(math.sqrt(pow(foc,2) + pow(target[2],2)),pos[0])
pitch = math.radians(90) - math.copysign(math.acos(foc / dia), pos[2])
if location[0] > target[0]:
tilt = math.copysign(pitch, pos[0])
pan = math.radians(90)+math.atan(pos[1]/foc)
pan = math.radians(90) + math.atan(pos[1] / foc)
else:
tilt = -1*(math.copysign(pitch, pos[0]))
pan = -1*(math.radians(90)-math.atan(pos[1]/foc))
tilt = -1 * (math.copysign(pitch, pos[0]))
pan = -1 * (math.radians(90) - math.atan(pos[1] / foc))
elif abs(location[1] - target[1]) > abs(location[0] - target[0]):
foc = math.copysign(math.sqrt(pow(pos[1],2)+pow(pos[0],2)),pos[1])
dia = math.copysign(math.sqrt(pow(foc,2)+pow(target[2],2)),pos[1])
pitch = math.radians(90)-math.copysign(math.acos(foc/dia), pos[2])
foc = math.copysign(math.sqrt(pow(pos[1],2) + pow(pos[0],2)),pos[1])
dia = math.copysign(math.sqrt(pow(foc,2) + pow(target[2],2)),pos[1])
pitch = math.radians(90) - math.copysign(math.acos(foc / dia), pos[2])
if location[1] > target[1]:
tilt = math.copysign(pitch, pos[1])
pan = math.radians(90)+math.acos(pos[0]/foc)
pan = math.radians(90) + math.acos(pos[0] / foc)
else:
tilt = -1*(math.copysign(pitch, pos[1]))
pan = -1*(math.radians(90)-math.acos(pos[0]/foc))
tilt = -1 * (math.copysign(pitch, pos[1]))
pan = -1 * (math.radians(90) - math.acos(pos[0] / foc))
direction = tilt, pan
return direction
@ -1060,6 +1061,11 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
object_parent.append(hierarchy)
pivot_list.append(mathutils.Vector((0.0, 0.0, 0.0)))
elif new_chunk.ID == PARENT_NAME:
parent_name, read_str_len = read_string(file)
parent_dictionary.setdefault(parent_name, []).append(child)
new_chunk.bytes_read += read_str_len
elif new_chunk.ID == OBJECT_INSTANCE_NAME:
object_name, read_str_len = read_string(file)
if child.name == '$$$DUMMY':
@ -1230,17 +1236,17 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
default_value = child.data.angle
child.data.angle = read_track_angle(temp_chunk)[0]
for keydata in keyframe_angle.items():
child.data.lens = (child.data.sensor_width/2)/math.tan(keydata[1]/2)
child.data.lens = (child.data.sensor_width / 2) / math.tan(keydata[1] / 2)
child.data.keyframe_insert(data_path="lens", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == HOTSPOT_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT': # Hotspot
keyframe_angle = {}
cone_angle = math.degrees(child.data.spot_size)
default_value = cone_angle-(child.data.spot_blend*math.floor(cone_angle))
default_value = cone_angle-(child.data.spot_blend * math.floor(cone_angle))
hot_spot = math.degrees(read_track_angle(temp_chunk)[0])
child.data.spot_blend = 1.0 - (hot_spot/cone_angle)
for keydata in keyframe_angle.items():
child.data.spot_blend = 1.0 - (math.degrees(keydata[1])/cone_angle)
child.data.spot_blend = 1.0 - (math.degrees(keydata[1]) / cone_angle)
child.data.keyframe_insert(data_path="spot_blend", frame=keydata[0])
elif KEYFRAME and new_chunk.ID == FALLOFF_TRACK_TAG and child.type == 'LIGHT' and child.data.type == 'SPOT': # Falloff
@ -1283,14 +1289,19 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
elif parent not in object_dict:
if ob.parent != object_list[parent]:
ob.parent = object_list[parent]
else:
print("\tWarning: Cannot assign self to parent ", ob)
else:
if ob.parent != object_dict[parent]:
elif ob.parent != object_dict[parent]:
ob.parent = object_dict.get(parent)
else:
print("\tWarning: Cannot assign self to parent ", ob.name)
#pivot_list[ind] += pivot_list[parent] # Not sure this is correct, should parent space matrix be applied before combining?
for par, objs in parent_dictionary.items():
parent = object_dictionary.get(par)
for ob in objs:
if parent is not None:
ob.parent = parent
# fix pivots
for ind, ob in enumerate(object_list):
if ob.type == 'MESH':

View File

@ -1561,8 +1561,6 @@ def blen_read_geom(fbx_tmpl, fbx_obj, settings):
# normals_split_custom_set. We use clnors.data since it is a memoryview, which is faster to iterate than clnors.
mesh.normals_split_custom_set(tuple(zip(*(iter(clnors.data),) * 3)))
mesh.use_auto_smooth = True
else:
mesh.calc_normals()
if settings.use_custom_normals:
mesh.free_normals_split()

View File

@ -4,7 +4,7 @@
bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
"version": (4, 0, 0),
"version": (4, 0, 3),
'blender': (3, 5, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',
@ -99,7 +99,7 @@ def on_export_format_changed(self, context):
# Also change the filter
sfile.params.filter_glob = '*.glb' if self.export_format == 'GLB' else '*.gltf'
# Force update of file list, has update the filter does not update the real file list
# Force update of file list, because update the filter does not update the real file list
bpy.ops.file.refresh()
@ -506,7 +506,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_bake_animation: BoolProperty(
name='Bake All Objects Animations',
description=(
"Force exporting animation on every objects. "
"Force exporting animation on every object. "
"Can be useful when using constraints or driver. "
"Also useful when exporting only selection"
),
@ -535,7 +535,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
name='Use Current Frame as Object Rest Transformations',
description=(
'Export the scene in the current animation frame. '
'When off, frame O is used as rest transformations for objects'
'When off, frame 0 is used as rest transformations for objects'
),
default=False
)
@ -543,7 +543,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_rest_position_armature: BoolProperty(
name='Use Rest Position Armature',
description=(
"Export armatures using rest position as joins rest pose. "
"Export armatures using rest position as joints' rest pose. "
"When off, current frame pose is used as rest pose"
),
default=True
@ -594,6 +594,15 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
default=True
)
export_morph_reset_sk_data: BoolProperty(
name='Reset shape keys between actions',
description=(
"Reset shape keys between each action exported. "
"This is needed when some SK channels are not keyed on some animations"
),
default=True
)
export_lights: BoolProperty(
name='Punctual Lights',
description='Export directional, point, and spot lights. '
@ -601,6 +610,19 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
default=False
)
# This parameter is only here for backward compatibility, as this option is removed in 3.6
# This option does nothing, and is not displayed in UI
# What you are looking for is probably "export_animation_mode"
export_nla_strips: BoolProperty(
name='Group by NLA Track',
description=(
"When on, multiple actions become part of the same glTF animation if "
"they're pushed onto NLA tracks with the same name. "
"When off, all the currently assigned actions become one glTF animation"
),
default=True
)
will_save_settings: BoolProperty(
name='Remember Export Settings',
description='Store glTF export settings in the Blender project',
@ -629,6 +651,10 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
setattr(self, k, v)
self.will_save_settings = True
# Update filter if user saved settings
if hasattr(self, 'export_format'):
self.filter_glob = '*.glb' if self.export_format == 'GLB' else '*.gltf'
except (AttributeError, TypeError):
self.report({"ERROR"}, "Loading export settings failed. Removed corrupted settings")
del context.scene[self.scene_key]
@ -756,6 +782,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_settings['gltf_optimize_animation_keep_object'] = self.export_optimize_animation_keep_anim_object
export_settings['gltf_export_anim_single_armature'] = self.export_anim_single_armature
export_settings['gltf_export_reset_pose_bones'] = self.export_reset_pose_bones
export_settings['gltf_export_reset_sk_data'] = self.export_morph_reset_sk_data
export_settings['gltf_bake_animation'] = self.export_bake_animation
export_settings['gltf_negative_frames'] = self.export_negative_frame
export_settings['gltf_anim_slide_to_zero'] = self.export_anim_slide_to_zero
@ -768,6 +795,7 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_settings['gltf_optimize_animation_keep_object'] = False
export_settings['gltf_export_anim_single_armature'] = False
export_settings['gltf_export_reset_pose_bones'] = False
export_settings['gltf_export_reset_sk_data'] = False
export_settings['gltf_skins'] = self.export_skins
if self.export_skins:
export_settings['gltf_all_vertex_influences'] = self.export_all_influences
@ -1223,9 +1251,6 @@ class GLTF_PT_export_animation(bpy.types.Panel):
if operator.export_animation_mode == "ACTIVE_ACTIONS":
layout.prop(operator, 'export_nla_strips_merged_animation_name')
row = layout.row()
row.active = operator.export_morph is True
row.prop(operator, 'export_morph_animation')
row = layout.row()
row.active = operator.export_force_sampling and operator.export_animation_mode in ['ACTIONS', 'ACTIVE_ACTIONS']
row.prop(operator, 'export_bake_animation')
@ -1320,6 +1345,39 @@ class GLTF_PT_export_animation_armature(bpy.types.Panel):
layout.prop(operator, 'export_anim_single_armature')
layout.prop(operator, 'export_reset_pose_bones')
class GLTF_PT_export_animation_shapekeys(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
bl_label = "Shapekeys Animation"
bl_parent_id = "GLTF_PT_export_animation"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
sfile = context.space_data
operator = sfile.active_operator
return operator.bl_idname == "EXPORT_SCENE_OT_gltf"
def draw_header(self, context):
sfile = context.space_data
operator = sfile.active_operator
self.layout.active = operator.export_animations and operator.export_morph
self.layout.prop(operator, "export_morph_animation", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
sfile = context.space_data
operator = sfile.active_operator
layout.active = operator.export_animations
layout.prop(operator, 'export_morph_reset_sk_data')
class GLTF_PT_export_animation_sampling(bpy.types.Panel):
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'TOOL_PROPS'
@ -1675,6 +1733,7 @@ classes = (
GLTF_PT_export_animation_notes,
GLTF_PT_export_animation_ranges,
GLTF_PT_export_animation_armature,
GLTF_PT_export_animation_shapekeys,
GLTF_PT_export_animation_sampling,
GLTF_PT_export_animation_optimize,
GLTF_PT_export_user_extensions,

View File

@ -41,3 +41,13 @@ def get_delta_modes(target_property: str) -> str:
def is_bone_anim_channel(data_path: str) -> bool:
return data_path[:10] == "pose.bones"
def get_sk_exported(key_blocks):
return [
key_block
for key_block in key_blocks
if not skip_sk(key_block)
]
def skip_sk(k):
return k == k.relative_key or k.mute

View File

@ -4,6 +4,7 @@
import bpy
import typing
from .....io.exp.gltf2_io_user_extensions import export_user_extensions
from .....blender.com.gltf2_blender_data_path import skip_sk
from .....io.com import gltf2_io_debug
from .....io.com import gltf2_io
from ....exp.gltf2_blender_gather_cache import cached
@ -74,7 +75,7 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s
type_ = "BONE"
if blender_object.type == "MESH" and object_path.startswith("key_blocks"):
shape_key = blender_object.data.shape_keys.path_resolve(object_path)
if shape_key.mute is True:
if skip_sk(shape_key):
continue
target = blender_object.data.shape_keys
type_ = "SK"
@ -84,7 +85,7 @@ def get_channel_groups(obj_uuid: str, blender_action: bpy.types.Action, export_s
if blender_object.type == "MESH":
try:
shape_key = blender_object.data.shape_keys.path_resolve(object_path)
if shape_key.mute is True:
if skip_sk(shape_key):
continue
target = blender_object.data.shape_keys
type_ = "SK"
@ -179,9 +180,7 @@ def __get_channel_group_sorted(channels: typing.Tuple[bpy.types.FCurve], blender
shapekeys_idx = {}
cpt_sk = 0
for sk in blender_object.data.shape_keys.key_blocks:
if sk == sk.relative_key:
continue
if sk.mute is True:
if skip_sk(sk):
continue
shapekeys_idx[sk.name] = cpt_sk
cpt_sk += 1

View File

@ -3,6 +3,7 @@
import bpy
import typing
from .....blender.com.gltf2_blender_data_path import skip_sk
from ....com.gltf2_blender_data_path import get_target_object_path
from ...gltf2_blender_gather_cache import cached
from ..gltf2_blender_gather_keyframes import Keyframe
@ -164,9 +165,7 @@ def __gather_non_keyed_values(
shapekeys_idx = {}
cpt_sk = 0
for sk in blender_object.data.shape_keys.key_blocks:
if sk == sk.relative_key:
continue
if sk.mute is True:
if skip_sk(sk):
continue
shapekeys_idx[cpt_sk] = sk.name
cpt_sk += 1

View File

@ -19,7 +19,7 @@ from .sampled.shapekeys.gltf2_blender_gather_sk_action_sampled import gather_act
from .sampled.object.gltf2_blender_gather_object_channels import gather_object_sampled_channels, gather_sampled_object_channel
from .sampled.shapekeys.gltf2_blender_gather_sk_channels import gather_sampled_sk_channel
from .gltf2_blender_gather_drivers import get_sk_drivers
from .gltf2_blender_gather_animation_utils import reset_bone_matrix, link_samplers, add_slide_data, merge_tracks_perform, bake_animation
from .gltf2_blender_gather_animation_utils import reset_bone_matrix, reset_sk_data, link_samplers, add_slide_data, merge_tracks_perform, bake_animation
def gather_actions_animations(export_settings):
@ -267,6 +267,7 @@ def gather_action_animations( obj_uuid: int,
or (blender_object.data.shape_keys.animation_data.action.name != blender_action.name):
if blender_object.data.shape_keys.animation_data.is_property_readonly('action'):
blender_object.data.shape_keys.animation_data.use_tweak_mode = False
reset_sk_data(blender_object, blender_actions, export_settings)
export_user_extensions('pre_animation_switch_hook', export_settings, blender_object, blender_action, track_name, on_type)
blender_object.data.shape_keys.animation_data.action = blender_action
export_user_extensions('post_animation_switch_hook', export_settings, blender_object, blender_action, track_name, on_type)
@ -400,6 +401,7 @@ def gather_action_animations( obj_uuid: int,
and blender_object.data is not None \
and blender_object.data.shape_keys is not None \
and blender_object.data.shape_keys.animation_data is not None:
reset_sk_data(blender_object, blender_actions, export_settings)
blender_object.data.shape_keys.animation_data.action = current_sk_action
if current_world_matrix is not None:

View File

@ -4,6 +4,7 @@
import bpy
import typing
from mathutils import Matrix
from ....blender.com.gltf2_blender_data_path import get_sk_exported
from ....io.com import gltf2_io
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ....io.com.gltf2_io_debug import print_console
@ -55,6 +56,22 @@ def reset_bone_matrix(blender_object, export_settings) -> None:
for bone in blender_object.pose.bones:
bone.matrix_basis = Matrix()
def reset_sk_data(blender_object, blender_actions, export_settings) -> None:
# Using NLA for SK is not so common
# Reset to 0.0 will happen here only if there are at least 2 tracks to export
if export_settings['gltf_export_reset_sk_data'] is False:
return
if len([i for i in blender_actions if i[2] == "SHAPEKEY"]) <= 1:
return
if blender_object.type != "MESH":
return
# Reset
for sk in get_sk_exported(blender_object.data.shape_keys.key_blocks):
sk.value = 0.0
def add_slide_data(start_frame, obj_uuid: int, key: str, export_settings):

View File

@ -1,6 +1,7 @@
# SPDX-License-Identifier: Apache-2.0
# Copyright 2018-2021 The glTF-Blender-IO authors.
from ....blender.com.gltf2_blender_data_path import get_sk_exported, skip_sk
from ...com.gltf2_blender_data_path import get_target_object_path
from ..gltf2_blender_gather_cache import skdriverdiscovercache
@ -41,11 +42,7 @@ def get_sk_drivers(blender_armature_uuid, export_settings):
shapekeys_idx = {}
cpt_sk = 0
for sk in child.data.shape_keys.key_blocks:
if sk == sk.relative_key:
continue
if sk.mute is True:
continue
for sk in get_sk_exported(child.data.shape_keys.key_blocks):
shapekeys_idx[sk.name] = cpt_sk
cpt_sk += 1
@ -63,8 +60,7 @@ def get_sk_drivers(blender_armature_uuid, export_settings):
sk_name = child.data.shape_keys.path_resolve(get_target_object_path(sk_c.data_path)).name
except:
continue
# Do not take into account this driver if corresponding SK is disabled
if child.data.shape_keys.key_blocks[sk_name].mute is True:
if skip_sk(child.data.shape_keys.key_blocks[sk_name]):
continue
idx_channel_mapping.append((shapekeys_idx[sk_name], sk_c))
existing_idx = dict(idx_channel_mapping)

View File

@ -7,7 +7,7 @@ from ....io.com import gltf2_io
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ..gltf2_blender_gather_cache import cached
from ..gltf2_blender_gather_tree import VExportNode
from .gltf2_blender_gather_animation_utils import merge_tracks_perform, bake_animation, add_slide_data, reset_bone_matrix
from .gltf2_blender_gather_animation_utils import merge_tracks_perform, bake_animation, add_slide_data, reset_bone_matrix, reset_sk_data
from .gltf2_blender_gather_drivers import get_sk_drivers
from .sampled.gltf2_blender_gather_animation_sampling_cache import get_cache_data
@ -91,7 +91,7 @@ def gather_track_animations( obj_uuid: int,
for track_group in [b[0] for b in blender_tracks if b[2] == "OBJECT"]:
for track in track_group:
blender_object.animation_data.nla_tracks[track.idx].mute = True
for track_group in [b[0] for b in blender_tracks if b[2] == "SHAPEKEYS"]:
for track_group in [b[0] for b in blender_tracks if b[2] == "SHAPEKEY"]:
for track in track_group:
blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = True
@ -118,6 +118,8 @@ def gather_track_animations( obj_uuid: int,
export_user_extensions('post_animation_track_switch_hook', export_settings, blender_object, track, track_name, on_type)
reset_bone_matrix(blender_object, export_settings)
if on_type == "SHAPEKEY":
reset_sk_data(blender_object, blender_tracks, export_settings)
##### Export animation
animation = bake_animation(obj_uuid, track_name, export_settings, mode=on_type)
@ -161,7 +163,7 @@ def gather_track_animations( obj_uuid: int,
and blender_object.data.shape_keys is not None \
and blender_object.data.shape_keys.animation_data is not None:
blender_object.data.shape_keys.animation_data.use_nla = current_use_nla_sk
for track_group in [b[0] for b in blender_tracks if b[2] == "SHAPEKEYS"]:
for track_group in [b[0] for b in blender_tracks if b[2] == "SHAPEKEY"]:
for track in track_group:
blender_object.data.shape_keys.animation_data.nla_tracks[track.idx].mute = True
@ -300,7 +302,7 @@ def __get_nla_tracks_sk(obj_uuid: str, export_settings):
exported_tracks.append(current_exported_tracks)
track_names = [obj.data.shape_keys.animation_data.nla_tracks[tracks_group[0].idx].name for tracks_group in exported_tracks]
on_types = ['SHAPEKEYS'] * len(track_names)
on_types = ['SHAPEKEY'] * len(track_names)
return exported_tracks, track_names, on_types
def prepare_tracks_range(obj_uuid, tracks, track_name, export_settings):

View File

@ -4,6 +4,7 @@
import mathutils
import bpy
import typing
from .....blender.com.gltf2_blender_data_path import get_sk_exported
from ...gltf2_blender_gather_cache import datacache
from ...gltf2_blender_gather_tree import VExportNode
from ..gltf2_blender_gather_drivers import get_sk_drivers
@ -144,7 +145,7 @@ def get_cache_data(path: str,
data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name] = {}
data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name]['sk'] = {}
data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name]['sk'][None] = {}
data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name]['sk'][None][frame] = [k.value if k.mute is False else 0.0 for k in blender_obj.data.shape_keys.key_blocks][1:]
data[obj_uuid][blender_obj.data.shape_keys.animation_data.action.name]['sk'][None][frame] = [k.value for k in get_sk_exported(blender_obj.data.shape_keys.key_blocks)]
elif export_settings['gltf_morph_anim'] and blender_obj.type == "MESH" \
and blender_obj.data is not None \
@ -159,7 +160,7 @@ def get_cache_data(path: str,
if 'sk' not in data[obj_uuid][action_name].keys():
data[obj_uuid][action_name]['sk'] = {}
data[obj_uuid][action_name]['sk'][None] = {}
data[obj_uuid][action_name]['sk'][None][frame] = [k.value if k.mute is False else 0.0 for k in blender_obj.data.shape_keys.key_blocks][1:]
data[obj_uuid][action_name]['sk'][None][frame] = [k.value for k in get_sk_exported(blender_obj.data.shape_keys.key_blocks)]
@ -173,7 +174,7 @@ def get_cache_data(path: str,
elif 'sk' not in data[obj_uuid][obj_uuid].keys():
data[obj_uuid][obj_uuid]['sk'] = {}
data[obj_uuid][obj_uuid]['sk'][None] = {}
data[obj_uuid][obj_uuid]['sk'][None][frame] = [k.value if k.mute is False else 0.0 for k in blender_obj.data.shape_keys.key_blocks][1:]
data[obj_uuid][obj_uuid]['sk'][None][frame] = [k.value for k in get_sk_exported(blender_obj.data.shape_keys.key_blocks)]
# caching driver sk meshes
# This will avoid to have to do it again when exporting SK animation
@ -189,20 +190,20 @@ def get_cache_data(path: str,
data[dr_obj][obj_uuid + "_" + blender_obj.animation_data.action.name] = {}
data[dr_obj][obj_uuid + "_" + blender_obj.animation_data.action.name]['sk'] = {}
data[dr_obj][obj_uuid + "_" + blender_obj.animation_data.action.name]['sk'][None] = {}
data[dr_obj][obj_uuid + "_" + blender_obj.animation_data.action.name]['sk'][None][frame] = [k.value if k.mute is False else 0.0 for k in driver_object.data.shape_keys.key_blocks][1:]
data[dr_obj][obj_uuid + "_" + blender_obj.animation_data.action.name]['sk'][None][frame] = [k.value for k in get_sk_exported(driver_object.data.shape_keys.key_blocks)]
if blender_obj.animation_data \
and export_settings['gltf_animation_mode'] in ["NLA_TRACKS"]:
if obj_uuid + "_" + action_name not in data[dr_obj]:
data[dr_obj][obj_uuid + "_" + action_name] = {}
data[dr_obj][obj_uuid + "_" + action_name]['sk'] = {}
data[dr_obj][obj_uuid + "_" + action_name]['sk'][None] = {}
data[dr_obj][obj_uuid + "_" + action_name]['sk'][None][frame] = [k.value if k.mute is False else 0.0 for k in driver_object.data.shape_keys.key_blocks][1:]
data[dr_obj][obj_uuid + "_" + action_name]['sk'][None][frame] = [k.value for k in get_sk_exported(driver_object.data.shape_keys.key_blocks)]
else:
if obj_uuid + "_" + obj_uuid not in data[dr_obj]:
data[dr_obj][obj_uuid + "_" + obj_uuid] = {}
data[dr_obj][obj_uuid + "_" + obj_uuid]['sk'] = {}
data[dr_obj][obj_uuid + "_" + obj_uuid]['sk'][None] = {}
data[dr_obj][obj_uuid + "_" + obj_uuid]['sk'][None][frame] = [k.value if k.mute is False else 0.0 for k in driver_object.data.shape_keys.key_blocks][1:]
data[dr_obj][obj_uuid + "_" + obj_uuid]['sk'][None][frame] = [k.value for k in get_sk_exported(driver_object.data.shape_keys.key_blocks)]
frame += step
return data

View File

@ -2,6 +2,7 @@
# Copyright 2018-2022 The glTF-Blender-IO authors.
import numpy as np
from ......blender.com.gltf2_blender_data_path import get_sk_exported
from ....gltf2_blender_gather_cache import cached
from ...gltf2_blender_gather_keyframes import Keyframe
from ..gltf2_blender_gather_animation_sampling_cache import get_cache_data
@ -21,7 +22,7 @@ def gather_sk_sampled_keyframes(obj_uuid,
step = export_settings['gltf_frame_step']
blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object
while frame <= end_frame:
key = Keyframe([None] * (len(blender_obj.data.shape_keys.key_blocks)-1), frame, 'value')
key = Keyframe([None] * (len(get_sk_exported(blender_obj.data.shape_keys.key_blocks))), frame, 'value')
key.value_total = get_cache_data(
'sk',
obj_uuid,

View File

@ -4,6 +4,7 @@
import bpy
from typing import Optional, Dict, List, Any, Tuple
from ...io.com import gltf2_io
from ...blender.com.gltf2_blender_data_path import get_sk_exported
from ...io.com.gltf2_io_debug import print_console
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
from ..com.gltf2_blender_extras import generate_extras
@ -107,12 +108,7 @@ def __gather_extras(blender_mesh: bpy.types.Mesh,
if export_settings['gltf_morph'] and blender_mesh.shape_keys:
morph_max = len(blender_mesh.shape_keys.key_blocks) - 1
if morph_max > 0:
target_names = []
for blender_shape_key in blender_mesh.shape_keys.key_blocks:
if blender_shape_key != blender_shape_key.relative_key:
if blender_shape_key.mute is False:
target_names.append(blender_shape_key.name)
extras['targetNames'] = target_names
extras['targetNames'] = [k.name for k in get_sk_exported(blender_mesh.shape_keys.key_blocks)]
if extras:
return extras
@ -155,11 +151,4 @@ def __gather_weights(blender_mesh: bpy.types.Mesh,
if morph_max <= 0:
return None
weights = []
for blender_shape_key in blender_mesh.shape_keys.key_blocks:
if blender_shape_key != blender_shape_key.relative_key:
if blender_shape_key.mute is False:
weights.append(blender_shape_key.value)
return weights
return [k.value for k in get_sk_exported(blender_mesh.shape_keys.key_blocks)]

View File

@ -298,7 +298,14 @@ def __gather_mesh_from_nonmesh(blender_object, export_settings):
def __gather_name(blender_object, export_settings):
return blender_object.name
class GltfHookName:
def __init__(self, name):
self.name = name
gltf_hook_name = GltfHookName(blender_object.name)
export_user_extensions('gather_node_name_hook', export_settings, gltf_hook_name, blender_object)
return gltf_hook_name.name
def __gather_trans_rot_scale(vnode, export_settings):
if vnode.parent_uuid is None:

View File

@ -6,6 +6,7 @@ from typing import List, Optional, Tuple
import numpy as np
from ...io.com import gltf2_io, gltf2_io_constants, gltf2_io_extensions
from ...io.com.gltf2_io_debug import print_console
from ...blender.com.gltf2_blender_data_path import get_sk_exported
from ...io.exp import gltf2_io_binary_data
from .gltf2_blender_gather_cache import cached, cached_by_key
from . import gltf2_blender_gather_primitives_extract
@ -186,12 +187,7 @@ def __gather_targets(blender_primitive, blender_mesh, modifiers, export_settings
targets = []
if blender_mesh.shape_keys is not None:
morph_index = 0
for blender_shape_key in blender_mesh.shape_keys.key_blocks:
if blender_shape_key == blender_shape_key.relative_key:
continue
if blender_shape_key.mute is True:
continue
for blender_shape_key in get_sk_exported(blender_mesh.shape_keys.key_blocks):
target_position_id = 'MORPH_POSITION_' + str(morph_index)
target_normal_id = 'MORPH_NORMAL_' + str(morph_index)

View File

@ -3,6 +3,7 @@
import numpy as np
from mathutils import Vector
from ...blender.com.gltf2_blender_data_path import get_sk_exported
from ...io.com.gltf2_io_debug import print_console
from ...io.com.gltf2_io_constants import NORMALS_ROUNDING_DIGIT
from ...io.exp.gltf2_io_user_extensions import export_user_extensions
@ -110,12 +111,9 @@ class PrimitiveCreator:
self.armature = None
self.key_blocks = []
# List of SK that are going to be exported, actually
if self.blender_mesh.shape_keys and self.export_settings['gltf_morph']:
self.key_blocks = [
key_block
for key_block in self.blender_mesh.shape_keys.key_blocks
if not (key_block == key_block.relative_key or key_block.mute)
]
self.key_blocks = get_sk_exported(self.blender_mesh.shape_keys.key_blocks)
# Fetch vert positions and bone data (joint,weights)

View File

@ -255,10 +255,10 @@ def do_primitives(gltf, mesh_idx, skin_idx, mesh, ob):
if gltf.import_settings['merge_vertices']:
vert_locs, vert_normals, vert_joints, vert_weights, \
sk_vert_locs, loop_vidxs, edge_vidxs = \
sk_vert_locs, loop_vidxs, edge_vidxs, attribute_data = \
merge_duplicate_verts(
vert_locs, vert_normals, vert_joints, vert_weights, \
sk_vert_locs, loop_vidxs, edge_vidxs\
sk_vert_locs, loop_vidxs, edge_vidxs, attribute_data\
)
# ---------------
@ -700,7 +700,7 @@ def set_poly_smoothing(gltf, pymesh, mesh, vert_normals, loop_vidxs):
mesh.polygons.foreach_set('use_smooth', poly_smooths)
def merge_duplicate_verts(vert_locs, vert_normals, vert_joints, vert_weights, sk_vert_locs, loop_vidxs, edge_vidxs):
def merge_duplicate_verts(vert_locs, vert_normals, vert_joints, vert_weights, sk_vert_locs, loop_vidxs, edge_vidxs, attribute_data):
# This function attempts to invert the splitting done when exporting to
# glTF. Welds together verts with the same per-vert data (but possibly
# different per-loop data).
@ -755,11 +755,17 @@ def merge_duplicate_verts(vert_locs, vert_normals, vert_joints, vert_weights, sk
dots['sk%dy' % i] = locs[:, 1]
dots['sk%dz' % i] = locs[:, 2]
unique_dots, inv_indices = np.unique(dots, return_inverse=True)
unique_dots, unique_ind, inv_indices = np.unique(dots, return_index=True, return_inverse=True)
loop_vidxs = inv_indices[loop_vidxs]
edge_vidxs = inv_indices[edge_vidxs]
# We don't split vertices only because of custom attribute
# If 2 vertices have same data (pos, normals, etc...) except custom attribute, we
# keep 1 custom attribute, arbitrary
for idx, i in enumerate(attribute_data):
attribute_data[idx] = attribute_data[idx][unique_ind]
vert_locs = np.empty((len(unique_dots), 3), dtype=np.float32)
vert_locs[:, 0] = unique_dots['x']
vert_locs[:, 1] = unique_dots['y']
@ -786,4 +792,4 @@ def merge_duplicate_verts(vert_locs, vert_normals, vert_joints, vert_weights, sk
sk_vert_locs[i][:, 1] = unique_dots['sk%dy' % i]
sk_vert_locs[i][:, 2] = unique_dots['sk%dz' % i]
return vert_locs, vert_normals, vert_joints, vert_weights, sk_vert_locs, loop_vidxs, edge_vidxs
return vert_locs, vert_normals, vert_joints, vert_weights, sk_vert_locs, loop_vidxs, edge_vidxs, attribute_data

View File

@ -1,498 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Add-on metadata read by Blender's add-on system (legacy Wavefront OBJ
# importer/exporter, superseded by the built-in C++ OBJ I/O).
bl_info = {
    "name": "Wavefront OBJ format (legacy)",
    "author": "Campbell Barton, Bastien Montagne",
    "version": (3, 9, 0),
    "blender": (3, 0, 0),
    "location": "File > Import-Export",
    "description": "Import-Export OBJ, Import OBJ mesh, UVs, materials and textures",
    "warning": "",
    "doc_url": "{BLENDER_MANUAL_URL}/addons/import_export/scene_obj.html",
    "support": 'OFFICIAL',
    "category": "Import-Export",
}

# Support add-on reload ("Reload Scripts"): re-import submodules that were
# already loaded so code changes take effect without restarting Blender.
if "bpy" in locals():
    import importlib
    if "import_obj" in locals():
        importlib.reload(import_obj)
    if "export_obj" in locals():
        importlib.reload(export_obj)
import bpy
from bpy.props import (
BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
orientation_helper,
path_reference_mode,
axis_conversion,
)
@orientation_helper(axis_forward='-Z', axis_up='Y')
class ImportOBJ(bpy.types.Operator, ImportHelper):
    """Load a Wavefront OBJ File"""
    bl_idname = "import_scene.obj"
    bl_label = "Import OBJ"
    bl_options = {'PRESET', 'UNDO'}

    filename_ext = ".obj"
    filter_glob: StringProperty(
        default="*.obj;*.mtl",
        options={'HIDDEN'},
    )

    # Import lines / 2-vert faces as Blender edges.
    use_edges: BoolProperty(
        name="Lines",
        description="Import lines and faces with 2 verts as edge",
        default=True,
    )
    use_smooth_groups: BoolProperty(
        name="Smooth Groups",
        description="Surround smooth groups by sharp edges",
        default=True,
    )

    # The three split toggles below are mutually constrained; execute()
    # forces them on/off according to 'split_mode'.
    use_split_objects: BoolProperty(
        name="Object",
        description="Import OBJ Objects into Blender Objects",
        default=True,
    )
    use_split_groups: BoolProperty(
        name="Group",
        description="Import OBJ Groups into Blender Objects",
        default=False,
    )
    use_groups_as_vgroups: BoolProperty(
        name="Poly Groups",
        description="Import OBJ groups as vertex groups",
        default=False,
    )
    use_image_search: BoolProperty(
        name="Image Search",
        description="Search subdirs for any associated images "
                    "(Warning, may be slow)",
        default=True,
    )
    split_mode: EnumProperty(
        name="Split",
        items=(
            ('ON', "Split", "Split geometry, omits vertices unused by edges or faces"),
            ('OFF', "Keep Vert Order", "Keep vertex order from file"),
        ),
    )
    global_clamp_size: FloatProperty(
        name="Clamp Size",
        description="Clamp bounds under this value (zero to disable)",
        min=0.0, max=1000.0,
        soft_min=0.0, soft_max=1000.0,
        default=0.0,
    )

    def execute(self, context):
        """Resolve option interactions, build the keyword dict and hand
        off to import_obj.load()."""
        # print("Selected: " + context.active_object.name)
        from . import import_obj

        # 'split_mode' overrides the individual split/vgroup toggles.
        if self.split_mode == 'OFF':
            self.use_split_objects = False
            self.use_split_groups = False
        else:
            self.use_groups_as_vgroups = False

        keywords = self.as_keywords(
            ignore=(
                "axis_forward",
                "axis_up",
                "filter_glob",
                "split_mode",
            ),
        )

        # Convert the chosen forward/up axes into a 4x4 matrix applied to
        # all imported geometry.
        global_matrix = axis_conversion(
            from_forward=self.axis_forward,
            from_up=self.axis_up,
        ).to_4x4()
        keywords["global_matrix"] = global_matrix

        if bpy.data.is_saved and context.preferences.filepaths.use_relative_paths:
            import os
            keywords["relpath"] = os.path.dirname(bpy.data.filepath)

        return import_obj.load(context, **keywords)

    def draw(self, context):
        # Options are drawn by the OBJ_PT_import_* panels below instead.
        pass
class OBJ_PT_import_include(bpy.types.Panel):
    """File-browser side panel: which auxiliary OBJ data to import."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Include"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Show only while the legacy OBJ import operator is active.
        return context.space_data.active_operator.bl_idname == "IMPORT_SCENE_OT_obj"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        active_op = context.space_data.active_operator
        for prop_name in ('use_image_search', 'use_smooth_groups', 'use_edges'):
            layout.prop(active_op, prop_name)
class OBJ_PT_import_transform(bpy.types.Panel):
    """File-browser side panel: import scale and axis orientation."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Transform"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Show only while the legacy OBJ import operator is active.
        return context.space_data.active_operator.bl_idname == "IMPORT_SCENE_OT_obj"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        active_op = context.space_data.active_operator
        for prop_name in ("global_clamp_size", "axis_forward", "axis_up"):
            layout.prop(active_op, prop_name)
class OBJ_PT_import_geometry(bpy.types.Panel):
    """File-browser side panel: geometry-splitting options (collapsed by default)."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Geometry"
    bl_parent_id = "FILE_PT_operator"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Show only while the legacy OBJ import operator is active.
        return context.space_data.active_operator.bl_idname == "IMPORT_SCENE_OT_obj"

    def draw(self, context):
        layout = self.layout
        active_op = context.space_data.active_operator

        # The split-mode toggle sits above the property-split section.
        layout.row().prop(active_op, "split_mode", expand=True)

        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        col = layout.column()
        if active_op.split_mode == 'ON':
            col.prop(active_op, "use_split_objects", text="Split by Object")
            col.prop(active_op, "use_split_groups", text="Split by Group")
        else:
            col.prop(active_op, "use_groups_as_vgroups")
@orientation_helper(axis_forward='-Z', axis_up='Y')
class ExportOBJ(bpy.types.Operator, ExportHelper):
    """Save a Wavefront OBJ File"""
    bl_idname = "export_scene.obj"
    bl_label = 'Export OBJ'
    bl_options = {'PRESET'}

    filename_ext = ".obj"
    filter_glob: StringProperty(
        default="*.obj;*.mtl",
        options={'HIDDEN'},
    )

    # context group
    use_selection: BoolProperty(
        name="Selection Only",
        description="Export selected objects only",
        default=False,
    )
    use_animation: BoolProperty(
        name="Animation",
        description="Write out an OBJ for each frame",
        default=False,
    )

    # object group
    use_mesh_modifiers: BoolProperty(
        name="Apply Modifiers",
        description="Apply modifiers",
        default=True,
    )

    # extra data group
    use_edges: BoolProperty(
        name="Include Edges",
        description="",
        default=True,
    )
    use_smooth_groups: BoolProperty(
        name="Smooth Groups",
        description="Write sharp edges as smooth groups",
        default=False,
    )
    use_smooth_groups_bitflags: BoolProperty(
        name="Bitflag Smooth Groups",
        description="Same as 'Smooth Groups', but generate smooth groups IDs as bitflags "
                    "(produces at most 32 different smooth groups, usually much less)",
        default=False,
    )
    use_normals: BoolProperty(
        name="Write Normals",
        description="Export one normal per vertex and per face, to represent flat faces and sharp edges",
        default=True,
    )
    use_uvs: BoolProperty(
        name="Include UVs",
        description="Write out the active UV coordinates",
        default=True,
    )
    use_materials: BoolProperty(
        name="Write Materials",
        description="Write out the MTL file",
        default=True,
    )
    use_triangles: BoolProperty(
        name="Triangulate Faces",
        description="Convert all faces to triangles",
        default=False,
    )
    use_nurbs: BoolProperty(
        name="Write Nurbs",
        description="Write nurbs curves as OBJ nurbs rather than "
                    "converting to geometry",
        default=False,
    )
    use_vertex_groups: BoolProperty(
        name="Polygroups",
        description="",
        default=False,
    )

    # grouping group
    use_blen_objects: BoolProperty(
        name="OBJ Objects",
        description="Export Blender objects as OBJ objects",
        default=True,
    )
    group_by_object: BoolProperty(
        name="OBJ Groups",
        description="Export Blender objects as OBJ groups",
        default=False,
    )
    group_by_material: BoolProperty(
        name="Material Groups",
        description="Generate an OBJ group for each part of a geometry using a different material",
        default=False,
    )
    keep_vertex_order: BoolProperty(
        name="Keep Vertex Order",
        description="",
        default=False,
    )

    global_scale: FloatProperty(
        name="Scale",
        min=0.01, max=1000.0,
        default=1.0,
    )

    # Standard path-mode property shared by exporters (AUTO/ABSOLUTE/RELATIVE/...).
    path_mode: path_reference_mode

    check_extension = True

    def execute(self, context):
        """Build the keyword dict and hand off to export_obj.save()."""
        from . import export_obj

        from mathutils import Matrix
        keywords = self.as_keywords(
            ignore=(
                "axis_forward",
                "axis_up",
                "global_scale",
                "check_existing",
                "filter_glob",
            ),
        )

        # Combine the user scale with the axis-orientation conversion into
        # one matrix applied to all exported geometry.
        global_matrix = (
            Matrix.Scale(self.global_scale, 4) @
            axis_conversion(
                to_forward=self.axis_forward,
                to_up=self.axis_up,
            ).to_4x4()
        )

        keywords["global_matrix"] = global_matrix
        return export_obj.save(context, **keywords)

    def draw(self, context):
        # Options are drawn by the OBJ_PT_export_* panels below instead.
        pass
class OBJ_PT_export_include(bpy.types.Panel):
    """File-browser side panel: what to include in the OBJ export."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Include"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Show only while the legacy OBJ export operator is active.
        return context.space_data.active_operator.bl_idname == "EXPORT_SCENE_OT_obj"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        active_op = context.space_data.active_operator

        limit_col = layout.column(heading="Limit to")
        limit_col.prop(active_op, 'use_selection')

        objects_col = layout.column(heading="Objects as", align=True)
        objects_col.prop(active_op, 'use_blen_objects')
        objects_col.prop(active_op, 'group_by_object')
        objects_col.prop(active_op, 'group_by_material')

        layout.separator()
        layout.prop(active_op, 'use_animation')
class OBJ_PT_export_transform(bpy.types.Panel):
    """File-browser side panel: export scale, path mode and orientation."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Transform"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Show only while the legacy OBJ export operator is active.
        return context.space_data.active_operator.bl_idname == "EXPORT_SCENE_OT_obj"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        active_op = context.space_data.active_operator
        for prop_name in ('global_scale', 'path_mode', 'axis_forward', 'axis_up'):
            layout.prop(active_op, prop_name)
class OBJ_PT_export_geometry(bpy.types.Panel):
    """File-browser side panel: geometry options (collapsed by default)."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Geometry"
    bl_parent_id = "FILE_PT_operator"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Show only while the legacy OBJ export operator is active.
        return context.space_data.active_operator.bl_idname == "EXPORT_SCENE_OT_obj"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        active_op = context.space_data.active_operator
        for prop_name in (
                'use_mesh_modifiers',
                'use_smooth_groups',
                'use_smooth_groups_bitflags',
                'use_normals',
                'use_uvs',
                'use_materials',
                'use_triangles',
        ):
            layout.prop(active_op, prop_name)
        layout.prop(active_op, 'use_nurbs', text="Curves as NURBS")
        layout.prop(active_op, 'use_vertex_groups')
        layout.prop(active_op, 'keep_vertex_order')
def menu_func_import(self, context):
    """Add the legacy OBJ importer to the File > Import menu."""
    layout = self.layout
    layout.operator(ImportOBJ.bl_idname, text="Wavefront (.obj) (legacy)")
def menu_func_export(self, context):
    """Add the legacy OBJ exporter to the File > Export menu."""
    layout = self.layout
    layout.operator(ExportOBJ.bl_idname, text="Wavefront (.obj) (legacy)")
# Registration order: operators first, then the file-browser option panels
# that attach to them via bl_parent_id.
classes = (
    ImportOBJ,
    OBJ_PT_import_include,
    OBJ_PT_import_transform,
    OBJ_PT_import_geometry,
    ExportOBJ,
    OBJ_PT_export_include,
    OBJ_PT_export_transform,
    OBJ_PT_export_geometry,
)
def register():
    """Register all add-on classes and hook the File menu entries."""
    for klass in classes:
        bpy.utils.register_class(klass)

    bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
def unregister():
    """Remove the File menu entries, then unregister all classes."""
    bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
    bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)

    for klass in classes:
        bpy.utils.unregister_class(klass)
# Allow testing the add-on by running this file directly from Blender.
if __name__ == "__main__":
    register()

View File

@ -1,775 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-or-later
import os
import bpy
from mathutils import Matrix, Vector, Color
from bpy_extras import io_utils, node_shader_utils
from bpy_extras.wm_utils.progress_report import (
ProgressReport,
ProgressReportSubstep,
)
def name_compat(name):
    """Return *name* made safe for OBJ output.

    ``None`` becomes the literal string ``'None'``; spaces are replaced
    with underscores (OBJ names are whitespace-delimited).
    """
    return 'None' if name is None else name.replace(' ', '_')
def mesh_triangulate(me):
    """Triangulate mesh *me* in place using a temporary BMesh.

    Used when triangles-only output (EXPORT_TRI) is requested; must run
    before any transform so tessellation matches the final export.
    """
    import bmesh
    bm = bmesh.new()
    bm.from_mesh(me)
    bmesh.ops.triangulate(bm, faces=bm.faces)
    bm.to_mesh(me)
    bm.free()  # release the temporary BMesh's memory immediately
def write_mtl(scene, filepath, path_mode, copy_set, mtl_dict):
    """Write the .mtl companion file for an OBJ export.

    filepath  -- destination .mtl path.
    path_mode -- texture path mode passed to io_utils.path_reference.
    copy_set  -- set collecting (src, dst) image copies for path_mode 'COPY'.
    mtl_dict  -- {key: (mtl_name, material)} gathered by write_file().
    """
    source_dir = os.path.dirname(bpy.data.filepath)
    dest_dir = os.path.dirname(filepath)

    with open(filepath, "w", encoding="utf8", newline="\n") as f:
        fw = f.write

        fw('# Blender MTL File: %r\n' % (os.path.basename(bpy.data.filepath) or "None"))
        fw('# Material Count: %i\n' % len(mtl_dict))

        mtl_dict_values = list(mtl_dict.values())
        mtl_dict_values.sort(key=lambda m: m[0])

        # Write material/image combinations we have used.
        # Using mtl_dict.values() directly gives un-predictable order.
        for mtl_mat_name, mat in mtl_dict_values:
            # Get the Blender data for the material and the image.
            # Having an image named None will make a bug, dont do it :)
            fw('\nnewmtl %s\n' % mtl_mat_name)  # Define a new material: matname_imgname

            mat_wrap = node_shader_utils.PrincipledBSDFWrapper(mat) if mat else None

            if mat_wrap:
                use_mirror = mat_wrap.metallic != 0.0
                use_transparency = mat_wrap.alpha != 1.0

                # XXX Totally empirical conversion, trying to adapt it
                # (from 1.0 - 0.0 Principled BSDF range to 0.0 - 1000.0 OBJ specular exponent range):
                # (1.0 - bsdf_roughness)^2 * 1000
                spec = (1.0 - mat_wrap.roughness)
                spec *= spec * 1000
                fw('Ns %.6f\n' % spec)

                # Ambient
                if use_mirror:
                    fw('Ka %.6f %.6f %.6f\n' % (mat_wrap.metallic, mat_wrap.metallic, mat_wrap.metallic))
                else:
                    fw('Ka %.6f %.6f %.6f\n' % (1.0, 1.0, 1.0))
                fw('Kd %.6f %.6f %.6f\n' % mat_wrap.base_color[:3])  # Diffuse
                # XXX TODO Find a way to handle tint and diffuse color, in a consistent way with import...
                fw('Ks %.6f %.6f %.6f\n' % (mat_wrap.specular, mat_wrap.specular, mat_wrap.specular))  # Specular
                # Emission, not in original MTL standard but seems pretty common, see T45766.
                emission_strength = mat_wrap.emission_strength
                emission = [emission_strength * c for c in mat_wrap.emission_color[:3]]
                fw('Ke %.6f %.6f %.6f\n' % tuple(emission))
                fw('Ni %.6f\n' % mat_wrap.ior)  # Refraction index
                fw('d %.6f\n' % mat_wrap.alpha)  # Alpha (obj uses 'd' for dissolve)

                # See http://en.wikipedia.org/wiki/Wavefront_.obj_file for whole list of values...
                # Note that mapping is rather fuzzy sometimes, trying to do our best here.
                if mat_wrap.specular == 0:
                    fw('illum 1\n')  # no specular.
                elif use_mirror:
                    if use_transparency:
                        fw('illum 6\n')  # Reflection, Transparency, Ray trace
                    else:
                        fw('illum 3\n')  # Reflection and Ray trace
                elif use_transparency:
                    fw('illum 9\n')  # 'Glass' transparency and no Ray trace reflection... fuzzy matching, but...
                else:
                    fw('illum 2\n')  # light normally

                #### And now, the image textures...
                # MTL map keyword -> PrincipledBSDFWrapper attribute; None
                # entries have no Blender counterpart and are skipped.
                image_map = {
                    "map_Kd": "base_color_texture",
                    "map_Ka": None,  # ambient...
                    "map_Ks": "specular_texture",
                    "map_Ns": "roughness_texture",
                    "map_d": "alpha_texture",
                    "map_Tr": None,  # transmission roughness?
                    "map_Bump": "normalmap_texture",
                    "disp": None,  # displacement...
                    "refl": "metallic_texture",
                    "map_Ke": "emission_color_texture" if emission_strength != 0.0 else None,
                }

                for key, mat_wrap_key in sorted(image_map.items()):
                    if mat_wrap_key is None:
                        continue
                    tex_wrap = getattr(mat_wrap, mat_wrap_key, None)
                    if tex_wrap is None:
                        continue
                    image = tex_wrap.image
                    if image is None:
                        continue

                    # NOTE(review): rebinds the 'filepath' parameter; the
                    # original .mtl path is no longer needed at this point.
                    filepath = io_utils.path_reference(image.filepath, source_dir, dest_dir,
                                                       path_mode, "", copy_set, image.library)
                    options = []
                    if key == "map_Bump":
                        if mat_wrap.normalmap_strength != 1.0:
                            options.append('-bm %.6f' % mat_wrap.normalmap_strength)
                    if tex_wrap.translation != Vector((0.0, 0.0, 0.0)):
                        options.append('-o %.6f %.6f %.6f' % tex_wrap.translation[:])
                    if tex_wrap.scale != Vector((1.0, 1.0, 1.0)):
                        options.append('-s %.6f %.6f %.6f' % tex_wrap.scale[:])
                    if options:
                        fw('%s %s %s\n' % (key, " ".join(options), repr(filepath)[1:-1]))
                    else:
                        fw('%s %s\n' % (key, repr(filepath)[1:-1]))

            else:
                # Write a dummy material here?
                fw('Ns 500\n')
                fw('Ka 0.8 0.8 0.8\n')
                fw('Kd 0.8 0.8 0.8\n')
                fw('Ks 0.8 0.8 0.8\n')
                fw('d 1\n')  # No alpha
                fw('illum 2\n')  # light normally
def test_nurbs_compat(ob):
if ob.type != 'CURVE':
return False
for nu in ob.data.splines:
if nu.point_count_v == 1 and nu.type != 'BEZIER': # not a surface and not bezier
return True
return False
def write_nurb(fw, ob, ob_mat):
    """Write curve object *ob* as OBJ free-form curves ('curv' records).

    fw     -- write function of the open OBJ file.
    ob_mat -- world matrix (already combined with the export matrix).
    Returns the number of 'v' records written, so the caller can keep its
    global vertex offset in sync.
    """
    tot_verts = 0
    cu = ob.data

    # use negative indices
    for nu in cu.splines:
        if nu.type == 'POLY':
            DEG_ORDER_U = 1
        else:
            DEG_ORDER_U = nu.order_u - 1  # odd but tested to be correct

        if nu.type == 'BEZIER':
            print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported")
            continue

        if nu.point_count_v > 1:
            print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported")
            continue

        if len(nu.points) <= DEG_ORDER_U:
            print("\tWarning, order_u is lower then vert count, skipping:", ob.name)
            continue

        pt_num = 0
        do_closed = nu.use_cyclic_u
        do_endpoints = (do_closed == 0) and nu.use_endpoint_u

        # Emit the control points in world space.
        for pt in nu.points:
            fw('v %.6f %.6f %.6f\n' % (ob_mat @ pt.co.to_3d())[:])
            pt_num += 1
        tot_verts += pt_num

        fw('g %s\n' % (name_compat(ob.name)))  # name_compat(ob.getData(1)) could use the data name too
        fw('cstype bspline\n')  # not ideal, hard coded
        fw('deg %d\n' % DEG_ORDER_U)  # not used for curves but most files have it still

        # Negative indices reference the vertices just written, counted
        # backwards from the end of the file.
        curve_ls = [-(i + 1) for i in range(pt_num)]

        # 'curv' keyword
        if do_closed:
            if DEG_ORDER_U == 1:
                pt_num += 1
                curve_ls.append(-1)
            else:
                pt_num += DEG_ORDER_U
                curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]

        fw('curv 0.0 1.0 %s\n' % (" ".join([str(i) for i in curve_ls])))  # Blender has no U and V values for the curve

        # 'parm' keyword
        tot_parm = (DEG_ORDER_U + 1) + pt_num
        tot_parm_div = float(tot_parm - 1)
        parm_ls = [(i / tot_parm_div) for i in range(tot_parm)]

        if do_endpoints:  # end points, force param
            for i in range(DEG_ORDER_U + 1):
                parm_ls[i] = 0.0
                parm_ls[-(1 + i)] = 1.0

        fw("parm u %s\n" % " ".join(["%.6f" % i for i in parm_ls]))

        fw('end\n')

    return tot_verts
def write_file(filepath, objects, depsgraph, scene,
               EXPORT_TRI=False,
               EXPORT_EDGES=False,
               EXPORT_SMOOTH_GROUPS=False,
               EXPORT_SMOOTH_GROUPS_BITFLAGS=False,
               EXPORT_NORMALS=False,
               EXPORT_UV=True,
               EXPORT_MTL=True,
               EXPORT_APPLY_MODIFIERS=True,
               EXPORT_APPLY_MODIFIERS_RENDER=False,
               EXPORT_BLEN_OBS=True,
               EXPORT_GROUP_BY_OB=False,
               EXPORT_GROUP_BY_MAT=False,
               EXPORT_KEEP_VERT_ORDER=False,
               EXPORT_POLYGROUPS=False,
               EXPORT_CURVE_AS_NURBS=True,
               EXPORT_GLOBAL_MATRIX=None,
               EXPORT_PATH_MODE='AUTO',
               progress=None,
               ):
    """
    Basic write function. The context and options must be already set
    This can be accessed externally
    eg.
    write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.

    Writes one .obj (and optionally one .mtl) for *objects* as seen through
    *depsgraph*.  EXPORT_GLOBAL_MATRIX is applied to all geometry; *progress*
    is an optional ProgressReport to report into (a fresh one is created
    when omitted).  EXPORT_APPLY_MODIFIERS_RENDER is accepted for interface
    compatibility but is not used by this function.
    """
    if EXPORT_GLOBAL_MATRIX is None:
        EXPORT_GLOBAL_MATRIX = Matrix()
    # Fix: the previous mutable/stateful default (progress=ProgressReport())
    # created one shared reporter at import time; create a fresh one per call.
    if progress is None:
        progress = ProgressReport()

    def veckey3d(v):
        # Rounded tuple key so near-identical normals share one 'vn' record.
        return round(v.x, 4), round(v.y, 4), round(v.z, 4)

    def veckey2d(v):
        # Rounded tuple key so near-identical UVs share one 'vt' record.
        return round(v[0], 4), round(v[1], 4)

    def findVertexGroupName(face, vWeightMap):
        """
        Searches the vertexDict to see what groups is assigned to a given face.
        We use a frequency system in order to sort out the name because a given vertex can
        belong to two or more groups at the same time. To find the right name for the face
        we list all the possible vertex group names with their frequency and then sort by
        frequency in descend order. The top element is the one shared by the highest number
        of vertices is the face's group
        """
        weightDict = {}
        for vert_index in face.vertices:
            vWeights = vWeightMap[vert_index]
            for vGroupName, weight in vWeights:
                weightDict[vGroupName] = weightDict.get(vGroupName, 0.0) + weight

        if weightDict:
            return max((weight, vGroupName) for vGroupName, weight in weightDict.items())[1]
        else:
            return '(null)'

    with ProgressReportSubstep(progress, 2, "OBJ Export path: %r" % filepath, "OBJ Export Finished") as subprogress1:
        with open(filepath, "w", encoding="utf8", newline="\n") as f:
            fw = f.write

            # Write Header
            fw('# Blender v%s OBJ File: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
            fw('# www.blender.org\n')

            # Tell the obj file what material file to use.
            if EXPORT_MTL:
                mtlfilepath = os.path.splitext(filepath)[0] + ".mtl"
                # filepath can contain non utf8 chars, use repr
                fw('mtllib %s\n' % repr(os.path.basename(mtlfilepath))[1:-1])

            # Running offsets for OBJ's global, 1-based v/vt/vn indices,
            # updated after each object is written.
            totverts = totuvco = totno = 1

            # A Dict of Materials
            # (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
            mtl_dict = {}
            # Used to reduce the usage of matname_texname materials, which can become annoying in case of
            # repeated exports/imports, yet keeping unique mat names per keys!
            # mtl_name: (material.name, image.name)
            mtl_rev_dict = {}

            copy_set = set()

            # Get all meshes
            subprogress1.enter_substeps(len(objects))
            for i, ob_main in enumerate(objects):
                # ignore dupli children
                if ob_main.parent and ob_main.parent.instance_type in {'VERTS', 'FACES'}:
                    subprogress1.step("Ignoring %s, dupli child..." % ob_main.name)
                    continue

                obs = [(ob_main, ob_main.matrix_world)]
                if ob_main.is_instancer:
                    obs += [(dup.instance_object.original, dup.matrix_world.copy())
                            for dup in depsgraph.object_instances
                            if dup.parent and dup.parent.original == ob_main]
                    # ~ print(ob_main.name, 'has', len(obs) - 1, 'dupli children')

                subprogress1.enter_substeps(len(obs))
                for ob, ob_mat in obs:
                    with ProgressReportSubstep(subprogress1, 6) as subprogress2:
                        uv_unique_count = no_unique_count = 0

                        # Nurbs curve support
                        if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
                            ob_mat = EXPORT_GLOBAL_MATRIX @ ob_mat
                            totverts += write_nurb(fw, ob, ob_mat)
                            continue
                        # END NURBS

                        ob_for_convert = ob.evaluated_get(depsgraph) if EXPORT_APPLY_MODIFIERS else ob.original

                        try:
                            me = ob_for_convert.to_mesh()
                        except RuntimeError:
                            me = None

                        if me is None:
                            continue

                        # _must_ do this before applying transformation, else tessellation may differ
                        if EXPORT_TRI:
                            # _must_ do this first since it re-allocs arrays
                            mesh_triangulate(me)

                        me.transform(EXPORT_GLOBAL_MATRIX @ ob_mat)
                        # If negative scaling, we have to invert the normals...
                        if ob_mat.determinant() < 0.0:
                            me.flip_normals()

                        if EXPORT_UV:
                            faceuv = len(me.uv_layers) > 0
                            if faceuv:
                                uv_layer = me.uv_layers.active.data[:]
                        else:
                            faceuv = False

                        me_verts = me.vertices[:]

                        # Make our own list so it can be sorted to reduce context switching
                        face_index_pairs = [(face, index) for index, face in enumerate(me.polygons)]

                        if EXPORT_EDGES:
                            edges = me.edges
                        else:
                            edges = []

                        if not (len(face_index_pairs) + len(edges) + len(me.vertices)):  # Make sure there is something to write
                            # clean up
                            ob_for_convert.to_mesh_clear()
                            continue  # dont bother with this mesh.

                        if EXPORT_NORMALS and face_index_pairs:
                            me.calc_normals_split()
                            # No need to call me.free_normals_split later, as this mesh is deleted anyway!

                        loops = me.loops

                        if (EXPORT_SMOOTH_GROUPS or EXPORT_SMOOTH_GROUPS_BITFLAGS) and face_index_pairs:
                            smooth_groups, smooth_groups_tot = me.calc_smooth_groups(use_bitflags=EXPORT_SMOOTH_GROUPS_BITFLAGS)
                            if smooth_groups_tot <= 1:
                                smooth_groups, smooth_groups_tot = (), 0
                        else:
                            smooth_groups, smooth_groups_tot = (), 0

                        materials = me.materials[:]
                        material_names = [m.name if m else None for m in materials]

                        # avoid bad index errors
                        if not materials:
                            materials = [None]
                            material_names = [name_compat(None)]

                        # Sort by Material, then images
                        # so we dont over context switch in the obj file.
                        if EXPORT_KEEP_VERT_ORDER:
                            pass
                        else:
                            if len(materials) > 1:
                                if smooth_groups:
                                    sort_func = lambda a: (a[0].material_index,
                                                           smooth_groups[a[1]] if a[0].use_smooth else False)
                                else:
                                    sort_func = lambda a: (a[0].material_index,
                                                           a[0].use_smooth)
                            else:
                                # no materials
                                if smooth_groups:
                                    # NOTE(review): bracket placement looks off — likely meant
                                    # smooth_groups[a[1]] if a[0].use_smooth else False; kept
                                    # as-is to preserve the historical sort order.
                                    sort_func = lambda a: smooth_groups[a[1] if a[0].use_smooth else False]
                                else:
                                    sort_func = lambda a: a[0].use_smooth

                            face_index_pairs.sort(key=sort_func)
                            del sort_func

                        # Set the default mat to no material and no image.
                        contextMat = 0, 0  # Can never be this, so we will label a new material the first chance we get.
                        contextSmooth = None  # Will either be true or false, set bad to force initialization switch.

                        if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
                            name1 = ob.name
                            name2 = ob.data.name
                            if name1 == name2:
                                obnamestring = name_compat(name1)
                            else:
                                obnamestring = '%s_%s' % (name_compat(name1), name_compat(name2))

                            if EXPORT_BLEN_OBS:
                                fw('o %s\n' % obnamestring)  # Write Object name
                            else:  # if EXPORT_GROUP_BY_OB:
                                fw('g %s\n' % obnamestring)

                        subprogress2.step()

                        # Vert
                        for v in me_verts:
                            fw('v %.6f %.6f %.6f\n' % v.co[:])

                        subprogress2.step()

                        # UV
                        if faceuv:
                            # in case removing some of these dont get defined.
                            uv = f_index = uv_index = uv_key = uv_val = uv_ls = None

                            uv_face_mapping = [None] * len(face_index_pairs)

                            uv_dict = {}
                            uv_get = uv_dict.get
                            for f, f_index in face_index_pairs:
                                uv_ls = uv_face_mapping[f_index] = []
                                for uv_index, l_index in enumerate(f.loop_indices):
                                    uv = uv_layer[l_index].uv
                                    # include the vertex index in the key so we don't share UVs between vertices,
                                    # allowed by the OBJ spec but can cause issues for other importers, see: T47010.
                                    # this works too, shared UVs for all verts
                                    #~ uv_key = veckey2d(uv)
                                    uv_key = loops[l_index].vertex_index, veckey2d(uv)
                                    uv_val = uv_get(uv_key)
                                    if uv_val is None:
                                        uv_val = uv_dict[uv_key] = uv_unique_count
                                        fw('vt %.6f %.6f\n' % uv[:])
                                        uv_unique_count += 1
                                    uv_ls.append(uv_val)

                            del uv_dict, uv, f_index, uv_index, uv_ls, uv_get, uv_key, uv_val
                            # Only need uv_unique_count and uv_face_mapping

                        subprogress2.step()

                        # NORMAL, Smooth/Non smoothed.
                        if EXPORT_NORMALS:
                            no_key = no_val = None
                            normals_to_idx = {}
                            no_get = normals_to_idx.get
                            loops_to_normals = [0] * len(loops)
                            for f, f_index in face_index_pairs:
                                for l_idx in f.loop_indices:
                                    no_key = veckey3d(loops[l_idx].normal)
                                    no_val = no_get(no_key)
                                    if no_val is None:
                                        no_val = normals_to_idx[no_key] = no_unique_count
                                        fw('vn %.4f %.4f %.4f\n' % no_key)
                                        no_unique_count += 1
                                    loops_to_normals[l_idx] = no_val
                            del normals_to_idx, no_get, no_key, no_val
                        else:
                            loops_to_normals = []

                        subprogress2.step()

                        # XXX
                        if EXPORT_POLYGROUPS:
                            # Retrieve the list of vertex groups
                            vertGroupNames = ob.vertex_groups.keys()
                            if vertGroupNames:
                                currentVGroup = ''
                                # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
                                vgroupsMap = [[] for _i in range(len(me_verts))]
                                for v_idx, v_ls in enumerate(vgroupsMap):
                                    v_ls[:] = [(vertGroupNames[g.group], g.weight) for g in me_verts[v_idx].groups]

                        for f, f_index in face_index_pairs:
                            f_smooth = f.use_smooth
                            if f_smooth and smooth_groups:
                                f_smooth = smooth_groups[f_index]
                            f_mat = min(f.material_index, len(materials) - 1)

                            # MAKE KEY
                            key = material_names[f_mat], None  # No image, use None instead.

                            # Write the vertex group
                            if EXPORT_POLYGROUPS:
                                if vertGroupNames:
                                    # find what vertext group the face belongs to
                                    vgroup_of_face = findVertexGroupName(f, vgroupsMap)
                                    if vgroup_of_face != currentVGroup:
                                        currentVGroup = vgroup_of_face
                                        fw('g %s\n' % vgroup_of_face)

                            # CHECK FOR CONTEXT SWITCH
                            if key == contextMat:
                                pass  # Context already switched, dont do anything
                            else:
                                if key[0] is None and key[1] is None:
                                    # Write a null material, since we know the context has changed.
                                    if EXPORT_GROUP_BY_MAT:
                                        # can be mat_image or (null)
                                        fw("g %s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name)))
                                    if EXPORT_MTL:
                                        fw("usemtl (null)\n")  # mat, image

                                else:
                                    mat_data = mtl_dict.get(key)
                                    if not mat_data:
                                        # First add to global dict so we can export to mtl
                                        # Then write mtl

                                        # Make a new names from the mat and image name,
                                        # converting any spaces to underscores with name_compat.

                                        # If none image dont bother adding it to the name
                                        # Try to avoid as much as possible adding texname (or other things)
                                        # to the mtl name (see [#32102])...
                                        mtl_name = "%s" % name_compat(key[0])
                                        if mtl_rev_dict.get(mtl_name, None) not in {key, None}:
                                            if key[1] is None:
                                                tmp_ext = "_NONE"
                                            else:
                                                tmp_ext = "_%s" % name_compat(key[1])
                                            i = 0
                                            while mtl_rev_dict.get(mtl_name + tmp_ext, None) not in {key, None}:
                                                i += 1
                                                tmp_ext = "_%3d" % i
                                            mtl_name += tmp_ext
                                        mat_data = mtl_dict[key] = mtl_name, materials[f_mat]
                                        mtl_rev_dict[mtl_name] = key

                                    if EXPORT_GROUP_BY_MAT:
                                        # can be mat_image or (null)
                                        fw("g %s_%s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name), mat_data[0]))
                                    if EXPORT_MTL:
                                        fw("usemtl %s\n" % mat_data[0])  # can be mat_image or (null)

                            contextMat = key
                            if f_smooth != contextSmooth:
                                if f_smooth:  # on now off
                                    if smooth_groups:
                                        f_smooth = smooth_groups[f_index]
                                        fw('s %d\n' % f_smooth)
                                    else:
                                        fw('s 1\n')
                                else:  # was off now on
                                    fw('s off\n')
                                contextSmooth = f_smooth

                            f_v = [(vi, me_verts[v_idx], l_idx)
                                   for vi, (v_idx, l_idx) in enumerate(zip(f.vertices, f.loop_indices))]

                            fw('f')
                            if faceuv:
                                if EXPORT_NORMALS:
                                    for vi, v, li in f_v:
                                        fw(" %d/%d/%d" % (totverts + v.index,
                                                          totuvco + uv_face_mapping[f_index][vi],
                                                          totno + loops_to_normals[li],
                                                          ))  # vert, uv, normal
                                else:  # No Normals
                                    for vi, v, li in f_v:
                                        fw(" %d/%d" % (totverts + v.index,
                                                       totuvco + uv_face_mapping[f_index][vi],
                                                       ))  # vert, uv
                                # (removed: an unused 'face_vert_index' counter that was
                                # incremented here but never read)
                            else:  # No UVs
                                if EXPORT_NORMALS:
                                    for vi, v, li in f_v:
                                        fw(" %d//%d" % (totverts + v.index, totno + loops_to_normals[li]))
                                else:  # No Normals
                                    for vi, v, li in f_v:
                                        fw(" %d" % (totverts + v.index))

                            fw('\n')

                        subprogress2.step()

                        # Write edges.
                        if EXPORT_EDGES:
                            for ed in edges:
                                if ed.is_loose:
                                    fw('l %d %d\n' % (totverts + ed.vertices[0], totverts + ed.vertices[1]))

                        # Make the indices global rather then per mesh
                        totverts += len(me_verts)
                        totuvco += uv_unique_count
                        totno += no_unique_count

                        # clean up
                        ob_for_convert.to_mesh_clear()

                subprogress1.leave_substeps("Finished writing geometry of '%s'." % ob_main.name)
            subprogress1.leave_substeps()

        subprogress1.step("Finished exporting geometry, now exporting materials")

        # Now we have all our materials, save them
        if EXPORT_MTL:
            write_mtl(scene, mtlfilepath, EXPORT_PATH_MODE, copy_set, mtl_dict)

    # copy all collected files.
    io_utils.path_reference_copy(copy_set)
def _write(context, filepath,
           EXPORT_TRI,  # ok
           EXPORT_EDGES,
           EXPORT_SMOOTH_GROUPS,
           EXPORT_SMOOTH_GROUPS_BITFLAGS,
           EXPORT_NORMALS,  # ok
           EXPORT_UV,  # ok
           EXPORT_MTL,
           EXPORT_APPLY_MODIFIERS,  # ok
           EXPORT_APPLY_MODIFIERS_RENDER,  # ok
           EXPORT_BLEN_OBS,
           EXPORT_GROUP_BY_OB,
           EXPORT_GROUP_BY_MAT,
           EXPORT_KEEP_VERT_ORDER,
           EXPORT_POLYGROUPS,
           EXPORT_CURVE_AS_NURBS,
           EXPORT_SEL_ONLY,  # ok
           EXPORT_ANIMATION,
           EXPORT_GLOBAL_MATRIX,
           EXPORT_PATH_MODE,  # forwarded to write_file for image path handling
           ):
    """Drive the OBJ export: resolve output path(s), optionally step through
    the scene's frame range, and hand each frame off to write_file().

    All EXPORT_* flags are forwarded verbatim to write_file(); this function
    only handles frame iteration, file naming and progress reporting.
    """
    with ProgressReport(context.window_manager) as progress:
        stem, extension = os.path.splitext(filepath)
        # Output name is assembled from: [stem, scene name (unused here),
        # per-frame suffix, extension].
        name_parts = [stem, '', '', extension]

        depsgraph = context.evaluated_depsgraph_get()
        scene = context.scene

        # Leave edit mode first so the current object states are exported properly.
        if bpy.ops.object.mode_set.poll():
            bpy.ops.object.mode_set(mode='OBJECT')

        frame_restore = scene.frame_current

        # Animated export covers the full frame range (inclusive); otherwise
        # only the current frame is written.
        frames = (range(scene.frame_start, scene.frame_end + 1)
                  if EXPORT_ANIMATION else [frame_restore])

        progress.enter_substeps(len(frames))
        for frame in frames:
            if EXPORT_ANIMATION:
                # Tag the filename with the frame number and advance the scene.
                name_parts[2] = '_%.6d' % frame
                scene.frame_set(frame, subframe=0.0)

            export_objects = (context.selected_objects
                              if EXPORT_SEL_ONLY else scene.objects)

            # NOTE: a static stem with per-frame suffixes can still overwrite
            # files between runs; acceptable for now.
            out_path = ''.join(name_parts)

            # EXPORT THE FILE.
            progress.enter_substeps(1)
            write_file(out_path, export_objects, depsgraph, scene,
                       EXPORT_TRI,
                       EXPORT_EDGES,
                       EXPORT_SMOOTH_GROUPS,
                       EXPORT_SMOOTH_GROUPS_BITFLAGS,
                       EXPORT_NORMALS,
                       EXPORT_UV,
                       EXPORT_MTL,
                       EXPORT_APPLY_MODIFIERS,
                       EXPORT_APPLY_MODIFIERS_RENDER,
                       EXPORT_BLEN_OBS,
                       EXPORT_GROUP_BY_OB,
                       EXPORT_GROUP_BY_MAT,
                       EXPORT_KEEP_VERT_ORDER,
                       EXPORT_POLYGROUPS,
                       EXPORT_CURVE_AS_NURBS,
                       EXPORT_GLOBAL_MATRIX,
                       EXPORT_PATH_MODE,
                       progress,
                       )
            progress.leave_substeps()

        # Restore the frame the user was on before exporting.
        scene.frame_set(frame_restore, subframe=0.0)
        progress.leave_substeps()
"""
Currently the exporter lacks these features:
* multiple scene export (only active scene is written)
* particles
"""
def save(context,
         filepath,
         *,
         use_triangles=False,
         use_edges=True,
         use_normals=False,
         use_smooth_groups=False,
         use_smooth_groups_bitflags=False,
         use_uvs=True,
         use_materials=True,
         use_mesh_modifiers=True,
         use_mesh_modifiers_render=False,
         use_blen_objects=True,
         group_by_object=False,
         group_by_material=False,
         keep_vertex_order=False,
         use_vertex_groups=False,
         use_nurbs=True,
         use_selection=True,
         use_animation=False,
         global_matrix=None,
         path_mode='AUTO'
         ):
    """Public entry point for the OBJ exporter.

    Maps the operator-facing keyword options onto the internal EXPORT_*
    parameter names and delegates all work to _write().  Always returns
    {'FINISHED'} (errors surface as exceptions from _write()).
    """
    # Translate public option names to the internal EXPORT_* vocabulary.
    export_options = {
        'EXPORT_TRI': use_triangles,
        'EXPORT_EDGES': use_edges,
        'EXPORT_SMOOTH_GROUPS': use_smooth_groups,
        'EXPORT_SMOOTH_GROUPS_BITFLAGS': use_smooth_groups_bitflags,
        'EXPORT_NORMALS': use_normals,
        'EXPORT_UV': use_uvs,
        'EXPORT_MTL': use_materials,
        'EXPORT_APPLY_MODIFIERS': use_mesh_modifiers,
        'EXPORT_APPLY_MODIFIERS_RENDER': use_mesh_modifiers_render,
        'EXPORT_BLEN_OBS': use_blen_objects,
        'EXPORT_GROUP_BY_OB': group_by_object,
        'EXPORT_GROUP_BY_MAT': group_by_material,
        'EXPORT_KEEP_VERT_ORDER': keep_vertex_order,
        'EXPORT_POLYGROUPS': use_vertex_groups,
        'EXPORT_CURVE_AS_NURBS': use_nurbs,
        'EXPORT_SEL_ONLY': use_selection,
        'EXPORT_ANIMATION': use_animation,
        'EXPORT_GLOBAL_MATRIX': global_matrix,
        'EXPORT_PATH_MODE': path_mode,
    }

    _write(context, filepath, **export_options)

    return {'FINISHED'}

File diff suppressed because it is too large Load Diff

View File

@ -332,11 +332,12 @@ class MUV_OT_SelectUV_ZoomSelectedUV(bpy.types.Operator):
bmesh.update_edit_mesh(obj.data)
# Zoom.
override_context = self._get_override_context(context)
if override_context is None:
context_override = self._get_override_context(context)
if context_override is None:
self.report({'WARNING'}, "More than one 'VIEW_3D' area must exist")
return {'CANCELLED'}
bpy.ops.view3d.view_selected(override_context, use_all_regions=False)
with context.temp_override(**context_override):
bpy.ops.view3d.view_selected(use_all_regions=False)
# Revert selection of vertices.
for v in sel_verts:

View File

@ -428,8 +428,8 @@ class MUV_OT_UVInspection_PaintUVIsland(bpy.types.Operator):
objs = common.get_uv_editable_objects(context)
mode_orig = context.object.mode
override_context = self._get_override_context(context)
if override_context is None:
context_override = self._get_override_context(context)
if context_override is None:
self.report({'WARNING'}, "More than one 'VIEW_3D' area must exist")
return {'CANCELLED'}
@ -544,8 +544,8 @@ class MUV_OT_UVInspection_PaintUVIsland(bpy.types.Operator):
# Paint.
bpy.ops.object.mode_set(mode='TEXTURE_PAINT')
if compat.check_version(2, 80, 0) >= 0:
bpy.ops.paint.brush_select(override_context,
image_tool='FILL')
with context.temp_override(**context_override):
bpy.ops.paint.brush_select(image_tool='FILL')
else:
paint_settings = \
bpy.data.scenes['Scene'].tool_settings.image_paint
@ -553,9 +553,11 @@ class MUV_OT_UVInspection_PaintUVIsland(bpy.types.Operator):
paint_canvas_orig = paint_settings.canvas
paint_settings.mode = 'IMAGE'
paint_settings.canvas = target_image
bpy.ops.paint.brush_select(override_context,
texture_paint_tool='FILL')
bpy.ops.paint.image_paint(override_context, stroke=[{
with context.temp_override(**context_override):
bpy.ops.paint.brush_select(texture_paint_tool='FILL')
with context.temp_override(**context_override):
bpy.ops.paint.image_paint(stroke=[{
"name": "",
"location": (0, 0, 0),
"mouse": (0, 0),

View File

@ -21,8 +21,8 @@ import gpu
from gpu_extras.batch import batch_for_shader
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR') if not bpy.app.background else None
shader_line = gpu.shader.from_builtin('3D_POLYLINE_UNIFORM_COLOR') if not bpy.app.background else None
shader = gpu.shader.from_builtin('UNIFORM_COLOR') if not bpy.app.background else None
shader_line = gpu.shader.from_builtin('POLYLINE_UNIFORM_COLOR') if not bpy.app.background else None
imm_line_width = 1.0
imm_viewport = (0, 0)
@ -814,8 +814,8 @@ def draw_text(myobj, pos2d, display_text, rgba, fsize, align='L', text_rot=0.0):
x_pos, y_pos = pos2d
font_id = 0
ui_scale = bpy.context.preferences.system.ui_scale
blf.size(font_id, round(fsize * ui_scale), 72)
# blf.size(font_id, fsize, dpi)
blf.size(font_id, round(fsize * ui_scale))
# blf.size(font_id, fsize)
# height of one line
mwidth, mheight = blf.dimensions(font_id, "Tp") # uses high/low letters

View File

@ -1389,7 +1389,8 @@ class MESH_OT_SURFSK_add_surface(Operator):
self.is_crosshatch = False
# Delete all duplicates
bpy.ops.object.delete({"selected_objects": objects_to_delete})
with bpy.context.temp_override(selected_objects=objects_to_delete):
bpy.ops.object.delete()
# If the main object has modifiers, turn their "viewport view status" to
# what it was before the forced deactivation above
@ -1610,7 +1611,8 @@ class MESH_OT_SURFSK_add_surface(Operator):
ob_surface.scale = (1.0, 1.0, 1.0)
# Delete final points temporal object
bpy.ops.object.delete({"selected_objects": [final_points_ob]})
with bpy.context.temp_override(selected_objects=[final_points_ob]):
bpy.ops.object.delete()
# Delete isolated verts if there are any
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
@ -1749,7 +1751,8 @@ class MESH_OT_SURFSK_add_surface(Operator):
self.main_object.data.vertices[main_object_related_vert_idx].select = True
# Delete duplicated object
bpy.ops.object.delete({"selected_objects": [final_ob_duplicate]})
with bpy.context.temp_override(selected_objects=[final_ob_duplicate]):
bpy.ops.object.delete()
# Join crosshatched surface and main object
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
@ -2539,7 +2542,8 @@ class MESH_OT_SURFSK_add_surface(Operator):
ob_simplified_curve[i].data.splines[0].bezier_points[t].co
# Delete the temporal curve
bpy.ops.object.delete({"selected_objects": [ob_simplified_curve[i]]})
with bpy.context.temp_override(selected_objects=[ob_simplified_curve[i]]):
bpy.ops.object.delete()
# Get the coords of the points distributed along the sketched strokes,
# with proportions-U of the first selection
@ -3019,9 +3023,11 @@ class MESH_OT_SURFSK_add_surface(Operator):
surface_splines_parsed[len(surface_splines_parsed) - 1][i] = verts_middle_position_co
# Delete object with control points and object from grease pencil conversion
bpy.ops.object.delete({"selected_objects": [ob_ctrl_pts]})
with bpy.context.temp_override(selected_objects=[ob_ctrl_pts]):
bpy.ops.object.delete()
bpy.ops.object.delete({"selected_objects": splines_U_objects})
with bpy.context.temp_override(selected_objects=splines_U_objects):
bpy.ops.object.delete()
# Generate surface
@ -3176,7 +3182,8 @@ class MESH_OT_SURFSK_add_surface(Operator):
mat.roughness = 0.0
self.main_splines.data.materials.append(mat)
else:
bpy.ops.object.delete({"selected_objects": [self.main_splines]})
with bpy.context.temp_override(selected_objects=[self.main_splines]):
bpy.ops.object.delete()
# Delete grease pencil strokes
if self.strokes_type == "GP_STROKES" and not self.stopping_errors:
@ -3275,7 +3282,8 @@ class MESH_OT_SURFSK_add_surface(Operator):
# executions of this operator, with the reserved names used here
for o in bpy.data.objects:
if o.name.find("SURFSKIO_") != -1:
bpy.ops.object.delete({"selected_objects": [o]})
with bpy.context.temp_override(selected_objects=[o]):
bpy.ops.object.delete()
bpy.context.view_layer.objects.active = self.original_curve
@ -3413,7 +3421,8 @@ class MESH_OT_SURFSK_add_surface(Operator):
self.average_gp_segment_length = segments_lengths_sum / segments_count
# Delete temporary strokes curve object
bpy.ops.object.delete({"selected_objects": [self.temporary_curve]})
with bpy.context.temp_override(selected_objects=[self.temporary_curve]):
bpy.ops.object.delete()
# Set again since "execute()" will turn it again to its initial value
self.execute(context)
@ -3434,7 +3443,8 @@ class MESH_OT_SURFSK_add_surface(Operator):
pass
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.delete({"selected_objects": [self.original_curve]})
with bpy.context.temp_override(selected_objects=[self.original_curve]):
bpy.ops.object.delete()
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
return {"FINISHED"}
@ -4062,7 +4072,8 @@ class CURVE_OT_SURFSK_reorder_splines(Operator):
bpy.context.object.name = curve_original_name
# Delete all unused objects
bpy.ops.object.delete({"selected_objects": objects_to_delete})
with bpy.context.temp_override(selected_objects=objects_to_delete):
bpy.ops.object.delete()
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[curve_original_name].select_set(True)

View File

@ -45,7 +45,7 @@ HEIGHT_VALUE = 1
NUM_VALUES = 2
# TODO: make a dooted-line shader
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR') if not bpy.app.background else None
shader = gpu.shader.from_builtin('UNIFORM_COLOR') if not bpy.app.background else None
class MESH_OT_InsetStraightSkeleton(bpy.types.Operator):
bl_idname = "mesh.insetstraightskeleton"

View File

@ -67,9 +67,9 @@ class SnapDrawn():
clip_planes = self.rv3d.clip_planes if self.rv3d.use_clip_planes else None
config = 'CLIPPED' if clip_planes else 'DEFAULT'
self._program_unif_col = gpu.shader.from_builtin(
"3D_UNIFORM_COLOR", config=config)
"UNIFORM_COLOR", config=config)
self._program_smooth_col = gpu.shader.from_builtin(
"3D_SMOOTH_COLOR", config=config)
"SMOOTH_COLOR", config=config)
gpu.state.program_point_size_set(False)
gpu.state.blend_set('ALPHA')

View File

@ -325,7 +325,6 @@ class lattice_along_surface(Operator):
grid_mesh = temp_grid_obj.data
for v in grid_mesh.vertices:
v.co = grid_obj.matrix_world @ v.co
grid_mesh.calc_normals()
if len(grid_mesh.polygons) > 64 * 64:
bpy.data.objects.remove(temp_grid_obj)

View File

@ -261,7 +261,6 @@ def simple_to_mesh(ob, depsgraph=None):
dg = depsgraph
ob_eval = ob.evaluated_get(dg)
me = bpy.data.meshes.new_from_object(ob_eval, preserve_all_data_layers=True, depsgraph=dg)
me.calc_normals()
return me
def _join_objects(context, objects, link_to_scene=True, make_active=True):
@ -344,8 +343,9 @@ def array_mesh(ob, n):
arr = ob.modifiers.new('Repeat','ARRAY')
arr.relative_offset_displace[0] = 0
arr.count = n
#bpy.ops.object.modifier_apply({'active_object':ob},modifier='Repeat')
#me = ob.data
# with bpy.context.temp_override(active_object=ob):
# bpy.ops.object.modifier_apply(modifier='Repeat')
# me = ob.data
ob.modifiers.update()
dg = bpy.context.evaluated_depsgraph_get()

View File

@ -1349,7 +1349,6 @@ class NWMergeNodes(Operator, NWBase):
if tree_type == 'COMPOSITING':
first = 1
second = 2
add.width_hidden = 100.0
elif nodes_list == selected_math:
add_type = node_type + 'Math'
add = nodes.new(add_type)
@ -1359,7 +1358,6 @@ class NWMergeNodes(Operator, NWBase):
loc_y = loc_y - 50
first = 0
second = 1
add.width_hidden = 100.0
elif nodes_list == selected_shader:
if mode == 'MIX':
add_type = node_type + 'MixShader'
@ -1369,7 +1367,6 @@ class NWMergeNodes(Operator, NWBase):
loc_y = loc_y - 50
first = 1
second = 2
add.width_hidden = 100.0
elif mode == 'ADD':
add_type = node_type + 'AddShader'
add = nodes.new(add_type)
@ -1378,7 +1375,6 @@ class NWMergeNodes(Operator, NWBase):
loc_y = loc_y - 50
first = 0
second = 1
add.width_hidden = 100.0
elif nodes_list == selected_geometry:
if mode in ('JOIN', 'MIX'):
add_type = node_type + 'JoinGeometry'
@ -1401,7 +1397,6 @@ class NWMergeNodes(Operator, NWBase):
loc_y = loc_y - 50
first = 0
second = 1
add.width_hidden = 100.0
elif nodes_list == selected_z:
add = nodes.new('CompositorNodeZcombine')
add.show_preview = False
@ -1410,7 +1405,6 @@ class NWMergeNodes(Operator, NWBase):
loc_y = loc_y - 50
first = 0
second = 2
add.width_hidden = 100.0
elif nodes_list == selected_alphaover:
add = nodes.new('CompositorNodeAlphaOver')
add.show_preview = False
@ -1419,7 +1413,6 @@ class NWMergeNodes(Operator, NWBase):
loc_y = loc_y - 50
first = 1
second = 2
add.width_hidden = 100.0
add.location = loc_x, loc_y
loc_y += offset_y
add.select = True
@ -2184,7 +2177,6 @@ class NWAddReroutes(Operator, NWBase):
# unhide 'REROUTE' nodes to avoid issues with location.y
if node.type == 'REROUTE':
node.hide = False
# When node is hidden - width_hidden not usable.
# Hack needed to calculate real width
if node.hide:
bpy.ops.node.select_all(action='DESELECT')
@ -2726,7 +2718,6 @@ class NWAddMultipleImages(Operator, NWBase, ImportHelper):
new_nodes.append(node)
node.label = fname
node.hide = True
node.width_hidden = 100
node.location.x = xloc
node.location.y = yloc
yloc -= 40

View File

@ -554,8 +554,9 @@ class Auto_Boolean:
md.operation = mode
md.object = ob
override = {"object": obj}
bpy.ops.object.modifier_apply(override, modifier=md.name)
context_override = {'object': obj}
with bpy.context.temp_override(**context_override):
bpy.ops.object.modifier_apply(modifier=md.name)
if ob_delete:
bpy.data.objects.remove(ob)

View File

@ -135,7 +135,7 @@ def draw_callback_px(self, context):
# Get the size of the text
text_size = 18 if region.width >= 850 else 12
ui_scale = bpy.context.preferences.system.ui_scale
blf.size(0, round(text_size * ui_scale), 72)
blf.size(0, round(text_size * ui_scale))
# Help Display
if (self.ObjectMode is False) and (self.ProfileMode is False):
@ -233,7 +233,7 @@ def draw_callback_px(self, context):
# Display boolean mode
text_size = 40 if region.width >= 850 else 20
blf.size(0, round(text_size * ui_scale), 72)
blf.size(0, round(text_size * ui_scale))
draw_string(self, color2, color2, region_width - (blf.dimensions(0, BooleanMode)[0]) / 2, \
y_txt + bloc_height + 16, BooleanMode, 0, divide = 2)
@ -242,7 +242,7 @@ def draw_callback_px(self, context):
if self.AskHelp is False:
# "H for Help" text
blf.size(0, round(13 * ui_scale), 72)
blf.size(0, round(13 * ui_scale))
help_txt = "[" + self.carver_prefs.Key_Help + "] for help"
txt_width = blf.dimensions(0, help_txt)[0]
txt_height = (blf.dimensions(0, "gM")[1] * 1.45)
@ -327,7 +327,7 @@ def draw_callback_px(self, context):
["Gap for rows or columns", self.carver_prefs.Key_Gapy + " " + self.carver_prefs.Key_Gapx]
]
blf.size(0, round(15 * ui_scale), 72)
blf.size(0, round(15 * ui_scale))
help_txt, bloc_height, max_option, max_key, comma = get_text_info(self, context, help_txt)
draw_string(self, color1, color2, xHelp, yHelp, help_txt, max_option)

View File

@ -308,11 +308,16 @@ def CreateBevel(context, CurrentObject):
bpy.ops.object.mode_set(mode='OBJECT')
CurrentObject.data.use_customdata_edge_bevel = True
bevel_weights = CurrentObject.data.attributes["bevel_weight_edge"]
if not bevel_weights:
bevel_weights = CurrentObject.data.attributes.new("bevel_weight_edge", 'FLOAT', 'EDGE')
if bevel_weights.data_type != 'FLOAT' or bevel_weights.domain != 'EDGE':
bevel_weights = None
for i in range(len(CurrentObject.data.edges)):
if CurrentObject.data.edges[i].select is True:
CurrentObject.data.edges[i].bevel_weight = 1.0
if bevel_weights:
bevel_weights.data[i] = 1.0
CurrentObject.data.edges[i].use_edge_sharp = True
bevel_modifier = False

View File

@ -343,6 +343,7 @@ class CMPreferences(AddonPreferences):
subtype='COLOR_GAMMA',
min=0.0,
max=1.0,
size=4,
get=get_tool_outline,
set=set_tool_outline,
)
@ -377,6 +378,7 @@ class CMPreferences(AddonPreferences):
subtype='COLOR_GAMMA',
min=0.0,
max=1.0,
size=4,
get=get_menu_back_outline,
set=set_menu_back_outline,
)
@ -411,6 +413,7 @@ class CMPreferences(AddonPreferences):
subtype='COLOR_GAMMA',
min=0.0,
max=1.0,
size=4,
get=get_tooltip_outline,
set=set_tooltip_outline,
)

View File

@ -740,7 +740,7 @@ def allocate_main_ui(self, context):
def draw_callback_px(self, context):
allocate_main_ui(self, context)
shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
shader.bind()
addon_prefs = context.preferences.addons[__package__].preferences
@ -761,7 +761,7 @@ def draw_callback_px(self, context):
text_color = addon_prefs.qcd_ogl_widget_menu_back_text
font_id = 0
blf.position(font_id, x, y, 0)
blf.size(font_id, int(h), 72)
blf.size(font_id, int(h))
blf.color(font_id, text_color[0], text_color[1], text_color[2], 1)
blf.draw(font_id, text)
@ -927,7 +927,7 @@ def draw_tooltip(self, context, shader, message):
font_id = 0
line_height = 11 * scale_factor()
text_color = addon_prefs.qcd_ogl_widget_tooltip_text
blf.size(font_id, int(line_height), 72)
blf.size(font_id, int(line_height))
blf.color(font_id, text_color[0], text_color[1], text_color[2], 1)
lines = message.split("\n")

View File

@ -379,7 +379,7 @@ def draw_text(location, text, size=15, color=(1, 1, 1, 1)):
font_id = 0
ui_scale = bpy.context.preferences.system.ui_scale
blf.position(font_id, *location)
blf.size(font_id, round(size * ui_scale), 72)
blf.size(font_id, round(size * ui_scale))
blf.draw(font_id, text)
@ -485,7 +485,7 @@ def get_max_object_side_length(objects):
)
def get_uniform_color_shader():
return gpu.shader.from_builtin('3D_UNIFORM_COLOR')
return gpu.shader.from_builtin('UNIFORM_COLOR')
# Registration

View File

@ -34,7 +34,9 @@ def convert_old_poselib(old_poselib: Action) -> Collection[Action]:
# appropriate frame in the scene (to set up things like the background
# colour), but the old-style poselib doesn't contain such information. All
# we can do is just render on the current frame.
bpy.ops.asset.mark({'selected_ids': pose_assets})
context_override = {'selected_ids': pose_assets}
with bpy.context.temp_override(**context_override):
bpy.ops.asset.mark()
return pose_assets

View File

@ -15,6 +15,7 @@ from bpy.types import (
WindowManager,
WorkSpace,
)
from bl_ui_utils.layout import operator_context
class PoseLibraryPanel:
@ -131,10 +132,8 @@ def pose_library_list_item_context_menu(self: UIList, context: Context) -> None:
layout.operator("poselib.apply_pose_asset", text="Apply Pose").flipped = False
layout.operator("poselib.apply_pose_asset", text="Apply Pose Flipped").flipped = True
old_op_ctx = layout.operator_context
layout.operator_context = 'INVOKE_DEFAULT'
props = layout.operator("poselib.blend_pose_asset", text="Blend Pose")
layout.operator_context = old_op_ctx
with operator_context(layout, 'INVOKE_DEFAULT'):
layout.operator("poselib.blend_pose_asset", text="Blend Pose")
layout.separator()
props = layout.operator("poselib.pose_asset_select_bones", text="Select Pose Bones")
@ -275,3 +274,4 @@ def unregister() -> None:
bpy.types.UI_MT_list_item_context_menu.remove(pose_library_list_item_context_menu)
bpy.types.ASSETBROWSER_MT_context_menu.remove(pose_library_list_item_context_menu)
bpy.types.ASSETBROWSER_MT_editor_menus.remove(pose_library_list_item_asset_menu)

View File

@ -23,7 +23,7 @@ from .utils.draw import (
from .utils.doc import doc_name, doc_idname, doc_brief, doc_description
if not bpy.app.background:
SHADER = gpu.shader.from_builtin("2D_UNIFORM_COLOR")
SHADER = gpu.shader.from_builtin("UNIFORM_COLOR")
class POWER_SEQUENCER_OT_mouse_trim(bpy.types.Operator):

View File

@ -30,6 +30,9 @@ class POWER_SEQUENCER_OT_save_direct(bpy.types.Operator):
if bpy.data.is_saved:
bpy.ops.wm.save_mainfile()
else:
bpy.ops.wm.save_as_mainfile({"dict": "override"}, "INVOKE_DEFAULT")
# FIXME: what does this override do?
context_override = {"dict": "override"}
with context.temp_override(**context_override):
bpy.ops.wm.save_as_mainfile('INVOKE_DEFAULT')
self.report({"INFO"}, "File saved")
return {"FINISHED"}

View File

@ -82,7 +82,7 @@ def draw_text(x, y, size, text, justify="left", color=(1.0, 1.0, 1.0, 1.0)):
else:
text_width = 0
blf.position(font_id, x - text_width, y, 0)
blf.size(font_id, size, 72)
blf.size(font_id, size)
blf.draw(font_id, text)

View File

@ -560,7 +560,7 @@ def dis_ang(values, flip_angle, plane, scene):
# Shader for displaying the Pivot Point as Graphics.
#
SHADER = gpu.shader.from_builtin("3D_UNIFORM_COLOR") if not bpy.app.background else None
SHADER = gpu.shader.from_builtin("UNIFORM_COLOR") if not bpy.app.background else None
def draw_3d(coords, gtype, rgba, context):

View File

@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
"""Define the POV render engine from generic Blender RenderEngine class."""
import faulthandler
faulthandler.enable()
import bpy
import builtins as __builtin__
@ -27,11 +25,11 @@ def console_get(context):
return area, space, win, scr
return None, None, None, None
def console_write(context, txt):
def console_write(txt):
area, space, window, screen = console_get()
if space is None:
return
#context = bpy.context.copy()
context = bpy.context.copy()
context.update(dict(
area=area,
space_data=space,
@ -39,8 +37,9 @@ def console_write(context, txt):
window=window,
screen=screen,
))
with bpy.context.temp_override(**context):
for line in txt.split("\n"):
bpy.ops.console.scrollback_append(context, text=line, type='INFO')
bpy.ops.console.scrollback_append(text=line, type='INFO')
"""
class RENDER_OT_test(bpy.types.Operator):
bl_idname = 'pov.oha_test'
@ -53,7 +52,7 @@ class RENDER_OT_test(bpy.types.Operator):
)
def execute(self, context):
try:
console_write(context, self.txt)
console_write(self.txt)
return {'FINISHED'}
except:
self.report({'INFO'}, 'Printing report to Info window.')

View File

@ -116,7 +116,10 @@ def pov_centric_moray_like_workspace(dummy):
wsp = available_workspaces.get("Geometry Nodes")
context = bpy.context
if context.scene.render.engine == "POVRAY_RENDER" and wsp is not None:
bpy.ops.workspace.duplicate({"workspace": wsp})
context_override = {"workspace": wsp}
with context.temp_override(**context_override):
bpy.ops.workspace.duplicate()
del context_override
available_workspaces["Geometry Nodes.001"].name = "POV-Ed"
# May be already done, but explicitly make this workspace the active one
context.window.workspace = available_workspaces["POV-Ed"]
@ -197,7 +200,10 @@ def pov_centric_moray_like_workspace(dummy):
wsp1 = available_workspaces.get("Rendering")
context = bpy.context
if context.scene.render.engine == "POVRAY_RENDER" and wsp1 is not None:
bpy.ops.workspace.duplicate({"workspace": wsp1})
context_override = {"workspace": wsp1}
with context.temp_override(**context_override):
bpy.ops.workspace.duplicate()
del context_override
available_workspaces["Rendering.001"].name = "POV-Mo"
# Already done it would seem, but explicitly make this workspace the active one
context.window.workspace = available_workspaces["POV-Mo"]

View File

@ -12,8 +12,8 @@ SpaceView3D = bpy.types.SpaceView3D
callback_handle = []
if not bpy.app.background:
single_color_shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
smooth_color_shader = gpu.shader.from_builtin('3D_SMOOTH_COLOR')
single_color_shader = gpu.shader.from_builtin('UNIFORM_COLOR')
smooth_color_shader = gpu.shader.from_builtin('SMOOTH_COLOR')
else:
single_color_shader = None
smooth_color_shader = None
@ -66,7 +66,7 @@ def draw_callback_px():
font_id = 0
ui_scale = context.preferences.system.ui_scale
blf.size(font_id, round(12 * ui_scale), 72)
blf.size(font_id, round(12 * ui_scale))
data_matrix, data_quat, data_euler, data_vector, data_vector_array = utils.console_math_data()
if not data_matrix and not data_quat and not data_euler and not data_vector and not data_vector_array:

View File

@ -34,23 +34,19 @@ class ApplyAllModifiers(Operator):
is_select = True
# copying context for the operator's override
contx = bpy.context.copy()
contx['object'] = obj
context_override = {'object': obj}
modifiers = modifier_type(obj)
for mod in modifiers[:]:
contx['modifier'] = mod
context_override['modifier'] = mod
is_mod = True
try:
bpy.ops.object.modifier_apply(
contx,
modifier=contx['modifier'].name
)
bpy.ops.object.gpencil_modifier_apply(
modifier=contx['modifier'].name
)
with bpy.context.temp_override(**context_override):
if obj.type != 'GPENCIL':
bpy.ops.object.modifier_apply(modifier=mod.name)
else:
bpy.ops.object.gpencil_modifier_apply(modifier=mod.name)
except:
obj_name = getattr(obj, "name", "NO NAME")
collect_names.append(obj_name)

View File

@ -459,19 +459,20 @@ def init_draw(context=None):
def _draw_callback_px(self, context):
if context.area and context.area.type == 'VIEW_3D':
area = context.area
if area and area.type == 'VIEW_3D':
ui_scale = context.preferences.system.ui_scale
r_width = text_location = context.region.width
r_height = context.region.height
font_id = 0 # TODO: need to find out how best to get font_id
blf.size(font_id, 11, context.preferences.system.dpi)
blf.size(font_id, 11 * ui_scale)
text_size = blf.dimensions(0, self.view_name)
# compute the text location
text_location = 0
overlap = context.preferences.system.use_region_overlap
if overlap:
for region in context.area.regions:
for region in area.regions:
if region.type == "UI":
text_location = r_width - region.width

View File

@ -46,19 +46,21 @@ def init_draw(context=None):
def _draw_callback_px(self, context):
if context.area and context.area.type == 'VIEW_3D':
area = context.area
if area and area.type == 'VIEW_3D':
ui_scale = context.preferences.system.ui_scale
r_width = text_location = context.region.width
r_height = context.region.height
font_id = 0 # TODO: need to find out how best to get font_id
blf.size(font_id, 11, context.preferences.system.dpi)
blf.size(font_id, 11 * ui_scale)
text_size = blf.dimensions(0, self.view_name)
# compute the text location
text_location = 0
overlap = context.preferences.system.use_region_overlap
if overlap:
for region in context.area.regions:
for region in area.regions:
if region.type == "UI":
text_location = r_width - region.width

View File

@ -26,7 +26,7 @@ class LineDrawer:
id="color", comp_type="F32", len=4, fetch_mode="FLOAT"
)
self.shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
self.shader = gpu.shader.from_builtin('UNIFORM_COLOR')
def draw(
self,

View File

@ -159,7 +159,7 @@ class STORYPENCIL_OT_AddSecondaryWindowOperator(Operator):
new_window.workspace = wk
return
with context.temp_override(window=new_window):
bpy.ops.workspace.append_activate(context, idname=wrk_name, filepath=template_path)
bpy.ops.workspace.append_activate(idname=wrk_name, filepath=template_path)
class STORYPENCIL_OT_WindowBringFront(Operator):

View File

@ -16,7 +16,7 @@
bl_info = {
"name": "Sun Position",
"author": "Michael Martin, Damien Picard",
"version": (3, 5, 0),
"version": (3, 5, 3),
"blender": (3, 2, 0),
"location": "World > Sun Position",
"description": "Show sun position with objects and/or sky texture",

View File

@ -9,7 +9,7 @@ from mathutils import Vector
from .sun_calc import calc_surface, calc_analemma
if bpy.app.background: # ignore north line in background mode
if bpy.app.background: # ignore drawing in background mode
def north_update(self, context):
pass
def surface_update(self, context):
@ -126,7 +126,7 @@ else:
coord_offset + i+1))
coord_offset += len(analemma_verts)
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
batch = batch_for_shader(shader, 'LINES',
{"pos": coords}, indices=indices)
@ -156,7 +156,7 @@ else:
if addon_prefs.show_overlays and sun_props.show_surface:
coords = calc_surface(context)
shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
shader = gpu.shader.from_builtin('UNIFORM_COLOR')
batch = batch_for_shader(shader, 'TRIS', {"pos": coords})
if _surface_handle is not None:

View File

@ -10,34 +10,9 @@ from mathutils import Vector
from math import sqrt, pi, atan2, asin
vertex_shader = '''
uniform mat4 ModelViewProjectionMatrix;
/* Keep in sync with intern/opencolorio/gpu_shader_display_transform_vertex.glsl */
in vec2 texCoord;
in vec2 pos;
out vec2 texCoord_interp;
void main()
{
gl_Position = ModelViewProjectionMatrix * vec4(pos.xy, 0.0f, 1.0f);
gl_Position.z = 1.0f;
texCoord_interp = texCoord;
}'''
fragment_shader = '''
in vec2 texCoord_interp;
out vec4 fragColor;
uniform sampler2D image;
uniform float exposure;
void main()
{
fragColor = texture(image, texCoord_interp) * vec4(exposure, exposure, exposure, 1.0f);
}'''
# shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
if not bpy.app.background: # ignore drawing in background mode
image_shader = gpu.shader.from_builtin('IMAGE_COLOR')
line_shader = gpu.shader.from_builtin('FLAT_COLOR')
def draw_callback_px(self, context):
@ -49,9 +24,6 @@ def draw_callback_px(self, context):
if self.area != context.area:
return
if image.gl_load():
raise Exception()
bottom = 0
top = context.area.height
right = context.area.width
@ -59,39 +31,36 @@ def draw_callback_px(self, context):
position = Vector((right, top)) / 2 + self.offset
scale = Vector((context.area.width, context.area.width / 2)) * self.scale
shader = gpu.types.GPUShader(vertex_shader, fragment_shader)
coords = ((-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5))
uv_coords = ((0, 0), (1, 0), (1, 1), (0, 1))
batch = batch_for_shader(shader, 'TRI_FAN',
batch = batch_for_shader(image_shader, 'TRI_FAN',
{"pos": coords, "texCoord": uv_coords})
with gpu.matrix.push_pop():
gpu.matrix.translate(position)
gpu.matrix.scale(scale)
shader.bind()
shader.uniform_sampler("image", texture)
shader.uniform_float("exposure", self.exposure)
batch.draw(shader)
image_shader.bind()
image_shader.uniform_sampler("image", texture)
image_shader.uniform_float("color", (self.exposure, self.exposure, self.exposure, 1.0))
batch.draw(image_shader)
# Crosshair
# vertical
coords = ((self.mouse_position[0], bottom), (self.mouse_position[0], top))
colors = ((1,) * 4,) * 2
shader = gpu.shader.from_builtin('2D_FLAT_COLOR')
batch = batch_for_shader(shader, 'LINES',
batch = batch_for_shader(line_shader, 'LINES',
{"pos": coords, "color": colors})
shader.bind()
batch.draw(shader)
line_shader.bind()
batch.draw(line_shader)
# horizontal
if bottom <= self.mouse_position[1] <= top:
coords = ((0, self.mouse_position[1]), (context.area.width, self.mouse_position[1]))
batch = batch_for_shader(shader, 'LINES',
batch = batch_for_shader(line_shader, 'LINES',
{"pos": coords, "color": colors})
shader.bind()
batch.draw(shader)
line_shader.bind()
batch.draw(line_shader)
class SUNPOS_OT_ShowHdr(bpy.types.Operator):

View File

@ -311,7 +311,7 @@ class SunPosAddonPreferences(AddonPreferences):
box = layout.box()
col = box.column()
col.label(text="Show options or labels:")
col.label(text="Show options and info:")
flow = col.grid_flow(columns=0, even_columns=True, even_rows=False, align=False)
flow.prop(self, "show_refraction")
flow.prop(self, "show_overlays")

View File

@ -432,10 +432,10 @@ translations_tuple = (
("fr_FR", "Projection inconnue",
(False, ())),
),
(("*", "Show options or labels:"),
(("*", "Show options and info:"),
(("scripts/addons/sun_position/properties.py:297",),
()),
("fr_FR", "Afficher les options et étiquettes :",
("fr_FR", "Afficher les options et infos :",
(False, ())),
),
(("*", "ERROR: Could not parse coordinates"),

View File

@ -28,6 +28,8 @@ from bpy.props import (
EnumProperty,
)
DEMO_CFG = "demo.py"
class DemoModeSetup(bpy.types.Operator):
"""Create a demo script and optionally execute it"""
@ -124,19 +126,22 @@ class DemoModeSetup(bpy.types.Operator):
from . import config
keywords = self.as_keywords(ignore=("directory", "random_order", "run", "exit"))
cfg_str, _dirpath = config.as_string(
cfg_str, cfg_file_count, _dirpath = config.as_string(
self.directory,
self.random_order,
self.exit,
**keywords,
)
text = bpy.data.texts.get("demo.py")
text = bpy.data.texts.get(DEMO_CFG)
if text:
text.name += ".back"
text = bpy.data.texts.new("demo.py")
text = bpy.data.texts.new(DEMO_CFG)
text.from_string(cfg_str)
# When run is disabled, no messages makes it seems as if nothing happened.
self.report({'INFO'}, "Demo text \"%s\" created with %s file(s)" % (DEMO_CFG, "{:,d}".format(cfg_file_count)))
if self.run:
extern_demo_mode_run()
@ -154,7 +159,7 @@ class DemoModeSetup(bpy.types.Operator):
box = layout.box()
box.label(text="Search *.blend recursively")
box.label(text="Writes: demo.py config text")
box.label(text="Writes: %s config text" % DEMO_CFG)
layout.prop(self, "run")
layout.prop(self, "exit")
@ -190,7 +195,7 @@ class DemoModeRun(bpy.types.Operator):
if extern_demo_mode_run():
return {'FINISHED'}
else:
self.report({'ERROR'}, "Can't load demo.py config, run: File -> Demo Mode (Setup)")
self.report({'ERROR'}, "Can't load %s config, run: File -> Demo Mode (Setup)" % DEMO_CFG)
return {'CANCELLED'}
@ -234,6 +239,7 @@ classes = (
DemoModeRun,
)
def register():
from bpy.utils import register_class
for cls in classes:
@ -253,5 +259,6 @@ def unregister():
extern_demo_mode_unregister()
if __name__ == "__main__":
register()

View File

@ -1,8 +1,46 @@
# SPDX-License-Identifier: GPL-2.0-or-later
__all__ = (
"as_string",
)
import os
# -----------------------------------------------------------------------------
# Generic Utilities
def round_float_32(f):
from struct import pack, unpack
return unpack("f", pack("f", f))[0]
def repr_f32(f):
f_round = round_float_32(f)
f_str = repr(f)
f_str_frac = f_str.partition(".")[2]
if not f_str_frac:
return f_str
for i in range(1, len(f_str_frac)):
f_test = round(f, i)
f_test_round = round_float_32(f_test)
if f_test_round == f_round:
return "%.*f" % (i, f_test)
return f_str
def repr_pretty(v):
from pprint import pformat
# Float's are originally 32 bit, regular pretty print
# shows them with many decimal places (not so pretty).
if type(v) is float:
return repr_f32(v)
return pformat(v)
# -----------------------------------------------------------------------------
# Configuration Generation
def blend_list(path):
for dirpath, dirnames, filenames in os.walk(path):
# skip '.git'
@ -49,23 +87,16 @@ def as_string(dirpath, random_order, exit, **kwargs):
"\n",
]
# All these work but use nicest formatting!
if 0: # works but not nice to edit.
cfg_str += ["config = %r" % cfg]
elif 0:
import pprint
cfg_str += ["config = %s" % pprint.pformat(cfg, indent=0, width=120)]
elif 0:
cfg_str += [("config = %r" % cfg).replace("{", "\n {")]
else:
import pprint
# Works, but custom formatting looks better.
# `cfg_str.append("config = %s" % pprint.pformat(cfg, indent=4, width=120))`.
def dict_as_kw(d):
return "dict(%s)" % ", ".join(("%s=%s" % (k, pprint.pformat(v))) for k, v in sorted(d.items()))
ident = " "
cfg_str += ["config = [\n"]
for cfg_item in cfg:
cfg_str += ["%s%s,\n" % (ident, dict_as_kw(cfg_item))]
cfg_str += ["%s]\n\n" % ident]
return "dict(%s)" % ", ".join(("%s=%s" % (k, repr_pretty(v))) for k, v in sorted(d.items()))
return "".join(cfg_str), dirpath
ident = " "
cfg_str.append("config = [\n")
for cfg_item in cfg:
cfg_str.append("%s%s,\n" % (ident, dict_as_kw(cfg_item)))
cfg_str.append("%s]\n\n" % ident)
return "".join(cfg_str), len(cfg), dirpath

View File

@ -20,7 +20,7 @@ import bpy
import time
import os
DEMO_CFG = "demo.py"
from . import DEMO_CFG
# populate from script
global_config_files = []
@ -121,7 +121,6 @@ def demo_mode_next_file(step=1):
del global_config_files[global_state["demo_index"]]
global_state["demo_index"] -= 1
print(global_state["demo_index"])
demo_index_next = (global_state["demo_index"] + step) % len(global_config_files)
if global_state["exit"] and step > 0:
@ -132,7 +131,7 @@ def demo_mode_next_file(step=1):
global_state["demo_index"] = demo_index_next
print(global_state["demo_index"], "....")
print("func:demo_mode_next_file", global_state["demo_index"])
print("func:demo_mode_next_file", global_state["demo_index"], "of", len(global_config_files))
filepath = global_config_files[global_state["demo_index"]]["file"]
bpy.ops.wm.open_mainfile(filepath=filepath)
@ -161,7 +160,7 @@ def demo_mode_temp_file():
""" Initialize a temp config for the duration of the play time.
Use this so we can initialize the demo intro screen but not show again.
"""
assert(global_state["demo_index"] == 0)
assert (global_state["demo_index"] == 0)
temp_config = global_config_fallback.copy()
temp_config["anim_time_min"] = 0.0
@ -270,7 +269,7 @@ def demo_mode_update():
if scene != window.scene:
global_state["last_frame"] = -1
#if global_config["mode"] == 'PLAY':
# if global_config["mode"] == 'PLAY':
if 1:
global_state["reset_anim"] = True
@ -369,7 +368,11 @@ class DemoMode(bpy.types.Operator):
use_temp = True
if not global_config_files:
self.report({'INFO'}, "No configuration found with text or file: %s. Run File -> Demo Mode Setup" % DEMO_CFG)
self.report(
{'INFO'},
"No configuration found with text or file: %s. Run File -> Demo Mode Setup" %
DEMO_CFG,
)
return {'CANCELLED'}
if use_temp:
@ -405,9 +408,11 @@ class DemoModeControl(bpy.types.Operator):
bl_label = "Control"
mode: bpy.props.EnumProperty(
items=(('PREV', "Prev", ""),
items=(
('PREV', "Prev", ""),
('PAUSE', "Pause", ""),
('NEXT', "Next", "")),
('NEXT', "Next", ""),
),
name="Mode"
)
@ -453,7 +458,7 @@ def unregister():
def load_config(cfg_name=DEMO_CFG):
namespace = {}
del global_config_files[:]
global_config_files.clear()
basedir = os.path.dirname(bpy.data.filepath)
text = bpy.data.texts.get(cfg_name)

View File

@ -19,7 +19,8 @@ bl_info = {
'version': (1, 0, 2),
'location': 'Sculpt Mode: View3D > Sidebar > Tool Tab',
'warning': '',
'category': 'Baking'
'category': 'Baking',
'doc_url': '{BLENDER_MANUAL_URL}/addons/baking/vdm_brush_baker.html'
}