Fix duplicate shape key import when the FBX connections are duplicated #104954

Merged
19 changed files with 165 additions and 105 deletions
Showing only changes of commit efc15252ef

View File

@@ -6,7 +6,7 @@ bl_info = {
     "name": "Grease Pencil Tools",
     "description": "Extra tools for Grease Pencil",
     "author": "Samuel Bernou, Antonio Vazquez, Daniel Martinez Lara, Matias Mendiola",
-    "version": (1, 8, 1),
+    "version": (1, 8, 2),
     "blender": (3, 0, 0),
     "location": "Sidebar > Grease Pencil > Grease Pencil Tools",
     "warning": "",

View File

@@ -49,10 +49,10 @@ def get_reduced_area_coord(context):
     ## minus tool leftbar + sidebar right
     regs = context.area.regions
-    toolbar = regs[2]
-    sidebar = regs[3]
-    header = regs[0]
-    tool_header = regs[1]
+    toolbar = next((r for r in regs if r.type == 'TOOLS'), None)
+    sidebar = next((r for r in regs if r.type == 'UI'), None)
+    header = next((r for r in regs if r.type == 'HEADER'), None)
+    tool_header = next((r for r in regs if r.type == 'TOOL_HEADER'), None)
     up_margin = down_margin = 0
     if tool_header.alignment == 'TOP':
         up_margin += tool_header.height
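Note: this hunk stops indexing context.area.regions by position and matches each region by its type instead. A minimal standalone sketch of the same idiom, assuming a 3D View area exists in the current screen (the helper name and the lookup below are illustrative, not part of the patch):

import bpy

def get_region(area, region_type):
    # Region order is not guaranteed, so match on Region.type instead of indexing.
    return next((r for r in area.regions if r.type == region_type), None)

area = next(a for a in bpy.context.screen.areas if a.type == 'VIEW_3D')
toolbar = get_region(area, 'TOOLS')   # left tool bar
sidebar = get_region(area, 'UI')      # right sidebar (N-panel)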

View File

@@ -5,8 +5,8 @@
 bl_info = {
     "name": "Import Images as Planes",
     "author": "Florian Meyer (tstscr), mont29, matali, Ted Schundler (SpkyElctrc), mrbimax",
-    "version": (3, 5, 0),
-    "blender": (2, 91, 0),
+    "version": (3, 5, 1),
+    "blender": (4, 0, 0),
     "location": "File > Import > Images as Planes or Add > Image > Images as Planes",
     "description": "Imports images and creates planes with the appropriate aspect ratio. "
                    "The images are mapped to the planes.",
@@ -25,7 +25,10 @@ from math import pi
 import bpy
 from bpy.types import Operator
-from bpy.app.translations import pgettext_tip as tip_
+from bpy.app.translations import (
+    pgettext_tip as tip_,
+    contexts as i18n_contexts
+)
 from mathutils import Vector
 from bpy.props import (
@@ -151,6 +154,9 @@ def load_images(filenames, directory, force_reload=False, frame_start=1, find_se
         file_iter = zip(filenames, repeat(1), repeat(1))

     for filename, offset, frames in file_iter:
+        if not os.path.isfile(bpy.path.abspath(os.path.join(directory, filename))):
+            continue
         image = load_image(filename, directory, check_existing=True, force_reload=force_reload)

         # Size is unavailable for sequences, so we grab it early
@@ -731,7 +737,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
         ('HASHED', "Hashed","Use noise to dither the binary visibility (works well with multi-samples)"),
         ('OPAQUE', "Opaque","Render surface without transparency"),
     )
-    blend_method: EnumProperty(name="Blend Mode", items=BLEND_METHODS, default='BLEND', description="Blend Mode for Transparent Faces")
+    blend_method: EnumProperty(
+        name="Blend Mode", items=BLEND_METHODS, default='BLEND',
+        description="Blend Mode for Transparent Faces", translation_context=i18n_contexts.id_material)

     SHADOW_METHODS = (
         ('CLIP', "Clip","Use the alpha threshold to clip the visibility (binary visibility)"),
@@ -739,7 +747,9 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
         ('OPAQUE',"Opaque","Material will cast shadows without transparency"),
         ('NONE',"None","Material will cast no shadow"),
     )
-    shadow_method: EnumProperty(name="Shadow Mode", items=SHADOW_METHODS, default='CLIP', description="Shadow mapping method")
+    shadow_method: EnumProperty(
+        name="Shadow Mode", items=SHADOW_METHODS, default='CLIP',
+        description="Shadow mapping method", translation_context=i18n_contexts.id_material)

     use_backface_culling: BoolProperty(
         name="Backface Culling", default=False,
@@ -923,11 +933,11 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
         if context.active_object and context.active_object.mode != 'OBJECT':
             bpy.ops.object.mode_set(mode='OBJECT')

-        self.import_images(context)
+        ret_code = self.import_images(context)

         context.preferences.edit.use_enter_edit_mode = editmode

-        return {'FINISHED'}
+        return ret_code

     def import_images(self, context):
@@ -939,6 +949,10 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
             find_sequences=self.image_sequence
         ))

+        if not images:
+            self.report({'WARNING'}, "Please select at least an image.")
+            return {'CANCELLED'}
+
         # Create individual planes
         planes = [self.single_image_spec_to_plane(context, img_spec) for img_spec in images]
@@ -962,6 +976,7 @@ class IMPORT_IMAGE_OT_to_plane(Operator, AddObjectHelper):
         # all done!
         self.report({'INFO'}, tip_("Added {} Image Plane(s)").format(len(planes)))
+        return {'FINISHED'}

     # operate on a single image
     def single_image_spec_to_plane(self, context, img_spec):
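Note: the execute()/import_images() hunks above propagate Blender's operator return set ({'FINISHED'} or {'CANCELLED'}) instead of always reporting success. A minimal sketch of that pattern with a hypothetical operator (class, bl_idname and the selection check are illustrative only):

import bpy

class OBJECT_OT_example_import(bpy.types.Operator):
    bl_idname = "object.example_import"
    bl_label = "Example Import"

    def _do_import(self, context):
        if not context.selected_objects:
            self.report({'WARNING'}, "Nothing selected")
            return {'CANCELLED'}
        return {'FINISHED'}

    def execute(self, context):
        # Pass the inner return code through instead of always returning {'FINISHED'}.
        return self._do_import(context)

bpy.utils.register_class(OBJECT_OT_example_import)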

View File

@@ -244,7 +244,7 @@ def skip_to_end(file, skip_chunk):
 # MATERIALS #
 #############
-def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tintcolor, mapto):
+def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, offset, angle, tint1, tint2, mapto):
     shader = contextWrapper.node_principled_bsdf
     nodetree = contextWrapper.material.node_tree
     shader.location = (-300, 0)
@@ -256,13 +256,16 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
         mixer.label = "Mixer"
         mixer.inputs[0].default_value = pct / 100
         mixer.inputs[1].default_value = (
-            tintcolor[:3] + [1] if tintcolor else
-            shader.inputs['Base Color'].default_value[:]
-        )
+            tint1[:3] + [1] if tint1 else shader.inputs['Base Color'].default_value[:])
         contextWrapper._grid_to_location(1, 2, dst_node=mixer, ref_node=shader)
         img_wrap = contextWrapper.base_color_texture
-        links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
         links.new(mixer.outputs['Color'], shader.inputs['Base Color'])
+        if tint2 is not None:
+            img_wrap.colorspace_name = 'Non-Color'
+            mixer.inputs[2].default_value = tint2[:3] + [1]
+            links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[0])
+        else:
+            links.new(img_wrap.node_image.outputs['Color'], mixer.inputs[2])
     elif mapto == 'ROUGHNESS':
         img_wrap = contextWrapper.roughness_texture
     elif mapto == 'METALLIC':
@@ -274,6 +277,8 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
     elif mapto == 'ALPHA':
         shader.location = (-300,0)
         img_wrap = contextWrapper.alpha_texture
+        img_wrap.use_alpha = False
+        links.new(img_wrap.node_image.outputs['Color'], img_wrap.socket_dst)
     elif mapto == 'EMISSION':
         shader.location = (0,-900)
         img_wrap = contextWrapper.emission_color_texture
@@ -310,10 +315,12 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
         img_wrap.extension = 'CLIP'

     if alpha == 'alpha':
+        own_node = img_wrap.node_image
+        contextWrapper.material.blend_method = 'HASHED'
+        links.new(own_node.outputs['Alpha'], img_wrap.socket_dst)
         for link in links:
             if link.from_node.type == 'TEX_IMAGE' and link.to_node.type == 'MIX_RGB':
                 tex = link.from_node.image.name
-                own_node = img_wrap.node_image
                 own_map = img_wrap.node_mapping
                 if tex == image.name:
                     links.new(link.from_node.outputs['Alpha'], img_wrap.socket_dst)
@@ -323,9 +330,6 @@ def add_texture_to_material(image, contextWrapper, pct, extend, alpha, scale, of
                         if imgs.name[-3:].isdigit():
                             if not imgs.users:
                                 bpy.data.images.remove(imgs)
-    else:
-        links.new(img_wrap.node_image.outputs['Alpha'], img_wrap.socket_dst)
-        contextWrapper.material.blend_method = 'HASHED'

     shader.location = (300, 300)
     contextWrapper._grid_to_location(1, 0, dst_node=contextWrapper.node_out, ref_node=shader)
@@ -518,7 +522,7 @@ def process_next_chunk(context, file, previous_chunk, imported_objects, CONSTRAI
     def read_texture(new_chunk, temp_chunk, name, mapto):
         uscale, vscale, uoffset, voffset, angle = 1.0, 1.0, 0.0, 0.0, 0.0
         contextWrapper.use_nodes = True
-        tintcolor = None
+        tint1 = tint2 = None
         extend = 'wrap'
         alpha = False
         pct = 70
@@ -542,14 +546,8 @@
                 img = load_image(texture_name, dirname, place_holder=False, recursive=IMAGE_SEARCH, check_existing=True)
                 temp_chunk.bytes_read += read_str_len # plus one for the null character that gets removed

-            elif temp_chunk.ID == MAT_MAP_USCALE:
-                uscale = read_float(temp_chunk)
-            elif temp_chunk.ID == MAT_MAP_VSCALE:
-                vscale = read_float(temp_chunk)
-            elif temp_chunk.ID == MAT_MAP_UOFFSET:
-                uoffset = read_float(temp_chunk)
-            elif temp_chunk.ID == MAT_MAP_VOFFSET:
-                voffset = read_float(temp_chunk)
+            elif temp_chunk.ID == MAT_BUMP_PERCENT:
+                contextWrapper.normalmap_strength = (float(read_short(temp_chunk) / 100))

            elif temp_chunk.ID == MAT_MAP_TILING:
                """Control bit flags, where 0x1 activates decaling, 0x2 activates mirror,
@@ -578,11 +576,20 @@
                 if tiling & 0x200:
                     tint = 'RGBtint'

+            elif temp_chunk.ID == MAT_MAP_USCALE:
+                uscale = read_float(temp_chunk)
+            elif temp_chunk.ID == MAT_MAP_VSCALE:
+                vscale = read_float(temp_chunk)
+            elif temp_chunk.ID == MAT_MAP_UOFFSET:
+                uoffset = read_float(temp_chunk)
+            elif temp_chunk.ID == MAT_MAP_VOFFSET:
+                voffset = read_float(temp_chunk)
             elif temp_chunk.ID == MAT_MAP_ANG:
                 angle = read_float(temp_chunk)
             elif temp_chunk.ID == MAT_MAP_COL1:
-                tintcolor = read_byte_color(temp_chunk)
+                tint1 = read_byte_color(temp_chunk)
+            elif temp_chunk.ID == MAT_MAP_COL2:
+                tint2 = read_byte_color(temp_chunk)

             skip_to_end(file, temp_chunk)
             new_chunk.bytes_read += temp_chunk.bytes_read
@@ -590,7 +597,7 @@
         # add the map to the material in the right channel
         if img:
             add_texture_to_material(img, contextWrapper, pct, extend, alpha, (uscale, vscale, 1),
-                                    (uoffset, voffset, 0), angle, tintcolor, mapto)
+                                    (uoffset, voffset, 0), angle, tint1, tint2, mapto)

     def apply_constrain(vec):
         convector = mathutils.Vector.Fill(3, (CONSTRAIN * 0.1))
@@ -1381,6 +1388,7 @@
         elif KEYFRAME and new_chunk.ID == POS_TRACK_TAG and tracktype == 'TARGET': # Target position
             keyframe_data = {}
             location = child.location
+            keyframe_data[0] = trackposition[0]
             target = mathutils.Vector(read_track_data(new_chunk)[0])
             direction = calc_target(location, target)
             child.rotation_euler.x = direction[0]
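Note: the MAT_MAP_TILING docstring above lists the control bit flags stored in the 3DS tiling field (0x1 decaling, 0x2 mirror, 0x200 RGB tint). A short illustrative sketch of decoding such flags with bitwise AND (the helper name and return shape are made up for the example, not part of the add-on):

def decode_tiling_flags(tiling):
    # Flag values taken from the docstring above.
    return {
        "decal": bool(tiling & 0x1),
        "mirror": bool(tiling & 0x2),
        "rgb_tint": bool(tiling & 0x200),
    }

print(decode_tiling_flags(0x202))  # {'decal': False, 'mirror': True, 'rgb_tint': True}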

View File

@@ -5,7 +5,7 @@
 bl_info = {
     'name': 'glTF 2.0 format',
     'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
-    "version": (4, 0, 34),
+    "version": (4, 0, 38),
     'blender': (4, 0, 0),
     'location': 'File > Import-Export',
     'description': 'Import-Export as glTF 2.0',
@@ -144,13 +144,10 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
                'Most efficient and portable, but more difficult to edit later'),
               ('GLTF_SEPARATE', 'glTF Separate (.gltf + .bin + textures)',
                'Exports multiple files, with separate JSON, binary and texture data. '
-               'Easiest to edit later'),
-              ('GLTF_EMBEDDED', 'glTF Embedded (.gltf)',
-               'Exports a single file, with all data packed in JSON. '
-               'Less efficient than binary, but easier to edit later')),
+               'Easiest to edit later')),
        description=(
-            'Output format and embedding options. Binary is most efficient, '
-            'but JSON (embedded or separate) may be easier to edit later'
+            'Output format. Binary is most efficient, '
+            'but JSON may be easier to edit later'
        ),
        default='GLB', #Warning => If you change the default, need to change the default filter too
        update=on_export_format_changed,
@@ -174,13 +171,13 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
    export_image_format: EnumProperty(
        name='Images',
        items=(('AUTO', 'Automatic',
-               'Save PNGs as PNGs, JPEGs as JPEGs, WEBPs as WEBPs. '
+               'Save PNGs as PNGs, JPEGs as JPEGs, WebPs as WebPs. '
                'If neither one, use PNG'),
               ('JPEG', 'JPEG Format (.jpg)',
                'Save images as JPEGs. (Images that need alpha are saved as PNGs though.) '
                'Be aware of a possible loss in quality'),
-              ('WEBP', 'Webp Format',
-               'Save images as WEBPs as main image (no fallback)'),
+              ('WEBP', 'WebP Format',
+               'Save images as WebPs as main image (no fallback)'),
               ('NONE', 'None',
                'Don\'t export images'),
        ),
@@ -192,18 +189,18 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
    )

    export_image_add_webp: BoolProperty(
-        name='Create Webp',
+        name='Create WebP',
        description=(
-            "Creates webp textures for every textures. "
-            "For already webp textures, nothing happen"
+            "Creates WebP textures for every textures. "
+            "For already WebP textures, nothing happen"
        ),
        default=False
    )

    export_image_webp_fallback: BoolProperty(
-        name='Webp fallback',
+        name='WebP fallback',
        description=(
-            "For all webp textures, create a PNG fallback texture."
+            "For all WebP textures, create a PNG fallback texture"
        ),
        default=False
    )
@@ -1674,10 +1671,10 @@ class ImportGLTF2(Operator, ConvertGLTF2_Base, ImportHelper):
    )

    import_webp_texture: BoolProperty(
-        name='Import Webp textures',
+        name='Import WebP textures',
        description=(
-            "If a texture exists in webp format,"
-            "loads the webp texture instead of the fallback png/jpg one"
+            "If a texture exists in WebP format,"
+            "loads the WebP texture instead of the fallback png/jpg one"
        ),
        default=False,
    )

View File

@@ -85,9 +85,6 @@ def __create_buffer(exporter, export_settings):
    buffer = bytes()
    if export_settings['gltf_format'] == 'GLB':
        buffer = exporter.finalize_buffer(export_settings['gltf_filedirectory'], is_glb=True)
-    else:
-        if export_settings['gltf_format'] == 'GLTF_EMBEDDED':
-            exporter.finalize_buffer(export_settings['gltf_filedirectory'])
-        else:
-            exporter.finalize_buffer(export_settings['gltf_filedirectory'],
-                                     export_settings['gltf_binaryfilename'])
+    else:
+        exporter.finalize_buffer(export_settings['gltf_filedirectory'],
+                                 export_settings['gltf_binaryfilename'])

View File

@@ -117,7 +117,7 @@ class GlTF2Exporter:
                f.write(self.__buffer.to_bytes())
            uri = buffer_name
        else:
-            uri = self.__buffer.to_embed_string()
+            pass # This is no more possible, we don't export embedded buffers

        buffer = gltf2_io.Buffer(
            byte_length=self.__buffer.byte_length,
@@ -320,6 +320,20 @@
            len_ = len([i for i in self.nodes_idx_to_remove if i < skin.skeleton])
            skin.skeleton = skin.skeleton - len_

+        # Remove animation channels that was targeting a node that will be removed
+        new_animation_list = []
+        for animation in self.__gltf.animations:
+            new_channel_list = []
+            for channel in animation.channels:
+                if channel.target.node not in self.nodes_idx_to_remove:
+                    new_channel_list.append(channel)
+            animation.channels = new_channel_list
+            if len(animation.channels) > 0:
+                new_animation_list.append(animation)
+        self.__gltf.animations = new_animation_list
+
+        #TODO: remove unused animation accessors?
+
        # And now really remove nodes
        self.__gltf.nodes = [node for idx, node in enumerate(self.__gltf.nodes) if idx not in self.nodes_idx_to_remove]

View File

@@ -9,6 +9,7 @@ from ...material.gltf2_blender_gather_texture_info import gather_texture_info
 def export_specular(blender_material, export_settings):
     specular_extension = {}
+    extensions_needed = False

     specular_socket = gltf2_blender_get.get_socket(blender_material, 'Specular IOR Level')
     speculartint_socket = gltf2_blender_get.get_socket(blender_material, 'Specular Tint')
@@ -23,18 +24,27 @@ def export_specular(blender_material, export_settings):
     if specular_non_linked is True:
         fac = specular_socket.default_value
-        if fac != 1.0:
-            specular_extension['specularFactor'] = fac
-        if fac == 0.0:
-            return None, {}
+        fac = fac * 2.0
+        if fac < 1.0:
+            specular_extension['specularFactor'] = fac
+            extensions_needed = True
+        elif fac > 1.0:
+            # glTF specularFactor should be <= 1.0, so we will multiply ColorFactory by specularFactor, and set SpecularFactor to 1.0 (default value)
+            extensions_needed = True
+        else:
+            pass # If fac == 1.0, no need to export specularFactor, the default value is 1.0

     else:
         # Factor
         fac = gltf2_blender_get.get_factor_from_socket(specular_socket, kind='VALUE')
         if fac is not None and fac != 1.0:
-            specular_extension['specularFactor'] = fac
-        if fac == 0.0:
-            return None, {}
+            fac = fac * 2.0 if fac is not None else None
+            if fac is not None and fac < 1.0:
+                specular_extension['specularFactor'] = fac
+                extensions_needed = True
+            elif fac is not None and fac > 1.0:
+                # glTF specularFactor should be <= 1.0, so we will multiply ColorFactory by specularFactor, and set SpecularFactor to 1.0 (default value)
+                extensions_needed = True

     # Texture
     if gltf2_blender_get.has_image_node_from_socket(specular_socket):
@@ -46,16 +56,26 @@ def export_specular(blender_material, export_settings):
             )
             specular_extension['specularTexture'] = original_specular_texture
             uvmap_infos.update({'specularTexture': uvmap_info})
+            extensions_needed = True

     if specularcolor_non_linked is True:
         color = speculartint_socket.default_value[:3]
+        if fac is not None and fac > 1.0:
+            color = (color[0] * fac, color[1] * fac, color[2] * fac)
+        specular_extension['specularColorFactor'] = color if color != (1.0, 1.0, 1.0) else None
         if color != (1.0, 1.0, 1.0):
-            specular_extension['specularColorFactor'] = color
+            extensions_needed = True

     else:
         # Factor
-        fac = gltf2_blender_get.get_factor_from_socket(speculartint_socket, kind='RGB')
-        if fac is not None and fac != (1.0, 1.0, 1.0):
-            specular_extension['specularColorFactor'] = fac
+        fac_color = gltf2_blender_get.get_factor_from_socket(speculartint_socket, kind='RGB')
+        if fac_color is not None and fac is not None and fac > 1.0:
+            fac_color = (fac_color[0] * fac, fac_color[1] * fac, fac_color[2] * fac)
+        elif fac_color is None and fac is not None and fac > 1.0:
+            fac_color = (fac, fac, fac)
+        specular_extension['specularColorFactor'] = fac_color if fac_color != (1.0, 1.0, 1.0) else None
+        if fac_color != (1.0, 1.0, 1.0):
+            extensions_needed = True

     # Texture
     if gltf2_blender_get.has_image_node_from_socket(speculartint_socket):
@@ -67,5 +87,9 @@ def export_specular(blender_material, export_settings):
             )
             specular_extension['specularColorTexture'] = original_specularcolor_texture
             uvmap_infos.update({'specularColorTexture': uvmap_info})
+            extensions_needed = True
+
+    if extensions_needed is False:
+        return None, {}

     return Extension('KHR_materials_specular', specular_extension, False), uvmap_infos
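Note: the comments in this file explain that glTF's specularFactor must stay within [0, 1], so when Blender's value maps to a factor above 1.0 the excess is folded into specularColorFactor instead. An illustrative sketch of that arithmetic (function and variable names are made up for the example, not the exporter's API):

def rescale_specular(blender_specular, color=(1.0, 1.0, 1.0)):
    fac = blender_specular * 2.0            # Blender's 0.5 corresponds to glTF's default 1.0
    if fac <= 1.0:
        return fac, color
    # Excess above 1.0 is moved into the color factor; specularFactor keeps its default.
    return 1.0, tuple(c * fac for c in color)

print(rescale_specular(0.75))  # (1.0, (1.5, 1.5, 1.5))
print(rescale_specular(0.25))  # (0.5, (1.0, 1.0, 1.0))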

View File

@@ -59,7 +59,7 @@ def gather_image(
     export_user_extensions('gather_image_hook', export_settings, image, blender_shader_sockets)

-    # We also return image_data, as it can be used to generate same file with another extension for webp management
+    # We also return image_data, as it can be used to generate same file with another extension for WebP management
     return image, image_data, factor


 def __gather_original_uri(original_uri, export_settings):
@@ -118,7 +118,7 @@ def __gather_mime_type(sockets, export_image, export_settings):
     if export_settings["gltf_image_format"] == "WEBP":
         return "image/webp"
     else:
-        # If we keep image as is (no channel composition), we need to keep original format (for webp)
+        # If we keep image as is (no channel composition), we need to keep original format (for WebP)
         image = export_image.blender_image()
         if image is not None and __is_blender_image_a_webp(image):
             return "image/webp"

View File

@@ -70,7 +70,7 @@ def __gather_extensions(blender_shader_sockets, source, webp_image, image_data,
     ext_webp = {}

-    # If user want to keep original textures, and these textures are webp, we need to remove source from
+    # If user want to keep original textures, and these textures are WebP, we need to remove source from
     # gltf2_io.Texture, and populate extension
     if export_settings['gltf_keep_original_textures'] is True \
             and source is not None \
@@ -79,19 +79,19 @@
         remove_source = True
         required = True

-    # If user want to export in webp format (so without fallback in png/jpg)
+    # If user want to export in WebP format (so without fallback in png/jpg)
     if export_settings['gltf_image_format'] == "WEBP":
         # We create all image without fallback
         ext_webp["source"] = source
         remove_source = True
         required = True

-    # If user doesn't want to export in webp format, but want webp too. Texture is not webp
+    # If user doesn't want to export in WebP format, but want WebP too. Texture is not WebP
     if export_settings['gltf_image_format'] != "WEBP" \
             and export_settings['gltf_add_webp'] \
             and source is not None \
             and source.mime_type != "image/webp":

-        # We need here to create some webp textures
+        # We need here to create some WebP textures
         new_mime_type = "image/webp"
         new_data, _ = image_data.encode(new_mime_type, export_settings)
@@ -116,7 +116,7 @@
         ext_webp["source"] = webp_image

-    # If user doesn't want to export in webp format, but want webp too. Texture is webp
+    # If user doesn't want to export in WebP format, but want WebP too. Texture is WebP
     if export_settings['gltf_image_format'] != "WEBP" \
             and source is not None \
             and source.mime_type == "image/webp":
@@ -127,7 +127,7 @@
         remove_source = True
         required = True

-    # If user doesn't want to export in webp format, but want webp too as fallback. Texture is webp
+    # If user doesn't want to export in webp format, but want WebP too as fallback. Texture is WebP
     if export_settings['gltf_image_format'] != "WEBP" \
             and webp_image is not None \
             and export_settings['gltf_webp_fallback'] is True:
@@ -209,7 +209,7 @@ def __gather_source(blender_shader_sockets, default_sockets, export_settings):
             png_image = __make_webp_image(buffer_view, None, None, new_mime_type, name, uri, export_settings)

-            # We inverted the png & webp image, to have the png as main source
+            # We inverted the png & WebP image, to have the png as main source
             return png_image, source, image_data, factor
     return source, None, image_data, factor

View File

@@ -39,7 +39,7 @@ def specular(mh, location_specular,
     x_specularcolor, y_specularcolor = location_specular_tint

     if tex_specular_info is None:
-        specular_socket.default_value = specular_factor
+        specular_socket.default_value = specular_factor / 2.0
     else:
         # Mix specular factor
         if specular_factor != 1.0:
@@ -51,7 +51,7 @@
             mh.node_tree.links.new(specular_socket, node.outputs[0])
             # Inputs
             specular_socket = node.inputs[0]
-            node.inputs[1].default_value = specular_factor
+            node.inputs[1].default_value = specular_factor / 2.0
             x_specular -= 200

     texture(

View File

@@ -50,9 +50,6 @@ def pbr_metallic_roughness(mh: MaterialHelper):
     # This value may be overridden later if IOR extension is set on file
     pbr_node.inputs['IOR'].default_value = GLTF_IOR

-    pbr_node.inputs['Specular IOR Level'].default_value = 0.0 # Will be overridden by KHR_materials_specular if set
-    pbr_node.inputs['Specular Tint'].default_value = [0.0]*3 + [1.0] # Will be overridden by KHR_materials_specular if set
-
     if mh.pymat.occlusion_texture is not None:
         if mh.settings_node is None:
             mh.settings_node = make_settings_node(mh)

View File

@@ -41,7 +41,7 @@ def texture(
     if forced_image is None:
         if mh.gltf.import_settings['import_webp_texture'] is True:
-            # Get the webp image if there is one
+            # Get the WebP image if there is one
             if pytexture.extensions \
                     and 'EXT_texture_webp' in pytexture.extensions \
                     and pytexture.extensions['EXT_texture_webp']['source'] is not None:

View File

@@ -47,8 +47,5 @@ class Buffer:
     def to_bytes(self):
         return self.__data

-    def to_embed_string(self):
-        return 'data:application/octet-stream;base64,' + base64.b64encode(self.__data).decode('ascii')
-
     def clear(self):
         self.__data = b""

View File

@@ -89,10 +89,10 @@ class ActionSlot(PropertyGroup, ActionSlotBase):
     target_space: EnumProperty(
         name="Transform Space",
-        items=[("WORLD", "World Space", "World Space"),
-               ("POSE", "Pose Space", "Pose Space"),
-               ("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent"),
-               ("LOCAL", "Local Space", "Local Space")],
+        items=[("WORLD", "World Space", "World Space", 0),
+               # ("POSE", "Pose Space", "Pose Space", 1),
+               # ("LOCAL_WITH_PARENT", "Local With Parent", "Local With Parent", 2),
+               ("LOCAL", "Local Space", "Local Space", 3)],
         default="LOCAL"
     )
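Note: the explicit trailing integers keep the values stored in existing files stable even though the middle items are now commented out. A minimal standalone sketch of that EnumProperty pattern (the property group and property names are illustrative, not from the add-on):

import bpy

class ExampleSlotSettings(bpy.types.PropertyGroup):
    # Numbering items explicitly means removing an entry from the middle of the
    # list does not shift the integer value saved for the remaining identifiers.
    target_space: bpy.props.EnumProperty(
        name="Transform Space",
        items=[("WORLD", "World Space", "World Space", 0),
               ("LOCAL", "Local Space", "Local Space", 3)],
        default="LOCAL",
    )

bpy.utils.register_class(ExampleSlotSettings)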

View File

@@ -153,9 +153,10 @@ def pVisRotExec(bone, active, context):
 def pVisScaExec(bone, active, context):
     obj_bone = bone.id_data
-    bone.scale = getmat(bone, active, context,
-                        not obj_bone.data.bones[bone.name].use_inherit_scale)\
-        .to_scale()
+    bone.scale = getmat(
+        bone, active, context,
+        obj_bone.data.bones[bone.name].inherit_scale not in {'NONE', 'NONE_LEGACY'}
+    ).to_scale()


 def pDrwExec(bone, active, context):

View File

@@ -6,6 +6,7 @@
 import bpy
 from bpy.props import FloatProperty, FloatVectorProperty
+from bpy.app.translations import pgettext_iface as iface_
 import gpu
 from gpu_extras.batch import batch_for_shader
 from mathutils import Vector
@@ -248,8 +249,8 @@ class SUNPOS_OT_ShowHdr(bpy.types.Operator):
         self.initial_azimuth = context.scene.sun_pos_properties.hdr_azimuth

         context.workspace.status_text_set(
-            "Enter/LMB: confirm, Esc/RMB: cancel,"
-            " MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure")
+            iface_("Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, "
+                   "mouse wheel: zoom, Ctrl + mouse wheel: set exposure"))

         self._handle = bpy.types.SpaceView3D.draw_handler_add(
             draw_callback_px, (self, context), 'WINDOW', 'POST_PIXEL'
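Note: the status-bar message is now wrapped in pgettext_iface so the runtime-built string can be picked up by the UI translation system. A minimal sketch of the same pattern, assuming a workspace is available in the current context (the message text is just the one from the patch):

import bpy
from bpy.app.translations import pgettext_iface as iface_

# Strings assembled at runtime are only translated if explicitly wrapped;
# iface_() looks the message up in the active interface locale.
bpy.context.workspace.status_text_set(
    iface_("Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, "
           "mouse wheel: zoom, Ctrl + mouse wheel: set exposure"))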

View File

@@ -416,6 +416,19 @@ translations_tuple = (
     ("fr_FR", "Année",
      (False, ())),
    ),
+    (("*", "Unknown projection"),
+     (("scripts/addons/sun_position/hdr.py:181",),
+      ()),
+     ("fr_FR", "Projection inconnue",
+      (False, ())),
+    ),
+    (("*", "Enter/LMB: confirm, Esc/RMB: cancel, MMB: pan, mouse wheel: zoom, Ctrl + mouse wheel: set exposure"),
+     (("scripts/addons/sun_position/hdr.py:252",),
+      ()),
+     ("fr_FR", "Entrée/ClicG : Confirmer, Échap/ClicD : Annuler, ClicM : défiler, "
+               "molette : zoom, Ctrl + molette : exposition",
+      (False, ())),
+    ),
    (("*", "Could not find 3D View"),
     (("scripts/addons/sun_position/hdr.py:263",),
      ()),
@@ -428,12 +441,6 @@
     ("fr_FR", "Veuillez utiliser un nœud de texture d’environnement",
      (False, ())),
    ),
-    (("*", "Unknown projection"),
-     (("scripts/addons/sun_position/hdr.py:181",),
-      ()),
-     ("fr_FR", "Projection inconnue",
-      (False, ())),
-    ),
    (("*", "Show options and info:"),
     (("scripts/addons/sun_position/properties.py:297",),
      ()),

View File

@@ -344,7 +344,9 @@ class UI_OT_i18n_addon_translation_export(Operator):
             if not lng.use:
                 print("Skipping {} language ({}).".format(lng.name, lng.uid))
                 continue
-            uid = utils_i18n.find_best_isocode_matches(lng.uid, trans.trans.keys())
+            translation_keys = {k for k in trans.trans.keys()
+                                if k != self.settings.PARSER_TEMPLATE_ID}
+            uid = utils_i18n.find_best_isocode_matches(lng.uid, translation_keys)
             if uid:
                 uids.append(uid[0])
@@ -357,8 +359,8 @@
             if not os.path.isfile(path):
                 continue
             msgs = utils_i18n.I18nMessages(kind='PO', src=path, settings=self.settings)
-            msgs.update(trans.msgs[self.settings.PARSER_TEMPLATE_ID])
-            trans.msgs[uid] = msgs
+            msgs.update(trans.trans[self.settings.PARSER_TEMPLATE_ID])
+            trans.trans[uid] = msgs

         trans.write(kind='PO', langs=set(uids))