EEVEE Next: Add imageStore/LoadFast ops to Raytrace passes #121117

Merged
Clément Foucault merged 5 commits from Jason-Fielder/blender:im_store_fast_rt into main 2024-05-24 12:51:31 +02:00
77 changed files with 976 additions and 479 deletions
Showing only changes of commit 0202492758

View File

@ -18,12 +18,16 @@ macro(fftw_build FFTW_POSTFIX)
CMAKE_ARGS
-DCMAKE_INSTALL_PREFIX=${LIBDIR}/fftw3
-DENABLE_THREADS=ON
-DWITH_COMBINED_THREADS=OFF
-DBUILD_SHARED_LIBS=OFF
-DBUILD_TESTS=OFF
${FFTW_EXTRA_ARGS}
INSTALL_DIR ${LIBDIR}/fftw3
)
else()
set(FFTW_EXTRA_ARGS --enable-static)
set(FFTW_EXTRA_ARGS --enable-static --enable-threads)
set(FFTW_INSTALL install)
ExternalProject_Add(external_fftw3_${FFTW_POSTFIX}
URL file://${PACKAGE_DIR}/${FFTW_FILE}
@ -57,12 +61,12 @@ if(MSVC)
COMMAND ${CMAKE_COMMAND} -E copy
${LIBDIR}/fftw3/lib/fftw3.lib
${HARVEST_TARGET}/fftw3/lib/fftw3.lib
COMMAND ${CMAKE_COMMAND} -E copy
${LIBDIR}/fftw3/bin/fftw3.dll
${HARVEST_TARGET}/fftw3/lib/fftw3.dll
COMMAND ${CMAKE_COMMAND} -E copy
${LIBDIR}/fftw3/include/fftw3.h
${HARVEST_TARGET}/fftw3/include/fftw3.h
COMMAND ${CMAKE_COMMAND} -E copy
${LIBDIR}/fftw3/lib/fftw3_threads.lib
${HARVEST_TARGET}/fftw3/lib/fftw3_threads.lib
DEPENDEES install
)
ExternalProject_Add_Step(external_fftw3_float after_install
@ -70,8 +74,8 @@ if(MSVC)
${LIBDIR}/fftw3/lib/fftw3f.lib
${HARVEST_TARGET}/fftw3/lib/fftw3f.lib
COMMAND ${CMAKE_COMMAND} -E copy
${LIBDIR}/fftw3/bin/fftw3f.dll
${HARVEST_TARGET}/fftw3/lib/fftw3f.dll
${LIBDIR}/fftw3/lib/fftw3f_threads.lib
${HARVEST_TARGET}/fftw3/lib/fftw3f_threads.lib
DEPENDEES install
)
endif()

View File

@ -73,3 +73,13 @@ index ae14ced..a49e131 100644
else
return HIPArch::Unknown;
}
--- a/devices/cpu/cpu_engine.h
+++ b/devices/cpu/cpu_engine.h
@@ -7,5 +7,7 @@
#include "cpu_device.h"
#include <queue>
#include <condition_variable>
+// BLENDER: needed for building on Linux.
+#include <thread>
OIDN_NAMESPACE_BEGIN

View File

@ -440,12 +440,16 @@ endif()
if(WITH_FFTW3)
set(FFTW3 ${LIBDIR}/fftw3)
if(EXISTS ${FFTW3}/lib/libfftw3-3.lib) # 3.6 libraries
set(FFTW3_LIBRARIES ${FFTW3}/lib/libfftw3-3.lib ${FFTW3}/lib/libfftw3f.lib)
elseif(EXISTS ${FFTW3}/lib/libfftw.lib)
set(FFTW3_LIBRARIES ${FFTW3}/lib/libfftw.lib) # 3.5 Libraries
else()
set(FFTW3_LIBRARIES ${FFTW3}/lib/fftw3.lib ${FFTW3}/lib/fftw3f.lib) # msys2+MSVC Libraries
set(FFTW3_LIBRARIES
${FFTW3}/lib/fftw3.lib
${FFTW3}/lib/fftw3f.lib
)
if(EXISTS ${FFTW3}/lib/fftw3_threads.lib)
list(APPEND FFTW3_LIBRARIES
${FFTW3}/lib/fftw3_threads.lib
${FFTW3}/lib/fftw3f_threads.lib
)
set(WITH_FFTW3_THREADS_SUPPORT ON)
endif()
set(FFTW3_INCLUDE_DIRS ${FFTW3}/include)
set(FFTW3_LIBPATH ${FFTW3}/lib)

View File

@ -409,7 +409,7 @@ void FFMPEGReader::seek(int position)
{
double pts_time_base = av_q2d(m_formatCtx->streams[m_stream]->time_base);
uint64_t st_time = m_formatCtx->streams[m_stream]->start_time;
int64_t st_time = m_formatCtx->streams[m_stream]->start_time;
uint64_t seek_pos = (uint64_t)(position / (pts_time_base * m_specs.rate));
if(st_time != AV_NOPTS_VALUE)

View File

@ -2437,7 +2437,7 @@ class CYCLES_VIEW3D_PT_shading_lighting(Panel):
sub.template_icon_view(shading, "studio_light", scale_popup=3)
col = split.column()
col.operator("preferences.studiolight_show", emboss=False, text="", icon='PREFERENCES')
col.operator("screen.userpref_show", emboss=False, text="", icon='PREFERENCES').section = 'LIGHTS'
split = layout.split(factor=0.9)
col = split.column()

View File

@ -377,7 +377,7 @@ class GHOST_XrGraphicsBindingD3D : public GHOST_IXrGraphicsBinding {
strstream << "Minimum DirectX 11 Feature Level " << gpu_requirements.minFeatureLevel
<< std::endl;
*r_requirement_info = std::move(strstream.str());
*r_requirement_info = strstream.str();
}
return m_ghost_d3d_ctx->m_device->GetFeatureLevel() >= gpu_requirements.minFeatureLevel;

View File

@ -134,6 +134,12 @@ use_repos_to_notify = False
def repos_to_notify():
import os
from .bl_extension_utils import (
scandir_with_demoted_errors,
PKG_MANIFEST_FILENAME_TOML,
)
repos_notify = []
if not bpy.app.background:
# To use notifications on startup requires:
@ -143,7 +149,6 @@ def repos_to_notify():
# Since it's not all that common to disable the status bar just run notifications
# if any repositories are marked to run notifications.
online_access = bpy.app.online_access
prefs = bpy.context.preferences
extension_repos = prefs.extensions.repos
for repo_item in extension_repos:
@ -158,14 +163,30 @@ def repos_to_notify():
if not remote_url:
continue
if online_access:
# All URLs may be accessed.
pass
else:
# Allow remote file-system repositories even when online access is disabled.
if not remote_url.startswith("file://"):
continue
# WARNING: this could be a more expensive check, use a "reasonable" guess.
# This is technically incorrect because knowing if a repository has any installed
# packages requires reading its meta-data and comparing it with the directory contents.
# Chances are, if the directory contains *any* directories containing a package manifest,
# it has packages installed.
#
# Simply check the repositories directory isn't empty (ignoring dot-files).
# Importantly, this may produce false positives but *not* false negatives.
repo_is_empty = True
repo_directory = repo_item.directory
if os.path.isdir(repo_directory):
for entry in scandir_with_demoted_errors(repo_directory):
if not entry.is_dir():
continue
if entry.name.startswith("."):
continue
if not os.path.exists(os.path.join(entry.path, PKG_MANIFEST_FILENAME_TOML)):
continue
repo_is_empty = False
break
if repo_is_empty:
continue
# NOTE: offline checks are handled by the notification (not here).
repos_notify.append(repo_item)
return repos_notify
@ -223,7 +244,10 @@ def extenion_repos_files_clear(directory, _):
# has the potential to wipe user data #119481.
import shutil
import os
from .bl_extension_utils import scandir_with_demoted_errors
from .bl_extension_utils import (
scandir_with_demoted_errors,
PKG_MANIFEST_FILENAME_TOML,
)
# Unlikely but possible: a new repository is immediately removed before initializing;
# avoid errors in this case.
if not os.path.isdir(directory):
@ -232,18 +256,18 @@ def extenion_repos_files_clear(directory, _):
if os.path.isdir(path := os.path.join(directory, ".blender_ext")):
try:
shutil.rmtree(path)
except BaseException as ex:
except Exception as ex:
print("Failed to remove files", ex)
for entry in scandir_with_demoted_errors(directory):
if not entry.is_dir():
continue
path = entry.path
if not os.path.exists(os.path.join(path, "blender_manifest.toml")):
if not os.path.exists(os.path.join(path, PKG_MANIFEST_FILENAME_TOML)):
continue
try:
shutil.rmtree(path)
except BaseException as ex:
except Exception as ex:
print("Failed to remove files", ex)
@ -298,11 +322,11 @@ def monkeypatch_extensions_repos_update_pre(*_):
print_debug("PRE:")
try:
monkeypatch_extenions_repos_update_pre_impl()
except BaseException as ex:
except Exception as ex:
print_debug("ERROR", str(ex))
try:
monkeypatch_extensions_repos_update_pre._fn_orig()
except BaseException as ex:
except Exception as ex:
print_debug("ERROR", str(ex))
@ -311,11 +335,11 @@ def monkeypatch_extenions_repos_update_post(*_):
print_debug("POST:")
try:
monkeypatch_extenions_repos_update_post._fn_orig()
except BaseException as ex:
except Exception as ex:
print_debug("ERROR", str(ex))
try:
monkeypatch_extenions_repos_update_post_impl()
except BaseException as ex:
except Exception as ex:
print_debug("ERROR", str(ex))

View File

@ -94,7 +94,7 @@ class subcmd_utils:
bpy.ops.bl_pkg.repo_sync_all()
if show_done:
sys.stdout.write("Done...\n\n")
except BaseException:
except Exception:
print("Error synchronizing")
import traceback
traceback.print_exc()
@ -372,7 +372,7 @@ class subcmd_pkg:
)
except RuntimeError:
return False # The error will have been printed.
except BaseException as ex:
except Exception as ex:
sys.stderr.write(str(ex))
sys.stderr.write("\n")

View File

@ -24,6 +24,9 @@ from . import bl_extension_utils
# only keep this as a reference and in case we can speed up forcing them to exit.
USE_GRACEFUL_EXIT = False
# Special value to signal no packages can be updated because all repositories are blocked by being offline.
STATE_DATA_ALL_OFFLINE = object()
# -----------------------------------------------------------------------------
# Internal Utilities
@ -140,8 +143,19 @@ def sync_status_generator(repos_notify):
# Setup The Update #
# ################ #
repos_notify_orig = repos_notify
if not bpy.app.online_access:
repos_notify = [repo for repo in repos_notify if repo.remote_url.startswith("file://")]
if not repos_notify:
# Special case, early exit.
yield (STATE_DATA_ALL_OFFLINE, 0, ())
return
yield None
any_offline = len(repos_notify) != len(repos_notify_orig)
del repos_notify_orig
# An extension unique to this session.
unique_ext = "@{:x}".format(os.getpid())
@ -244,10 +258,15 @@ def sync_status_generator(repos_notify):
# TODO: more elegant way to detect changes.
# Re-calculating the same information each time then checking if it's different isn't great.
if command_result.status_data_changed:
extra_warnings = []
if command_result.all_complete:
any_lock_errors = sync_apply_locked(repos_notify, repos_notify_files, unique_ext)
update_total = sync_status_count_outdated_extensions(repos_notify)
yield (cmd_batch.calc_status_data(), update_total, any_lock_errors)
if any_lock_errors:
extra_warnings.append(" Failed to acquire lock!")
if any_offline:
extra_warnings.append(" Skipping online repositories!")
yield (cmd_batch.calc_status_data(), update_total, extra_warnings)
else:
yield None
@ -294,7 +313,7 @@ class NotifyHandle:
self.state = 0
# We could start the generator separately, this seems OK here for now.
self.sync_generator = iter(sync_status_generator(repos_notify))
# TEXT/ICON_ID/COUNT
# status_data, update_count, extra_warnings.
self.sync_info = None
@ -354,11 +373,15 @@ def splash_draw_status_fn(self, context):
if _notify.sync_info is None:
self.layout.label(text="Updates starting...")
elif _notify.sync_info[0] is STATE_DATA_ALL_OFFLINE:
# The special case is ugly but showing this operator doesn't fit well with other kinds of status updates.
self.layout.operator("bl_pkg.extensions_show_online_prefs", text="Offline mode", icon='ORPHAN_DATA')
else:
status_data, update_count, any_lock_errors = _notify.sync_info
status_data, update_count, extra_warnings = _notify.sync_info
text, icon = bl_extension_utils.CommandBatch.calc_status_text_icon_from_data(status_data, update_count)
if any_lock_errors:
text = text + " - failed to acquire lock!"
# Not more than 1-2 of these (failed to lock, some repositories offline, etc.).
for warning in extra_warnings:
text = text + warning
row = self.layout.row(align=True)
if update_count > 0:
row.operator("bl_pkg.extensions_show_for_update", text=text, icon=icon)

View File

@ -456,7 +456,7 @@ def _preferences_ensure_sync():
win.cursor_set('WAIT')
try:
bpy.ops.bl_pkg.repo_sync_all()
except BaseException as ex:
except Exception as ex:
print("Sync failed:", ex)
for wm in bpy.data.window_managers:
@ -982,7 +982,7 @@ class BlPkgRepoSync(Operator, _BlPkgCmdMixIn):
if not os.path.exists(directory):
try:
os.makedirs(directory)
except BaseException as ex:
except Exception as ex:
self.report({'ERROR'}, str(ex))
return {'CANCELLED'}
@ -1050,7 +1050,7 @@ class BlPkgRepoSyncAll(Operator, _BlPkgCmdMixIn):
if not os.path.exists(repo_item.directory):
try:
os.makedirs(repo_item.directory)
except BaseException as ex:
except Exception as ex:
self.report({'WARNING'}, str(ex))
return None
@ -2245,42 +2245,27 @@ class BlPkgShowUpgrade(Operator):
return {'FINISHED'}
class BlPkgOnlineAccess(Operator):
"""Handle online access"""
bl_idname = "bl_pkg.extension_online_access"
# NOTE: this is a wrapper for `SCREEN_OT_userpref_show`.
# It exists *only* to add a poll function which sets a message when offline mode is forced.
class BlPkgShowOnlinePreference(Operator):
"""Show system preferences "Network" panel to allow online access"""
bl_idname = "bl_pkg.extensions_show_online_prefs"
bl_label = ""
bl_options = {'INTERNAL'}
enable: BoolProperty(
name="Enable",
default=False,
)
@classmethod
def poll(cls, context):
if bpy.app.online_access_override:
if not bpy.app.online_access:
cls.poll_message_set("Blender was launched in offline-mode which cannot be changed at runtime")
return False
return True
def execute(self, context):
wm = context.window_manager
prefs = context.preferences
remote_url = "https://extensions.blender.org/api/v1/extensions"
if self.enable:
extension_repos = prefs.extensions.repos
repo_found = None
for repo in extension_repos:
if repo.remote_url == remote_url:
repo_found = repo
break
if repo_found:
repo_found.enabled = True
else:
# While not expected, we want to know if this ever occurs, don't fail silently.
self.report({'WARNING'}, "Repository \"{:s}\" not found!".format(remote_url))
if bpy.app.online_access:
# Run the first check for updates automatically.
# Invoke the modal operator so users can cancel by pressing "Escape".
assert bpy.ops.bl_pkg.repo_sync_all.poll()
bpy.ops.bl_pkg.repo_sync_all('INVOKE_DEFAULT')
prefs.extensions.use_online_access_handled = True
bpy.ops.screen.userpref_show('INVOKE_DEFAULT', section='SYSTEM')
return {'FINISHED'}
@ -2334,7 +2319,7 @@ classes = (
BlPkgRepoUnlock,
BlPkgShowUpgrade,
BlPkgOnlineAccess,
BlPkgShowOnlinePreference,
# Dummy, just shows a message.
BlPkgEnableNotInstalled,

View File

@ -296,16 +296,21 @@ def extensions_panel_draw_online_extensions_request_impl(
if layout_panel is not None:
# Text wrapping isn't supported; wrap manually.
for line in (
"Welcome! Access community-made add-ons and themes from the",
"Welcome! Access community-made add-ons and themes from the ",
"extensions.blender.org repository.",
"",
"This also requires internet access which must be enabled in \"System\" preferences.",
"This requires online access which must be enabled in \"System\" preferences.",
):
layout_panel.label(text=line)
row = layout.row()
row.operator("bl_pkg.extension_online_access", text="Dismiss", icon='X').enable = False
row.operator("bl_pkg.extension_online_access", text="Enable Repository", icon='CHECKMARK').enable = True
props = row.operator("wm.context_set_boolean", text="Dismiss", icon='X')
props.data_path = "preferences.extensions.use_online_access_handled"
props.value = True
# The only reason to prefer this over `screen.userpref_show`
# is that it will be disabled when `--offline-mode` is forced, with a useful error explaining why.
row.operator("bl_pkg.extensions_show_online_prefs", text="Go to System")
def extensions_panel_draw_impl(
@ -833,7 +838,16 @@ def extensions_panel_draw(panel, context):
if repo_status_text.running:
return
if not prefs.extensions.use_online_access_handled:
# Check if the extensions "Welcome" panel should be displayed.
# Even though it can be dismissed, it's quite "in-your-face", so only show it when needed.
if (
# The user didn't dismiss.
(not prefs.extensions.use_online_access_handled) and
# Running offline.
(not bpy.app.online_access) and
# There is one or more repositories that require remote access.
any(repo for repo in prefs.extensions.repos if repo.enabled and repo.use_remote_url)
):
extensions_panel_draw_online_extensions_request_impl(panel, context)
extensions_panel_draw_impl(

View File

@ -136,7 +136,7 @@ if sys.platform == "win32":
if res == 0:
print(WinError())
def file_handle_non_blocking_is_error_blocking(ex: BaseException) -> bool:
def file_handle_non_blocking_is_error_blocking(ex: Exception) -> bool:
if not isinstance(ex, OSError):
return False
from ctypes import GetLastError
@ -152,7 +152,7 @@ else:
flags = fcntl.fcntl(file_handle.fileno(), fcntl.F_GETFL)
fcntl.fcntl(file_handle, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def file_handle_non_blocking_is_error_blocking(ex: BaseException) -> bool:
def file_handle_non_blocking_is_error_blocking(ex: Exception) -> bool:
if not isinstance(ex, BlockingIOError):
return False
return True
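A condensed sketch of the non-blocking read pattern these annotations belong to (POSIX-only, since it uses `fcntl`; function names are illustrative):

```python
import fcntl
import os

def file_handle_non_blocking_set(file_handle):
    # Switch the handle to non-blocking mode (POSIX).
    flags = fcntl.fcntl(file_handle.fileno(), fcntl.F_GETFL)
    fcntl.fcntl(file_handle, fcntl.F_SETFL, flags | os.O_NONBLOCK)

def read_chunk_or_empty(stdout):
    # Reading an empty non-blocking pipe raises BlockingIOError;
    # treat that as "no data yet" rather than a real error.
    try:
        return stdout.read()
    except BlockingIOError:
        return b''
```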
@ -170,7 +170,7 @@ def scandir_with_demoted_errors(path: str) -> Generator[os.DirEntry[str], None,
try:
for entry in os.scandir(path):
yield entry
except BaseException as ex:
except Exception as ex:
print("Error: scandir", ex)
@ -203,7 +203,7 @@ def command_output_from_json_0(
# It's possible this is multiple chunks.
try:
chunk = stdout.read()
except BaseException as ex:
except Exception as ex:
if not file_handle_non_blocking_is_error_blocking(ex):
raise ex
chunk = b''
@ -512,7 +512,7 @@ def pkg_repo_cache_clear(local_dir: str) -> None:
# Should never fail unless the file-system has permissions issues or corruption.
try:
os.unlink(entry.path)
except BaseException as ex:
except Exception as ex:
print("Error: unlink", ex)
@ -829,7 +829,7 @@ class _RepoCacheEntry:
def _json_data_ensure(
self,
*,
error_fn: Callable[[BaseException], None],
error_fn: Callable[[Exception], None],
check_files: bool = False,
ignore_missing: bool = False,
) -> Any:
@ -842,7 +842,7 @@ class _RepoCacheEntry:
try:
self._pkg_manifest_remote = json_from_filepath(filepath_json)
except BaseException as ex:
except Exception as ex:
self._pkg_manifest_remote = None
error_fn(ex)
@ -867,7 +867,7 @@ class _RepoCacheEntry:
def _json_data_refresh_from_toml(
self,
*,
error_fn: Callable[[BaseException], None],
error_fn: Callable[[Exception], None],
force: bool = False,
) -> None:
assert self.remote_url == ""
@ -885,7 +885,7 @@ class _RepoCacheEntry:
# A symbolic-link that's followed (good); if it exists and is a file, an error is raised here and returned.
if not os.path.isdir(directory):
os.makedirs(directory, exist_ok=True)
except BaseException as ex:
except Exception as ex:
error_fn(ex)
return
del directory
@ -911,7 +911,7 @@ class _RepoCacheEntry:
def _json_data_refresh(
self,
*,
error_fn: Callable[[BaseException], None],
error_fn: Callable[[Exception], None],
force: bool = False,
) -> None:
if force or (self._pkg_manifest_remote is None) or (self._pkg_manifest_remote_mtime == 0):
@ -934,7 +934,7 @@ class _RepoCacheEntry:
try:
self._pkg_manifest_remote = json_from_filepath(filepath_json)
except BaseException as ex:
except Exception as ex:
self._pkg_manifest_remote = None
error_fn(ex)
@ -947,7 +947,7 @@ class _RepoCacheEntry:
def pkg_manifest_from_local_ensure(
self,
*,
error_fn: Callable[[BaseException], None],
error_fn: Callable[[Exception], None],
ignore_missing: bool = False,
) -> Optional[Dict[str, Dict[str, Any]]]:
# Important for local-only repositories (where the directory name defines the ID).
@ -961,7 +961,7 @@ class _RepoCacheEntry:
pkg_manifest_local = {}
try:
dir_entries = os.scandir(self.directory)
except BaseException as ex:
except Exception as ex:
dir_entries = None
error_fn(ex)
@ -986,7 +986,7 @@ class _RepoCacheEntry:
filepath_toml = os.path.join(self.directory, filename, PKG_MANIFEST_FILENAME_TOML)
try:
item_local = toml_from_filepath(filepath_toml)
except BaseException as ex:
except Exception as ex:
item_local = None
error_fn(ex)
@ -1017,7 +1017,7 @@ class _RepoCacheEntry:
def pkg_manifest_from_remote_ensure(
self,
*,
error_fn: Callable[[BaseException], None],
error_fn: Callable[[Exception], None],
ignore_missing: bool = False,
) -> Optional[Dict[str, Dict[str, Any]]]:
if self._pkg_manifest_remote is None:
@ -1069,7 +1069,7 @@ class RepoCacheStore:
self,
directory: str,
*,
error_fn: Callable[[BaseException], None],
error_fn: Callable[[Exception], None],
force: bool = False,
) -> None:
for repo_entry in self._repos:
@ -1082,7 +1082,7 @@ class RepoCacheStore:
self,
directory: str,
*,
error_fn: Callable[[BaseException], None],
error_fn: Callable[[Exception], None],
ignore_missing: bool = False,
directory_subset: Optional[Set[str]] = None,
) -> Optional[Dict[str, Dict[str, Any]]]:
@ -1099,7 +1099,7 @@ class RepoCacheStore:
def pkg_manifest_from_remote_ensure(
self,
*,
error_fn: Callable[[BaseException], None],
error_fn: Callable[[Exception], None],
check_files: bool = False,
ignore_missing: bool = False,
directory_subset: Optional[Set[str]] = None,
@ -1137,7 +1137,7 @@ class RepoCacheStore:
def pkg_manifest_from_local_ensure(
self,
*,
error_fn: Callable[[BaseException], None],
error_fn: Callable[[Exception], None],
check_files: bool = False,
directory_subset: Optional[Set[str]] = None,
) -> Generator[Optional[Dict[str, Dict[str, Any]]], None, None]:
@ -1198,7 +1198,7 @@ class RepoLock:
try:
with open(local_lock_file, "r", encoding="utf8") as fh:
data = fh.read()
except BaseException as ex:
except Exception as ex:
return "lock file could not be read: {:s}".format(str(ex))
# The lock is held.
@ -1210,7 +1210,7 @@ class RepoLock:
# The lock is held (but stale), remove it.
try:
os.remove(local_lock_file)
except BaseException as ex:
except Exception as ex:
return "lock file could not be removed: {:s}".format(str(ex))
return None
@ -1241,12 +1241,12 @@ class RepoLock:
try:
with open(local_lock_file, "w", encoding="utf8") as fh:
fh.write(self._cookie)
except BaseException as ex:
except Exception as ex:
result[directory] = "Lock could not be created: {:s}".format(str(ex))
# Remove if it was created (but failed to write)... disk-full?
try:
os.remove(local_lock_file)
except BaseException:
except Exception:
pass
continue
@ -1268,7 +1268,7 @@ class RepoLock:
try:
with open(local_lock_file, "r", encoding="utf8") as fh:
data = fh.read()
except BaseException as ex:
except Exception as ex:
result[directory] = "release(): lock file could not be read: {:s}".format(str(ex))
continue
# Owned by another application, this shouldn't happen.
@ -1279,7 +1279,7 @@ class RepoLock:
# This is our lock file, we're allowed to remove it!
try:
os.remove(local_lock_file)
except BaseException as ex:
except Exception as ex:
result[directory] = "release(): failed to remove file {!r}".format(ex)
self._held = False
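Condensed, the cookie-based lock protocol that `RepoLock` implements looks roughly like this (a sketch under simplified assumptions, not the class itself):

```python
import os

def lock_acquire(local_lock_file, cookie):
    # The cookie identifies this process; an existing lock file with a
    # different cookie means another process holds the lock.
    if os.path.exists(local_lock_file):
        with open(local_lock_file, "r", encoding="utf8") as fh:
            return fh.read() == cookie
    with open(local_lock_file, "w", encoding="utf8") as fh:
        fh.write(cookie)
    return True

def lock_release(local_lock_file, cookie):
    # Only remove the lock file if this process owns it.
    with open(local_lock_file, "r", encoding="utf8") as fh:
        if fh.read() != cookie:
            return False  # Owned by another application.
    os.remove(local_lock_file)
    return True
```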

View File

@ -651,7 +651,7 @@ def pkg_manifest_from_archive_and_validate(
) -> Union[PkgManifest, str]:
try:
zip_fh_context = zipfile.ZipFile(filepath, mode="r")
except BaseException as ex:
except Exception as ex:
return "Error extracting archive \"{:s}\"".format(str(ex))
with contextlib.closing(zip_fh_context) as zip_fh:
@ -1517,7 +1517,7 @@ def repo_json_is_valid_or_error(filepath: str) -> Optional[str]:
try:
with open(filepath, "r", encoding="utf-8") as fh:
result = json.load(fh)
except BaseException as ex:
except Exception as ex:
return str(ex)
if not isinstance(result, dict):
@ -1567,7 +1567,7 @@ def pkg_manifest_toml_is_valid_or_error(filepath: str, strict: bool) -> Tuple[Op
try:
with open(filepath, "rb") as fh:
result = tomllib.load(fh)
except BaseException as ex:
except Exception as ex:
return str(ex), {}
error = pkg_manifest_is_valid_or_error(result, from_repo=False, strict=strict)
@ -2155,7 +2155,7 @@ class subcmd_client:
with CleanupPathsContext(files=(), directories=directories_to_clean):
try:
zip_fh_context = zipfile.ZipFile(filepath_archive, mode="r")
except BaseException as ex:
except Exception as ex:
message_warn(
msg_fn,
"Error extracting archive: {:s}".format(str(ex)),
@ -2223,7 +2223,7 @@ class subcmd_client:
try:
for member in zip_fh.infolist():
zip_fh.extract(member, filepath_local_pkg_temp)
except BaseException as ex:
except Exception as ex:
message_warn(
msg_fn,
"Failed to extract files for \"{:s}\": {:s}".format(manifest.id, str(ex)),
@ -2485,7 +2485,7 @@ class subcmd_client:
filepath_local_pkg = os.path.join(local_dir, pkg_idname)
try:
shutil.rmtree(filepath_local_pkg)
except BaseException as ex:
except Exception as ex:
message_error(msg_fn, "Failure to remove \"{:s}\" with error ({:s})".format(pkg_idname, str(ex)))
continue
@ -2613,7 +2613,7 @@ class subcmd_author:
with CleanupPathsContext(files=(outfile_temp,), directories=()):
try:
zip_fh_context = zipfile.ZipFile(outfile_temp, 'w', zipfile.ZIP_LZMA)
except BaseException as ex:
except Exception as ex:
message_status(msg_fn, "Error creating archive \"{:s}\"".format(str(ex)))
return False
@ -2640,7 +2640,7 @@ class subcmd_author:
compress_type = zipfile.ZIP_STORED if filepath_skip_compress(filepath_abs) else None
try:
zip_fh.write(filepath_abs, filepath_rel, compress_type=compress_type)
except BaseException as ex:
except Exception as ex:
message_status(msg_fn, "Error adding to archive \"{:s}\"".format(str(ex)))
return False
@ -2713,7 +2713,7 @@ class subcmd_author:
try:
zip_fh_context = zipfile.ZipFile(pkg_source_archive, mode="r")
except BaseException as ex:
except Exception as ex:
message_status(msg_fn, "Error extracting archive \"{:s}\"".format(str(ex)))
return False
@ -2799,7 +2799,7 @@ class subcmd_dummy:
if not os.path.exists(repo_dir):
try:
os.makedirs(repo_dir)
except BaseException as ex:
except Exception as ex:
message_error(msg_fn, "Failed to create \"{:s}\" with error: {!r}".format(repo_dir, ex))
return False

View File

@ -5,7 +5,7 @@
bl_info = {
'name': 'glTF 2.0 format',
'author': 'Julien Duroure, Scurest, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
"version": (4, 2, 28),
"version": (4, 2, 39),
'blender': (4, 2, 0),
'location': 'File > Import-Export',
'description': 'Import-Export as glTF 2.0',
@ -484,9 +484,32 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
),
default=False)
export_colors: BoolProperty(
name='Dummy',
description='Keep for compatibility only',
export_vertex_color: EnumProperty(
name='Use Vertex Color',
items=(
('MATERIAL', 'Material',
'Export vertex color when used by material'),
('ACTIVE', 'Active',
'Export active vertex color'),
('NONE', 'None',
'Do not export vertex color')),
description='How to export vertex color',
default='MATERIAL'
)
export_all_vertex_colors: BoolProperty(
name='Export all vertex colors',
description=(
'Export all vertex colors, even if not used by any material. '
'If no Vertex Color is used in the mesh materials, a fake COLOR_0 will be created, '
'in order to keep the material unchanged'
),
default=True
)
export_active_vertex_color_when_no_material: BoolProperty(
name='Export active vertex color when no material',
description='When there is no material on the object, export the active vertex color',
default=True
)
@ -1058,6 +1081,14 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
export_settings['gltf_attributes'] = self.export_attributes
export_settings['gltf_cameras'] = self.export_cameras
export_settings['gltf_vertex_color'] = self.export_vertex_color
if self.export_vertex_color == 'NONE':
export_settings['gltf_all_vertex_colors'] = False
export_settings['gltf_active_vertex_color_when_no_material'] = False
else:
export_settings['gltf_all_vertex_colors'] = self.export_all_vertex_colors
export_settings['gltf_active_vertex_color_when_no_material'] = self.export_active_vertex_color_when_no_material
export_settings['gltf_unused_textures'] = self.export_unused_textures
export_settings['gltf_unused_images'] = self.export_unused_images
@ -1190,6 +1221,8 @@ class ExportGLTF2_Base(ConvertGLTF2_Base):
path_to_uri(os.path.splitext(os.path.basename(self.filepath))[0] + '.bin')
)
export_settings['warning_joint_weight_exceed_already_displayed'] = False
user_extensions = []
pre_export_callbacks = []
post_export_callbacks = []
@ -1354,6 +1387,25 @@ def export_panel_data_mesh(layout, operator):
col = body.column()
col.prop(operator, 'export_shared_accessors')
header, sub_body = body.panel("GLTF_export_data_material_vertex_color", default_closed=True)
header.label(text="Vertex Colors")
if sub_body:
row = sub_body.row()
row.prop(operator, 'export_vertex_color')
if operator.export_vertex_color == "ACTIVE":
row = sub_body.row()
row.label(
text="Note that fully compliant glTF 2.0 engine/viewer will use it as multiplicative factor for base color.",
icon='ERROR')
row = sub_body.row()
row.label(text="If you want to use VC for any other purpose than vertex color, you should use custom attributes.")
row = sub_body.row()
row.active = operator.export_vertex_color != "NONE"
row.prop(operator, 'export_all_vertex_colors')
row = sub_body.row()
row.active = operator.export_vertex_color != "NONE"
row.prop(operator, 'export_active_vertex_color_when_no_material')
def export_panel_data_material(layout, operator):
header, body = layout.panel("GLTF_export_data_material", default_closed=True)

View File

@ -95,7 +95,8 @@ def get_component_type(attribute_component_type):
"FLOAT_VECTOR_4": gltf2_io_constants.ComponentType.Float,
"INT": gltf2_io_constants.ComponentType.Float, # No signed Int in glTF accessor
"FLOAT": gltf2_io_constants.ComponentType.Float,
"BOOLEAN": gltf2_io_constants.ComponentType.Float
"BOOLEAN": gltf2_io_constants.ComponentType.Float,
"UNSIGNED_BYTE": gltf2_io_constants.ComponentType.UnsignedByte
}.get(attribute_component_type)
@ -145,7 +146,8 @@ def get_numpy_type(attribute_component_type):
"FLOAT_VECTOR_4": np.float32,
"INT": np.float32, # signed integer are not supported by glTF
"FLOAT": np.float32,
"BOOLEAN": np.float32
"BOOLEAN": np.float32,
"UNSIGNED_BYTE": np.uint8,
}.get(attribute_component_type)
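Both helpers are plain dictionary lookups, so unknown keys fall through to `None` via `.get()`. A trimmed-down sketch showing the effect of the new `UNSIGNED_BYTE` entry:

```python
import numpy as np

# A trimmed-down version of the lookup above.
def get_numpy_type(attribute_component_type):
    return {
        "FLOAT": np.float32,
        "BOOLEAN": np.float32,
        "UNSIGNED_BYTE": np.uint8,
    }.get(attribute_component_type)

assert get_numpy_type("UNSIGNED_BYTE") is np.uint8
assert get_numpy_type("NOT_A_TYPE") is None  # .get() falls back to None
```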

View File

@ -554,7 +554,9 @@ def __get_blender_actions(obj_uuid: str,
# Multi-strip tracks do not export correctly yet (they need to be baked),
# so skip them for now and only write single-strip tracks.
non_muted_strips = [strip for strip in track.strips if strip.action is not None and strip.mute is False]
if track.strips is None or len(non_muted_strips) != 1:
if track.strips is None or len(non_muted_strips) > 1:
# Warn if multiple strips are found, then ignore this track.
# Ignore without warning if there is no strip.
export_settings['log'].warning(
"NLA track '{}' has {} strips, but only single-strip tracks are supported in 'actions' mode.".format(
track.name, len(

View File

@ -31,10 +31,10 @@ def gather_sk_sampled_keyframes(obj_uuid,
# Using this option, we miss the drivers :(
# No solution exists for now. In the future, we should be able to copy a driver
if action_name in bpy.data.actions:
channel_group, _ = get_channel_groups(
channel_group, _, _ = get_channel_groups(
obj_uuid, bpy.data.actions[action_name], export_settings, no_sample_option=True)
elif blender_obj.data.shape_keys.animation_data and blender_obj.data.shape_keys.animation_data.action:
channel_group, _ = get_channel_groups(
channel_group, _, _ = get_channel_groups(
obj_uuid, blender_obj.data.shape_keys.animation_data.action, export_settings, no_sample_option=True)
else:
channel_group = {}
@ -47,7 +47,7 @@ def gather_sk_sampled_keyframes(obj_uuid,
channels = chan['properties']['value']
break
non_keyed_values = gather_non_keyed_values(obj_uuid, channels, None, export_settings)
non_keyed_values = gather_non_keyed_values(obj_uuid, channels, None, False, export_settings)
while frame <= end_frame:
key = Keyframe(channels, frame, None)

View File

@ -78,11 +78,13 @@ def __gather_skins(blender_primitive, export_settings):
# The warning for the case where we are in the same group will be done later
# (for example, 3 weights needed, but 2 wanted by the user)
if max_bone_set_index > wanted_max_bone_set_index:
export_settings['log'].warning(
"There are more than {} joint vertex influences."
"The {} with highest weight will be used (and normalized).".format(
export_settings['gltf_vertex_influences_nb'],
export_settings['gltf_vertex_influences_nb']))
if export_settings['warning_joint_weight_exceed_already_displayed'] is False:
export_settings['log'].warning(
"There are more than {} joint vertex influences."
"The {} with highest weight will be used (and normalized).".format(
export_settings['gltf_vertex_influences_nb'],
export_settings['gltf_vertex_influences_nb']))
export_settings['warning_joint_weight_exceed_already_displayed'] = True
# Take into account only the first set of 4 weights
max_bone_set_index = wanted_max_bone_set_index
@ -107,11 +109,13 @@ def __gather_skins(blender_primitive, export_settings):
idx = 4 - 1 - i
if not all(weight[:, idx]):
if warning_done is False:
export_settings['log'].warning(
"There are more than {} joint vertex influences."
"The {} with highest weight will be used (and normalized).".format(
export_settings['gltf_vertex_influences_nb'],
export_settings['gltf_vertex_influences_nb']))
if export_settings['warning_joint_weight_exceed_already_displayed'] is False:
export_settings['log'].warning(
"There are more than {} joint vertex influences."
"The {} with highest weight will be used (and normalized).".format(
export_settings['gltf_vertex_influences_nb'],
export_settings['gltf_vertex_influences_nb']))
export_settings['warning_joint_weight_exceed_already_displayed'] = True
warning_done = True
weight[:, idx] = 0.0
@ -203,6 +207,31 @@ def __gather_attribute(blender_primitive, attribute, export_settings):
type=data['data_type'],
)}
elif attribute.startswith("COLOR_") and blender_primitive["attributes"][attribute]['component_type'] == gltf2_io_constants.ComponentType.UnsignedByte:
# We are in the special case where we fake a COLOR_0 attribute with UNSIGNED_BYTE,
# so we need to normalize it
export_user_extensions('gather_attribute_change', export_settings, attribute, data, True)
return {
attribute: gltf2_io.Accessor(
buffer_view=gltf2_io_binary_data.BinaryData(
data['data'].tobytes(),
gltf2_io_constants.BufferViewTarget.ARRAY_BUFFER),
byte_offset=None,
component_type=data['component_type'],
count=len(
data['data']),
extensions=None,
extras=None,
max=None,
min=None,
name=None,
normalized=True,
sparse=None,
type=data['data_type'],
)}
elif attribute.startswith("JOINTS_") or attribute.startswith("WEIGHTS_"):
return __gather_skins(blender_primitive, export_settings)
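The `normalized=True` accessor added above relies on glTF's normalized-integer convention: an `UNSIGNED_BYTE` component `c` decodes to `c / 255.0`. A quick numpy check:

```python
import numpy as np

# Color channels stored as normalized unsigned bytes...
stored = np.array([0, 64, 128, 255], dtype=np.uint8)

# ...decode in a compliant viewer as c / 255.0.
decoded = stored.astype(np.float32) / 255.0
print(decoded)  # [0. 0.2509804 0.5019608 1.]
```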

View File

@ -59,6 +59,10 @@ class PrimitiveCreator:
self.blender_vertex_groups = blender_vertex_groups
self.modifiers = modifiers
self.materials = materials
self.vc_infos = []
self.vc_infos_index = 0
self.export_settings = export_settings
@classmethod
@ -420,10 +424,14 @@ class PrimitiveCreator:
new_prim_indices = {}
self.additional_materials = [] # In case of UDIM
self.uvmap_attribute_list = [] # Initialize here, in case we don't have any triangle primitive
self.uvmap_attribute_lists = []
self.uvmap_attribute_list = [] # For each material # Initialize here, in case we don't have any triangle primitive
no_materials = True
materials_use_vc = None
warning_already_displayed = False
warning_already_displayed_vc_nodetree = False
some_alpha = False
for material_idx in self.prim_indices.keys():
base_material, material_info = get_base_material(material_idx, self.materials, self.export_settings)
@ -431,6 +439,10 @@ class PrimitiveCreator:
self.uvmap_attribute_list = list(
set([i['value'] for i in material_info["uv_info"].values() if 'type' in i.keys() and i['type'] == "Attribute"]))
# Check that attributes are not regular UVMaps
self.uvmap_attribute_list = [
i for i in self.uvmap_attribute_list if i not in self.blender_mesh.uv_layers.keys()]
additional_fields = []
for attr in self.uvmap_attribute_list:
if attr + str(0) not in self.dots.dtype.names: # In case user exports custom attributes, we may have it already
@ -455,6 +467,8 @@ class PrimitiveCreator:
data = data.reshape(-1, 3)
data = data[:, :2]
elif self.blender_mesh.attributes[attr].data_type == "FLOAT2":
# This case should not happen, because we are in CORNER domain / 2D Vector,
# so this attribute is a UVMap
data = np.empty(len(self.blender_mesh.loops) *
2, gltf2_blender_conversion.get_numpy_type('FLOAT2'))
self.blender_mesh.attributes[attr].data.foreach_get('vector', data)
@ -493,63 +507,122 @@ class PrimitiveCreator:
if len(additional_fields) > 0:
self.dots = dots
if base_material is not None:
no_materials = False
# There are multiple cases to take into account for VC
# The simplest test is when no vertex color is used
if material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is None:
# Nothing to do
if self.export_settings['gltf_vertex_color'] == "NONE":
# We don't export any Vertex Color
pass
elif material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is not None:
self.export_settings['log'].warning('We are not managing this case (Vertex Color alpha without color)')
else:
vc_color_name = None
vc_alpha_name = None
if material_info['vc_info']['color_type'] == "name":
vc_color_name = material_info['vc_info']['color']
elif material_info['vc_info']['color_type'] == "active":
# Get active (render) Vertex Color
if self.blender_mesh.color_attributes.render_color_index != -1:
vc_color_name = self.blender_mesh.color_attributes[self.blender_mesh.color_attributes.render_color_index].name
# There is no Vertex Color in node tree
if material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is None:
if material_info['vc_info']['alpha_type'] == "name":
vc_alpha_name = material_info['vc_info']['alpha']
elif material_info['vc_info']['alpha_type'] == "active":
# Get active (render) Vertex Color
if self.blender_mesh.color_attributes.render_color_index != -1:
vc_alpha_name = self.blender_mesh.color_attributes[self.blender_mesh.color_attributes.render_color_index].name
# If user wants to force active vertex color, we need to add it
if (base_material is not None and self.export_settings['gltf_vertex_color'] == "ACTIVE") or (
base_material is None and self.export_settings['gltf_active_vertex_color_when_no_material'] is True):
# We need to add the active vertex color as COLOR_0
vc_color_name = None
vc_alpha_name = None
if vc_color_name is not None:
if self.blender_mesh.color_attributes.render_color_index != -1:
vc_color_name = self.blender_mesh.color_attributes[self.blender_mesh.color_attributes.render_color_index].name
vc_alpha_name = self.blender_mesh.color_attributes[self.blender_mesh.color_attributes.render_color_index].name
vc_key = ""
vc_key += vc_color_name if vc_color_name is not None else ""
vc_key += vc_alpha_name if vc_alpha_name is not None else ""
if vc_color_name is not None:
if materials_use_vc is not None and materials_use_vc != vc_key:
if warning_already_displayed is False:
vc_key = ""
vc_key += vc_color_name if vc_color_name is not None else ""
vc_key += vc_alpha_name if vc_alpha_name is not None else ""
if materials_use_vc is not None and materials_use_vc != vc_key:
if warning_already_displayed is False:
self.export_settings['log'].warning(
'glTF specification does not allow this case (multiple materials with different Vertex Color)')
warning_already_displayed = True
materials_use_vc = vc_key
elif materials_use_vc is None:
materials_use_vc = vc_key
add_alpha = True # As we are using the active Vertex Color without checking node tree, we need to add alpha
if add_alpha is True:
some_alpha = True
self.vc_infos.append({
'color': vc_color_name,
'alpha': vc_alpha_name,
'add_alpha': add_alpha,
'gltf_name': 'COLOR_' + str(self.vc_infos_index),
'forced': False
})
self.vc_infos_index += 1
else:
pass # Using the same Vertex Color
elif base_material is not None and self.export_settings['gltf_vertex_color'] == "MATERIAL":
if warning_already_displayed_vc_nodetree is False:
self.export_settings['log'].warning(
'glTF specification does not allow this case (multiple materials with different Vertex Color)')
warning_already_displayed = True
materials_use_vc = vc_key
'The active Vertex Color will not be exported, as it is not used in the node tree of the material')
warning_already_displayed_vc_nodetree = True
elif materials_use_vc is None:
materials_use_vc = vc_key
# There is only alpha Vertex Color in node tree
elif material_info['vc_info']['color_type'] is None and material_info['vc_info']['alpha_type'] is not None:
self.export_settings['log'].warning(
'We are not managing this case for now (Vertex Color alpha without color)')
# We need to check if we need to add alpha
add_alpha = vc_alpha_name is not None
mat = get_material_from_idx(material_idx, self.materials, self.export_settings)
add_alpha = mat is not None and add_alpha and not (
mat.blend_method is None or mat.blend_method == 'OPAQUE')
# Manage Vertex Color (RGB and Alpha if needed)
self.__manage_color_attribute(vc_color_name, vc_alpha_name if add_alpha else None)
else:
pass # Using the same Vertex Color
# There are some Vertex Color in node tree (or there is no material)
else:
vc_color_name = None
vc_alpha_name = None
if material_info['vc_info']['color_type'] == "name":
vc_color_name = material_info['vc_info']['color']
elif material_info['vc_info']['color_type'] == "active":
# Get active (render) Vertex Color
if self.blender_mesh.color_attributes.render_color_index != -1:
vc_color_name = self.blender_mesh.color_attributes[self.blender_mesh.color_attributes.render_color_index].name
if material_info['vc_info']['alpha_type'] == "name":
vc_alpha_name = material_info['vc_info']['alpha']
elif material_info['vc_info']['alpha_type'] == "active":
# Get active (render) Vertex Color
if self.blender_mesh.color_attributes.render_color_index != -1:
vc_alpha_name = self.blender_mesh.color_attributes[self.blender_mesh.color_attributes.render_color_index].name
if vc_color_name is not None:
vc_key = ""
vc_key += vc_color_name if vc_color_name is not None else ""
vc_key += vc_alpha_name if vc_alpha_name is not None else ""
if materials_use_vc is not None and materials_use_vc != vc_key:
if warning_already_displayed is False:
self.export_settings['log'].warning(
'glTF specification does not allow this case (multiple materials with different Vertex Color)')
warning_already_displayed = True
materials_use_vc = vc_key
elif materials_use_vc is None:
materials_use_vc = vc_key
add_alpha = vc_alpha_name is not None
add_alpha = add_alpha and material_info['vc_info']['alpha_mode'] != "OPAQUE"
if add_alpha is True:
some_alpha = True
self.vc_infos.append({
'color': vc_color_name,
'alpha': vc_alpha_name,
'add_alpha': add_alpha,
'gltf_name': 'COLOR_' + str(self.vc_infos_index),
'forced': False
})
self.vc_infos_index += 1
else:
pass # Using the same Vertex Color
##### UDIM #####
if len(material_info['udim_info'].keys()) == 0:
new_prim_indices[material_idx] = self.prim_indices[material_idx]
self.uvmap_attribute_lists.append(self.uvmap_attribute_list)
self.additional_materials.append(None)
continue
@ -571,12 +644,19 @@ class PrimitiveCreator:
index_uvmap = get_active_uvmap_index(self.blender_mesh)
uvmap_name = "TEXCOORD_" + str(index_uvmap)
else: # Attribute
uvmap_name = material_info['uv_info'][tex]['value']
# This can be a regular UVMap, or a custom attribute
index_uvmap = self.blender_mesh.uv_layers.find(material_info['uv_info'][tex]['value'])
if index_uvmap < 0:
# This is a custom attribute
uvmap_name = material_info['uv_info'][tex]['value']
else:
uvmap_name = "TEXCOORD_" + str(index_uvmap)
all_uvmaps[tex] = uvmap_name
if len(set(all_uvmaps.values())) > 1:
self.export_settings['log'].warning('We are not managing this case (multiple UVMaps for UDIM)')
new_prim_indices[material_idx] = self.prim_indices[material_idx]
self.uvmap_attribute_lists.append(self.uvmap_attribute_list)
self.additional_materials.append(None)
continue
@ -632,6 +712,7 @@ class PrimitiveCreator:
new_triangle_indices.append(self.prim_indices[material_idx][idx + 1])
new_triangle_indices.append(self.prim_indices[material_idx][idx + 2])
new_prim_indices[new_material_index] = np.array(new_triangle_indices, dtype=np.uint32)
self.uvmap_attribute_lists.append(self.uvmap_attribute_list)
new_material_index += 1
# Now we have to create a new material for this tile
@ -688,6 +769,35 @@ class PrimitiveCreator:
self.additional_materials.append(
(new_material, material_info, int(str(id(base_material)) + str(u) + str(v))))
# Now, we need to add additional Vertex Colors if needed
if self.export_settings['gltf_all_vertex_colors'] is True:
if no_materials is False:
if len(self.vc_infos) == 0 and len(self.blender_mesh.color_attributes) > 0:
# We need to add a fake Vertex Color
self.vc_infos.append({
'gltf_name': 'COLOR_0',
'forced': True
})
self.vc_infos_index += 1
# Now, loop on existing Vertex Color, and add the missing ones
if no_materials is False or (
no_materials is True and self.export_settings['gltf_active_vertex_color_when_no_material'] is True):
for vc in self.blender_mesh.color_attributes:
if vc.name not in [v['color'] for v in self.vc_infos if v['forced'] is False]:
add_alpha = some_alpha is True
self.vc_infos.append({
'color': vc.name,
'alpha': vc.name,
'add_alpha': add_alpha,
'gltf_name': 'COLOR_' + str(self.vc_infos_index),
'forced': False
})
self.vc_infos_index += 1
# Now, we need to populate Vertex Color data
self.__manage_color_attributes()
self.prim_indices = new_prim_indices
def primitive_creation_shared(self):
@ -763,7 +873,8 @@ class PrimitiveCreator:
def primitive_creation_not_shared(self):
primitives = []
for material_idx, dot_indices in self.prim_indices.items():
for (material_idx, dot_indices), uvmap_attribute_list in zip(
self.prim_indices.items(), self.uvmap_attribute_lists):
# Extract just dots used by this primitive, deduplicate them, and
# calculate indices into this deduplicated list.
self.prim_dots = self.dots[dot_indices]
@ -786,7 +897,7 @@ class PrimitiveCreator:
next_texcoor_idx = self.tex_coord_max
uvmap_attributes_index = {}
for attr in self.uvmap_attribute_list:
for attr in uvmap_attribute_list:
res = np.empty((len(self.prim_dots), 2), dtype=gltf2_blender_conversion.get_numpy_type('FLOAT2'))
for i in range(2):
res[:, i] = self.prim_dots[attr + str(i)]
@ -998,53 +1109,75 @@ class PrimitiveCreator:
return getting_function
def __manage_color_attribute(self, attr_name, attr_name_alpha):
blender_color_idx = self.blender_mesh.color_attributes.find(attr_name)
if blender_color_idx < 0:
return None
def __manage_color_attributes(self):
if len(self.vc_infos) == 0:
return
# Add COLOR_0 in dots data
attr = self.blender_mesh.color_attributes[blender_color_idx]
# Get data
data_dots, data_dots_edges, data_dots_points = self.__get_color_attribute_data(attr)
# Get data for alpha if needed
if attr_name_alpha is not None and attr_name_alpha != attr_name:
blender_alpha_idx = self.blender_mesh.color_attributes.find(attr_name_alpha)
if blender_alpha_idx >= 0:
attr_alpha = self.blender_mesh.color_attributes[blender_alpha_idx]
data_dots_alpha, data_dots_edges_alpha, data_dots_points_alpha = self.__get_color_attribute_data(
attr_alpha)
# Merging data
data_dots[:, 3] = data_dots_alpha[:, 3]
if data_dots_edges is not None:
data_dots_edges[:, 3] = data_dots_edges_alpha[:, 3]
if data_dots_points is not None:
data_dots_points[:, 3] = data_dots_points_alpha[:, 3]
# Check if we need to get alpha (the 4th channel) here
max_index = 4 if attr_name_alpha is not None else 3
# Add this data to dots structure
# Creating new fields in dots structure
additional_fields = []
for i in range(max_index):
# Must calculate the type of the field : FLOAT_COLOR or BYTE_COLOR
additional_fields.append(
('COLOR_0' + str(i),
gltf2_blender_conversion.get_numpy_type(
'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR')))
additional_fields_edges = []
additional_fields_points = []
for vc in self.vc_infos:
if vc['forced'] is False:
blender_color_idx = self.blender_mesh.color_attributes.find(vc['color'])
if blender_color_idx < 0:
continue
max_index = 4 if vc['add_alpha'] else 3
for i in range(max_index):
# Must calculate the type of the field : FLOAT_COLOR or BYTE_COLOR
additional_fields.append(
(vc['gltf_name'] + str(i),
gltf2_blender_conversion.get_numpy_type(
'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR')))
if self.export_settings['gltf_loose_edges']:
for i in range(max_index):
# Must calculate the type of the field : FLOAT_COLOR or BYTE_COLOR
additional_fields_edges.append(
(vc['gltf_name'] + str(i),
gltf2_blender_conversion.get_numpy_type(
'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR')))
if self.export_settings['gltf_loose_points']:
for i in range(max_index):
# Must calculate the type of the field : FLOAT_COLOR or BYTE_COLOR
additional_fields_points.append(
(vc['gltf_name'] + str(i),
gltf2_blender_conversion.get_numpy_type(
'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR')))
else:
# Forced Vertex Color
max_index = 4
# To reduce the file size, using a normalized unsigned byte attribute filled with 255.
for i in range(max_index):
additional_fields.append(
(vc['gltf_name'] + str(i), gltf2_blender_conversion.get_numpy_type('UNSIGNED_BYTE')))
if self.export_settings['gltf_loose_edges']:
for i in range(max_index):
additional_fields_edges.append(
(vc['gltf_name'] + str(i), gltf2_blender_conversion.get_numpy_type('UNSIGNED_BYTE')))
if self.export_settings['gltf_loose_points']:
for i in range(max_index):
additional_fields_points.append(
(vc['gltf_name'] + str(i), gltf2_blender_conversion.get_numpy_type('UNSIGNED_BYTE')))
# Keep the existing custom attribute
# Data will be exported twice, one for COLOR_0, one for the custom attribute
new_dt = np.dtype(self.dots.dtype.descr + additional_fields)
dots = np.zeros(self.dots.shape, dtype=new_dt)
for f in self.dots.dtype.names:
dots[f] = self.dots[f]
self.dots = dots
if self.export_settings['gltf_loose_edges']:
additional_fields_edges = []
for i in range(max_index):
# Must calculate the type of the field : FLOAT_COLOR or BYTE_COLOR
additional_fields_edges.append(
('COLOR_0' + str(i),
gltf2_blender_conversion.get_numpy_type(
'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR')))
new_dt = np.dtype(self.dots_edges.dtype.descr + additional_fields_edges)
dots_edges = np.zeros(self.dots_edges.shape, dtype=new_dt)
for f in self.dots_edges.dtype.names:
@ -1053,14 +1186,6 @@ class PrimitiveCreator:
self.dots_edges = dots_edges
if self.export_settings['gltf_loose_points']:
additional_fields_points = []
for i in range(max_index):
# Must calculate the type of the field : FLOAT_COLOR or BYTE_COLOR
additional_fields_points.append(
('COLOR_0' + str(i),
gltf2_blender_conversion.get_numpy_type(
'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR')))
new_dt = np.dtype(self.dots_points.dtype.descr + additional_fields_points)
dots_points = np.zeros(self.dots_points.shape, dtype=new_dt)
for f in self.dots_points.dtype.names:
@ -1068,34 +1193,79 @@ class PrimitiveCreator:
self.dots_points = dots_points
# Keep the existing custom attribute
# Data will be exported twice, one for COLOR_0, one for the custom attribute
new_dt = np.dtype(self.dots.dtype.descr + additional_fields)
dots = np.zeros(self.dots.shape, dtype=new_dt)
for f in self.dots.dtype.names:
dots[f] = self.dots[f]
# Now, retrieve data, and populate the new fields
for vc in self.vc_infos:
if vc['forced'] is False:
self.dots = dots
blender_color_idx = self.blender_mesh.color_attributes.find(vc['color'])
if blender_color_idx < 0:
continue
# colors are already linear, no need to switch color space
for i in range(max_index):
self.dots['COLOR_0' + str(i)] = data_dots[:, i]
if self.export_settings['gltf_loose_edges'] and attr.domain == "POINT":
self.dots_edges['COLOR_0' + str(i)] = data_dots_edges[:, i]
if self.export_settings['gltf_loose_points'] and attr.domain == "POINT":
self.dots_points['COLOR_0' + str(i)] = data_dots_points[:, i]
attr = self.blender_mesh.color_attributes[blender_color_idx]
# Add COLOR_0 in attribute list
attr_color_0 = {}
attr_color_0['blender_data_type'] = 'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR'
attr_color_0['blender_domain'] = attr.domain
attr_color_0['gltf_attribute_name'] = 'COLOR_0'
attr_color_0['len'] = max_index # 3 or 4, depending if we have alpha
attr_color_0['type'] = gltf2_blender_conversion.get_numpy_type(attr_color_0['blender_data_type'])
attr_color_0['component_type'] = gltf2_blender_conversion.get_component_type(attr_color_0['blender_data_type'])
attr_color_0['data_type'] = gltf2_io_constants.DataType.Vec3 if max_index == 3 else gltf2_io_constants.DataType.Vec4
max_index = 4 if vc['add_alpha'] else 3
self.blender_attributes.append(attr_color_0)
# Get data
data_dots, data_dots_edges, data_dots_points = self.__get_color_attribute_data(attr)
# Get data for alpha if needed
if vc['add_alpha']:
blender_alpha_idx = self.blender_mesh.color_attributes.find(vc['alpha'])
if blender_alpha_idx >= 0:
attr_alpha = self.blender_mesh.color_attributes[blender_alpha_idx]
data_dots_alpha, data_dots_edges_alpha, data_dots_points_alpha = self.__get_color_attribute_data(
attr_alpha)
# Merging data
data_dots[:, 3] = data_dots_alpha[:, 3]
if data_dots_edges is not None:
data_dots_edges[:, 3] = data_dots_edges_alpha[:, 3]
if data_dots_points is not None:
data_dots_points[:, 3] = data_dots_points_alpha[:, 3]
# colors are already linear, no need to switch color space
for i in range(max_index):
self.dots[vc['gltf_name'] + str(i)] = data_dots[:, i]
if self.export_settings['gltf_loose_edges'] and attr.domain == "POINT":
self.dots_edges[vc['gltf_name'] + str(i)] = data_dots_edges[:, i]
if self.export_settings['gltf_loose_points'] and attr.domain == "POINT":
self.dots_points[vc['gltf_name'] + str(i)] = data_dots_points[:, i]
# Add COLOR_x in attribute list
attr_color_x = {}
attr_color_x['blender_data_type'] = 'FLOAT_COLOR' if max_index == 3 else 'BYTE_COLOR'
attr_color_x['blender_domain'] = attr.domain
attr_color_x['gltf_attribute_name'] = vc['gltf_name']
attr_color_x['len'] = max_index # 3 or 4, depending if we have alpha
attr_color_x['type'] = gltf2_blender_conversion.get_numpy_type(attr_color_x['blender_data_type'])
attr_color_x['component_type'] = gltf2_blender_conversion.get_component_type(
attr_color_x['blender_data_type'])
attr_color_x['data_type'] = gltf2_io_constants.DataType.Vec3 if max_index == 3 else gltf2_io_constants.DataType.Vec4
self.blender_attributes.append(attr_color_x)
else:
# Forced Vertex Color
max_index = 4
# To reduce the file size, using a normalized unsigned byte attribute filled with 255.
for i in range(max_index):
self.dots[vc['gltf_name'] + str(i)] = 255
if self.export_settings['gltf_loose_edges']:
self.dots_edges[vc['gltf_name'] + str(i)] = 255
if self.export_settings['gltf_loose_points']:
self.dots_points[vc['gltf_name'] + str(i)] = 255
# Add COLOR_0 in attribute list
attr_color_x = {}
attr_color_x['blender_data_type'] = 'UNSIGNED_BYTE'
attr_color_x['blender_domain'] = "POINT"
attr_color_x['gltf_attribute_name'] = vc['gltf_name']
attr_color_x['len'] = max_index # 3
attr_color_x['type'] = gltf2_blender_conversion.get_numpy_type(attr_color_x['blender_data_type'])
attr_color_x['component_type'] = gltf2_blender_conversion.get_component_type(
attr_color_x['blender_data_type'])
attr_color_x['data_type'] = gltf2_io_constants.DataType.Vec4
self.blender_attributes.append(attr_color_x)
def __get_color_attribute_data(self, attr):
data_dots_edges = None

View File

@ -346,6 +346,12 @@ class VExportTree:
new_delta or delta,
blender_children,
is_children_in_collection=True)
# Some objects are parented to instance collection
for child in blender_children[blender_object]:
self.recursive_node_traverse(child, None, node.uuid, node.matrix_world,
new_delta or delta, blender_children)
else:
# Manage children objects
for child in blender_object.instance_collection.objects:

View File

@ -203,7 +203,10 @@ class ExportImage:
# Unhappy path = we need to create the image that self.fills or self.stores describes
if self.numpy_calc is None:
return self.__encode_unhappy(export_settings), None
if self.__unhappy_is_udim():
return self.__encode_unhappy_udim(export_settings), None
else:
return self.__encode_unhappy(export_settings), None
else:
pixels, width, height, factor = self.numpy_calc(self.stored, export_settings)
return self.__encode_from_numpy_array(pixels, (width, height), export_settings), factor
@ -215,6 +218,71 @@ class ExportImage:
return self.__encode_from_image_tile(
self.fills[list(self.fills.keys())[0]].image, export_settings['current_udim_info']['tile'], export_settings)
def __unhappy_is_udim(self):
return any(isinstance(fill, FillImageTile) for fill in self.fills.values())
def __encode_unhappy_udim(self, export_settings) -> bytes:
# We need to assemble the image out of channels.
# Do it with numpy and image.pixels of the right UDIM tile.
images = []
for fill in self.fills.values():
if isinstance(fill, FillImageTile):
if (fill.image, fill.tile) not in images:
images.append((fill.image, fill.tile))
export_settings['exported_images'][fill.image.name] = 2 # 2 = partially used
if not images:
# No ImageFills; use a 1x1 white pixel
pixels = np.array([1.0, 1.0, 1.0, 1.0], np.float32)
return self.__encode_from_numpy_array(pixels, (1, 1), export_settings)
# We need to open the original UDIM image tile to get size & pixel data
original_image_sizes = []
for image, tile in images:
src_path = bpy.path.abspath(image.filepath_raw).replace("<UDIM>", tile)
with TmpImageGuard() as guard:
guard.image = bpy.data.images.load(
src_path,
)
original_image_sizes.append((guard.image.size[0], guard.image.size[1]))
width = max(image_size[0] for image_size in original_image_sizes)
height = max(image_size[1] for image_size in original_image_sizes)
out_buf = np.ones(width * height * 4, np.float32)
tmp_buf = np.empty(width * height * 4, np.float32)
for idx, (image, tile) in enumerate(images):
if original_image_sizes[idx][0] == width and original_image_sizes[idx][1] == height:
src_path = bpy.path.abspath(image.filepath_raw).replace("<UDIM>", tile)
with TmpImageGuard() as guard:
guard.image = bpy.data.images.load(
src_path,
)
guard.image.pixels.foreach_get(tmp_buf)
else:
# Image is the wrong size; make a temp copy and scale it.
src_path = bpy.path.abspath(image.filepath_raw).replace("<UDIM>", tile)
with TmpImageGuard() as guard:
guard.image = bpy.data.images.load(
src_path,
)
tmp_image = guard.image
tmp_image.scale(width, height)
tmp_image.pixels.foreach_get(tmp_buf)
# Copy any channels for this image to the output
for dst_chan, fill in self.fills.items():
if isinstance(fill, FillImageTile) and fill.image == image:
out_buf[int(dst_chan)::4] = tmp_buf[int(fill.src_chan)::4]
elif isinstance(fill, FillWith):
out_buf[int(dst_chan)::4] = fill.value
tmp_buf = None # GC this
return self.__encode_from_numpy_array(out_buf, (width, height), export_settings)
def __encode_unhappy(self, export_settings) -> bytes:
# We need to assemble the image out of channels.
# Do it with numpy and image.pixels.
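Aside on the channel packing in `__encode_unhappy_udim` above: in a flat RGBA buffer every fourth float belongs to one channel, so `out_buf[int(dst_chan)::4] = tmp_buf[int(fill.src_chan)::4]` moves a whole channel at once. A minimal standalone sketch of that step, using plain numpy with made-up channel indices (no Blender required):

import numpy as np

# Two RGBA pixels, flattened the way image.pixels.foreach_get() returns them.
src = np.array([0.1, 0.2, 0.3, 1.0,
                0.4, 0.5, 0.6, 1.0], np.float32)
out = np.ones(8, np.float32)  # start opaque white, like out_buf above

out[0::4] = src[1::4]  # route the source G channel into the output R channel
out[2::4] = 0.5        # fill the output B channel with a constant, like FillWith

print(out.reshape(-1, 4))
# [[0.2 1.  0.5 1. ]
#  [0.5 1.  0.5 1. ]]

The strided assignment keeps everything in one flat allocation per image, which is why the exporter can reuse a single tmp_buf across tiles.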

View File

@ -12,7 +12,7 @@ from ....io.exp import gltf2_io_binary_data, gltf2_io_image_data
from ....io.com import gltf2_io_debug
from ....io.exp.gltf2_io_user_extensions import export_user_extensions
from ..gltf2_blender_gather_cache import cached
from .extensions.gltf2_blender_image import Channel, ExportImage, FillImage
from .extensions.gltf2_blender_image import Channel, ExportImage, FillImage, FillImageTile
from .gltf2_blender_search_node_tree import get_texture_node_from_socket, detect_anisotropy_nodes
@ -36,7 +36,10 @@ def gather_image(
return None, None, None, None
mime_type = __gather_mime_type(blender_shader_sockets, image_data, export_settings)
name = __gather_name(image_data, export_settings)
name = __gather_name(image_data, use_tile, export_settings)
if use_tile is not None:
name = name.replace("<UDIM>", str(export_settings['current_udim_info']['tile']))
factor = None
@ -153,12 +156,18 @@ def __gather_mime_type(sockets, export_image, export_settings):
return "image/jpeg"
def __gather_name(export_image, export_settings):
def __gather_name(export_image, use_tile, export_settings):
if export_image.original is None:
# Find all Blender images used in the ExportImage
if use_tile is not None:
FillCheck = FillImageTile
else:
FillCheck = FillImage
imgs = []
for fill in export_image.fills.values():
if isinstance(fill, FillImage):
if isinstance(fill, FillCheck):
img = fill.image
if img not in imgs:
imgs.append(img)
@ -404,7 +413,7 @@ def __is_blender_image_a_webp(image: bpy.types.Image) -> bool:
def get_gltf_image_from_blender_image(blender_image_name, export_settings):
image_data = ExportImage.from_blender_image(bpy.data.images[blender_image_name])
name = __gather_name(image_data, export_settings)
name = __gather_name(image_data, None, export_settings)
mime_type = __get_mime_type_of_image(blender_image_name, export_settings)
uri, _ = __gather_uri(image_data, mime_type, name, export_settings)

View File

@ -57,7 +57,7 @@ def gather_material(blender_material, export_settings):
"""
if not __filter_material(blender_material, export_settings):
return None, {"uv_info": {}, "vc_info": {'color': None, 'alpha': None,
'color_type': None, 'alpha_type': None}, "udim_info": {}}
'color_type': None, 'alpha_type': None, 'alpha_mode': "OPAQUE"}, "udim_info": {}}
# Reset exported images / textures nodes
export_settings['exported_texture_nodes'] = []
@ -476,7 +476,7 @@ def __export_unlit(blender_material, export_settings):
blender_material.use_nodes,
export_settings)
if info is None:
return None, {}, {"color": None, "alpha": None, "color_type": None, "alpha_type": None}, {}
return None, {}, {"color": None, "alpha": None, "color_type": None, "alpha_type": None, "alpha_mode": "OPAQUE"}, {}
base_color_texture, uvmap_info, udim_info = gltf2_unlit.gather_base_color_texture(info, export_settings)
@ -561,7 +561,12 @@ def get_final_material(mesh, blender_material, attr_indices, base_material, uvma
elif v['type'] == 'Active':
indices[m] = get_active_uvmap_index(mesh)
elif v['type'] == "Attribute":
indices[m] = attr_indices[v['value']]
# This can be a regular UVMap or a custom attribute
i = mesh.uv_layers.find(v['value'])
if i >= 0:
indices[m] = i
else:
indices[m] = attr_indices[v['value']]
# Now we have all needed indices, let's create a set that can be used for
# caching, so containing all possible textures
@ -681,7 +686,8 @@ def get_base_material(material_idx, materials, export_settings):
"color": None,
"alpha": None,
"color_type": None,
"alpha_type": None
"alpha_type": None,
"alpha_mode": "OPAQUE"
},
"udim_info": {}
}
@ -695,8 +701,11 @@ def get_base_material(material_idx, materials, export_settings):
if material is None:
# If no material, the mesh can still have vertex color
# So, retrieving it
material_info["vc_info"] = {"color_type": "active", "alpha_type": "active"}
# So, retrieving it if the user requests it
if export_settings['gltf_active_vertex_color_when_no_material'] is True:
material_info["vc_info"] = {"color_type": "active", "alpha_type": "active"}
# VC will have alpha, as there is no material to know if alpha is used or not
material_info["vc_info"]["alpha_mode"] = "BLEND"
return material, material_info

View File

@ -15,13 +15,14 @@ from .gltf2_blender_search_node_tree import \
get_const_from_default_value_socket, \
get_socket, \
get_factor_from_socket, \
gather_alpha_info
gather_alpha_info, \
gather_color_info
@cached
def gather_material_pbr_metallic_roughness(blender_material, orm_texture, export_settings):
if not __filter_pbr_material(blender_material, export_settings):
return None, {}, {'color': None, 'alpha': None, 'color_type': None, 'alpha_type': None}, {}
return None, {}, {'color': None, 'alpha': None, 'color_type': None, 'alpha_type': None, 'alpha_mode': "OPAQUE"}, {}
uvmap_infos = {}
udim_infos = {}
@ -62,16 +63,20 @@ def __filter_pbr_material(blender_material, export_settings):
def __gather_base_color_factor(blender_material, export_settings):
if not blender_material.use_nodes:
return [*blender_material.diffuse_color[:3], 1.0], {"color": None,
"alpha": None, "color_type": None, "alpha_type": None}
return [*blender_material.diffuse_color[:3],
1.0], {"color": None, "alpha": None, "color_type": None, "alpha_type": None, "alpha_mode": "OPAQUE"}
rgb, alpha = None, None
vc_info = {"color": None, "alpha": None, "color_type": None, "alpha_type": None, "alpha_mode": "OPAQUE"}
path_alpha = None
path = None
alpha_socket = get_socket(blender_material.node_tree, blender_material.use_nodes, "Alpha")
if alpha_socket.socket is not None and isinstance(alpha_socket.socket, bpy.types.NodeSocket):
alpha_info = gather_alpha_info(alpha_socket.to_node_nav())
vc_info['alpha'] = alpha_info['alphaColorAttrib']
vc_info['alpha_type'] = alpha_info['alphaColorAttribType']
vc_info['alpha_mode'] = alpha_info['alphaMode']
alpha = alpha_info['alphaFactor']
path_alpha = alpha_info['alphaPath']
@ -85,7 +90,11 @@ def __gather_base_color_factor(blender_material, export_settings):
blender_material.node_tree, blender_material.use_nodes, "BaseColorFactor")
if base_color_socket.socket is not None and isinstance(base_color_socket.socket, bpy.types.NodeSocket):
if export_settings['gltf_image_format'] != "NONE":
rgb, path = get_factor_from_socket(base_color_socket, kind='RGB')
rgb_vc_info = gather_color_info(base_color_socket.to_node_nav())
vc_info['color'] = rgb_vc_info['colorAttrib']
vc_info['color_type'] = rgb_vc_info['colorAttribType']
rgb = rgb_vc_info['colorFactor']
path = rgb_vc_info['colorPath']
else:
rgb, path = get_const_from_default_value_socket(base_color_socket, kind='RGB')
@ -115,8 +124,6 @@ def __gather_base_color_factor(blender_material, export_settings):
rgba = [*rgb, alpha]
vc_info = get_vertex_color_info(base_color_socket, alpha_socket, export_settings)
if rgba == [1, 1, 1, 1]:
return None, vc_info
return rgba, vc_info

View File

@ -184,7 +184,7 @@ def __gather_sampler(blender_shader_sockets, export_settings):
if len(first_valid_shader_node.group_path) > 0:
# Retrieving the blender material using this shader tree
for mat in bpy.data.materials:
if mat.use_nodes is True and id(mat.node_tree) == id(first_valid_shader_node.group_path[0]):
if mat.use_nodes is True and id(mat.node_tree) == id(first_valid_shader_node.group_path[0].original):
group_path_str += mat.name # TODO if linked, we can have multiple materials with same name...
break
if len(first_valid_shader_node.group_path) > 1:

View File

@ -259,6 +259,9 @@ def __gather_texture_transform_and_tex_coord(primary_socket, export_settings):
elif node and node.node and node.node.type == 'ATTRIBUTE' \
and node.node.attribute_type == "GEOMETRY" \
and node.node.attribute_name:
# If this attribute is Face Corner / 2D Vector, this is a UV map
# So we can use it as a UV map
# But this will be checked later, when we know the mesh
uvmap_info['type'] = 'Attribute'
uvmap_info['value'] = node.node.attribute_name

View File

@ -390,6 +390,54 @@ class NodeNav:
return None, None
# Gather information about factor and vertex color from the Color socket
# The general form for color is
# color = factor * color attribute * texture
def gather_color_info(color_nav):
info = {
'colorFactor': None,
'colorAttrib': None,
'colorAttribType': None,
'colorPath': None,
}
# Reads the factor and color attribute by checking for variations on
# -> [Multiply by Factor] -> [Multiply by Color Attrib] ->
for _ in range(2): # Twice, to handle both factor and attrib
# No factor found yet?
if info['colorFactor'] is None:
a, color_path = color_nav.get_constant()
if a is not None:
info['colorFactor'] = a[:3]
info['colorPath'] = color_path
break
a, color_path = detect_multiply_by_constant(color_nav)
if a is not None:
info['colorFactor'] = a[:3]
info['colorPath'] = color_path
continue
# No color attrib found yet?
if info['colorAttrib'] is None:
attr = get_color_attrib(color_nav)
if attr is not None:
info['colorAttrib'] = attr
info['colorAttribType'] = 'active' if attr == "" else 'name'
break
attr = detect_multiply_by_color_attrib(color_nav)
if attr is not None:
info['colorAttrib'] = attr
info['colorAttribType'] = 'active' if attr == "" else 'name'
continue
break
return info
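For reference, a hedged illustration of what the two-pass loop above is meant to recover; the node chain and the values below are hypothetical, not taken from a real .blend:

# Hypothetical chain feeding Base Color:
#   RGB(0.5, 0.5, 0.5) and a Color Attribute node reading "Col",
#   combined by a Mix(MULTIPLY, Factor=1) node.
# Walking that chain, gather_color_info() should come back with:
expected = {
    'colorFactor': [0.5, 0.5, 0.5],  # the constant multiplier (a[:3])
    'colorAttrib': 'Col',            # named attribute; '' means the active color
    'colorAttribType': 'name',
    'colorPath': None,               # hypothetical; set when the factor socket has an RNA path
}

A bare constant on the socket short-circuits the loop on the first pass, and a bare color attribute does the same on the second; the MULTIPLY detection only matters when both are present.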
# Gather information about alpha from the Alpha socket. Alpha has the
# general form
#
@ -403,6 +451,7 @@ def gather_alpha_info(alpha_nav):
'alphaCutoff': None,
'alphaFactor': None,
'alphaColorAttrib': None,
'alphaColorAttribType': None,
'alphaPath': None,
}
if not alpha_nav:
@ -443,11 +492,13 @@ def gather_alpha_info(alpha_nav):
attr = get_color_attrib(alpha_nav)
if attr is not None:
info['alphaColorAttrib'] = attr
info['alphaColorAttribType'] = 'active' if attr == "" else 'name'
break
attr = detect_multiply_by_color_attrib(alpha_nav)
if attr is not None:
info['alphaColorAttrib'] = attr
info['alphaColorAttribType'] = 'active' if attr == "" else 'name'
continue
break
@ -519,7 +570,7 @@ def get_multiply_factors(nav):
prev.node.type == 'MIX' and
prev.node.data_type == 'RGBA' and
prev.node.blend_type == 'MULTIPLY' and
prev.get_constant('Fac') == 1
prev.get_constant('Factor')[0] == 1
)
if is_mul:
fac1 = prev
@ -607,8 +658,7 @@ def get_color_attrib(nav):
if nav.node.attribute_type == 'GEOMETRY':
# Does NOT use color attribute when blank
name = nav.node.attribute_name
if name:
return name
return name # Fixed name or "" for active color
return None
@ -865,6 +915,7 @@ def get_vertex_color_info(color_socket, alpha_socket, export_settings):
attribute_alpha = None
attribute_color_type = None
attribute_alpha_type = None
alpha_mode = "OPAQUE"
# Retrieve Attribute used as vertex color for Color
if color_socket is not None and color_socket.socket is not None:
@ -897,12 +948,14 @@ def get_vertex_color_info(color_socket, alpha_socket, export_settings):
elif alpha_info['alphaColorAttrib'] is not None:
attribute_alpha = alpha_info['alphaColorAttrib']
attribute_alpha_type = 'name'
alpha_mode = alpha_info['alphaMode']
return {
"color": attribute_color,
"alpha": attribute_alpha,
"color_type": attribute_color_type,
"alpha_type": attribute_alpha_type}
"alpha_type": attribute_alpha_type,
'alpha_mode': alpha_mode}
def get_attribute_name(socket, export_settings):

View File

@ -22,9 +22,9 @@ class ImageData:
return hash(self._data)
def adjusted_name(self):
regex_dot = re.compile("\\.")
regex_dot = re.compile(r"\.")
adjusted_name = re.sub(regex_dot, "_", self.name)
new_name = "".join([char for char in adjusted_name if char not in "!#$&'()*+,/:;<>?@[\\]^`{|}~"])
new_name = "".join([char for char in adjusted_name if char not in r"!#$&'()*+,/:;<>?@[\]^`{|}~"])
return new_name
@property

View File

@ -1148,22 +1148,6 @@ class PREFERENCES_OT_studiolight_copy_settings(Operator):
return {'CANCELLED'}
class PREFERENCES_OT_studiolight_show(Operator):
"""Show light preferences"""
bl_idname = "preferences.studiolight_show"
bl_label = ""
bl_options = {'INTERNAL'}
@classmethod
def poll(cls, _context):
return bpy.ops.screen.userpref_show.poll()
def execute(self, context):
context.preferences.active_section = 'LIGHTS'
bpy.ops.screen.userpref_show('INVOKE_DEFAULT')
return {'FINISHED'}
class PREFERENCES_OT_script_directory_new(Operator):
bl_idname = "preferences.script_directory_add"
bl_label = "Add Python Script Directory"
@ -1243,7 +1227,6 @@ classes = (
PREFERENCES_OT_studiolight_new,
PREFERENCES_OT_studiolight_uninstall,
PREFERENCES_OT_studiolight_copy_settings,
PREFERENCES_OT_studiolight_show,
PREFERENCES_OT_script_directory_new,
PREFERENCES_OT_script_directory_remove,
)

View File

@ -3288,9 +3288,6 @@ class WM_MT_splash_quick_setup(Menu):
if hasattr(kc_prefs, "spacebar_action"):
col.row().prop(kc_prefs, "spacebar_action", text="Spacebar Action")
# Network.
col.row().prop(prefs.system, "use_online_access")
# Save Preferences.
sub = col.column()
sub.separator(factor=2)

View File

@ -6801,7 +6801,7 @@ class VIEW3D_PT_shading_lighting(Panel):
)
col = split.column()
col.operator("preferences.studiolight_show", emboss=False, text="", icon='PREFERENCES')
col.operator("screen.userpref_show", emboss=False, text="", icon='PREFERENCES').section = 'LIGHTS'
split = layout.split(factor=0.9)
col = split.column()
@ -6818,7 +6818,7 @@ class VIEW3D_PT_shading_lighting(Panel):
sub.template_icon_view(shading, "studio_light", scale_popup=3.0)
col = split.column()
col.operator("preferences.studiolight_show", emboss=False, text="", icon='PREFERENCES')
col.operator("screen.userpref_show", emboss=False, text="", icon='PREFERENCES').section = 'LIGHTS'
col.operator("view3d.toggle_matcap_flip", emboss=False, text="", icon='ARROW_LEFTRIGHT')
elif shading.type == 'MATERIAL':
@ -6834,7 +6834,7 @@ class VIEW3D_PT_shading_lighting(Panel):
sub.template_icon_view(shading, "studio_light", scale_popup=3)
col = split.column()
col.operator("preferences.studiolight_show", emboss=False, text="", icon='PREFERENCES')
col.operator("screen.userpref_show", emboss=False, text="", icon='PREFERENCES').section = 'LIGHTS'
split = layout.split(factor=0.9)
col = split.column()
@ -6865,7 +6865,7 @@ class VIEW3D_PT_shading_lighting(Panel):
sub.template_icon_view(shading, "studio_light", scale_popup=3)
col = split.column()
col.operator("preferences.studiolight_show", emboss=False, text="", icon='PREFERENCES')
col.operator("screen.userpref_show", emboss=False, text="", icon='PREFERENCES').section = 'LIGHTS'
split = layout.split(factor=0.9)
col = split.column()

View File

@ -204,8 +204,7 @@ bUserExtensionRepo *BKE_preferences_extension_repo_add_default(UserDef *userdef)
STRNCPY(repo->remote_url, "https://extensions.blender.org/api/v1/extensions");
/* Disable `blender.org` by default, the initial "Online Preferences" section gives
* the option to enable this. */
repo->flag |= USER_EXTENSION_REPO_FLAG_USE_REMOTE_URL | USER_EXTENSION_REPO_FLAG_DISABLED |
USER_EXTENSION_REPO_FLAG_SYNC_ON_STARTUP;
repo->flag |= USER_EXTENSION_REPO_FLAG_USE_REMOTE_URL | USER_EXTENSION_REPO_FLAG_SYNC_ON_STARTUP;
return repo;
}

View File

@ -749,7 +749,10 @@ static void grease_pencil_geom_batch_ensure(Object &object,
GreasePencilColorVert &c_vert) {
const float3 pos = math::transform_point(layer_space_to_object_space, positions[point_i]);
copy_v3_v3(s_vert.pos, pos);
s_vert.radius = radii[point_i] * ((end_cap == GP_STROKE_CAP_TYPE_ROUND) ? 1.0f : -1.0f);
/* GP data itself does not constrain radii to be positive, but drawing code expects it, and
* uses negative values as a special 'flag' to get rounded caps. */
s_vert.radius = math::max(radii[point_i], 0.0f) *
((end_cap == GP_STROKE_CAP_TYPE_ROUND) ? 1.0f : -1.0f);
/* Convert to legacy "pixel" space. We divide here, because the shader expects the values to
* be in the `px` space rather than world space. Otherwise the values will get clamped. */
s_vert.radius /= bke::greasepencil::LEGACY_RADIUS_CONVERSION_FACTOR;

View File

@ -23,14 +23,7 @@ static void extract_tris_mesh(const MeshRenderData &mr,
const Span<int3> corner_tris = mr.mesh->corner_tris();
if (!face_sorted.face_tri_offsets) {
/* There are no hidden faces and no reordering is necessary to group triangles with the same
* material. The corner indices from #Mesh::corner_tris() can be copied directly to the GPU
* without the usual CPU-side copy owned by the index buffer. Crucially, this assumes that the
* data is uploaded to the GPU *before* the dependency graph's evaluated state is cleared (and
* with it, the evaluated mesh's triangulation data).
*
* Eventually these local copies should be completely removed, and code should write directly
* to GPU memory, but even then it could be helpful to know that the data already exists
* contiguously, owned elsewhere by Blender. */
* material. The corner indices from #Mesh::corner_tris() can be copied directly to the GPU. */
BLI_assert(face_sorted.visible_tris_num == corner_tris.size());
GPU_indexbuf_build_in_place_from_memory(&ibo,
GPU_PRIM_TRIS,

View File

@ -244,7 +244,7 @@ static void extract_edit_data_init_subdiv(const DRWSubdivCache &subdiv_cache,
void *buf,
void *data)
{
orig_index_face gpu::VertBuf *vbo = static_cast<gpu::VertBuf *>(buf);
gpu::VertBuf *vbo = static_cast<gpu::VertBuf *>(buf);
GPU_vertbuf_init_with_format(vbo, get_edit_data_format());
GPU_vertbuf_data_alloc(vbo, subdiv_full_vbo_size(mr, subdiv_cache));
EditLoopData *vbo_data = (EditLoopData *)GPU_vertbuf_get_data(vbo);
@ -379,4 +379,3 @@ constexpr MeshExtract create_extractor_edit_data()
const MeshExtract extract_edit_data = create_extractor_edit_data();
} // namespace blender::draw
orig_index_face

View File

@ -362,7 +362,10 @@ void ANIM_animdata_update(bAnimContext *ac, ListBase *anim_data)
ANIMTYPE_ANIMDATA,
ANIMTYPE_NLAACTION,
ANIMTYPE_NLATRACK,
ANIMTYPE_NLACURVE))
ANIMTYPE_NLACURVE,
ANIMTYPE_GREASE_PENCIL_LAYER,
ANIMTYPE_GREASE_PENCIL_LAYER_GROUP,
ANIMTYPE_GREASE_PENCIL_DATABLOCK))
{
if (ale->update & ANIM_UPDATE_DEPS) {
ale->update &= ~ANIM_UPDATE_DEPS;

View File

@ -424,9 +424,9 @@ static int pose_clear_paths_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}
static std::string pose_clear_paths_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
static std::string pose_clear_paths_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
const bool only_selected = RNA_boolean_get(ptr, "only_selected");
if (only_selected) {
@ -444,7 +444,7 @@ void POSE_OT_paths_clear(wmOperatorType *ot)
/* api callbacks */
ot->exec = pose_clear_paths_exec;
ot->poll = ED_operator_posemode_exclusive;
ot->get_description = pose_clear_paths_description;
ot->get_description = pose_clear_paths_get_description;
/* flags */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;

View File

@ -361,9 +361,9 @@ static bool asset_clear_poll(bContext *C, const Span<PointerRNA> ids)
static std::string asset_clear_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *values)
PointerRNA *ptr)
{
const bool set_fake_user = RNA_boolean_get(values, "set_fake_user");
const bool set_fake_user = RNA_boolean_get(ptr, "set_fake_user");
if (!set_fake_user) {
return "";
}

View File

@ -644,7 +644,7 @@ static int grease_pencil_primitive_invoke(bContext *C, wmOperator *op, const wmE
/* Initialize helper class for projecting screen space coordinates. */
DrawingPlacement placement = DrawingPlacement(
*vc.scene, *vc.region, *view3d, *vc.obact, *grease_pencil->get_active_layer());
*vc.scene, *vc.region, *view3d, *vc.obact, grease_pencil->get_active_layer());
if (placement.use_project_to_surface()) {
placement.cache_viewport_depths(CTX_data_depsgraph_pointer(C), vc.region, view3d);
}

View File

@ -201,7 +201,7 @@ class StepObject {
int layers_num_ = 0;
bke::greasepencil::LayerGroup root_group_;
std::string active_layer_name_;
std::string active_node_name_;
CustomData layers_data_ = {};
private:
@ -263,8 +263,8 @@ class StepObject {
CustomData_copy(
&grease_pencil.layers_data, &layers_data_, eCustomDataMask(CD_MASK_ALL), layers_num_);
if (grease_pencil.has_active_layer()) {
active_layer_name_ = grease_pencil.get_active_layer()->name();
if (grease_pencil.active_node != nullptr) {
active_node_name_ = grease_pencil.get_active_node()->name();
}
root_group_ = grease_pencil.root_group();
@ -279,11 +279,11 @@ class StepObject {
grease_pencil.root_group_ptr = MEM_new<bke::greasepencil::LayerGroup>(__func__, root_group_);
BLI_assert(layers_num_ == grease_pencil.layers().size());
if (!active_layer_name_.empty()) {
bke::greasepencil::TreeNode *active_node = grease_pencil.root_group().find_node_by_name(
active_layer_name_);
if (active_node && active_node->is_layer()) {
grease_pencil.set_active_layer(&active_node->as_layer());
if (!active_node_name_.empty()) {
if (bke::greasepencil::TreeNode *active_node = grease_pencil.root_group().find_node_by_name(
active_node_name_))
{
grease_pencil.set_active_node(active_node);
}
}

View File

@ -37,10 +37,11 @@ DrawingPlacement::DrawingPlacement(const Scene &scene,
const ARegion &region,
const View3D &view3d,
const Object &eval_object,
const bke::greasepencil::Layer &layer)
const bke::greasepencil::Layer *layer)
: region_(&region), view3d_(&view3d)
{
layer_space_to_world_space_ = layer.to_world_space(eval_object);
layer_space_to_world_space_ = (layer != nullptr) ? layer->to_world_space(eval_object) :
eval_object.object_to_world();
world_space_to_layer_space_ = math::invert(layer_space_to_world_space_);
/* Initialize DrawingPlacementPlane from toolsettings. */
switch (scene.toolsettings->gp_sculpt.lock_axis) {

View File

@ -102,7 +102,7 @@ class DrawingPlacement {
const ARegion &region,
const View3D &view3d,
const Object &eval_object,
const bke::greasepencil::Layer &layer);
const bke::greasepencil::Layer *layer);
~DrawingPlacement();
public:

View File

@ -1405,18 +1405,16 @@ static int edbm_select_mode_invoke(bContext *C, wmOperator *op, const wmEvent *e
static std::string edbm_select_mode_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *values)
PointerRNA *ptr)
{
const int type = RNA_enum_get(values, "type");
const int type = RNA_enum_get(ptr, "type");
/* Because the special behaviors for shift and ctrl click depend on user input, they may be
* incorrect if the operator is used from a script or from a special button. So only return the
* specialized descriptions if only the "type" is set, which conveys that the operator is meant
* to be used with the logic in the `invoke` method. */
if (RNA_struct_property_is_set(values, "type") &&
!RNA_struct_property_is_set(values, "use_extend") &&
!RNA_struct_property_is_set(values, "use_expand") &&
!RNA_struct_property_is_set(values, "action"))
if (RNA_struct_property_is_set(ptr, "type") && !RNA_struct_property_is_set(ptr, "use_extend") &&
!RNA_struct_property_is_set(ptr, "use_expand") && !RNA_struct_property_is_set(ptr, "action"))
{
switch (type) {
case SCE_SELECT_VERTEX:

View File

@ -1520,9 +1520,9 @@ static int object_clear_paths_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}
static std::string object_clear_paths_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
static std::string object_clear_paths_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
const bool only_selected = RNA_boolean_get(ptr, "only_selected");
if (only_selected) {
@ -1540,7 +1540,7 @@ void OBJECT_OT_paths_clear(wmOperatorType *ot)
/* api callbacks */
ot->exec = object_clear_paths_exec;
ot->poll = ED_operator_object_active_editable;
ot->get_description = object_clear_paths_description;
ot->get_description = object_clear_paths_get_description;
/* flags */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;

View File

@ -2039,9 +2039,9 @@ static int modifier_apply_as_shapekey_invoke(bContext *C, wmOperator *op, const
static std::string modifier_apply_as_shapekey_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *values)
PointerRNA *ptr)
{
bool keep = RNA_boolean_get(values, "keep_modifier");
bool keep = RNA_boolean_get(ptr, "keep_modifier");
if (keep) {
return TIP_("Apply modifier as a new shapekey and keep it in the stack");
}

View File

@ -704,11 +704,11 @@ static int shape_key_lock_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}
static std::string shape_key_lock_description(bContext * /*C*/,
wmOperatorType * /*op*/,
PointerRNA *params)
static std::string shape_key_lock_get_description(bContext * /*C*/,
wmOperatorType * /*op*/,
PointerRNA *ptr)
{
const int action = RNA_enum_get(params, "action");
const int action = RNA_enum_get(ptr, "action");
switch (action) {
case SHAPE_KEY_LOCK:
@ -738,7 +738,7 @@ void OBJECT_OT_shape_key_lock(wmOperatorType *ot)
/* api callbacks */
ot->poll = shape_key_exists_poll;
ot->exec = shape_key_lock_exec;
ot->get_description = shape_key_lock_description;
ot->get_description = shape_key_lock_get_description;
/* flags */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;

View File

@ -2984,12 +2984,12 @@ static int vertex_group_lock_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}
static std::string vertex_group_lock_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *params)
static std::string vertex_group_lock_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
int action = RNA_enum_get(params, "action");
int mask = RNA_enum_get(params, "mask");
int action = RNA_enum_get(ptr, "action");
int mask = RNA_enum_get(ptr, "mask");
/* NOTE: constructing the following string literals can be done in a less verbose way,
* however the resulting strings can't be usefully translated (via `TIP_`). */
@ -3058,7 +3058,7 @@ void OBJECT_OT_vertex_group_lock(wmOperatorType *ot)
/* api callbacks */
ot->poll = vertex_group_poll;
ot->exec = vertex_group_lock_exec;
ot->get_description = vertex_group_lock_description;
ot->get_description = vertex_group_lock_get_description;
/* flags */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;

View File

@ -1317,9 +1317,9 @@ static int screen_opengl_render_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}
static std::string screen_opengl_render_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
static std::string screen_opengl_render_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
if (!RNA_boolean_get(ptr, "animation")) {
return "";
@ -1344,7 +1344,7 @@ void RENDER_OT_opengl(wmOperatorType *ot)
ot->idname = "RENDER_OT_opengl";
/* api callbacks */
ot->get_description = screen_opengl_render_description;
ot->get_description = screen_opengl_render_get_description;
ot->invoke = screen_opengl_render_invoke;
ot->exec = screen_opengl_render_exec; /* blocking */
ot->modal = screen_opengl_render_modal;

View File

@ -15,6 +15,7 @@ set(INC
../../makesrna
../../sequencer
../../windowmanager
../../../../extern/fmtlib/include
# RNA_prototypes.h
${CMAKE_BINARY_DIR}/source/blender/makesrna
)

View File

@ -8,6 +8,7 @@
#include <cmath>
#include <cstring>
#include <fmt/format.h>
#include "MEM_guardedalloc.h"
@ -5254,6 +5255,22 @@ static int userpref_show_exec(bContext *C, wmOperator *op)
return OPERATOR_CANCELLED;
}
static std::string userpref_show_get_description(bContext *C,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
PropertyRNA *prop = RNA_struct_find_property(ptr, "section");
if (RNA_property_is_set(ptr, prop)) {
int section = RNA_property_enum_get(ptr, prop);
const char *section_name;
if (RNA_property_enum_name_gettexted(C, ptr, prop, section, &section_name)) {
return fmt::format(TIP_("Show {} preferences"), section_name);
}
}
/* Fallback to default. */
return "";
}
static void SCREEN_OT_userpref_show(wmOperatorType *ot)
{
PropertyRNA *prop;
@ -5266,6 +5283,7 @@ static void SCREEN_OT_userpref_show(wmOperatorType *ot)
/* api callbacks */
ot->exec = userpref_show_exec;
ot->poll = ED_operator_screenactive_nobackground; /* Not in background as this opens a window. */
ot->get_description = userpref_show_get_description;
prop = RNA_def_enum(ot->srna,
"section",

View File

@ -960,7 +960,7 @@ bke::CurvesGeometry fill_strokes(const ViewContext &view_context,
const bool use_xray = false;
const float4x4 layer_to_world = layer.to_world_space(object);
ed::greasepencil::DrawingPlacement placement(scene, region, view3d, object_eval, layer);
ed::greasepencil::DrawingPlacement placement(scene, region, view3d, object_eval, &layer);
const float3 fill_point_world = math::transform_point(layer_to_world,
placement.project(fill_point_view));

View File

@ -624,7 +624,7 @@ void PaintOperation::on_stroke_begin(const bContext &C, const InputSample &start
const bke::greasepencil::Layer &layer = *grease_pencil->get_active_layer();
/* Initialize helper class for projecting screen space coordinates. */
placement_ = ed::greasepencil::DrawingPlacement(*scene, *region, *view3d, *eval_object, layer);
placement_ = ed::greasepencil::DrawingPlacement(*scene, *region, *view3d, *eval_object, &layer);
if (placement_.use_project_to_surface()) {
placement_.cache_viewport_depths(CTX_data_depsgraph_pointer(&C), region, view3d);
}

View File

@ -246,7 +246,7 @@ void GreasePencilStrokeOperationCommon::foreach_editable_drawing(
threading::parallel_for_each(drawings, [&](const MutableDrawingInfo &info) {
const Layer &layer = *grease_pencil.layer(info.layer_index);
ed::greasepencil::DrawingPlacement placement(scene, region, view3d, object_eval, layer);
ed::greasepencil::DrawingPlacement placement(scene, region, view3d, object_eval, &layer);
if (placement.use_project_to_surface()) {
placement.cache_viewport_depths(&depsgraph, &region, &view3d);
}

View File

@ -91,7 +91,7 @@ void GrabOperation::foreach_grabbed_drawing(
return;
}
ed::greasepencil::DrawingPlacement placement(scene, region, view3d, object_eval, layer);
ed::greasepencil::DrawingPlacement placement(scene, region, view3d, object_eval, &layer);
if (placement.use_project_to_surface()) {
placement.cache_viewport_depths(&depsgraph, &region, &view3d);
}
@ -148,7 +148,7 @@ void GrabOperation::on_stroke_begin(const bContext &C, const InputSample &start_
const bke::greasepencil::Layer &layer = *grease_pencil.layer(info.layer_index);
BLI_assert(grease_pencil.get_drawing_at(layer, info.frame_number) == &info.drawing);
ed::greasepencil::DrawingPlacement placement(scene, region, view3d, ob_eval, layer);
ed::greasepencil::DrawingPlacement placement(scene, region, view3d, ob_eval, &layer);
GreasePencilStrokeParams params = {*scene.toolsettings,
region,
ob_orig,

View File

@ -1541,7 +1541,7 @@ static void grease_pencil_brush_cursor_draw(PaintCursorContext *pcontext)
}
if (BKE_brush_use_locked_size(pcontext->scene, brush)) {
const bke::greasepencil::Layer &layer = *grease_pencil->get_active_layer();
const bke::greasepencil::Layer *layer = grease_pencil->get_active_layer();
const ed::greasepencil::DrawingPlacement placement(
*pcontext->scene, *pcontext->region, *pcontext->vc.v3d, *object, layer);
const float radius = BKE_brush_unprojected_radius_get(pcontext->scene, brush);

View File

@ -709,9 +709,9 @@ static int actkeys_paste_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}
static std::string actkeys_paste_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
static std::string actkeys_paste_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
/* Custom description if the 'flipped' option is used. */
if (RNA_boolean_get(ptr, "flipped")) {
@ -735,7 +735,7 @@ void ACTION_OT_paste(wmOperatorType *ot)
/* api callbacks */
// ot->invoke = WM_operator_props_popup; /* Better wait for action redo panel. */
ot->get_description = actkeys_paste_description;
ot->get_description = actkeys_paste_get_description;
ot->exec = actkeys_paste_exec;
ot->poll = ED_operator_action_active;

View File

@ -380,7 +380,9 @@ static int track_markers_modal(bContext *C, wmOperator * /*op*/, const wmEvent *
return OPERATOR_PASS_THROUGH;
}
static std::string track_markers_desc(bContext * /*C*/, wmOperatorType * /*ot*/, PointerRNA *ptr)
static std::string track_markers_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
const bool backwards = RNA_boolean_get(ptr, "backwards");
const bool sequence = RNA_boolean_get(ptr, "sequence");
@ -414,7 +416,7 @@ void CLIP_OT_track_markers(wmOperatorType *ot)
ot->invoke = track_markers_invoke;
ot->modal = track_markers_modal;
ot->poll = ED_space_clip_tracking_poll;
ot->get_description = track_markers_desc;
ot->get_description = track_markers_get_description;
/* flags */
ot->flag = OPTYPE_UNDO;

View File

@ -1848,9 +1848,9 @@ static int file_external_operation_exec(bContext *C, wmOperator *op)
return OPERATOR_CANCELLED;
}
static std::string file_external_operation_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
static std::string file_external_operation_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
const char *description = "";
RNA_enum_description(file_external_operation, RNA_enum_get(ptr, "operation"), &description);
@ -1868,7 +1868,7 @@ void FILE_OT_external_operation(wmOperatorType *ot)
/* api callbacks */
ot->exec = file_external_operation_exec;
ot->get_description = file_external_operation_description;
ot->get_description = file_external_operation_get_description;
/* flags */
ot->flag = OPTYPE_REGISTER; /* No undo! */

View File

@ -605,9 +605,9 @@ static int graphkeys_paste_exec(bContext *C, wmOperator *op)
return OPERATOR_FINISHED;
}
static std::string graphkeys_paste_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
static std::string graphkeys_paste_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
/* Custom description if the 'flipped' option is used. */
if (RNA_boolean_get(ptr, "flipped")) {
@ -633,7 +633,7 @@ void GRAPH_OT_paste(wmOperatorType *ot)
/* API callbacks */
// ot->invoke = WM_operator_props_popup; /* better wait for graph redo panel */
ot->get_description = graphkeys_paste_description;
ot->get_description = graphkeys_paste_get_description;
ot->exec = graphkeys_paste_exec;
ot->poll = graphop_editable_keyframes_poll;

View File

@ -565,7 +565,9 @@ static bool decimate_poll_property(const bContext * /*C*/, wmOperator *op, const
return true;
}
static std::string decimate_desc(bContext * /*C*/, wmOperatorType * /*ot*/, PointerRNA *ptr)
static std::string decimate_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
if (RNA_enum_get(ptr, "mode") == DECIM_ERROR) {
@ -602,7 +604,7 @@ void GRAPH_OT_decimate(wmOperatorType *ot)
/* API callbacks */
ot->poll_property = decimate_poll_property;
ot->get_description = decimate_desc;
ot->get_description = decimate_get_description;
ot->invoke = decimate_invoke;
ot->modal = graph_slider_modal;
ot->exec = decimate_exec;

View File

@ -473,10 +473,10 @@ static int node_add_group_asset_invoke(bContext *C, wmOperator *op, const wmEven
static std::string node_add_group_asset_get_description(bContext *C,
wmOperatorType * /*ot*/,
PointerRNA *values)
PointerRNA *ptr)
{
const asset_system::AssetRepresentation *asset =
asset::operator_asset_reference_props_get_asset_from_all_library(*C, *values, nullptr);
asset::operator_asset_reference_props_get_asset_from_all_library(*C, *ptr, nullptr);
if (!asset) {
return "";
}

View File

@ -1906,17 +1906,21 @@ static std::string node_socket_get_tooltip(const SpaceNode *snode,
}
if (inspection_strings.is_empty()) {
const bool is_extend = StringRef(socket.idname) == "NodeSocketVirtual";
const bNode &node = socket.owner_node();
if (node.is_reroute()) {
char reroute_name[MAX_NAME];
bke::nodeLabel(&ntree, &node, reroute_name, sizeof(reroute_name));
output << reroute_name;
}
else if (is_extend) {
output << TIP_("Connect a link to create a new socket");
}
else {
output << bke::nodeSocketLabel(&socket);
}
if (ntree.type == NTREE_GEOMETRY) {
if (ntree.type == NTREE_GEOMETRY && !is_extend) {
output << ".\n\n";
output << TIP_(
"Unknown socket value. Either the socket was not used or its value was not logged "

View File

@ -64,7 +64,7 @@ ListBase TreeDisplayIDOrphans::build_tree(const TreeSourceData &source_data)
/* Add the orphaned data-blocks - these will not be added with any subtrees attached. */
for (ID *id : List<ID>(lbarray[a])) {
if (ID_REAL_USERS(id) <= 0) {
if (ID_REFCOUNTING_USERS(id) <= 0) {
add_element((te) ? &te->subtree : &tree, id, nullptr, te, TSE_SOME_ID, 0);
}
}
@ -76,7 +76,7 @@ ListBase TreeDisplayIDOrphans::build_tree(const TreeSourceData &source_data)
bool TreeDisplayIDOrphans::datablock_has_orphans(ListBase &lb) const
{
for (ID *id : List<ID>(lb)) {
if (ID_REAL_USERS(id) <= 0) {
if (ID_REFCOUNTING_USERS(id) <= 0) {
return true;
}
}

View File

@ -1489,9 +1489,9 @@ static int sequencer_add_effect_strip_invoke(bContext *C,
return sequencer_add_effect_strip_exec(C, op);
}
static std::string sequencer_add_effect_strip_desc(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
static std::string sequencer_add_effect_strip_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
const int type = RNA_enum_get(ptr, "type");
@ -1554,7 +1554,7 @@ void SEQUENCER_OT_effect_strip_add(wmOperatorType *ot)
ot->exec = sequencer_add_effect_strip_exec;
ot->poll = ED_operator_sequencer_active_editable;
ot->poll_property = seq_effect_add_properties_poll;
ot->get_description = sequencer_add_effect_strip_desc;
ot->get_description = sequencer_add_effect_strip_get_description;
/* Flags. */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;

View File

@ -53,8 +53,6 @@ class IndexBuf {
bool is_subrange_ = false;
/** True if buffer only contains restart indices. */
bool is_empty_ = false;
/** #data_ is read-only, not owned by an index buffer. */
bool reference_data_ = false;
union {
/** Mapped buffer data. non-NULL indicates not yet sent to VRAM. */
@ -72,8 +70,7 @@ class IndexBuf {
uint min_index,
uint max_index,
GPUPrimType prim_type,
bool uses_restart_indices,
bool reference_data);
bool uses_restart_indices);
void init_subrange(IndexBuf *elem_src, uint start, uint length);
void init_build_on_device(uint index_len);

View File

@ -315,15 +315,13 @@ void IndexBuf::init(uint indices_len,
uint min_index,
uint max_index,
GPUPrimType prim_type,
bool uses_restart_indices,
bool reference_data)
bool uses_restart_indices)
{
is_init_ = true;
data_ = indices;
index_start_ = 0;
index_len_ = indices_len;
is_empty_ = min_index > max_index;
reference_data_ = reference_data;
/* Patch index buffer to remove restart indices from
* non-restart-compatible primitive types. Restart indices
@ -494,8 +492,7 @@ void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *builder, IndexBuf *elem)
builder->index_min,
builder->index_max,
builder->prim_type,
builder->uses_restart_indices,
false);
builder->uses_restart_indices);
builder->data = nullptr;
}
@ -513,8 +510,7 @@ void GPU_indexbuf_build_in_place_ex(GPUIndexBufBuilder *builder,
index_min,
index_max,
builder->prim_type,
uses_restart_indices,
false);
uses_restart_indices);
builder->data = nullptr;
}
@ -526,16 +522,15 @@ void GPU_indexbuf_build_in_place_from_memory(IndexBuf *ibo,
const int32_t index_max,
const bool uses_restart_indices)
{
/* If restart indices are used, they need to be stripped on Metal which would require a copy. */
BLI_assert(!uses_restart_indices);
const uint32_t indices_num = data_len * indices_per_primitive(prim_type);
ibo->init(indices_num,
const_cast<uint32_t *>(data),
index_min,
index_max,
prim_type,
uses_restart_indices,
true);
/* TODO: The need for this copy is meant to be temporary. The data should be uploaded directly to
* the GPU here rather than copied to an array owned by the IBO first. */
uint32_t *copy = static_cast<uint32_t *>(
MEM_malloc_arrayN(indices_num, sizeof(uint32_t), __func__));
threading::memory_bandwidth_bound_task(sizeof(uint32_t) * indices_num * 2, [&]() {
array_utils::copy(Span(data, indices_num), MutableSpan(copy, indices_num));
});
ibo->init(indices_num, copy, index_min, index_max, prim_type, uses_restart_indices);
}
void GPU_indexbuf_create_subrange_in_place(IndexBuf *elem,

View File

@ -139,10 +139,7 @@ void MTLIndexBuf::upload_data()
}
/* No need to keep copy of data_ in system memory. */
if (reference_data_) {
data_ = nullptr;
}
else {
if (data_) {
MEM_SAFE_FREE(data_);
}
}

View File

@ -36,12 +36,7 @@ void GLIndexBuf::bind()
/* Sends data to GPU. */
glBufferData(GL_ELEMENT_ARRAY_BUFFER, size, data_, GL_STATIC_DRAW);
/* No need to keep copy of data in system memory. */
if (reference_data_) {
data_ = nullptr;
}
else {
MEM_SAFE_FREE(data_);
}
MEM_SAFE_FREE(data_);
}
}

View File

@ -107,7 +107,7 @@ static void test_framebuffer_clear_multiple_color_multiple_attachments()
MEM_freeN(read_data1);
float4 *read_data2 = static_cast<float4 *>(GPU_texture_read(texture2, GPU_DATA_FLOAT, 0));
for (float4 pixel_color : Span<float4>(read_data1, size.x * size.y)) {
for (float4 pixel_color : Span<float4>(read_data2, size.x * size.y)) {
EXPECT_EQ(clear_color[1], pixel_color);
}
MEM_freeN(read_data2);

View File

@ -33,12 +33,7 @@ void VKIndexBuffer::ensure_updated()
VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::HostToDevice);
staging_buffer.host_buffer_get().update(data_);
staging_buffer.copy_to_device(context);
if (reference_data_) {
data_ = nullptr;
}
else {
MEM_SAFE_FREE(data_);
}
MEM_SAFE_FREE(data_);
}
void VKIndexBuffer::upload_data()

View File

@ -653,9 +653,35 @@ typedef struct PreviewImage {
PreviewImageRuntimeHandle *runtime;
} PreviewImage;
/**
* Amount of 'fake user' usages of this ID.
* Always 0 or 1.
*/
#define ID_FAKE_USERS(id) ((((const ID *)id)->flag & LIB_FAKEUSER) ? 1 : 0)
#define ID_REAL_USERS(id) (((const ID *)id)->us - ID_FAKE_USERS(id))
/**
* Amount of defined 'extra' shallow, runtime-only usages of this ID (typically from UI).
* Always 0 or 1.
*
* \warning May not actually be part of the total #ID.us count, see #ID_EXTRA_REAL_USERS.
*/
#define ID_EXTRA_USERS(id) (((const ID *)id)->tag & LIB_TAG_EXTRAUSER ? 1 : 0)
/**
* Amount of real 'extra' shallow, runtime-only usages of this ID (typically from UI).
* Always 0 or 1.
*
* \note Actual number of usages added to #ID.us by these extra usages. May be 0 even if there are
* some 'extra' usages of this ID, when there are also other 'normal' refcounting usages of it. */
#define ID_EXTRA_REAL_USERS(id) (((const ID *)id)->tag & LIB_TAG_EXTRAUSER_SET ? 1 : 0)
/**
* Amount of real usages of this ID (i.e. excluding the 'fake user' one, but including a potential
* 'extra' shallow/runtime usage).
*/
#define ID_REAL_USERS(id) (((const ID *)id)->us - ID_FAKE_USERS(id))
/**
* Amount of 'normal' refcounting usages of this ID (i.e. excluding the 'fake user' one, and a
* potential 'extra' shallow/runtime usage).
*/
#define ID_REFCOUNTING_USERS(id) (ID_REAL_USERS(id) - ID_EXTRA_REAL_USERS(id))
#define ID_CHECK_UNDO(id) \
((GS((id)->name) != ID_SCR) && (GS((id)->name) != ID_WM) && (GS((id)->name) != ID_WS))
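The relationship between these counters, as a small Python model; the bit values below are stand-ins, not the real DNA constants:

LIB_FAKEUSER = 1 << 0
LIB_TAG_EXTRAUSER = 1 << 1
LIB_TAG_EXTRAUSER_SET = 1 << 2

def users_breakdown(us, flag, tag):
    fake = 1 if flag & LIB_FAKEUSER else 0                 # ID_FAKE_USERS
    extra_real = 1 if tag & LIB_TAG_EXTRAUSER_SET else 0   # ID_EXTRA_REAL_USERS
    real = us - fake                                       # ID_REAL_USERS
    refcounting = real - extra_real                        # ID_REFCOUNTING_USERS
    return real, refcounting

# An ID with us == 2: one fake user plus one UI-only 'extra' user that was
# really added to ID.us. No normal references remain, so ID_REFCOUNTING_USERS
# reports 0 where ID_REAL_USERS would still report 1.
print(users_breakdown(2, LIB_FAKEUSER, LIB_TAG_EXTRAUSER | LIB_TAG_EXTRAUSER_SET))  # (1, 0)

This distinction is what lets the outliner's orphan view (the TreeDisplayIDOrphans change earlier in this diff) treat IDs whose only remaining usage is a shallow UI one as orphaned.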

View File

@ -270,11 +270,11 @@ void RNA_property_enum_items_gettexted_all(bContext *C,
bool RNA_property_enum_value(
bContext *C, PointerRNA *ptr, PropertyRNA *prop, const char *identifier, int *r_value);
bool RNA_property_enum_identifier(
bContext *C, PointerRNA *ptr, PropertyRNA *prop, int value, const char **identifier);
bContext *C, PointerRNA *ptr, PropertyRNA *prop, int value, const char **r_identifier);
bool RNA_property_enum_name(
bContext *C, PointerRNA *ptr, PropertyRNA *prop, int value, const char **name);
bContext *C, PointerRNA *ptr, PropertyRNA *prop, int value, const char **r_name);
bool RNA_property_enum_name_gettexted(
bContext *C, PointerRNA *ptr, PropertyRNA *prop, int value, const char **name);
bContext *C, PointerRNA *ptr, PropertyRNA *prop, int value, const char **r_name);
bool RNA_property_enum_item_from_value(
bContext *C, PointerRNA *ptr, PropertyRNA *prop, int value, EnumPropertyItem *r_item);

View File

@ -1960,7 +1960,7 @@ uint RNA_enum_items_count(const EnumPropertyItem *item)
}
bool RNA_property_enum_identifier(
bContext *C, PointerRNA *ptr, PropertyRNA *prop, const int value, const char **identifier)
bContext *C, PointerRNA *ptr, PropertyRNA *prop, const int value, const char **r_identifier)
{
const EnumPropertyItem *item = nullptr;
bool free;
@ -1968,7 +1968,7 @@ bool RNA_property_enum_identifier(
RNA_property_enum_items(C, ptr, prop, &item, nullptr, &free);
if (item) {
bool result;
result = RNA_enum_identifier(item, value, identifier);
result = RNA_enum_identifier(item, value, r_identifier);
if (free) {
MEM_freeN((void *)item);
}
@ -1978,7 +1978,7 @@ bool RNA_property_enum_identifier(
}
bool RNA_property_enum_name(
bContext *C, PointerRNA *ptr, PropertyRNA *prop, const int value, const char **name)
bContext *C, PointerRNA *ptr, PropertyRNA *prop, const int value, const char **r_name)
{
const EnumPropertyItem *item = nullptr;
bool free;
@ -1986,7 +1986,7 @@ bool RNA_property_enum_name(
RNA_property_enum_items(C, ptr, prop, &item, nullptr, &free);
if (item) {
bool result;
result = RNA_enum_name(item, value, name);
result = RNA_enum_name(item, value, r_name);
if (free) {
MEM_freeN((void *)item);
}
@ -1997,15 +1997,15 @@ bool RNA_property_enum_name(
}
bool RNA_property_enum_name_gettexted(
bContext *C, PointerRNA *ptr, PropertyRNA *prop, const int value, const char **name)
bContext *C, PointerRNA *ptr, PropertyRNA *prop, const int value, const char **r_name)
{
bool result;
result = RNA_property_enum_name(C, ptr, prop, value, name);
result = RNA_property_enum_name(C, ptr, prop, value, r_name);
if (result) {
if (!(prop->flag & PROP_ENUM_NO_TRANSLATE)) {
*name = BLT_translate_do_iface(prop->translation_context, *name);
*r_name = BLT_translate_do_iface(prop->translation_context, *r_name);
}
}
@ -2054,7 +2054,7 @@ bool RNA_property_enum_item_from_value_gettexted(
}
int RNA_property_enum_bitflag_identifiers(
bContext *C, PointerRNA *ptr, PropertyRNA *prop, const int value, const char **identifier)
bContext *C, PointerRNA *ptr, PropertyRNA *prop, const int value, const char **r_identifier)
{
const EnumPropertyItem *item = nullptr;
bool free;
@ -2062,7 +2062,7 @@ int RNA_property_enum_bitflag_identifiers(
RNA_property_enum_items(C, ptr, prop, &item, nullptr, &free);
if (item) {
int result;
result = RNA_enum_bitflag_identifiers(item, value, identifier);
result = RNA_enum_bitflag_identifiers(item, value, r_identifier);
if (free) {
MEM_freeN((void *)item);
}

View File

@ -454,7 +454,34 @@ static void rna_userdef_script_autoexec_update(Main * /*bmain*/,
USERDEF_TAG_DIRTY;
}
int rna_userdef_use_online_access_editable(const PointerRNA * /*ptr*/, const char **r_info)
static void rna_userdef_use_online_access_set(PointerRNA *ptr, bool value)
{
/* A `set` function is needed to clear the override flags. */
UserDef *userdef = (UserDef *)ptr->data;
if ((G.f & G_FLAG_INTERNET_ALLOW) == 0) {
if (G.f & G_FLAG_INTERNET_OVERRIDE_PREF_OFFLINE) {
/* The `editable` check should account for this, assert since this is security related. */
BLI_assert_unreachable();
return;
}
}
if (value) {
userdef->flag |= USER_INTERNET_ALLOW;
G.f |= G_FLAG_INTERNET_ALLOW;
}
else {
userdef->flag &= ~USER_INTERNET_ALLOW;
G.f &= ~G_FLAG_INTERNET_ALLOW;
}
/* Once the user edits this option (even to set it to the value it was)
* it's no longer considered overridden. */
G.f &= ~G_FLAG_INTERNET_OVERRIDE_PREF_ANY;
}
static int rna_userdef_use_online_access_editable(const PointerRNA * /*ptr*/, const char **r_info)
{
if ((G.f & G_FLAG_INTERNET_ALLOW) == 0) {
/* Return 0 when blender was invoked with `--offline-mode` "forced". */
@ -466,21 +493,6 @@ int rna_userdef_use_online_access_editable(const PointerRNA * /*ptr*/, const cha
return PROP_EDITABLE;
}
static void rna_userdef_use_online_access_update(Main * /*bmain*/,
Scene * /*scene*/,
PointerRNA *ptr)
{
UserDef *userdef = (UserDef *)ptr->data;
if (userdef->flag & USER_INTERNET_ALLOW) {
G.f |= G_FLAG_INTERNET_ALLOW;
}
else {
G.f &= ~G_FLAG_INTERNET_ALLOW;
}
USERDEF_TAG_DIRTY;
}
static void rna_userdef_script_directory_name_set(PointerRNA *ptr, const char *value)
{
bUserScriptDirectory *script_dir = static_cast<bUserScriptDirectory *>(ptr->data);
@ -6175,13 +6187,14 @@ static void rna_def_userdef_system(BlenderRNA *brna)
prop = RNA_def_property(srna, "use_online_access", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, nullptr, "flag", USER_INTERNET_ALLOW);
RNA_def_property_boolean_funcs(prop, nullptr, "rna_userdef_use_online_access_set");
RNA_def_property_ui_text(
prop,
"Allow Online Access",
"Allow internet access. Blender may access configured online extension repositories. "
"Installed third party add-ons may access the internet for their own functionality");
RNA_def_property_editable_func(prop, "rna_userdef_use_online_access_editable");
RNA_def_property_update(prop, 0, "rna_userdef_use_online_access_update");
RNA_def_property_update(prop, 0, "rna_userdef_update");
/* Audio */

View File

@ -257,6 +257,10 @@ PyDoc_STRVAR(
/* Wrap. */
bpy_app_internet_offline_doc,
"Boolean, true when internet access is allowed by Blender & 3rd party scripts (read-only)");
PyDoc_STRVAR(
/* Wrap. */
bpy_app_internet_offline_override_doc,
"Boolean, true when internet access preference is overridden by the command line (read-only)");
PyDoc_STRVAR(
/* Wrap. */
@ -497,6 +501,11 @@ static PyGetSetDef bpy_app_getsets[] = {
nullptr,
bpy_app_internet_offline_doc,
(void *)G_FLAG_INTERNET_ALLOW},
{"online_access_override",
bpy_app_global_flag_get,
nullptr,
bpy_app_internet_offline_override_doc,
(void *)G_FLAG_INTERNET_OVERRIDE_PREF_ANY},
/* security */
{"autoexec_fail",

View File

@ -479,10 +479,6 @@ class RealtimeCompositor {
public:
RealtimeCompositor(Render &render, const ContextInputData &input_data) : render_(render)
{
/* Ensure that in foreground mode we are using different contexts for main and render threads,
* to avoid them blocking each other. */
BLI_assert(!BLI_thread_is_main() || G.background);
/* Create resources with GPU context enabled. */
DRW_render_context_enable(&render_);
texture_pool_ = std::make_unique<TexturePool>();
@ -515,19 +511,16 @@ class RealtimeCompositor {
/* Evaluate the compositor and output to the scene render result. */
void execute(const ContextInputData &input_data)
{
/* Ensure that in foreground mode we are using different contexts for main and render threads,
* to avoid them blocking each other. */
BLI_assert(!BLI_thread_is_main() || G.background);
if (G.background) {
/* In the background mode the system context of the render engine might be nullptr, which
* forces some code paths which more tightly couple it with the draw manager.
* For the compositor we want to have the least amount of coupling with the draw manager, so
* ensure that the render engine has its own system GPU context. */
void *re_system_gpu_context = RE_system_gpu_context_get(&render_);
if (!re_system_gpu_context) {
/* In some cases like background mode and blocking rendering the system context of the render
* engine might be nullptr, which forces some code paths which more tightly couple it with
* the draw manager. For the compositor we want to have the least amount of coupling with the
* draw manager, so ensure that the render engine has its own system GPU context. */
RE_system_gpu_context_ensure(&render_);
re_system_gpu_context = RE_system_gpu_context_get(&render_);
}
void *re_system_gpu_context = RE_system_gpu_context_get(&render_);
void *re_blender_gpu_context = RE_blender_gpu_context_ensure(&render_);
GPU_render_begin();

View File

@ -3040,16 +3040,16 @@ static int wm_open_mainfile_exec(bContext *C, wmOperator *op)
return wm_open_mainfile__open(C, op);
}
static std::string wm_open_mainfile_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *params)
static std::string wm_open_mainfile_get_description(bContext * /*C*/,
wmOperatorType * /*ot*/,
PointerRNA *ptr)
{
if (!RNA_struct_property_is_set(params, "filepath")) {
if (!RNA_struct_property_is_set(ptr, "filepath")) {
return "";
}
char filepath[FILE_MAX];
RNA_string_get(params, "filepath", filepath);
RNA_string_get(ptr, "filepath", filepath);
BLI_stat_t stats;
if (BLI_stat(filepath, &stats) == -1) {
@ -3151,7 +3151,7 @@ void WM_OT_open_mainfile(wmOperatorType *ot)
ot->name = "Open";
ot->idname = "WM_OT_open_mainfile";
ot->description = "Open a Blender file";
ot->get_description = wm_open_mainfile_description;
ot->get_description = wm_open_mainfile_get_description;
ot->invoke = wm_open_mainfile_invoke;
ot->exec = wm_open_mainfile_exec;