Mesh: Move functions to C++ header #105416

Merged
Hans Goudey merged 18 commits from HooglyBoogly/blender:mesh-header-cleanup into main 2023-03-12 22:29:27 +01:00
36 changed files with 1968 additions and 1421 deletions
Showing only changes of commit c1cd454127

View File

@ -1,6 +1,5 @@
Project: TinyGLTF
URL: https://github.com/syoyo/tinygltf
License: MIT
Upstream version: 2.5.0, 19a41d20ec0
Local modifications:
* Silence "enum value not handled in switch" warnings due to JSON dependency.
Upstream version: 2.8.3, 84a83d39f55d
Local modifications: None

Binary file not shown.

File diff suppressed because it is too large.

View File

@ -621,6 +621,8 @@ void MetalKernelPipeline::compile()
MTLPipelineOption pipelineOptions = MTLPipelineOptionNone;
bool use_binary_archive = should_use_binary_archive();
bool loading_existing_archive = false;
bool creating_new_archive = false;
id<MTLBinaryArchive> archive = nil;
string metalbin_path;
@ -650,42 +652,39 @@ void MetalKernelPipeline::compile()
metalbin_path = path_cache_get(path_join("kernels", metalbin_name));
path_create_directories(metalbin_path);
/* Retrieve shader binary from disk, and update the file timestamp for LRU purging to work as
* intended. */
if (use_binary_archive && path_cache_kernel_exists_and_mark_used(metalbin_path)) {
if (@available(macOS 11.0, *)) {
MTLBinaryArchiveDescriptor *archiveDesc = [[MTLBinaryArchiveDescriptor alloc] init];
/* Check if shader binary exists on disk, and if so, update the file timestamp for LRU purging
* to work as intended. */
loading_existing_archive = path_cache_kernel_exists_and_mark_used(metalbin_path);
creating_new_archive = !loading_existing_archive;
if (@available(macOS 11.0, *)) {
MTLBinaryArchiveDescriptor *archiveDesc = [[MTLBinaryArchiveDescriptor alloc] init];
if (loading_existing_archive) {
archiveDesc.url = [NSURL fileURLWithPath:@(metalbin_path.c_str())];
archive = [mtlDevice newBinaryArchiveWithDescriptor:archiveDesc error:nil];
[archiveDesc release];
}
}
}
bool creating_new_archive = false;
bool recreate_archive = false;
if (@available(macOS 11.0, *)) {
if (use_binary_archive) {
NSError *error = nil;
archive = [mtlDevice newBinaryArchiveWithDescriptor:archiveDesc error:&error];
if (!archive) {
MTLBinaryArchiveDescriptor *archiveDesc = [[MTLBinaryArchiveDescriptor alloc] init];
archiveDesc.url = nil;
archive = [mtlDevice newBinaryArchiveWithDescriptor:archiveDesc error:nil];
creating_new_archive = true;
const char *err = error ? [[error localizedDescription] UTF8String] : nullptr;
metal_printf("newBinaryArchiveWithDescriptor failed: %s\n", err ? err : "nil");
}
else {
[archiveDesc release];
if (loading_existing_archive) {
pipelineOptions = MTLPipelineOptionFailOnBinaryArchiveMiss;
computePipelineStateDescriptor.binaryArchives = [NSArray arrayWithObjects:archive, nil];
}
}
}
bool recreate_archive = false;
/* Lambda to do the actual pipeline compilation. */
auto do_compilation = [&]() {
__block bool compilation_finished = false;
__block string error_str;
if (archive && path_exists(metalbin_path)) {
if (loading_existing_archive) {
/* Use the blocking variant of newComputePipelineStateWithDescriptor if an archive exists on
* disk. It should load almost instantaneously, and will fail gracefully when loading a
* corrupt archive (unlike the async variant). */
@ -698,8 +697,30 @@ void MetalKernelPipeline::compile()
error_str = err ? err : "nil";
}
else {
/* TODO / MetalRT workaround:
* Workaround for a crash when addComputePipelineFunctionsWithDescriptor is called *after*
* newComputePipelineStateWithDescriptor with linked functions (i.e. with MetalRT enabled).
* Ideally we would like to call newComputePipelineStateWithDescriptor (async) first so we
* can bail out if needed, but we can stop the crash by flipping the order when there are
* linked functions. However when addComputePipelineFunctionsWithDescriptor is called first
* it will block while it builds the pipeline, offering no way of bailing out. */
auto addComputePipelineFunctionsWithDescriptor = [&]() {
if (creating_new_archive && ShaderCache::running) {
NSError *error;
if (![archive addComputePipelineFunctionsWithDescriptor:computePipelineStateDescriptor
error:&error]) {
NSString *errStr = [error localizedDescription];
metal_printf("Failed to add PSO to archive:\n%s\n",
errStr ? [errStr UTF8String] : "nil");
}
}
};
if (computePipelineStateDescriptor.linkedFunctions) {
addComputePipelineFunctionsWithDescriptor();
}
/* Use the async variant of newComputePipelineStateWithDescriptor if no archive exists on
* disk. This allows us responds to app shutdown. */
* disk. This allows us to respond to app shutdown. */
[mtlDevice
newComputePipelineStateWithDescriptor:computePipelineStateDescriptor
options:pipelineOptions
@ -725,21 +746,14 @@ void MetalKernelPipeline::compile()
while (ShaderCache::running && !compilation_finished) {
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
}
if (creating_new_archive && pipeline && ShaderCache::running) {
/* Add pipeline into the new archive. It should be instantaneous following
* newComputePipelineStateWithDescriptor. */
NSError *error;
computePipelineStateDescriptor.binaryArchives = [NSArray arrayWithObjects:archive, nil];
if (![archive addComputePipelineFunctionsWithDescriptor:computePipelineStateDescriptor
error:&error]) {
NSString *errStr = [error localizedDescription];
metal_printf("Failed to add PSO to archive:\n%s\n", errStr ? [errStr UTF8String] : "nil");
/* Add pipeline into the new archive (unless we did it earlier). */
if (pipeline && !computePipelineStateDescriptor.linkedFunctions) {
addComputePipelineFunctionsWithDescriptor();
}
}
else if (!pipeline) {
if (!pipeline) {
metal_printf(
"newComputePipelineStateWithDescriptor failed for \"%s\"%s. "
"Error:\n%s\n",

View File

@ -741,22 +741,21 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
endif()
# SYCL_CPP_FLAGS is a variable that the user can set to pass extra compiler options
set(sycl_compiler_flags
${CMAKE_CURRENT_SOURCE_DIR}/${SRC_KERNEL_DEVICE_ONEAPI}
-fsycl
-fsycl-unnamed-lambda
-fdelayed-template-parsing
-mllvm -inlinedefault-threshold=250
-mllvm -inlinehint-threshold=350
-fsycl-device-code-split=per_kernel
-fsycl-max-parallel-link-jobs=${SYCL_OFFLINE_COMPILER_PARALLEL_JOBS}
-shared
-DWITH_ONEAPI
-ffast-math
-DNDEBUG
-O2
-o ${cycles_kernel_oneapi_lib}
-I${CMAKE_CURRENT_SOURCE_DIR}/..
${SYCL_CPP_FLAGS}
${CMAKE_CURRENT_SOURCE_DIR}/${SRC_KERNEL_DEVICE_ONEAPI}
-fsycl
-fsycl-unnamed-lambda
-fdelayed-template-parsing
-mllvm -inlinedefault-threshold=250
-mllvm -inlinehint-threshold=350
-fsycl-device-code-split=per_kernel
-fsycl-max-parallel-link-jobs=${SYCL_OFFLINE_COMPILER_PARALLEL_JOBS}
-shared
-DWITH_ONEAPI
-ffast-math
-O2
-o"${cycles_kernel_oneapi_lib}"
-I"${CMAKE_CURRENT_SOURCE_DIR}/.."
${SYCL_CPP_FLAGS}
)
if(WITH_CYCLES_ONEAPI_HOST_TASK_EXECUTION)
@ -783,14 +782,14 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
list(APPEND sycl_compiler_flags -fsycl-targets=${targets_string})
foreach(target ${CYCLES_ONEAPI_SYCL_TARGETS})
if(DEFINED CYCLES_ONEAPI_SYCL_OPTIONS_${target})
list(APPEND sycl_compiler_flags -Xsycl-target-backend=${target} "${CYCLES_ONEAPI_SYCL_OPTIONS_${target}}")
list(APPEND sycl_compiler_flags "-Xsycl-target-backend=${target} \"${CYCLES_ONEAPI_SYCL_OPTIONS_${target}}\"")
endif()
endforeach()
else()
# If AOT is disabled, build for spir64
list(APPEND sycl_compiler_flags
-fsycl-targets=spir64
-Xsycl-target-backend=spir64 "${CYCLES_ONEAPI_SYCL_OPTIONS_spir64}")
"-Xsycl-target-backend=spir64 \"${CYCLES_ONEAPI_SYCL_OPTIONS_spir64}\"")
endif()
if(WITH_NANOVDB)
@ -804,7 +803,6 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
endif()
get_filename_component(sycl_compiler_root ${SYCL_COMPILER} DIRECTORY)
get_filename_component(sycl_compiler_compiler_name ${SYCL_COMPILER} NAME_WE)
if(UNIX AND NOT APPLE)
if(NOT WITH_CXX11_ABI)
@ -816,7 +814,7 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
endif()
endif()
if(WIN32)
if(WIN32) # Add Windows specific compiler flags.
list(APPEND sycl_compiler_flags
-fuse-ld=link
-fms-extensions
@ -843,20 +841,43 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
get_filename_component(WINDOWS_KIT_DIR "${WINDOWS_KIT_DIR}/../" ABSOLUTE)
endif()
list(APPEND sycl_compiler_flags
-L "${MSVC_TOOLS_DIR}/lib/x64"
-L "${WINDOWS_KIT_DIR}/um/x64"
-L "${WINDOWS_KIT_DIR}/ucrt/x64")
-L"${MSVC_TOOLS_DIR}/lib/x64"
-L"${WINDOWS_KIT_DIR}/um/x64"
-L"${WINDOWS_KIT_DIR}/ucrt/x64")
else() # Add Linux specific compiler flags.
list(APPEND sycl_compiler_flags -fPIC)
set(sycl_compiler_flags_Release ${sycl_compiler_flags})
set(sycl_compiler_flags_Debug ${sycl_compiler_flags})
set(sycl_compiler_flags_RelWithDebInfo ${sycl_compiler_flags})
set(sycl_compiler_flags_MinSizeRel ${sycl_compiler_flags})
list(APPEND sycl_compiler_flags_RelWithDebInfo -g)
# We avoid getting __FAST_MATH__ defined when building on CentOS-7 and Rocky-8
# until the compilation issues it triggers at either the AoT or JIT stage get fixed.
list(APPEND sycl_compiler_flags -fhonor-nans)
# add $ORIGIN to cycles_kernel_oneapi.so rpath so libsycl.so and
# libpi_level_zero.so can be placed next to it and get found.
list(APPEND sycl_compiler_flags -Wl,-rpath,'$$ORIGIN')
endif()
# Create CONFIG specific compiler flags.
set(sycl_compiler_flags_Release ${sycl_compiler_flags})
set(sycl_compiler_flags_Debug ${sycl_compiler_flags})
set(sycl_compiler_flags_RelWithDebInfo ${sycl_compiler_flags})
list(APPEND sycl_compiler_flags_Release
-DNDEBUG
)
list(APPEND sycl_compiler_flags_RelWithDebInfo
-DNDEBUG
-g
)
list(APPEND sycl_compiler_flags_Debug
-g
)
if(WIN32)
list(APPEND sycl_compiler_flags_Debug
-g
-D_DEBUG
-nostdlib -Xclang --dependent-lib=msvcrtd)
-nostdlib
-Xclang --dependent-lib=msvcrtd
)
add_custom_command(
OUTPUT ${cycles_kernel_oneapi_lib} ${cycles_kernel_oneapi_linker_lib}
COMMAND ${CMAKE_COMMAND} -E env
@ -867,30 +888,32 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
"$<$<CONFIG:RelWithDebInfo>:${sycl_compiler_flags_RelWithDebInfo}>"
"$<$<CONFIG:Debug>:${sycl_compiler_flags_Debug}>"
"$<$<CONFIG:MinSizeRel>:${sycl_compiler_flags_Release}>"
COMMAND_EXPAND_LISTS
DEPENDS ${cycles_oneapi_kernel_sources})
COMMAND_EXPAND_LISTS
DEPENDS ${cycles_oneapi_kernel_sources})
else()
list(APPEND sycl_compiler_flags -fPIC)
# We avoid getting __FAST_MATH__ to be defined when building on CentOS-7 until the compilation
# crash it triggers at either AoT or JIT stages gets fixed.
# TODO: check if this is still needed on Rocky-8.
list(APPEND sycl_compiler_flags -fhonor-nans)
# add $ORIGIN to cycles_kernel_oneapi.so rpath so libsycl.so and
# libpi_level_zero.so can be placed next to it and get found.
list(APPEND sycl_compiler_flags -Wl,-rpath,'$$ORIGIN')
if(NOT IGC_INSTALL_DIR)
get_filename_component(IGC_INSTALL_DIR "${sycl_compiler_root}/../lib/igc" ABSOLUTE)
endif()
# The following join/replace operations are to prevent cmake from
# escaping space chars with backslashes in add_custom_command.
list(JOIN sycl_compiler_flags_Release " " sycl_compiler_flags_Release_str)
string(REPLACE " " ";" sycl_compiler_flags_Release_str ${sycl_compiler_flags_Release_str})
list(JOIN sycl_compiler_flags_RelWithDebInfo " " sycl_compiler_flags_RelWithDebInfo_str)
string(REPLACE " " ";" sycl_compiler_flags_RelWithDebInfo_str ${sycl_compiler_flags_RelWithDebInfo_str})
list(JOIN sycl_compiler_flags_Debug " " sycl_compiler_flags_Debug_str)
string(REPLACE " " ";" sycl_compiler_flags_Debug_str ${sycl_compiler_flags_Debug_str})
add_custom_command(
OUTPUT ${cycles_kernel_oneapi_lib}
COMMAND ${CMAKE_COMMAND} -E env
"LD_LIBRARY_PATH=${sycl_compiler_root}/../lib:${OCLOC_INSTALL_DIR}/lib:${IGC_INSTALL_DIR}/lib"
# `$ENV{PATH}` is for compiler to find `ld`.
"PATH=${OCLOC_INSTALL_DIR}/bin:${sycl_compiler_root}:$ENV{PATH}"
${SYCL_COMPILER} $<$<CONFIG:Debug>:-g>$<$<CONFIG:RelWithDebInfo>:-g> ${sycl_compiler_flags}
${SYCL_COMPILER}
"$<$<CONFIG:Release>:${sycl_compiler_flags_Release_str}>"
"$<$<CONFIG:RelWithDebInfo>:${sycl_compiler_flags_RelWithDebInfo_str}>"
"$<$<CONFIG:Debug>:${sycl_compiler_flags_Debug_str}>"
"$<$<CONFIG:MinSizeRel>:${sycl_compiler_flags_Release_str}>"
COMMAND_EXPAND_LISTS
DEPENDS ${cycles_oneapi_kernel_sources})
endif()

View File

@ -57,7 +57,8 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
return svm_node_closure_bsdf_skip(kg, offset, type);
}
float3 N = stack_valid(data_node.x) ? stack_load_float3(stack, data_node.x) : sd->N;
float3 N = stack_valid(data_node.x) ? safe_normalize(stack_load_float3(stack, data_node.x)) :
sd->N;
if (!(sd->type & PRIMITIVE_CURVE)) {
N = ensure_valid_reflection(sd->Ng, sd->wi, N);
}
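Wrapping the stack-loaded normal in safe_normalize() guarantees a unit-length vector before ensure_valid_reflection() runs, since values loaded from the SVM stack are not guaranteed to be normalized. A minimal sketch of a safe_normalize-style helper, assuming the common convention of returning the input unchanged when its length is zero (the exact behavior of Cycles' helper is not shown in this diff):

#include <cmath>

struct float3_sketch { float x, y, z; };

/* Normalize `a`, returning it unchanged when its length is zero. */
static float3_sketch safe_normalize_sketch(const float3_sketch a)
{
  const float len = std::sqrt(a.x * a.x + a.y * a.y + a.z * a.z);
  if (len == 0.0f) {
    return a;
  }
  const float inv = 1.0f / len;
  return {a.x * inv, a.y * inv, a.z * inv};
}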

View File

@ -312,7 +312,7 @@ class NODE_MT_node(Menu):
snode = context.space_data
is_compositor = snode.tree_type == 'CompositorNodeTree'
layout.operator("transform.translate")
layout.operator("transform.translate").view2d_edge_pan = True
layout.operator("transform.rotate")
layout.operator("transform.resize")

View File

@ -836,7 +836,7 @@ class SEQUENCER_MT_strip_transform(Menu):
layout.operator("transform.rotate", text="Rotate")
layout.operator("transform.resize", text="Scale")
else:
layout.operator("transform.seq_slide", text="Move")
layout.operator("transform.seq_slide", text="Move").view2d_edge_pan = True
layout.operator("transform.transform", text="Move/Extend from Current Frame").mode = 'TIME_EXTEND'
layout.operator("sequencer.slip", text="Slip Strip Contents")

View File

@ -19,7 +19,7 @@ extern "C" {
/* Blender major and minor version. */
#define BLENDER_VERSION 306
/* Blender patch version for bugfix releases. */
#define BLENDER_VERSION_PATCH 0
#define BLENDER_VERSION_PATCH 1
/** Blender release cycle stage: alpha/beta/rc/release. */
#define BLENDER_VERSION_CYCLE alpha

View File

@ -181,18 +181,13 @@ bool BKE_scene_collections_object_remove(struct Main *bmain,
bool free_us);
/**
* Check all collections in \a bmain (including embedded ones in scenes) for CollectionObject with
* NULL object pointer, and remove them.
*/
void BKE_collections_object_remove_nulls(struct Main *bmain);
/**
* Check all collections in \a bmain (including embedded ones in scenes) for duplicate
* CollectionObject with a same object pointer within a same object, and remove them.
* Check all collections in \a bmain (including embedded ones in scenes) for invalid
* CollectionObject (either with NULL object pointer, or duplicates), and remove them.
*
* NOTE: Always keeps the first of the detected duplicates.
* \note In case of duplicates, the first CollectionObject in the list is kept; all others are
* removed.
*/
void BKE_collections_object_remove_duplicates(struct Main *bmain);
void BKE_collections_object_remove_invalids(struct Main *bmain);
/**
* Remove all NULL children from parent collections of changed \a collection.

View File

@ -369,7 +369,10 @@ int BKE_fcurve_pathcache_find_array(struct FCurvePathCache *fcache,
* Calculate the x range of the given F-Curve's data.
* \return true if a range has been found.
*/
bool BKE_fcurve_calc_range(const struct FCurve *fcu, float *r_min, float *r_max, bool selected_keys_only);
bool BKE_fcurve_calc_range(const struct FCurve *fcu,
float *r_min,
float *r_max,
bool selected_keys_only);
/**
* Calculate the x and y extents of F-Curve's data.

View File

@ -83,17 +83,10 @@ static bool collection_find_child_recursive(const Collection *parent,
const Collection *collection);
static void collection_gobject_hash_ensure(Collection *collection);
static void collection_gobject_hash_remove_object(Collection *collection, const Object *ob);
static void collection_gobject_hash_update_object(Collection *collection,
Object *ob_old,
CollectionObject *cob);
/** Does nothing unless #USE_DEBUG_EXTRA_GOBJECT_ASSERT is defined. */
static void collection_gobject_hash_assert_internal_consistency(Collection *collection);
#define BLI_ASSERT_COLLECION_GOBJECT_HASH_IS_VALID(collection) \
collection_gobject_hash_assert_internal_consistency(collection)
/** \} */
/* -------------------------------------------------------------------- */
@ -183,8 +176,16 @@ static void collection_foreach_id(ID *id, LibraryForeachIDData *data)
BKE_LIB_FOREACHID_PROCESS_IDSUPER(data, cob->ob, IDWALK_CB_USER);
if (collection->runtime.gobject_hash) {
/* If the remapping does not create inconsistent data (NULL object pointer or duplicate
* CollectionObjects), keeping the ghash consistent is also possible. Otherwise, this call
* will take care of tagging the collection objects list as dirty. */
collection_gobject_hash_update_object(collection, cob_ob_old, cob);
}
else if (cob_ob_old != cob->ob || cob->ob == NULL) {
/* If there is no reference GHash, duplicates cannot be reliably detected, so assume that any
* NULL pointer or changed pointer may create an invalid collection object list. */
collection->runtime.tag |= COLLECTION_TAG_COLLECTION_OBJECT_DIRTY;
}
}
LISTBASE_FOREACH (CollectionChild *, child, &collection->children) {
BKE_LIB_FOREACHID_PROCESS_IDSUPER(
@ -983,7 +984,6 @@ bool BKE_collection_has_object(Collection *collection, const Object *ob)
if (ELEM(NULL, collection, ob)) {
return false;
}
BLI_ASSERT_COLLECION_GOBJECT_HASH_IS_VALID(collection);
collection_gobject_hash_ensure(collection);
return BLI_ghash_lookup(collection->runtime.gobject_hash, ob);
}
@ -1053,6 +1053,171 @@ bool BKE_collection_is_empty(const Collection *collection)
/** \name Collection Objects
* \{ */
static void collection_gobject_assert_internal_consistency(Collection *collection,
const bool do_extensive_check);
static GHash *collection_gobject_hash_alloc(const Collection *collection)
{
return BLI_ghash_ptr_new_ex(__func__, (uint)BLI_listbase_count(&collection->gobject));
}
static void collection_gobject_hash_create(Collection *collection)
{
GHash *gobject_hash = collection_gobject_hash_alloc(collection);
LISTBASE_FOREACH (CollectionObject *, cob, &collection->gobject) {
if (UNLIKELY(cob->ob == NULL)) {
BLI_assert(collection->runtime.tag & COLLECTION_TAG_COLLECTION_OBJECT_DIRTY);
continue;
}
CollectionObject **cob_p;
/* Do not overwrite an already existing entry. */
if (UNLIKELY(BLI_ghash_ensure_p(gobject_hash, cob->ob, (void ***)&cob_p))) {
BLI_assert(collection->runtime.tag & COLLECTION_TAG_COLLECTION_OBJECT_DIRTY);
continue;
}
*cob_p = cob;
}
collection->runtime.gobject_hash = gobject_hash;
}
static void collection_gobject_hash_ensure(Collection *collection)
{
if (collection->runtime.gobject_hash) {
#ifdef USE_DEBUG_EXTRA_GOBJECT_ASSERT
collection_gobject_assert_internal_consistency(collection, true);
#endif
return;
}
collection_gobject_hash_create(collection);
collection_gobject_assert_internal_consistency(collection, true);
}
/** Similar to #collection_gobject_hash_ensure/#collection_gobject_hash_create, but does fix
* inconsistencies in the collection objects list. */
static void collection_gobject_hash_ensure_fix(Collection *collection)
{
bool changed = false;
if ((collection->runtime.tag & COLLECTION_TAG_COLLECTION_OBJECT_DIRTY) == 0) {
#ifdef USE_DEBUG_EXTRA_GOBJECT_ASSERT
collection_gobject_assert_internal_consistency(collection, true);
#endif
return;
}
GHash *gobject_hash = collection->runtime.gobject_hash;
if (gobject_hash) {
BLI_ghash_clear_ex(gobject_hash, NULL, NULL, BLI_ghash_len(gobject_hash));
}
else {
collection->runtime.gobject_hash = gobject_hash = collection_gobject_hash_alloc(collection);
}
LISTBASE_FOREACH_MUTABLE (CollectionObject *, cob, &collection->gobject) {
if (cob->ob == NULL) {
BLI_freelinkN(&collection->gobject, cob);
changed = true;
continue;
}
CollectionObject **cob_p;
if (BLI_ghash_ensure_p(gobject_hash, cob->ob, (void ***)&cob_p)) {
BLI_freelinkN(&collection->gobject, cob);
changed = true;
continue;
}
*cob_p = cob;
}
if (changed) {
BKE_collection_object_cache_free(collection);
}
collection->runtime.tag &= ~COLLECTION_TAG_COLLECTION_OBJECT_DIRTY;
collection_gobject_assert_internal_consistency(collection, true);
}
/**
* Update the collections object hash, removing `ob_old`, inserting `cob->ob` as the new key.
*
* \note This function is called from the foreach_id callback, and a difference of Object pointers
* is only expected when ID remapping is happening. This code is the only area in Blender allowed to
* (temporarily) leave the CollectionObject list in an inconsistent/invalid state (with NULL object
* pointers, or duplicates of CollectionObjects). If such invalid cases are encountered, it will
* tag the collection objects list as dirty.
*
* \param ob_old: The existing key to `cob` in the hash, not removed when NULL.
* \param cob: The `cob->ob` is to be used as the new key,
* when NULL it's not added back into the hash.
*/
static void collection_gobject_hash_update_object(Collection *collection,
Object *ob_old,
CollectionObject *cob)
{
if (ob_old == cob->ob) {
return;
}
if (ob_old) {
CollectionObject *cob_old = BLI_ghash_popkey(collection->runtime.gobject_hash, ob_old, NULL);
if (cob_old != cob) {
/* Old object already removed from the ghash. */
collection->runtime.tag |= COLLECTION_TAG_COLLECTION_OBJECT_DIRTY;
}
}
if (cob->ob) {
CollectionObject **cob_p;
if (!BLI_ghash_ensure_p(collection->runtime.gobject_hash, cob->ob, (void ***)&cob_p)) {
*cob_p = cob;
}
else {
/* Duplicate CollectionObject entries. */
collection->runtime.tag |= COLLECTION_TAG_COLLECTION_OBJECT_DIRTY;
}
}
else {
/* CollectionObject with NULL object pointer. */
collection->runtime.tag |= COLLECTION_TAG_COLLECTION_OBJECT_DIRTY;
}
}
/**
* Validate the integrity of the collection's CollectionObject list, and of its mapping.
*
* The simple check is very fast, as it only verifies that the 'dirty' tag for the collection's
* objects is not set.
*
* The extensive check is expensive. It should not be done from within loops over collection
* items, or from low-level operations that can be assumed safe (like adding or removing an object
* from a collection). It ensures that:
* - There is a `gobject_hash` mapping.
* - There are no NULL-object CollectionObject items.
* - There are no duplicate CollectionObject items (two or more referencing the same Object).
*/
static void collection_gobject_assert_internal_consistency(Collection *collection,
const bool do_extensive_check)
{
BLI_assert((collection->runtime.tag & COLLECTION_TAG_COLLECTION_OBJECT_DIRTY) == 0);
if (!do_extensive_check) {
return;
}
if (collection->runtime.gobject_hash == NULL) {
/* NOTE: If the ghash does not exist yet, its creation will assert on errors, so in theory the
* second loop below could be skipped. */
collection_gobject_hash_create(collection);
}
GHash *gobject_hash = collection->runtime.gobject_hash;
LISTBASE_FOREACH (CollectionObject *, cob, &collection->gobject) {
BLI_assert(cob->ob != NULL);
/* If there is more than one CollectionObject for the same object, at most one of them will
* pass this test. */
BLI_assert(BLI_ghash_lookup(gobject_hash, cob->ob) == cob);
}
}
static void collection_tag_update_parent_recursive(Main *bmain,
Collection *collection,
const int flag)
@ -1336,83 +1501,14 @@ bool BKE_scene_collections_object_remove(Main *bmain, Scene *scene, Object *ob,
return scene_collections_object_remove(bmain, scene, ob, free_us, NULL);
}
/*
* Remove all NULL objects from collections.
* This is used for library remapping, where these pointers have been set to NULL.
* Otherwise this should never happen.
*/
static void collection_object_remove_nulls(Collection *collection)
{
bool changed = false;
LISTBASE_FOREACH_MUTABLE (CollectionObject *, cob, &collection->gobject) {
if (cob->ob == NULL) {
BLI_freelinkN(&collection->gobject, cob);
changed = true;
}
}
if (changed) {
BKE_collection_object_cache_free(collection);
}
}
void BKE_collections_object_remove_nulls(Main *bmain)
void BKE_collections_object_remove_invalids(Main *bmain)
{
LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
collection_object_remove_nulls(scene->master_collection);
collection_gobject_hash_ensure_fix(scene->master_collection);
}
LISTBASE_FOREACH (Collection *, collection, &bmain->collections) {
collection_object_remove_nulls(collection);
}
}
/*
* Remove all duplicate objects from collections.
* This is used for library remapping, happens when remapping an object to another one already
* present in the collection. Otherwise this should never happen.
*/
static void collection_object_remove_duplicates(Collection *collection)
{
bool changed = false;
BLI_ASSERT_COLLECION_GOBJECT_HASH_IS_VALID(collection);
const bool use_hash_exists = (collection->runtime.gobject_hash != NULL);
LISTBASE_FOREACH_MUTABLE (CollectionObject *, cob, &collection->gobject) {
if (cob->ob->runtime.collection_management) {
if (use_hash_exists) {
collection_gobject_hash_remove_object(collection, cob->ob);
}
BLI_freelinkN(&collection->gobject, cob);
changed = true;
continue;
}
cob->ob->runtime.collection_management = true;
}
/* Cleanup. */
LISTBASE_FOREACH (CollectionObject *, cob, &collection->gobject) {
cob->ob->runtime.collection_management = false;
}
if (changed) {
BKE_collection_object_cache_free(collection);
}
}
void BKE_collections_object_remove_duplicates(struct Main *bmain)
{
LISTBASE_FOREACH (Object *, ob, &bmain->objects) {
ob->runtime.collection_management = false;
}
LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
collection_object_remove_duplicates(scene->master_collection);
}
LISTBASE_FOREACH (Collection *, collection, &bmain->collections) {
collection_object_remove_duplicates(collection);
collection_gobject_hash_ensure_fix(collection);
}
}
@ -1646,79 +1742,6 @@ static CollectionParent *collection_find_parent(Collection *child, Collection *c
return BLI_findptr(&child->runtime.parents, collection, offsetof(CollectionParent, collection));
}
static void collection_gobject_hash_ensure(Collection *collection)
{
if (collection->runtime.gobject_hash) {
BLI_ASSERT_COLLECION_GOBJECT_HASH_IS_VALID(collection);
return;
}
GHash *gobject_hash = BLI_ghash_ptr_new_ex(__func__, BLI_listbase_count(&collection->gobject));
LISTBASE_FOREACH (CollectionObject *, cob, &collection->gobject) {
BLI_ghash_insert(gobject_hash, cob->ob, cob);
}
collection->runtime.gobject_hash = gobject_hash;
BLI_ASSERT_COLLECION_GOBJECT_HASH_IS_VALID(collection);
}
static void collection_gobject_hash_remove_object(Collection *collection, const Object *ob)
{
const bool found = BLI_ghash_remove(collection->runtime.gobject_hash, ob, NULL, NULL);
BLI_assert(found);
UNUSED_VARS_NDEBUG(found);
}
/**
* Update the collections object hash, removing `ob_old`, inserting `cob->ob` as the new key.
*
* \param ob_old: The existing key to `cob` in the hash, not removed when NULL.
* \param cob: The `cob->ob` is to be used as the new key,
* when NULL it's not added back into the hash.
*/
static void collection_gobject_hash_update_object(Collection *collection,
Object *ob_old,
CollectionObject *cob)
{
if (ob_old == cob->ob) {
return;
}
BLI_ASSERT_COLLECION_GOBJECT_HASH_IS_VALID(collection);
if (ob_old) {
collection_gobject_hash_remove_object(collection, ob_old);
}
/* The object may be set to NULL if the ID is being cleared from #collection_foreach_id,
* generally `cob->ob` is not expected to be NULL. */
if (cob->ob) {
BLI_ghash_insert(collection->runtime.gobject_hash, cob->ob, cob);
}
}
/**
* Should only be called by: #BLI_ASSERT_COLLECION_GOBJECT_HASH_IS_VALID macro,
* this is an expensive operation intended only to be used for debugging.
*/
static void collection_gobject_hash_assert_internal_consistency(Collection *collection)
{
#ifdef USE_DEBUG_EXTRA_GOBJECT_ASSERT
if (collection->runtime.gobject_hash == NULL) {
return;
}
GHash *gobject_hash = collection->runtime.gobject_hash;
int gobject_count = 0;
LISTBASE_FOREACH (CollectionObject *, cob, &collection->gobject) {
CollectionObject *cob_test = BLI_ghash_lookup(gobject_hash, cob->ob);
BLI_assert(cob == cob_test);
gobject_count += 1;
}
const int gobject_hash_count = BLI_ghash_len(gobject_hash);
BLI_assert(gobject_count == gobject_hash_count);
#else
UNUSED_VARS(collection);
#endif /* USE_DEBUG_EXTRA_GOBJECT_ASSERT */
}
static bool collection_child_add(Collection *parent,
Collection *collection,
const int flag,

View File

@ -325,20 +325,12 @@ static void libblock_remap_data_preprocess(ID *id_owner,
*/
static void libblock_remap_data_postprocess_object_update(Main *bmain,
Object *old_ob,
Object *new_ob,
Object *UNUSED(new_ob),
const bool do_sync_collection)
{
if (new_ob == NULL) {
/* In case we unlinked old_ob (new_ob is NULL), the object has already
* been removed from the scenes and their collections. We still have
* to remove the NULL children from collections not used in any scene. */
BKE_collections_object_remove_nulls(bmain);
}
else {
/* Remapping may have created duplicates of CollectionObject pointing to the same object within
* the same collection. */
BKE_collections_object_remove_duplicates(bmain);
}
/* Will only effectively process collections that have been tagged with
* #COLLECTION_TAG_COLLECTION_OBJECT_DIRTY. See #collection_foreach_id callback. */
BKE_collections_object_remove_invalids(bmain);
if (do_sync_collection) {
BKE_main_collection_sync_remap(bmain);

View File

@ -105,17 +105,19 @@
* support disabling some parts of this.
* \{ */
/* Scan first chunks (happy path when beginning of the array matches).
/**
* Scan first chunks (happy path when beginning of the array matches).
* When the array is a perfect match, we can re-use the entire list.
*
* Note that disabling makes some tests fail that check for output-size.
*/
#define USE_FASTPATH_CHUNKS_FIRST
/* Scan last chunks (happy path when end of the array matches).
/**
* Scan last chunks (happy path when end of the array matches).
* When the end of the array matches, we can quickly add these chunks.
* note that we will add contiguous matching chunks
* so this isn't as useful as USE_FASTPATH_CHUNKS_FIRST,
* so this isn't as useful as #USE_FASTPATH_CHUNKS_FIRST,
* however it avoids adding matching chunks into the lookup table,
* so creating the lookup table won't be as expensive.
*/
@ -123,14 +125,16 @@
# define USE_FASTPATH_CHUNKS_LAST
#endif
/* For arrays of matching length, test that *enough* of the chunks are aligned,
/**
* For arrays of matching length, test that *enough* of the chunks are aligned,
* and simply step over both arrays, using matching chunks.
* This avoids overhead of using a lookup table for cases
* when we can assume they're mostly aligned.
*/
#define USE_ALIGN_CHUNKS_TEST
/* Accumulate hashes from right to left so we can create a hash for the chunk-start.
/**
* Accumulate hashes from right to left so we can create a hash for the chunk-start.
* This serves to increase uniqueness and will help when there are many values which are the same.
*/
#define USE_HASH_TABLE_ACCUMULATE
@ -138,28 +142,48 @@
#ifdef USE_HASH_TABLE_ACCUMULATE
/* Number of times to propagate hashes back.
* Effectively a 'triangle-number'.
* so 4 -> 7, 5 -> 10, 6 -> 15... etc.
* so 3 -> 7, 4 -> 11, 5 -> 16, 6 -> 22, 7 -> 29, ... etc.
*
* \note additional steps are expensive, so avoid high values unless necessary
* (with low strides, between 1-4) where a low value would cause the hashes to
* be un-evenly distributed.
*/
# define BCHUNK_HASH_TABLE_ACCUMULATE_STEPS 4
# define BCHUNK_HASH_TABLE_ACCUMULATE_STEPS_DEFAULT 3
# define BCHUNK_HASH_TABLE_ACCUMULATE_STEPS_32BITS 4
# define BCHUNK_HASH_TABLE_ACCUMULATE_STEPS_16BITS 5
/**
* Single byte (or boolean) arrays need a higher number of steps
* because the resulting values are not unique enough to result in evenly distributed values.
* Use more accumulation when the size of the structs is small, see: #105046.
*
* With 6 -> 22, one byte each - this means an array of booleans can be combined into 22 bits
* representing 4,194,303 different combinations.
*/
# define BCHUNK_HASH_TABLE_ACCUMULATE_STEPS_8BITS 6
#else
/* How many items to hash (multiplied by stride)
/**
* How many items to hash (multiplied by stride).
* The more values, the greater the chance this block has a unique hash.
*/
# define BCHUNK_HASH_LEN 4
# define BCHUNK_HASH_LEN 16
#endif
/* Calculate the key once and reuse it
/**
* Calculate the key once and reuse it
*/
#define USE_HASH_TABLE_KEY_CACHE
#ifdef USE_HASH_TABLE_KEY_CACHE
# define HASH_TABLE_KEY_UNSET ((uint64_t)-1)
# define HASH_TABLE_KEY_FALLBACK ((uint64_t)-2)
# define HASH_TABLE_KEY_UNSET ((hash_key)-1)
# define HASH_TABLE_KEY_FALLBACK ((hash_key)-2)
#endif
/* How much larger the table is then the total number of chunks.
/**
* How much larger the table is than the total number of chunks.
*/
#define BCHUNK_HASH_TABLE_MUL 3
/* Merge too small/large chunks:
/**
* Merge too small/large chunks:
*
* Using this means chunks below a threshold will be merged together.
* Even though short term this uses more memory,
@ -172,19 +196,20 @@
#define USE_MERGE_CHUNKS
#ifdef USE_MERGE_CHUNKS
/* Merge chunks smaller then: (chunk_size / BCHUNK_MIN_SIZE_DIV)
*/
/** Merge chunks smaller than: (#BArrayInfo::chunk_byte_size / #BCHUNK_SIZE_MIN_DIV). */
# define BCHUNK_SIZE_MIN_DIV 8
/* Disallow chunks bigger than the regular chunk size scaled by this value
* NOTE: must be at least 2!
/**
* Disallow chunks bigger than the regular chunk size scaled by this value.
*
* \note must be at least 2!
* however, this code won't run in tests unless it's ~1.1 (ugh),
* so lower it only to check that splitting works.
*/
# define BCHUNK_SIZE_MAX_MUL 2
#endif /* USE_MERGE_CHUNKS */
/* slow (keep disabled), but handy for debugging */
/** Slow (keep disabled), but handy for debugging */
// #define USE_VALIDATE_LIST_SIZE
// #define USE_VALIDATE_LIST_DATA_PARTIAL
@ -197,7 +222,7 @@
/** \name Internal Structs
* \{ */
typedef uint64_t hash_key;
typedef uint32_t hash_key;
typedef struct BArrayInfo {
size_t chunk_stride;
@ -208,7 +233,10 @@ typedef struct BArrayInfo {
/* min/max limits (inclusive) */
size_t chunk_byte_size_min;
size_t chunk_byte_size_max;
/**
* The read-ahead value should never exceed `chunk_byte_size`,
* otherwise the hash would be based on values in the next chunk.
*/
size_t accum_read_ahead_bytes;
#ifdef USE_HASH_TABLE_ACCUMULATE
size_t accum_steps;
@ -251,20 +279,23 @@ struct BArrayStore {
struct BArrayState {
/** linked list in #BArrayStore.states */
struct BArrayState *next, *prev;
struct BChunkList *chunk_list; /* BChunkList's */
/** Shared chunk list, this reference must hold a #BChunkList::users. */
struct BChunkList *chunk_list;
};
typedef struct BChunkList {
ListBase chunk_refs; /* BChunkRef's */
uint chunk_refs_len; /* BLI_listbase_count(chunks), store for reuse. */
size_t total_size; /* size of all chunks */
/** List of #BChunkRef's */
ListBase chunk_refs;
/** Result of `BLI_listbase_count(chunks)`, store for reuse. */
uint chunk_refs_len;
/** Size of all chunks (expanded). */
size_t total_expanded_size;
/** number of #BArrayState using this. */
/** Number of #BArrayState using this. */
int users;
} BChunkList;
/* a chunk of an array */
/** A chunk of memory in an array (unit of de-duplication). */
typedef struct BChunk {
const uchar *data;
size_t data_len;
@ -353,13 +384,13 @@ static bool bchunk_data_compare(const BChunk *chunk,
/** \name Internal BChunkList API
* \{ */
static BChunkList *bchunk_list_new(BArrayMemory *bs_mem, size_t total_size)
static BChunkList *bchunk_list_new(BArrayMemory *bs_mem, size_t total_expanded_size)
{
BChunkList *chunk_list = BLI_mempool_alloc(bs_mem->chunk_list);
BLI_listbase_clear(&chunk_list->chunk_refs);
chunk_list->chunk_refs_len = 0;
chunk_list->total_size = total_size;
chunk_list->total_expanded_size = total_expanded_size;
chunk_list->users = 0;
return chunk_list;
}
@ -734,19 +765,19 @@ static void bchunk_list_fill_from_array(const BArrayInfo *info,
#define HASH_INIT (5381)
BLI_INLINE uint hash_data_single(const uchar p)
BLI_INLINE hash_key hash_data_single(const uchar p)
{
return ((HASH_INIT << 5) + HASH_INIT) + (uint)(*((signed char *)&p));
return ((HASH_INIT << 5) + HASH_INIT) + (hash_key)(*((signed char *)&p));
}
/* hash bytes, from BLI_ghashutil_strhash_n */
static uint hash_data(const uchar *key, size_t n)
static hash_key hash_data(const uchar *key, size_t n)
{
const signed char *p;
uint h = HASH_INIT;
hash_key h = HASH_INIT;
for (p = (const signed char *)key; n--; p++) {
h = (uint)((h << 5) + h) + (uint)*p;
h = (hash_key)((h << 5) + h) + (hash_key)*p;
}
return h;
@ -802,6 +833,14 @@ static void hash_array_from_cref(const BArrayInfo *info,
BLI_assert(i == hash_array_len);
}
BLI_INLINE void hash_accum_impl(hash_key *hash_array, const size_t i_dst, const size_t i_ahead)
{
/* Tested to give good results when accumulating unique values from an array of booleans.
* (least unused cells in the `BTableRef **table`). */
BLI_assert(i_dst < i_ahead);
hash_array[i_dst] += ((hash_array[i_ahead] << 3) ^ (hash_array[i_dst] >> 1));
}
static void hash_accum(hash_key *hash_array, const size_t hash_array_len, size_t iter_steps)
{
/* _very_ unlikely, can happen if you select a chunk-size of 1 for example. */
@ -812,8 +851,8 @@ static void hash_accum(hash_key *hash_array, const size_t hash_array_len, size_t
const size_t hash_array_search_len = hash_array_len - iter_steps;
while (iter_steps != 0) {
const size_t hash_offset = iter_steps;
for (uint i = 0; i < hash_array_search_len; i++) {
hash_array[i] += (hash_array[i + hash_offset]) * ((hash_array[i] & 0xff) + 1);
for (size_t i = 0; i < hash_array_search_len; i++) {
hash_accum_impl(hash_array, i, i + hash_offset);
}
iter_steps -= 1;
}
@ -838,7 +877,7 @@ static void hash_accum_single(hash_key *hash_array, const size_t hash_array_len,
const size_t hash_array_search_len = hash_array_len - iter_steps_sub;
const size_t hash_offset = iter_steps;
for (uint i = 0; i < hash_array_search_len; i++) {
hash_array[i] += (hash_array[i + hash_offset]) * ((hash_array[i] & 0xff) + 1);
hash_accum_impl(hash_array, i, i + hash_offset);
}
iter_steps -= 1;
iter_steps_sub += iter_steps;
@ -932,9 +971,9 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
static hash_key key_from_chunk_ref(const BArrayInfo *info, const BChunkRef *cref)
{
const size_t data_hash_len = BCHUNK_HASH_LEN * info->chunk_stride;
hash_key key;
BChunk *chunk = cref->link;
const size_t data_hash_len = MIN2(chunk->data_len, BCHUNK_HASH_LEN * info->chunk_stride);
# ifdef USE_HASH_TABLE_KEY_CACHE
key = chunk->key;
@ -1012,7 +1051,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
const size_t data_len_original,
const BChunkList *chunk_list_reference)
{
ASSERT_CHUNKLIST_SIZE(chunk_list_reference, chunk_list_reference->total_size);
ASSERT_CHUNKLIST_SIZE(chunk_list_reference, chunk_list_reference->total_expanded_size);
/* -----------------------------------------------------------------------
* Fast-Path for exact match
@ -1045,7 +1084,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
}
if (full_match) {
if (chunk_list_reference->total_size == data_len_original) {
if (chunk_list_reference->total_expanded_size == data_len_original) {
return (BChunkList *)chunk_list_reference;
}
}
@ -1135,9 +1174,9 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
bool use_aligned = false;
#ifdef USE_ALIGN_CHUNKS_TEST
if (chunk_list->total_size == chunk_list_reference->total_size) {
if (chunk_list->total_expanded_size == chunk_list_reference->total_expanded_size) {
/* if we're already a quarter aligned */
if (data_len - i_prev <= chunk_list->total_size / 4) {
if (data_len - i_prev <= chunk_list->total_expanded_size / 4) {
use_aligned = true;
}
else {
@ -1219,7 +1258,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#endif
const BChunkRef *cref;
size_t chunk_list_reference_bytes_remaining = chunk_list_reference->total_size -
size_t chunk_list_reference_bytes_remaining = chunk_list_reference->total_expanded_size -
chunk_list_reference_skip_bytes;
if (cref_match_first) {
@ -1371,7 +1410,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
/* check we're the correct size and that we didn't accidentally modify the reference */
ASSERT_CHUNKLIST_SIZE(chunk_list, data_len_original);
ASSERT_CHUNKLIST_SIZE(chunk_list_reference, chunk_list_reference->total_size);
ASSERT_CHUNKLIST_SIZE(chunk_list_reference, chunk_list_reference->total_expanded_size);
ASSERT_CHUNKLIST_DATA(chunk_list, data);
@ -1387,6 +1426,8 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
{
BLI_assert(stride > 0 && chunk_count > 0);
BArrayStore *bs = MEM_callocN(sizeof(BArrayStore), __func__);
bs->info.chunk_stride = stride;
@ -1399,14 +1440,32 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
#endif
#ifdef USE_HASH_TABLE_ACCUMULATE
bs->info.accum_steps = BCHUNK_HASH_TABLE_ACCUMULATE_STEPS - 1;
/* Triangle number, identifying now much read-ahead we need:
* https://en.wikipedia.org/wiki/Triangular_number (+ 1) */
bs->info.accum_read_ahead_len =
(uint)(((bs->info.accum_steps * (bs->info.accum_steps + 1)) / 2) + 1);
/* One is always subtracted from this `accum_steps`; this is intentional
* as it results in reading ahead the expected amount. */
if (stride <= sizeof(int8_t)) {
bs->info.accum_steps = BCHUNK_HASH_TABLE_ACCUMULATE_STEPS_8BITS + 1;
}
else if (stride <= sizeof(int16_t)) {
bs->info.accum_steps = BCHUNK_HASH_TABLE_ACCUMULATE_STEPS_16BITS + 1;
}
else if (stride <= sizeof(int32_t)) {
bs->info.accum_steps = BCHUNK_HASH_TABLE_ACCUMULATE_STEPS_32BITS + 1;
}
else {
bs->info.accum_steps = BCHUNK_HASH_TABLE_ACCUMULATE_STEPS_DEFAULT + 1;
}
do {
bs->info.accum_steps -= 1;
/* Triangle number, identifying how much read-ahead we need:
* https://en.wikipedia.org/wiki/Triangular_number (+ 1) */
bs->info.accum_read_ahead_len = ((bs->info.accum_steps * (bs->info.accum_steps + 1)) / 2) + 1;
/* Only small chunk counts are likely to exceed the read-ahead length. */
} while (UNLIKELY(chunk_count < bs->info.accum_read_ahead_len));
bs->info.accum_read_ahead_bytes = bs->info.accum_read_ahead_len * stride;
#else
bs->info.accum_read_ahead_bytes = BCHUNK_HASH_LEN * stride;
bs->info.accum_read_ahead_bytes = MIN2((size_t)BCHUNK_HASH_LEN, chunk_count) * stride;
#endif
bs->memory.chunk_list = BLI_mempool_create(sizeof(BChunkList), 0, 512, BLI_MEMPOOL_NOP);
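For reference, the read-ahead length used above follows directly from the triangular number mentioned in the comments: for `accum_steps` = n it is n * (n + 1) / 2 + 1, giving 3 -> 7, 4 -> 11, 5 -> 16 and 6 -> 22. A small standalone sketch of that relationship (the helper name is made up for illustration):

#include <cassert>
#include <cstddef>

/* Read-ahead length for a given number of accumulation steps:
 * the triangular number n * (n + 1) / 2, plus one. */
static size_t accum_read_ahead_len_sketch(const size_t accum_steps)
{
  return ((accum_steps * (accum_steps + 1)) / 2) + 1;
}

int main()
{
  assert(accum_read_ahead_len_sketch(3) == 7);  /* BCHUNK_HASH_TABLE_ACCUMULATE_STEPS_DEFAULT */
  assert(accum_read_ahead_len_sketch(4) == 11); /* ..._32BITS */
  assert(accum_read_ahead_len_sketch(5) == 16); /* ..._16BITS */
  assert(accum_read_ahead_len_sketch(6) == 22); /* ..._8BITS: 22 one-byte values feed each hash */
  return 0;
}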
@ -1415,6 +1474,8 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
* (we could loop over all states as an alternative). */
bs->memory.chunk = BLI_mempool_create(sizeof(BChunk), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
BLI_assert(bs->info.accum_read_ahead_bytes <= bs->info.chunk_byte_size);
return bs;
}
@ -1470,7 +1531,7 @@ size_t BLI_array_store_calc_size_expanded_get(const BArrayStore *bs)
{
size_t size_accum = 0;
LISTBASE_FOREACH (const BArrayState *, state, &bs->states) {
size_accum += state->chunk_list->total_size;
size_accum += state->chunk_list->total_expanded_size;
}
return size_accum;
}
@ -1556,7 +1617,7 @@ void BLI_array_store_state_remove(BArrayStore *bs, BArrayState *state)
size_t BLI_array_store_state_size_get(BArrayState *state)
{
return state->chunk_list->total_size;
return state->chunk_list->total_expanded_size;
}
void BLI_array_store_state_data_get(BArrayState *state, void *data)
@ -1566,7 +1627,7 @@ void BLI_array_store_state_data_get(BArrayState *state, void *data)
LISTBASE_FOREACH (BChunkRef *, cref, &state->chunk_list->chunk_refs) {
data_test_len += cref->link->data_len;
}
BLI_assert(data_test_len == state->chunk_list->total_size);
BLI_assert(data_test_len == state->chunk_list->total_expanded_size);
#endif
uchar *data_step = (uchar *)data;
@ -1579,9 +1640,9 @@ void BLI_array_store_state_data_get(BArrayState *state, void *data)
void *BLI_array_store_state_data_get_alloc(BArrayState *state, size_t *r_data_len)
{
void *data = MEM_mallocN(state->chunk_list->total_size, __func__);
void *data = MEM_mallocN(state->chunk_list->total_expanded_size, __func__);
BLI_array_store_state_data_get(state, data);
*r_data_len = state->chunk_list->total_size;
*r_data_len = state->chunk_list->total_expanded_size;
return data;
}
@ -1594,11 +1655,11 @@ void *BLI_array_store_state_data_get_alloc(BArrayState *state, size_t *r_data_le
/* only for test validation */
static size_t bchunk_list_size(const BChunkList *chunk_list)
{
size_t total_size = 0;
size_t total_expanded_size = 0;
LISTBASE_FOREACH (BChunkRef *, cref, &chunk_list->chunk_refs) {
total_size += cref->link->data_len;
total_expanded_size += cref->link->data_len;
}
return total_size;
return total_expanded_size;
}
bool BLI_array_store_is_valid(BArrayStore *bs)
@ -1610,7 +1671,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
LISTBASE_FOREACH (BArrayState *, state, &bs->states) {
BChunkList *chunk_list = state->chunk_list;
if (!(bchunk_list_size(chunk_list) == chunk_list->total_size)) {
if (!(bchunk_list_size(chunk_list) == chunk_list->total_expanded_size)) {
return false;
}
@ -1620,7 +1681,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
#ifdef USE_MERGE_CHUNKS
/* ensure we merge all chunks that could be merged */
if (chunk_list->total_size > bs->info.chunk_byte_size_min) {
if (chunk_list->total_expanded_size > bs->info.chunk_byte_size_min) {
LISTBASE_FOREACH (BChunkRef *, cref, &chunk_list->chunk_refs) {
if (cref->link->data_len < bs->info.chunk_byte_size_min) {
return false;

View File

@ -54,8 +54,8 @@ void BLI_array_store_at_size_clear(struct BArrayStore_AtSize *bs_stride)
}
}
MEM_freeN(bs_stride->stride_table);
bs_stride->stride_table = NULL;
/* It's possible this table was never used. */
MEM_SAFE_FREE(bs_stride->stride_table);
bs_stride->stride_table_len = 0;
}

View File

@ -4148,16 +4148,7 @@ void blo_do_versions_300(FileData *fd, Library * /*lib*/, Main *bmain)
}
}
/**
* Versioning code until next subversion bump goes here.
*
* \note Be sure to check when bumping the version:
* - "versioning_userdef.c", #blo_do_versions_userdef
* - "versioning_userdef.c", #do_versions_theme
*
* \note Keep this message at the bottom of the function.
*/
{
if (!MAIN_VERSION_ATLEAST(bmain, 306, 1)) {
/* Z bias for retopology overlay. */
if (!DNA_struct_elem_find(fd->filesdna, "View3DOverlay", "float", "retopology_offset")) {
LISTBASE_FOREACH (bScreen *, screen, &bmain->screens) {
@ -4180,11 +4171,24 @@ void blo_do_versions_300(FileData *fd, Library * /*lib*/, Main *bmain)
SEQ_for_each_callback(&ed->seqbase, version_set_seq_single_frame_content, nullptr);
}
}
/* Keep this block, even when empty. */
LISTBASE_FOREACH (bNodeTree *, ntree, &bmain->nodetrees) {
if (ntree->type == NTREE_GEOMETRY) {
version_geometry_nodes_extrude_smooth_propagation(*ntree);
}
}
}
/**
* Versioning code until next subversion bump goes here.
*
* \note Be sure to check when bumping the version:
* - "versioning_userdef.c", #blo_do_versions_userdef
* - "versioning_userdef.c", #do_versions_theme
*
* \note Keep this message at the bottom of the function.
*/
{
/* Keep this block, even when empty. */
}
}

View File

@ -89,6 +89,10 @@ static void do_versions_theme(const UserDef *userdef, bTheme *btheme)
btheme->tui.wcol_view_item = U_theme_default.tui.wcol_view_item;
}
if (!USER_VERSION_ATLEAST(306, 1)) {
FROM_DEFAULT_V4_UCHAR(space_view3d.face_retopology);
}
/**
* Versioning code until next subversion bump goes here.
*

View File

@ -8781,7 +8781,7 @@ uiBut *UI_context_active_but_prop_get(const bContext *C,
PropertyRNA **r_prop,
int *r_index)
{
uiBut *activebut = ui_context_rna_button_active(C);
uiBut *activebut = UI_context_active_but_get_respect_menu(C);
if (activebut && activebut->rnapoin.data) {
*r_ptr = activebut->rnapoin;
@ -8799,7 +8799,7 @@ uiBut *UI_context_active_but_prop_get(const bContext *C,
void UI_context_active_but_prop_handle(bContext *C, const bool handle_undo)
{
uiBut *activebut = ui_context_rna_button_active(C);
uiBut *activebut = UI_context_active_but_get_respect_menu(C);
if (activebut) {
/* TODO(@ideasman42): look into a better way to handle the button change
* currently this is mainly so reset defaults works for the

View File

@ -868,7 +868,6 @@ uiBlock *ui_block_func_COLOR(bContext *C, uiPopupBlockHandle *handle, void *arg_
{
uiBut *but = static_cast<uiBut *>(arg_but);
uiBlock *block;
bool show_picker = true;
block = UI_block_begin(C, handle->region, __func__, UI_EMBOSS);
@ -876,17 +875,9 @@ uiBlock *ui_block_func_COLOR(bContext *C, uiPopupBlockHandle *handle, void *arg_
block->is_color_gamma_picker = true;
}
if (but->block) {
/* if color block is invoked from a popup we wouldn't be able to set color properly
* this is because color picker will close popups first and then will try to figure
* out active button RNA, and of course it'll fail
*/
show_picker = (but->block->flag & UI_BLOCK_POPUP) == 0;
}
copy_v3_v3(handle->retvec, but->editvec);
ui_block_colorpicker(block, but, handle->retvec, show_picker);
ui_block_colorpicker(block, but, handle->retvec, true);
block->flag = UI_BLOCK_LOOP | UI_BLOCK_KEEP_OPEN | UI_BLOCK_OUT_1 | UI_BLOCK_MOVEMOUSE_QUIT;
UI_block_theme_style_set(block, UI_BLOCK_THEME_STYLE_POPUP);

View File

@ -17,6 +17,7 @@
#include "BLI_array_utils.h"
#include "BLI_listbase.h"
#include "BLI_task.hh"
#include "BKE_context.h"
#include "BKE_customdata.h"
@ -118,8 +119,22 @@ struct UndoMesh {
/** \name Array Store
* \{ */
/**
* Store separate #BArrayStore_AtSize so multiple threads
* can access array stores without locking.
*/
enum {
ARRAY_STORE_INDEX_VERT = 0,
ARRAY_STORE_INDEX_EDGE,
ARRAY_STORE_INDEX_LOOP,
ARRAY_STORE_INDEX_POLY,
ARRAY_STORE_INDEX_SHAPE,
ARRAY_STORE_INDEX_MSEL,
};
# define ARRAY_STORE_INDEX_NUM (ARRAY_STORE_INDEX_MSEL + 1)
static struct {
BArrayStore_AtSize bs_stride;
BArrayStore_AtSize bs_stride[ARRAY_STORE_INDEX_NUM];
int users;
/**
@ -132,11 +147,12 @@ static struct {
TaskPool *task_pool;
# endif
} um_arraystore = {{nullptr}};
} um_arraystore = {{{nullptr}}};
static void um_arraystore_cd_compact(CustomData *cdata,
const size_t data_len,
bool create,
const bool create,
const int bs_index,
const BArrayCustomData *bcd_reference,
BArrayCustomData **r_bcd_first)
{
@ -175,7 +191,7 @@ static void um_arraystore_cd_compact(CustomData *cdata,
const int stride = CustomData_sizeof(type);
BArrayStore *bs = create ? BLI_array_store_at_size_ensure(
&um_arraystore.bs_stride, stride, ARRAY_CHUNK_SIZE) :
&um_arraystore.bs_stride[bs_index], stride, ARRAY_CHUNK_SIZE) :
nullptr;
const int layer_len = layer_end - layer_start;
@ -284,12 +300,12 @@ static void um_arraystore_cd_expand(const BArrayCustomData *bcd,
}
}
static void um_arraystore_cd_free(BArrayCustomData *bcd)
static void um_arraystore_cd_free(BArrayCustomData *bcd, const int bs_index)
{
while (bcd) {
BArrayCustomData *bcd_next = bcd->next;
const int stride = CustomData_sizeof(bcd->type);
BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride, stride);
BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride[bs_index], stride);
for (int i = 0; i < bcd->states_len; i++) {
if (bcd->states[i]) {
BLI_array_store_state_remove(bs, bcd->states[i]);
@ -309,56 +325,94 @@ static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool
{
Mesh *me = &um->me;
um_arraystore_cd_compact(
&me->vdata, me->totvert, create, um_ref ? um_ref->store.vdata : nullptr, &um->store.vdata);
um_arraystore_cd_compact(
&me->edata, me->totedge, create, um_ref ? um_ref->store.edata : nullptr, &um->store.edata);
um_arraystore_cd_compact(
&me->ldata, me->totloop, create, um_ref ? um_ref->store.ldata : nullptr, &um->store.ldata);
um_arraystore_cd_compact(
&me->pdata, me->totpoly, create, um_ref ? um_ref->store.pdata : nullptr, &um->store.pdata);
/* Compacting can be time-consuming, so run it in parallel.
*
* NOTE(@ideasman42): this could be further parallelized with every custom-data layer
* running in its own thread. If this is a bottleneck it's worth considering.
* At the moment it seems fast enough to split by element type.
* Since this is itself a background thread, using too many threads here could
* interfere with foreground tasks. */
blender::threading::parallel_invoke(
4096 < (me->totvert + me->totedge + me->totloop + me->totpoly),
[&]() {
um_arraystore_cd_compact(&me->vdata,
me->totvert,
create,
ARRAY_STORE_INDEX_VERT,
um_ref ? um_ref->store.vdata : nullptr,
&um->store.vdata);
},
[&]() {
um_arraystore_cd_compact(&me->edata,
me->totedge,
create,
ARRAY_STORE_INDEX_EDGE,
um_ref ? um_ref->store.edata : nullptr,
&um->store.edata);
},
[&]() {
um_arraystore_cd_compact(&me->ldata,
me->totloop,
create,
ARRAY_STORE_INDEX_LOOP,
um_ref ? um_ref->store.ldata : nullptr,
&um->store.ldata);
},
[&]() {
um_arraystore_cd_compact(&me->pdata,
me->totpoly,
create,
ARRAY_STORE_INDEX_POLY,
um_ref ? um_ref->store.pdata : nullptr,
&um->store.pdata);
},
[&]() {
if (me->key && me->key->totkey) {
const size_t stride = me->key->elemsize;
BArrayStore *bs = create ? BLI_array_store_at_size_ensure(
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_SHAPE],
stride,
ARRAY_CHUNK_SIZE) :
nullptr;
if (create) {
um->store.keyblocks = static_cast<BArrayState **>(
MEM_mallocN(me->key->totkey * sizeof(*um->store.keyblocks), __func__));
}
KeyBlock *keyblock = static_cast<KeyBlock *>(me->key->block.first);
for (int i = 0; i < me->key->totkey; i++, keyblock = keyblock->next) {
if (create) {
BArrayState *state_reference = (um_ref && um_ref->me.key &&
(i < um_ref->me.key->totkey)) ?
um_ref->store.keyblocks[i] :
nullptr;
um->store.keyblocks[i] = BLI_array_store_state_add(
bs, keyblock->data, size_t(keyblock->totelem) * stride, state_reference);
}
if (me->key && me->key->totkey) {
const size_t stride = me->key->elemsize;
BArrayStore *bs = create ? BLI_array_store_at_size_ensure(
&um_arraystore.bs_stride, stride, ARRAY_CHUNK_SIZE) :
nullptr;
if (create) {
um->store.keyblocks = static_cast<BArrayState **>(
MEM_mallocN(me->key->totkey * sizeof(*um->store.keyblocks), __func__));
}
KeyBlock *keyblock = static_cast<KeyBlock *>(me->key->block.first);
for (int i = 0; i < me->key->totkey; i++, keyblock = keyblock->next) {
if (create) {
BArrayState *state_reference = (um_ref && um_ref->me.key && (i < um_ref->me.key->totkey)) ?
um_ref->store.keyblocks[i] :
nullptr;
um->store.keyblocks[i] = BLI_array_store_state_add(
bs, keyblock->data, size_t(keyblock->totelem) * stride, state_reference);
}
if (keyblock->data) {
MEM_freeN(keyblock->data);
keyblock->data = nullptr;
}
}
}
},
[&]() {
if (me->mselect && me->totselect) {
BLI_assert(create == (um->store.mselect == nullptr));
if (create) {
BArrayState *state_reference = um_ref ? um_ref->store.mselect : nullptr;
const size_t stride = sizeof(*me->mselect);
BArrayStore *bs = BLI_array_store_at_size_ensure(
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_MSEL], stride, ARRAY_CHUNK_SIZE);
um->store.mselect = BLI_array_store_state_add(
bs, me->mselect, size_t(me->totselect) * stride, state_reference);
}
if (keyblock->data) {
MEM_freeN(keyblock->data);
keyblock->data = nullptr;
}
}
}
if (me->mselect && me->totselect) {
BLI_assert(create == (um->store.mselect == nullptr));
if (create) {
BArrayState *state_reference = um_ref ? um_ref->store.mselect : nullptr;
const size_t stride = sizeof(*me->mselect);
BArrayStore *bs = BLI_array_store_at_size_ensure(
&um_arraystore.bs_stride, stride, ARRAY_CHUNK_SIZE);
um->store.mselect = BLI_array_store_state_add(
bs, me->mselect, size_t(me->totselect) * stride, state_reference);
}
/* keep me->totselect for validation */
MEM_freeN(me->mselect);
me->mselect = nullptr;
}
/* keep me->totselect for validation */
MEM_freeN(me->mselect);
me->mselect = nullptr;
}
});
if (create) {
um_arraystore.users += 1;
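The parallel_invoke() call above follows a simple pattern: a boolean that enables threading only when there is enough work, followed by one callable per independent task. A minimal sketch of the same pattern, assuming Blender's BLI_task.hh header (task bodies are placeholders):

#include "BLI_task.hh"

static void compact_all_sketch(const int totvert, const int totedge, const int totloop, const int totpoly)
{
  /* Only spawn threads when the element count amortizes the scheduling overhead. */
  blender::threading::parallel_invoke(
      4096 < (totvert + totedge + totloop + totpoly),
      []() { /* compact vertex custom-data layers */ },
      []() { /* compact edge custom-data layers */ },
      []() { /* compact face-corner custom-data layers */ },
      []() { /* compact face custom-data layers */ });
}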
@ -376,9 +430,15 @@ static void um_arraystore_compact(UndoMesh *um, const UndoMesh *um_ref)
static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref)
{
# ifdef DEBUG_PRINT
size_t size_expanded_prev, size_compacted_prev;
BLI_array_store_at_size_calc_memory_usage(
&um_arraystore.bs_stride, &size_expanded_prev, &size_compacted_prev);
size_t size_expanded_prev = 0, size_compacted_prev = 0;
for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
size_t size_expanded_prev_iter, size_compacted_prev_iter;
BLI_array_store_at_size_calc_memory_usage(
&um_arraystore.bs_stride[bs_index], &size_expanded_prev_iter, &size_compacted_prev_iter);
size_expanded_prev += size_expanded_prev_iter;
size_compacted_prev += size_compacted_prev_iter;
}
# endif
# ifdef DEBUG_TIME
@ -393,9 +453,15 @@ static void um_arraystore_compact_with_info(UndoMesh *um, const UndoMesh *um_ref
# ifdef DEBUG_PRINT
{
size_t size_expanded, size_compacted;
BLI_array_store_at_size_calc_memory_usage(
&um_arraystore.bs_stride, &size_expanded, &size_compacted);
size_t size_expanded = 0, size_compacted = 0;
for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
size_t size_expanded_iter, size_compacted_iter;
BLI_array_store_at_size_calc_memory_usage(
&um_arraystore.bs_stride[bs_index], &size_expanded_iter, &size_compacted_iter);
size_expanded += size_expanded_iter;
size_compacted += size_compacted_iter;
}
const double percent_total = size_expanded ?
((double(size_compacted) / double(size_expanded)) * 100.0) :
@ -471,14 +537,15 @@ static void um_arraystore_free(UndoMesh *um)
{
Mesh *me = &um->me;
um_arraystore_cd_free(um->store.vdata);
um_arraystore_cd_free(um->store.edata);
um_arraystore_cd_free(um->store.ldata);
um_arraystore_cd_free(um->store.pdata);
um_arraystore_cd_free(um->store.vdata, ARRAY_STORE_INDEX_VERT);
um_arraystore_cd_free(um->store.edata, ARRAY_STORE_INDEX_EDGE);
um_arraystore_cd_free(um->store.ldata, ARRAY_STORE_INDEX_LOOP);
um_arraystore_cd_free(um->store.pdata, ARRAY_STORE_INDEX_POLY);
if (um->store.keyblocks) {
const size_t stride = me->key->elemsize;
BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride, stride);
BArrayStore *bs = BLI_array_store_at_size_get(
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_SHAPE], stride);
for (int i = 0; i < me->key->totkey; i++) {
BArrayState *state = um->store.keyblocks[i];
BLI_array_store_state_remove(bs, state);
@ -489,7 +556,8 @@ static void um_arraystore_free(UndoMesh *um)
if (um->store.mselect) {
const size_t stride = sizeof(*me->mselect);
BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride, stride);
BArrayStore *bs = BLI_array_store_at_size_get(&um_arraystore.bs_stride[ARRAY_STORE_INDEX_MSEL],
stride);
BArrayState *state = um->store.mselect;
BLI_array_store_state_remove(bs, state);
um->store.mselect = nullptr;
@ -503,8 +571,9 @@ static void um_arraystore_free(UndoMesh *um)
# ifdef DEBUG_PRINT
printf("mesh undo store: freeing all data!\n");
# endif
BLI_array_store_at_size_clear(&um_arraystore.bs_stride);
for (int bs_index = 0; bs_index < ARRAY_STORE_INDEX_NUM; bs_index++) {
BLI_array_store_at_size_clear(&um_arraystore.bs_stride[bs_index]);
}
# ifdef USE_ARRAY_STORE_THREAD
BLI_task_pool_free(um_arraystore.task_pool);
um_arraystore.task_pool = nullptr;


@ -2716,14 +2716,27 @@ static void count_multi_input_socket_links(bNodeTree &ntree, SpaceNode &snode)
}
}
static float frame_node_label_height(const NodeFrame &frame_data)
{
return frame_data.label_size * U.dpi_fac;
}
#define NODE_FRAME_MARGIN (1.5f * U.widget_unit)
/* XXX Does a bounding box update by iterating over all children.
* Not ideal to do this in every draw call, but doing as transform callback doesn't work,
* since the child node totr rects are not updated properly at that point. */
static void frame_node_prepare_for_draw(bNode &node, Span<bNode *> nodes)
{
const float margin = 1.5f * U.widget_unit;
NodeFrame *data = (NodeFrame *)node.storage;
const float margin = NODE_FRAME_MARGIN;
const float has_label = node.label[0] != '\0';
const float label_height = frame_node_label_height(*data);
/* Add an additional 25 % to account for the descenders. This works well in most cases. */
const float margin_top = 0.5f * margin + (has_label ? 1.25f * label_height : 0.5f * margin);
/* Initialize rect from current frame size. */
rctf rect;
node_to_updated_rect(node, rect);
@ -2743,7 +2756,7 @@ static void frame_node_prepare_for_draw(bNode &node, Span<bNode *> nodes)
noderect.xmin -= margin;
noderect.xmax += margin;
noderect.ymin -= margin;
noderect.ymax += margin;
noderect.ymax += margin_top;
/* First child initializes frame. */
if (bbinit) {
@ -2841,8 +2854,7 @@ static void frame_node_draw_label(TreeDrawContext &tree_draw_ctx,
BLF_enable(fontid, BLF_ASPECT);
BLF_aspect(fontid, aspect, aspect, 1.0f);
/* Clamp. Otherwise it can suck up a LOT of memory. */
BLF_size(fontid, MIN2(24.0f, font_size) * U.dpi_fac);
BLF_size(fontid, font_size * U.dpi_fac);
/* Title color. */
int color_id = node_get_colorid(tree_draw_ctx, node);
@ -2850,21 +2862,18 @@ static void frame_node_draw_label(TreeDrawContext &tree_draw_ctx,
UI_GetThemeColorBlendShade3ubv(TH_TEXT, color_id, 0.4f, 10, color);
BLF_color3ubv(fontid, color);
const float margin = float(NODE_DY / 4);
const float margin = NODE_FRAME_MARGIN;
const float width = BLF_width(fontid, label, sizeof(label));
const float ascender = BLF_ascender(fontid);
const int label_height = ((margin / aspect) + (ascender * aspect));
const int label_height = frame_node_label_height(*data);
/* 'x' doesn't need aspect correction */
const rctf &rct = node.runtime->totr;
/* XXX a bit hacky, should use separate align values for x and y. */
float x = BLI_rctf_cent_x(&rct) - (0.5f * width);
float y = rct.ymax - label_height;
const float label_x = BLI_rctf_cent_x(&rct) - (0.5f * width);
const float label_y = rct.ymax - label_height - (0.5f * margin);
/* label */
/* Label. */
const bool has_label = node.label[0] != '\0';
if (has_label) {
BLF_position(fontid, x, y, 0);
BLF_position(fontid, label_x, label_y, 0);
BLF_draw(fontid, label, sizeof(label));
}
@ -2875,11 +2884,10 @@ static void frame_node_draw_label(TreeDrawContext &tree_draw_ctx,
const float line_spacing = (line_height_max * aspect);
const float line_width = (BLI_rctf_size_x(&rct) - 2 * margin) / aspect;
/* 'x' doesn't need aspect correction. */
x = rct.xmin + margin;
y = rct.ymax - label_height - (has_label ? line_spacing : 0);
const float x = rct.xmin + margin;
float y = rct.ymax - label_height - (has_label ? line_spacing + margin : 0);
int y_min = y + ((margin * 2) - (y - rct.ymin));
const int y_min = rct.ymin + margin;
BLF_enable(fontid, BLF_CLIPPING | BLF_WORD_WRAP);
BLF_clipping(fontid, rct.xmin, rct.ymin + margin, rct.xmax, rct.ymax);


@ -898,27 +898,31 @@ static void displace_links(bNodeTree *ntree, const bNode *node, bNodeLink *inser
bNodeSocket *replacement_socket = node_find_linkable_socket(*ntree, node, linked_socket);
if (linked_socket->is_input()) {
if (linked_socket->limit + 1 < nodeSocketLinkLimit(linked_socket)) {
BLI_assert(!linked_socket->is_multi_input());
ntree->ensure_topology_cache();
bNodeLink *displaced_link = linked_socket->runtime->directly_linked_links.first();
if (!replacement_socket) {
nodeRemLink(ntree, displaced_link);
return;
}
LISTBASE_FOREACH_MUTABLE (bNodeLink *, link, &ntree->links) {
if (link->tosock == linked_socket) {
if (!replacement_socket) {
nodeRemLink(ntree, link);
BKE_ntree_update_tag_link_removed(ntree);
displaced_link->tosock = replacement_socket;
if (replacement_socket->is_multi_input()) {
/* Check for duplicate links when linking to multi input sockets. */
for (bNodeLink *existing_link : replacement_socket->runtime->directly_linked_links) {
if (existing_link->fromsock == displaced_link->fromsock) {
nodeRemLink(ntree, displaced_link);
return;
}
link->tosock = replacement_socket;
if (replacement_socket->is_multi_input()) {
link->multi_input_socket_index = node_socket_count_links(*ntree, *replacement_socket) -
1;
}
BKE_ntree_update_tag_link_changed(ntree);
return;
}
const int multi_input_index = node_socket_count_links(*ntree, *replacement_socket) - 1;
displaced_link->multi_input_socket_index = multi_input_index;
}
BKE_ntree_update_tag_link_changed(ntree);
return;
}
LISTBASE_FOREACH_MUTABLE (bNodeLink *, link, &ntree->links) {
@ -976,29 +980,46 @@ static void node_remove_existing_links_if_needed(bNodeLinkDrag &nldrag, bNodeTre
{
bNodeSocket &linked_socket = *nldrag.hovered_socket;
const int link_count = node_socket_count_links(ntree, linked_socket);
int link_count = node_socket_count_links(ntree, linked_socket);
const int link_limit = nodeSocketLinkLimit(&linked_socket);
Set<bNodeLink *> links_to_remove;
if (link_count < link_limit) {
return;
}
ntree.ensure_topology_cache();
if (linked_socket.is_input()) {
LISTBASE_FOREACH_MUTABLE (bNodeLink *, link, &ntree.links) {
if (link->tosock == &linked_socket) {
nodeRemLink(&ntree, link);
return;
/* Remove duplicate links first. */
for (const bNodeLink dragged_link : nldrag.links) {
if (linked_socket.is_input()) {
for (bNodeLink *link : linked_socket.runtime->directly_linked_links) {
const bool duplicate_link = link->fromsock == dragged_link.fromsock;
if (duplicate_link) {
links_to_remove.add(link);
link_count--;
}
}
}
else {
for (bNodeLink *link : linked_socket.runtime->directly_linked_links) {
const bool duplicate_link = link->tosock == dragged_link.tosock;
if (duplicate_link) {
links_to_remove.add(link);
link_count--;
}
}
}
}
else {
LISTBASE_FOREACH_MUTABLE (bNodeLink *, link, &ntree.links) {
if (link->fromsock == &linked_socket) {
nodeRemLink(&ntree, link);
return;
for (bNodeLink *link : linked_socket.runtime->directly_linked_links) {
const bool link_limit_exceeded = !(link_count < link_limit);
if (link_limit_exceeded) {
if (links_to_remove.add(link)) {
link_count--;
}
}
}
for (bNodeLink *link : links_to_remove) {
nodeRemLink(&ntree, link);
}
}
static void add_dragged_links_to_tree(bContext &C, bNodeLinkDrag &nldrag)


@ -219,7 +219,7 @@ void outliner_item_mode_toggle(bContext *C,
static void tree_element_viewlayer_activate(bContext *C, TreeElement *te)
{
/* paranoia check */
if (te->idcode != ID_SCE) {
if (te->store_elem->type != TSE_R_LAYER) {
return;
}


@ -79,8 +79,14 @@ ListBase TreeDisplayViewLayer::buildTree(const TreeSourceData &source_data)
}
else {
TreeElement &te_view_layer = *outliner_add_element(
&space_outliner_, &tree, scene, nullptr, TSE_R_LAYER, 0);
TREESTORE(&te_view_layer)->flag &= ~TSE_CLOSED;
&space_outliner_, &tree, view_layer, nullptr, TSE_R_LAYER, 0);
TreeStoreElem *tselem = TREESTORE(&te_view_layer);
if (!tselem->used) {
tselem->flag &= ~TSE_CLOSED;
}
te_view_layer.name = view_layer->name;
te_view_layer.directdata = view_layer;


@ -141,8 +141,7 @@ static bool ED_uvedit_ensure_uvs(Object *obedit)
/** \name UDIM Access
* \{ */
static void ED_uvedit_udim_params_from_image_space(const SpaceImage *sima,
UVPackIsland_Params *r_params)
void blender::geometry::UVPackIsland_Params::setUDIMOffsetFromSpaceImage(const SpaceImage *sima)
{
if (!sima) {
return; /* Nothing to do. */
@ -155,8 +154,8 @@ static void ED_uvedit_udim_params_from_image_space(const SpaceImage *sima,
ImageTile *active_tile = static_cast<ImageTile *>(
BLI_findlink(&image->tiles, image->active_tile_index));
if (active_tile) {
r_params->udim_base_offset[0] = (active_tile->tile_number - 1001) % 10;
r_params->udim_base_offset[1] = (active_tile->tile_number - 1001) / 10;
udim_base_offset[0] = (active_tile->tile_number - 1001) % 10;
udim_base_offset[1] = (active_tile->tile_number - 1001) / 10;
}
return;
}
@ -164,8 +163,8 @@ static void ED_uvedit_udim_params_from_image_space(const SpaceImage *sima,
/* TODO: Support storing an active UDIM when there are no tiles present.
* Until then, use 2D cursor to find the active tile index for the UDIM grid. */
if (uv_coords_isect_udim(sima->image, sima->tile_grid_shape, sima->cursor)) {
r_params->udim_base_offset[0] = floorf(sima->cursor[0]);
r_params->udim_base_offset[1] = floorf(sima->cursor[1]);
udim_base_offset[0] = floorf(sima->cursor[0]);
udim_base_offset[1] = floorf(sima->cursor[1]);
}
}
/** \} */
@ -195,6 +194,15 @@ struct UnwrapOptions {
bool pin_unselected;
};
void blender::geometry::UVPackIsland_Params::setFromUnwrapOptions(const UnwrapOptions &options)
{
only_selected_uvs = options.only_selected_uvs;
only_selected_faces = options.only_selected_faces;
use_seams = !options.topology_from_uvs || options.topology_from_uvs_use_seams;
correct_aspect = options.correct_aspect;
pin_unselected = options.pin_unselected;
}
static bool uvedit_have_selection(const Scene *scene, BMEditMesh *em, const UnwrapOptions *options)
{
BMFace *efa;
@ -1250,7 +1258,7 @@ static float uv_nearest_grid_tile_distance(const int udim_grid[2],
static bool island_has_pins(const Scene *scene,
FaceIsland *island,
const UVPackIsland_Params *params)
const blender::geometry::UVPackIsland_Params *params)
{
const bool pin_unselected = params->pin_unselected;
const bool only_selected_faces = params->only_selected_faces;
@ -1291,7 +1299,7 @@ static void uvedit_pack_islands_multi(const Scene *scene,
const int objects_len,
BMesh **bmesh_override,
const UVMapUDIM_Params *closest_udim,
const UVPackIsland_Params *params)
const blender::geometry::UVPackIsland_Params *params)
{
blender::Vector<FaceIsland *> island_vector;
@ -1502,14 +1510,10 @@ static int pack_islands_exec(bContext *C, wmOperator *op)
RNA_float_set(op->ptr, "margin", scene->toolsettings->uvcalc_margin);
}
UVPackIsland_Params pack_island_params{};
blender::geometry::UVPackIsland_Params pack_island_params;
pack_island_params.setFromUnwrapOptions(options);
pack_island_params.rotate = RNA_boolean_get(op->ptr, "rotate");
pack_island_params.only_selected_uvs = options.only_selected_uvs;
pack_island_params.only_selected_faces = options.only_selected_faces;
pack_island_params.use_seams = !options.topology_from_uvs || options.topology_from_uvs_use_seams;
pack_island_params.correct_aspect = options.correct_aspect;
pack_island_params.ignore_pinned = false;
pack_island_params.pin_unselected = options.pin_unselected;
pack_island_params.margin_method = eUVPackIsland_MarginMethod(
RNA_enum_get(op->ptr, "margin_method"));
pack_island_params.margin = RNA_float_get(op->ptr, "margin");
@ -1517,7 +1521,7 @@ static int pack_islands_exec(bContext *C, wmOperator *op)
UVMapUDIM_Params closest_udim_buf;
UVMapUDIM_Params *closest_udim = nullptr;
if (udim_source == PACK_UDIM_SRC_ACTIVE) {
ED_uvedit_udim_params_from_image_space(sima, &pack_island_params);
pack_island_params.setUDIMOffsetFromSpaceImage(sima);
}
else if (sima) {
BLI_assert(udim_source == PACK_UDIM_SRC_CLOSEST);
@ -2290,15 +2294,10 @@ void ED_uvedit_live_unwrap(const Scene *scene, Object **objects, int objects_len
options.correct_aspect = (scene->toolsettings->uvcalc_flag & UVCALC_NO_ASPECT_CORRECT) == 0;
uvedit_unwrap_multi(scene, objects, objects_len, &options, nullptr);
UVPackIsland_Params pack_island_params{};
blender::geometry::UVPackIsland_Params pack_island_params;
pack_island_params.setFromUnwrapOptions(options);
pack_island_params.rotate = true;
pack_island_params.only_selected_uvs = options.only_selected_uvs;
pack_island_params.only_selected_faces = options.only_selected_faces;
pack_island_params.use_seams = !options.topology_from_uvs ||
options.topology_from_uvs_use_seams;
pack_island_params.correct_aspect = options.correct_aspect;
pack_island_params.ignore_pinned = true;
pack_island_params.pin_unselected = options.pin_unselected;
pack_island_params.margin_method = ED_UVPACK_MARGIN_SCALED;
pack_island_params.margin = scene->toolsettings->uvcalc_margin;
@ -2436,14 +2435,10 @@ static int unwrap_exec(bContext *C, wmOperator *op)
int count_failed = 0;
uvedit_unwrap_multi(scene, objects, objects_len, &options, &count_changed, &count_failed);
UVPackIsland_Params pack_island_params{};
blender::geometry::UVPackIsland_Params pack_island_params;
pack_island_params.setFromUnwrapOptions(options);
pack_island_params.rotate = true;
pack_island_params.only_selected_uvs = options.only_selected_uvs;
pack_island_params.only_selected_faces = options.only_selected_faces;
pack_island_params.use_seams = !options.topology_from_uvs || options.topology_from_uvs_use_seams;
pack_island_params.correct_aspect = options.correct_aspect;
pack_island_params.ignore_pinned = true;
pack_island_params.pin_unselected = options.pin_unselected;
pack_island_params.margin_method = eUVPackIsland_MarginMethod(
RNA_enum_get(op->ptr, "margin_method"));
pack_island_params.margin = RNA_float_get(op->ptr, "margin");
@ -2819,7 +2814,7 @@ static int smart_project_exec(bContext *C, wmOperator *op)
/* Depsgraph refresh functions are called here. */
const bool correct_aspect = RNA_boolean_get(op->ptr, "correct_aspect");
UVPackIsland_Params params{};
blender::geometry::UVPackIsland_Params params;
params.rotate = true;
params.only_selected_uvs = only_selected_uvs;
params.only_selected_faces = true;
@ -3808,7 +3803,7 @@ void ED_uvedit_add_simple_uvs(Main *bmain, const Scene *scene, Object *ob)
uvedit_unwrap_cube_project(scene, bm, 2.0, false, false, nullptr);
/* Pack UVs. */
UVPackIsland_Params params{};
blender::geometry::UVPackIsland_Params params;
params.rotate = true;
params.only_selected_uvs = false;
params.only_selected_faces = false;


@ -3,6 +3,7 @@
#include "BLI_math_matrix.hh"
#include "BLI_span.hh"
#include "DNA_space_types.h"
#include "DNA_vec_types.h"
#pragma once
@ -11,14 +12,25 @@
* \ingroup geo
*/
struct UnwrapOptions;
enum eUVPackIsland_MarginMethod {
ED_UVPACK_MARGIN_SCALED = 0, /* Use scale of existing UVs to multiply margin. */
ED_UVPACK_MARGIN_ADD, /* Just add the margin, ignoring any UV scale. */
ED_UVPACK_MARGIN_FRACTION, /* Specify a precise fraction of final UV output. */
};
namespace blender::geometry {
/** See also #UnwrapOptions. */
struct UVPackIsland_Params {
class UVPackIsland_Params {
public:
/** Reasonable defaults. */
UVPackIsland_Params();
void setFromUnwrapOptions(const UnwrapOptions &options);
void setUDIMOffsetFromSpaceImage(const SpaceImage *sima);
/** Islands can be rotated to improve packing. */
bool rotate;
/** (In UV Editor) only pack islands which have one or more selected UVs. */
@ -41,8 +53,6 @@ struct UVPackIsland_Params {
float udim_base_offset[2];
};
namespace blender::geometry {
class PackIsland {
public:
rctf bounds_rect;


@ -1113,7 +1113,10 @@ static int poly_find_doubles(const OffsetIndices<int> poly_corners_offsets,
/* Fills the `r_buffer` buffer with the intersection of the arrays in `buffer_a` and `buffer_b`.
* `buffer_a` and `buffer_b` have a sequence of sorted, non-repeating indices representing
* polygons. */
const auto intersect = [](const Span<int> buffer_a, const Span<int> buffer_b, int *r_buffer) {
const auto intersect = [](const Span<int> buffer_a,
const Span<int> buffer_b,
const BitVector<> &is_double,
int *r_buffer) {
int result_num = 0;
int index_a = 0, index_b = 0;
while (index_a < buffer_a.size() && index_b < buffer_b.size()) {
@ -1127,7 +1130,12 @@ static int poly_find_doubles(const OffsetIndices<int> poly_corners_offsets,
}
else {
/* Equality. */
r_buffer[result_num++] = value_a;
/* Do not add duplicates.
* As they are already in the original array, this can cause buffer overflow. */
if (!is_double[value_a]) {
r_buffer[result_num++] = value_a;
}
index_a++;
index_b++;
}
@ -1214,6 +1222,7 @@ static int poly_find_doubles(const OffsetIndices<int> poly_corners_offsets,
int *isect_result = doubles_buffer.data() + doubles_buffer_num + 1;
/* `polys_a` are the polygons connected to the first corner. So skip the first corner. */
for (int corner_index : IndexRange(corner_first + 1, corner_num - 1)) {
elem_index = corners[corner_index];
link_offs = linked_polys_offset[elem_index];
@ -1227,8 +1236,10 @@ static int poly_find_doubles(const OffsetIndices<int> poly_corners_offsets,
polys_b_num--;
} while (poly_to_test != poly_index);
doubles_num = intersect(
Span<int>{polys_a, polys_a_num}, Span<int>{polys_b, polys_b_num}, isect_result);
doubles_num = intersect(Span<int>{polys_a, polys_a_num},
Span<int>{polys_b, polys_b_num},
is_double,
isect_result);
if (doubles_num == 0) {
break;
@ -1246,6 +1257,12 @@ static int poly_find_doubles(const OffsetIndices<int> poly_corners_offsets,
}
doubles_buffer_num += doubles_num;
doubles_offsets.append(++doubles_buffer_num);
if ((doubles_buffer_num + 1) == poly_num) {
/* The last slot is the remaining unduplicated polygon.
* Avoid checking intersection as there are no more slots left. */
break;
}
}
}
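For reference, since the `intersect` lambda is spread across several hunks above, the two-pointer merge it performs can be restated as a self-contained sketch (a hypothetical `intersect_sorted` helper, not part of this patch): both inputs are sorted, duplicate-free polygon-index arrays, and indices already flagged in `is_double` are skipped so a polygon cannot be appended to the result twice.

#include "BLI_bit_vector.hh"
#include "BLI_span.hh"

static int intersect_sorted(const blender::Span<int> a,
                            const blender::Span<int> b,
                            const blender::BitVector<> &is_double,
                            int *r_buffer)
{
  int result_num = 0;
  int ia = 0, ib = 0;
  while (ia < a.size() && ib < b.size()) {
    if (a[ia] < b[ib]) {
      ia++;
    }
    else if (b[ib] < a[ia]) {
      ib++;
    }
    else {
      /* Equal values: keep the polygon only if it is not already marked as a double. */
      if (!is_double[a[ia]]) {
        r_buffer[result_num++] = a[ia];
      }
      ia++;
      ib++;
    }
  }
  return result_num;
}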


@ -22,6 +22,22 @@
namespace blender::geometry {
UVPackIsland_Params::UVPackIsland_Params()
{
/* TEMPORARY, set every thing to "zero" for backwards compatibility. */
rotate = false;
only_selected_uvs = false;
only_selected_faces = false;
use_seams = false;
correct_aspect = false;
ignore_pinned = false;
pin_unselected = false;
margin = 0.001f;
margin_method = ED_UVPACK_MARGIN_SCALED;
udim_base_offset[0] = 0.0f;
udim_base_offset[1] = 0.0f;
}
/* Compact representation for AABB packers. */
class UVAABBIsland {
public:


@ -4179,18 +4179,10 @@ void uv_parametrizer_pack(ParamHandle *handle, float margin, bool do_rotate, boo
pack_island->bounds_rect.ymax = maxv[1];
}
UVPackIsland_Params params{};
UVPackIsland_Params params;
params.rotate = do_rotate;
params.only_selected_uvs = false;
params.only_selected_faces = false;
params.use_seams = false;
params.correct_aspect = false;
params.ignore_pinned = false;
params.pin_unselected = false;
params.margin = margin;
params.margin_method = ED_UVPACK_MARGIN_SCALED;
params.udim_base_offset[0] = 0.0f;
params.udim_base_offset[1] = 0.0f;
float scale[2] = {1.0f, 1.0f};
pack_islands(pack_island_vector, params, scale);
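With the parameters now carried by a constructor and member functions, the caller-side pattern used throughout this commit reduces to roughly the sketch below; `options` and `sima` stand in for the UnwrapOptions and SpaceImage available at a call site such as pack_islands_exec, and the explicit assignments mirror the values those callers set.

  blender::geometry::UVPackIsland_Params params; /* Constructor supplies the defaults shown above. */
  params.setFromUnwrapOptions(options);          /* Selection, seam and aspect-correction flags. */
  params.setUDIMOffsetFromSpaceImage(sima);      /* Optional: offset from the active UDIM tile. */
  params.rotate = true;
  params.margin_method = ED_UVPACK_MARGIN_SCALED;
  params.margin = 0.001f;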


@ -128,6 +128,13 @@ enum {
* Using a generic tag like #LIB_TAG_DOIT for this is just impossible, we need our very own.
*/
COLLECTION_TAG_RELATION_REBUILD = (1 << 0),
/**
* Mark the `gobject` list and/or its `runtime.gobject_hash` mapping as dirty, i.e. that their
* data is not reliable and should be cleaned-up or updated.
*
* This should typically only be set by ID remapping code.
*/
COLLECTION_TAG_COLLECTION_OBJECT_DIRTY = (1 << 1),
};
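The comment above describes a deferred-cleanup pattern: remapping code only marks the collection, and the actual rebuild happens later wherever the tag is seen. As a generic bit-flag sketch (illustration only, operating on a local variable rather than real Collection data):

  int tag = 0;
  tag |= COLLECTION_TAG_COLLECTION_OBJECT_DIRTY; /* Set by ID remapping when `gobject` data may be stale. */
  if (tag & COLLECTION_TAG_COLLECTION_OBJECT_DIRTY) {
    /* Rebuild or validate the `gobject` list and `runtime.gobject_hash` here. */
    tag &= ~COLLECTION_TAG_COLLECTION_OBJECT_DIRTY; /* Clear once the data is reliable again. */
  }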
/** #Collection.color_tag */


@ -113,12 +113,7 @@ typedef struct Object_Runtime {
/** Did last modifier stack generation need mapping support? */
char last_need_mapping;
/** Opaque data reserved for management of objects in collection context.
* E.g. used currently to check for potential duplicates of objects in a collection, after
* remapping process. */
char collection_management;
char _pad0[2];
char _pad0[3];
/** Only used for drawing the parent/child help-line. */
float parent_display_origin[3];


@ -654,11 +654,16 @@ static void rna_Image_pixels_set(PointerRNA *ptr, const float *values)
}
}
/* NOTE: Do update from the set() because typically pixels.foreach_set() is used to update
* the values, and it does not invoke the update(). */
ibuf->userflags |= IB_DISPLAY_BUFFER_INVALID | IB_MIPMAP_INVALID;
BKE_image_mark_dirty(ima, ibuf);
if (!G.background) {
BKE_image_free_gputextures(ima);
}
BKE_image_partial_update_mark_full_update(ima);
WM_main_add_notifier(NC_IMAGE | ND_DISPLAY, &ima->id);
}


@ -11,6 +11,8 @@
#include "BLI_math.h"
#include "BLI_utildefines.h"
#include "BLF_api.h"
#include "BLT_translation.h"
#include "DNA_curves_types.h"
@ -4164,6 +4166,12 @@ static bNodeSocket *rna_NodeOutputFile_slots_new(
return sock;
}
static void rna_FrameNode_label_size_update(Main *bmain, Scene *scene, PointerRNA *ptr)
{
BLF_cache_clear();
rna_Node_update(bmain, scene, ptr);
}
static void rna_ShaderNodeTexIES_mode_set(PointerRNA *ptr, int value)
{
bNode *node = (bNode *)ptr->data;
@ -4816,7 +4824,7 @@ static void def_frame(StructRNA *srna)
RNA_def_property_int_sdna(prop, NULL, "label_size");
RNA_def_property_range(prop, 8, 64);
RNA_def_property_ui_text(prop, "Label Font Size", "Font size to use for displaying the label");
RNA_def_property_update(prop, NC_NODE | ND_DISPLAY, NULL);
RNA_def_property_update(prop, NC_NODE | ND_DISPLAY, "rna_FrameNode_label_size_update");
}
static void def_clamp(StructRNA *srna)


@ -21,8 +21,6 @@ struct bNodeSocket *node_group_find_output_socket(struct bNode *groupnode, const
struct bNodeSocket *node_group_input_find_socket(struct bNode *node, const char *identifier);
struct bNodeSocket *node_group_output_find_socket(struct bNode *node, const char *identifier);
void node_internal_links_create(struct bNodeTree *ntree, struct bNode *node);
#ifdef __cplusplus
}
#endif


@ -66,13 +66,6 @@ Mesh *create_cylinder_or_cone_mesh(float radius_top,
GeometryNodeMeshCircleFillType fill_type,
ConeAttributeOutputs &attribute_outputs);
/**
* Copies the point domain attributes from `in_component` that are in the mask to `out_component`.
*/
void copy_point_attributes_based_on_mask(const GeometryComponent &in_component,
GeometryComponent &result_component,
Span<bool> masks,
bool invert);
/**
* Returns the parts of the geometry that are on the selection for the given domain. If the domain
* is not applicable for the component, e.g. face domain for point cloud, nothing happens to that


@ -22,7 +22,7 @@ class InstanceRotationFieldInput final : public bke::InstancesFieldInput {
GVArray get_varray_for_context(const bke::Instances &instances, IndexMask /*mask*/) const final
{
auto rotation_fn = [&](const int i) -> float3 {
return float3(math::to_euler(instances.transforms()[i]));
return float3(math::to_euler(math::normalize(instances.transforms()[i])));
};
return VArray<float3>::ForFunc(instances.instances_num(), rotation_fn);