Mesh: Reimplement and unify topology maps #107861

Merged
Hans Goudey merged 14 commits from HooglyBoogly/blender:cleanup-unify-mesh-maps into main 2023-05-24 13:17:03 +02:00
323 changed files with 5183 additions and 4148 deletions
Showing only changes of commit 9f2eef575d

View File

@ -8,7 +8,7 @@
# It also supports non-standard names for the library components.
#
# To use a custom IlmBase:
# - Set the variable ILMBASE_CUSTOM to True
# - Set the variable ILMBASE_CUSTOM to TRUE
# - Set the variable ILMBASE_CUSTOM_LIBRARIES to a list of the libraries to
# use, e.g. "SpiImath SpiHalf SpiIlmThread SpiIex"
# - Optionally set the variable ILMBASE_CUSTOM_INCLUDE_DIR to any
@ -20,7 +20,7 @@
#
# ILMBASE_INCLUDE_DIR - where to find half.h, IlmBaseConfig.h, etc.
# ILMBASE_LIBRARIES - list of libraries to link against when using IlmBase.
# ILMBASE_FOUND - True if IlmBase was found.
# ILMBASE_FOUND - TRUE if IlmBase was found.
# Other standard issue macros
include(FindPackageHandleStandardArgs)

View File

@ -8,7 +8,7 @@
# It also supports non-standard names for the library components.
#
# To use a custom OpenEXR
# - Set the variable OPENEXR_CUSTOM to True
# - Set the variable OPENEXR_CUSTOM to TRUE
# - Set the variable OPENEXR_CUSTOM_LIBRARY to the name of the library to
# use, e.g. "SpiIlmImf"
# - Optionally set the variable OPENEXR_CUSTOM_INCLUDE_DIR to any
@ -22,7 +22,7 @@
# OPENEXR_LIBRARIES - list of libraries to link against when using OpenEXR.
# This list does NOT include the IlmBase libraries.
# These are defined by the FindIlmBase module.
# OPENEXR_FOUND - True if OpenEXR was found.
# OPENEXR_FOUND - TRUE if OpenEXR was found.
# Other standard issue macros
include(SelectLibraryConfigurations)

View File

@ -20,14 +20,14 @@ else()
# Choose the best suitable libraries.
if(EXISTS ${LIBDIR_NATIVE_ABI})
set(LIBDIR ${LIBDIR_NATIVE_ABI})
set(WITH_LIBC_MALLOC_HOOK_WORKAROUND True)
set(WITH_LIBC_MALLOC_HOOK_WORKAROUND TRUE)
elseif(EXISTS ${LIBDIR_GLIBC228_ABI})
set(LIBDIR ${LIBDIR_GLIBC228_ABI})
if(WITH_MEM_JEMALLOC)
# jemalloc provides malloc hooks.
set(WITH_LIBC_MALLOC_HOOK_WORKAROUND False)
set(WITH_LIBC_MALLOC_HOOK_WORKAROUND FALSE)
else()
set(WITH_LIBC_MALLOC_HOOK_WORKAROUND True)
set(WITH_LIBC_MALLOC_HOOK_WORKAROUND TRUE)
endif()
endif()

View File

@ -39,8 +39,14 @@ if(CMAKE_C_COMPILER_ID MATCHES "Clang")
set(WITH_WINDOWS_STRIPPED_PDB OFF)
endif()
else()
if(WITH_BLENDER AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.28.29921) # MSVC 2019 16.9.16
message(FATAL_ERROR "Compiler is unsupported, MSVC 2019 16.9.16 or newer is required for building blender.")
if(WITH_BLENDER)
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.28.29921) # MSVC 2019 16.9.16
message(FATAL_ERROR "Compiler is unsupported, MSVC 2019 16.9.16 or newer is required for building blender.")
endif()
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 19.36.32532 AND # MSVC 2022 17.6.0 has a bad codegen
CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.37.32705) # But it is fixed in 2022 17.7 preview 1
message(FATAL_ERROR "Compiler is unsupported, MSVC 2022 17.6.x has codegen issues and cannot be used to build blender. Please use MSVC 17.5 for the time being.")
endif()
endif()
endif()
@ -1034,7 +1040,7 @@ endif()
if(WITH_VULKAN_BACKEND)
if(EXISTS ${LIBDIR}/vulkan)
set(VULKAN_FOUND On)
set(VULKAN_FOUND ON)
set(VULKAN_ROOT_DIR ${LIBDIR}/vulkan)
set(VULKAN_INCLUDE_DIR ${VULKAN_ROOT_DIR}/include)
set(VULKAN_INCLUDE_DIRS ${VULKAN_INCLUDE_DIR})
@ -1048,7 +1054,7 @@ endif()
if(WITH_VULKAN_BACKEND)
if(EXISTS ${LIBDIR}/shaderc)
set(SHADERC_FOUND On)
set(SHADERC_FOUND ON)
set(SHADERC_ROOT_DIR ${LIBDIR}/shaderc)
set(SHADERC_INCLUDE_DIR ${SHADERC_ROOT_DIR}/include)
set(SHADERC_INCLUDE_DIRS ${SHADERC_INCLUDE_DIR})

View File

@ -38,7 +38,7 @@ PROJECT_NAME = Blender
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = V3.6
PROJECT_NUMBER = V4.0
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a

View File

@ -67,7 +67,7 @@ if(UNIX AND NOT APPLE)
add_subdirectory(libc_compat)
endif()
if (WITH_RENDERDOC)
if(WITH_RENDERDOC)
add_subdirectory(renderdoc_dynload)
endif()

View File

@ -262,6 +262,11 @@ string OneapiDevice::oneapi_error_message()
return string(oneapi_error_string_);
}
int OneapiDevice::scene_max_shaders()
{
return scene_max_shaders_;
}
void *OneapiDevice::kernel_globals_device_pointer()
{
return kg_memory_device_;
@ -436,6 +441,9 @@ void OneapiDevice::const_copy_to(const char *name, void *host, size_t size)
/* Update the scene handle (it is different for each device in a multi-device setup). */
KernelData *const data = (KernelData *)host;
data->device_bvh = embree_scene;
/* We need this number later for proper local memory allocation. */
scene_max_shaders_ = data->max_shaders;
}
# endif

View File

@ -37,6 +37,7 @@ class OneapiDevice : public Device {
std::string oneapi_error_string_;
bool use_hardware_raytracing = false;
unsigned int kernel_features = 0;
int scene_max_shaders_ = 0;
public:
virtual BVHLayoutMask get_bvh_layout_mask(uint kernel_features) const override;
@ -61,6 +62,8 @@ class OneapiDevice : public Device {
string oneapi_error_message();
int scene_max_shaders();
void *kernel_globals_device_pointer();
void mem_alloc(device_memory &mem) override;

View File

@ -59,7 +59,7 @@ void OneapiDeviceQueue::init_execution()
void *kg_dptr = (void *)oneapi_device_->kernel_globals_device_pointer();
assert(device_queue);
assert(kg_dptr);
kernel_context_ = new KernelContext{device_queue, kg_dptr};
kernel_context_ = new KernelContext{device_queue, kg_dptr, 0};
debug_init_execution();
}
@ -78,12 +78,13 @@ bool OneapiDeviceQueue::enqueue(DeviceKernel kernel,
assert(signed_kernel_work_size >= 0);
size_t kernel_work_size = (size_t)signed_kernel_work_size;
assert(kernel_context_);
kernel_context_->scene_max_shaders = oneapi_device_->scene_max_shaders();
size_t kernel_local_size = oneapi_kernel_preferred_local_size(
kernel_context_->queue, (::DeviceKernel)kernel, kernel_work_size);
size_t uniformed_kernel_work_size = round_up(kernel_work_size, kernel_local_size);
assert(kernel_context_);
/* Call the oneAPI kernel DLL to launch the requested kernel. */
bool is_finished_ok = oneapi_device_->enqueue_kernel(
kernel_context_, kernel, uniformed_kernel_work_size, args);

View File

@ -39,6 +39,11 @@ class OneapiDeviceQueue : public DeviceQueue {
virtual void copy_to_device(device_memory &mem) override;
virtual void copy_from_device(device_memory &mem) override;
virtual bool supports_local_atomic_sort() const
{
return true;
}
protected:
OneapiDevice *oneapi_device_;
KernelContext *kernel_context_;

View File

@ -385,11 +385,17 @@ void PathTraceWorkGPU::enqueue_reset()
queue_->enqueue(DEVICE_KERNEL_INTEGRATOR_RESET, max_num_paths_, args);
queue_->zero_to_device(integrator_queue_counter_);
queue_->zero_to_device(integrator_shader_sort_counter_);
if (device_scene_->data.kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) {
if (integrator_shader_sort_counter_.size() != 0) {
queue_->zero_to_device(integrator_shader_sort_counter_);
}
if (device_scene_->data.kernel_features & KERNEL_FEATURE_NODE_RAYTRACE &&
integrator_shader_raytrace_sort_counter_.size() != 0)
{
queue_->zero_to_device(integrator_shader_raytrace_sort_counter_);
}
if (device_scene_->data.kernel_features & KERNEL_FEATURE_MNEE) {
if (device_scene_->data.kernel_features & KERNEL_FEATURE_MNEE &&
integrator_shader_mnee_sort_counter_.size() != 0)
{
queue_->zero_to_device(integrator_shader_mnee_sort_counter_);
}

View File

@ -847,6 +847,7 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
-DWITH_ONEAPI
-ffast-math
-O2
-D__KERNEL_LOCAL_ATOMIC_SORT__
-o"${cycles_kernel_oneapi_lib}"
-I"${CMAKE_CURRENT_SOURCE_DIR}/.."
${SYCL_CPP_FLAGS}

View File

@ -432,6 +432,17 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_SORTED_INDEX_DEFAULT_BLOCK_SIZE)
}
ccl_gpu_kernel_postfix
/* The oneAPI version needs the local_mem accessor in the arguments. */
#ifdef __KERNEL_ONEAPI__
ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
ccl_gpu_kernel_signature(integrator_sort_bucket_pass,
int num_states,
int partition_size,
int num_states_limit,
ccl_global int *indices,
int kernel_index,
sycl::local_accessor<int> &local_mem)
#else
ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
ccl_gpu_kernel_signature(integrator_sort_bucket_pass,
int num_states,
@ -439,9 +450,9 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
int num_states_limit,
ccl_global int *indices,
int kernel_index)
#endif
{
#if defined(__KERNEL_LOCAL_ATOMIC_SORT__)
int max_shaders = context.launch_params_metal.data.max_shaders;
ccl_global ushort *d_queued_kernel = (ccl_global ushort *)
kernel_integrator_state.path.queued_kernel;
ccl_global uint *d_shader_sort_key = (ccl_global uint *)
@ -449,6 +460,20 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
ccl_global int *key_offsets = (ccl_global int *)
kernel_integrator_state.sort_partition_key_offsets;
# ifdef __KERNEL_METAL__
int max_shaders = context.launch_params_metal.data.max_shaders;
# endif
# ifdef __KERNEL_ONEAPI__
/* The Metal backend doesn't have these particular ccl_gpu_* defines and the current kernel
 * code uses the metal_* names, so define them below to stay compatible with these kernels. */
int max_shaders = ((ONEAPIKernelContext *)kg)->__data->max_shaders;
int metal_local_id = ccl_gpu_thread_idx_x;
int metal_local_size = ccl_gpu_block_dim_x;
int metal_grid_id = ccl_gpu_block_idx_x;
ccl_gpu_shared int *threadgroup_array = local_mem.get_pointer();
# endif
gpu_parallel_sort_bucket_pass(num_states,
partition_size,
max_shaders,
@ -456,7 +481,7 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
d_queued_kernel,
d_shader_sort_key,
key_offsets,
(threadgroup int *)threadgroup_array,
(ccl_gpu_shared int *)threadgroup_array,
metal_local_id,
metal_local_size,
metal_grid_id);
@ -464,6 +489,17 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
}
ccl_gpu_kernel_postfix
/* The oneAPI version needs the local_mem accessor in the arguments. */
#ifdef __KERNEL_ONEAPI__
ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
ccl_gpu_kernel_signature(integrator_sort_write_pass,
int num_states,
int partition_size,
int num_states_limit,
ccl_global int *indices,
int kernel_index,
sycl::local_accessor<int> &local_mem)
#else
ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
ccl_gpu_kernel_signature(integrator_sort_write_pass,
int num_states,
@ -471,9 +507,10 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
int num_states_limit,
ccl_global int *indices,
int kernel_index)
#endif
{
#if defined(__KERNEL_LOCAL_ATOMIC_SORT__)
int max_shaders = context.launch_params_metal.data.max_shaders;
ccl_global ushort *d_queued_kernel = (ccl_global ushort *)
kernel_integrator_state.path.queued_kernel;
ccl_global uint *d_shader_sort_key = (ccl_global uint *)
@ -481,6 +518,20 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
ccl_global int *key_offsets = (ccl_global int *)
kernel_integrator_state.sort_partition_key_offsets;
# ifdef __KERNEL_METAL__
int max_shaders = context.launch_params_metal.data.max_shaders;
# endif
# ifdef __KERNEL_ONEAPI__
/* The Metal backend doesn't have these particular ccl_gpu_* defines and the current kernel
 * code uses the metal_* names, so define them below to stay compatible with these kernels. */
int max_shaders = ((ONEAPIKernelContext *)kg)->__data->max_shaders;
int metal_local_id = ccl_gpu_thread_idx_x;
int metal_local_size = ccl_gpu_block_dim_x;
int metal_grid_id = ccl_gpu_block_idx_x;
ccl_gpu_shared int *threadgroup_array = local_mem.get_pointer();
# endif
gpu_parallel_sort_write_pass(num_states,
partition_size,
max_shaders,
@ -490,7 +541,7 @@ ccl_gpu_kernel_threads(GPU_PARALLEL_SORT_BLOCK_SIZE)
d_queued_kernel,
d_shader_sort_key,
key_offsets,
(threadgroup int *)threadgroup_array,
(ccl_gpu_shared int *)threadgroup_array,
metal_local_id,
metal_local_size,
metal_grid_id);

View File

@ -23,11 +23,6 @@ CCL_NAMESPACE_BEGIN
#if defined(__KERNEL_LOCAL_ATOMIC_SORT__)
# define atomic_store_local(p, x) \
atomic_store_explicit((threadgroup atomic_int *)p, x, memory_order_relaxed)
# define atomic_load_local(p) \
atomic_load_explicit((threadgroup atomic_int *)p, memory_order_relaxed)
ccl_device_inline void gpu_parallel_sort_bucket_pass(const uint num_states,
const uint partition_size,
const uint max_shaders,
@ -45,7 +40,13 @@ ccl_device_inline void gpu_parallel_sort_bucket_pass(const uint num_states,
atomic_store_local(&buckets[local_id], 0);
}
# ifdef __KERNEL_ONEAPI__
/* NOTE(@nsirgien): Only local memory writes (buckets) matter here,
 * so the faster local barrier can be used. */
ccl_gpu_local_syncthreads();
# else
ccl_gpu_syncthreads();
# endif
/* Determine bucket sizes within the partitions. */
@ -58,11 +59,17 @@ ccl_device_inline void gpu_parallel_sort_bucket_pass(const uint num_states,
ushort kernel_index = d_queued_kernel[state_index];
if (kernel_index == queued_kernel) {
uint key = d_shader_sort_key[state_index] % max_shaders;
atomic_fetch_and_add_uint32(&buckets[key], 1);
atomic_fetch_and_add_uint32_shared(&buckets[key], 1);
}
}
# ifdef __KERNEL_ONEAPI__
/* NOTE(@nsirgien): Only local memory writes (buckets) matter here,
 * so the faster local barrier can be used. */
ccl_gpu_local_syncthreads();
# else
ccl_gpu_syncthreads();
# endif
/* Calculate the partition's local offsets from the prefix sum of bucket sizes. */
@ -106,7 +113,13 @@ ccl_device_inline void gpu_parallel_sort_write_pass(const uint num_states,
atomic_store_local(&local_offset[local_id], key_offsets[local_id] + partition_offset);
}
# ifdef __KERNEL_ONEAPI__
/* NOTE(@nsirgien): Only local memory writes (local_offset) matter here,
 * so the faster local barrier can be used. */
ccl_gpu_local_syncthreads();
# else
ccl_gpu_syncthreads();
# endif
/* Write the sorted active indices. */
@ -121,7 +134,7 @@ ccl_device_inline void gpu_parallel_sort_write_pass(const uint num_states,
ushort kernel_index = d_queued_kernel[state_index];
if (kernel_index == queued_kernel) {
uint key = d_shader_sort_key[state_index] % max_shaders;
int index = atomic_fetch_and_add_uint32(&local_offset[key], 1);
int index = atomic_fetch_and_add_uint32_shared(&local_offset[key], 1);
if (index < num_states_limit) {
indices[index] = state_index;
}

View File

@ -48,6 +48,7 @@
#define ccl_loop_no_unroll
#define ccl_optional_struct_init
#define ccl_private
#define ccl_gpu_shared
#define ATTR_FALLTHROUGH __attribute__((fallthrough))
#define ccl_constant const
#define ccl_try_align(...) __attribute__((aligned(__VA_ARGS__)))

View File

@ -2,8 +2,40 @@
* Copyright 2021-2022 Intel Corporation */
#ifdef WITH_NANOVDB
/* Data type to replace `double` used in the NanoVDB headers. Cycles doesn't need doubles, and it
 * is safer and more portable to never use the double datatype on the GPU.
 * Use a special structure so that the following holds:
 * - No unnoticed implicit casts or mathematical operations are used on a scalar 64-bit type
 *   (which rules out tricks like using `uint64_t` as a drop-in replacement for double).
 * - Padding rules match `double` exactly
 *   (which rules out an array of `uint8_t`). */
typedef struct ccl_vdb_double_t {
union ccl_vdb_helper_t {
double d;
uint64_t i;
};
uint64_t i;
ccl_vdb_double_t(double value)
{
ccl_vdb_helper_t helper;
helper.d = value;
i = helper.i;
}
/* We intentionally allow conversion to float in order to work around compilation errors
 * for defined math functions that take doubles. */
operator float() const
{
ccl_vdb_helper_t helper;
helper.i = i;
return (float)helper.d;
}
} ccl_vdb_double_t;
# define double ccl_vdb_double_t
# include <nanovdb/NanoVDB.h>
# include <nanovdb/util/SampleFromVoxels.h>
# undef double
#endif
/* clang-format off */
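
For illustration, a minimal sketch of how the wrapper above behaves (the variable names are hypothetical, not part of the patch): the constructor stores the exact bit pattern of the replaced double, and the only supported read path is the implicit conversion to float.

    ccl_vdb_double_t voxel_size(0.5); /* Bit pattern of 0.5 is stored in `i`. */
    float scaled = 2.0f * voxel_size; /* Implicit operator float(), yields 1.0f. */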

View File

@ -109,7 +109,10 @@ size_t oneapi_kernel_preferred_local_size(SyclQueue *queue,
assert(queue);
(void)kernel_global_size;
const static size_t preferred_work_group_size_intersect_shading = 32;
const static size_t preferred_work_group_size_technical = 1024;
/* Shader evaluation kernels seem to use some amount of shared memory, so it is better
 * to avoid the maximum work group size for them. */
const static size_t preferred_work_group_size_shader_evaluation = 256;
const static size_t preferred_work_group_size_default = 1024;
size_t preferred_work_group_size = 0;
switch (kernel) {
@ -133,19 +136,36 @@ size_t oneapi_kernel_preferred_local_size(SyclQueue *queue,
case DEVICE_KERNEL_INTEGRATOR_QUEUED_SHADOW_PATHS_ARRAY:
case DEVICE_KERNEL_INTEGRATOR_ACTIVE_PATHS_ARRAY:
case DEVICE_KERNEL_INTEGRATOR_TERMINATED_PATHS_ARRAY:
case DEVICE_KERNEL_INTEGRATOR_SORTED_PATHS_ARRAY:
case DEVICE_KERNEL_INTEGRATOR_COMPACT_PATHS_ARRAY:
case DEVICE_KERNEL_INTEGRATOR_COMPACT_STATES:
case DEVICE_KERNEL_INTEGRATOR_TERMINATED_SHADOW_PATHS_ARRAY:
case DEVICE_KERNEL_INTEGRATOR_COMPACT_PATHS_ARRAY:
case DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_PATHS_ARRAY:
preferred_work_group_size = GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE;
break;
case DEVICE_KERNEL_INTEGRATOR_SORTED_PATHS_ARRAY:
case DEVICE_KERNEL_INTEGRATOR_COMPACT_STATES:
case DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_STATES:
case DEVICE_KERNEL_INTEGRATOR_RESET:
case DEVICE_KERNEL_INTEGRATOR_SHADOW_CATCHER_COUNT_POSSIBLE_SPLITS:
preferred_work_group_size = preferred_work_group_size_technical;
preferred_work_group_size = GPU_PARALLEL_SORTED_INDEX_DEFAULT_BLOCK_SIZE;
break;
case DEVICE_KERNEL_INTEGRATOR_SORT_BUCKET_PASS:
case DEVICE_KERNEL_INTEGRATOR_SORT_WRITE_PASS:
preferred_work_group_size = GPU_PARALLEL_SORT_BLOCK_SIZE;
break;
case DEVICE_KERNEL_PREFIX_SUM:
preferred_work_group_size = GPU_PARALLEL_PREFIX_SUM_DEFAULT_BLOCK_SIZE;
break;
case DEVICE_KERNEL_SHADER_EVAL_DISPLACE:
case DEVICE_KERNEL_SHADER_EVAL_BACKGROUND:
case DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY:
preferred_work_group_size = preferred_work_group_size_shader_evaluation;
break;
default:
preferred_work_group_size = 512;
preferred_work_group_size = preferred_work_group_size_default;
break;
}
const size_t limit_work_group_size = reinterpret_cast<sycl::queue *>(queue)
@ -316,12 +336,6 @@ bool oneapi_enqueue_kernel(KernelContext *kernel_context,
kernel_context->queue, device_kernel, global_size);
assert(global_size % local_size == 0);
/* Local size for DEVICE_KERNEL_INTEGRATOR_ACTIVE_PATHS_ARRAY needs to be enforced so we
* overwrite it outside of oneapi_kernel_preferred_local_size. */
if (device_kernel == DEVICE_KERNEL_INTEGRATOR_ACTIVE_PATHS_ARRAY) {
local_size = GPU_PARALLEL_ACTIVE_INDEX_DEFAULT_BLOCK_SIZE;
}
/* Kernels listed below need a specific number of work groups. */
if (device_kernel == DEVICE_KERNEL_INTEGRATOR_ACTIVE_PATHS_ARRAY ||
device_kernel == DEVICE_KERNEL_INTEGRATOR_QUEUED_PATHS_ARRAY ||
@ -353,6 +367,14 @@ bool oneapi_enqueue_kernel(KernelContext *kernel_context,
# pragma GCC diagnostic error "-Wswitch"
# endif
int max_shaders = 0;
if (device_kernel == DEVICE_KERNEL_INTEGRATOR_SORT_BUCKET_PASS ||
device_kernel == DEVICE_KERNEL_INTEGRATOR_SORT_WRITE_PASS)
{
max_shaders = (kernel_context->scene_max_shaders);
}
try {
queue->submit([&](sycl::handler &cgh) {
# ifdef WITH_EMBREE_GPU
@ -495,13 +517,31 @@ bool oneapi_enqueue_kernel(KernelContext *kernel_context,
break;
}
case DEVICE_KERNEL_INTEGRATOR_SORT_BUCKET_PASS: {
oneapi_call(
kg, cgh, global_size, local_size, args, oneapi_kernel_integrator_sort_bucket_pass);
sycl::local_accessor<int> local_mem(max_shaders, cgh);
oneapi_kernel_integrator_sort_bucket_pass(kg,
global_size,
local_size,
cgh,
*(int *)(args[0]),
*(int *)(args[1]),
*(int *)(args[2]),
*(int **)(args[3]),
*(int *)(args[4]),
local_mem);
break;
}
case DEVICE_KERNEL_INTEGRATOR_SORT_WRITE_PASS: {
oneapi_call(
kg, cgh, global_size, local_size, args, oneapi_kernel_integrator_sort_write_pass);
sycl::local_accessor<int> local_mem(max_shaders, cgh);
oneapi_kernel_integrator_sort_write_pass(kg,
global_size,
local_size,
cgh,
*(int *)(args[0]),
*(int *)(args[1]),
*(int *)(args[2]),
*(int **)(args[3]),
*(int *)(args[4]),
local_mem);
break;
}
case DEVICE_KERNEL_INTEGRATOR_COMPACT_PATHS_ARRAY: {

View File

@ -32,6 +32,8 @@ struct KernelContext {
SyclQueue *queue;
/* Pointer to USM device memory with all global/constant allocation on this device */
void *kernel_globals;
/* We need this additional data for some kernels. */
int scene_max_shaders;
};
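
Taken together with the device and queue changes earlier in this patch, the intended flow for the new field is roughly the following (a condensed sketch of code already shown above, not a new API):

    /* Host side: the device caches max_shaders in const_copy_to(), the queue copies it into
     * the kernel context, and the enqueue path uses it to size work-group local memory. */
    kernel_context_->scene_max_shaders = oneapi_device_->scene_max_shaders();
    sycl::local_accessor<int> local_mem(max_shaders, cgh); /* One int bucket per shader. */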
/* Use extern C linking so that the symbols can be easily loaded from the dynamic library at runtime.

View File

@ -21,6 +21,10 @@
#else /* __KERNEL_GPU__ */
# ifndef __KERNEL_ONEAPI__
# define atomic_fetch_and_add_uint32_shared atomic_fetch_and_add_uint32
# endif
# if defined(__KERNEL_CUDA__) || defined(__KERNEL_HIP__)
# define atomic_add_and_fetch_float(p, x) (atomicAdd((float *)(p), (float)(x)) + (float)(x))
@ -140,6 +144,11 @@ ccl_device_inline float atomic_compare_and_swap_float(volatile ccl_global float
# define atomic_store(p, x) atomic_store_explicit(p, x, memory_order_relaxed)
# define atomic_fetch(p) atomic_load_explicit(p, memory_order_relaxed)
# define atomic_store_local(p, x) \
atomic_store_explicit((ccl_gpu_shared atomic_int *)p, x, memory_order_relaxed)
# define atomic_load_local(p) \
atomic_load_explicit((ccl_gpu_shared atomic_int *)p, memory_order_relaxed)
# define CCL_LOCAL_MEM_FENCE mem_flags::mem_threadgroup
# define ccl_barrier(flags) threadgroup_barrier(flags)
@ -191,6 +200,16 @@ ccl_device_inline int atomic_fetch_and_add_uint32(ccl_global int *p, int x)
return atomic.fetch_add(x);
}
ccl_device_inline int atomic_fetch_and_add_uint32_shared(int *p, int x)
{
sycl::atomic_ref<int,
sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
atomic(*p);
return atomic.fetch_add(x);
}
ccl_device_inline unsigned int atomic_fetch_and_sub_uint32(ccl_global unsigned int *p,
unsigned int x)
{
@ -253,6 +272,26 @@ ccl_device_inline int atomic_fetch_and_or_uint32(ccl_global int *p, int x)
return atomic.fetch_or(x);
}
ccl_device_inline void atomic_store_local(int *p, int x)
{
sycl::atomic_ref<int,
sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
atomic(*p);
atomic.store(x);
}
ccl_device_inline int atomic_load_local(int *p)
{
sycl::atomic_ref<int,
sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
atomic(*p);
return atomic.load();
}
# endif /* __KERNEL_ONEAPI__ */
#endif /* __KERNEL_GPU__ */
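
The distinction that matters in the additions above is the SYCL address space: the new `_shared` and `_local` helpers use `sycl::access::address_space::local_space` and therefore operate on work-group local memory, while the pre-existing `atomic_fetch_and_add_uint32` targets global memory. A hedged usage sketch mirroring the bucket pass earlier in this patch (in oneAPI the `buckets` pointer actually comes from the `sycl::local_accessor` kernel argument rather than a static array):

    atomic_store_local(&buckets[local_id], 0);            /* Relaxed store into local memory. */
    atomic_fetch_and_add_uint32_shared(&buckets[key], 1); /* Relaxed add on the same local buffer. */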

View File

@ -1261,7 +1261,7 @@ GHOST_TSuccess GHOST_SystemCocoa::handleDraggingEvent(GHOST_TEventType eventType
![bitmapImage isPlanar])
{
/* Try a fast copy if the image is a meshed RGBA 32bit bitmap. */
toIBuf = (uint8_t *)ibuf->rect;
toIBuf = ibuf->byte_buffer.data;
rasterRGB = (uint8_t *)[bitmapImage bitmapData];
for (y = 0; y < imgSize.height; y++) {
to_i = (imgSize.height - y - 1) * imgSize.width;
@ -1338,7 +1338,7 @@ GHOST_TSuccess GHOST_SystemCocoa::handleDraggingEvent(GHOST_TEventType eventType
}
/* Copy the image to ibuf, flipping it vertically. */
toIBuf = (uint8_t *)ibuf->rect;
toIBuf = ibuf->byte_buffer.data;
for (y = 0; y < imgSize.height; y++) {
for (x = 0; x < imgSize.width; x++) {
to_i = (imgSize.height - y - 1) * imgSize.width + x;

View File

@ -173,7 +173,7 @@ static bool use_gnome_confine_hack = false;
* This is a hack because it seems there is no way to check if the compositor supports
* server side decorations when initializing WAYLAND.
*/
#if defined(WITH_GHOST_WAYLAND_LIBDECOR) && defined(WITH_GHOST_X11)
#ifdef WITH_GHOST_WAYLAND_LIBDECOR
# define USE_GNOME_NEEDS_LIBDECOR_HACK
#endif
@ -1333,6 +1333,22 @@ static void ghost_wayland_log_handler(const char *msg, va_list arg)
}
}
#ifdef WITH_GHOST_X11
/**
* Check if the system is running X11.
* This is not intended to be a fool-proof check (the `DISPLAY` value is not validated, for example);
* it just checks that `DISPLAY` is set and non-empty.
*/
static bool ghost_wayland_is_x11_available()
{
const char *x11_display = getenv("DISPLAY");
if (x11_display && x11_display[0]) {
return true;
}
return false;
}
#endif /* WITH_GHOST_X11 */
static GHOST_TKey xkb_map_gkey(const xkb_keysym_t sym)
{
@ -5517,12 +5533,21 @@ GHOST_SystemWayland::GHOST_SystemWayland(bool background)
}
#ifdef WITH_GHOST_WAYLAND_LIBDECOR
/* Ignore windowing requirements when running in background mode,
* as it doesn't make sense to fall back to X11 because of windowing functionality
* in background mode, also LIBDECOR is crashing in background mode `blender -b -f 1`
* for e.g. while it could be fixed, requiring the library at all makes no sense . */
if (background) {
display_->libdecor_required = false;
if (display_->libdecor_required) {
/* Ignore windowing requirements when running in background mode, as it doesn't make sense
 * to fall back to X11 for windowing functionality in background mode. LIBDECOR also crashes
 * in background mode (e.g. `blender -b -f 1`); while that could be fixed, requiring the
 * library at all makes no sense. */
if (background) {
display_->libdecor_required = false;
}
# ifdef WITH_GHOST_X11
else if (!has_libdecor && !ghost_wayland_is_x11_available()) {
/* Only require LIBDECOR when X11 is available, otherwise there is nothing to fall back to.
 * It's better to open without window decorations than to fail entirely. */
display_->libdecor_required = false;
}
# endif /* WITH_GHOST_X11 */
}
if (display_->libdecor_required) {

View File

@ -2422,7 +2422,7 @@ static uint *getClipboardImageImBuf(int *r_width, int *r_height, UINT format)
*r_width = ibuf->x;
*r_height = ibuf->y;
rgba = (uint *)malloc(4 * ibuf->x * ibuf->y);
memcpy(rgba, ibuf->rect, 4 * ibuf->x * ibuf->y);
memcpy(rgba, ibuf->byte_buffer.data, 4 * ibuf->x * ibuf->y);
IMB_freeImBuf(ibuf);
}
@ -2513,7 +2513,7 @@ static bool putClipboardImagePNG(uint *rgba, int width, int height)
UINT cf = RegisterClipboardFormat("PNG");
/* Load buffer into ImBuf, convert to PNG. */
ImBuf *ibuf = IMB_allocFromBuffer(rgba, nullptr, width, height, 32);
ImBuf *ibuf = IMB_allocFromBuffer(reinterpret_cast<uint8_t *>(rgba), nullptr, width, height, 32);
ibuf->ftype = IMB_FTYPE_PNG;
ibuf->foptions.quality = 15;
if (!IMB_saveiff(ibuf, "<memory>", IB_rect | IB_mem)) {
@ -2521,7 +2521,7 @@ static bool putClipboardImagePNG(uint *rgba, int width, int height)
return false;
}
HGLOBAL hMem = GlobalAlloc(GHND, ibuf->encodedbuffersize);
HGLOBAL hMem = GlobalAlloc(GHND, ibuf->encoded_buffer_size);
if (!hMem) {
IMB_freeImBuf(ibuf);
return false;
@ -2534,7 +2534,7 @@ static bool putClipboardImagePNG(uint *rgba, int width, int height)
return false;
}
memcpy(pMem, ibuf->encodedbuffer, ibuf->encodedbuffersize);
memcpy(pMem, ibuf->encoded_buffer.data, ibuf->encoded_buffer_size);
GlobalUnlock(hMem);
IMB_freeImBuf(ibuf);

View File

@ -9,9 +9,9 @@ set(INC_SYS
)
set(SRC
intern/renderdoc_api.cc
intern/renderdoc_api.cc
include/renderdoc_api.hh
include/renderdoc_api.hh
)
blender_add_lib(bf_intern_renderdoc_dynload "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")

Binary file not shown.

View File

@ -78,29 +78,30 @@ class PREFERENCES_OT_copy_prev(Operator):
@classmethod
def previous_version(cls):
# Find config folder from previous version.
import os
version = bpy.app.version
version_new = ((version[0] * 100) + version[1])
version_old = ((version[0] * 100) + version[1]) - 1
# Find config folder from previous version.
#
# Always allow loading startup data from any release in the current major release cycle, and from the previous one.
# Ensure we only try to copy files from a point release.
# The check below ensures the second numbers match.
while (version_new % 100) // 10 == (version_old % 100) // 10:
version_split = version_old // 100, version_old % 100
if os.path.isdir(cls._old_version_path(version_split)):
return version_split
version_old = version_old - 1
# NOTE: This value may need to be updated when the release cycle system is modified.
# This could be `6` in theory (Blender 3.6 LTS); give it a bit of extra room so that it does not
# have to be updated if a 3.7 release ever exists.
MAX_MINOR_VERSION_FOR_PREVIOUS_MAJOR_LOOKUP = 10
# Support loading 2.8x..2.9x startup (any older isn't so useful to load).
# NOTE: remove this block for Blender 4.0 and later.
if version_old == 299:
version_old = 294
while version_old >= 280:
version_split = version_old // 100, version_old % 100
if os.path.isdir(cls._old_version_path(version_split)):
return version_split
version_old = version_old - 1
version_new = bpy.app.version[:2]
version_old = [version_new[0], version_new[1] - 1]
while True:
while version_old[1] >= 0:
if os.path.isdir(cls._old_version_path(version_old)):
return tuple(version_old)
version_old[1] -= 1
if version_new[0] == version_old[0]:
# Retry with older major version.
version_old[0] -= 1
version_old[1] = MAX_MINOR_VERSION_FOR_PREVIOUS_MAJOR_LOOKUP
else:
break
return None

View File

@ -613,6 +613,7 @@ class NODE_MT_category_GEO_VOLUME(Menu):
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodeMeanFilterSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeOffsetSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeSampleVolume")
node_add_menu.add_node_type(layout, "GeometryNodeSDFVolumeSphere")
node_add_menu.add_node_type(layout, "GeometryNodeInputSignedDistance")
node_add_menu.draw_assets_for_catalog(layout, self.bl_label)

View File

@ -147,6 +147,9 @@ class VIEWLAYER_PT_eevee_layer_passes_effects(ViewLayerButtonsPanel, Panel):
col.prop(view_layer_eevee, "use_pass_bloom", text="Bloom")
col.active = scene_eevee.use_bloom
col = layout.column()
col.prop(view_layer_eevee, "use_pass_transparent")
class ViewLayerAOVPanel(ViewLayerButtonsPanel, Panel):
bl_label = "Shader AOV"

View File

@ -61,8 +61,8 @@ set(SRC_DNA_INC
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_pointcache_types.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_pointcloud_types.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_rigidbody_types.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_scene_types.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_scene_enums.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_scene_types.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_screen_types.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_sdna_types.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_sequence_types.h

View File

@ -17,7 +17,7 @@ extern "C" {
*/
/* Blender major and minor version. */
#define BLENDER_VERSION 306
#define BLENDER_VERSION 400
/* Blender patch version for bugfix releases. */
#define BLENDER_VERSION_PATCH 0
/** Blender release cycle stage: alpha/beta/rc/release. */
@ -25,12 +25,12 @@ extern "C" {
/* Blender file format version. */
#define BLENDER_FILE_VERSION BLENDER_VERSION
#define BLENDER_FILE_SUBVERSION 10
#define BLENDER_FILE_SUBVERSION 1
/* Minimum Blender version that supports reading file written with the current
* version. Older Blender versions will test this and show a warning if the file
* was written with too new a version. */
#define BLENDER_FILE_MIN_VERSION 305
#define BLENDER_FILE_MIN_VERSION 306
#define BLENDER_FILE_MIN_SUBVERSION 9
/** User readable version string. */

View File

@ -322,7 +322,7 @@ void BKE_collection_blend_read_expand(struct BlendExpander *expander,
void BKE_collection_compat_blend_read_data(struct BlendDataReader *reader,
struct SceneCollection *sc);
void BKE_collection_compat_blend_read_lib(struct BlendLibReader *reader,
struct Library *lib,
struct ID *self_id,
struct SceneCollection *sc);
void BKE_collection_compat_blend_read_expand(struct BlendExpander *expander,
struct SceneCollection *sc);

View File

@ -314,6 +314,9 @@ void CustomData_free_layers(struct CustomData *data, eCustomDataType type, int t
* Returns true if a layer with the specified type exists.
*/
bool CustomData_has_layer(const struct CustomData *data, eCustomDataType type);
bool CustomData_has_layer_named(const struct CustomData *data,
eCustomDataType type,
const char *name);
/**
* Returns the number of layers with this type.

View File

@ -2,8 +2,6 @@
#pragma once
#include "BLI_math_matrix_types.hh"
#include "BKE_geometry_set.hh"
namespace blender::bke {
@ -13,36 +11,4 @@ namespace blender::bke {
*/
GeometrySet object_get_evaluated_geometry_set(const Object &object);
/**
* Used to keep track of a group of instances using the same geometry data.
*/
struct GeometryInstanceGroup {
/**
* The geometry set instanced on each of the transforms. The components are not necessarily
* owned here. For example, they may be owned by the instanced object. This cannot be a
* reference because not all instanced data will necessarily have a #geometry_set_eval.
*/
GeometrySet geometry_set;
/**
* As an optimization to avoid copying, the same geometry set can be associated with multiple
* instances. Each instance is stored as a transform matrix here. Again, these must be owned
* because they may be transformed from the original data. TODO: Validate that last statement.
*/
Vector<float4x4> transforms;
};
/**
* Return flattened vector of the geometry component's recursive instances. I.e. all collection
* instances and object instances will be expanded into the instances of their geometry components.
* Even the instances in those geometry components' will be included.
*
* \note For convenience (to avoid duplication in the caller), the returned vector also contains
* the argument geometry set.
*
* \note This doesn't extract instances from the "dupli" system for non-geometry-nodes instances.
*/
void geometry_set_gather_instances(const GeometrySet &geometry_set,
Vector<GeometryInstanceGroup> &r_instance_groups);
} // namespace blender::bke

View File

@ -315,7 +315,7 @@ void IDP_BlendReadData_impl(struct BlendDataReader *reader,
struct IDProperty **prop,
const char *caller_func_id);
#define IDP_BlendDataRead(reader, prop) IDP_BlendReadData_impl(reader, prop, __func__)
void IDP_BlendReadLib(struct BlendLibReader *reader, struct Library *lib, struct IDProperty *prop);
void IDP_BlendReadLib(struct BlendLibReader *reader, struct ID *self_id, struct IDProperty *prop);
void IDP_BlendReadExpand(struct BlendExpander *expander, struct IDProperty *prop);
typedef enum eIDPropertyUIDataType {

View File

@ -57,13 +57,14 @@ template<typename T, int Channels = 4> struct ImageBufferAccessor {
{
if constexpr ((std::is_same_v<T, float4>)) {
int offset = (coordinate.y * image_buffer.x + coordinate.x) * Channels;
return float4(&image_buffer.rect_float[offset]);
return float4(&image_buffer.float_buffer.data[offset]);
}
if constexpr ((std::is_same_v<T, int>)) {
int offset = (coordinate.y * image_buffer.x + coordinate.x);
float4 result;
rgba_uchar_to_float(result,
static_cast<uchar *>(static_cast<void *>(&image_buffer.rect[offset])));
rgba_uchar_to_float(
result,
static_cast<uchar *>(static_cast<void *>(&image_buffer.byte_buffer.data[offset])));
return result;
}
return float4();
@ -73,12 +74,13 @@ template<typename T, int Channels = 4> struct ImageBufferAccessor {
{
if constexpr ((std::is_same_v<T, float>)) {
int offset = (coordinate.y * image_buffer.x + coordinate.x) * Channels;
copy_v4_v4(&image_buffer.rect_float[offset], new_value);
copy_v4_v4(&image_buffer.float_buffer.data[offset], new_value);
}
if constexpr ((std::is_same_v<T, int>)) {
int offset = (coordinate.y * image_buffer.x + coordinate.x);
rgba_float_to_uchar(static_cast<uchar *>(static_cast<void *>(&image_buffer.rect[offset])),
new_value);
rgba_float_to_uchar(
static_cast<uchar *>(static_cast<void *>(&image_buffer.byte_buffer.data[offset])),
new_value);
}
}
};

View File

@ -275,7 +275,7 @@ void BKE_view_layer_blend_write(struct BlendWriter *writer,
struct ViewLayer *view_layer);
void BKE_view_layer_blend_read_data(struct BlendDataReader *reader, struct ViewLayer *view_layer);
void BKE_view_layer_blend_read_lib(struct BlendLibReader *reader,
struct Library *lib,
struct ID *self_id,
struct ViewLayer *view_layer);
/* iterators */

View File

@ -111,12 +111,12 @@ typedef struct LibraryIDLinkCallbackData {
* 'Real' ID, the one that might be in bmain, only differs from self_id when the latter is an
* embedded one.
*/
struct ID *id_owner;
struct ID *owner_id;
/**
* ID from which the current ID pointer is being processed. It may be an embedded ID like master
* collection or root node tree.
*/
struct ID *id_self;
struct ID *self_id;
struct ID **id_pointer;
int cb_flag;
} LibraryIDLinkCallbackData;
@ -259,17 +259,17 @@ void BKE_library_update_ID_link_user(struct ID *id_dst, struct ID *id_src, int c
int BKE_library_ID_use_ID(struct ID *id_user, struct ID *id_used);
/**
* Say whether given \a id_owner may use (in any way) a data-block of \a id_type_used.
* Say whether given \a owner_id may use (in any way) a data-block of \a id_type_used.
*
* This is a 'simplified' abstract version of #BKE_library_foreach_ID_link() above,
* quite useful to reduce useless iterations in some cases.
*/
bool BKE_library_id_can_use_idtype(struct ID *id_owner, short id_type_used);
bool BKE_library_id_can_use_idtype(struct ID *owner_id, short id_type_used);
/**
* Given the id_owner return the type of id_types it can use as a filter_id.
* Given the owner_id return the type of id_types it can use as a filter_id.
*/
uint64_t BKE_library_id_can_use_filter_id(const struct ID *id_owner, const bool include_ui);
uint64_t BKE_library_id_can_use_filter_id(const struct ID *owner_id, const bool include_ui);
/**
* Check whether given ID is used locally (i.e. by another non-linked ID).

View File

@ -120,7 +120,18 @@ enum {
typedef struct Main {
struct Main *next, *prev;
/** The file-path of this blend file, an empty string indicates an unsaved file. */
/**
* The file-path of this blend file, an empty string indicates an unsaved file.
*
* \note For the currently loaded blend file this path should be absolute & normalized,
* to prevent redundant leading slashes or current-working-directory relative paths
* from causing problems with absolute/relative path conversion that relies on this being
* an absolute path. See #BLI_path_canonicalize_native.
*
* This rule is not strictly enforced, as in some cases loading a #Main is performed
* to read data temporarily (preferences & startup for example),
* where the `filepath` is not persistent or used as a basis for other paths.
*/
char filepath[1024]; /* 1024 = FILE_MAX */
short versionfile, subversionfile; /* see BLENDER_FILE_VERSION, BLENDER_FILE_SUBVERSION */
short minversionfile, minsubversionfile;
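
The note above references #BLI_path_canonicalize_native; the pattern it has in mind is the one used later in this patch for the program path (a condensed sketch; `input_path` is a hypothetical caller-provided string):

    char filepath[FILE_MAX];
    STRNCPY(filepath, input_path);
    BLI_path_canonicalize_native(filepath, sizeof(filepath)); /* Now absolute & normalized. */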

View File

@ -30,107 +30,51 @@ void BKE_mesh_legacy_convert_uvs_to_struct(
blender::Vector<CustomDataLayer, 16> &loop_layers_to_write);
void BKE_mesh_legacy_convert_uvs_to_generic(Mesh *mesh);
/**
* Move face sets to the legacy type from a generic type.
*/
void BKE_mesh_legacy_face_set_from_generic(
blender::MutableSpan<CustomDataLayer> poly_layers_to_write);
/**
* Copy face sets to the generic data type from the legacy type.
*/
void BKE_mesh_legacy_face_set_to_generic(struct Mesh *mesh);
/**
* Copy edge creases from a separate layer into edges.
*/
void BKE_mesh_legacy_edge_crease_from_layers(struct Mesh *mesh);
/**
* Copy edge creases from edges to a separate layer.
*/
void BKE_mesh_legacy_edge_crease_to_layers(struct Mesh *mesh);
/**
* Copy bevel weights from separate layers into vertices and edges.
*/
void BKE_mesh_legacy_bevel_weight_from_layers(struct Mesh *mesh);
/**
* Copy bevel weights from vertices and edges to separate layers.
*/
void BKE_mesh_legacy_bevel_weight_to_layers(struct Mesh *mesh);
/**
* Convert the hidden element attributes to the old flag format for writing.
*/
void BKE_mesh_legacy_convert_hide_layers_to_flags(struct Mesh *mesh,
blender::MutableSpan<MPoly> legacy_polys);
/**
* Convert the old hide flags (#ME_HIDE) to the hidden element attribute for reading.
* Only add the attributes when there are any elements in each domain hidden.
*/
void BKE_mesh_legacy_convert_flags_to_hide_layers(struct Mesh *mesh);
/**
* Convert the selected element attributes to the old flag format for writing.
*/
void BKE_mesh_legacy_convert_selection_layers_to_flags(struct Mesh *mesh,
blender::MutableSpan<MPoly> legacy_polys);
/**
* Convert the old selection flags (#SELECT/#ME_FACE_SEL) to the selected element attribute for
* reading. Only add the attributes when there are any elements in each domain selected.
*/
void BKE_mesh_legacy_convert_flags_to_selection_layers(struct Mesh *mesh);
/**
* Move material indices from a generic attribute to #MPoly.
*/
void BKE_mesh_legacy_convert_material_indices_to_mpoly(struct Mesh *mesh,
blender::MutableSpan<MPoly> legacy_polys);
/**
* Move material indices from the #MPoly struct to a generic attributes.
* Only add the attribute when the indices are not all zero.
*/
void BKE_mesh_legacy_convert_mpoly_to_material_indices(struct Mesh *mesh);
/** Convert from runtime loose edge cache to legacy edge flag. */
void BKE_mesh_legacy_convert_loose_edges_to_flag(struct Mesh *mesh);
void BKE_mesh_legacy_attribute_flags_to_strings(struct Mesh *mesh);
void BKE_mesh_legacy_attribute_strings_to_flags(struct Mesh *mesh);
void BKE_mesh_legacy_sharp_faces_to_flags(struct Mesh *mesh,
blender::MutableSpan<MPoly> legacy_polys);
void BKE_mesh_legacy_sharp_faces_from_flags(struct Mesh *mesh);
void BKE_mesh_legacy_sharp_edges_to_flags(struct Mesh *mesh);
void BKE_mesh_legacy_sharp_edges_from_flags(struct Mesh *mesh);
void BKE_mesh_legacy_uv_seam_to_flags(struct Mesh *mesh);
void BKE_mesh_legacy_uv_seam_from_flags(struct Mesh *mesh);
struct MVert *BKE_mesh_legacy_convert_positions_to_verts(
Mesh *mesh,
blender::ResourceScope &temp_arrays_for_convert,
blender::Vector<CustomDataLayer, 16> &vert_layers_to_write);
void BKE_mesh_legacy_convert_verts_to_positions(Mesh *mesh);
MEdge *BKE_mesh_legacy_convert_edges_to_medge(
Mesh *mesh,
blender::ResourceScope &temp_arrays_for_convert,
blender::Vector<CustomDataLayer, 16> &edge_layers_to_write);
void BKE_mesh_legacy_convert_edges_to_generic(Mesh *mesh);
struct MLoop *BKE_mesh_legacy_convert_corners_to_loops(
Mesh *mesh,
blender::ResourceScope &temp_arrays_for_convert,
blender::Vector<CustomDataLayer, 16> &loop_layers_to_write);
blender::MutableSpan<MPoly> BKE_mesh_legacy_convert_offsets_to_polys(
const Mesh *mesh,
blender::ResourceScope &temp_arrays_for_convert,
blender::Vector<CustomDataLayer, 16> &poly_layers_to_write);
void BKE_mesh_legacy_convert_polys_to_offsets(Mesh *mesh);
void BKE_mesh_legacy_convert_loops_to_corners(struct Mesh *mesh);

View File

@ -35,6 +35,14 @@ void sample_point_attribute(Span<int> corner_verts,
IndexMask mask,
GMutableSpan dst);
void sample_point_normals(Span<int> corner_verts,
Span<MLoopTri> looptris,
Span<int> looptri_indices,
Span<float3> bary_coords,
Span<float3> src,
IndexMask mask,
MutableSpan<float3> dst);
void sample_corner_attribute(Span<MLoopTri> looptris,
Span<int> looptri_indices,
Span<float3> bary_coords,

View File

@ -1304,6 +1304,7 @@ void BKE_nodetree_remove_layer_n(struct bNodeTree *ntree, struct Scene *scene, i
#define GEO_NODE_SIMULATION_INPUT 2100
#define GEO_NODE_SIMULATION_OUTPUT 2101
#define GEO_NODE_INPUT_SIGNED_DISTANCE 2102
#define GEO_NODE_SAMPLE_VOLUME 2103
/** \} */

View File

@ -39,7 +39,6 @@ bNodeTree *ntreeCopyTree(Main *bmain, const bNodeTree *ntree);
void ntreeFreeLocalNode(bNodeTree *ntree, bNode *node);
void ntreeUpdateAllNew(Main *main);
void ntreeNodeFlagSet(const bNodeTree *ntree, int flag, bool enable);

View File

@ -346,7 +346,7 @@ struct CopyPixelTile {
void copy_pixels(ImBuf &tile_buffer, IndexRange group_range) const
{
if (tile_buffer.rect_float) {
if (tile_buffer.float_buffer.data) {
image::ImageBufferAccessor<float4> accessor(tile_buffer);
copy_pixels<float4>(accessor, group_range);
}

View File

@ -154,6 +154,8 @@ void BKE_sound_set_scene_sound_volume(void *handle, float volume, char animated)
void BKE_sound_set_scene_sound_pitch(void *handle, float pitch, char animated);
void BKE_sound_set_scene_sound_pitch_at_frame(void *handle, int frame, float pitch, char animated);
void BKE_sound_set_scene_sound_pitch_constant_range(void *handle,
int frame_start,
int frame_end,

View File

@ -40,7 +40,7 @@ bool BKE_viewer_path_equal(const ViewerPath *a, const ViewerPath *b);
void BKE_viewer_path_blend_write(struct BlendWriter *writer, const ViewerPath *viewer_path);
void BKE_viewer_path_blend_read_data(struct BlendDataReader *reader, ViewerPath *viewer_path);
void BKE_viewer_path_blend_read_lib(struct BlendLibReader *reader,
struct Library *lib,
struct ID *self_id,
ViewerPath *viewer_path);
void BKE_viewer_path_foreach_id(struct LibraryForeachIDData *data, ViewerPath *viewer_path);
void BKE_viewer_path_id_remap(ViewerPath *viewer_path, const struct IDRemapper *mappings);

View File

@ -443,6 +443,7 @@ set(SRC
BKE_multires.h
BKE_nla.h
BKE_node.h
BKE_node.hh
BKE_node_runtime.hh
BKE_node_tree_update.h
BKE_node_tree_zones.hh

View File

@ -222,7 +222,7 @@ static void action_blend_read_data(BlendDataReader *reader, ID *id)
static void blend_read_lib_constraint_channels(BlendLibReader *reader, ID *id, ListBase *chanbase)
{
LISTBASE_FOREACH (bConstraintChannel *, chan, chanbase) {
BLO_read_id_address(reader, id->lib, &chan->ipo);
BLO_read_id_address(reader, id, &chan->ipo);
}
}
@ -232,16 +232,16 @@ static void action_blend_read_lib(BlendLibReader *reader, ID *id)
/* XXX deprecated - old animation system <<< */
LISTBASE_FOREACH (bActionChannel *, chan, &act->chanbase) {
BLO_read_id_address(reader, act->id.lib, &chan->ipo);
BLO_read_id_address(reader, id, &chan->ipo);
blend_read_lib_constraint_channels(reader, &act->id, &chan->constraintChannels);
}
/* >>> XXX deprecated - old animation system */
BKE_fcurve_blend_read_lib(reader, &act->id, &act->curves);
BKE_fcurve_blend_read_lib(reader, id, &act->curves);
LISTBASE_FOREACH (TimeMarker *, marker, &act->markers) {
if (marker->camera) {
BLO_read_id_address(reader, act->id.lib, &marker->camera);
BLO_read_id_address(reader, id, &marker->camera);
}
}
}
@ -1952,9 +1952,9 @@ void BKE_pose_blend_read_lib(BlendLibReader *reader, Object *ob, bPose *pose)
pchan->bone = BKE_armature_find_bone_name(arm, pchan->name);
IDP_BlendReadLib(reader, ob->id.lib, pchan->prop);
IDP_BlendReadLib(reader, &ob->id, pchan->prop);
BLO_read_id_address(reader, ob->id.lib, &pchan->custom);
BLO_read_id_address(reader, &ob->id, &pchan->custom);
if (UNLIKELY(pchan->bone == NULL)) {
rebuild = true;
}

View File

@ -1479,8 +1479,8 @@ void BKE_animdata_blend_read_lib(BlendLibReader *reader, ID *id, AnimData *adt)
}
/* link action data */
BLO_read_id_address(reader, id->lib, &adt->action);
BLO_read_id_address(reader, id->lib, &adt->tmpact);
BLO_read_id_address(reader, id, &adt->action);
BLO_read_id_address(reader, id, &adt->tmpact);
/* link drivers */
BKE_fcurve_blend_read_lib(reader, id, &adt->drivers);

View File

@ -329,7 +329,7 @@ void BKE_keyingsets_blend_read_lib(BlendLibReader *reader, ID *id, ListBase *lis
{
LISTBASE_FOREACH (KeyingSet *, ks, list) {
LISTBASE_FOREACH (KS_Path *, ksp, &ks->paths) {
BLO_read_id_address(reader, id->lib, &ksp->id);
BLO_read_id_address(reader, id, &ksp->id);
}
}
}

View File

@ -897,8 +897,7 @@ void BKE_appdir_program_path_init(const char *argv0)
* Otherwise other methods of detecting the binary that override this argument
* which must point to the Python module for data-files to be detected. */
STRNCPY(g_app.program_filepath, argv0);
BLI_path_abs_from_cwd(g_app.program_filepath, sizeof(g_app.program_filepath));
BLI_path_normalize_native(g_app.program_filepath);
BLI_path_canonicalize_native(g_app.program_filepath, sizeof(g_app.program_filepath));
if (g_app.program_dirname[0] == '\0') {
/* First time initializing, the file binary path isn't valid from a Python module.

View File

@ -261,12 +261,12 @@ static void armature_blend_read_data(BlendDataReader *reader, ID *id)
BKE_armature_bone_hash_make(arm);
}
static void lib_link_bones(BlendLibReader *reader, Library *lib, Bone *bone)
static void lib_link_bones(BlendLibReader *reader, ID *self_id, Bone *bone)
{
IDP_BlendReadLib(reader, lib, bone->prop);
IDP_BlendReadLib(reader, self_id, bone->prop);
LISTBASE_FOREACH (Bone *, curbone, &bone->childbase) {
lib_link_bones(reader, lib, curbone);
lib_link_bones(reader, self_id, curbone);
}
}
@ -274,7 +274,7 @@ static void armature_blend_read_lib(BlendLibReader *reader, ID *id)
{
bArmature *arm = (bArmature *)id;
LISTBASE_FOREACH (Bone *, curbone, &arm->bonebase) {
lib_link_bones(reader, id->lib, curbone);
lib_link_bones(reader, id, curbone);
}
}

View File

@ -446,7 +446,7 @@ bool BuiltinCustomDataLayerProvider::try_create(void *owner,
const int element_num = custom_data_access_.get_element_num(owner);
if (stored_as_named_attribute_) {
if (CustomData_get_layer_named(custom_data, data_type_, name_.c_str())) {
if (CustomData_has_layer_named(custom_data, data_type_, name_.c_str())) {
/* Exists already. */
return false;
}
@ -469,7 +469,7 @@ bool BuiltinCustomDataLayerProvider::exists(const void *owner) const
return false;
}
if (stored_as_named_attribute_) {
return CustomData_get_layer_named(custom_data, stored_type_, name_.c_str()) != nullptr;
return CustomData_has_layer_named(custom_data, stored_type_, name_.c_str());
}
return CustomData_get_layer(custom_data, stored_type_) != nullptr;
}

View File

@ -952,7 +952,7 @@ static int foreach_libblock_link_append_callback(LibraryIDLinkCallbackData *cb_d
* meshes for shape keys e.g.), or this is an unsupported case (two shape-keys depending on
* each-other need to be also 'linked' in by their respective meshes, independent shape-keys
* are not allowed). ref #96048. */
if (id != cb_data->id_self && BKE_idtype_idcode_is_linkable(GS(cb_data->id_self->name))) {
if (id != cb_data->self_id && BKE_idtype_idcode_is_linkable(GS(cb_data->self_id->name))) {
BKE_library_foreach_ID_link(
cb_data->bmain, id, foreach_libblock_link_append_callback, data, IDWALK_NOP);
}
@ -972,7 +972,7 @@ static int foreach_libblock_link_append_callback(LibraryIDLinkCallbackData *cb_d
const bool do_recursive = (data->lapp_context->params->flag & BLO_LIBLINK_APPEND_RECURSIVE) !=
0 ||
do_link;
if (!do_recursive && cb_data->id_owner->lib != id->lib) {
if (!do_recursive && cb_data->owner_id->lib != id->lib) {
return IDWALK_RET_NOP;
}

View File

@ -513,7 +513,14 @@ static bool relative_convert_foreach_path_cb(BPathForeachPathData *bpath_data,
data->count_changed++;
}
else {
BKE_reportf(data->reports, RPT_WARNING, "Path '%s' cannot be made relative", path_src);
const char *type_name = BKE_idtype_get_info_from_id(bpath_data->owner_id)->name;
const char *id_name = bpath_data->owner_id->name + 2;
BKE_reportf(data->reports,
RPT_WARNING,
"Path '%s' cannot be made relative for %s '%s'",
path_src,
type_name,
id_name);
data->count_failed++;
}
return true;
@ -537,7 +544,14 @@ static bool absolute_convert_foreach_path_cb(BPathForeachPathData *bpath_data,
data->count_changed++;
}
else {
BKE_reportf(data->reports, RPT_WARNING, "Path '%s' cannot be made absolute", path_src);
const char *type_name = BKE_idtype_get_info_from_id(bpath_data->owner_id)->name;
const char *id_name = bpath_data->owner_id->name + 2;
BKE_reportf(data->reports,
RPT_WARNING,
"Path '%s' cannot be made absolute for %s '%s'",
path_src,
type_name,
id_name);
data->count_failed++;
}
return true;

View File

@ -361,16 +361,16 @@ static void brush_blend_read_lib(BlendLibReader *reader, ID *id)
Brush *brush = (Brush *)id;
/* brush->(mask_)mtex.obj is ignored on purpose? */
BLO_read_id_address(reader, brush->id.lib, &brush->mtex.tex);
BLO_read_id_address(reader, brush->id.lib, &brush->mask_mtex.tex);
BLO_read_id_address(reader, brush->id.lib, &brush->clone.image);
BLO_read_id_address(reader, brush->id.lib, &brush->toggle_brush);
BLO_read_id_address(reader, brush->id.lib, &brush->paint_curve);
BLO_read_id_address(reader, id, &brush->mtex.tex);
BLO_read_id_address(reader, id, &brush->mask_mtex.tex);
BLO_read_id_address(reader, id, &brush->clone.image);
BLO_read_id_address(reader, id, &brush->toggle_brush);
BLO_read_id_address(reader, id, &brush->paint_curve);
/* link default grease pencil palette */
if (brush->gpencil_settings != nullptr) {
if (brush->gpencil_settings->flag & GP_BRUSH_MATERIAL_PINNED) {
BLO_read_id_address(reader, brush->id.lib, &brush->gpencil_settings->material);
BLO_read_id_address(reader, id, &brush->gpencil_settings->material);
if (!brush->gpencil_settings->material) {
brush->gpencil_settings->flag &= ~GP_BRUSH_MATERIAL_PINNED;
@ -379,7 +379,7 @@ static void brush_blend_read_lib(BlendLibReader *reader, ID *id)
else {
brush->gpencil_settings->material = nullptr;
}
BLO_read_id_address(reader, brush->id.lib, &brush->gpencil_settings->material_alt);
BLO_read_id_address(reader, id, &brush->gpencil_settings->material_alt);
}
}
@ -399,10 +399,12 @@ static void brush_blend_read_expand(BlendExpander *expander, ID *id)
static int brush_undo_preserve_cb(LibraryIDLinkCallbackData *cb_data)
{
BlendLibReader *reader = (BlendLibReader *)cb_data->user_data;
ID *self_id = cb_data->self_id;
ID *id_old = *cb_data->id_pointer;
/* Old data has not been remapped to new values of the pointers; if we want to keep the old
 * pointer here, we need its new address. */
ID *id_old_new = id_old != nullptr ? BLO_read_get_new_id_address(reader, id_old->lib, id_old) :
ID *id_old_new = id_old != nullptr ? BLO_read_get_new_id_address(
reader, self_id, ID_IS_LINKED(self_id), id_old) :
nullptr;
BLI_assert(id_old_new == nullptr || ELEM(id_old, id_old_new, id_old_new->orig_id));
if (cb_data->cb_flag & IDWALK_CB_USER) {
@ -2622,18 +2624,22 @@ struct ImBuf *BKE_brush_gen_radial_control_imbuf(Brush *br, bool secondary, bool
int half = side / 2;
BKE_curvemapping_init(br->curve);
im->rect_float = (float *)MEM_callocN(sizeof(float) * side * side, "radial control rect");
float *rect_float = (float *)MEM_callocN(sizeof(float) * side * side, "radial control rect");
IMB_assign_float_buffer(im, rect_float, IB_DO_NOT_TAKE_OWNERSHIP);
im->x = im->y = side;
const bool have_texture = brush_gen_texture(br, side, secondary, im->rect_float);
const bool have_texture = brush_gen_texture(br, side, secondary, im->float_buffer.data);
if (display_gradient || have_texture) {
for (int i = 0; i < side; i++) {
for (int j = 0; j < side; j++) {
const float magn = sqrtf(pow2f(i - half) + pow2f(j - half));
const float strength = BKE_brush_curve_strength_clamped(br, magn, half);
im->rect_float[i * side + j] = (have_texture) ? im->rect_float[i * side + j] * strength :
strength;
im->float_buffer.data[i * side + j] = (have_texture) ?
im->float_buffer.data[i * side + j] * strength :
strength;
}
}
}

View File

@ -142,14 +142,14 @@ static void camera_blend_read_data(BlendDataReader *reader, ID *id)
static void camera_blend_read_lib(BlendLibReader *reader, ID *id)
{
Camera *ca = (Camera *)id;
BLO_read_id_address(reader, ca->id.lib, &ca->ipo); /* deprecated, for versioning */
BLO_read_id_address(reader, id, &ca->ipo); /* deprecated, for versioning */
BLO_read_id_address(reader, ca->id.lib, &ca->dof_ob); /* deprecated, for versioning */
BLO_read_id_address(reader, ca->id.lib, &ca->dof.focus_object);
BLO_read_id_address(reader, id, &ca->dof_ob); /* deprecated, for versioning */
BLO_read_id_address(reader, id, &ca->dof.focus_object);
LISTBASE_FOREACH (CameraBGImage *, bgpic, &ca->bg_images) {
BLO_read_id_address(reader, ca->id.lib, &bgpic->ima);
BLO_read_id_address(reader, ca->id.lib, &bgpic->clip);
BLO_read_id_address(reader, id, &bgpic->ima);
BLO_read_id_address(reader, id, &bgpic->clip);
}
}

View File

@ -325,11 +325,11 @@ static void collection_blend_read_data(BlendDataReader *reader, ID *id)
BKE_collection_blend_read_data(reader, collection, NULL);
}
static void lib_link_collection_data(BlendLibReader *reader, Library *lib, Collection *collection)
static void lib_link_collection_data(BlendLibReader *reader, ID *self_id, Collection *collection)
{
BLI_assert(collection->runtime.gobject_hash == NULL);
LISTBASE_FOREACH_MUTABLE (CollectionObject *, cob, &collection->gobject) {
BLO_read_id_address(reader, lib, &cob->ob);
BLO_read_id_address(reader, self_id, &cob->ob);
if (cob->ob == NULL) {
BLI_freelinkN(&collection->gobject, cob);
@ -337,22 +337,20 @@ static void lib_link_collection_data(BlendLibReader *reader, Library *lib, Colle
}
LISTBASE_FOREACH (CollectionChild *, child, &collection->children) {
BLO_read_id_address(reader, lib, &child->collection);
BLO_read_id_address(reader, self_id, &child->collection);
}
}
#ifdef USE_COLLECTION_COMPAT_28
void BKE_collection_compat_blend_read_lib(BlendLibReader *reader,
Library *lib,
SceneCollection *sc)
void BKE_collection_compat_blend_read_lib(BlendLibReader *reader, ID *self_id, SceneCollection *sc)
{
LISTBASE_FOREACH (LinkData *, link, &sc->objects) {
BLO_read_id_address(reader, lib, &link->data);
BLO_read_id_address(reader, self_id, &link->data);
BLI_assert(link->data);
}
LISTBASE_FOREACH (SceneCollection *, nsc, &sc->scene_collections) {
BKE_collection_compat_blend_read_lib(reader, lib, nsc);
BKE_collection_compat_blend_read_lib(reader, self_id, nsc);
}
}
#endif
@ -361,15 +359,15 @@ void BKE_collection_blend_read_lib(BlendLibReader *reader, Collection *collectio
{
#ifdef USE_COLLECTION_COMPAT_28
if (collection->collection) {
BKE_collection_compat_blend_read_lib(reader, collection->id.lib, collection->collection);
BKE_collection_compat_blend_read_lib(reader, &collection->id, collection->collection);
}
if (collection->view_layer) {
BKE_view_layer_blend_read_lib(reader, collection->id.lib, collection->view_layer);
BKE_view_layer_blend_read_lib(reader, &collection->id, collection->view_layer);
}
#endif
lib_link_collection_data(reader, collection->id.lib, collection);
lib_link_collection_data(reader, &collection->id, collection);
}
static void collection_blend_read_lib(BlendLibReader *reader, ID *id)

View File

@ -1382,11 +1382,11 @@ void BKE_histogram_update_sample_line(Histogram *hist,
hist->xmax = 1.0f;
/* hist->ymax = 1.0f; */ /* now do this on the operator _only_ */
if (ibuf->rect == NULL && ibuf->rect_float == NULL) {
if (ibuf->byte_buffer.data == NULL && ibuf->float_buffer.data == NULL) {
return;
}
if (ibuf->rect_float) {
if (ibuf->float_buffer.data) {
cm_processor = IMB_colormanagement_display_processor_new(view_settings, display_settings);
}
@ -1399,9 +1399,9 @@ void BKE_histogram_update_sample_line(Histogram *hist,
0.0f;
}
else {
if (ibuf->rect_float) {
if (ibuf->float_buffer.data) {
float rgba[4];
fp = (ibuf->rect_float + (ibuf->channels) * (y * ibuf->x + x));
fp = (ibuf->float_buffer.data + (ibuf->channels) * (y * ibuf->x + x));
switch (ibuf->channels) {
case 4:
@ -1431,8 +1431,8 @@ void BKE_histogram_update_sample_line(Histogram *hist,
hist->data_b[i] = rgba[2];
hist->data_a[i] = rgba[3];
}
else if (ibuf->rect) {
cp = (uchar *)(ibuf->rect + y * ibuf->x + x);
else if (ibuf->byte_buffer.data) {
cp = ibuf->byte_buffer.data + 4 * (y * ibuf->x + x);
hist->data_luma[i] = (float)IMB_colormanagement_get_luminance_byte(cp) / 255.0f;
hist->data_r[i] = (float)cp[0] / 255.0f;
hist->data_g[i] = (float)cp[1] / 255.0f;
@ -1492,10 +1492,10 @@ static void scopes_update_cb(void *__restrict userdata,
const int savedlines = y / rows_per_sample_line;
const bool do_sample_line = (savedlines < scopes->sample_lines) &&
(y % rows_per_sample_line) == 0;
const bool is_float = (ibuf->rect_float != NULL);
const bool is_float = (ibuf->float_buffer.data != NULL);
if (is_float) {
rf = ibuf->rect_float + ((size_t)y) * ibuf->x * ibuf->channels;
rf = ibuf->float_buffer.data + ((size_t)y) * ibuf->x * ibuf->channels;
}
else {
rc = display_buffer + ((size_t)y) * ibuf->x * ibuf->channels;
@ -1616,7 +1616,7 @@ void BKE_scopes_update(Scopes *scopes,
void *cache_handle = NULL;
struct ColormanageProcessor *cm_processor = NULL;
if (ibuf->rect == NULL && ibuf->rect_float == NULL) {
if (ibuf->byte_buffer.data == NULL && ibuf->float_buffer.data == NULL) {
return;
}
@ -1692,7 +1692,7 @@ void BKE_scopes_update(Scopes *scopes,
scopes->vecscope = MEM_callocN(scopes->waveform_tot * 2 * sizeof(float),
"vectorscope point channel");
if (ibuf->rect_float) {
if (ibuf->float_buffer.data) {
cm_processor = IMB_colormanagement_display_processor_new(view_settings, display_settings);
}
else {

View File

@ -6556,7 +6556,7 @@ static void lib_link_constraint_cb(bConstraint *UNUSED(con),
void *userdata)
{
tConstraintLinkData *cld = (tConstraintLinkData *)userdata;
BLO_read_id_address(cld->reader, cld->id->lib, idpoin);
BLO_read_id_address(cld->reader, cld->id, idpoin);
}
void BKE_constraint_blend_read_lib(BlendLibReader *reader, ID *id, ListBase *conlist)
@ -6571,7 +6571,7 @@ void BKE_constraint_blend_read_lib(BlendLibReader *reader, ID *id, ListBase *con
con->type = CONSTRAINT_TYPE_NULL;
}
/* own ipo, all constraints have it */
BLO_read_id_address(reader, id->lib, &con->ipo); /* XXX deprecated - old animation system */
BLO_read_id_address(reader, id, &con->ipo); /* XXX deprecated - old animation system */
/* If linking from a library, clear 'local' library override flag. */
if (ID_IS_LINKED(id)) {

View File

@ -270,19 +270,19 @@ static void curve_blend_read_lib(BlendLibReader *reader, ID *id)
{
Curve *cu = (Curve *)id;
for (int a = 0; a < cu->totcol; a++) {
BLO_read_id_address(reader, cu->id.lib, &cu->mat[a]);
BLO_read_id_address(reader, id, &cu->mat[a]);
}
BLO_read_id_address(reader, cu->id.lib, &cu->bevobj);
BLO_read_id_address(reader, cu->id.lib, &cu->taperobj);
BLO_read_id_address(reader, cu->id.lib, &cu->textoncurve);
BLO_read_id_address(reader, cu->id.lib, &cu->vfont);
BLO_read_id_address(reader, cu->id.lib, &cu->vfontb);
BLO_read_id_address(reader, cu->id.lib, &cu->vfonti);
BLO_read_id_address(reader, cu->id.lib, &cu->vfontbi);
BLO_read_id_address(reader, id, &cu->bevobj);
BLO_read_id_address(reader, id, &cu->taperobj);
BLO_read_id_address(reader, id, &cu->textoncurve);
BLO_read_id_address(reader, id, &cu->vfont);
BLO_read_id_address(reader, id, &cu->vfontb);
BLO_read_id_address(reader, id, &cu->vfonti);
BLO_read_id_address(reader, id, &cu->vfontbi);
BLO_read_id_address(reader, cu->id.lib, &cu->ipo); /* XXX deprecated - old animation system */
BLO_read_id_address(reader, cu->id.lib, &cu->key);
BLO_read_id_address(reader, id, &cu->ipo); /* XXX deprecated - old animation system */
BLO_read_id_address(reader, id, &cu->key);
}
static void curve_blend_read_expand(BlendExpander *expander, ID *id)

View File

@ -137,9 +137,9 @@ static void curves_blend_read_lib(BlendLibReader *reader, ID *id)
{
Curves *curves = (Curves *)id;
for (int a = 0; a < curves->totcol; a++) {
BLO_read_id_address(reader, curves->id.lib, &curves->mat[a]);
BLO_read_id_address(reader, id, &curves->mat[a]);
}
BLO_read_id_address(reader, curves->id.lib, &curves->surface);
BLO_read_id_address(reader, id, &curves->surface);
}
static void curves_blend_read_expand(BlendExpander *expander, ID *id)

View File

@ -3176,6 +3176,13 @@ void CustomData_free_layers(CustomData *data, const eCustomDataType type, const
}
}
bool CustomData_has_layer_named(const CustomData *data,
const eCustomDataType type,
const char *name)
{
return CustomData_get_named_layer_index(data, type, name) != -1;
}
bool CustomData_has_layer(const CustomData *data, const eCustomDataType type)
{
return (CustomData_get_layer_index(data, type) != -1);
@ -5181,9 +5188,6 @@ void CustomData_blend_write(BlendWriter *writer,
case CD_PAINT_MASK:
BLO_write_raw(writer, sizeof(float) * count, static_cast<const float *>(layer.data));
break;
case CD_SCULPT_FACE_SETS:
BLO_write_raw(writer, sizeof(float) * count, static_cast<const float *>(layer.data));
break;
case CD_GRID_PAINT_MASK:
write_grid_paint_mask(writer, count, static_cast<const GridPaintMask *>(layer.data));
break;
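
The new CustomData_has_layer_named() helper lets callers test for a named layer without fetching its data pointer; the data-transfer and mesh hunks below switch their existence checks to it. A small usage sketch, with `edata` and `totedge` standing in for a mesh's edge data and edge count:

if (!CustomData_has_layer_named(&edata, CD_PROP_BOOL, "sharp_edge")) {
  /* The layer is missing, so add it with default values before writing to it. */
  CustomData_add_layer_named(&edata, CD_PROP_BOOL, CD_SET_DEFAULT, totedge, "sharp_edge");
}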

View File

@ -1004,7 +1004,7 @@ static bool data_transfer_layersmapping_generate(ListBase *r_map,
return true;
}
if (r_map && cddata_type == CD_FAKE_SEAM) {
if (!CustomData_get_layer_named(&me_dst->edata, CD_PROP_BOOL, ".uv_seam")) {
if (!CustomData_has_layer_named(&me_dst->edata, CD_PROP_BOOL, ".uv_seam")) {
CustomData_add_layer_named(
&me_dst->edata, CD_PROP_BOOL, CD_SET_DEFAULT, me_dst->totedge, ".uv_seam");
}
@ -1022,7 +1022,7 @@ static bool data_transfer_layersmapping_generate(ListBase *r_map,
return true;
}
if (r_map && cddata_type == CD_FAKE_SHARP) {
if (!CustomData_get_layer_named(&me_dst->edata, CD_PROP_BOOL, "sharp_edge")) {
if (!CustomData_has_layer_named(&me_dst->edata, CD_PROP_BOOL, "sharp_edge")) {
CustomData_add_layer_named(
&me_dst->edata, CD_PROP_BOOL, CD_SET_DEFAULT, me_dst->totedge, "sharp_edge");
}
@ -1110,7 +1110,7 @@ static bool data_transfer_layersmapping_generate(ListBase *r_map,
return true;
}
if (r_map && cddata_type == CD_FAKE_SHARP) {
if (!CustomData_get_layer_named(&me_dst->pdata, CD_PROP_BOOL, "sharp_face")) {
if (!CustomData_has_layer_named(&me_dst->pdata, CD_PROP_BOOL, "sharp_face")) {
CustomData_add_layer_named(
&me_dst->pdata, CD_PROP_BOOL, CD_SET_DEFAULT, me_dst->totpoly, "sharp_face");
}

View File

@ -3210,12 +3210,15 @@ static void dynamic_paint_output_surface_image_paint_cb(void *__restrict userdat
const int pos = ((ImgSeqFormatData *)(surface->data->format_data))->uv_p[index].pixel_index * 4;
/* blend wet and dry layers */
blendColors(
point->color, point->color[3], point->e_color, point->e_color[3], &ibuf->rect_float[pos]);
blendColors(point->color,
point->color[3],
point->e_color,
point->e_color[3],
&ibuf->float_buffer.data[pos]);
/* Multiply color by alpha if enabled */
if (surface->flags & MOD_DPAINT_MULALPHA) {
mul_v3_fl(&ibuf->rect_float[pos], ibuf->rect_float[pos + 3]);
mul_v3_fl(&ibuf->float_buffer.data[pos], ibuf->float_buffer.data[pos + 3]);
}
}
@ -3242,8 +3245,8 @@ static void dynamic_paint_output_surface_image_displace_cb(
CLAMP(depth, 0.0f, 1.0f);
copy_v3_fl(&ibuf->rect_float[pos], depth);
ibuf->rect_float[pos + 3] = 1.0f;
copy_v3_fl(&ibuf->float_buffer.data[pos], depth);
ibuf->float_buffer.data[pos + 3] = 1.0f;
}
static void dynamic_paint_output_surface_image_wave_cb(void *__restrict userdata,
@ -3268,8 +3271,8 @@ static void dynamic_paint_output_surface_image_wave_cb(void *__restrict userdata
depth = (0.5f + depth / 2.0f);
CLAMP(depth, 0.0f, 1.0f);
copy_v3_fl(&ibuf->rect_float[pos], depth);
ibuf->rect_float[pos + 3] = 1.0f;
copy_v3_fl(&ibuf->float_buffer.data[pos], depth);
ibuf->float_buffer.data[pos + 3] = 1.0f;
}
static void dynamic_paint_output_surface_image_wetmap_cb(void *__restrict userdata,
@ -3286,8 +3289,8 @@ static void dynamic_paint_output_surface_image_wetmap_cb(void *__restrict userda
/* image buffer position */
const int pos = ((ImgSeqFormatData *)(surface->data->format_data))->uv_p[index].pixel_index * 4;
copy_v3_fl(&ibuf->rect_float[pos], (point->wetness > 1.0f) ? 1.0f : point->wetness);
ibuf->rect_float[pos + 3] = 1.0f;
copy_v3_fl(&ibuf->float_buffer.data[pos], (point->wetness > 1.0f) ? 1.0f : point->wetness);
ibuf->float_buffer.data[pos + 3] = 1.0f;
}
void dynamicPaint_outputSurfaceImage(DynamicPaintSurface *surface,

View File

@ -2540,7 +2540,7 @@ void BKE_fmodifiers_blend_read_lib(BlendLibReader *reader, ID *id, ListBase *fmo
switch (fcm->type) {
case FMODIFIER_TYPE_PYTHON: {
FMod_Python *data = (FMod_Python *)fcm->data;
BLO_read_id_address(reader, id->lib, &data->script);
BLO_read_id_address(reader, id, &data->script);
break;
}
}
@ -2670,7 +2670,7 @@ void BKE_fcurve_blend_read_lib(BlendLibReader *reader, ID *id, ListBase *fcurves
DRIVER_TARGETS_LOOPER_BEGIN (dvar) {
/* only relink if still used */
if (tarIndex < dvar->num_targets) {
BLO_read_id_address(reader, id->lib, &dtar->id);
BLO_read_id_address(reader, id, &dtar->id);
}
else {
dtar->id = NULL;

View File

@ -3,28 +3,16 @@
#include "BKE_collection.h"
#include "BKE_geometry_set_instances.hh"
#include "BKE_instances.hh"
#include "BKE_material.h"
#include "BKE_mesh.hh"
#include "BKE_mesh_wrapper.h"
#include "BKE_modifier.h"
#include "BKE_pointcloud.h"
#include "DNA_collection_types.h"
#include "DNA_layer_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_object_types.h"
namespace blender::bke {
static void geometry_set_collect_recursive(const GeometrySet &geometry_set,
const float4x4 &transform,
Vector<GeometryInstanceGroup> &r_sets);
static void geometry_set_collect_recursive_collection(const Collection &collection,
const float4x4 &transform,
Vector<GeometryInstanceGroup> &r_sets);
static void add_final_mesh_as_geometry_component(const Object &object, GeometrySet &geometry_set)
{
Mesh *mesh = BKE_modifier_get_evaluated_mesh_from_evaluated_object(
@ -74,87 +62,6 @@ GeometrySet object_get_evaluated_geometry_set(const Object &object)
return {};
}
static void geometry_set_collect_recursive_collection_instance(
const Collection &collection, const float4x4 &transform, Vector<GeometryInstanceGroup> &r_sets)
{
float4x4 offset_matrix = float4x4::identity();
offset_matrix.location() -= float3(collection.instance_offset);
const float4x4 instance_transform = transform * offset_matrix;
geometry_set_collect_recursive_collection(collection, instance_transform, r_sets);
}
static void geometry_set_collect_recursive_object(const Object &object,
const float4x4 &transform,
Vector<GeometryInstanceGroup> &r_sets)
{
GeometrySet instance_geometry_set = object_get_evaluated_geometry_set(object);
geometry_set_collect_recursive(instance_geometry_set, transform, r_sets);
}
static void geometry_set_collect_recursive_collection(const Collection &collection,
const float4x4 &transform,
Vector<GeometryInstanceGroup> &r_sets)
{
LISTBASE_FOREACH (const CollectionObject *, collection_object, &collection.gobject) {
BLI_assert(collection_object->ob != nullptr);
const Object &object = *collection_object->ob;
const float4x4 object_transform = transform * float4x4_view(object.object_to_world);
geometry_set_collect_recursive_object(object, object_transform, r_sets);
}
LISTBASE_FOREACH (const CollectionChild *, collection_child, &collection.children) {
BLI_assert(collection_child->collection != nullptr);
const Collection &collection = *collection_child->collection;
geometry_set_collect_recursive_collection(collection, transform, r_sets);
}
}
static void geometry_set_collect_recursive(const GeometrySet &geometry_set,
const float4x4 &transform,
Vector<GeometryInstanceGroup> &r_sets)
{
r_sets.append({geometry_set, {transform}});
if (geometry_set.has_instances()) {
const Instances &instances = *geometry_set.get_instances_for_read();
Span<float4x4> transforms = instances.transforms();
Span<int> handles = instances.reference_handles();
Span<InstanceReference> references = instances.references();
for (const int i : transforms.index_range()) {
const InstanceReference &reference = references[handles[i]];
const float4x4 instance_transform = transform * transforms[i];
switch (reference.type()) {
case InstanceReference::Type::Object: {
Object &object = reference.object();
geometry_set_collect_recursive_object(object, instance_transform, r_sets);
break;
}
case InstanceReference::Type::Collection: {
Collection &collection = reference.collection();
geometry_set_collect_recursive_collection_instance(
collection, instance_transform, r_sets);
break;
}
case InstanceReference::Type::GeometrySet: {
const GeometrySet &geometry_set = reference.geometry_set();
geometry_set_collect_recursive(geometry_set, instance_transform, r_sets);
break;
}
case InstanceReference::Type::None: {
break;
}
}
}
}
}
void geometry_set_gather_instances(const GeometrySet &geometry_set,
Vector<GeometryInstanceGroup> &r_instance_groups)
{
geometry_set_collect_recursive(geometry_set, float4x4::identity(), r_instance_groups);
}
void Instances::foreach_referenced_geometry(
blender::FunctionRef<void(const GeometrySet &geometry_set)> callback) const
{

View File

@ -275,12 +275,12 @@ static void greasepencil_blend_read_lib(BlendLibReader *reader, ID *id)
/* Layers */
LISTBASE_FOREACH (bGPDlayer *, gpl, &gpd->layers) {
/* Layer -> Parent References */
BLO_read_id_address(reader, gpd->id.lib, &gpl->parent);
BLO_read_id_address(reader, id, &gpl->parent);
}
/* materials */
for (int a = 0; a < gpd->totcol; a++) {
BLO_read_id_address(reader, gpd->id.lib, &gpd->mat[a]);
BLO_read_id_address(reader, id, &gpd->mat[a]);
}
}
@ -2308,7 +2308,7 @@ bool BKE_gpencil_from_image(
ibuf = BKE_image_acquire_ibuf(image, &iuser, &lock);
if (ibuf && ibuf->rect) {
if (ibuf && ibuf->byte_buffer.data) {
int img_x = ibuf->x;
int img_y = ibuf->y;

View File

@ -552,7 +552,7 @@ void BKE_previewimg_ensure(PreviewImage *prv, const int size)
if (do_preview) {
prv->w[ICON_SIZE_PREVIEW] = thumb->x;
prv->h[ICON_SIZE_PREVIEW] = thumb->y;
prv->rect[ICON_SIZE_PREVIEW] = (uint *)MEM_dupallocN(thumb->rect);
prv->rect[ICON_SIZE_PREVIEW] = (uint *)MEM_dupallocN(thumb->byte_buffer.data);
prv->flag[ICON_SIZE_PREVIEW] &= ~(PRV_CHANGED | PRV_USER_EDITED | PRV_RENDERING);
}
if (do_icon) {
@ -571,7 +571,7 @@ void BKE_previewimg_ensure(PreviewImage *prv, const int size)
IMB_scaleImBuf(thumb, icon_w, icon_h);
prv->w[ICON_SIZE_ICON] = icon_w;
prv->h[ICON_SIZE_ICON] = icon_h;
prv->rect[ICON_SIZE_ICON] = (uint *)MEM_dupallocN(thumb->rect);
prv->rect[ICON_SIZE_ICON] = (uint *)MEM_dupallocN(thumb->byte_buffer.data);
prv->flag[ICON_SIZE_ICON] &= ~(PRV_CHANGED | PRV_USER_EDITED | PRV_RENDERING);
}
IMB_freeImBuf(thumb);
@ -588,7 +588,7 @@ ImBuf *BKE_previewimg_to_imbuf(PreviewImage *prv, const int size)
if (w > 0 && h > 0 && rect) {
/* first allocate imbuf for copying preview into it */
ima = IMB_allocImBuf(w, h, 32, IB_rect);
memcpy(ima->rect, rect, w * h * sizeof(*ima->rect));
memcpy(ima->byte_buffer.data, rect, w * h * sizeof(uint8_t) * 4);
}
return ima;

View File

@ -85,7 +85,7 @@ ImBuf *BKE_icon_geom_rasterize(const struct Icon_Geom *geom, const uint size_x,
data.rect_size[0] = rect_size[0];
data.rect_size[1] = rect_size[1];
data.rect = ibuf->rect;
data.rect = (uint *)ibuf->byte_buffer.data;
float scale[2];
const bool use_scale = (rect_size[0] != 256) || (rect_size[1] != 256);

View File

@ -1495,7 +1495,7 @@ void IDP_BlendReadData_impl(BlendDataReader *reader, IDProperty **prop, const ch
}
}
void IDP_BlendReadLib(BlendLibReader *reader, Library *lib, IDProperty *prop)
void IDP_BlendReadLib(BlendLibReader *reader, ID *self_id, IDProperty *prop)
{
if (!prop) {
return;
@ -1504,7 +1504,8 @@ void IDP_BlendReadLib(BlendLibReader *reader, Library *lib, IDProperty *prop)
switch (prop->type) {
case IDP_ID: /* PointerProperty */
{
void *newaddr = BLO_read_get_new_id_address(reader, lib, IDP_Id(prop));
void *newaddr = BLO_read_get_new_id_address(
reader, self_id, ID_IS_LINKED(self_id), IDP_Id(prop));
if (IDP_Id(prop) && !newaddr && G.debug) {
printf("Error while loading \"%s\". Data not found in file!\n", prop->name);
}
@ -1515,14 +1516,14 @@ void IDP_BlendReadLib(BlendLibReader *reader, Library *lib, IDProperty *prop)
{
IDProperty *idp_array = IDP_IDPArray(prop);
for (int i = 0; i < prop->len; i++) {
IDP_BlendReadLib(reader, lib, &(idp_array[i]));
IDP_BlendReadLib(reader, self_id, &(idp_array[i]));
}
break;
}
case IDP_GROUP: /* PointerProperty */
{
LISTBASE_FOREACH (IDProperty *, loop, &prop->data.group) {
IDP_BlendReadLib(reader, lib, loop);
IDP_BlendReadLib(reader, self_id, loop);
}
break;
}

View File

@ -1138,7 +1138,7 @@ static ImBuf *add_ibuf_for_tile(Image *ima, ImageTile *tile)
}
if (ibuf != nullptr) {
rect_float = ibuf->rect_float;
rect_float = ibuf->float_buffer.data;
IMB_colormanagement_check_is_data(ibuf, ima->colorspace_settings.name);
}
@ -1162,7 +1162,7 @@ static ImBuf *add_ibuf_for_tile(Image *ima, ImageTile *tile)
}
if (ibuf != nullptr) {
rect = (uchar *)ibuf->rect;
rect = ibuf->byte_buffer.data;
IMB_colormanagement_assign_rect_colorspace(ibuf, ima->colorspace_settings.name);
}
@ -1261,7 +1261,7 @@ static void image_colorspace_from_imbuf(Image *image, const ImBuf *ibuf)
{
const char *colorspace_name = nullptr;
if (ibuf->rect_float) {
if (ibuf->float_buffer.data) {
if (ibuf->float_colorspace) {
colorspace_name = IMB_colormanagement_colorspace_get_name(ibuf->float_colorspace);
}
@ -1270,7 +1270,7 @@ static void image_colorspace_from_imbuf(Image *image, const ImBuf *ibuf)
}
}
if (ibuf->rect && !colorspace_name) {
if (ibuf->byte_buffer.data && !colorspace_name) {
if (ibuf->rect_colorspace) {
colorspace_name = IMB_colormanagement_colorspace_get_name(ibuf->rect_colorspace);
}
@ -1318,7 +1318,7 @@ void BKE_image_replace_imbuf(Image *image, ImBuf *ibuf)
/* Keep generated image type flags consistent with the image buffer. */
if (image->source == IMA_SRC_GENERATED) {
if (ibuf->rect_float) {
if (ibuf->float_buffer.data) {
image->gen_flag |= IMA_GEN_FLOAT;
}
else {
@ -1338,11 +1338,11 @@ void BKE_image_replace_imbuf(Image *image, ImBuf *ibuf)
static bool image_memorypack_imbuf(
Image *ima, ImBuf *ibuf, int view, int tile_number, const char *filepath)
{
ibuf->ftype = (ibuf->rect_float) ? IMB_FTYPE_OPENEXR : IMB_FTYPE_PNG;
ibuf->ftype = (ibuf->float_buffer.data) ? IMB_FTYPE_OPENEXR : IMB_FTYPE_PNG;
IMB_saveiff(ibuf, filepath, IB_rect | IB_mem);
if (ibuf->encodedbuffer == nullptr) {
if (ibuf->encoded_buffer.data == nullptr) {
CLOG_STR_ERROR(&LOG, "memory save for pack error");
IMB_freeImBuf(ibuf);
image_free_packedfiles(ima);
@ -1352,8 +1352,8 @@ static bool image_memorypack_imbuf(
ImagePackedFile *imapf;
PackedFile *pf = MEM_cnew<PackedFile>("PackedFile");
pf->data = ibuf->encodedbuffer;
pf->size = ibuf->encodedsize;
pf->data = IMB_steal_encoded_buffer(ibuf);
pf->size = ibuf->encoded_size;
imapf = static_cast<ImagePackedFile *>(MEM_mallocN(sizeof(ImagePackedFile), "Image PackedFile"));
STRNCPY(imapf->filepath, filepath);
@ -1362,8 +1362,6 @@ static bool image_memorypack_imbuf(
imapf->tile_number = tile_number;
BLI_addtail(&ima->packedfiles, imapf);
ibuf->encodedbuffer = nullptr;
ibuf->encodedsize = 0;
ibuf->userflags &= ~IB_BITMAPDIRTY;
return true;
@ -1505,26 +1503,12 @@ static uintptr_t image_mem_size(Image *image)
if (ibuf == nullptr) {
continue;
}
ImBuf *ibufm;
int level;
if (ibuf->rect) {
size += MEM_allocN_len(ibuf->rect);
}
if (ibuf->rect_float) {
size += MEM_allocN_len(ibuf->rect_float);
}
size += IMB_get_size_in_memory(ibuf);
for (level = 0; level < IMB_MIPMAP_LEVELS; level++) {
ibufm = ibuf->mipmap[level];
if (ibufm) {
if (ibufm->rect) {
size += MEM_allocN_len(ibufm->rect);
}
if (ibufm->rect_float) {
size += MEM_allocN_len(ibufm->rect_float);
}
}
for (int level = 0; level < IMB_MIPMAP_LEVELS; level++) {
ImBuf *ibufm = ibuf->mipmap[level];
size += IMB_get_size_in_memory(ibufm);
}
}
IMB_moviecacheIter_free(iter);
@ -2537,16 +2521,16 @@ void BKE_stamp_info_from_imbuf(RenderResult *rr, ImBuf *ibuf)
bool BKE_imbuf_alpha_test(ImBuf *ibuf)
{
int tot;
if (ibuf->rect_float) {
const float *buf = ibuf->rect_float;
if (ibuf->float_buffer.data) {
const float *buf = ibuf->float_buffer.data;
for (tot = ibuf->x * ibuf->y; tot--; buf += 4) {
if (buf[3] < 1.0f) {
return true;
}
}
}
else if (ibuf->rect) {
uchar *buf = (uchar *)ibuf->rect;
else if (ibuf->byte_buffer.data) {
uchar *buf = ibuf->byte_buffer.data;
for (tot = ibuf->x * ibuf->y; tot--; buf += 4) {
if (buf[3] != 255) {
return true;
@ -4007,9 +3991,8 @@ static ImBuf *image_load_sequence_multilayer(Image *ima, ImageUser *iuser, int e
// printf("load from pass %s\n", rpass->name);
/* since we free render results, we copy the rect */
ibuf = IMB_allocImBuf(ima->rr->rectx, ima->rr->recty, 32, 0);
ibuf->rect_float = static_cast<float *>(MEM_dupallocN(rpass->rect));
ibuf->flags |= IB_rectfloat;
ibuf->mall = IB_rectfloat;
IMB_assign_float_buffer(
ibuf, static_cast<float *>(MEM_dupallocN(rpass->rect)), IB_TAKE_OWNERSHIP);
ibuf->channels = rpass->channels;
BKE_imbuf_stamp_info(ima->rr, ibuf);
@ -4318,8 +4301,7 @@ static ImBuf *image_get_ibuf_multilayer(Image *ima, ImageUser *iuser)
image_init_after_load(ima, iuser, ibuf);
ibuf->rect_float = rpass->rect;
ibuf->flags |= IB_rectfloat;
IMB_assign_float_buffer(ibuf, rpass->rect, IB_DO_NOT_TAKE_OWNERSHIP);
ibuf->channels = rpass->channels;
BKE_imbuf_stamp_info(ima->rr, ibuf);
@ -4459,56 +4441,29 @@ static ImBuf *image_get_render_result(Image *ima, ImageUser *iuser, void **r_loc
*
* For other cases we need to be sure it stays to default byte buffer space.
*/
if (ibuf->rect != rect) {
if (ibuf->byte_buffer.data != (uint8_t *)rect) {
const char *colorspace = IMB_colormanagement_role_colorspace_name_get(COLOR_ROLE_DEFAULT_BYTE);
IMB_colormanagement_assign_rect_colorspace(ibuf, colorspace);
}
/* invalidate color managed buffers if render result changed */
BLI_thread_lock(LOCK_COLORMANAGE);
if (ibuf->x != rres.rectx || ibuf->y != rres.recty || ibuf->rect_float != rectf) {
if (ibuf->x != rres.rectx || ibuf->y != rres.recty || ibuf->float_buffer.data != rectf) {
ibuf->userflags |= IB_DISPLAY_BUFFER_INVALID;
}
ibuf->x = rres.rectx;
ibuf->y = rres.recty;
ibuf->channels = channels;
if (rect) {
imb_freerectImBuf(ibuf);
ibuf->rect = rect;
}
else {
/* byte buffer of render result has been freed, make sure image buffers
* does not reference to this buffer anymore
* need check for whether byte buffer was allocated and owned by image itself
* or if it's reusing buffer from render result
*/
if ((ibuf->mall & IB_rect) == 0) {
ibuf->rect = nullptr;
}
}
imb_freerectImBuf(ibuf);
if (rectf) {
ibuf->rect_float = rectf;
ibuf->flags |= IB_rectfloat;
ibuf->channels = channels;
}
else {
ibuf->rect_float = nullptr;
ibuf->flags &= ~IB_rectfloat;
}
if (rectz) {
ibuf->zbuf_float = rectz;
ibuf->flags |= IB_zbuffloat;
}
else {
ibuf->zbuf_float = nullptr;
ibuf->flags &= ~IB_zbuffloat;
}
IMB_assign_byte_buffer(ibuf, (uint8_t *)rect, IB_DO_NOT_TAKE_OWNERSHIP);
IMB_assign_float_buffer(ibuf, rectf, IB_DO_NOT_TAKE_OWNERSHIP);
IMB_assign_float_z_buffer(ibuf, rectz, IB_DO_NOT_TAKE_OWNERSHIP);
/* TODO(sergey): Make this faster by either simply referencing the stamp
* or by changing both ImBug and RenderResult to use same data type to
* or by changing both ImBuf and RenderResult to use same data type to
* store metadata. */
if (ibuf->metadata != nullptr) {
IMB_metadata_free(ibuf->metadata);
@ -5269,7 +5224,7 @@ uchar *BKE_image_get_pixels_for_frame(struct Image *image, int frame, int tile)
ibuf = BKE_image_acquire_ibuf(image, &iuser, &lock);
if (ibuf) {
pixels = (uchar *)ibuf->rect;
pixels = ibuf->byte_buffer.data;
if (pixels) {
pixels = static_cast<uchar *>(MEM_dupallocN(pixels));
@ -5299,7 +5254,7 @@ float *BKE_image_get_float_pixels_for_frame(struct Image *image, int frame, int
ibuf = BKE_image_acquire_ibuf(image, &iuser, &lock);
if (ibuf) {
pixels = ibuf->rect_float;
pixels = ibuf->float_buffer.data;
if (pixels) {
pixels = static_cast<float *>(MEM_dupallocN(pixels));
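
image_memorypack_imbuf() now hands the encoded bytes to the packed file via IMB_steal_encoded_buffer() instead of copying the pointer and nulling ibuf->encodedbuffer by hand. A sketch of the ownership hand-over, with `pf` and `ibuf` as in the hunk above:

/* Ownership of the encoded buffer moves to the PackedFile; the ImBuf no longer
 * references it, so freeing the ImBuf afterwards cannot double-free the data. */
pf->data = IMB_steal_encoded_buffer(ibuf);
pf->size = ibuf->encoded_size;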

View File

@ -799,7 +799,7 @@ void BKE_image_format_from_imbuf(ImageFormatData *im_format, const ImBuf *imbuf)
if (custom_flags & OPENEXR_COMPRESS) {
im_format->exr_codec = R_IMF_EXR_CODEC_ZIP; /* Can't determine compression */
}
if (imbuf->zbuf_float) {
if (imbuf->float_z_buffer.data) {
im_format->flag |= R_IMF_FLAG_ZBUF;
}
}

View File

@ -50,11 +50,11 @@ bool BKE_image_has_gpu_texture_premultiplied_alpha(Image *image, ImBuf *ibuf)
}
/* Generated images use pre multiplied float buffer, but straight alpha for byte buffers. */
if (image->type == IMA_TYPE_UV_TEST && ibuf) {
return ibuf->rect_float != nullptr;
return ibuf->float_buffer.data != nullptr;
}
}
if (ibuf) {
if (ibuf->rect_float) {
if (ibuf->float_buffer.data) {
return image ? (image->alpha_mode != IMA_ALPHA_STRAIGHT) : false;
}
@ -643,7 +643,7 @@ static ImBuf *update_do_scale(uchar *rect,
}
/* Scale pixels. */
ImBuf *ibuf = IMB_allocFromBuffer((uint *)rect, rect_float, part_w, part_h, 4);
ImBuf *ibuf = IMB_allocFromBuffer(rect, rect_float, part_w, part_h, 4);
IMB_scaleImBuf(ibuf, *w, *h);
return ibuf;
@ -679,8 +679,9 @@ static void gpu_texture_update_scaled(GPUTexture *tex,
ibuf = update_do_scale(rect, rect_float, &x, &y, &w, &h, limit_w, limit_h, full_w, full_h);
}
void *data = (ibuf->rect_float) ? (void *)(ibuf->rect_float) : (void *)(ibuf->rect);
eGPUDataFormat data_format = (ibuf->rect_float) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;
void *data = (ibuf->float_buffer.data) ? (void *)(ibuf->float_buffer.data) :
(void *)(ibuf->byte_buffer.data);
eGPUDataFormat data_format = (ibuf->float_buffer.data) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;
GPU_texture_update_sub(tex, data_format, data, x, y, layer, w, h, 1);
@ -742,8 +743,8 @@ static void gpu_texture_update_from_ibuf(
}
/* Get texture data pointers. */
float *rect_float = ibuf->rect_float;
uchar *rect = (uchar *)ibuf->rect;
float *rect_float = ibuf->float_buffer.data;
uchar *rect = ibuf->byte_buffer.data;
int tex_stride = ibuf->x;
int tex_offset = ibuf->channels * (y * ibuf->x + x);
@ -832,10 +833,10 @@ static void gpu_texture_update_from_ibuf(
}
/* Free buffers if needed. */
if (rect && rect != (uchar *)ibuf->rect) {
if (rect && rect != ibuf->byte_buffer.data) {
MEM_freeN(rect);
}
if (rect_float && rect_float != ibuf->rect_float) {
if (rect_float && rect_float != ibuf->float_buffer.data) {
MEM_freeN(rect_float);
}

View File

@ -42,7 +42,7 @@ static char imtype_best_depth(ImBuf *ibuf, const char imtype)
{
const char depth_ok = BKE_imtype_valid_depths(imtype);
if (ibuf->rect_float) {
if (ibuf->float_buffer.data) {
if (depth_ok & R_IMF_CHAN_DEPTH_32) {
return R_IMF_CHAN_DEPTH_32;
}
@ -298,18 +298,10 @@ static void image_save_post(ReportList *reports,
/* workaround to ensure the render result buffer is no longer used
* by this image, otherwise can crash when a new render result is
* created. */
if (ibuf->rect && !(ibuf->mall & IB_rect)) {
imb_freerectImBuf(ibuf);
}
if (ibuf->rect_float && !(ibuf->mall & IB_rectfloat)) {
imb_freerectfloatImBuf(ibuf);
}
if (ibuf->zbuf && !(ibuf->mall & IB_zbuf)) {
IMB_freezbufImBuf(ibuf);
}
if (ibuf->zbuf_float && !(ibuf->mall & IB_zbuffloat)) {
IMB_freezbuffloatImBuf(ibuf);
}
imb_freerectImBuf(ibuf);
imb_freerectfloatImBuf(ibuf);
IMB_freezbufImBuf(ibuf);
IMB_freezbuffloatImBuf(ibuf);
}
if (ELEM(ima->source, IMA_SRC_GENERATED, IMA_SRC_VIEWER)) {
ima->source = IMA_SRC_FILE;
@ -365,7 +357,8 @@ static bool image_save_single(ReportList *reports,
RenderResult *rr = nullptr;
bool ok = false;
if (ibuf == nullptr || (ibuf->rect == nullptr && ibuf->rect_float == nullptr)) {
if (ibuf == nullptr || (ibuf->byte_buffer.data == nullptr && ibuf->float_buffer.data == nullptr))
{
BKE_image_release_ibuf(ima, ibuf, lock);
return ok;
}

View File

@ -144,7 +144,7 @@ static void ipo_blend_read_lib(BlendLibReader *reader, ID *id)
LISTBASE_FOREACH (IpoCurve *, icu, &ipo->curve) {
if (icu->driver) {
BLO_read_id_address(reader, ipo->id.lib, &icu->driver->ob);
BLO_read_id_address(reader, id, &icu->driver->ob);
}
}
}

View File

@ -185,8 +185,8 @@ static void shapekey_blend_read_lib(BlendLibReader *reader, ID *id)
Key *key = (Key *)id;
BLI_assert((key->id.tag & LIB_TAG_EXTERN) == 0);
BLO_read_id_address(reader, key->id.lib, &key->ipo); /* XXX deprecated - old animation system */
BLO_read_id_address(reader, key->id.lib, &key->from);
BLO_read_id_address(reader, id, &key->ipo); /* XXX deprecated - old animation system */
BLO_read_id_address(reader, id, &key->from);
}
static void shapekey_blend_read_expand(BlendExpander *expander, ID *id)

View File

@ -161,8 +161,8 @@ static void lattice_blend_read_data(BlendDataReader *reader, ID *id)
static void lattice_blend_read_lib(BlendLibReader *reader, ID *id)
{
Lattice *lt = (Lattice *)id;
BLO_read_id_address(reader, lt->id.lib, &lt->ipo); // XXX deprecated - old animation system
BLO_read_id_address(reader, lt->id.lib, &lt->key);
BLO_read_id_address(reader, id, &lt->ipo); // XXX deprecated - old animation system
BLO_read_id_address(reader, id, &lt->key);
}
static void lattice_blend_read_expand(BlendExpander *expander, ID *id)

View File

@ -2438,35 +2438,35 @@ void BKE_view_layer_blend_read_data(BlendDataReader *reader, ViewLayer *view_lay
}
static void lib_link_layer_collection(BlendLibReader *reader,
Library *lib,
ID *self_id,
LayerCollection *layer_collection,
bool master)
const bool master)
{
/* Master collection is not a real data-block. */
if (!master) {
BLO_read_id_address(reader, lib, &layer_collection->collection);
BLO_read_id_address(reader, self_id, &layer_collection->collection);
}
LISTBASE_FOREACH (
LayerCollection *, layer_collection_nested, &layer_collection->layer_collections) {
lib_link_layer_collection(reader, lib, layer_collection_nested, false);
lib_link_layer_collection(reader, self_id, layer_collection_nested, false);
}
}
void BKE_view_layer_blend_read_lib(BlendLibReader *reader, Library *lib, ViewLayer *view_layer)
void BKE_view_layer_blend_read_lib(BlendLibReader *reader, ID *self_id, ViewLayer *view_layer)
{
LISTBASE_FOREACH (FreestyleModuleConfig *, fmc, &view_layer->freestyle_config.modules) {
BLO_read_id_address(reader, lib, &fmc->script);
BLO_read_id_address(reader, self_id, &fmc->script);
}
LISTBASE_FOREACH (FreestyleLineSet *, fls, &view_layer->freestyle_config.linesets) {
BLO_read_id_address(reader, lib, &fls->linestyle);
BLO_read_id_address(reader, lib, &fls->group);
BLO_read_id_address(reader, self_id, &fls->linestyle);
BLO_read_id_address(reader, self_id, &fls->group);
}
LISTBASE_FOREACH_MUTABLE (Base *, base, &view_layer->object_bases) {
/* we only bump the use count for the collection objects */
BLO_read_id_address(reader, lib, &base->object);
BLO_read_id_address(reader, self_id, &base->object);
if (base->object == nullptr) {
/* Free in case linked object got lost. */
@ -2478,12 +2478,12 @@ void BKE_view_layer_blend_read_lib(BlendLibReader *reader, Library *lib, ViewLay
}
LISTBASE_FOREACH (LayerCollection *, layer_collection, &view_layer->layer_collections) {
lib_link_layer_collection(reader, lib, layer_collection, true);
lib_link_layer_collection(reader, self_id, layer_collection, true);
}
BLO_read_id_address(reader, lib, &view_layer->mat_override);
BLO_read_id_address(reader, self_id, &view_layer->mat_override);
IDP_BlendReadLib(reader, lib, view_layer->id_properties);
IDP_BlendReadLib(reader, self_id, view_layer->id_properties);
}
/** \} */

View File

@ -173,7 +173,7 @@ static int lib_id_clear_library_data_users_update_cb(LibraryIDLinkCallbackData *
/* Even though the ID itself remain the same after being made local, from depsgraph point of
* view this is a different ID. Hence we need to tag all of its users for COW update. */
DEG_id_tag_update_ex(
cb_data->bmain, cb_data->id_owner, ID_RECALC_TAG_FOR_UNDO | ID_RECALC_COPY_ON_WRITE);
cb_data->bmain, cb_data->owner_id, ID_RECALC_TAG_FOR_UNDO | ID_RECALC_COPY_ON_WRITE);
return IDWALK_RET_STOP_ITER;
}
return IDWALK_RET_NOP;
@ -396,7 +396,7 @@ void BKE_id_newptr_and_tag_clear(ID *id)
static int lib_id_expand_local_cb(LibraryIDLinkCallbackData *cb_data)
{
Main *bmain = cb_data->bmain;
ID *id_self = cb_data->id_self;
ID *self_id = cb_data->self_id;
ID **id_pointer = cb_data->id_pointer;
int const cb_flag = cb_data->cb_flag;
const int flags = POINTER_AS_INT(cb_data->user_data);
@ -412,7 +412,7 @@ static int lib_id_expand_local_cb(LibraryIDLinkCallbackData *cb_data)
* local directly), its embedded IDs should also have already been duplicated, and hence be
* fully local here already. */
if (*id_pointer != NULL && ID_IS_LINKED(*id_pointer)) {
BLI_assert(*id_pointer != id_self);
BLI_assert(*id_pointer != self_id);
BKE_lib_id_clear_library_data(bmain, *id_pointer, flags);
}
@ -423,7 +423,7 @@ static int lib_id_expand_local_cb(LibraryIDLinkCallbackData *cb_data)
* (through drivers)...
* Just skip it, shape key can only be either indirectly linked, or fully local, period.
* And let's curse one more time that stupid useless shape-key ID type! */
if (*id_pointer && *id_pointer != id_self &&
if (*id_pointer && *id_pointer != self_id &&
BKE_idtype_idcode_is_linkable(GS((*id_pointer)->name)))
{
id_lib_extern(*id_pointer);
@ -583,14 +583,14 @@ static int id_copy_libmanagement_cb(LibraryIDLinkCallbackData *cb_data)
/* Remap self-references to new copied ID. */
if (id == data->id_src) {
/* We cannot use id_self here, it is not *always* id_dst (thanks to $£!+@#&/? nodetrees). */
/* We cannot use self_id here, it is not *always* id_dst (thanks to $£!+@#&/? nodetrees). */
id = *id_pointer = data->id_dst;
}
/* Increase used IDs refcount if needed and required. */
if ((data->flag & LIB_ID_CREATE_NO_USER_REFCOUNT) == 0 && (cb_flag & IDWALK_CB_USER)) {
if ((data->flag & LIB_ID_CREATE_NO_MAIN) != 0) {
BLI_assert(cb_data->id_self->tag & LIB_TAG_NO_MAIN);
BLI_assert(cb_data->self_id->tag & LIB_TAG_NO_MAIN);
id_us_plus_no_lib(id);
}
else {

View File

@ -373,7 +373,7 @@ bool BKE_lib_override_library_property_is_animated(const ID *id,
static int foreachid_is_hierarchy_leaf_fn(LibraryIDLinkCallbackData *cb_data)
{
ID *id_owner = cb_data->id_owner;
ID *id_owner = cb_data->owner_id;
ID *id = *cb_data->id_pointer;
bool *is_leaf = static_cast<bool *>(cb_data->user_data);
@ -3194,7 +3194,7 @@ static int lib_override_sort_libraries_func(LibraryIDLinkCallbackData *cb_data)
if (cb_data->cb_flag & IDWALK_CB_LOOPBACK) {
return IDWALK_RET_NOP;
}
ID *id_owner = cb_data->id_owner;
ID *id_owner = cb_data->owner_id;
ID *id = *cb_data->id_pointer;
if (id != nullptr && ID_IS_LINKED(id) && id->lib != id_owner->lib) {
const int owner_library_indirect_level = ID_IS_LINKED(id_owner) ? id_owner->lib->temp_index :

View File

@ -87,8 +87,8 @@ void BKE_lib_query_foreachid_process(LibraryForeachIDData *data, ID **id_pp, int
const int callback_return = data->callback(
&(struct LibraryIDLinkCallbackData){.user_data = data->user_data,
.bmain = data->bmain,
.id_owner = data->owner_id,
.id_self = data->self_id,
.owner_id = data->owner_id,
.self_id = data->self_id,
.id_pointer = id_pp,
.cb_flag = cb_flag});
if (flag & IDWALK_READONLY) {
@ -126,7 +126,7 @@ int BKE_lib_query_foreachid_process_callback_flag_override(LibraryForeachIDData
}
static bool library_foreach_ID_link(Main *bmain,
ID *id_owner,
ID *owner_id,
ID *id,
LibraryIDLinkCallback callback,
void *user_data,
@ -192,7 +192,7 @@ static void library_foreach_ID_data_cleanup(LibraryForeachIDData *data)
/** \return false in case iteration over ID pointers must be stopped, true otherwise. */
static bool library_foreach_ID_link(Main *bmain,
ID *id_owner,
ID *owner_id,
ID *id,
LibraryIDLinkCallback callback,
void *user_data,
@ -259,7 +259,7 @@ static bool library_foreach_ID_link(Main *bmain,
* knowledge of the owner ID then.
* While not great, and that should be probably sanitized at some point, we can live with it
* for now. */
data.owner_id = ((id->flag & LIB_EMBEDDED_DATA) != 0 && id_owner != NULL) ? id_owner :
data.owner_id = ((id->flag & LIB_EMBEDDED_DATA) != 0 && owner_id != NULL) ? owner_id :
data.self_id;
/* inherit_data is non-NULL when this function is called for some sub-data ID
@ -374,13 +374,13 @@ void BKE_library_update_ID_link_user(ID *id_dst, ID *id_src, const int cb_flag)
}
}
uint64_t BKE_library_id_can_use_filter_id(const ID *id_owner, const bool include_ui)
uint64_t BKE_library_id_can_use_filter_id(const ID *owner_id, const bool include_ui)
{
/* any type of ID can be used in custom props. */
if (id_owner->properties) {
if (owner_id->properties) {
return FILTER_ID_ALL;
}
const short id_type_owner = GS(id_owner->name);
const short id_type_owner = GS(owner_id->name);
/* IDProps of armature bones and nodes, and bNode->id can use virtually any type of ID. */
if (ELEM(id_type_owner, ID_NT, ID_AR)) {
@ -395,16 +395,16 @@ uint64_t BKE_library_id_can_use_filter_id(const ID *id_owner, const bool include
/* Casting to non const.
* TODO(jbakker): We should introduce a ntree_id_has_tree function as we are actually not
* interested in the result. */
if (ntreeFromID((ID *)id_owner)) {
if (ntreeFromID((ID *)owner_id)) {
return FILTER_ID_ALL;
}
if (BKE_animdata_from_id(id_owner)) {
if (BKE_animdata_from_id(owner_id)) {
/* AnimationData can use virtually any kind of data-blocks, through drivers especially. */
return FILTER_ID_ALL;
}
if (ID_IS_OVERRIDE_LIBRARY_REAL(id_owner)) {
if (ID_IS_OVERRIDE_LIBRARY_REAL(owner_id)) {
/* LibOverride data 'hierarchy root' can virtually point back to any type of ID. */
return FILTER_ID_ALL;
}
@ -496,14 +496,14 @@ uint64_t BKE_library_id_can_use_filter_id(const ID *id_owner, const bool include
return 0;
}
bool BKE_library_id_can_use_idtype(ID *id_owner, const short id_type_used)
bool BKE_library_id_can_use_idtype(ID *owner_id, const short id_type_used)
{
/* any type of ID can be used in custom props. */
if (id_owner->properties) {
if (owner_id->properties) {
return true;
}
const short id_type_owner = GS(id_owner->name);
const short id_type_owner = GS(owner_id->name);
/* Exception for ID_LI as they don't exist as a filter. */
if (id_type_used == ID_LI) {
return id_type_owner == ID_LI;
@ -520,7 +520,7 @@ bool BKE_library_id_can_use_idtype(ID *id_owner, const short id_type_used)
}
const uint64_t filter_id_type_used = BKE_idtype_idcode_to_idfilter(id_type_used);
const uint64_t can_be_used = BKE_library_id_can_use_filter_id(id_owner, false);
const uint64_t can_be_used = BKE_library_id_can_use_filter_id(owner_id, false);
return (can_be_used & filter_id_type_used) != 0;
}
@ -866,7 +866,7 @@ void BKE_lib_query_unused_ids_tag(Main *bmain,
static int foreach_libblock_used_linked_data_tag_clear_cb(LibraryIDLinkCallbackData *cb_data)
{
ID *self_id = cb_data->id_self;
ID *self_id = cb_data->self_id;
ID **id_p = cb_data->id_pointer;
const int cb_flag = cb_data->cb_flag;
bool *is_changed = cb_data->user_data;
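
The renames in this file (id_owner to owner_id, id_self to self_id) touch the public LibraryIDLinkCallbackData fields, which is why every foreach-ID callback in this commit is updated as well. A minimal callback sketch using the new field names (the callback itself is hypothetical):

static int example_foreach_id_cb(LibraryIDLinkCallbackData *cb_data)
{
  ID *owner_id = cb_data->owner_id; /* Owner of the relationship (embedded IDs report their owner). */
  ID *self_id = cb_data->self_id;   /* ID whose pointers are currently being iterated. */
  ID **id_p = cb_data->id_pointer;  /* The visited ID pointer; may be inspected or remapped. */
  UNUSED_VARS(owner_id, self_id, id_p);
  return IDWALK_RET_NOP;
}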

View File

@ -177,8 +177,8 @@ static int foreach_libblock_remap_callback(LibraryIDLinkCallbackData *cb_data)
return IDWALK_RET_NOP;
}
ID *id_owner = cb_data->id_owner;
ID *id_self = cb_data->id_self;
ID *id_owner = cb_data->owner_id;
ID *id_self = cb_data->self_id;
ID **id_p = cb_data->id_pointer;
IDRemap *id_remap_data = cb_data->user_data;

View File

@ -167,7 +167,7 @@ static void light_blend_read_data(BlendDataReader *reader, ID *id)
static void light_blend_read_lib(BlendLibReader *reader, ID *id)
{
Light *la = (Light *)id;
BLO_read_id_address(reader, la->id.lib, &la->ipo); // XXX deprecated - old animation system
BLO_read_id_address(reader, id, &la->ipo); // XXX deprecated - old animation system
}
static void light_blend_read_expand(BlendExpander *expander, ID *id)

View File

@ -66,7 +66,7 @@ static void lightprobe_blend_read_data(BlendDataReader *reader, ID *id)
static void lightprobe_blend_read_lib(BlendLibReader *reader, ID *id)
{
LightProbe *prb = (LightProbe *)id;
BLO_read_id_address(reader, prb->id.lib, &prb->visibility_grp);
BLO_read_id_address(reader, &prb->id, &prb->visibility_grp);
}
IDTypeInfo IDType_ID_LP = {

View File

@ -682,7 +682,7 @@ static void linestyle_blend_read_lib(BlendLibReader *reader, ID *id)
case LS_MODIFIER_DISTANCE_FROM_OBJECT: {
LineStyleColorModifier_DistanceFromObject *cm =
(LineStyleColorModifier_DistanceFromObject *)m;
BLO_read_id_address(reader, linestyle->id.lib, &cm->target);
BLO_read_id_address(reader, id, &cm->target);
break;
}
}
@ -692,7 +692,7 @@ static void linestyle_blend_read_lib(BlendLibReader *reader, ID *id)
case LS_MODIFIER_DISTANCE_FROM_OBJECT: {
LineStyleAlphaModifier_DistanceFromObject *am =
(LineStyleAlphaModifier_DistanceFromObject *)m;
BLO_read_id_address(reader, linestyle->id.lib, &am->target);
BLO_read_id_address(reader, id, &am->target);
break;
}
}
@ -702,7 +702,7 @@ static void linestyle_blend_read_lib(BlendLibReader *reader, ID *id)
case LS_MODIFIER_DISTANCE_FROM_OBJECT: {
LineStyleThicknessModifier_DistanceFromObject *tm =
(LineStyleThicknessModifier_DistanceFromObject *)m;
BLO_read_id_address(reader, linestyle->id.lib, &tm->target);
BLO_read_id_address(reader, id, &tm->target);
break;
}
}
@ -710,8 +710,8 @@ static void linestyle_blend_read_lib(BlendLibReader *reader, ID *id)
for (int a = 0; a < MAX_MTEX; a++) {
MTex *mtex = linestyle->mtex[a];
if (mtex) {
BLO_read_id_address(reader, linestyle->id.lib, &mtex->tex);
BLO_read_id_address(reader, linestyle->id.lib, &mtex->object);
BLO_read_id_address(reader, id, &mtex->tex);
BLO_read_id_address(reader, id, &mtex->object);
}
}
}

View File

@ -230,22 +230,22 @@ void BKE_main_unlock(struct Main *bmain)
static int main_relations_create_idlink_cb(LibraryIDLinkCallbackData *cb_data)
{
MainIDRelations *bmain_relations = cb_data->user_data;
ID *id_self = cb_data->id_self;
ID *self_id = cb_data->self_id;
ID **id_pointer = cb_data->id_pointer;
const int cb_flag = cb_data->cb_flag;
if (*id_pointer) {
MainIDRelationsEntry **entry_p;
/* Add `id_pointer` as child of `id_self`. */
/* Add `id_pointer` as child of `self_id`. */
{
if (!BLI_ghash_ensure_p(
bmain_relations->relations_from_pointers, id_self, (void ***)&entry_p)) {
bmain_relations->relations_from_pointers, self_id, (void ***)&entry_p)) {
*entry_p = MEM_callocN(sizeof(**entry_p), __func__);
(*entry_p)->session_uuid = id_self->session_uuid;
(*entry_p)->session_uuid = self_id->session_uuid;
}
else {
BLI_assert((*entry_p)->session_uuid == id_self->session_uuid);
BLI_assert((*entry_p)->session_uuid == self_id->session_uuid);
}
MainIDRelationsEntryItem *to_id_entry = BLI_mempool_alloc(bmain_relations->entry_items_pool);
to_id_entry->next = (*entry_p)->to_ids;
@ -256,7 +256,7 @@ static int main_relations_create_idlink_cb(LibraryIDLinkCallbackData *cb_data)
(*entry_p)->to_ids = to_id_entry;
}
/* Add `id_self` as parent of `id_pointer`. */
/* Add `self_id` as parent of `id_pointer`. */
if (*id_pointer != NULL) {
if (!BLI_ghash_ensure_p(
bmain_relations->relations_from_pointers, *id_pointer, (void ***)&entry_p)) {
@ -269,8 +269,8 @@ static int main_relations_create_idlink_cb(LibraryIDLinkCallbackData *cb_data)
MainIDRelationsEntryItem *from_id_entry = BLI_mempool_alloc(
bmain_relations->entry_items_pool);
from_id_entry->next = (*entry_p)->from_ids;
from_id_entry->id_pointer.from = id_self;
from_id_entry->session_uuid = id_self->session_uuid;
from_id_entry->id_pointer.from = self_id;
from_id_entry->session_uuid = self_id->session_uuid;
from_id_entry->usage_flag = cb_flag;
(*entry_p)->from_ids = from_id_entry;
}
@ -523,7 +523,7 @@ BlendThumbnail *BKE_main_thumbnail_from_imbuf(Main *bmain, ImBuf *img)
IMB_rect_from_float(img); /* Just in case... */
data->width = img->x;
data->height = img->y;
memcpy(data->rect, img->rect, data_size - sizeof(*data));
memcpy(data->rect, img->byte_buffer.data, data_size - sizeof(*data));
}
if (bmain) {
@ -542,7 +542,7 @@ ImBuf *BKE_main_thumbnail_to_imbuf(Main *bmain, BlendThumbnail *data)
if (data) {
img = IMB_allocFromBuffer(
(const uint *)data->rect, NULL, (uint)data->width, (uint)data->height, 4);
(const uint8_t *)data->rect, NULL, (uint)data->width, (uint)data->height, 4);
}
return img;

View File

@ -183,7 +183,7 @@ static void mask_blend_read_data(BlendDataReader *reader, ID *id)
static void lib_link_mask_parent(BlendLibReader *reader, Mask *mask, MaskParent *parent)
{
BLO_read_id_address(reader, mask->id.lib, &parent->id);
BLO_read_id_address(reader, &mask->id, &parent->id);
}
static void mask_blend_read_lib(BlendLibReader *reader, ID *id)

View File

@ -223,16 +223,16 @@ static void material_blend_read_data(BlendDataReader *reader, ID *id)
static void material_blend_read_lib(BlendLibReader *reader, ID *id)
{
Material *ma = (Material *)id;
BLO_read_id_address(reader, ma->id.lib, &ma->ipo); /* XXX deprecated - old animation system */
BLO_read_id_address(reader, id, &ma->ipo); /* XXX deprecated - old animation system */
/* relink grease pencil settings */
if (ma->gp_style != nullptr) {
MaterialGPencilStyle *gp_style = ma->gp_style;
if (gp_style->sima != nullptr) {
BLO_read_id_address(reader, ma->id.lib, &gp_style->sima);
BLO_read_id_address(reader, id, &gp_style->sima);
}
if (gp_style->ima != nullptr) {
BLO_read_id_address(reader, ma->id.lib, &gp_style->ima);
BLO_read_id_address(reader, id, &gp_style->ima);
}
}
}

View File

@ -141,10 +141,10 @@ static void metaball_blend_read_lib(BlendLibReader *reader, ID *id)
{
MetaBall *mb = (MetaBall *)id;
for (int a = 0; a < mb->totcol; a++) {
BLO_read_id_address(reader, mb->id.lib, &mb->mat[a]);
BLO_read_id_address(reader, id, &mb->mat[a]);
}
BLO_read_id_address(reader, mb->id.lib, &mb->ipo); // XXX deprecated - old animation system
BLO_read_id_address(reader, id, &mb->ipo); // XXX deprecated - old animation system
}
static void metaball_blend_read_expand(BlendExpander *expander, ID *id)

View File

@ -257,68 +257,10 @@ static void mesh_blend_write(BlendWriter *writer, ID *id, const void *id_address
mesh->poly_offset_indices = nullptr;
}
else {
Set<std::string> names_to_skip;
if (!BLO_write_is_undo(writer)) {
/* When converting to the old mesh format, don't save redundant attributes. */
names_to_skip.add_multiple_new({"position",
".edge_verts",
".corner_vert",
".corner_edge",
".hide_vert",
".hide_edge",
".hide_poly",
".uv_seam",
".select_vert",
".select_edge",
".select_poly",
"material_index",
"sharp_face",
"sharp_edge"});
mesh->mvert = BKE_mesh_legacy_convert_positions_to_verts(
mesh, temp_arrays_for_legacy_format, vert_layers);
mesh->mloop = BKE_mesh_legacy_convert_corners_to_loops(
mesh, temp_arrays_for_legacy_format, loop_layers);
mesh->medge = BKE_mesh_legacy_convert_edges_to_medge(
mesh, temp_arrays_for_legacy_format, edge_layers);
MutableSpan<MPoly> legacy_polys = BKE_mesh_legacy_convert_offsets_to_polys(
mesh, temp_arrays_for_legacy_format, poly_layers);
BKE_mesh_legacy_convert_hide_layers_to_flags(mesh, legacy_polys);
BKE_mesh_legacy_convert_selection_layers_to_flags(mesh, legacy_polys);
BKE_mesh_legacy_convert_material_indices_to_mpoly(mesh, legacy_polys);
BKE_mesh_legacy_sharp_faces_to_flags(mesh, legacy_polys);
BKE_mesh_legacy_bevel_weight_from_layers(mesh);
BKE_mesh_legacy_edge_crease_from_layers(mesh);
BKE_mesh_legacy_sharp_edges_to_flags(mesh);
BKE_mesh_legacy_uv_seam_to_flags(mesh);
BKE_mesh_legacy_attribute_strings_to_flags(mesh);
mesh->active_color_attribute = nullptr;
mesh->default_color_attribute = nullptr;
BKE_mesh_legacy_convert_loose_edges_to_flag(mesh);
mesh->poly_offset_indices = nullptr;
/* Set deprecated mesh data pointers for forward compatibility. */
mesh->mpoly = legacy_polys.data();
mesh->dvert = const_cast<MDeformVert *>(mesh->deform_verts().data());
}
CustomData_blend_write_prepare(mesh->vdata, vert_layers, names_to_skip);
CustomData_blend_write_prepare(mesh->edata, edge_layers, names_to_skip);
CustomData_blend_write_prepare(mesh->ldata, loop_layers, names_to_skip);
CustomData_blend_write_prepare(mesh->pdata, poly_layers, names_to_skip);
if (!BLO_write_is_undo(writer)) {
/* #CustomData expects the layers to be sorted in increasing order based on type. */
std::stable_sort(
poly_layers.begin(),
poly_layers.end(),
[](const CustomDataLayer &a, const CustomDataLayer &b) { return a.type < b.type; });
BKE_mesh_legacy_convert_uvs_to_struct(mesh, temp_arrays_for_legacy_format, loop_layers);
BKE_mesh_legacy_face_set_from_generic(poly_layers);
}
CustomData_blend_write_prepare(mesh->vdata, vert_layers, {});
CustomData_blend_write_prepare(mesh->edata, edge_layers, {});
CustomData_blend_write_prepare(mesh->ldata, loop_layers, {});
CustomData_blend_write_prepare(mesh->pdata, poly_layers, {});
}
mesh->runtime = nullptr;
@ -420,16 +362,16 @@ static void mesh_blend_read_lib(BlendLibReader *reader, ID *id)
/* this check added for python created meshes */
if (me->mat) {
for (int i = 0; i < me->totcol; i++) {
BLO_read_id_address(reader, me->id.lib, &me->mat[i]);
BLO_read_id_address(reader, id, &me->mat[i]);
}
}
else {
me->totcol = 0;
}
BLO_read_id_address(reader, me->id.lib, &me->ipo); // XXX: deprecated: old anim sys
BLO_read_id_address(reader, me->id.lib, &me->key);
BLO_read_id_address(reader, me->id.lib, &me->texcomesh);
BLO_read_id_address(reader, id, &me->ipo); // XXX: deprecated: old anim sys
BLO_read_id_address(reader, id, &me->key);
BLO_read_id_address(reader, id, &me->texcomesh);
}
static void mesh_read_expand(BlendExpander *expander, ID *id)

View File

@ -291,8 +291,9 @@ static IMesh meshes_to_imesh(Span<const Mesh *> meshes,
r_info->mesh_poly_offset[mi] = f;
/* Get matrix that transforms a coordinate in meshes[mi]'s local space
* to the target space. */
const float4x4 objn_mat = (obmats[mi] == nullptr) ? float4x4::identity() :
clean_transform(*obmats[mi]);
const float4x4 objn_mat = (obmats.is_empty() || obmats[mi] == nullptr) ?
float4x4::identity() :
clean_transform(*obmats[mi]);
r_info->to_target_transform[mi] = inv_target_mat * objn_mat;
r_info->has_negative_transform[mi] = math::is_negative(objn_mat);
@ -311,7 +312,7 @@ static IMesh meshes_to_imesh(Span<const Mesh *> meshes,
* Skip the matrix multiplication for each point when there is no transform for a mesh,
* for example when the first mesh is already in the target space. (Note the logic
* directly above, which uses an identity matrix with a null input transform). */
if (obmats[mi] == nullptr) {
if (obmats.is_empty() || obmats[mi] == nullptr) {
threading::parallel_for(vert_positions.index_range(), 2048, [&](IndexRange range) {
for (int i : range) {
float3 co = vert_positions[i];
@ -798,7 +799,7 @@ Mesh *direct_mesh_boolean(Span<const Mesh *> meshes,
Vector<int> *r_intersecting_edges)
{
#ifdef WITH_GMP
BLI_assert(meshes.size() == transforms.size());
BLI_assert(transforms.is_empty() || meshes.size() == transforms.size());
BLI_assert(material_remaps.size() == 0 || material_remaps.size() == meshes.size());
if (meshes.size() <= 0) {
return nullptr;

View File

@ -231,7 +231,7 @@ void BKE_mesh_calc_edges(Mesh *mesh, bool keep_existing_edges, const bool select
}
/* Create new edges. */
if (!CustomData_get_layer_named(&mesh->ldata, CD_PROP_INT32, ".corner_edge")) {
if (!CustomData_has_layer_named(&mesh->ldata, CD_PROP_INT32, ".corner_edge")) {
CustomData_add_layer_named(
&mesh->ldata, CD_PROP_INT32, CD_CONSTRUCT, mesh->totloop, ".corner_edge");
}

View File

@ -1236,25 +1236,6 @@ void BKE_mesh_tessface_ensure(struct Mesh *mesh)
/** \name Sharp Edge Conversion
* \{ */
void BKE_mesh_legacy_sharp_faces_to_flags(Mesh *mesh, blender::MutableSpan<MPoly> legacy_polys)
{
using namespace blender;
if (const bool *sharp_faces = static_cast<const bool *>(
CustomData_get_layer_named(&mesh->pdata, CD_PROP_BOOL, "sharp_face")))
{
threading::parallel_for(legacy_polys.index_range(), 4096, [&](const IndexRange range) {
for (const int i : range) {
SET_FLAG_FROM_TEST(legacy_polys[i].flag_legacy, !sharp_faces[i], ME_SMOOTH);
}
});
}
else {
for (const int i : legacy_polys.index_range()) {
legacy_polys[i].flag_legacy |= ME_SMOOTH;
}
}
}
void BKE_mesh_legacy_sharp_faces_from_flags(Mesh *mesh)
{
using namespace blender;
@ -1289,28 +1270,6 @@ void BKE_mesh_legacy_sharp_faces_from_flags(Mesh *mesh)
/** \name Face Set Conversion
* \{ */
void BKE_mesh_legacy_face_set_from_generic(blender::MutableSpan<CustomDataLayer> poly_layers)
{
using namespace blender;
bool changed = false;
for (CustomDataLayer &layer : poly_layers) {
if (StringRef(layer.name) == ".sculpt_face_set") {
layer.type = CD_SCULPT_FACE_SETS;
layer.name[0] = '\0';
changed = true;
break;
}
}
if (!changed) {
return;
}
/* #CustomData expects the layers to be sorted in increasing order based on type. */
std::stable_sort(
poly_layers.begin(),
poly_layers.end(),
[](const CustomDataLayer &a, const CustomDataLayer &b) { return a.type < b.type; });
}
void BKE_mesh_legacy_face_set_to_generic(Mesh *mesh)
{
using namespace blender;
@ -1349,41 +1308,6 @@ void BKE_mesh_legacy_face_set_to_generic(Mesh *mesh)
/** \name Bevel Weight Conversion
* \{ */
void BKE_mesh_legacy_bevel_weight_from_layers(Mesh *mesh)
{
using namespace blender;
MutableSpan<MVert> verts(mesh->mvert, mesh->totvert);
if (const float *weights = static_cast<const float *>(
CustomData_get_layer(&mesh->vdata, CD_BWEIGHT)))
{
mesh->cd_flag |= ME_CDFLAG_VERT_BWEIGHT;
for (const int i : verts.index_range()) {
verts[i].bweight_legacy = std::clamp(weights[i], 0.0f, 1.0f) * 255.0f;
}
}
else {
mesh->cd_flag &= ~ME_CDFLAG_VERT_BWEIGHT;
for (const int i : verts.index_range()) {
verts[i].bweight_legacy = 0;
}
}
MutableSpan<MEdge> edges(mesh->medge, mesh->totedge);
if (const float *weights = static_cast<const float *>(
CustomData_get_layer(&mesh->edata, CD_BWEIGHT)))
{
mesh->cd_flag |= ME_CDFLAG_EDGE_BWEIGHT;
for (const int i : edges.index_range()) {
edges[i].bweight_legacy = std::clamp(weights[i], 0.0f, 1.0f) * 255.0f;
}
}
else {
mesh->cd_flag &= ~ME_CDFLAG_EDGE_BWEIGHT;
for (const int i : edges.index_range()) {
edges[i].bweight_legacy = 0;
}
}
}
void BKE_mesh_legacy_bevel_weight_to_layers(Mesh *mesh)
{
using namespace blender;
@ -1416,26 +1340,6 @@ void BKE_mesh_legacy_bevel_weight_to_layers(Mesh *mesh)
/** \name Edge Crease Conversion
* \{ */
void BKE_mesh_legacy_edge_crease_from_layers(Mesh *mesh)
{
using namespace blender;
MutableSpan<MEdge> edges(mesh->medge, mesh->totedge);
if (const float *creases = static_cast<const float *>(
CustomData_get_layer(&mesh->edata, CD_CREASE)))
{
mesh->cd_flag |= ME_CDFLAG_EDGE_CREASE;
for (const int i : edges.index_range()) {
edges[i].crease_legacy = std::clamp(creases[i], 0.0f, 1.0f) * 255.0f;
}
}
else {
mesh->cd_flag &= ~ME_CDFLAG_EDGE_CREASE;
for (const int i : edges.index_range()) {
edges[i].crease_legacy = 0;
}
}
}
void BKE_mesh_legacy_edge_crease_to_layers(Mesh *mesh)
{
using namespace blender;
@ -1458,26 +1362,6 @@ void BKE_mesh_legacy_edge_crease_to_layers(Mesh *mesh)
/** \name Sharp Edge Conversion
* \{ */
void BKE_mesh_legacy_sharp_edges_to_flags(Mesh *mesh)
{
using namespace blender;
MutableSpan<MEdge> edges(mesh->medge, mesh->totedge);
if (const bool *sharp_edges = static_cast<const bool *>(
CustomData_get_layer_named(&mesh->edata, CD_PROP_BOOL, "sharp_edge")))
{
threading::parallel_for(edges.index_range(), 4096, [&](const IndexRange range) {
for (const int i : range) {
SET_FLAG_FROM_TEST(edges[i].flag_legacy, sharp_edges[i], ME_SHARP);
}
});
}
else {
for (const int i : edges.index_range()) {
edges[i].flag_legacy &= ~ME_SHARP;
}
}
}
void BKE_mesh_legacy_sharp_edges_from_flags(Mesh *mesh)
{
using namespace blender;
@ -1511,26 +1395,6 @@ void BKE_mesh_legacy_sharp_edges_from_flags(Mesh *mesh)
/** \name UV Seam Conversion
* \{ */
void BKE_mesh_legacy_uv_seam_to_flags(Mesh *mesh)
{
using namespace blender;
MutableSpan<MEdge> edges(mesh->medge, mesh->totedge);
if (const bool *uv_seams = static_cast<const bool *>(
CustomData_get_layer_named(&mesh->edata, CD_PROP_BOOL, ".uv_seam")))
{
threading::parallel_for(edges.index_range(), 4096, [&](const IndexRange range) {
for (const int i : range) {
SET_FLAG_FROM_TEST(edges[i].flag_legacy, uv_seams[i], ME_SEAM);
}
});
}
else {
for (const int i : edges.index_range()) {
edges[i].flag_legacy &= ~ME_SEAM;
}
}
}
void BKE_mesh_legacy_uv_seam_from_flags(Mesh *mesh)
{
using namespace blender;
@ -1564,40 +1428,6 @@ void BKE_mesh_legacy_uv_seam_from_flags(Mesh *mesh)
/** \name Hide Attribute and Legacy Flag Conversion
* \{ */
void BKE_mesh_legacy_convert_hide_layers_to_flags(Mesh *mesh,
blender::MutableSpan<MPoly> legacy_polys)
{
using namespace blender;
using namespace blender::bke;
const AttributeAccessor attributes = mesh->attributes();
MutableSpan<MVert> verts(mesh->mvert, mesh->totvert);
const VArray<bool> hide_vert = *attributes.lookup_or_default<bool>(
".hide_vert", ATTR_DOMAIN_POINT, false);
threading::parallel_for(verts.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
SET_FLAG_FROM_TEST(verts[i].flag_legacy, hide_vert[i], ME_HIDE);
}
});
MutableSpan<MEdge> edges(mesh->medge, mesh->totedge);
const VArray<bool> hide_edge = *attributes.lookup_or_default<bool>(
".hide_edge", ATTR_DOMAIN_EDGE, false);
threading::parallel_for(edges.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
SET_FLAG_FROM_TEST(edges[i].flag_legacy, hide_edge[i], ME_HIDE);
}
});
const VArray<bool> hide_poly = *attributes.lookup_or_default<bool>(
".hide_poly", ATTR_DOMAIN_FACE, false);
threading::parallel_for(legacy_polys.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
SET_FLAG_FROM_TEST(legacy_polys[i].flag_legacy, hide_poly[i], ME_HIDE);
}
});
}
void BKE_mesh_legacy_convert_flags_to_hide_layers(Mesh *mesh)
{
using namespace blender;
@ -1663,21 +1493,6 @@ void BKE_mesh_legacy_convert_flags_to_hide_layers(Mesh *mesh)
/** \name Material Index Conversion
* \{ */
void BKE_mesh_legacy_convert_material_indices_to_mpoly(Mesh *mesh,
blender::MutableSpan<MPoly> legacy_polys)
{
using namespace blender;
using namespace blender::bke;
const AttributeAccessor attributes = mesh->attributes();
const VArray<int> material_indices = *attributes.lookup_or_default<int>(
"material_index", ATTR_DOMAIN_FACE, 0);
threading::parallel_for(legacy_polys.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
legacy_polys[i].mat_nr_legacy = material_indices[i];
}
});
}
void BKE_mesh_legacy_convert_mpoly_to_material_indices(Mesh *mesh)
{
using namespace blender;
@ -1708,87 +1523,6 @@ void BKE_mesh_legacy_convert_mpoly_to_material_indices(Mesh *mesh)
/** \name Generic UV Map Conversion
* \{ */
static const bool *layers_find_bool_named(const Span<CustomDataLayer> layers,
const blender::StringRef name)
{
for (const CustomDataLayer &layer : layers) {
if (layer.type == CD_PROP_BOOL) {
if (layer.name == name) {
return static_cast<const bool *>(layer.data);
}
}
}
return nullptr;
}
void BKE_mesh_legacy_convert_uvs_to_struct(
Mesh *mesh,
blender::ResourceScope &temp_mloopuv_for_convert,
blender::Vector<CustomDataLayer, 16> &loop_layers_to_write)
{
using namespace blender;
using namespace blender::bke;
const int loops_num = mesh->totloop;
Vector<CustomDataLayer, 16> new_layer_to_write;
/* Don't write the boolean UV map sublayers which will be written in the legacy #MLoopUV type. */
Set<std::string> uv_sublayers_to_skip;
char vert_name[MAX_CUSTOMDATA_LAYER_NAME];
char edge_name[MAX_CUSTOMDATA_LAYER_NAME];
char pin_name[MAX_CUSTOMDATA_LAYER_NAME];
for (const CustomDataLayer &layer : loop_layers_to_write) {
if (layer.type == CD_PROP_FLOAT2) {
uv_sublayers_to_skip.add_multiple_new(
{BKE_uv_map_vert_select_name_get(layer.name, vert_name),
BKE_uv_map_edge_select_name_get(layer.name, edge_name),
BKE_uv_map_pin_name_get(layer.name, pin_name)});
}
}
for (const CustomDataLayer &layer : loop_layers_to_write) {
if (layer.name[0] && uv_sublayers_to_skip.contains_as(layer.name)) {
continue;
}
if (layer.type != CD_PROP_FLOAT2) {
new_layer_to_write.append(layer);
continue;
}
const Span<float2> coords{static_cast<const float2 *>(layer.data), loops_num};
CustomDataLayer mloopuv_layer = layer;
mloopuv_layer.type = CD_MLOOPUV;
MutableSpan<MLoopUV> mloopuv = temp_mloopuv_for_convert.construct<Array<MLoopUV>>(loops_num);
mloopuv_layer.data = mloopuv.data();
char buffer[MAX_CUSTOMDATA_LAYER_NAME];
const bool *vert_selection = layers_find_bool_named(
loop_layers_to_write, BKE_uv_map_vert_select_name_get(layer.name, buffer));
const bool *edge_selection = layers_find_bool_named(
loop_layers_to_write, BKE_uv_map_edge_select_name_get(layer.name, buffer));
const bool *pin = layers_find_bool_named(loop_layers_to_write,
BKE_uv_map_pin_name_get(layer.name, buffer));
threading::parallel_for(mloopuv.index_range(), 2048, [&](IndexRange range) {
for (const int i : range) {
copy_v2_v2(mloopuv[i].uv, coords[i]);
SET_FLAG_FROM_TEST(mloopuv[i].flag, vert_selection && vert_selection[i], MLOOPUV_VERTSEL);
SET_FLAG_FROM_TEST(mloopuv[i].flag, edge_selection && edge_selection[i], MLOOPUV_EDGESEL);
SET_FLAG_FROM_TEST(mloopuv[i].flag, pin && pin[i], MLOOPUV_PINNED);
}
});
new_layer_to_write.append(mloopuv_layer);
}
/* #CustomData expects the layers to be sorted in increasing order based on type. */
std::stable_sort(
new_layer_to_write.begin(),
new_layer_to_write.end(),
[](const CustomDataLayer &a, const CustomDataLayer &b) { return a.type < b.type; });
loop_layers_to_write = new_layer_to_write;
mesh->ldata.totlayer = new_layer_to_write.size();
mesh->ldata.maxlayer = mesh->ldata.totlayer;
}
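The conversion above packs a generic float2 UV layer plus up to three optional boolean sublayers (vertex selection, edge selection, pinning) back into the single legacy per-corner struct. A compact sketch of that packing step, with hypothetical struct and flag names standing in for MLoopUV and its flags:

#include <cstdint>
#include <vector>

struct Float2 {
  float x, y;
};

struct LegacyCornerUV {
  float uv[2];
  uint32_t flag;
};

enum : uint32_t {
  LEGACY_UV_VERT_SEL = 1 << 0,
  LEGACY_UV_EDGE_SEL = 1 << 1,
  LEGACY_UV_PINNED = 1 << 2,
};

/* The boolean sublayers may be null when the corresponding attribute does not exist. */
static std::vector<LegacyCornerUV> pack_legacy_uvs(const std::vector<Float2> &coords,
                                                   const bool *vert_selection,
                                                   const bool *edge_selection,
                                                   const bool *pin)
{
  std::vector<LegacyCornerUV> result(coords.size());
  for (size_t i = 0; i < coords.size(); i++) {
    result[i].uv[0] = coords[i].x;
    result[i].uv[1] = coords[i].y;
    result[i].flag = 0;
    if (vert_selection && vert_selection[i]) {
      result[i].flag |= LEGACY_UV_VERT_SEL;
    }
    if (edge_selection && edge_selection[i]) {
      result[i].flag |= LEGACY_UV_EDGE_SEL;
    }
    if (pin && pin[i]) {
      result[i].flag |= LEGACY_UV_PINNED;
    }
  }
  return result;
}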
void BKE_mesh_legacy_convert_uvs_to_generic(Mesh *mesh)
{
using namespace blender;
@ -1918,40 +1652,6 @@ void BKE_mesh_legacy_convert_uvs_to_generic(Mesh *mesh)
/** \name Selection Attribute and Legacy Flag Conversion
* \{ */
void BKE_mesh_legacy_convert_selection_layers_to_flags(Mesh *mesh,
blender::MutableSpan<MPoly> legacy_polys)
{
using namespace blender;
using namespace blender::bke;
const AttributeAccessor attributes = mesh->attributes();
MutableSpan<MVert> verts(mesh->mvert, mesh->totvert);
const VArray<bool> select_vert = *attributes.lookup_or_default<bool>(
".select_vert", ATTR_DOMAIN_POINT, false);
threading::parallel_for(verts.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
SET_FLAG_FROM_TEST(verts[i].flag_legacy, select_vert[i], SELECT);
}
});
MutableSpan<MEdge> edges(mesh->medge, mesh->totedge);
const VArray<bool> select_edge = *attributes.lookup_or_default<bool>(
".select_edge", ATTR_DOMAIN_EDGE, false);
threading::parallel_for(edges.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
SET_FLAG_FROM_TEST(edges[i].flag_legacy, select_edge[i], SELECT);
}
});
const VArray<bool> select_poly = *attributes.lookup_or_default<bool>(
".select_poly", ATTR_DOMAIN_FACE, false);
threading::parallel_for(legacy_polys.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
SET_FLAG_FROM_TEST(legacy_polys[i].flag_legacy, select_poly[i], ME_FACE_SEL);
}
});
}
void BKE_mesh_legacy_convert_flags_to_selection_layers(Mesh *mesh)
{
using namespace blender;
@ -2013,67 +1713,16 @@ void BKE_mesh_legacy_convert_flags_to_selection_layers(Mesh *mesh)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Loose Edges
* \{ */
void BKE_mesh_legacy_convert_loose_edges_to_flag(Mesh *mesh)
{
using namespace blender;
using namespace blender::bke;
const LooseEdgeCache &loose_edges = mesh->loose_edges();
MutableSpan<MEdge> edges(mesh->medge, mesh->totedge);
threading::parallel_for(edges.index_range(), 4096, [&](const IndexRange range) {
if (loose_edges.count == 0) {
for (const int64_t i : range) {
edges[i].flag_legacy &= ~ME_LOOSEEDGE;
}
}
else {
for (const int64_t i : range) {
SET_FLAG_FROM_TEST(edges[i].flag_legacy, loose_edges.is_loose_bits[i], ME_LOOSEEDGE);
}
}
});
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Vertex and Position Conversion
* \{ */
MVert *BKE_mesh_legacy_convert_positions_to_verts(
Mesh *mesh,
blender::ResourceScope &temp_arrays_for_convert,
blender::Vector<CustomDataLayer, 16> &vert_layers_to_write)
{
using namespace blender;
const Span<float3> positions = mesh->vert_positions();
CustomDataLayer mvert_layer{};
mvert_layer.type = CD_MVERT;
MutableSpan<MVert> verts = temp_arrays_for_convert.construct<Array<MVert>>(mesh->totvert);
mvert_layer.data = verts.data();
threading::parallel_for(verts.index_range(), 2048, [&](IndexRange range) {
for (const int i : range) {
copy_v3_v3(verts[i].co_legacy, positions[i]);
}
});
vert_layers_to_write.append(mvert_layer);
return verts.data();
}
void BKE_mesh_legacy_convert_verts_to_positions(Mesh *mesh)
{
using namespace blender;
using namespace blender::bke;
const MVert *mvert = static_cast<const MVert *>(CustomData_get_layer(&mesh->vdata, CD_MVERT));
if (!mvert || CustomData_get_layer_named(&mesh->vdata, CD_PROP_FLOAT3, "position")) {
if (!mvert || CustomData_has_layer_named(&mesh->vdata, CD_PROP_FLOAT3, "position")) {
return;
}
@ -2098,37 +1747,12 @@ void BKE_mesh_legacy_convert_verts_to_positions(Mesh *mesh)
/** \name MEdge and int2 conversion
* \{ */
MEdge *BKE_mesh_legacy_convert_edges_to_medge(
Mesh *mesh,
blender::ResourceScope &temp_arrays_for_convert,
blender::Vector<CustomDataLayer, 16> &edge_layers_to_write)
{
using namespace blender;
const Span<int2> edges = mesh->edges();
CustomDataLayer medge_layer{};
medge_layer.type = CD_MEDGE;
MutableSpan<MEdge> legacy_edges = temp_arrays_for_convert.construct<Array<MEdge>>(mesh->totedge);
medge_layer.data = legacy_edges.data();
threading::parallel_for(edges.index_range(), 2048, [&](IndexRange range) {
for (const int i : range) {
legacy_edges[i] = {};
legacy_edges[i].v1 = edges[i][0];
legacy_edges[i].v2 = edges[i][1];
}
});
edge_layers_to_write.append(medge_layer);
return legacy_edges.data();
}
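Edges are now stored as plain vertex-index pairs, so writing old files only needs to copy each pair into the legacy two-field struct. A minimal sketch of the forward conversion with illustrative types, not the actual MEdge layout, which also carries legacy flags and weights:

#include <array>
#include <vector>

struct LegacyEdge {
  unsigned int v1 = 0, v2 = 0; /* Plus legacy flags/weights in the real struct. */
};

static std::vector<LegacyEdge> edges_to_legacy(const std::vector<std::array<int, 2>> &edges)
{
  std::vector<LegacyEdge> legacy(edges.size());
  for (size_t i = 0; i < edges.size(); i++) {
    legacy[i].v1 = (unsigned int)edges[i][0];
    legacy[i].v2 = (unsigned int)edges[i][1];
  }
  return legacy;
}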
void BKE_mesh_legacy_convert_edges_to_generic(Mesh *mesh)
{
using namespace blender;
using namespace blender::bke;
const MEdge *medge = static_cast<const MEdge *>(CustomData_get_layer(&mesh->edata, CD_MEDGE));
if (!medge || CustomData_get_layer_named(&mesh->edata, CD_PROP_INT32_2D, ".edge_verts")) {
if (!medge || CustomData_has_layer_named(&mesh->edata, CD_PROP_INT32_2D, ".edge_verts")) {
return;
}
@ -2217,95 +1841,17 @@ void BKE_mesh_legacy_attribute_flags_to_strings(Mesh *mesh)
default_from_indices(mesh->ldata);
}
void BKE_mesh_legacy_attribute_strings_to_flags(Mesh *mesh)
{
using namespace blender;
CustomData *vdata = &mesh->vdata;
CustomData *ldata = &mesh->ldata;
CustomData_clear_layer_flag(
vdata, CD_PROP_BYTE_COLOR, CD_FLAG_COLOR_ACTIVE | CD_FLAG_COLOR_RENDER);
CustomData_clear_layer_flag(
ldata, CD_PROP_BYTE_COLOR, CD_FLAG_COLOR_ACTIVE | CD_FLAG_COLOR_RENDER);
CustomData_clear_layer_flag(ldata, CD_PROP_COLOR, CD_FLAG_COLOR_ACTIVE | CD_FLAG_COLOR_RENDER);
CustomData_clear_layer_flag(vdata, CD_PROP_COLOR, CD_FLAG_COLOR_ACTIVE | CD_FLAG_COLOR_RENDER);
if (const char *name = mesh->active_color_attribute) {
int i;
if ((i = CustomData_get_named_layer_index(vdata, CD_PROP_BYTE_COLOR, name)) != -1) {
CustomData_set_layer_active_index(vdata, CD_PROP_BYTE_COLOR, i);
vdata->layers[i].flag |= CD_FLAG_COLOR_ACTIVE;
}
else if ((i = CustomData_get_named_layer_index(vdata, CD_PROP_COLOR, name)) != -1) {
CustomData_set_layer_active_index(vdata, CD_PROP_COLOR, i);
vdata->layers[i].flag |= CD_FLAG_COLOR_ACTIVE;
}
else if ((i = CustomData_get_named_layer_index(ldata, CD_PROP_BYTE_COLOR, name)) != -1) {
CustomData_set_layer_active_index(ldata, CD_PROP_BYTE_COLOR, i);
ldata->layers[i].flag |= CD_FLAG_COLOR_ACTIVE;
}
else if ((i = CustomData_get_named_layer_index(ldata, CD_PROP_COLOR, name)) != -1) {
CustomData_set_layer_active_index(ldata, CD_PROP_COLOR, i);
ldata->layers[i].flag |= CD_FLAG_COLOR_ACTIVE;
}
}
if (const char *name = mesh->default_color_attribute) {
int i;
if ((i = CustomData_get_named_layer_index(vdata, CD_PROP_BYTE_COLOR, name)) != -1) {
CustomData_set_layer_render_index(vdata, CD_PROP_BYTE_COLOR, i);
vdata->layers[i].flag |= CD_FLAG_COLOR_RENDER;
}
else if ((i = CustomData_get_named_layer_index(vdata, CD_PROP_COLOR, name)) != -1) {
CustomData_set_layer_render_index(vdata, CD_PROP_COLOR, i);
vdata->layers[i].flag |= CD_FLAG_COLOR_RENDER;
}
else if ((i = CustomData_get_named_layer_index(ldata, CD_PROP_BYTE_COLOR, name)) != -1) {
CustomData_set_layer_render_index(ldata, CD_PROP_BYTE_COLOR, i);
ldata->layers[i].flag |= CD_FLAG_COLOR_RENDER;
}
else if ((i = CustomData_get_named_layer_index(ldata, CD_PROP_COLOR, name)) != -1) {
CustomData_set_layer_render_index(ldata, CD_PROP_COLOR, i);
ldata->layers[i].flag |= CD_FLAG_COLOR_RENDER;
}
}
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Face Corner Conversion
* \{ */
MLoop *BKE_mesh_legacy_convert_corners_to_loops(
Mesh *mesh,
blender::ResourceScope &temp_arrays_for_convert,
blender::Vector<CustomDataLayer, 16> &loop_layers_to_write)
{
using namespace blender;
const Span<int> corner_verts = mesh->corner_verts();
const Span<int> corner_edges = mesh->corner_edges();
CustomDataLayer mloop_layer{};
mloop_layer.type = CD_MLOOP;
MutableSpan<MLoop> loops = temp_arrays_for_convert.construct<Array<MLoop>>(mesh->totloop);
mloop_layer.data = loops.data();
threading::parallel_for(loops.index_range(), 2048, [&](IndexRange range) {
for (const int i : range) {
loops[i].v = corner_verts[i];
loops[i].e = corner_edges[i];
}
});
loop_layers_to_write.append(mloop_layer);
return loops.data();
}
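This is the array-of-structs to structure-of-arrays bridge: the mesh now keeps separate ".corner_vert" and ".corner_edge" integer attributes, while old files expect interleaved per-corner structs. A short sketch of both directions, using a stand-in struct for MLoop:

#include <vector>

struct LegacyCorner {
  int vert;
  int edge;
};

static std::vector<LegacyCorner> corners_to_legacy(const std::vector<int> &corner_verts,
                                                   const std::vector<int> &corner_edges)
{
  std::vector<LegacyCorner> loops(corner_verts.size());
  for (size_t i = 0; i < loops.size(); i++) {
    loops[i] = {corner_verts[i], corner_edges[i]};
  }
  return loops;
}

static void corners_from_legacy(const std::vector<LegacyCorner> &loops,
                                std::vector<int> &corner_verts,
                                std::vector<int> &corner_edges)
{
  corner_verts.resize(loops.size());
  corner_edges.resize(loops.size());
  for (size_t i = 0; i < loops.size(); i++) {
    corner_verts[i] = loops[i].vert;
    corner_edges[i] = loops[i].edge;
  }
}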
void BKE_mesh_legacy_convert_loops_to_corners(Mesh *mesh)
{
using namespace blender;
if (CustomData_get_layer_named(&mesh->ldata, CD_PROP_INT32, ".corner_vert") &&
CustomData_get_layer_named(&mesh->ldata, CD_PROP_INT32, ".corner_edge"))
if (CustomData_has_layer_named(&mesh->ldata, CD_PROP_INT32, ".corner_vert") &&
CustomData_has_layer_named(&mesh->ldata, CD_PROP_INT32, ".corner_edge"))
{
return;
}
@ -2335,30 +1881,6 @@ void BKE_mesh_legacy_convert_loops_to_corners(Mesh *mesh)
/** \name Poly Offset Conversion
* \{ */
blender::MutableSpan<MPoly> BKE_mesh_legacy_convert_offsets_to_polys(
const Mesh *mesh,
blender::ResourceScope &temp_arrays_for_convert,
blender::Vector<CustomDataLayer, 16> &poly_layers_to_write)
{
using namespace blender;
const OffsetIndices polys = mesh->polys();
MutableSpan<MPoly> polys_legacy = temp_arrays_for_convert.construct<Array<MPoly>>(mesh->totpoly);
threading::parallel_for(polys_legacy.index_range(), 2048, [&](IndexRange range) {
for (const int i : range) {
polys_legacy[i].loopstart = polys[i].start();
polys_legacy[i].totloop = polys[i].size();
}
});
CustomDataLayer layer{};
layer.type = CD_MPOLY;
layer.data = polys_legacy.data();
poly_layers_to_write.append(layer);
return polys_legacy;
}
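Face topology is now an offsets array: face i owns the corner range [offsets[i], offsets[i + 1]), which is exactly what polys[i].start() and polys[i].size() return above. A sketch of deriving the legacy (loopstart, totloop) pairs from such an offsets array:

#include <utility>
#include <vector>

/* `offsets` has `faces_num + 1` entries; the last one is the total corner count. */
static std::vector<std::pair<int, int>> offsets_to_legacy_polys(const std::vector<int> &offsets)
{
  std::vector<std::pair<int, int>> legacy; /* (loopstart, totloop) per face. */
  if (offsets.size() < 2) {
    return legacy; /* No faces. */
  }
  legacy.reserve(offsets.size() - 1);
  for (size_t i = 0; i + 1 < offsets.size(); i++) {
    legacy.emplace_back(offsets[i], offsets[i + 1] - offsets[i]);
  }
  return legacy;
}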
static bool poly_loops_orders_match(const Span<MPoly> polys)
{
for (const int i : polys.index_range().drop_back(1)) {

View File

@ -32,6 +32,24 @@ BLI_NOINLINE static void sample_point_attribute(const Span<int> corner_verts,
}
}
void sample_point_normals(const Span<int> corner_verts,
const Span<MLoopTri> looptris,
const Span<int> looptri_indices,
const Span<float3> bary_coords,
const Span<float3> src,
const IndexMask mask,
const MutableSpan<float3> dst)
{
for (const int i : mask) {
const MLoopTri &tri = looptris[looptri_indices[i]];
const float3 value = attribute_math::mix3(bary_coords[i],
src[corner_verts[tri.tri[0]]],
src[corner_verts[tri.tri[1]]],
src[corner_verts[tri.tri[2]]]);
dst[i] = math::normalize(value);
}
}
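sample_point_normals is the normal-aware variant of the generic attribute sampling: it blends the three corner-vertex normals with the barycentric weights and then re-normalizes, since a linear mix of unit vectors is generally not unit length. A standalone sketch of that step with a minimal float3 type; the real code uses blender::math and attribute_math::mix3:

#include <cmath>

struct Float3 {
  float x, y, z;
};

static Float3 interpolate_normal(const Float3 &bary, const Float3 &n0, const Float3 &n1, const Float3 &n2)
{
  /* Weighted sum with the barycentric coordinates of the sample point. */
  Float3 n = {bary.x * n0.x + bary.y * n1.x + bary.z * n2.x,
              bary.x * n0.y + bary.y * n1.y + bary.z * n2.y,
              bary.x * n0.z + bary.y * n1.z + bary.z * n2.z};
  /* Re-normalize: the mix of unit vectors is shorter than 1 unless all inputs agree. */
  const float len = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
  if (len > 0.0f) {
    n.x /= len;
    n.y /= len;
    n.z /= len;
  }
  return n;
}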
void sample_point_attribute(const Span<int> corner_verts,
const Span<MLoopTri> looptris,
const Span<int> looptri_indices,

View File

@ -71,10 +71,6 @@
#include "tracking_private.h"
/* Convert camera object to legacy format where the camera tracks are stored in the MovieTracking
* structure when saving .blend file. */
#define USE_LEGACY_CAMERA_OBJECT_FORMAT_ON_SAVE 1
static void free_buffers(MovieClip *clip);
static void movie_clip_init_data(ID *id)
@ -200,39 +196,6 @@ static void movieclip_blend_write(BlendWriter *writer, ID *id, const void *id_ad
MovieTracking *tracking = &clip->tracking;
#if USE_LEGACY_CAMERA_OBJECT_FORMAT_ON_SAVE
const bool is_undo = BLO_write_is_undo(writer);
/* When using legacy format for camera object assign the list of camera tracks to the
* MovieTracking object. Do it in-place as it simplifies the code a bit, and it is not
* supposed to cause threading issues as no other code is meant to access the legacy fields. */
if (!is_undo) {
MovieTrackingObject *active_tracking_object = BKE_tracking_object_get_active(tracking);
MovieTrackingObject *tracking_camera_object = BKE_tracking_object_get_camera(tracking);
BLI_assert(active_tracking_object != NULL);
BLI_assert(tracking_camera_object != NULL);
tracking->tracks_legacy = tracking_camera_object->tracks;
tracking->plane_tracks_legacy = tracking_camera_object->plane_tracks;
/* The active track in the tracking structure used to be shared across all tracking objects. */
tracking->act_track_legacy = active_tracking_object->active_track;
tracking->act_plane_track_legacy = active_tracking_object->active_plane_track;
tracking->reconstruction_legacy = tracking_camera_object->reconstruction;
}
#endif
/* Assign the pixel-space principal point for forward compatibility. */
/* TODO(sergey): Remove with the next major version update when forward compatibility is allowed
* to be broken. */
if (!is_undo && clip->lastsize[0] != 0 && clip->lastsize[1] != 0) {
tracking_principal_point_normalized_to_pixel(tracking->camera.principal_point,
clip->lastsize[0],
clip->lastsize[1],
tracking->camera.principal_legacy);
}
BLO_write_id_struct(writer, MovieClip, id_address, &clip->id);
BKE_id_blend_write(writer, &clip->id);
@ -241,39 +204,11 @@ static void movieclip_blend_write(BlendWriter *writer, ID *id, const void *id_ad
}
LISTBASE_FOREACH (MovieTrackingObject *, object, &tracking->objects) {
#if USE_LEGACY_CAMERA_OBJECT_FORMAT_ON_SAVE
/* When saving cameras object in the legacy format clear the list of tracks. This is because
* the tracking object code is generic and assumes object owns the tracks in the list. For the
* camera tracks that is not the case in the legacy format. */
if (!is_undo && (object->flag & TRACKING_OBJECT_CAMERA)) {
MovieTrackingObject legacy_object = *object;
BLI_listbase_clear(&legacy_object.tracks);
BLI_listbase_clear(&legacy_object.plane_tracks);
legacy_object.active_track = NULL;
legacy_object.active_plane_track = NULL;
memset(&legacy_object.reconstruction, 0, sizeof(legacy_object.reconstruction));
BLO_write_struct_at_address(writer, MovieTrackingObject, object, &legacy_object);
}
else
#endif
{
BLO_write_struct(writer, MovieTrackingObject, object);
}
BLO_write_struct(writer, MovieTrackingObject, object);
write_movieTracks(writer, &object->tracks);
write_moviePlaneTracks(writer, &object->plane_tracks);
write_movieReconstruction(writer, &object->reconstruction);
}
#if USE_LEGACY_CAMERA_OBJECT_FORMAT_ON_SAVE
if (!is_undo) {
BLI_listbase_clear(&tracking->tracks_legacy);
BLI_listbase_clear(&tracking->plane_tracks_legacy);
tracking->act_track_legacy = NULL;
tracking->act_plane_track_legacy = NULL;
memset(&tracking->reconstruction_legacy, 0, sizeof(tracking->reconstruction_legacy));
}
#endif
}
static void direct_link_movieReconstruction(BlendDataReader *reader,
@ -350,7 +285,7 @@ static void movieclip_blend_read_data(BlendDataReader *reader, ID *id)
static void lib_link_movieTracks(BlendLibReader *reader, MovieClip *clip, ListBase *tracksbase)
{
LISTBASE_FOREACH (MovieTrackingTrack *, track, tracksbase) {
BLO_read_id_address(reader, clip->id.lib, &track->gpd);
BLO_read_id_address(reader, &clip->id, &track->gpd);
}
}
@ -359,7 +294,7 @@ static void lib_link_moviePlaneTracks(BlendLibReader *reader,
ListBase *tracksbase)
{
LISTBASE_FOREACH (MovieTrackingPlaneTrack *, plane_track, tracksbase) {
BLO_read_id_address(reader, clip->id.lib, &plane_track->image);
BLO_read_id_address(reader, &clip->id, &plane_track->image);
}
}
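From here on, most hunks in this commit are the same mechanical change: read-library callbacks pass the owning ID to BLO_read_id_address instead of its `lib` pointer. A hypothetical sketch of that kind of signature migration, assuming (as the old call sites suggest) that the library is still reachable through the owner; none of these names are the real blenloader API:

struct Library;

struct ID {
  Library *lib; /* Null for local data-blocks. */
};

/* Old style: every caller had to dig out the library itself. */
static void read_address_with_lib(Library * /*owner_lib*/, void ** /*id_pointer*/)
{
  /* Resolve the pointer against the read session (omitted in this sketch). */
}

/* New style: pass the owner and let the reader derive whatever it needs. */
static void read_address_with_owner(ID *owner, void **id_pointer)
{
  read_address_with_lib(owner ? owner->lib : nullptr, id_pointer);
}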
@ -368,7 +303,7 @@ static void movieclip_blend_read_lib(BlendLibReader *reader, ID *id)
MovieClip *clip = (MovieClip *)id;
MovieTracking *tracking = &clip->tracking;
BLO_read_id_address(reader, clip->id.lib, &clip->gpd);
BLO_read_id_address(reader, id, &clip->gpd);
LISTBASE_FOREACH (MovieTrackingObject *, object, &tracking->objects) {
lib_link_movieTracks(reader, clip, &object->tracks);
@ -613,11 +548,9 @@ void BKE_movieclip_convert_multilayer_ibuf(struct ImBuf *ibuf)
movieclip_convert_multilayer_add_layer,
movieclip_convert_multilayer_add_pass);
if (ctx.combined_pass != NULL) {
BLI_assert(ibuf->rect_float == NULL);
ibuf->rect_float = ctx.combined_pass;
BLI_assert(ibuf->float_buffer.data == NULL);
IMB_assign_float_buffer(ibuf, ctx.combined_pass, IB_TAKE_OWNERSHIP);
ibuf->channels = ctx.num_combined_channels;
ibuf->flags |= IB_rectfloat;
ibuf->mall |= IB_rectfloat;
}
IMB_exr_close(ibuf->userdata);
ibuf->userdata = NULL;
@ -1797,7 +1730,7 @@ void BKE_movieclip_update_scopes(MovieClip *clip,
scopes->track_disabled = false;
if (ibuf && (ibuf->rect || ibuf->rect_float)) {
if (ibuf && (ibuf->byte_buffer.data || ibuf->float_buffer.data)) {
MovieTrackingMarker undist_marker = *marker;
if (user->render_flag & MCLIP_PROXY_RENDER_UNDISTORT) {
@ -2114,7 +2047,7 @@ GPUTexture *BKE_movieclip_get_gpu_texture(MovieClip *clip, MovieClipUser *cuser)
/* This only means RGBA16F instead of RGBA32F. */
const bool high_bitdepth = false;
const bool store_premultiplied = ibuf->rect_float ? false : true;
const bool store_premultiplied = ibuf->float_buffer.data ? false : true;
*tex = IMB_create_gpu_texture(clip->id.name + 2, ibuf, high_bitdepth, store_premultiplied);
/* Do not generate mips for movieclips... too slow. */

View File

@ -822,18 +822,15 @@ static void foreach_edge(const SubdivForeachContext *foreach_context,
return;
}
/* Ignore all inner face edges as they have sharpness of zero when using Catmull-Clark mode. In
* simple mode, all edges have maximum sharpness, so they can't be skipped. */
if (coarse_edge_index == ORIGINDEX_NONE &&
reshape_smooth_context->smoothing_type != MULTIRES_SUBDIVIDE_SIMPLE)
{
/* Ignore all inner face edges as they have sharpness of zero. */
if (coarse_edge_index == ORIGINDEX_NONE) {
return;
}
/* Ignore all loose edges as well, as they are not communicated to OpenSubdiv. */
if (!reshape_smooth_context->loose_base_edges.is_empty() &&
reshape_smooth_context->loose_base_edges[coarse_edge_index])
{
return;
if (!reshape_smooth_context->loose_base_edges.is_empty()) {
if (reshape_smooth_context->loose_base_edges[coarse_edge_index]) {
return;
}
}
/* Edges without crease are to be ignored as well. */
const float crease = get_effective_crease(reshape_smooth_context, coarse_edge_index);

View File

@ -2387,7 +2387,7 @@ static void blend_lib_read_nla_strips(BlendLibReader *reader, ID *id, ListBase *
BKE_fcurve_blend_read_lib(reader, id, &strip->fcurves);
/* reassign the counted-reference to action */
BLO_read_id_address(reader, id->lib, &strip->act);
BLO_read_id_address(reader, id, &strip->act);
}
}

View File

@ -879,9 +879,9 @@ static void ntree_blend_read_data(BlendDataReader *reader, ID *id)
ntreeBlendReadData(reader, nullptr, ntree);
}
static void lib_link_node_socket(BlendLibReader *reader, Library *lib, bNodeSocket *sock)
static void lib_link_node_socket(BlendLibReader *reader, ID *self_id, bNodeSocket *sock)
{
IDP_BlendReadLib(reader, lib, sock->prop);
IDP_BlendReadLib(reader, self_id, sock->prop);
/* This can happen for all socket types when a file is saved in an older version of Blender than
* it was originally created in (#86298). Some socket types still require a default value. The
@ -893,26 +893,27 @@ static void lib_link_node_socket(BlendLibReader *reader, Library *lib, bNodeSock
switch (eNodeSocketDatatype(sock->type)) {
case SOCK_OBJECT: {
BLO_read_id_address(
reader, lib, &sock->default_value_typed<bNodeSocketValueObject>()->value);
reader, self_id, &sock->default_value_typed<bNodeSocketValueObject>()->value);
break;
}
case SOCK_IMAGE: {
BLO_read_id_address(reader, lib, &sock->default_value_typed<bNodeSocketValueImage>()->value);
BLO_read_id_address(
reader, self_id, &sock->default_value_typed<bNodeSocketValueImage>()->value);
break;
}
case SOCK_COLLECTION: {
BLO_read_id_address(
reader, lib, &sock->default_value_typed<bNodeSocketValueCollection>()->value);
reader, self_id, &sock->default_value_typed<bNodeSocketValueCollection>()->value);
break;
}
case SOCK_TEXTURE: {
BLO_read_id_address(
reader, lib, &sock->default_value_typed<bNodeSocketValueTexture>()->value);
reader, self_id, &sock->default_value_typed<bNodeSocketValueTexture>()->value);
break;
}
case SOCK_MATERIAL: {
BLO_read_id_address(
reader, lib, &sock->default_value_typed<bNodeSocketValueMaterial>()->value);
reader, self_id, &sock->default_value_typed<bNodeSocketValueMaterial>()->value);
break;
}
case SOCK_FLOAT:
@ -929,32 +930,30 @@ static void lib_link_node_socket(BlendLibReader *reader, Library *lib, bNodeSock
}
}
static void lib_link_node_sockets(BlendLibReader *reader, Library *lib, ListBase *sockets)
static void lib_link_node_sockets(BlendLibReader *reader, ID *self_id, ListBase *sockets)
{
LISTBASE_FOREACH (bNodeSocket *, sock, sockets) {
lib_link_node_socket(reader, lib, sock);
lib_link_node_socket(reader, self_id, sock);
}
}
void ntreeBlendReadLib(BlendLibReader *reader, bNodeTree *ntree)
{
Library *lib = ntree->id.lib;
BLO_read_id_address(reader, lib, &ntree->gpd);
BLO_read_id_address(reader, &ntree->id, &ntree->gpd);
LISTBASE_FOREACH (bNode *, node, &ntree->nodes) {
/* Link ID Properties -- and copy this comment EXACTLY for easy finding
* of library blocks that implement this. */
IDP_BlendReadLib(reader, lib, node->prop);
IDP_BlendReadLib(reader, &ntree->id, node->prop);
BLO_read_id_address(reader, lib, &node->id);
BLO_read_id_address(reader, &ntree->id, &node->id);
lib_link_node_sockets(reader, lib, &node->inputs);
lib_link_node_sockets(reader, lib, &node->outputs);
lib_link_node_sockets(reader, &ntree->id, &node->inputs);
lib_link_node_sockets(reader, &ntree->id, &node->outputs);
}
lib_link_node_sockets(reader, lib, &ntree->inputs);
lib_link_node_sockets(reader, lib, &ntree->outputs);
lib_link_node_sockets(reader, &ntree->id, &ntree->inputs);
lib_link_node_sockets(reader, &ntree->id, &ntree->outputs);
/* Set `node->typeinfo` pointers. This is done in lib linking, after the
* first versioning that can change types still without functions that

View File

@ -845,11 +845,11 @@ static void object_blend_read_data(BlendDataReader *reader, ID *id)
static void lib_link_nlastrips(BlendLibReader *reader, ID *id, ListBase *striplist)
{
LISTBASE_FOREACH (bActionStrip *, strip, striplist) {
BLO_read_id_address(reader, id->lib, &strip->object);
BLO_read_id_address(reader, id->lib, &strip->act);
BLO_read_id_address(reader, id->lib, &strip->ipo);
BLO_read_id_address(reader, id, &strip->object);
BLO_read_id_address(reader, id, &strip->act);
BLO_read_id_address(reader, id, &strip->ipo);
LISTBASE_FOREACH (bActionModifier *, amod, &strip->modifiers) {
BLO_read_id_address(reader, id->lib, &amod->ob);
BLO_read_id_address(reader, id, &amod->ob);
}
}
}
@ -858,7 +858,7 @@ static void lib_link_nlastrips(BlendLibReader *reader, ID *id, ListBase *stripli
static void lib_link_constraint_channels(BlendLibReader *reader, ID *id, ListBase *chanbase)
{
LISTBASE_FOREACH (bConstraintChannel *, chan, chanbase) {
BLO_read_id_address(reader, id->lib, &chan->ipo);
BLO_read_id_address(reader, id, &chan->ipo);
}
}
@ -870,23 +870,24 @@ static void object_blend_read_lib(BlendLibReader *reader, ID *id)
BlendFileReadReport *reports = BLO_read_lib_reports(reader);
/* XXX deprecated - old animation system <<< */
BLO_read_id_address(reader, ob->id.lib, &ob->ipo);
BLO_read_id_address(reader, ob->id.lib, &ob->action);
BLO_read_id_address(reader, id, &ob->ipo);
BLO_read_id_address(reader, id, &ob->action);
/* >>> XXX deprecated - old animation system */
BLO_read_id_address(reader, ob->id.lib, &ob->parent);
BLO_read_id_address(reader, ob->id.lib, &ob->track);
BLO_read_id_address(reader, id, &ob->parent);
BLO_read_id_address(reader, id, &ob->track);
/* XXX deprecated - old pose library, deprecated in Blender 3.5. */
BLO_read_id_address(reader, ob->id.lib, &ob->poselib);
BLO_read_id_address(reader, id, &ob->poselib);
/* 2.8x drops support for non-empty dupli instances. */
if (ob->type == OB_EMPTY) {
BLO_read_id_address(reader, ob->id.lib, &ob->instance_collection);
BLO_read_id_address(reader, id, &ob->instance_collection);
}
else {
if (ob->instance_collection != nullptr) {
ID *new_id = BLO_read_get_new_id_address(reader, ob->id.lib, &ob->instance_collection->id);
ID *new_id = BLO_read_get_new_id_address(
reader, id, ID_IS_LINKED(id), &ob->instance_collection->id);
BLO_reportf_wrap(reports,
RPT_INFO,
TIP_("Non-Empty object '%s' cannot duplicate collection '%s' "
@ -898,7 +899,7 @@ static void object_blend_read_lib(BlendLibReader *reader, ID *id)
ob->transflag &= ~OB_DUPLICOLLECTION;
}
BLO_read_id_address(reader, ob->id.lib, &ob->proxy);
BLO_read_id_address(reader, id, &ob->proxy);
if (ob->proxy) {
/* paranoia check, actually a proxy_from pointer should never be written... */
if (!ID_IS_LINKED(ob->proxy)) {
@ -923,10 +924,10 @@ static void object_blend_read_lib(BlendLibReader *reader, ID *id)
ob->proxy->proxy_from = ob;
}
}
BLO_read_id_address(reader, ob->id.lib, &ob->proxy_group);
BLO_read_id_address(reader, id, &ob->proxy_group);
void *poin = ob->data;
BLO_read_id_address(reader, ob->id.lib, &ob->data);
BLO_read_id_address(reader, id, &ob->data);
if (ob->data == nullptr && poin != nullptr) {
ob->type = OB_EMPTY;
@ -959,7 +960,7 @@ static void object_blend_read_lib(BlendLibReader *reader, ID *id)
reports->count.missing_obdata++;
}
for (int a = 0; a < ob->totcol; a++) {
BLO_read_id_address(reader, ob->id.lib, &ob->mat[a]);
BLO_read_id_address(reader, id, &ob->mat[a]);
}
/* When the object is local and the data is library its possible
@ -968,7 +969,7 @@ static void object_blend_read_lib(BlendLibReader *reader, ID *id)
BKE_object_materials_test(bmain, ob, (ID *)ob->data);
}
BLO_read_id_address(reader, ob->id.lib, &ob->gpd);
BLO_read_id_address(reader, id, &ob->gpd);
/* if id.us==0 a new base will be created later on */
@ -983,7 +984,7 @@ static void object_blend_read_lib(BlendLibReader *reader, ID *id)
LISTBASE_FOREACH (PartEff *, paf, &ob->effect) {
if (paf->type == EFF_PARTICLE) {
BLO_read_id_address(reader, ob->id.lib, &paf->group);
BLO_read_id_address(reader, id, &paf->group);
}
}
@ -993,7 +994,7 @@ static void object_blend_read_lib(BlendLibReader *reader, ID *id)
if (fluidmd && fluidmd->fss) {
/* XXX: deprecated - old animation system. */
BLO_read_id_address(reader, ob->id.lib, &fluidmd->fss->ipo);
BLO_read_id_address(reader, id, &fluidmd->fss->ipo);
}
}
@ -1019,9 +1020,9 @@ static void object_blend_read_lib(BlendLibReader *reader, ID *id)
}
if (ob->soft) {
BLO_read_id_address(reader, ob->id.lib, &ob->soft->collision_group);
BLO_read_id_address(reader, id, &ob->soft->collision_group);
BLO_read_id_address(reader, ob->id.lib, &ob->soft->effector_weights->group);
BLO_read_id_address(reader, id, &ob->soft->effector_weights->group);
}
BKE_particle_system_blend_read_lib(reader, ob, &ob->id, &ob->particlesystem);
@ -1030,8 +1031,8 @@ static void object_blend_read_lib(BlendLibReader *reader, ID *id)
BKE_shaderfx_blend_read_lib(reader, ob);
if (ob->rigidbody_constraint) {
BLO_read_id_address(reader, ob->id.lib, &ob->rigidbody_constraint->ob1);
BLO_read_id_address(reader, ob->id.lib, &ob->rigidbody_constraint->ob2);
BLO_read_id_address(reader, id, &ob->rigidbody_constraint->ob1);
BLO_read_id_address(reader, id, &ob->rigidbody_constraint->ob2);
}
}
@ -5648,7 +5649,7 @@ void BKE_object_modifiers_lib_link_common(void *userData,
{
BlendLibReader *reader = (BlendLibReader *)userData;
BLO_read_id_address(reader, ob->id.lib, idpoin);
BLO_read_id_address(reader, &ob->id, idpoin);
if (*idpoin != nullptr && (cb_flag & IDWALK_CB_USER) != 0) {
id_us_plus_no_lib(*idpoin);
}

View File

@ -1286,23 +1286,23 @@ void BKE_ocean_cache_eval_ij(struct OceanCache *och, struct OceanResult *ocr, in
j = j % res_y;
if (och->ibufs_disp[f]) {
copy_v3_v3(ocr->disp, &och->ibufs_disp[f]->rect_float[4 * (res_x * j + i)]);
copy_v3_v3(ocr->disp, &och->ibufs_disp[f]->float_buffer.data[4 * (res_x * j + i)]);
}
if (och->ibufs_foam[f]) {
ocr->foam = och->ibufs_foam[f]->rect_float[4 * (res_x * j + i)];
ocr->foam = och->ibufs_foam[f]->float_buffer.data[4 * (res_x * j + i)];
}
if (och->ibufs_spray[f]) {
copy_v3_v3(ocr->Eplus, &och->ibufs_spray[f]->rect_float[4 * (res_x * j + i)]);
copy_v3_v3(ocr->Eplus, &och->ibufs_spray[f]->float_buffer.data[4 * (res_x * j + i)]);
}
if (och->ibufs_spray_inverse[f]) {
copy_v3_v3(ocr->Eminus, &och->ibufs_spray_inverse[f]->rect_float[4 * (res_x * j + i)]);
copy_v3_v3(ocr->Eminus, &och->ibufs_spray_inverse[f]->float_buffer.data[4 * (res_x * j + i)]);
}
if (och->ibufs_norm[f]) {
copy_v3_v3(ocr->normal, &och->ibufs_norm[f]->rect_float[4 * (res_x * j + i)]);
copy_v3_v3(ocr->normal, &och->ibufs_norm[f]->float_buffer.data[4 * (res_x * j + i)]);
}
}
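Every access in this hunk uses the same addressing: the cached images are row-major RGBA float buffers, so texel (i, j) starts at component index 4 * (res_x * j + i). A tiny sketch of that indexing, assuming a tightly packed 4-channel buffer:

/* Return a pointer to the first (red) component of texel (i, j) in a
 * tightly packed, row-major RGBA float buffer of width `res_x`. */
static inline const float *texel_rgba(const float *buffer, const int res_x, const int i, const int j)
{
  return buffer + 4 * (res_x * j + i);
}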
@ -1435,7 +1435,7 @@ void BKE_ocean_bake(struct Ocean *o,
BKE_ocean_eval_ij(o, &ocr, x, y);
/* add to the image */
rgb_to_rgba_unit_alpha(&ibuf_disp->rect_float[4 * (res_x * y + x)], ocr.disp);
rgb_to_rgba_unit_alpha(&ibuf_disp->float_buffer.data[4 * (res_x * y + x)], ocr.disp);
if (o->_do_jacobian) {
/* TODO(@ideasman42): cleanup unused code. */
@ -1478,18 +1478,19 @@ void BKE_ocean_bake(struct Ocean *o,
// foam_result = min_ff(foam_result, 1.0f);
value_to_rgba_unit_alpha(&ibuf_foam->rect_float[4 * (res_x * y + x)], foam_result);
value_to_rgba_unit_alpha(&ibuf_foam->float_buffer.data[4 * (res_x * y + x)],
foam_result);
/* spray map baking */
if (o->_do_spray) {
rgb_to_rgba_unit_alpha(&ibuf_spray->rect_float[4 * (res_x * y + x)], ocr.Eplus);
rgb_to_rgba_unit_alpha(&ibuf_spray_inverse->rect_float[4 * (res_x * y + x)],
rgb_to_rgba_unit_alpha(&ibuf_spray->float_buffer.data[4 * (res_x * y + x)], ocr.Eplus);
rgb_to_rgba_unit_alpha(&ibuf_spray_inverse->float_buffer.data[4 * (res_x * y + x)],
ocr.Eminus);
}
}
if (o->_do_normals) {
rgb_to_rgba_unit_alpha(&ibuf_normal->rect_float[4 * (res_x * y + x)], ocr.normal);
rgb_to_rgba_unit_alpha(&ibuf_normal->float_buffer.data[4 * (res_x * y + x)], ocr.normal);
}
}
}

View File

@ -1246,13 +1246,13 @@ void BKE_paint_blend_read_data(BlendDataReader *reader, const Scene *scene, Pain
void BKE_paint_blend_read_lib(BlendLibReader *reader, Scene *sce, Paint *p)
{
if (p) {
BLO_read_id_address(reader, sce->id.lib, &p->brush);
BLO_read_id_address(reader, &sce->id, &p->brush);
for (int i = 0; i < p->tool_slots_len; i++) {
if (p->tool_slots[i].brush != nullptr) {
BLO_read_id_address(reader, sce->id.lib, &p->tool_slots[i].brush);
BLO_read_id_address(reader, &sce->id, &p->tool_slots[i].brush);
}
}
BLO_read_id_address(reader, sce->id.lib, &p->palette);
BLO_read_id_address(reader, &sce->id, &p->palette);
p->paint_cursor = nullptr;
BKE_paint_runtime_init(sce->toolsettings, p);

Some files were not shown because too many files have changed in this diff.