UI: Asset Shelf (Experimental Feature) #104831

Closed
Julian Eisel wants to merge 399 commits from asset-shelf into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
95 changed files with 1102 additions and 680 deletions
Showing only changes of commit 1dce368d37 - Show all commits

View File

@ -78,12 +78,7 @@ include(cmake/tbb.cmake)
include(cmake/python.cmake)
include(cmake/llvm.cmake)
include(cmake/osl.cmake)
option(USE_PIP_NUMPY "Install NumPy using pip wheel instead of building from source" OFF)
if(APPLE AND ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
set(USE_PIP_NUMPY ON)
else()
include(cmake/numpy.cmake)
endif()
include(cmake/numpy.cmake)
include(cmake/python_site_packages.cmake)
include(cmake/package_python.cmake)
include(cmake/openimageio.cmake)

View File

@ -38,15 +38,6 @@ ExternalProject_Add(external_python_site_packages
--no-binary :all:
)
if(USE_PIP_NUMPY)
# Use only wheel (and not build from source) to stop NumPy from linking against buggy
# Accelerate framework backend on macOS. Official wheels are built with OpenBLAS.
ExternalProject_Add_Step(external_python_site_packages after_install
COMMAND ${PYTHON_BINARY} -m pip install --no-cache-dir numpy==${NUMPY_VERSION} --only-binary :all:
DEPENDEES install
)
endif()
add_dependencies(
external_python_site_packages
external_python

View File

@ -80,6 +80,7 @@ set(_CLANG_FIND_COMPONENTS
clangAST
clangLex
clangBasic
clangSupport
)
set(_CLANG_LIBRARIES)
@ -94,7 +95,9 @@ foreach(COMPONENT ${_CLANG_FIND_COMPONENTS})
PATH_SUFFIXES
lib64 lib
)
list(APPEND _CLANG_LIBRARIES "${CLANG_${UPPERCOMPONENT}_LIBRARY}")
if(CLANG_${UPPERCOMPONENT}_LIBRARY)
list(APPEND _CLANG_LIBRARIES "${CLANG_${UPPERCOMPONENT}_LIBRARY}")
endif()
endforeach()

View File

@ -7,7 +7,7 @@ set(INC
)
set(INC_SYS
${X11_X11_INCLUDE_PATH}
)
set(SRC

View File

@ -204,7 +204,7 @@ ccl_device bool integrator_init_from_bake(KernelGlobals kg,
ray.time = 0.5f;
ray.dP = differential_zero_compact();
ray.dD = differential_zero_compact();
integrator_state_write_ray(kg, state, &ray);
integrator_state_write_ray(state, &ray);
/* Setup next kernel to execute. */
integrator_path_init(kg, state, DEVICE_KERNEL_INTEGRATOR_SHADE_BACKGROUND);
@ -299,7 +299,7 @@ ccl_device bool integrator_init_from_bake(KernelGlobals kg,
ray.dD = differential_zero_compact();
/* Write ray. */
integrator_state_write_ray(kg, state, &ray);
integrator_state_write_ray(state, &ray);
/* Setup and write intersection. */
Intersection isect ccl_optional_struct_init;
@ -309,7 +309,7 @@ ccl_device bool integrator_init_from_bake(KernelGlobals kg,
isect.v = v;
isect.t = 1.0f;
isect.type = PRIMITIVE_TRIANGLE;
integrator_state_write_isect(kg, state, &isect);
integrator_state_write_isect(state, &isect);
/* Setup next kernel to execute. */
const bool use_caustics = kernel_data.integrator.use_caustics &&

View File

@ -85,7 +85,7 @@ ccl_device bool integrator_init_from_camera(KernelGlobals kg,
}
/* Write camera ray to state. */
integrator_state_write_ray(kg, state, &ray);
integrator_state_write_ray(state, &ray);
}
/* Initialize path state for path integration. */

View File

@ -150,7 +150,7 @@ ccl_device_forceinline void integrator_intersect_next_kernel_after_shadow_catche
/* Continue with shading shadow catcher surface. Same as integrator_split_shadow_catcher, but
* using NEXT instead of INIT. */
Intersection isect ccl_optional_struct_init;
integrator_state_read_isect(kg, state, &isect);
integrator_state_read_isect(state, &isect);
const int shader = intersection_get_shader(kg, &isect);
const int flags = kernel_data_fetch(shaders, shader).flags;
@ -326,7 +326,7 @@ ccl_device void integrator_intersect_closest(KernelGlobals kg,
/* Read ray from integrator state into local memory. */
Ray ray ccl_optional_struct_init;
integrator_state_read_ray(kg, state, &ray);
integrator_state_read_ray(state, &ray);
kernel_assert(ray.tmax != 0.0f);
const uint visibility = path_state_ray_visibility(state);
@ -397,7 +397,7 @@ ccl_device void integrator_intersect_closest(KernelGlobals kg,
}
/* Write intersection result into global integrator state memory. */
integrator_state_write_isect(kg, state, &isect);
integrator_state_write_isect(state, &isect);
/* Setup up next kernel to be executed. */
integrator_intersect_next_kernel<DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST>(

View File

@ -142,7 +142,7 @@ ccl_device void integrator_intersect_shadow(KernelGlobals kg, IntegratorShadowSt
/* Read ray from integrator state into local memory. */
Ray ray ccl_optional_struct_init;
integrator_state_read_shadow_ray(kg, state, &ray);
integrator_state_read_shadow_ray(state, &ray);
ray.self.object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 0, object);
ray.self.prim = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 0, prim);
ray.self.light_object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 1, object);

View File

@ -73,7 +73,7 @@ ccl_device void integrator_volume_stack_init(KernelGlobals kg, IntegratorState s
ccl_private ShaderData *stack_sd = AS_SHADER_DATA(&stack_sd_storage);
Ray volume_ray ccl_optional_struct_init;
integrator_state_read_ray(kg, state, &volume_ray);
integrator_state_read_ray(state, &volume_ray);
/* Trace ray in random direction. Any direction works, Z up is a guess to get the
* fewest hits. */

View File

@ -16,7 +16,7 @@ ccl_device_inline void integrate_light(KernelGlobals kg,
{
/* Setup light sample. */
Intersection isect ccl_optional_struct_init;
integrator_state_read_isect(kg, state, &isect);
integrator_state_read_isect(state, &isect);
guiding_record_light_surface_segment(kg, state, &isect);

View File

@ -35,7 +35,7 @@ ccl_device_inline Spectrum integrate_transparent_surface_shadow(KernelGlobals kg
integrator_state_read_shadow_isect(state, &isect, hit);
Ray ray ccl_optional_struct_init;
integrator_state_read_shadow_ray(kg, state, &ray);
integrator_state_read_shadow_ray(state, &ray);
shader_setup_from_ray(kg, shadow_sd, &ray, &isect);
@ -70,7 +70,7 @@ ccl_device_inline void integrate_transparent_volume_shadow(KernelGlobals kg,
/* Setup shader data. */
Ray ray ccl_optional_struct_init;
integrator_state_read_shadow_ray(kg, state, &ray);
integrator_state_read_shadow_ray(state, &ray);
ray.self.object = OBJECT_NONE;
ray.self.prim = PRIM_NONE;
ray.self.light_object = OBJECT_NONE;

View File

@ -24,10 +24,10 @@ ccl_device_forceinline void integrate_surface_shader_setup(KernelGlobals kg,
ccl_private ShaderData *sd)
{
Intersection isect ccl_optional_struct_init;
integrator_state_read_isect(kg, state, &isect);
integrator_state_read_isect(state, &isect);
Ray ray ccl_optional_struct_init;
integrator_state_read_ray(kg, state, &ray);
integrator_state_read_ray(state, &ray);
shader_setup_from_ray(kg, sd, &ray, &isect);
}
@ -253,7 +253,7 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
}
/* Write shadow ray and associated state to global memory. */
integrator_state_write_shadow_ray(kg, shadow_state, &ray);
integrator_state_write_shadow_ray(shadow_state, &ray);
// Save memory by storing the light and object indices in the shadow_isect
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
@ -548,7 +548,7 @@ ccl_device_forceinline void integrate_surface_ao(KernelGlobals kg,
integrator_state_copy_volume_stack_to_shadow(kg, shadow_state, state);
/* Write shadow ray and associated state to global memory. */
integrator_state_write_shadow_ray(kg, shadow_state, &ray);
integrator_state_write_shadow_ray(shadow_state, &ray);
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, object) = ray.self.light_object;

View File

@ -827,7 +827,7 @@ ccl_device_forceinline void integrate_volume_direct_light(
kg, state, DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW, false);
/* Write shadow ray and associated state to global memory. */
integrator_state_write_shadow_ray(kg, shadow_state, &ray);
integrator_state_write_shadow_ray(shadow_state, &ray);
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, object) = ray.self.light_object;
@ -1172,10 +1172,10 @@ ccl_device void integrator_shade_volume(KernelGlobals kg,
#ifdef __VOLUME__
/* Setup shader data. */
Ray ray ccl_optional_struct_init;
integrator_state_read_ray(kg, state, &ray);
integrator_state_read_ray(state, &ray);
Intersection isect ccl_optional_struct_init;
integrator_state_read_isect(kg, state, &isect);
integrator_state_read_isect(state, &isect);
/* Set ray length to current segment. */
ray.tmax = (isect.prim != PRIM_NONE) ? isect.t : FLT_MAX;

View File

@ -11,8 +11,7 @@ CCL_NAMESPACE_BEGIN
/* Ray */
ccl_device_forceinline void integrator_state_write_ray(KernelGlobals kg,
IntegratorState state,
ccl_device_forceinline void integrator_state_write_ray(IntegratorState state,
ccl_private const Ray *ccl_restrict ray)
{
INTEGRATOR_STATE_WRITE(state, ray, P) = ray->P;
@ -24,8 +23,7 @@ ccl_device_forceinline void integrator_state_write_ray(KernelGlobals kg,
INTEGRATOR_STATE_WRITE(state, ray, dD) = ray->dD;
}
ccl_device_forceinline void integrator_state_read_ray(KernelGlobals kg,
ConstIntegratorState state,
ccl_device_forceinline void integrator_state_read_ray(ConstIntegratorState state,
ccl_private Ray *ccl_restrict ray)
{
ray->P = INTEGRATOR_STATE(state, ray, P);
@ -40,7 +38,7 @@ ccl_device_forceinline void integrator_state_read_ray(KernelGlobals kg,
/* Shadow Ray */
ccl_device_forceinline void integrator_state_write_shadow_ray(
KernelGlobals kg, IntegratorShadowState state, ccl_private const Ray *ccl_restrict ray)
IntegratorShadowState state, ccl_private const Ray *ccl_restrict ray)
{
INTEGRATOR_STATE_WRITE(state, shadow_ray, P) = ray->P;
INTEGRATOR_STATE_WRITE(state, shadow_ray, D) = ray->D;
@ -50,8 +48,7 @@ ccl_device_forceinline void integrator_state_write_shadow_ray(
INTEGRATOR_STATE_WRITE(state, shadow_ray, dP) = ray->dP;
}
ccl_device_forceinline void integrator_state_read_shadow_ray(KernelGlobals kg,
ConstIntegratorShadowState state,
ccl_device_forceinline void integrator_state_read_shadow_ray(ConstIntegratorShadowState state,
ccl_private Ray *ccl_restrict ray)
{
ray->P = INTEGRATOR_STATE(state, shadow_ray, P);
@ -66,7 +63,7 @@ ccl_device_forceinline void integrator_state_read_shadow_ray(KernelGlobals kg,
/* Intersection */
ccl_device_forceinline void integrator_state_write_isect(
KernelGlobals kg, IntegratorState state, ccl_private const Intersection *ccl_restrict isect)
IntegratorState state, ccl_private const Intersection *ccl_restrict isect)
{
INTEGRATOR_STATE_WRITE(state, isect, t) = isect->t;
INTEGRATOR_STATE_WRITE(state, isect, u) = isect->u;
@ -77,7 +74,7 @@ ccl_device_forceinline void integrator_state_write_isect(
}
ccl_device_forceinline void integrator_state_read_isect(
KernelGlobals kg, ConstIntegratorState state, ccl_private Intersection *ccl_restrict isect)
ConstIntegratorState state, ccl_private Intersection *ccl_restrict isect)
{
isect->prim = INTEGRATOR_STATE(state, isect, prim);
isect->object = INTEGRATOR_STATE(state, isect, object);

View File

@ -162,8 +162,8 @@ ccl_device_inline bool subsurface_scatter(KernelGlobals kg, IntegratorState stat
ray.P += ray.D * ray.tmax * 2.0f;
ray.D = -ray.D;
integrator_state_write_isect(kg, state, &ss_isect.hits[0]);
integrator_state_write_ray(kg, state, &ray);
integrator_state_write_isect(state, &ss_isect.hits[0]);
integrator_state_write_ray(state, &ray);
/* Advance random number offset for bounce. */
INTEGRATOR_STATE_WRITE(state, path, rng_offset) += PRNG_BOUNCE_NUM;

View File

@ -161,7 +161,11 @@ ccl_device_inline void osl_eval_nodes(KernelGlobals kg,
/* shadeindex = */ 0);
# endif
# if __cplusplus < 201703L
if (type == SHADER_TYPE_DISPLACEMENT) {
# else
if constexpr (type == SHADER_TYPE_DISPLACEMENT) {
# endif
sd->P = globals.P;
}
else if (globals.Ci) {

View File

@ -1646,8 +1646,8 @@ enum KernelFeatureFlag : uint32_t {
/* Must be constexpr on the CPU to avoid compile errors because the state types
* are different depending on the main, shadow or null path. For GPU we don't have
* C++17 everywhere so can't use it. */
#ifdef __KERNEL_GPU__
* C++17 everywhere so need to check it. */
#if __cplusplus < 201703L
# define IF_KERNEL_FEATURE(feature) if ((node_feature_mask & (KERNEL_FEATURE_##feature)) != 0U)
# define IF_KERNEL_NODES_FEATURE(feature) \
if ((node_feature_mask & (KERNEL_FEATURE_NODE_##feature)) != 0U)

View File

@ -442,6 +442,13 @@ void ColorSpaceManager::free_memory()
#endif
}
/* Install a minimal "raw" OpenColorIO configuration as the current one.
 * Per the header comment, this lets code (e.g. regression tests) run with a
 * configuration that is considered valid without knowing the configuration
 * used by the final application. Compiles to a no-op without WITH_OCIO. */
void ColorSpaceManager::init_fallback_config()
{
#ifdef WITH_OCIO
  /* CreateRaw() builds OCIO's built-in raw config; making it current avoids
   * relying on an externally configured OCIO environment. */
  OCIO::SetCurrentConfig(OCIO::Config::CreateRaw());
#endif
}
/* Template instantiations so we don't have to inline functions. */
template void ColorSpaceManager::to_scene_linear(ustring, uchar *, size_t, bool, bool);
template void ColorSpaceManager::to_scene_linear(ustring, ushort *, size_t, bool, bool);

View File

@ -43,6 +43,12 @@ class ColorSpaceManager {
/* Clear memory when the application exits. Invalidates all processors. */
static void free_memory();
/* Create a fallback color space configuration.
*
* This may be useful to allow regression tests to create a configuration which is considered
* valid without knowing the actual configuration used by the final application. */
static void init_fallback_config();
private:
static void is_builtin_colorspace(ustring colorspace, bool &is_no_op, bool &is_srgb);
};

View File

@ -57,7 +57,7 @@ OrientationBounds merge(const OrientationBounds &cone_a, const OrientationBounds
/* Rotate new axis to be between a and b. */
float theta_r = theta_o - a->theta_o;
float3 new_axis = rotate_around_axis(a->axis, cross(a->axis, b->axis), theta_r);
float3 new_axis = rotate_around_axis(a->axis, normalize(cross(a->axis, b->axis)), theta_r);
new_axis = normalize(new_axis);
return OrientationBounds({new_axis, theta_o, theta_e});

View File

@ -6,6 +6,7 @@
#include "device/device.h"
#include "scene/colorspace.h"
#include "scene/scene.h"
#include "scene/shader_graph.h"
#include "scene/shader_nodes.h"
@ -165,15 +166,29 @@ class RenderGraph : public testing::Test {
virtual void SetUp()
{
util_logging_start();
util_logging_verbosity_set(5);
/* The test is running outside of the typical application configuration when the OCIO is
* initialized prior to Cycles. Explicitly create the raw configuration to avoid the warning
* printed by OCIO when accessing a non-configured environment.
* Functionally it is the same as not doing this explicit call: the OCIO will warn and then do
* the same raw configuration. */
ColorSpaceManager::init_fallback_config();
device_cpu = Device::create(device_info, stats, profiler);
scene = new Scene(scene_params, device_cpu);
/* Initialize logging after the creation of the essential resources. This way the logging
* mock sink does not warn about uninteresting messages which happens prior to the setup of
* the actual mock sinks. */
util_logging_start();
util_logging_verbosity_set(5);
}
virtual void TearDown()
{
/* Effectively disable logging, so that the next test suite starts in an environment which is
* not logging by default. */
util_logging_verbosity_set(0);
delete scene;
delete device_cpu;
}

View File

@ -2664,13 +2664,7 @@ static void pointer_handle_enter(void *data,
/* Resetting scroll events is likely unnecessary,
* do this to avoid any possible problems as it's harmless. */
seat->pointer_scroll.smooth_xy[0] = 0;
seat->pointer_scroll.smooth_xy[1] = 0;
seat->pointer_scroll.discrete_xy[0] = 0;
seat->pointer_scroll.discrete_xy[1] = 0;
seat->pointer_scroll.inverted_xy[0] = false;
seat->pointer_scroll.inverted_xy[1] = false;
seat->pointer_scroll.axis_source = WL_POINTER_AXIS_SOURCE_WHEEL;
seat->pointer_scroll = GWL_SeatStatePointerScroll{};
seat->pointer.wl_surface_window = wl_surface;

View File

@ -450,10 +450,10 @@ void *MEM_guarded_mallocN(size_t len, const char *str)
#endif
return (++memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
mem_in_use);
return NULL;
}
@ -463,11 +463,11 @@ void *MEM_guarded_malloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Malloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)mem_in_use);
mem_in_use);
abort();
return NULL;
}
@ -523,10 +523,10 @@ void *MEM_guarded_mallocN_aligned(size_t len, size_t alignment, const char *str)
#endif
return (++memh);
}
print_error("aligned_malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("aligned_malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
mem_in_use);
return NULL;
}
@ -547,10 +547,10 @@ void *MEM_guarded_callocN(size_t len, const char *str)
#endif
return (++memh);
}
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
mem_in_use);
return NULL;
}
@ -560,11 +560,11 @@ void *MEM_guarded_calloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Calloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)mem_in_use);
mem_in_use);
abort();
return NULL;
}

View File

@ -213,10 +213,10 @@ void *MEM_lockfree_callocN(size_t len, const char *str)
return PTR_FROM_MEMHEAD(memh);
}
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)memory_usage_current());
memory_usage_current());
return NULL;
}
@ -226,11 +226,11 @@ void *MEM_lockfree_calloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Calloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(unsigned int)memory_usage_current());
memory_usage_current());
abort();
return NULL;
}
@ -256,10 +256,10 @@ void *MEM_lockfree_mallocN(size_t len, const char *str)
return PTR_FROM_MEMHEAD(memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)memory_usage_current());
memory_usage_current());
return NULL;
}
@ -269,11 +269,11 @@ void *MEM_lockfree_malloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Malloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)memory_usage_current());
memory_usage_current());
abort();
return NULL;
}
@ -325,10 +325,10 @@ void *MEM_lockfree_mallocN_aligned(size_t len, size_t alignment, const char *str
return PTR_FROM_MEMHEAD(memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)memory_usage_current());
memory_usage_current());
return NULL;
}

View File

@ -40,6 +40,25 @@
</screenshot>
</screenshots>
<releases>
<release version="3.5" date="2023-03-29">
<description>
<p>New features:</p>
<ul>
<li>Real-Time compositor</li>
<li>Vector displacement sculpting</li>
<li>Built-in hair node groups</li>
<li>Cycles many light sampling</li>
<li>Metal Viewport for macOS</li>
</ul>
<p>Enhancements:</p>
<ul>
<li>Support for importing and exporting compressed .USDZ files</li>
<li>New Ease operator in the graph editor</li>
<li>New Geometry Nodes, like Image Info and Blur Attribute</li>
<li>Font previews now differentiate better between Korean, Japanese, Simplified and Traditional Chinese</li>
</ul>
</description>
</release>
<release version="3.4" date="2022-12-07">
<description>
<p>New features:</p>

View File

@ -21,10 +21,7 @@ __all__ = (
"ImagePreviewCollection",
)
import _bpy
_utils_previews = _bpy._utils_previews
del _bpy
from _bpy import _utils_previews
_uuid_open = set()

View File

@ -379,7 +379,7 @@ class NODE_MT_geometry_node_GEO_MESH_OPERATIONS(Menu):
bl_idname = "NODE_MT_geometry_node_GEO_MESH_OPERATIONS"
bl_label = "Operations"
def draw(self, _context):
def draw(self, context):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeDualMesh")
node_add_menu.add_node_type(layout, "GeometryNodeEdgePathsToCurves")
@ -389,7 +389,7 @@ class NODE_MT_geometry_node_GEO_MESH_OPERATIONS(Menu):
node_add_menu.add_node_type(layout, "GeometryNodeMeshBoolean")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToCurve")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToPoints")
if _context.preferences.experimental.use_new_volume_nodes:
if context.preferences.experimental.use_new_volume_nodes:
node_add_menu.add_node_type(layout, "GeometryNodeMeshToSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToVolume")
node_add_menu.add_node_type(layout, "GeometryNodeScaleElements")
@ -448,14 +448,14 @@ class NODE_MT_category_GEO_POINT(Menu):
bl_idname = "NODE_MT_category_GEO_POINT"
bl_label = "Point"
def draw(self, _context):
def draw(self, context):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeDistributePointsInVolume")
node_add_menu.add_node_type(layout, "GeometryNodeDistributePointsOnFaces")
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodePoints")
node_add_menu.add_node_type(layout, "GeometryNodePointsToVertices")
if _context.preferences.experimental.use_new_volume_nodes:
if context.preferences.experimental.use_new_volume_nodes:
node_add_menu.add_node_type(layout, "GeometryNodePointsToSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodePointsToVolume")
layout.separator()
@ -593,11 +593,11 @@ class NODE_MT_category_GEO_VOLUME(Menu):
bl_idname = "NODE_MT_category_GEO_VOLUME"
bl_label = "Volume"
def draw(self, _context):
def draw(self, context):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeVolumeCube")
node_add_menu.add_node_type(layout, "GeometryNodeVolumeToMesh")
if _context.preferences.experimental.use_new_volume_nodes:
if context.preferences.experimental.use_new_volume_nodes:
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodeMeanFilterSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeOffsetSDFVolume")

View File

@ -294,6 +294,7 @@ class USERPREF_PT_interface_statusbar(InterfacePanel, CenterAlignMixIn, Panel):
col = layout.column(heading="Show")
col.prop(view, "show_statusbar_stats", text="Scene Statistics")
col.prop(view, "show_statusbar_scene_duration", text="Scene Duration")
col.prop(view, "show_statusbar_memory", text="System Memory")
col.prop(view, "show_statusbar_vram", text="Video Memory")
col.prop(view, "show_statusbar_version", text="Blender Version")

View File

@ -104,26 +104,6 @@ void BKE_mesh_ensure_default_orig_index_customdata(struct Mesh *mesh);
*/
void BKE_mesh_ensure_default_orig_index_customdata_no_check(struct Mesh *mesh);
/**
* Find the index of the loop in 'poly' which references vertex,
* returns -1 if not found
*/
int poly_find_loop_from_vert(const struct MPoly *poly, const int *poly_verts, int vert);
/**
* Fill \a r_adj with the loop indices in \a poly adjacent to the
* vertex. Returns the index of the loop matching vertex, or -1 if the
* vertex is not in \a poly
*/
int poly_get_adj_loops_from_vert(const struct MPoly *poly,
const int *corner_verts,
int vert,
int r_adj[2]);
/**
* Return the index of the edge vert that is not equal to \a v. If
* neither edge vertex is equal to \a v, returns -1.
*/
int BKE_mesh_edge_other_vert(const struct MEdge *e, int v);
/**
* Sets each output array element to the edge index if it is a real edge, or -1.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0-or-later. */
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
@ -142,7 +142,71 @@ void edges_sharp_from_angle_set(Span<MPoly> polys,
const float split_angle,
MutableSpan<bool> sharp_edges);
} // namespace blender::bke::mesh
/** \} */
/* -------------------------------------------------------------------- */
/** \name Topology Queries
* \{ */
/**
 * Find the index of the previous corner in the polygon, looping to the end if necessary.
 * The indices are into the entire corners array, not just the polygon's corners.
 */
inline int poly_corner_prev(const MPoly &poly, const int corner)
{
  /* When `corner` is the polygon's first corner, the boolean comparison adds
   * `totloop`, wrapping around to the polygon's last corner. */
  return corner - 1 + (corner == poly.loopstart) * poly.totloop;
}
/**
 * Find the index of the next corner in the polygon, looping to the start if necessary.
 * The indices are into the entire corners array, not just the polygon's corners.
 */
inline int poly_corner_next(const MPoly &poly, const int corner)
{
  /* Wrap from the polygon's last corner back to its first corner. */
  if (corner == poly.loopstart + poly.totloop - 1) {
    return poly.loopstart;
  }
  return corner + 1;
}
/**
* Find the index of the corner in the polygon that uses the given vertex.
* The index is into the entire corners array, not just the polygon's corners.
*/
inline int poly_find_corner_from_vert(const MPoly &poly,
const Span<int> corner_verts,
const int vert)
{
return poly.loopstart + corner_verts.slice(poly.loopstart, poly.totloop).first_index(vert);
}
/**
 * Return the vertex indices on either side of the given vertex, ordered based on the winding
 * direction of the polygon. The vertex must be in the polygon.
 *
 * NOTE(review): "adjecent" in the name is a misspelling of "adjacent"; renaming would
 * change the public interface and break callers, so it is documented rather than fixed here.
 */
inline int2 poly_find_adjecent_verts(const MPoly &poly,
                                     const Span<int> corner_verts,
                                     const int vert)
{
  /* Corner (into the full corners array) that references `vert`. */
  const int corner = poly_find_corner_from_vert(poly, corner_verts, vert);
  /* The previous/next corners wrap around the polygon, yielding the two
   * neighboring vertices in winding order. */
  return {corner_verts[poly_corner_prev(poly, corner)],
          corner_verts[poly_corner_next(poly, corner)]};
}
/**
 * Return the index of the edge's vertex that is not equal to \a vert.
 * If neither of the edge's vertices is equal to \a vert, return -1.
 */
inline int edge_other_vert(const MEdge &edge, const int vert)
{
  if (vert == edge.v1) {
    return edge.v2;
  }
  /* Either the matching endpoint's opposite, or -1 when `vert` is not on this edge. */
  return (vert == edge.v2) ? edge.v1 : -1;
}
/** \} */
@ -150,6 +214,8 @@ void edges_sharp_from_angle_set(Span<MPoly> polys,
/** \name Inline Mesh Data Access
* \{ */
} // namespace blender::bke::mesh
inline blender::Span<blender::float3> Mesh::vert_positions() const
{
return {reinterpret_cast<const blender::float3 *>(BKE_mesh_vert_positions(this)), this->totvert};

View File

@ -363,18 +363,5 @@ Array<Vector<int, 2>> build_edge_to_poly_map(Span<MPoly> polys,
int edges_num);
Vector<Vector<int>> build_edge_to_loop_map_resizable(Span<int> corner_edges, int edges_num);
inline int poly_loop_prev(const MPoly &poly, int loop_i)
{
return loop_i - 1 + (loop_i == poly.loopstart) * poly.totloop;
}
inline int poly_loop_next(const MPoly &poly, int loop_i)
{
if (loop_i == poly.loopstart + poly.totloop - 1) {
return poly.loopstart;
}
return loop_i + 1;
}
} // namespace blender::bke::mesh_topology
#endif

View File

@ -51,6 +51,7 @@ struct PaletteColor;
struct Scene;
struct StrokeCache;
struct Sculpt;
struct SculptSession;
struct SubdivCCG;
struct Tex;
struct ToolSettings;
@ -563,6 +564,8 @@ typedef struct SculptAttributePointers {
SculptAttribute *dyntopo_node_id_face;
} SculptAttributePointers;
#ifdef __cplusplus
typedef struct SculptSession {
/* Mesh data (not copied) can come either directly from a Mesh, or from a MultiresDM */
struct { /* Special handling for multires meshes */
@ -576,8 +579,8 @@ typedef struct SculptSession {
/* These are always assigned to base mesh data when using PBVH_FACES and PBVH_GRIDS. */
float (*vert_positions)[3];
const struct MPoly *polys;
const int *corner_verts;
blender::Span<MPoly> polys;
blender::Span<int> corner_verts;
/* These contain the vertex and poly counts of the final mesh. */
int totvert, totpoly;
@ -758,12 +761,14 @@ typedef struct SculptSession {
bool islands_valid; /* Is attrs.topology_island_key valid? */
} SculptSession;
#endif
void BKE_sculptsession_free(struct Object *ob);
void BKE_sculptsession_free_deformMats(struct SculptSession *ss);
void BKE_sculptsession_free_vwpaint_data(struct SculptSession *ss);
void BKE_sculptsession_bm_to_me(struct Object *ob, bool reorder);
void BKE_sculptsession_bm_to_me_for_render(struct Object *object);
int BKE_sculptsession_vertex_count(const SculptSession *ss);
int BKE_sculptsession_vertex_count(const struct SculptSession *ss);
/* Ensure an attribute layer exists. */
SculptAttribute *BKE_sculpt_attribute_ensure(struct Object *ob,
@ -911,6 +916,11 @@ bool BKE_object_attributes_active_color_fill(struct Object *ob,
const float fill_color[4],
bool only_selected);
/** C accessor for #Object::sculpt::pbvh. */
struct PBVH *BKE_object_sculpt_pbvh_get(struct Object *object);
bool BKE_object_sculpt_use_dyntopo(const struct Object *object);
void BKE_object_sculpt_dyntopo_smooth_shading_set(struct Object *object, bool value);
/* paint_canvas.cc */
/**

View File

@ -37,6 +37,7 @@ struct PBVH;
struct PBVHBatches;
struct PBVHNode;
struct PBVH_GPU_Args;
struct SculptSession;
struct SubdivCCG;
struct TaskParallelSettings;
struct Image;

View File

@ -310,6 +310,7 @@ typedef enum SubdivCCGAdjacencyType {
SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int *corner_verts,
int corners_num,
const struct MPoly *mpoly,
int *r_v1,
int *r_v2);

View File

@ -2583,8 +2583,12 @@ const char *CustomData_get_render_layer_name(const CustomData *data, const int t
void CustomData_set_layer_active(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active = n;
}
}
@ -2592,8 +2596,12 @@ void CustomData_set_layer_active(CustomData *data, const int type, const int n)
void CustomData_set_layer_render(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active_rnd = n;
}
}
@ -2601,8 +2609,12 @@ void CustomData_set_layer_render(CustomData *data, const int type, const int n)
void CustomData_set_layer_clone(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active_clone = n;
}
}
@ -2610,8 +2622,12 @@ void CustomData_set_layer_clone(CustomData *data, const int type, const int n)
void CustomData_set_layer_stencil(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active_mask = n;
}
}
@ -2619,48 +2635,64 @@ void CustomData_set_layer_stencil(CustomData *data, const int type, const int n)
void CustomData_set_layer_active_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active = layer_index;
}
}
}
void CustomData_set_layer_render_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_rnd = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active_rnd = layer_index;
}
}
}
void CustomData_set_layer_clone_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_clone = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active_clone = layer_index;
}
}
}
void CustomData_set_layer_stencil_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_mask = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active_mask = layer_index;
}
}
}

View File

@ -606,7 +606,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
/* For every corner, mix the values from the adjacent edges on the face. */
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const int loop_index_prev = mesh::poly_corner_prev(poly, loop_index);
const int edge = corner_edges[loop_index];
const int edge_prev = corner_edges[loop_index_prev];
mixer.mix_in(loop_index, old_values[edge]);
@ -633,7 +633,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
for (const int poly_index : range) {
const MPoly &poly = polys[poly_index];
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const int loop_index_prev = mesh::poly_corner_prev(poly, loop_index);
const int edge = corner_edges[loop_index];
const int edge_prev = corner_edges[loop_index_prev];
if (old_values[edge] && old_values[edge_prev]) {

View File

@ -1505,45 +1505,6 @@ void BKE_mesh_auto_smooth_flag_set(Mesh *me,
}
}
int poly_find_loop_from_vert(const MPoly *poly, const int *poly_corner_verts, int vert)
{
for (int j = 0; j < poly->totloop; j++) {
if (poly_corner_verts[j] == vert) {
return j;
}
}
return -1;
}
int poly_get_adj_loops_from_vert(const MPoly *poly,
const int *corner_verts,
int vert,
int r_adj[2])
{
int corner = poly_find_loop_from_vert(poly, &corner_verts[poly->loopstart], vert);
if (corner != -1) {
/* vertex was found */
r_adj[0] = corner_verts[ME_POLY_LOOP_PREV(poly, corner)];
r_adj[1] = corner_verts[ME_POLY_LOOP_NEXT(poly, corner)];
}
return corner;
}
int BKE_mesh_edge_other_vert(const MEdge *edge, int v)
{
if (edge->v1 == v) {
return edge->v2;
}
if (edge->v2 == v) {
return edge->v1;
}
return -1;
}
void BKE_mesh_looptri_get_real_edges(const MEdge *edges,
const int *corner_verts,
const int *corner_edges,

View File

@ -237,11 +237,12 @@ class MeshFairingContext : public FairingContext {
float r_adj_next[3],
float r_adj_prev[3]) override
{
using namespace blender;
const int vert = corner_verts_[loop];
const MPoly &poly = polys[loop_to_poly_map_[loop]];
const int corner = poly_find_loop_from_vert(&poly, &corner_verts_[poly.loopstart], vert);
copy_v3_v3(r_adj_next, co_[corner_verts_[ME_POLY_LOOP_NEXT(&poly, corner)]]);
copy_v3_v3(r_adj_prev, co_[corner_verts_[ME_POLY_LOOP_PREV(&poly, corner)]]);
const int2 adjecent_verts = bke::mesh::poly_find_adjecent_verts(poly, corner_verts_, vert);
copy_v3_v3(r_adj_next, co_[adjecent_verts[0]]);
copy_v3_v3(r_adj_prev, co_[adjecent_verts[1]]);
}
int other_vertex_index_from_loop(const int loop, const uint v) override

View File

@ -1284,7 +1284,7 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
const MPoly &poly = polys[poly_index];
for (const int ml_curr_index : IndexRange(poly.loopstart, poly.totloop)) {
const int ml_prev_index = mesh_topology::poly_loop_prev(poly, ml_curr_index);
const int ml_prev_index = mesh::poly_corner_prev(poly, ml_curr_index);
#if 0
printf("Checking loop %d / edge %u / vert %u (sharp edge: %d, skiploop: %d)",

View File

@ -740,13 +740,13 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
nearest.index = -1;
for (i = 0; i < numedges_dst; i++) {
const MEdge *e_dst = &edges_dst[i];
const MEdge &e_dst = edges_dst[i];
float best_totdist = FLT_MAX;
int best_eidx_src = -1;
int j = 2;
while (j--) {
const uint vidx_dst = j ? e_dst->v1 : e_dst->v2;
const uint vidx_dst = j ? e_dst.v1 : e_dst.v2;
/* Compute closest verts only once! */
if (v_dst_to_src_map[vidx_dst].hit_dist == -1.0f) {
@ -772,7 +772,7 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
/* Now, check all source edges of closest sources vertices,
* and select the one giving the smallest total verts-to-verts distance. */
for (j = 2; j--;) {
const uint vidx_dst = j ? e_dst->v1 : e_dst->v2;
const uint vidx_dst = j ? e_dst.v1 : e_dst.v2;
const float first_dist = v_dst_to_src_map[vidx_dst].hit_dist;
const int vidx_src = v_dst_to_src_map[vidx_dst].index;
int *eidx_src, k;
@ -785,10 +785,11 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
k = vert_to_edge_src_map[vidx_src].count;
for (; k--; eidx_src++) {
const MEdge *edge_src = &edges_src[*eidx_src];
const float *other_co_src = vcos_src[BKE_mesh_edge_other_vert(edge_src, vidx_src)];
const MEdge &edge_src = edges_src[*eidx_src];
const float *other_co_src =
vcos_src[blender::bke::mesh::edge_other_vert(edge_src, vidx_src)];
const float *other_co_dst =
vert_positions_dst[BKE_mesh_edge_other_vert(e_dst, int(vidx_dst))];
vert_positions_dst[blender::bke::mesh::edge_other_vert(e_dst, int(vidx_dst))];
const float totdist = first_dist + len_v3v3(other_co_src, other_co_dst);
if (totdist < best_totdist) {
@ -801,8 +802,8 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
if (best_eidx_src >= 0) {
const float *co1_src = vcos_src[edges_src[best_eidx_src].v1];
const float *co2_src = vcos_src[edges_src[best_eidx_src].v2];
const float *co1_dst = vert_positions_dst[e_dst->v1];
const float *co2_dst = vert_positions_dst[e_dst->v2];
const float *co1_dst = vert_positions_dst[e_dst.v1];
const float *co2_dst = vert_positions_dst[e_dst.v2];
float co_src[3], co_dst[3];
/* TODO: would need an isect_seg_seg_v3(), actually! */

View File

@ -929,16 +929,57 @@ static bool mesh_validate_customdata(CustomData *data,
PRINT_MSG("%s: Checking %d CD layers...\n", __func__, data->totlayer);
/* Set dummy values so the layer-type is always initialized on first access. */
int layer_num = -1;
int layer_num_type = -1;
while (i < data->totlayer) {
CustomDataLayer *layer = &data->layers[i];
bool ok = true;
/* Count layers when the type changes. */
if (layer_num_type != layer->type) {
layer_num = CustomData_number_of_layers(data, layer->type);
layer_num_type = layer->type;
}
/* Validate active index, for a time this could be set to a negative value, see: #105860. */
int *active_index_array[] = {
&layer->active,
&layer->active_rnd,
&layer->active_clone,
&layer->active_mask,
};
for (int *active_index : Span(active_index_array, ARRAY_SIZE(active_index_array))) {
if (*active_index < 0) {
PRINT_ERR("\tCustomDataLayer type %d has a negative active index (%d)\n",
layer->type,
*active_index);
if (do_fixes) {
*active_index = 0;
has_fixes = true;
}
}
else {
if (*active_index >= layer_num) {
PRINT_ERR("\tCustomDataLayer type %d has an out of bounds active index (%d >= %d)\n",
layer->type,
*active_index,
layer_num);
if (do_fixes) {
BLI_assert(layer_num > 0);
*active_index = layer_num - 1;
has_fixes = true;
}
}
}
}
if (CustomData_layertype_is_singleton(layer->type)) {
const int layer_tot = CustomData_number_of_layers(data, layer->type);
if (layer_tot > 1) {
if (layer_num > 1) {
PRINT_ERR("\tCustomDataLayer type %d is a singleton, found %d in Mesh structure\n",
layer->type,
layer_tot);
layer_num);
ok = false;
}
}

View File

@ -4357,7 +4357,7 @@ void BKE_object_handle_update(Depsgraph *depsgraph, Scene *scene, Object *ob)
void BKE_object_sculpt_data_create(Object *ob)
{
BLI_assert((ob->sculpt == nullptr) && (ob->mode & OB_MODE_ALL_SCULPT));
ob->sculpt = MEM_cnew<SculptSession>(__func__);
ob->sculpt = MEM_new<SculptSession>(__func__);
ob->sculpt->mode_type = (eObjectMode)ob->mode;
}

View File

@ -1696,16 +1696,16 @@ static void sculpt_update_object(
/* These are assigned to the base mesh in Multires. This is needed because Face Sets operators
* and tools use the Face Sets data from the base mesh when Multires is active. */
ss->vert_positions = BKE_mesh_vert_positions_for_write(me);
ss->polys = me->polys().data();
ss->corner_verts = me->corner_verts().data();
ss->polys = me->polys();
ss->corner_verts = me->corner_verts();
}
else {
ss->totvert = me->totvert;
ss->totpoly = me->totpoly;
ss->totfaces = me->totpoly;
ss->vert_positions = BKE_mesh_vert_positions_for_write(me);
ss->polys = me->polys().data();
ss->corner_verts = me->corner_verts().data();
ss->polys = me->polys();
ss->corner_verts = me->corner_verts();
ss->multires.active = false;
ss->multires.modifier = nullptr;
ss->multires.level = 0;
@ -1999,12 +1999,11 @@ int BKE_sculpt_mask_layers_ensure(Depsgraph *depsgraph,
int level = max_ii(1, mmd->sculptlvl);
int gridsize = BKE_ccg_gridsize(level);
int gridarea = gridsize * gridsize;
int i, j;
gmask = static_cast<GridPaintMask *>(
CustomData_add_layer(&me->ldata, CD_GRID_PAINT_MASK, CD_SET_DEFAULT, me->totloop));
for (i = 0; i < me->totloop; i++) {
for (int i = 0; i < me->totloop; i++) {
GridPaintMask *gpm = &gmask[i];
gpm->level = level;
@ -2012,29 +2011,28 @@ int BKE_sculpt_mask_layers_ensure(Depsgraph *depsgraph,
MEM_callocN(sizeof(float) * gridarea, "GridPaintMask.data"));
}
/* if vertices already have mask, copy into multires data */
/* If vertices already have mask, copy into multires data. */
if (paint_mask) {
for (i = 0; i < me->totpoly; i++) {
for (const int i : polys.index_range()) {
const MPoly &poly = polys[i];
float avg = 0;
/* mask center */
for (j = 0; j < poly.totloop; j++) {
const int vert = corner_verts[poly.loopstart + j];
/* Mask center. */
float avg = 0.0f;
for (const int vert : corner_verts.slice(poly.loopstart, poly.totloop)) {
avg += paint_mask[vert];
}
avg /= float(poly.totloop);
/* fill in multires mask corner */
for (j = 0; j < poly.totloop; j++) {
GridPaintMask *gpm = &gmask[poly.loopstart + j];
const int vert = corner_verts[poly.loopstart + j];
const int prev = ME_POLY_LOOP_PREV(&poly, j);
const int next = ME_POLY_LOOP_NEXT(&poly, j);
/* Fill in multires mask corner. */
for (const int corner : blender::IndexRange(poly.loopstart, poly.totloop)) {
GridPaintMask *gpm = &gmask[corner];
const int vert = corner_verts[corner];
const int prev = corner_verts[blender::bke::mesh::poly_corner_prev(poly, vert)];
const int next = corner_verts[blender::bke::mesh::poly_corner_next(poly, vert)];
gpm->data[0] = avg;
gpm->data[1] = (paint_mask[vert] + paint_mask[corner_verts[next]]) * 0.5f;
gpm->data[2] = (paint_mask[vert] + paint_mask[corner_verts[prev]]) * 0.5f;
gpm->data[1] = (paint_mask[vert] + paint_mask[next]) * 0.5f;
gpm->data[2] = (paint_mask[vert] + paint_mask[prev]) * 0.5f;
gpm->data[3] = paint_mask[vert];
}
}
@ -2285,6 +2283,24 @@ PBVH *BKE_sculpt_object_pbvh_ensure(Depsgraph *depsgraph, Object *ob)
return pbvh;
}
PBVH *BKE_object_sculpt_pbvh_get(Object *object)
{
if (!object->sculpt) {
return nullptr;
}
return object->sculpt->pbvh;
}
bool BKE_object_sculpt_use_dyntopo(const Object *object)
{
return object->sculpt && object->sculpt->bm;
}
void BKE_object_sculpt_dyntopo_smooth_shading_set(Object *object, const bool value)
{
object->sculpt->bm_smooth_shading = value;
}
void BKE_sculpt_bvh_update_from_ccg(PBVH *pbvh, SubdivCCG *subdiv_ccg)
{
CCGKey key;

View File

@ -1986,7 +1986,7 @@ const int *BKE_subdiv_ccg_start_face_grid_index_get(const SubdivCCG *subdiv_ccg)
static void adjacet_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int *corner_verts,
const blender::Span<int> corner_verts,
const MPoly *polys,
int *r_v1,
int *r_v2)
@ -1996,13 +1996,13 @@ static void adjacet_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_cc
const MPoly &poly = polys[poly_index];
*r_v1 = corner_verts[coord->grid_index];
const int corner = poly_find_loop_from_vert(&poly, &corner_verts[poly.loopstart], *r_v1);
const int corner = blender::bke::mesh::poly_find_corner_from_vert(poly, corner_verts, *r_v1);
if (coord->x == grid_size_1) {
const int next = ME_POLY_LOOP_NEXT(&poly, corner);
const int next = blender::bke::mesh::poly_corner_next(poly, corner);
*r_v2 = corner_verts[next];
}
if (coord->y == grid_size_1) {
const int prev = ME_POLY_LOOP_PREV(&poly, corner);
const int prev = blender::bke::mesh::poly_corner_prev(poly, corner);
*r_v2 = corner_verts[prev];
}
}
@ -2010,6 +2010,7 @@ static void adjacet_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_cc
SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int *corner_verts,
const int corners_num,
const MPoly *polys,
int *r_v1,
int *r_v2)
@ -2027,7 +2028,8 @@ SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const Subdi
return SUBDIV_CCG_ADJACENT_VERTEX;
}
/* Grid corner adjacent to the middle of a coarse mesh edge. */
adjacet_vertices_index_from_adjacent_edge(subdiv_ccg, coord, corner_verts, polys, r_v1, r_v2);
adjacet_vertices_index_from_adjacent_edge(
subdiv_ccg, coord, {corner_verts, corners_num}, polys, r_v1, r_v2);
return SUBDIV_CCG_ADJACENT_EDGE;
}
@ -2035,7 +2037,7 @@ SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const Subdi
if (!is_inner_edge_grid_coordinate(subdiv_ccg, coord)) {
/* Grid boundary adjacent to a coarse mesh edge. */
adjacet_vertices_index_from_adjacent_edge(
subdiv_ccg, coord, corner_verts, polys, r_v1, r_v2);
subdiv_ccg, coord, {corner_verts, corners_num}, polys, r_v1, r_v2);
return SUBDIV_CCG_ADJACENT_EDGE;
}
}

View File

@ -19,12 +19,13 @@
# undef NOMINMAX
# endif
# endif
#else
# include <atomic>
# include <mutex>
# include "BLI_map.hh"
#endif
#include <atomic>
#include <mutex>
#include "BLI_map.hh"
#include "BLI_utility_mixins.hh"
namespace blender::threading {

View File

@ -64,6 +64,22 @@ template<typename T> class SharedCache {
BLI_assert(cache_->mutex.is_cached());
return cache_->data;
}
/**
* Return true if the cache currently does not exist or has been invalidated.
*/
bool is_dirty() const
{
return cache_->mutex.is_dirty();
}
/**
* Return true if the cache exists and is valid.
*/
bool is_cached() const
{
return cache_->mutex.is_cached();
}
};
} // namespace blender

View File

@ -5,10 +5,11 @@
* \brief Array storage to minimize duplication.
*
* This is done by splitting arrays into chunks and using copy-on-write (COW),
* to de-duplicate chunks,
* from the users perspective this is an implementation detail.
* to de-duplicate chunks, from the users perspective this is an implementation detail.
*
* Overview
* ========
*
* Data Structure
* --------------
*
@ -16,51 +17,52 @@
*
* \note The only 2 structures here which are referenced externally are the.
*
* - BArrayStore: The whole array store.
* - BArrayState: Represents a single state (array) of data.
* - #BArrayStore: The whole array store.
* - #BArrayState: Represents a single state (array) of data.
* These can be add using a reference state,
* while this could be considered the previous or parent state.
* no relationship is kept,
* so the caller is free to add any state from the same BArrayStore as a reference.
* so the caller is free to add any state from the same #BArrayStore as a reference.
*
* <pre>
* <+> BArrayStore: root data-structure,
* <+> #BArrayStore: root data-structure,
* | can store many 'states', which share memory.
* |
* | This can store many arrays, however they must share the same 'stride'.
* | Arrays of different types will need to use a new BArrayStore.
* | Arrays of different types will need to use a new #BArrayStore.
* |
* +- <+> states (Collection of BArrayState's):
* +- <+> states (Collection of #BArrayState's):
* | | Each represents an array added by the user of this API.
* | | and references a chunk_list (each state is a chunk_list user).
* | | Note that the list order has no significance.
* | |
* | +- <+> chunk_list (BChunkList):
* | +- <+> chunk_list (#BChunkList):
* | | The chunks that make up this state.
* | | Each state is a chunk_list user,
* | | avoids duplicating lists when there is no change between states.
* | |
* | +- chunk_refs (List of BChunkRef): Each chunk_ref links to a BChunk.
* | +- chunk_refs (List of #BChunkRef): Each chunk_ref links to a #BChunk.
* | Each reference is a chunk user,
* | avoids duplicating smaller chunks of memory found in multiple states.
* |
* +- info (BArrayInfo):
* +- info (#BArrayInfo):
* | Sizes and offsets for this array-store.
* | Also caches some variables for reuse.
* |
* +- <+> memory (BArrayMemory):
* | Memory pools for storing BArrayStore data.
* +- <+> memory (#BArrayMemory):
* | Memory pools for storing #BArrayStore data.
* |
* +- chunk_list (Pool of BChunkList):
* | All chunk_lists, (reference counted, used by BArrayState).
* +- chunk_list (Pool of #BChunkList):
* | All chunk_lists, (reference counted, used by #BArrayState).
* |
* +- chunk_ref (Pool of BChunkRef):
* | All chunk_refs (link between BChunkList & BChunk).
* +- chunk_ref (Pool of #BChunkRef):
* | All chunk_refs (link between #BChunkList & #BChunk).
* |
* +- chunks (Pool of BChunk):
* All chunks, (reference counted, used by BChunkList).
* +- chunks (Pool of #BChunk):
* All chunks, (reference counted, used by #BChunkList).
* These have their headers hashed for reuse so we can quickly check for duplicates.
* </pre>
*
* De-Duplication
* --------------
*
@ -71,7 +73,7 @@
* For identical arrays this is all that's needed.
*
* De-duplication is performed on any remaining chunks, by hashing the first few bytes of the chunk
* (see: BCHUNK_HASH_TABLE_ACCUMULATE_STEPS).
* (see: #BCHUNK_HASH_TABLE_ACCUMULATE_STEPS).
*
* \note This is cached for reuse since the referenced data never changes.
*
@ -93,9 +95,9 @@
#include "BLI_strict_flags.h"
#include "BLI_array_store.h" /* own include */
#include "BLI_array_store.h" /* Own include. */
/* only for BLI_array_store_is_valid */
/* Only for #BLI_array_store_is_valid. */
#include "BLI_ghash.h"
/* -------------------------------------------------------------------- */
@ -169,7 +171,7 @@
#endif
/**
* Calculate the key once and reuse it
* Calculate the key once and reuse it.
*/
#define USE_HASH_TABLE_KEY_CACHE
#ifdef USE_HASH_TABLE_KEY_CACHE
@ -177,6 +179,16 @@
# define HASH_TABLE_KEY_FALLBACK ((hash_key)-2)
#endif
/**
* Ensure duplicate entries aren't added to temporary hash table
* needed for arrays where many values match (an array of booleans all true/false for e.g.).
*
* Without this, a huge number of duplicates are added a single bucket, making hash lookups slow.
* While de-duplication adds some cost, it's only performed with other chunks in the same bucket
* so cases when all chunks are unique will quickly detect and exit the `memcmp` in most cases.
*/
#define USE_HASH_TABLE_DEDUPLICATE
/**
* How much larger the table is then the total number of chunks.
*/
@ -209,7 +221,7 @@
# define BCHUNK_SIZE_MAX_MUL 2
#endif /* USE_MERGE_CHUNKS */
/** Slow (keep disabled), but handy for debugging */
/** Slow (keep disabled), but handy for debugging. */
// #define USE_VALIDATE_LIST_SIZE
// #define USE_VALIDATE_LIST_DATA_PARTIAL
@ -228,9 +240,9 @@ typedef struct BArrayInfo {
size_t chunk_stride;
// uint chunk_count; /* UNUSED (other values are derived from this) */
/* pre-calculated */
/* Pre-calculated. */
size_t chunk_byte_size;
/* min/max limits (inclusive) */
/* Min/max limits (inclusive) */
size_t chunk_byte_size_min;
size_t chunk_byte_size_max;
/**
@ -245,19 +257,19 @@ typedef struct BArrayInfo {
} BArrayInfo;
typedef struct BArrayMemory {
BLI_mempool *chunk_list; /* BChunkList */
BLI_mempool *chunk_ref; /* BChunkRef */
BLI_mempool *chunk; /* BChunk */
BLI_mempool *chunk_list; /* #BChunkList. */
BLI_mempool *chunk_ref; /* #BChunkRef. */
BLI_mempool *chunk; /* #BChunk. */
} BArrayMemory;
/**
* Main storage for all states
* Main storage for all states.
*/
struct BArrayStore {
/* static */
/* Static. */
BArrayInfo info;
/* memory storage */
/** Memory storage. */
BArrayMemory memory;
/**
@ -277,14 +289,14 @@ struct BArrayStore {
* it makes it easier to trace invalid usage, so leave as-is for now.
*/
struct BArrayState {
/** linked list in #BArrayStore.states */
/** linked list in #BArrayStore.states. */
struct BArrayState *next, *prev;
/** Shared chunk list, this reference must hold a #BChunkList::users. */
struct BChunkList *chunk_list;
};
typedef struct BChunkList {
/** List of #BChunkRef's */
/** List of #BChunkRef's. */
ListBase chunk_refs;
/** Result of `BLI_listbase_count(chunks)`, store for reuse. */
uint chunk_refs_len;
@ -367,13 +379,23 @@ static void bchunk_decref(BArrayMemory *bs_mem, BChunk *chunk)
}
}
BLI_INLINE bool bchunk_data_compare_unchecked(const BChunk *chunk,
const uchar *data_base,
const size_t data_base_len,
const size_t offset)
{
BLI_assert(offset + (size_t)chunk->data_len <= data_base_len);
UNUSED_VARS_NDEBUG(data_base_len);
return (memcmp(&data_base[offset], chunk->data, chunk->data_len) == 0);
}
static bool bchunk_data_compare(const BChunk *chunk,
const uchar *data_base,
const size_t data_base_len,
const size_t offset)
{
if (offset + (size_t)chunk->data_len <= data_base_len) {
return (memcmp(&data_base[offset], chunk->data, chunk->data_len) == 0);
return bchunk_data_compare_unchecked(chunk, data_base, data_base_len, offset);
}
return false;
}
@ -446,15 +468,15 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
{
BChunkRef *cref = chunk_list->chunk_refs.last;
if (cref && cref->prev) {
/* both are decref'd after use (end of this block) */
/* Both are decref'd after use (end of this block) */
BChunk *chunk_curr = cref->link;
BChunk *chunk_prev = cref->prev->link;
if (MIN2(chunk_prev->data_len, chunk_curr->data_len) < info->chunk_byte_size_min) {
const size_t data_merge_len = chunk_prev->data_len + chunk_curr->data_len;
/* we could pass, but no need */
/* We could pass, but no need. */
if (data_merge_len <= info->chunk_byte_size_max) {
/* we have enough space to merge */
/* We have enough space to merge. */
/* Remove last from the linked-list. */
BLI_assert(chunk_list->chunk_refs.last != chunk_list->chunk_refs.first);
@ -478,10 +500,10 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
*
* if we do, the code below works (test by setting 'BCHUNK_SIZE_MAX_MUL = 1.2') */
/* keep chunk on the left hand side a regular size */
/* Keep chunk on the left hand side a regular size. */
const size_t split = info->chunk_byte_size;
/* merge and split */
/* Merge and split. */
const size_t data_prev_len = split;
const size_t data_curr_len = data_merge_len - split;
uchar *data_prev = MEM_mallocN(data_prev_len, __func__);
@ -490,10 +512,10 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
if (data_prev_len <= chunk_prev->data_len) {
const size_t data_curr_shrink_len = chunk_prev->data_len - data_prev_len;
/* setup 'data_prev' */
/* Setup 'data_prev'. */
memcpy(data_prev, chunk_prev->data, data_prev_len);
/* setup 'data_curr' */
/* Setup 'data_curr'. */
memcpy(data_curr, &chunk_prev->data[data_prev_len], data_curr_shrink_len);
memcpy(&data_curr[data_curr_shrink_len], chunk_curr->data, chunk_curr->data_len);
}
@ -503,11 +525,11 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
const size_t data_prev_grow_len = data_prev_len - chunk_prev->data_len;
/* setup 'data_prev' */
/* Setup 'data_prev'. */
memcpy(data_prev, chunk_prev->data, chunk_prev->data_len);
memcpy(&data_prev[chunk_prev->data_len], chunk_curr->data, data_prev_grow_len);
/* setup 'data_curr' */
/* Setup 'data_curr'. */
memcpy(data_curr, &chunk_curr->data[data_prev_grow_len], data_curr_len);
}
@ -518,7 +540,7 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
cref->link->users += 1;
}
/* free zero users */
/* Free zero users. */
bchunk_decref(bs_mem, chunk_curr);
bchunk_decref(bs_mem, chunk_prev);
}
@ -543,8 +565,7 @@ static void bchunk_list_calc_trim_len(const BArrayInfo *info,
size_t data_trim_len = data_len;
#ifdef USE_MERGE_CHUNKS
/* avoid creating too-small chunks
* more efficient than merging after */
/* Avoid creating too-small chunks more efficient than merging after. */
if (data_len > info->chunk_byte_size) {
data_last_chunk_len = (data_trim_len % info->chunk_byte_size);
data_trim_len = data_trim_len - data_last_chunk_len;
@ -606,7 +627,7 @@ static void bchunk_list_append_data(const BArrayInfo *info,
if (MIN2(chunk_prev->data_len, data_len) < info->chunk_byte_size_min) {
const size_t data_merge_len = chunk_prev->data_len + data_len;
/* realloc for single user */
/* Re-allocate for single user. */
if (cref->link->users == 1) {
uchar *data_merge = MEM_reallocN((void *)cref->link->data, data_merge_len);
memcpy(&data_merge[chunk_prev->data_len], data, data_len);
@ -631,7 +652,7 @@ static void bchunk_list_append_data(const BArrayInfo *info,
BChunk *chunk = bchunk_new_copydata(bs_mem, data, data_len);
bchunk_list_append_only(bs_mem, chunk_list, chunk);
/* don't run this, instead preemptively avoid creating a chunk only to merge it (above). */
/* Don't run this, instead preemptively avoid creating a chunk only to merge it (above). */
#if 0
# ifdef USE_MERGE_CHUNKS
bchunk_list_ensure_min_size_last(info, bs_mem, chunk_list);
@ -678,8 +699,7 @@ static void bchunk_list_append_data_n(const BArrayInfo *info,
}
}
else {
/* if we didn't write any chunks previously,
* we may need to merge with the last. */
/* If we didn't write any chunks previously, we may need to merge with the last. */
if (data_last_chunk_len) {
bchunk_list_append_data(info, bs_mem, chunk_list, data, data_last_chunk_len);
// i_prev = data_len; /* UNUSED */
@ -740,7 +760,7 @@ static void bchunk_list_fill_from_array(const BArrayInfo *info,
}
#endif
/* works but better avoid redundant re-alloc */
/* Works but better avoid redundant re-allocation. */
#if 0
# ifdef USE_MERGE_CHUNKS
bchunk_list_ensure_min_size_last(info, bs_mem, chunk_list);
@ -754,7 +774,7 @@ static void bchunk_list_fill_from_array(const BArrayInfo *info,
/** \} */
/*
* Internal Table Lookup Functions
* Internal Table Lookup Functions.
*/
/* -------------------------------------------------------------------- */
@ -770,7 +790,7 @@ BLI_INLINE hash_key hash_data_single(const uchar p)
return ((HASH_INIT << 5) + HASH_INIT) + (hash_key)(*((signed char *)&p));
}
/* hash bytes, from BLI_ghashutil_strhash_n */
/* Hash bytes, from #BLI_ghashutil_strhash_n. */
static hash_key hash_data(const uchar *key, size_t n)
{
const signed char *p;
@ -797,14 +817,14 @@ static void hash_array_from_data(const BArrayInfo *info,
}
}
else {
/* fast-path for bytes */
/* Fast-path for bytes. */
for (size_t i = 0; i < data_slice_len; i++) {
hash_array[i] = hash_data_single(data_slice[i]);
}
}
}
/*
/**
* Similar to hash_array_from_data,
* but able to step into the next chunk if we run-out of data.
*/
@ -829,7 +849,7 @@ static void hash_array_from_cref(const BArrayInfo *info,
} while ((i < hash_array_len) && (cref != NULL));
/* If this isn't equal, the caller didn't properly check
* that there was enough data left in all chunks */
* that there was enough data left in all chunks. */
BLI_assert(i == hash_array_len);
}
@ -866,11 +886,11 @@ static void hash_accum_single(hash_key *hash_array, const size_t hash_array_len,
{
BLI_assert(iter_steps <= hash_array_len);
if (UNLIKELY(!(iter_steps <= hash_array_len))) {
/* while this shouldn't happen, avoid crashing */
/* While this shouldn't happen, avoid crashing. */
iter_steps = hash_array_len;
}
/* We can increase this value each step to avoid accumulating quite as much
* while getting the same results as hash_accum */
* while getting the same results as hash_accum. */
size_t iter_steps_sub = iter_steps;
while (iter_steps != 0) {
@ -886,11 +906,11 @@ static void hash_accum_single(hash_key *hash_array, const size_t hash_array_len,
static hash_key key_from_chunk_ref(const BArrayInfo *info,
const BChunkRef *cref,
/* avoid reallocating each time */
/* Avoid reallocating each time. */
hash_key *hash_store,
const size_t hash_store_len)
{
/* in C, will fill in a reusable array */
/* In C, will fill in a reusable array. */
BChunk *chunk = cref->link;
BLI_assert((info->accum_read_ahead_bytes * info->chunk_stride) != 0);
@ -901,14 +921,14 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info,
key = chunk->key;
if (key != HASH_TABLE_KEY_UNSET) {
/* Using key cache!
* avoids calculating every time */
* avoids calculating every time. */
}
else {
hash_array_from_cref(info, cref, info->accum_read_ahead_bytes, hash_store);
hash_accum_single(hash_store, hash_store_len, info->accum_steps);
key = hash_store[0];
/* cache the key */
/* Cache the key. */
if (UNLIKELY(key == HASH_TABLE_KEY_UNSET)) {
key = HASH_TABLE_KEY_FALLBACK;
}
@ -921,7 +941,7 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info,
# endif
return key;
}
/* corner case - we're too small, calculate the key each time. */
/* Corner case - we're too small, calculate the key each time. */
hash_array_from_cref(info, cref, info->accum_read_ahead_bytes, hash_store);
hash_accum_single(hash_store, hash_store_len, info->accum_steps);
@ -944,30 +964,33 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
const size_t offset,
const hash_key *table_hash_array)
{
size_t size_left = data_len - offset;
hash_key key = table_hash_array[((offset - i_table_start) / info->chunk_stride)];
size_t key_index = (size_t)(key % (hash_key)table_len);
for (const BTableRef *tref = table[key_index]; tref; tref = tref->next) {
const BChunkRef *cref = tref->cref;
const hash_key key = table_hash_array[((offset - i_table_start) / info->chunk_stride)];
const uint key_index = (uint)(key % (hash_key)table_len);
const BTableRef *tref = table[key_index];
if (tref != NULL) {
const size_t size_left = data_len - offset;
do {
const BChunkRef *cref = tref->cref;
# ifdef USE_HASH_TABLE_KEY_CACHE
if (cref->link->key == key)
if (cref->link->key == key)
# endif
{
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare(chunk_test, data, data_len, offset)) {
/* we could remove the chunk from the table, to avoid multiple hits */
return cref;
{
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare_unchecked(chunk_test, data, data_len, offset)) {
/* We could remove the chunk from the table, to avoid multiple hits. */
return cref;
}
}
}
}
} while ((tref = tref->next));
}
return NULL;
}
#else /* USE_HASH_TABLE_ACCUMULATE */
/* NON USE_HASH_TABLE_ACCUMULATE code (simply hash each chunk) */
/* NON USE_HASH_TABLE_ACCUMULATE code (simply hash each chunk). */
static hash_key key_from_chunk_ref(const BArrayInfo *info, const BChunkRef *cref)
{
@ -979,10 +1002,10 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info, const BChunkRef *cref
key = chunk->key;
if (key != HASH_TABLE_KEY_UNSET) {
/* Using key cache!
* avoids calculating every time */
* avoids calculating every time. */
}
else {
/* cache the key */
/* Cache the key. */
key = hash_data(chunk->data, data_hash_len);
if (key == HASH_TABLE_KEY_UNSET) {
key = HASH_TABLE_KEY_FALLBACK;
@ -1007,9 +1030,9 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
{
const size_t data_hash_len = BCHUNK_HASH_LEN * info->chunk_stride; /* TODO: cache. */
size_t size_left = data_len - offset;
hash_key key = hash_data(&data[offset], MIN2(data_hash_len, size_left));
size_t key_index = (size_t)(key % (hash_key)table_len);
const size_t size_left = data_len - offset;
const hash_key key = hash_data(&data[offset], MIN2(data_hash_len, size_left));
const uint key_index = (uint)(key % (hash_key)table_len);
for (BTableRef *tref = table[key_index]; tref; tref = tref->next) {
const BChunkRef *cref = tref->cref;
# ifdef USE_HASH_TABLE_KEY_CACHE
@ -1018,8 +1041,8 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
{
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare(chunk_test, data, data_len, offset)) {
/* we could remove the chunk from the table, to avoid multiple hits */
if (bchunk_data_compare_unchecked(chunk_test, data, data_len, offset)) {
/* We could remove the chunk from the table, to avoid multiple hits. */
return cref;
}
}
@ -1095,7 +1118,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#endif /* USE_FASTPATH_CHUNKS_FIRST */
/* Copy until we have a mismatch */
/* Copy until we have a mismatch. */
BChunkList *chunk_list = bchunk_list_new(bs_mem, data_len_original);
if (cref_match_first != NULL) {
size_t chunk_size_step = 0;
@ -1111,7 +1134,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
}
cref = cref->next;
}
/* happens when bytes are removed from the end of the array */
/* Happens when bytes are removed from the end of the array. */
if (chunk_size_step == data_len_original) {
return chunk_list;
}
@ -1125,17 +1148,16 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
/* ------------------------------------------------------------------------
* Fast-Path for end chunks
*
* Check for trailing chunks
* Check for trailing chunks.
*/
/* In this case use 'chunk_list_reference_last' to define the last index
* index_match_last = -1 */
* `index_match_last = -1`. */
/* warning, from now on don't use len(data)
* since we want to ignore chunks already matched */
/* Warning, from now on don't use len(data) since we want to ignore chunks already matched. */
size_t data_len = data_len_original;
#define data_len_original invalid_usage
#ifdef data_len_original /* quiet warning */
#ifdef data_len_original /* Quiet warning. */
#endif
const BChunkRef *chunk_list_reference_last = NULL;
@ -1175,7 +1197,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#ifdef USE_ALIGN_CHUNKS_TEST
if (chunk_list->total_expanded_size == chunk_list_reference->total_expanded_size) {
/* if we're already a quarter aligned */
/* If we're already a quarter aligned. */
if (data_len - i_prev <= chunk_list->total_expanded_size / 4) {
use_aligned = true;
}
@ -1189,7 +1211,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
* ----------------------- */
if (use_aligned) {
/* Copy matching chunks, creates using the same 'layout' as the reference */
/* Copy matching chunks, creates using the same 'layout' as the reference. */
const BChunkRef *cref = cref_match_first ? cref_match_first->next :
chunk_list_reference->chunk_refs.first;
while (i_prev != data_len) {
@ -1218,12 +1240,12 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
(chunk_list_reference->chunk_refs.first != NULL)) {
/* --------------------------------------------------------------------
* Non-Aligned Chunk De-Duplication */
* Non-Aligned Chunk De-Duplication. */
/* only create a table if we have at least one chunk to search
/* Only create a table if we have at least one chunk to search
* otherwise just make a new one.
*
* Support re-arranged chunks */
* Support re-arranged chunks. */
#ifdef USE_HASH_TABLE_ACCUMULATE
size_t i_table_start = i_prev;
@ -1234,7 +1256,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
hash_accum(table_hash_array, table_hash_array_len, info->accum_steps);
#else
/* dummy vars */
/* Dummy vars. */
uint i_table_start = 0;
hash_key *table_hash_array = NULL;
#endif
@ -1249,8 +1271,8 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
const size_t table_len = chunk_list_reference_remaining_len * BCHUNK_HASH_TABLE_MUL;
BTableRef **table = MEM_callocN(table_len * sizeof(*table), __func__);
/* table_make - inline
* include one matching chunk, to allow for repeating values */
/* Table_make - inline
* include one matching chunk, to allow for repeating values. */
{
#ifdef USE_HASH_TABLE_ACCUMULATE
const size_t hash_store_len = info->accum_read_ahead_len;
@ -1292,13 +1314,41 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
hash_store_len
#endif
);
size_t key_index = (size_t)(key % (hash_key)table_len);
const uint key_index = (uint)(key % (hash_key)table_len);
BTableRef *tref_prev = table[key_index];
BLI_assert(table_ref_stack_n < chunk_list_reference_remaining_len);
BTableRef *tref = &table_ref_stack[table_ref_stack_n++];
tref->cref = cref;
tref->next = tref_prev;
table[key_index] = tref;
#ifdef USE_HASH_TABLE_DEDUPLICATE
bool is_duplicate = false;
if (tref_prev) {
const BChunk *chunk_a = cref->link;
const BTableRef *tref = tref_prev;
do {
const BChunk *chunk_b = tref->cref->link;
# ifdef USE_HASH_TABLE_KEY_CACHE
if (key == chunk_b->key)
# endif
{
/* Not an error, it just isn't expected, in the case chunks are shared
* matching chunks should also be skipped to avoid a redundant `memcmp` call. */
BLI_assert(chunk_a != chunk_b);
if (chunk_a->data_len == chunk_b->data_len) {
if (memcmp(chunk_a->data, chunk_b->data, chunk_a->data_len) == 0) {
is_duplicate = true;
break;
}
}
}
} while ((tref = tref->next));
}
if (!is_duplicate)
#endif /* USE_HASH_TABLE_DEDUPLICATE */
{
BTableRef *tref = &table_ref_stack[table_ref_stack_n++];
tref->cref = cref;
tref->next = tref_prev;
table[key_index] = tref;
}
chunk_list_reference_bytes_remaining -= cref->link->data_len;
cref = cref->next;
@ -1310,7 +1360,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
MEM_freeN(hash_store);
#endif
}
/* done making the table */
/* Done making the table. */
BLI_assert(i_prev <= data_len);
for (size_t i = i_prev; i < data_len;) {
@ -1325,7 +1375,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
i_prev = i;
}
/* now add the reference chunk */
/* Now add the reference chunk. */
{
BChunk *chunk_found = cref_found->link;
i += chunk_found->data_len;
@ -1336,7 +1386,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
ASSERT_CHUNKLIST_SIZE(chunk_list, i_prev);
ASSERT_CHUNKLIST_DATA(chunk_list, data);
/* its likely that the next chunk in the list will be a match, so check it! */
/* Its likely that the next chunk in the list will be a match, so check it! */
while (!ELEM(cref_found->next, NULL, chunk_list_reference_last)) {
cref_found = cref_found->next;
BChunk *chunk_found = cref_found->link;
@ -1346,7 +1396,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
* repeating memory where it would be useful to re-use chunks. */
i += chunk_found->data_len;
bchunk_list_append(info, bs_mem, chunk_list, chunk_found);
/* chunk_found may be freed! */
/* Chunk_found may be freed! */
i_prev = i;
BLI_assert(i_prev <= data_len);
ASSERT_CHUNKLIST_SIZE(chunk_list, i_prev);
@ -1389,14 +1439,13 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#ifdef USE_FASTPATH_CHUNKS_LAST
if (chunk_list_reference_last != NULL) {
/* write chunk_list_reference_last since it hasn't been written yet */
/* Write chunk_list_reference_last since it hasn't been written yet. */
const BChunkRef *cref = chunk_list_reference_last;
while (cref != NULL) {
BChunk *chunk = cref->link;
// BLI_assert(bchunk_data_compare(chunk, data, data_len, i_prev));
i_prev += chunk->data_len;
/* use simple since we assume the references chunks
* have already been sized correctly. */
/* Use simple since we assume the references chunks have already been sized correctly. */
bchunk_list_append_only(bs_mem, chunk_list, chunk);
ASSERT_CHUNKLIST_DATA(chunk_list, data);
cref = cref->next;
@ -1408,7 +1457,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
BLI_assert(i_prev == data_len_original);
/* check we're the correct size and that we didn't accidentally modify the reference */
/* Check we're the correct size and that we didn't accidentally modify the reference. */
ASSERT_CHUNKLIST_SIZE(chunk_list, data_len_original);
ASSERT_CHUNKLIST_SIZE(chunk_list_reference, chunk_list_reference->total_expanded_size);
@ -1416,7 +1465,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
return chunk_list;
}
/* end private API */
/* End private API. */
/** \} */
@ -1470,7 +1519,7 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
bs->memory.chunk_list = BLI_mempool_create(sizeof(BChunkList), 0, 512, BLI_MEMPOOL_NOP);
bs->memory.chunk_ref = BLI_mempool_create(sizeof(BChunkRef), 0, 512, BLI_MEMPOOL_NOP);
/* allow iteration to simplify freeing, otherwise its not needed
/* Allow iteration to simplify freeing, otherwise its not needed
* (we could loop over all states as an alternative). */
bs->memory.chunk = BLI_mempool_create(sizeof(BChunk), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
@ -1481,7 +1530,7 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
static void array_store_free_data(BArrayStore *bs)
{
/* free chunk data */
/* Free chunk data. */
{
BLI_mempool_iter iter;
BChunk *chunk;
@ -1492,7 +1541,7 @@ static void array_store_free_data(BArrayStore *bs)
}
}
/* free states */
/* Free states. */
for (BArrayState *state = bs->states.first, *state_next; state; state = state_next) {
state_next = state->next;
MEM_freeN(state);
@ -1560,7 +1609,7 @@ BArrayState *BLI_array_store_state_add(BArrayStore *bs,
const size_t data_len,
const BArrayState *state_reference)
{
/* ensure we're aligned to the stride */
/* Ensure we're aligned to the stride. */
BLI_assert((data_len % bs->info.chunk_stride) == 0);
#ifdef USE_PARANOID_CHECKS
@ -1575,7 +1624,7 @@ BArrayState *BLI_array_store_state_add(BArrayStore *bs,
&bs->memory,
(const uchar *)data,
data_len,
/* re-use reference chunks */
/* Re-use reference chunks. */
state_reference->chunk_list);
}
else {
@ -1652,7 +1701,7 @@ void *BLI_array_store_state_data_get_alloc(BArrayState *state, size_t *r_data_le
/** \name Debugging API (for testing).
* \{ */
/* only for test validation */
/* Only for test validation. */
static size_t bchunk_list_size(const BChunkList *chunk_list)
{
size_t total_expanded_size = 0;
@ -1680,7 +1729,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
}
#ifdef USE_MERGE_CHUNKS
/* ensure we merge all chunks that could be merged */
/* Ensure we merge all chunks that could be merged. */
if (chunk_list->total_expanded_size > bs->info.chunk_byte_size_min) {
LISTBASE_FOREACH (BChunkRef *, cref, &chunk_list->chunk_refs) {
if (cref->link->data_len < bs->info.chunk_byte_size_min) {
@ -1719,7 +1768,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
} \
((void)0)
/* count chunk_list's */
/* Count chunk_list's. */
GHash *chunk_list_map = BLI_ghash_ptr_new(__func__);
GHash *chunk_map = BLI_ghash_ptr_new(__func__);
@ -1740,7 +1789,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
goto user_finally;
}
/* count chunk's */
/* Count chunk's. */
GHASH_ITER (gh_iter, chunk_list_map) {
const struct BChunkList *chunk_list = BLI_ghashIterator_getKey(&gh_iter);
LISTBASE_FOREACH (const BChunkRef *, cref, &chunk_list->chunk_refs) {

View File

@ -222,10 +222,10 @@ static void basic_cache_populate(void *vedata, Object *ob)
}
}
if (G.debug_value == 889 && ob->sculpt && ob->sculpt->pbvh) {
if (G.debug_value == 889 && ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
int debug_node_nr = 0;
DRW_debug_modelmat(ob->object_to_world);
BKE_pbvh_draw_debug_cb(ob->sculpt->pbvh, DRW_sculpt_debug_cb, &debug_node_nr);
BKE_pbvh_draw_debug_cb(BKE_object_sculpt_pbvh_get(ob), DRW_sculpt_debug_cb, &debug_node_nr);
}
}
}

View File

@ -814,8 +814,8 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,
bool use_sculpt_pbvh = BKE_sculptsession_use_pbvh_draw(ob, draw_ctx->rv3d) &&
!DRW_state_is_image_render();
if (ob->sculpt && ob->sculpt->pbvh) {
BKE_pbvh_is_drawing_set(ob->sculpt->pbvh, use_sculpt_pbvh);
if (ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
BKE_pbvh_is_drawing_set(BKE_object_sculpt_pbvh_get(ob), use_sculpt_pbvh);
}
/* First get materials for this mesh. */
@ -887,10 +887,11 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,
}
}
if (G.debug_value == 889 && ob->sculpt && ob->sculpt->pbvh) {
if (G.debug_value == 889 && ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
int debug_node_nr = 0;
DRW_debug_modelmat(ob->object_to_world);
BKE_pbvh_draw_debug_cb(ob->sculpt->pbvh, DRW_sculpt_debug_cb, &debug_node_nr);
BKE_pbvh_draw_debug_cb(
BKE_object_sculpt_pbvh_get(ob), DRW_sculpt_debug_cb, &debug_node_nr);
}
}

View File

@ -161,8 +161,6 @@ class DeferredLayer {
class DeferredPipeline {
private:
Instance &inst_;
/* Gbuffer filling passes. We could have an arbitrary number of them but for now we just have
* a hardcoded number of them. */
DeferredLayer opaque_layer_;
@ -171,7 +169,7 @@ class DeferredPipeline {
public:
DeferredPipeline(Instance &inst)
: inst_(inst), opaque_layer_(inst), refraction_layer_(inst), volumetric_layer_(inst){};
: opaque_layer_(inst), refraction_layer_(inst), volumetric_layer_(inst){};
void begin_sync();
void end_sync();

View File

@ -1,3 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "eevee_defines.hh"
#include "gpu_shader_create_info.hh"

View File

@ -277,8 +277,8 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
* of vertex color arrays from being sent to the GPU (e.g.
* when switching from eevee to workbench).
*/
if (ob->sculpt && ob->sculpt->pbvh) {
BKE_pbvh_is_drawing_set(ob->sculpt->pbvh, is_sculpt_pbvh);
if (ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
BKE_pbvh_is_drawing_set(BKE_object_sculpt_pbvh_get(ob), is_sculpt_pbvh);
}
bool has_color = false;
@ -334,7 +334,7 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
}
if (is_sculpt_pbvh && color_type == V3D_SHADING_TEXTURE_COLOR &&
BKE_pbvh_type(ob->sculpt->pbvh) != PBVH_FACES) {
BKE_pbvh_type(BKE_object_sculpt_pbvh_get(ob)) != PBVH_FACES) {
/* Force use of material color for sculpt. */
color_type = V3D_SHADING_MATERIAL_COLOR;
}

View File

@ -12,34 +12,44 @@
* - Custom context menus
* - Notifier listening
* - Drag controllers (dragging view items)
* - Drop controllers (dropping onto/into view items)
* - Drop targets (dropping onto/into view items)
*/
#pragma once
#include <array>
#include <memory>
#include <optional>
#include "DNA_defs.h"
#include "DNA_vec_types.h"
#include "BLI_span.hh"
#include "BLI_string_ref.hh"
#include "UI_interface.hh"
struct bContext;
struct uiBlock;
struct uiLayout;
struct uiViewItemHandle;
struct ViewLink;
struct wmDrag;
struct wmNotifier;
namespace blender::ui {
class AbstractViewItem;
class AbstractViewItemDropController;
class AbstractViewItemDropTarget;
class AbstractViewItemDragController;
/** The view drop target can share logic with the view item drop target for now, so just an alias.
*/
using AbstractViewDropTarget = AbstractViewItemDropTarget;
class AbstractView {
friend class AbstractViewItem;
friend struct ::ViewLink;
bool is_reconstructed_ = false;
/**
@ -51,9 +61,21 @@ class AbstractView {
*/
std::unique_ptr<std::array<char, MAX_NAME>> rename_buffer_;
/* See #get_bounds(). */
std::optional<rcti> bounds_;
public:
virtual ~AbstractView() = default;
/**
* If a view wants to support dropping data into it, it has to return a drop target here.
* That is an object implementing #AbstractViewDropTarget.
*
* \note This drop target may be requested for each event. The view doesn't keep the drop target
* around currently. So it cannot contain persistent state.
*/
virtual std::unique_ptr<AbstractViewDropTarget> create_drop_target() const;
/** Listen to a notifier, returning true if a redraw is needed. */
virtual bool listen(const wmNotifier &) const;
@ -70,6 +92,11 @@ class AbstractView {
void end_renaming();
Span<char> get_rename_buffer() const;
MutableSpan<char> get_rename_buffer();
/**
* Get the rectangle containing all the view items that are in the layout, in button space.
* Updated as part of #UI_block_end(), before that it's unset.
*/
std::optional<rcti> get_bounds() const;
protected:
AbstractView() = default;
@ -133,13 +160,13 @@ class AbstractViewItem {
*/
virtual std::unique_ptr<AbstractViewItemDragController> create_drag_controller() const;
/**
* If an item wants to support dropping data into it, it has to return a drop controller here.
* That is an object implementing #AbstractViewItemDropController.
* If an item wants to support dropping data into it, it has to return a drop target here.
* That is an object implementing #AbstractViewItemDropTarget.
*
* \note This drop controller may be requested for each event. The view doesn't keep a drop
* controller around currently. So it can not contain persistent state.
* \note This drop target may be requested for each event. The view doesn't keep a drop target
* around currently. So it can not contain persistent state.
*/
virtual std::unique_ptr<AbstractViewItemDropController> create_drop_controller() const;
virtual std::unique_ptr<AbstractViewItemDropTarget> create_drop_target() const;
/** Get the view this item is registered for using #AbstractView::register_item(). */
AbstractView &get_view() const;
@ -200,7 +227,7 @@ template<typename ToType> ToType *AbstractViewItem::from_item_handle(uiViewItemH
* \{ */
/**
* Class to enable dragging a view item. An item can return a drop controller for itself by
* Class to enable dragging a view item. An item can return a drag controller for itself by
* implementing #AbstractViewItem::create_drag_controller().
*/
class AbstractViewItemDragController {
@ -222,38 +249,15 @@ class AbstractViewItemDragController {
/**
* Class to define the behavior when dropping something onto/into a view item, plus the behavior
* when dragging over this item. An item can return a drop controller for itself via a custom
* implementation of #AbstractViewItem::create_drop_controller().
* when dragging over this item. An item can return a drop target for itself via a custom
* implementation of #AbstractViewItem::create_drop_target().
*/
class AbstractViewItemDropController {
class AbstractViewItemDropTarget : public DropTargetInterface {
protected:
AbstractView &view_;
public:
AbstractViewItemDropController(AbstractView &view);
virtual ~AbstractViewItemDropController() = default;
/**
* Check if the data dragged with \a drag can be dropped on the item this controller is for.
* \param r_disabled_hint: Return a static string to display to the user, explaining why dropping
* isn't possible on this item. Shouldn't be done too aggressively, e.g.
* don't set this if the drag-type can't be dropped here; only if it can
* but there's another reason it can't be dropped.
* Can assume this is a non-null pointer.
*/
virtual bool can_drop(const wmDrag &drag, const char **r_disabled_hint) const = 0;
/**
* Custom text to display when dragging over a view item. Should explain what happens when
* dropping the data onto this item. Will only be used if #AbstractViewItem::can_drop()
* returns true, so the implementing override doesn't have to check that again.
* The returned value must be a translated string.
*/
virtual std::string drop_tooltip(const wmDrag &drag) const = 0;
/**
* Execute the logic to apply a drop of the data dragged with \a drag onto/into the item this
* controller is for.
*/
virtual bool on_drop(struct bContext *C, const wmDrag &drag) = 0;
AbstractViewItemDropTarget(AbstractView &view);
/** Request the view the item is registered for as type #ViewType. Throws a `std::bad_cast`
* exception if the view is not of the requested type. */
@ -267,7 +271,7 @@ template<class ViewType> ViewType &AbstractViewItemDragController::get_view() co
return dynamic_cast<ViewType &>(view_);
}
template<class ViewType> ViewType &AbstractViewItemDropController::get_view() const
template<class ViewType> ViewType &AbstractViewItemDropTarget::get_view() const
{
static_assert(std::is_base_of<AbstractView, ViewType>::value,
"Type must derive from and implement the ui::AbstractView interface");

View File

@ -155,8 +155,6 @@ class AbstractGridView : public AbstractView {
* \{ */
class GridViewBuilder {
uiBlock &block_;
public:
GridViewBuilder(uiBlock &block);

View File

@ -3277,18 +3277,12 @@ void UI_view_item_context_menu_build(struct bContext *C,
* \return True if dragging started successfully, otherwise false.
*/
bool UI_view_item_drag_start(struct bContext *C, const uiViewItemHandle *item_);
bool UI_view_item_can_drop(const uiViewItemHandle *item_,
const struct wmDrag *drag,
const char **r_disabled_hint);
char *UI_view_item_drop_tooltip(const uiViewItemHandle *item, const struct wmDrag *drag);
/**
* Let a view item handle a drop event.
* \return True if the drop was handled by the view item.
*/
bool UI_view_item_drop_handle(struct bContext *C,
const uiViewItemHandle *item_,
const struct ListBase *drags);
/**
* \param xy: Coordinate to find a view item at, in window space.
* \param pad: Extra padding added to the bounding box of the view.
*/
uiViewHandle *UI_region_view_find_at(const struct ARegion *region, const int xy[2], int pad);
/**
* \param xy: Coordinate to find a view item at, in window space.
*/

View File

@ -19,6 +19,8 @@ namespace blender::nodes::geo_eval_log {
struct GeometryAttributeInfo;
}
struct ARegion;
struct bContext;
struct PointerRNA;
struct StructRNA;
struct uiBlock;
@ -26,6 +28,9 @@ struct uiBut;
struct uiLayout;
struct uiList;
struct uiSearchItems;
struct uiViewHandle;
struct uiViewItemHandle;
struct wmDrag;
namespace blender::ui {
@ -57,6 +62,67 @@ void attribute_search_add_items(StringRefNull str,
uiSearchItems *items,
bool is_first);
/**
* This provides a common interface for UI elements that want to support dragging & dropping
* entities into/onto them. With it, the element can determine if the dragged entity can be dropped
* onto itself, provide feedback while dragging and run custom code for the dropping.
*
* Note that this is just an interface. A #wmDropBox is needed to request instances of it from a UI
* element and call its functions. For example the drop box using "UI_OT_view_drop" implements
* dropping for views and view items via this interface. To support other kinds of UI elements,
* similar drop boxes would be necessary.
*/
class DropTargetInterface {
public:
DropTargetInterface() = default;
virtual ~DropTargetInterface() = default;
/**
* Check if the data dragged with \a drag can be dropped on the element this drop target is for.
* \param r_disabled_hint: Return a static string to display to the user, explaining why dropping
* isn't possible on this UI element. Shouldn't be done too aggressively,
* e.g. don't set this if the drag-type can't be dropped here; only if it
* can but there's another reason it can't be dropped. Can assume this is
* a non-null pointer.
*/
virtual bool can_drop(const wmDrag &drag, const char **r_disabled_hint) const = 0;
/**
* Custom text to display when dragging over the element using this drop target. Should
* explain what happens when dropping the data onto this UI element. Will only be used if
* #DropTargetInterface::can_drop() returns true, so the implementing override doesn't have
* to check that again. The returned value must be a translated string.
*/
virtual std::string drop_tooltip(const wmDrag &drag) const = 0;
/**
* Execute the logic to apply a drop of the data dragged with \a drag onto/into the UI element
* this drop target is for.
*/
virtual bool on_drop(bContext *C, const wmDrag &drag) const = 0;
};
/**
* Let a drop target handle a drop event.
* \return True if the dropping was successful.
*/
bool drop_target_apply_drop(bContext &C,
const DropTargetInterface &drop_target,
const ListBase &drags);
/**
* Call #DropTargetInterface::drop_tooltip() and return the result as newly allocated C string
* (unless the result is empty, returns null then). Needs freeing with MEM_freeN().
*/
char *drop_target_tooltip(const DropTargetInterface &drop_target, const wmDrag &drag);
std::unique_ptr<DropTargetInterface> view_drop_target(const uiViewHandle *view_handle);
std::unique_ptr<DropTargetInterface> view_item_drop_target(const uiViewItemHandle *item_handle);
/**
* Try to find a view item with a drop target under the mouse cursor, or if not found, a view
* with a drop target.
* \param xy: Coordinate to find a drop target at, in window space.
*/
std::unique_ptr<DropTargetInterface> region_views_find_drop_target_at(const ARegion *region,
const int xy[2]);
} // namespace blender::ui
enum eUIListFilterResult {

View File

@ -46,6 +46,7 @@ set(SRC
interface_context_path.cc
interface_drag.cc
interface_draw.cc
interface_drop.cc
interface_dropboxes.cc
interface_handlers.cc
interface_icons.cc

View File

@ -2019,6 +2019,8 @@ void UI_block_end_ex(const bContext *C, uiBlock *block, const int xy[2], int r_x
break;
}
ui_block_views_bounds_calc(block);
if (block->rect.xmin == 0.0f && block->rect.xmax == 0.0f) {
UI_block_bounds_set_normal(block, 0);
}

View File

@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup edinterface
*/
#include "UI_interface.hh"
namespace blender::ui {
bool drop_target_apply_drop(bContext &C,
const DropTargetInterface &drop_target,
const ListBase &drags)
{
const char *disabled_hint_dummy = nullptr;
LISTBASE_FOREACH (const wmDrag *, drag, &drags) {
if (drop_target.can_drop(*drag, &disabled_hint_dummy)) {
return drop_target.on_drop(&C, *drag);
}
}
return false;
}
char *drop_target_tooltip(const DropTargetInterface &drop_target, const wmDrag &drag)
{
const std::string tooltip = drop_target.drop_tooltip(drag);
return tooltip.empty() ? nullptr : BLI_strdup(tooltip.c_str());
}
} // namespace blender::ui

View File

@ -20,6 +20,9 @@
#include "WM_api.h"
#include "UI_interface.h"
#include "UI_interface.hh"
using namespace blender::ui;
/* -------------------------------------------------------------------- */
/** \name View Drag/Drop Callbacks
@ -28,28 +31,27 @@
static bool ui_view_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
const ARegion *region = CTX_wm_region(C);
const uiViewItemHandle *hovered_item = UI_region_views_find_item_at(region, event->xy);
if (!hovered_item) {
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region,
event->xy);
if (!drop_target) {
return false;
}
if (drag->drop_state.free_disabled_info) {
MEM_SAFE_FREE(drag->drop_state.disabled_info);
}
drag->drop_state.free_disabled_info = false;
return UI_view_item_can_drop(hovered_item, drag, &drag->drop_state.disabled_info);
return drop_target->can_drop(*drag, &drag->drop_state.disabled_info);
}
static char *ui_view_drop_tooltip(bContext *C, wmDrag *drag, const int xy[2], wmDropBox * /*drop*/)
{
const ARegion *region = CTX_wm_region(C);
const uiViewItemHandle *hovered_item = UI_region_views_find_item_at(region, xy);
if (!hovered_item) {
return nullptr;
}
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region, xy);
return UI_view_item_drop_tooltip(hovered_item, drag);
return drop_target_tooltip(*drop_target, *drag);
}
/** \} */

View File

@ -1456,6 +1456,7 @@ void ui_interface_tag_script_reload_queries();
/* interface_view.cc */
void ui_block_free_views(uiBlock *block);
void ui_block_views_bounds_calc(const uiBlock *block);
void ui_block_views_listen(const uiBlock *block, const wmRegionListenerParams *listener_params);
uiViewHandle *ui_block_view_find_matching_in_old_block(const uiBlock *new_block,
const uiViewHandle *new_view);

View File

@ -47,6 +47,7 @@
#include "RNA_types.h"
#include "UI_interface.h"
#include "UI_interface.hh"
#include "interface_intern.hh"
@ -65,6 +66,8 @@
#include "ED_screen.h"
#include "ED_text.h"
using namespace blender::ui;
/* -------------------------------------------------------------------- */
/** \name Immediate redraw helper
*
@ -2351,7 +2354,7 @@ static void UI_OT_list_start_filter(wmOperatorType *ot)
/** \} */
/* -------------------------------------------------------------------- */
/** \name UI Tree-View Drop Operator
/** \name UI View Drop Operator
* \{ */
static bool ui_view_drop_poll(bContext *C)
@ -2361,9 +2364,7 @@ static bool ui_view_drop_poll(bContext *C)
if (region == nullptr) {
return false;
}
const uiViewItemHandle *hovered_item = UI_region_views_find_item_at(region, win->eventstate->xy);
return hovered_item != nullptr;
return region_views_find_drop_target_at(region, win->eventstate->xy) != nullptr;
}
static int ui_view_drop_invoke(bContext *C, wmOperator * /*op*/, const wmEvent *event)
@ -2373,10 +2374,11 @@ static int ui_view_drop_invoke(bContext *C, wmOperator * /*op*/, const wmEvent *
}
const ARegion *region = CTX_wm_region(C);
uiViewItemHandle *hovered_item = UI_region_views_find_item_at(region, event->xy);
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region,
event->xy);
if (!UI_view_item_drop_handle(
C, hovered_item, static_cast<const ListBase *>(event->customdata))) {
if (!drop_target_apply_drop(
*C, *drop_target, *static_cast<const ListBase *>(event->customdata))) {
return OPERATOR_CANCELLED | OPERATOR_PASS_THROUGH;
}
@ -2385,9 +2387,9 @@ static int ui_view_drop_invoke(bContext *C, wmOperator * /*op*/, const wmEvent *
static void UI_OT_view_drop(wmOperatorType *ot)
{
ot->name = "View drop";
ot->name = "View Drop";
ot->idname = "UI_OT_view_drop";
ot->description = "Drag and drop items onto a data-set item";
ot->description = "Drag and drop onto a data-set or item within the data-set";
ot->invoke = ui_view_drop_invoke;
ot->poll = ui_view_drop_poll;

View File

@ -62,6 +62,12 @@ void AbstractView::update_from_old(uiBlock &new_block)
/** \name Default implementations of virtual functions
* \{ */
std::unique_ptr<AbstractViewDropTarget> AbstractView::create_drop_target() const
{
/* There's no drop target (and hence no drop support) by default. */
return nullptr;
}
bool AbstractView::listen(const wmNotifier & /*notifier*/) const
{
/* Nothing by default. */
@ -104,6 +110,23 @@ MutableSpan<char> AbstractView::get_rename_buffer()
return *rename_buffer_;
}
std::optional<rcti> AbstractView::get_bounds() const
{
return bounds_;
}
/** \} */
/* ---------------------------------------------------------------------- */
/** \name General API functions
* \{ */
std::unique_ptr<DropTargetInterface> view_drop_target(const uiViewHandle *view_handle)
{
const AbstractView &view = reinterpret_cast<const AbstractView &>(*view_handle);
return view.create_drop_target();
}
/** \} */
} // namespace blender::ui

View File

@ -174,9 +174,9 @@ std::unique_ptr<AbstractViewItemDragController> AbstractViewItem::create_drag_co
return nullptr;
}
std::unique_ptr<AbstractViewItemDropController> AbstractViewItem::create_drop_controller() const
std::unique_ptr<AbstractViewItemDropTarget> AbstractViewItem::create_drop_target() const
{
/* There's no drop controller (and hence no drop support) by default. */
/* There's no drop target (and hence no drop support) by default. */
return nullptr;
}
@ -189,7 +189,7 @@ void AbstractViewItemDragController::on_drag_start()
/* Do nothing by default. */
}
AbstractViewItemDropController::AbstractViewItemDropController(AbstractView &view) : view_(view)
AbstractViewItemDropTarget::AbstractViewItemDropTarget(AbstractView &view) : view_(view)
{
}
@ -217,6 +217,18 @@ bool AbstractViewItem::is_active() const
/** \} */
/* ---------------------------------------------------------------------- */
/** \name General API functions
* \{ */
std::unique_ptr<DropTargetInterface> view_item_drop_target(const uiViewItemHandle *item_handle)
{
const AbstractViewItem &item = reinterpret_cast<const AbstractViewItem &>(*item_handle);
return item.create_drop_target();
}
/** \} */
} // namespace blender::ui
/* ---------------------------------------------------------------------- */
@ -264,45 +276,6 @@ class ViewItemAPIWrapper {
return true;
}
static bool can_drop(const AbstractViewItem &item,
const wmDrag &drag,
const char **r_disabled_hint)
{
const std::unique_ptr<AbstractViewItemDropController> drop_controller =
item.create_drop_controller();
if (!drop_controller) {
return false;
}
return drop_controller->can_drop(drag, r_disabled_hint);
}
static std::string drop_tooltip(const AbstractViewItem &item, const wmDrag &drag)
{
const std::unique_ptr<AbstractViewItemDropController> drop_controller =
item.create_drop_controller();
if (!drop_controller) {
return {};
}
return drop_controller->drop_tooltip(drag);
}
static bool drop_handle(bContext &C, const AbstractViewItem &item, const ListBase &drags)
{
std::unique_ptr<AbstractViewItemDropController> drop_controller =
item.create_drop_controller();
const char *disabled_hint_dummy = nullptr;
LISTBASE_FOREACH (const wmDrag *, drag, &drags) {
if (drop_controller->can_drop(*drag, &disabled_hint_dummy)) {
return drop_controller->on_drop(&C, *drag);
}
}
return false;
}
};
} // namespace blender::ui
@ -348,26 +321,4 @@ bool UI_view_item_drag_start(bContext *C, const uiViewItemHandle *item_)
return ViewItemAPIWrapper::drag_start(*C, item);
}
bool UI_view_item_can_drop(const uiViewItemHandle *item_,
const wmDrag *drag,
const char **r_disabled_hint)
{
const AbstractViewItem &item = reinterpret_cast<const AbstractViewItem &>(*item_);
return ViewItemAPIWrapper::can_drop(item, *drag, r_disabled_hint);
}
char *UI_view_item_drop_tooltip(const uiViewItemHandle *item_, const wmDrag *drag)
{
const AbstractViewItem &item = reinterpret_cast<const AbstractViewItem &>(*item_);
const std::string tooltip = ViewItemAPIWrapper::drop_tooltip(item, *drag);
return tooltip.empty() ? nullptr : BLI_strdup(tooltip.c_str());
}
bool UI_view_item_drop_handle(bContext *C, const uiViewItemHandle *item_, const ListBase *drags)
{
const AbstractViewItem &item = reinterpret_cast<const AbstractViewItem &>(*item_);
return ViewItemAPIWrapper::drop_handle(*C, item, *drags);
}
/** \} */

View File

@ -389,7 +389,7 @@ uiLayout *GridViewLayoutBuilder::current_layout() const
/* ---------------------------------------------------------------------- */
GridViewBuilder::GridViewBuilder(uiBlock &block) : block_(block)
GridViewBuilder::GridViewBuilder(uiBlock & /*block*/)
{
}

View File

@ -24,6 +24,7 @@
#include "BKE_screen.h"
#include "BLI_listbase.h"
#include "BLI_map.hh"
#include "ED_screen.h"
@ -44,6 +45,8 @@ using namespace blender::ui;
struct ViewLink : public Link {
std::string idname;
std::unique_ptr<AbstractView> view;
static void views_bounds_calc(const uiBlock &block);
};
template<class T>
@ -81,6 +84,51 @@ void ui_block_free_views(uiBlock *block)
}
}
void ViewLink::views_bounds_calc(const uiBlock &block)
{
Map<AbstractView *, rcti> views_bounds;
rcti minmax;
BLI_rcti_init_minmax(&minmax);
LISTBASE_FOREACH (ViewLink *, link, &block.views) {
views_bounds.add(link->view.get(), minmax);
}
LISTBASE_FOREACH (uiBut *, but, &block.buttons) {
if (but->type != UI_BTYPE_VIEW_ITEM) {
continue;
}
uiButViewItem *view_item_but = static_cast<uiButViewItem *>(but);
if (!view_item_but->view_item) {
continue;
}
/* Get the view from the button. */
AbstractViewItem &view_item = reinterpret_cast<AbstractViewItem &>(*view_item_but->view_item);
AbstractView &view = view_item.get_view();
rcti &bounds = views_bounds.lookup(&view);
rcti but_rcti{};
BLI_rcti_rctf_copy_round(&but_rcti, &view_item_but->rect);
BLI_rcti_do_minmax_rcti(&bounds, &but_rcti);
}
for (const auto item : views_bounds.items()) {
const rcti &bounds = item.value;
if (BLI_rcti_is_empty(&bounds)) {
continue;
}
AbstractView &view = *item.key;
view.bounds_ = bounds;
}
}
void ui_block_views_bounds_calc(const uiBlock *block)
{
ViewLink::views_bounds_calc(*block);
}
void ui_block_views_listen(const uiBlock *block, const wmRegionListenerParams *listener_params)
{
ARegion *region = listener_params->region;
@ -92,6 +140,35 @@ void ui_block_views_listen(const uiBlock *block, const wmRegionListenerParams *l
}
}
/* Similar to #ui_but_find_mouse_over_ex(). */
uiViewHandle *UI_region_view_find_at(const ARegion *region, const int xy[2], const int pad)
{
if (!ui_region_contains_point_px(region, xy)) {
return nullptr;
}
LISTBASE_FOREACH (uiBlock *, block, &region->uiblocks) {
float mx = xy[0], my = xy[1];
ui_window_to_block_fl(region, block, &mx, &my);
LISTBASE_FOREACH (ViewLink *, view_link, &block->views) {
std::optional<rcti> bounds = view_link->view->get_bounds();
if (!bounds) {
continue;
}
rcti padded_bounds = *bounds;
if (pad) {
BLI_rcti_pad(&padded_bounds, pad, pad);
}
if (BLI_rcti_isect_pt(&padded_bounds, mx, my)) {
return reinterpret_cast<uiViewHandle *>(view_link->view.get());
}
}
}
return nullptr;
}
uiViewItemHandle *UI_region_views_find_item_at(const ARegion *region, const int xy[2])
{
uiButViewItem *item_but = (uiButViewItem *)ui_view_item_find_mouse_over(region, xy);
@ -112,6 +189,34 @@ uiViewItemHandle *UI_region_views_find_active_item(const ARegion *region)
return item_but->view_item;
}
namespace blender::ui {
std::unique_ptr<DropTargetInterface> region_views_find_drop_target_at(const ARegion *region,
const int xy[2])
{
const uiViewItemHandle *hovered_view_item = UI_region_views_find_item_at(region, xy);
if (hovered_view_item) {
std::unique_ptr<DropTargetInterface> drop_target = view_item_drop_target(hovered_view_item);
if (drop_target) {
return drop_target;
}
}
/* Get style for some sensible padding around the view items. */
const uiStyle *style = UI_style_get_dpi();
const uiViewHandle *hovered_view = UI_region_view_find_at(region, xy, style->buttonspacex);
if (hovered_view) {
std::unique_ptr<DropTargetInterface> drop_target = view_drop_target(hovered_view);
if (drop_target) {
return drop_target;
}
}
return nullptr;
}
} // namespace blender::ui
static StringRef ui_block_view_find_idname(const uiBlock &block, const AbstractView &view)
{
/* First get the idname the of the view we're looking for. */

View File

@ -373,8 +373,6 @@ static bool wm_collada_export_check(bContext *UNUSED(C), wmOperator *op)
void WM_OT_collada_export(wmOperatorType *ot)
{
struct StructRNA *func = ot->srna;
static const EnumPropertyItem prop_bc_export_mesh_type[] = {
{BC_MESH_TYPE_VIEW, "view", 0, "Viewport", "Apply modifier's viewport settings"},
{BC_MESH_TYPE_RENDER, "render", 0, "Render", "Apply modifier's render settings"},
@ -456,20 +454,20 @@ void WM_OT_collada_export(wmOperatorType *ot)
FILE_DEFAULTDISPLAY,
FILE_SORT_DEFAULT);
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"prop_bc_export_ui_section",
prop_bc_export_ui_section,
0,
"Export Section",
"Only for User Interface organization");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"apply_modifiers",
0,
"Apply Modifiers",
"Apply modifiers to exported mesh (non destructive))");
RNA_def_int(func,
RNA_def_int(ot->srna,
"export_mesh_type",
0,
INT_MIN,
@ -479,83 +477,83 @@ void WM_OT_collada_export(wmOperatorType *ot)
INT_MIN,
INT_MAX);
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_mesh_type_selection",
prop_bc_export_mesh_type,
0,
"Resolution",
"Modifier resolution for export");
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_global_forward_selection",
prop_bc_export_global_forward,
BC_DEFAULT_FORWARD,
"Global Forward Axis",
"Global Forward axis for export");
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_global_up_selection",
prop_bc_export_global_up,
BC_DEFAULT_UP,
"Global Up Axis",
"Global Up axis for export");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"apply_global_orientation",
false,
"Apply Global Orientation",
"Rotate all root objects to match the global orientation settings "
"otherwise set the global orientation per Collada asset");
RNA_def_boolean(func, "selected", false, "Selection Only", "Export only selected elements");
RNA_def_boolean(ot->srna, "selected", false, "Selection Only", "Export only selected elements");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"include_children",
false,
"Include Children",
"Export all children of selected objects (even if not selected)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"include_armatures",
false,
"Include Armatures",
"Export related armatures (even if not selected)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"include_shapekeys",
false,
"Include Shape Keys",
"Export all Shape Keys from Mesh Objects");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"deform_bones_only",
false,
"Deform Bones Only",
"Only export deforming bones with armatures");
RNA_def_boolean(
func,
ot->srna,
"include_animations",
true,
"Include Animations",
"Export animations if available (exporting animations will enforce the decomposition of "
"node transforms into <translation> <rotation> and <scale> components)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"include_all_actions",
true,
"Include all Actions",
"Export also unassigned actions (this allows you to export entire animation "
"libraries for your character(s))");
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_animation_type_selection",
prop_bc_export_animation_type,
0,
"Key Type",
"Type for exported animations (use sample keys or Curve keys)");
RNA_def_int(func,
RNA_def_int(ot->srna,
"sampling_rate",
1,
1,
@ -565,7 +563,7 @@ void WM_OT_collada_export(wmOperatorType *ot)
1,
INT_MAX);
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"keep_smooth_curves",
0,
"Keep Smooth curves",
@ -573,48 +571,51 @@ void WM_OT_collada_export(wmOperatorType *ot)
"inverse parent matrix "
"is the unity matrix, otherwise you may end up with odd results)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"keep_keyframes",
0,
"Keep Keyframes",
"Use existing keyframes as additional sample points (this helps when you want "
"to keep manual tweaks)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"keep_flat_curves",
0,
"All Keyed Curves",
"Export also curves which have only one key or are totally flat");
RNA_def_boolean(
func, "active_uv_only", 0, "Only Selected UV Map", "Export only the selected UV Map");
ot->srna, "active_uv_only", 0, "Only Selected UV Map", "Export only the selected UV Map");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"use_texture_copies",
1,
"Copy",
"Copy textures to same folder where the .dae file is exported");
RNA_def_boolean(
func, "triangulate", 1, "Triangulate", "Export polygons (quads and n-gons) as triangles");
RNA_def_boolean(ot->srna,
"triangulate",
1,
"Triangulate",
"Export polygons (quads and n-gons) as triangles");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"use_object_instantiation",
1,
"Use Object Instances",
"Instantiate multiple Objects from same Data");
RNA_def_boolean(
func,
ot->srna,
"use_blender_profile",
1,
"Use Blender Profile",
"Export additional Blender specific information (for material, shaders, bones, etc.)");
RNA_def_boolean(
func, "sort_by_name", 0, "Sort by Object name", "Sort exported data by Object name");
ot->srna, "sort_by_name", 0, "Sort by Object name", "Sort exported data by Object name");
RNA_def_int(func,
RNA_def_int(ot->srna,
"export_object_transformation_type",
0,
INT_MIN,
@ -624,14 +625,14 @@ void WM_OT_collada_export(wmOperatorType *ot)
INT_MIN,
INT_MAX);
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_object_transformation_type_selection",
prop_bc_export_transformation_type,
0,
"Transform",
"Object Transformation type for translation, scale and rotation");
RNA_def_int(func,
RNA_def_int(ot->srna,
"export_animation_transformation_type",
0,
INT_MIN,
@ -643,7 +644,7 @@ void WM_OT_collada_export(wmOperatorType *ot)
INT_MIN,
INT_MAX);
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_animation_transformation_type_selection",
prop_bc_export_transformation_type,
0,
@ -652,20 +653,20 @@ void WM_OT_collada_export(wmOperatorType *ot)
"Note: The Animation transformation type in the Anim Tab "
"is always equal to the Object transformation type in the Geom tab");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"open_sim",
0,
"Export to SL/OpenSim",
"Compatibility mode for SL, OpenSim and other compatible online worlds");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"limit_precision",
0,
"Limit Precision",
"Reduce the precision of the exported data to 6 digits");
RNA_def_boolean(
func,
ot->srna,
"keep_bind_info",
0,
"Keep Bind Info",

View File

@ -51,8 +51,15 @@
# include "BLI_array_store.h"
# include "BLI_array_store_utils.h"
/* check on best size later... */
# define ARRAY_CHUNK_SIZE 256
/**
* This used to be much smaller (256), but this caused too much overhead
* when selection moved to boolean arrays. Especially with high-poly meshes
* where managing a large number of small chunks could be slow, blocking user interactivity.
* Use a larger value (in bytes) which calculates the chunk size using #array_chunk_size_calc.
* See: #105046 & #105205.
*/
# define ARRAY_CHUNK_SIZE_IN_BYTES 65536
# define ARRAY_CHUNK_NUM_MIN 256
# define USE_ARRAY_STORE_THREAD
#endif
@ -70,6 +77,14 @@ static CLG_LogRef LOG = {"ed.undo.mesh"};
#ifdef USE_ARRAY_STORE
static size_t array_chunk_size_calc(const size_t stride)
{
/* Return a chunk size that targets a size in bytes,
* this is done so boolean arrays don't add so much overhead and
* larger arrays aren't so big as to waste memory, see: #105205. */
return std::max(ARRAY_CHUNK_NUM_MIN, ARRAY_CHUNK_SIZE_IN_BYTES / power_of_2_max_i(stride));
}
/* Single linked list of layers stored per type */
struct BArrayCustomData {
BArrayCustomData *next;
@ -190,8 +205,9 @@ static void um_arraystore_cd_compact(CustomData *cdata,
}
const int stride = CustomData_sizeof(type);
BArrayStore *bs = create ? BLI_array_store_at_size_ensure(
&um_arraystore.bs_stride[bs_index], stride, ARRAY_CHUNK_SIZE) :
BArrayStore *bs = create ? BLI_array_store_at_size_ensure(&um_arraystore.bs_stride[bs_index],
stride,
array_chunk_size_calc(stride)) :
nullptr;
const int layer_len = layer_end - layer_start;
@ -372,7 +388,7 @@ static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool
BArrayStore *bs = create ? BLI_array_store_at_size_ensure(
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_SHAPE],
stride,
ARRAY_CHUNK_SIZE) :
array_chunk_size_calc(stride)) :
nullptr;
if (create) {
um->store.keyblocks = static_cast<BArrayState **>(
@ -403,7 +419,9 @@ static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool
BArrayState *state_reference = um_ref ? um_ref->store.mselect : nullptr;
const size_t stride = sizeof(*me->mselect);
BArrayStore *bs = BLI_array_store_at_size_ensure(
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_MSEL], stride, ARRAY_CHUNK_SIZE);
&um_arraystore.bs_stride[ARRAY_STORE_INDEX_MSEL],
stride,
array_chunk_size_calc(stride));
um->store.mselect = BLI_array_store_state_add(
bs, me->mselect, size_t(me->totselect) * stride, state_reference);
}

View File

@ -4346,6 +4346,7 @@ static void ed_screens_statusbar_menu_create(uiLayout *layout, void *UNUSED(arg)
RNA_pointer_create(NULL, &RNA_PreferencesView, &U, &ptr);
uiItemR(layout, &ptr, "show_statusbar_stats", 0, IFACE_("Scene Statistics"), ICON_NONE);
uiItemR(layout, &ptr, "show_statusbar_scene_duration", 0, IFACE_("Scene Duration"), ICON_NONE);
uiItemR(layout, &ptr, "show_statusbar_memory", 0, IFACE_("System Memory"), ICON_NONE);
if (GPU_mem_stats_supported()) {
uiItemR(layout, &ptr, "show_statusbar_vram", 0, IFACE_("Video Memory"), ICON_NONE);

View File

@ -768,7 +768,13 @@ bool SCULPT_vertex_has_unique_face_set(SculptSession *ss, PBVHVertRef vertex)
coord.y = vertex_index / key->grid_size;
int v1, v2;
const SubdivCCGAdjacencyType adjacency = BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
ss->subdiv_ccg, &coord, ss->corner_verts, ss->polys, &v1, &v2);
ss->subdiv_ccg,
&coord,
ss->corner_verts.data(),
ss->corner_verts.size(),
ss->polys.data(),
&v1,
&v2);
switch (adjacency) {
case SUBDIV_CCG_ADJACENT_VERTEX:
return sculpt_check_unique_face_set_in_base_mesh(ss, v1);
@ -888,12 +894,11 @@ static void sculpt_vertex_neighbors_get_faces(SculptSession *ss,
continue;
}
const MPoly &poly = ss->polys[vert_map->indices[i]];
int f_adj_v[2];
if (poly_get_adj_loops_from_vert(&poly, ss->corner_verts, vertex.i, f_adj_v) != -1) {
for (int j = 0; j < ARRAY_SIZE(f_adj_v); j += 1) {
if (f_adj_v[j] != vertex.i) {
sculpt_vertex_neighbor_add(iter, BKE_pbvh_make_vref(f_adj_v[j]), f_adj_v[j]);
}
const blender::int2 f_adj_v = blender::bke::mesh::poly_find_adjecent_verts(
poly, ss->corner_verts, vertex.i);
for (int j = 0; j < 2; j++) {
if (f_adj_v[j] != vertex.i) {
sculpt_vertex_neighbor_add(iter, BKE_pbvh_make_vref(f_adj_v[j]), f_adj_v[j]);
}
}
}
@ -1003,7 +1008,13 @@ bool SCULPT_vertex_is_boundary(const SculptSession *ss, const PBVHVertRef vertex
coord.y = vertex_index / key->grid_size;
int v1, v2;
const SubdivCCGAdjacencyType adjacency = BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
ss->subdiv_ccg, &coord, ss->corner_verts, ss->polys, &v1, &v2);
ss->subdiv_ccg,
&coord,
ss->corner_verts.data(),
ss->corner_verts.size(),
ss->polys.data(),
&v1,
&v2);
switch (adjacency) {
case SUBDIV_CCG_ADJACENT_VERTEX:
return sculpt_check_boundary_vertex_in_base_mesh(ss, v1);

View File

@ -129,7 +129,6 @@ static void do_draw_face_sets_brush_task_cb_ex(void *__restrict userdata,
const Span<float3> positions(
reinterpret_cast<const float3 *>(SCULPT_mesh_deformed_positions_get(ss)),
SCULPT_vertex_count_get(ss));
const Span<int> corner_verts(ss->corner_verts, data->me->totloop);
AutomaskingNodeData automask_data;
SCULPT_automasking_node_begin(
data->ob, ss, ss->cache->automasking, &automask_data, data->nodes[n]);
@ -144,7 +143,7 @@ static void do_draw_face_sets_brush_task_cb_ex(void *__restrict userdata,
const MPoly &poly = ss->polys[vert_map->indices[j]];
const float3 poly_center = bke::mesh::poly_center_calc(
positions, corner_verts.slice(poly.loopstart, poly.totloop));
positions, ss->corner_verts.slice(poly.loopstart, poly.totloop));
if (!sculpt_brush_test_sq_fn(&test, poly_center)) {
continue;

View File

@ -273,7 +273,7 @@ static void sculpt_init_session(Main *bmain, Depsgraph *depsgraph, Scene *scene,
if (ob->sculpt != nullptr) {
BKE_sculptsession_free(ob);
}
ob->sculpt = MEM_cnew<SculptSession>(__func__);
ob->sculpt = MEM_new<SculptSession>(__func__);
ob->sculpt->mode_type = OB_MODE_SCULPT;
/* Trigger evaluation of modifier stack to ensure

View File

@ -656,14 +656,14 @@ static void timeline_cache_draw_cached_segments(PointCache *cache, uint pos_id)
static void timeline_cache_draw_single(PTCacheID *pid, float y_offset, float height, uint pos_id)
{
GPU_matrix_push();
GPU_matrix_translate_2f(0.0, (float)V2D_SCROLL_HANDLE_HEIGHT + y_offset);
GPU_matrix_translate_2f(0.0, float(V2D_SCROLL_HANDLE_HEIGHT) + y_offset);
GPU_matrix_scale_2f(1.0, height);
float color[4];
timeline_cache_color_get(pid, color);
immUniformColor4fv(color);
immRectf(pos_id, (float)pid->cache->startframe, 0.0, (float)pid->cache->endframe, 1.0);
immRectf(pos_id, float(pid->cache->startframe), 0.0, float(pid->cache->endframe), 1.0);
color[3] = 0.4f;
timeline_cache_modify_color_based_on_state(pid->cache, color);

View File

@ -178,7 +178,7 @@ static bool get_keyframe_extents(bAnimContext *ac, float *min, float *max, const
/* Find gp-frame which is less than or equal to current-frame. */
for (gpf = static_cast<bGPDframe *>(gpl->frames.first); gpf; gpf = gpf->next) {
if (!onlySel || (gpf->flag & GP_FRAME_SELECT)) {
const float framenum = (float)gpf->framenum;
const float framenum = float(gpf->framenum);
*min = min_ff(*min, framenum);
*max = max_ff(*max, framenum);
found = true;
@ -193,7 +193,7 @@ static bool get_keyframe_extents(bAnimContext *ac, float *min, float *max, const
for (masklay_shape = static_cast<MaskLayerShape *>(masklay->splines_shapes.first);
masklay_shape;
masklay_shape = masklay_shape->next) {
const float framenum = (float)masklay_shape->frame;
const float framenum = float(masklay_shape->frame);
*min = min_ff(*min, framenum);
*max = max_ff(*max, framenum);
found = true;
@ -231,8 +231,8 @@ static bool get_keyframe_extents(bAnimContext *ac, float *min, float *max, const
else {
/* set default range */
if (ac->scene) {
*min = (float)ac->scene->r.sfra;
*max = (float)ac->scene->r.efra;
*min = float(ac->scene->r.sfra);
*max = float(ac->scene->r.efra);
}
else {
*min = -5;
@ -397,7 +397,7 @@ static int actkeys_viewall(bContext *C, const bool only_sel)
/* view all -> the summary channel is usually the shows everything,
* and resides right at the top... */
v2d->cur.ymax = 0.0f;
v2d->cur.ymin = (float)-BLI_rcti_size_y(&v2d->mask);
v2d->cur.ymin = float(-BLI_rcti_size_y(&v2d->mask));
}
else {
/* locate first selected channel (or the active one), and frame those */
@ -850,7 +850,7 @@ static void insert_action_keys(bAnimContext *ac, short mode)
/* insert keyframes */
const AnimationEvalContext anim_eval_context = BKE_animsys_eval_context_construct(
ac->depsgraph, (float)scene->r.cfra);
ac->depsgraph, float(scene->r.cfra));
for (ale = static_cast<bAnimListElem *>(anim_data.first); ale; ale = ale->next) {
switch (ale->type) {
case ANIMTYPE_GPLAYER:
@ -1962,7 +1962,7 @@ static void mirror_action_keys(bAnimContext *ac, short mode)
TimeMarker *marker = ED_markers_get_first_selected(ac->markers);
if (marker) {
ked.f1 = (float)marker->frame;
ked.f1 = float(marker->frame);
}
else {
return;

View File

@ -163,8 +163,10 @@ static void actkeys_find_key_in_list_element(bAnimContext *ac,
/* half-size (for either side), but rounded up to nearest int (for easier targeting) */
key_hsize = roundf(key_hsize / 2.0f);
const Range2f range = {UI_view2d_region_to_view_x(v2d, region_x - (int)key_hsize),
UI_view2d_region_to_view_x(v2d, region_x + (int)key_hsize)};
const Range2f range = {
UI_view2d_region_to_view_x(v2d, region_x - int(key_hsize)),
UI_view2d_region_to_view_x(v2d, region_x + int(key_hsize)),
};
const ActKeyColumn *ak = ED_keylist_find_any_between(keylist, range);
if (ak) {
@ -1075,7 +1077,7 @@ static void columnselect_action_keys(bAnimContext *ac, short mode)
ce = MEM_cnew<CfraElem>("cfraElem");
BLI_addtail(&ked.list, ce);
ce->cfra = (float)scene->r.cfra;
ce->cfra = float(scene->r.cfra);
break;
case ACTKEYS_COLUMNSEL_MARKERS_COLUMN: /* list of selected markers */
@ -1416,10 +1418,10 @@ static void actkeys_select_leftright(bAnimContext *ac, short leftright, short se
if (leftright == ACTKEYS_LRSEL_LEFT) {
ked.f1 = MINAFRAMEF;
ked.f2 = (float)(scene->r.cfra + 0.1f);
ked.f2 = float(scene->r.cfra + 0.1f);
}
else {
ked.f1 = (float)(scene->r.cfra - 0.1f);
ked.f1 = float(scene->r.cfra - 0.1f);
ked.f2 = MAXFRAMEF;
}

View File

@ -101,9 +101,9 @@ static SpaceLink *action_create(const ScrArea *area, const Scene *scene)
BLI_addtail(&saction->regionbase, region);
region->regiontype = RGN_TYPE_WINDOW;
region->v2d.tot.xmin = (float)(scene->r.sfra - 10);
region->v2d.tot.ymin = (float)(-area->winy) / 3.0f;
region->v2d.tot.xmax = (float)(scene->r.efra + 10);
region->v2d.tot.xmin = float(scene->r.sfra - 10);
region->v2d.tot.ymin = float(-area->winy) / 3.0f;
region->v2d.tot.xmax = float(scene->r.efra + 10);
region->v2d.tot.ymax = 0.0f;
region->v2d.cur = region->v2d.tot;
@ -577,8 +577,8 @@ static void action_listener(const wmSpaceTypeListenerParams *params)
LISTBASE_FOREACH (ARegion *, region, &area->regionbase) {
if (region->regiontype == RGN_TYPE_WINDOW) {
Scene *scene = static_cast<Scene *>(wmn->reference);
region->v2d.tot.xmin = (float)(scene->r.sfra - 4);
region->v2d.tot.xmax = (float)(scene->r.efra + 4);
region->v2d.tot.xmin = float(scene->r.sfra - 4);
region->v2d.tot.xmax = float(scene->r.efra + 4);
break;
}
}

View File

@ -49,7 +49,7 @@ class AssetCatalogTreeView : public ui::AbstractTreeView {
SpaceFile &space_file_;
friend class AssetCatalogTreeViewItem;
friend class AssetCatalogDropController;
friend class AssetCatalogDropTarget;
friend class AssetCatalogTreeViewAllItem;
public:
@ -90,7 +90,7 @@ class AssetCatalogTreeViewItem : public ui::BasicTreeViewItem {
/** Add drag support for catalog items. */
std::unique_ptr<ui::AbstractViewItemDragController> create_drag_controller() const override;
/** Add dropping support for catalog items. */
std::unique_ptr<ui::AbstractViewItemDropController> create_drop_controller() const override;
std::unique_ptr<ui::AbstractViewItemDropTarget> create_drop_target() const override;
};
class AssetCatalogDragController : public ui::AbstractViewItemDragController {
@ -105,15 +105,15 @@ class AssetCatalogDragController : public ui::AbstractViewItemDragController {
void on_drag_start() override;
};
class AssetCatalogDropController : public ui::AbstractViewItemDropController {
class AssetCatalogDropTarget : public ui::AbstractViewItemDropTarget {
AssetCatalogTreeItem &catalog_item_;
public:
AssetCatalogDropController(AssetCatalogTreeView &tree_view, AssetCatalogTreeItem &catalog_item);
AssetCatalogDropTarget(AssetCatalogTreeView &tree_view, AssetCatalogTreeItem &catalog_item);
bool can_drop(const wmDrag &drag, const char **r_disabled_hint) const override;
std::string drop_tooltip(const wmDrag &drag) const override;
bool on_drop(struct bContext *C, const wmDrag &drag) override;
bool on_drop(struct bContext *C, const wmDrag &drag) const override;
::AssetLibrary &get_asset_library() const;
@ -146,29 +146,29 @@ class AssetCatalogTreeViewAllItem : public ui::BasicTreeViewItem {
void build_row(uiLayout &row) override;
struct DropController : public ui::AbstractViewItemDropController {
DropController(AssetCatalogTreeView &tree_view);
struct DropTarget : public ui::AbstractViewItemDropTarget {
DropTarget(AssetCatalogTreeView &tree_view);
bool can_drop(const wmDrag &drag, const char **r_disabled_hint) const override;
std::string drop_tooltip(const wmDrag &drag) const override;
bool on_drop(struct bContext *C, const wmDrag &drag) override;
bool on_drop(struct bContext *C, const wmDrag &drag) const override;
};
std::unique_ptr<ui::AbstractViewItemDropController> create_drop_controller() const override;
std::unique_ptr<ui::AbstractViewItemDropTarget> create_drop_target() const override;
};
class AssetCatalogTreeViewUnassignedItem : public ui::BasicTreeViewItem {
using BasicTreeViewItem::BasicTreeViewItem;
struct DropController : public ui::AbstractViewItemDropController {
DropController(AssetCatalogTreeView &tree_view);
struct DropTarget : public ui::AbstractViewItemDropTarget {
DropTarget(AssetCatalogTreeView &tree_view);
bool can_drop(const wmDrag &drag, const char **r_disabled_hint) const override;
std::string drop_tooltip(const wmDrag &drag) const override;
bool on_drop(struct bContext *C, const wmDrag &drag) override;
bool on_drop(struct bContext *C, const wmDrag &drag) const override;
};
std::unique_ptr<ui::AbstractViewItemDropController> create_drop_controller() const override;
std::unique_ptr<ui::AbstractViewItemDropTarget> create_drop_target() const override;
};
/* ---------------------------------------------------------------------- */
@ -339,10 +339,10 @@ bool AssetCatalogTreeViewItem::rename(StringRefNull new_name)
return true;
}
std::unique_ptr<ui::AbstractViewItemDropController> AssetCatalogTreeViewItem::
create_drop_controller() const
std::unique_ptr<ui::AbstractViewItemDropTarget> AssetCatalogTreeViewItem::create_drop_target()
const
{
return std::make_unique<AssetCatalogDropController>(
return std::make_unique<AssetCatalogDropTarget>(
static_cast<AssetCatalogTreeView &>(get_tree_view()), catalog_item_);
}
@ -355,13 +355,13 @@ std::unique_ptr<ui::AbstractViewItemDragController> AssetCatalogTreeViewItem::
/* ---------------------------------------------------------------------- */
AssetCatalogDropController::AssetCatalogDropController(AssetCatalogTreeView &tree_view,
AssetCatalogTreeItem &catalog_item)
: ui::AbstractViewItemDropController(tree_view), catalog_item_(catalog_item)
AssetCatalogDropTarget::AssetCatalogDropTarget(AssetCatalogTreeView &tree_view,
AssetCatalogTreeItem &catalog_item)
: ui::AbstractViewItemDropTarget(tree_view), catalog_item_(catalog_item)
{
}
bool AssetCatalogDropController::can_drop(const wmDrag &drag, const char **r_disabled_hint) const
bool AssetCatalogDropTarget::can_drop(const wmDrag &drag, const char **r_disabled_hint) const
{
if (drag.type == WM_DRAG_ASSET_CATALOG) {
const ::AssetLibrary &library = get_asset_library();
@ -389,7 +389,7 @@ bool AssetCatalogDropController::can_drop(const wmDrag &drag, const char **r_dis
return false;
}
std::string AssetCatalogDropController::drop_tooltip(const wmDrag &drag) const
std::string AssetCatalogDropTarget::drop_tooltip(const wmDrag &drag) const
{
if (drag.type == WM_DRAG_ASSET_CATALOG) {
return drop_tooltip_asset_catalog(drag);
@ -397,7 +397,7 @@ std::string AssetCatalogDropController::drop_tooltip(const wmDrag &drag) const
return drop_tooltip_asset_list(drag);
}
std::string AssetCatalogDropController::drop_tooltip_asset_catalog(const wmDrag &drag) const
std::string AssetCatalogDropTarget::drop_tooltip_asset_catalog(const wmDrag &drag) const
{
BLI_assert(drag.type == WM_DRAG_ASSET_CATALOG);
const AssetCatalog *src_catalog = get_drag_catalog(drag, get_asset_library());
@ -406,7 +406,7 @@ std::string AssetCatalogDropController::drop_tooltip_asset_catalog(const wmDrag
TIP_("into") + " '" + catalog_item_.get_name() + "'";
}
std::string AssetCatalogDropController::drop_tooltip_asset_list(const wmDrag &drag) const
std::string AssetCatalogDropTarget::drop_tooltip_asset_list(const wmDrag &drag) const
{
BLI_assert(drag.type == WM_DRAG_ASSET_LIST);
@ -429,7 +429,7 @@ std::string AssetCatalogDropController::drop_tooltip_asset_list(const wmDrag &dr
return basic_tip;
}
bool AssetCatalogDropController::on_drop(struct bContext *C, const wmDrag &drag)
bool AssetCatalogDropTarget::on_drop(struct bContext *C, const wmDrag &drag) const
{
if (drag.type == WM_DRAG_ASSET_CATALOG) {
return drop_asset_catalog_into_catalog(
@ -442,7 +442,7 @@ bool AssetCatalogDropController::on_drop(struct bContext *C, const wmDrag &drag)
catalog_item_.get_simple_name());
}
bool AssetCatalogDropController::drop_asset_catalog_into_catalog(
bool AssetCatalogDropTarget::drop_asset_catalog_into_catalog(
const wmDrag &drag,
AssetCatalogTreeView &tree_view,
const std::optional<CatalogID> drop_catalog_id)
@ -456,11 +456,11 @@ bool AssetCatalogDropController::drop_asset_catalog_into_catalog(
return true;
}
bool AssetCatalogDropController::drop_assets_into_catalog(struct bContext *C,
const AssetCatalogTreeView &tree_view,
const wmDrag &drag,
CatalogID catalog_id,
StringRefNull simple_name)
bool AssetCatalogDropTarget::drop_assets_into_catalog(struct bContext *C,
const AssetCatalogTreeView &tree_view,
const wmDrag &drag,
CatalogID catalog_id,
StringRefNull simple_name)
{
BLI_assert(drag.type == WM_DRAG_ASSET_LIST);
const ListBase *asset_drags = WM_drag_asset_list_get(&drag);
@ -491,8 +491,8 @@ bool AssetCatalogDropController::drop_assets_into_catalog(struct bContext *C,
return true;
}
AssetCatalog *AssetCatalogDropController::get_drag_catalog(const wmDrag &drag,
const ::AssetLibrary &asset_library)
AssetCatalog *AssetCatalogDropTarget::get_drag_catalog(const wmDrag &drag,
const ::AssetLibrary &asset_library)
{
if (drag.type != WM_DRAG_ASSET_CATALOG) {
return nullptr;
@ -504,8 +504,7 @@ AssetCatalog *AssetCatalogDropController::get_drag_catalog(const wmDrag &drag,
return catalog_service->find_catalog(catalog_drag->drag_catalog_id);
}
bool AssetCatalogDropController::has_droppable_asset(const wmDrag &drag,
const char **r_disabled_hint)
bool AssetCatalogDropTarget::has_droppable_asset(const wmDrag &drag, const char **r_disabled_hint)
{
const ListBase *asset_drags = WM_drag_asset_list_get(&drag);
@ -521,8 +520,8 @@ bool AssetCatalogDropController::has_droppable_asset(const wmDrag &drag,
return false;
}
bool AssetCatalogDropController::can_modify_catalogs(const ::AssetLibrary &library,
const char **r_disabled_hint)
bool AssetCatalogDropTarget::can_modify_catalogs(const ::AssetLibrary &library,
const char **r_disabled_hint)
{
if (ED_asset_catalogs_read_only(library)) {
*r_disabled_hint = "Catalogs cannot be edited in this asset library";
@ -531,7 +530,7 @@ bool AssetCatalogDropController::can_modify_catalogs(const ::AssetLibrary &libra
return true;
}
::AssetLibrary &AssetCatalogDropController::get_asset_library() const
::AssetLibrary &AssetCatalogDropTarget::get_asset_library() const
{
return *get_view<AssetCatalogTreeView>().asset_library_;
}
@ -580,30 +579,30 @@ void AssetCatalogTreeViewAllItem::build_row(uiLayout &row)
RNA_string_set(props, "parent_path", nullptr);
}
std::unique_ptr<ui::AbstractViewItemDropController> AssetCatalogTreeViewAllItem::
create_drop_controller() const
std::unique_ptr<ui::AbstractViewItemDropTarget> AssetCatalogTreeViewAllItem::create_drop_target()
const
{
return std::make_unique<AssetCatalogTreeViewAllItem::DropController>(
return std::make_unique<AssetCatalogTreeViewAllItem::DropTarget>(
static_cast<AssetCatalogTreeView &>(get_tree_view()));
}
AssetCatalogTreeViewAllItem::DropController::DropController(AssetCatalogTreeView &tree_view)
: ui::AbstractViewItemDropController(tree_view)
AssetCatalogTreeViewAllItem::DropTarget::DropTarget(AssetCatalogTreeView &tree_view)
: ui::AbstractViewItemDropTarget(tree_view)
{
}
bool AssetCatalogTreeViewAllItem::DropController::can_drop(const wmDrag &drag,
const char **r_disabled_hint) const
bool AssetCatalogTreeViewAllItem::DropTarget::can_drop(const wmDrag &drag,
const char **r_disabled_hint) const
{
if (drag.type != WM_DRAG_ASSET_CATALOG) {
return false;
}
::AssetLibrary &library = *get_view<AssetCatalogTreeView>().asset_library_;
if (!AssetCatalogDropController::can_modify_catalogs(library, r_disabled_hint)) {
if (!AssetCatalogDropTarget::can_modify_catalogs(library, r_disabled_hint)) {
return false;
}
const AssetCatalog *drag_catalog = AssetCatalogDropController::get_drag_catalog(drag, library);
const AssetCatalog *drag_catalog = AssetCatalogDropTarget::get_drag_catalog(drag, library);
if (drag_catalog->path.parent() == "") {
*r_disabled_hint = "Catalog is already placed at the highest level";
return false;
@ -612,21 +611,21 @@ bool AssetCatalogTreeViewAllItem::DropController::can_drop(const wmDrag &drag,
return true;
}
std::string AssetCatalogTreeViewAllItem::DropController::drop_tooltip(const wmDrag &drag) const
std::string AssetCatalogTreeViewAllItem::DropTarget::drop_tooltip(const wmDrag &drag) const
{
BLI_assert(drag.type == WM_DRAG_ASSET_CATALOG);
const AssetCatalog *drag_catalog = AssetCatalogDropController::get_drag_catalog(
const AssetCatalog *drag_catalog = AssetCatalogDropTarget::get_drag_catalog(
drag, *get_view<AssetCatalogTreeView>().asset_library_);
return std::string(TIP_("Move Catalog")) + " '" + drag_catalog->path.name() + "' " +
TIP_("to the top level of the tree");
}
bool AssetCatalogTreeViewAllItem::DropController::on_drop(struct bContext * /*C*/,
const wmDrag &drag)
bool AssetCatalogTreeViewAllItem::DropTarget::on_drop(struct bContext * /*C*/,
const wmDrag &drag) const
{
BLI_assert(drag.type == WM_DRAG_ASSET_CATALOG);
return AssetCatalogDropController::drop_asset_catalog_into_catalog(
return AssetCatalogDropTarget::drop_asset_catalog_into_catalog(
drag,
get_view<AssetCatalogTreeView>(),
/* No value to drop into the root level. */
@ -635,29 +634,28 @@ bool AssetCatalogTreeViewAllItem::DropController::on_drop(struct bContext * /*C*
/* ---------------------------------------------------------------------- */
std::unique_ptr<ui::AbstractViewItemDropController> AssetCatalogTreeViewUnassignedItem::
create_drop_controller() const
std::unique_ptr<ui::AbstractViewItemDropTarget> AssetCatalogTreeViewUnassignedItem::
create_drop_target() const
{
return std::make_unique<AssetCatalogTreeViewUnassignedItem::DropController>(
return std::make_unique<AssetCatalogTreeViewUnassignedItem::DropTarget>(
static_cast<AssetCatalogTreeView &>(get_tree_view()));
}
AssetCatalogTreeViewUnassignedItem::DropController::DropController(AssetCatalogTreeView &tree_view)
: ui::AbstractViewItemDropController(tree_view)
AssetCatalogTreeViewUnassignedItem::DropTarget::DropTarget(AssetCatalogTreeView &tree_view)
: ui::AbstractViewItemDropTarget(tree_view)
{
}
bool AssetCatalogTreeViewUnassignedItem::DropController::can_drop(
const wmDrag &drag, const char **r_disabled_hint) const
bool AssetCatalogTreeViewUnassignedItem::DropTarget::can_drop(const wmDrag &drag,
const char **r_disabled_hint) const
{
if (drag.type != WM_DRAG_ASSET_LIST) {
return false;
}
return AssetCatalogDropController::has_droppable_asset(drag, r_disabled_hint);
return AssetCatalogDropTarget::has_droppable_asset(drag, r_disabled_hint);
}
std::string AssetCatalogTreeViewUnassignedItem::DropController::drop_tooltip(
const wmDrag &drag) const
std::string AssetCatalogTreeViewUnassignedItem::DropTarget::drop_tooltip(const wmDrag &drag) const
{
const ListBase *asset_drags = WM_drag_asset_list_get(&drag);
const bool is_multiple_assets = !BLI_listbase_is_single(asset_drags);
@ -666,11 +664,11 @@ std::string AssetCatalogTreeViewUnassignedItem::DropController::drop_tooltip(
TIP_("Move asset out of any catalog");
}
bool AssetCatalogTreeViewUnassignedItem::DropController::on_drop(struct bContext *C,
const wmDrag &drag)
bool AssetCatalogTreeViewUnassignedItem::DropTarget::on_drop(struct bContext *C,
const wmDrag &drag) const
{
/* Assign to nil catalog ID. */
return AssetCatalogDropController::drop_assets_into_catalog(
return AssetCatalogDropTarget::drop_assets_into_catalog(
C, get_view<AssetCatalogTreeView>(), drag, CatalogID{});
}

View File

@ -25,6 +25,7 @@
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_string.h"
#include "BLI_timecode.h"
#include "BLI_utildefines.h"
#include "BLT_translation.h"
@ -616,6 +617,24 @@ static const char *info_statusbar_string(Main *bmain,
}
}
/* Scene Duration. */
if (statusbar_flag & STATUSBAR_SHOW_SCENE_DURATION) {
if (info[0]) {
ofs += BLI_snprintf_rlen(info + ofs, len - ofs, " | ");
}
const int relative_current_frame = (scene->r.cfra - scene->r.sfra) + 1;
const int frame_count = (scene->r.efra - scene->r.sfra) + 1;
char timecode[32];
BLI_timecode_string_from_time(
timecode, sizeof(timecode), -2, FRA2TIME(frame_count), FPS, U.timecode_style);
ofs += BLI_snprintf_rlen(info + ofs,
len - ofs,
TIP_("Duration: %s (Frame %i/%i)"),
timecode,
relative_current_frame,
frame_count);
}
/* Memory status. */
if (statusbar_flag & STATUSBAR_SHOW_MEMORY) {
if (info[0]) {
@ -668,7 +687,8 @@ const char *ED_info_statistics_string(Main *bmain, Scene *scene, ViewLayer *view
{
const eUserpref_StatusBar_Flag statistics_status_bar_flag = STATUSBAR_SHOW_STATS |
STATUSBAR_SHOW_MEMORY |
STATUSBAR_SHOW_VERSION;
STATUSBAR_SHOW_VERSION |
STATUSBAR_SHOW_SCENE_DURATION;
return info_statusbar_string(bmain, scene, view_layer, statistics_status_bar_flag);
}

View File

@ -251,8 +251,8 @@ static int adjacent_edge(const Span<int> corner_verts,
const int vertex)
{
const int adjacent_loop_i = (corner_verts[loop_i] == vertex) ?
bke::mesh_topology::poly_loop_prev(poly, loop_i) :
bke::mesh_topology::poly_loop_next(poly, loop_i);
bke::mesh::poly_corner_prev(poly, loop_i) :
bke::mesh::poly_corner_next(poly, loop_i);
return corner_edges[adjacent_loop_i];
}

View File

@ -1476,7 +1476,6 @@ struct EdgeFeatData {
Object *ob_eval; /* For evaluated materials. */
const int *material_indices;
blender::Span<MEdge> edges;
blender::Span<MPoly> polys;
blender::Span<int> corner_verts;
blender::Span<int> corner_edges;
blender::Span<MLoopTri> looptris;
@ -2106,7 +2105,6 @@ static void lineart_geometry_object_load(LineartObjectInfo *ob_info,
edge_feat_data.ob_eval = ob_info->original_ob_eval;
edge_feat_data.material_indices = material_indices;
edge_feat_data.edges = me->edges();
edge_feat_data.polys = me->polys();
edge_feat_data.corner_verts = me->corner_verts();
edge_feat_data.corner_edges = me->corner_edges();
edge_feat_data.looptris = looptris;

View File

@ -1,3 +1,5 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "gpu_testing.hh"
#include "MEM_guardedalloc.h"
@ -46,4 +48,4 @@ static void test_texture_read()
}
GPU_TEST(texture_read)
} // namespace blender::gpu::tests
} // namespace blender::gpu::tests

View File

@ -398,10 +398,6 @@ enum {
/** \name Utility Macros
* \{ */
#define ME_POLY_LOOP_PREV(poly, i) \
((poly)->loopstart + (((i) + (poly)->totloop - 1) % (poly)->totloop))
#define ME_POLY_LOOP_NEXT(poly, i) ((poly)->loopstart + (((i) + 1) % (poly)->totloop))
/** Number of tri's that make up this polygon once tessellated. */
#define ME_POLY_TRI_TOT(poly) ((poly)->totloop - 2)

View File

@ -1180,6 +1180,7 @@ typedef enum eUserpref_StatusBar_Flag {
STATUSBAR_SHOW_VRAM = (1 << 1),
STATUSBAR_SHOW_STATS = (1 << 2),
STATUSBAR_SHOW_VERSION = (1 << 3),
STATUSBAR_SHOW_SCENE_DURATION = (1 << 4),
} eUserpref_StatusBar_Flag;
/**

View File

@ -108,6 +108,12 @@
Mesh *me = rna_mesh(ptr); \
CustomData *data = rna_mesh_##customdata_type(ptr); \
if (data) { \
if (UNLIKELY(value < 0)) { \
value = 0; \
} \
else if (value > 0) { \
value = min_ii(value, CustomData_number_of_layers(data, layer_type) - 1); \
} \
CustomData_set_layer_##active_type(data, layer_type, value); \
BKE_mesh_tessface_clear(me); \
} \

View File

@ -2242,8 +2242,7 @@ bool rna_GPencil_object_poll(PointerRNA *UNUSED(ptr), PointerRNA value)
bool rna_Object_use_dynamic_topology_sculpting_get(PointerRNA *ptr)
{
SculptSession *ss = ((Object *)ptr->owner_id)->sculpt;
return (ss && ss->bm);
return BKE_object_sculpt_use_dyntopo((Object *)ptr->owner_id);
}
static void rna_object_lineart_update(Main *UNUSED(bmain), Scene *UNUSED(scene), PointerRNA *ptr)

View File

@ -385,8 +385,8 @@ static void rna_Sculpt_update(bContext *C, PointerRNA *UNUSED(ptr))
WM_main_add_notifier(NC_OBJECT | ND_MODIFIER, ob);
if (ob->sculpt) {
ob->sculpt->bm_smooth_shading = ((scene->toolsettings->sculpt->flags &
SCULPT_DYNTOPO_SMOOTH_SHADING) != 0);
BKE_object_sculpt_dyntopo_smooth_shading_set(
ob, ((scene->toolsettings->sculpt->flags & SCULPT_DYNTOPO_SMOOTH_SHADING) != 0));
}
}
}

View File

@ -4972,6 +4972,11 @@ static void rna_def_userdef_view(BlenderRNA *brna)
RNA_def_property_boolean_sdna(prop, NULL, "statusbar_flag", STATUSBAR_SHOW_STATS);
RNA_def_property_ui_text(prop, "Show Statistics", "Show scene statistics");
RNA_def_property_update(prop, NC_SPACE | ND_SPACE_INFO, "rna_userdef_update");
prop = RNA_def_property(srna, "show_statusbar_scene_duration", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, NULL, "statusbar_flag", STATUSBAR_SHOW_SCENE_DURATION);
RNA_def_property_ui_text(prop, "Show Scene Duration", "Show scene duration");
RNA_def_property_update(prop, NC_SPACE | ND_SPACE_INFO, "rna_userdef_update");
}
static void rna_def_userdef_edit(BlenderRNA *brna)

View File

@ -245,8 +245,8 @@ static Mesh *modifyMesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh *
sculpt_session->totvert = mesh->totvert;
sculpt_session->totpoly = mesh->totpoly;
sculpt_session->vert_positions = nullptr;
sculpt_session->polys = nullptr;
sculpt_session->corner_verts = nullptr;
sculpt_session->polys = {};
sculpt_session->corner_verts = {};
}
// BKE_subdiv_stats_print(&subdiv->stats);
}

View File

@ -464,8 +464,8 @@ static Frame **collect_hull_frames(int v,
hull_frames = MEM_cnew_array<Frame *>(*tothullframe, __func__);
hull_frames_num = 0;
for (i = 0; i < emap[v].count; i++) {
const MEdge *edge = &edges[emap[v].indices[i]];
f = &frames[BKE_mesh_edge_other_vert(edge, v)];
const MEdge &edge = edges[emap[v].indices[i]];
f = &frames[blender::bke::mesh::edge_other_vert(edge, v)];
/* Can't have adjacent branch nodes yet */
if (f->totframe) {
hull_frames[hull_frames_num++] = &f->frames[0];
@ -745,7 +745,7 @@ static void build_emats_stack(BLI_Stack *stack,
parent_is_branch = ((emap[parent_v].count > 2) || (vs[parent_v].flag & MVERT_SKIN_ROOT));
v = BKE_mesh_edge_other_vert(&edges[e], parent_v);
v = blender::bke::mesh::edge_other_vert(edges[e], parent_v);
emat[e].origin = parent_v;
/* If parent is a branch node, start a new edge chain */
@ -796,9 +796,10 @@ static EMat *build_edge_mats(const MVertSkin *vs,
for (v = 0; v < verts_num; v++) {
if (vs[v].flag & MVERT_SKIN_ROOT) {
if (emap[v].count >= 1) {
const MEdge *edge = &edges[emap[v].indices[0]];
calc_edge_mat(
stack_elem.mat, vert_positions[v], vert_positions[BKE_mesh_edge_other_vert(edge, v)]);
const MEdge &edge = edges[emap[v].indices[0]];
calc_edge_mat(stack_elem.mat,
vert_positions[v],
vert_positions[blender::bke::mesh::edge_other_vert(edge, v)]);
stack_elem.parent_v = v;
/* Add adjacent edges to stack */

View File

@ -82,7 +82,7 @@ class CornerPreviousEdgeFieldInput final : public bke::MeshFieldInput {
mesh.totloop,
[polys, corner_edges, loop_to_poly_map = std::move(loop_to_poly_map)](const int corner_i) {
const MPoly &poly = polys[loop_to_poly_map[corner_i]];
return corner_edges[bke::mesh_topology::poly_loop_prev(poly, corner_i)];
return corner_edges[bke::mesh::poly_corner_prev(poly, corner_i)];
});
}

View File

@ -98,7 +98,7 @@ void initialize_volume_component_from_points(GeoNodeExecParams &params,
return;
}
float voxel_size;
float voxel_size = 0.0f;
if (storage.resolution_mode == GEO_NODE_POINTS_TO_VOLUME_RESOLUTION_MODE_SIZE) {
voxel_size = params.get_input<float>("Voxel Size");
}
@ -107,6 +107,9 @@ void initialize_volume_component_from_points(GeoNodeExecParams &params,
const float max_radius = *std::max_element(radii.begin(), radii.end());
voxel_size = compute_voxel_size_from_amount(voxel_amount, positions, max_radius);
}
else {
BLI_assert_msg(0, "Unknown volume resolution mode");
}
const double determinant = std::pow(double(voxel_size), 3.0);
if (!BKE_volume_grid_determinant_valid(determinant)) {

View File

@ -145,11 +145,16 @@ static void node_gather_link_searches(GatherLinkSearchOpParams &params)
}
class LazyFunctionForSwitchNode : public LazyFunction {
private:
bool can_be_field_ = false;
public:
LazyFunctionForSwitchNode(const bNode &node)
{
const NodeSwitch &storage = node_storage(node);
const eNodeSocketDatatype data_type = eNodeSocketDatatype(storage.input_type);
can_be_field_ = ELEM(data_type, SOCK_FLOAT, SOCK_INT, SOCK_BOOLEAN, SOCK_VECTOR, SOCK_RGBA);
const bNodeSocketType *socket_type = nullptr;
for (const bNodeSocket *socket : node.output_sockets()) {
if (socket->type == data_type) {
@ -169,7 +174,7 @@ class LazyFunctionForSwitchNode : public LazyFunction {
void execute_impl(lf::Params &params, const lf::Context & /*context*/) const override
{
const ValueOrField<bool> condition = params.get_input<ValueOrField<bool>>(0);
if (condition.is_field()) {
if (condition.is_field() && can_be_field_) {
Field<bool> condition_field = condition.as_field();
if (condition_field.node().depends_on_input()) {
this->execute_field(condition.as_field(), params);

View File

@ -40,15 +40,15 @@ static CLG_LogRef LOG = {"bgl"};
static void report_deprecated_call(const char *function_name)
{
/* Only report first 100 deprecated calls. BGL is typically used inside an handler that is
/* Only report first 10 deprecated calls. BGL is typically used inside an handler that is
* triggered at refresh. */
static int times = 0;
while (times >= 100) {
while (times >= 10) {
return;
}
char message[256];
SNPRINTF(message,
"'bgl.gl%s' is deprecated and will be removed in Blender 3.7. Report or update your "
"'bgl.gl%s' is deprecated and will be removed in Blender 4.0. Report or update your "
"script to use 'gpu' module.",
function_name);
CLOG_WARN(&LOG, "%s", message);
@ -2653,7 +2653,7 @@ PyObject *BPyInit_bgl(void)
if (GPU_backend_get_type() != GPU_BACKEND_OPENGL) {
CLOG_WARN(&LOG,
"'bgl' imported without an OpenGL backend. Please update your add-ons to use the "
"'gpu' module. In Blender 3.7 'bgl' will be removed.");
"'gpu' module. In Blender 4.0 'bgl' will be removed.");
}
PyModule_AddObject(submodule, "Buffer", (PyObject *)&BGL_bufferType);

View File

@ -414,6 +414,7 @@ if(WITH_PYTHON)
DESTINATION ${TARGETDIR_VER}
PATTERN ".git" EXCLUDE
PATTERN ".gitignore" EXCLUDE
PATTERN ".gitea" EXCLUDE
PATTERN ".github" EXCLUDE
PATTERN ".arcconfig" EXCLUDE
PATTERN "__pycache__" EXCLUDE
@ -535,6 +536,9 @@ macro(install_dir from to)
DESTINATION ${to}
# Irrelevant files and caches.
PATTERN ".git" EXCLUDE
PATTERN ".gitignore" EXCLUDE
PATTERN ".gitea" EXCLUDE
PATTERN ".github" EXCLUDE
PATTERN ".svn" EXCLUDE
PATTERN "*.pyc" EXCLUDE
PATTERN "*.pyo" EXCLUDE
@ -1294,33 +1298,6 @@ elseif(APPLE)
set_target_properties(blender PROPERTIES OUTPUT_NAME Blender)
endif()
# Handy install macro to exclude files, we use \$ escape for the "to"
# argument when calling so `${BUILD_TYPE}` does not get expanded.
macro(install_dir from to)
install(
DIRECTORY ${from}
DESTINATION ${to}
PATTERN ".git" EXCLUDE
PATTERN ".svn" EXCLUDE
PATTERN "*.pyc" EXCLUDE
PATTERN "*.pyo" EXCLUDE
PATTERN "*.orig" EXCLUDE
PATTERN "*.rej" EXCLUDE
PATTERN "__pycache__" EXCLUDE
PATTERN "__MACOSX" EXCLUDE
PATTERN ".DS_Store" EXCLUDE
PATTERN "config-${PYTHON_VERSION}/*.a" EXCLUDE # static lib
PATTERN "lib2to3" EXCLUDE # ./lib2to3
PATTERN "tkinter" EXCLUDE # ./tkinter
PATTERN "lib-dynload/_tkinter.*" EXCLUDE # ./lib-dynload/_tkinter.co
PATTERN "idlelib" EXCLUDE # ./idlelib
PATTERN "test" EXCLUDE # ./test
PATTERN "turtledemo" EXCLUDE # ./turtledemo
PATTERN "turtle.py" EXCLUDE # ./turtle.py
PATTERN "wininst*.exe" EXCLUDE # from distutils, avoid malware false positive
)
endmacro()
set(OSX_APP_SOURCEDIR ${CMAKE_SOURCE_DIR}/release/darwin/Blender.app)
# Setup `Info.plist`.

View File

@ -39,6 +39,9 @@ global:
realpath;
sched_*;
valloc;
/* needed on FreeBSD */
__progname;
environ;
local:
*;
};