WIP: Vulkan: Initial Immediate Mode Support. #106954

Closed
Jeroen Bakker wants to merge 27 commits from Jeroen-Bakker:vulkan-immediate into main

160 changed files with 1284 additions and 782 deletions
Showing only changes of commit 44aae51d41

View File

@ -1300,16 +1300,29 @@ macro(windows_install_shared_manifest)
endif()
if(WINDOWS_INSTALL_DEBUG)
set(WINDOWS_CONFIGURATIONS "${WINDOWS_CONFIGURATIONS};Debug")
list(APPEND WINDOWS_SHARED_MANIFEST_DEBUG ${WINDOWS_INSTALL_FILES})
endif()
if(WINDOWS_INSTALL_RELEASE)
list(APPEND WINDOWS_SHARED_MANIFEST_RELEASE ${WINDOWS_INSTALL_FILES})
set(WINDOWS_CONFIGURATIONS "${WINDOWS_CONFIGURATIONS};Release;RelWithDebInfo;MinSizeRel")
endif()
install(FILES ${WINDOWS_INSTALL_FILES}
CONFIGURATIONS ${WINDOWS_CONFIGURATIONS}
DESTINATION "./blender.shared"
)
if(NOT WITH_PYTHON_MODULE)
# Blender executable with manifest.
if(WINDOWS_INSTALL_DEBUG)
list(APPEND WINDOWS_SHARED_MANIFEST_DEBUG ${WINDOWS_INSTALL_FILES})
endif()
if(WINDOWS_INSTALL_RELEASE)
list(APPEND WINDOWS_SHARED_MANIFEST_RELEASE ${WINDOWS_INSTALL_FILES})
endif()
install(FILES ${WINDOWS_INSTALL_FILES}
CONFIGURATIONS ${WINDOWS_CONFIGURATIONS}
DESTINATION "./blender.shared"
)
else()
# Python module without manifest.
install(FILES ${WINDOWS_INSTALL_FILES}
CONFIGURATIONS ${WINDOWS_CONFIGURATIONS}
DESTINATION "./bpy"
)
endif()
endmacro()
macro(windows_generate_manifest)
@ -1326,24 +1339,28 @@ macro(windows_generate_manifest)
endmacro()
macro(windows_generate_shared_manifest)
windows_generate_manifest(
FILES "${WINDOWS_SHARED_MANIFEST_DEBUG}"
OUTPUT "${CMAKE_BINARY_DIR}/Debug/blender.shared.manifest"
NAME "blender.shared"
)
windows_generate_manifest(
FILES "${WINDOWS_SHARED_MANIFEST_RELEASE}"
OUTPUT "${CMAKE_BINARY_DIR}/Release/blender.shared.manifest"
NAME "blender.shared"
)
install(
FILES ${CMAKE_BINARY_DIR}/Release/blender.shared.manifest
DESTINATION "./blender.shared"
CONFIGURATIONS Release;RelWithDebInfo;MinSizeRel
)
install(
FILES ${CMAKE_BINARY_DIR}/Debug/blender.shared.manifest
DESTINATION "./blender.shared"
CONFIGURATIONS Debug
)
if(WINDOWS_SHARED_MANIFEST_DEBUG)
windows_generate_manifest(
FILES "${WINDOWS_SHARED_MANIFEST_DEBUG}"
OUTPUT "${CMAKE_BINARY_DIR}/Debug/blender.shared.manifest"
NAME "blender.shared"
)
install(
FILES ${CMAKE_BINARY_DIR}/Debug/blender.shared.manifest
DESTINATION "./blender.shared"
CONFIGURATIONS Debug
)
endif()
if(WINDOWS_SHARED_MANIFEST_RELEASE)
windows_generate_manifest(
FILES "${WINDOWS_SHARED_MANIFEST_RELEASE}"
OUTPUT "${CMAKE_BINARY_DIR}/Release/blender.shared.manifest"
NAME "blender.shared"
)
install(
FILES ${CMAKE_BINARY_DIR}/Release/blender.shared.manifest
DESTINATION "./blender.shared"
CONFIGURATIONS Release;RelWithDebInfo;MinSizeRel
)
endif()
endmacro()

View File

@ -114,12 +114,13 @@ add_definitions(-D_WIN32_WINNT=0x603)
# First generate the manifest for tests since it will not need the dependency on the CRT.
configure_file(${CMAKE_SOURCE_DIR}/release/windows/manifest/blender.exe.manifest.in ${CMAKE_CURRENT_BINARY_DIR}/tests.exe.manifest @ONLY)
if(WITH_WINDOWS_BUNDLE_CRT)
set(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP TRUE)
set(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
set(CMAKE_INSTALL_OPENMP_LIBRARIES ${WITH_OPENMP})
include(InstallRequiredSystemLibraries)
# Always detect CRT paths, but only manually install with WITH_WINDOWS_BUNDLE_CRT.
set(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP TRUE)
set(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
set(CMAKE_INSTALL_OPENMP_LIBRARIES ${WITH_OPENMP})
include(InstallRequiredSystemLibraries)
if(WITH_WINDOWS_BUNDLE_CRT)
# ucrtbase(d).dll cannot be in the manifest, due to the way windows 10 handles
# redirects for this dll, for details see #88813.
foreach(lib ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS})
@ -141,7 +142,9 @@ if(WITH_WINDOWS_BUNDLE_CRT)
install(FILES ${CMAKE_BINARY_DIR}/blender.crt.manifest DESTINATION ./blender.crt)
set(BUNDLECRT "<dependency><dependentAssembly><assemblyIdentity type=\"win32\" name=\"blender.crt\" version=\"1.0.0.0\" /></dependentAssembly></dependency>")
endif()
set(BUNDLECRT "${BUNDLECRT}<dependency><dependentAssembly><assemblyIdentity type=\"win32\" name=\"blender.shared\" version=\"1.0.0.0\" /></dependentAssembly></dependency>")
if(NOT WITH_PYTHON_MODULE)
set(BUNDLECRT "${BUNDLECRT}<dependency><dependentAssembly><assemblyIdentity type=\"win32\" name=\"blender.shared\" version=\"1.0.0.0\" /></dependentAssembly></dependency>")
endif()
configure_file(${CMAKE_SOURCE_DIR}/release/windows/manifest/blender.exe.manifest.in ${CMAKE_CURRENT_BINARY_DIR}/blender.exe.manifest @ONLY)

View File

@ -231,3 +231,22 @@ index 355ee008246..a770bbee60c 100644
}
allocator.deallocate(values, capacity);
capacity = 0;
diff --git a/extern/quadriflow/src/hierarchy.cpp b/extern/quadriflow/src/hierarchy.cpp
index 8cc41da23d0..70a9628320f 100644
--- a/extern/quadriflow/src/hierarchy.cpp
+++ b/extern/quadriflow/src/hierarchy.cpp
@@ -269,7 +269,13 @@ void Hierarchy::DownsampleGraph(const AdjacentMatrix adj, const MatrixXd& V, con
for (auto it = ad.begin(); it != ad.end(); ++it, ++entry_it) {
int k = it->id;
double dp = N.col(i).dot(N.col(k));
- double ratio = A[i] > A[k] ? (A[i] / A[k]) : (A[k] / A[i]);
+ double ratio;
+ if (A[i] > A[k]) {
+ ratio = (A[k] == 0.0f) ? 1.0f : A[i] / A[k];
+ }
+ else {
+ ratio = (A[i] == 0.0f) ? 1.0f : A[k] / A[i];
+ }
*entry_it = Entry(i, k, dp * ratio);
}
}

View File

@ -269,7 +269,13 @@ void Hierarchy::DownsampleGraph(const AdjacentMatrix adj, const MatrixXd& V, con
for (auto it = ad.begin(); it != ad.end(); ++it, ++entry_it) {
int k = it->id;
double dp = N.col(i).dot(N.col(k));
double ratio = A[i] > A[k] ? (A[i] / A[k]) : (A[k] / A[i]);
double ratio;
if (A[i] > A[k]) {
ratio = (A[k] == 0.0f) ? 1.0f : A[i] / A[k];
}
else {
ratio = (A[i] == 0.0f) ? 1.0f : A[k] / A[i];
}
*entry_it = Entry(i, k, dp * ratio);
}
}
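For reference, a standalone sketch of the guard this hunk introduces (not part of the patch; safe_area_ratio and the test values are hypothetical): when either face area is zero, the ratio now falls back to 1.0 instead of dividing by zero and propagating an infinite weight.

#include <cassert>

/* Same branch structure as the hunk above: divide the larger area by the
 * smaller one, but treat a zero denominator as a neutral ratio of 1.0. */
static double safe_area_ratio(double area_i, double area_k)
{
  if (area_i > area_k) {
    return (area_k == 0.0) ? 1.0 : area_i / area_k;
  }
  return (area_i == 0.0) ? 1.0 : area_k / area_i;
}

int main()
{
  assert(safe_area_ratio(4.0, 2.0) == 2.0);
  assert(safe_area_ratio(2.0, 4.0) == 2.0);
  /* Degenerate face: previously this divided by zero. */
  assert(safe_area_ratio(3.0, 0.0) == 1.0);
  return 0;
}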

View File

@ -619,7 +619,12 @@ ccl_device_forceinline void volume_integrate_heterogeneous(
const Spectrum emission = volume_emission_integrate(
&coeff, closure_flag, transmittance, dt);
accum_emission += result.indirect_throughput * emission;
guiding_record_volume_emission(kg, state, emission);
# if OPENPGL_VERSION_MINOR < 5 // WORKAROUND #104329
if (kernel_data.integrator.max_volume_bounce > 1)
# endif
{
guiding_record_volume_emission(kg, state, emission);
}
}
}
@ -961,9 +966,13 @@ ccl_device_forceinline bool integrate_volume_phase_scatter(
const Spectrum phase_weight = bsdf_eval_sum(&phase_eval) / phase_pdf;
/* Add phase function sampling data to the path segment. */
guiding_record_volume_bounce(
kg, state, sd, phase_weight, phase_pdf, normalize(phase_wo), sampled_roughness);
# if OPENPGL_VERSION_MINOR < 5 // WORKAROUND #104329
if (kernel_data.integrator.max_volume_bounce > 1)
# endif
{
guiding_record_volume_bounce(
kg, state, sd, phase_weight, phase_pdf, normalize(phase_wo), sampled_roughness);
}
/* Update throughput. */
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
const Spectrum throughput_phase = throughput * phase_weight;
@ -1058,7 +1067,11 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
const float3 direct_P = ray->P + result.direct_t * ray->D;
# ifdef __PATH_GUIDING__
# if OPENPGL_VERSION_MINOR < 5 // WORKAROUND #104329
if (kernel_data.integrator.use_guiding && kernel_data.integrator.max_volume_bounce > 1) {
# else
if (kernel_data.integrator.use_guiding) {
# endif
# if PATH_GUIDING_LEVEL >= 1
if (result.direct_sample_method == VOLUME_SAMPLE_DISTANCE) {
/* If the direct scatter event is generated using VOLUME_SAMPLE_DISTANCE the direct event
@ -1131,7 +1144,12 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
# if defined(__PATH_GUIDING__)
# if PATH_GUIDING_LEVEL >= 1
if (!guiding_generated_new_segment) {
guiding_record_volume_segment(kg, state, sd.P, sd.wi);
# if OPENPGL_VERSION_MINOR < 5 // WORKAROUND #104329
if (kernel_data.integrator.max_volume_bounce > 1)
# endif
{
guiding_record_volume_segment(kg, state, sd.P, sd.wi);
}
}
# endif
# if PATH_GUIDING_LEVEL >= 4
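The hunks above all follow the same conditional-compilation idiom. Below is a self-contained sketch of that shape (not Cycles code; record_volume_event and max_volume_bounce are stand-ins): on OpenPGL versions before 0.5 a runtime check guards the brace block, while on newer versions the preprocessor removes the `if` and the block always runs.

#include <cstdio>

/* Stand-in for the value normally provided by the OpenPGL headers. */
#define OPENPGL_VERSION_MINOR 4

static void record_volume_event(const char *what)
{
  std::printf("guiding: %s\n", what);
}

int main()
{
  const int max_volume_bounce = 1;

#if OPENPGL_VERSION_MINOR < 5 /* WORKAROUND #104329 */
  if (max_volume_bounce > 1)
#endif
  {
    /* With the workaround active and a bounce limit of 1, the guiding record
     * is skipped; on OpenPGL >= 0.5 this block would always execute. */
    record_volume_event("volume emission");
  }
  return 0;
}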

View File

@ -179,8 +179,8 @@ ccl_device_inline void surface_shader_validate_bsdf_sample(const KernelGlobals k
const float2 org_roughness,
const float org_eta)
{
/* Validate the the bsdf_label and bsdf_roughness_eta functions
* by estimating the values after a bsdf sample. */
/* Validate the #bsdf_label and #bsdf_roughness_eta functions
* by estimating the values after a BSDF sample. */
const int comp_label = bsdf_label(kg, sc, wo);
kernel_assert(org_label == comp_label);

View File

@ -569,10 +569,10 @@ void LightManager::device_update_tree(Device *,
* To do so, we repeatedly move to the left child of the current node until we reach the leftmost
* descendant, while keeping track of the right child of each node we visited by storing the
* pointer in the `right_node_stack`.
* Once finished visiting the left subtree, we retrieve the the last stored pointer from
* Once finished visiting the left subtree, we retrieve the last stored pointer from
* `right_node_stack`, assign it to its parent (retrieved from `left_index_stack`), and repeat
* the process from there. */
int left_index_stack[32]; /* sizeof(bit_trail) * 8 == 32 */
int left_index_stack[32]; /* `sizeof(bit_trail) * 8 == 32`. */
LightTreeNode *right_node_stack[32];
int stack_id = 0;
const LightTreeNode *node = light_tree.get_root();
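As an aside, here is a generic sketch of the stack-based walk the comment above describes (hypothetical Node type and visit_leaves function, not the Cycles implementation, and assuming a full binary tree where every inner node has two children): descend left while stacking right children, then resume from the most recently stacked subtree. The real flattening code additionally records parent indices in `left_index_stack` so each popped right child can be linked back to its parent.

#include <array>
#include <cstdio>

struct Node {
  int value;
  Node *left = nullptr;
  Node *right = nullptr;
};

static void visit_leaves(Node *root)
{
  std::array<Node *, 32> right_node_stack; /* Fixed depth, as above. */
  int stack_id = 0;
  Node *node = root;
  while (node) {
    /* Walk to the leftmost descendant, remembering each right child. */
    while (node->left) {
      right_node_stack[stack_id++] = node->right;
      node = node->left;
    }
    std::printf("leaf %d\n", node->value);
    /* Resume from the last right subtree that was set aside, if any. */
    node = (stack_id > 0) ? right_node_stack[--stack_id] : nullptr;
  }
}

int main()
{
  Node d{3}, e{4};
  Node b{1}, c{2, &d, &e};
  Node a{0, &b, &c};
  visit_leaves(&a); /* Prints leaves 1, 3, 4 in left-to-right order. */
  return 0;
}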

View File

@ -1198,7 +1198,7 @@ int GHOST_XrGetControllerModelData(GHOST_XrContextHandle xr_context,
* Get Vulkan handles for the given context.
*
* These handles are the same for a given context.
* Should should only be called when using a Vulkan context.
* Should only be called when using a Vulkan context.
* Other contexts will not return any handles and leave the
* handles where the parameters are referring to unmodified.
*
@ -1234,7 +1234,7 @@ void GHOST_GetVulkanHandles(GHOST_ContextHandle context,
* At the start of each frame the correct command buffer should be
* retrieved with this function.
*
* Should should only be called when using a Vulkan context.
* Should only be called when using a Vulkan context.
* Other contexts will not return any handles and leave the
* handles where the parameters are referring to unmodified.
*
@ -1251,7 +1251,7 @@ void GHOST_GetVulkanCommandBuffer(GHOST_ContextHandle context, void *r_command_b
* Gets the Vulkan back-buffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the back-buffer will change.
*
* Should should only be called when using a Vulkan context with an active swap chain.
* Should only be called when using a Vulkan context with an active swap chain.
* Other contexts will not return any handles and leave the
* handles where the parameters are referring to unmodified.
*

View File

@ -44,7 +44,7 @@ class GHOST_IContext {
* Get Vulkan handles for the given context.
*
* These handles are the same for a given context.
* Should should only be called when using a Vulkan context.
* Should only be called when using a Vulkan context.
* Other contexts will not return any handles and leave the
* handles where the parameters are referring to unmodified.
*

View File

@ -139,7 +139,7 @@ class GHOST_Context : public GHOST_IContext {
* Get Vulkan handles for the given context.
*
* These handles are the same for a given context.
* Should should only be called when using a Vulkan context.
* Should only be called when using a Vulkan context.
* Other contexts will not return any handles and leave the
* handles where the parameters are referring to unmodified.
*

View File

@ -114,7 +114,7 @@ struct GWL_Window {
*/
wl_fixed_t scale_fractional = 0;
/** A temporary token used for the window to be notified of of it's activation. */
/** A temporary token used for the window to be notified of it's activation. */
struct xdg_activation_token_v1 *xdg_activation_token = nullptr;
#ifdef WITH_GHOST_WAYLAND_LIBDECOR

View File

@ -174,7 +174,7 @@ class Rall2d
friend INLINE Rall2d<T,V,S> operator +(S s,const Rall2d<T,V,S>& v);
friend INLINE Rall2d<T,V,S> operator +(const Rall2d<T,V,S>& v,S s);
friend INLINE Rall2d<T,V,S> operator -(S s,const Rall2d<T,V,S>& v);
friend INLINE INLINE Rall2d<T,V,S> operator -(const Rall2d<T,V,S>& v,S s);
friend INLINE Rall2d<T,V,S> operator -(const Rall2d<T,V,S>& v,S s);
friend INLINE Rall2d<T,V,S> operator /(S s,const Rall2d<T,V,S>& v);
friend INLINE Rall2d<T,V,S> operator /(const Rall2d<T,V,S>& v,S s);

View File

@ -62,8 +62,8 @@ void delete_MEM_CacheLimiter(MEM_CacheLimiterC *This);
/**
* Manage object
*
* \param This: "This" pointer, data data object to manage.
* \return CacheLimiterHandle to ref, unref, touch the managed object
* \param This: "This" pointer, data object to manage.
* \return The handle to reference/unreference & touch the managed object.
*/
MEM_CacheLimiterHandleC *MEM_CacheLimiter_insert(MEM_CacheLimiterC *This, void *data);

View File

@ -398,7 +398,7 @@ Copyright Contributors to the OpenColorIO Project.
** OpenEXR; version 3.1.5 --
https://github.com/AcademySoftwareFoundation/openexr
Copyright Contributors to the OpenEXR Project. All rights reserved.
** OpenImageIO; version 2.4.6.0 -- http://www.openimageio.org
** OpenImageIO; version 2.4.9.0 -- http://www.openimageio.org
Copyright (c) 2008-present by Contributors to the OpenImageIO project. All
Rights Reserved.
** Pystring; version 1.1.3 -- https://github.com/imageworks/pystring

View File

@ -318,9 +318,9 @@ class NODE_MT_node(Menu):
layout.separator()
layout.operator("node.clipboard_copy", text="Copy")
row = layout.row()
row.operator_context = 'EXEC_DEFAULT'
row.operator("node.clipboard_paste", text="Paste")
layout.operator_context = 'EXEC_DEFAULT'
layout.operator("node.clipboard_paste", text="Paste")
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("node.duplicate_move")
layout.operator("node.duplicate_move_linked")
layout.operator("node.delete")

View File

@ -29,9 +29,10 @@ using namespace blender::asset_system;
bool asset_system::AssetLibrary::save_catalogs_when_file_is_saved = true;
/* Can probably removed once #WITH_DESTROY_VIA_LOAD_HANDLER gets enabled by default. */
void AS_asset_libraries_exit()
{
/* NOTE: Can probably removed once #WITH_DESTROY_VIA_LOAD_HANDLER gets enabled by default. */
AssetLibraryService::destroy();
}
@ -42,12 +43,11 @@ asset_system::AssetLibrary *AS_asset_library_load(const Main *bmain,
return service->get_asset_library(bmain, library_reference);
}
/**
* Loading an asset library at this point only means loading the catalogs. Later on this should
* invoke reading of asset representations too.
*/
struct ::AssetLibrary *AS_asset_library_load(const char *library_path)
{
/* NOTE: Loading an asset library at this point only means loading the catalogs.
* Later on this should invoke reading of asset representations too. */
AssetLibraryService *service = AssetLibraryService::get();
asset_system::AssetLibrary *lib;
if (library_path == nullptr || library_path[0] == '\0') {

View File

@ -31,10 +31,6 @@ static void compare_item_with_path(const AssetCatalogPath &expected_path,
EXPECT_EQ(expected_parent_count, actual_item.count_parents());
}
/**
* Recursively iterate over all tree items using #AssetCatalogTree::foreach_item() and check if
* the items map exactly to \a expected_paths.
*/
void AssetCatalogTreeTestFunctions::expect_tree_items(
AssetCatalogTree *tree, const std::vector<AssetCatalogPath> &expected_paths)
{
@ -47,11 +43,6 @@ void AssetCatalogTreeTestFunctions::expect_tree_items(
});
}
/**
* Iterate over the root items of \a tree and check if the items map exactly to \a
* expected_paths. Similar to #assert_expected_tree_items() but calls
* #AssetCatalogTree::foreach_root_item() instead of #AssetCatalogTree::foreach_item().
*/
void AssetCatalogTreeTestFunctions::expect_tree_root_items(
AssetCatalogTree *tree, const std::vector<AssetCatalogPath> &expected_paths)
{
@ -65,11 +56,6 @@ void AssetCatalogTreeTestFunctions::expect_tree_root_items(
});
}
/**
* Iterate over the child items of \a parent_item and check if the items map exactly to \a
* expected_paths. Similar to #assert_expected_tree_items() but calls
* #AssetCatalogTreeItem::foreach_child() instead of #AssetCatalogTree::foreach_item().
*/
void AssetCatalogTreeTestFunctions::expect_tree_item_child_items(
AssetCatalogTreeItem *parent_item, const std::vector<AssetCatalogPath> &expected_paths)
{

View File

@ -41,7 +41,6 @@
} \
((void)0)
/* Font array. */
FontBLF *global_font[BLF_MAX_FONT] = {NULL};
/* XXX: should these be made into global_font_'s too? */

View File

@ -1348,9 +1348,6 @@ static void blf_font_fill(FontBLF *font)
font->buf_info.col_init[3] = 0;
}
/**
* Create an FT_Face for this font if not already existing.
*/
bool blf_ensure_face(FontBLF *font)
{
if (font->face) {
@ -1506,11 +1503,6 @@ static const struct FaceDetails static_face_details[] = {
{"NotoSansThai-VariableFont_wdth,wght.woff2", TT_UCR_THAI, 0, 0, 0},
};
/**
* Create a new font from filename OR memory pointer.
* For normal operation pass NULL as FT_Library object. Pass a custom FT_Library if you
* want to use the font without its lifetime being managed by the FreeType cache subsystem.
*/
FontBLF *blf_font_new_ex(const char *name,
const char *filepath,
const uchar *mem,

View File

@ -13,24 +13,28 @@ struct GlyphCacheBLF;
struct ResultBLF;
struct rcti;
/* Max number of FontBLFs in memory. Take care that every font has a glyph cache per size/dpi,
* so we don't need load the same font with different size, just load one and call BLF_size. */
/**
* Max number of FontBLFs in memory. Take care that every font has a glyph cache per size/dpi,
* so we don't need load the same font with different size, just load one and call #BLF_size.
*/
#define BLF_MAX_FONT 64
/* Maximum number of opened FT_Face objects managed by cache. 0 is default of 2. */
/** Maximum number of opened FT_Face objects managed by cache. 0 is default of 2. */
#define BLF_CACHE_MAX_FACES 4
/* Maximum number of opened FT_Size objects managed by cache. 0 is default of 4 */
/** Maximum number of opened FT_Size objects managed by cache. 0 is default of 4 */
#define BLF_CACHE_MAX_SIZES 8
/* Maximum number of bytes to use for cached data nodes. 0 is default of 200,000. */
/** Maximum number of bytes to use for cached data nodes. 0 is default of 200,000. */
#define BLF_CACHE_BYTES 400000
/* We assume square pixels at a fixed DPI of 72, scaling only the size. Therefore
/**
* We assume square pixels at a fixed DPI of 72, scaling only the size. Therefore
* font size = points = pixels, i.e. a size of 20 will result in a 20-pixel EM square.
* Although we could use the actual monitor DPI instead, we would then have to scale
* the size to cancel that out. Other libraries like Skia use this same fixed value.
*/
#define BLF_DPI 72
/** Font array. */
extern struct FontBLF *global_font[BLF_MAX_FONT];
void blf_batch_draw_begin(struct FontBLF *font);
@ -45,7 +49,7 @@ char *blf_dir_search(const char *file);
* in general, the extension of the file is: `.afm` or `.pfm`
*/
char *blf_dir_metrics_search(const char *filepath);
/* int blf_dir_split(const char *str, char *file, int *size); */ /* UNUSED */
// int blf_dir_split(const char *str, char *file, int *size); /*UNUSED*/
int blf_font_init(void);
void blf_font_exit(void);
@ -57,12 +61,20 @@ bool blf_font_id_is_valid(int fontid);
*/
uint blf_get_char_index(struct FontBLF *font, uint charcode);
/**
* Create an FT_Face for this font if not already existing.
*/
bool blf_ensure_face(struct FontBLF *font);
void blf_ensure_size(struct FontBLF *font);
void blf_draw_buffer__start(struct FontBLF *font);
void blf_draw_buffer__end(void);
/**
* Create a new font from filename OR memory pointer.
* For normal operation pass NULL as FT_Library object. Pass a custom FT_Library if you
* want to use the font without its lifetime being managed by the FreeType cache subsystem.
*/
struct FontBLF *blf_font_new_ex(const char *name,
const char *filepath,
const unsigned char *mem,

View File

@ -4,9 +4,9 @@
#include <atomic>
#include "BLI_implicit_sharing_ptr.hh"
#include "BLI_set.hh"
#include "BLI_string_ref.hh"
#include "BLI_user_counter.hh"
namespace blender::bke {
@ -32,10 +32,7 @@ namespace blender::bke {
* because that is not available in C code. If possible, the #AutoAnonymousAttributeID wrapper
* should be used to avoid manual reference counting in C++ code.
*/
class AnonymousAttributeID {
private:
mutable std::atomic<int> users_ = 1;
class AnonymousAttributeID : public ImplicitSharingMixin {
protected:
std::string name_;
@ -49,22 +46,15 @@ class AnonymousAttributeID {
virtual std::string user_name() const;
void user_add() const
private:
void delete_self() override
{
users_.fetch_add(1);
}
void user_remove() const
{
const int new_users = users_.fetch_sub(1) - 1;
if (new_users == 0) {
MEM_delete(this);
}
MEM_delete(this);
}
};
/** Wrapper for #AnonymousAttributeID that avoids manual reference counting. */
using AutoAnonymousAttributeID = UserCounter<const AnonymousAttributeID>;
using AutoAnonymousAttributeID = ImplicitSharingPtr<const AnonymousAttributeID>;
/**
* A set of anonymous attribute names that is passed around in geometry nodes.

View File

@ -4,6 +4,8 @@
/** \file
* \ingroup bke
*
* This header encapsulates necessary code to build a BVH.
*/
#include "BLI_kdopbvh.h"
@ -19,10 +21,6 @@
extern "C" {
#endif
/**
* This header encapsulates necessary code to build a BVH
*/
struct BMEditMesh;
struct MFace;
struct Mesh;

View File

@ -12,8 +12,6 @@
#include "BLI_function_ref.hh"
#include "BLI_map.hh"
#include "BLI_math_vector_types.hh"
#include "BLI_user_counter.hh"
#include "BLI_vector_set.hh"
#include "BKE_attribute.hh"
@ -40,18 +38,13 @@ class CurvesEditHints;
class Instances;
} // namespace blender::bke
class GeometryComponent;
/**
* This is the base class for specialized geometry component types. A geometry component handles
* a user count to allow avoiding duplication when it is wrapped with #UserCounter. It also handles
* the attribute API, which generalizes storing and modifying generic information on a geometry.
* This is the base class for specialized geometry component types. A geometry component uses
* implicit sharing to avoid read-only copies. It also integrates with attribute API, which
* generalizes storing and modifying generic information on a geometry.
*/
class GeometryComponent {
class GeometryComponent : public blender::ImplicitSharingMixin {
private:
/* The reference count has two purposes. When it becomes zero, the component is freed. When it is
* larger than one, the component becomes immutable. */
mutable std::atomic<int> users_ = 1;
GeometryComponentType type_;
public:
@ -77,13 +70,12 @@ class GeometryComponent {
virtual bool owns_direct_data() const = 0;
virtual void ensure_owns_direct_data() = 0;
void user_add() const;
void user_remove() const;
bool is_mutable() const;
GeometryComponentType type() const;
virtual bool is_empty() const;
private:
void delete_self() override;
};
template<typename T>
@ -109,7 +101,7 @@ inline constexpr bool is_geometry_component_v = std::is_base_of_v<GeometryCompon
*/
struct GeometrySet {
private:
using GeometryComponentPtr = blender::UserCounter<class GeometryComponent>;
using GeometryComponentPtr = blender::ImplicitSharingPtr<class GeometryComponent>;
/* Indexed by #GeometryComponentType. */
std::array<GeometryComponentPtr, GEO_COMPONENT_TYPE_ENUM_SIZE> components_;

View File

@ -679,6 +679,11 @@ extern void (*BKE_gpencil_batch_cache_free_cb)(struct bGPdata *gpd);
void BKE_gpencil_frame_original_pointers_update(const struct bGPDframe *gpf_orig,
const struct bGPDframe *gpf_eval);
/**
* Update original pointers in evaluated layer.
* \param gpl_orig: Original grease-pencil layer.
* \param gpl_eval: Evaluated grease pencil layer.
*/
void BKE_gpencil_layer_original_pointers_update(const struct bGPDlayer *gpl_orig,
const struct bGPDlayer *gpl_eval);
/**
@ -728,6 +733,12 @@ void BKE_gpencil_blend_read_data(struct BlendDataReader *reader, struct bGPdata
bool BKE_gpencil_can_avoid_full_copy_on_write(const struct Depsgraph *depsgraph,
struct bGPdata *gpd);
/**
* Update the geometry of the evaluated bGPdata.
* This function will:
* 1) Copy the original data over to the evaluated object.
* 2) Update the original pointers in the runtime structs.
*/
void BKE_gpencil_update_on_write(struct bGPdata *gpd_orig, struct bGPdata *gpd_eval);
#ifdef __cplusplus

View File

@ -329,7 +329,7 @@ int BKE_ptcache_mem_pointers_seek(int point_index,
void *cur[BPHYS_TOT_DATA]);
/**
* Main cache reading call.
* Main cache reading call which reads cache from disk or memory.
* Possible to get old or interpolated result.
*/
int BKE_ptcache_read(PTCacheID *pid, float cfra, bool no_extrapolate_old);
@ -341,11 +341,12 @@ int BKE_ptcache_read(PTCacheID *pid, float cfra, bool no_extrapolate_old);
int BKE_ptcache_write(PTCacheID *pid, unsigned int cfra);
/******************* Allocate & free ***************/
struct PointCache *BKE_ptcache_add(struct ListBase *ptcaches);
void BKE_ptcache_free_mem(struct ListBase *mem_cache);
void BKE_ptcache_free(struct PointCache *cache);
void BKE_ptcache_free_list(struct ListBase *ptcaches);
/* returns first point cache */
/** Returns first point cache. */
struct PointCache *BKE_ptcache_copy_list(struct ListBase *ptcaches_new,
const struct ListBase *ptcaches_old,
int flag);

View File

@ -559,6 +559,7 @@ bool BKE_screen_area_map_blend_read_data(struct BlendDataReader *reader,
struct ScrAreaMap *area_map);
/**
* And as patch for 2.48 and older.
* For the saved 2.50 files without `regiondata`.
*/
void BKE_screen_view3d_do_versions_250(struct View3D *v3d, ListBase *regions);
void BKE_screen_area_blend_read_lib(struct BlendLibReader *reader,

View File

@ -26,7 +26,9 @@ static int kHashSizes[] = {
1048583, 2097169, 4194319, 8388617, 16777259, 33554467, 67108879, 134217757, 268435459,
};
/* Generic hash functions. */
/* -------------------------------------------------------------------- */
/** \name Generic Hash Functions
* \{ */
EHash *ccg_ehash_new(int estimatedNumEntries,
CCGAllocatorIFC *allocatorIFC,
@ -128,7 +130,11 @@ void *ccg_ehash_lookup(EHash *eh, void *key)
return entry;
}
/* Hash elements iteration. */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Hash Elements Iteration
* \{ */
void ccg_ehashIterator_init(EHash *eh, EHashIterator *ehi)
{
@ -169,9 +175,11 @@ int ccg_ehashIterator_isStopped(EHashIterator *ehi)
return !ehi->curEntry;
}
/**
* Standard allocator implementation.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Standard allocator implementation
* \{ */
static void *_stdAllocator_alloc(CCGAllocatorHDL UNUSED(a), int numBytes)
{
@ -203,9 +211,11 @@ CCGAllocatorIFC *ccg_getStandardAllocatorIFC(void)
return &ifc;
}
/**
* Catmull-Clark Gridding Subdivision Surface.
*/
/** \} */
/* -------------------------------------------------------------------- */
/** \name * Catmull-Clark Gridding Subdivision Surface
* \{ */
#ifdef DUMP_RESULT_GRIDS
void ccgSubSurf__dumpCoords(CCGSubSurf *ss)
@ -303,3 +313,5 @@ void ccgSubSurf__dumpCoords(CCGSubSurf *ss)
}
}
#endif /* DUMP_RESULT_GRIDS */
/** \} */

View File

@ -5531,6 +5531,7 @@ void BKE_curve_eval_geometry(Depsgraph *depsgraph, Curve *curve)
}
/* Draw Engine */
void (*BKE_curve_batch_cache_dirty_tag_cb)(Curve *cu, int mode) = nullptr;
void (*BKE_curve_batch_cache_free_cb)(Curve *cu) = nullptr;

View File

@ -22,9 +22,9 @@ int calculate_evaluated_num(const int points_num, const bool cyclic, const int r
return eval_num + 1;
}
/* Adapted from Cycles #catmull_rom_basis_eval function. */
void calculate_basis(const float parameter, float4 &r_weights)
{
/* Adapted from Cycles #catmull_rom_basis_eval function. */
const float t = parameter;
const float s = 1.0f - parameter;
r_weights[0] = -t * s * s;

View File

@ -2273,7 +2273,7 @@ bool CustomData_merge(const CustomData *source,
layer->anonymous_id = nullptr;
}
else {
layer->anonymous_id->user_add();
layer->anonymous_id->add_user();
}
}
if (alloctype == CD_ASSIGN) {
@ -2365,7 +2365,7 @@ static void customData_free_layer__internal(CustomDataLayer *layer, const int to
const LayerTypeInfo *typeInfo;
if (layer->anonymous_id != nullptr) {
layer->anonymous_id->user_remove();
layer->anonymous_id->remove_user_and_delete_if_last();
layer->anonymous_id = nullptr;
}
if (!(layer->flag & CD_FLAG_NOFREE) && layer->data) {
@ -2956,7 +2956,7 @@ void *CustomData_add_layer_anonymous(CustomData *data,
return nullptr;
}
anonymous_id->user_add();
anonymous_id->add_user();
layer->anonymous_id = anonymous_id;
return layer->data;
}

View File

@ -84,26 +84,6 @@ std::optional<blender::bke::MutableAttributeAccessor> GeometryComponent::attribu
return std::nullopt;
}
void GeometryComponent::user_add() const
{
users_.fetch_add(1);
}
void GeometryComponent::user_remove() const
{
const int new_users = users_.fetch_sub(1) - 1;
if (new_users == 0) {
delete this;
}
}
bool GeometryComponent::is_mutable() const
{
/* If the item is shared, it is read-only. */
/* The user count can be 0, when this is called from the destructor. */
return users_ <= 1;
}
GeometryComponentType GeometryComponent::type() const
{
return type_;
@ -114,6 +94,11 @@ bool GeometryComponent::is_empty() const
return false;
}
void GeometryComponent::delete_self()
{
delete this;
}
/** \} */
/* -------------------------------------------------------------------- */
@ -198,7 +183,7 @@ void GeometrySet::remove_geometry_during_modify()
void GeometrySet::add(const GeometryComponent &component)
{
BLI_assert(!components_[component.type()]);
component.user_add();
component.add_user();
components_[component.type()] = const_cast<GeometryComponent *>(&component);
}

View File

@ -1818,6 +1818,7 @@ Material *BKE_gpencil_object_material_ensure_active(Object *ob)
}
/* ************************************************** */
bool BKE_gpencil_stroke_select_check(const bGPDstroke *gps)
{
const bGPDspoint *pt;
@ -2649,11 +2650,6 @@ void BKE_gpencil_frame_original_pointers_update(const struct bGPDframe *gpf_orig
}
}
/**
* Update original pointers in evaluated layer.
* \param gpl_orig: Original grease-pencil layer.
* \param gpl_eval: Evaluated grease pencil layer.
*/
void BKE_gpencil_layer_original_pointers_update(const struct bGPDlayer *gpl_orig,
const struct bGPDlayer *gpl_eval)
{
@ -2686,11 +2682,6 @@ void BKE_gpencil_data_update_orig_pointers(const bGPdata *gpd_orig, const bGPdat
}
}
/**
* Update pointers of eval data to original data to keep references.
* \param ob_orig: Original grease pencil object
* \param ob_eval: Evaluated grease pencil object
*/
void BKE_gpencil_update_orig_pointers(const Object *ob_orig, const Object *ob_eval)
{
BKE_gpencil_data_update_orig_pointers((bGPdata *)ob_orig->data, (bGPdata *)ob_eval->data);
@ -3014,12 +3005,6 @@ static bool gpencil_update_on_write_stroke_cb(GPencilUpdateCache *gps_cache, voi
return false;
}
/**
* Update the geometry of the evaluated bGPdata.
* This function will:
* 1) Copy the original data over to the evaluated object.
* 2) Update the original pointers in the runtime structs.
*/
void BKE_gpencil_update_on_write(bGPdata *gpd_orig, bGPdata *gpd_eval)
{
GPencilUpdateCache *update_cache = gpd_orig->runtime.update_cache;

View File

@ -3625,10 +3625,11 @@ void BKE_image_set_filepath_from_tile_number(char *filepath,
}
}
/* if layer or pass changes, we need an index for the imbufs list */
/* note it is called for rendered results, but it doesn't use the index! */
RenderPass *BKE_image_multilayer_index(RenderResult *rr, ImageUser *iuser)
{
/* If layer or pass changes, we need an index for the imbufs list. */
/* NOTE: it is called for rendered results, but it doesn't use the index! */
RenderLayer *rl;
RenderPass *rpass = nullptr;
@ -3679,10 +3680,11 @@ void BKE_image_multiview_index(const Image *ima, ImageUser *iuser)
}
}
/* if layer or pass changes, we need an index for the imbufs list */
/* note it is called for rendered results, but it doesn't use the index! */
bool BKE_image_is_multilayer(const Image *ima)
{
/* If layer or pass changes, we need an index for the imbufs list. */
/* NOTE: it is called for rendered results, but it doesn't use the index! */
if (ELEM(ima->source, IMA_SRC_FILE, IMA_SRC_SEQUENCE, IMA_SRC_TILED)) {
if (ima->type == IMA_TYPE_MULTILAYER) {
return true;

View File

@ -763,6 +763,7 @@ void BKE_lattice_eval_geometry(struct Depsgraph *UNUSED(depsgraph), Lattice *UNU
}
/* Draw Engine */
void (*BKE_lattice_batch_cache_dirty_tag_cb)(Lattice *lt, int mode) = NULL;
void (*BKE_lattice_batch_cache_free_cb)(Lattice *lt) = NULL;

View File

@ -149,9 +149,9 @@ static ScanFillVert *scanfill_vert_add_v2_with_depth(ScanFillContext *sf_ctx,
* each #MaskRasterLayer does its own lookup which contributes to
* the final pixel with its own blending mode and the final pixel
* is blended between these.
*
* \note internal use only.
*/
/* internal use only */
typedef struct MaskRasterLayer {
/* geometry */
uint face_tot;

View File

@ -281,6 +281,7 @@ eMeshWrapperType BKE_mesh_wrapper_type(const struct Mesh *mesh)
* \{ */
/* Draw Engine */
void (*BKE_mesh_batch_cache_dirty_tag_cb)(Mesh *me, eMeshBatchDirtyMode mode) = nullptr;
void (*BKE_mesh_batch_cache_free_cb)(void *batch_cache) = nullptr;

View File

@ -5305,6 +5305,7 @@ void psys_apply_hair_lattice(Depsgraph *depsgraph, Scene *scene, Object *ob, Par
}
/* Draw Engine */
void (*BKE_particle_batch_cache_dirty_tag_cb)(ParticleSystem *psys, int mode) = nullptr;
void (*BKE_particle_batch_cache_free_cb)(ParticleSystem *psys) = nullptr;

View File

@ -1331,9 +1331,6 @@ bool UVPrimitive::has_shared_edge(const MeshData &mesh_data, const int primitive
return false;
}
/**
* Get the UVVertex in the order that the verts are ordered in the MeshPrimitive.
*/
const UVVertex *UVPrimitive::get_uv_vertex(const MeshData &mesh_data,
const uint8_t mesh_vert_index) const
{
@ -1350,10 +1347,6 @@ const UVVertex *UVPrimitive::get_uv_vertex(const MeshData &mesh_data,
return nullptr;
}
/**
* Get the UVEdge that share the given uv coordinates.
* Will assert when no UVEdge found.
*/
UVEdge *UVPrimitive::get_uv_edge(const float2 uv1, const float2 uv2) const
{
for (UVEdge *uv_edge : edges) {

View File

@ -2267,7 +2267,7 @@ static int ptcache_interpolate(PTCacheID *pid, float cfra, int cfra1, int cfra2)
return 1;
}
/* reads cache from disk or memory */
int BKE_ptcache_read(PTCacheID *pid, float cfra, bool no_extrapolate_old)
{
int cfrai = (int)floor(cfra), cfra1 = 0, cfra2 = 0;

View File

@ -1256,7 +1256,6 @@ static void direct_link_region(BlendDataReader *reader, ARegion *region, int spa
memset(&region->drawrct, 0, sizeof(region->drawrct));
}
/* for the saved 2.50 files without regiondata */
void BKE_screen_view3d_do_versions_250(View3D *v3d, ListBase *regions)
{
LISTBASE_FOREACH (ARegion *, region, regions) {

View File

@ -57,6 +57,10 @@ int BLI_delete(const char *file, bool dir, bool recursive) ATTR_NONNULL();
* \return zero on success (matching 'remove' behavior).
*/
int BLI_delete_soft(const char *file, const char **error_message) ATTR_NONNULL();
/**
* When `path` points to a directory, moves all its contents into `to`,
* else rename `path` itself to `to`.
*/
int BLI_path_move(const char *path, const char *to) ATTR_NONNULL();
#if 0 /* Unused */
int BLI_create_symlink(const char *path, const char *to) ATTR_NONNULL();

View File

@ -0,0 +1,109 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup bli
*/
#include <atomic>
#include "BLI_compiler_attrs.h"
#include "BLI_utildefines.h"
#include "BLI_utility_mixins.hh"
namespace blender {
/**
* #ImplicitSharingInfo is the core data structure for implicit sharing in Blender. Implicit
* sharing is a technique that avoids copying data when it is not necessary. This results in better
* memory usage and performance. Only read-only data can be shared, because otherwise multiple
* owners might want to change the data in conflicting ways.
*
* To determine whether data is shared, #ImplicitSharingInfo keeps a user count. If the count is 1,
* the data only has a single owner and is therefore mutable. If some code wants to modify data
* that is currently shared, it has to make a copy first.
* This behavior is also called "copy on write".
*
* In addition to containing the reference count, #ImplicitSharingInfo also knows how to destruct
* the referenced data. This is important because the code freeing the data in the end might not
* know how it was allocated (for example, it doesn't know whether an array was allocated using the
* system or guarded allocator).
*
* #ImplicitSharingInfo can be used in two ways:
* - It can be allocated separately from the referenced data. This is used when the shared data is
* e.g. a plain data array.
* - It can be embedded into another struct. For that it's best to use #ImplicitSharingMixin.
*/
class ImplicitSharingInfo : NonCopyable, NonMovable {
private:
mutable std::atomic<int> users_;
public:
ImplicitSharingInfo(const int initial_users) : users_(initial_users)
{
}
virtual ~ImplicitSharingInfo()
{
BLI_assert(this->is_mutable());
}
/** True if there are other const references to the resource, meaning it cannot be modified. */
bool is_shared() const
{
return users_.load(std::memory_order_relaxed) >= 2;
}
/** Whether the resource can be modified without a copy because there is only one owner. */
bool is_mutable() const
{
return !this->is_shared();
}
/** Call when the data has a new additional owner. */
void add_user() const
{
users_.fetch_add(1, std::memory_order_relaxed);
}
/**
* Call when the data is no longer needed. This might just decrement the user count, or it might
* also delete the data if this was the last user.
*/
void remove_user_and_delete_if_last() const
{
const int old_user_count = users_.fetch_sub(1, std::memory_order_acq_rel);
BLI_assert(old_user_count >= 1);
const bool was_last_user = old_user_count == 1;
if (was_last_user) {
const_cast<ImplicitSharingInfo *>(this)->delete_self_with_data();
}
}
private:
/** Has to free the #ImplicitSharingInfo and the referenced data. */
virtual void delete_self_with_data() = 0;
};
/**
* Makes it easy to embed implicit-sharing behavior into a struct. Structs that derive from this
* class can be used with #ImplicitSharingPtr.
*/
class ImplicitSharingMixin : public ImplicitSharingInfo {
public:
ImplicitSharingMixin() : ImplicitSharingInfo(1)
{
}
private:
void delete_self_with_data() override
{
/* Can't use `delete this` here, because we don't know what allocator was used. */
this->delete_self();
}
virtual void delete_self() = 0;
};
} // namespace blender
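A minimal sketch of the "allocated separately from the data" use case mentioned in the class comment (not part of this commit; ArraySharingInfo and example() are hypothetical): the sharing info owns a plain heap array and releases both the info and the array once the last user is removed.

#include "BLI_implicit_sharing.hh"

#include "MEM_guardedalloc.h"

class ArraySharingInfo : public blender::ImplicitSharingInfo {
 private:
  float *data_;

 public:
  ArraySharingInfo(float *data) : blender::ImplicitSharingInfo(1), data_(data)
  {
  }

 private:
  void delete_self_with_data() override
  {
    MEM_freeN(data_);
    MEM_delete(this);
  }
};

static void example()
{
  float *values = static_cast<float *>(MEM_malloc_arrayN(16, sizeof(float), __func__));
  const blender::ImplicitSharingInfo *sharing_info = MEM_new<ArraySharingInfo>(__func__, values);

  sharing_info->add_user();                       /* A second owner appears. */
  BLI_assert(sharing_info->is_shared());          /* Data is read-only for both. */
  sharing_info->remove_user_and_delete_if_last(); /* Back to a single owner. */
  BLI_assert(sharing_info->is_mutable());
  sharing_info->remove_user_and_delete_if_last(); /* Frees the array and the info. */
}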

View File

@ -6,59 +6,60 @@
* \ingroup bli
*/
#include <atomic>
#include "BLI_implicit_sharing.hh"
namespace blender {
/**
* A simple automatic reference counter. It is similar to std::shared_ptr, but expects that the
* reference count is inside the object.
* #ImplicitSharingPtr is a smart pointer that manages implicit sharing. It's designed to work with
* types that derive from #ImplicitSharingMixin. It is fairly similar to #std::shared_ptr but
* requires the reference count to be embedded in the data.
*/
template<typename T> class UserCounter {
template<typename T> class ImplicitSharingPtr {
private:
T *data_ = nullptr;
public:
UserCounter() = default;
ImplicitSharingPtr() = default;
UserCounter(T *data) : data_(data)
ImplicitSharingPtr(T *data) : data_(data)
{
}
UserCounter(const UserCounter &other) : data_(other.data_)
ImplicitSharingPtr(const ImplicitSharingPtr &other) : data_(other.data_)
{
this->user_add(data_);
this->add_user(data_);
}
UserCounter(UserCounter &&other) : data_(other.data_)
ImplicitSharingPtr(ImplicitSharingPtr &&other) : data_(other.data_)
{
other.data_ = nullptr;
}
~UserCounter()
~ImplicitSharingPtr()
{
this->user_remove(data_);
this->remove_user_and_delete_if_last(data_);
}
UserCounter &operator=(const UserCounter &other)
ImplicitSharingPtr &operator=(const ImplicitSharingPtr &other)
{
if (this == &other) {
return *this;
}
this->user_remove(data_);
this->remove_user_and_delete_if_last(data_);
data_ = other.data_;
this->user_add(data_);
this->add_user(data_);
return *this;
}
UserCounter &operator=(UserCounter &&other)
ImplicitSharingPtr &operator=(ImplicitSharingPtr &&other)
{
if (this == &other) {
return *this;
}
this->user_remove(data_);
this->remove_user_and_delete_if_last(data_);
data_ = other.data_;
other.data_ = nullptr;
return *this;
@ -112,7 +113,7 @@ template<typename T> class UserCounter {
void reset()
{
this->user_remove(data_);
this->remove_user_and_delete_if_last(data_);
data_ = nullptr;
}
@ -126,29 +127,23 @@ template<typename T> class UserCounter {
return get_default_hash(data_);
}
friend bool operator==(const UserCounter &a, const UserCounter &b)
friend bool operator==(const ImplicitSharingPtr &a, const ImplicitSharingPtr &b)
{
return a.data_ == b.data_;
}
friend std::ostream &operator<<(std::ostream &stream, const UserCounter &value)
{
stream << value.data_;
return stream;
}
private:
static void user_add(T *data)
static void add_user(T *data)
{
if (data != nullptr) {
data->user_add();
data->add_user();
}
}
static void user_remove(T *data)
static void remove_user_and_delete_if_last(T *data)
{
if (data != nullptr) {
data->user_remove();
data->remove_user_and_delete_if_last();
}
}
};
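One detail worth spelling out with a small hypothetical sketch (MyData and example() are not part of the patch): constructing an ImplicitSharingPtr from a raw pointer adopts the reference the caller already owns and does not call add_user, whereas copying the pointer registers an additional user.

#include "BLI_implicit_sharing_ptr.hh"

#include "MEM_guardedalloc.h"

class MyData : public blender::ImplicitSharingMixin {
 public:
  int value = 0;

 private:
  void delete_self() override
  {
    MEM_delete(this);
  }
};

static void example()
{
  /* #MEM_new starts the object with a single user; the pointer adopts it. */
  blender::ImplicitSharingPtr<MyData> a(MEM_new<MyData>(__func__));
  BLI_assert(a->is_mutable());

  {
    blender::ImplicitSharingPtr<MyData> b = a; /* Copy: now two users. */
    BLI_assert(a->is_shared());
  } /* b goes out of scope and removes its user again. */

  BLI_assert(a->is_mutable());
} /* a removes the last user; MyData::delete_self() frees the object. */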

View File

@ -689,11 +689,9 @@ MINLINE void clamp_v4_v4v4(float vec[4], const float min[4], const float max[4])
/* -------------------------------------------------------------------- */
/** \name Array Functions
* \{ */
/**
*
* Follow fixed length vector function conventions.
*/
* \{ */
double dot_vn_vn(const float *array_src_a,
const float *array_src_b,

View File

@ -30,7 +30,7 @@ template<typename T> class OffsetIndices {
BLI_assert(std::is_sorted(offsets_.begin(), offsets_.end()));
}
/** Return the total number of elements in the the referenced arrays. */
/** Return the total number of elements in the referenced arrays. */
T total_size() const
{
return offsets_.last();

View File

@ -646,6 +646,24 @@ class Set {
return !Intersects(a, b);
}
friend bool operator==(const Set &a, const Set &b)
{
if (a.size() != b.size()) {
return false;
}
for (const Key &key : a) {
if (!b.contains(key)) {
return false;
}
}
return true;
}
friend bool operator!=(const Set &a, const Set &b)
{
return !(a == b);
}
private:
BLI_NOINLINE void realloc_and_reinsert(const int64_t min_usable_slots)
{

View File

@ -64,6 +64,8 @@ typedef void (*TaskFreeFunction)(TaskPool *__restrict pool, void *taskdata);
/**
* Regular task pool that immediately starts executing tasks as soon as they
* are pushed, either on the current or another thread.
*
* Tasks will be executed as soon as they are added.
*/
TaskPool *BLI_task_pool_create(void *userdata, eTaskPriority priority);
@ -74,8 +76,10 @@ TaskPool *BLI_task_pool_create(void *userdata, eTaskPriority priority);
TaskPool *BLI_task_pool_create_background(void *userdata, eTaskPriority priority);
/**
* Background Serial: run tasks one after the other in the background,
* without parallelization between the tasks.
* Background Serial: run tasks one after the other in the background.
*
* Executes one task after the other, possibly on different threads
* but never in parallel.
*/
TaskPool *BLI_task_pool_create_background_serial(void *userdata, eTaskPriority priority);
@ -87,7 +91,7 @@ TaskPool *BLI_task_pool_create_background_serial(void *userdata, eTaskPriority p
TaskPool *BLI_task_pool_create_suspended(void *userdata, eTaskPriority priority);
/**
* No threads: immediately executes tasks on the same thread. For debugging.
* No threads: immediately executes tasks on the same thread. For debugging purposes.
*/
TaskPool *BLI_task_pool_create_no_threads(void *userdata);
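To make the distinction between the pool types concrete, here is a minimal usage sketch (not part of the patch); the BLI_task_pool_push, BLI_task_pool_work_and_wait and BLI_task_pool_free calls are assumed from the rest of this header rather than shown in the hunk above.

#include <cstdio>

#include "BLI_task.h"

static void process_item(TaskPool *__restrict pool, void *taskdata)
{
  (void)pool;
  std::printf("processing item %d\n", *(int *)taskdata);
}

static void run_example()
{
  int items[4] = {0, 1, 2, 3};
  /* Regular pool: tasks may start running as soon as they are pushed. */
  TaskPool *pool = BLI_task_pool_create(nullptr, TASK_PRIORITY_HIGH);
  for (int i = 0; i < 4; i++) {
    BLI_task_pool_push(pool, process_item, &items[i], false, nullptr);
  }
  /* Blocks (and helps execute tasks) until the pool is drained. */
  BLI_task_pool_work_and_wait(pool);
  BLI_task_pool_free(pool);
}

Swapping BLI_task_pool_create for BLI_task_pool_create_suspended would keep the pushed tasks queued until the work-and-wait call, and the background-serial variant would run them one after the other instead of in parallel.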

View File

@ -243,6 +243,8 @@ set(SRC
BLI_hash_tables.hh
BLI_heap.h
BLI_heap_simple.h
BLI_implicit_sharing.hh
BLI_implicit_sharing_ptr.hh
BLI_index_mask.hh
BLI_index_mask_ops.hh
BLI_index_range.hh
@ -355,7 +357,6 @@ set(SRC
BLI_timecode.h
BLI_timeit.hh
BLI_timer.h
BLI_user_counter.hh
BLI_utildefines.h
BLI_utildefines_iter.h
BLI_utildefines_stack.h
@ -493,6 +494,7 @@ if(WITH_GTESTS)
tests/BLI_hash_mm2a_test.cc
tests/BLI_heap_simple_test.cc
tests/BLI_heap_test.cc
tests/BLI_implicit_sharing_test.cc
tests/BLI_index_mask_test.cc
tests/BLI_index_range_test.cc
tests/BLI_inplace_priority_queue_test.cc

View File

@ -453,9 +453,9 @@ static void node_join(BVHTree *tree, BVHNode *node)
#ifdef USE_PRINT_TREE
/**
* Debug and information functions
*/
/* -------------------------------------------------------------------- */
/** \name * Debug and Information Functions
* \{ */
static void bvhtree_print_tree(BVHTree *tree, BVHNode *node, int depth)
{
@ -500,6 +500,9 @@ static void bvhtree_info(BVHTree *tree)
bvhtree_print_tree(tree, tree->nodes[tree->leaf_num], 0);
}
/** \} */
#endif /* USE_PRINT_TREE */
#ifdef USE_VERIFY_TREE

View File

@ -156,7 +156,7 @@
/**
* Singe bytes (or boolean) arrays need a higher number of steps
* because the resulting values are not unique enough to result in evenly distributed values.
* Use more accumulation when the the size of the structs is small, see: #105046.
* Use more accumulation when the size of the structs is small, see: #105046.
*
* With 6 -> 22, one byte each - means an array of booleans can be combine into 22 bits
* representing 4,194,303 different combinations.

View File

@ -1142,14 +1142,12 @@ static int move_single_file(const char *from, const char *to)
return RecursiveOp_Callback_OK;
}
/* if *file represents a directory, moves all its contents into *to, else renames
* file itself to *to. */
int BLI_path_move(const char *file, const char *to)
int BLI_path_move(const char *path, const char *to)
{
int ret = recursive_operation(file, to, move_callback_pre, move_single_file, NULL);
int ret = recursive_operation(path, to, move_callback_pre, move_single_file, NULL);
if (ret && ret != -1) {
return recursive_operation(file, NULL, NULL, delete_single_file, delete_callback_post);
return recursive_operation(path, NULL, NULL, delete_single_file, delete_callback_post);
}
return ret;

View File

@ -362,9 +362,6 @@ MINLINE int divide_floor_i(int a, int b)
return r ? d - ((a < 0) ^ (b < 0)) : d;
}
/**
* Integer division that returns the ceiling, instead of flooring like normal C division.
*/
MINLINE uint divide_ceil_u(uint a, uint b)
{
return (a + b - 1) / b;
@ -375,9 +372,6 @@ MINLINE uint64_t divide_ceil_ul(uint64_t a, uint64_t b)
return (a + b - 1) / b;
}
/**
* Returns \a a if it is a multiple of \a b or the next multiple or \a b after \b a .
*/
MINLINE uint ceil_to_multiple_u(uint a, uint b)
{
return divide_ceil_u(a, b) * b;
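For reference, a quick numeric check of the two helpers above (a standalone sketch; divide_ceil here is a local re-statement of the formula, not the BLI function itself).

#include <cassert>

static unsigned int divide_ceil(unsigned int a, unsigned int b)
{
  return (a + b - 1) / b; /* Same formula as divide_ceil_u above. */
}

int main()
{
  assert(divide_ceil(10, 4) == 3);      /* 10 / 4 rounded up. */
  assert(divide_ceil(8, 4) == 2);       /* Exact multiples are unchanged. */
  assert(divide_ceil(10, 4) * 4 == 12); /* ceil_to_multiple_u: next multiple of 4 at or above 10. */
  assert(divide_ceil(8, 4) * 4 == 8);   /* Already a multiple: returned as-is. */
  return 0;
}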

View File

@ -412,7 +412,6 @@ static double isperrboundA, isperrboundB, isperrboundC;
*
* Don't change this routine unless you fully understand it.
*/
void exactinit()
{
double half;

View File

@ -9,7 +9,9 @@
#include "BLI_strict_flags.h"
//******************************* Interpolation *******************************/
/* -------------------------------------------------------------------- */
/** \name Interpolation
* \{ */
void interp_v2_v2v2(float r[2], const float a[2], const float b[2], const float t)
{
@ -339,7 +341,11 @@ void flip_v2_v2v2(float v[2], const float v1[2], const float v2[2])
v[1] = v1[1] + (v1[1] - v2[1]);
}
/********************************* Comparison ********************************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Comparison
* \{ */
bool is_finite_v2(const float v[2])
{
@ -356,7 +362,11 @@ bool is_finite_v4(const float v[4])
return (isfinite(v[0]) && isfinite(v[1]) && isfinite(v[2]) && isfinite(v[3]));
}
/********************************** Angles ***********************************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Angles
* \{ */
float angle_v3v3v3(const float a[3], const float b[3], const float c[3])
{
@ -584,7 +594,11 @@ void angle_poly_v3(float *angles, const float *verts[3], int len)
}
}
/********************************* Geometry **********************************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Geometry
* \{ */
void project_v2_v2v2(float out[2], const float p[2], const float v_proj[2])
{
@ -808,7 +822,11 @@ void rotate_v3_v3v3fl(float r[3], const float p[3], const float axis[3], const f
rotate_normalized_v3_v3v3fl(r, p, axis_n, angle);
}
/*********************************** Other ***********************************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Other
* \{ */
void print_v2(const char *str, const float v[2])
{
@ -958,7 +976,11 @@ void axis_sort_v3(const float axis_values[3], int r_axis_order[3])
#undef SWAP_AXIS
}
/***************************** Array Functions *******************************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Array Functions
* \{ */
MINLINE double sqr_db(double f)
{

View File

@ -10,11 +10,6 @@
/* Don't re-wrap large data definitions. */
/* clang-format off */
/**
* Stored in R8G8 format. Load it in the following format:
* - DX10: DXGI_FORMAT_R8G8_UNORM
* - GPU: GPU_RG8 texture format and GPU_DATA_UBYTE data format.
*/
const unsigned char areaTexBytes[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@ -14952,11 +14947,6 @@ const unsigned char areaTexBytes[] = {
0x00, 0x00, 0x00, 0x00
};
/**
* Stored in R8 format. Load it in the following format:
* - DX10: DXGI_FORMAT_R8_UNORM
* - GPU: GPU_R8 texture format and GPU_DATA_UBYTE data format.
*/
const unsigned char searchTexBytes[] = {
0xfe, 0xfe, 0x00, 0x7f, 0x7f, 0x00, 0x00, 0xfe, 0xfe, 0x00, 0x7f, 0x7f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x7f, 0x00,

View File

@ -96,9 +96,10 @@ static eStrCursorDelimType cursor_delim_type_utf8(const char *ch_utf8,
return cursor_delim_type_unicode(uch);
}
/* Keep in sync with BLI_str_cursor_step_next_utf32. */
bool BLI_str_cursor_step_next_utf8(const char *str, size_t maxlen, int *pos)
{
/* NOTE: Keep in sync with #BLI_str_cursor_step_next_utf32. */
if ((*pos) >= (int)maxlen) {
return false;
}
@ -116,9 +117,10 @@ bool BLI_str_cursor_step_next_utf8(const char *str, size_t maxlen, int *pos)
return true;
}
/* Keep in sync with BLI_str_cursor_step_prev_utf32. */
bool BLI_str_cursor_step_prev_utf8(const char *str, size_t maxlen, int *pos)
{
/* NOTE: Keep in sync with #BLI_str_cursor_step_prev_utf32. */
if ((*pos) > 0 && (*pos) <= maxlen) {
const char *str_pos = str + (*pos);
const char *str_prev = str_pos;
@ -210,9 +212,10 @@ void BLI_str_cursor_step_utf8(const char *str,
}
}
/* Keep in sync with BLI_str_cursor_step_next_utf8. */
bool BLI_str_cursor_step_next_utf32(const char32_t *str, size_t maxlen, int *pos)
{
/* NOTE: Keep in sync with #BLI_str_cursor_step_next_utf8. */
if ((*pos) >= (int)maxlen) {
return false;
}
@ -223,9 +226,10 @@ bool BLI_str_cursor_step_next_utf32(const char32_t *str, size_t maxlen, int *pos
return true;
}
/* Keep in sync with BLI_str_cursor_step_prev_utf8. */
bool BLI_str_cursor_step_prev_utf32(const char32_t *str, size_t UNUSED(maxlen), int *pos)
{
/* NOTE: Keep in sync with #BLI_str_cursor_step_prev_utf8. */
if ((*pos) <= 0) {
return false;
}

View File

@ -384,54 +384,38 @@ static TaskPool *task_pool_create_ex(void *userdata, TaskPoolType type, eTaskPri
return pool;
}
/**
* Create a normal task pool. Tasks will be executed as soon as they are added.
*/
TaskPool *BLI_task_pool_create(void *userdata, eTaskPriority priority)
{
return task_pool_create_ex(userdata, TASK_POOL_TBB, priority);
}
/**
* Create a background task pool.
* In multi-threaded context, there is no differences with #BLI_task_pool_create(),
* but in single-threaded case it is ensured to have at least one worker thread to run on
* (i.e. you don't have to call #BLI_task_pool_work_and_wait
* on it to be sure it will be processed).
*
* \note Background pools are non-recursive
* (that is, you should not create other background pools in tasks assigned to a background pool,
* they could end never being executed, since the 'fallback' background thread is already
* busy with parent task in single-threaded context).
*/
TaskPool *BLI_task_pool_create_background(void *userdata, eTaskPriority priority)
{
/* NOTE: In multi-threaded context, there is no differences with #BLI_task_pool_create(),
* but in single-threaded case it is ensured to have at least one worker thread to run on
* (i.e. you don't have to call #BLI_task_pool_work_and_wait
* on it to be sure it will be processed).
*
* NOTE: Background pools are non-recursive
* (that is, you should not create other background pools in tasks assigned to a background pool,
* they could end never being executed, since the 'fallback' background thread is already
* busy with parent task in single-threaded context). */
return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND, priority);
}
/**
* Similar to BLI_task_pool_create() but does not schedule any tasks for execution
* for until BLI_task_pool_work_and_wait() is called. This helps reducing threading
* overhead when pushing huge amount of small initial tasks from the main thread.
*/
TaskPool *BLI_task_pool_create_suspended(void *userdata, eTaskPriority priority)
{
/* NOTE: Similar to #BLI_task_pool_create() but does not schedule any tasks for execution
* for until BLI_task_pool_work_and_wait() is called. This helps reducing threading
* overhead when pushing huge amount of small initial tasks from the main thread. */
return task_pool_create_ex(userdata, TASK_POOL_TBB_SUSPENDED, priority);
}
/**
* Single threaded task pool that executes pushed task immediately, for
* debugging purposes.
*/
TaskPool *BLI_task_pool_create_no_threads(void *userdata)
{
return task_pool_create_ex(userdata, TASK_POOL_NO_THREADS, TASK_PRIORITY_HIGH);
}
/**
* Task pool that executes one task after the other, possibly on different threads
* but never in parallel.
*/
TaskPool *BLI_task_pool_create_background_serial(void *userdata, eTaskPriority priority)
{
return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND_SERIAL, priority);

View File

@ -0,0 +1,85 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "MEM_guardedalloc.h"
#include "BLI_implicit_sharing_ptr.hh"
#include "testing/testing.h"
namespace blender::tests {
class ImplicitlySharedData : public ImplicitSharingMixin {
public:
ImplicitSharingPtr<ImplicitlySharedData> copy() const
{
return MEM_new<ImplicitlySharedData>(__func__);
}
void delete_self() override
{
MEM_delete(this);
}
};
class SharedDataContainer {
private:
ImplicitSharingPtr<ImplicitlySharedData> data_;
public:
SharedDataContainer() : data_(MEM_new<ImplicitlySharedData>(__func__))
{
}
const ImplicitlySharedData *get_for_read() const
{
return data_.get();
}
ImplicitlySharedData *get_for_write()
{
if (!data_) {
return nullptr;
}
if (data_->is_mutable()) {
return data_.get();
}
data_ = data_->copy();
return data_.get();
}
};
TEST(implicit_sharing, CopyOnWriteAccess)
{
/* Create the initial data. */
SharedDataContainer a;
EXPECT_NE(a.get_for_read(), nullptr);
/* a and b share the same underlying data now. */
SharedDataContainer b = a;
EXPECT_EQ(a.get_for_read(), b.get_for_read());
/* c now shares the data with a and b. */
SharedDataContainer c = a;
EXPECT_EQ(b.get_for_read(), c.get_for_read());
/* Retrieving write access on b should make a copy because the data is shared. */
ImplicitlySharedData *data_b1 = b.get_for_write();
EXPECT_NE(data_b1, nullptr);
EXPECT_EQ(data_b1, b.get_for_read());
EXPECT_NE(data_b1, a.get_for_read());
EXPECT_NE(data_b1, c.get_for_read());
/* Retrieving the same write access again should *not* make another copy. */
ImplicitlySharedData *data_b2 = b.get_for_write();
EXPECT_EQ(data_b1, data_b2);
/* Moving b should also move the data. b then does not have ownership anymore. Since the data in
* b only had one owner, the data is still mutable now that d is the owner. */
SharedDataContainer d = std::move(b);
EXPECT_EQ(b.get_for_read(), nullptr);
EXPECT_EQ(b.get_for_write(), nullptr);
EXPECT_EQ(d.get_for_read(), data_b1);
EXPECT_EQ(d.get_for_write(), data_b1);
}
} // namespace blender::tests

View File

@ -600,6 +600,28 @@ TEST(set, RemoveUniquePtrWithRaw)
EXPECT_TRUE(set.is_empty());
}
TEST(set, Equality)
{
const Set<int> a = {1, 2, 3, 4, 5};
const Set<int> b = {5, 2, 3, 1, 4};
const Set<int> c = {1, 2, 3};
const Set<int> d = {1, 2, 3, 4, 5, 6};
const Set<int> e = {};
const Set<int> f = {10, 11, 12, 13, 14};
EXPECT_EQ(a, a);
EXPECT_EQ(a, b);
EXPECT_EQ(b, a);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(a, e);
EXPECT_NE(a, f);
EXPECT_NE(c, a);
EXPECT_NE(d, a);
EXPECT_NE(e, a);
EXPECT_NE(f, a);
}
/**
* Set this to 1 to activate the benchmark. It is disabled by default, because it prints a lot.
*/

View File

@ -1141,7 +1141,7 @@ static bool write_file_handle(Main *mainvar,
* asap afterward. */
id_lib_extern(id_iter);
}
else if (ID_FAKE_USERS(id_iter) > 0) {
else if (ID_FAKE_USERS(id_iter) > 0 && id_iter->asset_data == nullptr) {
/* Even though fake user is not directly editable by the user on linked data, it is a
* common 'work-around' to set it in library files on data-blocks that need to be linked
* but typically do not have an actual real user (e.g. texts, etc.).

View File

@ -1593,10 +1593,11 @@ void BM_mesh_bm_to_me(Main *bmain, BMesh *bm, Mesh *me, const struct BMeshToMesh
material_index.finish();
}
/* NOTE: The function is called from multiple threads with the same input BMesh and different
* mesh objects. */
void BM_mesh_bm_to_me_for_eval(BMesh *bm, Mesh *me, const CustomData_MeshMasks *cd_mask_extra)
{
/* NOTE: The function is called from multiple threads with the same input BMesh and different
* mesh objects. */
using namespace blender;
/* Must be an empty mesh. */
BLI_assert(me->totvert == 0);

View File

@ -4,19 +4,7 @@
* \ingroup bmesh
*
* BMesh Walker API.
*/
#include <stdlib.h>
#include <string.h> /* for memcpy */
#include "BLI_listbase.h"
#include "BLI_utildefines.h"
#include "bmesh.h"
#include "bmesh_walkers_private.h"
/**
*
* NOTE(@joeedh): Details on design.
*
* Original design: walkers directly emulation recursive functions.
@ -37,6 +25,16 @@
* for if walkers fail.
*/
#include <stdlib.h>
#include <string.h> /* for memcpy */
#include "BLI_listbase.h"
#include "BLI_utildefines.h"
#include "bmesh.h"
#include "bmesh_walkers_private.h"
void *BMW_begin(BMWalker *walker, void *start)
{
BLI_assert(((BMHeader *)start)->htype & walker->begin_htype);

View File

@ -3,6 +3,7 @@
#include "COM_MaskNode.h"
#include "COM_MaskOperation.h"
#include "COM_ScaleOperation.h"
namespace blender::compositor {
@ -50,7 +51,21 @@ void MaskNode::convert_to_operations(NodeConverter &converter,
}
converter.add_operation(operation);
converter.map_output_socket(output_mask, operation->get_output_socket());
ScaleFixedSizeOperation *scale_operation = new ScaleFixedSizeOperation();
scale_operation->set_variable_size(true);
/* Consider aspect ratio from scene. */
const int new_height = rd->xasp / rd->yasp * operation->get_mask_height();
scale_operation->set_new_height(new_height);
scale_operation->set_new_width(operation->get_mask_width());
scale_operation->set_is_aspect(false);
scale_operation->set_is_crop(false);
scale_operation->set_scale_canvas_max_size({float(data->size_x), float(data->size_y)});
converter.add_operation(scale_operation);
converter.add_link(operation->get_output_socket(0), scale_operation->get_input_socket(0));
converter.map_output_socket(output_mask, scale_operation->get_output_socket(0));
}
} // namespace blender::compositor
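For illustration only (the numbers are hypothetical, not from this change), the aspect correction above scales the height and leaves the width untouched:

/* A 960x540 mask with pixel aspect xasp = 2.0, yasp = 1.0: */
const float xasp = 2.0f, yasp = 1.0f;
const int mask_width = 960, mask_height = 540;
const int new_width = mask_width;                      /* unchanged: 960 */
const int new_height = int(xasp / yasp * mask_height); /* doubled: 1080 */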

View File

@ -63,6 +63,14 @@ class MaskOperation : public MultiThreadedOperation {
mask_height_inv_ = 1.0f / (float)height;
mask_px_ofs_[1] = mask_height_inv_ * 0.5f;
}
int get_mask_width()
{
return mask_width_;
}
int get_mask_height()
{
return mask_height_;
}
void set_framenumber(int frame_number)
{
frame_number_ = frame_number;

View File

@ -121,7 +121,7 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
DRW_ObjectFilterFn object_filter_fn,
void *object_filter_user_data);
/**
* object mode select-loop, see: #ED_view3d_draw_depth_loop (legacy drawing).
* Object mode select-loop, see: #ED_view3d_draw_depth_loop (legacy drawing).
*/
void DRW_draw_depth_loop(struct Depsgraph *depsgraph,
struct ARegion *region,
@ -167,7 +167,7 @@ void DRW_opengl_context_enable(void);
void DRW_opengl_context_disable(void);
#ifdef WITH_XR_OPENXR
/* XXX see comment on DRW_xr_opengl_context_get() */
/* XXX: see comment on #DRW_xr_opengl_context_get() */
void *DRW_xr_opengl_context_get(void);
void *DRW_xr_gpu_context_get(void);
void DRW_xr_drawing_begin(void);

View File

@ -2598,9 +2598,6 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
#endif /* USE_GPU_SELECT */
}
/**
* object mode select-loop, see: ED_view3d_draw_depth_loop (legacy drawing).
*/
void DRW_draw_depth_loop(struct Depsgraph *depsgraph,
ARegion *region,
View3D *v3d,
@ -3281,35 +3278,38 @@ void DRW_gpu_render_context_disable(void *UNUSED(re_gpu_context))
#ifdef WITH_XR_OPENXR
/* XXX
* There should really be no such getter, but for VR we currently can't easily avoid it. OpenXR
* needs some low level info for the OpenGL context that will be used for submitting the
* final framebuffer. VR could in theory create its own context, but that would mean we have to
* switch to it just to submit the final frame, which has notable performance impact.
*
* We could "inject" a context through DRW_opengl_render_context_enable(), but that would have to
* work from the main thread, which is tricky to get working too. The preferable solution would
* be using a separate thread for VR drawing where a single context can stay active. */
void *DRW_xr_opengl_context_get(void)
{
/* XXX: There should really be no such getter, but for VR we currently can't easily avoid it.
* OpenXR needs some low level info for the OpenGL context that will be used for submitting the
* final frame-buffer. VR could in theory create its own context, but that would mean we have to
* switch to it just to submit the final frame, which has notable performance impact.
*
* We could "inject" a context through DRW_opengl_render_context_enable(), but that would have to
* work from the main thread, which is tricky to get working too. The preferable solution would
* be using a separate thread for VR drawing where a single context can stay active. */
return DST.gl_context;
}
/* XXX See comment on DRW_xr_opengl_context_get(). */
void *DRW_xr_gpu_context_get(void)
{
/* XXX: See comment on #DRW_xr_opengl_context_get(). */
return DST.gpu_context;
}
/* XXX See comment on DRW_xr_opengl_context_get(). */
void DRW_xr_drawing_begin(void)
{
/* XXX: See comment on #DRW_xr_opengl_context_get(). */
BLI_ticket_mutex_lock(DST.gl_context_mutex);
}
/* XXX See comment on DRW_xr_opengl_context_get(). */
void DRW_xr_drawing_end(void)
{
/* XXX: See comment on #DRW_xr_opengl_context_get(). */
BLI_ticket_mutex_unlock(DST.gl_context_mutex);
}

View File

@ -46,7 +46,9 @@ struct Object;
#define DRW_DEBUG_USE_UNIFORM_NAME 0
#define DRW_UNIFORM_BUFFER_NAME 64
/* ------------ Profiling --------------- */
/* -------------------------------------------------------------------- */
/** \name Profiling
* \{ */
#define USE_PROFILE
@ -82,11 +84,15 @@ struct Object;
#endif /* USE_PROFILE */
/* ------------ Data Structure --------------- */
/**
/** \} */
/* -------------------------------------------------------------------- */
/** \name Data Structure
*
* Data structure for registered draw engines that can store draw manager
* specific data.
*/
* \{ */
typedef struct DRWRegisteredDrawEngine {
void /*DRWRegisteredDrawEngine*/ *next, *prev;
DrawEngineType *draw_engine;
@ -473,15 +479,18 @@ struct DRWView {
/* Needed to assert that alignment is the same in C++ and C. */
BLI_STATIC_ASSERT_ALIGN(DRWView, 16);
/* ------------ Data Chunks --------------- */
/**
/** \} */
/* -------------------------------------------------------------------- */
/** \name Data Chunks
*
* In order to keep a cache friendly data structure,
* we alloc most of our little data into chunks of multiple item.
* we allocate most of our little data into chunks of multiple items.
* Iteration, allocation and memory usage are better.
* We lose a bit of memory by allocating more than what we need
* but it's counterbalanced by not needing the linked-list pointers
* for each item.
*/
* \{ */
typedef struct DRWUniformChunk {
struct DRWUniformChunk *next; /* single-linked list */
@ -516,9 +525,13 @@ typedef struct DRWCommandSmallChunk {
BLI_STATIC_ASSERT_ALIGN(DRWCommandChunk, 16);
#endif
/* ------------- Memory Pools ------------ */
/** \} */
/* Contains memory pools information */
/* -------------------------------------------------------------------- */
/** \name Memory Pools
* \{ */
/** Contains memory pools information. */
typedef struct DRWData {
/** Instance data. */
DRWInstanceDataList *idatalist;
@ -558,7 +571,11 @@ typedef struct DRWData {
struct CurvesUniformBufPool *curves_ubos;
} DRWData;
/* ------------- DRAW MANAGER ------------ */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Draw Manager
* \{ */
typedef struct DupliKey {
struct Object *ob;
@ -663,7 +680,11 @@ typedef struct DRWManager {
extern DRWManager DST; /* TODO: get rid of this and allow multi-threaded rendering. */
/* --------------- FUNCTIONS ------------- */
/** \} */
/* -------------------------------------------------------------------- */
/** \name Functions
* \{ */
void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags);
@ -725,6 +746,8 @@ void DRW_mesh_get_attributes(struct Object *object,
void DRW_manager_begin_sync(void);
void DRW_manager_end_sync(void);
/** \} */
#ifdef __cplusplus
}
#endif

View File

@ -3976,9 +3976,10 @@ void ED_operatortypes_animchannels(void)
WM_operatortype_append(ANIM_OT_channels_ungroup);
}
/* TODO: check on a poll callback for this, to get hotkeys into menus */
void ED_keymap_animchannels(wmKeyConfig *keyconf)
{
/* TODO: check on a poll callback for this, to get hotkeys into menus. */
WM_keymap_ensure(keyconf, "Animation Channels", 0, 0);
}

View File

@ -85,8 +85,9 @@ bool duplicate_fcurve_keys(FCurve *fcu)
return changed;
}
/* **************************************************** */
/* Various Tools */
/* -------------------------------------------------------------------- */
/** \name Various Tools
* \{ */
void clean_fcurve(struct bAnimContext *ac, bAnimListElem *ale, float thresh, bool cleardefault)
{
@ -375,8 +376,6 @@ float get_default_rna_value(FCurve *fcu, PropertyRNA *prop, PointerRNA *ptr)
return default_value;
}
/* This function blends the selected keyframes to the default value of the property the fcurve
* drives. */
void blend_to_default_fcurve(PointerRNA *id_ptr, FCurve *fcu, const float factor)
{
PointerRNA ptr;
@ -503,7 +502,11 @@ void breakdown_fcurve_segment(FCurve *fcu, FCurveSegment *segment, const float f
}
}
/* ---------------- */
/** \} */
/* -------------------------------------------------------------------- */
/** \name FCurve Decimate
* \{ */
/* Check if the keyframe interpolation type is supported */
static bool prepare_for_decimate(FCurve *fcu, int i)
@ -624,7 +627,11 @@ bool decimate_fcurve(bAnimListElem *ale, float remove_ratio, float error_sq_max)
return can_decimate_all_selected;
}
/* ---------------- */
/** \} */
/* -------------------------------------------------------------------- */
/** \name FCurve Smooth
* \{ */
/* temp struct used for smooth_fcurve */
typedef struct tSmooth_Bezt {
@ -728,7 +735,11 @@ void smooth_fcurve(FCurve *fcu)
BKE_fcurve_handles_recalc(fcu);
}
/* ---------------- */
/** \} */
/* -------------------------------------------------------------------- */
/** \name FCurve Sample
* \{ */
/* little cache for values... */
typedef struct TempFrameValCache {
@ -821,15 +832,18 @@ void sample_fcurve(FCurve *fcu)
BKE_fcurve_handles_recalc(fcu);
}
/* **************************************************** */
/* Copy/Paste Tools:
/** \} */
/* -------------------------------------------------------------------- */
/** \name Copy/Paste Tools
*
* - The copy/paste buffer currently stores a set of temporary F-Curves containing only the
* keyframes that were selected in each of the original F-Curves.
* - All pasted frames are offset by the same amount.
* This is calculated as the difference in the times of the current frame and the
* 'first keyframe' (i.e. the earliest one in all channels).
* `first keyframe` (i.e. the earliest one in all channels).
* - The earliest frame is calculated per copy operation.
*/
* \{ */
/* globals for copy/paste data (like for other copy/paste buffers) */
static ListBase animcopybuf = {NULL, NULL};
@ -1470,4 +1484,4 @@ eKeyPasteError paste_animedit_keys(bAnimContext *ac,
return KEYFRAME_PASTE_OK;
}
/* **************************************************** */
/** \} */

View File

@ -923,9 +923,10 @@ void ED_armature_ebone_selectflag_disable(EditBone *ebone, int flag)
ED_armature_ebone_selectflag_set(ebone, ebone->flag & ~flag);
}
/* could be used in more places */
void ED_armature_ebone_select_set(EditBone *ebone, bool select)
{
/* NOTE: this function could be used in more places. */
int flag;
if (select) {
BLI_assert((ebone->flag & BONE_UNSELECTABLE) == 0);

View File

@ -425,9 +425,10 @@ void blend_to_neighbor_fcurve_segment(struct FCurve *fcu,
struct FCurveSegment *segment,
float factor);
void breakdown_fcurve_segment(struct FCurve *fcu, struct FCurveSegment *segment, float factor);
/** Get a 1D gauss kernel. Since the kernel is symmetrical, only calculates the positive side.
* \param sigma The shape of the gauss distribution.
* \param kernel_size How long the kernel array is.
/**
* Get a 1D gauss kernel. Since the kernel is symmetrical, only calculates the positive side.
* \param sigma: The shape of the gauss distribution.
* \param kernel_size: How long the kernel array is.
*/
void ED_ANIM_get_1d_gauss_kernel(const float sigma, int kernel_size, double *r_kernel);
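A minimal sketch of the half-kernel this comment describes (illustrative only, not the actual implementation; it assumes the mirrored full kernel should sum to 1):

#include <cmath>

static void gauss_kernel_sketch(const float sigma, const int kernel_size, double *r_kernel)
{
  /* Positive half of exp(-x^2 / (2 * sigma^2)); sample 0 is the center. */
  double sum = 0.0;
  for (int i = 0; i < kernel_size; i++) {
    r_kernel[i] = std::exp(-(double(i) * double(i)) / (2.0 * double(sigma) * double(sigma)));
    /* The center sample is counted once, every other sample twice (for the mirrored side). */
    sum += (i == 0) ? r_kernel[i] : 2.0 * r_kernel[i];
  }
  for (int i = 0; i < kernel_size; i++) {
    r_kernel[i] /= sum;
  }
}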
void smooth_fcurve_segment(struct FCurve *fcu,
@ -438,6 +439,10 @@ void smooth_fcurve_segment(struct FCurve *fcu,
double *kernel);
void ease_fcurve_segment(struct FCurve *fcu, struct FCurveSegment *segment, float factor);
bool decimate_fcurve(struct bAnimListElem *ale, float remove_ratio, float error_sq_max);
/**
* Blends the selected keyframes to the default value of the property the F-curve drives.
*/
void blend_to_default_fcurve(struct PointerRNA *id_ptr, struct FCurve *fcu, float factor);
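Per selected keyframe this is, in effect, a linear blend toward the property's default; a plausible reading (illustrative only, `key_y` and `default_value` are placeholder names):

/* factor = 0.0f keeps the keyframe value, factor = 1.0f snaps it to the default. */
const float blended = key_y + (default_value - key_y) * factor;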
/**
* Use a weighted moving-means method to reduce intensity of fluctuations.

View File

@ -164,7 +164,9 @@ typedef enum eObClearParentTypes {
} eObClearParentTypes;
#ifdef __RNA_TYPES_H__
/** Operator Property: `OBJECT_OT_parent_clear`. */
extern struct EnumPropertyItem prop_clear_parent_types[];
/** Operator Property: `OBJECT_OT_parent_set`. */
extern struct EnumPropertyItem prop_make_parent_types[];
#endif

View File

@ -18,7 +18,11 @@ struct Scene *ED_scene_add(struct Main *bmain,
struct bContext *C,
struct wmWindow *win,
enum eSceneCopyMethod method) ATTR_NONNULL();
/** Special mode for adding a scene assigned to sequencer strip. */
/**
* Add a new scene in the sequence editor.
*
* Special mode for adding a scene assigned to sequencer strip.
*/
struct Scene *ED_scene_sequencer_add(struct Main *bmain,
struct bContext *C,
enum eSceneCopyMethod method,

View File

@ -203,6 +203,13 @@ void uvedit_face_select_shared_vert(const struct Scene *scene,
const bool select,
const bool do_history,
BMUVOffsets offsets);
/**
* Selects UV edges and shared vertices according to sticky_flag.
*
* \param sticky_flag:
* - #SI_STICKY_LOC: selects all UV edges that share the same mesh vertices and UV coordinates.
* - #SI_STICKY_VERTEX: selects all UV edges sharing the same mesh vertices.
*/
void uvedit_edge_select_shared_vert(const struct Scene *scene,
struct BMEditMesh *em,
struct BMLoop *l,
@ -210,6 +217,13 @@ void uvedit_edge_select_shared_vert(const struct Scene *scene,
const int sticky_flag,
const bool do_history,
BMUVOffsets offsets);
/**
* Selects shared UVs based on #sticky_flag.
*
* \param sticky_flag: Type of sticky selection:
* - #SI_STICKY_LOC: selects all UVs sharing same mesh vertex and UV coordinates.
* - #SI_STICKY_VERTEX: selects all UVs sharing same mesh vertex.
*/
void uvedit_uv_select_shared_vert(const struct Scene *scene,
struct BMEditMesh *em,
struct BMLoop *l,
@ -218,7 +232,9 @@ void uvedit_uv_select_shared_vert(const struct Scene *scene,
const bool do_history,
BMUVOffsets offsets);
/* Sets required UV edge flags as specified by the sticky_flag. */
/**
* Sets required UV edge flags as specified by the `sticky_flag`.
*/
void uvedit_edge_select_set_noflush(const struct Scene *scene,
struct BMLoop *l,
const bool select,
@ -333,6 +349,9 @@ struct FaceIsland {
float aspect_y;
};
/**
* Calculate islands and add them to \a island_list returning the number of items added.
*/
int bm_mesh_calc_uv_islands(const Scene *scene,
struct BMesh *bm,
ListBase *island_list,

View File

@ -127,6 +127,7 @@ class AbstractViewItem {
* If this wasn't done, the behavior of items is undefined.
*/
AbstractView *view_ = nullptr;
bool is_interactive_ = true;
bool is_active_ = false;
bool is_renaming_ = false;
@ -171,6 +172,11 @@ class AbstractViewItem {
/** Get the view this item is registered for using #AbstractView::register_item(). */
AbstractView &get_view() const;
/** Disable interacting with this item, meaning the buttons drawn will be disabled and there
 * will be no mouse hover feedback for the view row. */
void disable_interaction();
bool is_interactive() const;
/**
* Requires the view to have completed reconstruction, see #is_reconstructed(). Otherwise we
* can't be sure about the item state.

View File

@ -646,6 +646,8 @@ typedef struct uiPopupMenu uiPopupMenu;
uiPopupMenu *UI_popup_menu_begin(struct bContext *C, const char *title, int icon) ATTR_NONNULL();
/**
* Directly create a popup menu that is not refreshed on redraw.
*
* Only return handler, and set optional title.
* \param block_name: Assigned to uiBlock.name (useful info for debugging).
*/
@ -3010,6 +3012,8 @@ void UI_context_active_but_prop_get_filebrowser(const struct bContext *C,
bool *r_is_userdef);
/**
* For new/open operators.
*
* This is for browsing and editing the ID-blocks used.
*/
void UI_context_active_but_prop_get_templateID(struct bContext *C,
struct PointerRNA *r_ptr,
@ -3254,6 +3258,7 @@ void UI_interface_tag_script_reload(void);
/* Support click-drag motion which presses the button and closes a popover (like a menu). */
#define USE_UI_POPOVER_ONCE
bool UI_view_item_is_interactive(const uiViewItemHandle *item_handle);
bool UI_view_item_is_active(const uiViewItemHandle *item_handle);
bool UI_view_item_matches(const uiViewItemHandle *a_handle, const uiViewItemHandle *b_handle);
/**

View File

@ -108,6 +108,8 @@ using TreeViewOrItem = TreeViewItemContainer;
* \{ */
class AbstractTreeView : public AbstractView, public TreeViewItemContainer {
int min_rows_ = 0;
friend class AbstractTreeViewItem;
friend class TreeViewBuilder;
@ -116,6 +118,12 @@ class AbstractTreeView : public AbstractView, public TreeViewItemContainer {
void foreach_item(ItemIterFn iter_fn, IterOptions options = IterOptions::None) const;
/** Visual feature: Define a number of item rows the view will always show at minimum. If there
* are fewer items, empty dummy items will be added. These contribute to the view bounds, so the
* drop target of the view includes them, but they are not interactive (e.g. no mouse-hover
* highlight). */
void set_min_rows(int min_rows);
protected:
virtual void build_tree() = 0;
@ -308,6 +316,9 @@ class BasicTreeViewItem : public AbstractTreeViewItem {
class TreeViewBuilder {
public:
static void build_tree_view(AbstractTreeView &tree_view, uiLayout &layout);
private:
static void ensure_min_rows_items(AbstractTreeView &tree_view);
};
/** \} */

View File

@ -1540,7 +1540,9 @@ static bool ui_but_event_property_operator_string(const bContext *C,
/** \} */
/**
/* -------------------------------------------------------------------- */
/** \name Pie Menu Direction
*
* This goes in a seemingly weird pattern:
*
* <pre>
@ -1564,7 +1566,8 @@ static bool ui_but_event_property_operator_string(const bContext *C,
* subdividing the rest of the angles for the last 4 items.
*
* --Matt 07/2006
*/
* \{ */
const char ui_radial_dir_order[8] = {
UI_RADIAL_W,
UI_RADIAL_E,
@ -1585,6 +1588,8 @@ static void ui_but_pie_direction_string(uiBut *but, char *buf, int size)
BLI_snprintf(buf, size, "%d", ui_radial_dir_to_numpad[but->pie_dir]);
}
/** \} */
static void ui_menu_block_set_keymaps(const bContext *C, uiBlock *block)
{
char buf[128];

View File

@ -90,6 +90,10 @@ bool ui_but_is_interactive_ex(const uiBut *but, const bool labeledit, const bool
if ((but->type == UI_BTYPE_LISTROW) && labeledit) {
return false;
}
if (but->type == UI_BTYPE_VIEW_ITEM) {
const uiButViewItem *but_item = static_cast<const uiButViewItem *>(but);
return UI_view_item_is_interactive(but_item->view_item);
}
return true;
}

View File

@ -475,7 +475,6 @@ static void create_title_button(uiLayout *layout, const char *title, int icon)
uiItemS(layout);
}
/* Used to directly create a popup menu that is not refreshed on redraw. */
uiPopupMenu *UI_popup_menu_begin_ex(bContext *C,
const char *title,
const char *block_name,

View File

@ -574,9 +574,6 @@ static uiBlock *id_search_menu(bContext *C, ARegion *region, void *arg_litem)
static void template_id_cb(bContext *C, void *arg_litem, void *arg_event);
/**
* This is for browsing and editing the ID-blocks used.
*/
void UI_context_active_but_prop_get_templateID(bContext *C,
PointerRNA *r_ptr,
PropertyRNA **r_prop)

View File

@ -1353,9 +1353,7 @@ static void VIEW2D_OT_zoom(wmOperatorType *ot)
/* -------------------------------------------------------------------- */
/** \name Border Zoom Operator
* \{ */
/**
*
* The user defines a rect using standard box select tools, and we use this rect to
* define the new zoom-level of the view in the following ways:
*
@ -1363,8 +1361,8 @@ static void VIEW2D_OT_zoom(wmOperatorType *ot)
* -# RIGHTMOUSE - zoom out of view
*
* Currently, these key mappings are hardcoded, but it shouldn't be too important to
* have custom keymappings for this...
*/
* have custom keymappings for this.
* \{ */
static int view_borderzoom_exec(bContext *C, wmOperator *op)
{

View File

@ -208,6 +208,16 @@ AbstractView &AbstractViewItem::get_view() const
return *view_;
}
void AbstractViewItem::disable_interaction()
{
is_interactive_ = false;
}
bool AbstractViewItem::is_interactive() const
{
return is_interactive_;
}
bool AbstractViewItem::is_active() const
{
BLI_assert_msg(get_view().is_reconstructed(),
@ -282,6 +292,12 @@ class ViewItemAPIWrapper {
using namespace blender::ui;
bool UI_view_item_is_interactive(const uiViewItemHandle *item_handle)
{
const AbstractViewItem &item = reinterpret_cast<const AbstractViewItem &>(*item_handle);
return item.is_interactive();
}
bool UI_view_item_is_active(const uiViewItemHandle *item_handle)
{
const AbstractViewItem &item = reinterpret_cast<const AbstractViewItem &>(*item_handle);

View File

@ -140,9 +140,10 @@ void ui_block_views_listen(const uiBlock *block, const wmRegionListenerParams *l
}
}
/* Similar to #ui_but_find_mouse_over_ex(). */
uiViewHandle *UI_region_view_find_at(const ARegion *region, const int xy[2], const int pad)
{
/* NOTE: Similar to #ui_but_find_mouse_over_ex(). */
if (!ui_region_contains_point_px(region, xy)) {
return nullptr;
}

View File

@ -24,10 +24,6 @@ namespace blender::ui {
/* ---------------------------------------------------------------------- */
/**
* Add a tree-item to the container. This is the only place where items should be added, it
* handles important invariants!
*/
AbstractTreeViewItem &TreeViewItemContainer::add_tree_item(
std::unique_ptr<AbstractTreeViewItem> item)
{
@ -70,6 +66,11 @@ void AbstractTreeView::foreach_item(ItemIterFn iter_fn, IterOptions options) con
foreach_item_recursive(iter_fn, options);
}
void AbstractTreeView::set_min_rows(int min_rows)
{
min_rows_ = min_rows;
}
void AbstractTreeView::update_children_from_old(const AbstractView &old_view)
{
const AbstractTreeView &old_tree_view = dynamic_cast<const AbstractTreeView &>(old_view);
@ -455,6 +456,10 @@ void TreeViewLayoutBuilder::build_row(AbstractTreeViewItem &item) const
uiLayout *overlap = uiLayoutOverlap(&prev_layout);
if (!item.is_interactive_) {
uiLayoutSetActive(overlap, false);
}
uiLayoutRow(overlap, false);
/* Every item gets one! Other buttons can be overlapped on top. */
item.add_treerow_button(block_);
@ -490,6 +495,23 @@ uiLayout &TreeViewLayoutBuilder::current_layout() const
/* ---------------------------------------------------------------------- */
void TreeViewBuilder::ensure_min_rows_items(AbstractTreeView &tree_view)
{
int tot_visible_items = 0;
tree_view.foreach_item(
[&tot_visible_items](AbstractTreeViewItem & /*item*/) { tot_visible_items++; },
AbstractTreeView::IterOptions::SkipCollapsed);
if (tot_visible_items >= tree_view.min_rows_) {
return;
}
for (int i = 0; i < (tree_view.min_rows_ - tot_visible_items); i++) {
BasicTreeViewItem &new_item = tree_view.add_tree_item<BasicTreeViewItem>("");
new_item.disable_interaction();
}
}
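A hypothetical caller (not part of this change) showing how a view opts in to the minimum-row padding handled above:

#include "UI_tree_view.hh"

/* Illustrative only: a tree view that always reserves space for five rows,
 * so short lists still provide a reasonably sized drop target. */
class PaddedTreeView : public blender::ui::AbstractTreeView {
 public:
  PaddedTreeView()
  {
    set_min_rows(5);
  }

 protected:
  void build_tree() override
  {
    /* Add the real items here; missing rows are filled with non-interactive dummies. */
  }
};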
void TreeViewBuilder::build_tree_view(AbstractTreeView &tree_view, uiLayout &layout)
{
uiBlock &block = *uiLayoutGetBlock(&layout);
@ -498,6 +520,8 @@ void TreeViewBuilder::build_tree_view(AbstractTreeView &tree_view, uiLayout &lay
tree_view.update_from_old(block);
tree_view.change_state_delayed();
ensure_min_rows_items(tree_view);
/* Ensure the given layout is actually active. */
UI_block_layout_set_current(&block, &layout);

View File

@ -6814,8 +6814,7 @@ static void sort_bmelem_flag(bContext *C,
/* Multiplying with totface and adding i ensures
 * we keep current order for all faces of same mat. */
sb[affected[2]++].srt = srt * float(totelem[2]) + float(i);
// printf("e: %d; srt: %f; final: %f\n",
// i, srt, srt * ((float)totface) + ((float)i));
// printf("e: %d; srt: %f; final: %f\n", i, srt, srt * float(totface) + float(i));
}
else {
pb[i] = true;

View File

@ -316,10 +316,11 @@ const bool *ED_mesh_uv_map_vert_select_layer_get(const Mesh *mesh, const int uv_
return mesh_loop_boolean_custom_data_get_by_name(
*mesh, BKE_uv_map_vert_select_name_get(uv_name, buffer));
}
/* UV map edge selections are stored on face corners (loops) and not on edges
* because we need selections per face edge, even when the edge is split in UV space. */
const bool *ED_mesh_uv_map_edge_select_layer_get(const Mesh *mesh, const int uv_index)
{
/* UV map edge selections are stored on face corners (loops) and not on edges
* because we need selections per face edge, even when the edge is split in UV space. */
using namespace blender::bke;
char buffer[MAX_CUSTOMDATA_LAYER_NAME];
const char *uv_name = CustomData_get_layer_name(&mesh->ldata, CD_PROP_FLOAT2, uv_index);

View File

@ -127,7 +127,8 @@ using blender::Vector;
/* This is an exact copy of the define in `rna_light.c`
* kept here because of linking order.
* Icons are only defined here */
* Icons are only defined here. */
const EnumPropertyItem rna_enum_light_type_items[] = {
{LA_LOCAL, "POINT", ICON_LIGHT_POINT, "Point", "Omnidirectional point light source"},
{LA_SUN, "SUN", ICON_LIGHT_SUN, "Sun", "Constant direction parallel ray light source"},

View File

@ -483,7 +483,6 @@ void ED_object_parent(Object *ob, Object *par, const int type, const char *subst
BLI_strncpy(ob->parsubstr, substr, sizeof(ob->parsubstr));
}
/* Operator Property */
EnumPropertyItem prop_make_parent_types[] = {
{PAR_OBJECT, "OBJECT", 0, "Object", ""},
{PAR_ARMATURE, "ARMATURE", 0, "Armature Deform", ""},

View File

@ -66,7 +66,6 @@ static Scene *scene_add(Main *bmain, Scene *scene_old, eSceneCopyMethod method)
return scene_new;
}
/** Add a new scene in the sequence editor. */
Scene *ED_scene_sequencer_add(Main *bmain,
bContext *C,
eSceneCopyMethod method,

View File

@ -66,6 +66,10 @@
extern "C" {
/* -------------------------------------------------------------------- */
/** \name Image Paint Tile Utilities (Partial Update)
* \{ */
/**
* This is a static resource for non-global access.
* Maybe it should be exposed as part of the paint operation,
@ -175,6 +179,12 @@ void imapaint_image_update(
}
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Image Paint Blur
* \{ */
BlurKernel *paint_new_blur_kernel(Brush *br, bool proj)
{
int i, j;
@ -251,7 +261,11 @@ void paint_delete_blur_kernel(BlurKernel *kernel)
}
}
/************************ image paint poll ************************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Image Paint Poll
* \{ */
static Brush *image_paint_brush(bContext *C)
{
@ -320,7 +334,12 @@ static bool image_paint_2d_clone_poll(bContext *C)
return false;
}
/************************ paint operator ************************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Paint Operator
* \{ */
bool paint_use_opacity_masking(Brush *brush)
{
return ((brush->flag & BRUSH_AIRBRUSH) || (brush->flag & BRUSH_DRAG_DOT) ||
@ -425,7 +444,11 @@ bool get_imapaint_zoom(bContext *C, float *zoomx, float *zoomy)
return false;
}
/************************ cursor drawing *******************************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Cursor Drawing
* \{ */
static void toggle_paint_cursor(Scene *scene, bool enable)
{
@ -470,7 +493,11 @@ void ED_space_image_paint_update(Main *bmain, wmWindowManager *wm, Scene *scene)
}
}
/************************ grab clone operator ************************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Grab Clone Operator
* \{ */
struct GrabClone {
float startoffset[2];
@ -579,7 +606,12 @@ void PAINT_OT_grab_clone(wmOperatorType *ot)
1.0f);
}
/******************** sample color operator ********************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Sample Color Operator
* \{ */
struct SampleColorData {
bool show_cursor;
short launch_event;
@ -757,7 +789,11 @@ void PAINT_OT_sample_color(wmOperatorType *ot)
RNA_def_boolean(ot->srna, "palette", false, "Add to Palette", "");
}
/******************** texture paint toggle operator ********************/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Texture Paint Toggle Operator
* \{ */
static void paint_init_pivot_mesh(Object *ob, float location[3])
{
@ -957,6 +993,12 @@ void PAINT_OT_texture_paint_toggle(wmOperatorType *ot)
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Brush Color Flip Operator
* \{ */
static int brush_colors_flip_exec(bContext *C, wmOperator * /*op*/)
{
Scene *scene = CTX_data_scene(C);
@ -1014,6 +1056,12 @@ void PAINT_OT_brush_colors_flip(wmOperatorType *ot)
ot->flag = OPTYPE_REGISTER;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Texture Paint Bucket Fill Operator
* \{ */
void ED_imapaint_bucket_fill(struct bContext *C,
float color[3],
wmOperator *op,
@ -1066,3 +1114,5 @@ bool mask_paint_poll(bContext *C)
return BKE_paint_select_elem_test(CTX_data_active_object(C));
}
}
/** \} */

View File

@ -53,8 +53,6 @@ struct LibraryAsset {
struct AssetItemTree {
asset_system::AssetCatalogTree catalogs;
MultiValueMap<asset_system::AssetCatalogPath, LibraryAsset> assets_per_path;
Map<const asset_system::AssetCatalogTreeItem *, asset_system::AssetCatalogPath>
full_catalog_per_tree_item;
};
static AssetLibraryReference all_library_reference()
@ -71,6 +69,34 @@ static bool all_loading_finished()
return ED_assetlist_is_loaded(&all_library_ref);
}
static asset_system::AssetLibrary *get_all_library_once_available()
{
const AssetLibraryReference all_library_ref = all_library_reference();
return ED_assetlist_library_get_once_available(all_library_ref);
}
/**
* The menus want to pass catalog paths to context and for this they need persistent pointers to
* the paths. Rather than keeping some local path storage, get a pointer into the asset system
* directly, which is persistent until the library is reloaded and can safely be held by context.
*/
static PointerRNA persistent_catalog_path_rna_pointer(
bScreen &owner_screen,
const asset_system::AssetLibrary &library,
const asset_system::AssetCatalogTreeItem &item)
{
const asset_system::AssetCatalog *catalog = library.catalog_service->find_catalog_by_path(
item.catalog_path());
if (!catalog) {
return PointerRNA_NULL;
}
const asset_system::AssetCatalogPath &path = catalog->path;
return {&owner_screen.id,
&RNA_AssetCatalogPath,
const_cast<asset_system::AssetCatalogPath *>(&path)};
}
static AssetItemTree build_catalog_tree(const bContext &C, const bNodeTree *node_tree)
{
if (!node_tree) {
@ -88,8 +114,7 @@ static AssetItemTree build_catalog_tree(const bContext &C, const bNodeTree *node
ED_assetlist_storage_fetch(&all_library_ref, &C);
ED_assetlist_ensure_previews_job(&all_library_ref, &C);
asset_system::AssetLibrary *all_library = ED_assetlist_library_get_once_available(
all_library_ref);
asset_system::AssetLibrary *all_library = get_all_library_once_available();
if (!all_library) {
return {};
}
@ -132,18 +157,7 @@ static AssetItemTree build_catalog_tree(const bContext &C, const bNodeTree *node
catalogs_with_node_assets.insert_item(*catalog);
});
/* Build another map storing full asset paths for each tree item, in order to have stable
* pointers to asset catalog paths to use for context pointers. This is necessary because
* #asset_system::AssetCatalogTreeItem doesn't store its full path directly. */
Map<const asset_system::AssetCatalogTreeItem *, asset_system::AssetCatalogPath>
full_catalog_per_tree_item;
catalogs_with_node_assets.foreach_item([&](asset_system::AssetCatalogTreeItem &item) {
full_catalog_per_tree_item.add_new(&item, item.catalog_path());
});
return {std::move(catalogs_with_node_assets),
std::move(assets_per_path),
std::move(full_catalog_per_tree_item)};
return {std::move(catalogs_with_node_assets), std::move(assets_per_path)};
}
static void node_add_catalog_assets_draw(const bContext *C, Menu *menu)
@ -195,14 +209,21 @@ static void node_add_catalog_assets_draw(const bContext *C, Menu *menu)
"NODE_OT_add_group_asset");
}
asset_system::AssetLibrary *all_library = get_all_library_once_available();
if (!all_library) {
return;
}
catalog_item->foreach_child([&](asset_system::AssetCatalogTreeItem &child_item) {
const asset_system::AssetCatalogPath &path = tree.full_catalog_per_tree_item.lookup(
&child_item);
PointerRNA path_ptr{
&screen.id, &RNA_AssetCatalogPath, const_cast<asset_system::AssetCatalogPath *>(&path)};
PointerRNA path_ptr = persistent_catalog_path_rna_pointer(screen, *all_library, child_item);
if (path_ptr.data == nullptr) {
return;
}
uiLayout *col = uiLayoutColumn(layout, false);
uiLayoutSetContextPointer(col, "asset_catalog_path", &path_ptr);
uiItemM(col, "NODE_MT_node_add_catalog_assets", IFACE_(path.name().c_str()), ICON_NONE);
uiItemM(
col, "NODE_MT_node_add_catalog_assets", IFACE_(child_item.get_name().c_str()), ICON_NONE);
});
}
@ -263,16 +284,22 @@ static void add_root_catalogs_draw(const bContext *C, Menu *menu)
return menus;
}();
asset_system::AssetLibrary *all_library = get_all_library_once_available();
if (!all_library) {
return;
}
tree.catalogs.foreach_root_item([&](asset_system::AssetCatalogTreeItem &item) {
if (all_builtin_menus.contains(item.get_name())) {
return;
}
const asset_system::AssetCatalogPath &path = tree.full_catalog_per_tree_item.lookup(&item);
PointerRNA path_ptr{
&screen.id, &RNA_AssetCatalogPath, const_cast<asset_system::AssetCatalogPath *>(&path)};
PointerRNA path_ptr = persistent_catalog_path_rna_pointer(screen, *all_library, item);
if (path_ptr.data == nullptr) {
return;
}
uiLayout *col = uiLayoutColumn(layout, false);
uiLayoutSetContextPointer(col, "asset_catalog_path", &path_ptr);
uiItemM(col, "NODE_MT_node_add_catalog_assets", IFACE_(path.name().c_str()), ICON_NONE);
uiItemM(col, "NODE_MT_node_add_catalog_assets", IFACE_(item.get_name().c_str()), ICON_NONE);
});
}
@ -310,9 +337,17 @@ void uiTemplateNodeAssetMenuItems(uiLayout *layout, bContext *C, const char *cat
if (!item) {
return;
}
const asset_system::AssetCatalogPath &path = tree.full_catalog_per_tree_item.lookup(item);
PointerRNA path_ptr{
&screen.id, &RNA_AssetCatalogPath, const_cast<asset_system::AssetCatalogPath *>(&path)};
asset_system::AssetLibrary *all_library = get_all_library_once_available();
if (!all_library) {
return;
}
PointerRNA path_ptr = persistent_catalog_path_rna_pointer(screen, *all_library, *item);
if (path_ptr.data == nullptr) {
return;
}
uiItemS(layout);
uiLayout *col = uiLayoutColumn(layout, false);
uiLayoutSetContextPointer(col, "asset_catalog_path", &path_ptr);

View File

@ -727,7 +727,7 @@ static void get_min_max_of_nodes(const Span<bNode *> nodes,
}
/**
* Skip reroute nodes when finding the the socket to use as an example for a new group interface
* Skip reroute nodes when finding the socket to use as an example for a new group interface
* item. This moves "inward" into nodes selected for grouping to find properties like whether a
* connected socket has a hidden value. It only works in trivial situations-- a single line of
* connected reroutes with no branching.

View File

@ -49,7 +49,7 @@ set(SRC
view3d_gizmo_tool_generic.c
view3d_header.c
view3d_iterators.cc
view3d_navigate.c
view3d_navigate.cc
view3d_navigate_dolly.c
view3d_navigate_fly.c
view3d_navigate_move.c

View File

@ -110,8 +110,8 @@ void view3d_operator_properties_common(wmOperatorType *ot, const enum eV3D_OpPro
void calctrackballvec(const rcti *rect, const int event_xy[2], float r_dir[3])
{
const float radius = V3D_OP_TRACKBALLSIZE;
const float t = radius / (float)M_SQRT2;
const float size[2] = {BLI_rcti_size_x(rect), BLI_rcti_size_y(rect)};
const float t = radius / float(M_SQRT2);
const float size[2] = {float(BLI_rcti_size_x(rect)), float(BLI_rcti_size_y(rect))};
/* Aspect correct so dragging in a non-square view doesn't squash the direction.
* So diagonal motion rotates the same direction the cursor is moving. */
const float size_min = min_ff(size[0], size[1]);
@ -178,7 +178,7 @@ bool view3d_orbit_calc_center(bContext *C, float r_dyn_ofs[3])
is_set = true;
}
else if (ob_act && (ob_act->mode & OB_MODE_EDIT) && (ob_act->type == OB_FONT)) {
Curve *cu = ob_act_eval->data;
Curve *cu = static_cast<Curve *>(ob_act_eval->data);
EditFont *ef = cu->editfont;
zero_v3(lastofs);
@ -191,7 +191,7 @@ bool view3d_orbit_calc_center(bContext *C, float r_dyn_ofs[3])
is_set = true;
}
else if (ob_act == NULL || ob_act->mode == OB_MODE_OBJECT) {
else if (ob_act == nullptr || ob_act->mode == OB_MODE_OBJECT) {
/* object mode use boundbox centers */
uint tot = 0;
float select_center[3];
@ -217,14 +217,14 @@ bool view3d_orbit_calc_center(bContext *C, float r_dyn_ofs[3])
}
}
if (tot) {
mul_v3_fl(select_center, 1.0f / (float)tot);
mul_v3_fl(select_center, 1.0f / float(tot));
copy_v3_v3(lastofs, select_center);
is_set = true;
}
}
else {
/* If there's no selection, `lastofs` is unmodified and last value since static. */
is_set = calculateTransformCenter(C, V3D_AROUND_CENTER_MEDIAN, lastofs, NULL);
is_set = calculateTransformCenter(C, V3D_AROUND_CENTER_MEDIAN, lastofs, nullptr);
}
copy_v3_v3(r_dyn_ofs, lastofs);
@ -234,7 +234,7 @@ bool view3d_orbit_calc_center(bContext *C, float r_dyn_ofs[3])
static enum eViewOpsFlag viewops_flag_from_args(bool use_select, bool use_depth)
{
enum eViewOpsFlag flag = 0;
enum eViewOpsFlag flag = VIEWOPS_FLAG_NONE;
if (use_select) {
flag |= VIEWOPS_FLAG_ORBIT_SELECT;
}
@ -253,7 +253,7 @@ enum eViewOpsFlag viewops_flag_from_prefs(void)
ViewOpsData *viewops_data_create(bContext *C, const wmEvent *event, enum eViewOpsFlag viewops_flag)
{
ViewOpsData *vod = MEM_callocN(sizeof(ViewOpsData), __func__);
ViewOpsData *vod = MEM_cnew<ViewOpsData>(__func__);
/* Store data. */
vod->bmain = CTX_data_main(C);
@ -261,8 +261,8 @@ ViewOpsData *viewops_data_create(bContext *C, const wmEvent *event, enum eViewOp
vod->scene = CTX_data_scene(C);
vod->area = CTX_wm_area(C);
vod->region = CTX_wm_region(C);
vod->v3d = vod->area->spacedata.first;
vod->rv3d = vod->region->regiondata;
vod->v3d = static_cast<View3D *>(vod->area->spacedata.first);
vod->rv3d = static_cast<RegionView3D *>(vod->region->regiondata);
Depsgraph *depsgraph = vod->depsgraph;
RegionView3D *rv3d = vod->rv3d;
@ -382,8 +382,8 @@ ViewOpsData *viewops_data_create(bContext *C, const wmEvent *event, enum eViewOp
negate_v3_v3(rv3d->ofs, dvec);
}
else {
const float mval_region_mid[2] = {(float)vod->region->winx / 2.0f,
(float)vod->region->winy / 2.0f};
const float mval_region_mid[2] = {float(vod->region->winx) / 2.0f,
float(vod->region->winy) / 2.0f};
ED_view3d_win_to_3d(vod->v3d, vod->region, vod->dyn_ofs, mval_region_mid, rv3d->ofs);
negate_v3(rv3d->ofs);
@ -394,7 +394,8 @@ ViewOpsData *viewops_data_create(bContext *C, const wmEvent *event, enum eViewOp
}
/* For dolly */
ED_view3d_win_to_vector(vod->region, (const float[2]){UNPACK2(event->mval)}, vod->init.mousevec);
const float mval[2] = {float(event->mval[0]), float(event->mval[1])};
ED_view3d_win_to_vector(vod->region, mval, vod->init.mousevec);
{
int event_xy_offset[2];
@ -453,7 +454,7 @@ void viewops_data_free(bContext *C, ViewOpsData *vod)
* \{ */
/**
* \param align_to_quat: When not NULL, set the axis relative to this rotation.
* \param align_to_quat: When not nullptr, set the axis relative to this rotation.
*/
static void axis_set_view(bContext *C,
View3D *v3d,
@ -465,7 +466,9 @@ static void axis_set_view(bContext *C,
const float *align_to_quat,
const int smooth_viewtx)
{
RegionView3D *rv3d = region->regiondata; /* no NULL check is needed, poll checks */
/* no nullptr check is needed, poll checks */
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
float quat[4];
const short orig_persp = rv3d->persp;
@ -477,7 +480,7 @@ static void axis_set_view(bContext *C,
rv3d->view_axis_roll = RV3D_VIEW_AXIS_ROLL_0;
}
if (align_to_quat == NULL) {
if (align_to_quat == nullptr) {
rv3d->view = view;
rv3d->view_axis_roll = view_axis_roll;
}
@ -496,17 +499,14 @@ static void axis_set_view(bContext *C,
if (rv3d->persp == RV3D_CAMOB && v3d->camera) {
/* to camera */
ED_view3d_smooth_view(C,
v3d,
region,
smooth_viewtx,
&(const V3D_SmoothParams){
.camera_old = v3d->camera,
.ofs = rv3d->ofs,
.quat = quat,
/* No undo because this switches to/from camera. */
.undo_str = NULL,
});
V3D_SmoothParams sview = {nullptr};
sview.camera_old = v3d->camera;
sview.ofs = rv3d->ofs;
sview.quat = quat;
/* No undo because this switches to/from camera. */
sview.undo_str = nullptr;
ED_view3d_smooth_view(C, v3d, region, smooth_viewtx, &sview);
}
else if (orig_persp == RV3D_CAMOB && v3d->camera) {
/* from camera */
@ -518,24 +518,21 @@ static void axis_set_view(bContext *C,
/* so we animate _from_ the camera location */
Object *camera_eval = DEG_get_evaluated_object(CTX_data_ensure_evaluated_depsgraph(C),
v3d->camera);
ED_view3d_from_object(camera_eval, rv3d->ofs, NULL, &rv3d->dist, NULL);
ED_view3d_from_object(camera_eval, rv3d->ofs, nullptr, &rv3d->dist, nullptr);
ED_view3d_smooth_view(C,
v3d,
region,
smooth_viewtx,
&(const V3D_SmoothParams){
.camera_old = camera_eval,
.ofs = ofs,
.quat = quat,
.dist = &dist,
/* No undo because this switches to/from camera. */
.undo_str = NULL,
});
V3D_SmoothParams sview = {nullptr};
sview.camera_old = camera_eval;
sview.ofs = ofs;
sview.quat = quat;
sview.dist = &dist;
/* No undo because this switches to/from camera. */
sview.undo_str = nullptr;
ED_view3d_smooth_view(C, v3d, region, smooth_viewtx, &sview);
}
else {
/* rotate around selection */
const float *dyn_ofs_pt = NULL;
const float *dyn_ofs_pt = nullptr;
float dyn_ofs[3];
if (U.uiflag & USER_ORBIT_SELECTION) {
@ -546,32 +543,29 @@ static void axis_set_view(bContext *C,
}
/* no camera involved */
ED_view3d_smooth_view(C,
v3d,
region,
smooth_viewtx,
&(const V3D_SmoothParams){
.quat = quat,
.dyn_ofs = dyn_ofs_pt,
/* No undo because this isn't a camera view. */
.undo_str = NULL,
});
V3D_SmoothParams sview = {nullptr};
sview.quat = quat;
sview.dyn_ofs = dyn_ofs_pt;
/* No undo because this isn't a camera view. */
sview.undo_str = nullptr;
ED_view3d_smooth_view(C, v3d, region, smooth_viewtx, &sview);
}
}
void viewmove_apply(ViewOpsData *vod, int x, int y)
{
const float event_ofs[2] = {
vod->prev.event_xy[0] - x,
vod->prev.event_xy[1] - y,
float(vod->prev.event_xy[0] - x),
float(vod->prev.event_xy[1] - y),
};
if ((vod->rv3d->persp == RV3D_CAMOB) && !ED_view3d_camera_lock_check(vod->v3d, vod->rv3d)) {
ED_view3d_camera_view_pan(vod->region, event_ofs);
}
else if (ED_view3d_offset_lock_check(vod->v3d, vod->rv3d)) {
vod->rv3d->ofs_lock[0] -= (event_ofs[0] * 2.0f) / (float)vod->region->winx;
vod->rv3d->ofs_lock[1] -= (event_ofs[1] * 2.0f) / (float)vod->region->winy;
vod->rv3d->ofs_lock[0] -= (event_ofs[0] * 2.0f) / float(vod->region->winx);
vod->rv3d->ofs_lock[1] -= (event_ofs[1] * 2.0f) / float(vod->region->winy);
}
else {
float dvec[3];
@ -624,7 +618,7 @@ static bool view3d_object_skip_minmax(const View3D *v3d,
const bool skip_camera,
bool *r_only_center)
{
BLI_assert(ob->id.orig_id == NULL);
BLI_assert(ob->id.orig_id == nullptr);
*r_only_center = false;
if (skip_camera && (ob == v3d->camera)) {
@ -667,7 +661,7 @@ static void view3d_from_minmax(bContext *C,
bool ok_dist,
const int smooth_viewtx)
{
RegionView3D *rv3d = region->regiondata;
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
float afm[3];
float size;
@ -716,33 +710,19 @@ static void view3d_from_minmax(bContext *C,
mid_v3_v3v3(new_ofs, min, max);
negate_v3(new_ofs);
V3D_SmoothParams sview = {nullptr};
sview.ofs = new_ofs;
sview.dist = ok_dist ? &new_dist : nullptr;
/* The caller needs to use undo begin/end calls. */
sview.undo_str = nullptr;
if (rv3d->persp == RV3D_CAMOB && !ED_view3d_camera_lock_check(v3d, rv3d)) {
rv3d->persp = RV3D_PERSP;
ED_view3d_smooth_view(C,
v3d,
region,
smooth_viewtx,
&(const V3D_SmoothParams){
.camera_old = v3d->camera,
.ofs = new_ofs,
.dist = ok_dist ? &new_dist : NULL,
/* The caller needs to use undo begin/end calls. */
.undo_str = NULL,
});
}
else {
ED_view3d_smooth_view(C,
v3d,
region,
smooth_viewtx,
&(const V3D_SmoothParams){
.ofs = new_ofs,
.dist = ok_dist ? &new_dist : NULL,
/* The caller needs to use undo begin/end calls. */
.undo_str = NULL,
});
sview.camera_old = v3d->camera;
}
ED_view3d_smooth_view(C, v3d, region, smooth_viewtx, &sview);
/* Smooth-view does view-lock #RV3D_BOXVIEW copy. */
}
@ -757,10 +737,9 @@ static void view3d_from_minmax_multi(bContext *C,
const int smooth_viewtx)
{
ScrArea *area = CTX_wm_area(C);
ARegion *region;
for (region = area->regionbase.first; region; region = region->next) {
LISTBASE_FOREACH (ARegion *, region, &area->regionbase) {
if (region->regiontype == RGN_TYPE_WINDOW) {
RegionView3D *rv3d = region->regiondata;
RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
/* when using all regions, don't jump out of camera view,
* but _do_ allow locked cameras to be moved */
if ((rv3d->persp != RV3D_CAMOB) || ED_view3d_camera_lock_check(v3d, rv3d)) {
@ -782,7 +761,7 @@ static int view3d_all_exec(bContext *C, wmOperator *op)
ViewLayer *view_layer_eval = DEG_get_evaluated_view_layer(depsgraph);
const bool use_all_regions = RNA_boolean_get(op->ptr, "use_all_regions");
const bool skip_camera = (ED_view3d_camera_lock_check(v3d, region->regiondata) ||
const bool skip_camera = (ED_view3d_camera_lock_check(v3d, rv3d) ||
/* any one of the regions may be locked */
(use_all_regions && v3d->flag2 & V3D_LOCK_CAMERA));
const bool center = RNA_boolean_get(op->ptr, "center");
@ -819,7 +798,7 @@ static int view3d_all_exec(bContext *C, wmOperator *op)
}
if (center) {
struct wmMsgBus *mbus = CTX_wm_message_bus(C);
wmMsgBus *mbus = CTX_wm_message_bus(C);
WM_msg_publish_rna_prop(mbus, &scene->id, &scene->cursor, View3DCursor, location);
DEG_id_tag_update(&scene->id, ID_RECALC_COPY_ON_WRITE);
@ -895,21 +874,23 @@ static int viewselected_exec(bContext *C, wmOperator *op)
BKE_view_layer_synced_ensure(scene_eval, view_layer_eval);
Object *ob_eval = BKE_view_layer_active_object_get(view_layer_eval);
Object *obedit = CTX_data_edit_object(C);
const bGPdata *gpd_eval = ob_eval && (ob_eval->type == OB_GPENCIL_LEGACY) ? ob_eval->data : NULL;
const bGPdata *gpd_eval = ob_eval && (ob_eval->type == OB_GPENCIL_LEGACY) ?
static_cast<const bGPdata *>(ob_eval->data) :
nullptr;
const bool is_gp_edit = gpd_eval ? GPENCIL_ANY_MODE(gpd_eval) : false;
const bool is_face_map = ((is_gp_edit == false) && region->gizmo_map &&
WM_gizmomap_is_any_selected(region->gizmo_map));
float min[3], max[3];
bool ok = false, ok_dist = true;
const bool use_all_regions = RNA_boolean_get(op->ptr, "use_all_regions");
const bool skip_camera = (ED_view3d_camera_lock_check(v3d, region->regiondata) ||
const bool skip_camera = (ED_view3d_camera_lock_check(v3d, rv3d) ||
/* any one of the regions may be locked */
(use_all_regions && v3d->flag2 & V3D_LOCK_CAMERA));
const int smooth_viewtx = WM_operator_smooth_viewtx_get(op);
INIT_MINMAX(min, max);
if (is_face_map) {
ob_eval = NULL;
ob_eval = nullptr;
}
if (ob_eval && (ob_eval->mode & OB_MODE_WEIGHT_PAINT)) {
@ -917,7 +898,7 @@ static int viewselected_exec(bContext *C, wmOperator *op)
/* this is weak code this way, we should make a generic
* active/selection callback interface once... */
Base *base_eval;
for (base_eval = BKE_view_layer_object_bases_get(view_layer_eval)->first; base_eval;
for (base_eval = (Base *)BKE_view_layer_object_bases_get(view_layer_eval)->first; base_eval;
base_eval = base_eval->next) {
if (BASE_SELECTED_EDITABLE(v3d, base_eval)) {
if (base_eval->object->type == OB_ARMATURE) {
@ -938,7 +919,7 @@ static int viewselected_exec(bContext *C, wmOperator *op)
if ((gps->flag & GP_STROKE_SELECT) && (gps->flag & GP_STROKE_3DSPACE)) {
ok |= BKE_gpencil_stroke_minmax(gps, true, min, max);
}
if (gps->editcurve != NULL) {
if (gps->editcurve != nullptr) {
for (int i = 0; i < gps->editcurve->tot_curve_points; i++) {
BezTriple *bezt = &gps->editcurve->curve_points[i].bezt;
if (bezt->f1 & SELECT) {
@ -1070,14 +1051,11 @@ static int viewcenter_cursor_exec(bContext *C, wmOperator *op)
/* non camera center */
float new_ofs[3];
negate_v3_v3(new_ofs, scene->cursor.location);
ED_view3d_smooth_view(C,
v3d,
region,
smooth_viewtx,
&(const V3D_SmoothParams){
.ofs = new_ofs,
.undo_str = op->type->name,
});
V3D_SmoothParams sview = {nullptr};
sview.ofs = new_ofs;
sview.undo_str = op->type->name;
ED_view3d_smooth_view(C, v3d, region, smooth_viewtx, &sview);
/* Smooth view does view-lock #RV3D_BOXVIEW copy. */
}
@ -1113,7 +1091,7 @@ static int viewcenter_pick_invoke(bContext *C, wmOperator *op, const wmEvent *ev
ARegion *region = CTX_wm_region(C);
if (rv3d) {
struct Depsgraph *depsgraph = CTX_data_ensure_evaluated_depsgraph(C);
Depsgraph *depsgraph = CTX_data_ensure_evaluated_depsgraph(C);
float new_ofs[3];
const int smooth_viewtx = WM_operator_smooth_viewtx_get(op);
@ -1121,7 +1099,7 @@ static int viewcenter_pick_invoke(bContext *C, wmOperator *op, const wmEvent *ev
view3d_operator_needs_opengl(C);
if (ED_view3d_autodist(depsgraph, region, v3d, event->mval, new_ofs, false, NULL)) {
if (ED_view3d_autodist(depsgraph, region, v3d, event->mval, new_ofs, false, nullptr)) {
/* pass */
}
else {
@ -1130,14 +1108,12 @@ static int viewcenter_pick_invoke(bContext *C, wmOperator *op, const wmEvent *ev
ED_view3d_win_to_3d_int(v3d, region, new_ofs, event->mval, new_ofs);
}
negate_v3(new_ofs);
ED_view3d_smooth_view(C,
v3d,
region,
smooth_viewtx,
&(const V3D_SmoothParams){
.ofs = new_ofs,
.undo_str = op->type->name,
});
V3D_SmoothParams sview = {nullptr};
sview.ofs = new_ofs;
sview.undo_str = op->type->name;
ED_view3d_smooth_view(C, v3d, region, smooth_viewtx, &sview);
}
return OPERATOR_FINISHED;
@ -1171,7 +1147,7 @@ static const EnumPropertyItem prop_view_items[] = {
{RV3D_VIEW_TOP, "TOP", ICON_TRIA_UP, "Top", "View from the top"},
{RV3D_VIEW_FRONT, "FRONT", 0, "Front", "View from the front"},
{RV3D_VIEW_BACK, "BACK", 0, "Back", "View from the back"},
{0, NULL, 0, NULL, NULL},
{0, nullptr, 0, nullptr, nullptr},
};
static int view_axis_exec(bContext *C, wmOperator *op)
@ -1184,24 +1160,24 @@ static int view_axis_exec(bContext *C, wmOperator *op)
int view_axis_roll = RV3D_VIEW_AXIS_ROLL_0;
const int smooth_viewtx = WM_operator_smooth_viewtx_get(op);
/* no NULL check is needed, poll checks */
/* no nullptr check is needed, poll checks */
ED_view3d_context_user_region(C, &v3d, &region);
rv3d = region->regiondata;
rv3d = static_cast<RegionView3D *>(region->regiondata);
ED_view3d_smooth_view_force_finish(C, v3d, region);
viewnum = RNA_enum_get(op->ptr, "type");
float align_quat_buf[4];
float *align_quat = NULL;
float *align_quat = nullptr;
if (RNA_boolean_get(op->ptr, "align_active")) {
/* align to active object */
Object *obact = CTX_data_active_object(C);
if (obact != NULL) {
if (obact != nullptr) {
float twmat[3][3];
const Scene *scene = CTX_data_scene(C);
struct ViewLayer *view_layer = CTX_data_view_layer(C);
ViewLayer *view_layer = CTX_data_view_layer(C);
Object *obedit = CTX_data_edit_object(C);
/* same as transform gizmo when normal is set */
ED_getTransformOrientationMatrix(
@ -1328,9 +1304,9 @@ static int view_camera_exec(bContext *C, wmOperator *op)
RegionView3D *rv3d;
const int smooth_viewtx = WM_operator_smooth_viewtx_get(op);
/* no NULL check is needed, poll checks */
/* no nullptr check is needed, poll checks */
ED_view3d_context_user_region(C, &v3d, &region);
rv3d = region->regiondata;
rv3d = static_cast<RegionView3D *>(region->regiondata);
ED_view3d_smooth_view_force_finish(C, v3d, region);
@ -1356,22 +1332,22 @@ static int view_camera_exec(bContext *C, wmOperator *op)
}
else {
/* use scene camera if one is not set (even though we're unlocked) */
if (v3d->camera == NULL) {
if (v3d->camera == nullptr) {
v3d->camera = scene->camera;
}
}
/* if the camera isn't found, check a number of options */
if (v3d->camera == NULL && ob && ob->type == OB_CAMERA) {
if (v3d->camera == nullptr && ob && ob->type == OB_CAMERA) {
v3d->camera = ob;
}
if (v3d->camera == NULL) {
if (v3d->camera == nullptr) {
v3d->camera = BKE_view_layer_camera_find(scene, view_layer);
}
/* couldn't find any useful camera, bail out */
if (v3d->camera == NULL) {
if (v3d->camera == nullptr) {
return OPERATOR_CANCELLED;
}
@ -1383,20 +1359,17 @@ static int view_camera_exec(bContext *C, wmOperator *op)
/* finally do snazzy view zooming */
rv3d->persp = RV3D_CAMOB;
ED_view3d_smooth_view(
C,
v3d,
region,
smooth_viewtx,
&(const V3D_SmoothParams){
.camera = v3d->camera,
.ofs = rv3d->ofs,
.quat = rv3d->viewquat,
.dist = &rv3d->dist,
.lens = &v3d->lens,
/* No undo because this changes cameras (and wont move the camera). */
.undo_str = NULL,
});
V3D_SmoothParams sview = {nullptr};
sview.camera = v3d->camera;
sview.ofs = rv3d->ofs;
sview.quat = rv3d->viewquat;
sview.dist = &rv3d->dist;
sview.lens = &v3d->lens;
/* No undo because this changes cameras (and won't move the camera). */
sview.undo_str = nullptr;
ED_view3d_smooth_view(C, v3d, region, smooth_viewtx, &sview);
}
else {
/* return to settings of last view */
@ -1408,7 +1381,7 @@ static int view_camera_exec(bContext *C, wmOperator *op)
rv3d->lview,
rv3d->lview_axis_roll,
rv3d->lpersp,
NULL,
nullptr,
smooth_viewtx);
}
}
@ -1451,7 +1424,7 @@ static const EnumPropertyItem prop_view_orbit_items[] = {
{V3D_VIEW_STEPRIGHT, "ORBITRIGHT", 0, "Orbit Right", "Orbit the view around to the right"},
{V3D_VIEW_STEPUP, "ORBITUP", 0, "Orbit Up", "Orbit the view up"},
{V3D_VIEW_STEPDOWN, "ORBITDOWN", 0, "Orbit Down", "Orbit the view down"},
{0, NULL, 0, NULL, NULL},
{0, nullptr, 0, nullptr, nullptr},
};
static int vieworbit_exec(bContext *C, wmOperator *op)
@ -1466,20 +1439,20 @@ static int vieworbit_exec(bContext *C, wmOperator *op)
RNA_property_float_get(op->ptr, prop_angle) :
DEG2RADF(U.pad_rot_angle);
/* no NULL check is needed, poll checks */
/* no nullptr check is needed, poll checks */
v3d = CTX_wm_view3d(C);
region = CTX_wm_region(C);
rv3d = region->regiondata;
rv3d = static_cast<RegionView3D *>(region->regiondata);
/* support for switching to the opposite view (even when in locked views) */
view_opposite = (fabsf(angle) == (float)M_PI) ? ED_view3d_axis_view_opposite(rv3d->view) :
view_opposite = (fabsf(angle) == float(M_PI)) ? ED_view3d_axis_view_opposite(rv3d->view) :
RV3D_VIEW_USER;
orbitdir = RNA_enum_get(op->ptr, "type");
if ((RV3D_LOCK_FLAGS(rv3d) & RV3D_LOCK_ROTATION) && (view_opposite == RV3D_VIEW_USER)) {
/* no NULL check is needed, poll checks */
/* no nullptr check is needed, poll checks */
ED_view3d_context_user_region(C, &v3d, &region);
rv3d = region->regiondata;
rv3d = static_cast<RegionView3D *>(region->regiondata);
}
ED_view3d_smooth_view_force_finish(C, v3d, region);
@ -1532,7 +1505,7 @@ static int vieworbit_exec(bContext *C, wmOperator *op)
rv3d->view = RV3D_VIEW_USER;
}
float dyn_ofs[3], *dyn_ofs_pt = NULL;
float dyn_ofs[3], *dyn_ofs_pt = nullptr;
if (U.uiflag & USER_ORBIT_SELECTION) {
if (view3d_orbit_calc_center(C, dyn_ofs)) {
@ -1541,17 +1514,15 @@ static int vieworbit_exec(bContext *C, wmOperator *op)
}
}
ED_view3d_smooth_view(C,
v3d,
region,
smooth_viewtx,
&(const V3D_SmoothParams){
.quat = quat_new,
.dyn_ofs = dyn_ofs_pt,
/* Group as successive orbit may run by holding a key. */
.undo_str = op->type->name,
.undo_grouped = true,
});
V3D_SmoothParams sview = {nullptr};
sview.quat = quat_new;
sview.dyn_ofs = dyn_ofs_pt;
sview.lens = &v3d->lens;
/* Group as successive orbit may run by holding a key. */
sview.undo_str = op->type->name;
sview.undo_grouped = true;
ED_view3d_smooth_view(C, v3d, region, smooth_viewtx, &sview);
return OPERATOR_FINISHED;
}
@ -1604,7 +1575,7 @@ static const EnumPropertyItem prop_view_pan_items[] = {
{V3D_VIEW_PANRIGHT, "PANRIGHT", 0, "Pan Right", "Pan the view to the right"},
{V3D_VIEW_PANUP, "PANUP", 0, "Pan Up", "Pan the view up"},
{V3D_VIEW_PANDOWN, "PANDOWN", 0, "Pan Down", "Pan the view down"},
{0, NULL, 0, NULL, NULL},
{0, nullptr, 0, nullptr, nullptr},
};
static int viewpan_invoke(bContext *C, wmOperator *op, const wmEvent *event)

View File

@ -7,6 +7,8 @@
#pragma once
#include "BLI_utildefines.h"
#ifdef __cplusplus
extern "C" {
#endif
@ -55,6 +57,7 @@ enum {
};
enum eViewOpsFlag {
VIEWOPS_FLAG_NONE = 0,
/** When enabled, rotate around the selection. */
VIEWOPS_FLAG_ORBIT_SELECT = (1 << 0),
/** When enabled, use the depth under the cursor for navigation. */
@ -68,6 +71,7 @@ enum eViewOpsFlag {
/** When set, ignore any options that depend on initial cursor location. */
VIEWOPS_FLAG_USE_MOUSE_INIT = (1 << 3),
};
ENUM_OPERATORS(eViewOpsFlag, VIEWOPS_FLAG_USE_MOUSE_INIT);
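In C++ the flag variable can no longer be initialized from a bare 0, which is what VIEWOPS_FLAG_NONE and the ENUM_OPERATORS helper are for; a small sketch, assuming the usual typed operators generated by ENUM_OPERATORS:

/* Illustrative only: combine and test flags with the typed operators. */
eViewOpsFlag flag = VIEWOPS_FLAG_NONE;
flag = flag | VIEWOPS_FLAG_ORBIT_SELECT;
if (flag & VIEWOPS_FLAG_ORBIT_SELECT) {
  /* Orbit around the selection. */
}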
/** Generic View Operator Custom-Data */
typedef struct ViewOpsData {
@ -148,7 +152,7 @@ typedef struct ViewOpsData {
bool use_dyn_ofs;
} ViewOpsData;
/* view3d_navigate.c */
/* view3d_navigate.cc */
bool view3d_location_poll(struct bContext *C);
bool view3d_rotation_poll(struct bContext *C);

View File

@ -811,6 +811,10 @@ void calculateCenter2D(TransInfo *t);
void calculateCenterLocal(TransInfo *t, const float center_global[3]);
void calculateCenter(TransInfo *t);
/**
* Called every time the view changes due to navigation.
* Adjusts the mouse position relative to the object.
*/
void tranformViewUpdate(TransInfo *t);
/* API functions for getting center points */

View File

@ -1159,8 +1159,6 @@ void calculateCenter(TransInfo *t)
calculateZfac(t);
}
/* Called every time the view changes due to navigation.
* Adjusts the mouse position relative to the object. */
void tranformViewUpdate(TransInfo *t)
{
float zoom_prev = t->zfac;

View File

@ -35,9 +35,10 @@
#include "ED_transverts.h" /* own include */
/* copied from editobject.c, now uses (almost) proper depsgraph. */
void ED_transverts_update_obedit(TransVertStore *tvs, Object *obedit)
{
/* NOTE: copied from `editobject.c`, now uses (almost) proper depsgraph. */
const int mode = tvs->mode;
BLI_assert(ED_transverts_check_obedit(obedit) == true);

View File

@ -104,9 +104,6 @@ static bool uvedit_is_face_affected_for_calc_uv_islands(const Scene *scene,
return true;
}
/**
* Calculate islands and add them to \a island_list returning the number of items added.
*/
int bm_mesh_calc_uv_islands(const Scene *scene,
BMesh *bm,
ListBase *island_list,

Some files were not shown because too many files have changed in this diff.