Cycles: add instancing support in light tree #106683

Closed
Weizhen Huang wants to merge 10 commits from weizhen:light_tree_instance into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
22 changed files with 469 additions and 103 deletions
Showing only changes of commit 49ede3338d - Show all commits

View File

@ -9,6 +9,7 @@
#include "BLI_bounds_types.hh"
#include "BLI_generic_virtual_array.hh"
#include "BLI_implicit_sharing.hh"
#include "BLI_index_mask.hh"
#include "BLI_math_matrix_types.hh"
#include "BLI_math_vector_types.hh"
@ -53,6 +54,9 @@ struct BasisCache {
*/
class CurvesGeometryRuntime {
public:
/** Implicit sharing user count for #CurvesGeometry::curve_offsets. */
ImplicitSharingInfo *curve_offsets_sharing_info = nullptr;
/**
* The cached number of curves with each type. Unlike other caches here, this is not computed
* lazily, since it is needed so often and types are not adjusted much anyway.

View File

@ -56,28 +56,38 @@ enum {
*/
ID_REMAP_FORCE_NEVER_NULL_USAGE = 1 << 3,
/** Do not remap library override pointers. */
ID_REMAP_SKIP_OVERRIDE_LIBRARY = 1 << 5,
/** Don't touch the special user counts (use when the 'old' remapped ID remains in use):
* - Do not transfer 'fake user' status from old to new ID.
* - Do not clear 'extra user' from old ID. */
ID_REMAP_SKIP_USER_CLEAR = 1 << 6,
ID_REMAP_SKIP_OVERRIDE_LIBRARY = 1 << 4,
/**
* Force internal ID runtime pointers (like `ID.newid`, `ID.orig_id` etc.) to also be processed.
* This should only be needed in some very specific cases, typically only BKE ID management code
* should need it (e.g. required from `id_delete` to ensure no runtime pointer remains using
* freed ones).
*/
ID_REMAP_FORCE_INTERNAL_RUNTIME_POINTERS = 1 << 7,
/** Force handling user count even for IDs that are outside of Main (used in some cases when
* dealing with IDs temporarily out of Main, but which will be put in it ultimately).
*/
ID_REMAP_FORCE_USER_REFCOUNT = 1 << 8,
ID_REMAP_FORCE_INTERNAL_RUNTIME_POINTERS = 1 << 5,
/** Force remapping of 'UI-like' ID usages (ID pointers stored in editors data etc.). */
ID_REMAP_FORCE_UI_POINTERS = 1 << 6,
/**
* Force obdata pointers to also be processed, even when object (`id_owner`) is in Edit mode.
* This is required by some tools creating/deleting IDs while operating in Edit mode, like e.g.
* the 'separate' mesh operator.
*/
ID_REMAP_FORCE_OBDATA_IN_EDITMODE = 1 << 9,
ID_REMAP_FORCE_OBDATA_IN_EDITMODE = 1 << 7,
/** Don't touch the special user counts (use when the 'old' remapped ID remains in use):
* - Do not transfer 'fake user' status from old to new ID.
* - Do not clear 'extra user' from old ID. */
ID_REMAP_SKIP_USER_CLEAR = 1 << 16,
/** Force handling user count even for IDs that are outside of Main (used in some cases when
* dealing with IDs temporarily out of Main, but which will be put in it ultimately).
*/
ID_REMAP_FORCE_USER_REFCOUNT = 1 << 17,
/** Do NOT handle user count for IDs (used in some cases when dealing with IDs from different
* BMains, if usercount will be recomputed anyway afterwards, like e.g. in memfile reading during
* undo step decoding). */
ID_REMAP_SKIP_USER_REFCOUNT = 1 << 18,
/** Do NOT tag IDs which had some of their ID pointers updated for update in the depsgraph, or ID
* type specific updates, like e.g. with node trees. */
ID_REMAP_SKIP_UPDATE_TAGGING = 1 << 19,
};
typedef enum eIDRemapType {
@ -95,12 +105,24 @@ typedef enum eIDRemapType {
*/
void BKE_libblock_remap_multiple_locked(struct Main *bmain,
struct IDRemapper *mappings,
short remap_flags);
const int remap_flags);
void BKE_libblock_remap_multiple(struct Main *bmain,
struct IDRemapper *mappings,
short remap_flags);
const int remap_flags);
/**
* Bare raw remapping of IDs, with no other processing than actually updating the ID pointers. No
* usercount, direct vs indirect linked status update, depsgraph tagging, etc.
*
* This is way more efficient than regular remapping from #BKE_libblock_remap_multiple & co, but it
* implies that calling code handles all the other aspects described above. This is typically the
* case e.g. in readfile process.
*
* WARNING: This call will likely leave the given BMain in invalid state in many aspects. */
void BKE_libblock_remap_multiple_raw(struct Main *bmain,
struct IDRemapper *mappings,
const int remap_flags);
/**
* Replace all references in given Main to \a old_id by \a new_id
* (if \a new_id is NULL, it unlinks \a old_id).
@ -108,9 +130,9 @@ void BKE_libblock_remap_multiple(struct Main *bmain,
* \note Requiring new_id to be non-null, this *may* not be the case ultimately,
* but makes things simpler for now.
*/
void BKE_libblock_remap_locked(struct Main *bmain, void *old_idv, void *new_idv, short remap_flags)
void BKE_libblock_remap_locked(struct Main *bmain, void *old_idv, void *new_idv, int remap_flags)
ATTR_NONNULL(1, 2);
void BKE_libblock_remap(struct Main *bmain, void *old_idv, void *new_idv, short remap_flags)
void BKE_libblock_remap(struct Main *bmain, void *old_idv, void *new_idv, int remap_flags)
ATTR_NONNULL(1, 2);
/**
@ -130,12 +152,12 @@ void BKE_libblock_unlink(struct Main *bmain,
*
* \param old_idv: Unlike BKE_libblock_remap, can be NULL,
* in which case all ID usages by given \a idv will be cleared.
*
* \param bmain: May be NULL, in which case there won't be depsgraph updates nor post-processing on
* some ID types (like collections or objects) to ensure their runtime data is valid.
*/
void BKE_libblock_relink_ex(struct Main *bmain,
void *idv,
void *old_idv,
void *new_idv,
short remap_flags) ATTR_NONNULL(1, 2);
void BKE_libblock_relink_ex(
struct Main *bmain, void *idv, void *old_idv, void *new_idv, int remap_flags) ATTR_NONNULL(2);
/**
* Same as #BKE_libblock_relink_ex, but applies all rules defined in \a id_remapper to \a ids (or
* does cleanup if `ID_REMAP_TYPE_CLEANUP` is specified as \a remap_type).
@ -144,7 +166,7 @@ void BKE_libblock_relink_multiple(struct Main *bmain,
struct LinkNode *ids,
eIDRemapType remap_type,
struct IDRemapper *id_remapper,
short remap_flags);
int remap_flags);
/**
* Remaps ID usages of given ID to their `id->newid` pointer if not None, and proceeds recursively

View File

@ -800,10 +800,7 @@ BLI_INLINE const int *BKE_mesh_poly_offsets(const Mesh *mesh)
{
return mesh->poly_offset_indices;
}
BLI_INLINE int *BKE_mesh_poly_offsets_for_write(Mesh *mesh)
{
return mesh->poly_offset_indices;
}
int *BKE_mesh_poly_offsets_for_write(Mesh *mesh);
BLI_INLINE const int *BKE_mesh_corner_verts(const Mesh *mesh)
{

View File

@ -16,6 +16,7 @@
# include "BLI_array.hh"
# include "BLI_bit_vector.hh"
# include "BLI_bounds_types.hh"
# include "BLI_implicit_sharing.hh"
# include "BLI_math_vector_types.hh"
# include "BLI_shared_cache.hh"
# include "BLI_span.hh"
@ -96,6 +97,9 @@ struct MeshRuntime {
/** Needed to ensure some thread-safety during render data pre-processing. */
std::mutex render_mutex;
/** Implicit sharing user count for #Mesh::poly_offset_indices. */
ImplicitSharingInfoHandle *poly_offsets_sharing_info;
/**
* A cache of bounds shared between data-blocks with unchanged positions. When changing positions
* affect the bounds, the cache is "un-shared" with other geometries. See #SharedCache comments.

View File

@ -1426,7 +1426,7 @@ static void blendfile_library_relocate_remap(Main *bmain,
ID *new_id,
ReportList *reports,
const bool do_reload,
const short remap_flags)
const int remap_flags)
{
BLI_assert(old_id);
if (do_reload) {
@ -1594,8 +1594,8 @@ void BKE_blendfile_library_relocate(BlendfileLinkAppendContext *lapp_context,
BKE_layer_collection_resync_forbid();
/* Note that in reload case, we also want to replace indirect usages. */
const short remap_flags = ID_REMAP_SKIP_NEVER_NULL_USAGE |
(do_reload ? 0 : ID_REMAP_SKIP_INDIRECT_USAGE);
const int remap_flags = ID_REMAP_SKIP_NEVER_NULL_USAGE |
(do_reload ? 0 : ID_REMAP_SKIP_INDIRECT_USAGE);
for (item_idx = 0, itemlink = lapp_context->items.list; itemlink;
item_idx++, itemlink = itemlink->next) {
BlendfileLinkAppendContextItem *item = itemlink->link;

View File

@ -60,13 +60,24 @@ CurvesGeometry::CurvesGeometry(const int point_num, const int curve_num)
CustomData_add_layer_named(
&this->point_data, CD_PROP_FLOAT3, CD_CONSTRUCT, this->point_num, ATTR_POSITION.c_str());
this->curve_offsets = (int *)MEM_malloc_arrayN(this->curve_num + 1, sizeof(int), __func__);
#ifdef DEBUG
this->offsets_for_write().fill(-1);
#endif
this->offsets_for_write().first() = 0;
this->runtime = MEM_new<CurvesGeometryRuntime>(__func__);
if (curve_num > 0) {
this->curve_offsets = static_cast<int *>(
MEM_malloc_arrayN(this->curve_num + 1, sizeof(int), __func__));
this->runtime->curve_offsets_sharing_info = implicit_sharing::info_for_mem_free(
this->curve_offsets);
#ifdef DEBUG
this->offsets_for_write().fill(-1);
#endif
/* Set common values for convenience. */
this->curve_offsets[0] = 0;
this->curve_offsets[this->curve_num] = this->point_num;
}
else {
this->curve_offsets = nullptr;
}
/* Fill the type counts with the default so they're in a valid state. */
this->runtime->type_counts[CURVE_TYPE_CATMULL_ROM] = curve_num;
}
@ -83,9 +94,10 @@ static void copy_curves_geometry(CurvesGeometry &dst, const CurvesGeometry &src)
CustomData_copy(&src.point_data, &dst.point_data, CD_MASK_ALL, dst.point_num);
CustomData_copy(&src.curve_data, &dst.curve_data, CD_MASK_ALL, dst.curve_num);
MEM_SAFE_FREE(dst.curve_offsets);
dst.curve_offsets = (int *)MEM_malloc_arrayN(dst.point_num + 1, sizeof(int), __func__);
dst.offsets_for_write().copy_from(src.offsets());
implicit_sharing::copy_shared_pointer(src.curve_offsets,
src.runtime->curve_offsets_sharing_info,
&dst.curve_offsets,
&dst.runtime->curve_offsets_sharing_info);
dst.tag_topology_changed();
@ -127,7 +139,6 @@ static void move_curves_geometry(CurvesGeometry &dst, CurvesGeometry &src)
src.curve_num = 0;
std::swap(dst.curve_offsets, src.curve_offsets);
MEM_SAFE_FREE(src.curve_offsets);
std::swap(dst.runtime, src.runtime);
}
@ -149,7 +160,8 @@ CurvesGeometry::~CurvesGeometry()
{
CustomData_free(&this->point_data, this->point_num);
CustomData_free(&this->curve_data, this->curve_num);
MEM_SAFE_FREE(this->curve_offsets);
implicit_sharing::free_shared_data(&this->curve_offsets,
&this->runtime->curve_offsets_sharing_info);
MEM_delete(this->runtime);
this->runtime = nullptr;
}
@ -326,6 +338,8 @@ Span<int> CurvesGeometry::offsets() const
}
MutableSpan<int> CurvesGeometry::offsets_for_write()
{
implicit_sharing::make_trivial_data_mutable(
&this->curve_offsets, &this->runtime->curve_offsets_sharing_info, this->curve_num + 1);
return {this->curve_offsets, this->curve_num + 1};
}
@ -948,8 +962,14 @@ void CurvesGeometry::resize(const int points_num, const int curves_num)
}
if (curves_num != this->curve_num) {
CustomData_realloc(&this->curve_data, this->curves_num(), curves_num);
implicit_sharing::resize_trivial_array(&this->curve_offsets,
&this->runtime->curve_offsets_sharing_info,
this->curve_num == 0 ? 0 : (this->curve_num + 1),
curves_num + 1);
/* Set common values for convenience. */
this->curve_offsets[0] = 0;
this->curve_offsets[curves_num] = this->point_num;
this->curve_num = curves_num;
this->curve_offsets = (int *)MEM_reallocN(this->curve_offsets, sizeof(int) * (curves_num + 1));
}
this->tag_topology_changed();
}
@ -1585,7 +1605,11 @@ void CurvesGeometry::blend_read(BlendDataReader &reader)
CustomData_blend_read(&reader, &this->point_data, this->point_num);
CustomData_blend_read(&reader, &this->curve_data, this->curve_num);
BLO_read_int32_array(&reader, this->curve_num + 1, &this->curve_offsets);
if (this->curve_offsets) {
BLO_read_int32_array(&reader, this->curve_num + 1, &this->curve_offsets);
this->runtime->curve_offsets_sharing_info = implicit_sharing::info_for_mem_free(
this->curve_offsets);
}
/* Recalculate curve type count cache that isn't saved in files. */
this->update_curve_types();

View File

@ -60,7 +60,7 @@ typedef struct IDRemap {
/** The ID in which we are replacing old_id by new_id usages. */
ID *id_owner;
short flag;
int flag;
} IDRemap;
/* IDRemap->flag enums defined in BKE_lib.h */
@ -104,30 +104,46 @@ static void foreach_libblock_remap_callback_apply(ID *id_owner,
const IDRemapperApplyOptions id_remapper_options,
const int cb_flag,
const bool is_indirect,
const bool violates_never_null,
const bool force_user_refcount)
const bool violates_never_null)
{
const bool skip_update_tagging = (id_remap_data->flag & ID_REMAP_SKIP_UPDATE_TAGGING) != 0;
const bool skip_user_refcount = (id_remap_data->flag & ID_REMAP_SKIP_USER_REFCOUNT) != 0;
const bool force_user_refcount = (id_remap_data->flag & ID_REMAP_FORCE_USER_REFCOUNT) != 0;
BLI_assert(!skip_user_refcount || !force_user_refcount);
ID *old_id = *id_ptr;
if (!violates_never_null) {
BKE_id_remapper_apply_ex(mappings, id_ptr, id_remapper_options, id_self);
DEG_id_tag_update_ex(id_remap_data->bmain,
id_self,
ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);
if (id_self != id_owner) {
DEG_id_tag_update_ex(id_remap_data->bmain,
id_owner,
ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);
}
if (GS(id_owner->name) == ID_NT) {
/* Make sure that the node tree is updated after a property in it changed. Ideally, we would
* know which nodes property was changed so that only this node is tagged. */
BKE_ntree_update_tag_all((bNodeTree *)id_owner);
if (!skip_update_tagging) {
if (id_remap_data->bmain != NULL) {
DEG_id_tag_update_ex(id_remap_data->bmain,
id_self,
ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);
if (id_self != id_owner) {
DEG_id_tag_update_ex(id_remap_data->bmain,
id_owner,
ID_RECALC_COPY_ON_WRITE | ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);
}
}
if (GS(id_owner->name) == ID_NT) {
/* Make sure that the node tree is updated after a property in it changed. Ideally, we
* would know which nodes property was changed so that only this node is tagged. */
BKE_ntree_update_tag_all((bNodeTree *)id_owner);
}
}
}
/* Get the new_id pointer. When the mapping is violating never null we should use a NULL
* pointer otherwise the incorrect users are decreased and increased on the same instance. */
ID *new_id = violates_never_null ? NULL : *id_ptr;
if (!is_indirect && new_id) {
new_id->runtime.remap.status |= ID_REMAP_IS_LINKED_DIRECT;
}
if (skip_user_refcount) {
return;
}
if (cb_flag & IDWALK_CB_USER) {
/* NOTE: by default we don't user-count IDs which are not in the main database.
* This is because in certain conditions we can have data-blocks in
@ -148,9 +164,6 @@ static void foreach_libblock_remap_callback_apply(ID *id_owner,
/* We cannot affect old_id->us directly, LIB_TAG_EXTRAUSER(_SET)
* are assumed to be set as needed, that extra user is processed in final handling. */
}
if (!is_indirect && new_id) {
new_id->runtime.remap.status |= ID_REMAP_IS_LINKED_DIRECT;
}
}
static int foreach_libblock_remap_callback(LibraryIDLinkCallbackData *cb_data)
@ -218,7 +231,6 @@ static int foreach_libblock_remap_callback(LibraryIDLinkCallbackData *cb_data)
(id_remap_data->flag & ID_REMAP_FORCE_NEVER_NULL_USAGE) == 0);
const bool skip_reference = (id_remap_data->flag & ID_REMAP_SKIP_OVERRIDE_LIBRARY) != 0;
const bool skip_never_null = (id_remap_data->flag & ID_REMAP_SKIP_NEVER_NULL_USAGE) != 0;
const bool force_user_refcount = (id_remap_data->flag & ID_REMAP_FORCE_USER_REFCOUNT) != 0;
#ifdef DEBUG_PRINT
printf(
@ -264,8 +276,7 @@ static int foreach_libblock_remap_callback(LibraryIDLinkCallbackData *cb_data)
id_remapper_options,
cb_flag,
is_indirect,
violates_never_null,
force_user_refcount);
violates_never_null);
}
return IDWALK_RET_NOP;
@ -449,7 +460,8 @@ static void libblock_remap_reset_remapping_status_callback(ID *old_id,
* (i.e. \a id does not references any other data-block anymore).
* + If \a old_id is non-NULL, behavior is as with a NULL \a id, but only within given \a id.
*
* \param bmain: the Main data storage to operate on (must never be NULL).
* \param bmain: the Main data storage to operate on (may be NULL, in which case part of the
* post-process/depsgraph update won't happen).
* \param id: the data-block to operate on
* (can be NULL, in which case we operate over all IDs from given bmain).
* \param old_id: the data-block to dereference (may be NULL if \a id is non-NULL).
@ -457,17 +469,18 @@ static void libblock_remap_reset_remapping_status_callback(ID *old_id,
* \param r_id_remap_data: if non-NULL, the IDRemap struct to use
* (useful to retrieve info about remapping process).
*/
ATTR_NONNULL(1)
static void libblock_remap_data(Main *bmain,
ID *id,
eIDRemapType remap_type,
struct IDRemapper *id_remapper,
const short remap_flags)
const int remap_flags)
{
IDRemap id_remap_data = {0};
const int foreach_id_flags = ((remap_flags & ID_REMAP_FORCE_INTERNAL_RUNTIME_POINTERS) != 0 ?
IDWALK_DO_INTERNAL_RUNTIME_POINTERS :
IDWALK_NOP);
const int foreach_id_flags =
(((remap_flags & ID_REMAP_FORCE_INTERNAL_RUNTIME_POINTERS) != 0 ?
IDWALK_DO_INTERNAL_RUNTIME_POINTERS :
IDWALK_NOP) |
((remap_flags & ID_REMAP_FORCE_UI_POINTERS) != 0 ? IDWALK_INCLUDE_UI : IDWALK_NOP));
id_remap_data.id_remapper = id_remapper;
id_remap_data.type = remap_type;
@ -519,7 +532,7 @@ static void libblock_remap_data(Main *bmain,
typedef struct LibblockRemapMultipleUserData {
Main *bmain;
short remap_flags;
int remap_flags;
} LibBlockRemapMultipleUserData;
static void libblock_remap_foreach_idpair_cb(ID *old_id, ID *new_id, void *user_data)
@ -530,7 +543,7 @@ static void libblock_remap_foreach_idpair_cb(ID *old_id, ID *new_id, void *user_
LibBlockRemapMultipleUserData *data = user_data;
Main *bmain = data->bmain;
const short remap_flags = data->remap_flags;
const int remap_flags = data->remap_flags;
BLI_assert(old_id != NULL);
BLI_assert((new_id == NULL) || GS(old_id->name) == GS(new_id->name));
@ -614,7 +627,7 @@ static void libblock_remap_foreach_idpair_cb(ID *old_id, ID *new_id, void *user_
void BKE_libblock_remap_multiple_locked(Main *bmain,
struct IDRemapper *mappings,
const short remap_flags)
const int remap_flags)
{
if (BKE_id_remapper_is_empty(mappings)) {
/* Early exit nothing to do. */
@ -640,7 +653,23 @@ void BKE_libblock_remap_multiple_locked(Main *bmain,
DEG_relations_tag_update(bmain);
}
void BKE_libblock_remap_locked(Main *bmain, void *old_idv, void *new_idv, const short remap_flags)
void BKE_libblock_remap_multiple_raw(Main *bmain,
struct IDRemapper *mappings,
const int remap_flags)
{
if (BKE_id_remapper_is_empty(mappings)) {
/* Early exit nothing to do. */
return;
}
libblock_remap_data(bmain,
NULL,
ID_REMAP_TYPE_REMAP,
mappings,
remap_flags | ID_REMAP_SKIP_USER_REFCOUNT | ID_REMAP_SKIP_UPDATE_TAGGING);
}
void BKE_libblock_remap_locked(Main *bmain, void *old_idv, void *new_idv, const int remap_flags)
{
struct IDRemapper *remapper = BKE_id_remapper_create();
ID *old_id = old_idv;
@ -650,7 +679,7 @@ void BKE_libblock_remap_locked(Main *bmain, void *old_idv, void *new_idv, const
BKE_id_remapper_free(remapper);
}
void BKE_libblock_remap(Main *bmain, void *old_idv, void *new_idv, const short remap_flags)
void BKE_libblock_remap(Main *bmain, void *old_idv, void *new_idv, const int remap_flags)
{
BKE_main_lock(bmain);
@ -659,7 +688,7 @@ void BKE_libblock_remap(Main *bmain, void *old_idv, void *new_idv, const short r
BKE_main_unlock(bmain);
}
void BKE_libblock_remap_multiple(Main *bmain, struct IDRemapper *mappings, const short remap_flags)
void BKE_libblock_remap_multiple(Main *bmain, struct IDRemapper *mappings, const int remap_flags)
{
BKE_main_lock(bmain);
@ -673,8 +702,8 @@ void BKE_libblock_unlink(Main *bmain,
const bool do_flag_never_null,
const bool do_skip_indirect)
{
const short remap_flags = (do_skip_indirect ? ID_REMAP_SKIP_INDIRECT_USAGE : 0) |
(do_flag_never_null ? ID_REMAP_FLAG_NEVER_NULL_USAGE : 0);
const int remap_flags = (do_skip_indirect ? ID_REMAP_SKIP_INDIRECT_USAGE : 0) |
(do_flag_never_null ? ID_REMAP_FLAG_NEVER_NULL_USAGE : 0);
BKE_main_lock(bmain);
@ -756,7 +785,7 @@ void BKE_libblock_relink_multiple(Main *bmain,
LinkNode *ids,
const eIDRemapType remap_type,
struct IDRemapper *id_remapper,
const short remap_flags)
const int remap_flags)
{
BLI_assert(remap_type == ID_REMAP_TYPE_REMAP || BKE_id_remapper_is_empty(id_remapper));
@ -765,6 +794,10 @@ void BKE_libblock_relink_multiple(Main *bmain,
libblock_remap_data(bmain, id_iter, remap_type, id_remapper, remap_flags);
}
if (bmain == NULL) {
return;
}
switch (remap_type) {
case ID_REMAP_TYPE_REMAP: {
LibBlockRelinkMultipleUserData user_data = {0};
@ -815,7 +848,7 @@ void BKE_libblock_relink_multiple(Main *bmain,
}
void BKE_libblock_relink_ex(
Main *bmain, void *idv, void *old_idv, void *new_idv, const short remap_flags)
Main *bmain, void *idv, void *old_idv, void *new_idv, const int remap_flags)
{
/* Should be able to replace all _relink() functions (constraints, rigidbody, etc.) ? */
@ -905,8 +938,8 @@ void BKE_libblock_relink_to_newid(Main *bmain, ID *id, const int remap_flag)
libblock_relink_to_newid_prepare_data(bmain, id, &relink_data);
const short remap_flag_final = remap_flag | ID_REMAP_SKIP_INDIRECT_USAGE |
ID_REMAP_SKIP_OVERRIDE_LIBRARY;
const int remap_flag_final = remap_flag | ID_REMAP_SKIP_INDIRECT_USAGE |
ID_REMAP_SKIP_OVERRIDE_LIBRARY;
BKE_libblock_relink_multiple(
bmain, relink_data.ids, ID_REMAP_TYPE_REMAP, relink_data.id_remapper, remap_flag_final);

View File

@ -22,6 +22,7 @@
#include "BLI_endian_switch.h"
#include "BLI_ghash.h"
#include "BLI_hash.h"
#include "BLI_implicit_sharing.hh"
#include "BLI_index_range.hh"
#include "BLI_linklist.h"
#include "BLI_listbase.h"
@ -154,7 +155,10 @@ static void mesh_copy_data(Main *bmain, ID *id_dst, const ID *id_src, const int
CustomData_copy(&mesh_src->edata, &mesh_dst->edata, mask.emask, mesh_dst->totedge);
CustomData_copy(&mesh_src->ldata, &mesh_dst->ldata, mask.lmask, mesh_dst->totloop);
CustomData_copy(&mesh_src->pdata, &mesh_dst->pdata, mask.pmask, mesh_dst->totpoly);
mesh_dst->poly_offset_indices = static_cast<int *>(MEM_dupallocN(mesh_src->poly_offset_indices));
blender::implicit_sharing::copy_shared_pointer(mesh_src->poly_offset_indices,
mesh_src->runtime->poly_offsets_sharing_info,
&mesh_dst->poly_offset_indices,
&mesh_dst->runtime->poly_offsets_sharing_info);
if (do_tessface) {
CustomData_copy(&mesh_src->fdata, &mesh_dst->fdata, mask.fmask, mesh_dst->totface);
}
@ -368,8 +372,6 @@ static void mesh_blend_read_data(BlendDataReader *reader, ID *id)
BLO_read_list(reader, &mesh->vertex_group_names);
BLO_read_int32_array(reader, mesh->totpoly + 1, &mesh->poly_offset_indices);
CustomData_blend_read(reader, &mesh->vdata, mesh->totvert);
CustomData_blend_read(reader, &mesh->edata, mesh->totedge);
CustomData_blend_read(reader, &mesh->fdata, mesh->totface);
@ -388,6 +390,12 @@ static void mesh_blend_read_data(BlendDataReader *reader, ID *id)
mesh->runtime = new blender::bke::MeshRuntime();
if (mesh->poly_offset_indices) {
BLO_read_int32_array(reader, mesh->totpoly + 1, &mesh->poly_offset_indices);
mesh->runtime->poly_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
mesh->poly_offset_indices);
}
/* happens with old files */
if (mesh->mselect == nullptr) {
mesh->totselect = 0;
@ -921,8 +929,10 @@ static void mesh_clear_geometry(Mesh &mesh)
CustomData_free(&mesh.fdata, mesh.totface);
CustomData_free(&mesh.ldata, mesh.totloop);
CustomData_free(&mesh.pdata, mesh.totpoly);
MEM_SAFE_FREE(mesh.poly_offset_indices);
if (mesh.poly_offset_indices) {
blender::implicit_sharing::free_shared_data(&mesh.poly_offset_indices,
&mesh.runtime->poly_offsets_sharing_info);
}
MEM_SAFE_FREE(mesh.mselect);
mesh.totvert = 0;
@ -976,20 +986,31 @@ Mesh *BKE_mesh_add(Main *bmain, const char *name)
void BKE_mesh_poly_offsets_ensure_alloc(Mesh *mesh)
{
BLI_assert(mesh->poly_offset_indices == nullptr);
BLI_assert(mesh->runtime->poly_offsets_sharing_info == nullptr);
if (mesh->totpoly == 0) {
return;
}
mesh->poly_offset_indices = static_cast<int *>(
MEM_malloc_arrayN(mesh->totpoly + 1, sizeof(int), __func__));
mesh->runtime->poly_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
mesh->poly_offset_indices);
#ifdef DEBUG
/* Fill offsets with obviously bad values to simplify finding missing initialization. */
mesh->poly_offsets_for_write().fill(-1);
#endif
/* Set common values for convenience. */
mesh->poly_offset_indices[0] = 0;
mesh->poly_offset_indices[mesh->totpoly] = mesh->totloop;
}
int *BKE_mesh_poly_offsets_for_write(Mesh *mesh)
{
blender::implicit_sharing::make_trivial_data_mutable(
&mesh->poly_offset_indices, &mesh->runtime->poly_offsets_sharing_info, mesh->totpoly + 1);
return mesh->poly_offset_indices;
}
static void mesh_ensure_cdlayers_primary(Mesh &mesh)
{
if (!CustomData_get_layer_named(&mesh.vdata, CD_PROP_FLOAT3, "position")) {

View File

@ -1128,6 +1128,8 @@ void BKE_mesh_nomain_to_mesh(Mesh *mesh_src, Mesh *mesh_dst, Object *ob)
CustomData_copy(&mesh_src->pdata, &mesh_dst->pdata, mask.pmask, mesh_src->totpoly);
CustomData_copy(&mesh_src->ldata, &mesh_dst->ldata, mask.lmask, mesh_src->totloop);
std::swap(mesh_dst->poly_offset_indices, mesh_src->poly_offset_indices);
std::swap(mesh_dst->runtime->poly_offsets_sharing_info,
mesh_src->runtime->poly_offsets_sharing_info);
/* Make sure attribute names are moved. */
std::swap(mesh_dst->active_color_attribute, mesh_src->active_color_attribute);

View File

@ -102,4 +102,83 @@ class ImplicitSharingMixin : public ImplicitSharingInfo {
virtual void delete_self() = 0;
};
namespace implicit_sharing {

namespace detail {

/* Type-erased implementations for the templates below. `alignment` must match the element
 * type's alignment of the original allocation. Defined in `intern/implicit_sharing.cc`. */
void *resize_trivial_array_impl(void *old_data,
                                int64_t old_size,
                                int64_t new_size,
                                int64_t alignment,
                                ImplicitSharingInfo **sharing_info);
void *make_trivial_data_mutable_impl(void *old_data,
                                     int64_t size,
                                     int64_t alignment,
                                     ImplicitSharingInfo **sharing_info);

}  // namespace detail

/**
 * Copy shared data from the source to the destination, adding a user count.
 * \note Does not free any existing data in the destination.
 */
template<typename T>
void copy_shared_pointer(T *src_ptr,
                         ImplicitSharingInfo *src_sharing_info,
                         T **r_dst_ptr,
                         ImplicitSharingInfo **r_dst_sharing_info)
{
  *r_dst_ptr = src_ptr;
  *r_dst_sharing_info = src_sharing_info;
  if (*r_dst_ptr) {
    /* A non-null data pointer must always be accompanied by its sharing info. */
    BLI_assert(*r_dst_sharing_info != nullptr);
    (*r_dst_sharing_info)->add_user();
  }
}

/**
 * Remove this reference to the shared data and remove dangling pointers.
 */
template<typename T> void free_shared_data(T **data, ImplicitSharingInfo **sharing_info)
{
  if (*sharing_info) {
    BLI_assert(*data != nullptr);
    /* This also frees the data itself when this was the last user. */
    (*sharing_info)->remove_user_and_delete_if_last();
  }
  /* Clear both pointers so callers cannot touch potentially freed data. */
  *data = nullptr;
  *sharing_info = nullptr;
}

/**
 * Create an implicit sharing object that takes ownership of the data, allowing it to be shared.
 * When it is no longer used, the data is freed with #MEM_freeN, so it must be a trivial type.
 */
ImplicitSharingInfo *info_for_mem_free(void *data);

/**
 * Make data mutable (single-user) if it is shared. For trivially-copyable data only.
 */
template<typename T>
void make_trivial_data_mutable(T **data, ImplicitSharingInfo **sharing_info, const int64_t size)
{
  *data = static_cast<T *>(
      detail::make_trivial_data_mutable_impl(*data, sizeof(T) * size, alignof(T), sharing_info));
}

/**
 * Resize an array of shared data. For trivially-copyable data only. Any new values are not
 * initialized.
 */
template<typename T>
void resize_trivial_array(T **data,
                          ImplicitSharingInfo **sharing_info,
                          int64_t old_size,
                          int64_t new_size)
{
  *data = static_cast<T *>(detail::resize_trivial_array_impl(
      *data, sizeof(T) * old_size, sizeof(T) * new_size, alignof(T), sharing_info));
}

}  // namespace implicit_sharing
} // namespace blender

View File

@ -83,6 +83,7 @@ set(SRC
intern/hash_md5.c
intern/hash_mm2a.c
intern/hash_mm3.c
intern/implicit_sharing.cc
intern/index_mask.cc
intern/jitter_2d.c
intern/kdtree_1d.c

View File

@ -0,0 +1,100 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <algorithm>
#include <cstring>
#include "MEM_guardedalloc.h"
#include "BLI_implicit_sharing.hh"
namespace blender::implicit_sharing {
/**
 * Sharing info that owns a #MEM_guardedalloc allocation and frees it with #MEM_freeN
 * once the last user is removed.
 */
class MEMFreeImplicitSharing : public ImplicitSharingInfo {
 public:
  /* The owned allocation; freed in #delete_self_with_data. */
  void *data;

  /* Starts with one user, matching the caller that hands over ownership of `data`. */
  MEMFreeImplicitSharing(void *data) : ImplicitSharingInfo(1), data(data)
  {
    BLI_assert(data != nullptr);
  }

 private:
  /* NOTE(review): presumably a pure virtual on #ImplicitSharingInfo (declared outside this
   * view) invoked when the user count reaches zero — confirm against the base class. */
  void delete_self_with_data() override
  {
    /* Free the owned allocation first, then this sharing-info object itself. */
    MEM_freeN(data);
    MEM_delete(this);
  }
};
/**
 * Wrap `data` (a #MEM_guardedalloc allocation) in sharing info that takes ownership of it
 * and frees it with #MEM_freeN once the last user is gone.
 */
ImplicitSharingInfo *info_for_mem_free(void *data)
{
  MEMFreeImplicitSharing *info = MEM_new<MEMFreeImplicitSharing>(__func__, data);
  return info;
}
namespace detail {
/**
 * Ensure the caller holds the only reference to the buffer so it can be modified in place.
 * Returns the (possibly newly copied) buffer; updates `*sharing_info` accordingly.
 */
void *make_trivial_data_mutable_impl(void *old_data,
                                     const int64_t size,
                                     const int64_t alignment,
                                     ImplicitSharingInfo **sharing_info)
{
  if (old_data == nullptr) {
    BLI_assert(size == 0);
    return nullptr;
  }
  BLI_assert(*sharing_info != nullptr);
  if (!(*sharing_info)->is_shared()) {
    /* Already the sole user, the existing buffer may be mutated directly. */
    return old_data;
  }
  /* Copy-on-write: create a private copy, then drop our reference to the shared buffer. */
  void *data_copy = MEM_mallocN_aligned(size, alignment, __func__);
  memcpy(data_copy, old_data, size);
  (*sharing_info)->remove_user_and_delete_if_last();
  *sharing_info = info_for_mem_free(data_copy);
  return data_copy;
}
/**
 * Resize a (possibly shared) trivially-copyable array. New elements are left uninitialized.
 * Returns the new buffer (or null when `new_size` is 0); updates `*sharing_info` so that the
 * caller ends up as the sole user of the returned buffer.
 */
void *resize_trivial_array_impl(void *old_data,
                                const int64_t old_size,
                                const int64_t new_size,
                                const int64_t alignment,
                                ImplicitSharingInfo **sharing_info)
{
  if (new_size == 0) {
    /* Shrinking to empty: release our reference; the data is freed with the last user. */
    if (*sharing_info) {
      (*sharing_info)->remove_user_and_delete_if_last();
      *sharing_info = nullptr;
    }
    return nullptr;
  }
  if (!old_data) {
    BLI_assert(old_size == 0);
    BLI_assert(*sharing_info == nullptr);
    void *new_data = MEM_mallocN_aligned(new_size, alignment, __func__);
    *sharing_info = info_for_mem_free(new_data);
    return new_data;
  }
  BLI_assert(old_size != 0);
  if ((*sharing_info)->is_mutable()) {
    if (auto *info = dynamic_cast<MEMFreeImplicitSharing *>(*sharing_info)) {
      /* If the array was allocated with the MEM allocator, we can use realloc directly, which
       * could theoretically give better performance if the data can be reused in place.
       * Fixed: the result was spuriously cast to `int *` (copy-paste leftover) although this
       * function is type-erased and must treat the buffer as raw `void *`.
       * NOTE(review): assumes #MEM_reallocN preserves the requested `alignment` of the
       * original #MEM_mallocN_aligned allocation — confirm for over-aligned element types. */
      void *new_data = MEM_reallocN(old_data, new_size);
      info->data = new_data;
      return new_data;
    }
  }
  /* Shared (or foreign-owned) buffer: copy the overlapping prefix into a fresh allocation and
   * drop our reference to the old one. */
  void *new_data = MEM_mallocN_aligned(new_size, alignment, __func__);
  memcpy(new_data, old_data, std::min(old_size, new_size));
  (*sharing_info)->remove_user_and_delete_if_last();
  *sharing_info = info_for_mem_free(new_data);
  return new_data;
}
} // namespace detail
} // namespace blender::implicit_sharing

View File

@ -408,8 +408,8 @@ static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool
um->store.poly_offset_indices = BLI_array_store_state_add(
bs, me->poly_offset_indices, size_t(me->totpoly + 1) * stride, state_reference);
}
MEM_SAFE_FREE(me->poly_offset_indices);
blender::implicit_sharing::free_shared_data(&me->poly_offset_indices,
&me->runtime->poly_offsets_sharing_info);
}
},
[&]() {
@ -577,6 +577,8 @@ static void um_arraystore_expand(UndoMesh *um)
size_t state_len;
me->poly_offset_indices = static_cast<int *>(
BLI_array_store_state_data_get_alloc(state, &state_len));
me->runtime->poly_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
me->poly_offset_indices);
BLI_assert((me->totpoly + 1) == (state_len / stride));
UNUSED_VARS_NDEBUG(stride);
}

View File

@ -1251,8 +1251,13 @@ static void mesh_add_polys(Mesh *mesh, int len)
CustomData_copy_layout(&mesh->pdata, &pdata, CD_MASK_MESH.pmask, CD_SET_DEFAULT, totpoly);
CustomData_copy_data(&mesh->pdata, &pdata, 0, 0, mesh->totpoly);
mesh->poly_offset_indices = static_cast<int *>(
MEM_reallocN(mesh->poly_offset_indices, sizeof(int) * (totpoly + 1)));
implicit_sharing::resize_trivial_array(&mesh->poly_offset_indices,
&mesh->runtime->poly_offsets_sharing_info,
mesh->totpoly == 0 ? 0 : (mesh->totpoly + 1),
totpoly + 1);
/* Set common values for convenience. */
mesh->poly_offset_indices[0] = 0;
mesh->poly_offset_indices[totpoly] = mesh->totloop;
CustomData_free(&mesh->pdata, mesh->totpoly);
mesh->pdata = pdata;
@ -1260,9 +1265,6 @@ static void mesh_add_polys(Mesh *mesh, int len)
BKE_mesh_runtime_clear_cache(mesh);
mesh->totpoly = totpoly;
/* Update the last offset, which may not be set elsewhere and must be the same as the number of
* face corners. */
mesh->poly_offsets_for_write().last() = mesh->totloop;
bke::MutableAttributeAccessor attributes = mesh->attributes_for_write();
bke::SpanAttributeWriter<bool> select_poly = attributes.lookup_or_add_for_write_span<bool>(

View File

@ -677,7 +677,11 @@ int ED_mesh_join_objects_exec(bContext *C, wmOperator *op)
BKE_mesh_clear_geometry(me);
me->poly_offset_indices = poly_offsets;
if (totpoly) {
me->poly_offset_indices = poly_offsets;
me->runtime->poly_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
poly_offsets);
}
me->totvert = totvert;
me->totedge = totedge;

View File

@ -161,7 +161,10 @@ static void SCULPT_dynamic_topology_disable_ex(
CustomData_copy(&geometry->edata, &me->edata, CD_MASK_MESH.emask, geometry->totedge);
CustomData_copy(&geometry->ldata, &me->ldata, CD_MASK_MESH.lmask, geometry->totloop);
CustomData_copy(&geometry->pdata, &me->pdata, CD_MASK_MESH.pmask, geometry->totpoly);
me->poly_offset_indices = static_cast<int *>(MEM_dupallocN(geometry->poly_offset_indices));
blender::implicit_sharing::copy_shared_pointer(geometry->poly_offset_indices,
geometry->poly_offsets_sharing_info,
&me->poly_offset_indices,
&me->runtime->poly_offsets_sharing_info);
}
else {
BKE_sculptsession_bm_to_me(ob, true);

View File

@ -16,6 +16,8 @@
#include "BKE_paint.h"
#include "BKE_pbvh.h"
#include "BLI_implicit_sharing.hh"
#include "BLI_bitmap.h"
#include "BLI_compiler_attrs.h"
#include "BLI_compiler_compat.h"
@ -147,6 +149,7 @@ struct SculptUndoNodeGeometry {
CustomData ldata;
CustomData pdata;
int *poly_offset_indices;
blender::ImplicitSharingInfo *poly_offsets_sharing_info;
int totvert;
int totedge;
int totloop;

View File

@ -749,7 +749,10 @@ static void sculpt_undo_geometry_store_data(SculptUndoNodeGeometry *geometry, Ob
CustomData_copy(&mesh->edata, &geometry->edata, CD_MASK_MESH.emask, mesh->totedge);
CustomData_copy(&mesh->ldata, &geometry->ldata, CD_MASK_MESH.lmask, mesh->totloop);
CustomData_copy(&mesh->pdata, &geometry->pdata, CD_MASK_MESH.pmask, mesh->totpoly);
geometry->poly_offset_indices = static_cast<int *>(MEM_dupallocN(mesh->poly_offset_indices));
blender::implicit_sharing::copy_shared_pointer(mesh->poly_offset_indices,
mesh->runtime->poly_offsets_sharing_info,
&geometry->poly_offset_indices,
&geometry->poly_offsets_sharing_info);
geometry->totvert = mesh->totvert;
geometry->totedge = mesh->totedge;
@ -775,7 +778,10 @@ static void sculpt_undo_geometry_restore_data(SculptUndoNodeGeometry *geometry,
CustomData_copy(&geometry->edata, &mesh->edata, CD_MASK_MESH.emask, geometry->totedge);
CustomData_copy(&geometry->ldata, &mesh->ldata, CD_MASK_MESH.lmask, geometry->totloop);
CustomData_copy(&geometry->pdata, &mesh->pdata, CD_MASK_MESH.pmask, geometry->totpoly);
mesh->poly_offset_indices = static_cast<int *>(MEM_dupallocN(geometry->poly_offset_indices));
blender::implicit_sharing::copy_shared_pointer(geometry->poly_offset_indices,
geometry->poly_offsets_sharing_info,
&mesh->poly_offset_indices,
&mesh->runtime->poly_offsets_sharing_info);
}
static void sculpt_undo_geometry_free_data(SculptUndoNodeGeometry *geometry)
@ -792,7 +798,8 @@ static void sculpt_undo_geometry_free_data(SculptUndoNodeGeometry *geometry)
if (geometry->totpoly) {
CustomData_free(&geometry->pdata, geometry->totpoly);
}
MEM_SAFE_FREE(geometry->poly_offset_indices);
blender::implicit_sharing::free_shared_data(&geometry->poly_offset_indices,
&geometry->poly_offsets_sharing_info);
}
static void sculpt_undo_geometry_restore(SculptUndoNode *unode, Object *object)

View File

@ -107,6 +107,9 @@ typedef struct CurvesGeometry {
* Every curve offset must be at least one larger than the previous. In other words, every curve
* must have at least one point. The first value is 0 and the last value is #point_num.
*
* This array is shared based on the bke::CurvesGeometryRuntime::curve_offsets_sharing_info.
* Avoid accessing directly when possible.
*
* \note This is *not* stored as an attribute because its size is one larger than #curve_num.
*/
int *curve_offsets;

View File

@ -78,6 +78,9 @@ typedef struct Mesh {
/**
* Array owned by mesh. May be null if there are no polygons. Index of the first corner of each
* polygon, with the total number of corners at the end. See #Mesh::polys() and #OffsetIndices.
*
* This array is shared based on the bke::MeshRuntime::poly_offsets_sharing_info.
* Avoid accessing directly when possible.
*/
int *poly_offset_indices;

View File

@ -77,6 +77,18 @@ static void rna_Curves_curve_offset_data_begin(CollectionPropertyIterator *iter,
NULL);
}
/* Random-access lookup callback for the `curve_offset_data` collection: resolve the
 * offset value at `index` into an #IntAttributeValue RNA pointer. Returns true on
 * success, false when `index` is out of range. */
static int rna_Curves_curve_offset_data_lookup_int(PointerRNA *ptr, int index, PointerRNA *r_ptr)
{
  Curves *curves = rna_curves(ptr);
  /* There is one more offset than there are curves (the trailing total point count). */
  const int offsets_num = curves->geometry.curve_num + 1;
  if (index >= 0 && index < offsets_num) {
    r_ptr->owner_id = &curves->id;
    r_ptr->type = &RNA_IntAttributeValue;
    r_ptr->data = &ED_curves_offsets_for_write(curves)[index];
    return true;
  }
  return false;
}
static float (*get_curves_positions(Curves *curves))[3]
{
return (float(*)[3])CustomData_get_layer_named_for_write(
@ -97,6 +109,35 @@ static int rna_CurvePoint_index_get_const(const PointerRNA *ptr)
return (int)(co - positions);
}
/* Iterator setup callback for the `curves` collection: each item is a #CurveSlice
 * backed by that curve's entry in the offsets array. */
static void rna_Curves_curves_begin(CollectionPropertyIterator *iter, PointerRNA *ptr)
{
  Curves *curves = rna_curves(ptr);
  int *offsets = ED_curves_offsets_for_write(curves);
  const int curves_num = curves->geometry.curve_num;
  rna_iterator_array_begin(iter, offsets, sizeof(int), curves_num, false, NULL);
}
/* Length callback for the `curves` collection: one item per curve. */
static int rna_Curves_curves_length(PointerRNA *ptr)
{
  return rna_curves(ptr)->geometry.curve_num;
}
/* Random-access lookup callback for the `curves` collection: return the #CurveSlice
 * for the curve at `index`, pointing at its start offset. Returns true on success,
 * false when `index` is out of range. */
static int rna_Curves_curves_lookup_int(PointerRNA *ptr, int index, PointerRNA *r_ptr)
{
  Curves *curves = rna_curves(ptr);
  if (index >= 0 && index < curves->geometry.curve_num) {
    r_ptr->owner_id = &curves->id;
    r_ptr->type = &RNA_CurveSlice;
    r_ptr->data = &ED_curves_offsets_for_write(curves)[index];
    return true;
  }
  return false;
}
static int rna_Curves_position_data_length(PointerRNA *ptr)
{
const Curves *curves = rna_curves(ptr);
@ -343,7 +384,15 @@ static void rna_def_curves(BlenderRNA *brna)
/* Point and Curve RNA API helpers. */
prop = RNA_def_property(srna, "curves", PROP_COLLECTION, PROP_NONE);
RNA_def_property_collection_sdna(prop, NULL, "geometry.curve_offsets", "geometry.curve_num");
RNA_def_property_collection_funcs(prop,
"rna_Curves_curves_begin",
"rna_iterator_array_next",
"rna_iterator_array_end",
"rna_iterator_array_get",
"rna_Curves_curves_length",
"rna_Curves_curves_lookup_int",
NULL,
NULL);
RNA_def_property_struct_type(prop, "CurveSlice");
RNA_def_property_ui_text(prop, "Curves", "All curves in the data-block");
@ -376,7 +425,6 @@ static void rna_def_curves(BlenderRNA *brna)
RNA_def_property_update(prop, 0, "rna_Curves_update_data");
prop = RNA_def_property(srna, "curve_offset_data", PROP_COLLECTION, PROP_NONE);
RNA_def_property_collection_sdna(prop, NULL, "geometry.curve_offsets", NULL);
RNA_def_property_struct_type(prop, "IntAttributeValue");
RNA_def_property_collection_funcs(prop,
"rna_Curves_curve_offset_data_begin",
@ -384,7 +432,7 @@ static void rna_def_curves(BlenderRNA *brna)
"rna_iterator_array_end",
"rna_iterator_array_get",
"rna_Curves_curve_offset_data_length",
NULL,
"rna_Curves_curve_offset_data_lookup_int",
NULL,
NULL);
RNA_def_property_update(prop, 0, "rna_Curves_update_data");

View File

@ -118,9 +118,13 @@ static void expand_mesh(Mesh &mesh,
const int old_polys_num = mesh.totpoly;
mesh.totpoly += poly_expand;
CustomData_realloc(&mesh.pdata, old_polys_num, mesh.totpoly);
mesh.poly_offset_indices = static_cast<int *>(
MEM_reallocN(mesh.poly_offset_indices, sizeof(int) * (mesh.totpoly + 1)));
mesh.poly_offsets_for_write().last() = mesh.totloop + loop_expand;
implicit_sharing::resize_trivial_array(&mesh.poly_offset_indices,
&mesh.runtime->poly_offsets_sharing_info,
old_polys_num == 0 ? 0 : (old_polys_num + 1),
mesh.totpoly + 1);
/* Set common values for convenience. */
mesh.poly_offset_indices[0] = 0;
mesh.poly_offset_indices[mesh.totpoly] = mesh.totloop + loop_expand;
}
if (loop_expand != 0) {
CustomData_free_layers(&mesh.ldata, CD_NORMAL, mesh.totloop);