Cleanup: Use std::mutex for mesh runtime mutexes

Instead of allocating three separate ThreadMutex pointers,
just embed std::mutex into the struct directly.
2022-10-12 22:31:02 -05:00
parent 375dafe3ef
commit c67e5628d2
13 changed files with 53 additions and 104 deletions
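The change itself is mechanical; the pattern it applies is sketched below with simplified stand-in types (names like MeshRuntimeSketch and ensure_mesh_eval are made up for illustration and are not Blender's real Mesh/MeshRuntime definitions). The heap-allocated ThreadMutex behind a void * pointer, with its manual BLI_mutex_init/lock/unlock/end calls, becomes a plain std::mutex member locked through std::lock_guard, so the runtime struct's constructor and destructor can be defaulted.

#include <mutex>

struct Mesh; /* Opaque stand-in for the evaluated mesh. */

struct MeshRuntimeSketch {
  /* Previously: `void *eval_mutex` pointing at a heap-allocated ThreadMutex,
   * created in the constructor and torn down in the destructor.
   * Now the mutex is an ordinary member, so both can be defaulted. */
  std::mutex eval_mutex;
  Mesh *mesh_eval = nullptr;

  MeshRuntimeSketch() = default;
  ~MeshRuntimeSketch() = default;
};

/* Lazy, thread-safe evaluation in the style of BKE_mesh_runtime_looptri_ensure():
 * the lock_guard replaces the explicit BLI_mutex_lock()/BLI_mutex_unlock() pair
 * and releases the mutex on every return path. */
static Mesh *ensure_mesh_eval(MeshRuntimeSketch &runtime, Mesh *(*evaluate)())
{
  std::lock_guard lock{runtime.eval_mutex};
  if (runtime.mesh_eval == nullptr) {
    runtime.mesh_eval = evaluate();
  }
  return runtime.mesh_eval;
}

One consequence of embedding the mutex is that the struct is no longer trivially copyable or zero-initializable, which is why the draw code below also switches the curves batch cache from MEM_cnew plus memset to MEM_new/MEM_delete.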

View File

@@ -10,6 +10,10 @@
#include "BLI_kdopbvh.h" #include "BLI_kdopbvh.h"
#include "BLI_threads.h" #include "BLI_threads.h"
#ifdef __cplusplus
# include <mutex>
#endif
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
@@ -196,6 +200,8 @@ BVHTree *BKE_bvhtree_from_mesh_get(struct BVHTreeFromMesh *data,
 BVHCacheType bvh_cache_type,
 int tree_type);
+#ifdef __cplusplus
 /**
  * Builds or queries a BVH-cache for the cache BVH-tree of the request type.
  */
@@ -204,7 +210,9 @@ BVHTree *BKE_bvhtree_from_editmesh_get(BVHTreeFromEditMesh *data,
 int tree_type,
 BVHCacheType bvh_cache_type,
 struct BVHCache **bvh_cache_p,
-ThreadMutex *mesh_eval_mutex);
+std::mutex *mesh_eval_mutex);
+#endif
 /**
  * Frees data allocated by a call to `bvhtree_from_editmesh_*`.

View File

@@ -9,6 +9,8 @@
 #ifdef __cplusplus
+# include <mutex>
 # include "BLI_span.hh"
 # include "DNA_customdata_types.h"
@@ -73,14 +75,14 @@ struct MeshRuntime {
  * This mesh is used as a result of modifier stack evaluation.
  * Since modifier stack evaluation is threaded on object level we need some synchronization. */
 Mesh *mesh_eval = nullptr;
-void *eval_mutex = nullptr;
+std::mutex eval_mutex;
 /* A separate mutex is needed for normal calculation, because sometimes
  * the normals are needed while #eval_mutex is already locked. */
-void *normals_mutex = nullptr;
+std::mutex normals_mutex;
 /** Needed to ensure some thread-safety during render data pre-processing. */
-void *render_mutex = nullptr;
+std::mutex render_mutex;
 /** Lazily initialized SoA data from the #edit_mesh field in #Mesh. */
 EditMeshData *edit_data = nullptr;
@@ -148,9 +150,9 @@ struct MeshRuntime {
  */
 uint32_t *subsurf_face_dot_tags = nullptr;
-MeshRuntime();
+MeshRuntime() = default;
 /** \warning This does not free all data currently. See #BKE_mesh_runtime_free_data. */
-~MeshRuntime();
+~MeshRuntime() = default;
 MEM_CXX_CLASS_ALLOC_FUNCS("MeshRuntime")
 };

View File

@@ -1123,8 +1123,7 @@ static void mesh_calc_modifiers(struct Depsgraph *depsgraph,
 else {
 blender::bke::MeshRuntime *runtime = mesh_input->runtime;
 if (runtime->mesh_eval == nullptr) {
-BLI_assert(runtime->eval_mutex != nullptr);
-BLI_mutex_lock((ThreadMutex *)runtime->eval_mutex);
+std::lock_guard lock{mesh_input->runtime->eval_mutex};
 if (runtime->mesh_eval == nullptr) {
 /* Not yet finalized by any instance, do it now
  * Isolate since computing normals is multithreaded and we are holding a lock. */
@@ -1140,7 +1139,6 @@ static void mesh_calc_modifiers(struct Depsgraph *depsgraph,
 /* Already finalized by another instance, reuse. */
 mesh_final = runtime->mesh_eval;
 }
-BLI_mutex_unlock((ThreadMutex *)runtime->eval_mutex);
 }
 else if (!mesh_has_modifier_final_normals(mesh_input, &final_datamask, runtime->mesh_eval)) {
 /* Modifier stack was (re-)evaluated with a request for additional normals

View File

@@ -51,13 +51,13 @@ struct BVHCache {
  * When the `r_locked` is filled and the tree could not be found the caches mutex will be
  * locked. This mutex can be unlocked by calling `bvhcache_unlock`.
  *
- * When `r_locked` is used the `mesh_eval_mutex` must contain the `Mesh_Runtime.eval_mutex`.
+ * When `r_locked` is used the `mesh_eval_mutex` must contain the `MeshRuntime.eval_mutex`.
  */
 static bool bvhcache_find(BVHCache **bvh_cache_p,
 BVHCacheType type,
 BVHTree **r_tree,
 bool *r_locked,
-ThreadMutex *mesh_eval_mutex)
+std::mutex *mesh_eval_mutex)
 {
 bool do_lock = r_locked;
 if (r_locked) {
@@ -69,11 +69,10 @@ static bool bvhcache_find(BVHCache **bvh_cache_p,
 return false;
 }
 /* Lazy initialization of the bvh_cache using the `mesh_eval_mutex`. */
-BLI_mutex_lock(mesh_eval_mutex);
+std::lock_guard lock{*mesh_eval_mutex};
 if (*bvh_cache_p == nullptr) {
 *bvh_cache_p = bvhcache_init();
 }
-BLI_mutex_unlock(mesh_eval_mutex);
 }
 BVHCache *bvh_cache = *bvh_cache_p;
@@ -1223,7 +1222,6 @@ BVHTree *BKE_bvhtree_from_mesh_get(struct BVHTreeFromMesh *data,
 const int tree_type)
 {
 BVHCache **bvh_cache_p = (BVHCache **)&mesh->runtime->bvh_cache;
-ThreadMutex *mesh_eval_mutex = (ThreadMutex *)mesh->runtime->eval_mutex;
 const MLoopTri *looptri = nullptr;
 int looptri_len = 0;
@@ -1248,7 +1246,7 @@ BVHTree *BKE_bvhtree_from_mesh_get(struct BVHTreeFromMesh *data,
 bool lock_started = false;
 data->cached = bvhcache_find(
-bvh_cache_p, bvh_cache_type, &data->tree, &lock_started, mesh_eval_mutex);
+bvh_cache_p, bvh_cache_type, &data->tree, &lock_started, &mesh->runtime->eval_mutex);
 if (data->cached) {
 BLI_assert(lock_started == false);
@@ -1352,7 +1350,7 @@ BVHTree *BKE_bvhtree_from_editmesh_get(BVHTreeFromEditMesh *data,
 const int tree_type,
 const BVHCacheType bvh_cache_type,
 BVHCache **bvh_cache_p,
-ThreadMutex *mesh_eval_mutex)
+std::mutex *mesh_eval_mutex)
 {
 bool lock_started = false;

View File

@@ -356,11 +356,9 @@ const float (*BKE_mesh_vertex_normals_ensure(const Mesh *mesh))[3]
 return nullptr;
 }
-ThreadMutex *normals_mutex = (ThreadMutex *)mesh->runtime->normals_mutex;
-BLI_mutex_lock(normals_mutex);
+std::lock_guard lock{mesh->runtime->normals_mutex};
 if (!BKE_mesh_vertex_normals_are_dirty(mesh)) {
 BLI_assert(mesh->runtime->vert_normals != nullptr);
-BLI_mutex_unlock(normals_mutex);
 return mesh->runtime->vert_normals;
 }
@@ -390,7 +388,6 @@ const float (*BKE_mesh_vertex_normals_ensure(const Mesh *mesh))[3]
 BKE_mesh_poly_normals_clear_dirty(&mesh_mutable);
 });
-BLI_mutex_unlock(normals_mutex);
 return vert_normals;
 }
@@ -405,11 +402,9 @@ const float (*BKE_mesh_poly_normals_ensure(const Mesh *mesh))[3]
 return nullptr;
 }
-ThreadMutex *normals_mutex = (ThreadMutex *)mesh->runtime->normals_mutex;
-BLI_mutex_lock(normals_mutex);
+std::lock_guard lock{mesh->runtime->normals_mutex};
 if (!BKE_mesh_poly_normals_are_dirty(mesh)) {
 BLI_assert(mesh->runtime->poly_normals != nullptr);
-BLI_mutex_unlock(normals_mutex);
 return mesh->runtime->poly_normals;
 }
@@ -435,7 +430,6 @@ const float (*BKE_mesh_poly_normals_ensure(const Mesh *mesh))[3]
 BKE_mesh_poly_normals_clear_dirty(&mesh_mutable);
 });
-BLI_mutex_unlock(normals_mutex);
 return poly_normals;
 }

View File

@@ -36,39 +36,6 @@ void BKE_mesh_runtime_free_data(Mesh *mesh)
 BKE_mesh_runtime_clear_cache(mesh);
 }
-namespace blender::bke {
-MeshRuntime::MeshRuntime()
-{
-this->eval_mutex = MEM_new<ThreadMutex>("mesh runtime eval_mutex");
-BLI_mutex_init(static_cast<ThreadMutex *>(this->eval_mutex));
-this->normals_mutex = MEM_new<ThreadMutex>("mesh runtime normals_mutex");
-BLI_mutex_init(static_cast<ThreadMutex *>(this->normals_mutex));
-this->render_mutex = MEM_new<ThreadMutex>("mesh runtime render_mutex");
-BLI_mutex_init(static_cast<ThreadMutex *>(this->render_mutex));
-}
-MeshRuntime::~MeshRuntime()
-{
-if (this->eval_mutex != nullptr) {
-BLI_mutex_end(static_cast<ThreadMutex *>(this->eval_mutex));
-MEM_freeN(this->eval_mutex);
-this->eval_mutex = nullptr;
-}
-if (this->normals_mutex != nullptr) {
-BLI_mutex_end(static_cast<ThreadMutex *>(this->normals_mutex));
-MEM_freeN(this->normals_mutex);
-this->normals_mutex = nullptr;
-}
-if (this->render_mutex != nullptr) {
-BLI_mutex_end(static_cast<ThreadMutex *>(this->render_mutex));
-MEM_freeN(this->render_mutex);
-this->render_mutex = nullptr;
-}
-}
-} // namespace blender::bke
 void BKE_mesh_runtime_clear_cache(Mesh *mesh)
 {
 if (mesh->runtime->mesh_eval != nullptr) {
@@ -166,8 +133,7 @@ int BKE_mesh_runtime_looptri_len(const Mesh *mesh)
 const MLoopTri *BKE_mesh_runtime_looptri_ensure(const Mesh *mesh)
 {
-ThreadMutex *mesh_eval_mutex = (ThreadMutex *)mesh->runtime->eval_mutex;
-BLI_mutex_lock(mesh_eval_mutex);
+std::lock_guard lock{mesh->runtime->eval_mutex};
 MLoopTri *looptri = mesh->runtime->looptris.array;
@@ -181,8 +147,6 @@ const MLoopTri *BKE_mesh_runtime_looptri_ensure(const Mesh *mesh)
 looptri = mesh->runtime->looptris.array;
 }
-BLI_mutex_unlock(mesh_eval_mutex);
 return looptri;
 }

View File

@@ -95,11 +95,8 @@ Mesh *BKE_mesh_wrapper_from_editmesh(BMEditMesh *em,
 void BKE_mesh_wrapper_ensure_mdata(Mesh *me)
 {
-ThreadMutex *mesh_eval_mutex = (ThreadMutex *)me->runtime->eval_mutex;
-BLI_mutex_lock(mesh_eval_mutex);
+std::lock_guard lock{me->runtime->eval_mutex};
 if (me->runtime->wrapper_type == ME_WRAPPER_TYPE_MDATA) {
-BLI_mutex_unlock(mesh_eval_mutex);
 return;
 }
@@ -149,8 +146,6 @@ void BKE_mesh_wrapper_ensure_mdata(Mesh *me)
  * the underlying data has been initialized. */
 me->runtime->wrapper_type = ME_WRAPPER_TYPE_MDATA;
 });
-BLI_mutex_unlock(mesh_eval_mutex);
 }
 bool BKE_mesh_wrapper_minmax(const Mesh *me, float min[3], float max[3])
@@ -371,11 +366,9 @@ static Mesh *mesh_wrapper_ensure_subdivision(Mesh *me)
 Mesh *BKE_mesh_wrapper_ensure_subdivision(Mesh *me)
 {
-ThreadMutex *mesh_eval_mutex = (ThreadMutex *)me->runtime->eval_mutex;
-BLI_mutex_lock(mesh_eval_mutex);
+std::lock_guard lock{me->runtime->eval_mutex};
 if (me->runtime->wrapper_type == ME_WRAPPER_TYPE_SUBD) {
-BLI_mutex_unlock(mesh_eval_mutex);
 return me->runtime->mesh_eval;
 }
@@ -384,7 +377,6 @@ Mesh *BKE_mesh_wrapper_ensure_subdivision(Mesh *me)
 /* Must isolate multithreaded tasks while holding a mutex lock. */
 blender::threading::isolate_task([&]() { result = mesh_wrapper_ensure_subdivision(me); });
-BLI_mutex_unlock(mesh_eval_mutex);
 return result;
 }

View File

@@ -373,10 +373,8 @@ void BKE_object_select_update(Depsgraph *depsgraph, Object *object)
 DEG_debug_print_eval(depsgraph, __func__, object->id.name, object);
 if (object->type == OB_MESH && !object->runtime.is_data_eval_owned) {
 Mesh *mesh_input = (Mesh *)object->runtime.data_orig;
-blender::bke::MeshRuntime *mesh_runtime = mesh_input->runtime;
-BLI_mutex_lock(static_cast<ThreadMutex *>(mesh_runtime->eval_mutex));
+std::lock_guard lock{mesh_input->runtime->eval_mutex};
 BKE_object_data_select_update(depsgraph, static_cast<ID *>(object->data));
-BLI_mutex_unlock(static_cast<ThreadMutex *>(mesh_runtime->eval_mutex));
 }
 else {
 BKE_object_data_select_update(depsgraph, static_cast<ID *>(object->data));

View File

@@ -44,13 +44,10 @@ void drw_attributes_clear(DRW_Attributes *attributes)
 memset(attributes, 0, sizeof(DRW_Attributes));
 }
-void drw_attributes_merge(DRW_Attributes *dst,
-const DRW_Attributes *src,
-ThreadMutex *render_mutex)
+void drw_attributes_merge(DRW_Attributes *dst, const DRW_Attributes *src, std::mutex &render_mutex)
 {
-BLI_mutex_lock(render_mutex);
+std::lock_guard lock{render_mutex};
 drw_attributes_merge_requests(src, dst);
-BLI_mutex_unlock(render_mutex);
 }
 bool drw_attributes_overlap(const DRW_Attributes *a, const DRW_Attributes *b)

View File

@@ -9,6 +9,10 @@
 #pragma once
+#ifdef __cplusplus
+# include <mutex>
+#endif
 #include "DNA_customdata_types.h"
 #include "DNA_meshdata_types.h"
@@ -56,9 +60,11 @@ BLI_STATIC_ASSERT(sizeof(DRW_MeshCDMask) <= sizeof(uint32_t), "DRW_MeshCDMask ex
 void drw_attributes_clear(DRW_Attributes *attributes);
+#ifdef __cplusplus
 void drw_attributes_merge(DRW_Attributes *dst,
 const DRW_Attributes *src,
-ThreadMutex *render_mutex);
+std::mutex &render_mutex);
+#endif
 /* Return true if all requests in b are in a. */
 bool drw_attributes_overlap(const DRW_Attributes *a, const DRW_Attributes *b);

View File

@@ -60,7 +60,7 @@ struct CurvesBatchCache {
  * some locking would be necessary because multiple objects can use the same curves data with
  * different materials, etc. This is a placeholder to make multi-threading easier in the future.
  */
-ThreadMutex render_mutex;
+std::mutex render_mutex;
 };
 static bool curves_batch_cache_valid(const Curves &curves)
@@ -74,15 +74,13 @@ static void curves_batch_cache_init(Curves &curves)
 CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves.batch_cache);
 if (!cache) {
-cache = MEM_cnew<CurvesBatchCache>(__func__);
+cache = MEM_new<CurvesBatchCache>(__func__);
 curves.batch_cache = cache;
 }
 else {
-memset(cache, 0, sizeof(*cache));
+cache = {};
 }
-BLI_mutex_init(&cache->render_mutex);
 cache->is_dirty = false;
 }
@@ -172,9 +170,8 @@ void DRW_curves_batch_cache_dirty_tag(Curves *curves, int mode)
 void DRW_curves_batch_cache_free(Curves *curves)
 {
 curves_batch_cache_clear(*curves);
-CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves->batch_cache);
-BLI_mutex_end(&cache->render_mutex);
-MEM_SAFE_FREE(curves->batch_cache);
+MEM_delete(static_cast<CurvesBatchCache *>(curves->batch_cache));
+curves->batch_cache = nullptr;
 }
 void DRW_curves_batch_cache_free_old(Curves *curves, int ctime)
@@ -554,7 +551,6 @@ static bool curves_ensure_attributes(const Curves &curves,
 GPUMaterial *gpu_material,
 int subdiv)
 {
-ThreadMutex *render_mutex = &cache.render_mutex;
 const CustomData *cd_curve = &curves.geometry.curve_data;
 const CustomData *cd_point = &curves.geometry.point_data;
 CurvesEvalFinalCache &final_cache = cache.curves_cache.final[subdiv];
@@ -588,9 +584,9 @@ static bool curves_ensure_attributes(const Curves &curves,
 GPU_VERTBUF_DISCARD_SAFE(cache.curves_cache.proc_attributes_buf[i]);
 DRW_TEXTURE_FREE_SAFE(cache.curves_cache.proc_attributes_tex[i]);
 }
-drw_attributes_merge(&final_cache.attr_used, &attrs_needed, render_mutex);
+drw_attributes_merge(&final_cache.attr_used, &attrs_needed, cache.render_mutex);
 }
-drw_attributes_merge(&final_cache.attr_used_over_time, &attrs_needed, render_mutex);
+drw_attributes_merge(&final_cache.attr_used_over_time, &attrs_needed, cache.render_mutex);
 }
 bool need_tf_update = false;
@@ -689,7 +685,7 @@ static void request_attribute(Curves &curves, const char *name)
 drw_attributes_add_request(
 &attributes, name, type, CustomData_get_named_layer(&custom_data, type, name), domain);
-drw_attributes_merge(&final_cache.attr_used, &attributes, &cache.render_mutex);
+drw_attributes_merge(&final_cache.attr_used, &attributes, cache.render_mutex);
 }
 GPUTexture **DRW_curves_texture_for_evaluated_attribute(Curves *curves,

View File

@@ -1017,8 +1017,7 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Object *object,
 BLI_assert(gpumat_array_len == cache->mat_len);
 mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
-ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime->render_mutex;
-drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
+drw_attributes_merge(&cache->attr_needed, &attrs_needed, me->runtime->render_mutex);
 mesh_batch_cache_request_surface_batches(cache);
 return cache->surface_per_mat;
 }
@@ -1046,8 +1045,7 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Object *object, Mesh *me)
 DRW_Attributes attrs_needed{};
 request_active_and_default_color_attributes(*object, *me, attrs_needed);
-ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime->render_mutex;
-drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
+drw_attributes_merge(&cache->attr_needed, &attrs_needed, me->runtime->render_mutex);
 mesh_batch_cache_request_surface_batches(cache);
 return cache->batch.surface;
@@ -1060,8 +1058,7 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(Object *object, Mesh *me)
 DRW_Attributes attrs_needed{};
 request_active_and_default_color_attributes(*object, *me, attrs_needed);
-ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime->render_mutex;
-drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
+drw_attributes_merge(&cache->attr_needed, &attrs_needed, me->runtime->render_mutex);
 mesh_batch_cache_request_surface_batches(cache);
 return cache->batch.surface;
@@ -1446,7 +1443,6 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
 }
 }
-ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime->render_mutex;
 /* Verify that all surface batches have needed attribute layers.
  */
@@ -1485,12 +1481,12 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
 cache->batch_ready &= ~(MBC_SURFACE);
 mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
-drw_attributes_merge(&cache->attr_used, &cache->attr_needed, mesh_render_mutex);
+drw_attributes_merge(&cache->attr_used, &cache->attr_needed, me->runtime->render_mutex);
 }
 mesh_cd_layers_type_merge(&cache->cd_used_over_time, cache->cd_needed);
 mesh_cd_layers_type_clear(&cache->cd_needed);
-drw_attributes_merge(&cache->attr_used_over_time, &cache->attr_needed, mesh_render_mutex);
+drw_attributes_merge(&cache->attr_used_over_time, &cache->attr_needed, me->runtime->render_mutex);
 drw_attributes_clear(&cache->attr_needed);
 }

View File

@@ -457,7 +457,7 @@ static BVHTreeFromEditMesh *snap_object_data_editmesh_treedata_get(SnapObjectCon
 4,
 BVHTREE_FROM_EM_LOOPTRI,
 &sod->mesh_runtime->bvh_cache,
-static_cast<ThreadMutex *>(sod->mesh_runtime->eval_mutex));
+&sod->mesh_runtime->eval_mutex);
 }
 }
 if (treedata == nullptr || treedata->tree == nullptr) {
@@ -2923,7 +2923,7 @@ static eSnapMode snapEditMesh(SnapObjectContext *sctx,
 2,
 BVHTREE_FROM_EM_VERTS,
 &sod->mesh_runtime->bvh_cache,
-(ThreadMutex *)sod->mesh_runtime->eval_mutex);
+&sod->mesh_runtime->eval_mutex);
 }
 sod->bvhtree[0] = treedata.tree;
 sod->cached[0] = treedata.cached;
@@ -2955,7 +2955,7 @@ static eSnapMode snapEditMesh(SnapObjectContext *sctx,
 2,
 BVHTREE_FROM_EM_EDGES,
 &sod->mesh_runtime->bvh_cache,
-static_cast<ThreadMutex *>(sod->mesh_runtime->eval_mutex));
+&sod->mesh_runtime->eval_mutex);
 }
 sod->bvhtree[1] = treedata.tree;
 sod->cached[1] = treedata.cached;