Fix: Uncached materials not being released #105795

Merged
Jeroen Bakker merged 2 commits from Jason-Fielder/blender:Fix_UncachedMaterialLeak into blender-v3.5-release 2023-03-16 08:19:43 +01:00
2 changed files with 20 additions and 8 deletions

View File

@@ -99,6 +99,8 @@ struct GPUPass {
   /** Hint that an optimized variant of this pass should be created based on a complexity heuristic
    * during pass code generation. */
   bool should_optimize;
+  /** Whether pass is in the GPUPass cache. */
+  bool cached;
 };

 /* -------------------------------------------------------------------- */
@@ -132,6 +134,7 @@ static GPUPass *gpu_pass_cache_lookup(uint32_t hash)
 static void gpu_pass_cache_insert_after(GPUPass *node, GPUPass *pass)
 {
   BLI_spin_lock(&pass_cache_spin);
+  pass->cached = true;
   if (node != nullptr) {
     /* Add after the first pass having the same hash. */
     pass->next = node->next;
@@ -775,6 +778,7 @@ GPUPass *GPU_generate_pass(GPUMaterial *material,
   pass->create_info = codegen.create_info;
   pass->hash = codegen.hash_get();
   pass->compiled = false;
+  pass->cached = false;
   /* Only flag pass optimization hint if this is the first generated pass for a material.
    * Optimized passes cannot be optimized further, even if the heuristic is still not
    * favorable. */
@@ -881,14 +885,6 @@ GPUShader *GPU_pass_shader_get(GPUPass *pass)
   return pass->shader;
 }

-void GPU_pass_release(GPUPass *pass)
-{
-  BLI_spin_lock(&pass_cache_spin);
-  BLI_assert(pass->refcount > 0);
-  pass->refcount--;
-  BLI_spin_unlock(&pass_cache_spin);
-}
-
 static void gpu_pass_free(GPUPass *pass)
 {
   BLI_assert(pass->refcount == 0);
@@ -899,6 +895,18 @@ static void gpu_pass_free(GPUPass *pass)
   MEM_freeN(pass);
 }

+void GPU_pass_release(GPUPass *pass)
+{
+  BLI_spin_lock(&pass_cache_spin);
+  BLI_assert(pass->refcount > 0);
+  pass->refcount--;
+  /* Un-cached passes will not be filtered by garbage collection, so release here. */
+  if (pass->refcount == 0 && !pass->cached) {
+    gpu_pass_free(pass);
+  }
+  BLI_spin_unlock(&pass_cache_spin);
+}
+
 void GPU_pass_cache_garbage_collect(void)
 {
   static int lasttime = 0;
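
For reference, the lifetime rule this change enforces: a pass that was never inserted into the pass cache must be freed by its last GPU_pass_release(), because GPU_pass_cache_garbage_collect() only walks the cache list and would never see it. Below is a minimal standalone sketch of that pattern; the names are illustrative and std::mutex stands in for BLI_spin_lock, so this is not Blender's actual API.

// Sketch of the "refcount + cached flag" lifetime rule (illustrative only).
#include <cassert>
#include <mutex>

struct Pass {
  int refcount = 1;
  bool cached = false; /* True only while the pass is in the cache list. */
  Pass *next = nullptr;
};

static std::mutex cache_mutex;
static Pass *cache_head = nullptr;

static void pass_free(Pass *pass)
{
  assert(pass->refcount == 0);
  delete pass;
}

void pass_cache_insert(Pass *pass)
{
  std::lock_guard<std::mutex> lock(cache_mutex);
  pass->cached = true; /* From now on, the garbage collector owns freeing. */
  pass->next = cache_head;
  cache_head = pass;
}

void pass_release(Pass *pass)
{
  std::lock_guard<std::mutex> lock(cache_mutex);
  assert(pass->refcount > 0);
  pass->refcount--;
  /* Un-cached passes are invisible to the garbage collector: without this
   * branch the last release would leak them (the bug this PR fixes). */
  if (pass->refcount == 0 && !pass->cached) {
    pass_free(pass);
  }
}

/* Minimal stand-in for GPU_pass_cache_garbage_collect(): free cached
 * passes whose refcount dropped to zero, unlink the rest untouched. */
void pass_cache_gc()
{
  std::lock_guard<std::mutex> lock(cache_mutex);
  Pass **prev = &cache_head;
  while (Pass *p = *prev) {
    if (p->refcount == 0) {
      *prev = p->next;
      pass_free(p);
    }
    else {
      prev = &p->next;
    }
  }
}

int main()
{
  Pass *uncached = new Pass();
  pass_release(uncached); /* refcount 1 -> 0, freed immediately (the fixed path). */

  Pass *cached = new Pass();
  pass_cache_insert(cached);
  pass_release(cached); /* refcount 0, but left for the garbage collector. */
  pass_cache_gc();      /* Frees it here. */
}

Note that, as in the patch, the free happens while the lock is still held: the refcount and the cached flag must be read under the same lock that cache insertion takes, otherwise a concurrent insert could mark the pass cached between the check and the free.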

View File

@@ -7,6 +7,10 @@
  * and texture2d types in metal).
  */

+/* Suppress unhelpful shader compiler warnings. */
+#pragma clang diagnostic ignored "-Wunused-variable"
+#pragma clang diagnostic ignored "-Wcomment"
+
 /* Base instance with offsets. */
 #define gpu_BaseInstance gl_BaseInstanceARB
 #define gpu_InstanceIndex (gl_InstanceID + gpu_BaseInstance)
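
For context on why these diagnostics are suppressed in the shader prologue rather than fixed at the source: node-based codegen splices many snippets into one translation unit, so unreferenced declarations and comments butting against other comments are expected output. A hedged illustration of what each warning fires on; this is hypothetical generated code, not from the PR:

/* Snippet A's trailing comment /* Snippet B's header comment */
/* ^ a "/*" sequence inside a block comment is what -Wcomment flags. */

void generated_node_stub()
{
  /* Codegen emits every declared socket, even ones no link reads. */
  float unused_socket = 0.0f; /* -Wunused-variable fires here. */
}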