GPUMaterial: Rework the deferred compilation to not use double locks
This uses a reference counter instead of two thread mutexes. This should be more robust and avoid use-after-free situations. Also remove redundant structures and stop using the scene as the job owner.
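For context, the lifetime scheme introduced by this commit is the usual atomic reference-count pattern: the owner holds one reference, the compilation worker takes an extra one while it uses the material, and whichever side drops the count to zero performs the actual free. Below is a minimal, self-contained sketch of that pattern using plain C11 atomics; the RefCounted type and obj_* names are illustrative only and are not part of Blender's API.

#include <stdatomic.h>
#include <stdlib.h>

typedef struct RefCounted {
  atomic_uint refcount; /* Starts at 1: the creating owner holds one reference. */
  /* ... payload ... */
} RefCounted;

static RefCounted *obj_new(void)
{
  RefCounted *obj = calloc(1, sizeof(*obj));
  atomic_init(&obj->refcount, 1);
  return obj;
}

static void obj_acquire(RefCounted *obj)
{
  /* Take an extra reference before using the object on another thread. */
  atomic_fetch_add_explicit(&obj->refcount, 1, memory_order_relaxed);
}

static void obj_release(RefCounted *obj)
{
  /* The thread that drops the last reference frees the object, so a worker
   * that acquired it before compiling can never see it freed under it. */
  if (atomic_fetch_sub_explicit(&obj->refcount, 1, memory_order_acq_rel) == 1) {
    free(obj);
  }
}

In the diff below, GPU_material_acquire()/GPU_material_release() play the roles of obj_acquire()/obj_release(): the worker takes a reference under the spin lock before compiling, which is why the second mutex that previously guarded the material being compiled is no longer needed.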
@@ -9,6 +9,8 @@
 #include "DNA_object_types.h"
 #include "DNA_world_types.h"
 
+#include "PIL_time.h"
+
 #include "BLI_dynstr.h"
 #include "BLI_listbase.h"
 #include "BLI_string_utils.h"
@@ -48,48 +50,22 @@ extern char datatoc_common_fullscreen_vert_glsl[];
  *
  * \{ */
 
-typedef struct DRWDeferredShader {
-  struct DRWDeferredShader *prev, *next;
-
-  GPUMaterial *mat;
-} DRWDeferredShader;
-
 typedef struct DRWShaderCompiler {
-  ListBase queue; /* DRWDeferredShader */
-  ListBase queue_conclude; /* DRWDeferredShader */
+  ListBase queue; /* GPUMaterial */
   SpinLock list_lock;
 
-  DRWDeferredShader *mat_compiling;
-  ThreadMutex compilation_lock;
-
   void *gl_context;
   GPUContext *gpu_context;
   bool own_context;
-
-  int shaders_done; /* To compute progress. */
 } DRWShaderCompiler;
 
-static void drw_deferred_shader_free(DRWDeferredShader *dsh)
-{
-  /* Make sure it is not queued before freeing. */
-  MEM_freeN(dsh);
-}
-
-static void drw_deferred_shader_queue_free(ListBase *queue)
-{
-  DRWDeferredShader *dsh;
-  while ((dsh = BLI_pophead(queue))) {
-    drw_deferred_shader_free(dsh);
-  }
-}
-
 static void drw_deferred_shader_compilation_exec(
     void *custom_data,
     /* Cannot be const, this function implements wm_jobs_start_callback.
      * NOLINTNEXTLINE: readability-non-const-parameter. */
     short *stop,
-    short *do_update,
-    float *progress)
+    short *UNUSED(do_update),
+    float *UNUSED(progress))
 {
   GPU_render_begin();
   DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
@@ -109,50 +85,36 @@ static void drw_deferred_shader_compilation_exec(
   GPU_context_active_set(gpu_context);
 
   while (true) {
-    BLI_spin_lock(&comp->list_lock);
-
     if (*stop != 0) {
       /* We don't want user to be able to cancel the compilation
        * but wm can kill the task if we are closing blender. */
-      BLI_spin_unlock(&comp->list_lock);
       break;
     }
 
+    BLI_spin_lock(&comp->list_lock);
     /* Pop tail because it will be less likely to lock the main thread
      * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
-    comp->mat_compiling = BLI_poptail(&comp->queue);
-    if (comp->mat_compiling == NULL) {
-      /* No more Shader to compile. */
-      BLI_spin_unlock(&comp->list_lock);
-      break;
+    LinkData *link = (LinkData *)BLI_poptail(&comp->queue);
+    GPUMaterial *mat = link ? (GPUMaterial *)link->data : NULL;
+    if (mat) {
+      /* Avoid another thread freeing the material mid compilation. */
+      GPU_material_acquire(mat);
     }
-
-    comp->shaders_done++;
-    int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;
-
-    BLI_mutex_lock(&comp->compilation_lock);
     BLI_spin_unlock(&comp->list_lock);
 
-    /* Do the compilation. */
-    GPU_material_compile(comp->mat_compiling->mat);
-
-    *progress = (float)comp->shaders_done / (float)total;
-    *do_update = true;
+    if (mat) {
+      /* Do the compilation. */
+      GPU_material_compile(mat);
+      GPU_material_release(mat);
+      MEM_freeN(link);
+    }
+    else {
+      break;
+    }
 
     if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_ANY, GPU_DRIVER_ANY, GPU_BACKEND_OPENGL)) {
       GPU_flush();
     }
-    BLI_mutex_unlock(&comp->compilation_lock);
-
-    BLI_spin_lock(&comp->list_lock);
-    if (GPU_material_status(comp->mat_compiling->mat) == GPU_MAT_QUEUED) {
-      BLI_addtail(&comp->queue_conclude, comp->mat_compiling);
-    }
-    else {
-      drw_deferred_shader_free(comp->mat_compiling);
-    }
-    comp->mat_compiling = NULL;
-    BLI_spin_unlock(&comp->list_lock);
   }
 
   GPU_context_active_set(NULL);
@@ -167,21 +129,9 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
 {
   DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
 
-  drw_deferred_shader_queue_free(&comp->queue);
-
-  if (!BLI_listbase_is_empty(&comp->queue_conclude)) {
-    /* Compile the shaders in the context they will be deleted. */
-    DRW_opengl_context_enable_ex(false);
-    DRWDeferredShader *mat_conclude;
-    while ((mat_conclude = BLI_poptail(&comp->queue_conclude))) {
-      GPU_material_compile(mat_conclude->mat);
-      drw_deferred_shader_free(mat_conclude);
-    }
-    DRW_opengl_context_disable_ex(true);
-  }
-
-  BLI_spin_end(&comp->list_lock);
-  BLI_mutex_end(&comp->compilation_lock);
+  BLI_spin_lock(&comp->list_lock);
+  BLI_freelistN(&comp->queue);
+  BLI_spin_unlock(&comp->list_lock);
 
   if (comp->own_context) {
     /* Only destroy if the job owns the context. */
@@ -198,40 +148,48 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
 
 static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
 {
-  /* Do not defer the compilation if we are rendering for image.
-   * deferred rendering is only possible when `evil_C` is available */
-  if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION ||
-      !deferred) {
-    /* Double checking that this GPUMaterial is not going to be
-     * compiled by another thread. */
-    DRW_deferred_shader_remove(mat);
-    GPU_material_compile(mat);
+  if (ELEM(GPU_material_status(mat), GPU_MAT_SUCCESS, GPU_MAT_FAILED)) {
     return;
   }
+  /* Do not defer the compilation if we are rendering for image.
+   * deferred rendering is only possible when `evil_C` is available */
+  if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
+    deferred = false;
+  }
+
+  if (!deferred) {
+    DRW_deferred_shader_remove(mat);
+    /* Shaders could already be compiling. Have to wait for compilation to finish. */
+    while (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+      PIL_sleep_ms(20);
+    }
+    if (GPU_material_status(mat) == GPU_MAT_CREATED) {
+      GPU_material_compile(mat);
+    }
+    return;
+  }
+
+  /* Don't add material to the queue twice. */
+  if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+    return;
+  }
 
   const bool use_main_context = GPU_use_main_context_workaround();
   const bool job_own_context = !use_main_context;
 
-  DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
-
-  dsh->mat = mat;
-
   BLI_assert(DST.draw_ctx.evil_C);
   wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
   wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);
 
-  /* Use original scene ID since this is what the jobs template tests for. */
-  Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
-
   /* Get the running job or a new one if none is running. Can only have one job per type & owner.
    */
   wmJob *wm_job = WM_jobs_get(
-      wm, win, scene, "Shaders Compilation", WM_JOB_PROGRESS, WM_JOB_TYPE_SHADER_COMPILATION);
+      wm, win, wm, "Shaders Compilation", 0, WM_JOB_TYPE_SHADER_COMPILATION);
 
   DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
 
   DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
   BLI_spin_init(&comp->list_lock);
-  BLI_mutex_init(&comp->compilation_lock);
 
   if (old_comp) {
     BLI_spin_lock(&old_comp->list_lock);
@@ -246,7 +204,9 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
     }
   }
 
-  BLI_addtail(&comp->queue, dsh);
+  GPU_material_status_set(mat, GPU_MAT_QUEUED);
+  LinkData *node = BLI_genericNodeN(mat);
+  BLI_addtail(&comp->queue, node);
 
   /* Create only one context. */
   if (comp->gl_context == NULL) {
@@ -277,38 +237,26 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
 
 void DRW_deferred_shader_remove(GPUMaterial *mat)
 {
-  Scene *scene = GPU_material_scene(mat);
-
   for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
-    if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
+    if (WM_jobs_test(wm, wm, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
       /* No job running, do not create a new one by calling WM_jobs_get. */
       continue;
     }
     LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
       wmJob *wm_job = WM_jobs_get(
-          wm, win, scene, "Shaders Compilation", WM_JOB_PROGRESS, WM_JOB_TYPE_SHADER_COMPILATION);
+          wm, win, wm, "Shaders Compilation", 0, WM_JOB_TYPE_SHADER_COMPILATION);
 
       DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
       if (comp != NULL) {
         BLI_spin_lock(&comp->list_lock);
-        DRWDeferredShader *dsh;
-        dsh = (DRWDeferredShader *)BLI_findptr(
-            &comp->queue, mat, offsetof(DRWDeferredShader, mat));
-        if (dsh) {
-          BLI_remlink(&comp->queue, dsh);
+        LinkData *link = (LinkData *)BLI_findptr(&comp->queue, mat, offsetof(LinkData, data));
+        if (link) {
+          BLI_remlink(&comp->queue, link);
+          GPU_material_status_set(link->data, GPU_MAT_CREATED);
         }
-
-        /* Wait for compilation to finish */
-        if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
-          BLI_mutex_lock(&comp->compilation_lock);
-          BLI_mutex_unlock(&comp->compilation_lock);
-        }
-
         BLI_spin_unlock(&comp->list_lock);
 
-        if (dsh) {
-          drw_deferred_shader_free(dsh);
-        }
+        MEM_freeN(link);
       }
     }
   }
@@ -436,20 +384,12 @@ GPUMaterial *DRW_shader_from_world(World *wo,
                                                 false,
                                                 callback,
                                                 thunk);
-  if (!DRW_state_is_image_render() && deferred && GPU_material_status(mat) == GPU_MAT_QUEUED) {
-    /* Shader has been already queued. */
-    return mat;
+  if (DRW_state_is_image_render()) {
+    /* Do not deferred if doing render. */
+    deferred = false;
   }
 
-  if (GPU_material_status(mat) == GPU_MAT_CREATED) {
-    GPU_material_status_set(mat, GPU_MAT_QUEUED);
-    drw_deferred_shader_add(mat, deferred);
-  }
-
-  if (!deferred && GPU_material_status(mat) == GPU_MAT_QUEUED) {
-    /* Force compilation for shaders already queued. */
-    drw_deferred_shader_add(mat, false);
-  }
-
+  drw_deferred_shader_add(mat, deferred);
   return mat;
 }
 
@@ -478,20 +418,7 @@ GPUMaterial *DRW_shader_from_material(Material *ma,
     deferred = false;
   }
 
-  if (deferred && GPU_material_status(mat) == GPU_MAT_QUEUED) {
-    /* Shader has been already queued. */
-    return mat;
-  }
-
-  if (GPU_material_status(mat) == GPU_MAT_CREATED) {
-    GPU_material_status_set(mat, GPU_MAT_QUEUED);
-    drw_deferred_shader_add(mat, deferred);
-  }
-
-  if (!deferred && GPU_material_status(mat) == GPU_MAT_QUEUED) {
-    /* Force compilation for shaders already queued. */
-    drw_deferred_shader_add(mat, false);
-  }
-
+  drw_deferred_shader_add(mat, deferred);
   return mat;
 }
 
@@ -29,6 +29,7 @@ set(INC
   ../nodes
   ../nodes/intern
 
+  ../../../intern/atomic
   ../../../intern/clog
   ../../../intern/ghost
   ../../../intern/glew-mx
@@ -215,6 +215,9 @@ GPUMaterial *GPU_material_from_nodetree(struct Scene *scene,
 void GPU_material_compile(GPUMaterial *mat);
 void GPU_material_free(struct ListBase *gpumaterial);
 
+void GPU_material_acquire(GPUMaterial *mat);
+void GPU_material_release(GPUMaterial *mat);
+
 void GPU_materials_free(struct Main *bmain);
 
 struct Scene *GPU_material_scene(GPUMaterial *material);
@@ -40,6 +40,8 @@
 #include "gpu_codegen.h"
 #include "gpu_node_graph.h"
 
+#include "atomic_ops.h"
+
 /* Structs */
 #define MAX_COLOR_BAND 128
 
@@ -88,6 +90,8 @@ struct GPUMaterial {
   int sss_samples;
   bool sss_dirty;
 
+  uint32_t refcount;
+
 #ifndef NDEBUG
   char name[64];
 #endif
@@ -142,8 +146,10 @@ static void gpu_material_ramp_texture_build(GPUMaterial *mat)
 
 static void gpu_material_free_single(GPUMaterial *material)
 {
-  /* Cancel / wait any pending lazy compilation. */
-  DRW_deferred_shader_remove(material);
+  bool do_free = atomic_sub_and_fetch_uint32(&material->refcount, 1) == 0;
+  if (!do_free) {
+    return;
+  }
 
   gpu_node_graph_free(&material->graph);
 
@@ -168,6 +174,7 @@ void GPU_material_free(ListBase *gpumaterial)
 {
   LISTBASE_FOREACH (LinkData *, link, gpumaterial) {
     GPUMaterial *material = link->data;
+    DRW_deferred_shader_remove(material);
     gpu_material_free_single(material);
     MEM_freeN(material);
   }
@@ -660,6 +667,7 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
   mat->is_volume_shader = is_volume_shader;
   mat->graph.used_libraries = BLI_gset_new(
       BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "GPUNodeGraph.used_libraries");
+  mat->refcount = 1;
 #ifndef NDEBUG
   BLI_snprintf(mat->name, sizeof(mat->name), "%s", name);
 #else
@@ -709,11 +717,21 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
   return mat;
 }
 
+void GPU_material_acquire(GPUMaterial *mat)
+{
+  atomic_add_and_fetch_uint32(&mat->refcount, 1);
+}
+
+void GPU_material_release(GPUMaterial *mat)
+{
+  gpu_material_free_single(mat);
+}
+
 void GPU_material_compile(GPUMaterial *mat)
 {
   bool success;
 
-  BLI_assert(mat->status == GPU_MAT_QUEUED);
+  BLI_assert(ELEM(mat->status, GPU_MAT_QUEUED, GPU_MAT_CREATED));
   BLI_assert(mat->pass);
 
   /* NOTE: The shader may have already been compiled here since we are