Fix T77346: GPU Workaround Always Render Using Main Context

In Blender 2.90 the EEVEE material refactor introduced crashes on Intel GPUs on
Windows. The crash happened in the `local_context_workaround`, which temporarily
stored compiled materials in binary form so they could be reloaded in the main GL context.

Testing showed that the workaround is no longer needed for HD6xx GPUs, but it is
still needed for the HD4000 series.

After several unsuccessful fixes we concluded that we could not support the local
context workaround and needed a different approach. The idea of this patch is that,
in these cases, a single context is used for all rendering. Threads that use this
context are guarded by a mutex and will block until it is available (see the sketch below).
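
A minimal sketch of the locking pattern, using the functions this patch adds; the
surrounding worker function is illustrative only and assumes the usual Blender
headers (GPU_extensions.h, GPU_context.h, BLI/DRW declarations) are available:

  /* Hypothetical worker: render from a non-main thread on an affected Intel GPU. */
  static void render_on_worker_thread(void)
  {
    if (GPU_use_main_context_workaround() && !BLI_thread_is_main()) {
      /* Block until no other thread (including the main/UI thread) uses the context. */
      GPU_context_main_lock();
      DRW_opengl_context_enable();

      /* ... issue GPU commands using the shared main context ... */

      DRW_opengl_context_disable();
      GPU_context_main_unlock();
    }
    else {
      /* Unaffected GPUs keep using their own local GL context as before. */
    }
  }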

Impact on User Level:
* Due to the main mutex lock, the UI freezes or feels less snappy while rendering or baking.

Reviewed By: Clément Foucault, Brecht van Lommel

Differential Revision: https://developer.blender.org/D8410
Commit: f7d38e2e64
Parent: 56d7e39b92
Author: Jeroen Bakker, 2020-08-05 15:26:49 +02:00
Committed by: Jeroen Bakker
10 changed files with 76 additions and 32 deletions

@@ -531,6 +531,12 @@ void EEVEE_lightcache_free(LightCache *lcache)
 static void eevee_lightbake_context_enable(EEVEE_LightBake *lbake)
 {
+  if (GPU_use_main_context_workaround() && !BLI_thread_is_main()) {
+    GPU_context_main_lock();
+    DRW_opengl_context_enable();
+    return;
+  }
+
   if (lbake->gl_context) {
     DRW_opengl_render_context_enable(lbake->gl_context);
     if (lbake->gpu_context == NULL) {
@@ -545,6 +551,12 @@ static void eevee_lightbake_context_enable(EEVEE_LightBake *lbake)
 static void eevee_lightbake_context_disable(EEVEE_LightBake *lbake)
 {
+  if (GPU_use_main_context_workaround() && !BLI_thread_is_main()) {
+    DRW_opengl_context_disable();
+    GPU_context_main_unlock();
+    return;
+  }
+
   if (lbake->gl_context) {
     DRW_gpu_render_context_disable(lbake->gpu_context);
     DRW_opengl_render_context_disable(lbake->gl_context);
@@ -697,7 +709,7 @@ wmJob *EEVEE_lightbake_job_create(struct wmWindowManager *wm,
   lbake->delay = delay;
   lbake->frame = frame;

-  if (lbake->gl_context == NULL) {
+  if (lbake->gl_context == NULL && !GPU_use_main_context_workaround()) {
     lbake->gl_context = WM_opengl_context_create();
     wm_window_reset_drawable();
   }
@@ -742,7 +754,7 @@ void *EEVEE_lightbake_job_data_alloc(struct Main *bmain,
   lbake->mutex = BLI_mutex_alloc();
   lbake->frame = frame;

-  if (run_as_job) {
+  if (run_as_job && !GPU_use_main_context_workaround()) {
     lbake->gl_context = WM_opengl_context_create();
     wm_window_reset_drawable();
   }

@@ -2719,6 +2719,12 @@ void DRW_render_context_enable(Render *render)
     WM_init_opengl(G_MAIN);
   }

+  if (GPU_use_main_context_workaround()) {
+    GPU_context_main_lock();
+    DRW_opengl_context_enable();
+    return;
+  }
+
   void *re_gl_context = RE_gl_context_get(render);

   /* Changing Context */
@@ -2736,6 +2742,12 @@ void DRW_render_context_enable(Render *render)

 void DRW_render_context_disable(Render *render)
 {
+  if (GPU_use_main_context_workaround()) {
+    DRW_opengl_context_disable();
+    GPU_context_main_unlock();
+    return;
+  }
+
   void *re_gl_context = RE_gl_context_get(render);

   if (re_gl_context != NULL) {

@@ -34,6 +34,7 @@
 #include "DEG_depsgraph_query.h"

+#include "GPU_extensions.h"
 #include "GPU_material.h"
 #include "GPU_shader.h"
@@ -106,6 +107,12 @@ static void drw_deferred_shader_compilation_exec(
   BLI_assert(gl_context != NULL);
 #endif

+  const bool use_main_context_workaround = GPU_use_main_context_workaround();
+  if (use_main_context_workaround) {
+    BLI_assert(gl_context == DST.gl_context);
+    GPU_context_main_lock();
+  }
+
   WM_opengl_context_activate(gl_context);

   while (true) {
@@ -154,6 +161,9 @@ static void drw_deferred_shader_compilation_exec(
   }

   WM_opengl_context_release(gl_context);
+  if (use_main_context_workaround) {
+    GPU_context_main_unlock();
+  }
 }

 static void drw_deferred_shader_compilation_free(void *custom_data)
@@ -196,6 +206,8 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
     GPU_material_compile(mat);
     return;
   }

+  const bool use_main_context = GPU_use_main_context_workaround();
+  const bool job_own_context = !use_main_context;
+
   DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
@@ -227,7 +239,7 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
     if (old_comp->gl_context) {
       comp->gl_context = old_comp->gl_context;
       old_comp->own_context = false;
-      comp->own_context = true;
+      comp->own_context = job_own_context;
     }
   }
@@ -235,9 +247,14 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)

   /* Create only one context. */
   if (comp->gl_context == NULL) {
-    comp->gl_context = WM_opengl_context_create();
-    WM_opengl_context_activate(DST.gl_context);
-    comp->own_context = true;
+    if (use_main_context) {
+      comp->gl_context = DST.gl_context;
+    }
+    else {
+      comp->gl_context = WM_opengl_context_create();
+      WM_opengl_context_activate(DST.gl_context);
+    }
+    comp->own_context = job_own_context;
   }

   WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);

@@ -42,6 +42,14 @@ void GPU_context_discard(GPUContext *);
 void GPU_context_active_set(GPUContext *);
 GPUContext *GPU_context_active_get(void);

+/* Legacy GPU (Intel HD4000 series) do not support sharing GPU objects between GPU
+ * contexts. EEVEE/Workbench can create different contexts for image/preview rendering, baking or
+ * compiling. When a legacy GPU is detected (`GPU_use_main_context_workaround()`) any worker
+ * threads should use the draw manager opengl context and make sure that they are the only one
+ * using it by locking the main context using these two functions. */
+void GPU_context_main_lock(void);
+void GPU_context_main_unlock(void);
+
 #ifdef __cplusplus
 }
 #endif

@@ -48,7 +48,7 @@ bool GPU_arb_texture_cube_map_array_is_supported(void);
 bool GPU_mip_render_workaround(void);
 bool GPU_depth_blitting_workaround(void);
 bool GPU_unused_fb_slot_workaround(void);
-bool GPU_context_local_shaders_workaround(void);
+bool GPU_use_main_context_workaround(void);
 bool GPU_texture_copy_workaround(void);
 bool GPU_crappy_amd_driver(void);

@@ -1228,21 +1228,9 @@ bool GPU_pass_compile(GPUPass *pass, const char *shname)
         shader = NULL;
       }
     }
-    else if (!BLI_thread_is_main() && GPU_context_local_shaders_workaround()) {
-      pass->binary.content = GPU_shader_get_binary(
-          shader, &pass->binary.format, &pass->binary.len);
-      GPU_shader_free(shader);
-      shader = NULL;
-    }
-
     pass->shader = shader;
     pass->compiled = true;
   }
-  else if (pass->binary.content && BLI_thread_is_main()) {
-    pass->shader = GPU_shader_load_from_binary(
-        pass->binary.content, pass->binary.format, pass->binary.len, shname);
-    MEM_SAFE_FREE(pass->binary.content);
-  }

   return success;
 }
@@ -1263,9 +1251,6 @@ static void gpu_pass_free(GPUPass *pass)
   MEM_SAFE_FREE(pass->geometrycode);
   MEM_SAFE_FREE(pass->vertexcode);
   MEM_SAFE_FREE(pass->defines);
-  if (pass->binary.content) {
-    MEM_freeN(pass->binary.content);
-  }
   MEM_freeN(pass);
 }

@@ -43,11 +43,6 @@ typedef struct GPUPass {
   char *defines;
   uint refcount; /* Orphaned GPUPasses gets freed by the garbage collector. */
   uint32_t hash; /* Identity hash generated from all GLSL code. */
-  struct {
-    char *content;
-    uint format;
-    int len;
-  } binary;
   bool compiled; /* Did we already tried to compile the attached GPUShader. */
 } GPUPass;

@@ -62,6 +62,7 @@ static std::vector<GLuint> orphaned_buffer_ids;
 static std::vector<GLuint> orphaned_texture_ids;

 static std::mutex orphans_mutex;
+static std::mutex main_context_mutex;

 struct GPUContext {
   GLuint default_vao;
@@ -345,3 +346,13 @@ struct GPUMatrixState *gpu_context_active_matrix_state_get()
   BLI_assert(active_ctx);
   return active_ctx->matrix_state;
 }
+
+void GPU_context_main_lock(void)
+{
+  main_context_mutex.lock();
+}
+
+void GPU_context_main_unlock(void)
+{
+  main_context_mutex.unlock();
+}

@@ -95,7 +95,7 @@ static struct GPUGlobal {
   bool broken_amd_driver;
   /* Some crappy Intel drivers don't work well with shaders created in different
    * rendering contexts. */
-  bool context_local_shaders_workaround;
+  bool use_main_context_workaround;
   /* Intel drivers exhibit artifacts when using #glCopyImageSubData & workbench anti-aliasing.
    * (see T76273) */
   bool texture_copy_workaround;
@@ -222,9 +222,9 @@ bool GPU_unused_fb_slot_workaround(void)
   return GG.unused_fb_slot_workaround;
 }

-bool GPU_context_local_shaders_workaround(void)
+bool GPU_use_main_context_workaround(void)
 {
-  return GG.context_local_shaders_workaround;
+  return GG.use_main_context_workaround;
 }

 bool GPU_texture_copy_workaround(void)
@@ -381,12 +381,12 @@ void gpu_extensions_init(void)
       /* Maybe not all of these drivers have problems with `GLEW_ARB_base_instance`.
       * But it's hard to test each case. */
       GG.glew_arb_base_instance_is_supported = false;
-      GG.context_local_shaders_workaround = true;
+      GG.use_main_context_workaround = true;
     }

     if (strstr(version, "Build 20.19.15.4285")) {
       /* Somehow fixes armature display issues (see T69743). */
-      GG.context_local_shaders_workaround = true;
+      GG.use_main_context_workaround = true;
     }
   }
   else if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_UNIX, GPU_DRIVER_OPENSOURCE) &&

@@ -51,6 +51,7 @@
 #include "ED_screen.h"
 #include "ED_view3d.h"

+#include "GPU_context.h"
 #include "GPU_draw.h"
 #include "GPU_framebuffer.h"
 #include "GPU_immediate.h"
@@ -999,6 +1000,7 @@ void wm_draw_update(bContext *C)
   wmWindowManager *wm = CTX_wm_manager(C);
   wmWindow *win;

+  GPU_context_main_lock();
   GPU_free_unused_buffers();

   for (win = wm->windows.first; win; win = win->next) {
@@ -1036,6 +1038,8 @@ void wm_draw_update(bContext *C)

   /* Draw non-windows (surfaces) */
   wm_surfaces_iter(C, wm_draw_surface);
+
+  GPU_context_main_unlock();
 }

 void wm_draw_region_clear(wmWindow *win, ARegion *UNUSED(region))