DRW: Fix Ubo binding logic and improve Texture binding logic
UBOs need to be rebound every time the shader changes. This simplifies the logic a bit. Also modify the texture binding logic to potentially reuse more already-bound textures.
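As a standalone illustration of the bind_ubo()/release_ubo_slots() change below (a minimal sketch: MAX_UBO_BINDS, ubo_cursor, bind_ubo_sketch and on_shader_change are stand-in names, not the actual DRW identifiers): because every UBO has to be rebound after a shader switch anyway, there is no cross-shader slot state worth tracking, so a cursor that hands out bind points in call order and resets on shader change is enough.

    #include <assert.h>
    #include <stdio.h>

    /* Stand-in for GPU_max_ubo_binds(); the real value is queried from the GPU. */
    #define MAX_UBO_BINDS 14

    /* Stand-in for RST.bind_ubo_inc. */
    static int ubo_cursor = 0;

    /* Hand out UBO bind points in call order; per-slot occupancy flags
     * are unnecessary because nothing survives a shader switch. */
    static int bind_ubo_sketch(const char *name)
    {
        assert(ubo_cursor < MAX_UBO_BINDS && "Not enough ubo slots!");
        printf("bind %s -> point %d\n", name, ubo_cursor);
        return ubo_cursor++;
    }

    /* Shader changed: every UBO gets rebound, so just restart at 0. */
    static void on_shader_change(void)
    {
        ubo_cursor = 0;
    }

    int main(void)
    {
        bind_ubo_sketch("globals"); /* point 0 */
        bind_ubo_sketch("lights");  /* point 1 */
        on_shader_change();
        bind_ubo_sketch("globals"); /* point 0 again, for the new shader */
        return 0;
    }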
@@ -362,10 +362,11 @@ static struct DRWGlobalState {
 /** GPU Resource State: Memory storage between drawing. */
 static struct DRWResourceState {
 	GPUTexture **bound_texs;
-	GPUUniformBuffer **bound_ubos;
 
 	bool *bound_tex_slots;
-	bool *bound_ubo_slots;
+
+	int bind_tex_inc;
+	int bind_ubo_inc;
 } RST = {NULL};
 
 static struct DRWMatrixOveride {
@@ -1838,10 +1839,11 @@ static void bind_texture(GPUTexture *tex)
 	int bind_num = GPU_texture_bound_number(tex);
 	if (bind_num == -1) {
 		for (int i = 0; i < GPU_max_textures(); ++i) {
-			if (RST.bound_tex_slots[i] == false) {
-				GPU_texture_bind(tex, i);
-				RST.bound_texs[i] = tex;
-				RST.bound_tex_slots[i] = true;
+			RST.bind_tex_inc = (RST.bind_tex_inc + 1) % GPU_max_textures();
+			if (RST.bound_tex_slots[RST.bind_tex_inc] == false) {
+				GPU_texture_bind(tex, RST.bind_tex_inc);
+				RST.bound_texs[RST.bind_tex_inc] = tex;
+				RST.bound_tex_slots[RST.bind_tex_inc] = true;
 				return;
 			}
 		}
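The texture change above, sketched standalone (MAX_TEX_SLOTS, slot_used and tex_cursor are illustrative stand-ins, not the actual DRW identifiers): the free-slot scan now resumes one past the last slot handed out instead of restarting at 0, so recently bound textures keep their slots longer and are more likely to still be bound (and found by GPU_texture_bound_number()) the next time they are requested.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for GPU_max_textures(). */
    #define MAX_TEX_SLOTS 8

    static bool slot_used[MAX_TEX_SLOTS]; /* stand-in for RST.bound_tex_slots */
    static int tex_cursor;                /* stand-in for RST.bind_tex_inc */

    /* Round-robin free-slot search: advance the cursor before checking,
     * exactly like the loop above, so the oldest bindings are the last
     * ones to be recycled. */
    static int next_free_slot(void)
    {
        for (int i = 0; i < MAX_TEX_SLOTS; ++i) {
            tex_cursor = (tex_cursor + 1) % MAX_TEX_SLOTS;
            if (!slot_used[tex_cursor]) {
                slot_used[tex_cursor] = true;
                return tex_cursor;
            }
        }
        return -1; /* all slots taken */
    }

    int main(void)
    {
        /* The cursor starts at 0, so the first three requests get slots 1, 2, 3. */
        for (int i = 0; i < 3; ++i) {
            printf("allocated slot %d\n", next_free_slot());
        }
        return 0;
    }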
@@ -1853,17 +1855,11 @@ static void bind_texture(GPUTexture *tex)
 
 static void bind_ubo(GPUUniformBuffer *ubo)
 {
-	int bind_num = GPU_uniformbuffer_bindpoint(ubo);
-	if (bind_num == -1) {
-		for (int i = 0; i < GPU_max_ubo_binds(); ++i) {
-			if (RST.bound_ubo_slots[i] == false) {
-				GPU_uniformbuffer_bind(ubo, i);
-				RST.bound_ubos[i] = ubo;
-				RST.bound_ubo_slots[i] = true;
-				return;
-			}
-		}
-
+	if (RST.bind_ubo_inc < GPU_max_ubo_binds()) {
+		GPU_uniformbuffer_bind(ubo, RST.bind_ubo_inc);
+		RST.bind_ubo_inc++;
+	}
+	else {
 		/* This is not depending on user input.
 		 * It is our responsability to make sure there enough slots. */
 		BLI_assert(0 && "Not enough ubo slots! This should not happen!\n");
@@ -1871,7 +1867,6 @@ static void bind_ubo(GPUUniformBuffer *ubo)
 		/* printf so user can report bad behaviour */
 		printf("Not enough ubo slots! This should not happen!\n");
 	}
-	RST.bound_ubo_slots[bind_num] = true;
 }
 
 static void release_texture_slots(void)
@@ -1881,7 +1876,7 @@ static void release_texture_slots(void)
 
 static void release_ubo_slots(void)
 {
-	memset(RST.bound_ubo_slots, 0x0, sizeof(bool) * GPU_max_ubo_binds());
+	RST.bind_ubo_inc = 0;
 }
 
 static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
@@ -2074,14 +2069,6 @@ static void DRW_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWSha
 		}
 	}
 
-	/* Clear Bound Ubos */
-	for (int i = 0; i < GPU_max_ubo_binds(); i++) {
-		if (RST.bound_ubos[i] != NULL) {
-			GPU_uniformbuffer_unbind(RST.bound_ubos[i]);
-			RST.bound_ubos[i] = NULL;
-		}
-	}
-
 	if (DST.shader) {
 		GPU_shader_unbind();
 		DST.shader = NULL;
@@ -2555,14 +2542,6 @@ static void DRW_viewport_var_init(void)
 	if (RST.bound_tex_slots == NULL) {
 		RST.bound_tex_slots = MEM_callocN(sizeof(GPUUniformBuffer *) * GPU_max_textures(), "Bound Texture Slots");
 	}
-
-	/* Alloc array of ubos reference. */
-	if (RST.bound_ubos == NULL) {
-		RST.bound_ubos = MEM_callocN(sizeof(GPUUniformBuffer *) * GPU_max_ubo_binds(), "Bound GPUUniformBuffer refs");
-	}
-	if (RST.bound_ubo_slots == NULL) {
-		RST.bound_ubo_slots = MEM_callocN(sizeof(GPUUniformBuffer *) * GPU_max_textures(), "Bound UBO Slots");
-	}
 }
 
 void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type)
@@ -3701,9 +3680,7 @@ void DRW_engines_free(void)
 	GPU_texture_free(globals_ramp);
 
 	MEM_SAFE_FREE(RST.bound_texs);
-	MEM_SAFE_FREE(RST.bound_ubos);
 	MEM_SAFE_FREE(RST.bound_tex_slots);
-	MEM_SAFE_FREE(RST.bound_ubo_slots);
 
 #ifdef WITH_CLAY_ENGINE
 	BLI_remlink(&R_engines, &DRW_engine_viewport_clay_type);