Gawain: use ShaderInterface to manage vertex attribs
This eliminates tons of glGetAttribLocation calls from the drawing loop. The vast majority of code can keep making the same function calls; they're just faster now!
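The shape of the change, in a rough sketch (not part of the diff below; the surrounding setup is assumed):

// before: every attrib lookup was a GL query inside the drawing loop
GLint loc = glGetAttribLocation(program, a->name);

// after: the location was resolved once when the ShaderInterface was built,
// so the drawing loop only scans a small cached table with strcmp
const ShaderInput* input = ShaderInterface_attrib(shaderface, a->name);
glEnableVertexAttribArray(input->location);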
@@ -12,6 +12,7 @@
 #pragma once

 #include "vertex_format.h"
+#include "shader_interface.h"

 typedef struct {
 	uint64_t loc_bits; // store 4 bits for each of the 16 attribs
@@ -20,5 +21,5 @@ typedef struct {

 void AttribBinding_clear(AttribBinding*);

-void get_attrib_locations(const VertexFormat*, AttribBinding*, GLuint program);
+void get_attrib_locations(const VertexFormat*, AttribBinding*, const ShaderInterface*);
 unsigned read_attrib_location(const AttribBinding*, unsigned a_idx);
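For context, AttribBinding packs every resolved location into loc_bits (4 bits per attrib, up to 16 attribs), with enabled_bits marking which slots are valid. A plausible reading of the read/write helpers, based only on the comment and declarations above (a sketch, not the verbatim implementation):

static void write_attrib_location(AttribBinding* binding, unsigned a_idx, unsigned location)
	{
	const unsigned shift = 4 * a_idx;
	const uint64_t mask = ((uint64_t)0xF) << shift;
	// store the 4-bit location in this attrib's slot
	binding->loc_bits = (binding->loc_bits & ~mask) | ((uint64_t)location << shift);
	binding->enabled_bits |= 1 << a_idx; // this line appears in the hunk below
	}

unsigned read_attrib_location(const AttribBinding* binding, unsigned a_idx)
	{
	return (binding->loc_bits >> (4 * a_idx)) & 0xF;
	}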
@@ -48,3 +48,4 @@ void ShaderInterface_discard(ShaderInterface*);

 const ShaderInput* ShaderInterface_uniform(const ShaderInterface*, const char* name);
 const ShaderInput* ShaderInterface_builtin_uniform(const ShaderInterface*, BuiltinUniform);
+const ShaderInput* ShaderInterface_attrib(const ShaderInterface*, const char* name);
@@ -46,24 +46,20 @@ static void write_attrib_location(AttribBinding* binding, unsigned a_idx, unsign
 	binding->enabled_bits |= 1 << a_idx;
 	}

-void get_attrib_locations(const VertexFormat* format, AttribBinding* binding, GLuint program)
+void get_attrib_locations(const VertexFormat* format, AttribBinding* binding, const ShaderInterface* shaderface)
 	{
-#if TRUST_NO_ONE
-	assert(glIsProgram(program));
-#endif
-
 	AttribBinding_clear(binding);

 	for (unsigned a_idx = 0; a_idx < format->attrib_ct; ++a_idx)
 		{
 		const Attrib* a = format->attribs + a_idx;
-		GLint loc = glGetAttribLocation(program, a->name);
+		const ShaderInput* input = ShaderInterface_attrib(shaderface, a->name);

 #if TRUST_NO_ONE
-		assert(loc != -1);
+		assert(input != NULL);
 		// TODO: make this a recoverable runtime error? indicates mismatch between vertex format and program
 #endif

-		write_attrib_location(binding, a_idx, loc);
+		write_attrib_location(binding, a_idx, input->location);
 		}
 	}
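The interface is built once per program, right after linking, and then reused for every bind. Only ShaderInterface_discard appears in the header hunk above, so the _create call in this sketch is an assumption about the companion constructor (format and binding are as in the function above):

ShaderInterface* shaderface = ShaderInterface_create(program); // assumed constructor, called once at link time

// per bind: resolve attribs from the cached table, no glGetAttribLocation calls
get_attrib_locations(&format, &binding, shaderface);

// when the program is deleted
ShaderInterface_discard(shaderface);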
@@ -127,23 +127,23 @@ static void Batch_update_program_bindings(Batch* batch)

 		const GLvoid* pointer = (const GLubyte*)0 + a->offset;

-		const GLint loc = glGetAttribLocation(batch->program, a->name);
+		const ShaderInput* input = ShaderInterface_attrib(batch->interface, a->name);

-		if (loc == -1) continue;
+		if (input == NULL) continue;

-		glEnableVertexAttribArray(loc);
+		glEnableVertexAttribArray(input->location);

 		switch (a->fetch_mode)
 			{
 			case KEEP_FLOAT:
 			case CONVERT_INT_TO_FLOAT:
-				glVertexAttribPointer(loc, a->comp_ct, a->gl_comp_type, GL_FALSE, stride, pointer);
+				glVertexAttribPointer(input->location, a->comp_ct, a->gl_comp_type, GL_FALSE, stride, pointer);
 				break;
 			case NORMALIZE_INT_TO_FLOAT:
-				glVertexAttribPointer(loc, a->comp_ct, a->gl_comp_type, GL_TRUE, stride, pointer);
+				glVertexAttribPointer(input->location, a->comp_ct, a->gl_comp_type, GL_TRUE, stride, pointer);
 				break;
 			case KEEP_INT:
-				glVertexAttribIPointer(loc, a->comp_ct, a->gl_comp_type, stride, pointer);
+				glVertexAttribIPointer(input->location, a->comp_ct, a->gl_comp_type, stride, pointer);
 			}
 		}
 	}
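Batch_update_program_bindings now reads locations from batch->interface, which is presumably stored on the batch alongside the program when the program is assigned. Roughly along these lines (hypothetical, not shown in this diff):

void Batch_set_program(Batch* batch, GLuint program, const ShaderInterface* shaderface)
	{
	batch->program = program;
	batch->interface = shaderface; // cached so the draw path never calls glGetAttribLocation
	}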
@@ -133,7 +133,7 @@ void immBindProgram(GLuint program, const ShaderInterface* shaderface)
 	VertexFormat_pack(&imm.vertex_format);

 	glUseProgram(program);
-	get_attrib_locations(&imm.vertex_format, &imm.attrib_binding, program);
+	get_attrib_locations(&imm.vertex_format, &imm.attrib_binding, shaderface);

 	gpuBindMatrices(shaderface);
 	}
@@ -181,3 +181,21 @@ const ShaderInput* ShaderInterface_builtin_uniform(const ShaderInterface* shader
 	// TODO: look up by enum, not name (fix setup_builtin_uniform first)
 	return ShaderInterface_uniform(shaderface, BuiltinUniform_name(builtin));
 	}
+
+const ShaderInput* ShaderInterface_attrib(const ShaderInterface* shaderface, const char* name)
+	{
+	// attribs are stored after uniforms
+	const uint32_t input_ct = shaderface->uniform_ct + shaderface->attrib_ct;
+	for (uint32_t i = shaderface->uniform_ct; i < input_ct; ++i)
+		{
+		const ShaderInput* attrib = shaderface->inputs + i;
+
+#if SUPPORT_LEGACY_GLSL
+		if (attrib->name == NULL) continue;
+#endif
+
+		if (strcmp(attrib->name, name) == 0)
+			return attrib;
+		}
+	return NULL; // not found
+	}
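ShaderInterface_attrib is just a linear strcmp scan over the cached inputs (uniforms first, then attribs), which for a handful of entries in process memory is far cheaper than a glGetAttribLocation round-trip into the driver. Typical use, with "pos" as a made-up attrib name:

const ShaderInput* pos = ShaderInterface_attrib(shaderface, "pos");
if (pos != NULL)
	glEnableVertexAttribArray(pos->location);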
@@ -632,8 +632,8 @@ void GPU_shader_uniform_texture(GPUShader *UNUSED(shader), int location, GPUText
 int GPU_shader_get_attribute(GPUShader *shader, const char *name)
 {
 	BLI_assert(shader && shader->program);
-
-	return glGetAttribLocation(shader->program, name);
+	const ShaderInput *attrib = ShaderInterface_attrib(shader->interface, name);
+	return attrib ? attrib->location : -1;
 }

 GPUShader *GPU_shader_get_builtin_shader(GPUBuiltinShader shader)
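Callers of GPU_shader_get_attribute stay untouched: it still hands back a plain GL location, or -1 when the attrib is absent. For example ("orco" is just an illustrative attribute name):

int loc = GPU_shader_get_attribute(shader, "orco");
if (loc != -1) {
	/* set up the vertex attrib exactly as before */
}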