GPU: Clean up GPU_batch.h documentation and some of the API for consistency

Documented all functions, adding use cases and side effects. Also replaced the shortened argument names with more meaningful ones. Renamed `GPU_batch_instbuf_add_ex` and `GPU_batch_vertbuf_add_ex` to drop the `_ex` suffix, as they are the main versions used (and removed the few usages of the other versions). Renamed `GPU_batch_draw_instanced` to `GPU_batch_draw_instance_range`, making it consistent with `GPU_batch_draw_range`.
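Call sites update mechanically; a minimal before/after sketch of the renames (hypothetical caller code, not part of this commit):

```cpp
/* Before this commit: */
GPU_batch_vertbuf_add_ex(batch, vbo, true);
GPU_batch_instbuf_add_ex(batch, instance_vbo, false);
GPU_batch_draw_instanced(batch, instance_count);

/* After this commit: */
GPU_batch_vertbuf_add(batch, vbo, true);
GPU_batch_instbuf_add(batch, instance_vbo, false);
/* The new name also takes the first instance, mirroring GPU_batch_draw_range(). */
GPU_batch_draw_instance_range(batch, 0, instance_count);
```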
@@ -40,40 +40,43 @@ GPUBatch *GPU_batch_calloc()
   return batch;
 }
 
-GPUBatch *GPU_batch_create_ex(GPUPrimType prim_type,
-                              GPUVertBuf *verts,
-                              GPUIndexBuf *elem,
+GPUBatch *GPU_batch_create_ex(GPUPrimType primitive_type,
+                              GPUVertBuf *vertex_buf,
+                              GPUIndexBuf *index_buf,
                               eGPUBatchFlag owns_flag)
 {
   GPUBatch *batch = GPU_batch_calloc();
-  GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
+  GPU_batch_init_ex(batch, primitive_type, vertex_buf, index_buf, owns_flag);
   return batch;
 }
 
 void GPU_batch_init_ex(GPUBatch *batch,
-                       GPUPrimType prim_type,
-                       GPUVertBuf *verts,
-                       GPUIndexBuf *elem,
+                       GPUPrimType primitive_type,
+                       GPUVertBuf *vertex_buf,
+                       GPUIndexBuf *index_buf,
                        eGPUBatchFlag owns_flag)
 {
   /* Do not pass any other flag */
   BLI_assert((owns_flag & ~(GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX)) == 0);
   /* Batch needs to be in cleared state. */
   BLI_assert((batch->flag & GPU_BATCH_INIT) == 0);
 
-  batch->verts[0] = verts;
+  batch->verts[0] = vertex_buf;
   for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; v++) {
     batch->verts[v] = nullptr;
   }
   for (auto &v : batch->inst) {
     v = nullptr;
   }
-  batch->elem = elem;
-  batch->prim_type = prim_type;
+  batch->elem = index_buf;
+  batch->prim_type = primitive_type;
   batch->flag = owns_flag | GPU_BATCH_INIT | GPU_BATCH_DIRTY;
   batch->shader = nullptr;
 }
 
 void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
 {
   GPU_batch_clear(batch_dst);
   GPU_batch_init_ex(
       batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, GPU_BATCH_INVALID);
 
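As a usage illustration of the renamed parameters and the `owns_flag` contract asserted above (a hypothetical sketch, not code from this commit):

```cpp
/* Hypothetical usage sketch: the batch takes ownership of both buffers, so a
 * later GPU_batch_discard(batch) also frees them. Only the two ownership
 * flags checked by the assert above may be passed here. */
GPUBatch *batch = GPU_batch_create_ex(
    GPU_PRIM_TRIS, vertex_buf, index_buf, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
```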
@@ -118,35 +121,35 @@ void GPU_batch_discard(GPUBatch *batch)
 /** \name Buffers Management
  * \{ */
 
-void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
+void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *vertex_buf, bool own_vbo)
 {
-  BLI_assert(inst);
+  BLI_assert(vertex_buf);
   batch->flag |= GPU_BATCH_DIRTY;
 
   if (batch->inst[0] && (batch->flag & GPU_BATCH_OWNS_INST_VBO)) {
     GPU_vertbuf_discard(batch->inst[0]);
   }
-  batch->inst[0] = inst;
+  batch->inst[0] = vertex_buf;
 
   SET_FLAG_FROM_TEST(batch->flag, own_vbo, GPU_BATCH_OWNS_INST_VBO);
 }
 
-void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo)
+void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *index_buf, bool own_ibo)
 {
-  BLI_assert(elem);
+  BLI_assert(index_buf);
   batch->flag |= GPU_BATCH_DIRTY;
 
   if (batch->elem && (batch->flag & GPU_BATCH_OWNS_INDEX)) {
     GPU_indexbuf_discard(batch->elem);
   }
-  batch->elem = elem;
+  batch->elem = index_buf;
 
   SET_FLAG_FROM_TEST(batch->flag, own_ibo, GPU_BATCH_OWNS_INDEX);
 }
 
-int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
+int GPU_batch_instbuf_add(GPUBatch *batch, GPUVertBuf *vertex_buf, bool own_vbo)
 {
-  BLI_assert(insts);
+  BLI_assert(vertex_buf);
   batch->flag |= GPU_BATCH_DIRTY;
 
   for (uint v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
@@ -157,7 +160,7 @@ int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
         // BLI_assert(insts->vertex_len == batch->inst[0]->vertex_len);
       }
 
-      batch->inst[v] = insts;
+      batch->inst[v] = vertex_buf;
       SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_INST_VBO << v));
       return v;
     }
@@ -167,9 +170,9 @@ int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
   return -1;
 }
 
-int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
+int GPU_batch_vertbuf_add(GPUBatch *batch, GPUVertBuf *vertex_buf, bool own_vbo)
 {
-  BLI_assert(verts);
+  BLI_assert(vertex_buf);
   batch->flag |= GPU_BATCH_DIRTY;
 
   for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
@@ -179,7 +182,7 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
         /* This is an issue for the HACK inside DRW_vbo_request(). */
         // BLI_assert(verts->vertex_len == batch->verts[0]->vertex_len);
       }
-      batch->verts[v] = verts;
+      batch->verts[v] = vertex_buf;
       SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_VBO << v));
       return v;
     }
@@ -189,10 +192,10 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
   return -1;
 }
 
-bool GPU_batch_vertbuf_has(GPUBatch *batch, GPUVertBuf *verts)
+bool GPU_batch_vertbuf_has(GPUBatch *batch, GPUVertBuf *vertex_buf)
 {
   for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
-    if (batch->verts[v] == verts) {
+    if (batch->verts[v] == vertex_buf) {
       return true;
     }
   }
@@ -211,7 +214,6 @@ void GPU_batch_resource_id_buf_set(GPUBatch *batch, GPUStorageBuf *resource_id_b
 /* -------------------------------------------------------------------- */
 /** \name Uniform setters
  *
- * TODO(fclem): port this to GPUShader.
  * \{ */
 
 void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
@@ -226,19 +228,22 @@ void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
 /** \name Drawing / Drawcall functions
  * \{ */
 
-void GPU_batch_draw_parameter_get(
-    GPUBatch *gpu_batch, int *r_v_count, int *r_v_first, int *r_base_index, int *r_i_count)
+void GPU_batch_draw_parameter_get(GPUBatch *gpu_batch,
+                                  int *r_vertex_count,
+                                  int *r_vertex_first,
+                                  int *r_base_index,
+                                  int *r_indices_count)
 {
   Batch *batch = static_cast<Batch *>(gpu_batch);
 
   if (batch->elem) {
-    *r_v_count = batch->elem_()->index_len_get();
-    *r_v_first = batch->elem_()->index_start_get();
+    *r_vertex_count = batch->elem_()->index_len_get();
+    *r_vertex_first = batch->elem_()->index_start_get();
     *r_base_index = batch->elem_()->index_base_get();
   }
   else {
-    *r_v_count = batch->verts_(0)->vertex_len;
-    *r_v_first = 0;
+    *r_vertex_count = batch->verts_(0)->vertex_len;
+    *r_vertex_first = 0;
     *r_base_index = -1;
   }
 
@@ -247,7 +252,7 @@ void GPU_batch_draw_parameter_get(
   if (batch->inst[1] != nullptr) {
     i_count = min_ii(i_count, batch->inst_(1)->vertex_len);
   }
-  *r_i_count = i_count;
+  *r_indices_count = i_count;
 }
 
 void GPU_batch_draw(GPUBatch *batch)
@@ -256,48 +261,51 @@ void GPU_batch_draw(GPUBatch *batch)
   GPU_batch_draw_advanced(batch, 0, 0, 0, 0);
 }
 
-void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count)
+void GPU_batch_draw_range(GPUBatch *batch, int vertex_first, int vertex_count)
 {
   GPU_shader_bind(batch->shader);
-  GPU_batch_draw_advanced(batch, v_first, v_count, 0, 0);
+  GPU_batch_draw_advanced(batch, vertex_first, vertex_count, 0, 0);
 }
 
-void GPU_batch_draw_instanced(GPUBatch *batch, int i_count)
+void GPU_batch_draw_instance_range(GPUBatch *batch, int instance_first, int instance_count)
 {
   BLI_assert(batch->inst[0] == nullptr);
 
   GPU_shader_bind(batch->shader);
-  GPU_batch_draw_advanced(batch, 0, 0, 0, i_count);
+  GPU_batch_draw_advanced(batch, 0, 0, instance_first, instance_count);
 }
 
-void GPU_batch_draw_advanced(
-    GPUBatch *gpu_batch, int v_first, int v_count, int i_first, int i_count)
+void GPU_batch_draw_advanced(GPUBatch *gpu_batch,
+                             int vertex_first,
+                             int vertex_count,
+                             int instance_first,
+                             int instance_count)
 {
   BLI_assert(Context::get()->shader != nullptr);
   Batch *batch = static_cast<Batch *>(gpu_batch);
 
-  if (v_count == 0) {
+  if (vertex_count == 0) {
     if (batch->elem) {
-      v_count = batch->elem_()->index_len_get();
+      vertex_count = batch->elem_()->index_len_get();
     }
     else {
-      v_count = batch->verts_(0)->vertex_len;
+      vertex_count = batch->verts_(0)->vertex_len;
     }
   }
-  if (i_count == 0) {
-    i_count = (batch->inst[0]) ? batch->inst_(0)->vertex_len : 1;
+  if (instance_count == 0) {
+    instance_count = (batch->inst[0]) ? batch->inst_(0)->vertex_len : 1;
     /* Meh. This is to be able to use different numbers of verts in instance VBO's. */
     if (batch->inst[1] != nullptr) {
-      i_count = min_ii(i_count, batch->inst_(1)->vertex_len);
+      instance_count = min_ii(instance_count, batch->inst_(1)->vertex_len);
     }
   }
 
-  if (v_count == 0 || i_count == 0) {
+  if (vertex_count == 0 || instance_count == 0) {
     /* Nothing to draw. */
     return;
  }
 
-  batch->draw(v_first, v_count, i_first, i_count);
+  batch->draw(vertex_first, vertex_count, instance_first, instance_count);
 }
 
 void GPU_batch_draw_indirect(GPUBatch *gpu_batch, GPUStorageBuf *indirect_buf, intptr_t offset)
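With the renames applied, the draw entry points share one parameter vocabulary. A hypothetical usage sketch (assuming `batch` and `shader` are valid; the zero-count defaulting follows `GPU_batch_draw_advanced()` above):

```cpp
GPU_batch_set_shader(batch, shader);

/* Zero counts mean "use everything": GPU_batch_draw_advanced() derives a zero
 * vertex_count from the index buffer (or the first vertex buffer) and a zero
 * instance_count from the instance buffer, defaulting to a single instance. */
GPU_batch_draw(batch);                       /* All vertices, one instance. */
GPU_batch_draw_range(batch, 6, 12);          /* Vertices [6, 18). */
GPU_batch_draw_instance_range(batch, 2, 10); /* Instances [2, 12); asserts no instance VBO. */
```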