forked from blender/blender
me-main #1
@@ -507,7 +507,7 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
       if (!GPU_batch_vertbuf_has(batch, vbo)) {
         /* Currently, the code assumes that all objects that share the same mesh in the
          * current frame also share the same mesh on other frames. */
-        GPU_batch_vertbuf_add_ex(batch, vbo, false);
+        GPU_batch_vertbuf_add(batch, vbo, false);
       }
     }
   }
@@ -904,10 +904,10 @@ static void gpencil_edit_batches_ensure(Object *ob, GpencilBatchCache *cache, in
 
     /* Create the batches */
     cache->edit_points_batch = GPU_batch_create(GPU_PRIM_POINTS, cache->vbo, nullptr);
-    GPU_batch_vertbuf_add(cache->edit_points_batch, cache->edit_vbo);
+    GPU_batch_vertbuf_add(cache->edit_points_batch, cache->edit_vbo, false);
 
     cache->edit_lines_batch = GPU_batch_create(GPU_PRIM_LINE_STRIP, cache->vbo, nullptr);
-    GPU_batch_vertbuf_add(cache->edit_lines_batch, cache->edit_vbo);
+    GPU_batch_vertbuf_add(cache->edit_lines_batch, cache->edit_vbo, false);
   }
 
   /* Curve Handles and Points for Editing. */
@@ -941,11 +941,11 @@ static void gpencil_edit_batches_ensure(Object *ob, GpencilBatchCache *cache, in
 
     cache->edit_curve_handles_batch = GPU_batch_create(
        GPU_PRIM_LINES, cache->edit_curve_vbo, nullptr);
-    GPU_batch_vertbuf_add(cache->edit_curve_handles_batch, cache->edit_curve_vbo);
+    GPU_batch_vertbuf_add(cache->edit_curve_handles_batch, cache->edit_curve_vbo, false);
 
    cache->edit_curve_points_batch = GPU_batch_create(
        GPU_PRIM_POINTS, cache->edit_curve_vbo, nullptr);
-    GPU_batch_vertbuf_add(cache->edit_curve_points_batch, cache->edit_curve_vbo);
+    GPU_batch_vertbuf_add(cache->edit_curve_points_batch, cache->edit_curve_vbo, false);
   }
 
   gpd->flag &= ~GP_DATA_CACHE_IS_DIRTY;
@@ -197,7 +197,7 @@ static void drw_volume_wireframe_cb(
        GPU_PRIM_LINES, cache->face_wire.pos_nor_in_order, ibo, GPU_BATCH_OWNS_INDEX);
   }
 
-  GPU_batch_vertbuf_add_ex(cache->face_wire.batch, vbo_wiredata, true);
+  GPU_batch_vertbuf_add(cache->face_wire.batch, vbo_wiredata, true);
 }
 
 GPUBatch *DRW_volume_batch_cache_get_wireframes_face(Volume *volume)
@@ -71,7 +71,7 @@ BLI_INLINE void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
   }
   if (batch != NULL) {
     /* HACK we set VBO's that may not yet be valid. */
-    GPU_batch_vertbuf_add(batch, *vbo);
+    GPU_batch_vertbuf_add(batch, *vbo, false);
   }
 }
 
@@ -226,11 +226,11 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
       GPU_batch_copy(batch, geom);
       if (inst_batch != NULL) {
         for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && inst_batch->verts[i]; i++) {
-          GPU_batch_instbuf_add_ex(batch, inst_batch->verts[i], false);
+          GPU_batch_instbuf_add(batch, inst_batch->verts[i], false);
         }
       }
       else {
-        GPU_batch_instbuf_add_ex(batch, inst_buf, false);
+        GPU_batch_instbuf_add(batch, inst_buf, false);
       }
       /* Add reference to avoid comparing pointers (in DRW_temp_batch_request) that could
        * potentially be the same. This will delay the freeing of the GPUVertBuf itself. */
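Note: with the `_ex` suffix dropped, `GPU_batch_instbuf_add()` now always takes the ownership flag, mirroring `GPU_batch_vertbuf_add()`. A minimal sketch of attaching a per-instance buffer outside the draw manager; the attribute name and counts below are illustrative and not taken from this patch:

/* Illustrative only: "inst_offset" and the instance count are made-up values. */
GPUVertFormat inst_format = {0};
GPU_vertformat_attr_add(&inst_format, "inst_offset", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);

GPUVertBuf *inst_vbo = GPU_vertbuf_create_with_format(&inst_format);
GPU_vertbuf_data_alloc(inst_vbo, 16); /* One entry per instance. */
/* ... fill "inst_offset" for each of the 16 instances ... */

/* own_vbo = true: the instance buffer is freed together with the batch. */
GPU_batch_instbuf_add(batch, inst_vbo, true);
GPU_batch_draw(batch); /* Instance count defaults to the instance VBO length. */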
@@ -1303,10 +1303,10 @@ struct PBVHBatches {
     int vbo_i = get_vbo_index(vbo);
 
     batch.vbos.append(vbo_i);
-    GPU_batch_vertbuf_add_ex(batch.tris, vbo->vert_buf, false);
+    GPU_batch_vertbuf_add(batch.tris, vbo->vert_buf, false);
 
     if (batch.lines) {
-      GPU_batch_vertbuf_add_ex(batch.lines, vbo->vert_buf, false);
+      GPU_batch_vertbuf_add(batch.lines, vbo->vert_buf, false);
     }
   }
 
@@ -1630,7 +1630,7 @@ static void icon_draw_cache_texture_flush_ex(GPUTexture *texture,
 
   GPUBatch *quad = GPU_batch_preset_quad();
   GPU_batch_set_shader(quad, shader);
-  GPU_batch_draw_instanced(quad, texture_draw_calls->calls);
+  GPU_batch_draw_instance_range(quad, 0, texture_draw_calls->calls);
 
   GPU_texture_unbind(texture);
   GPU_uniformbuf_unbind(ubo);
@@ -1157,7 +1157,7 @@ void UI_widgetbase_draw_cache_flush()
                          MAX_WIDGET_PARAMETERS * MAX_WIDGET_BASE_BATCH,
                          (float(*)[4])g_widget_base_batch.params);
     GPU_batch_uniform_3fv(batch, "checkerColorAndSize", checker_params);
-    GPU_batch_draw_instanced(batch, g_widget_base_batch.count);
+    GPU_batch_draw_instance_range(batch, 0, g_widget_base_batch.count);
   }
   g_widget_base_batch.count = 0;
 }
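Note: both call sites above follow the same mechanical replacement for the removed `GPU_batch_draw_instanced()`: the new `GPU_batch_draw_instance_range()` takes an explicit first instance, so existing callers pass 0. A before/after sketch:

/* Before this patch: */
GPU_batch_draw_instanced(batch, instance_count);

/* After this patch: identical behavior, with the range made explicit. */
GPU_batch_draw_instance_range(batch, 0, instance_count);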
@@ -4,8 +4,15 @@
 /** \file
  * \ingroup gpu
  *
- * GPU geometry batch
- * Contains VAOs + VBOs + Shader representing a drawable entity.
+ * GPU geometry batch.
+ *
+ * Contains Vertex Buffers, Index Buffers, and Shader reference, altogether representing a drawable
+ * entity. It is meant to be used for drawing large (> 1000 vertices) reusable (drawn multiple
+ * times) model with complex data layout. In other words, it is meant for all cases where the
+ * immediate drawing module (imm) is inadequate.
+ *
+ * Vertex & Index Buffers can be owned by a batch. In such case they will be freed when the batch
+ * gets cleared or discarded.
  */
 
 #pragma once
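Note: the expanded file comment describes when a batch is preferable to the immediate-mode (imm) module. A minimal usage sketch, assuming a builtin shader and a made-up triangle (none of this is part of the patch):

GPUVertFormat format = {0};
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);

GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(vbo, 3);
/* ... fill the "pos" attribute for the 3 vertices ... */

/* The batch owns the VBO, so it is freed when the batch is discarded. */
GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
GPU_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);
GPU_batch_draw(batch);
GPU_batch_discard(batch); /* Also frees the owned VBO. */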
@@ -60,7 +67,8 @@ extern "C" {
 
 /**
  * IMPORTANT: Do not allocate manually as the real struct is bigger (i.e: GLBatch). This is only
- * the common and "public" part of the struct. Use the provided allocator.
+ * the common and "public" part of the struct. Use `GPU_batch_calloc()` and `GPU_batch_create()`
+ * instead.
  * TODO(fclem): Make the content of this struct hidden and expose getters/setters.
  */
 typedef struct GPUBatch {
@@ -80,75 +88,163 @@ typedef struct GPUBatch {
   struct GPUShader *shader;
 } GPUBatch;
 
+/* -------------------------------------------------------------------- */
+/** \name Creation
+ * \{ */
+
+/**
+ * Allocate a #GPUBatch with a cleared state.
+ * The returned #GPUBatch needs to be passed to `GPU_batch_init` before being usable.
+ */
 GPUBatch *GPU_batch_calloc(void);
-GPUBatch *GPU_batch_create_ex(GPUPrimType prim,
-                              GPUVertBuf *vert,
-                              GPUIndexBuf *elem,
-                              eGPUBatchFlag owns_flag);
-void GPU_batch_init_ex(GPUBatch *batch,
-                       GPUPrimType prim,
-                       GPUVertBuf *vert,
-                       GPUIndexBuf *elem,
-                       eGPUBatchFlag owns_flag);
+
+/**
+ * Creates a #GPUBatch with explicit buffer ownership.
+ */
+GPUBatch *GPU_batch_create_ex(GPUPrimType primitive_type,
+                              GPUVertBuf *vertex_buf,
+                              GPUIndexBuf *index_buf,
+                              eGPUBatchFlag owns_flag);
+/**
+ * Creates a #GPUBatch without buffer ownership.
+ */
+#define GPU_batch_create(primitive_type, vertex_buf, index_buf) \
+  GPU_batch_create_ex(primitive_type, vertex_buf, index_buf, (eGPUBatchFlag)0)
+
+/**
+ * Initialize a cleared #GPUBatch with explicit buffer ownership.
+ * A #GPUBatch is in cleared state if it was just allocated using `GPU_batch_calloc()` or cleared
+ * using `GPU_batch_clear()`.
+ */
+void GPU_batch_init_ex(GPUBatch *batch,
+                       GPUPrimType primitive_type,
+                       GPUVertBuf *vertex_buf,
+                       GPUIndexBuf *index_buf,
+                       eGPUBatchFlag owns_flag);
+/**
+ * Initialize a cleared #GPUBatch without buffer ownership.
+ * A #GPUBatch is in cleared state if it was just allocated using `GPU_batch_calloc()` or cleared
+ * using `GPU_batch_clear()`.
+ */
+#define GPU_batch_init(batch, primitive_type, vertex_buf, index_buf) \
+  GPU_batch_init_ex(batch, primitive_type, vertex_buf, index_buf, (eGPUBatchFlag)0)
+
 /**
+ * DEPRECATED: It is easy to loose ownership with this. To be removed.
  * This will share the VBOs with the new batch.
  */
 void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);
 
-#define GPU_batch_create(prim, verts, elem) \
-  GPU_batch_create_ex(prim, verts, elem, (eGPUBatchFlag)0)
-#define GPU_batch_init(batch, prim, verts, elem) \
-  GPU_batch_init_ex(batch, prim, verts, elem, (eGPUBatchFlag)0)
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Deletion
+ * \{ */
 
 /**
- * Same as discard but does not free. (does not call free callback).
+ * Clear a #GPUBatch without freeing its own memory.
+ * The #GPUBatch can then be reused using `GPU_batch_init()`.
+ * Discards all owned vertex and index buffers.
  */
-void GPU_batch_clear(GPUBatch *);
+void GPU_batch_clear(GPUBatch *batch);
+
+#define GPU_BATCH_CLEAR_SAFE(batch) \
+  do { \
+    if (batch != NULL) { \
+      GPU_batch_clear(batch); \
+      memset(batch, 0, sizeof(*(batch))); \
+    } \
+  } while (0)
+
 /**
- * \note Verts & elem are not discarded.
+ * Free a #GPUBatch object.
+ * Discards all owned vertex and index buffers.
  */
-void GPU_batch_discard(GPUBatch *);
+void GPU_batch_discard(GPUBatch *batch);
+
+#define GPU_BATCH_DISCARD_SAFE(batch) \
+  do { \
+    if (batch != NULL) { \
+      GPU_batch_discard(batch); \
+      batch = NULL; \
+    } \
+  } while (0)
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Buffers Management
+ * \{ */
+
 /**
+ * Add the given \a vertex_buf as vertex buffer to a #GPUBatch.
+ * \return the index of verts in the batch.
+ */
+int GPU_batch_vertbuf_add(GPUBatch *batch, GPUVertBuf *vertex_buf, bool own_vbo);
+
+/**
+ * Add the given \a vertex_buf as instanced vertex buffer to a #GPUBatch.
+ * \return the index of verts in the batch.
+ */
+int GPU_batch_instbuf_add(GPUBatch *batch, GPUVertBuf *vertex_buf, bool own_vbo);
+
+/**
+ * Set the first instanced vertex buffer of a #GPUBatch.
  * \note Override ONLY the first instance VBO (and free them if owned).
  */
-void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancing */
+void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *vertex_buf, bool own_vbo);
 
 /**
- * \note Override any previously assigned elem (and free it if owned).
+ * Set the index buffer of a #GPUBatch.
+ * \note Override any previously assigned index buffer (and free it if owned).
  */
-void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo);
+void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *index_buf, bool own_ibo);
 
-int GPU_batch_instbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
 /**
- * Returns the index of verts in the batch.
+ * Returns true if the #GPUbatch has \a vertex_buf in its vertex buffer list.
+ * \note The search is only conducted on the non-instance rate vertex buffer list.
  */
-int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
-bool GPU_batch_vertbuf_has(GPUBatch *, GPUVertBuf *);
-
-#define GPU_batch_vertbuf_add(batch, verts) GPU_batch_vertbuf_add_ex(batch, verts, false)
+bool GPU_batch_vertbuf_has(GPUBatch *batch, GPUVertBuf *vertex_buf);
 
 /**
- * Set resource id buffer to bind as instance attribute to workaround the lack of gl_BaseInstance.
+ * Set resource id buffer to bind as instance attribute to workaround the lack of gl_BaseInstance
+ * on some hardware / platform.
+ * \note Only to be used by draw manager.
  */
 void GPU_batch_resource_id_buf_set(GPUBatch *batch, GPUStorageBuf *resource_id_buf);
 
-void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader);
-/**
- * Bind program bound to IMM to the batch.
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Shader Binding & Uniforms
  *
- * XXX Use this with much care. Drawing with the #GPUBatch API is not compatible with IMM.
- * DO NOT DRAW WITH THE BATCH BEFORE CALLING #immUnbindProgram.
+ * TODO(fclem): This whole section should be removed. See the other `TODO`s in this section.
+ * This is because we want to remove #GPUBatch.shader to avoid usage mistakes.
+ * Interacting directly with the #GPUShader provide a clearer interface and less error-prone.
+ * \{ */
+
+/**
+ * Sets the shader to be drawn with this #GPUBatch.
+ * \note This need to be called first for the `GPU_batch_uniform_*` functions to work.
  */
-void GPU_batch_program_set_imm_shader(GPUBatch *batch);
+/* TODO(fclem): These should be removed and replaced by `GPU_shader_bind()`. */
+void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader);
 void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);
 void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                                eGPUBuiltinShader shader_id,
                                                eGPUShaderConfig sh_cfg);
+/**
+ * Bind program bound to IMM (immediate mode) to the #GPUBatch.
+ *
+ * XXX: Use this with much care. Drawing with the #GPUBatch API is not compatible with IMM.
+ * DO NOT DRAW WITH THE BATCH BEFORE CALLING #immUnbindProgram.
+ */
+void GPU_batch_program_set_imm_shader(GPUBatch *batch);
 
-/* Will only work after setting the batch program. */
+/**
+ * Set uniform variables for the shader currently bound to the #GPUBatch.
+ */
 /* TODO(fclem): These need to be replaced by GPU_shader_uniform_* with explicit shader. */
 
 #define GPU_batch_uniform_1i(batch, name, x) GPU_shader_uniform_1i((batch)->shader, name, x);
 #define GPU_batch_uniform_1b(batch, name, x) GPU_shader_uniform_1b((batch)->shader, name, x);
 #define GPU_batch_uniform_1f(batch, name, x) GPU_shader_uniform_1f((batch)->shader, name, x);
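Note: the rewritten comments spell out the cleared-state and ownership rules. A short sketch of the two tear-down paths documented above (illustrative; `other_vbo` is a placeholder):

/* Reuse path: clear discards owned buffers and returns the batch to the
 * cleared state, so it can be re-initialized in place. */
GPU_batch_clear(batch);
GPU_batch_init(batch, GPU_PRIM_LINES, other_vbo, NULL);

/* Free path: the NULL-safe macro discards the batch (and its owned buffers)
 * and resets the pointer. */
GPU_BATCH_DISCARD_SAFE(batch);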
@@ -171,99 +267,105 @@ void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
 #define GPU_batch_texture_bind(batch, name, tex) \
   GPU_texture_bind(tex, GPU_shader_get_texture_binding((batch)->shader, name));
 
-/**
- * Return indirect draw call parameters for this batch.
- * NOTE: r_base_index is set to -1 if not using an index buffer.
- */
-void GPU_batch_draw_parameter_get(
-    GPUBatch *batch, int *r_v_count, int *r_v_first, int *r_base_index, int *r_i_count);
+/** \} */
 
+/* -------------------------------------------------------------------- */
+/** \name Shader Binding & Uniforms
+ * \{ */
+
+/**
+ * Draw the #GPUBatch with vertex count and instance count from its vertex buffers lengths.
+ * Ensures the associated shader is bound. TODO(fclem) remove this behavior.
+ */
 void GPU_batch_draw(GPUBatch *batch);
-void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count);
-/**
- * Draw multiple instance of a batch without having any instance attributes.
- */
-void GPU_batch_draw_instanced(GPUBatch *batch, int i_count);
 
 /**
- * This does not bind/unbind shader and does not call GPU_matrix_bind().
+ * Draw the #GPUBatch with vertex count and instance count from its vertex buffers lengths.
+ * Ensures the associated shader is bound. TODO(fclem) remove this behavior.
+ *
+ * A \a vertex_count of 0 will use the default number of vertex.
+ * The \a vertex_first sets the start of the instance-rate attributes.
+ *
+ * \note No out-of-bound access check is made on the vertex buffers since they are tricky to
+ * detect. Double check that the range of vertex has data or that the data isn't read by the
+ * shader.
  */
-void GPU_batch_draw_advanced(GPUBatch *batch, int v_first, int v_count, int i_first, int i_count);
+void GPU_batch_draw_range(GPUBatch *batch, int vertex_first, int vertex_count);
 
 /**
- * Issue a draw call using GPU computed arguments. The argument are expected to be valid for the
- * type of geometry drawn (index or non-indexed).
+ * Draw multiple instances of the #GPUBatch with custom instance range.
+ * Ensures the associated shader is bound. TODO(fclem) remove this behavior.
+ *
+ * An \a instance_count of 0 will use the default number of instances.
+ * The \a instance_first sets the start of the instance-rate attributes.
+ *
+ * \note this can be used even if the #GPUBatch contains no instance-rate attributes.
+ * \note No out-of-bound access check is made on the vertex buffers since they are tricky to
+ * detect. Double check that the range of vertex has data or that the data isn't read by the
+ * shader.
+ */
+void GPU_batch_draw_instance_range(GPUBatch *batch, int instance_first, int instance_count);
+
+/**
+ * Draw the #GPUBatch custom parameters.
+ * IMPORTANT: This does not bind/unbind shader and does not call GPU_matrix_bind().
+ *
+ * A \a vertex_count of 0 will use the default number of vertex.
+ * An \a instance_count of 0 will use the default number of instances.
+ *
+ * \note No out-of-bound access check is made on the vertex buffers since they are tricky to
+ * detect. Double check that the range of vertex has data or that the data isn't read by the
+ * shader.
+ */
+void GPU_batch_draw_advanced(
+    GPUBatch *batch, int vertex_first, int vertex_count, int instance_first, int instance_count);
+
+/**
+ * Issue a single draw call using arguments sourced from a #GPUStorageBuf.
+ * The argument are expected to be valid for the type of geometry contained by this #GPUBatch
+ * (index or non-indexed).
+ *
+ * A `GPU_BARRIER_COMMAND` memory barrier is automatically added before the call.
+ *
+ * For more info see the GL documentation:
+ * https://registry.khronos.org/OpenGL-Refpages/gl4/html/glDrawArraysIndirect.xhtml
  */
 void GPU_batch_draw_indirect(GPUBatch *batch, GPUStorageBuf *indirect_buf, intptr_t offset);
 
+/**
+ * Issue \a count draw calls using arguments sourced from a #GPUStorageBuf.
+ * The \a stride (in bytes) control the spacing between each command description.
+ * The argument are expected to be valid for the type of geometry contained by this #GPUBatch
+ * (index or non-indexed).
+ *
+ * A `GPU_BARRIER_COMMAND` memory barrier is automatically added before the call.
+ *
+ * For more info see the GL documentation:
+ * https://registry.khronos.org/OpenGL-Refpages/gl4/html/glMultiDrawArraysIndirect.xhtml
+ */
 void GPU_batch_multi_draw_indirect(
     GPUBatch *batch, GPUStorageBuf *indirect_buf, int count, intptr_t offset, intptr_t stride);
 
-#if 0 /* future plans */
+/**
+ * Return indirect draw call parameters for this #GPUBatch.
+ * NOTE: \a r_base_index is set to -1 if not using an index buffer.
+ */
+void GPU_batch_draw_parameter_get(GPUBatch *batch,
+                                  int *r_vertex_count,
+                                  int *r_vertex_first,
+                                  int *r_base_index,
+                                  int *r_indices_count);
 
-/* Can multiple batches share a #GPUVertBuf? Use ref count? */
+/** \} */
 
-/* We often need a batch with its own data, to be created and discarded together. */
-/* WithOwn variants reduce number of system allocations. */
-
-typedef struct BatchWithOwnVertexBuffer {
-  GPUBatch batch;
-  GPUVertBuf verts; /* link batch.verts to this */
-} BatchWithOwnVertexBuffer;
-
-typedef struct BatchWithOwnElementList {
-  GPUBatch batch;
-  GPUIndexBuf elem; /* link batch.elem to this */
-} BatchWithOwnElementList;
-
-typedef struct BatchWithOwnVertexBufferAndElementList {
-  GPUBatch batch;
-  GPUIndexBuf elem; /* link batch.elem to this */
-  GPUVertBuf verts; /* link batch.verts to this */
-} BatchWithOwnVertexBufferAndElementList;
-
-GPUBatch *create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat *, uint v_len, GPUIndexBuf *);
-GPUBatch *create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf *, uint prim_len);
-GPUBatch *create_BatchWithOwnVertexBufferAndElementList(GPUPrimType,
-                                                        GPUVertFormat *,
-                                                        uint v_len,
-                                                        uint prim_len);
-/* verts: shared, own */
-/* elem: none, shared, own */
-GPUBatch *create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);
-
-#endif /* future plans */
+/* -------------------------------------------------------------------- */
+/** \name Module init/exit
+ * \{ */
 
 void gpu_batch_init(void);
 void gpu_batch_exit(void);
 
-/* Macros */
-
-#define GPU_BATCH_DISCARD_SAFE(batch) \
-  do { \
-    if (batch != NULL) { \
-      GPU_batch_discard(batch); \
-      batch = NULL; \
-    } \
-  } while (0)
-
-#define GPU_BATCH_CLEAR_SAFE(batch) \
-  do { \
-    if (batch != NULL) { \
-      GPU_batch_clear(batch); \
-      memset(batch, 0, sizeof(*(batch))); \
-    } \
-  } while (0)
-
-#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) \
-  do { \
-    if (_batch_array != NULL) { \
-      BLI_assert(_len > 0); \
-      for (int _i = 0; _i < _len; _i++) { \
-        GPU_BATCH_DISCARD_SAFE(_batch_array[_i]); \
-      } \
-      MEM_freeN(_batch_array); \
-    } \
-  } while (0)
+/** \} */
 
 #ifdef __cplusplus
 }
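Note: `GPU_batch_draw_parameter_get()` keeps its behavior but gains renamed output parameters; judging from the implementation further down, `r_indices_count` actually receives the resolved instance count. A sketch of a caller with illustrative local names:

int vertex_count, vertex_first, base_index, instance_count;
GPU_batch_draw_parameter_get(batch, &vertex_count, &vertex_first, &base_index, &instance_count);
/* base_index is -1 when the batch draws without an index buffer. */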
@@ -40,40 +40,43 @@ GPUBatch *GPU_batch_calloc()
   return batch;
 }
 
-GPUBatch *GPU_batch_create_ex(GPUPrimType prim_type,
-                              GPUVertBuf *verts,
-                              GPUIndexBuf *elem,
+GPUBatch *GPU_batch_create_ex(GPUPrimType primitive_type,
+                              GPUVertBuf *vertex_buf,
+                              GPUIndexBuf *index_buf,
                               eGPUBatchFlag owns_flag)
 {
   GPUBatch *batch = GPU_batch_calloc();
-  GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
+  GPU_batch_init_ex(batch, primitive_type, vertex_buf, index_buf, owns_flag);
   return batch;
 }
 
 void GPU_batch_init_ex(GPUBatch *batch,
-                       GPUPrimType prim_type,
-                       GPUVertBuf *verts,
-                       GPUIndexBuf *elem,
+                       GPUPrimType primitive_type,
+                       GPUVertBuf *vertex_buf,
+                       GPUIndexBuf *index_buf,
                        eGPUBatchFlag owns_flag)
 {
   /* Do not pass any other flag */
   BLI_assert((owns_flag & ~(GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX)) == 0);
+  /* Batch needs to be in cleared state. */
+  BLI_assert((batch->flag & GPU_BATCH_INIT) == 0);
 
-  batch->verts[0] = verts;
+  batch->verts[0] = vertex_buf;
   for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; v++) {
     batch->verts[v] = nullptr;
   }
   for (auto &v : batch->inst) {
     v = nullptr;
   }
-  batch->elem = elem;
-  batch->prim_type = prim_type;
+  batch->elem = index_buf;
+  batch->prim_type = primitive_type;
   batch->flag = owns_flag | GPU_BATCH_INIT | GPU_BATCH_DIRTY;
   batch->shader = nullptr;
 }
 
 void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
 {
+  GPU_batch_clear(batch_dst);
   GPU_batch_init_ex(
       batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, GPU_BATCH_INVALID);
 
@@ -118,35 +121,35 @@ void GPU_batch_discard(GPUBatch *batch)
 /** \name Buffers Management
  * \{ */
 
-void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
+void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *vertex_buf, bool own_vbo)
 {
-  BLI_assert(inst);
+  BLI_assert(vertex_buf);
   batch->flag |= GPU_BATCH_DIRTY;
 
   if (batch->inst[0] && (batch->flag & GPU_BATCH_OWNS_INST_VBO)) {
     GPU_vertbuf_discard(batch->inst[0]);
   }
-  batch->inst[0] = inst;
+  batch->inst[0] = vertex_buf;
 
   SET_FLAG_FROM_TEST(batch->flag, own_vbo, GPU_BATCH_OWNS_INST_VBO);
 }
 
-void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo)
+void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *index_buf, bool own_ibo)
 {
-  BLI_assert(elem);
+  BLI_assert(index_buf);
   batch->flag |= GPU_BATCH_DIRTY;
 
   if (batch->elem && (batch->flag & GPU_BATCH_OWNS_INDEX)) {
     GPU_indexbuf_discard(batch->elem);
   }
-  batch->elem = elem;
+  batch->elem = index_buf;
 
   SET_FLAG_FROM_TEST(batch->flag, own_ibo, GPU_BATCH_OWNS_INDEX);
 }
 
-int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
+int GPU_batch_instbuf_add(GPUBatch *batch, GPUVertBuf *vertex_buf, bool own_vbo)
 {
-  BLI_assert(insts);
+  BLI_assert(vertex_buf);
   batch->flag |= GPU_BATCH_DIRTY;
 
   for (uint v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
@@ -157,7 +160,7 @@ int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
       // BLI_assert(insts->vertex_len == batch->inst[0]->vertex_len);
     }
 
-    batch->inst[v] = insts;
+    batch->inst[v] = vertex_buf;
     SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_INST_VBO << v));
     return v;
   }
@@ -167,9 +170,9 @@ int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
   return -1;
 }
 
-int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
+int GPU_batch_vertbuf_add(GPUBatch *batch, GPUVertBuf *vertex_buf, bool own_vbo)
 {
-  BLI_assert(verts);
+  BLI_assert(vertex_buf);
   batch->flag |= GPU_BATCH_DIRTY;
 
   for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
@@ -179,7 +182,7 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
       /* This is an issue for the HACK inside DRW_vbo_request(). */
       // BLI_assert(verts->vertex_len == batch->verts[0]->vertex_len);
     }
-    batch->verts[v] = verts;
+    batch->verts[v] = vertex_buf;
     SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_VBO << v));
     return v;
   }
@@ -189,10 +192,10 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
   return -1;
 }
 
-bool GPU_batch_vertbuf_has(GPUBatch *batch, GPUVertBuf *verts)
+bool GPU_batch_vertbuf_has(GPUBatch *batch, GPUVertBuf *vertex_buf)
 {
   for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
-    if (batch->verts[v] == verts) {
+    if (batch->verts[v] == vertex_buf) {
       return true;
     }
   }
@@ -211,7 +214,6 @@ void GPU_batch_resource_id_buf_set(GPUBatch *batch, GPUStorageBuf *resource_id_b
 /* -------------------------------------------------------------------- */
 /** \name Uniform setters
  *
- * TODO(fclem): port this to GPUShader.
  * \{ */
 
 void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
@@ -226,19 +228,22 @@ void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
 /** \name Drawing / Drawcall functions
  * \{ */
 
-void GPU_batch_draw_parameter_get(
-    GPUBatch *gpu_batch, int *r_v_count, int *r_v_first, int *r_base_index, int *r_i_count)
+void GPU_batch_draw_parameter_get(GPUBatch *gpu_batch,
+                                  int *r_vertex_count,
+                                  int *r_vertex_first,
+                                  int *r_base_index,
+                                  int *r_indices_count)
 {
   Batch *batch = static_cast<Batch *>(gpu_batch);
 
   if (batch->elem) {
-    *r_v_count = batch->elem_()->index_len_get();
-    *r_v_first = batch->elem_()->index_start_get();
+    *r_vertex_count = batch->elem_()->index_len_get();
+    *r_vertex_first = batch->elem_()->index_start_get();
     *r_base_index = batch->elem_()->index_base_get();
   }
   else {
-    *r_v_count = batch->verts_(0)->vertex_len;
-    *r_v_first = 0;
+    *r_vertex_count = batch->verts_(0)->vertex_len;
+    *r_vertex_first = 0;
     *r_base_index = -1;
   }
 
@@ -247,7 +252,7 @@ void GPU_batch_draw_parameter_get(
   if (batch->inst[1] != nullptr) {
     i_count = min_ii(i_count, batch->inst_(1)->vertex_len);
   }
-  *r_i_count = i_count;
+  *r_indices_count = i_count;
 }
 
 void GPU_batch_draw(GPUBatch *batch)
@@ -256,48 +261,51 @@ void GPU_batch_draw(GPUBatch *batch)
   GPU_batch_draw_advanced(batch, 0, 0, 0, 0);
 }
 
-void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count)
+void GPU_batch_draw_range(GPUBatch *batch, int vertex_first, int vertex_count)
 {
   GPU_shader_bind(batch->shader);
-  GPU_batch_draw_advanced(batch, v_first, v_count, 0, 0);
+  GPU_batch_draw_advanced(batch, vertex_first, vertex_count, 0, 0);
 }
 
-void GPU_batch_draw_instanced(GPUBatch *batch, int i_count)
+void GPU_batch_draw_instance_range(GPUBatch *batch, int instance_first, int instance_count)
 {
   BLI_assert(batch->inst[0] == nullptr);
 
   GPU_shader_bind(batch->shader);
-  GPU_batch_draw_advanced(batch, 0, 0, 0, i_count);
+  GPU_batch_draw_advanced(batch, 0, 0, instance_first, instance_count);
 }
 
-void GPU_batch_draw_advanced(
-    GPUBatch *gpu_batch, int v_first, int v_count, int i_first, int i_count)
+void GPU_batch_draw_advanced(GPUBatch *gpu_batch,
+                             int vertex_first,
+                             int vertex_count,
+                             int instance_first,
+                             int instance_count)
 {
   BLI_assert(Context::get()->shader != nullptr);
   Batch *batch = static_cast<Batch *>(gpu_batch);
 
-  if (v_count == 0) {
+  if (vertex_count == 0) {
     if (batch->elem) {
-      v_count = batch->elem_()->index_len_get();
+      vertex_count = batch->elem_()->index_len_get();
     }
     else {
-      v_count = batch->verts_(0)->vertex_len;
+      vertex_count = batch->verts_(0)->vertex_len;
     }
   }
-  if (i_count == 0) {
-    i_count = (batch->inst[0]) ? batch->inst_(0)->vertex_len : 1;
+  if (instance_count == 0) {
+    instance_count = (batch->inst[0]) ? batch->inst_(0)->vertex_len : 1;
     /* Meh. This is to be able to use different numbers of verts in instance VBO's. */
     if (batch->inst[1] != nullptr) {
-      i_count = min_ii(i_count, batch->inst_(1)->vertex_len);
+      instance_count = min_ii(instance_count, batch->inst_(1)->vertex_len);
     }
   }
 
-  if (v_count == 0 || i_count == 0) {
+  if (vertex_count == 0 || instance_count == 0) {
     /* Nothing to draw. */
     return;
   }
 
-  batch->draw(v_first, v_count, i_first, i_count);
+  batch->draw(vertex_first, vertex_count, instance_first, instance_count);
 }
 
 void GPU_batch_draw_indirect(GPUBatch *gpu_batch, GPUStorageBuf *indirect_buf, intptr_t offset)
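Note: the renamed parameters make the fallback rules easier to read: a `vertex_count` of 0 resolves to the index/vertex buffer length, and an `instance_count` of 0 resolves to the instance VBO length (or 1). A sketch of equivalent calls, assuming the shader was already bound with `GPU_shader_bind()`:

/* Draw everything: both counts resolved from the attached buffers. */
GPU_batch_draw_advanced(batch, 0, 0, 0, 0);

/* Draw 6 vertices starting at vertex 12, single instance. */
GPU_batch_draw_advanced(batch, 12, 6, 0, 0);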
@@ -157,7 +157,7 @@ static PyObject *pygpu_batch_vertbuf_add(BPyGPUBatch *self, BPyGPUVertBuf *py_bu
   PyList_Append(self->references, (PyObject *)py_buf);
 #endif
 
-  GPU_batch_vertbuf_add(self->batch, py_buf->buf);
+  GPU_batch_vertbuf_add(self->batch, py_buf->buf, false);
   Py_RETURN_NONE;
 }
 