blender-archive/source/blender/gpu/GPU_batch.h
Clément Foucault 65ad36f5fd DRWManager: New implementation.
This is a new implementation of the draw manager using modern
rendering practices and GPU driven culling.

This only ports features that are not considered deprecated or to be
removed.

The old DRW API is kept working alongside this new one and does not
interfere with it. However, this required some more hacking inside
`draw_view_lib.glsl`. At least the create infos are well separated.

The reviewer might start by looking at `draw_pass_test.cc` to see the
API in use.

Important files are `draw_pass.hh`, `draw_command.hh`,
`draw_command_shared.hh`.

In a nutshell (for a developer used to the old DRW API):
- `DRWShadingGroups` are replaced by `Pass<T>::Sub`.
- Contrary to DRWShadingGroups, all commands recorded inside a pass or
   sub-pass (even binds / push_constant / uniforms) will be executed in order.
- All memory is managed per object (except for sub-passes, which are managed
   by their parent pass) and not from draw manager pools. So passes "can"
   potentially be recorded once and submitted multiple times (but this is
   not really encouraged for now). The only implicit link is between resource
   lifetime and `ResourceHandles`.
- Sub-passes can be nested to any depth.
- IMPORTANT: All state propagates from sub-pass to sub-pass. There is no
   state stack concept anymore. Ensure the correct render state is set before
   drawing anything using `Pass::state_set()`.
- Draw calls now need a `ResourceHandle` instead of an `Object *`.
   This removes any implicit dependency between `Pass` and `Manager`.
   This was a huge problem in the old implementation since the manager did
   not know what to pull from the object. Now it is explicitly requested by
   the engine.
- The passes need to be submitted to a `draw::Manager` instance, which can
   be retrieved using `DRW_manager_get()` (for now). A usage sketch follows
   this list.
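
The sketch below shows roughly how recording and submitting a pass can look
with the new API. Only `Pass<T>::Sub`, `ResourceHandle`, `draw::Manager`,
`Pass::state_set()` and `DRW_manager_get()` come from the description above;
the other method and variable names (`PassSimple`, `sub()`, `shader_set()`,
`draw()`, `submit()`, `shader`, `ob_ref`, `batch`, `view`) are illustrative,
see `draw_pass_test.cc` for the real usage.

```cpp
using namespace blender::draw;

Manager &manager = *DRW_manager_get();

/* Commands execute in the exact order they are recorded. */
PassSimple pass = {"MyEngine.Opaque"};
pass.init();
pass.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL);
pass.shader_set(shader);

/* Sub-passes replace DRWShadingGroups and inherit the parent state. */
PassSimple::Sub &sub = pass.sub("Hair");
sub.push_constant("hair_thickness", 0.1f);

/* Draw calls take an explicit ResourceHandle instead of an Object pointer. */
ResourceHandle handle = manager.resource_handle(ob_ref);
sub.draw(batch, handle);

/* Submit against a view; visibility is then tested on the GPU. */
manager.submit(pass, view);
```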

Internally:
- All object data are stored in contiguous storage buffers, removing a lot
   of complexity from pass submission (see the sketch after this list).
- Draw calls are sorted and visibility-tested on the GPU, making more modern
   culling and better instancing usage possible in the future.
- Unit tests have been added for regression testing and to catch most API
   breakage.
- `draw::View` now contains culling data for all objects in the scene,
   allowing caching for multiple views.
- Bounding box and sphere final setup is moved to GPU.
- Some global resource locations have been hardcoded to reduce complexity.
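
To illustrate the contiguous-storage idea, here is a minimal, self-contained
sketch (not Blender code; every name in it is made up): object data is
appended to one flat array that gets uploaded as a single storage buffer, and
a handle is nothing more than an index into that array.

```cpp
#include <cstdint>
#include <vector>

/* Per-object data that ends up in one storage buffer (matrix, bounds for GPU culling, ...). */
struct ObjectDataSketch {
  float model[4][4];
  float bounds_min[3];
  float bounds_max[3];
};

/* A handle is just an index into the contiguous buffer. */
struct ResourceHandleSketch {
  uint32_t index;
};

class ManagerSketch {
  std::vector<ObjectDataSketch> object_data_; /* Uploaded to the GPU as one storage buffer. */

 public:
  /* The engine explicitly requests a handle for each object it wants to draw. */
  ResourceHandleSketch resource_handle(const ObjectDataSketch &data)
  {
    object_data_.push_back(data);
    return {uint32_t(object_data_.size() - 1)};
  }

  const ObjectDataSketch &lookup(ResourceHandleSketch handle) const
  {
    return object_data_[handle.index];
  }
};
```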

What is missing:
- ~~Workaround for lack of gl_BaseInstanceARB.~~ Done
- ~~Object Uniform Attributes.~~ Done (Not in this patch)
- Workaround for hardware supporting a maximum of 8 SSBOs.

Reviewed By: jbakker

Differential Revision: https://developer.blender.org/D15817
2022-09-02 18:45:14 +02:00


/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2016 by Mike Erwin. All rights reserved. */
/** \file
* \ingroup gpu
*
* GPU geometry batch
* Contains VAOs + VBOs + Shader representing a drawable entity.
*/
#pragma once
#include "BLI_utildefines.h"
#include "GPU_index_buffer.h"
#include "GPU_shader.h"
#include "GPU_storage_buffer.h"
#include "GPU_uniform_buffer.h"
#include "GPU_vertex_buffer.h"
#define GPU_BATCH_VBO_MAX_LEN 16
#define GPU_BATCH_INST_VBO_MAX_LEN 2
#define GPU_BATCH_VAO_STATIC_LEN 3
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16
typedef enum eGPUBatchFlag {
  /** Invalid default state. */
  GPU_BATCH_INVALID = 0,

  /** GPUVertBuf ownership. (One bit per vbo) */
  GPU_BATCH_OWNS_VBO = (1 << 0),
  GPU_BATCH_OWNS_VBO_MAX = (GPU_BATCH_OWNS_VBO << (GPU_BATCH_VBO_MAX_LEN - 1)),
  GPU_BATCH_OWNS_VBO_ANY = ((GPU_BATCH_OWNS_VBO << GPU_BATCH_VBO_MAX_LEN) - 1),
  /** Instance GPUVertBuf ownership. (One bit per vbo) */
  GPU_BATCH_OWNS_INST_VBO = (GPU_BATCH_OWNS_VBO_MAX << 1),
  GPU_BATCH_OWNS_INST_VBO_MAX = (GPU_BATCH_OWNS_INST_VBO << (GPU_BATCH_INST_VBO_MAX_LEN - 1)),
  GPU_BATCH_OWNS_INST_VBO_ANY = ((GPU_BATCH_OWNS_INST_VBO << GPU_BATCH_INST_VBO_MAX_LEN) - 1) &
                                ~GPU_BATCH_OWNS_VBO_ANY,
  /** GPUIndexBuf ownership. */
  GPU_BATCH_OWNS_INDEX = (GPU_BATCH_OWNS_INST_VBO_MAX << 1),

  /** Has been initialized. At least one VBO is set. */
  GPU_BATCH_INIT = (1 << 26),
  /** Batch is initialized but its VBOs are still being populated. (optional) */
  GPU_BATCH_BUILDING = (1 << 26),
  /** Cached data needs to be rebuilt. (VAO, PSO, ...) */
  GPU_BATCH_DIRTY = (1 << 27),
} eGPUBatchFlag;
#define GPU_BATCH_OWNS_NONE GPU_BATCH_INVALID
BLI_STATIC_ASSERT(GPU_BATCH_OWNS_INDEX < GPU_BATCH_INIT,
                  "eGPUBatchFlag: Error: status flags are shadowed by the ownership bits!")
ENUM_OPERATORS(eGPUBatchFlag, GPU_BATCH_DIRTY)
#ifdef __cplusplus
extern "C" {
#endif
/**
* IMPORTANT: Do not allocate manually as the real struct is bigger (i.e: GLBatch). This is only
* the common and "public" part of the struct. Use the provided allocator.
* TODO(fclem): Make the content of this struct hidden and expose getters/setters.
*/
typedef struct GPUBatch {
  /** verts[0] is required, others can be NULL */
  GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN];
  /** Instance attributes. */
  GPUVertBuf *inst[GPU_BATCH_INST_VBO_MAX_LEN];
  /** NULL if element list not needed */
  GPUIndexBuf *elem;
  /** Resource ID attribute workaround. */
  GPUStorageBuf *resource_id_buf;
  /** Bookkeeping. */
  eGPUBatchFlag flag;
  /** Type of geometry to draw. */
  GPUPrimType prim_type;
  /** Current assigned shader. DEPRECATED. Here only for uniform binding. */
  struct GPUShader *shader;
} GPUBatch;
GPUBatch *GPU_batch_calloc(void);
GPUBatch *GPU_batch_create_ex(GPUPrimType prim,
                              GPUVertBuf *vert,
                              GPUIndexBuf *elem,
                              eGPUBatchFlag owns_flag);
void GPU_batch_init_ex(GPUBatch *batch,
                       GPUPrimType prim,
                       GPUVertBuf *vert,
                       GPUIndexBuf *elem,
                       eGPUBatchFlag owns_flag);
/**
* This will share the VBOs with the new batch.
*/
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);
#define GPU_batch_create(prim, verts, elem) \
  GPU_batch_create_ex(prim, verts, elem, (eGPUBatchFlag)0)
#define GPU_batch_init(batch, prim, verts, elem) \
  GPU_batch_init_ex(batch, prim, verts, elem, (eGPUBatchFlag)0)
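/* Illustrative usage sketch (not part of this header; `vbo` and `ibo` stand for buffers created
 * elsewhere). Ownership flags make the batch free its buffers when it is discarded:
 *
 *   GPUBatch *batch = GPU_batch_create_ex(
 *       GPU_PRIM_TRIS, vbo, ibo, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
 *   ...
 *   GPU_batch_discard(batch); // Also frees `vbo` and `ibo` because of the ownership flags.
 */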
/**
* Same as discard but does not free. (does not call free callback).
*/
void GPU_batch_clear(GPUBatch *);
/**
* \note Verts & elem are not discarded.
*/
void GPU_batch_discard(GPUBatch *);
/**
* \note Override ONLY the first instance VBO (and free them if owned).
*/
void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancing */
/**
* \note Override any previously assigned elem (and free it if owned).
*/
void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo);
int GPU_batch_instbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
/**
* Returns the index of verts in the batch.
*/
int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
bool GPU_batch_vertbuf_has(GPUBatch *, GPUVertBuf *);
#define GPU_batch_vertbuf_add(batch, verts) GPU_batch_vertbuf_add_ex(batch, verts, false)
/**
* Set resource id buffer to bind as instance attribute to workaround the lack of gl_BaseInstance.
*/
void GPU_batch_resource_id_buf_set(GPUBatch *batch, GPUStorageBuf *resource_id_buf);
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader);
/**
* Bind program bound to IMM to the batch.
*
* XXX Use this with much care. Drawing with the #GPUBatch API is not compatible with IMM.
* DO NOT DRAW WITH THE BATCH BEFORE CALLING #immUnbindProgram.
*/
void GPU_batch_program_set_imm_shader(GPUBatch *batch);
void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);
void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                               eGPUBuiltinShader shader_id,
                                               eGPUShaderConfig sh_cfg);
/* Will only work after setting the batch program. */
/* TODO(fclem): These need to be replaced by GPU_shader_uniform_* with explicit shader. */
#define GPU_batch_uniform_1i(batch, name, x) GPU_shader_uniform_1i((batch)->shader, name, x);
#define GPU_batch_uniform_1b(batch, name, x) GPU_shader_uniform_1b((batch)->shader, name, x);
#define GPU_batch_uniform_1f(batch, name, x) GPU_shader_uniform_1f((batch)->shader, name, x);
#define GPU_batch_uniform_2f(batch, name, x, y) GPU_shader_uniform_2f((batch)->shader, name, x, y);
#define GPU_batch_uniform_3f(batch, name, x, y, z) \
  GPU_shader_uniform_3f((batch)->shader, name, x, y, z);
#define GPU_batch_uniform_4f(batch, name, x, y, z, w) \
  GPU_shader_uniform_4f((batch)->shader, name, x, y, z, w);
#define GPU_batch_uniform_2fv(batch, name, val) GPU_shader_uniform_2fv((batch)->shader, name, val);
#define GPU_batch_uniform_3fv(batch, name, val) GPU_shader_uniform_3fv((batch)->shader, name, val);
#define GPU_batch_uniform_4fv(batch, name, val) GPU_shader_uniform_4fv((batch)->shader, name, val);
#define GPU_batch_uniform_2fv_array(batch, name, len, val) \
  GPU_shader_uniform_2fv_array((batch)->shader, name, len, val);
#define GPU_batch_uniform_4fv_array(batch, name, len, val) \
  GPU_shader_uniform_4fv_array((batch)->shader, name, len, val);
#define GPU_batch_uniform_mat4(batch, name, val) \
  GPU_shader_uniform_mat4((batch)->shader, name, val);
#define GPU_batch_uniformbuf_bind(batch, name, ubo) \
  GPU_uniformbuf_bind(ubo, GPU_shader_get_uniform_block_binding((batch)->shader, name));
#define GPU_batch_texture_bind(batch, name, tex) \
  GPU_texture_bind(tex, GPU_shader_get_texture_binding((batch)->shader, name));
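/* Illustrative usage sketch (not part of this header; `batch` and `color` are created elsewhere).
 * Uniforms can only be set once a shader has been assigned to the batch:
 *
 *   GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
 *   GPU_batch_uniform_4fv(batch, "color", color);
 *   GPU_batch_draw(batch);
 */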
/**
* Return indirect draw call parameters for this batch.
* NOTE: r_base_index is set to -1 if not using an index buffer.
*/
void GPU_batch_draw_parameter_get(
    GPUBatch *batch, int *r_v_count, int *r_v_first, int *r_base_index, int *r_i_count);
void GPU_batch_draw(GPUBatch *batch);
void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count);
/**
* Draw multiple instances of a batch without having any instance attributes.
*/
void GPU_batch_draw_instanced(GPUBatch *batch, int i_count);
/**
* This does not bind/unbind shader and does not call GPU_matrix_bind().
*/
void GPU_batch_draw_advanced(GPUBatch *batch, int v_first, int v_count, int i_first, int i_count);
/**
* Issue a draw call using GPU-computed arguments. The arguments are expected to be valid for the
* type of geometry drawn (indexed or non-indexed).
*/
void GPU_batch_draw_indirect(GPUBatch *batch, GPUStorageBuf *indirect_buf, intptr_t offset);
void GPU_batch_multi_draw_indirect(
    GPUBatch *batch, GPUStorageBuf *indirect_buf, int count, intptr_t offset, intptr_t stride);
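/* Illustrative usage sketch (not part of this header; `indirect_buf` is a GPUStorageBuf whose
 * draw arguments were written on the GPU, e.g. by a culling compute shader):
 *
 *   GPU_batch_draw_indirect(batch, indirect_buf, 0);
 */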
#if 0 /* future plans */
/* Can multiple batches share a #GPUVertBuf? Use ref count? */
/* We often need a batch with its own data, to be created and discarded together. */
/* WithOwn variants reduce number of system allocations. */
typedef struct BatchWithOwnVertexBuffer {
  GPUBatch batch;
  GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBuffer;
typedef struct BatchWithOwnElementList {
  GPUBatch batch;
  GPUIndexBuf elem; /* link batch.elem to this */
} BatchWithOwnElementList;
typedef struct BatchWithOwnVertexBufferAndElementList {
  GPUBatch batch;
  GPUIndexBuf elem; /* link batch.elem to this */
  GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBufferAndElementList;

GPUBatch *create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat *, uint v_len, GPUIndexBuf *);
GPUBatch *create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf *, uint prim_len);
GPUBatch *create_BatchWithOwnVertexBufferAndElementList(GPUPrimType,
                                                        GPUVertFormat *,
                                                        uint v_len,
                                                        uint prim_len);
/* verts: shared, own */
/* elem: none, shared, own */
GPUBatch *create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);
#endif /* future plans */
void gpu_batch_init(void);
void gpu_batch_exit(void);
/* Macros */
#define GPU_BATCH_DISCARD_SAFE(batch) \
  do { \
    if (batch != NULL) { \
      GPU_batch_discard(batch); \
      batch = NULL; \
    } \
  } while (0)

#define GPU_BATCH_CLEAR_SAFE(batch) \
  do { \
    if (batch != NULL) { \
      GPU_batch_clear(batch); \
      memset(batch, 0, sizeof(*(batch))); \
    } \
  } while (0)

#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) \
  do { \
    if (_batch_array != NULL) { \
      BLI_assert(_len > 0); \
      for (int _i = 0; _i < _len; _i++) { \
        GPU_BATCH_DISCARD_SAFE(_batch_array[_i]); \
      } \
      MEM_freeN(_batch_array); \
    } \
  } while (0)
#ifdef __cplusplus
}
#endif