DRW: Batch Cache: Add batch request system
This makes it possible for engines to ask for batches and only fill in their data after all engine populate functions have run. When the batch data is created, we therefore already know every batch needed for this redraw and the data each one requires. This allows for less redundant data preparation and better attrib masking. Ideally we would run all viewports' populate functions before executing the batch construction, but that is beyond the scope of this patch. Conversion from the old request method will be progressive, and both methods can coexist (see the uses of mesh_create_pos_and_nor()).
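The core of the patch is a two-phase flow: during cache population an engine only requests a batch (allocating a zeroed placeholder doubles as the "requested" flag), and once every populate function has run, a single pass fills just the buffers that were actually asked for. The standalone C sketch below models that pattern outside of Blender; FakeBatch, request_batch() and fill_requested() are illustrative stand-ins, not part of the DRW API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for GPUBatch: allocated-but-empty means "requested". */
typedef struct FakeBatch {
	int filled;
} FakeBatch;

/* Phase 1: called from each engine's populate step. Mirrors the shape of
 * DRW_batch_request(): allocation is deferred until someone asks. */
static FakeBatch *request_batch(FakeBatch **batch)
{
	if (*batch == NULL) {
		*batch = calloc(1, sizeof(FakeBatch));
	}
	return *batch;
}

/* Phase 2: runs after all populate functions. Mirrors the role of
 * drw_batch_cache_generate_requested(): fill only what was requested. */
static void fill_requested(FakeBatch *batch)
{
	if (batch != NULL && !batch->filled) {
		/* The expensive data extraction happens here, exactly once,
		 * with full knowledge of every request made this redraw. */
		batch->filled = 1;
	}
}

int main(void)
{
	FakeBatch *all_verts = NULL;

	request_batch(&all_verts);  /* engine A asks for the batch */
	request_batch(&all_verts);  /* engine B asks for the same batch */

	fill_requested(all_verts);  /* data is prepared only once, afterwards */
	printf("filled: %d\n", all_verts->filled);

	free(all_verts);
	return 0;
}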
@@ -48,10 +48,9 @@

#include "MEM_guardedalloc.h"

#include "DRW_render.h"

#include "draw_cache.h"
#include "draw_cache_impl.h"
#include "draw_manager.h"

/* Batches only (freed as an array). */
static struct DRWShapeCache {

@@ -3703,3 +3702,81 @@ GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
	}
	return *drw_cursor;
}

/** \} */

/* -------------------------------------------------------------------- */

/** \name Batch Cache Impl. common
 * \{ */

GPUBatch *DRW_batch_request(GPUBatch **batch)
{
	if (*batch == NULL) {
		*batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
	}
	return *batch;
}

bool DRW_batch_requested(GPUBatch *batch, int prim_type)
{
	/* Batch has been requested if it has been created but not initialized. */
	if (batch != NULL && batch->verts[0] == NULL) {
		/* HACK. We init without a valid VBO and let the first vbo binding
		 * fill verts[0]. */
		GPU_batch_init_ex(batch, prim_type, (GPUVertBuf *)1, NULL, 0);
		batch->verts[0] = NULL;
		return true;
	}
	return false;
}

void DRW_ibo_request(GPUBatch *batch, GPUIndexBuf **ibo)
{
	if (*ibo == NULL) {
		*ibo = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
	}
	GPU_batch_vao_cache_clear(batch);
	batch->elem = *ibo;
}

bool DRW_ibo_requested(GPUIndexBuf *ibo)
{
	/* TODO do not rely on data uploaded. This prevents multithreading.
	 * (need access to a gl context) */
	return (ibo != NULL && ibo->ibo_id == 0);
}

void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
{
	if (*vbo == NULL) {
		*vbo = MEM_callocN(sizeof(GPUVertBuf), "GPUVertBuf");
	}
	/* HACK set first vbo if not init. */
	if (batch->verts[0] == NULL) {
		GPU_batch_vao_cache_clear(batch);
		batch->verts[0] = *vbo;
	}
	else {
		GPU_batch_vertbuf_add(batch, *vbo);
	}
}

bool DRW_vbo_requested(GPUVertBuf *vbo)
{
	return (vbo != NULL && vbo->format.attr_len == 0);
}

void drw_batch_cache_generate_requested(Object *ob)
{
	switch (ob->type) {
		case OB_MESH:
			DRW_mesh_batch_cache_create_requested(ob);
			break;
		/* TODO all cases */
		default:
			break;
	}
}

/** \} */

@@ -124,6 +124,8 @@ void DRW_mesh_weight_state_copy(struct DRW_MeshWeightState *wstate_dst, const st
bool DRW_mesh_weight_state_compare(const struct DRW_MeshWeightState *a, const struct DRW_MeshWeightState *b);

/* Mesh */
void DRW_mesh_batch_cache_create_requested(struct Object *ob);

struct GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(
	struct Mesh *me, struct GPUMaterial **gpumat_array, uint gpumat_array_len, bool use_hide,
	char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count);

@@ -213,4 +215,15 @@ struct GPUBatch *DRW_particles_batch_cache_get_edit_inner_points(
struct GPUBatch *DRW_particles_batch_cache_get_edit_tip_points(
	struct Object *object, struct ParticleSystem *psys, struct PTCacheEdit *edit);

/* Common */
#define DRW_ADD_FLAG_FROM_VBO_REQUEST(flag, vbo, value) (flag |= DRW_vbo_requested(vbo) ? value : 0)
#define DRW_ADD_FLAG_FROM_IBO_REQUEST(flag, ibo, value) (flag |= DRW_ibo_requested(ibo) ? value : 0)

struct GPUBatch *DRW_batch_request(struct GPUBatch **batch);
bool DRW_batch_requested(struct GPUBatch *batch, int prim_type);
void DRW_ibo_request(struct GPUBatch *batch, struct GPUIndexBuf **ibo);
bool DRW_ibo_requested(struct GPUIndexBuf *ibo);
void DRW_vbo_request(struct GPUBatch *batch, struct GPUVertBuf **vbo);
bool DRW_vbo_requested(struct GPUVertBuf *vbo);

#endif /* __DRAW_CACHE_IMPL_H__ */

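The two macros above simply OR a datatype bit into an accumulator whenever the matching buffer was requested, so the MeshRenderData extraction can be scoped to exactly what this redraw needs. A self-contained sketch of the same pattern follows; the names MY_DATATYPE_* and buffer_requested() are made up for illustration and are not part of the DRW API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative datatype bits, standing in for the MR_DATATYPE_* flags. */
enum { MY_DATATYPE_VERT = (1 << 0), MY_DATATYPE_LOOP = (1 << 1) };

/* Stand-in for DRW_vbo_requested(): the buffer exists but holds no data yet. */
static bool buffer_requested(const int *buf) { return buf != NULL && *buf == 0; }

/* Same shape as DRW_ADD_FLAG_FROM_VBO_REQUEST(). */
#define ADD_FLAG_FROM_REQUEST(flag, buf, value) \
	((flag) |= buffer_requested(buf) ? (value) : 0)

int main(void)
{
	int pos_buf = 0;      /* requested: allocated, not yet filled */
	int *uv_buf = NULL;   /* never requested */
	int mr_flag = 0;

	ADD_FLAG_FROM_REQUEST(mr_flag, &pos_buf, MY_DATATYPE_VERT);
	ADD_FLAG_FROM_REQUEST(mr_flag, uv_buf, MY_DATATYPE_LOOP);

	/* Prints 0x1: only vertex data would be extracted for this redraw. */
	printf("mr_flag = 0x%x\n", mr_flag);
	return 0;
}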
@@ -74,6 +74,7 @@

static void mesh_batch_cache_clear(Mesh *me);


/* ---------------------------------------------------------------------- */

/** \name Mesh/BMesh Interface (direct access to basic data).

@@ -990,7 +991,6 @@ static MeshRenderData *mesh_render_data_create(Mesh *me, const int types)

/** \} */


/* ---------------------------------------------------------------------- */

/** \name Accessor Functions

@@ -3617,59 +3617,64 @@ static GPUVertBuf *mesh_create_tri_select_id(
	return vbo;
}

static void mesh_create_pos_and_nor(MeshRenderData *rdata, GPUVertBuf *vbo)
{
	static GPUVertFormat format = { 0 };
	static struct { uint pos, nor; } attr_id;
	if (format.attr_len == 0) {
		attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
		attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
	}

	GPU_vertbuf_init_with_format(vbo, &format);
	const int vbo_len_capacity = mesh_render_data_verts_len_get_maybe_mapped(rdata);
	GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);

	if (rdata->mapped.use == false) {
		if (rdata->edit_bmesh) {
			BMesh *bm = rdata->edit_bmesh->bm;
			BMIter iter;
			BMVert *eve;
			uint i;

			BM_ITER_MESH_INDEX (eve, &iter, bm, BM_VERTS_OF_MESH, i) {
				static short no_short[4];
				normal_float_to_short_v3(no_short, eve->no);

				GPU_vertbuf_attr_set(vbo, attr_id.pos, i, eve->co);
				GPU_vertbuf_attr_set(vbo, attr_id.nor, i, no_short);
			}
			BLI_assert(i == vbo_len_capacity);
		}
		else {
			for (int i = 0; i < vbo_len_capacity; i++) {
				GPU_vertbuf_attr_set(vbo, attr_id.pos, i, rdata->mvert[i].co);
				GPU_vertbuf_attr_set(vbo, attr_id.nor, i, rdata->mvert[i].no); /* XXX actually reading 4 shorts */
			}
		}
	}
	else {
		const MVert *mvert = rdata->mapped.me_cage->mvert;
		const int *v_origindex = rdata->mapped.v_origindex;
		for (int i = 0; i < vbo_len_capacity; i++) {
			const int v_orig = v_origindex[i];
			if (v_orig != ORIGINDEX_NONE) {
				const MVert *mv = &mvert[i];
				GPU_vertbuf_attr_set(vbo, attr_id.pos, i, mv->co);
				GPU_vertbuf_attr_set(vbo, attr_id.nor, i, mv->no); /* XXX actually reading 4 shorts */
			}
		}
	}
}

static GPUVertBuf *mesh_batch_cache_get_vert_pos_and_nor_in_order(
	MeshRenderData *rdata, MeshBatchCache *cache)
{
	BLI_assert(rdata->types & MR_DATATYPE_VERT);

	if (cache->pos_in_order == NULL) {
		static GPUVertFormat format = { 0 };
		static struct { uint pos, nor; } attr_id;
		if (format.attr_len == 0) {
			/* Normal is padded so that the vbo can be used as a buffer texture */
			attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
			attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
		}

		GPUVertBuf *vbo = cache->pos_in_order = GPU_vertbuf_create_with_format(&format);
		const int vbo_len_capacity = mesh_render_data_verts_len_get_maybe_mapped(rdata);
		GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);

		if (rdata->mapped.use == false) {
			if (rdata->edit_bmesh) {
				BMesh *bm = rdata->edit_bmesh->bm;
				BMIter iter;
				BMVert *eve;
				uint i;

				BM_ITER_MESH_INDEX (eve, &iter, bm, BM_VERTS_OF_MESH, i) {
					static short no_short[4];
					normal_float_to_short_v3(no_short, eve->no);

					GPU_vertbuf_attr_set(vbo, attr_id.pos, i, eve->co);
					GPU_vertbuf_attr_set(vbo, attr_id.nor, i, no_short);
				}
				BLI_assert(i == vbo_len_capacity);
			}
			else {
				for (int i = 0; i < vbo_len_capacity; i++) {
					GPU_vertbuf_attr_set(vbo, attr_id.pos, i, rdata->mvert[i].co);
					GPU_vertbuf_attr_set(vbo, attr_id.nor, i, rdata->mvert[i].no); /* XXX actually reading 4 shorts */
				}
			}
		}
		else {
			const MVert *mvert = rdata->mapped.me_cage->mvert;
			const int *v_origindex = rdata->mapped.v_origindex;
			for (int i = 0; i < vbo_len_capacity; i++) {
				const int v_orig = v_origindex[i];
				if (v_orig != ORIGINDEX_NONE) {
					const MVert *mv = &mvert[i];
					GPU_vertbuf_attr_set(vbo, attr_id.pos, i, mv->co);
					GPU_vertbuf_attr_set(vbo, attr_id.nor, i, mv->no); /* XXX actually reading 4 shorts */
				}
			}
		}
		cache->pos_in_order = GPU_vertbuf_create(GPU_USAGE_STATIC);
		mesh_create_pos_and_nor(rdata, cache->pos_in_order);
	}

	return cache->pos_in_order;

@@ -4970,18 +4975,7 @@ GPUBatch *DRW_mesh_batch_cache_get_points_with_normals(Mesh *me)

GPUBatch *DRW_mesh_batch_cache_get_all_verts(Mesh *me)
{
	MeshBatchCache *cache = mesh_batch_cache_get(me);

	if (cache->all_verts == NULL) {
		/* create batch from DM */
		MeshRenderData *rdata = mesh_render_data_create(me, MR_DATATYPE_VERT);

		cache->all_verts = GPU_batch_create(
			GPU_PRIM_POINTS, mesh_batch_cache_get_vert_pos_and_nor_in_order(rdata, cache), NULL);

		mesh_render_data_free(rdata);
	}

	return cache->all_verts;
	return DRW_batch_request(&cache->all_verts);
}

GPUBatch *DRW_mesh_batch_cache_get_fancy_edges(Mesh *me)

@@ -6012,3 +6006,37 @@ void DRW_mesh_cache_uvedit(
}

/** \} */


/* ---------------------------------------------------------------------- */

/** \name Grouped batch generation
 * \{ */

void DRW_mesh_batch_cache_create_requested(Object *ob)
{
	BLI_assert(ob->type == OB_MESH);

	Mesh *me = (Mesh *)ob->data;
	MeshBatchCache *cache = mesh_batch_cache_get(me);

	/* Init batches and request VBOs & IBOs */
	if (DRW_batch_requested(cache->all_verts, GPU_PRIM_POINTS)) {
		DRW_vbo_request(cache->all_verts, &cache->pos_in_order);
	}

	/* Generate MeshRenderData flags */
	int mr_flag = 0;
	DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->pos_in_order, MR_DATATYPE_VERT);

	MeshRenderData *rdata = mesh_render_data_create(me, mr_flag);

	/* Generate VBOs */
	if (DRW_vbo_requested(cache->pos_in_order)) {
		mesh_create_pos_and_nor(rdata, cache->pos_in_order);
	}

	mesh_render_data_free(rdata);
}

/** \} */

@@ -1018,6 +1018,10 @@ static void drw_engines_cache_populate(Object *ob)
		}
	}

	/* TODO: in the future it would be nice to generate once for all viewports.
	 * But we need a threaded DRW manager first. */
	drw_batch_cache_generate_requested(ob);

	/* ... and clearing it here too because these draw data are
	 * from a mempool and must not be freed individually by the depsgraph. */
	drw_drawdata_unlink_dupli((ID *)ob);

@@ -420,4 +420,6 @@ void drw_state_set(DRWState state);
void drw_debug_draw(void);
void drw_debug_init(void);

void drw_batch_cache_generate_requested(struct Object *ob);

#endif /* __DRAW_MANAGER_H__ */