2018-01-09 14:09:14 +01:00
|
|
|
/*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version 2
|
|
|
|
* of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software Foundation,
|
|
|
|
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
|
|
*
|
2019-01-23 11:29:18 +11:00
|
|
|
* Copyright 2016, Blender Foundation.
|
2018-01-09 14:09:14 +01:00
|
|
|
*/
|
|
|
|
|
2019-02-18 08:08:12 +11:00
|
|
|
/** \file
|
|
|
|
* \ingroup draw
|
2018-01-09 14:09:14 +01:00
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* DRW Instance Data Manager
|
2020-09-07 15:57:12 +10:00
|
|
|
* This is a special memory manager that keeps memory blocks ready to send as VBO data in one
|
2019-08-18 04:11:41 +10:00
|
|
|
* continuous allocation. This way we avoid feeding #GPUBatch each instance data one by one and
|
2019-05-01 10:35:46 +10:00
|
|
|
* unnecessary memcpy. Since we lose which memory block was used each #DRWShadingGroup we need to
|
|
|
|
* redistribute them in the same order/size to avoid to realloc each frame. This is why
|
|
|
|
* #DRWInstanceDatas are sorted in a list for each different data size.
|
2019-03-19 15:17:46 +11:00
|
|
|
*/
|
2018-01-09 14:09:14 +01:00
|
|
|
|
|
|
|
#include "draw_instance_data.h"
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
#include "draw_manager.h"
|
|
|
|
|
2018-01-09 14:09:14 +01:00
|
|
|
#include "DRW_engine.h"
|
2018-02-14 18:16:52 +01:00
|
|
|
#include "DRW_render.h" /* For DRW_shgroup_get_instance_count() */
|
2018-01-09 14:09:14 +01:00
|
|
|
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
#include "GPU_material.h"
|
|
|
|
|
|
|
|
#include "DNA_particle_types.h"
|
|
|
|
|
|
|
|
#include "BKE_duplilist.h"
|
|
|
|
|
|
|
|
#include "RNA_access.h"
|
|
|
|
|
|
|
|
#include "BLI_bitmap.h"
|
2019-05-13 17:56:20 +02:00
|
|
|
#include "BLI_memblock.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "BLI_mempool.h"
|
|
|
|
#include "BLI_utildefines.h"
|
|
|
|
#include "MEM_guardedalloc.h"
|
2018-01-09 14:09:14 +01:00
|
|
|
|
|
|
|
/** One fixed-size pool of per-instance data, chained per size in #DRWInstanceDataList. */
struct DRWInstanceData {
  struct DRWInstanceData *next; /* Next pool of the same data size. */
  bool used;                    /* If this data is used or not. */
  size_t data_size;             /* Size of one instance data. */
  BLI_mempool *mempool;         /* Backing storage; one element per instance. */
};
|
|
|
|
|
|
|
|
/** Per-viewport container: instance-data pools sorted by size, plus temp buffer/batch pools. */
struct DRWInstanceDataList {
  struct DRWInstanceDataList *next, *prev; /* Links for the global g_idatalists list. */
  /* Linked lists for all possible data pool size */
  DRWInstanceData *idata_head[MAX_INSTANCE_DATA_SIZE];
  DRWInstanceData *idata_tail[MAX_INSTANCE_DATA_SIZE];

  /* Memblock pools recycled each frame (cleared in DRW_instance_buffer_finish). */
  BLI_memblock *pool_instancing; /* DRWTempInstancingHandle elements. */
  BLI_memblock *pool_batching;   /* GPUBatch pointers. */
  BLI_memblock *pool_buffers;    /* DRWTempBufferHandle elements. */
};
|
|
|
|
|
2019-05-13 17:56:20 +02:00
|
|
|
/** Handle recycling a dynamic vertex buffer between frames (see #DRW_temp_buffer_request). */
typedef struct DRWTempBufferHandle {
  /* Owned vertex buffer; recreated when the requested format changes. */
  GPUVertBuf *buf;
  /** Format pointer for reuse. */
  GPUVertFormat *format;
  /** Touched vertex length for resize. */
  int *vert_len;
} DRWTempBufferHandle;
|
|
|
|
|
2020-08-10 01:43:50 +02:00
|
|
|
/** Handle for a lazily-built instancing batch (see #DRW_temp_batch_instance_request). */
typedef struct DRWTempInstancingHandle {
  /** Copy of geom but with the per-instance attributes. */
  GPUBatch *batch;
  /** Batch containing instancing attributes. */
  GPUBatch *instancer;
  /** Call-buffer to be used instead of instancer. */
  GPUVertBuf *buf;
  /** Original non-instanced batch pointer. */
  GPUBatch *geom;
} DRWTempInstancingHandle;
|
2018-02-20 01:55:19 +01:00
|
|
|
|
2020-08-10 01:43:50 +02:00
|
|
|
static ListBase g_idatalists = {NULL, NULL};
|
2018-02-14 18:16:52 +01:00
|
|
|
|
2020-08-10 01:43:50 +02:00
|
|
|
/* Take a reference on every vertex and instance buffer attached to the batch,
 * so their lifetime outlives pointer comparisons done on later frames. */
static void instancing_batch_references_add(GPUBatch *batch)
{
  int v = 0;
  while (v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL) {
    GPU_vertbuf_handle_ref_add(batch->verts[v]);
    v++;
  }
  int n = 0;
  while (n < GPU_BATCH_INST_VBO_MAX_LEN && batch->inst[n] != NULL) {
    GPU_vertbuf_handle_ref_add(batch->inst[n]);
    n++;
  }
}
|
2019-05-13 17:56:20 +02:00
|
|
|
|
2020-08-10 01:43:50 +02:00
|
|
|
/* Release the references taken by instancing_batch_references_add() on all
 * vertex and instance buffers attached to the batch. */
static void instancing_batch_references_remove(GPUBatch *batch)
{
  int v = 0;
  while (v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL) {
    GPU_vertbuf_handle_ref_remove(batch->verts[v]);
    v++;
  }
  int n = 0;
  while (n < GPU_BATCH_INST_VBO_MAX_LEN && batch->inst[n] != NULL) {
    GPU_vertbuf_handle_ref_remove(batch->inst[n]);
    n++;
  }
}
|
2018-02-14 18:16:52 +01:00
|
|
|
|
2020-08-10 01:43:50 +02:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
/** \name Instance Buffer Management
|
|
|
|
* \{ */
|
|
|
|
|
2019-05-13 17:56:20 +02:00
|
|
|
/**
 * This manager allows to distribute existing batches for instancing
 * attributes. This reduce the number of batches creation.
 * Querying a batch is done with a vertex format. This format should
 * be static so that its pointer never changes (because we are using
 * this pointer as identifier [we don't want to check the full format
 * that would be too slow]).
 *
 * \param format: must outlive the handle; compared by pointer only.
 * \param vert_len: written by the caller during the frame; read back in
 * #DRW_instance_buffer_finish to resize/upload the buffer.
 * \return a recycled (or freshly allocated) dynamic vertex buffer.
 */
GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
                                    GPUVertFormat *format,
                                    int *vert_len)
{
  BLI_assert(format != NULL);
  BLI_assert(vert_len != NULL);

  /* Handles are reused in allocation order each frame, so the same call site
   * usually gets back the handle (and buffer) it used last frame. */
  DRWTempBufferHandle *handle = BLI_memblock_alloc(idatalist->pool_buffers);

  if (handle->format != format) {
    /* Format changed (or first use): discard the old buffer and create a new
     * one pre-allocated to one chunk. */
    handle->format = format;
    GPU_VERTBUF_DISCARD_SAFE(handle->buf);

    GPUVertBuf *vert = GPU_vertbuf_calloc();
    GPU_vertbuf_init_with_format_ex(vert, format, GPU_USAGE_DYNAMIC);
    GPU_vertbuf_data_alloc(vert, DRW_BUFFER_VERTS_CHUNK);

    handle->buf = vert;
  }
  handle->vert_len = vert_len;
  return handle->buf;
}
|
2018-02-14 18:16:52 +01:00
|
|
|
|
2020-08-10 01:43:50 +02:00
|
|
|
/* NOTE: Does not return a valid drawable batch until DRW_instance_buffer_finish has run.
 * Initialization is delayed because instancer or geom could still not be initialized. */
GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
                                          GPUVertBuf *buf,
                                          GPUBatch *instancer,
                                          GPUBatch *geom)
{
  /* Do not call this with a batch that is already an instancing batch. */
  BLI_assert(geom->inst[0] == NULL);
  /* Only call with one of them. */
  BLI_assert((instancer != NULL) != (buf != NULL));

  /* Reuse the handle (and its batch) allocated at this position last frame. */
  DRWTempInstancingHandle *handle = BLI_memblock_alloc(idatalist->pool_instancing);
  if (handle->batch == NULL) {
    handle->batch = GPU_batch_calloc();
  }

  GPUBatch *batch = handle->batch;
  /* Instance source must match: either the given buffer (and it must already be
   * uploaded), or the first two vertex buffers of the instancer batch. */
  bool instancer_compat = buf ? ((batch->inst[0] == buf) &&
                                 (GPU_vertbuf_get_status(buf) & GPU_VERTBUF_DATA_UPLOADED)) :
                                ((batch->inst[0] == instancer->verts[0]) &&
                                 (batch->inst[1] == instancer->verts[1]));
  /* The recycled batch is only reusable if primitive type, element buffer and
   * every vertex buffer still match, and it is not pending initialization. */
  bool is_compatible = (batch->prim_type == geom->prim_type) && instancer_compat &&
                       (batch->flag & GPU_BATCH_BUILDING) == 0 && (batch->elem == geom->elem);
  for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && is_compatible; i++) {
    if (batch->verts[i] != geom->verts[i]) {
      is_compatible = false;
    }
  }

  if (!is_compatible) {
    instancing_batch_references_remove(batch);
    GPU_batch_clear(batch);
    /* Save args and init later. */
    batch->flag = GPU_BATCH_BUILDING;
    handle->buf = buf;
    handle->instancer = instancer;
    handle->geom = geom;
  }
  return batch;
}
|
|
|
|
|
|
|
|
/* NOTE: Use only with buf allocated via DRW_temp_buffer_request. */
GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
                                 GPUVertBuf *buf,
                                 GPUPrimType prim_type)
{
  /* Reuse the batch pointer stored at this position last frame. */
  GPUBatch **batch_ptr = BLI_memblock_alloc(idatalist->pool_batching);
  if (*batch_ptr == NULL) {
    *batch_ptr = GPU_batch_calloc();
  }

  GPUBatch *batch = *batch_ptr;
  /* Reusable only if it already wraps this buffer with the same primitive type
   * and the buffer data has been uploaded to the GPU. */
  bool is_compatible = (batch->verts[0] == buf) && (batch->prim_type == prim_type) &&
                       (GPU_vertbuf_get_status(buf) & GPU_VERTBUF_DATA_UPLOADED);
  if (!is_compatible) {
    GPU_batch_clear(batch);
    GPU_batch_init(batch, prim_type, buf, NULL);
  }
  return batch;
}
|
|
|
|
|
|
|
|
/* Memblock free callback: release the owned vertex buffer and reset the cached
 * format key so the handle is treated as fresh if it gets reused. */
static void temp_buffer_handle_free(DRWTempBufferHandle *handle)
{
  GPU_VERTBUF_DISCARD_SAFE(handle->buf);
  handle->format = NULL;
}
|
|
|
|
|
|
|
|
/* Memblock free callback: drop the buffer references taken in
 * DRW_instance_buffer_finish and discard the instancing batch.
 *
 * Fix: guard against a NULL batch. instancing_batch_references_remove()
 * dereferences batch->verts unconditionally, so a zero-initialized handle
 * (batch never assigned by DRW_temp_batch_instance_request) would crash;
 * GPU_BATCH_DISCARD_SAFE alone already tolerates NULL. */
static void temp_instancing_handle_free(DRWTempInstancingHandle *handle)
{
  if (handle->batch != NULL) {
    instancing_batch_references_remove(handle->batch);
    GPU_BATCH_DISCARD_SAFE(handle->batch);
  }
}
|
|
|
|
|
2020-08-01 12:57:17 +02:00
|
|
|
/* Memblock free callback for pool_batching: discard the batch and NULL the slot. */
static void temp_batch_free(GPUBatch **batch)
{
  GPU_BATCH_DISCARD_SAFE(*batch);
}
|
|
|
|
|
2018-02-14 18:16:52 +01:00
|
|
|
/* End-of-frame pass: upload temp buffers, build the deferred instancing batches
 * requested via DRW_temp_batch_instance_request, then recycle the pools. */
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
{
  /* Resize down buffers in use and send data to GPU. */
  BLI_memblock_iter iter;
  DRWTempBufferHandle *handle;
  BLI_memblock_iternew(idatalist->pool_buffers, &iter);
  while ((handle = BLI_memblock_iterstep(&iter))) {
    if (handle->vert_len != NULL) {
      uint vert_len = *(handle->vert_len);
      /* Round up to the next chunk multiple; only shrink, never grow here. */
      uint target_buf_size = ((vert_len / DRW_BUFFER_VERTS_CHUNK) + 1) * DRW_BUFFER_VERTS_CHUNK;
      if (target_buf_size < GPU_vertbuf_get_vertex_alloc(handle->buf)) {
        GPU_vertbuf_data_resize(handle->buf, target_buf_size);
      }
      GPU_vertbuf_data_len_set(handle->buf, vert_len);
      GPU_vertbuf_use(handle->buf); /* Send data. */
    }
  }
  /* Finish pending instancing batches. */
  DRWTempInstancingHandle *handle_inst;
  BLI_memblock_iternew(idatalist->pool_instancing, &iter);
  while ((handle_inst = BLI_memblock_iterstep(&iter))) {
    GPUBatch *batch = handle_inst->batch;
    /* Only batches flagged for (re)build by DRW_temp_batch_instance_request. */
    if (batch && batch->flag == GPU_BATCH_BUILDING) {
      GPUVertBuf *inst_buf = handle_inst->buf;
      GPUBatch *inst_batch = handle_inst->instancer;
      GPUBatch *geom = handle_inst->geom;
      GPU_batch_copy(batch, geom);
      if (inst_batch != NULL) {
        /* Attach every vertex buffer of the instancer batch as instance data. */
        for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && inst_batch->verts[i]; i++) {
          GPU_batch_instbuf_add_ex(batch, inst_batch->verts[i], false);
        }
      }
      else {
        GPU_batch_instbuf_add_ex(batch, inst_buf, false);
      }
      /* Add reference to avoid comparing pointers (in DRW_temp_batch_request) that could
       * potentially be the same. This will delay the freeing of the GPUVertBuf itself. */
      instancing_batch_references_add(batch);
    }
  }
  /* Resize pools and free unused. */
  BLI_memblock_clear(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
  BLI_memblock_clear(idatalist->pool_instancing, (MemblockValFreeFP)temp_instancing_handle_free);
  BLI_memblock_clear(idatalist->pool_batching, (MemblockValFreeFP)temp_batch_free);
}
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
2018-01-09 14:09:14 +01:00
|
|
|
/** \name Instance Data (DRWInstanceData)
|
|
|
|
* \{ */
|
|
|
|
|
2019-01-29 07:46:25 +11:00
|
|
|
/* Allocate a new instance-data pool of `attr_size` floats per element and
 * append it to the list of pools of that size.
 *
 * Fix: the `attr_size > 0` assert used to run *after* attr_size had already
 * sized the mempool element; validate before first use. Also factor the
 * repeated `attr_size - 1` index into a named constant. */
static DRWInstanceData *drw_instance_data_create(DRWInstanceDataList *idatalist, uint attr_size)
{
  BLI_assert(attr_size > 0);

  DRWInstanceData *idata = MEM_callocN(sizeof(DRWInstanceData), "DRWInstanceData");
  idata->next = NULL;
  idata->used = true;
  idata->data_size = attr_size;
  idata->mempool = BLI_mempool_create(sizeof(float) * idata->data_size, 0, 16, 0);

  /* Push to the tail of the linked list for this size. */
  const uint index = attr_size - 1;
  if (idatalist->idata_head[index] == NULL) {
    idatalist->idata_head[index] = idata;
  }
  else {
    idatalist->idata_tail[index]->next = idata;
  }
  idatalist->idata_tail[index] = idata;

  return idata;
}
|
|
|
|
|
|
|
|
/* Free the pool's backing storage. The DRWInstanceData struct itself is
 * released by the caller (see MEM_freeN calls in the list functions). */
static void DRW_instance_data_free(DRWInstanceData *idata)
{
  BLI_mempool_destroy(idata->mempool);
}
|
|
|
|
|
|
|
|
/**
 * Return a pointer to the next instance data space.
 * The slot holds `data_size` floats (uninitialized).
 */
void *DRW_instance_data_next(DRWInstanceData *idata)
{
  return BLI_mempool_alloc(idata->mempool);
}
|
|
|
|
|
2019-01-29 07:46:25 +11:00
|
|
|
/* Return an instance-data pool of the requested size, reusing an idle one
 * from the per-size list when possible, creating a new one otherwise. */
DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size)
{
  BLI_assert(attr_size > 0 && attr_size <= MAX_INSTANCE_DATA_SIZE);

  /* Scan the list of pools of this size for one not claimed this frame. */
  DRWInstanceData *node = idatalist->idata_head[attr_size - 1];
  while (node != NULL) {
    if (!node->used) {
      node->used = true;
      return node;
    }
    node = node->next;
  }

  /* Every existing pool is taken: append a fresh one. */
  return drw_instance_data_create(idatalist, attr_size);
}
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
/** \name Instance Data List (DRWInstanceDataList)
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
/* Allocate a new instance-data list with empty temp pools and register it in
 * the global g_idatalists registry. */
DRWInstanceDataList *DRW_instance_data_list_create(void)
{
  DRWInstanceDataList *idatalist = MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList");

  idatalist->pool_batching = BLI_memblock_create(sizeof(GPUBatch *));
  idatalist->pool_instancing = BLI_memblock_create(sizeof(DRWTempInstancingHandle));
  idatalist->pool_buffers = BLI_memblock_create(sizeof(DRWTempBufferHandle));

  BLI_addtail(&g_idatalists, idatalist);

  return idatalist;
}
|
|
|
|
|
|
|
|
/* Free every instance-data pool, destroy the temp memblock pools, and
 * unregister the list from the global registry. */
void DRW_instance_data_list_free(DRWInstanceDataList *idatalist)
{
  for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; i++) {
    DRWInstanceData *node = idatalist->idata_head[i];
    while (node != NULL) {
      DRWInstanceData *node_next = node->next;
      DRW_instance_data_free(node);
      MEM_freeN(node);
      node = node_next;
    }
    idatalist->idata_head[i] = NULL;
    idatalist->idata_tail[i] = NULL;
  }

  BLI_memblock_destroy(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
  BLI_memblock_destroy(idatalist->pool_instancing, (MemblockValFreeFP)temp_instancing_handle_free);
  BLI_memblock_destroy(idatalist->pool_batching, (MemblockValFreeFP)temp_batch_free);

  BLI_remlink(&g_idatalists, idatalist);
}
|
|
|
|
|
|
|
|
/* Mark every instance-data pool as unused; the next frame's requests will
 * re-claim them and free_unused() reaps the rest. */
void DRW_instance_data_list_reset(DRWInstanceDataList *idatalist)
{
  for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; i++) {
    for (DRWInstanceData *node = idatalist->idata_head[i]; node != NULL; node = node->next) {
      node->used = false;
    }
  }
}
|
|
|
|
|
|
|
|
/* Reap pools left unused since the last reset, relinking the survivors in
 * place and rebuilding the tail pointer of each per-size list. */
void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist)
{
  DRWInstanceData *idata, *next_idata;

  /* Remove unused data blocks and sanitize each list. */
  for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; i++) {
    /* Tail is rebuilt as we walk; it always points at the last kept node. */
    idatalist->idata_tail[i] = NULL;
    for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
      next_idata = idata->next;
      if (idata->used == false) {
        /* Unlink: either advance the head or bridge from the last kept node. */
        if (idatalist->idata_head[i] == idata) {
          idatalist->idata_head[i] = next_idata;
        }
        else {
          /* idatalist->idata_tail[i] is guaranteed not to be null in this case. */
          idatalist->idata_tail[i]->next = next_idata;
        }
        DRW_instance_data_free(idata);
        MEM_freeN(idata);
      }
      else {
        /* Keep: splice after the previous kept node and advance the tail. */
        if (idatalist->idata_tail[i] != NULL) {
          idatalist->idata_tail[i]->next = idata;
        }
        idatalist->idata_tail[i] = idata;
      }
    }
  }
}
|
|
|
|
|
|
|
|
/* Clear every pool's mempool contents while keeping current capacity,
 * so allocations next frame reuse the same memory. */
void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist)
{
  for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; i++) {
    for (DRWInstanceData *node = idatalist->idata_head[i]; node != NULL; node = node->next) {
      BLI_mempool_clear_ex(node->mempool, BLI_mempool_len(node->mempool));
    }
  }
}
|
|
|
|
|
|
|
|
/** \} */
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
/** \name Sparse Uniform Buffer
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
#define CHUNK_LIST_STEP (1 << 4)
|
|
|
|
|
|
|
|
/** A chunked UBO manager that doesn't actually allocate unneeded chunks. */
typedef struct DRWSparseUniformBuf {
  /* Memory buffers used to stage chunk data before transfer to UBOs.
   * NULL entry = chunk not allocated. */
  char **chunk_buffers;
  /* Uniform buffer objects with flushed data. Created lazily in flush(). */
  struct GPUUniformBuf **chunk_ubos;
  /* True if the relevant chunk contains data (distinct from simply being allocated). */
  BLI_bitmap *chunk_used;

  /* Current length of the chunk arrays (rounded to CHUNK_LIST_STEP). */
  int num_chunks;
  /* chunk_bytes == item_size * chunk_size (bytes staged per chunk). */
  unsigned int item_size, chunk_size, chunk_bytes;
} DRWSparseUniformBuf;
|
|
|
|
|
|
|
|
/* Reset the buffer to an empty state with the given item/chunk geometry.
 * No chunk storage is allocated here; that happens on first item access. */
static void drw_sparse_uniform_buffer_init(DRWSparseUniformBuf *buffer,
                                           unsigned int item_size,
                                           unsigned int chunk_size)
{
  *buffer = (DRWSparseUniformBuf){
      .chunk_buffers = NULL,
      .chunk_used = NULL,
      .chunk_ubos = NULL,
      .num_chunks = 0,
      .item_size = item_size,
      .chunk_size = chunk_size,
      .chunk_bytes = item_size * chunk_size,
  };
}
|
|
|
|
|
|
|
|
/** Allocate a chunked UBO with the specified item and chunk size. */
|
|
|
|
DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size, unsigned int chunk_size)
|
|
|
|
{
|
|
|
|
DRWSparseUniformBuf *buf = MEM_mallocN(sizeof(DRWSparseUniformBuf), __func__);
|
|
|
|
drw_sparse_uniform_buffer_init(buf, item_size, chunk_size);
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Flush data from ordinary memory to UBOs. */
|
|
|
|
void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer)
|
|
|
|
{
|
|
|
|
for (int i = 0; i < buffer->num_chunks; i++) {
|
|
|
|
if (BLI_BITMAP_TEST(buffer->chunk_used, i)) {
|
|
|
|
if (buffer->chunk_ubos[i] == NULL) {
|
|
|
|
buffer->chunk_ubos[i] = GPU_uniformbuf_create(buffer->chunk_bytes);
|
|
|
|
}
|
|
|
|
GPU_uniformbuf_update(buffer->chunk_ubos[i], buffer->chunk_buffers[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Clean all buffers and free unused ones.
 * \param free_all: when true, release every chunk regardless of use. */
void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all)
{
  /* Highest used chunk index + 1; determines the shrunk array length. */
  int max_used_chunk = 0;

  for (int i = 0; i < buffer->num_chunks; i++) {
    /* Delete buffers that were not used since the last clear call. */
    if (free_all || !BLI_BITMAP_TEST(buffer->chunk_used, i)) {
      MEM_SAFE_FREE(buffer->chunk_buffers[i]);

      if (buffer->chunk_ubos[i]) {
        GPU_uniformbuf_free(buffer->chunk_ubos[i]);
        buffer->chunk_ubos[i] = NULL;
      }
    }
    else {
      max_used_chunk = i + 1;
    }
  }

  /* Shrink the chunk array if appropriate. */
  const int old_num_chunks = buffer->num_chunks;

  /* Round up to a multiple of CHUNK_LIST_STEP (a power of two). */
  buffer->num_chunks = (max_used_chunk + CHUNK_LIST_STEP - 1) & ~(CHUNK_LIST_STEP - 1);

  if (buffer->num_chunks == 0) {
    /* Ensure that an empty pool holds no memory allocations. */
    MEM_SAFE_FREE(buffer->chunk_buffers);
    MEM_SAFE_FREE(buffer->chunk_used);
    MEM_SAFE_FREE(buffer->chunk_ubos);
    return;
  }

  if (buffer->num_chunks != old_num_chunks) {
    buffer->chunk_buffers = MEM_recallocN(buffer->chunk_buffers,
                                          buffer->num_chunks * sizeof(void *));
    buffer->chunk_ubos = MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *));
    BLI_BITMAP_RESIZE(buffer->chunk_used, buffer->num_chunks);
  }

  /* Reset usage tracking for the next cycle. */
  BLI_bitmap_set_all(buffer->chunk_used, false, buffer->num_chunks);
}
|
|
|
|
|
|
|
|
/** Frees the buffer: releases all chunks (clear with free_all=true), then the
 * struct itself. */
void DRW_sparse_uniform_buffer_free(DRWSparseUniformBuf *buffer)
{
  DRW_sparse_uniform_buffer_clear(buffer, true);
  MEM_freeN(buffer);
}
|
|
|
|
|
|
|
|
/** Checks if the buffer contains any allocated chunks. */
bool DRW_sparse_uniform_buffer_is_empty(DRWSparseUniformBuf *buffer)
{
  return buffer->num_chunks == 0;
}
|
|
|
|
|
|
|
|
/* Look up the UBO for a chunk. Returns NULL for a NULL buffer, an
 * out-of-range chunk, or a chunk that holds no data. */
static GPUUniformBuf *drw_sparse_uniform_buffer_get_ubo(DRWSparseUniformBuf *buffer, int chunk)
{
  if (buffer == NULL || chunk >= buffer->num_chunks) {
    return NULL;
  }
  if (!BLI_BITMAP_TEST(buffer->chunk_used, chunk)) {
    return NULL;
  }
  return buffer->chunk_ubos[chunk];
}
|
|
|
|
|
|
|
|
/** Bind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty. */
|
|
|
|
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location)
|
|
|
|
{
|
|
|
|
GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
|
|
|
|
if (ubo) {
|
|
|
|
GPU_uniformbuf_bind(ubo, location);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Unbind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty. */
|
|
|
|
void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk)
|
|
|
|
{
|
|
|
|
GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
|
|
|
|
if (ubo) {
|
|
|
|
GPU_uniformbuf_unbind(ubo);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Returns a pointer to the given item of the given chunk, allocating memory if necessary. */
void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chunk, int item)
{
  /* Grow the per-chunk pointer tables so that `chunk` is a valid index.
   * The new count is rounded up to the next multiple of CHUNK_LIST_STEP
   * (strictly above `chunk`) to amortize reallocation cost. */
  if (chunk >= buffer->num_chunks) {
    buffer->num_chunks = (chunk + CHUNK_LIST_STEP) & ~(CHUNK_LIST_STEP - 1);
    /* MEM_recallocN zero-fills the newly added slots, so fresh chunks start
     * out with NULL CPU buffers and NULL UBOs. */
    buffer->chunk_buffers = MEM_recallocN(buffer->chunk_buffers,
                                          buffer->num_chunks * sizeof(void *));
    buffer->chunk_ubos = MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *));
    BLI_BITMAP_RESIZE(buffer->chunk_used, buffer->num_chunks);
  }

  char *chunk_buffer = buffer->chunk_buffers[chunk];

  if (chunk_buffer == NULL) {
    /* First use of this chunk: allocate its zero-initialized CPU staging memory. */
    buffer->chunk_buffers[chunk] = chunk_buffer = MEM_callocN(buffer->chunk_bytes, __func__);
  }
  else if (!BLI_BITMAP_TEST(buffer->chunk_used, chunk)) {
    /* Chunk memory was kept from a previous frame but marked unused:
     * clear stale contents before reuse. */
    memset(chunk_buffer, 0, buffer->chunk_bytes);
  }

  /* Mark the chunk as containing live data for this frame. */
  BLI_BITMAP_ENABLE(buffer->chunk_used, chunk);

  /* Items are tightly packed at `item_size` stride inside the chunk. */
  return chunk_buffer + buffer->item_size * item;
}
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
/** \name Uniform Attribute Buffers
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
/** Sparse UBO buffer for a specific uniform attribute list. */
typedef struct DRWUniformAttrBuf {
  /* Attribute list (also used as hash table key) handled by this buffer.
   * Must stay the first member so `&buffer->key` can serve as the GHash key. */
  GPUUniformAttrList key;
  /* Sparse UBO buffer containing the attribute values (one float[4] per attribute). */
  DRWSparseUniformBuf ubos;
  /* Last handle used to update the buffer, checked for avoiding redundant updates. */
  DRWResourceHandle last_handle;
  /* Linked list pointer used for freeing the empty unneeded buffers. */
  struct DRWUniformAttrBuf *next_empty;
} DRWUniformAttrBuf;
|
|
|
|
|
|
|
|
/** Look up the buffer for the given attribute list, creating and registering it on first use. */
static DRWUniformAttrBuf *drw_uniform_attrs_pool_ensure(GHash *table, GPUUniformAttrList *key)
{
  void **key_slot;
  void **val_slot;

  if (BLI_ghash_ensure_p_ex(table, key, &key_slot, &val_slot)) {
    /* Entry already exists for this attribute set. */
    return (DRWUniformAttrBuf *)*val_slot;
  }

  /* First request for this attribute set: build a new buffer. */
  DRWUniformAttrBuf *new_buffer = MEM_callocN(sizeof(*new_buffer), __func__);

  /* The buffer owns a copy of the key, which doubles as the hash table key. */
  GPU_uniform_attr_list_copy(&new_buffer->key, key);
  drw_sparse_uniform_buffer_init(
      &new_buffer->ubos, key->count * sizeof(float[4]), DRW_RESOURCE_CHUNK_LEN);
  new_buffer->last_handle = (DRWResourceHandle)-1;

  *key_slot = &new_buffer->key;
  *val_slot = new_buffer;

  return new_buffer;
}
|
|
|
|
|
|
|
|
/* This function mirrors lookup_property in cycles/blender/blender_object.cpp */
|
|
|
|
static bool drw_uniform_property_lookup(ID *id, const char *name, float r_data[4])
|
|
|
|
{
|
|
|
|
PointerRNA ptr, id_ptr;
|
|
|
|
PropertyRNA *prop;
|
|
|
|
|
|
|
|
if (!id) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
RNA_id_pointer_create(id, &id_ptr);
|
|
|
|
|
|
|
|
if (!RNA_path_resolve(&id_ptr, name, &ptr, &prop)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-12-23 15:45:55 +01:00
|
|
|
if (prop == NULL) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
PropertyType type = RNA_property_type(prop);
|
|
|
|
int arraylen = RNA_property_array_length(&ptr, prop);
|
|
|
|
|
|
|
|
if (arraylen == 0) {
|
|
|
|
float value;
|
|
|
|
|
2020-11-03 15:12:50 -06:00
|
|
|
if (type == PROP_FLOAT) {
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
value = RNA_property_float_get(&ptr, prop);
|
2020-11-03 15:12:50 -06:00
|
|
|
}
|
|
|
|
else if (type == PROP_INT) {
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
value = RNA_property_int_get(&ptr, prop);
|
2020-11-03 15:12:50 -06:00
|
|
|
}
|
|
|
|
else {
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
return false;
|
2020-11-03 15:12:50 -06:00
|
|
|
}
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
|
|
|
|
copy_v4_fl4(r_data, value, value, value, 1);
|
|
|
|
return true;
|
|
|
|
}
|
2020-11-03 15:12:50 -06:00
|
|
|
|
2020-11-03 15:26:23 -06:00
|
|
|
if (type == PROP_FLOAT && arraylen <= 4) {
|
|
|
|
copy_v4_fl4(r_data, 0, 0, 0, 1);
|
|
|
|
RNA_property_float_get_array(&ptr, prop, r_data);
|
|
|
|
return true;
|
|
|
|
}
|
Materials: add custom object properties as uniform attributes.
This patch allows the user to type a property name into the
Attribute node, which will then output the value of the property
for each individual object, allowing to e.g. customize shaders
by object without duplicating the shader.
In order to make supporting this easier for Eevee, it is necessary
to explicitly choose whether the attribute is varying or uniform
via a dropdown option of the Attribute node. The dropdown also
allows choosing whether instancing should be taken into account.
The Cycles design treats all attributes as one common namespace,
so the Blender interface converts the enum to a name prefix that
can't be entered using keyboard.
In Eevee, the attributes are provided to the shader via a UBO indexed
with resource_id, similar to the existing Object Info data. Unlike it,
however, it is necessary to maintain a separate buffer for every
requested combination of attributes.
This is done using a hash table with the attribute set as the key,
as it is expected that technically different but similar materials
may use the same set of attributes. In addition, in order to minimize
wasted memory, a sparse UBO pool is implemented, so that chunks that
don't contain any data don't have to be allocated.
The back-end Cycles code is already refactored and committed by Brecht.
Differential Revision: https://developer.blender.org/D2057
2020-08-05 19:14:40 +03:00
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This function mirrors lookup_instance_property in cycles/blender/blender_object.cpp */
static void drw_uniform_attribute_lookup(GPUUniformAttr *attr,
                                         Object *ob,
                                         Object *dupli_parent,
                                         DupliObject *dupli_source,
                                         float r_data[4])
{
  /* Default to zero if no data source provides a value. */
  copy_v4_fl(r_data, 0);

  /* Build the RNA path `["name"]` that addresses a custom ID property,
   * escaping quotes/backslashes in the attribute name. The buffer is sized
   * for a fully-escaped name (2x) plus the surrounding `["..."]` and NUL. */
  char idprop_name[(sizeof(attr->name) * 2) + 4];
  {
    char attr_name_esc[sizeof(attr->name) * 2];
    BLI_str_escape(attr_name_esc, attr->name, sizeof(attr_name_esc));
    SNPRINTF(idprop_name, "[\"%s\"]", attr_name_esc);
  }

  /* If requesting instance data, check the parent particle system and object. */
  if (attr->use_dupli) {
    if (dupli_source && dupli_source->particle_system) {
      ParticleSettings *settings = dupli_source->particle_system->part;
      /* Custom ID property takes precedence over an RNA path of the same name. */
      if (drw_uniform_property_lookup((ID *)settings, idprop_name, r_data) ||
          drw_uniform_property_lookup((ID *)settings, attr->name, r_data)) {
        return;
      }
    }
    if (drw_uniform_property_lookup((ID *)dupli_parent, idprop_name, r_data) ||
        drw_uniform_property_lookup((ID *)dupli_parent, attr->name, r_data)) {
      return;
    }
  }

  /* Check the object and mesh. */
  if (ob) {
    if (drw_uniform_property_lookup((ID *)ob, idprop_name, r_data) ||
        drw_uniform_property_lookup((ID *)ob, attr->name, r_data) ||
        drw_uniform_property_lookup((ID *)ob->data, idprop_name, r_data) ||
        drw_uniform_property_lookup((ID *)ob->data, attr->name, r_data)) {
      return;
    }
  }
}
|
|
|
|
|
|
|
|
void drw_uniform_attrs_pool_update(GHash *table,
|
|
|
|
GPUUniformAttrList *key,
|
|
|
|
DRWResourceHandle *handle,
|
|
|
|
Object *ob,
|
|
|
|
Object *dupli_parent,
|
|
|
|
DupliObject *dupli_source)
|
|
|
|
{
|
|
|
|
DRWUniformAttrBuf *buffer = drw_uniform_attrs_pool_ensure(table, key);
|
|
|
|
|
|
|
|
if (buffer->last_handle != *handle) {
|
|
|
|
buffer->last_handle = *handle;
|
|
|
|
|
|
|
|
int chunk = DRW_handle_chunk_get(handle);
|
|
|
|
int item = DRW_handle_id_get(handle);
|
|
|
|
float(*values)[4] = DRW_sparse_uniform_buffer_ensure_item(&buffer->ubos, chunk, item);
|
|
|
|
|
|
|
|
LISTBASE_FOREACH (GPUUniformAttr *, attr, &buffer->key.list) {
|
|
|
|
drw_uniform_attribute_lookup(attr, ob, dupli_parent, dupli_source, *values++);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Return the sparse UBO buffer for the given attribute list, or NULL if none exists. */
DRWSparseUniformBuf *DRW_uniform_attrs_pool_find_ubo(GHash *table, struct GPUUniformAttrList *key)
{
  DRWUniformAttrBuf *match = BLI_ghash_lookup(table, key);
  if (match == NULL) {
    return NULL;
  }
  return &match->ubos;
}
|
|
|
|
|
|
|
|
/** Create a new hash table pool keyed by uniform attribute lists. */
GHash *DRW_uniform_attrs_pool_new(void)
{
  /* Note: `(void)` rather than `()` — in C an empty parameter list in a
   * definition declares unspecified parameters instead of "no parameters". */
  return GPU_uniform_attr_list_hash_new("obattr_hash");
}
|
|
|
|
|
|
|
|
/** Upload the accumulated CPU-side data of every buffer in the pool to the GPU. */
void DRW_uniform_attrs_pool_flush_all(GHash *table)
{
  GHASH_FOREACH_BEGIN (DRWUniformAttrBuf *, attr_buf, table) {
    DRW_sparse_uniform_buffer_flush(&attr_buf->ubos);
  }
  GHASH_FOREACH_END();
}
|
|
|
|
|
|
|
|
static void drw_uniform_attrs_pool_free_cb(void *ptr)
|
|
|
|
{
|
|
|
|
DRWUniformAttrBuf *buffer = ptr;
|
|
|
|
|
|
|
|
GPU_uniform_attr_list_free(&buffer->key);
|
|
|
|
DRW_sparse_uniform_buffer_clear(&buffer->ubos, true);
|
|
|
|
MEM_freeN(buffer);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Reset all buffers for a new frame and drop the ones that ended up fully empty. */
void DRW_uniform_attrs_pool_clear_all(GHash *table)
{
  DRWUniformAttrBuf *empty_list = NULL;

  /* Reset every buffer; chain the empty ones through `next_empty` because
   * entries cannot be removed while iterating the hash table. */
  GHASH_FOREACH_BEGIN (DRWUniformAttrBuf *, attr_buf, table) {
    attr_buf->last_handle = (DRWResourceHandle)-1;
    DRW_sparse_uniform_buffer_clear(&attr_buf->ubos, false);

    if (DRW_sparse_uniform_buffer_is_empty(&attr_buf->ubos)) {
      attr_buf->next_empty = empty_list;
      empty_list = attr_buf;
    }
  }
  GHASH_FOREACH_END();

  /* Now it is safe to remove and free the collected empty buffers. */
  for (DRWUniformAttrBuf *attr_buf = empty_list; attr_buf != NULL;) {
    DRWUniformAttrBuf *next_buf = attr_buf->next_empty;
    BLI_ghash_remove(table, &attr_buf->key, NULL, drw_uniform_attrs_pool_free_cb);
    attr_buf = next_buf;
  }
}
|
|
|
|
|
|
|
|
/** Free the pool hash table together with all remaining attribute buffers. */
void DRW_uniform_attrs_pool_free(GHash *table)
{
  BLI_ghash_free(table, NULL, drw_uniform_attrs_pool_free_cb);
}
|
|
|
|
|
|
|
|
/** \} */
|