/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2016, Blender Foundation.
 */

/** \file
 * \ingroup draw
 */

#include "draw_manager.h"

#include "BKE_curve.h"
#include "BKE_duplilist.h"
#include "BKE_global.h"
#include "BKE_image.h"
#include "BKE_mesh.h"
#include "BKE_object.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"

#include "DNA_curve_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meta_types.h"

#include "BLI_alloca.h"
#include "BLI_hash.h"
#include "BLI_link_utils.h"
#include "BLI_listbase.h"
#include "BLI_memblock.h"
#include "BLI_mempool.h"

#ifdef DRW_DEBUG_CULLING
#  include "BLI_math_bits.h"
#endif

#include "GPU_buffers.h"
#include "GPU_capabilities.h"
#include "GPU_material.h"
#include "GPU_uniform_buffer.h"

#include "intern/gpu_codegen.h"

/* -------------------------------------------------------------------- */
/** \name Uniform Buffer Object (DRW_uniformbuffer)
 * \{ */

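/* Counting sort of the draw commands in a chunk, keyed by a hash of their GPUBatch pointer,
 * so that consecutive draw commands tend to use the same batch. */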
static void draw_call_sort(DRWCommand *array, DRWCommand *array_tmp, int array_len)
{
  /* Count unique batches. It's not really important if
   * there are collisions. If there are a lot of different batches,
   * the sorting benefit will be negligible.
   * So at least sort fast! */
  uchar idx[128] = {0};
  /* Shift by 6 positions knowing each GPUBatch is > 64 bytes */
#define KEY(a) ((((size_t)((a).draw.batch)) >> 6) % ARRAY_SIZE(idx))
  BLI_assert(array_len <= ARRAY_SIZE(idx));

  for (int i = 0; i < array_len; i++) {
    /* Early out if nothing to sort. */
    if (++idx[KEY(array[i])] == array_len) {
      return;
    }
  }
  /* Cumulate batch indices */
  for (int i = 1; i < ARRAY_SIZE(idx); i++) {
    idx[i] += idx[i - 1];
  }
  /* Traverse in reverse to not change the order of the resource ID's. */
  for (int src = array_len - 1; src >= 0; src--) {
    array_tmp[--idx[KEY(array[src])]] = array[src];
  }
#undef KEY

  memcpy(array, array_tmp, sizeof(*array) * array_len);
}

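/* Finalize per-viewport resource data for the frame: create or resize the UBOs holding
 * object matrices and object infos, upload the gathered data, flush the uniform attribute
 * pool, and sort the recorded draw commands of each sortable command chunk. */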
void drw_resource_buffer_finish(ViewportMemoryPool *vmempool)
{
  int chunk_id = DRW_handle_chunk_get(&DST.resource_handle);
  int elem_id = DRW_handle_id_get(&DST.resource_handle);
  int ubo_len = 1 + chunk_id - ((elem_id == 0) ? 1 : 0);
  size_t list_size = sizeof(GPUUniformBuf *) * ubo_len;

  /* TODO: find a better system. Currently a lot of obinfos UBOs are going to be unused
   * if not rendering with Eevee. */

  if (vmempool->matrices_ubo == NULL) {
    vmempool->matrices_ubo = MEM_callocN(list_size, __func__);
    vmempool->obinfos_ubo = MEM_callocN(list_size, __func__);
    vmempool->ubo_len = ubo_len;
  }

  /* Remove unnecessary buffers */
  for (int i = ubo_len; i < vmempool->ubo_len; i++) {
    GPU_uniformbuf_free(vmempool->matrices_ubo[i]);
    GPU_uniformbuf_free(vmempool->obinfos_ubo[i]);
  }

  if (ubo_len != vmempool->ubo_len) {
    vmempool->matrices_ubo = MEM_recallocN(vmempool->matrices_ubo, list_size);
    vmempool->obinfos_ubo = MEM_recallocN(vmempool->obinfos_ubo, list_size);
    vmempool->ubo_len = ubo_len;
  }

  /* Create/Update buffers. */
  for (int i = 0; i < ubo_len; i++) {
    void *data_obmat = BLI_memblock_elem_get(vmempool->obmats, i, 0);
    void *data_infos = BLI_memblock_elem_get(vmempool->obinfos, i, 0);
    if (vmempool->matrices_ubo[i] == NULL) {
      vmempool->matrices_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectMatrix) *
                                                        DRW_RESOURCE_CHUNK_LEN);
      vmempool->obinfos_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectInfos) *
                                                       DRW_RESOURCE_CHUNK_LEN);
    }
    GPU_uniformbuf_update(vmempool->matrices_ubo[i], data_obmat);
    GPU_uniformbuf_update(vmempool->obinfos_ubo[i], data_infos);
  }

  DRW_uniform_attrs_pool_flush_all(vmempool->obattrs_ubo_pool);

  /* Aligned alloc to avoid unaligned memcpy. */
  DRWCommandChunk *chunk_tmp = MEM_mallocN_aligned(sizeof(DRWCommandChunk), 16, "tmp call chunk");
  DRWCommandChunk *chunk;
  BLI_memblock_iter iter;
  BLI_memblock_iternew(vmempool->commands, &iter);
  while ((chunk = BLI_memblock_iterstep(&iter))) {
    bool sortable = true;
    /* We can only sort chunks that contain #DRWCommandDraw only. */
    for (int i = 0; i < ARRAY_SIZE(chunk->command_type) && sortable; i++) {
      if (chunk->command_type[i] != 0) {
        sortable = false;
      }
    }
    if (sortable) {
      draw_call_sort(chunk->commands, chunk_tmp->commands, chunk->command_used);
    }
  }
  MEM_freeN(chunk_tmp);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Uniforms (DRW_shgroup_uniform)
 * \{ */

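/* Store a uniform binding in the shading group, appending a new uniform chunk when the
 * current one is full. Values for the *_COPY types are copied inline; other types only
 * keep a pointer or reference to the caller-owned data. */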
static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
                                          int loc,
                                          DRWUniformType type,
                                          const void *value,
                                          eGPUSamplerState sampler_state,
                                          int length,
                                          int arraysize)
{
  if (loc == -1) {
    /* Nice to enable eventually, for now EEVEE uses uniforms that might not exist. */
    // BLI_assert(0);
    return;
  }

  DRWUniformChunk *unichunk = shgroup->uniforms;
  /* Happens on first uniform or if chunk is full. */
  if (!unichunk || unichunk->uniform_used == unichunk->uniform_len) {
    unichunk = BLI_memblock_alloc(DST.vmempool->uniforms);
    unichunk->uniform_len = ARRAY_SIZE(shgroup->uniforms->uniforms);
    unichunk->uniform_used = 0;
    BLI_LINKS_PREPEND(shgroup->uniforms, unichunk);
  }

  DRWUniform *uni = unichunk->uniforms + unichunk->uniform_used++;

  uni->location = loc;
  uni->type = type;
  uni->length = length;
  uni->arraysize = arraysize;

  switch (type) {
    case DRW_UNIFORM_INT_COPY:
      BLI_assert(length <= 4);
      memcpy(uni->ivalue, value, sizeof(int) * length);
      break;
    case DRW_UNIFORM_FLOAT_COPY:
      BLI_assert(length <= 4);
      memcpy(uni->fvalue, value, sizeof(float) * length);
      break;
    case DRW_UNIFORM_BLOCK:
      uni->block = (GPUUniformBuf *)value;
      break;
    case DRW_UNIFORM_BLOCK_REF:
      uni->block_ref = (GPUUniformBuf **)value;
      break;
    case DRW_UNIFORM_IMAGE:
    case DRW_UNIFORM_TEXTURE:
      uni->texture = (GPUTexture *)value;
      uni->sampler_state = sampler_state;
      break;
    case DRW_UNIFORM_IMAGE_REF:
    case DRW_UNIFORM_TEXTURE_REF:
      uni->texture_ref = (GPUTexture **)value;
      uni->sampler_state = sampler_state;
      break;
    case DRW_UNIFORM_BLOCK_OBATTRS:
      uni->uniform_attrs = (GPUUniformAttrList *)value;
      break;
    default:
      uni->pvalue = (const float *)value;
      break;
  }
}

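/* Generic path for uniforms looked up by name. Block and texture types must go through
 * their dedicated DRW_shgroup_uniform_* functions, which use their own binding queries. */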
static void drw_shgroup_uniform(DRWShadingGroup *shgroup,
                                const char *name,
                                DRWUniformType type,
                                const void *value,
                                int length,
                                int arraysize)
{
  BLI_assert(arraysize > 0 && arraysize <= 16);
  BLI_assert(length >= 0 && length <= 16);
  BLI_assert(!ELEM(type,
                   DRW_UNIFORM_BLOCK,
                   DRW_UNIFORM_BLOCK_REF,
                   DRW_UNIFORM_TEXTURE,
                   DRW_UNIFORM_TEXTURE_REF));
  int location = GPU_shader_get_uniform(shgroup->shader, name);
  drw_shgroup_uniform_create_ex(shgroup, location, type, value, 0, length, arraysize);
}

void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup,
                                    const char *name,
                                    const GPUTexture *tex,
                                    eGPUSamplerState sampler_state)
{
  BLI_assert(tex != NULL);
  int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE, tex, sampler_state, 0, 1);
}

void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
  DRW_shgroup_uniform_texture_ex(shgroup, name, tex, GPU_SAMPLER_MAX);
}

void DRW_shgroup_uniform_texture_ref_ex(DRWShadingGroup *shgroup,
                                        const char *name,
                                        GPUTexture **tex,
                                        eGPUSamplerState sampler_state)
{
  BLI_assert(tex != NULL);
  int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE_REF, tex, sampler_state, 0, 1);
}

void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
  DRW_shgroup_uniform_texture_ref_ex(shgroup, name, tex, GPU_SAMPLER_MAX);
}

void DRW_shgroup_uniform_image(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
  BLI_assert(tex != NULL);
  int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE, tex, 0, 0, 1);
}

void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
  BLI_assert(tex != NULL);
  int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE_REF, tex, 0, 0, 1);
}

void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup,
                               const char *name,
                               const GPUUniformBuf *ubo)
{
  BLI_assert(ubo != NULL);
  int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK, ubo, 0, 0, 1);
}

void DRW_shgroup_uniform_block_ref(DRWShadingGroup *shgroup, const char *name, GPUUniformBuf **ubo)
{
  BLI_assert(ubo != NULL);
  int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, 0, 0, 1);
}

void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
                              const char *name,
                              const int *value,
                              int arraysize)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup,
                               const char *name,
                               const float *value,
                               int arraysize)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup,
                              const char *name,
                              const float *value,
                              int arraysize)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}

void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup,
                              const char *name,
                              const float *value,
                              int arraysize)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}

void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup,
                              const char *name,
                              const float *value,
                              int arraysize)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}

void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup,
                             const char *name,
                             const int *value,
                             int arraysize)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup,
                               const char *name,
                               const int *value,
                               int arraysize)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}

void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup,
                               const char *name,
                               const int *value,
                               int arraysize)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}

void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup,
                               const char *name,
                               const int *value,
                               int arraysize)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 4, arraysize);
}

void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
}

void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
}

/* Stores the int instead of a pointer. */
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_ivec2_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, value, 2, 1);
}

void DRW_shgroup_uniform_ivec3_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, value, 3, 1);
}

void DRW_shgroup_uniform_ivec4_copy(DRWShadingGroup *shgroup, const char *name, const int *value)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, value, 4, 1);
}

void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
{
  int ival = value;
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &ival, 1, 1);
}

void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_vec2_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, value, 2, 1);
}

void DRW_shgroup_uniform_vec3_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, value, 3, 1);
}

void DRW_shgroup_uniform_vec4_copy(DRWShadingGroup *shgroup, const char *name, const float *value)
{
  drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, value, 4, 1);
}

void DRW_shgroup_uniform_vec4_array_copy(DRWShadingGroup *shgroup,
                                         const char *name,
                                         const float (*value)[4],
                                         int arraysize)
{
  int location = GPU_shader_get_uniform(shgroup->shader, name);

  if (location == -1) {
    /* Nice to enable eventually, for now EEVEE uses uniforms that might not exist. */
    // BLI_assert(0);
    return;
  }

  for (int i = 0; i < arraysize; i++) {
    drw_shgroup_uniform_create_ex(
        shgroup, location + i, DRW_UNIFORM_FLOAT_COPY, &value[i], 0, 4, 1);
  }
}

void DRW_shgroup_vertex_buffer(DRWShadingGroup *shgroup,
                               const char *name,
                               GPUVertBuf *vertex_buffer)
{
  int location = GPU_shader_get_ssbo(shgroup->shader, name);
  if (location == -1) {
    BLI_assert(false && "Unable to locate binding of shader storage buffer objects.");
    return;
  }
  drw_shgroup_uniform_create_ex(
      shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE, vertex_buffer, 0, 0, 1);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Draw Call (DRW_calls)
 * \{ */

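/* Compute the add/scale factors that map object coordinates to the texture space (orco)
 * of the object data, falling back to an identity mapping when no texture space is set. */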
static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[4])
{
  ID *ob_data = (ob) ? ob->data : NULL;
  float *texcoloc = NULL;
  float *texcosize = NULL;
  if (ob_data != NULL) {
    switch (GS(ob_data->name)) {
      case ID_ME:
        BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, &texcosize);
        break;
      case ID_CU: {
        Curve *cu = (Curve *)ob_data;
        BKE_curve_texspace_ensure(cu);
        texcoloc = cu->loc;
        texcosize = cu->size;
        break;
      }
      case ID_MB: {
        MetaBall *mb = (MetaBall *)ob_data;
        texcoloc = mb->loc;
        texcosize = mb->size;
        break;
      }
      default:
        break;
    }
  }

  if ((texcoloc != NULL) && (texcosize != NULL)) {
    mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
    invert_v3(r_orcofacs[1]);
    sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
    negate_v3(r_orcofacs[0]);
    mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
  }
  else {
    copy_v3_fl(r_orcofacs[0], 0.0f);
    copy_v3_fl(r_orcofacs[1], 1.0f);
  }
}

BLI_INLINE void drw_call_matrix_init(DRWObjectMatrix *ob_mats, Object *ob, float (*obmat)[4])
{
  copy_m4_m4(ob_mats->model, obmat);
  if (ob) {
    copy_m4_m4(ob_mats->modelinverse, ob->imat);
  }
  else {
    /* WATCH: Can be costly. */
    invert_m4_m4(ob_mats->modelinverse, ob_mats->model);
  }
}

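/* Fill the per-object info block sent to shaders: object index, texture space factors,
 * a per-object random value, selection/dupli state flags and the object color. */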
static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, Object *ob)
{
  BLI_assert(ob);
  /* Index. */
  ob_infos->ob_index = ob->index;
  /* Orco factors. */
  drw_call_calc_orco(ob, ob_infos->orcotexfac);
  /* Random float value. */
  uint random = (DST.dupli_source) ?
                    DST.dupli_source->random_id :
                    /* TODO(fclem): this is rather costly to do at runtime. Maybe we can
                     * put it in ob->runtime and make depsgraph ensure it is up to date. */
                    BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
  ob_infos->ob_random = random * (1.0f / (float)0xFFFFFFFF);
  /* Object State. */
  ob_infos->ob_flag = 1.0f; /* Required to have a correct sign */
  ob_infos->ob_flag += (ob->base_flag & BASE_SELECTED) ? (1 << 1) : 0;
  ob_infos->ob_flag += (ob->base_flag & BASE_FROM_DUPLI) ? (1 << 2) : 0;
  ob_infos->ob_flag += (ob->base_flag & BASE_FROM_SET) ? (1 << 3) : 0;
  ob_infos->ob_flag += (ob == DST.draw_ctx.obact) ? (1 << 4) : 0;
  /* Negative scaling. */
  ob_infos->ob_flag *= (ob->transflag & OB_NEG_SCALE) ? -1.0f : 1.0f;
  /* Object Color. */
  copy_v4_v4(ob_infos->ob_color, ob->color);
}

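/* Initialize the culling state of a draw call: derive a world-space bounding sphere from
 * the object bounding box, or disable culling (negative radius) when no box is available. */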
static void drw_call_culling_init(DRWCullingState *cull, Object *ob)
{
  BoundBox *bbox;
  if (ob != NULL && (bbox = BKE_object_boundbox_get(ob))) {
    float corner[3];
    /* Get BoundSphere center and radius from the BoundBox. */
    mid_v3_v3v3(cull->bsphere.center, bbox->vec[0], bbox->vec[6]);
    mul_v3_m4v3(corner, ob->obmat, bbox->vec[0]);
    mul_m4_v3(ob->obmat, cull->bsphere.center);
    cull->bsphere.radius = len_v3v3(cull->bsphere.center, corner);

    /* Bypass test for very large objects (see T67319). */
    if (UNLIKELY(cull->bsphere.radius > 1e12)) {
      cull->bsphere.radius = -1.0f;
    }
  }
  else {
    /* Bypass test. */
    cull->bsphere.radius = -1.0f;
  }
  /* Reset user data */
  cull->user_data = NULL;
}

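/* Allocate matrices, culling and infos slots for a new resource handle and initialize the
 * matrix and culling data. The infos slot is only filled later, when actually needed. */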
static DRWResourceHandle drw_resource_handle_new(float (*obmat)[4], Object *ob)
{
  DRWCullingState *culling = BLI_memblock_alloc(DST.vmempool->cullstates);
  DRWObjectMatrix *ob_mats = BLI_memblock_alloc(DST.vmempool->obmats);
  /* FIXME Meh, not always needed but can be accessed after creation.
   * Also it needs to have the same resource handle. */
  DRWObjectInfos *ob_infos = BLI_memblock_alloc(DST.vmempool->obinfos);
  UNUSED_VARS(ob_infos);

  DRWResourceHandle handle = DST.resource_handle;
  DRW_handle_increment(&DST.resource_handle);

  if (ob && (ob->transflag & OB_NEG_SCALE)) {
    DRW_handle_negative_scale_enable(&handle);
  }

  drw_call_matrix_init(ob_mats, ob, obmat);
  drw_call_culling_init(culling, ob);
  /* ob_infos is init only if needed. */

  return handle;
}

uint32_t DRW_object_resource_id_get(Object *UNUSED(ob))
{
  DRWResourceHandle handle = DST.ob_handle;
  if (handle == 0) {
    /* Handle not yet allocated. Return next handle. */
    handle = DST.resource_handle;
  }
  return handle & ~(1u << 31);
}

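/* Return the resource handle for the current draw call. Matrix-only calls get a fresh
 * handle; object calls reuse DST.ob_handle, lazily filling the object infos and uniform
 * attribute buffers when the shading group needs them. */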
static DRWResourceHandle drw_resource_handle(DRWShadingGroup *shgroup,
                                             float (*obmat)[4],
                                             Object *ob)
{
  if (ob == NULL) {
    if (obmat == NULL) {
      DRWResourceHandle handle = 0;
      return handle;
    }

    return drw_resource_handle_new(obmat, NULL);
  }

  if (DST.ob_handle == 0) {
    DST.ob_handle = drw_resource_handle_new(obmat, ob);
    DST.ob_state_obinfo_init = false;
  }

  if (shgroup->objectinfo) {
    if (!DST.ob_state_obinfo_init) {
      DST.ob_state_obinfo_init = true;
      DRWObjectInfos *ob_infos = DRW_memblock_elem_from_handle(DST.vmempool->obinfos,
                                                               &DST.ob_handle);

      drw_call_obinfos_init(ob_infos, ob);
    }
  }

  if (shgroup->uniform_attrs) {
    drw_uniform_attrs_pool_update(DST.vmempool->obattrs_ubo_pool,
                                  shgroup->uniform_attrs,
                                  &DST.ob_handle,
                                  ob,
                                  DST.dupli_parent,
                                  DST.dupli_source);
  }

  return DST.ob_handle;
}

static void command_type_set(uint64_t *command_type_bits, int index, eDRWCommandType type)
{
  command_type_bits[index / 16] |= ((uint64_t)type) << ((index % 16) * 4);
}

eDRWCommandType command_type_get(const uint64_t *command_type_bits, int index)
{
  return ((command_type_bits[index / 16] >> ((index % 16) * 4)) & 0xF);
}

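/* Reserve a new command slot in the shading group, allocating a small chunk for the first
 * command and full-size chunks afterwards, and record the command type in the chunk's
 * packed type bitfield. */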
static void *drw_command_create(DRWShadingGroup *shgroup, eDRWCommandType type)
{
  DRWCommandChunk *chunk = shgroup->cmd.last;

  if (chunk == NULL) {
    DRWCommandSmallChunk *smallchunk = BLI_memblock_alloc(DST.vmempool->commands_small);
    smallchunk->command_len = ARRAY_SIZE(smallchunk->commands);
    smallchunk->command_used = 0;
    smallchunk->command_type[0] = 0x0lu;
    chunk = (DRWCommandChunk *)smallchunk;
    BLI_LINKS_APPEND(&shgroup->cmd, chunk);
  }
  else if (chunk->command_used == chunk->command_len) {
    chunk = BLI_memblock_alloc(DST.vmempool->commands);
    chunk->command_len = ARRAY_SIZE(chunk->commands);
    chunk->command_used = 0;
    memset(chunk->command_type, 0x0, sizeof(chunk->command_type));
    BLI_LINKS_APPEND(&shgroup->cmd, chunk);
  }

  command_type_set(chunk->command_type, chunk->command_used, type);

  return chunk->commands + chunk->command_used++;
}

static void drw_command_draw(DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle)
{
  DRWCommandDraw *cmd = drw_command_create(shgroup, DRW_CMD_DRAW);
  cmd->batch = batch;
  cmd->handle = handle;
}

static void drw_command_draw_range(
    DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint start, uint count)
{
  DRWCommandDrawRange *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_RANGE);
  cmd->batch = batch;
  cmd->handle = handle;
  cmd->vert_first = start;
  cmd->vert_count = count;
}

static void drw_command_draw_instance(
    DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint count, bool use_attr)
{
  DRWCommandDrawInstance *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE);
  cmd->batch = batch;
  cmd->handle = handle;
  cmd->inst_count = count;
  cmd->use_attrs = use_attr;
}

static void drw_command_draw_instance_range(
    DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint start, uint count)
{
  DRWCommandDrawInstanceRange *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE_RANGE);
  cmd->batch = batch;
  cmd->handle = handle;
  cmd->inst_first = start;
  cmd->inst_count = count;
}

static void drw_command_compute(DRWShadingGroup *shgroup,
                                int groups_x_len,
                                int groups_y_len,
                                int groups_z_len)
{
  DRWCommandCompute *cmd = drw_command_create(shgroup, DRW_CMD_COMPUTE);
  cmd->groups_x_len = groups_x_len;
  cmd->groups_y_len = groups_y_len;
  cmd->groups_z_len = groups_z_len;
}

static void drw_command_draw_procedural(DRWShadingGroup *shgroup,
                                        GPUBatch *batch,
                                        DRWResourceHandle handle,
                                        uint vert_count)
{
  DRWCommandDrawProcedural *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_PROCEDURAL);
  cmd->batch = batch;
  cmd->handle = handle;
  cmd->vert_count = vert_count;
}

static void drw_command_set_select_id(DRWShadingGroup *shgroup, GPUVertBuf *buf, uint select_id)
{
  /* Only one can be valid. */
  BLI_assert(buf == NULL || select_id == -1);
  DRWCommandSetSelectID *cmd = drw_command_create(shgroup, DRW_CMD_SELECTID);
  cmd->select_buf = buf;
  cmd->select_id = select_id;
}

static void drw_command_set_stencil_mask(DRWShadingGroup *shgroup,
                                         uint write_mask,
                                         uint reference,
                                         uint compare_mask)
{
  BLI_assert(write_mask <= 0xFF);
  BLI_assert(reference <= 0xFF);
  BLI_assert(compare_mask <= 0xFF);
  DRWCommandSetStencil *cmd = drw_command_create(shgroup, DRW_CMD_STENCIL);
  cmd->write_mask = write_mask;
  cmd->comp_mask = compare_mask;
  cmd->ref = reference;
}

static void drw_command_clear(DRWShadingGroup *shgroup,
                              eGPUFrameBufferBits channels,
                              uchar r,
                              uchar g,
                              uchar b,
                              uchar a,
                              float depth,
                              uchar stencil)
{
  DRWCommandClear *cmd = drw_command_create(shgroup, DRW_CMD_CLEAR);
  cmd->clear_channels = channels;
  cmd->r = r;
  cmd->g = g;
  cmd->b = b;
  cmd->a = a;
  cmd->depth = depth;
  cmd->stencil = stencil;
}

static void drw_command_set_mutable_state(DRWShadingGroup *shgroup,
                                          DRWState enable,
                                          DRWState disable)
{
  /* TODO Restrict what state can be changed. */
  DRWCommandSetMutableState *cmd = drw_command_create(shgroup, DRW_CMD_DRWSTATE);
  cmd->enable = enable;
  cmd->disable = disable;
}

void DRW_shgroup_call_ex(DRWShadingGroup *shgroup,
                         Object *ob,
                         float (*obmat)[4],
                         struct GPUBatch *geom,
                         bool bypass_culling,
                         void *user_data)
{
  BLI_assert(geom != NULL);
  if (G.f & G_FLAG_PICKSEL) {
    drw_command_set_select_id(shgroup, NULL, DST.select_id);
  }
  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : obmat, ob);
  drw_command_draw(shgroup, geom, handle);

  /* Culling data. */
  if (user_data || bypass_culling) {
    DRWCullingState *culling = DRW_memblock_elem_from_handle(DST.vmempool->cullstates,
                                                             &DST.ob_handle);

    if (user_data) {
      culling->user_data = user_data;
    }
    if (bypass_culling) {
      /* NOTE this will disable culling for the whole object. */
      culling->bsphere.radius = -1.0f;
    }
  }
}

void DRW_shgroup_call_range(
    DRWShadingGroup *shgroup, struct Object *ob, GPUBatch *geom, uint v_sta, uint v_ct)
{
  BLI_assert(geom != NULL);
  if (G.f & G_FLAG_PICKSEL) {
    drw_command_set_select_id(shgroup, NULL, DST.select_id);
  }
  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
  drw_command_draw_range(shgroup, geom, handle, v_sta, v_ct);
}

/* A count of 0 instances will use the default number of instances in the batch. */
void DRW_shgroup_call_instance_range(
    DRWShadingGroup *shgroup, Object *ob, struct GPUBatch *geom, uint i_sta, uint i_ct)
{
  BLI_assert(geom != NULL);
  if (G.f & G_FLAG_PICKSEL) {
    drw_command_set_select_id(shgroup, NULL, DST.select_id);
  }
  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
  drw_command_draw_instance_range(shgroup, geom, handle, i_sta, i_ct);
}

void DRW_shgroup_call_compute(DRWShadingGroup *shgroup,
                              int groups_x_len,
                              int groups_y_len,
                              int groups_z_len)
{
  BLI_assert(groups_x_len > 0 && groups_y_len > 0 && groups_z_len > 0);
  BLI_assert(GPU_compute_shader_support());

  drw_command_compute(shgroup, groups_x_len, groups_y_len, groups_z_len);
}

static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
                                               GPUBatch *geom,
                                               Object *ob,
                                               uint vert_count)
{
  BLI_assert(vert_count > 0);
  BLI_assert(geom != NULL);
  if (G.f & G_FLAG_PICKSEL) {
    drw_command_set_select_id(shgroup, NULL, DST.select_id);
  }
  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
  drw_command_draw_procedural(shgroup, geom, handle, vert_count);
}

void DRW_shgroup_call_procedural_points(DRWShadingGroup *shgroup, Object *ob, uint point_count)
{
  struct GPUBatch *geom = drw_cache_procedural_points_get();
  drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, point_count);
}

void DRW_shgroup_call_procedural_lines(DRWShadingGroup *shgroup, Object *ob, uint line_count)
{
  struct GPUBatch *geom = drw_cache_procedural_lines_get();
  drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, line_count * 2);
}

void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *shgroup, Object *ob, uint tri_count)
{
  struct GPUBatch *geom = drw_cache_procedural_triangles_get();
  drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, tri_count * 3);
}

/* Should be removed */
void DRW_shgroup_call_instances(DRWShadingGroup *shgroup,
                                Object *ob,
                                struct GPUBatch *geom,
                                uint count)
{
  BLI_assert(geom != NULL);
  if (G.f & G_FLAG_PICKSEL) {
    drw_command_set_select_id(shgroup, NULL, DST.select_id);
  }
  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
  drw_command_draw_instance(shgroup, geom, handle, count, false);
}

void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup,
                                           Object *ob,
                                           struct GPUBatch *geom,
                                           struct GPUBatch *inst_attributes)
{
  BLI_assert(geom != NULL);
  BLI_assert(inst_attributes != NULL);
  if (G.f & G_FLAG_PICKSEL) {
    drw_command_set_select_id(shgroup, NULL, DST.select_id);
  }
  DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
  GPUBatch *batch = DRW_temp_batch_instance_request(DST.idatalist, NULL, inst_attributes, geom);
  drw_command_draw_instance(shgroup, batch, handle, 0, true);
}

#define SCULPT_DEBUG_BUFFERS (G.debug_value == 889)
typedef struct DRWSculptCallbackData {
  Object *ob;
  DRWShadingGroup **shading_groups;
  int num_shading_groups;
  bool use_wire;
  bool use_mats;
  bool use_mask;
  bool use_fsets;
  bool fast_mode; /* Set by draw manager. Do not init. */

  int debug_node_nr;
} DRWSculptCallbackData;

#define SCULPT_DEBUG_COLOR(id) (sculpt_debug_colors[id % 9])
static float sculpt_debug_colors[9][4] = {
    {1.0f, 0.2f, 0.2f, 1.0f},
    {0.2f, 1.0f, 0.2f, 1.0f},
    {0.2f, 0.2f, 1.0f, 1.0f},
    {1.0f, 1.0f, 0.2f, 1.0f},
    {0.2f, 1.0f, 1.0f, 1.0f},
    {1.0f, 0.2f, 1.0f, 1.0f},
    {1.0f, 0.7f, 0.2f, 1.0f},
    {0.2f, 1.0f, 0.7f, 1.0f},
    {0.7f, 0.2f, 1.0f, 1.0f},
};

static void sculpt_draw_cb(DRWSculptCallbackData *scd, GPU_PBVH_Buffers *buffers)
{
  if (!buffers) {
    return;
  }

  /* Meh... use_mask is a bit misleading here. */
  if (scd->use_mask && !GPU_pbvh_buffers_has_overlays(buffers)) {
    return;
  }

  GPUBatch *geom = GPU_pbvh_buffers_batch_get(buffers, scd->fast_mode, scd->use_wire);
  short index = 0;

  if (scd->use_mats) {
    index = GPU_pbvh_buffers_material_index_get(buffers);
    if (index >= scd->num_shading_groups) {
      index = 0;
    }
  }

  DRWShadingGroup *shgrp = scd->shading_groups[index];
  if (geom != NULL && shgrp != NULL) {
    if (SCULPT_DEBUG_BUFFERS) {
      /* Color each buffer in a different color. Only works in solid/X-ray mode. */
      shgrp = DRW_shgroup_create_sub(shgrp);
      DRW_shgroup_uniform_vec3(
          shgrp, "materialDiffuseColor", SCULPT_DEBUG_COLOR(scd->debug_node_nr++), 1);
    }
    /* DRW_shgroup_call_no_cull reuses matrix calculations for all the drawcalls of this
     * object. */
    DRW_shgroup_call_no_cull(shgrp, geom, scd->ob);
  }
}

static void sculpt_debug_cb(void *user_data,
                            const float bmin[3],
                            const float bmax[3],
                            PBVHNodeFlags flag)
{
  int *debug_node_nr = (int *)user_data;
  BoundBox bb;
  BKE_boundbox_init_from_minmax(&bb, bmin, bmax);

#if 0 /* Nodes hierarchy. */
  if (flag & PBVH_Leaf) {
    DRW_debug_bbox(&bb, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
  }
  else {
    DRW_debug_bbox(&bb, (float[4]){0.5f, 0.5f, 0.5f, 0.6f});
  }
#else /* Color coded leaf bounds. */
  if (flag & PBVH_Leaf) {
    DRW_debug_bbox(&bb, SCULPT_DEBUG_COLOR((*debug_node_nr)++));
  }
#endif
}

static void drw_sculpt_get_frustum_planes(Object *ob, float planes[6][4])
{
  /* TODO: take into account partial redraw for clipping planes. */
  DRW_view_frustum_planes_get(DRW_view_default_get(), planes);

  /* Transform clipping planes to object space. Transforming a plane with a
   * 4x4 matrix is done by multiplying with the transpose inverse.
   * The inverse cancels out here since we transform by inverse(obmat). */
  float tmat[4][4];
  transpose_m4_m4(tmat, ob->obmat);
  for (int i = 0; i < 6; i++) {
    mul_m4_v4(tmat, planes[i]);
  }
}

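/* Walk the object's PBVH and emit one draw call per visible node, handling delayed updates,
 * fast navigation mode and optional debug bounding-box drawing. */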
static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
|
2019-05-04 01:39:35 +02:00
|
|
|
{
|
2021-02-05 16:23:34 +11:00
|
|
|
/* PBVH should always exist for non-empty meshes, created by depsgraph eval. */
|
2019-05-31 12:50:15 +02:00
|
|
|
PBVH *pbvh = (scd->ob->sculpt) ? scd->ob->sculpt->pbvh : NULL;
|
2019-05-04 01:39:35 +02:00
|
|
|
if (!pbvh) {
|
|
|
|
return;
|
|
|
|
}
|
2019-02-14 20:24:13 +01:00
|
|
|
|
2019-09-27 22:42:57 +02:00
|
|
|
const DRWContextState *drwctx = DRW_context_state_get();
|
|
|
|
RegionView3D *rv3d = drwctx->rv3d;
|
2020-03-12 17:51:39 +01:00
|
|
|
const bool navigating = rv3d && (rv3d->rflag & RV3D_NAVIGATING);
|
|
|
|
|
|
|
|
Paint *p = NULL;
|
|
|
|
if (drwctx->evil_C != NULL) {
|
|
|
|
p = BKE_paint_get_active_from_context(drwctx->evil_C);
|
|
|
|
}
|
2019-09-27 22:42:57 +02:00
|
|
|
|
|
|
|
/* Frustum planes to show only visible PBVH nodes. */
|
2020-03-12 17:51:39 +01:00
|
|
|
float update_planes[6][4];
|
|
|
|
float draw_planes[6][4];
|
|
|
|
PBVHFrustumPlanes update_frustum;
|
|
|
|
PBVHFrustumPlanes draw_frustum;
|
|
|
|
|
|
|
|
if (p && (p->flags & PAINT_SCULPT_DELAY_UPDATES)) {
|
|
|
|
update_frustum.planes = update_planes;
|
|
|
|
update_frustum.num_planes = 6;
|
|
|
|
BKE_pbvh_get_frustum_planes(pbvh, &update_frustum);
|
|
|
|
if (!navigating) {
|
|
|
|
drw_sculpt_get_frustum_planes(scd->ob, update_planes);
|
|
|
|
update_frustum.planes = update_planes;
|
|
|
|
update_frustum.num_planes = 6;
|
|
|
|
BKE_pbvh_set_frustum_planes(pbvh, &update_frustum);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
drw_sculpt_get_frustum_planes(scd->ob, update_planes);
|
|
|
|
update_frustum.planes = update_planes;
|
|
|
|
update_frustum.num_planes = 6;
|
|
|
|
}
|
|
|
|
|
|
|
|
drw_sculpt_get_frustum_planes(scd->ob, draw_planes);
|
|
|
|
draw_frustum.planes = draw_planes;
|
|
|
|
draw_frustum.num_planes = 6;
|
2019-09-27 11:09:50 +02:00
|
|
|
|
2019-09-27 22:42:57 +02:00
|
|
|
/* Fast mode to show low poly multires while navigating. */
|
2019-05-04 01:39:35 +02:00
|
|
|
scd->fast_mode = false;
|
2020-03-12 17:51:39 +01:00
|
|
|
if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
|
|
|
|
scd->fast_mode = rv3d && (rv3d->rflag & RV3D_NAVIGATING);
|
2019-02-14 20:24:13 +01:00
|
|
|
}
|
|
|
|
|
2019-09-29 02:27:53 +02:00
|
|
|
/* Update draw buffers only for visible nodes while painting.
|
|
|
|
* But do update them otherwise so navigating stays smooth. */
|
2020-03-12 17:51:39 +01:00
|
|
|
bool update_only_visible = rv3d && !(rv3d->rflag & RV3D_PAINTING);
|
|
|
|
if (p && (p->flags & PAINT_SCULPT_DELAY_UPDATES)) {
|
|
|
|
update_only_visible = true;
|
|
|
|
}
|
2019-09-29 02:27:53 +02:00
|
|
|
|
2019-05-31 12:51:12 +02:00
|
|
|
Mesh *mesh = scd->ob->data;
|
|
|
|
BKE_pbvh_update_normals(pbvh, mesh->runtime.subdiv_ccg);
|
|
|
|
|
2019-09-29 02:27:53 +02:00
|
|
|
BKE_pbvh_draw_cb(pbvh,
|
|
|
|
update_only_visible,
|
2020-03-12 17:51:39 +01:00
|
|
|
&update_frustum,
|
|
|
|
&draw_frustum,
|
2019-09-29 02:27:53 +02:00
|
|
|
(void (*)(void *, GPU_PBVH_Buffers *))sculpt_draw_cb,
|
|
|
|
scd);
|
2018-02-28 01:16:23 +01:00
|
|
|
|
2019-09-28 01:29:59 +02:00
|
|
|
if (SCULPT_DEBUG_BUFFERS) {
|
|
|
|
int debug_node_nr = 0;
|
|
|
|
DRW_debug_modelmat(scd->ob->obmat);
|
|
|
|
BKE_pbvh_draw_debug_cb(
|
|
|
|
pbvh,
|
|
|
|
(void (*)(
|
|
|
|
void *d, const float min[3], const float max[3], PBVHNodeFlags f))sculpt_debug_cb,
|
|
|
|
&debug_node_nr);
|
|
|
|
}
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
2020-05-14 11:56:16 +02:00
|
|
|
void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire, bool use_mask)
|
2019-05-04 01:39:35 +02:00
|
|
|
{
|
|
|
|
DRWSculptCallbackData scd = {
|
|
|
|
.ob = ob,
|
|
|
|
.shading_groups = &shgroup,
|
2020-04-22 13:54:35 +02:00
|
|
|
.num_shading_groups = 1,
|
2019-05-04 01:39:35 +02:00
|
|
|
.use_wire = use_wire,
|
|
|
|
.use_mats = false,
|
|
|
|
.use_mask = use_mask,
|
|
|
|
};
|
2020-05-14 11:56:16 +02:00
|
|
|
drw_sculpt_generate_calls(&scd);
|
2019-05-04 01:39:35 +02:00
|
|
|
}
|
|
|
|
|
2020-04-22 13:54:35 +02:00
|
|
|
void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
|
|
|
|
int num_shgroups,
|
2020-05-14 11:56:16 +02:00
|
|
|
Object *ob)
|
2019-05-04 01:39:35 +02:00
|
|
|
{
|
|
|
|
DRWSculptCallbackData scd = {
|
|
|
|
.ob = ob,
|
|
|
|
.shading_groups = shgroups,
|
2020-04-22 13:54:35 +02:00
|
|
|
.num_shading_groups = num_shgroups,
|
2019-05-04 01:39:35 +02:00
|
|
|
.use_wire = false,
|
|
|
|
.use_mats = true,
|
|
|
|
.use_mask = false,
|
|
|
|
};
|
2020-05-14 11:56:16 +02:00
|
|
|
drw_sculpt_generate_calls(&scd);
|
2019-02-14 20:24:13 +01:00
|
|
|
}
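/* A minimal usage sketch of the two public sculpt entry points above, as an engine's cache
 * populate step might call them. The names `shgrp`, `shgrps`, `num_materials` and `ob` are
 * hypothetical engine data; `ob` is assumed to be in sculpt mode with a valid PBVH. */
#if 0
  /* Single shading group, no wireframe overlay, no mask display. */
  DRW_shgroup_call_sculpt(shgrp, ob, false, false);

  /* Or one shading group per material slot. */
  DRW_shgroup_call_sculpt_with_materials(shgrps, num_materials, ob);
#endif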
|
|
|
|
|
2019-05-13 17:56:20 +02:00
|
|
|
static GPUVertFormat inst_select_format = {0};
|
|
|
|
|
2019-05-13 18:28:36 +02:00
|
|
|
DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup,
|
|
|
|
struct GPUVertFormat *format,
|
|
|
|
GPUPrimType prim_type)
|
2019-05-13 17:56:20 +02:00
|
|
|
{
|
|
|
|
BLI_assert(ELEM(prim_type, GPU_PRIM_POINTS, GPU_PRIM_LINES, GPU_PRIM_TRI_FAN));
|
|
|
|
BLI_assert(format != NULL);
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
DRWCallBuffer *callbuf = BLI_memblock_alloc(DST.vmempool->callbuffers);
|
|
|
|
callbuf->buf = DRW_temp_buffer_request(DST.idatalist, format, &callbuf->count);
|
|
|
|
callbuf->buf_select = NULL;
|
|
|
|
callbuf->count = 0;
|
2019-05-13 17:56:20 +02:00
|
|
|
|
|
|
|
if (G.f & G_FLAG_PICKSEL) {
|
|
|
|
/* Not actually used for rendering but allocated in one chunk. */
|
|
|
|
if (inst_select_format.attr_len == 0) {
|
|
|
|
GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
|
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
callbuf->buf_select = DRW_temp_buffer_request(
|
|
|
|
DST.idatalist, &inst_select_format, &callbuf->count);
|
|
|
|
drw_command_set_select_id(shgroup, callbuf->buf_select, -1);
|
2019-05-13 17:56:20 +02:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
|
|
|
|
DRWResourceHandle handle = drw_resource_handle(shgroup, NULL, NULL);
|
|
|
|
GPUBatch *batch = DRW_temp_batch_request(DST.idatalist, callbuf->buf, prim_type);
|
|
|
|
drw_command_draw(shgroup, batch, handle);
|
|
|
|
|
|
|
|
return callbuf;
|
2019-05-13 17:56:20 +02:00
|
|
|
}
|
|
|
|
|
2019-05-13 18:28:36 +02:00
|
|
|
DRWCallBuffer *DRW_shgroup_call_buffer_instance(DRWShadingGroup *shgroup,
|
|
|
|
struct GPUVertFormat *format,
|
|
|
|
GPUBatch *geom)
|
2018-02-28 01:16:23 +01:00
|
|
|
{
|
2019-05-13 17:56:20 +02:00
|
|
|
BLI_assert(geom != NULL);
|
|
|
|
BLI_assert(format != NULL);
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
DRWCallBuffer *callbuf = BLI_memblock_alloc(DST.vmempool->callbuffers);
|
|
|
|
callbuf->buf = DRW_temp_buffer_request(DST.idatalist, format, &callbuf->count);
|
|
|
|
callbuf->buf_select = NULL;
|
|
|
|
callbuf->count = 0;
|
2019-05-13 17:56:20 +02:00
|
|
|
|
2019-02-02 13:39:51 +11:00
|
|
|
if (G.f & G_FLAG_PICKSEL) {
|
2019-05-13 17:56:20 +02:00
|
|
|
/* Not actually used for rendering but allocated in one chunk. */
|
|
|
|
if (inst_select_format.attr_len == 0) {
|
|
|
|
GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
callbuf->buf_select = DRW_temp_buffer_request(
|
|
|
|
DST.idatalist, &inst_select_format, &callbuf->count);
|
|
|
|
drw_command_set_select_id(shgroup, callbuf->buf_select, -1);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
|
|
|
|
DRWResourceHandle handle = drw_resource_handle(shgroup, NULL, NULL);
|
2019-12-02 01:40:58 +01:00
|
|
|
GPUBatch *batch = DRW_temp_batch_instance_request(DST.idatalist, callbuf->buf, NULL, geom);
|
2019-05-31 01:45:41 +02:00
|
|
|
drw_command_draw(shgroup, batch, handle);
|
|
|
|
|
|
|
|
return callbuf;
|
2019-05-13 17:56:20 +02:00
|
|
|
}
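/* Usage sketch for the instancing variant: each entry added to the returned buffer becomes one
 * instance of `geom`. The names `grp` and the attribute "inst_pos" are assumptions about the
 * calling engine and its shader; DRW_cache_quad_get() is just a hypothetical choice of geometry. */
#if 0
  static GPUVertFormat *g_inst_format = NULL;
  if (g_inst_format == NULL) {
    g_inst_format = DRW_shgroup_instance_format_array(
        (DRWInstanceAttrFormat[]){{"inst_pos", DRW_ATTR_FLOAT, 3}}, 1);
  }
  GPUBatch *geom = DRW_cache_quad_get();
  DRWCallBuffer *inst_buf = DRW_shgroup_call_buffer_instance(grp, g_inst_format, geom);

  const float inst_pos[3] = {1.0f, 2.0f, 3.0f};
  DRW_buffer_add_entry_array(inst_buf, (const void *[]){inst_pos}, 1);
#endif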
|
2018-02-28 01:16:23 +01:00
|
|
|
|
2019-12-02 01:40:58 +01:00
|
|
|
void DRW_buffer_add_entry_struct(DRWCallBuffer *callbuf, const void *data)
|
|
|
|
{
|
|
|
|
GPUVertBuf *buf = callbuf->buf;
|
2020-09-06 16:40:07 +02:00
|
|
|
const bool resize = (callbuf->count == GPU_vertbuf_get_vertex_alloc(buf));
|
2019-12-02 01:40:58 +01:00
|
|
|
|
|
|
|
if (UNLIKELY(resize)) {
|
|
|
|
GPU_vertbuf_data_resize(buf, callbuf->count + DRW_BUFFER_VERTS_CHUNK);
|
|
|
|
}
|
|
|
|
|
|
|
|
GPU_vertbuf_vert_set(buf, callbuf->count, data);
|
|
|
|
|
|
|
|
if (G.f & G_FLAG_PICKSEL) {
|
|
|
|
if (UNLIKELY(resize)) {
|
|
|
|
GPU_vertbuf_data_resize(callbuf->buf_select, callbuf->count + DRW_BUFFER_VERTS_CHUNK);
|
|
|
|
}
|
|
|
|
GPU_vertbuf_attr_set(callbuf->buf_select, 0, callbuf->count, &DST.select_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
callbuf->count++;
|
|
|
|
}
|
|
|
|
|
2019-05-13 17:56:20 +02:00
|
|
|
void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint attr_len)
|
|
|
|
{
|
2019-05-31 01:45:41 +02:00
|
|
|
GPUVertBuf *buf = callbuf->buf;
|
2020-09-06 16:40:07 +02:00
|
|
|
const bool resize = (callbuf->count == GPU_vertbuf_get_vertex_alloc(buf));
|
2019-05-13 17:56:20 +02:00
|
|
|
|
2020-09-06 16:40:07 +02:00
|
|
|
BLI_assert(attr_len == GPU_vertbuf_get_format(buf)->attr_len);
|
2018-02-28 01:16:23 +01:00
|
|
|
UNUSED_VARS_NDEBUG(attr_len);
|
|
|
|
|
2019-05-13 17:56:20 +02:00
|
|
|
if (UNLIKELY(resize)) {
|
2019-05-31 01:45:41 +02:00
|
|
|
GPU_vertbuf_data_resize(buf, callbuf->count + DRW_BUFFER_VERTS_CHUNK);
|
2019-05-13 17:56:20 +02:00
|
|
|
}
|
|
|
|
|
2019-09-08 00:12:26 +10:00
|
|
|
for (int i = 0; i < attr_len; i++) {
|
2019-05-31 01:45:41 +02:00
|
|
|
GPU_vertbuf_attr_set(buf, i, callbuf->count, attr[i]);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
2019-05-13 17:56:20 +02:00
|
|
|
if (G.f & G_FLAG_PICKSEL) {
|
|
|
|
if (UNLIKELY(resize)) {
|
2019-05-31 01:45:41 +02:00
|
|
|
GPU_vertbuf_data_resize(callbuf->buf_select, callbuf->count + DRW_BUFFER_VERTS_CHUNK);
|
2019-05-13 17:56:20 +02:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
GPU_vertbuf_attr_set(callbuf->buf_select, 0, callbuf->count, &DST.select_id);
|
2019-05-13 17:56:20 +02:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
|
|
|
|
callbuf->count++;
|
2018-02-28 01:16:23 +01:00
|
|
|
}
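/* Usage sketch tying DRW_shgroup_call_buffer() and DRW_buffer_add_entry_array() together: the
 * engine accumulates vertices one by one during cache population, while the temporary vertex
 * buffer and batch are owned and recycled by the draw manager. `grp` and the attribute names
 * are hypothetical and must match the bound shader. */
#if 0
  static GPUVertFormat *g_format = NULL;
  if (g_format == NULL) {
    g_format = DRW_shgroup_instance_format_array(
        (DRWInstanceAttrFormat[]){{"pos", DRW_ATTR_FLOAT, 3}, {"color", DRW_ATTR_FLOAT, 4}}, 2);
  }
  DRWCallBuffer *buf = DRW_shgroup_call_buffer(grp, g_format, GPU_PRIM_POINTS);

  const float pos[3] = {0.0f, 0.0f, 0.0f};
  const float color[4] = {1.0f, 0.5f, 0.0f, 1.0f};
  DRW_buffer_add_entry_array(buf, (const void *[]){pos, color}, 2);
#endif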
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
/** \name Shading Groups (DRW_shgroup)
|
|
|
|
* \{ */
|
|
|
|
|
2018-03-25 18:27:32 +02:00
|
|
|
static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
|
2018-02-28 01:16:23 +01:00
|
|
|
{
|
|
|
|
shgroup->uniforms = NULL;
|
2020-08-05 19:14:40 +03:00
|
|
|
shgroup->uniform_attrs = NULL;
|
2018-02-28 01:16:23 +01:00
|
|
|
|
2020-06-04 13:43:28 +02:00
|
|
|
int view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_VIEW);
|
|
|
|
int model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_MODEL);
|
|
|
|
int info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_INFO);
|
2019-05-31 01:45:41 +02:00
|
|
|
int baseinst_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_BASE_INSTANCE);
|
|
|
|
int chunkid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_CHUNK);
|
2020-03-09 16:27:24 +01:00
|
|
|
int resourceid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_ID);
|
2019-05-31 01:45:41 +02:00
|
|
|
|
|
|
|
if (chunkid_location != -1) {
|
|
|
|
drw_shgroup_uniform_create_ex(
|
2020-06-03 13:03:36 +02:00
|
|
|
shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, NULL, 0, 0, 1);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
|
2020-03-09 16:27:24 +01:00
|
|
|
if (resourceid_location != -1) {
|
|
|
|
drw_shgroup_uniform_create_ex(
|
2020-06-03 13:03:36 +02:00
|
|
|
shgroup, resourceid_location, DRW_UNIFORM_RESOURCE_ID, NULL, 0, 0, 1);
|
2020-03-09 16:27:24 +01:00
|
|
|
}
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
if (baseinst_location != -1) {
|
|
|
|
drw_shgroup_uniform_create_ex(
|
2020-06-03 13:03:36 +02:00
|
|
|
shgroup, baseinst_location, DRW_UNIFORM_BASE_INSTANCE, NULL, 0, 0, 1);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (model_ubo_location != -1) {
|
|
|
|
drw_shgroup_uniform_create_ex(
|
2020-06-03 13:03:36 +02:00
|
|
|
shgroup, model_ubo_location, DRW_UNIFORM_BLOCK_OBMATS, NULL, 0, 0, 1);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
else {
|
2020-06-03 10:58:05 +02:00
|
|
|
/* NOTE: This is only here to support the old hardware fallback where uniform buffers are still
|
|
|
|
* too slow or buggy. */
|
2019-05-31 01:45:41 +02:00
|
|
|
int model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
|
|
|
|
int modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
|
|
|
|
if (model != -1) {
|
2020-06-03 13:03:36 +02:00
|
|
|
drw_shgroup_uniform_create_ex(shgroup, model, DRW_UNIFORM_MODEL_MATRIX, NULL, 0, 0, 1);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
if (modelinverse != -1) {
|
|
|
|
drw_shgroup_uniform_create_ex(
|
2020-06-03 13:03:36 +02:00
|
|
|
shgroup, modelinverse, DRW_UNIFORM_MODEL_MATRIX_INVERSE, NULL, 0, 0, 1);
|
2019-05-31 01:45:41 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (info_ubo_location != -1) {
|
|
|
|
drw_shgroup_uniform_create_ex(
|
2020-06-03 13:03:36 +02:00
|
|
|
shgroup, info_ubo_location, DRW_UNIFORM_BLOCK_OBINFOS, NULL, 0, 0, 1);
|
2019-05-31 01:45:41 +02:00
|
|
|
|
|
|
|
/* Abusing this location to tell the shgroup we need the obinfos. */
|
|
|
|
shgroup->objectinfo = 1;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
shgroup->objectinfo = 0;
|
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-03-09 19:52:37 +01:00
|
|
|
if (view_ubo_location != -1) {
|
2019-02-23 18:31:45 +11:00
|
|
|
drw_shgroup_uniform_create_ex(
|
2020-06-03 13:03:36 +02:00
|
|
|
shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, 0, 0, 1);
|
2018-03-09 19:52:37 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-05-11 14:04:18 +02:00
|
|
|
/* Not supported. */
|
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV) == -1);
|
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW) == -1);
|
2019-05-13 17:56:20 +02:00
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL) == -1);
|
2020-06-03 10:54:14 +02:00
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEW) == -1);
|
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEW_INV) == -1);
|
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEWPROJECTION) == -1);
|
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEWPROJECTION_INV) == -1);
|
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_PROJECTION) == -1);
|
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_PROJECTION_INV) == -1);
|
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CLIPPLANES) == -1);
|
2020-06-03 10:58:05 +02:00
|
|
|
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP) == -1);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
|
|
|
|
{
|
2019-05-07 18:01:14 +02:00
|
|
|
DRWShadingGroup *shgroup = BLI_memblock_alloc(DST.vmempool->shgroups);
|
2018-02-28 01:16:23 +01:00
|
|
|
|
|
|
|
BLI_LINKS_APPEND(&pass->shgroups, shgroup);
|
|
|
|
|
|
|
|
shgroup->shader = shader;
|
2019-05-31 01:45:41 +02:00
|
|
|
shgroup->cmd.first = NULL;
|
|
|
|
shgroup->cmd.last = NULL;
|
|
|
|
shgroup->pass_handle = pass->handle;
|
2018-02-28 01:16:23 +01:00
|
|
|
|
|
|
|
return shgroup;
|
|
|
|
}
|
|
|
|
|
|
|
|
static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
|
|
|
|
{
|
|
|
|
if (!gpupass) {
|
|
|
|
/* Shader compilation error */
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2018-06-07 11:58:15 +02:00
|
|
|
GPUShader *sh = GPU_pass_shader_get(gpupass);
|
|
|
|
|
|
|
|
if (!sh) {
|
|
|
|
/* Shader not yet compiled */
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
|
2018-02-28 01:16:23 +01:00
|
|
|
return grp;
|
|
|
|
}
|
|
|
|
|
2020-02-27 13:55:29 +01:00
|
|
|
static void drw_shgroup_material_texture(DRWShadingGroup *grp,
|
2020-07-29 18:13:19 +02:00
|
|
|
GPUTexture *gputex,
|
2020-02-27 13:55:29 +01:00
|
|
|
const char *name,
|
2020-07-29 18:13:19 +02:00
|
|
|
eGPUSamplerState state)
|
2020-02-27 13:55:29 +01:00
|
|
|
{
|
2020-06-08 10:58:45 +02:00
|
|
|
DRW_shgroup_uniform_texture_ex(grp, name, gputex, state);
|
2020-02-27 13:55:29 +01:00
|
|
|
|
|
|
|
GPUTexture **gputex_ref = BLI_memblock_alloc(DST.vmempool->images);
|
|
|
|
*gputex_ref = gputex;
|
|
|
|
GPU_texture_ref(gputex);
|
|
|
|
}
|
|
|
|
|
2020-06-02 16:58:07 +02:00
|
|
|
void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, struct GPUMaterial *material)
|
2018-02-28 01:16:23 +01:00
|
|
|
{
|
2020-02-14 10:47:20 +01:00
|
|
|
ListBase textures = GPU_material_textures(material);
|
|
|
|
|
|
|
|
/* Bind all textures needed by the material. */
|
2020-04-03 19:15:01 +02:00
|
|
|
LISTBASE_FOREACH (GPUMaterialTexture *, tex, &textures) {
|
2020-02-14 10:47:20 +01:00
|
|
|
if (tex->ima) {
|
|
|
|
/* Image */
|
2020-07-29 18:13:19 +02:00
|
|
|
GPUTexture *gputex;
|
2020-02-27 13:55:29 +01:00
|
|
|
if (tex->tiled_mapping_name[0]) {
|
2020-07-29 18:13:19 +02:00
|
|
|
gputex = BKE_image_get_gpu_tiles(tex->ima, tex->iuser, NULL);
|
|
|
|
drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state);
|
|
|
|
gputex = BKE_image_get_gpu_tilemap(tex->ima, tex->iuser, NULL);
|
|
|
|
drw_shgroup_material_texture(grp, gputex, tex->tiled_mapping_name, tex->sampler_state);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
2018-08-13 18:44:27 +02:00
|
|
|
else {
|
2020-07-29 18:13:19 +02:00
|
|
|
gputex = BKE_image_get_gpu_texture(tex->ima, tex->iuser, NULL);
|
|
|
|
drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state);
|
2018-08-13 18:44:27 +02:00
|
|
|
}
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
2020-02-14 10:47:20 +01:00
|
|
|
else if (tex->colorband) {
|
|
|
|
/* Color Ramp */
|
2020-06-03 09:49:54 +02:00
|
|
|
DRW_shgroup_uniform_texture(grp, tex->sampler_name, *tex->colorband);
|
2020-02-14 10:47:20 +01:00
|
|
|
}
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
2020-08-20 23:09:37 +02:00
|
|
|
GPUUniformBuf *ubo = GPU_material_uniform_buffer_get(material);
|
2018-02-28 01:16:23 +01:00
|
|
|
if (ubo != NULL) {
|
|
|
|
DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
|
|
|
|
}
|
2020-08-05 19:14:40 +03:00
|
|
|
|
|
|
|
GPUUniformAttrList *uattrs = GPU_material_uniform_attributes(material);
|
|
|
|
if (uattrs != NULL) {
|
|
|
|
int loc = GPU_shader_get_uniform_block_binding(grp->shader, GPU_ATTRIBUTE_UBO_BLOCK_NAME);
|
|
|
|
drw_shgroup_uniform_create_ex(grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, 0, 0, 1);
|
|
|
|
grp->uniform_attrs = uattrs;
|
|
|
|
}
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
2019-01-29 07:46:25 +11:00
|
|
|
GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[],
|
|
|
|
int arraysize)
|
2018-02-28 01:16:23 +01:00
|
|
|
{
|
2018-07-18 00:12:21 +02:00
|
|
|
GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");
|
2018-02-28 01:16:23 +01:00
|
|
|
|
2019-09-08 00:12:26 +10:00
|
|
|
for (int i = 0; i < arraysize; i++) {
|
2019-01-29 07:46:25 +11:00
|
|
|
GPU_vertformat_attr_add(format,
|
|
|
|
attrs[i].name,
|
|
|
|
(attrs[i].type == DRW_ATTR_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
|
|
|
|
attrs[i].components,
|
|
|
|
(attrs[i].type == DRW_ATTR_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
return format;
|
|
|
|
}
|
|
|
|
|
|
|
|
DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass)
|
|
|
|
{
|
|
|
|
GPUPass *gpupass = GPU_material_get_pass(material);
|
|
|
|
DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
|
|
|
|
|
|
|
|
if (shgroup) {
|
2018-06-07 11:58:15 +02:00
|
|
|
drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
|
2020-06-02 16:58:07 +02:00
|
|
|
DRW_shgroup_add_material_resources(shgroup, material);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
return shgroup;
|
|
|
|
}
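/* Usage sketch (assumptions flagged): `gpumat` would come from the engine's material
 * evaluation (e.g. a GPU_material_from_nodetree() wrapper), while `psl->surface_ps`, `geom`
 * and `ob` are hypothetical engine data. A NULL return means the shader failed to compile or
 * is still compiling, so the drawcall must be skipped for this redraw. */
#if 0
  DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, psl->surface_ps);
  if (grp != NULL) {
    DRW_shgroup_call(grp, geom, ob);
  }
#endif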
|
|
|
|
|
|
|
|
DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
|
|
|
|
{
|
|
|
|
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
|
2018-03-25 18:27:32 +02:00
|
|
|
drw_shgroup_init(shgroup, shader);
|
2018-02-28 01:16:23 +01:00
|
|
|
return shgroup;
|
|
|
|
}
|
|
|
|
|
2018-07-18 00:12:21 +02:00
|
|
|
DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
|
|
|
|
DRWPass *pass,
|
|
|
|
GPUVertBuf *tf_target)
|
2018-05-16 12:47:15 +02:00
|
|
|
{
|
2018-05-29 12:11:03 +02:00
|
|
|
BLI_assert(tf_target != NULL);
|
2018-05-16 12:47:15 +02:00
|
|
|
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
|
|
|
|
drw_shgroup_init(shgroup, shader);
|
2020-06-03 13:03:36 +02:00
|
|
|
drw_shgroup_uniform_create_ex(shgroup, 0, DRW_UNIFORM_TFEEDBACK_TARGET, tf_target, 0, 0, 1);
|
2018-05-16 12:47:15 +02:00
|
|
|
return shgroup;
|
|
|
|
}
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
/**
|
|
|
|
* State is added to #Pass.state while drawing.
|
|
|
|
* Use to temporarily enable draw options.
|
|
|
|
*/
|
|
|
|
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
|
|
|
|
{
|
2019-05-31 01:45:41 +02:00
|
|
|
drw_command_set_mutable_state(shgroup, state, 0x0);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
|
|
|
|
{
|
2019-05-31 01:45:41 +02:00
|
|
|
drw_command_set_mutable_state(shgroup, 0x0, state);
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
2020-03-09 16:27:24 +01:00
|
|
|
void DRW_shgroup_stencil_set(DRWShadingGroup *shgroup,
|
|
|
|
uint write_mask,
|
|
|
|
uint reference,
|
2020-03-11 17:07:43 +01:00
|
|
|
uint compare_mask)
|
2020-03-09 16:27:24 +01:00
|
|
|
{
|
2020-03-11 17:07:43 +01:00
|
|
|
drw_command_set_stencil_mask(shgroup, write_mask, reference, compare_mask);
|
2020-03-09 16:27:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* TODO remove this function. */
|
2018-05-11 07:48:52 +02:00
|
|
|
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
|
2018-02-28 01:16:23 +01:00
|
|
|
{
|
2020-03-09 16:27:24 +01:00
|
|
|
drw_command_set_stencil_mask(shgroup, 0xFF, mask, 0xFF);
|
2019-06-18 14:00:19 +02:00
|
|
|
}
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
void DRW_shgroup_clear_framebuffer(DRWShadingGroup *shgroup,
|
|
|
|
eGPUFrameBufferBits channels,
|
|
|
|
uchar r,
|
|
|
|
uchar g,
|
|
|
|
uchar b,
|
|
|
|
uchar a,
|
|
|
|
float depth,
|
|
|
|
uchar stencil)
|
2019-06-26 13:28:26 +02:00
|
|
|
{
|
2019-05-31 01:45:41 +02:00
|
|
|
drw_command_clear(shgroup, channels, r, g, b, a, depth, stencil);
|
2019-09-13 23:02:45 +02:00
|
|
|
}
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
|
2019-09-13 23:02:45 +02:00
|
|
|
{
|
2019-05-31 01:45:41 +02:00
|
|
|
DRWCommandChunk *chunk = shgroup->cmd.first;
|
|
|
|
for (; chunk; chunk = chunk->next) {
|
|
|
|
for (int i = 0; i < chunk->command_used; i++) {
|
|
|
|
if (command_type_get(chunk->command_type, i) <= DRW_MAX_DRAW_CMD_TYPE) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
2019-06-26 13:28:26 +02:00
|
|
|
}
|
|
|
|
|
2018-10-11 15:50:46 +02:00
|
|
|
DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
|
|
|
|
{
|
2019-05-07 18:01:14 +02:00
|
|
|
DRWShadingGroup *shgroup_new = BLI_memblock_alloc(DST.vmempool->shgroups);
|
2018-10-11 15:50:46 +02:00
|
|
|
|
|
|
|
*shgroup_new = *shgroup;
|
2019-05-31 01:45:41 +02:00
|
|
|
drw_shgroup_init(shgroup_new, shgroup_new->shader);
|
|
|
|
shgroup_new->cmd.first = NULL;
|
|
|
|
shgroup_new->cmd.last = NULL;
|
2018-10-11 15:50:46 +02:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
DRWPass *parent_pass = DRW_memblock_elem_from_handle(DST.vmempool->passes,
|
|
|
|
&shgroup->pass_handle);
|
|
|
|
|
|
|
|
BLI_LINKS_INSERT_AFTER(&parent_pass->shgroups, shgroup, shgroup_new);
|
2018-10-11 15:50:46 +02:00
|
|
|
|
2018-10-17 19:01:56 +02:00
|
|
|
return shgroup_new;
|
2018-10-11 15:50:46 +02:00
|
|
|
}
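/* Usage sketch: a sub-group inherits the parent's shader, state and uniforms but records its
 * own command list, which makes cheap per-object overrides possible. The names `grp`, `geom`,
 * `ob` and the uniform "color" are assumptions about the calling engine and its shader. */
#if 0
  DRWShadingGroup *sub = DRW_shgroup_create_sub(grp);
  const float color[4] = {1.0f, 0.0f, 0.0f, 1.0f};
  DRW_shgroup_uniform_vec4_copy(sub, "color", color);
  DRW_shgroup_call(sub, geom, ob);
#endif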
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
/** \} */
|
|
|
|
|
2019-05-20 18:01:42 +02:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
/** \name View (DRW_view)
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
/* Extract the 8 corners from a Projection Matrix.
|
|
|
|
* Although less accurate, this solution can be simplified as follows:
|
|
|
|
* BKE_boundbox_init_from_minmax(&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const
|
|
|
|
* float[3]){1.0f, 1.0f, 1.0f}); for (int i = 0; i < 8; i++) {mul_project_m4_v3(projinv,
|
|
|
|
* bbox.vec[i]);}
|
|
|
|
*/
|
|
|
|
static void draw_frustum_boundbox_calc(const float (*viewinv)[4],
|
|
|
|
const float (*projmat)[4],
|
|
|
|
BoundBox *r_bbox)
|
|
|
|
{
|
|
|
|
float left, right, bottom, top, near, far;
|
|
|
|
bool is_persp = projmat[3][3] == 0.0f;
|
|
|
|
|
|
|
|
#if 0 /* Equivalent to this but it has accuracy problems. */
|
|
|
|
BKE_boundbox_init_from_minmax(
|
|
|
|
&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const float[3]){1.0f, 1.0f, 1.0f});
|
|
|
|
for (int i = 0; i < 8; i++) {
|
|
|
|
mul_project_m4_v3(projinv, bbox.vec[i]);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
projmat_dimensions(projmat, &left, &right, &bottom, &top, &near, &far);
|
|
|
|
|
|
|
|
if (is_persp) {
|
|
|
|
left *= near;
|
|
|
|
right *= near;
|
|
|
|
bottom *= near;
|
|
|
|
top *= near;
|
|
|
|
}
|
|
|
|
|
|
|
|
r_bbox->vec[0][2] = r_bbox->vec[3][2] = r_bbox->vec[7][2] = r_bbox->vec[4][2] = -near;
|
|
|
|
r_bbox->vec[0][0] = r_bbox->vec[3][0] = left;
|
|
|
|
r_bbox->vec[4][0] = r_bbox->vec[7][0] = right;
|
|
|
|
r_bbox->vec[0][1] = r_bbox->vec[4][1] = bottom;
|
|
|
|
r_bbox->vec[7][1] = r_bbox->vec[3][1] = top;
|
|
|
|
|
|
|
|
/* Get the coordinates of the far plane. */
|
|
|
|
if (is_persp) {
|
|
|
|
float sca_far = far / near;
|
|
|
|
left *= sca_far;
|
|
|
|
right *= sca_far;
|
|
|
|
bottom *= sca_far;
|
|
|
|
top *= sca_far;
|
|
|
|
}
|
|
|
|
|
|
|
|
r_bbox->vec[1][2] = r_bbox->vec[2][2] = r_bbox->vec[6][2] = r_bbox->vec[5][2] = -far;
|
|
|
|
r_bbox->vec[1][0] = r_bbox->vec[2][0] = left;
|
|
|
|
r_bbox->vec[6][0] = r_bbox->vec[5][0] = right;
|
|
|
|
r_bbox->vec[1][1] = r_bbox->vec[5][1] = bottom;
|
|
|
|
r_bbox->vec[2][1] = r_bbox->vec[6][1] = top;
|
|
|
|
|
|
|
|
/* Transform into world space. */
|
|
|
|
for (int i = 0; i < 8; i++) {
|
|
|
|
mul_m4_v3(viewinv, r_bbox->vec[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-12-18 09:41:03 -03:00
|
|
|
static void draw_frustum_culling_planes_calc(const float (*persmat)[4], float (*frustum_planes)[4])
|
2019-05-20 18:01:42 +02:00
|
|
|
{
|
2019-12-18 09:41:03 -03:00
|
|
|
planes_from_projmat(persmat,
|
|
|
|
frustum_planes[0],
|
|
|
|
frustum_planes[5],
|
|
|
|
frustum_planes[1],
|
2021-06-21 17:25:09 +10:00
|
|
|
frustum_planes[3],
|
2019-12-18 09:41:03 -03:00
|
|
|
frustum_planes[4],
|
|
|
|
frustum_planes[2]);
|
2019-05-20 18:01:42 +02:00
|
|
|
|
2019-12-18 09:41:03 -03:00
|
|
|
/* Normalize. */
|
2019-05-20 18:01:42 +02:00
|
|
|
for (int p = 0; p < 6; p++) {
|
2019-12-18 09:41:03 -03:00
|
|
|
frustum_planes[p][3] /= normalize_v3(frustum_planes[p]);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void draw_frustum_bound_sphere_calc(const BoundBox *bbox,
|
|
|
|
const float (*viewinv)[4],
|
|
|
|
const float (*projmat)[4],
|
|
|
|
const float (*projinv)[4],
|
|
|
|
BoundSphere *bsphere)
|
|
|
|
{
|
|
|
|
/* Extract Bounding Sphere */
|
|
|
|
if (projmat[3][3] != 0.0f) {
|
|
|
|
/* Orthographic */
|
|
|
|
/* The most extreme points on the near and far plane. (normalized device coords). */
|
|
|
|
const float *nearpoint = bbox->vec[0];
|
|
|
|
const float *farpoint = bbox->vec[6];
|
|
|
|
|
|
|
|
/* just use median point */
|
|
|
|
mid_v3_v3v3(bsphere->center, farpoint, nearpoint);
|
|
|
|
bsphere->radius = len_v3v3(bsphere->center, farpoint);
|
|
|
|
}
|
|
|
|
else if (projmat[2][0] == 0.0f && projmat[2][1] == 0.0f) {
|
|
|
|
/* Perspective with symmetrical frustum. */
|
|
|
|
|
|
|
|
/* We obtain the center and radius of the circumscribed circle of the
|
|
|
|
* isosceles trapezoid formed by the diagonals of the near and far clipping planes. */
|
|
|
|
|
|
|
|
/* center of each clipping plane */
|
|
|
|
float mid_min[3], mid_max[3];
|
|
|
|
mid_v3_v3v3(mid_min, bbox->vec[3], bbox->vec[4]);
|
|
|
|
mid_v3_v3v3(mid_max, bbox->vec[2], bbox->vec[5]);
|
|
|
|
|
|
|
|
/* square length of the diagonals of each clipping plane */
|
|
|
|
float a_sq = len_squared_v3v3(bbox->vec[3], bbox->vec[4]);
|
|
|
|
float b_sq = len_squared_v3v3(bbox->vec[2], bbox->vec[5]);
|
|
|
|
|
|
|
|
/* distance squared between clipping planes */
|
|
|
|
float h_sq = len_squared_v3v3(mid_min, mid_max);
|
|
|
|
|
|
|
|
float fac = (4 * h_sq + b_sq - a_sq) / (8 * h_sq);
|
|
|
|
|
|
|
|
/* The goal is to get the smallest sphere,
|
|
|
|
* not the sphere that passes through each corner */
|
|
|
|
CLAMP(fac, 0.0f, 1.0f);
|
|
|
|
|
|
|
|
interp_v3_v3v3(bsphere->center, mid_min, mid_max, fac);
|
|
|
|
|
|
|
|
/* distance from the center to one of the points of the far plane (1, 2, 5, 6) */
|
|
|
|
bsphere->radius = len_v3v3(bsphere->center, bbox->vec[1]);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/* Perspective with asymmetrical frustum. */
|
|
|
|
|
|
|
|
/* We put the sphere center on the line that goes from origin
|
|
|
|
* to the center of the far clipping plane. */
|
|
|
|
|
|
|
|
/* Detect which corner of the far clipping plane is the farthest from the origin. */
|
|
|
|
float nfar[4]; /* most extreme far point in NDC space */
|
2021-02-05 16:23:34 +11:00
|
|
|
float farxy[2]; /* far-point projection onto the near plane */
|
2019-05-20 18:01:42 +02:00
|
|
|
float farpoint[3] = {0.0f}; /* most extreme far point in camera coordinate */
|
|
|
|
float nearpoint[3]; /* most extreme near point in camera coordinate */
|
2021-02-05 16:23:34 +11:00
|
|
|
float farcenter[3] = {0.0f}; /* center of far clipping plane in camera coordinate */
|
2019-05-20 18:01:42 +02:00
|
|
|
float F = -1.0f, N; /* square distance of far and near point to origin */
|
|
|
|
float f, n; /* distance of far and near point to z axis. f is always > 0 but n can be < 0 */
|
|
|
|
float e, s; /* far and near clipping distance (<0) */
|
|
|
|
float c; /* slope of center line = distance of far clipping center
|
|
|
|
* to z axis / far clipping distance. */
|
|
|
|
float z; /* projection of sphere center on z axis (<0) */
|
|
|
|
|
|
|
|
/* Find farthest corner and center of far clip plane. */
|
|
|
|
float corner[3] = {1.0f, 1.0f, 1.0f}; /* in clip space */
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
float point[3];
|
|
|
|
mul_v3_project_m4_v3(point, projinv, corner);
|
|
|
|
float len = len_squared_v3(point);
|
|
|
|
if (len > F) {
|
|
|
|
copy_v3_v3(nfar, corner);
|
|
|
|
copy_v3_v3(farpoint, point);
|
|
|
|
F = len;
|
|
|
|
}
|
|
|
|
add_v3_v3(farcenter, point);
|
|
|
|
/* Rotate by 90 degrees to walk through the 4 points of the far clip plane. */
|
|
|
|
float tmp = corner[0];
|
|
|
|
corner[0] = -corner[1];
|
|
|
|
corner[1] = tmp;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* the far center is the average of the far clipping points */
|
|
|
|
mul_v3_fl(farcenter, 0.25f);
|
|
|
|
/* the extreme near point is the opposite point on the near clipping plane */
|
|
|
|
copy_v3_fl3(nfar, -nfar[0], -nfar[1], -1.0f);
|
|
|
|
mul_v3_project_m4_v3(nearpoint, projinv, nfar);
|
|
|
|
/* this is a frustum projection */
|
|
|
|
N = len_squared_v3(nearpoint);
|
|
|
|
e = farpoint[2];
|
|
|
|
s = nearpoint[2];
|
|
|
|
/* distance to view Z axis */
|
|
|
|
f = len_v2(farpoint);
|
|
|
|
/* get corresponding point on the near plane */
|
|
|
|
mul_v2_v2fl(farxy, farpoint, s / e);
|
|
|
|
/* This formula preserves the sign of n. */
|
|
|
|
sub_v2_v2(nearpoint, farxy);
|
|
|
|
n = f * s / e - len_v2(nearpoint);
|
|
|
|
c = len_v2(farcenter) / e;
|
|
|
|
/* the big formula, it simplifies to (F-N)/(2(e-s)) for the symmetric case */
|
|
|
|
z = (F - N) / (2.0f * (e - s + c * (f - n)));
|
|
|
|
|
|
|
|
bsphere->center[0] = farcenter[0] * z / e;
|
|
|
|
bsphere->center[1] = farcenter[1] * z / e;
|
|
|
|
bsphere->center[2] = z;
|
|
|
|
bsphere->radius = len_v3v3(bsphere->center, farpoint);
|
|
|
|
|
|
|
|
/* Transform to world space. */
|
|
|
|
mul_m4_v3(viewinv, bsphere->center);
|
|
|
|
}
|
|
|
|
}
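/* Derivation sketch for the `fac` used in the symmetrical-frustum case above (own working,
 * shown for reference only): place the sphere center at distance t from the near-plane
 * mid-point, along the line towards the far-plane mid-point. Equating the squared distances
 * to a near corner and to a far corner, with half-diagonals a/2 and b/2 separated by h, gives
 *   t^2 + (a^2)/4 = (h - t)^2 + (b^2)/4
 *   =>  2*h*t = h^2 + (b^2 - a^2)/4
 *   =>  t/h = (4*h^2 + b^2 - a^2) / (8*h^2)
 * which matches fac = (4 * h_sq + b_sq - a_sq) / (8 * h_sq), then clamped to [0, 1] so the
 * center never leaves the segment between the two plane centers. */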
|
|
|
|
|
2019-05-21 23:56:55 +02:00
|
|
|
static void draw_view_matrix_state_update(DRWViewUboStorage *storage,
|
|
|
|
const float viewmat[4][4],
|
|
|
|
const float winmat[4][4])
|
2019-05-20 18:01:42 +02:00
|
|
|
{
|
2019-05-21 23:56:55 +02:00
|
|
|
copy_m4_m4(storage->viewmat, viewmat);
|
|
|
|
invert_m4_m4(storage->viewinv, storage->viewmat);
|
2019-05-20 18:01:42 +02:00
|
|
|
|
2019-05-21 23:56:55 +02:00
|
|
|
copy_m4_m4(storage->winmat, winmat);
|
|
|
|
invert_m4_m4(storage->wininv, storage->winmat);
|
2019-05-20 18:01:42 +02:00
|
|
|
|
2019-05-21 23:56:55 +02:00
|
|
|
mul_m4_m4m4(storage->persmat, winmat, viewmat);
|
|
|
|
invert_m4_m4(storage->persinv, storage->persmat);
|
2020-07-15 14:35:57 +02:00
|
|
|
|
|
|
|
const bool is_persp = (winmat[3][3] == 0.0f);
|
|
|
|
|
|
|
|
/* Near clip distance. */
|
|
|
|
storage->viewvecs[0][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] - 1.0f) :
|
|
|
|
-(winmat[3][2] + 1.0f) / winmat[2][2];
|
|
|
|
|
|
|
|
/* Far clip distance. */
|
|
|
|
storage->viewvecs[1][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] + 1.0f) :
|
|
|
|
-(winmat[3][2] - 1.0f) / winmat[2][2];
|
|
|
|
|
|
|
|
/* view vectors for the corners of the view frustum.
|
|
|
|
* Can be used to recreate the world space position easily */
|
|
|
|
float view_vecs[4][3] = {
|
|
|
|
{-1.0f, -1.0f, -1.0f},
|
|
|
|
{1.0f, -1.0f, -1.0f},
|
|
|
|
{-1.0f, 1.0f, -1.0f},
|
|
|
|
{-1.0f, -1.0f, 1.0f},
|
|
|
|
};
|
|
|
|
|
|
|
|
/* convert the view vectors to view space */
|
|
|
|
for (int i = 0; i < 4; i++) {
|
|
|
|
mul_project_m4_v3(storage->wininv, view_vecs[i]);
|
|
|
|
/* normalized trick see:
|
|
|
|
* http://www.derschmale.com/2014/01/26/reconstructing-positions-from-the-depth-buffer */
|
|
|
|
if (is_persp) {
|
|
|
|
/* Divide XY by Z. */
|
|
|
|
mul_v2_fl(view_vecs[i], 1.0f / view_vecs[i][2]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* If ortho : view_vecs[0] is the near-bottom-left corner of the frustum and
|
|
|
|
* view_vecs[1] is the vector going from the near-bottom-left corner to
|
|
|
|
* the far-top-right corner.
|
|
|
|
* If Persp : view_vecs[0].xy and view_vecs[1].xy are respectively the bottom-left corner
|
|
|
|
* when Z = 1, and top-left corner if Z = 1.
|
|
|
|
* view_vecs[0].z the near clip distance and view_vecs[1].z is the (signed)
|
|
|
|
* distance from the near plane to the far clip plane.
|
|
|
|
*/
|
|
|
|
copy_v3_v3(storage->viewvecs[0], view_vecs[0]);
|
|
|
|
|
|
|
|
/* we need to store the differences */
|
|
|
|
storage->viewvecs[1][0] = view_vecs[1][0] - view_vecs[0][0];
|
|
|
|
storage->viewvecs[1][1] = view_vecs[2][1] - view_vecs[0][1];
|
|
|
|
storage->viewvecs[1][2] = view_vecs[3][2] - view_vecs[0][2];
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Create a view with culling. */
|
|
|
|
DRWView *DRW_view_create(const float viewmat[4][4],
|
|
|
|
const float winmat[4][4],
|
|
|
|
const float (*culling_viewmat)[4],
|
|
|
|
const float (*culling_winmat)[4],
|
|
|
|
DRWCallVisibilityFn *visibility_fn)
|
|
|
|
{
|
|
|
|
DRWView *view = BLI_memblock_alloc(DST.vmempool->views);
|
|
|
|
|
|
|
|
if (DST.primary_view_ct < MAX_CULLED_VIEWS) {
|
|
|
|
view->culling_mask = 1u << DST.primary_view_ct++;
|
|
|
|
}
|
|
|
|
else {
|
2019-05-21 17:53:49 +02:00
|
|
|
BLI_assert(0);
|
2019-05-20 18:01:42 +02:00
|
|
|
view->culling_mask = 0u;
|
|
|
|
}
|
|
|
|
view->clip_planes_len = 0;
|
|
|
|
view->visibility_fn = visibility_fn;
|
|
|
|
view->parent = NULL;
|
|
|
|
|
2019-05-22 16:09:03 +02:00
|
|
|
copy_v4_fl4(view->storage.viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f);
|
2019-05-20 18:01:42 +02:00
|
|
|
|
|
|
|
DRW_view_update(view, viewmat, winmat, culling_viewmat, culling_winmat);
|
|
|
|
|
|
|
|
return view;
|
|
|
|
}
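/* Usage sketch: creating an engine-owned view from the active matrices (e.g. for a shadow or
 * custom-projection pass). Passing NULL to the getters reads the default view; the culling
 * matrices and the visibility callback are optional. This is a sketch of typical engine code,
 * not the only valid pattern. */
#if 0
  float viewmat[4][4], winmat[4][4];
  DRW_view_viewmat_get(NULL, viewmat, false);
  DRW_view_winmat_get(NULL, winmat, false);

  DRWView *view = DRW_view_create(viewmat, winmat, NULL, NULL, NULL);
  /* Matrices can be refreshed later in the same redraw with DRW_view_update(). */
  DRW_view_update(view, viewmat, winmat, NULL, NULL);
#endif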
|
|
|
|
|
|
|
|
/* Create a view with culling done by another view. */
|
|
|
|
DRWView *DRW_view_create_sub(const DRWView *parent_view,
|
|
|
|
const float viewmat[4][4],
|
|
|
|
const float winmat[4][4])
|
|
|
|
{
|
2019-12-02 01:40:58 +01:00
|
|
|
/* Search original parent. */
|
|
|
|
const DRWView *ori_view = parent_view;
|
|
|
|
while (ori_view->parent != NULL) {
|
|
|
|
ori_view = ori_view->parent;
|
|
|
|
}
|
2019-05-20 18:01:42 +02:00
|
|
|
|
|
|
|
DRWView *view = BLI_memblock_alloc(DST.vmempool->views);
|
|
|
|
|
|
|
|
/* Perform copy. */
|
2019-12-02 01:40:58 +01:00
|
|
|
*view = *ori_view;
|
|
|
|
view->parent = (DRWView *)ori_view;
|
2019-05-20 18:01:42 +02:00
|
|
|
|
|
|
|
DRW_view_update_sub(view, viewmat, winmat);
|
|
|
|
|
|
|
|
return view;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* DRWView Update:
|
|
|
|
* This is meant to be done on existing views when rendering in a loop and there is no
|
|
|
|
* need to allocate more DRWViews.
|
2021-01-04 12:00:18 +11:00
|
|
|
*/
|
2019-05-20 18:01:42 +02:00
|
|
|
|
|
|
|
/* Update matrices of a view created with DRW_view_create_sub. */
|
|
|
|
void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float winmat[4][4])
|
|
|
|
{
|
|
|
|
BLI_assert(view->parent != NULL);
|
|
|
|
|
|
|
|
view->is_dirty = true;
|
2020-02-19 19:36:12 +01:00
|
|
|
view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat));
|
2019-05-20 18:01:42 +02:00
|
|
|
|
2019-05-21 23:56:55 +02:00
|
|
|
draw_view_matrix_state_update(&view->storage, viewmat, winmat);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Update matrices of a view created with DRW_view_create. */
|
|
|
|
void DRW_view_update(DRWView *view,
|
|
|
|
const float viewmat[4][4],
|
|
|
|
const float winmat[4][4],
|
|
|
|
const float (*culling_viewmat)[4],
|
|
|
|
const float (*culling_winmat)[4])
|
|
|
|
{
|
|
|
|
/* DO NOT UPDATE THE DEFAULT VIEW.
|
2020-09-07 15:57:12 +10:00
|
|
|
* Create sub-views instead, or a copy. */
|
2019-05-20 18:01:42 +02:00
|
|
|
BLI_assert(view != DST.view_default);
|
|
|
|
BLI_assert(view->parent == NULL);
|
|
|
|
|
|
|
|
view->is_dirty = true;
|
2020-02-19 19:36:12 +01:00
|
|
|
view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat));
|
2019-05-20 18:01:42 +02:00
|
|
|
|
2019-05-21 23:56:55 +02:00
|
|
|
draw_view_matrix_state_update(&view->storage, viewmat, winmat);
|
2019-05-20 18:01:42 +02:00
|
|
|
|
|
|
|
/* Prepare frustum culling. */
|
|
|
|
|
|
|
|
#ifdef DRW_DEBUG_CULLING
|
|
|
|
static float mv[MAX_CULLED_VIEWS][4][4], mw[MAX_CULLED_VIEWS][4][4];
|
|
|
|
|
|
|
|
/* Select view here. */
|
|
|
|
if (view->culling_mask != 0) {
|
|
|
|
uint index = bitscan_forward_uint(view->culling_mask);
|
|
|
|
|
|
|
|
if (G.debug_value == 0) {
|
|
|
|
copy_m4_m4(mv[index], culling_viewmat ? culling_viewmat : viewmat);
|
|
|
|
copy_m4_m4(mw[index], culling_winmat ? culling_winmat : winmat);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
culling_winmat = mw[index];
|
|
|
|
culling_viewmat = mv[index];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
float wininv[4][4];
|
|
|
|
if (culling_winmat) {
|
|
|
|
winmat = culling_winmat;
|
|
|
|
invert_m4_m4(wininv, winmat);
|
|
|
|
}
|
|
|
|
else {
|
2019-05-21 23:56:55 +02:00
|
|
|
copy_m4_m4(wininv, view->storage.wininv);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
float viewinv[4][4];
|
|
|
|
if (culling_viewmat) {
|
|
|
|
viewmat = culling_viewmat;
|
|
|
|
invert_m4_m4(viewinv, viewmat);
|
|
|
|
}
|
|
|
|
else {
|
2019-05-21 23:56:55 +02:00
|
|
|
copy_m4_m4(viewinv, view->storage.viewinv);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
draw_frustum_boundbox_calc(viewinv, winmat, &view->frustum_corners);
|
2019-12-18 09:41:03 -03:00
|
|
|
draw_frustum_culling_planes_calc(view->storage.persmat, view->frustum_planes);
|
2019-05-20 18:01:42 +02:00
|
|
|
draw_frustum_bound_sphere_calc(
|
|
|
|
&view->frustum_corners, viewinv, winmat, wininv, &view->frustum_bsphere);
|
|
|
|
|
|
|
|
#ifdef DRW_DEBUG_CULLING
|
|
|
|
if (G.debug_value != 0) {
|
|
|
|
DRW_debug_sphere(
|
|
|
|
view->frustum_bsphere.center, view->frustum_bsphere.radius, (const float[4]){1, 1, 0, 1});
|
|
|
|
DRW_debug_bbox(&view->frustum_corners, (const float[4]){1, 1, 0, 1});
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return default view if it is a viewport render. */
|
|
|
|
const DRWView *DRW_view_default_get(void)
|
|
|
|
{
|
|
|
|
return DST.view_default;
|
|
|
|
}
|
|
|
|
|
2020-06-19 17:02:55 +02:00
|
|
|
/* WARNING: Only use in render AND only if you are going to set view_default again. */
|
|
|
|
void DRW_view_reset(void)
|
|
|
|
{
|
|
|
|
DST.view_default = NULL;
|
|
|
|
DST.view_active = NULL;
|
|
|
|
DST.view_previous = NULL;
|
|
|
|
}
|
|
|
|
|
2019-05-20 18:01:42 +02:00
|
|
|
/* MUST only be called once per render and only in render mode. Sets default view. */
|
|
|
|
void DRW_view_default_set(DRWView *view)
|
|
|
|
{
|
|
|
|
BLI_assert(DST.view_default == NULL);
|
|
|
|
DST.view_default = view;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
|
|
|
|
* and if the shaders have support for it (see usage of gl_ClipDistance).
|
2019-05-21 17:53:49 +02:00
|
|
|
* NOTE: planes must be in world space.
|
2019-05-20 18:01:42 +02:00
|
|
|
*/
|
|
|
|
void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len)
|
|
|
|
{
|
|
|
|
BLI_assert(plane_len <= MAX_CLIP_PLANES);
|
|
|
|
view->clip_planes_len = plane_len;
|
|
|
|
if (plane_len > 0) {
|
2020-08-08 13:29:21 +10:00
|
|
|
memcpy(view->storage.clipplanes, planes, sizeof(float[4]) * plane_len);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
}
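/* Usage sketch: clip planes are plain world-space plane equations. One convenient way to build
 * one is from a point and a normal via plane_from_point_normal_v3() from BLI_math; `view` and
 * the chosen point/normal here are hypothetical. */
#if 0
  float plane[1][4];
  const float co[3] = {0.0f, 0.0f, 0.0f};
  const float no[3] = {0.0f, 0.0f, 1.0f};
  plane_from_point_normal_v3(plane[0], co, no);
  DRW_view_clip_planes_set(view, plane, 1);
#endif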
|
|
|
|
|
2019-05-22 16:09:03 +02:00
|
|
|
void DRW_view_camtexco_set(DRWView *view, float texco[4])
|
|
|
|
{
|
|
|
|
copy_v4_v4(view->storage.viewcamtexcofac, texco);
|
|
|
|
}
|
|
|
|
|
2019-05-20 18:01:42 +02:00
|
|
|
/* Return world space frustum corners. */
|
|
|
|
void DRW_view_frustum_corners_get(const DRWView *view, BoundBox *corners)
|
|
|
|
{
|
|
|
|
memcpy(corners, &view->frustum_corners, sizeof(view->frustum_corners));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return world space frustum sides as planes.
|
|
|
|
* See draw_frustum_culling_planes_calc() for the plane order. */
|
|
|
|
void DRW_view_frustum_planes_get(const DRWView *view, float planes[6][4])
|
|
|
|
{
|
|
|
|
memcpy(planes, &view->frustum_planes, sizeof(view->frustum_planes));
|
|
|
|
}
|
|
|
|
|
|
|
|
bool DRW_view_is_persp_get(const DRWView *view)
|
|
|
|
{
|
2019-05-21 21:29:25 +02:00
|
|
|
view = (view) ? view : DST.view_default;
|
2019-05-21 23:56:55 +02:00
|
|
|
return view->storage.winmat[3][3] == 0.0f;
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
float DRW_view_near_distance_get(const DRWView *view)
|
|
|
|
{
|
2019-05-21 21:29:25 +02:00
|
|
|
view = (view) ? view : DST.view_default;
|
2019-05-21 23:56:55 +02:00
|
|
|
const float(*projmat)[4] = view->storage.winmat;
|
2019-05-20 18:01:42 +02:00
|
|
|
|
|
|
|
if (DRW_view_is_persp_get(view)) {
|
|
|
|
return -projmat[3][2] / (projmat[2][2] - 1.0f);
|
|
|
|
}
|
2020-08-07 11:49:59 +02:00
|
|
|
|
|
|
|
return -(projmat[3][2] + 1.0f) / projmat[2][2];
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
float DRW_view_far_distance_get(const DRWView *view)
|
|
|
|
{
|
2019-05-21 21:29:25 +02:00
|
|
|
view = (view) ? view : DST.view_default;
|
2019-05-21 23:56:55 +02:00
|
|
|
const float(*projmat)[4] = view->storage.winmat;
|
2019-05-20 18:01:42 +02:00
|
|
|
|
|
|
|
if (DRW_view_is_persp_get(view)) {
|
|
|
|
return -projmat[3][2] / (projmat[2][2] + 1.0f);
|
|
|
|
}
|
2020-08-07 11:49:59 +02:00
|
|
|
|
|
|
|
return -(projmat[3][2] - 1.0f) / projmat[2][2];
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void DRW_view_viewmat_get(const DRWView *view, float mat[4][4], bool inverse)
|
|
|
|
{
|
2019-05-21 21:29:25 +02:00
|
|
|
view = (view) ? view : DST.view_default;
|
2019-05-21 23:56:55 +02:00
|
|
|
const DRWViewUboStorage *storage = &view->storage;
|
|
|
|
copy_m4_m4(mat, (inverse) ? storage->viewinv : storage->viewmat);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void DRW_view_winmat_get(const DRWView *view, float mat[4][4], bool inverse)
|
|
|
|
{
|
2019-05-21 21:29:25 +02:00
|
|
|
view = (view) ? view : DST.view_default;
|
2019-05-21 23:56:55 +02:00
|
|
|
const DRWViewUboStorage *storage = &view->storage;
|
|
|
|
copy_m4_m4(mat, (inverse) ? storage->wininv : storage->winmat);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void DRW_view_persmat_get(const DRWView *view, float mat[4][4], bool inverse)
|
|
|
|
{
|
2019-05-21 21:29:25 +02:00
|
|
|
view = (view) ? view : DST.view_default;
|
2019-05-21 23:56:55 +02:00
|
|
|
const DRWViewUboStorage *storage = &view->storage;
|
|
|
|
copy_m4_m4(mat, (inverse) ? storage->persinv : storage->persmat);
|
2019-05-20 18:01:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** \} */
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
/** \name Passes (DRW_pass)
|
|
|
|
* \{ */
|
|
|
|
|
|
|
|
DRWPass *DRW_pass_create(const char *name, DRWState state)
|
|
|
|
{
|
2019-05-07 18:01:14 +02:00
|
|
|
DRWPass *pass = BLI_memblock_alloc(DST.vmempool->passes);
|
2019-05-28 17:18:39 +02:00
|
|
|
pass->state = state | DRW_STATE_PROGRAM_POINT_SIZE;
|
2020-09-14 01:07:05 +02:00
|
|
|
if (G.debug & G_DEBUG_GPU) {
|
2018-02-28 01:16:23 +01:00
|
|
|
BLI_strncpy(pass->name, name, MAX_PASS_NAME);
|
|
|
|
}
|
|
|
|
|
|
|
|
pass->shgroups.first = NULL;
|
|
|
|
pass->shgroups.last = NULL;
|
2019-05-31 01:45:41 +02:00
|
|
|
pass->handle = DST.pass_handle;
|
|
|
|
DRW_handle_increment(&DST.pass_handle);
|
2018-02-28 01:16:23 +01:00
|
|
|
|
2020-06-02 16:58:07 +02:00
|
|
|
pass->original = NULL;
|
|
|
|
pass->next = NULL;
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
return pass;
|
|
|
|
}
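/* Usage sketch of the pass/shgroup workflow from an engine's cache_init: a pass groups shading
 * groups that share a draw state, and drawcalls are appended to the groups afterwards. The
 * shader handle `my_shader`, the uniform "opacity" and `geom`/`ob` are hypothetical. */
#if 0
  DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
  DRWPass *pass = DRW_pass_create("My Engine Surface", state);

  DRWShadingGroup *grp = DRW_shgroup_create(my_shader, pass);
  DRW_shgroup_uniform_float_copy(grp, "opacity", 1.0f);
  DRW_shgroup_call(grp, geom, ob);
#endif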
|
|
|
|
|
2020-09-14 01:06:49 +02:00
|
|
|
/* Create an instance of the original pass that will execute the same drawcalls but with its own
|
|
|
|
* DRWState. */
|
2020-06-02 16:58:07 +02:00
|
|
|
DRWPass *DRW_pass_create_instance(const char *name, DRWPass *original, DRWState state)
|
|
|
|
{
|
|
|
|
DRWPass *pass = DRW_pass_create(name, state);
|
|
|
|
pass->original = original;
|
|
|
|
|
|
|
|
return pass;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Link two passes so that they are both rendered if the first one is being drawn. */
|
|
|
|
void DRW_pass_link(DRWPass *first, DRWPass *second)
|
|
|
|
{
|
|
|
|
BLI_assert(first != second);
|
|
|
|
BLI_assert(first->next == NULL);
|
|
|
|
first->next = second;
|
|
|
|
}
|
|
|
|
|
2018-07-30 12:23:23 +02:00
|
|
|
bool DRW_pass_is_empty(DRWPass *pass)
|
|
|
|
{
|
2020-09-14 01:06:49 +02:00
|
|
|
if (pass->original) {
|
|
|
|
return DRW_pass_is_empty(pass->original);
|
|
|
|
}
|
|
|
|
|
2020-04-03 19:15:01 +02:00
|
|
|
LISTBASE_FOREACH (DRWShadingGroup *, shgroup, &pass->shgroups) {
|
2018-07-30 12:23:23 +02:00
|
|
|
if (!DRW_shgroup_is_empty(shgroup)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
void DRW_pass_foreach_shgroup(DRWPass *pass,
|
|
|
|
void (*callback)(void *userData, DRWShadingGroup *shgrp),
|
|
|
|
void *userData)
|
|
|
|
{
|
2020-04-03 19:15:01 +02:00
|
|
|
LISTBASE_FOREACH (DRWShadingGroup *, shgroup, &pass->shgroups) {
|
2018-02-28 01:16:23 +01:00
|
|
|
callback(userData, shgroup);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
static int pass_shgroup_dist_sort(const void *a, const void *b)
|
2018-02-28 01:16:23 +01:00
|
|
|
{
|
|
|
|
const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
|
|
|
|
const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
|
2019-04-17 06:17:24 +02:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
if (shgrp_a->z_sorting.distance < shgrp_b->z_sorting.distance) {
|
2019-03-28 01:11:28 +11:00
|
|
|
return 1;
|
|
|
|
}
|
2020-08-07 11:49:59 +02:00
|
|
|
if (shgrp_a->z_sorting.distance > shgrp_b->z_sorting.distance) {
|
2019-03-28 01:11:28 +11:00
|
|
|
return -1;
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
2020-08-07 11:49:59 +02:00
|
|
|
|
|
|
|
/* If distances are the same, keep original order. */
|
|
|
|
if (shgrp_a->z_sorting.original_index > shgrp_b->z_sorting.original_index) {
|
|
|
|
return -1;
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
2020-08-07 11:49:59 +02:00
|
|
|
|
|
|
|
return 0;
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ------------------ Shading group sorting --------------------- */
|
|
|
|
|
|
|
|
#define SORT_IMPL_LINKTYPE DRWShadingGroup
|
|
|
|
|
|
|
|
#define SORT_IMPL_FUNC shgroup_sort_fn_r
|
|
|
|
#include "../../blenlib/intern/list_sort_impl.h"
|
|
|
|
#undef SORT_IMPL_FUNC
|
|
|
|
|
|
|
|
#undef SORT_IMPL_LINKTYPE
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Sort Shading groups by decreasing Z of their first draw call.
|
2019-05-31 01:45:41 +02:00
|
|
|
* This is useful for order-dependent effects such as alpha blending.
|
2019-03-19 15:17:46 +11:00
|
|
|
*/
|
2018-02-28 01:16:23 +01:00
|
|
|
void DRW_pass_sort_shgroup_z(DRWPass *pass)
|
|
|
|
{
|
2019-05-21 23:56:55 +02:00
|
|
|
const float(*viewinv)[4] = DST.view_active->storage.viewinv;
|
2018-02-28 01:16:23 +01:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
if (!(pass->shgroups.first && pass->shgroups.first->next)) {
|
|
|
|
/* Nothing to sort */
|
|
|
|
return;
|
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
uint index = 0;
|
|
|
|
DRWShadingGroup *shgroup = pass->shgroups.first;
|
|
|
|
do {
|
|
|
|
DRWResourceHandle handle = 0;
|
|
|
|
/* Find first DRWCommandDraw. */
|
|
|
|
DRWCommandChunk *cmd_chunk = shgroup->cmd.first;
|
|
|
|
for (; cmd_chunk && handle == 0; cmd_chunk = cmd_chunk->next) {
|
|
|
|
for (int i = 0; i < cmd_chunk->command_used && handle == 0; i++) {
|
|
|
|
if (DRW_CMD_DRAW == command_type_get(cmd_chunk->command_type, i)) {
|
|
|
|
handle = cmd_chunk->commands[i].draw.handle;
|
|
|
|
}
|
|
|
|
}
|
2019-09-13 23:02:45 +02:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
/* To be sorted, a shgroup needs to have at least one draw command. */
|
2020-05-18 16:59:04 +02:00
|
|
|
/* FIXME(fclem): In some cases, we can still have empty shading groups to sort. However, their
|
|
|
|
* final order is not well defined.
|
|
|
|
* (see T76730 & D7729). */
|
|
|
|
// BLI_assert(handle != 0);
|
2019-05-31 01:45:41 +02:00
|
|
|
|
|
|
|
DRWObjectMatrix *obmats = DRW_memblock_elem_from_handle(DST.vmempool->obmats, &handle);
|
|
|
|
|
|
|
|
/* Compute distance to camera. */
|
|
|
|
float tmp[3];
|
|
|
|
sub_v3_v3v3(tmp, viewinv[3], obmats->model[3]);
|
|
|
|
shgroup->z_sorting.distance = dot_v3v3(viewinv[2], tmp);
|
|
|
|
shgroup->z_sorting.original_index = index++;
|
|
|
|
|
|
|
|
} while ((shgroup = shgroup->next));
|
|
|
|
|
|
|
|
/* Sort using computed distances. */
|
|
|
|
pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort);
|
|
|
|
|
|
|
|
/* Find the new last */
|
|
|
|
DRWShadingGroup *last = pass->shgroups.first;
|
|
|
|
while ((last = last->next)) {
|
|
|
|
/* Reset the pass id for debugging. */
|
|
|
|
last->pass_handle = pass->handle;
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
2019-05-31 01:45:41 +02:00
|
|
|
pass->shgroups.last = last;
|
2018-02-28 01:16:23 +01:00
|
|
|
}
|
|
|
|
|
2019-12-02 01:40:58 +01:00
|
|
|
/**
|
|
|
|
* Reverse Shading group submission order.
|
|
|
|
*/
|
|
|
|
void DRW_pass_sort_shgroup_reverse(DRWPass *pass)
|
|
|
|
{
|
|
|
|
pass->shgroups.last = pass->shgroups.first;
|
|
|
|
/* WARNING: Assumes that DRWShadingGroup->next is the first member. */
|
|
|
|
BLI_linklist_reverse((LinkNode **)&pass->shgroups.first);
|
|
|
|
}
|
|
|
|
|
2018-02-28 01:16:23 +01:00
|
|
|
/** \} */
|