/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2016 by Mike Erwin. All rights reserved. */

/** \file
 * \ingroup gpu
 *
 * GPU element list (AKA index buffer)
 */

#include "MEM_guardedalloc.h"

#include "BLI_math_base.h"
#include "BLI_utildefines.h"

#include "gpu_backend.hh"

#include "gpu_index_buffer_private.hh"

#include <cstring>

#define KEEP_SINGLE_COPY 1

#define RESTART_INDEX 0xFFFFFFFF

/* -------------------------------------------------------------------- */
/** \name IndexBufBuilder
 * \{ */

using namespace blender;
using namespace blender::gpu;
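
/* A minimal usage sketch of the builder API below. Variable names (tri_len, vert_len,
 * v0..v2, ibo) are illustrative only:
 *
 *   GPUIndexBufBuilder builder;
 *   GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, tri_len, vert_len);
 *   GPU_indexbuf_add_tri_verts(&builder, v0, v1, v2);
 *   GPUIndexBuf *ibo = GPU_indexbuf_build(&builder);
 *   ...
 *   GPU_indexbuf_discard(ibo);
 */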

void GPU_indexbuf_init_ex(GPUIndexBufBuilder *builder,
                          GPUPrimType prim_type,
                          uint index_len,
                          uint vertex_len)
{
  builder->max_allowed_index = vertex_len - 1;
  builder->max_index_len = index_len;
  builder->index_len = 0; /* Start empty. */
  builder->index_min = UINT32_MAX;
  builder->index_max = 0;
  builder->prim_type = prim_type;
  builder->data = (uint *)MEM_callocN(builder->max_index_len * sizeof(uint), "GPUIndexBuf data");
}
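
/* Initialize a builder for `prim_len` primitives of type `prim_type`,
 * reserving the matching number of indices. */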
void GPU_indexbuf_init(GPUIndexBufBuilder *builder,
                       GPUPrimType prim_type,
                       uint prim_len,
                       uint vertex_len)
{
  int verts_per_prim = GPU_indexbuf_primitive_len(prim_type);
#if TRUST_NO_ONE
  assert(verts_per_prim != -1);
#endif
  GPU_indexbuf_init_ex(builder, prim_type, prim_len * (uint)verts_per_prim, vertex_len);
}
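
/* Create an index buffer whose storage is only allocated on the GPU;
 * no CPU-side data array is kept (see `init_build_on_device`). */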
GPUIndexBuf *GPU_indexbuf_build_on_device(uint index_len)
{
  GPUIndexBuf *elem_ = GPU_indexbuf_calloc();
  GPU_indexbuf_init_build_on_device(elem_, index_len);
  return elem_;
}

void GPU_indexbuf_init_build_on_device(GPUIndexBuf *elem, uint index_len)
{
  IndexBuf *elem_ = unwrap(elem);
  elem_->init_build_on_device(index_len);
}
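
/* Fold the length and min/max bookkeeping of `builder_from` into `builder_to`.
 * Both builders must share the same data array. */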
void GPU_indexbuf_join(GPUIndexBufBuilder *builder_to, const GPUIndexBufBuilder *builder_from)
{
  BLI_assert(builder_to->data == builder_from->data);
  builder_to->index_len = max_uu(builder_to->index_len, builder_from->index_len);
  builder_to->index_min = min_uu(builder_to->index_min, builder_from->index_min);
  builder_to->index_max = max_uu(builder_to->index_max, builder_from->index_max);
}
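
/* Append a single index and keep the running min/max range up to date. */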
void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *builder, uint v)
{
#if TRUST_NO_ONE
  assert(builder->data != nullptr);
  assert(builder->index_len < builder->max_index_len);
  assert(v <= builder->max_allowed_index);
#endif
  builder->data[builder->index_len++] = v;
  builder->index_min = MIN2(builder->index_min, v);
  builder->index_max = MAX2(builder->index_max, v);
}
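
/* Append the primitive restart sentinel (RESTART_INDEX), so the next index
 * starts a new primitive. */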
void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder *builder)
{
#if TRUST_NO_ONE
  assert(builder->data != nullptr);
  assert(builder->index_len < builder->max_index_len);
#endif
  builder->data[builder->index_len++] = RESTART_INDEX;
}

void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder *builder, uint v)
{
#if TRUST_NO_ONE
  assert(builder->prim_type == GPU_PRIM_POINTS);
#endif
  GPU_indexbuf_add_generic_vert(builder, v);
}

void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *builder, uint v1, uint v2)
{
#if TRUST_NO_ONE
  assert(builder->prim_type == GPU_PRIM_LINES);
  assert(v1 != v2);
#endif
  GPU_indexbuf_add_generic_vert(builder, v1);
  GPU_indexbuf_add_generic_vert(builder, v2);
}

void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *builder, uint v1, uint v2, uint v3)
{
#if TRUST_NO_ONE
  assert(builder->prim_type == GPU_PRIM_TRIS);
  assert(v1 != v2 && v2 != v3 && v3 != v1);
#endif
  GPU_indexbuf_add_generic_vert(builder, v1);
  GPU_indexbuf_add_generic_vert(builder, v2);
  GPU_indexbuf_add_generic_vert(builder, v3);
}

void GPU_indexbuf_add_line_adj_verts(
    GPUIndexBufBuilder *builder, uint v1, uint v2, uint v3, uint v4)
{
#if TRUST_NO_ONE
  assert(builder->prim_type == GPU_PRIM_LINES_ADJ);
  assert(v2 != v3); /* Only the line itself needs distinct indices. */
#endif
  GPU_indexbuf_add_generic_vert(builder, v1);
  GPU_indexbuf_add_generic_vert(builder, v2);
  GPU_indexbuf_add_generic_vert(builder, v3);
  GPU_indexbuf_add_generic_vert(builder, v4);
}
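
/* The GPU_indexbuf_set_* variants below write a primitive at an explicit slot `elem`
 * instead of appending, so entries can be filled in any order.
 * `index_len` grows to cover the highest slot written so far. */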

void GPU_indexbuf_set_point_vert(GPUIndexBufBuilder *builder, uint elem, uint v1)
{
  BLI_assert(builder->prim_type == GPU_PRIM_POINTS);
  BLI_assert(elem < builder->max_index_len);
  builder->data[elem++] = v1;
  builder->index_min = MIN2(builder->index_min, v1);
  builder->index_max = MAX2(builder->index_max, v1);
  builder->index_len = MAX2(builder->index_len, elem);
}

void GPU_indexbuf_set_line_verts(GPUIndexBufBuilder *builder, uint elem, uint v1, uint v2)
{
  BLI_assert(builder->prim_type == GPU_PRIM_LINES);
  BLI_assert(v1 != v2);
  BLI_assert(v1 <= builder->max_allowed_index);
  BLI_assert(v2 <= builder->max_allowed_index);
  BLI_assert((elem + 1) * 2 <= builder->max_index_len);
  uint idx = elem * 2;
  builder->data[idx++] = v1;
  builder->data[idx++] = v2;
  builder->index_min = MIN3(builder->index_min, v1, v2);
  builder->index_max = MAX3(builder->index_max, v1, v2);
  builder->index_len = MAX2(builder->index_len, idx);
}

void GPU_indexbuf_set_tri_verts(GPUIndexBufBuilder *builder, uint elem, uint v1, uint v2, uint v3)
{
  BLI_assert(builder->prim_type == GPU_PRIM_TRIS);
  BLI_assert(v1 != v2 && v2 != v3 && v3 != v1);
  BLI_assert(v1 <= builder->max_allowed_index);
  BLI_assert(v2 <= builder->max_allowed_index);
  BLI_assert(v3 <= builder->max_allowed_index);
  BLI_assert((elem + 1) * 3 <= builder->max_index_len);
  uint idx = elem * 3;
  builder->data[idx++] = v1;
  builder->data[idx++] = v2;
  builder->data[idx++] = v3;

  builder->index_min = MIN4(builder->index_min, v1, v2, v3);
  builder->index_max = MAX4(builder->index_max, v1, v2, v3);
  builder->index_len = MAX2(builder->index_len, idx);
}
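
/* The GPU_indexbuf_set_*_restart variants fill a slot entirely with RESTART_INDEX,
 * so the primitive in that slot produces no geometry when drawing. */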

void GPU_indexbuf_set_point_restart(GPUIndexBufBuilder *builder, uint elem)
{
  BLI_assert(builder->prim_type == GPU_PRIM_POINTS);
  BLI_assert(elem < builder->max_index_len);
  builder->data[elem++] = RESTART_INDEX;
  builder->index_len = MAX2(builder->index_len, elem);
}

void GPU_indexbuf_set_line_restart(GPUIndexBufBuilder *builder, uint elem)
{
  BLI_assert(builder->prim_type == GPU_PRIM_LINES);
  BLI_assert((elem + 1) * 2 <= builder->max_index_len);
  uint idx = elem * 2;
  builder->data[idx++] = RESTART_INDEX;
  builder->data[idx++] = RESTART_INDEX;
  builder->index_len = MAX2(builder->index_len, idx);
}

void GPU_indexbuf_set_tri_restart(GPUIndexBufBuilder *builder, uint elem)
{
  BLI_assert(builder->prim_type == GPU_PRIM_TRIS);
  BLI_assert((elem + 1) * 3 <= builder->max_index_len);
  uint idx = elem * 3;
  builder->data[idx++] = RESTART_INDEX;
  builder->data[idx++] = RESTART_INDEX;
  builder->data[idx++] = RESTART_INDEX;
  builder->index_len = MAX2(builder->index_len, idx);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Creation & Deletion
 * \{ */

namespace blender::gpu {

IndexBuf::~IndexBuf()
{
  if (!is_subrange_) {
    MEM_SAFE_FREE(data_);
  }
}
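
/* Take ownership of a 32-bit index array built on the CPU side. When index range
 * tracking is enabled, indices whose range fits in 16 bits are compressed in place. */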
void IndexBuf::init(uint indices_len, uint32_t *indices, uint min_index, uint max_index)
{
  is_init_ = true;
  data_ = indices;
  index_start_ = 0;
  index_len_ = indices_len;
  is_empty_ = min_index > max_index;

#if GPU_TRACK_INDEX_RANGE
  /* Everything remains 32 bit while building to keep things simple.
   * Find min/max after, then convert to smallest index type possible. */
  uint range = min_index < max_index ? max_index - min_index : 0;
  /* Count the primitive restart index. */
  range += 1;

  if (range <= 0xFFFF) {
    index_type_ = GPU_INDEX_U16;
    this->squeeze_indices_short(min_index, max_index);
  }
#endif
}

void IndexBuf::init_build_on_device(uint index_len)
{
  is_init_ = true;
  index_start_ = 0;
  index_len_ = index_len;
  index_type_ = GPU_INDEX_U32;
  data_ = nullptr;
}
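
/* Set this buffer up as a view over a sub-range of `elem_src`, sharing its storage
 * and index type. Nested subranges are not supported. */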
void IndexBuf::init_subrange(IndexBuf *elem_src, uint start, uint length)
{
  /* We don't support nested subranges. */
  BLI_assert(elem_src && elem_src->is_subrange_ == false);
  BLI_assert((length == 0) || (start + length <= elem_src->index_len_));

  is_init_ = true;
  is_subrange_ = true;
  src_ = elem_src;
  index_start_ = start;
  index_len_ = length;
  index_base_ = elem_src->index_base_;
  index_type_ = elem_src->index_type_;
}
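
/* Scan the 32-bit index data for the smallest and largest values, skipping restart
 * indices, and return the range (max - min). Returns 0 with `*r_min = *r_max = 0`
 * when the buffer is empty or contains only restart indices. */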
uint IndexBuf::index_range(uint *r_min, uint *r_max)
{
  if (index_len_ == 0) {
    *r_min = *r_max = 0;
    return 0;
  }
  const uint32_t *uint_idx = (uint32_t *)data_;
  uint min_value = RESTART_INDEX;
  uint max_value = 0;
  for (uint i = 0; i < index_len_; i++) {
    const uint value = uint_idx[i];
    if (value == RESTART_INDEX) {
      continue;
    }
    if (value < min_value) {
      min_value = value;
    }
    else if (value > max_value) {
      max_value = value;
    }
  }
  if (min_value == RESTART_INDEX) {
    *r_min = *r_max = 0;
    return 0;
  }
  *r_min = min_value;
  *r_max = max_value;
  return max_value - min_value;
}
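
/* Convert the 32-bit indices to 16 bits in place. When `max_idx` does not fit in
 * 16 bits, the indices are rebased against `min_idx` and `index_base_` records the offset. */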
void IndexBuf::squeeze_indices_short(uint min_idx, uint max_idx)
{
  /* The 16-bit data will never be *larger* than the 32-bit `builder->data`,
   * so convert in place to avoid an extra allocation. */
  uint16_t *ushort_idx = (uint16_t *)data_;
  const uint32_t *uint_idx = (uint32_t *)data_;

  if (max_idx >= 0xFFFF) {
    index_base_ = min_idx;
    for (uint i = 0; i < index_len_; i++) {
      ushort_idx[i] = (uint16_t)MIN2(0xFFFF, uint_idx[i] - min_idx);
    }
  }
  else {
    index_base_ = 0;
    for (uint i = 0; i < index_len_; i++) {
      ushort_idx[i] = (uint16_t)(uint_idx[i]);
    }
  }
}

uint32_t *IndexBuf::unmap(const uint32_t *mapped_memory) const
{
  size_t size = size_get();
  uint32_t *result = static_cast<uint32_t *>(MEM_mallocN(size, __func__));
  memcpy(result, mapped_memory, size);
  return result;
}

}  // namespace blender::gpu

/** \} */

/* -------------------------------------------------------------------- */
/** \name C-API
 * \{ */

GPUIndexBuf *GPU_indexbuf_calloc()
{
  return wrap(GPUBackend::get()->indexbuf_alloc());
}

GPUIndexBuf *GPU_indexbuf_build(GPUIndexBufBuilder *builder)
{
  GPUIndexBuf *elem = GPU_indexbuf_calloc();
  GPU_indexbuf_build_in_place(builder, elem);
  return elem;
}

GPUIndexBuf *GPU_indexbuf_create_subrange(GPUIndexBuf *elem_src, uint start, uint length)
{
  GPUIndexBuf *elem = GPU_indexbuf_calloc();
  GPU_indexbuf_create_subrange_in_place(elem, elem_src, start, length);
  return elem;
}
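
/* Hand the builder's CPU data over to `elem`; the builder's pointer is cleared so the
 * array is now owned by the index buffer and uploaded on first use. */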
void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *builder, GPUIndexBuf *elem)
{
  BLI_assert(builder->data != nullptr);
  /* Transfer data ownership to GPUIndexBuf.
   * It will be uploaded upon first use. */
  unwrap(elem)->init(builder->index_len, builder->data, builder->index_min, builder->index_max);
  builder->data = nullptr;
}

void GPU_indexbuf_create_subrange_in_place(GPUIndexBuf *elem,
                                           GPUIndexBuf *elem_src,
                                           uint start,
                                           uint length)
{
  unwrap(elem)->init_subrange(unwrap(elem_src), start, length);
}

const uint32_t *GPU_indexbuf_read(GPUIndexBuf *elem)
{
  return unwrap(elem)->read();
}

uint32_t *GPU_indexbuf_unmap(const GPUIndexBuf *elem, const uint32_t *mapped_buffer)
{
  return unwrap(elem)->unmap(mapped_buffer);
}

void GPU_indexbuf_discard(GPUIndexBuf *elem)
{
  delete unwrap(elem);
}

bool GPU_indexbuf_is_init(GPUIndexBuf *elem)
{
  return unwrap(elem)->is_init();
}

int GPU_indexbuf_primitive_len(GPUPrimType prim_type)
{
  return indices_per_primitive(prim_type);
}
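
/* Ensure the index data is uploaded to the GPU before use. */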
void GPU_indexbuf_use(GPUIndexBuf *elem)
{
  unwrap(elem)->upload_data();
}

void GPU_indexbuf_bind_as_ssbo(GPUIndexBuf *elem, int binding)
{
  unwrap(elem)->bind_as_ssbo(binding);
}

void GPU_indexbuf_update_sub(GPUIndexBuf *elem, uint start, uint len, const void *data)
{
  unwrap(elem)->update_sub(start, len, data);
}

/** \} */