/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2016 by Mike Erwin. All rights reserved. */

/** \file
 * \ingroup gpu
 *
 * GPU vertex buffer
 */

#include "MEM_guardedalloc.h"

#include "gpu_backend.hh"
#include "gpu_vertex_format_private.h"

#include "gl_vertex_buffer.hh"    /* TODO: remove. */
#include "gpu_context_private.hh" /* TODO: remove. */

#include "gpu_vertex_buffer_private.hh"

#include <cstring>

/* -------------------------------------------------------------------- */
/** \name VertBuf
 * \{ */

namespace blender::gpu {

size_t VertBuf::memory_usage = 0;

VertBuf::VertBuf()
{
  /* Needed by some code check. */
  format.attr_len = 0;
}

VertBuf::~VertBuf()
{
  /* Should already have been cleared. */
  BLI_assert(flag == GPU_VERTBUF_INVALID);
}

void VertBuf::init(const GPUVertFormat *format, GPUUsageType usage)
{
  usage_ = usage;
  flag = GPU_VERTBUF_DATA_DIRTY;
  GPU_vertformat_copy(&this->format, format);
  if (!format->packed) {
    VertexFormat_pack(&this->format);
  }
  flag |= GPU_VERTBUF_INIT;
}
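
/* Note: VertexFormat_pack() (declared in gpu_vertex_format_private.h) computes the per-attribute
 * offsets and interleaved stride that the accessors below rely on; the exact packing/padding
 * rules live with the format code and are not restated here. */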

void VertBuf::clear()
{
  this->release_data();
  flag = GPU_VERTBUF_INVALID;
}

VertBuf *VertBuf::duplicate()
{
  VertBuf *dst = GPUBackend::get()->vertbuf_alloc();
  /* Full copy. */
  *dst = *this;
  /* Almost full copy... */
  dst->handle_refcount_ = 1;
  /* Duplicate all needed implementation-specific data. */
  this->duplicate_data(dst);
  return dst;
}

void VertBuf::allocate(uint vert_len)
{
  BLI_assert(format.packed);
  /* Catch any unnecessary usage. */
  BLI_assert(vertex_alloc != vert_len || data == nullptr);
  vertex_len = vertex_alloc = vert_len;

  this->acquire_data();

  flag |= GPU_VERTBUF_DATA_DIRTY;
}

void VertBuf::resize(uint vert_len)
{
  /* Catch any unnecessary usage. */
  BLI_assert(vertex_alloc != vert_len);
  vertex_len = vertex_alloc = vert_len;

  this->resize_data();

  flag |= GPU_VERTBUF_DATA_DIRTY;
}

void VertBuf::upload()
{
  this->upload_data();
}

}  // namespace blender::gpu

/** \} */

/* -------------------------------------------------------------------- */
/** \name C-API
 * \{ */

using namespace blender;
using namespace blender::gpu;

/* -------- Creation & deletion -------- */
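
/* Sketch of typical use of this C API. Attribute setup lives in GPU_vertex_format.h, and the
 * names below are illustrative only:
 *
 *   GPUVertFormat format = {0};
 *   uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
 *
 *   GPUVertBuf *vbo = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_STATIC);
 *   GPU_vertbuf_data_alloc(vbo, vert_len);
 *   for (uint v = 0; v < vert_len; v++) {
 *     GPU_vertbuf_attr_set(vbo, pos, v, positions[v]);
 *   }
 *   ...
 *   GPU_vertbuf_discard(vbo);
 */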

GPUVertBuf *GPU_vertbuf_calloc()
{
  return wrap(GPUBackend::get()->vertbuf_alloc());
}

GPUVertBuf *GPU_vertbuf_create_with_format_ex(const GPUVertFormat *format, GPUUsageType usage)
{
  GPUVertBuf *verts = GPU_vertbuf_calloc();
  unwrap(verts)->init(format, usage);
  return verts;
}

void GPU_vertbuf_init_with_format_ex(GPUVertBuf *verts_,
                                     const GPUVertFormat *format,
                                     GPUUsageType usage)
{
  unwrap(verts_)->init(format, usage);
}

void GPU_vertbuf_init_build_on_device(GPUVertBuf *verts, GPUVertFormat *format, uint v_len)
{
  GPU_vertbuf_init_with_format_ex(verts, format, GPU_USAGE_DEVICE_ONLY);
  GPU_vertbuf_data_alloc(verts, v_len);
}
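
/* With GPU_USAGE_DEVICE_ONLY there is no CPU-side data array to fill: the storage is meant to be
 * written on the GPU (e.g. by a compute shader), so the GPU_vertbuf_attr_* setters below, which
 * assert on a valid CPU `data` pointer, do not apply to such buffers. */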

GPUVertBuf *GPU_vertbuf_duplicate(GPUVertBuf *verts_)
{
  return wrap(unwrap(verts_)->duplicate());
}

const void *GPU_vertbuf_read(GPUVertBuf *verts)
{
  return unwrap(verts)->read();
}

void *GPU_vertbuf_unmap(const GPUVertBuf *verts, const void *mapped_data)
{
  return unwrap(verts)->unmap(mapped_data);
}

void GPU_vertbuf_clear(GPUVertBuf *verts)
{
  unwrap(verts)->clear();
}

void GPU_vertbuf_discard(GPUVertBuf *verts)
{
  unwrap(verts)->clear();
  unwrap(verts)->reference_remove();
}

void GPU_vertbuf_handle_ref_add(GPUVertBuf *verts)
{
  unwrap(verts)->reference_add();
}

void GPU_vertbuf_handle_ref_remove(GPUVertBuf *verts)
{
  unwrap(verts)->reference_remove();
}

/* -------- Data update -------- */

void GPU_vertbuf_data_alloc(GPUVertBuf *verts, uint v_len)
{
  unwrap(verts)->allocate(v_len);
}

void GPU_vertbuf_data_resize(GPUVertBuf *verts, uint v_len)
{
  unwrap(verts)->resize(v_len);
}

void GPU_vertbuf_data_len_set(GPUVertBuf *verts_, uint v_len)
{
  VertBuf *verts = unwrap(verts_);
  BLI_assert(verts->data != nullptr); /* Only for dynamic data. */
  BLI_assert(v_len <= verts->vertex_alloc);

  verts->vertex_len = v_len;
}

void GPU_vertbuf_attr_set(GPUVertBuf *verts_, uint a_idx, uint v_idx, const void *data)
{
  VertBuf *verts = unwrap(verts_);
  const GPUVertFormat *format = &verts->format;
  const GPUVertAttr *a = &format->attrs[a_idx];
  BLI_assert(v_idx < verts->vertex_alloc);
  BLI_assert(a_idx < format->attr_len);
  BLI_assert(verts->data != nullptr);
  verts->flag |= GPU_VERTBUF_DATA_DIRTY;
  memcpy(verts->data + a->offset + v_idx * format->stride, data, a->sz);
}
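
/* Addressing example (a sketch, assuming a packed format with "pos" as 3 floats at offset 0 and
 * "color" as 4 uchars at offset 12, giving a 16 byte stride): setting vertex 2 of "color" copies
 * a->sz = 4 bytes to data + 12 + 2 * 16 = data + 44. */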

void GPU_vertbuf_attr_fill(GPUVertBuf *verts_, uint a_idx, const void *data)
{
  VertBuf *verts = unwrap(verts_);
  const GPUVertFormat *format = &verts->format;
  BLI_assert(a_idx < format->attr_len);
  const GPUVertAttr *a = &format->attrs[a_idx];
  const uint stride = a->sz; /* tightly packed input data */
  verts->flag |= GPU_VERTBUF_DATA_DIRTY;

  GPU_vertbuf_attr_fill_stride(verts_, a_idx, stride, data);
}

void GPU_vertbuf_vert_set(GPUVertBuf *verts_, uint v_idx, const void *data)
{
  VertBuf *verts = unwrap(verts_);
  const GPUVertFormat *format = &verts->format;
  BLI_assert(v_idx < verts->vertex_alloc);
  BLI_assert(verts->data != nullptr);
  verts->flag |= GPU_VERTBUF_DATA_DIRTY;
  memcpy(verts->data + v_idx * format->stride, data, format->stride);
}

void GPU_vertbuf_attr_fill_stride(GPUVertBuf *verts_, uint a_idx, uint stride, const void *data)
{
  VertBuf *verts = unwrap(verts_);
  const GPUVertFormat *format = &verts->format;
  const GPUVertAttr *a = &format->attrs[a_idx];
  BLI_assert(a_idx < format->attr_len);
  BLI_assert(verts->data != nullptr);
  verts->flag |= GPU_VERTBUF_DATA_DIRTY;
  const uint vertex_len = verts->vertex_len;

  if (format->attr_len == 1 && stride == format->stride) {
    /* we can copy it all at once */
    memcpy(verts->data, data, vertex_len * a->sz);
  }
  else {
    /* we must copy it per vertex */
    for (uint v = 0; v < vertex_len; v++) {
      memcpy(
          verts->data + a->offset + v * format->stride, (const uchar *)data + v * stride, a->sz);
    }
  }
}

void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts_, uint a_idx, GPUVertBufRaw *access)
{
  VertBuf *verts = unwrap(verts_);
  const GPUVertFormat *format = &verts->format;
  const GPUVertAttr *a = &format->attrs[a_idx];
  BLI_assert(a_idx < format->attr_len);
  BLI_assert(verts->data != nullptr);

  verts->flag |= GPU_VERTBUF_DATA_DIRTY;
  verts->flag &= ~GPU_VERTBUF_DATA_UPLOADED;
  access->size = a->sz;
  access->stride = format->stride;
  access->data = (uchar *)verts->data + a->offset;
  access->data_init = access->data;
#ifdef DEBUG
  access->_data_end = access->data_init + (size_t)(verts->vertex_alloc * format->stride);
#endif
}
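
/* A GPUVertBufRaw accessor is typically advanced one vertex at a time with
 * GPU_vertbuf_raw_step() from GPU_vertex_buffer.h, writing a->sz bytes per step. Sketch only;
 * helper names outside this file are assumptions:
 *
 *   GPUVertBufRaw pos_step;
 *   GPU_vertbuf_attr_get_raw_data(vbo, pos_id, &pos_step);
 *   copy_v3_v3((float *)GPU_vertbuf_raw_step(&pos_step), co);
 */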

/* -------- Getters -------- */

void *GPU_vertbuf_get_data(const GPUVertBuf *verts)
{
  /* TODO: Assert that the format has no padding. */
  return unwrap(verts)->data;
}

void *GPU_vertbuf_steal_data(GPUVertBuf *verts_)
{
  VertBuf *verts = unwrap(verts_);
  /* TODO: Assert that the format has no padding. */
  BLI_assert(verts->data);
  void *data = verts->data;
  verts->data = nullptr;
  return data;
}
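
/* Note: after GPU_vertbuf_steal_data() ownership of the CPU-side array moves to the caller; the
 * buffer keeps no reference to it and will not free it. */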

const GPUVertFormat *GPU_vertbuf_get_format(const GPUVertBuf *verts)
{
  return &unwrap(verts)->format;
}

uint GPU_vertbuf_get_vertex_alloc(const GPUVertBuf *verts)
{
  return unwrap(verts)->vertex_alloc;
}

uint GPU_vertbuf_get_vertex_len(const GPUVertBuf *verts)
{
  return unwrap(verts)->vertex_len;
}

GPUVertBufStatus GPU_vertbuf_get_status(const GPUVertBuf *verts)
{
  return unwrap(verts)->flag;
}

void GPU_vertbuf_tag_dirty(GPUVertBuf *verts)
{
  unwrap(verts)->flag |= GPU_VERTBUF_DATA_DIRTY;
}

uint GPU_vertbuf_get_memory_usage()
{
  return VertBuf::memory_usage;
}

void GPU_vertbuf_use(GPUVertBuf *verts)
{
  unwrap(verts)->upload();
}

void GPU_vertbuf_wrap_handle(GPUVertBuf *verts, uint64_t handle)
{
  unwrap(verts)->wrap_handle(handle);
}

void GPU_vertbuf_bind_as_ssbo(struct GPUVertBuf *verts, int binding)
{
  unwrap(verts)->bind_as_ssbo(binding);
}
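
/* Sketch of binding a vertex buffer as an SSBO for a compute pass; the dispatch call and binding
 * slot are assumptions, not defined in this file:
 *
 *   GPU_vertbuf_bind_as_ssbo(vbo, 0); // slot must match the shader's buffer binding
 *   GPU_compute_dispatch(shader, groups_x_len, 1, 1);
 */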

void GPU_vertbuf_update_sub(GPUVertBuf *verts, uint start, uint len, const void *data)
{
  unwrap(verts)->update_sub(start, len, data);
}
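
/* `start` and `len` are byte offsets/sizes into the buffer storage (assumption based on how the
 * backends consume them), so partial updates of interleaved data must account for the stride. */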

/** \} */