DRW: Add support for GPUStorageBuf in wrappers
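
Replace the GPUVertBuf workaround backing the draw wrapper classes (StorageCommon, StorageArrayBuffer, StorageBuffer) with the new GPUStorageBuf API, and switch every shading group that bound those buffers through DRW_shgroup_vertex_buffer(_ref) over to DRW_shgroup_storage_block(_ref). The wrappers now allocate a real storage buffer, keep their own CPU-side mirror, and upload it explicitly through push_update().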
@@ -359,29 +359,29 @@ void LightModule::end_sync(void)
   {
     GPUShader *sh = inst_.shaders.static_shader_get(CULLING_SELECT);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, culling_ps_);
-    DRW_shgroup_vertex_buffer(grp, "lights_buf", lights_data);
-    DRW_shgroup_vertex_buffer_ref(grp, "lights_cull_buf", &culling_data);
-    DRW_shgroup_vertex_buffer(grp, "keys_buf", culling_key_buf);
+    DRW_shgroup_storage_block(grp, "lights_buf", lights_data);
+    DRW_shgroup_storage_block_ref(grp, "lights_cull_buf", &culling_data);
+    DRW_shgroup_storage_block(grp, "keys_buf", culling_key_buf);
     DRW_shgroup_call_compute(grp, batch_len, 1, 1);
     DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_STORAGE);
   }
   {
     GPUShader *sh = inst_.shaders.static_shader_get(CULLING_SORT);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, culling_ps_);
-    DRW_shgroup_vertex_buffer(grp, "lights_buf", lights_data);
-    DRW_shgroup_vertex_buffer_ref(grp, "lights_cull_buf", &culling_data);
-    DRW_shgroup_vertex_buffer(grp, "keys_buf", culling_key_buf);
-    DRW_shgroup_vertex_buffer_ref(grp, "lights_zbin_buf", &culling_zbin_buf);
-    DRW_shgroup_vertex_buffer_ref(grp, "out_lights_buf", &culling_light_buf);
+    DRW_shgroup_storage_block(grp, "lights_buf", lights_data);
+    DRW_shgroup_storage_block_ref(grp, "lights_cull_buf", &culling_data);
+    DRW_shgroup_storage_block(grp, "keys_buf", culling_key_buf);
+    DRW_shgroup_storage_block_ref(grp, "lights_zbin_buf", &culling_zbin_buf);
+    DRW_shgroup_storage_block_ref(grp, "out_lights_buf", &culling_light_buf);
     DRW_shgroup_call_compute(grp, batch_len, 1, 1);
     DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_STORAGE);
   }
   {
     GPUShader *sh = inst_.shaders.static_shader_get(CULLING_TILE);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, culling_ps_);
-    DRW_shgroup_vertex_buffer(grp, "lights_buf", culling_light_buf);
-    DRW_shgroup_vertex_buffer_ref(grp, "lights_cull_buf", &culling_data);
-    DRW_shgroup_vertex_buffer_ref(grp, "lights_tile_buf", &culling_tile_buf);
+    DRW_shgroup_storage_block(grp, "lights_buf", culling_light_buf);
+    DRW_shgroup_storage_block_ref(grp, "lights_cull_buf", &culling_data);
+    DRW_shgroup_storage_block_ref(grp, "lights_tile_buf", &culling_tile_buf);
     DRW_shgroup_call_compute_ref(grp, culling_tile_dispatch_size_);
     DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
   }
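
The substitution is the same in every hunk that follows: buffers that were bound as vertex buffers while GPUStorageBuf was incomplete now use the storage-block entry points. A short sketch of the two flavors, with a hypothetical wrapper instance:

/* Sketch only: `my_buf` stands for any of the storage wrappers below. */
DRW_shgroup_storage_block(grp, "my_buf", my_buf);      /* Binds the current handle. */
DRW_shgroup_storage_block_ref(grp, "my_buf", &my_buf); /* Keeps a GPUStorageBuf ** and
                                                        * dereferences it at draw time,
                                                        * so a later resize() stays valid. */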
@@ -402,10 +402,10 @@ void LightModule::debug_end_sync(void)
 
   GPUShader *sh = inst_.shaders.static_shader_get(CULLING_DEBUG);
   DRWShadingGroup *grp = DRW_shgroup_create(sh, debug_draw_ps_);
-  DRW_shgroup_vertex_buffer_ref(grp, "lights_buf", &culling_light_buf);
-  DRW_shgroup_vertex_buffer_ref(grp, "lights_cull_buf", &culling_data);
-  DRW_shgroup_vertex_buffer_ref(grp, "lights_zbin_buf", &culling_zbin_buf);
-  DRW_shgroup_vertex_buffer_ref(grp, "lights_tile_buf", &culling_tile_buf);
+  DRW_shgroup_storage_block_ref(grp, "lights_buf", &culling_light_buf);
+  DRW_shgroup_storage_block_ref(grp, "lights_cull_buf", &culling_data);
+  DRW_shgroup_storage_block_ref(grp, "lights_zbin_buf", &culling_zbin_buf);
+  DRW_shgroup_storage_block_ref(grp, "lights_tile_buf", &culling_tile_buf);
   DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &input_depth_tx_);
   DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
 }
@@ -468,10 +468,10 @@ void LightModule::debug_draw(GPUFrameBuffer *view_fb, HiZBuffer &hiz)
 
 void LightModule::shgroup_resources(DRWShadingGroup *grp)
 {
-  DRW_shgroup_vertex_buffer_ref(grp, "lights_buf", &culling_light_buf);
-  DRW_shgroup_vertex_buffer_ref(grp, "lights_cull_buf", &culling_data);
-  DRW_shgroup_vertex_buffer_ref(grp, "lights_zbin_buf", &culling_zbin_buf);
-  DRW_shgroup_vertex_buffer_ref(grp, "lights_tile_buf", &culling_tile_buf);
+  DRW_shgroup_storage_block_ref(grp, "lights_buf", &culling_light_buf);
+  DRW_shgroup_storage_block_ref(grp, "lights_cull_buf", &culling_data);
+  DRW_shgroup_storage_block_ref(grp, "lights_zbin_buf", &culling_zbin_buf);
+  DRW_shgroup_storage_block_ref(grp, "lights_tile_buf", &culling_tile_buf);
 
   DRW_shgroup_uniform_texture(grp, "shadow_atlas_tx", inst_.shadows.atlas_tx_get());
   DRW_shgroup_uniform_texture(grp, "shadow_tilemaps_tx", inst_.shadows.tilemap_tx_get());
@@ -708,9 +708,9 @@ void ShadowModule::end_sync(void)
 
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_TILE_SETUP);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, tilemap_setup_ps_);
-    DRW_shgroup_vertex_buffer(grp, "pages_infos_buf", pages_infos_data_);
-    DRW_shgroup_vertex_buffer(grp, "pages_free_buf", pages_free_data_);
-    DRW_shgroup_vertex_buffer(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
+    DRW_shgroup_storage_block(grp, "pages_infos_buf", pages_infos_data_);
+    DRW_shgroup_storage_block(grp, "pages_free_buf", pages_free_data_);
+    DRW_shgroup_storage_block(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     DRW_shgroup_uniform_bool(grp, "do_tilemap_setup", &do_tilemap_setup_, 1);
     int64_t tilemaps_updated_len = tilemaps_len + tilemap_allocator.deleted_maps_len;
@@ -729,7 +729,7 @@ void ShadowModule::end_sync(void)
 
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_TILE_TAG_VISIBILITY);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, tilemap_visibility_ps_);
-    DRW_shgroup_vertex_buffer(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
+    DRW_shgroup_storage_block(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     DRW_shgroup_uniform_float(grp, "tilemap_pixel_radius", &tilemap_pixel_radius_, 1);
     DRW_shgroup_uniform_float(grp, "screen_pixel_radius_inv", &screen_pixel_radius_inv_, 1);
@@ -747,7 +747,7 @@ void ShadowModule::end_sync(void)
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_TILE_TAG_USAGE);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, tilemap_usage_tag_ps_);
     DRW_shgroup_vertex_buffer(grp, "aabbs_buf", receivers_aabbs);
-    DRW_shgroup_vertex_buffer(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
+    DRW_shgroup_storage_block(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     DRW_shgroup_uniform_float(grp, "tilemap_pixel_radius", &tilemap_pixel_radius_, 1);
     DRW_shgroup_uniform_float(grp, "screen_pixel_radius_inv", &screen_pixel_radius_inv_, 1);
@@ -764,10 +764,10 @@ void ShadowModule::end_sync(void)
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_TILE_DEPTH_SCAN);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, tilemap_depth_scan_ps_);
     DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &input_depth_tx_);
-    DRW_shgroup_vertex_buffer_ref(grp, "lights_buf", &inst_.lights.culling_light_buf);
-    DRW_shgroup_vertex_buffer_ref(grp, "lights_cull_buf", &inst_.lights.culling_data);
-    DRW_shgroup_vertex_buffer_ref(grp, "lights_zbin_buf", &inst_.lights.culling_zbin_buf);
-    DRW_shgroup_vertex_buffer_ref(grp, "lights_tile_buf", &inst_.lights.culling_tile_buf);
+    DRW_shgroup_storage_block_ref(grp, "lights_buf", &inst_.lights.culling_light_buf);
+    DRW_shgroup_storage_block_ref(grp, "lights_cull_buf", &inst_.lights.culling_data);
+    DRW_shgroup_storage_block_ref(grp, "lights_zbin_buf", &inst_.lights.culling_zbin_buf);
+    DRW_shgroup_storage_block_ref(grp, "lights_tile_buf", &inst_.lights.culling_tile_buf);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     DRW_shgroup_uniform_float(grp, "tilemap_pixel_radius", &tilemap_pixel_radius_, 1);
     DRW_shgroup_uniform_float(grp, "screen_pixel_radius_inv", &screen_pixel_radius_inv_, 1);
@@ -783,7 +783,7 @@ void ShadowModule::end_sync(void)
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_TILE_TAG_UPDATE);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, tilemap_update_tag_ps_);
     DRW_shgroup_vertex_buffer(grp, "aabbs_buf", casters_aabbs);
-    DRW_shgroup_vertex_buffer(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
+    DRW_shgroup_storage_block(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     DRW_shgroup_uniform_int_copy(grp, "aabb_len", GPU_vertbuf_get_vertex_len(casters_aabbs));
     if (tilemaps_len > 0 && aabb_len > 0) {
@@ -797,7 +797,7 @@ void ShadowModule::end_sync(void)
 
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_TILE_LOD_MASK);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, tilemap_lod_mask_ps_);
-    DRW_shgroup_vertex_buffer(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
+    DRW_shgroup_storage_block(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     if (tilemaps_len > 0) {
       DRW_shgroup_call_compute(grp, 1, 1, tilemaps_len);
@@ -814,8 +814,8 @@ void ShadowModule::end_sync(void)
 
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_PAGE_INIT);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, page_init_ps_);
-    DRW_shgroup_vertex_buffer(grp, "pages_infos_buf", pages_infos_data_);
-    DRW_shgroup_vertex_buffer(grp, "pages_free_buf", pages_free_data_);
+    DRW_shgroup_storage_block(grp, "pages_infos_buf", pages_infos_data_);
+    DRW_shgroup_storage_block(grp, "pages_free_buf", pages_free_data_);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     DRW_shgroup_call_compute(grp, SHADOW_MAX_PAGE / SHADOW_PAGE_PER_ROW, 1, 1);
     DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS | GPU_BARRIER_SHADER_STORAGE);
@@ -829,9 +829,9 @@ void ShadowModule::end_sync(void)
 
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_PAGE_FREE);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, page_free_ps_);
-    DRW_shgroup_vertex_buffer(grp, "pages_infos_buf", pages_infos_data_);
-    DRW_shgroup_vertex_buffer(grp, "pages_free_buf", pages_free_data_);
-    DRW_shgroup_vertex_buffer(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
+    DRW_shgroup_storage_block(grp, "pages_infos_buf", pages_infos_data_);
+    DRW_shgroup_storage_block(grp, "pages_free_buf", pages_free_data_);
+    DRW_shgroup_storage_block(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     int64_t tilemaps_updated_len = tilemaps_len + tilemap_allocator.deleted_maps_len;
     if (tilemaps_updated_len > 0) {
@@ -848,8 +848,8 @@ void ShadowModule::end_sync(void)
 
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_PAGE_DEFRAG);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, page_defrag_ps_);
-    DRW_shgroup_vertex_buffer(grp, "pages_free_buf", pages_free_data_);
-    DRW_shgroup_vertex_buffer(grp, "pages_infos_buf", pages_infos_data_);
+    DRW_shgroup_storage_block(grp, "pages_free_buf", pages_free_data_);
+    DRW_shgroup_storage_block(grp, "pages_infos_buf", pages_infos_data_);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     DRW_shgroup_call_compute(grp, 1, 1, 1);
     DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS | GPU_BARRIER_SHADER_STORAGE);
@@ -859,9 +859,9 @@ void ShadowModule::end_sync(void)
 
     GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_PAGE_ALLOC);
     DRWShadingGroup *grp = DRW_shgroup_create(sh, page_alloc_ps_);
-    DRW_shgroup_vertex_buffer(grp, "pages_infos_buf", pages_infos_data_);
-    DRW_shgroup_vertex_buffer(grp, "pages_free_buf", pages_free_data_);
-    DRW_shgroup_vertex_buffer(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
+    DRW_shgroup_storage_block(grp, "pages_infos_buf", pages_infos_data_);
+    DRW_shgroup_storage_block(grp, "pages_free_buf", pages_free_data_);
+    DRW_shgroup_storage_block(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
     DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
     DRW_shgroup_uniform_image(grp, "tilemap_rects_img", tilemap_allocator.tilemap_rects_tx);
     if (tilemaps_len > 0) {
@@ -914,7 +914,7 @@ void ShadowModule::debug_page_map_call(DRWPass *pass)
 
   GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_PAGE_DEBUG);
   DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
-  DRW_shgroup_vertex_buffer(grp, "pages_free_buf", pages_free_data_);
+  DRW_shgroup_storage_block(grp, "pages_free_buf", pages_free_data_);
   DRW_shgroup_uniform_image(grp, "tilemaps_img", tilemap_allocator.tilemap_tx);
   DRW_shgroup_uniform_image(grp, "debug_img", debug_page_tx_);
   DRW_shgroup_call_compute(grp, 1, 1, 1);
@@ -981,7 +981,7 @@ void ShadowModule::debug_end_sync(void)
 
   GPUShader *sh = inst_.shaders.static_shader_get(SHADOW_DEBUG);
   DRWShadingGroup *grp = DRW_shgroup_create(sh, debug_draw_ps_);
-  DRW_shgroup_vertex_buffer(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
+  DRW_shgroup_storage_block(grp, "tilemaps_buf", tilemap_allocator.tilemaps_data);
   DRW_shgroup_uniform_texture(grp, "tilemaps_tx", tilemap_allocator.tilemap_tx);
   DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &input_depth_tx_);
   DRW_shgroup_uniform_texture(grp, "atlas_tx", atlas_tx_);
@@ -78,9 +78,9 @@
 #include "BLI_utility_mixins.hh"
 
 #include "GPU_framebuffer.h"
+#include "GPU_storage_buffer.h"
 #include "GPU_texture.h"
 #include "GPU_uniform_buffer.h"
-#include "GPU_vertex_buffer.h"
 
 namespace blender::draw {
 
@@ -215,8 +215,7 @@ class UniformCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
 template<typename T, int64_t len, bool device_only>
 class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable {
  protected:
-  /* Use vertex buffer for now. Until there is a complete GPUStorageBuf implementation. */
-  GPUVertBuf *ssbo_;
+  GPUStorageBuf *ssbo_;
 
 #ifdef DEBUG
   const char *name_ = typeid(T).name();
@@ -232,24 +231,30 @@ class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
 
   ~StorageCommon()
   {
-    GPU_vertbuf_discard(ssbo_);
+    GPU_storagebuf_free(ssbo_);
   }
 
   void resize(int64_t new_size)
   {
     BLI_assert(new_size > 0);
     if (new_size != this->len_) {
-      GPU_vertbuf_discard(ssbo_);
+      GPU_storagebuf_free(ssbo_);
       this->init(new_size);
     }
   }
 
-  operator GPUVertBuf *() const
+  void push_update(void)
   {
+    BLI_assert(device_only == false);
+    GPU_storagebuf_update(ssbo_, this->data_);
+  }
+
+  operator GPUStorageBuf *() const
+  {
     return ssbo_;
   }
   /* To be able to use it with DRW_shgroup_*_ref(). */
-  GPUVertBuf **operator&()
+  GPUStorageBuf **operator&()
   {
     return &ssbo_;
   }
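
push_update() now copies the CPU mirror into the storage buffer with GPU_storagebuf_update(), replacing the old pattern of writing through GPU_vertbuf_get_data() and tagging with GPU_vertbuf_use(); the conversion operator and the overloaded operator&() let a wrapper be passed directly to DRW_shgroup_storage_block() and DRW_shgroup_storage_block_ref(). A minimal usage sketch, with illustrative names:

StorageArrayBuffer<MyData, 16> buf;                 /* MyData is illustrative. */
buf.push_update();                                  /* GPU_storagebuf_update(ssbo_, data_). */
DRW_shgroup_storage_block(grp, "my_buf", buf);      /* Implicit operator GPUStorageBuf *(). */
DRW_shgroup_storage_block_ref(grp, "my_buf", &buf); /* operator&() returns GPUStorageBuf **. */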
@@ -258,17 +263,8 @@ class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
   void init(int64_t new_size)
   {
     this->len_ = new_size;
-
-    GPUVertFormat format = {0};
-    GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
-
     GPUUsageType usage = device_only ? GPU_USAGE_DEVICE_ONLY : GPU_USAGE_DYNAMIC;
-    ssbo_ = GPU_vertbuf_create_with_format_ex(&format, usage);
-    GPU_vertbuf_data_alloc(ssbo_, divide_ceil_u(sizeof(T) * this->len_, 4));
-    if (!device_only) {
-      this->data_ = (T *)GPU_vertbuf_get_data(ssbo_);
-      GPU_vertbuf_use(ssbo_);
-    }
+    ssbo_ = GPU_storagebuf_create_ex(sizeof(T) * this->len_, nullptr, usage, this->name_);
   }
 };
 
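The allocation size is unchanged: the old path allocated divide_ceil_u(sizeof(T) * len_, 4) dummy float vertices at 4 bytes each, i.e. sizeof(T) * len_ rounded up to a multiple of 4 bytes, while the new path requests the byte size directly, with nullptr initial data and the same usage flag. A worked example with illustrative numbers:

/* Assume sizeof(T) == 24 and len_ == 100. */
size_t bytes = sizeof(T) * len_;       /* 24 * 100 = 2400 bytes.            */
uint verts = divide_ceil_u(bytes, 4);  /* Old: (2400 + 3) / 4 = 600 floats. */
/* 600 floats * 4 bytes = 2400 bytes, the same size the new call requests.  */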
@@ -337,13 +333,14 @@ template<
     bool device_only = false>
 class StorageArrayBuffer : public detail::StorageCommon<T, len, device_only> {
  public:
-  void push_update(void)
+  StorageArrayBuffer()
   {
-    BLI_assert(!device_only);
-    /* Get the data again to tag for update. The actual pointer should not
-     * change. */
-    this->data_ = (T *)GPU_vertbuf_get_data(this->ssbo_);
-    GPU_vertbuf_use(this->ssbo_);
+    /* TODO(@fclem): We should map memory instead. */
+    this->data_ = (T *)MEM_mallocN_aligned(len * sizeof(T), 16, this->name_);
   }
+  ~StorageArrayBuffer()
+  {
+    MEM_freeN(this->data_);
+  }
 };
 
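Ownership of the CPU mirror changes here: the constructor allocates a 16-byte-aligned host copy with MEM_mallocN_aligned() and the destructor frees it, where data_ previously aliased memory owned by the vertex buffer. The TODO records the eventual goal of mapping GPU memory instead of keeping a separate copy. A lifecycle sketch, with an illustrative element type:

{
  StorageArrayBuffer<float4, 64> positions; /* Ctor: MEM_mallocN_aligned(64 * sizeof(float4), 16, name_). */
  positions.push_update();                  /* Upload the mirror to the GPUStorageBuf. */
}                                           /* Dtor: MEM_freeN(data_); StorageCommon frees ssbo_. */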
@@ -354,14 +351,10 @@ template<
     bool device_only = false>
 class StorageBuffer : public T, public detail::StorageCommon<T, 1, device_only> {
  public:
-  void push_update(void)
+  StorageBuffer()
   {
-    BLI_assert(!device_only);
-    /* TODO(fclem): Avoid a full copy. */
-    T &vert_data = *(T *)GPU_vertbuf_get_data(this->ssbo_);
-    vert_data = *this;
-
-    GPU_vertbuf_use(this->ssbo_);
+    /* TODO(@fclem): How could we map this? */
+    this->data_ = static_cast<T *>(this);
   }
 
   StorageBuffer<T> &operator=(const T &other)
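
The single-struct StorageBuffer goes one step further: because the wrapper inherits from T, data_ can simply point at the object itself, so push_update() uploads the members in place and the old full copy into vertex-buffer memory disappears. Sketch with an illustrative struct:

StorageBuffer<CullingData> culling_data;  /* CullingData stands for any shared struct T. */
culling_data.items_count = 0;             /* Members come from inheriting T. */
culling_data.push_update();               /* data_ == this, uploads the struct directly. */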