DRW: Pointcloud: Refactor drawing to remove instancing

This changes the attribute binding scheme to one similar to the curves
object's. Attributes are now buffer textures sampled per point.

The actual geometry is now rendered using an index buffer, which avoids
excessive vertex shader invocations.

The draw call is wrapped in a DRW function to reduce the complexity of
future changes.
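
For illustration, the new scheme looks roughly like this in the vertex
shader (a minimal sketch; `ptcloud_pos_rad_tx`, `attribute_tx` and the
divide-by-32 point-id trick come from the shaders changed below, the rest
is illustrative and not the exact committed code):

    uniform samplerBuffer ptcloud_pos_rad_tx; /* Per-point position (xyz) and radius (w). */
    uniform samplerBuffer attribute_tx;       /* Per-point attribute value, one vec4 per point. */

    int pointcloud_get_point_id()
    {
      /* The index buffer emits 4 triangles per point using vertex ids p * 32 + corner,
       * so a cheap division recovers the point index; no per-instance attributes needed. */
      return gl_VertexID / 32;
    }

    void main()
    {
      int id = pointcloud_get_point_id();
      vec4 pos_rad = texelFetch(ptcloud_pos_rad_tx, id);
      vec4 attr = texelFetch(attribute_tx, id); /* Would feed a varying in a real shader. */
      /* Expand pos_rad.xyz / pos_rad.w into the half-octahedron sprite and output
       * gl_Position; see common_pointcloud_lib.glsl below for the real version. */
      gl_Position = vec4(pos_rad.xyz, 1.0);
    }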
2022-10-25 10:43:47 +02:00
parent fb424db2b7
commit ffdb41a8bc
19 changed files with 530 additions and 228 deletions

View File

@@ -95,6 +95,7 @@ set(SRC
intern/draw_manager_text.cc
intern/draw_manager_texture.c
intern/draw_pbvh.cc
intern/draw_pointcloud.cc
intern/draw_select_buffer.c
intern/draw_shader.cc
intern/draw_texture_pool.cc

View File

@@ -30,7 +30,7 @@ void main()
vec3 world_pos = pos;
#elif defined(POINTCLOUD_SHADER)
pointcloud_get_pos_and_radius(pointPosition, pointRadius);
pointID = gl_VertexID;
pointID = pointcloud_get_point_id();
#else
vec3 world_pos = point_object_to_world(pos);
#endif

View File

@@ -37,7 +37,7 @@ void main()
vec3 world_pos = pos;
#elif defined(POINTCLOUD_SHADER)
pointcloud_get_pos_and_radius(pointPosition, pointRadius);
pointID = gl_VertexID;
pointID = pointcloud_get_point_id();
#else
vec3 world_pos = point_object_to_world(pos);
#endif

View File

@@ -138,6 +138,7 @@ void OVERLAY_outline_cache_init(OVERLAY_Data *vedata)
DRW_shgroup_uniform_block(grp, "globalsBlock", G_draw.block_ubo);
GPUShader *sh_curves = OVERLAY_shader_outline_prepass_curves();
pd->outlines_curves_grp = grp = DRW_shgroup_create(sh_curves, psl->outlines_prepass_ps);
DRW_shgroup_uniform_bool_copy(grp, "isTransform", (G.moving & G_TRANSFORM_OBJ) != 0);
DRW_shgroup_uniform_block(grp, "globalsBlock", G_draw.block_ubo);
@@ -282,6 +283,18 @@ static void OVERLAY_outline_curves(OVERLAY_PrivateData *pd, Object *ob)
DRW_shgroup_curves_create_sub(ob, shgroup, nullptr);
}
static void OVERLAY_outline_pointcloud(OVERLAY_PrivateData *pd, Object *ob)
{
if (pd->wireframe_mode) {
/* Looks bad in this case. Could be relaxed if we draw a
* wireframe of some sort in the future. */
return;
}
DRWShadingGroup *shgroup = pd->outlines_ptcloud_grp;
DRW_shgroup_pointcloud_create_sub(ob, shgroup, nullptr);
}
void OVERLAY_outline_cache_populate(OVERLAY_Data *vedata,
Object *ob,
OVERLAY_DupliData *dupli,
@@ -313,9 +326,8 @@ void OVERLAY_outline_cache_populate(OVERLAY_Data *vedata,
return;
}
if (ob->type == OB_POINTCLOUD && pd->wireframe_mode) {
/* Looks bad in this case. Could be relaxed if we draw a
* wireframe of some sort in the future. */
if (ob->type == OB_POINTCLOUD) {
OVERLAY_outline_pointcloud(pd, ob);
return;
}
@@ -338,18 +350,12 @@ void OVERLAY_outline_cache_populate(OVERLAY_Data *vedata,
}
if (geom) {
shgroup = (ob->type == OB_POINTCLOUD) ? pd->outlines_ptcloud_grp : pd->outlines_grp;
shgroup = pd->outlines_grp;
}
}
if (shgroup && geom) {
if (ob->type == OB_POINTCLOUD) {
/* Draw range to avoid drawcall batching messing up the instance attribute. */
DRW_shgroup_call_instance_range(shgroup, ob, geom, 0, 0);
}
else {
DRW_shgroup_call(shgroup, geom, ob);
}
DRW_shgroup_call(shgroup, geom, ob);
}
if (init_dupli) {

View File

@@ -81,11 +81,9 @@ static void populate_cache_for_instance(Object &object,
break;
}
case OB_POINTCLOUD: {
DRWShadingGroup *sub_grp = DRW_shgroup_create_sub(
pd.viewer_attribute_instance_pointcloud_grp);
DRWShadingGroup *sub_grp = DRW_shgroup_pointcloud_create_sub(
&object, pd.viewer_attribute_pointcloud_grp, nullptr);
DRW_shgroup_uniform_vec4_copy(sub_grp, "ucolor", color);
GPUBatch *batch = DRW_cache_pointcloud_surface_get(&object);
DRW_shgroup_call_instance_range(sub_grp, &object, batch, 0, 0);
break;
}
case OB_CURVES_LEGACY: {
@@ -123,9 +121,11 @@ static void populate_cache_for_geometry(Object &object,
case OB_POINTCLOUD: {
PointCloud *pointcloud = static_cast<PointCloud *>(object.data);
if (pointcloud->attributes().contains(".viewer")) {
GPUBatch *batch = DRW_cache_pointcloud_surface_viewer_attribute_get(&object);
DRW_shgroup_uniform_float_copy(pd.viewer_attribute_pointcloud_grp, "opacity", opacity);
DRW_shgroup_call_instance_range(pd.viewer_attribute_pointcloud_grp, &object, batch, 0, 0);
GPUVertBuf **vertbuf = DRW_pointcloud_evaluated_attribute(pointcloud, ".viewer");
DRWShadingGroup *grp = DRW_shgroup_pointcloud_create_sub(
&object, pd.viewer_attribute_pointcloud_grp, nullptr);
DRW_shgroup_uniform_float_copy(grp, "opacity", opacity);
DRW_shgroup_buffer_texture_ref(grp, "attribute_tx", vertbuf);
}
break;
}

View File

@@ -27,7 +27,7 @@ GPU_SHADER_CREATE_INFO(overlay_viewer_attribute_pointcloud)
.fragment_source("overlay_viewer_attribute_frag.glsl")
.fragment_out(0, Type::VEC4, "out_color")
.fragment_out(1, Type::VEC4, "lineOutput")
.vertex_in(3, Type::VEC4, "attribute_value")
.sampler(3, ImageType::FLOAT_BUFFER, "attribute_tx")
.vertex_out(overlay_viewer_attribute_iface)
.additional_info("overlay_viewer_attribute_common", "draw_pointcloud");

View File

@@ -6,5 +6,5 @@ void main()
{
vec3 world_pos = pointcloud_get_pos();
gl_Position = point_world_to_ndc(world_pos);
finalColor = attribute_value;
finalColor = pointcloud_get_customdata_vec4(attribute_tx);
}

View File

@@ -27,6 +27,7 @@
#include "DNA_mesh_types.h"
#include "DNA_modifier_types.h"
#include "DNA_node_types.h"
#include "DNA_pointcloud_types.h"
#include "ED_paint.h"
@@ -418,7 +419,7 @@ void workbench_cache_populate(void *ved, Object *ob)
return;
}
if (ELEM(ob->type, OB_MESH, OB_POINTCLOUD)) {
if (ob->type == OB_MESH) {
bool use_sculpt_pbvh, use_texpaint_mode, draw_shadow, has_transp_mat = false;
eV3DShadingColorType color_type = workbench_color_type_get(
wpd, ob, &use_sculpt_pbvh, &use_texpaint_mode, &draw_shadow);
@@ -442,6 +443,12 @@ void workbench_cache_populate(void *ved, Object *ob)
DRWShadingGroup *grp = workbench_material_hair_setup(wpd, ob, CURVES_MATERIAL_NR, color_type);
DRW_shgroup_curves_create_sub(ob, grp, NULL);
}
else if (ob->type == OB_POINTCLOUD) {
int color_type = workbench_color_type_get(wpd, ob, NULL, NULL, NULL);
DRWShadingGroup *grp = workbench_material_ptcloud_setup(
wpd, ob, POINTCLOUD_MATERIAL_NR, color_type);
DRW_shgroup_pointcloud_create_sub(ob, grp, NULL);
}
else if (ob->type == OB_VOLUME) {
if (wpd->shading.type != OB_WIRE) {
int color_type = workbench_color_type_get(wpd, ob, NULL, NULL, NULL);

View File

@@ -511,6 +511,11 @@ DRWShadingGroup *workbench_image_setup_ex(WORKBENCH_PrivateData *wpd,
#define workbench_image_hair_setup(wpd, ob, mat_nr, ima, iuser, interp) \
workbench_image_setup_ex(wpd, ob, mat_nr, ima, iuser, interp, WORKBENCH_DATATYPE_HAIR)
#define workbench_material_ptcloud_setup(wpd, ob, mat_nr, color_type) \
workbench_material_setup_ex(wpd, ob, mat_nr, color_type, WORKBENCH_DATATYPE_POINTCLOUD, 0)
#define workbench_image_ptcloud_setup(wpd, ob, mat_nr, ima, iuser, interp) \
workbench_image_setup_ex(wpd, ob, mat_nr, ima, iuser, interp, WORKBENCH_DATATYPE_POINTCLOUD)
/* workbench_data.c */
void workbench_private_data_alloc(WORKBENCH_StorageList *stl);

View File

@@ -899,8 +899,6 @@ GPUBatch *DRW_cache_object_surface_get(Object *ob)
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_surface_get(ob);
case OB_POINTCLOUD:
return DRW_cache_pointcloud_surface_get(ob);
default:
return NULL;
}
@@ -959,8 +957,6 @@ GPUBatch **DRW_cache_object_surface_material_get(struct Object *ob,
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
case OB_POINTCLOUD:
return DRW_cache_pointcloud_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
default:
return NULL;
}
@@ -3006,24 +3002,6 @@ GPUBatch *DRW_cache_lattice_vert_overlay_get(Object *ob)
/** \name PointCloud
* \{ */
GPUBatch *DRW_cache_pointcloud_get_dots(Object *object)
{
BLI_assert(object->type == OB_POINTCLOUD);
return DRW_pointcloud_batch_cache_get_dots(object);
}
GPUBatch *DRW_cache_pointcloud_surface_get(Object *object)
{
BLI_assert(object->type == OB_POINTCLOUD);
return DRW_pointcloud_batch_cache_get_surface(object);
}
GPUBatch *DRW_cache_pointcloud_surface_viewer_attribute_get(Object *object)
{
BLI_assert(object->type == OB_POINTCLOUD);
return DRW_pointcloud_batch_cache_get_surface_viewer_attribute(object);
}
/** \} */
/* -------------------------------------------------------------------- */
@@ -3308,6 +3286,9 @@ void drw_batch_cache_generate_requested(Object *ob)
case OB_CURVES:
DRW_curves_batch_cache_create_requested(ob);
break;
case OB_POINTCLOUD:
DRW_pointcloud_batch_cache_create_requested(ob);
break;
/* TODO: all cases. */
default:
break;
@@ -3358,7 +3339,9 @@ void DRW_batch_cache_free_old(Object *ob, int ctime)
case OB_CURVES:
DRW_curves_batch_cache_free_old((Curves *)ob->data, ctime);
break;
/* TODO: all cases. */
case OB_POINTCLOUD:
DRW_pointcloud_batch_cache_free_old((PointCloud *)ob->data, ctime);
break;
default:
break;
}

View File

@@ -225,12 +225,6 @@ struct GPUBatch **DRW_cache_curves_surface_shaded_get(struct Object *ob,
struct GPUBatch *DRW_cache_curves_face_wireframe_get(struct Object *ob);
struct GPUBatch *DRW_cache_curves_edge_detection_get(struct Object *ob, bool *r_is_manifold);
/* PointCloud */
struct GPUBatch *DRW_cache_pointcloud_get_dots(struct Object *obj);
struct GPUBatch *DRW_cache_pointcloud_surface_get(struct Object *obj);
struct GPUBatch *DRW_cache_pointcloud_surface_viewer_attribute_get(struct Object *obj);
/* Volume */
typedef struct DRWVolumeGrid {

View File

@@ -80,6 +80,7 @@ void DRW_batch_cache_free_old(struct Object *ob, int ctime);
*/
void DRW_mesh_batch_cache_free_old(struct Mesh *me, int ctime);
void DRW_curves_batch_cache_free_old(struct Curves *curves, int ctime);
void DRW_pointcloud_batch_cache_free_old(struct PointCloud *pointcloud, int ctime);
/** \} */
@@ -147,12 +148,11 @@ void DRW_curves_batch_cache_create_requested(struct Object *ob);
int DRW_pointcloud_material_count_get(struct PointCloud *pointcloud);
struct GPUVertBuf **DRW_pointcloud_evaluated_attribute(struct PointCloud *pointcloud,
const char *name);
struct GPUBatch *DRW_pointcloud_batch_cache_get_dots(struct Object *ob);
struct GPUBatch *DRW_pointcloud_batch_cache_get_surface(struct Object *ob);
struct GPUBatch *DRW_pointcloud_batch_cache_get_surface_viewer_attribute(struct Object *ob);
struct GPUBatch **DRW_cache_pointcloud_surface_shaded_get(struct Object *ob,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
void DRW_pointcloud_batch_cache_create_requested(struct Object *ob);
/** \} */

View File

@@ -23,30 +23,64 @@
#include "BKE_pointcloud.h"
#include "GPU_batch.h"
#include "GPU_material.h"
#include "draw_cache_impl.h" /* own include */
#include "draw_attributes.h"
#include "draw_cache_impl.h"
#include "draw_cache_inline.h"
#include "draw_pointcloud_private.hh" /* own include */
/* ---------------------------------------------------------------------- */
/* PointCloud GPUBatch Cache */
using namespace blender;
/** \} */
/* -------------------------------------------------------------------- */
/** \name GPUBatch cache management
* \{ */
struct PointCloudBatchCache {
GPUVertBuf *pos; /* Position and radius. */
GPUVertBuf *geom; /* Instanced geometry for each point in the cloud (small sphere). */
GPUVertBuf *attr_viewer;
GPUIndexBuf *geom_indices;
/* Dot primitive types. */
GPUBatch *dots;
/* Triangle primitive types. */
GPUBatch *surface;
GPUBatch **surface_per_mat;
GPUBatch *surface_viewer_attribute;
/* Triangles indices to draw the points. */
GPUIndexBuf *geom_indices;
/* Position and radius. */
GPUVertBuf *pos_rad;
/* Active attribute in 3D view. */
GPUVertBuf *attr_viewer;
/* Requested attributes */
GPUVertBuf *attributes_buf[GPU_MAX_ATTR];
/** Attributes currently being drawn or about to be drawn. */
DRW_Attributes attr_used;
/**
* Attributes that were used at some point. This is used for garbage collection, to remove
* attributes that are not used in shaders anymore due to user edits.
*/
DRW_Attributes attr_used_over_time;
/**
* The last time in seconds that the `attr_used` and `attr_used_over_time` were exactly the same.
* If the delta between this time and the current scene time is greater than the timeout set in
* user preferences (`U.vbotimeout`) then garbage collection is performed.
*/
int last_attr_matching_time;
/* settings to determine if cache is invalid */
bool is_dirty;
int mat_len;
};
/* GPUBatch cache management. */
/**
* The draw cache extraction is currently not multi-threaded for multiple objects, but if it was,
* some locking would be necessary because multiple objects can use the same object data with
* different materials, etc. This is a placeholder to make multi-threading easier in the future.
*/
std::mutex render_mutex;
};
static PointCloudBatchCache *pointcloud_batch_cache_get(PointCloud &pointcloud)
{
@@ -71,7 +105,7 @@ static void pointcloud_batch_cache_init(PointCloud &pointcloud)
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
if (!cache) {
cache = MEM_cnew<PointCloudBatchCache>(__func__);
cache = MEM_new<PointCloudBatchCache>(__func__);
pointcloud.batch_cache = cache;
}
else {
@@ -100,6 +134,15 @@ void DRW_pointcloud_batch_cache_dirty_tag(PointCloud *pointcloud, int mode)
}
}
static void pointcloud_discard_attributes(PointCloudBatchCache &cache)
{
for (const int j : IndexRange(GPU_MAX_ATTR)) {
GPU_VERTBUF_DISCARD_SAFE(cache.attributes_buf[j]);
}
drw_attributes_clear(&cache.attr_used);
}
static void pointcloud_batch_cache_clear(PointCloud &pointcloud)
{
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
@@ -109,8 +152,7 @@ static void pointcloud_batch_cache_clear(PointCloud &pointcloud)
GPU_BATCH_DISCARD_SAFE(cache->dots);
GPU_BATCH_DISCARD_SAFE(cache->surface);
GPU_VERTBUF_DISCARD_SAFE(cache->pos);
GPU_VERTBUF_DISCARD_SAFE(cache->geom);
GPU_VERTBUF_DISCARD_SAFE(cache->pos_rad);
GPU_VERTBUF_DISCARD_SAFE(cache->attr_viewer);
GPU_INDEXBUF_DISCARD_SAFE(cache->geom_indices);
@@ -119,8 +161,9 @@ static void pointcloud_batch_cache_clear(PointCloud &pointcloud)
GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
}
}
GPU_BATCH_DISCARD_SAFE(cache->surface_viewer_attribute);
MEM_SAFE_FREE(cache->surface_per_mat);
pointcloud_discard_attributes(*cache);
}
void DRW_pointcloud_batch_cache_validate(PointCloud *pointcloud)
@@ -137,32 +180,85 @@ void DRW_pointcloud_batch_cache_free(PointCloud *pointcloud)
MEM_SAFE_FREE(pointcloud->batch_cache);
}
static void pointcloud_batch_cache_ensure_pos(const PointCloud &pointcloud,
PointCloudBatchCache &cache)
void DRW_pointcloud_batch_cache_free_old(PointCloud *pointcloud, int ctime)
{
using namespace blender;
if (cache.pos != nullptr) {
PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
if (!cache) {
return;
}
bool do_discard = false;
if (drw_attributes_overlap(&cache->attr_used_over_time, &cache->attr_used)) {
cache->last_attr_matching_time = ctime;
}
if (ctime - cache->last_attr_matching_time > U.vbotimeout) {
do_discard = true;
}
drw_attributes_clear(&cache->attr_used_over_time);
if (do_discard) {
pointcloud_discard_attributes(*cache);
}
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name PointCloud extraction
* \{ */
static const uint half_octahedron_tris[4][3] = {
{0, 1, 2},
{0, 2, 3},
{0, 3, 4},
{0, 4, 1},
};
static void pointcloud_extract_indices(const PointCloud &pointcloud, PointCloudBatchCache &cache)
{
/** \note: Avoid modulo by non-power-of-two in shader. */
uint32_t vertid_max = pointcloud.totpoint * 32;
uint32_t index_len = pointcloud.totpoint * ARRAY_SIZE(half_octahedron_tris);
GPUIndexBufBuilder builder;
GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, index_len, vertid_max);
for (int p = 0; p < pointcloud.totpoint; p++) {
for (int i = 0; i < ARRAY_SIZE(half_octahedron_tris); i++) {
GPU_indexbuf_add_tri_verts(&builder,
half_octahedron_tris[i][0] + p * 32,
half_octahedron_tris[i][1] + p * 32,
half_octahedron_tris[i][2] + p * 32);
}
}
GPU_indexbuf_build_in_place(&builder, cache.geom_indices);
}
static void pointcloud_extract_position_and_radius(const PointCloud &pointcloud,
PointCloudBatchCache &cache)
{
using namespace blender;
const bke::AttributeAccessor attributes = pointcloud.attributes();
const VArraySpan<float3> positions = attributes.lookup<float3>("position", ATTR_DOMAIN_POINT);
const VArray<float> radii = attributes.lookup<float>("radius", ATTR_DOMAIN_POINT);
/* From the opengl wiki:
* Note that size does not have to exactly match the size used by the vertex shader. If the
* vertex shader has fewer components than the attribute provides, then the extras are ignored.
* If the vertex shader has more components than the array provides, the extras are given
* values from the vector (0, 0, 0, 1) for the missing XYZW components. */
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
GPUUsageType usage_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
GPU_vertbuf_init_with_format_ex(cache.pos_rad, &format, usage_flag);
GPU_vertbuf_data_alloc(cache.pos_rad, positions.size());
MutableSpan<float4> vbo_data{static_cast<float4 *>(GPU_vertbuf_get_data(cache.pos_rad)),
pointcloud.totpoint};
if (radii) {
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
cache.pos = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(cache.pos, positions.size());
const VArraySpan<float> radii_span(radii);
MutableSpan<float4> vbo_data{static_cast<float4 *>(GPU_vertbuf_get_data(cache.pos)),
pointcloud.totpoint};
threading::parallel_for(vbo_data.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
vbo_data[i].x = positions[i].x;
@@ -174,156 +270,183 @@ static void pointcloud_batch_cache_ensure_pos(const PointCloud &pointcloud,
});
}
else {
static GPUVertFormat format = {0};
static uint pos;
if (format.attr_len == 0) {
pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
cache.pos = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(cache.pos, positions.size());
GPU_vertbuf_attr_fill(cache.pos, pos, positions.data());
threading::parallel_for(vbo_data.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
vbo_data[i].x = positions[i].x;
vbo_data[i].y = positions[i].y;
vbo_data[i].z = positions[i].z;
vbo_data[i].w = 1.0f;
}
});
}
}
static const float half_octahedron_normals[5][3] = {
{0.0f, 0.0f, 1.0f},
{1.0f, 0.0f, 0.0f},
{0.0f, 1.0f, 0.0f},
{-1.0f, 0.0f, 0.0f},
{0.0f, -1.0f, 0.0f},
};
static const uint half_octahedron_tris[4][3] = {
{0, 1, 2},
{0, 2, 3},
{0, 3, 4},
{0, 4, 1},
};
static void pointcloud_batch_cache_ensure_geom(PointCloudBatchCache &cache)
{
if (cache.geom != nullptr) {
return;
}
static GPUVertFormat format = {0};
static uint pos;
if (format.attr_len == 0) {
pos = GPU_vertformat_attr_add(&format, "pos_inst", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
GPU_vertformat_alias_add(&format, "nor");
}
cache.geom = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(cache.geom, ARRAY_SIZE(half_octahedron_normals));
GPU_vertbuf_attr_fill(cache.geom, pos, half_octahedron_normals);
GPUIndexBufBuilder builder;
GPU_indexbuf_init(&builder,
GPU_PRIM_TRIS,
ARRAY_SIZE(half_octahedron_tris),
ARRAY_SIZE(half_octahedron_normals));
for (int i = 0; i < ARRAY_SIZE(half_octahedron_tris); i++) {
GPU_indexbuf_add_tri_verts(&builder, UNPACK3(half_octahedron_tris[i]));
}
cache.geom_indices = GPU_indexbuf_build(&builder);
}
static void pointcloud_batch_cache_ensure_attribute_overlay(const PointCloud &pointcloud,
PointCloudBatchCache &cache)
static void pointcloud_extract_attribute(const PointCloud &pointcloud,
PointCloudBatchCache &cache,
const DRW_AttributeRequest &request,
int index)
{
using namespace blender;
if (cache.attr_viewer != nullptr) {
return;
}
GPUVertBuf *&attr_buf = cache.attributes_buf[index];
const bke::AttributeAccessor attributes = pointcloud.attributes();
const VArray<ColorGeometry4f> colors = attributes.lookup_or_default<ColorGeometry4f>(
".viewer", ATTR_DOMAIN_POINT, {1.0f, 0.0f, 1.0f, 1.0f});
/* TODO(@kevindietrich): float4 is used for scalar attributes as the implicit conversion done
* by OpenGL to vec4 for a scalar `s` will produce a `vec4(s, 0, 0, 1)`. However, following
* the Blender convention, it should be `vec4(s, s, s, 1)`. This could be resolved using a
* similar texture state swizzle to map the attribute correctly as for volume attributes, so we
* can control the conversion ourselves. */
VArray<ColorGeometry4f> attribute = attributes.lookup_or_default<ColorGeometry4f>(
request.attribute_name, request.domain, {0.0f, 0.0f, 0.0f, 1.0f});
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "attribute_value", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertformat_attr_add(&format, "attr", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
cache.attr_viewer = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(cache.attr_viewer, pointcloud.totpoint);
GPUUsageType usage_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
GPU_vertbuf_init_with_format_ex(attr_buf, &format, usage_flag);
GPU_vertbuf_data_alloc(attr_buf, pointcloud.totpoint);
MutableSpan<ColorGeometry4f> vbo_data{
static_cast<ColorGeometry4f *>(GPU_vertbuf_get_data(cache.attr_viewer)),
pointcloud.totpoint};
colors.materialize(vbo_data);
static_cast<ColorGeometry4f *>(GPU_vertbuf_get_data(attr_buf)), pointcloud.totpoint};
attribute.materialize(vbo_data);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Private API
* \{ */
GPUVertBuf *pointcloud_position_and_radius_get(PointCloud *pointcloud)
{
PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
DRW_vbo_request(nullptr, &cache->pos_rad);
return cache->pos_rad;
}
GPUBatch **pointcloud_surface_shaded_get(PointCloud *pointcloud,
GPUMaterial **gpu_materials,
int mat_len)
{
PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
DRW_Attributes attrs_needed;
drw_attributes_clear(&attrs_needed);
for (GPUMaterial *gpu_material : Span<GPUMaterial *>(gpu_materials, mat_len)) {
ListBase gpu_attrs = GPU_material_attributes(gpu_material);
LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
const char *name = gpu_attr->name;
int layer_index;
eCustomDataType type;
eAttrDomain domain = ATTR_DOMAIN_POINT;
if (!drw_custom_data_match_attribute(&pointcloud->pdata, name, &layer_index, &type)) {
continue;
}
drw_attributes_add_request(&attrs_needed, name, type, layer_index, domain);
}
}
if (!drw_attributes_overlap(&cache->attr_used, &attrs_needed)) {
/* Some new attributes have been added, free all and start over. */
for (const int i : IndexRange(GPU_MAX_ATTR)) {
GPU_VERTBUF_DISCARD_SAFE(cache->attributes_buf[i]);
}
drw_attributes_merge(&cache->attr_used, &attrs_needed, cache->render_mutex);
}
drw_attributes_merge(&cache->attr_used_over_time, &attrs_needed, cache->render_mutex);
DRW_batch_request(&cache->surface_per_mat[0]);
return cache->surface_per_mat;
}
GPUBatch *pointcloud_surface_get(PointCloud *pointcloud)
{
PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
return DRW_batch_request(&cache->surface);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name API
* \{ */
GPUBatch *DRW_pointcloud_batch_cache_get_dots(Object *ob)
{
PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
if (cache->dots == nullptr) {
pointcloud_batch_cache_ensure_pos(pointcloud, *cache);
cache->dots = GPU_batch_create(GPU_PRIM_POINTS, cache->pos, nullptr);
}
return cache->dots;
return DRW_batch_request(&cache->dots);
}
GPUBatch *DRW_pointcloud_batch_cache_get_surface(Object *ob)
GPUVertBuf **DRW_pointcloud_evaluated_attribute(PointCloud *pointcloud, const char *name)
{
PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
PointCloudBatchCache &cache = *pointcloud_batch_cache_get(*pointcloud);
if (cache->surface == nullptr) {
pointcloud_batch_cache_ensure_pos(pointcloud, *cache);
pointcloud_batch_cache_ensure_geom(*cache);
cache->surface = GPU_batch_create(GPU_PRIM_TRIS, cache->geom, cache->geom_indices);
GPU_batch_instbuf_add_ex(cache->surface, cache->pos, false);
int layer_index;
eCustomDataType type;
eAttrDomain domain = ATTR_DOMAIN_POINT;
if (drw_custom_data_match_attribute(&pointcloud->pdata, name, &layer_index, &type)) {
DRW_Attributes attributes{};
drw_attributes_add_request(&attributes, name, type, layer_index, domain);
drw_attributes_merge(&cache.attr_used, &attributes, cache.render_mutex);
}
return cache->surface;
}
GPUBatch *DRW_pointcloud_batch_cache_get_surface_viewer_attribute(Object *ob)
{
PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
if (cache->surface_viewer_attribute == nullptr) {
pointcloud_batch_cache_ensure_pos(pointcloud, *cache);
pointcloud_batch_cache_ensure_geom(*cache);
pointcloud_batch_cache_ensure_attribute_overlay(pointcloud, *cache);
cache->surface_viewer_attribute = GPU_batch_create(
GPU_PRIM_TRIS, cache->geom, cache->geom_indices);
GPU_batch_instbuf_add_ex(cache->surface_viewer_attribute, cache->attr_viewer, false);
GPU_batch_instbuf_add_ex(cache->surface_viewer_attribute, cache->pos, false);
int request_i = -1;
for (const int i : IndexRange(cache.attr_used.num_requests)) {
if (STREQ(cache.attr_used.requests[i].attribute_name, name)) {
request_i = i;
break;
}
}
return cache->surface_viewer_attribute;
}
GPUBatch **DRW_cache_pointcloud_surface_shaded_get(Object *ob,
struct GPUMaterial ** /*gpumat_array*/,
uint gpumat_array_len)
{
PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
BLI_assert(cache->mat_len == gpumat_array_len);
UNUSED_VARS(gpumat_array_len);
if (cache->surface_per_mat[0] == nullptr) {
pointcloud_batch_cache_ensure_pos(pointcloud, *cache);
pointcloud_batch_cache_ensure_geom(*cache);
cache->surface_per_mat[0] = GPU_batch_create(GPU_PRIM_TRIS, cache->geom, cache->geom_indices);
GPU_batch_instbuf_add_ex(cache->surface_per_mat[0], cache->pos, false);
if (request_i == -1) {
return nullptr;
}
return cache->surface_per_mat;
return &cache.attributes_buf[request_i];
}
int DRW_pointcloud_material_count_get(PointCloud *pointcloud)
{
return max_ii(1, pointcloud->totcol);
}
void DRW_pointcloud_batch_cache_create_requested(Object *ob)
{
PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);
PointCloudBatchCache &cache = *pointcloud_batch_cache_get(*pointcloud);
if (DRW_batch_requested(cache.dots, GPU_PRIM_POINTS)) {
DRW_vbo_request(cache.dots, &cache.pos_rad);
}
if (DRW_batch_requested(cache.surface, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache.surface, &cache.geom_indices);
DRW_vbo_request(cache.surface, &cache.pos_rad);
}
for (int i = 0; i < cache.mat_len; i++) {
if (DRW_batch_requested(cache.surface_per_mat[i], GPU_PRIM_TRIS)) {
/* TODO(fclem): Per material ranges. */
DRW_ibo_request(cache.surface_per_mat[i], &cache.geom_indices);
}
}
for (int j = 0; j < cache.attr_used.num_requests; j++) {
DRW_vbo_request(nullptr, &cache.attributes_buf[j]);
if (DRW_vbo_requested(cache.attributes_buf[j])) {
pointcloud_extract_attribute(*pointcloud, cache, cache.attr_used.requests[j], j);
}
}
if (DRW_ibo_requested(cache.geom_indices)) {
pointcloud_extract_indices(*pointcloud, cache);
}
if (DRW_vbo_requested(cache.pos_rad)) {
pointcloud_extract_position_and_radius(*pointcloud, cache);
}
}
/** \} */

View File

@@ -88,6 +88,14 @@ void DRW_curves_ubos_pool_free(struct CurvesUniformBufPool *pool);
void DRW_curves_update(void);
void DRW_curves_free(void);
/* draw_pointcloud.cc */
struct DRWShadingGroup *DRW_shgroup_pointcloud_create_sub(struct Object *object,
struct DRWShadingGroup *shgrp_parent,
struct GPUMaterial *gpu_material);
void DRW_pointcloud_init(void);
void DRW_pointcloud_free(void);
/* draw_volume.cc */
/**

View File

@@ -1690,6 +1690,7 @@ void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph,
DRW_globals_update();
drw_debug_init();
DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2053,6 +2054,7 @@ void DRW_render_object_iter(
void (*callback)(void *vedata, Object *ob, RenderEngine *engine, struct Depsgraph *depsgraph))
{
const DRWContextState *draw_ctx = DRW_context_state_get();
DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2113,6 +2115,7 @@ void DRW_custom_pipeline(DrawEngineType *draw_engine_type,
drw_manager_init(&DST, NULL, NULL);
DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2148,6 +2151,7 @@ void DRW_cache_restart(void)
DST.buffer_finish_called = false;
DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2473,6 +2477,7 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
/* Init engines */
drw_engines_init();
DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2654,6 +2659,7 @@ void DRW_draw_depth_loop(struct Depsgraph *depsgraph,
/* Init engines */
drw_engines_init();
DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -3068,6 +3074,7 @@ void DRW_engines_free(void)
GPU_FRAMEBUFFER_FREE_SAFE(g_select_buffer.framebuffer_depth_only);
DRW_shaders_free();
DRW_pointcloud_free();
DRW_curves_free();
DRW_volume_free();
DRW_shape_cache_free();

View File

@@ -0,0 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2017 Blender Foundation. All rights reserved. */
/** \file
* \ingroup draw
*
* \brief Contains procedural GPU hair drawing methods.
*/
#include "BLI_string_utils.h"
#include "BLI_utildefines.h"
#include "DNA_customdata_types.h"
#include "DNA_pointcloud_types.h"
#include "BKE_curves.hh"
#include "BKE_geometry_set.hh"
#include "GPU_batch.h"
#include "GPU_capabilities.h"
#include "GPU_compute.h"
#include "GPU_material.h"
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "GPU_vertex_buffer.h"
#include "DRW_gpu_wrapper.hh"
#include "DRW_render.h"
#include "draw_attributes.h"
#include "draw_cache_impl.h"
#include "draw_common.h"
#include "draw_manager.h"
#include "draw_pointcloud_private.hh"
static GPUVertBuf *g_dummy_vbo = nullptr;
void DRW_pointcloud_init()
{
if (g_dummy_vbo == nullptr) {
/* initialize vertex format */
GPUVertFormat format = {0};
uint dummy_id = GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
g_dummy_vbo = GPU_vertbuf_create_with_format_ex(
&format, GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
const float vert[4] = {0.0f, 0.0f, 0.0f, 0.0f};
GPU_vertbuf_data_alloc(g_dummy_vbo, 1);
GPU_vertbuf_attr_fill(g_dummy_vbo, dummy_id, vert);
}
}
DRWShadingGroup *DRW_shgroup_pointcloud_create_sub(Object *object,
DRWShadingGroup *shgrp_parent,
GPUMaterial *gpu_material)
{
PointCloud &pointcloud = *static_cast<PointCloud *>(object->data);
DRWShadingGroup *shgrp = DRW_shgroup_create_sub(shgrp_parent);
/* Fix issue with certain driver not drawing anything if there is no texture bound to
* "ac", "au", "u" or "c". */
DRW_shgroup_buffer_texture(shgrp, "u", g_dummy_vbo);
DRW_shgroup_buffer_texture(shgrp, "au", g_dummy_vbo);
DRW_shgroup_buffer_texture(shgrp, "c", g_dummy_vbo);
DRW_shgroup_buffer_texture(shgrp, "ac", g_dummy_vbo);
GPUVertBuf *pos_rad_buf = pointcloud_position_and_radius_get(&pointcloud);
DRW_shgroup_buffer_texture(shgrp, "ptcloud_pos_rad_tx", pos_rad_buf);
if (gpu_material != nullptr) {
// const DRW_Attributes &attrs = cache->attr_used;
// for (int i = 0; i < attrs.num_requests; i++) {
// const DRW_AttributeRequest &request = attrs.requests[i];
// char sampler_name[32];
// /* \note reusing curve attribute function. */
// drw_curves_get_attribute_sampler_name(request.attribute_name, sampler_name);
// GPUTexture *attribute_buf = DRW_pointcloud_evaluated_attribute(&pointcloud);
// if (!cache->attributes_tex[i]) {
// continue;
// }
// DRW_shgroup_buffer_texture_ref(shgrp, sampler_name, attribute_buf);
// }
/* Only single material supported for now. */
GPUBatch **geom = pointcloud_surface_shaded_get(&pointcloud, &gpu_material, 1);
DRW_shgroup_call(shgrp, geom[0], object);
}
else {
GPUBatch *geom = pointcloud_surface_get(&pointcloud);
DRW_shgroup_call(shgrp, geom, object);
}
return shgrp;
}
void DRW_pointcloud_free()
{
GPU_VERTBUF_DISCARD_SAFE(g_dummy_vbo);
}

View File

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation. All rights reserved. */
/** \file
* \ingroup draw
*/
#pragma once
struct PointCloud;
struct GPUBatch;
struct GPUVertBuf;
struct GPUMaterial;
GPUVertBuf *pointcloud_position_and_radius_get(PointCloud *pointcloud);
GPUBatch **pointcloud_surface_shaded_get(PointCloud *pointcloud,
GPUMaterial **gpu_materials,
int mat_len);
GPUBatch *pointcloud_surface_get(PointCloud *pointcloud);

View File

@@ -2,16 +2,10 @@
/* NOTE: To be used with UNIFORM_RESOURCE_ID and INSTANCED_ATTR as define. */
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#ifndef DRW_SHADER_SHARED_H
in vec4 pos; /* Position and radius. */
/* ---- Instanced attribs ---- */
in vec3 pos_inst;
in vec3 nor;
#endif
int pointcloud_get_point_id()
{
return gl_VertexID / 32;
}
mat3 pointcloud_get_facing_matrix(vec3 p)
{
@@ -25,8 +19,10 @@ mat3 pointcloud_get_facing_matrix(vec3 p)
/* Returns world center position and radius. */
void pointcloud_get_pos_and_radius(out vec3 outpos, out float outradius)
{
outpos = point_object_to_world(pos.xyz);
outradius = dot(abs(mat3(ModelMatrix) * pos.www), vec3(1.0 / 3.0));
int id = pointcloud_get_point_id();
vec4 pos_rad = texelFetch(ptcloud_pos_rad_tx, id);
outpos = point_object_to_world(pos_rad.xyz);
outradius = dot(abs(mat3(ModelMatrix) * pos_rad.www), vec3(1.0 / 3.0));
}
/* Return world position and normal. */
@@ -38,15 +34,67 @@ void pointcloud_get_pos_and_nor(out vec3 outpos, out vec3 outnor)
mat3 facing_mat = pointcloud_get_facing_matrix(p);
/** \note: Avoid modulo by non-power-of-two in shader. See Index buffer setup. */
int vert_id = gl_VertexID % 32;
vec3 pos_inst = vec3(0.0);
switch (vert_id) {
case 0:
pos_inst.z = 1.0;
break;
case 1:
pos_inst.x = 1.0;
break;
case 2:
pos_inst.y = 1.0;
break;
case 3:
pos_inst.x = -1.0;
break;
case 4:
pos_inst.y = -1.0;
break;
}
/* TODO(fclem): remove multiplication here. Here only for keeping the size correct for now. */
radius *= 0.01;
outpos = p + (facing_mat * pos_inst) * radius;
outnor = facing_mat * nor;
outnor = facing_mat * pos_inst;
outpos = p + outnor * radius;
}
vec3 pointcloud_get_pos(void)
vec3 pointcloud_get_pos()
{
vec3 outpos, outnor;
pointcloud_get_pos_and_nor(outpos, outnor);
return outpos;
}
float pointcloud_get_customdata_float(const samplerBuffer cd_buf)
{
int id = pointcloud_get_point_id();
return texelFetch(cd_buf, id).r;
}
vec2 pointcloud_get_customdata_vec2(const samplerBuffer cd_buf)
{
int id = pointcloud_get_point_id();
return texelFetch(cd_buf, id).rg;
}
vec3 pointcloud_get_customdata_vec3(const samplerBuffer cd_buf)
{
int id = pointcloud_get_point_id();
return texelFetch(cd_buf, id).rgb;
}
vec4 pointcloud_get_customdata_vec4(const samplerBuffer cd_buf)
{
int id = pointcloud_get_point_id();
return texelFetch(cd_buf, id).rgba;
}
vec2 pointcloud_get_barycentric(void)
{
/* TODO: To be implemented. */
return vec2(0.0);
}

View File

@@ -112,9 +112,7 @@ GPU_SHADER_CREATE_INFO(draw_hair)
.additional_info("draw_modelmat", "draw_resource_id");
GPU_SHADER_CREATE_INFO(draw_pointcloud)
.vertex_in(0, Type::VEC4, "pos")
.vertex_in(1, Type::VEC3, "pos_inst")
.vertex_in(2, Type::VEC3, "nor")
.sampler(0, ImageType::FLOAT_BUFFER, "ptcloud_pos_rad_tx", Frequency::BATCH)
.additional_info("draw_modelmat_instanced_attr", "draw_resource_id_uniform");
GPU_SHADER_CREATE_INFO(draw_volume).additional_info("draw_modelmat", "draw_resource_id_uniform");