WIP: Volume grid attribute support in geometry nodes #110044

Closed
Lukas Tönne wants to merge 130 commits from LukasTonne/blender:geometry-nodes-flip into main

31 changed files with 1538 additions and 717 deletions
Showing only changes of commit ce3fae522a

View File

@ -161,9 +161,9 @@ struct AttributeInitShared : public AttributeInit {
* the newly created attribute.
*/
struct AttributeInitGrid : public AttributeInit {
volume::GGrid grid;
GVGrid grid;
AttributeInitGrid(volume::GGrid grid) : AttributeInit(Type::Grid), grid(std::move(grid)) {}
AttributeInitGrid(GVGrid grid) : AttributeInit(Type::Grid), grid(std::move(grid)) {}
};
/**
@ -172,10 +172,9 @@ struct AttributeInitGrid : public AttributeInit {
* preferable to move data directly to the created attribute to avoid a new allocation and a copy.
*/
struct AttributeInitMoveGrid : public AttributeInit {
volume::GMutableGrid grid;
GVMutableGrid grid;
AttributeInitMoveGrid(volume::GMutableGrid grid)
: AttributeInit(Type::MoveGrid), grid(std::move(grid))
AttributeInitMoveGrid(GVMutableGrid grid) : AttributeInit(Type::MoveGrid), grid(std::move(grid))
{
}
};
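For orientation, a rough usage sketch of the move initializer (hypothetical call site: `attributes` stands for a bke::MutableAttributeAccessor, and the add() overload and `domain` value used for grid attributes are assumptions; volume::GVMutableGrid::create() follows code later in this diff):

    /* Build a grid up front, then hand it to the attribute system without a copy. */
    blender::volume::GVMutableGrid grid = blender::volume::GVMutableGrid::create(
        blender::CPPType::get<float>());
    /* ... fill the grid ... */
    attributes.add("density", domain, CD_PROP_FLOAT,
                   blender::bke::AttributeInitMoveGrid(std::move(grid)));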
@ -185,10 +184,10 @@ struct AttributeInitMoveGrid : public AttributeInit {
* The sharing info has ownership of the provided grid.
*/
struct AttributeInitSharedGrid : public AttributeInit {
const volume::GMutableGrid grid;
const GVMutableGrid grid;
const ImplicitSharingInfo *sharing_info = nullptr;
AttributeInitSharedGrid(const volume::GMutableGrid grid, const ImplicitSharingInfo &sharing_info)
AttributeInitSharedGrid(const GVMutableGrid grid, const ImplicitSharingInfo &sharing_info)
: AttributeInit(Type::SharedGrid), grid(grid), sharing_info(&sharing_info)
{
}
@ -441,7 +440,7 @@ struct AttributeAccessorFunctions {
const AttributeIDRef &attribute_id);
bool (*domain_supported)(const void *owner, eAttrDomain domain);
int (*domain_size)(const void *owner, eAttrDomain domain);
volume::GGrid (*domain_grid_mask)(const void *owner, eAttrDomain domain, int main_grid);
GVGrid (*domain_grid_mask)(const void *owner, eAttrDomain domain, int main_grid);
bool (*is_builtin)(const void *owner, const AttributeIDRef &attribute_id);
GAttributeReader (*lookup)(const void *owner, const AttributeIDRef &attribute_id);
GAttributeGridReader (*lookup_grid)(const void *owner, const AttributeIDRef &attribute_id);
@ -527,7 +526,7 @@ class AttributeAccessor {
/**
* \return Topology grid that defines the default extent of a volume.
*/
volume::GGrid domain_grid_mask(const eAttrDomain domain, int main_grid) const
GVGrid domain_grid_mask(const eAttrDomain domain, int main_grid) const
{
return fn_->domain_grid_mask(owner_, domain, main_grid);
}

View File

@ -167,11 +167,11 @@ class GeometryFieldInput : public fn::FieldInput {
ResourceScope &scope) const override;
virtual GVArray get_varray_for_context(const GeometryFieldContext &context,
const IndexMask &mask) const = 0;
virtual volume::GGrid get_volume_grid_for_context(const fn::FieldContext &context,
const volume::GGrid &mask,
ResourceScope &scope) const override;
virtual volume::GGrid get_volume_grid_for_context(const GeometryFieldContext &context,
const volume::GGrid &mask) const = 0;
virtual volume::GVGrid get_volume_grid_for_context(const fn::FieldContext &context,
const volume::GVGrid &mask,
ResourceScope &scope) const override;
virtual volume::GVGrid get_volume_grid_for_context(const GeometryFieldContext &context,
const volume::GVGrid &mask) const = 0;
virtual std::optional<eAttrDomain> preferred_domain(const GeometryComponent &component) const;
};
@ -245,7 +245,7 @@ class InstancesFieldInput : public fn::FieldInput {
class VolumeFieldInput : public fn::FieldInput {
public:
using GGrid = volume::GGrid;
using GGrid = volume::GVGrid;
using fn::FieldInput::FieldInput;
GVArray get_varray_for_context(const fn::FieldContext & /*context*/,
@ -320,7 +320,7 @@ class AttributeExistsFieldInput final : public bke::GeometryFieldInput {
GVArray get_varray_for_context(const bke::GeometryFieldContext &context,
const IndexMask &mask) const final;
GGrid get_volume_grid_for_context(const GeometryFieldContext &context,
const volume::GGrid &mask) const final;
const volume::GVGrid &mask) const final;
};
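To illustrate the grid-aware interface declared above, a minimal hypothetical field input (not part of this patch) that mirrors the AttributeExistsFieldInput grid path shown later in this diff:

    class AlwaysTrueFieldInput final : public bke::GeometryFieldInput {
     public:
      AlwaysTrueFieldInput() : GeometryFieldInput(CPPType::get<bool>(), "Always True") {}

      GVArray get_varray_for_context(const bke::GeometryFieldContext &context,
                                     const IndexMask & /*mask*/) const final
      {
        const int domain_size = context.attributes()->domain_size(context.domain());
        return VArray<bool>::ForSingle(true, domain_size);
      }
      volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
                                                 const volume::GVGrid & /*mask*/) const final
      {
        /* A single-value boolean grid, as in AttributeExistsFieldInput. */
        return volume::VMutableGrid<bool>::create(true);
      }
    };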
class IDAttributeFieldInput : public GeometryFieldInput {
@ -332,8 +332,8 @@ class IDAttributeFieldInput : public GeometryFieldInput {
GVArray get_varray_for_context(const GeometryFieldContext &context,
const IndexMask &mask) const override;
volume::GGrid get_volume_grid_for_context(const GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const override
volume::GVGrid get_volume_grid_for_context(const GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const override
{
return {};
}
@ -357,8 +357,8 @@ class NormalFieldInput : public GeometryFieldInput {
GVArray get_varray_for_context(const GeometryFieldContext &context,
const IndexMask &mask) const override;
volume::GGrid get_volume_grid_for_context(const GeometryFieldContext &context,
const volume::GGrid &mask) const override;
volume::GVGrid get_volume_grid_for_context(const GeometryFieldContext &context,
const volume::GVGrid &mask) const override;
std::string socket_inspection_name() const override;
uint64_t hash() const override;
@ -397,8 +397,8 @@ class AnonymousAttributeFieldInput : public GeometryFieldInput {
GVArray get_varray_for_context(const GeometryFieldContext &context,
const IndexMask &mask) const override;
volume::GGrid get_volume_grid_for_context(const GeometryFieldContext &context,
const volume::GGrid &mask) const override;
volume::GVGrid get_volume_grid_for_context(const GeometryFieldContext &context,
const volume::GVGrid &mask) const override;
std::string socket_inspection_name() const override;
uint64_t hash() const override;

View File

@ -6,6 +6,7 @@
#include "BLI_cpp_type.hh"
#include "BLI_generic_virtual_array.hh"
#include "BLI_generic_virtual_grid.hh"
#include "BLI_math_vector_types.hh"
#include "BLI_volume.hh"
@ -22,7 +23,7 @@ template<typename T> struct AttributeGridReader {
/**
* Virtual grid that provides access to the attribute data. This may be empty.
*/
volume::Grid<T> grid;
VGrid<T> grid;
/**
* Domain where the attribute is stored. This also determines the extent of the virtual grid.
*/
@ -34,11 +35,11 @@ template<typename T> struct AttributeGridReader {
*/
const ImplicitSharingInfo *sharing_info;
const volume::Grid<T> &operator*() const
const VGrid<T> &operator*() const
{
return this->grid;
}
volume::Grid<T> &operator*()
VGrid<T> &operator*()
{
return this->grid;
}
@ -58,7 +59,7 @@ template<typename T> struct AttributeGridWriter {
/**
* Grid pointer giving read and write access to the attribute. This may be empty.
*/
volume::MutableGrid<T> grid;
VMutableGrid<T> grid;
/**
* Domain where the attribute is stored on the geometry. Also determines the extent of the
* virtual grid.
@ -89,7 +90,7 @@ template<typename T> struct AttributeGridWriter {
* A generic version of #AttributeGridReader.
*/
struct GAttributeGridReader {
volume::GGrid grid;
GVGrid grid;
eAttrDomain domain;
const ImplicitSharingInfo *sharing_info;
@ -98,11 +99,11 @@ struct GAttributeGridReader {
return this->grid;
}
const volume::GGrid &operator*() const
const GVGrid &operator*() const
{
return this->grid;
}
volume::GGrid &operator*()
GVGrid &operator*()
{
return this->grid;
}
@ -117,7 +118,7 @@ struct GAttributeGridReader {
* A generic version of #AttributeGridWriter.
*/
struct GAttributeGridWriter {
volume::GMutableGrid grid;
GVMutableGrid grid;
eAttrDomain domain;
std::function<void()> tag_modified_fn;
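A short usage sketch for these reader types ("density" is a made-up attribute name, `attributes` stands for a bke::AttributeAccessor, and lookup_grid() follows the accessor function table shown earlier in this diff):

    bke::GAttributeGridReader reader = attributes.lookup_grid("density");
    if (reader.grid) {
      /* Narrow the generic grid to a typed virtual grid. */
      VGrid<float> density = reader.grid.typed<float>();
      /* reader.domain gives the storage domain; reader.sharing_info, if set, owns the data. */
    }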

View File

@ -18,9 +18,9 @@ struct VolumeGrid;
VolumeGrid *BKE_volume_grid_add_vdb(Volume &volume,
blender::StringRef name,
blender::volume::GMutableGrid vdb_grid);
blender::volume::GVMutableGrid vdb_grid);
bool BKE_volume_grid_bounds(blender::volume::GGrid grid,
bool BKE_volume_grid_bounds(blender::volume::GVGrid grid,
blender::float3 &r_min,
blender::float3 &r_max);
@ -29,15 +29,15 @@ bool BKE_volume_grid_bounds(blender::volume::GGrid grid,
* This is useful for instances, where there is a separate transform on top of the original
* grid transform that must be applied for some operations that only take a grid argument.
*/
blender::volume::GGrid BKE_volume_grid_shallow_transform(blender::volume::GGrid grid,
const blender::float4x4 &transform);
blender::volume::GVGrid BKE_volume_grid_shallow_transform(blender::volume::GVGrid grid,
const blender::float4x4 &transform);
blender::volume::GGrid BKE_volume_grid_openvdb_for_metadata(const VolumeGrid *grid);
blender::volume::GGrid BKE_volume_grid_openvdb_for_read(const Volume *volume,
const VolumeGrid *grid);
blender::volume::GMutableGrid BKE_volume_grid_openvdb_for_write(const Volume *volume,
VolumeGrid *grid,
bool clear);
blender::volume::GVGrid BKE_volume_grid_openvdb_for_metadata(const VolumeGrid *grid);
blender::volume::GVGrid BKE_volume_grid_openvdb_for_read(const Volume *volume,
const VolumeGrid *grid);
blender::volume::GVMutableGrid BKE_volume_grid_openvdb_for_write(const Volume *volume,
VolumeGrid *grid,
bool clear);
void BKE_volume_grid_clear_tree(Volume &volume, VolumeGrid &volume_grid);
void BKE_volume_grid_clear_tree(openvdb::GridBase &grid);
@ -77,7 +77,7 @@ auto BKE_volume_grid_type_operation(const VolumeGridType grid_type, OpType &&op)
return op.template operator()<openvdb::FloatGrid>();
}
blender::volume::GMutableGrid BKE_volume_grid_create_with_changed_resolution(
blender::volume::GVMutableGrid BKE_volume_grid_create_with_changed_resolution(
const VolumeGridType grid_type, const openvdb::GridBase &old_grid, float resolution_factor);
#endif
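A usage sketch for the instance case described above (`instance_transform` is a hypothetical float4x4; the BKE_* calls are the ones declared in this header):

    const VolumeGrid *volume_grid = BKE_volume_grid_get_for_read(volume, 0);
    blender::volume::GVGrid grid = BKE_volume_grid_openvdb_for_read(volume, volume_grid);
    /* Shares the tree, but stacks the instance transform on top of the grid transform. */
    blender::volume::GVGrid transformed = BKE_volume_grid_shallow_transform(grid,
                                                                            instance_transform);
    blender::float3 min, max;
    if (BKE_volume_grid_bounds(transformed, min, max)) {
      /* Bounds now account for the instance transform. */
    }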

View File

@ -104,7 +104,7 @@ template<typename ToTreeType> struct ConvertGridOp<ToTreeType, openvdb::MaskTree
};
static openvdb::GridBase::Ptr add_generic_grid_copy(const CPPType &value_type,
const volume::GGrid &data)
const volume::GVGrid &data)
{
/* Template build sanitization: nested static type dispatch creates a lot of code, which can
* easily make builds run out of memory. Capturing a functor allows doing the 2nd type dispatch
@ -148,13 +148,13 @@ static openvdb::GridBase::Ptr add_generic_grid_copy(const CPPType &value_type,
}
static openvdb::GridBase::Ptr add_generic_grid_move(const CPPType & /*value_type*/,
const volume::GMutableGrid &data)
const volume::GVMutableGrid &data)
{
return data.grid_;
}
static openvdb::GridBase::Ptr add_generic_grid_shared(const CPPType &value_type,
const volume::GMutableGrid &data,
const volume::GVMutableGrid &data,
const ImplicitSharingInfo *sharing_info)
{
/* XXX May eventually use this, for now just rely on shared_ptr. */
@ -202,7 +202,7 @@ static bool add_grid_from_attribute_init(const AttributeIDRef &attribute_id,
case AttributeInit::Type::Shared:
break;
case AttributeInit::Type::Grid: {
const volume::GGrid &data =
const volume::GVGrid &data =
static_cast<const blender::bke::AttributeInitGrid &>(initializer).grid;
result = add_generic_grid_copy(value_type, data);
#ifdef DEBUG_GRID_ATTRIBUTES
@ -214,7 +214,7 @@ static bool add_grid_from_attribute_init(const AttributeIDRef &attribute_id,
break;
}
case AttributeInit::Type::MoveGrid: {
const volume::GMutableGrid &data =
const volume::GVMutableGrid &data =
static_cast<const blender::bke::AttributeInitMoveGrid &>(initializer).grid;
result = add_generic_grid_move(value_type, data);
#ifdef DEBUG_GRID_ATTRIBUTES
@ -294,7 +294,7 @@ bool VolumeCustomAttributeGridProvider::foreach_attribute(
for (const VolumeGrid &grid : grids) {
const AttributeIDRef attribute_id{grid.name()};
const CPPType *type = volume::GGrid{grid.grid()}.value_type();
const CPPType *type = volume::GVGrid{grid.grid()}.value_type();
if (type == nullptr) {
continue;
}

View File

@ -582,7 +582,7 @@ static AttributeAccessorFunctions get_curves_accessor_functions()
};
fn.domain_grid_mask = [](const void * /*owner*/,
const eAttrDomain /*domain*/,
const int /*main_grid*/) -> volume::GGrid { return {}; };
const int /*main_grid*/) -> volume::GVGrid { return {}; };
fn.domain_supported = [](const void * /*owner*/, const eAttrDomain domain) {
return ELEM(domain, ATTR_DOMAIN_POINT, ATTR_DOMAIN_CURVE);
};

View File

@ -220,7 +220,7 @@ static AttributeAccessorFunctions get_instances_accessor_functions()
};
fn.domain_grid_mask = [](const void * /*owner*/,
const eAttrDomain /*domain*/,
const int /*main_grid*/) -> volume::GGrid { return {}; };
const int /*main_grid*/) -> volume::GVGrid { return {}; };
fn.domain_supported = [](const void * /*owner*/, const eAttrDomain domain) {
return domain == ATTR_DOMAIN_INSTANCE;
};

View File

@ -1237,7 +1237,7 @@ static AttributeAccessorFunctions get_mesh_accessor_functions()
};
fn.domain_grid_mask = [](const void * /*owner*/,
const eAttrDomain /*domain*/,
const int /*main_grid*/) -> volume::GGrid { return {}; };
const int /*main_grid*/) -> volume::GVGrid { return {}; };
fn.domain_supported = [](const void * /*owner*/, const eAttrDomain domain) {
return ELEM(domain, ATTR_DOMAIN_POINT, ATTR_DOMAIN_EDGE, ATTR_DOMAIN_FACE, ATTR_DOMAIN_CORNER);
};

View File

@ -183,9 +183,9 @@ static AttributeAccessorFunctions get_pointcloud_accessor_functions()
return 0;
}
};
fn.domain_grid_mask = [](const void */*owner*/,
fn.domain_grid_mask = [](const void * /*owner*/,
const eAttrDomain /*domain*/,
const int /*main_grid*/) -> volume::GGrid { return {}; };
const int /*main_grid*/) -> volume::GVGrid { return {}; };
fn.domain_supported = [](const void * /*owner*/, const eAttrDomain domain) {
return domain == ATTR_DOMAIN_POINT;
};

View File

@ -196,9 +196,9 @@ GVArray GeometryFieldInput::get_varray_for_context(const fn::FieldContext &conte
return {};
}
volume::GGrid GeometryFieldInput::get_volume_grid_for_context(const fn::FieldContext &context,
const volume::GGrid &mask,
ResourceScope & /*scope*/) const
volume::GVGrid GeometryFieldInput::get_volume_grid_for_context(const fn::FieldContext &context,
const volume::GVGrid &mask,
ResourceScope & /*scope*/) const
{
if (const GeometryFieldContext *geometry_context = dynamic_cast<const GeometryFieldContext *>(
&context))
@ -304,9 +304,9 @@ GVArray InstancesFieldInput::get_varray_for_context(const fn::FieldContext &cont
return {};
}
volume::GGrid VolumeFieldInput::get_volume_grid_for_context(const fn::FieldContext &context,
const volume::GGrid &mask,
ResourceScope & /*scope*/) const
volume::GVGrid VolumeFieldInput::get_volume_grid_for_context(const fn::FieldContext &context,
const volume::GVGrid &mask,
ResourceScope & /*scope*/) const
{
if (const GeometryFieldContext *geometry_context = dynamic_cast<const GeometryFieldContext *>(
&context))
@ -333,8 +333,8 @@ GVArray AttributeFieldInput::get_varray_for_context(const GeometryFieldContext &
return {};
}
volume::GGrid AttributeFieldInput::get_volume_grid_for_context(const GeometryFieldContext &context,
const GGrid & /*mask*/) const
volume::GVGrid AttributeFieldInput::get_volume_grid_for_context(
const GeometryFieldContext &context, const GGrid & /*mask*/) const
{
const eCustomDataType data_type = cpp_type_to_custom_data_type(*type_);
if (auto attributes = context.attributes()) {
@ -351,11 +351,11 @@ GVArray AttributeExistsFieldInput::get_varray_for_context(const bke::GeometryFie
return VArray<bool>::ForSingle(exists, domain_size);
}
volume::GGrid AttributeExistsFieldInput::get_volume_grid_for_context(
const GeometryFieldContext &context, const volume::GGrid & /*mask*/) const
volume::GVGrid AttributeExistsFieldInput::get_volume_grid_for_context(
const GeometryFieldContext &context, const volume::GVGrid & /*mask*/) const
{
const bool exists = context.attributes()->contains(name_);
return volume::MutableGrid<bool>::create(exists);
return volume::VMutableGrid<bool>::create(exists);
}
std::string AttributeFieldInput::socket_inspection_name() const
@ -440,8 +440,8 @@ GVArray AnonymousAttributeFieldInput::get_varray_for_context(const GeometryField
return *context.attributes()->lookup(*anonymous_id_, context.domain(), data_type);
}
volume::GGrid AnonymousAttributeFieldInput::get_volume_grid_for_context(
const GeometryFieldContext &context, const volume::GGrid & /*mask*/) const
volume::GVGrid AnonymousAttributeFieldInput::get_volume_grid_for_context(
const GeometryFieldContext &context, const volume::GVGrid & /*mask*/) const
{
const eCustomDataType data_type = cpp_type_to_custom_data_type(*type_);
return *context.attributes()->lookup_grid(*anonymous_id_, context.domain(), data_type);
@ -501,8 +501,8 @@ GVArray NormalFieldInput::get_varray_for_context(const GeometryFieldContext &con
return {};
}
volume::GGrid NormalFieldInput::get_volume_grid_for_context(
const GeometryFieldContext & /*context*/, const volume::GGrid & /*mask*/) const
volume::GVGrid NormalFieldInput::get_volume_grid_for_context(
const GeometryFieldContext & /*context*/, const volume::GVGrid & /*mask*/) const
{
return {};
}
@ -632,14 +632,14 @@ bool try_capture_field_on_geometry(GeometryComponent &component,
const auto &volume_component = static_cast<const bke::VolumeComponent &>(component);
main_grid = volume_component.get()->active_grid;
}
const volume::GGrid domain_mask = {attributes.domain_grid_mask(domain, main_grid)};
const volume::GVGrid domain_mask = {attributes.domain_grid_mask(domain, main_grid)};
const bke::GeometryFieldContext field_context{component, domain};
fn::VolumeFieldEvaluator evaluator{field_context, domain_mask};
evaluator.add(validator.validate_field_if_necessary(field));
evaluator.set_selection(selection);
evaluator.evaluate();
const volume::GGrid selection = evaluator.get_evaluated_selection_as_mask();
const volume::GVGrid selection = evaluator.get_evaluated_selection_as_mask();
dst_attribute.grid.try_copy_masked(evaluator.get_evaluated(0), selection);
@ -697,9 +697,9 @@ bool try_capture_field_on_geometry(GeometryComponent &component,
const auto &volume_component = static_cast<const bke::VolumeComponent &>(component);
main_grid = volume_component.get()->active_grid;
}
const volume::GGrid domain_mask = {attributes.domain_grid_mask(domain, main_grid)};
const volume::GVGrid domain_mask = {attributes.domain_grid_mask(domain, main_grid)};
fn::VolumeFieldEvaluator evaluator{field_context, domain_mask};
volume::GMutableGrid grid = volume::GMutableGrid::create(type);
volume::GVMutableGrid grid = volume::GVMutableGrid::create(type);
evaluator.add_with_destination(validator.validate_field_if_necessary(field), grid);
evaluator.set_selection(selection);
evaluator.evaluate();

View File

@ -160,7 +160,8 @@ static void volume_foreach_path(ID *id, BPathForeachPathData *bpath_data)
Volume *volume = reinterpret_cast<Volume *>(id);
if (volume->packedfile != nullptr &&
(bpath_data->flag & BKE_BPATH_FOREACH_PATH_SKIP_PACKED) != 0) {
(bpath_data->flag & BKE_BPATH_FOREACH_PATH_SKIP_PACKED) != 0)
{
return;
}
@ -564,7 +565,7 @@ bool BKE_volume_min_max(const Volume *volume, float3 &r_min, float3 &r_max)
if (BKE_volume_load(const_cast<Volume *>(volume), G.main)) {
for (const int i : IndexRange(BKE_volume_num_grids(volume))) {
const VolumeGrid *volume_grid = BKE_volume_grid_get_for_read(volume, i);
blender::volume::GGrid grid = BKE_volume_grid_openvdb_for_read(volume, volume_grid);
blender::volume::GVGrid grid = BKE_volume_grid_openvdb_for_read(volume, volume_grid);
float3 grid_min;
float3 grid_max;
if (BKE_volume_grid_bounds(grid, grid_min, grid_max)) {
@ -987,7 +988,7 @@ void BKE_volume_grid_clear_tree(openvdb::GridBase &grid)
}
void BKE_volume_grid_clear_tree(Volume &volume, VolumeGrid &volume_grid)
{
blender::volume::GMutableGrid grid = BKE_volume_grid_openvdb_for_write(
blender::volume::GVMutableGrid grid = BKE_volume_grid_openvdb_for_write(
&volume, &volume_grid, false);
BKE_volume_grid_clear_tree(*grid.grid_);
}
@ -1165,7 +1166,7 @@ VolumeGrid *BKE_volume_grid_add(Volume *volume, const char *name, VolumeGridType
#ifdef WITH_OPENVDB
VolumeGrid *BKE_volume_grid_add_vdb(Volume &volume,
const StringRef name,
blender::volume::GMutableGrid vdb_grid)
blender::volume::GVMutableGrid vdb_grid)
{
VolumeGridVector &grids = *volume.runtime.grids;
BLI_assert(BKE_volume_grid_find_for_read(&volume, name.data()) == nullptr);
@ -1234,7 +1235,7 @@ float BKE_volume_simplify_factor(const Depsgraph *depsgraph)
#ifdef WITH_OPENVDB
bool BKE_volume_grid_bounds(blender::volume::GGrid grid, float3 &r_min, float3 &r_max)
bool BKE_volume_grid_bounds(blender::volume::GVGrid grid, float3 &r_min, float3 &r_max)
{
/* TODO: we can get this from grid metadata in some cases? */
openvdb::CoordBBox coordbbox;
@ -1250,31 +1251,31 @@ bool BKE_volume_grid_bounds(blender::volume::GGrid grid, float3 &r_min, float3 &
return true;
}
blender::volume::GGrid BKE_volume_grid_shallow_transform(blender::volume::GGrid grid,
const blender::float4x4 &transform)
blender::volume::GVGrid BKE_volume_grid_shallow_transform(blender::volume::GVGrid grid,
const blender::float4x4 &transform)
{
openvdb::math::Transform::Ptr grid_transform = grid.grid_->transform().copy();
grid_transform->postMult(openvdb::Mat4d((float *)transform.ptr()));
/* Create a transformed grid. The underlying tree is shared. */
return blender::volume::GGrid{grid.grid_->copyGridReplacingTransform(grid_transform)};
return blender::volume::GVGrid{grid.grid_->copyGridReplacingTransform(grid_transform)};
}
blender::volume::GGrid BKE_volume_grid_openvdb_for_metadata(const VolumeGrid *grid)
blender::volume::GVGrid BKE_volume_grid_openvdb_for_metadata(const VolumeGrid *grid)
{
return blender::volume::GGrid{grid->grid()};
return blender::volume::GVGrid{grid->grid()};
}
blender::volume::GGrid BKE_volume_grid_openvdb_for_read(const Volume *volume,
const VolumeGrid *grid)
blender::volume::GVGrid BKE_volume_grid_openvdb_for_read(const Volume *volume,
const VolumeGrid *grid)
{
BKE_volume_grid_load(volume, grid);
return blender::volume::GGrid{grid->grid()};
return blender::volume::GVGrid{grid->grid()};
}
blender::volume::GMutableGrid BKE_volume_grid_openvdb_for_write(const Volume *volume,
VolumeGrid *grid,
const bool clear)
blender::volume::GVMutableGrid BKE_volume_grid_openvdb_for_write(const Volume *volume,
VolumeGrid *grid,
const bool clear)
{
const char *volume_name = volume->id.name + 2;
if (clear) {
@ -1285,7 +1286,7 @@ blender::volume::GMutableGrid BKE_volume_grid_openvdb_for_write(const Volume *vo
grid->duplicate_reference(volume_name, grids.filepath);
}
return blender::volume::GMutableGrid{grid->grid()};
return blender::volume::GVMutableGrid{grid->grid()};
}
/* Changing the resolution of a grid. */
@ -1316,14 +1317,14 @@ struct CreateGridWithChangedResolutionOp {
const openvdb::GridBase &grid;
const float resolution_factor;
template<typename GridType> blender::volume::GMutableGrid operator()()
template<typename GridType> blender::volume::GVMutableGrid operator()()
{
return {create_grid_with_changed_resolution(static_cast<const GridType &>(grid),
resolution_factor)};
}
};
blender::volume::GMutableGrid BKE_volume_grid_create_with_changed_resolution(
blender::volume::GVMutableGrid BKE_volume_grid_create_with_changed_resolution(
const VolumeGridType grid_type,
const openvdb::GridBase &old_grid,
const float resolution_factor)

View File

@ -378,7 +378,7 @@ static AttributeAccessorFunctions get_volume_accessor_functions()
return grids.domain_size(domain);
};
fn.domain_grid_mask =
[](const void *owner, const eAttrDomain /*domain*/, const int main_grid) -> volume::GGrid {
[](const void *owner, const eAttrDomain /*domain*/, const int main_grid) -> volume::GVGrid {
if (owner == nullptr || main_grid < 0) {
return {nullptr};
}

View File

@ -0,0 +1,662 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup bli
*
* A generic virtual grid is the same as a virtual grid, except that the data type is only known
* at runtime.
*/
#include "BLI_timeit.hh"
#include "BLI_virtual_grid.hh"
namespace blender {
/* -------------------------------------------------------------------- */
/** \name #GVGridImpl and #GVMutableGridImpl.
* \{ */
class GVGrid;
class GVGridImpl;
class GVMutableGrid;
class GVMutableGridImpl;
/* A generically typed version of #VGridImpl. */
class GVGridImpl {
protected:
const CPPType *type_;
public:
GVGridImpl(const CPPType &type);
virtual ~GVGridImpl() = default;
const CPPType &type() const;
// virtual void get(int64_t index, void *r_value) const;
// virtual void get_to_uninitialized(int64_t index, void *r_value) const = 0;
virtual CommonVGridInfo common_info() const;
// virtual void materialize(const IndexMask &mask, void *dst) const;
// virtual void materialize_to_uninitialized(const IndexMask &mask, void *dst) const;
// virtual void materialize_compressed(const IndexMask &mask, void *dst) const;
// virtual void materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const;
virtual bool try_assign_VGrid(void *vgrid) const;
};
/* A generic version of #VMutableGridImpl. */
class GVMutableGridImpl : public GVGridImpl {
public:
GVMutableGridImpl(const CPPType &type) : GVGridImpl(type) {}
// virtual void set_by_copy(int64_t index, const void *value);
// virtual void set_by_relocate(int64_t index, void *value);
// virtual void set_by_move(int64_t index, void *value) = 0;
// virtual void set_all(const void *src);
virtual bool try_assign_VMutableGrid(void *vgrid) const;
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVGrid and #GVMutableGrid
* \{ */
namespace detail {
struct GVGridAnyExtraInfo {
const GVGridImpl *(*get_vgrid)(const void *buffer) =
[](const void * /*buffer*/) -> const GVGridImpl * { return nullptr; };
template<typename StorageT> static constexpr GVGridAnyExtraInfo get();
};
} // namespace detail
class GVMutableGrid;
/**
* Utility class to reduce code duplication between #GVGrid and #GVMutableGrid.
* It pretty much follows #VGridCommon. Don't use this class outside of this header.
*/
class GVGridCommon {
public:
#ifdef WITH_OPENVDB
using GridType = openvdb::GridBase;
#endif
protected:
/**
* See #VGridCommon for more information. The inline buffer is a bit larger here, because
* generic virtual grid implementations often require a bit more space than typed ones.
*/
using Storage = Any<detail::GVGridAnyExtraInfo, 40, 8>;
const GVGridImpl *impl_ = nullptr;
Storage storage_;
protected:
GVGridCommon() = default;
GVGridCommon(const GVGridCommon &other);
GVGridCommon(GVGridCommon &&other) noexcept;
GVGridCommon(const GVGridImpl *impl);
GVGridCommon(std::shared_ptr<const GVGridImpl> impl);
~GVGridCommon();
template<typename ImplT, typename... Args> void emplace(Args &&...args);
void copy_from(const GVGridCommon &other);
void move_from(GVGridCommon &&other) noexcept;
const GVGridImpl *impl_from_storage() const;
public:
const CPPType &type() const;
operator bool() const;
template<typename T> bool try_assign_VGrid(VGrid<T> &vgrid) const;
bool may_have_ownership() const;
// void materialize(void *dst) const;
// void materialize(const IndexMask &mask, void *dst) const;
// void materialize_to_uninitialized(void *dst) const;
// void materialize_to_uninitialized(const IndexMask &mask, void *dst) const;
// void materialize_compressed(const IndexMask &mask, void *dst) const;
// void materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const;
CommonVGridInfo common_info() const;
/**
* Returns true when the virtual grid is stored as a grid internally.
*/
bool is_grid() const;
/**
* Returns the internally used grid of the virtual grid. This invokes undefined behavior if the
* virtual grid is not stored as a grid internally.
*/
#ifdef WITH_OPENVDB
GridType *get_internal_grid() const;
#endif
/**
* Returns true when the virtual grid returns the same value for every element.
*/
bool is_single() const;
/**
* Copies the value that is used for every element into `r_value`, which is expected to point to
* initialized memory. This invokes undefined behavior if the virtual grid would not return the
* same value for every index.
*/
void get_internal_single(void *r_value) const;
/**
* Same as `get_internal_single`, but `r_value` points to uninitialized memory.
*/
void get_internal_single_to_uninitialized(void *r_value) const;
// void get(int64_t index, void *r_value) const;
///**
// * Returns a copy of the value at the given index. Usually a typed virtual array should
// * be used instead, but sometimes this is simpler when only a few indices are needed.
// */
// template<typename T> T get(int64_t index) const;
// void get_to_uninitialized(int64_t index, void *r_value) const;
};
/** Generic version of #VGrid. */
class GVGrid : public GVGridCommon {
private:
friend GVMutableGrid;
public:
#ifdef WITH_OPENVDB
using GridType = GVGridCommon::GridType;
#endif
public:
GVGrid() = default;
GVGrid(const GVGrid &other);
GVGrid(GVGrid &&other) noexcept;
GVGrid(const GVGridImpl *impl);
GVGrid(std::shared_ptr<const GVGridImpl> impl);
GVGrid(vgrid_tag::grid /* tag */, const GridType &grid);
GVGrid(vgrid_tag::single_ref /* tag */, const CPPType &type, const void *value);
GVGrid(vgrid_tag::single /* tag */, const CPPType &type, const void *value);
template<typename T> GVGrid(const VGrid<T> &vgrid);
template<typename T> VGrid<T> typed() const;
template<typename ImplT, typename... Args> static GVGrid For(Args &&...args);
static GVGrid ForSingle(const CPPType &type, const void *value);
static GVGrid ForSingleRef(const CPPType &type, const void *value);
static GVGrid ForSingleDefault(const CPPType &type);
static GVGrid ForGrid(const GridType &grid);
static GVGrid ForEmpty(const CPPType &type);
GVGrid &operator=(const GVGrid &other);
GVGrid &operator=(GVGrid &&other) noexcept;
const GVGridImpl *get_implementation() const
{
return impl_;
}
};
/** Generic version of #VMutableGrid. */
class GVMutableGrid : public GVGridCommon {
public:
#ifdef WITH_OPENVDB
using GridType = GVGridCommon::GridType;
#endif
public:
GVMutableGrid() = default;
GVMutableGrid(const GVMutableGrid &other);
GVMutableGrid(GVMutableGrid &&other) noexcept;
GVMutableGrid(GVMutableGridImpl *impl);
GVMutableGrid(std::shared_ptr<GVMutableGridImpl> impl);
template<typename T> GVMutableGrid(const VMutableGrid<T> &vgrid);
template<typename T> VMutableGrid<T> typed() const;
template<typename ImplT, typename... Args> static GVMutableGrid For(Args &&...args);
static GVMutableGrid ForGrid(GridType &grid);
operator GVGrid() const &;
operator GVGrid() &&noexcept;
GVMutableGrid &operator=(const GVMutableGrid &other);
GVMutableGrid &operator=(GVMutableGrid &&other) noexcept;
GridType *get_internal_grid() const;
template<typename T> bool try_assign_VMutableGrid(VMutableGrid<T> &vgrid) const;
// void set_by_copy(int64_t index, const void *value);
// void set_by_move(int64_t index, void *value);
// void set_by_relocate(int64_t index, void *value);
// void fill(const void *value);
///**
// * Copy the values from the source buffer to all elements in the virtual array.
// */
// void set_all(const void *src);
GVMutableGridImpl *get_implementation() const;
private:
GVMutableGridImpl *get_impl() const;
};
/** \} */
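A quick sketch of the generic wrappers in use, relying only on the declarations above:

    /* A grid that logically holds one value everywhere. */
    GVGrid single = GVGrid::ForSingleDefault(CPPType::get<float>());
    if (single.is_single()) {
      float value;
      single.get_internal_single(&value);
    }
    /* Narrow back to the typed interface when the element type is known. */
    VGrid<float> typed = single.typed<float>();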
/* -------------------------------------------------------------------- */
/** \name Conversions between generic and typed virtual arrays.
* \{ */
/* Used to convert a typed virtual grid into a generic one. */
template<typename T> class GVGridImpl_For_VGrid : public GVGridImpl {
protected:
VGrid<T> vgrid_;
public:
GVGridImpl_For_VGrid(VGrid<T> vgrid) : GVGridImpl(CPPType::get<T>()), vgrid_(std::move(vgrid)) {}
protected:
bool try_assign_VGrid(void *vgrid) const override
{
*(VGrid<T> *)vgrid = vgrid_;
return true;
}
CommonVGridInfo common_info() const override
{
return vgrid_.common_info();
}
};
/* Used to convert any generic virtual grid into a typed one. */
template<typename T> class VGridImpl_For_GVGrid : public VGridImpl<T> {
protected:
GVGrid vgrid_;
public:
VGridImpl_For_GVGrid(GVGrid vgrid) : vgrid_(std::move(vgrid))
{
BLI_assert(vgrid_);
BLI_assert(vgrid_.type().template is<T>());
}
protected:
CommonVGridInfo common_info() const override
{
return vgrid_.common_info();
}
bool try_assign_GVGrid(GVGrid &vgrid) const override
{
vgrid = vgrid_;
return true;
}
};
/* Used to convert any typed virtual mutable grid into a generic one. */
template<typename T> class GVMutableGridImpl_For_VMutableGrid : public GVMutableGridImpl {
protected:
VMutableGrid<T> vgrid_;
public:
GVMutableGridImpl_For_VMutableGrid(VMutableGrid<T> vgrid)
: GVMutableGridImpl(CPPType::get<T>()), vgrid_(std::move(vgrid))
{
}
protected:
CommonVGridInfo common_info() const override
{
return vgrid_.common_info();
}
bool try_assign_VGrid(void *vgrid) const override
{
*(VGrid<T> *)vgrid = vgrid_;
return true;
}
bool try_assign_VMutableGrid(void *vgrid) const override
{
*(VMutableGrid<T> *)vgrid = vgrid_;
return true;
}
};
/* Used to convert a generic mutable virtual grid into a typed one. */
template<typename T> class VMutableGridImpl_For_GVMutableGrid : public VMutableGridImpl<T> {
protected:
GVMutableGrid vgrid_;
public:
VMutableGridImpl_For_GVMutableGrid(GVMutableGrid vgrid) : vgrid_(vgrid)
{
BLI_assert(vgrid_);
BLI_assert(vgrid_.type().template is<T>());
}
private:
CommonVGridInfo common_info() const override
{
return vgrid_.common_info();
}
bool try_assign_GVGrid(GVGrid &vgrid) const override
{
vgrid = vgrid_;
return true;
}
bool try_assign_GVMutableGrid(GVMutableGrid &vgrid) const override
{
vgrid = vgrid_;
return true;
}
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVGridImpl_For_Grid.
* \{ */
class GVGridImpl_For_Grid : public GVMutableGridImpl {
public:
#ifdef WITH_OPENVDB
using GridType = openvdb::GridBase;
#endif
protected:
const GridType *grid_ = nullptr;
public:
GVGridImpl_For_Grid(const GridType &grid)
: GVMutableGridImpl(volume::grid_attribute_type(grid)), grid_(&grid)
{
}
protected:
GVGridImpl_For_Grid(const CPPType &type) : GVMutableGridImpl(type) {}
public:
CommonVGridInfo common_info() const override;
};
class GVGridImpl_For_Grid_final final : public GVGridImpl_For_Grid {
public:
using GVGridImpl_For_Grid::GVGridImpl_For_Grid;
private:
CommonVGridInfo common_info() const override;
};
template<> inline constexpr bool is_trivial_extended_v<GVGridImpl_For_Grid> = true;
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVGridImpl_For_SingleValueRef.
* \{ */
class GVGridImpl_For_SingleValueRef : public GVGridImpl {
protected:
const void *value_ = nullptr;
public:
GVGridImpl_For_SingleValueRef(const CPPType &type, const void *value)
: GVGridImpl(type), value_(value)
{
}
protected:
GVGridImpl_For_SingleValueRef(const CPPType &type) : GVGridImpl(type) {}
CommonVGridInfo common_info() const override;
};
class GVGridImpl_For_SingleValueRef_final final : public GVGridImpl_For_SingleValueRef {
public:
using GVGridImpl_For_SingleValueRef::GVGridImpl_For_SingleValueRef;
private:
CommonVGridInfo common_info() const override;
};
template<> inline constexpr bool is_trivial_extended_v<GVGridImpl_For_SingleValueRef_final> = true;
/** \} */
/* -------------------------------------------------------------------- */
/** \name Inline methods for #GVGridImpl.
* \{ */
inline GVGridImpl::GVGridImpl(const CPPType &type) : type_(&type) {}
inline const CPPType &GVGridImpl::type() const
{
return *type_;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Inline methods for #GVMutableGridImpl.
* \{ */
template<typename T>
inline bool GVMutableGrid::try_assign_VMutableGrid(VMutableGrid<T> &vgrid) const
{
BLI_assert(impl_->type().is<T>());
return this->get_impl()->try_assign_VMutableGrid(&vgrid);
}
inline GVMutableGridImpl *GVMutableGrid::get_impl() const
{
return const_cast<GVMutableGridImpl *>(static_cast<const GVMutableGridImpl *>(impl_));
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Inline methods for #GVGridCommon.
* \{ */
template<typename ImplT, typename... Args> inline void GVGridCommon::emplace(Args &&...args)
{
static_assert(std::is_base_of_v<GVGridImpl, ImplT>);
if constexpr (std::is_copy_constructible_v<ImplT> && Storage::template is_inline_v<ImplT>) {
impl_ = &storage_.template emplace<ImplT>(std::forward<Args>(args)...);
}
else {
std::shared_ptr<const GVGridImpl> ptr = std::make_shared<ImplT>(std::forward<Args>(args)...);
impl_ = &*ptr;
storage_ = std::move(ptr);
}
}
template<typename T> inline bool GVGridCommon::try_assign_VGrid(VGrid<T> &vgrid) const
{
BLI_assert(impl_->type().is<T>());
return impl_->try_assign_VGrid(&vgrid);
}
inline const CPPType &GVGridCommon::type() const
{
return impl_->type();
}
inline GVGridCommon::operator bool() const
{
return impl_ != nullptr;
}
inline CommonVGridInfo GVGridCommon::common_info() const
{
return impl_->common_info();
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Inline methods for #GVGrid.
* \{ */
inline GVGrid::GVGrid(vgrid_tag::grid /* tag */, const GridType &grid)
{
/* Use const-cast because the underlying virtual grid implementation is shared between const
* and non const data. */
GridType &mutable_grid = const_cast<GridType &>(grid);
this->emplace<GVGridImpl_For_Grid_final>(mutable_grid);
}
inline GVGrid::GVGrid(vgrid_tag::single_ref /* tag */, const CPPType &type, const void *value)
{
this->emplace<GVGridImpl_For_SingleValueRef_final>(type, value);
}
namespace detail {
template<typename StorageT> constexpr GVGridAnyExtraInfo GVGridAnyExtraInfo::get()
{
static_assert(std::is_base_of_v<GVGridImpl, StorageT> ||
is_same_any_v<StorageT, const GVGridImpl *, std::shared_ptr<const GVGridImpl>>);
if constexpr (std::is_base_of_v<GVGridImpl, StorageT>) {
return {[](const void *buffer) {
return static_cast<const GVGridImpl *>((const StorageT *)buffer);
}};
}
else if constexpr (std::is_same_v<StorageT, const GVGridImpl *>) {
return {[](const void *buffer) { return *(const StorageT *)buffer; }};
}
else if constexpr (std::is_same_v<StorageT, std::shared_ptr<const GVGridImpl>>) {
return {[](const void *buffer) { return ((const StorageT *)buffer)->get(); }};
}
else {
BLI_assert_unreachable();
return {};
}
}
} // namespace detail
template<typename ImplT, typename... Args> inline GVGrid GVGrid::For(Args &&...args)
{
static_assert(std::is_base_of_v<GVGridImpl, ImplT>);
GVGrid vgrid;
vgrid.template emplace<ImplT>(std::forward<Args>(args)...);
return vgrid;
}
template<typename T> inline GVGrid::GVGrid(const VGrid<T> &vgrid)
{
if (!vgrid) {
return;
}
const CommonVGridInfo info = vgrid.common_info();
if (info.type == CommonVGridInfo::Type::Single) {
*this = GVGrid::ForSingle(CPPType::get<T>(), vgrid.size(), info.data);
return;
}
/* Need to check for ownership, because otherwise the referenced data can be destructed when
* #this is destructed. */
if (info.type == CommonVGridInfo::Type::Span && !info.may_have_ownership) {
*this = GVGrid::ForSpan(GSpan(CPPType::get<T>(), info.data, vgrid.size()));
return;
}
if (vgrid.try_assign_GVGrid(*this)) {
return;
}
*this = GVGrid::For<GVGridImpl_For_VGrid<T>>(vgrid);
}
template<typename T> inline VGrid<T> GVGrid::typed() const
{
using GridType = typename VGrid<T>::GridType;
if (!*this) {
return {};
}
BLI_assert(impl_->type().is<T>());
const CommonVGridInfo info = this->common_info();
if (info.type == CommonVGridInfo::Type::Single) {
return VGrid<T>::ForSingle(*static_cast<const T *>(info.data));
}
/* Need to check for ownership, because otherwise the referenced data can be destructed when
* #this is destructed. */
if (info.type == CommonVGridInfo::Type::Grid && !info.may_have_ownership) {
return VGrid<T>::ForGrid(static_cast<const GridType *>(info.data));
}
VGrid<T> vgrid;
if (this->try_assign_VGrid(vgrid)) {
return vgrid;
}
return VGrid<T>::template For<VGridImpl_For_GVGrid<T>>(*this);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Inline methods for #GVMutableGrid.
* \{ */
template<typename ImplT, typename... Args> inline GVMutableGrid GVMutableGrid::For(Args &&...args)
{
static_assert(std::is_base_of_v<GVMutableGridImpl, ImplT>);
GVMutableGrid vgrid;
vgrid.emplace<ImplT>(std::forward<Args>(args)...);
return vgrid;
}
template<typename T> inline GVMutableGrid::GVMutableGrid(const VMutableGrid<T> &vgrid)
{
if (!vgrid) {
return;
}
const CommonVGridInfo info = vgrid.common_info();
if (info.type == CommonVGridInfo::Type::Grid && !info.may_have_ownership) {
*this = GVMutableGrid::ForGrid(static_cast<GridType *>(const_cast<void *>(info.data)));
return;
}
if (vgrid.try_assign_GVMutableGrid(*this)) {
return;
}
*this = GVMutableGrid::For<GVMutableGridImpl_For_VMutableGrid<T>>(vgrid);
}
template<typename T> inline VMutableGrid<T> GVMutableGrid::typed() const
{
if (!*this) {
return {};
}
BLI_assert(this->type().is<T>());
const CommonVGridInfo info = this->common_info();
if (info.type == CommonVGridInfo::Type::Grid && !info.may_have_ownership) {
return VMutableGrid<T>::ForGrid(const_cast<GridType *>(static_cast<const T *>(info.data)));
}
VMutableGrid<T> vgrid;
if (this->try_assign_VMutableGrid(vgrid)) {
return vgrid;
}
return VMutableGrid<T>::template For<VMutableGridImpl_For_GVMutableGrid<T>>(*this);
}
/** \} */
} // namespace blender

View File

@ -276,164 +276,13 @@ using SupportedGridTypes = openvdb::TypeList<openvdb::BoolGrid,
openvdb::TopologyGrid>;
} // namespace grid_types
const CPPType &grid_attribute_type(const openvdb::GridBase &grid);
template<typename GridType> const CPPType &grid_attribute_type(const GridType &grid);
openvdb::GridBase *make_grid_for_attribute_type(const CPPType &type);
#endif
class GGrid;
class GMutableGrid;
template<typename T> class Grid;
template<typename T> class MutableGrid;
/* -------------------------------------------------------------------- */
/** \name Grid pointer wrappers
* \note Using wrappers avoids checking for WITH_OPENVDB everywhere.
* \{ */
/* Generic grid reference. */
class GGrid {
public:
#ifdef WITH_OPENVDB
using GridPtr = std::shared_ptr<const openvdb::GridBase>;
GridPtr grid_ = nullptr;
#endif
int64_t voxel_count() const;
bool is_empty() const;
operator bool() const;
const CPPType *value_type() const;
template<typename T> Grid<T> typed() const;
};
/* Generic grid reference. */
class GMutableGrid {
public:
#ifdef WITH_OPENVDB
using GridPtr = std::shared_ptr<openvdb::GridBase>;
GridPtr grid_ = nullptr;
#endif
operator GGrid() const
{
#ifdef WITH_OPENVDB
return GGrid{grid_};
#else
return GGrid{};
#endif
}
/* Create an empty grid with a background value. */
static GMutableGrid create(const CPPType &type, const void *background_value);
/* Create an empty grid with the type default as background value. */
static GMutableGrid create(const CPPType &type);
/* Create a grid with the active volume mask voxels. */
static GMutableGrid create(const CPPType &type,
const GGrid &mask,
const void *inactive_value,
const void *active_value);
bool try_copy_masked(const GGrid &other, const GGrid &selection);
int64_t voxel_count() const;
bool is_empty() const;
operator bool() const;
const CPPType *value_type() const;
template<typename T> MutableGrid<T> typed() const;
};
template<typename T> class Grid {
public:
#ifdef WITH_OPENVDB
using GridType = grid_types::AttributeGrid<T>;
using TreeType = typename GridType::TreeType;
using GridPtr = typename GridType::Ptr;
using GridConstPtr = typename GridType::ConstPtr;
using ValueType = typename GridType::ValueType;
using Converter = grid_types::Converter<GridType>;
GridConstPtr grid_ = nullptr;
#endif
operator GGrid();
operator GGrid const() const;
int64_t voxel_count() const;
bool is_empty() const;
operator bool() const;
const CPPType *value_type() const;
};
template<typename T> class MutableGrid {
public:
#ifdef WITH_OPENVDB
using GridType = grid_types::AttributeGrid<T>;
using TreeType = typename GridType::TreeType;
using GridPtr = typename GridType::Ptr;
using GridConstPtr = typename GridType::ConstPtr;
using ValueType = typename GridType::ValueType;
using Converter = grid_types::Converter<GridType>;
GridPtr grid_ = nullptr;
#endif
/* Create an empty grid with a background value. */
static MutableGrid<T> create(const T &background_value);
/* Create an empty grid with the type default as background value. */
static MutableGrid<T> create();
/* Create a grid with the active volume mask voxels. */
static MutableGrid<T> create(const GGrid &mask, const T &inactive_value, const T &active_value);
operator GGrid();
operator GGrid const() const;
operator GMutableGrid();
operator GMutableGrid const() const;
int64_t voxel_count() const;
bool is_empty() const;
operator bool() const;
const CPPType *value_type() const;
};
template<typename T> MutableGrid<T>::operator GGrid()
{
#ifdef WITH_OPENVDB
return GGrid{grid_};
#else
return GGrid{};
#endif
}
template<typename T> MutableGrid<T>::operator GGrid const() const
{
#ifdef WITH_OPENVDB
return GGrid{grid_};
#else
return GGrid{};
#endif
}
template<typename T> MutableGrid<T>::operator GMutableGrid()
{
#ifdef WITH_OPENVDB
return GMutableGrid{grid_};
#else
return GMutableGrid{};
#endif
}
template<typename T> MutableGrid<T>::operator GMutableGrid const() const
{
#ifdef WITH_OPENVDB
return GMutableGrid{grid_};
#else
return GMutableGrid{};
#endif
}
/** \} */
} // namespace blender::volume

View File

@ -27,44 +27,6 @@ namespace blender::volume {
#ifdef WITH_OPENVDB
template<typename T> Grid<T> GGrid::typed() const
{
# ifdef WITH_OPENVDB
using GridType = typename Grid<T>::GridType;
using GridPtr = typename Grid<T>::GridConstPtr;
if (!grid_) {
return {};
}
GridPtr typed_grid = openvdb::GridBase::grid<GridType>(grid_);
if (!typed_grid) {
return {};
}
return {typed_grid};
# else
return {};
# endif
}
template<typename T> MutableGrid<T> GMutableGrid::typed() const
{
# ifdef WITH_OPENVDB
using TypedGrid = typename MutableGrid<T>::GridType;
using TypedGridPtr = typename MutableGrid<T>::GridPtr;
if (!grid_) {
return {};
}
TypedGridPtr typed_grid = openvdb::GridBase::grid<TypedGrid>(grid_);
if (!typed_grid) {
return {};
}
return {typed_grid};
# else
return {};
# endif
}
namespace detail {
template<typename Func> struct FilterVoidOp {
@ -93,155 +55,35 @@ template<typename Func> void field_to_static_type(const CPPType &type, Func func
}
/* Helper function to evaluate a function with a static grid type. */
template<typename Func>
void grid_to_static_type(const std::shared_ptr<openvdb::GridBase> &grid, Func func)
template<typename Func> void grid_to_static_type(openvdb::GridBase &grid, Func func)
{
grid->apply<grid_types::SupportedGridTypes>(func);
grid.apply<grid_types::SupportedGridTypes>(func);
}
/* Helper function to evaluate a function with a static grid type. */
template<typename Func>
void grid_to_static_type(const std::shared_ptr<const openvdb::GridBase> &grid, Func func)
template<typename Func> void grid_to_static_type(const openvdb::GridBase &grid, Func func)
{
grid->apply<grid_types::SupportedGridTypes>(func);
grid.apply<grid_types::SupportedGridTypes>(func);
}
template<typename T> MutableGrid<T> MutableGrid<T>::create(const T &background_value)
const CPPType &grid_attribute_type(const openvdb::GridBase &grid)
{
typename GridType::Ptr grid = GridType::create(
Converter::single_value_to_grid(background_value));
return MutableGrid<T>{std::move(grid)};
const CPPType *type = &CPPType::get<float>();
volume::grid_to_static_type(grid,
[&](auto &typed_grid) { type = &grid_attribute_type(typed_grid); });
return *type;
}
template<typename T> MutableGrid<T> MutableGrid<T>::create()
template<typename GridType> const CPPType &grid_attribute_type(const GridType & /*grid*/)
{
const T &value = *CPPType::get<T>().default_value_;
typename GridType::Ptr grid = GridType::create(Converter::single_value_to_grid(value));
return MutableGrid<T>{std::move(grid)};
}
using Converter = grid_types::Converter<GridType>;
using AttributeValueType = typename Converter::AttributeValueType;
template<typename T>
MutableGrid<T> MutableGrid<T>::create(const GGrid &mask,
const T &inactive_value,
const T &active_value)
{
if (mask.is_empty()) {
typename GridType::Ptr grid = GridType::create();
return MutableGrid<T>{std::move(grid)};
}
const typename TreeType::Ptr tree = nullptr;
volume::grid_to_static_type(mask.grid_, [&](auto &typed_mask) {
tree = typename TreeType::Ptr(new TreeType(typed_mask.grid_->tree(),
Converter::single_value_to_grid(inactive_value),
Converter::single_value_to_grid(active_value),
openvdb::TopologyCopy{}));
});
typename GridType::Ptr grid(new GridType(tree));
return MutableGrid<T>{std::move(grid)};
}
template<typename T> int64_t MutableGrid<T>::voxel_count() const
{
return grid_ ? grid_->activeVoxelCount() : 0;
}
template<typename T> bool MutableGrid<T>::is_empty() const
{
return grid_ ? grid_->empty() : true;
}
template<typename T> MutableGrid<T>::operator bool() const
{
return grid_ != nullptr;
}
template<typename T> const CPPType *MutableGrid<T>::value_type() const
{
return &CPPType::get<T>();
}
template<typename T> const CPPType *Grid<T>::value_type() const
{
return &CPPType::get<T>();
}
template<typename T> Grid<T>::operator GGrid()
{
return {grid_};
}
template<typename T> Grid<T>::operator GGrid const() const
{
return {grid_};
}
template<typename T> int64_t Grid<T>::voxel_count() const
{
return grid_ ? grid_->activeVoxelCount() : 0;
}
template<typename T> bool Grid<T>::is_empty() const
{
return grid_ ? grid_->empty() : true;
}
template<typename T> Grid<T>::operator bool() const
{
return grid_ != nullptr;
return CPPType::get<AttributeValueType>();
}
#else
template<typename T>
Grid<T> Grid<T>::create(ResourceScope & /*scope*/, const T & /*background_value*/)
{
return Grid<T>{};
}
template<typename T> Grid<T> Grid<T>::create(ResourceScope & /*scope*/)
{
return Grid<T>{};
}
template<typename T>
Grid<T> Grid<T>::create(ResourceScope & /*scope*/,
const GridMask & /*mask*/,
const T & /*inactive_value*/,
const T & /*active_value*/)
{
return Grid<T>{};
}
template<typename T> int64_t Grid<T>::voxel_count() const
{
return 0;
}
template<typename T> bool Grid<T>::is_empty() const
{
return true;
}
template<typename T> Grid<T>::operator bool() const
{
return false;
}
template<typename T> const CPPType *Grid<T>::value_type() const
{
return nullptr;
}
template<typename T> Grid<T>::operator GGrid()
{
return {};
}
template<typename T> Grid<T>::operator GGrid const() const
{
return {};
}
#endif
} // namespace blender::volume
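A small sketch of the reference-based dispatch helper added above (assumes an OpenVDB build):

    /* Counts active voxels without knowing the grid's static type up front. */
    static int64_t active_voxel_count(const openvdb::GridBase &base)
    {
      int64_t count = 0;
      blender::volume::grid_to_static_type(base, [&](const auto &typed_grid) {
        count = int64_t(typed_grid.activeVoxelCount());
      });
      return count;
    }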

View File

@ -75,6 +75,7 @@ set(SRC
intern/fnmatch.c
intern/generic_vector_array.cc
intern/generic_virtual_array.cc
intern/generic_virtual_grid.cc
intern/generic_virtual_vector_array.cc
intern/gsqueue.c
intern/hash_md5.c
@ -241,6 +242,7 @@ set(SRC
BLI_generic_value_map.hh
BLI_generic_vector_array.hh
BLI_generic_virtual_array.hh
BLI_generic_virtual_grid.hh
BLI_generic_virtual_vector_array.hh
BLI_ghash.h
BLI_gsqueue.h

View File

@ -0,0 +1,464 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_generic_virtual_grid.hh"
#include "BLI_volume_openvdb.hh"
namespace blender {
/* -------------------------------------------------------------------- */
/** \name #GVGridImpl
* \{ */
// void GVGridImpl::materialize(const IndexMask &mask, void *dst) const
//{
// mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
// void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
// this->get(i, elem_dst);
// });
// }
//
// void GVGridImpl::materialize_to_uninitialized(const IndexMask &mask, void *dst) const
//{
// mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
// void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
// this->get_to_uninitialized(i, elem_dst);
// });
// }
//
// void GVGridImpl::materialize_compressed(const IndexMask &mask, void *dst) const
//{
// mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
// void *elem_dst = POINTER_OFFSET(dst, type_->size() * pos);
// this->get(i, elem_dst);
// });
// }
//
// void GVGridImpl::materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const
//{
// mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
// void *elem_dst = POINTER_OFFSET(dst, type_->size() * pos);
// this->get_to_uninitialized(i, elem_dst);
// });
// }
//
// void GVGridImpl::get(const int64_t index, void *r_value) const
//{
// type_->destruct(r_value);
// this->get_to_uninitialized(index, r_value);
// }
CommonVGridInfo GVGridImpl::common_info() const
{
return {};
}
bool GVGridImpl::try_assign_VGrid(void * /*vgrid*/) const
{
return false;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVMutableGridImpl
* \{ */
// void GVMutableGridImpl::set_by_copy(const int64_t index, const void *value)
//{
// BUFFER_FOR_CPP_TYPE_VALUE(*type_, buffer);
// type_->copy_construct(value, buffer);
// this->set_by_move(index, buffer);
// type_->destruct(buffer);
// }
//
// void GVMutableGridImpl::set_by_relocate(const int64_t index, void *value)
//{
// this->set_by_move(index, value);
// type_->destruct(value);
// }
//
// void GVMutableGridImpl::set_all(const void *src)
//{
// const CommonVGridInfo info = this->common_info();
// if (info.type == CommonVGridInfo::Type::Span) {
// type_->copy_assign_n(src, const_cast<void *>(info.data));
// }
// else {
// for (int64_t i : IndexRange(size_)) {
// this->set_by_copy(i, POINTER_OFFSET(src, type_->size() * i));
// }
// }
// }
//
// void GVMutableGrid::fill(const void *value)
//{
// const CommonVGridInfo info = this->common_info();
// if (info.type == CommonVGridInfo::Type::Span) {
// this->type().fill_assign_n(value, const_cast<void *>(info.data), this->size());
// }
// else {
// for (int64_t i : IndexRange(this->size())) {
// this->set_by_copy(i, value);
// }
// }
// }
bool GVMutableGridImpl::try_assign_VMutableGrid(void * /*vgrid*/) const
{
return false;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVGridImpl_For_Grid
* \{ */
CommonVGridInfo GVGridImpl_For_Grid::common_info() const
{
return CommonVGridInfo{CommonVGridInfo::Type::Grid, true, grid_};
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVGridImpl_For_SingleValueRef
* \{ */
/* Generic virtual grid where each element has the same value. The value is not owned. */
CommonVGridInfo GVGridImpl_For_SingleValueRef::common_info() const
{
return CommonVGridInfo{CommonVGridInfo::Type::Single, true, value_};
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVGridImpl_For_SingleValue
* \{ */
/* Same as GVGridImpl_For_SingleValueRef, but the value is owned. */
class GVGridImpl_For_SingleValue : public GVGridImpl_For_SingleValueRef, NonCopyable, NonMovable {
public:
GVGridImpl_For_SingleValue(const CPPType &type, const void *value)
: GVGridImpl_For_SingleValueRef(type)
{
value_ = MEM_mallocN_aligned(type.size(), type.alignment(), __func__);
type.copy_construct(value, (void *)value_);
}
~GVGridImpl_For_SingleValue() override
{
type_->destruct((void *)value_);
MEM_freeN((void *)value_);
}
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVGridImpl_For_SmallTrivialSingleValue
* \{ */
/**
* Contains an inline buffer that can store a single value of a trivial type.
* This avoids the allocation that would be done by #GVGridImpl_For_SingleValue.
*/
template<int BufferSize> class GVGridImpl_For_SmallTrivialSingleValue : public GVGridImpl {
private:
AlignedBuffer<BufferSize, 8> buffer_;
public:
GVGridImpl_For_SmallTrivialSingleValue(const CPPType &type, const void *value) : GVGridImpl(type)
{
BLI_assert(type.is_trivial());
BLI_assert(type.alignment() <= 8);
BLI_assert(type.size() <= BufferSize);
type.copy_construct(value, &buffer_);
}
private:
CommonVGridInfo common_info() const override
{
return CommonVGridInfo{CommonVGridInfo::Type::Single, true, &buffer_};
}
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVGridCommon
* \{ */
GVGridCommon::GVGridCommon(const GVGridCommon &other) : storage_(other.storage_)
{
impl_ = this->impl_from_storage();
}
GVGridCommon::GVGridCommon(GVGridCommon &&other) noexcept : storage_(std::move(other.storage_))
{
impl_ = this->impl_from_storage();
other.storage_.reset();
other.impl_ = nullptr;
}
GVGridCommon::GVGridCommon(const GVGridImpl *impl) : impl_(impl)
{
storage_ = impl_;
}
GVGridCommon::GVGridCommon(std::shared_ptr<const GVGridImpl> impl) : impl_(impl.get())
{
if (impl) {
storage_ = std::move(impl);
}
}
GVGridCommon::~GVGridCommon() = default;
// void GVGridCommon::materialize(void *dst) const
//{
// this->materialize(IndexMask(impl_->size()), dst);
// }
//
// void GVGridCommon::materialize(const IndexMask &mask, void *dst) const
//{
// impl_->materialize(mask, dst);
// }
//
// void GVGridCommon::materialize_to_uninitialized(void *dst) const
//{
// this->materialize_to_uninitialized(IndexMask(impl_->size()), dst);
// }
//
// void GVGridCommon::materialize_to_uninitialized(const IndexMask &mask, void *dst) const
//{
// BLI_assert(mask.min_array_size() <= impl_->size());
// impl_->materialize_to_uninitialized(mask, dst);
// }
//
// void GVGridCommon::materialize_compressed(const IndexMask &mask, void *dst) const
//{
// impl_->materialize_compressed(mask, dst);
// }
//
// void GVGridCommon::materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst)
// const
//{
// impl_->materialize_compressed_to_uninitialized(mask, dst);
// }
void GVGridCommon::copy_from(const GVGridCommon &other)
{
if (this == &other) {
return;
}
storage_ = other.storage_;
impl_ = this->impl_from_storage();
}
void GVGridCommon::move_from(GVGridCommon &&other) noexcept
{
if (this == &other) {
return;
}
storage_ = std::move(other.storage_);
impl_ = this->impl_from_storage();
other.storage_.reset();
other.impl_ = nullptr;
}
bool GVGridCommon::is_grid() const
{
const CommonVGridInfo info = impl_->common_info();
return info.type == CommonVGridInfo::Type::Grid;
}
#ifdef WITH_OPENVDB
GVGridCommon::GridType *GVGridCommon::get_internal_grid() const
{
BLI_assert(this->is_grid());
const CommonVGridInfo info = impl_->common_info();
return static_cast<GridType *>(const_cast<void *>(info.data));
}
#endif
bool GVGridCommon::is_single() const
{
const CommonVGridInfo info = impl_->common_info();
return info.type == CommonVGridInfo::Type::Single;
}
void GVGridCommon::get_internal_single(void *r_value) const
{
BLI_assert(this->is_single());
const CommonVGridInfo info = impl_->common_info();
this->type().copy_assign(info.data, r_value);
}
void GVGridCommon::get_internal_single_to_uninitialized(void *r_value) const
{
impl_->type().default_construct(r_value);
this->get_internal_single(r_value);
}
const GVGridImpl *GVGridCommon::impl_from_storage() const
{
if (!storage_.has_value()) {
return nullptr;
}
return storage_.extra_info().get_vgrid(storage_.get());
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVGrid
* \{ */
GVGrid::GVGrid(const GVGrid &other) = default;
GVGrid::GVGrid(GVGrid &&other) noexcept = default;
GVGrid::GVGrid(const GVGridImpl *impl) : GVGridCommon(impl) {}
GVGrid::GVGrid(std::shared_ptr<const GVGridImpl> impl) : GVGridCommon(std::move(impl)) {}
GVGrid::GVGrid(vgrid_tag::single /* tag */, const CPPType &type, const void *value)
{
if (type.is_trivial() && type.size() <= 16 && type.alignment() <= 8) {
this->emplace<GVGridImpl_For_SmallTrivialSingleValue<16>>(type, value);
}
else {
this->emplace<GVGridImpl_For_SingleValue>(type, value);
}
}
GVGrid GVGrid::ForSingle(const CPPType &type, const void *value)
{
return GVGrid(vgrid_tag::single{}, type, value);
}
GVGrid GVGrid::ForSingleRef(const CPPType &type, const void *value)
{
return GVGrid(vgrid_tag::single_ref{}, type, value);
}
GVGrid GVGrid::ForSingleDefault(const CPPType &type)
{
return GVGrid::ForSingleRef(type, type.default_value());
}
GVGrid GVGrid::ForGrid(const GridType &grid)
{
return GVGrid(vgrid_tag::grid{}, grid);
}
GVGrid GVGrid::ForEmpty(const CPPType &type)
{
openvdb::GridBase *grid = nullptr;
volume::field_to_static_type(type, [&](auto type_tag) {
using T = typename decltype(type_tag)::type;
using GridType = volume::grid_types::AttributeGrid<T>;
using Converter = volume::grid_types::Converter<GridType>;
const T &value = *static_cast<const T *>(type.default_value());
grid = new volume::grid_types::AttributeGrid<T>(Converter::single_value_to_grid(value));
});
BLI_assert(grid != nullptr);
return GVGrid::ForGrid(*grid);
}
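/* Minimal sketch of the grid-backed constructors (hypothetical helper, assuming an OpenVDB
 * build); the wrapped grid is referenced, not owned. */
#ifdef WITH_OPENVDB
static void example_grid_backed()
{
  openvdb::FloatGrid::Ptr density = openvdb::FloatGrid::create(0.0f);
  density->tree().setValue(openvdb::Coord(0, 0, 0), 1.0f);

  /* Wraps the existing grid without taking ownership. */
  GVGrid wrapped = GVGrid::ForGrid(*density);
  BLI_assert(wrapped.is_grid());

  /* Creates a new grid whose background is the type's default value. */
  GVGrid empty = GVGrid::ForEmpty(CPPType::get<float>());
  UNUSED_VARS(wrapped, empty);
}
#endif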
GVGrid &GVGrid::operator=(const GVGrid &other)
{
this->copy_from(other);
return *this;
}
GVGrid &GVGrid::operator=(GVGrid &&other) noexcept
{
this->move_from(std::move(other));
return *this;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name #GVMutableGrid
* \{ */
GVMutableGrid::GVMutableGrid(const GVMutableGrid &other) = default;
GVMutableGrid::GVMutableGrid(GVMutableGrid &&other) noexcept = default;
GVMutableGrid::GVMutableGrid(GVMutableGridImpl *impl) : GVGridCommon(impl) {}
GVMutableGrid::GVMutableGrid(std::shared_ptr<GVMutableGridImpl> impl)
: GVGridCommon(std::move(impl))
{
}
GVMutableGrid GVMutableGrid::ForGrid(GridType &grid)
{
return GVMutableGrid::For<GVGridImpl_For_Grid_final>(grid);
}
GVMutableGrid::operator GVGrid() const &
{
GVGrid varray;
varray.copy_from(*this);
return varray;
}
GVMutableGrid::operator GVGrid() &&noexcept
{
GVGrid varray;
varray.move_from(std::move(*this));
return varray;
}
GVMutableGrid &GVMutableGrid::operator=(const GVMutableGrid &other)
{
this->copy_from(other);
return *this;
}
GVMutableGrid &GVMutableGrid::operator=(GVMutableGrid &&other) noexcept
{
this->move_from(std::move(other));
return *this;
}
GVMutableGridImpl *GVMutableGrid::get_implementation() const
{
return this->get_impl();
}
// void GVMutableGrid::set_all(const void *src)
//{
// this->get_impl()->set_all(src);
// }
GVMutableGrid::GridType *GVMutableGrid::get_internal_grid() const
{
BLI_assert(this->is_grid());
const CommonVGridInfo info = impl_->common_info();
return static_cast<GridType *>(const_cast<void *>(info.data));
}
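/* Minimal sketch of the mutable wrapper (hypothetical helper, assuming an OpenVDB build and that
 * #GridType is #openvdb::GridBase): writes go straight through to the caller-owned grid, and the
 * wrapper converts implicitly to the read-only #GVGrid. */
#ifdef WITH_OPENVDB
static void example_mutable_grid()
{
  openvdb::FloatGrid::Ptr grid = openvdb::FloatGrid::create(0.0f);
  GVMutableGrid mutable_grid = GVMutableGrid::ForGrid(*grid);

  /* Modify the wrapped grid directly through the generic interface. */
  mutable_grid.get_internal_grid()->setName("density");

  /* Implicit conversion to the read-only wrapper. */
  GVGrid read_only = mutable_grid;
  BLI_assert(read_only.is_grid());
  UNUSED_VARS(read_only);
}
#endif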
/** \} */
CommonVGridInfo GVGridImpl_For_Grid_final::common_info() const
{
return CommonVGridInfo(CommonVGridInfo::Type::Grid, false, grid_);
}
CommonVGridInfo GVGridImpl_For_SingleValueRef_final::common_info() const
{
return CommonVGridInfo(CommonVGridInfo::Type::Single, false, value_);
}
} // namespace blender

View File

@ -18,176 +18,193 @@ namespace blender::volume {
#ifdef WITH_OPENVDB
int64_t GGrid::voxel_count() const
{
  return grid_ ? grid_->activeVoxelCount() : 0;
}

bool GGrid::is_empty() const
{
  return grid_ ? grid_->empty() : true;
}

GGrid::operator bool() const
{
  return grid_ != nullptr;
}

const CPPType *GGrid::value_type() const
{
  const CPPType *type = nullptr;
  grid_to_static_type(grid_, [&](auto &grid) {
    using GridType = typename std::decay<decltype(grid)>::type;
    using Converter = grid_types::Converter<GridType>;
    type = &CPPType::get<typename Converter::AttributeValueType>();
  });
  return type;
}

openvdb::GridBase *make_grid_for_attribute_type(const CPPType &type)
{
  openvdb::GridBase *result = nullptr;
  volume::field_to_static_type(type, [&](auto tag) {
    using ValueType = typename decltype(tag)::type;
    using GridType = volume::grid_types::AttributeGrid<ValueType>;
    using TreeType = typename GridType::TreeType;
    using Converter = volume::grid_types::Converter<GridType>;

    const ValueType &background_value = *static_cast<const ValueType *>(type.default_value());
    result = new GridType(Converter::single_value_to_grid(background_value));
  });
  BLI_assert(result != nullptr);
  return result;
}
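/* Sketch of the intended ownership pattern for the factory above (hypothetical helper),
 * mirroring its use in the field evaluation code later in this patch: the raw pointer is wrapped
 * in a GridBase::Ptr, ownership is handed to a ResourceScope, and a non-owning GVMutableGrid
 * wraps the stored grid. */
static GVMutableGrid example_default_grid(ResourceScope &scope, const CPPType &type)
{
  openvdb::GridBase::Ptr buffer{make_grid_for_attribute_type(type)};
  buffer = scope.add_value<openvdb::GridBase::Ptr>(std::move(buffer));
  return GVMutableGrid::ForGrid(*buffer);
}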
GMutableGrid GMutableGrid::create(const CPPType &type, const void *background_value)
{
openvdb::GridBase::Ptr grid;
volume::field_to_static_type(type, [&grid, background_value](auto type_tag) {
using T = typename decltype(type_tag)::type;
using GridType = grid_types::AttributeGrid<T>;
using Converter = grid_types::Converter<GridType>;
const T &value = *static_cast<const T *>(background_value);
grid = grid_types::AttributeGrid<T>::create(Converter::single_value_to_grid(value));
});
return GMutableGrid{std::move(grid)};
}
GMutableGrid GMutableGrid::create(const CPPType &type)
{
openvdb::GridBase::Ptr grid;
volume::field_to_static_type(type, [&](auto type_tag) {
using T = typename decltype(type_tag)::type;
using GridType = grid_types::AttributeGrid<T>;
using Converter = grid_types::Converter<GridType>;
const T &value = *static_cast<const T *>(type.default_value());
grid = grid_types::AttributeGrid<T>::create(Converter::single_value_to_grid(value));
});
return GMutableGrid{std::move(grid)};
}
GMutableGrid GMutableGrid::create(const CPPType &type,
const GGrid &mask,
const void *inactive_value,
const void *active_value)
{
openvdb::GridBase::Ptr grid = nullptr;
volume::field_to_static_type(type, [&](auto type_tag) {
using T = typename decltype(type_tag)::type;
using TreeType = grid_types::AttributeTree<T>;
using GridType = grid_types::AttributeGrid<T>;
using Converter = grid_types::Converter<GridType>;
if (mask.is_empty()) {
grid = GridType::create();
return;
}
const T &typed_inactive_value = *static_cast<const T *>(inactive_value);
const T &typed_active_value = *static_cast<const T *>(active_value);
typename TreeType::Ptr tree = nullptr;
volume::grid_to_static_type(mask.grid_, [&](auto &typed_mask) {
tree = typename TreeType::Ptr(
new TreeType(typed_mask.tree(),
Converter::single_value_to_grid(typed_inactive_value),
Converter::single_value_to_grid(typed_active_value),
openvdb::TopologyCopy{}));
});
grid = typename GridType::Ptr(new GridType(tree));
grid->setTransform(mask.grid_->transform().copy());
});
return GMutableGrid{std::move(grid)};
}
bool GMutableGrid::try_copy_masked(const GGrid &other, const GGrid & /*mask*/)
{
if (!grid_ || !other.grid_) {
return false;
}
*grid_ = *other.grid_->copyGridWithNewTree();
/* XXX TODO prune tree with mask */
return true;
}
int64_t GMutableGrid::voxel_count() const
{
return grid_ ? grid_->activeVoxelCount() : 0;
}
bool GMutableGrid::is_empty() const
{
return grid_ ? grid_->empty() : true;
}
GMutableGrid::operator bool() const
{
return grid_ != nullptr;
}
const CPPType *GMutableGrid::value_type() const
{
const CPPType *type = nullptr;
grid_to_static_type(grid_, [&](auto &grid) {
using GridType = typename std::decay<decltype(grid)>::type;
using Converter = grid_types::Converter<GridType>;
type = &CPPType::get<typename Converter::AttributeValueType>();
});
return type;
}
#else
GGrid::operator bool() const
{
return false;
}
int64_t GGrid::voxel_count() const
{
return 0;
}
bool GGrid::is_empty() const
{
return true;
}
const CPPType *GGrid::value_type() const
{
return nullptr;
}
GGrid GGrid::create(ResourceScope & /*scope*/,
const CPPType & /*type*/,
const void * /*background_value*/)
{
return GGrid{};
}
GGrid GGrid::create(ResourceScope & /*scope*/, const CPPType & /*type*/)
{
return GGrid{};
}
GGrid GGrid::create(ResourceScope & /*scope*/,
const CPPType & /*type*/,
const GridMask & /*mask*/,
const void * /*inactive_value*/,
const void * /*active_value*/)
{
return GGrid{};
}
// int64_t GVGrid::voxel_count() const
//{
// return grid_ ? grid_->activeVoxelCount() : 0;
// }
//
// bool GVGrid::is_empty() const
//{
// return grid_ ? grid_->empty() : true;
// }
//
// GVGrid::operator bool() const
//{
// return grid_ != nullptr;
// }
//
// const CPPType *GVGrid::value_type() const
//{
// const CPPType *type = nullptr;
// grid_to_static_type(grid_, [&](auto &grid) {
// using GridType = typename std::decay<decltype(grid)>::type;
// using Converter = grid_types::Converter<GridType>;
//
// type = &CPPType::get<typename Converter::AttributeValueType>();
// });
// return type;
// }
//
// GVMutableGrid GVMutableGrid::create(const CPPType &type, const void *background_value)
//{
// openvdb::GridBase::Ptr grid;
// volume::field_to_static_type(type, [&grid, background_value](auto type_tag) {
// using T = typename decltype(type_tag)::type;
// using GridType = grid_types::AttributeGrid<T>;
// using Converter = grid_types::Converter<GridType>;
//
// const T &value = *static_cast<const T *>(background_value);
// grid = grid_types::AttributeGrid<T>::create(Converter::single_value_to_grid(value));
// });
//
// return GVMutableGrid{std::move(grid)};
// }
//
// GVMutableGrid GVMutableGrid::create(const CPPType &type)
//{
// openvdb::GridBase::Ptr grid;
// volume::field_to_static_type(type, [&](auto type_tag) {
// using T = typename decltype(type_tag)::type;
// using GridType = grid_types::AttributeGrid<T>;
// using Converter = grid_types::Converter<GridType>;
//
// const T &value = *static_cast<const T *>(type.default_value());
// grid = grid_types::AttributeGrid<T>::create(Converter::single_value_to_grid(value));
// });
//
// return GVMutableGrid{std::move(grid)};
// }
//
// GVMutableGrid GVMutableGrid::create(const CPPType &type,
// const GVGrid &mask,
// const void *inactive_value,
// const void *active_value)
//{
// openvdb::GridBase::Ptr grid = nullptr;
// volume::field_to_static_type(type, [&](auto type_tag) {
// using T = typename decltype(type_tag)::type;
// using TreeType = grid_types::AttributeTree<T>;
// using GridType = grid_types::AttributeGrid<T>;
// using Converter = grid_types::Converter<GridType>;
//
// if (mask.is_empty()) {
// grid = GridType::create();
// return;
// }
//
// const T &typed_inactive_value = *static_cast<const T *>(inactive_value);
// const T &typed_active_value = *static_cast<const T *>(active_value);
// typename TreeType::Ptr tree = nullptr;
// volume::grid_to_static_type(mask.grid_, [&](auto &typed_mask) {
// tree = typename TreeType::Ptr(
// new TreeType(typed_mask.tree(),
// Converter::single_value_to_grid(typed_inactive_value),
// Converter::single_value_to_grid(typed_active_value),
// openvdb::TopologyCopy{}));
// });
// grid = typename GridType::Ptr(new GridType(tree));
// grid->setTransform(mask.grid_->transform().copy());
// });
//
// return GVMutableGrid{std::move(grid)};
// }
//
// bool GVMutableGrid::try_copy_masked(const GVGrid &other, const GVGrid & /*mask*/)
//{
// if (!grid_ || !other.grid_) {
// return false;
// }
// *grid_ = *other.grid_->copyGridWithNewTree();
// /* XXX TODO prune tree with mask */
// return true;
// }
//
// int64_t GVMutableGrid::voxel_count() const
//{
// return grid_ ? grid_->activeVoxelCount() : 0;
// }
//
// bool GVMutableGrid::is_empty() const
//{
// return grid_ ? grid_->empty() : true;
// }
//
// GVMutableGrid::operator bool() const
//{
// return grid_ != nullptr;
// }
//
// const CPPType *GVMutableGrid::value_type() const
//{
// const CPPType *type = nullptr;
// grid_to_static_type(grid_, [&](auto &grid) {
// using GridType = typename std::decay<decltype(grid)>::type;
// using Converter = grid_types::Converter<GridType>;
//
// type = &CPPType::get<typename Converter::AttributeValueType>();
// });
// return type;
// }
//
// #else
//
// GGrid::operator bool() const
//{
// return false;
// }
//
// int64_t GGrid::voxel_count() const
//{
// return 0;
// }
//
// bool GGrid::is_empty() const
//{
// return true;
// }
//
// const CPPType *GGrid::value_type() const
//{
// return nullptr;
// }
//
// GGrid GGrid::create(ResourceScope & /*scope*/,
// const CPPType & /*type*/,
// const void * /*background_value*/)
//{
// return GGrid{};
// }
//
// GGrid GGrid::create(ResourceScope & /*scope*/, const CPPType & /*type*/)
//{
// return GGrid{};
// }
//
// GGrid GGrid::create(ResourceScope & /*scope*/,
// const CPPType & /*type*/,
// const GridMask & /*mask*/,
// const void * /*inactive_value*/,
// const void * /*active_value*/)
//{
// return GGrid{};
// }
#endif

View File

@ -36,6 +36,7 @@
#include "BLI_function_ref.hh"
#include "BLI_generic_virtual_array.hh"
#include "BLI_generic_virtual_grid.hh"
#include "BLI_string_ref.hh"
#include "BLI_vector.hh"
#include "BLI_vector_set.hh"
@ -178,8 +179,7 @@ class GFieldRef : public GFieldBase<const FieldNode *> {
namespace detail {
/* Utility class to make #is_field_v work. */
struct TypedFieldBase {
};
struct TypedFieldBase {};
} // namespace detail
/**
@ -260,7 +260,7 @@ class FieldContext;
*/
class FieldInput : public FieldNode {
public:
using GGrid = volume::GGrid;
using GGrid = GVGrid;
/* The order is also used for sorting in socket inspection. */
enum class Category {
@ -334,7 +334,7 @@ struct FieldInputs {
*/
class FieldContext {
public:
using GGrid = volume::GGrid;
using GGrid = GVGrid;
virtual ~FieldContext() = default;
@ -489,8 +489,8 @@ class FieldEvaluator : NonMovable, NonCopyable {
*/
class VolumeFieldEvaluator : NonMovable, NonCopyable {
public:
using GGrid = volume::GGrid;
using GMutableGrid = volume::GMutableGrid;
using GGrid = GVGrid;
using GMutableGrid = GVMutableGrid;
private:
static const GGrid empty_grid_;
@ -551,7 +551,7 @@ class VolumeFieldEvaluator : NonMovable, NonCopyable {
int add_with_destination(GField field, GMutableGrid &dst);
/** Same as #add_with_destination but typed. */
template<typename T> int add_with_destination(Field<T> field, volume::MutableGrid<T> &dst)
template<typename T> int add_with_destination(Field<T> field, VMutableGrid<T> &dst)
{
return this->add_with_destination(GField(std::move(field)), std::move(dst));
}
@ -564,13 +564,13 @@ class VolumeFieldEvaluator : NonMovable, NonCopyable {
* assigned to the given position.
* \return Index of the field in the evaluator which can be used in the #get_evaluated methods.
*/
template<typename T> int add(Field<T> field, volume::Grid<T> *grid_ptr)
template<typename T> int add(Field<T> field, VGrid<T> *grid_ptr)
{
const int field_index = fields_to_evaluate_.append_and_get_index(std::move(field));
dst_grids_.append({});
output_pointer_infos_.append(
OutputPointerInfo{grid_ptr, [](void *dst, const GGrid &grid, ResourceScope & /*scope*/) {
*(volume::Grid<T> *)dst = grid.typed<T>();
*(VGrid<T> *)dst = grid.typed<T>();
}});
return field_index;
}
@ -648,11 +648,11 @@ Vector<GVArray> evaluate_fields(ResourceScope &scope,
* \return The computed virtual arrays for each provided field. If #dst_varrays is passed,
* the provided virtual arrays are returned.
*/
Vector<volume::GGrid> evaluate_volume_fields(ResourceScope &scope,
Span<GFieldRef> fields_to_evaluate,
const volume::GGrid &mask,
const FieldContext &context,
Span<volume::GMutableGrid *> dst_grids = {});
Vector<GVGrid> evaluate_volume_fields(ResourceScope &scope,
Span<GFieldRef> fields_to_evaluate,
const GVGrid &mask,
const FieldContext &context,
Span<GVMutableGrid> dst_grids = {});
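/* Minimal sketch of calling the entry point above for a single float field (hypothetical helper),
 * mirroring how the selection field is evaluated elsewhere in this patch; #VolumeFieldEvaluator
 * above is the more convenient wrapper when several fields share one context and mask. */
static VGrid<float> example_evaluate_volume_field(ResourceScope &scope,
                                                  const FieldContext &context,
                                                  const GVGrid &domain_mask,
                                                  const Field<float> &density_field)
{
  return evaluate_volume_fields(scope, {density_field}, domain_mask, context)[0].typed<float>();
}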
/* -------------------------------------------------------------------- */
/** \name Utility functions for simple field creation and evaluation
@ -697,9 +697,9 @@ class IndexFieldInput final : public FieldInput {
GVArray get_varray_for_context(const FieldContext &context,
const IndexMask &mask,
ResourceScope &scope) const final;
volume::GGrid get_volume_grid_for_context(const FieldContext & /*context*/,
const volume::GGrid & /*mask*/,
ResourceScope & /*scope*/) const final
GVGrid get_volume_grid_for_context(const FieldContext & /*context*/,
const GVGrid & /*mask*/,
ResourceScope & /*scope*/) const final
{
return {};
}

View File

@ -19,19 +19,19 @@ class GFieldRef;
namespace blender::fn {
void evaluate_procedure_on_varying_volume_fields(ResourceScope &scope,
const volume::GGrid &mask,
const GVGrid &mask,
const multi_function::Procedure &procedure,
Span<volume::GGrid> field_context_inputs,
Span<GVGrid> field_context_inputs,
Span<GFieldRef> fields_to_evaluate,
Span<int> field_indices,
Span<volume::GMutableGrid *> dst_grids,
MutableSpan<volume::GGrid> r_grids,
Span<GVMutableGrid> dst_grids,
MutableSpan<GVGrid> r_grids,
MutableSpan<bool> r_is_output_written_to_dst);
void evaluate_procedure_on_constant_volume_fields(ResourceScope &scope,
const multi_function::Procedure &procedure,
Span<volume::GGrid> field_context_inputs,
Span<GVGrid> field_context_inputs,
Span<GFieldRef> fields_to_evaluate,
Span<int> field_indices,
MutableSpan<volume::GGrid> r_grids);
MutableSpan<GVGrid> r_grids);
} // namespace blender::fn

View File

@ -24,9 +24,6 @@
namespace blender::fn {
using GGrid = volume::GGrid;
using GMutableGrid = volume::GMutableGrid;
/* -------------------------------------------------------------------- */
/** \name Field Evaluation
* \{ */
@ -113,21 +110,22 @@ static Vector<GVArray> get_field_context_inputs(
/**
* Retrieves the data from the context that is passed as input into the field.
*/
static Vector<GGrid> get_volume_field_context_inputs(
static Vector<GVGrid> get_volume_field_context_inputs(
ResourceScope &scope,
const GGrid &mask,
const GVGrid &mask,
const FieldContext &context,
const Span<std::reference_wrapper<const FieldInput>> field_inputs)
{
Vector<GGrid> field_context_inputs;
Vector<GVGrid> field_context_inputs;
for (const FieldInput &field_input : field_inputs) {
GGrid grid = context.get_volume_grid_for_input(field_input, mask, scope);
GVGrid grid = context.get_volume_grid_for_input(field_input, mask, scope);
if (!grid) {
const CPPType &type = field_input.cpp_type();
const void *default_value = type.default_value();
grid = GMutableGrid::create(type, default_value);
/* Move ownership to the resource scope. */
grid = {scope.add_value<GGrid::GridPtr>(std::move(grid.grid_))};
openvdb::GridBase::Ptr buffer{volume::make_grid_for_attribute_type(type)};
buffer = scope.add_value<openvdb::GridBase::Ptr>(std::move(buffer));
grid = GVMutableGrid::ForGrid(*buffer);
}
field_context_inputs.append(grid);
}
@ -178,7 +176,7 @@ static Set<GFieldRef> find_varying_fields(const FieldTreeInfo &field_tree_info,
* for different indices.
*/
static Set<GFieldRef> find_varying_fields(const FieldTreeInfo &field_tree_info,
Span<GGrid> field_context_inputs)
Span<GVGrid> field_context_inputs)
{
Set<GFieldRef> found_fields;
Stack<GFieldRef> fields_to_check;
@ -187,8 +185,8 @@ static Set<GFieldRef> find_varying_fields(const FieldTreeInfo &field_tree_info,
* start the tree search at the non-constant input fields and traverse through all fields that
* depend on them. */
for (const int i : field_context_inputs.index_range()) {
const GGrid &grid = field_context_inputs[i];
if (grid.is_empty()) {
const GVGrid &grid = field_context_inputs[i];
if (!grid) {
continue;
}
const FieldInput &field_input = field_tree_info.deduplicated_field_inputs[i];
@ -261,8 +259,8 @@ static void build_multi_function_procedure_for_fields(mf::Procedure &procedure,
const Span<GField> operation_inputs = operation_node.inputs();
if (field_with_index.current_input_index < operation_inputs.size()) {
/* Not all inputs are handled yet. Push the next input field to the stack and increment
* the input index. */
/* Not all inputs are handled yet. Push the next input field to the stack and
* increment the input index. */
fields_to_check.push({operation_inputs[field_with_index.current_input_index]});
field_with_index.current_input_index++;
}
@ -567,30 +565,21 @@ Vector<GVArray> evaluate_fields(ResourceScope &scope,
return r_varrays;
}
Vector<GGrid> evaluate_volume_fields(ResourceScope &scope,
Span<GFieldRef> fields_to_evaluate,
const GGrid &mask,
const FieldContext &context,
Span<GMutableGrid *> dst_grids)
Vector<GVGrid> evaluate_volume_fields(ResourceScope &scope,
Span<GFieldRef> fields_to_evaluate,
const GVGrid &mask,
const FieldContext &context,
Span<GVMutableGrid> dst_grids)
{
Vector<GGrid> r_grids(fields_to_evaluate.size());
Vector<GVGrid> r_grids(fields_to_evaluate.size());
Array<bool> is_output_written_to_dst(fields_to_evaluate.size(), false);
if (mask.is_empty()) {
for (const int i : fields_to_evaluate.index_range()) {
const CPPType &type = fields_to_evaluate[i].cpp_type();
r_grids[i] = GMutableGrid::create(type);
scope.add_value<GGrid::GridPtr>(std::move(r_grids[i].grid_));
}
return r_grids;
}
/* Destination grids are optional. Create a small utility method to access them. */
auto get_dst_grid = [&](int index) -> GMutableGrid * {
auto get_dst_grid = [&](int index) -> GVMutableGrid {
if (dst_grids.is_empty()) {
return {};
}
GMutableGrid *grid_ptr = dst_grids[index];
GVMutableGrid grid_ptr = dst_grids[index];
if (!grid_ptr) {
return nullptr;
}
@ -601,7 +590,7 @@ Vector<GGrid> evaluate_volume_fields(ResourceScope &scope,
FieldTreeInfo field_tree_info = preprocess_field_tree(fields_to_evaluate);
/* Get inputs that will be passed into the field when evaluated. */
Vector<GGrid> field_context_inputs = get_volume_field_context_inputs(
Vector<GVGrid> field_context_inputs = get_volume_field_context_inputs(
scope, mask, context, field_tree_info.deduplicated_field_inputs);
/* Finish fields that don't need any processing directly. */
@ -613,7 +602,7 @@ Vector<GGrid> evaluate_volume_fields(ResourceScope &scope,
const FieldInput &field_input = static_cast<const FieldInput &>(field.node());
const int field_input_index = field_tree_info.deduplicated_field_inputs.index_of(
field_input);
const GGrid &grid = field_context_inputs[field_input_index];
const GVGrid &grid = field_context_inputs[field_input_index];
/* Input grid needs to exist, otherwise evaluator will try
* computing the input and write into the same field. */
BLI_assert(grid);
@ -622,9 +611,8 @@ Vector<GGrid> evaluate_volume_fields(ResourceScope &scope,
}
case FieldNodeType::Constant: {
const FieldConstant &field_constant = static_cast<const FieldConstant &>(field.node());
GMutableGrid grid_constant = GMutableGrid::create(field_constant.type(),
field_constant.value().get());
r_grids[out_index] = scope.add_value<GMutableGrid>(std::move(grid_constant));
r_grids[out_index] = GVGrid::ForSingleRef(field_constant.type(),
field_constant.value().get());
break;
}
case FieldNodeType::Operation: {
@ -694,20 +682,26 @@ Vector<GGrid> evaluate_volume_fields(ResourceScope &scope,
* has written the computed data in the right place already. */
if (!dst_grids.is_empty()) {
for (const int out_index : fields_to_evaluate.index_range()) {
GMutableGrid *dst_grid_ptr = get_dst_grid(out_index);
if (!dst_grid_ptr) {
GVMutableGrid dst_grid = get_dst_grid(out_index);
if (!dst_grid) {
/* Caller did not provide a destination for this output. */
continue;
}
const GGrid &computed_grid = r_grids[out_index];
const GVGrid &computed_grid = r_grids[out_index];
// BLI_assert(computed_varray.type() == dst_grid.type());
if (is_output_written_to_dst[out_index]) {
/* The result has been written into the destination provided by the caller already. */
continue;
}
/* Still have to copy over the data in the destination provided by the caller. */
*dst_grid_ptr = {computed_grid.grid_->deepCopyGrid()};
r_grids[out_index] = *dst_grid_ptr;
if (dst_grid.is_grid() && computed_grid.is_grid()) {
  /* XXX WIP: copies the whole tree into the caller-provided grid; mask-aware pruning is still
   * TODO. */
  dst_grid.get_internal_grid()->setTree(
      computed_grid.get_internal_grid()->deepCopyGrid()->baseTreePtr());
}
r_grids[out_index] = dst_grid;
}
}
return r_grids;
@ -763,9 +757,9 @@ GVArray FieldContext::get_varray_for_input(const FieldInput &field_input,
return field_input.get_varray_for_context(*this, mask, scope);
}
GGrid FieldContext::get_volume_grid_for_input(const FieldInput &field_input,
const GGrid &mask,
ResourceScope &scope) const
GVGrid FieldContext::get_volume_grid_for_input(const FieldInput &field_input,
const GGrid &mask,
ResourceScope &scope) const
{
/* By default, ask the field input to create the grid. Another field context might override
 * this behavior. */
@ -1058,10 +1052,10 @@ IndexMask FieldEvaluator::get_evaluated_selection_as_mask()
/** \name #VolumeFieldEvaluator
* \{ */
const volume::GGrid VolumeFieldEvaluator::empty_grid_ = {};
const volume::GVGrid VolumeFieldEvaluator::empty_grid_ = {};
struct BoolGridToMask {
volume::Grid<bool>::GridType::ConstAccessor accessor;
volume::VGrid<bool>::GridType::ConstAccessor accessor;
inline void operator()(const openvdb::BoolGrid::ValueOnIter &iter) const
{
@ -1073,16 +1067,16 @@ struct BoolGridToMask {
}
};
static volume::GMutableGrid grid_mask_from_selection(const volume::GGrid &full_mask,
const volume::Grid<bool> &selection,
ResourceScope & /*scope*/)
static volume::GVMutableGrid grid_mask_from_selection(const volume::GVGrid &full_mask,
const volume::VGrid<bool> &selection,
ResourceScope & /*scope*/)
{
if (!full_mask) {
return {};
}
/* Empty bool grid with same transform and metadata as the full mask */
volume::MutableGrid<bool> result = {openvdb::BoolGrid::create(*full_mask.grid_)};
volume::VMutableGrid<bool> result = {openvdb::BoolGrid::create(*full_mask.grid_)};
volume::grid_to_static_type(full_mask.grid_, [&](auto &typed_full_mask) {
result.grid_->topologyUnion(typed_full_mask);
});
@ -1120,13 +1114,13 @@ int VolumeFieldEvaluator::add(GField field)
return field_index;
}
static volume::GGrid evaluate_selection(const Field<bool> &selection_field,
const FieldContext &context,
const volume::GGrid &domain_mask,
ResourceScope &scope)
static volume::GVGrid evaluate_selection(const Field<bool> &selection_field,
const FieldContext &context,
const volume::GVGrid &domain_mask,
ResourceScope &scope)
{
if (selection_field) {
volume::Grid<bool> selection =
volume::VGrid<bool> selection =
evaluate_volume_fields(scope, {selection_field}, domain_mask, context)[0].typed<bool>();
return grid_mask_from_selection(domain_mask, selection, scope);
}
@ -1154,13 +1148,13 @@ void VolumeFieldEvaluator::evaluate()
is_evaluated_ = true;
}
volume::GGrid VolumeFieldEvaluator::get_evaluated_as_mask(const int field_index)
volume::GVGrid VolumeFieldEvaluator::get_evaluated_as_mask(const int field_index)
{
volume::Grid<bool> grid = this->get_evaluated(field_index).typed<bool>();
volume::VGrid<bool> grid = this->get_evaluated(field_index).typed<bool>();
return grid_mask_from_selection(domain_mask_, grid, scope_);
}
volume::GGrid VolumeFieldEvaluator::get_evaluated_selection_as_mask()
volume::GVGrid VolumeFieldEvaluator::get_evaluated_selection_as_mask()
{
BLI_assert(is_evaluated_);
return selection_mask_;

View File

@ -8,6 +8,7 @@
#include "BLI_math_vector_types.hh"
#include "BLI_resource_scope.hh"
#include "BLI_virtual_array.hh"
#include "BLI_virtual_grid.hh"
#include "BLI_volume_openvdb.hh"
#include "FN_field.hh"
@ -190,8 +191,8 @@ struct VGridReader_For_Accessor : public VGridReader<GridType> {
};
template<typename GridType, typename MaskGridType> struct EvalPerLeafOp {
using GGrid = volume::GGrid;
using GMutableGrid = volume::GMutableGrid;
using GGrid = GVGrid;
using GMutableGrid = GVMutableGrid;
using TreeType = typename GridType::TreeType;
using ValueType = typename GridType::ValueType;
@ -282,36 +283,33 @@ template<typename GridType, typename MaskGridType> struct EvalPerLeafOp {
};
void evaluate_procedure_on_varying_volume_fields(ResourceScope &scope,
const volume::GGrid &mask,
const GVGrid &mask,
const mf::Procedure &procedure,
Span<volume::GGrid> field_context_inputs,
Span<GVGrid> field_context_inputs,
Span<GFieldRef> fields_to_evaluate,
Span<int> field_indices,
Span<volume::GMutableGrid *> dst_grids,
MutableSpan<volume::GGrid> r_grids,
Span<GVMutableGrid> dst_grids,
MutableSpan<GVGrid> r_grids,
MutableSpan<bool> r_is_output_written_to_dst)
{
/* Execute a multifunction procedure on each leaf buffer of the mask.
* Each leaf buffer is a contiguous array that can be used as a span.
* The leaf buffers' active voxel masks are used as index masks. */
using volume::GGrid;
using volume::GMutableGrid;
if (mask.is_empty()) {
if (!mask) {
return;
}
/* Destination arrays are optional. Create a small utility method to access them. */
auto get_dst_grid = [&](int index) -> GMutableGrid * {
auto get_dst_grid = [&](int index) -> GVMutableGrid {
if (dst_grids.is_empty()) {
return {};
}
GMutableGrid *grid_ptr = dst_grids[index];
if (!grid_ptr) {
GVMutableGrid grid = dst_grids[index];
if (!grid) {
return nullptr;
}
return grid_ptr;
return grid;
};
mf::ProcedureExecutor procedure_executor{procedure};
@ -321,14 +319,14 @@ void evaluate_procedure_on_varying_volume_fields(ResourceScope &scope,
const CPPType &type = field.cpp_type();
const int out_index = field_indices[i];
/* Try to get an existing virtual array that the result should be written into. */
GMutableGrid *dst_grid_ptr = get_dst_grid(out_index);
/* Try to get an existing virtual grid that the result should be written into. */
GVMutableGrid dst_grid = get_dst_grid(out_index);
{
GMutableGrid grid_base = GMutableGrid::create(
type, mask, type.default_value(), type.default_value());
GVMutableGrid grid_base = GVMutableGrid::ForGrid(*buffer);
if (!dst_grid_ptr) {
/* Create a destination grid pointer in the resource scope. */
GMutableGrid &dst_grid = scope.add_value<GMutableGrid>(std::move(grid_base));
openvdb::GridBase *buffer = volume::make_grid_for_attribute_type(type);
GVMutableGrid &dst_grid = scope.add_value<GVMutableGrid>(std::move(grid_base));
dst_grid_ptr = &dst_grid;
}
else {
@ -371,14 +369,11 @@ void evaluate_procedure_on_varying_volume_fields(ResourceScope &scope,
void evaluate_procedure_on_constant_volume_fields(ResourceScope & /*scope*/,
const mf::Procedure &procedure,
Span<volume::GGrid> field_context_inputs,
Span<GVGrid> field_context_inputs,
Span<GFieldRef> fields_to_evaluate,
Span<int> field_indices,
MutableSpan<volume::GGrid> r_grids)
MutableSpan<GVGrid> r_grids)
{
using volume::GGrid;
using volume::GMutableGrid;
mf::ProcedureExecutor procedure_executor{procedure};
const IndexMask mask(1);
mf::ParamsBuilder mf_params{procedure_executor, &mask};
@ -429,7 +424,7 @@ void evaluate_procedure_on_constant_volume_fields(ResourceScope & /*scope*/,
using Converter = volume::grid_types::Converter<GridType>;
const T &value = *static_cast<T *>(output_buffers[i]);
r_grids[out_index] = GGrid{GridType::create(Converter::single_value_to_grid(value))};
r_grids[out_index] = GVGrid{GridType::create(Converter::single_value_to_grid(value))};
});
/* Destruct output value buffers, value is stored in grid backgrounds now. */

View File

@ -405,11 +405,6 @@ if(WITH_OPENVDB)
${OPENVDB_INCLUDE_DIRS}
)
add_definitions(-DWITH_OPENVDB ${OPENVDB_DEFINITIONS})
endif()
if(WITH_OPENVDB)
add_definitions(-DWITH_OPENVDB)
if(WITH_OPENVDB_BLOSC)
add_definitions(-DWITH_OPENVDB_BLOSC)
endif()

View File

@ -123,8 +123,8 @@ class EvaluateAtIndexInput final : public bke::GeometryFieldInput {
GVArray get_varray_for_context(const bke::GeometryFieldContext &context,
const IndexMask &mask) const final;
volume::GGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const final
volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const final
{
return {};
}

View File

@ -284,8 +284,8 @@ class AccumulateFieldInput final : public bke::GeometryFieldInput {
return attributes.adapt_domain(std::move(g_output), source_domain_, context.domain());
}
volume::GGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const final
volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const final
{
/* XXX Grids don't have a simple axis over which to accumulate. */
return {};
@ -380,8 +380,8 @@ class TotalFieldInput final : public bke::GeometryFieldInput {
return attributes.adapt_domain(std::move(g_outputs), source_domain_, context.domain());
}
volume::GGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const final
volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const final
{
/* TODO */
return {};

View File

@ -455,8 +455,8 @@ class BlurAttributeFieldInput final : public bke::GeometryFieldInput {
return GVArray::ForGArray(std::move(buffer_b));
}
volume::GGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const final
volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const final
{
/* TODO */
return {};

View File

@ -120,8 +120,8 @@ class EvaluateOnDomainInput final : public bke::GeometryFieldInput {
GVArray::ForGArray(std::move(values)), src_domain_, context.domain());
}
volume::GGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const final
volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const final
{
return {};
}

View File

@ -132,8 +132,8 @@ class IndexOfNearestFieldInput final : public bke::GeometryFieldInput {
return VArray<int>::ForContainer(std::move(result));
}
volume::GGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const final
volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const final
{
return {};
}
@ -207,8 +207,8 @@ class HasNeighborFieldInput final : public bke::GeometryFieldInput {
return VArray<bool>::ForContainer(std::move(result));
}
volume::GGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const final
volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const final
{
return {};
}

View File

@ -84,8 +84,8 @@ class MaterialSelectionFieldInput final : public bke::GeometryFieldInput {
return mesh->attributes().adapt_domain<bool>(std::move(selection), ATTR_DOMAIN_FACE, domain);
}
volume::GGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const final
volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const final
{
return {};
}

View File

@ -46,8 +46,8 @@ class ToolSelectionFieldInput final : public bke::GeometryFieldInput {
return {};
}
}
volume::GGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GGrid & /*mask*/) const final
volume::GVGrid get_volume_grid_for_context(const bke::GeometryFieldContext & /*context*/,
const volume::GVGrid & /*mask*/) const final
{
return {};
}

View File

@ -390,7 +390,7 @@ struct OutputAttributeToStore {
eAttrDomain domain;
StringRefNull name;
GMutableSpan data;
volume::GMutableGrid *grid_data;
volume::GVMutableGrid *grid_data;
};
/**
@ -469,7 +469,7 @@ static Vector<OutputAttributeToStore> compute_attributes_to_store(
continue;
}
const int domain_size = attributes.domain_size(domain);
const volume::GGrid domain_mask = attributes.domain_grid_mask(domain, main_grid);
const volume::GVGrid domain_mask = attributes.domain_grid_mask(domain, main_grid);
bke::GeometryFieldContext field_context{component, domain};
fn::FieldEvaluator field_evaluator{field_context, domain_size};
fn::VolumeFieldEvaluator volume_field_evaluator{field_context, domain_mask};
@ -485,7 +485,7 @@ static Vector<OutputAttributeToStore> compute_attributes_to_store(
field_evaluator.add_with_destination(std::move(field), store.data);
break;
case bke::GeometryComponent::AttributeType::Grid:
store.grid_data = MEM_new<volume::GMutableGrid>(__func__);
store.grid_data = MEM_new<volume::GVMutableGrid>(__func__);
volume_field_evaluator.add_with_destination(std::move(field), *store.grid_data);
break;
}