Geometry Nodes: add simulation support #104924

Closed
Hans Goudey wants to merge 211 commits from geometry-nodes-simulation into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
4 changed files with 132 additions and 12 deletions
Showing only changes of commit 8c81c7de4a - Show all commits

View File

@@ -0,0 +1,93 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include "BLI_bit_vector.hh"
namespace blender::bits {
/**
 * A #BitGroupVector is a compact data structure that allows storing an arbitrary but fixed number
 * of bits per element. For example, it could be used to compactly store 5 bits per vertex in a
 * mesh. The data structure stores the bits in a way so that the #BitSpan for every element is
 * bounded according to #is_bounded_span. This makes sure that operations on entire groups can be
 * implemented efficiently. For example, one can easily `or` one group into another.
 */
template<int64_t InlineBufferCapacity = 64, typename Allocator = GuardedAllocator>
class BitGroupVector {
 private:
  /** Number of usable bits per group, as passed to the constructor. */
  int64_t group_size_ = 0;
  /**
   * Actually stored number of bits per group so that individual groups are bounded according to
   * #is_bounded_span.
   */
  int64_t aligned_group_size_ = 0;
  /** Flat storage for all groups; group `i` starts at bit `i * aligned_group_size_`. */
  BitVector<InlineBufferCapacity, Allocator> data_;

  /**
   * Round `group_size` up so that a single group never straddles an int boundary.
   *
   * NOTE(review): a group size of zero stays zero, which makes #size() report zero groups
   * regardless of the constructor's `size_in_groups` -- confirm callers never pass zero.
   */
  static int64_t align_group_size(const int64_t group_size)
  {
    if (group_size < BitsPerInt) {
      /* Align to next power of two so that a single group never spans across two ints. */
      return int64_t(power_of_2_max_u(uint32_t(group_size)));
    }
    /* Align to a multiple of #BitsPerInt (assumes #BitsPerInt is a power of two). */
    return (group_size + BitsPerInt - 1) & ~(BitsPerInt - 1);
  }

 public:
  BitGroupVector() = default;

  /**
   * Create a vector with \a size_in_groups groups of \a group_size bits each, with every bit
   * initialized to \a value.
   */
  BitGroupVector(const int64_t size_in_groups,
                 const int64_t group_size,
                 const bool value = false,
                 Allocator allocator = {})
      : group_size_(group_size),
        aligned_group_size_(align_group_size(group_size)),
        data_(size_in_groups * aligned_group_size_, value, allocator)
  {
    BLI_assert(group_size >= 0);
    BLI_assert(size_in_groups >= 0);
  }

  /** Get all the bits at an index. */
  BoundedBitSpan operator[](const int64_t i) const
  {
    BLI_assert(i >= 0);
    BLI_assert(i < this->size());
    const int64_t offset = aligned_group_size_ * i;
    return {data_.data() + (offset >> BitToIntIndexShift),
            IndexRange(offset & BitIndexMask, group_size_)};
  }

  /** Get all the bits at an index. */
  MutableBoundedBitSpan operator[](const int64_t i)
  {
    BLI_assert(i >= 0);
    BLI_assert(i < this->size());
    const int64_t offset = aligned_group_size_ * i;
    return {data_.data() + (offset >> BitToIntIndexShift),
            IndexRange(offset & BitIndexMask, group_size_)};
  }

  /** Number of groups. */
  int64_t size() const
  {
    return aligned_group_size_ == 0 ? 0 : data_.size() / aligned_group_size_;
  }

  /** Number of usable bits per group. */
  int64_t group_size() const
  {
    return group_size_;
  }

  /** Range over all group indices. */
  IndexRange index_range() const
  {
    return IndexRange{this->size()};
  }
};
} // namespace blender::bits
namespace blender {
using bits::BitGroupVector;
}

View File

@@ -182,6 +182,7 @@ set(SRC
BLI_assert.h
BLI_astar.h
BLI_atomic_disjoint_set.hh
BLI_bit_group_vector.hh
BLI_bit_ref.hh
BLI_bit_span.hh
BLI_bit_span_ops.hh
@@ -479,6 +480,7 @@ if(WITH_GTESTS)
tests/BLI_array_store_test.cc
tests/BLI_array_test.cc
tests/BLI_array_utils_test.cc
tests/BLI_bit_group_vector_test.cc
tests/BLI_bit_ref_test.cc
tests/BLI_bit_span_test.cc
tests/BLI_bit_array_vector_test.cc

View File

@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "BLI_bit_group_vector.hh"
#include "BLI_strict_flags.h"
#include "testing/testing.h"
namespace blender::bits::tests {
TEST(bit_group_vector, DefaultConstruct)
{
  /* A default-constructed vector holds no groups at all. */
  const BitGroupVector<> empty_groups;
  EXPECT_EQ(empty_groups.size(), 0);
}
TEST(bit_group_vector, Construct)
{
  /* 12 groups of 5 bits each. */
  BitGroupVector<> vec(12, 5);
  /* The number of groups matches the constructor argument. */
  EXPECT_EQ(vec.size(), 12);
  /* Each group reports the logical group size, independent of internal alignment. */
  EXPECT_EQ(vec[0].size(), 5);
  EXPECT_EQ(vec[4].size(), 5);
}
} // namespace blender::bits::tests

View File

@@ -18,7 +18,7 @@
#include "NOD_multi_function.hh"
#include "NOD_node_declaration.hh"
#include "BLI_bit_array_vector.hh"
#include "BLI_bit_group_vector.hh"
#include "BLI_bit_span_ops.hh"
#include "BLI_cpp_types.hh"
#include "BLI_dot_export.hh"
@@ -2576,17 +2576,17 @@ struct GeometryNodesLazyFunctionGraphBuilder {
const int sockets_num = btree_.all_sockets().size();
const int attribute_references_num = attribute_reference_keys.size();
/* The code below uses #BitArrayVector to store a set of attribute references per socket. Each
/* The code below uses #BitGroupVector to store a set of attribute references per socket. Each
* socket has a bit span where each bit corresponds to one attribute reference. */
BitArrayVector<> referenced_by_field_socket(sockets_num, attribute_references_num, false);
BitArrayVector<> propagated_to_geometry_socket(sockets_num, attribute_references_num, false);
BitGroupVector<> referenced_by_field_socket(sockets_num, attribute_references_num, false);
BitGroupVector<> propagated_to_geometry_socket(sockets_num, attribute_references_num, false);
this->gather_referenced_and_potentially_propagated_data(relations_by_node,
attribute_reference_keys,
attribute_reference_infos,
referenced_by_field_socket,
propagated_to_geometry_socket);
BitArrayVector<> required_propagated_to_geometry_socket(
BitGroupVector<> required_propagated_to_geometry_socket(
sockets_num, attribute_references_num, false);
this->gather_required_propagated_data(relations_by_node,
attribute_reference_keys,
@@ -2688,8 +2688,8 @@ struct GeometryNodesLazyFunctionGraphBuilder {
const Span<const aal::RelationsInNode *> relations_by_node,
const Span<AttributeReferenceKey> attribute_reference_keys,
const Span<AttributeReferenceInfo> attribute_reference_infos,
BitArrayVector<> &r_referenced_by_field_socket,
BitArrayVector<> &r_propagated_to_geometry_socket)
BitGroupVector<> &r_referenced_by_field_socket,
BitGroupVector<> &r_propagated_to_geometry_socket)
{
/* Insert initial referenced/propagated attributes. */
for (const int key_index : attribute_reference_keys.index_range()) {
@@ -2759,14 +2759,14 @@ struct GeometryNodesLazyFunctionGraphBuilder {
void gather_required_propagated_data(
const Span<const aal::RelationsInNode *> relations_by_node,
const VectorSet<AttributeReferenceKey> &attribute_reference_keys,
const BitArrayVector<> &referenced_by_field_socket,
const BitArrayVector<> &propagated_to_geometry_socket,
BitArrayVector<> &r_required_propagated_to_geometry_socket)
const BitGroupVector<> &referenced_by_field_socket,
const BitGroupVector<> &propagated_to_geometry_socket,
BitGroupVector<> &r_required_propagated_to_geometry_socket)
{
const aal::RelationsInNode &tree_relations = *btree_.runtime->anonymous_attribute_relations;
const int sockets_num = btree_.all_sockets().size();
const int attribute_references_num = referenced_by_field_socket.group_size();
BitArrayVector<> required_by_geometry_socket(sockets_num, attribute_references_num, false);
BitGroupVector<> required_by_geometry_socket(sockets_num, attribute_references_num, false);
/* Initialize required attributes at group output. */
if (const bNode *group_output_bnode = btree_.group_output_node()) {
@@ -2846,7 +2846,7 @@ struct GeometryNodesLazyFunctionGraphBuilder {
void build_attribute_sets_to_propagate(
const Span<AttributeReferenceKey> attribute_reference_keys,
const Span<AttributeReferenceInfo> attribute_reference_infos,
const BitArrayVector<> &required_propagated_to_geometry_socket)
const BitGroupVector<> &required_propagated_to_geometry_socket)
{
JoinAttibuteSetsCache join_attribute_sets_cache;