BLI: refactor IndexMask for better performance and memory usage #104629

Merged
Jacques Lucke merged 254 commits from JacquesLucke/blender:index-mask-refactor into main 2023-05-24 18:11:47 +02:00
7 changed files with 1593 additions and 2373 deletions
Showing only changes of commit 5d92520aab

View File

@ -2,311 +2,630 @@
#pragma once
/** \file
* \ingroup bli
*
* An IndexMask references an array of unsigned integers with the following property:
* The integers must be in ascending order and there must not be duplicates.
*
* Remember that the array is only referenced and not owned by an IndexMask instance.
*
* In most cases the integers in the array represent some indices into another array. So they
* "select" or "mask" a some elements in that array. Hence the name IndexMask.
*
* The invariant stated above has the nice property that it makes it easy to check if an integer
* array is an IndexRange, i.e. no indices are skipped. That allows functions to implement two code
* paths: One where it iterates over the index array and one where it iterates over the index
* range. The latter is more efficient due to fewer memory reads and the potential use of SIMD
* instructions.
*
* The IndexMask.foreach_index method helps write code that implements both code paths at the
* same time.
*/
#include <array>
#include <optional>
#include <variant>
#include "BLI_bit_span.hh"
#include "BLI_function_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_linear_allocator.hh"
#include "BLI_offset_indices.hh"
#include "BLI_offset_span.hh"
#include "BLI_span.hh"
#include "BLI_vector.hh"
namespace blender {
namespace index_mask {
class IndexMask;
/* Chunks contain up to 2^14 = 16384 indices. */
static constexpr int64_t chunk_size_shift = 14;
static constexpr int64_t chunk_mask_low = (1 << chunk_size_shift) - 1;
static constexpr int64_t chunk_mask_high = ~chunk_mask_low;
static constexpr int64_t chunk_capacity = (1 << chunk_size_shift);
std::array<int16_t, chunk_capacity> build_static_indices_array();
JacquesLucke marked this conversation as resolved
Review

Might be helpful to mention why it's helpful that `max_segment_size` fits in `int16_t`
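/* Note added for illustration (not part of the patch): every value stored inside a chunk is
 * chunk-local, so both the indices and the cumulative segment sizes fit into an int16_t
 * (chunk_capacity = 16384 <= 32767). That halves the memory traffic compared to storing full
 * 64-bit indices and lets full chunks share one static index array. */
static_assert(chunk_capacity <= 32767, "chunk-local values must fit into int16_t");
/* Hypothetical helper showing how a 64-bit index is reassembled from its two parts. */
inline int64_t example_global_index(const int64_t chunk_id, const int16_t chunk_local_index)
{
  return chunk_id * chunk_capacity + chunk_local_index;
}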
const IndexMask &get_static_index_mask_for_min_size(const int64_t min_size);
struct RawChunkIterator {
int16_t segment_i;
int16_t index_in_segment;
};
struct RawMaskIterator {
int64_t chunk_i;
RawChunkIterator chunk_it;
};
/**
* A #Chunk contains an ordered list of segments. Each segment is an array of 16-bit integers.
*/
struct Chunk {
int16_t segments_num;
const int16_t **indices_by_segment;
const int16_t *cumulative_segment_sizes;
RawChunkIterator end_iterator() const;
OffsetIndices<int16_t> segment_offsets() const;
RawChunkIterator index_to_iterator(const int16_t index) const;
int16_t iterator_to_index(const RawChunkIterator &it) const;
int16_t size() const;
int16_t segment_size(const int16_t segment_i) const;
bool is_full() const;
bool is_full_after_inclusive(const RawChunkIterator &it) const;
bool is_full_until_exclusive(const RawChunkIterator &it) const;
template<typename Fn> void foreach_span(Fn &&fn) const;
};
struct IndexMaskData {
int64_t chunks_num;
int64_t indices_num;
const Chunk *chunks;
const int64_t *chunk_ids;
const int64_t *cumulative_chunk_sizes;
RawChunkIterator begin_it;
RawChunkIterator end_it;
};
struct ChunkSlice {
const Chunk *chunk;
RawChunkIterator begin_it;
RawChunkIterator end_it;
template<typename Fn> void foreach_span(Fn &&fn) const;
};
Review

Hmm, why would the first element not be 0 always?

Review

The first element is often not 0 when the `IndexMask` is a slice of another mask.
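/* Hypothetical illustration (not part of the patch): a parent mask might have cumulative chunk
 * sizes {0, 16384, 20000, 30000}. A slice that starts in the second chunk only offsets the
 * pointer, so the slice sees {16384, 20000, 30000} and its first element is not 0. Consumers
 * therefore subtract the first entry, e.g.: */
inline int64_t example_indices_before_chunk(const int64_t *cumulative_chunk_sizes,
                                            const int64_t chunk_i)
{
  return cumulative_chunk_sizes[chunk_i] - cumulative_chunk_sizes[0];
}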
struct Expr {
enum class Type {
Atomic,
Union,
Difference,
Complement,
Intersection,
};
Type type;
Expr(const Type type) : type(type)
{
}
};
struct AtomicExpr : public Expr {
const IndexMask *mask;
AtomicExpr(const IndexMask &mask) : Expr(Type::Atomic), mask(&mask)
{
}
};
struct UnionExpr : public Expr {
Vector<const Expr *> children;
UnionExpr(Vector<const Expr *> children) : Expr(Type::Union), children(std::move(children))
{
}
};
struct DifferenceExpr : public Expr {
const Expr *base = nullptr;
Vector<const Expr *> children;
DifferenceExpr(const Expr &base, Vector<const Expr *> children)
: Expr(Type::Difference), base(&base), children(std::move(children))
{
}
};
struct ComplementExpr : public Expr {
const Expr *base = nullptr;
ComplementExpr(const Expr &base) : Expr(Type::Complement), base(&base)
{
}
};
struct IntersectionExpr : public Expr {
Vector<const Expr *> children;
IntersectionExpr(Vector<const Expr *> children)
: Expr(Type::Intersection), children(std::move(children))
{
}
};
class IndexMaskMemory : public LinearAllocator<> {
};
class IndexMask {
private:
/** The underlying reference to sorted integers. */
Span<int64_t> indices_;
IndexMaskData data_;
public:
/** Creates an IndexMask that contains no indices. */
IndexMask() = default;
IndexMask();
IndexMask(int64_t size);
IndexMask(IndexRange range);
/**
* Create an IndexMask using the given integer array.
* This constructor asserts that the given integers are in ascending order and that there are no
* duplicates.
*/
IndexMask(Span<int64_t> indices) : indices_(indices)
{
BLI_assert(IndexMask::indices_are_valid_index_mask(indices));
}
int64_t size() const;
OffsetIndices<int64_t> chunk_offsets() const;
int64_t first() const;
int64_t last() const;
int64_t min_array_size() const;
/**
* Use this method when you know that no indices are skipped. It is more efficient than preparing
* an integer array all the time.
*/
IndexMask(IndexRange range) : indices_(range.as_span())
{
}
RawMaskIterator index_to_iterator(const int64_t index) const;
int64_t iterator_to_index(const RawMaskIterator &it) const;
/**
* Construct an IndexMask from a sorted list of indices. Note, the created IndexMask is only
* valid as long as the initializer_list is valid.
*
* Don't do this:
* IndexMask mask = {3, 4, 5};
*
* Do this:
* do_something_with_an_index_mask({3, 4, 5});
*/
IndexMask(const std::initializer_list<int64_t> &indices) : IndexMask(Span<int64_t>(indices))
{
}
IndexMask slice(IndexRange range) const;
/**
* Creates an IndexMask that references the indices [0, n-1].
*/
explicit IndexMask(int64_t n) : IndexMask(IndexRange(n))
{
}
void foreach_span(FunctionRef<void(OffsetSpan<int64_t, int16_t>)> fn) const;
template<typename Fn> void foreach_range(Fn &&fn) const;
template<typename Fn> void foreach_span_or_range(Fn &&fn) const;
template<typename Fn> void foreach_index(Fn &&fn) const;
/** Checks that the indices are non-negative and in strictly ascending order (no duplicates). */
static bool indices_are_valid_index_mask(Span<int64_t> indices)
{
if (!indices.is_empty()) {
if (indices.first() < 0) {
return false;
}
}
for (int64_t i = 1; i < indices.size(); i++) {
if (indices[i - 1] >= indices[i]) {
return false;
}
}
return true;
}
template<typename T> static IndexMask from_indices(Span<T> indices, IndexMaskMemory &memory);
static IndexMask from_bits(BitSpan bits, IndexMaskMemory &memory, int64_t offset = 0);
static IndexMask from_expr(const Expr &expr, IndexRange universe, IndexMaskMemory &memory);
operator Span<int64_t>() const
{
return indices_;
}
template<typename T> void to_indices(MutableSpan<T> r_indices) const;
void to_bits(MutableBitSpan r_bits, int64_t offset = 0) const;
std::optional<IndexRange> to_range() const;
const int64_t *begin() const
{
return indices_.begin();
}
const int64_t *end() const
{
return indices_.end();
}
/**
* Returns the n-th index referenced by this IndexMask. The `index_range` method returns an
* IndexRange containing all indices that can be used as parameter here.
*/
int64_t operator[](int64_t n) const
{
return indices_[n];
}
/**
* Returns the minimum size an array has to have, if the integers in this IndexMask are going to
* be used as indices in that array.
*/
int64_t min_array_size() const
{
if (indices_.size() == 0) {
return 0;
}
else {
return indices_.last() + 1;
}
}
Span<int64_t> indices() const
{
return indices_;
}
/**
* Returns true if this IndexMask does not skip any indices. This check requires O(1) time.
*/
bool is_range() const
{
return indices_.size() > 0 && indices_.last() - indices_.first() == indices_.size() - 1;
}
/**
* Returns the IndexRange referenced by this IndexMask. This method should only be called after
* the caller made sure that this IndexMask is actually a range.
*/
IndexRange as_range() const
{
BLI_assert(this->is_range());
return IndexRange{indices_.first(), indices_.size()};
}
/**
* Calls the given callback for every referenced index. The callback has to take a single
* int64_t index as its parameter.
*
* This method implements different code paths for the cases when the IndexMask represents a
* range or not.
*/
template<typename CallbackT> void foreach_index(const CallbackT &callback) const
{
this->to_best_mask_type([&](const auto &mask) {
for (const int64_t i : mask) {
callback(i);
}
});
}
/**
* Often an #IndexMask wraps a range of indices without any gaps. In this case, it is more
* efficient to compute the indices in a loop on-the-fly instead of reading them from memory.
* This method makes it easy to generate code for both cases.
*
* The given function is expected to take one parameter that can either be of type #IndexRange or
* #Span<int64_t>.
*/
template<typename Fn> void to_best_mask_type(const Fn &fn) const
{
if (this->is_range()) {
const IndexRange masked_range = this->as_range();
fn(masked_range);
}
else {
const Span<int64_t> masked_indices = indices_;
fn(masked_indices);
}
}
/**
* Returns an IndexRange that can be used to index this IndexMask.
*
* The range is [0, number of indices - 1].
*
* This is not to be confused with the `as_range` method.
*/
IndexRange index_range() const
{
return indices_.index_range();
}
/**
* Returns the largest index that is referenced by this IndexMask.
*/
int64_t last() const
{
return indices_.last();
}
/**
* Returns the number of indices referenced by this IndexMask.
*/
int64_t size() const
{
return indices_.size();
}
bool is_empty() const
{
return indices_.is_empty();
}
bool contained_in(const IndexRange range) const
{
if (indices_.is_empty()) {
return true;
}
if (range.size() < indices_.size()) {
return false;
}
return indices_.first() >= range.first() && indices_.last() <= range.last();
}
IndexMask slice(const int64_t start, const int64_t size) const
{
return IndexMask(indices_.slice(start, size));
}
IndexMask slice(const IndexRange slice) const
{
return IndexMask(indices_.slice(slice));
}
IndexMask slice_safe(int64_t start, int64_t size) const;
IndexMask slice_safe(IndexRange slice) const;
/**
* Create a sub-mask that is also shifted to the beginning.
* The shifting to the beginning allows code to work with smaller indices,
* which is more memory efficient.
*
* \return New index mask with the size of #slice. It is either empty or starts with 0.
* It might reference indices that have been appended to #r_new_indices.
*
* Example:
* \code{.unparsed}
* this: [2, 3, 5, 7, 8, 9, 10]
* slice: ^--------^
* output: [0, 2, 4, 5]
* \endcode
*
* All the indices in the sub-mask are shifted by 3 towards zero,
* so that the first index in the output is zero.
*/
IndexMask slice_and_offset(IndexRange slice, Vector<int64_t> &r_new_indices) const;
/**
* Get a new mask that contains all the indices that are not in the current mask.
* If necessary, the indices referenced by the new mask are inserted in #r_new_indices.
*/
IndexMask invert(const IndexRange full_range, Vector<int64_t> &r_new_indices) const;
/**
* Get all contiguous index ranges within the mask.
*/
Vector<IndexRange> extract_ranges() const;
/**
* Similar to #extract_ranges, but works on the inverted mask. So the returned ranges are
* in-between the indices in the mask.
*
* Using this method is generally more efficient than first inverting the index mask and then
* extracting the ranges.
*
* If #r_skip_amounts is passed in, it will contain the number of indices that have been skipped
* before each range in the return value starts.
*/
Vector<IndexRange> extract_ranges_invert(const IndexRange full_range,
Vector<int64_t> *r_skip_amounts = nullptr) const;
const IndexMaskData &data() const;
IndexMaskData &data_for_inplace_construction();
};
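/* Hedged usage sketch (not part of the patch), relying only on the foreach_index API declared
 * above: visit every selected element of a hypothetical `values` array. Internally the mask may
 * take a range-based fast path, but callers do not have to care. */
inline void example_accumulate_selected(const IndexMask &mask,
                                        const Span<float> values,
                                        float &r_sum)
{
  mask.foreach_index([&](const int64_t i) { r_sum += values[i]; });
}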
/** To be used with #call_with_devirtualized_parameters. */
template<bool UseRange, bool UseSpan> struct IndexMaskDevirtualizer {
  const IndexMask &mask;

  template<typename Fn> bool devirtualize(const Fn &fn) const
  {
    if constexpr (UseRange) {
      if (this->mask.is_range()) {
        return fn(this->mask.as_range());
      }
    }
    if constexpr (UseSpan) {
      return fn(this->mask.indices());
    }
    return false;
  }
};
std::ostream &operator<<(std::ostream &stream, const IndexMask &mask);
namespace unique_sorted_indices {
template<typename T> Vector<IndexRange> split_by_chunk(Span<T> indices);
template<typename T>
int64_t split_to_ranges_and_spans(Span<T> indices,
int64_t range_threshold,
Vector<std::variant<IndexRange, Span<T>>> &r_parts);
template<typename T> bool non_empty_is_range(const Span<T> indices);
template<typename T> IndexRange non_empty_as_range(const Span<T> indices);
template<typename T> int64_t find_size_of_next_range(const Span<T> indices);
template<typename T>
int64_t find_size_until_next_range(const Span<T> indices, const int64_t min_range_size);
} // namespace unique_sorted_indices
template<typename Fn>
inline IndexMask grow_indices_to_ranges(const IndexMask &mask,
const Fn &fn,
IndexMaskMemory &memory)
{
Vector<int64_t> indices;
mask.foreach_index([&](const int64_t i) {
const IndexRange new_range = fn(i);
for (const int64_t new_index : new_range) {
indices.append(new_index);
}
});
return IndexMask::from_indices<int64_t>(indices, memory);
}
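/* Hedged usage sketch (not from the patch): expand every selected point index into a block of
 * four corner indices, assuming a hypothetical layout of 4 corners per point. */
inline IndexMask example_grow_points_to_corners(const IndexMask &mask, IndexMaskMemory &memory)
{
  return grow_indices_to_ranges(
      mask, [](const int64_t i) { return IndexRange(i * 4, 4); }, memory);
}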
/* -------------------------------------------------------------------- */
/** \name Inline Utilities
* \{ */
inline const std::array<int16_t, chunk_capacity> &get_static_indices_array()
{
alignas(64) static const std::array<int16_t, chunk_capacity> data = build_static_indices_array();
return data;
}
inline int64_t index_to_chunk_id(const int64_t i)
{
return i >> chunk_size_shift;
}
inline int64_t size_to_chunk_num(const int64_t size)
{
return (size + chunk_capacity - 1) >> chunk_size_shift;
}
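/* Worked example (illustrative only) of the chunk arithmetic above, with chunk_size_shift = 14: */
static_assert((int64_t(40000) >> 14) == 2);                /* Index 40000 lives in chunk 2. */
static_assert((int64_t(40000) & ((1 << 14) - 1)) == 7232); /* Its chunk-local position is 7232. */
static_assert(((int64_t(40000) + 16383) >> 14) == 3);      /* A mask of 40000 indices needs 3 chunks. */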
class IndexRangeChecker {
private:
const int16_t *data_;
uintptr_t adder_;
public:
  IndexRangeChecker() : data_(get_static_indices_array().data())
  {
    adder_ = std::numeric_limits<uintptr_t>::max() -
             uintptr_t(get_static_indices_array().data() + chunk_capacity);
  }
bool check(const Span<int16_t> indices) const;
bool check_static(const Span<int16_t> indices) const;
};
inline bool IndexRangeChecker::check(const Span<int16_t> indices) const
{
return indices.last() - indices.first() == indices.size() - 1;
}
inline bool IndexRangeChecker::check_static(const Span<int16_t> indices) const
{
const uintptr_t indices_ptr = uintptr_t(indices.data());
return indices_ptr + adder_ >
std::numeric_limits<uintptr_t>::max() - chunk_capacity * sizeof(int16_t);
}
/* -------------------------------------------------------------------- */
/** \name Unique Sorted Indices Inline Methods
* \{ */
namespace unique_sorted_indices {
template<typename T> inline bool non_empty_is_range(const Span<T> indices)
{
BLI_assert(!indices.is_empty());
return indices.last() - indices.first() == indices.size() - 1;
}
template<typename T> inline IndexRange non_empty_as_range(const Span<T> indices)
{
BLI_assert(!indices.is_empty());
BLI_assert(non_empty_is_range(indices));
return IndexRange(indices.first(), indices.size());
}
template<typename T> inline int64_t find_size_of_next_range(const Span<T> indices)
{
BLI_assert(!indices.is_empty());
return std::lower_bound(
indices.begin(),
indices.end(),
0,
[indices, offset = indices[0]](const T &element, const int64_t /*dummy*/) {
const int64_t element_index = &element - indices.begin();
return element - offset == element_index;
}) -
indices.begin();
}
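/* Hedged reference version (illustration only) of what the binary search above computes: the
 * length of the leading run of consecutive values in a sorted, duplicate-free span. */
template<typename T> inline int64_t example_find_size_of_next_range_linear(const Span<T> indices)
{
  BLI_assert(!indices.is_empty());
  int64_t size = 1;
  while (size < indices.size() && indices[size] == indices[size - 1] + 1) {
    size++;
  }
  return size;
}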
template<typename T>
JacquesLucke marked this conversation as resolved Outdated

"optimizes for" might be more helpful if it said "generates a separate case for"
inline int64_t find_size_until_next_range(const Span<T> indices, const int64_t min_range_size)
{
BLI_assert(!indices.is_empty());
int64_t current_range_size = 1;
int64_t last_value = indices[0];
for (const int64_t i : indices.index_range().drop_front(1)) {
const T current_value = indices[i];
if (current_value == last_value + 1) {
current_range_size++;
if (current_range_size >= min_range_size) {
return i - min_range_size + 1;
}
}
else {
current_range_size = 1;
}
last_value = current_value;
}
return indices.size();
}
} // namespace unique_sorted_indices
/* -------------------------------------------------------------------- */
/** \name #Chunk Inline Methods
* \{ */
inline RawChunkIterator Chunk::index_to_iterator(const int16_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < this->segment_offsets().total_size());
RawChunkIterator it;
it.segment_i = this->segment_offsets().find_range_index(index);
it.index_in_segment = index - this->cumulative_segment_sizes[it.segment_i];
return it;
}
inline int16_t Chunk::iterator_to_index(const RawChunkIterator &it) const
{
BLI_assert(it.segment_i >= 0);
BLI_assert(it.segment_i < this->segments_num);
BLI_assert(it.index_in_segment >= 0);
BLI_assert(it.index_in_segment < this->segment_offsets().size(it.segment_i));
return this->cumulative_segment_sizes[it.segment_i] + it.index_in_segment;
}
/* -------------------------------------------------------------------- */
/** \name #RawChunkIterator Inline Methods
* \{ */
inline bool operator!=(const RawChunkIterator &a, const RawChunkIterator &b)
{
return a.index_in_segment != b.index_in_segment || a.segment_i != b.segment_i;
}
/* -------------------------------------------------------------------- */
/** \name #RawMaskIterator Inline Methods
* \{ */
inline bool operator!=(const RawMaskIterator &a, const RawMaskIterator &b)
{
return a.chunk_it != b.chunk_it || a.chunk_i != b.chunk_i;
JacquesLucke marked this conversation as resolved Outdated

`IndexMaskFromSegment` could probably get a more "private" API, with only public `mask()` and `update()` methods and everything else private. That might make it more obvious how it's supposed to be used.
}
inline bool operator==(const RawMaskIterator &a, const RawMaskIterator &b)
{
return !(a != b);
}
/* -------------------------------------------------------------------- */
/** \name #IndexMask Inline Methods
* \{ */
inline IndexMask::IndexMask()
{
static constexpr int64_t cumulative_sizes_for_empty_mask[1] = {0};
data_.chunks_num = 0;
data_.indices_num = 0;
data_.chunks = nullptr;
data_.chunk_ids = nullptr;
data_.cumulative_chunk_sizes = cumulative_sizes_for_empty_mask;
data_.begin_it.segment_i = 0;
data_.begin_it.index_in_segment = 0;
data_.end_it.segment_i = 0;
data_.end_it.index_in_segment = 0;
}
inline IndexMask::IndexMask(const int64_t size)
{
*this = get_static_index_mask_for_min_size(size);
data_.chunks_num = size_to_chunk_num(size);
data_.indices_num = size;
data_.end_it.index_in_segment = (size == chunk_capacity) ? chunk_capacity :
size & chunk_mask_low;
}
inline IndexMask::IndexMask(const IndexRange range)
{
if (range.is_empty()) {
return;
}
*this = get_static_index_mask_for_min_size(range.one_after_last());
const int64_t first_chunk_id = index_to_chunk_id(range.first());
const int64_t last_chunk_id = index_to_chunk_id(range.last());
data_.chunks_num = last_chunk_id - first_chunk_id + 1;
data_.indices_num = range.size();
data_.chunks -= first_chunk_id;
data_.chunk_ids -= first_chunk_id;
data_.cumulative_chunk_sizes -= first_chunk_id;
data_.begin_it.segment_i = 0;
data_.begin_it.index_in_segment = range.first() & chunk_mask_low;
data_.end_it.segment_i = 0;
data_.end_it.index_in_segment = range.one_after_last() & chunk_mask_low;
}
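/* Illustration (not part of the patch): constructing a mask from a range allocates no per-index
 * storage, it only references the shared static index array. */
inline IndexMask example_range_mask()
{
  const IndexMask mask{IndexRange(100, 50)}; /* References the indices 100..149. */
  BLI_assert(mask.size() == 50);
  BLI_assert(mask.first() == 100);
  BLI_assert(mask.last() == 149);
  return mask;
}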
inline int64_t IndexMask::size() const
{
return data_.indices_num;
}
inline OffsetIndices<int64_t> IndexMask::chunk_offsets() const
{
return Span<int64_t>(data_.cumulative_chunk_sizes, data_.chunks_num + 1);
}
inline int64_t IndexMask::first() const
{
BLI_assert(data_.indices_num > 0);
return chunk_capacity * data_.chunk_ids[0] +
data_.chunks[0]
.indices_by_segment[data_.begin_it.segment_i][data_.begin_it.index_in_segment];
}
inline int64_t IndexMask::last() const
{
BLI_assert(data_.indices_num > 0);
const int64_t chunk_i = data_.chunks_num - 1;
return chunk_capacity * data_.chunk_ids[chunk_i] +
data_.chunks[chunk_i]
.indices_by_segment[data_.end_it.segment_i][data_.end_it.index_in_segment - 1];
}
inline int64_t IndexMask::min_array_size() const
{
if (data_.indices_num == 0) {
return 0;
}
return this->last() + 1;
}
inline RawMaskIterator IndexMask::index_to_iterator(const int64_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < data_.indices_num);
RawMaskIterator it;
const int16_t begin_index = data_.chunks[0].iterator_to_index(data_.begin_it);
it.chunk_i = this->chunk_offsets().find_range_index(index + begin_index +
data_.cumulative_chunk_sizes[0]);
const Chunk &chunk = data_.chunks[it.chunk_i];
it.chunk_it = chunk.index_to_iterator((index + begin_index) & chunk_mask_low);
return it;
}
inline int64_t IndexMask::iterator_to_index(const RawMaskIterator &it) const
{
BLI_assert(it.chunk_i >= 0);
BLI_assert(it.chunk_i < data_.chunks_num);
const int16_t begin_index = data_.chunks[0].iterator_to_index(data_.begin_it);
return data_.cumulative_chunk_sizes[it.chunk_i] - data_.cumulative_chunk_sizes[0] - begin_index;
}
inline IndexMask IndexMask::slice(const IndexRange range) const
{
if (range.is_empty()) {
return {};
}
const RawMaskIterator first_it = this->index_to_iterator(range.first());
const RawMaskIterator last_it = this->index_to_iterator(range.last());
IndexMask sliced;
sliced.data_.chunks_num = last_it.chunk_i - first_it.chunk_i + 1;
sliced.data_.indices_num = range.size();
sliced.data_.chunks = data_.chunks + first_it.chunk_i;
sliced.data_.chunk_ids = data_.chunk_ids + first_it.chunk_i;
sliced.data_.cumulative_chunk_sizes = data_.cumulative_chunk_sizes + first_it.chunk_i;
sliced.data_.begin_it = first_it.chunk_it;
sliced.data_.end_it.segment_i = last_it.chunk_it.segment_i;
sliced.data_.end_it.index_in_segment = last_it.chunk_it.index_in_segment + 1;
return sliced;
}
inline const IndexMaskData &IndexMask::data() const
{
return data_;
}
inline IndexMaskData &IndexMask::data_for_inplace_construction()
{
return const_cast<IndexMaskData &>(data_);
}
inline RawChunkIterator Chunk::end_iterator() const
{
RawChunkIterator data;
if (this->segments_num > 0) {
data.segment_i = this->segments_num - 1;
data.index_in_segment = this->segment_offsets().size(this->segments_num - 1);
}
else {
data.segment_i = 0;
data.index_in_segment = 0;
}
return data;
}
inline OffsetIndices<int16_t> Chunk::segment_offsets() const
{
return Span<int16_t>(this->cumulative_segment_sizes, this->segments_num + 1);
}
inline int16_t Chunk::size() const
{
return this->cumulative_segment_sizes[this->segments_num] - this->cumulative_segment_sizes[0];
}
inline bool Chunk::is_full() const
{
return this->size() == chunk_capacity;
}
inline bool Chunk::is_full_after_inclusive(const RawChunkIterator &it) const
{
const Span<int16_t> indices{this->indices_by_segment[it.segment_i] + it.index_in_segment,
this->segment_size(it.segment_i) - it.index_in_segment};
return unique_sorted_indices::non_empty_is_range(indices);
}
inline bool Chunk::is_full_until_exclusive(const RawChunkIterator &it) const
{
if (it.segment_i > 0) {
return false;
}
return this->indices_by_segment[0][it.index_in_segment] == it.index_in_segment;
}
inline int16_t Chunk::segment_size(const int16_t segment_i) const
{
return this->cumulative_segment_sizes[segment_i + 1] - this->cumulative_segment_sizes[segment_i];
}
template<typename Fn> inline void Chunk::foreach_span(Fn &&fn) const
{
for (const int64_t segment_i : IndexRange(this->segments_num)) {
const Span<int16_t> indices{this->indices_by_segment[segment_i],
this->segment_size(segment_i)};
fn(indices);
}
}
template<typename Fn> inline void ChunkSlice::foreach_span(Fn &&fn) const
{
if (this->begin_it.segment_i == this->end_it.segment_i) {
const int64_t segment_i = this->begin_it.segment_i;
const int64_t begin_i = this->begin_it.index_in_segment;
const int64_t end_i = this->end_it.index_in_segment;
const Span<int16_t> indices{this->chunk->indices_by_segment[segment_i] + begin_i,
end_i - begin_i};
fn(indices);
}
else {
{
const int64_t first_segment_i = this->begin_it.segment_i;
const int64_t begin_i = this->begin_it.index_in_segment;
const int64_t end_i = this->chunk->segment_size(first_segment_i);
const Span<int16_t> indices{this->chunk->indices_by_segment[first_segment_i] + begin_i,
end_i - begin_i};
fn(indices);
}
for (int64_t segment_i = this->begin_it.segment_i + 1; segment_i < this->end_it.segment_i;
segment_i++) {
const int64_t begin_i = 0;
const int64_t end_i = this->chunk->segment_size(segment_i);
const Span<int16_t> indices{this->chunk->indices_by_segment[segment_i] + begin_i,
end_i - begin_i};
fn(indices);
}
{
const int64_t last_segment_i = this->end_it.segment_i;
const int64_t begin_i = 0;
const int64_t end_i = this->end_it.index_in_segment;
const Span<int16_t> indices{this->chunk->indices_by_segment[last_segment_i] + begin_i,
end_i - begin_i};
fn(indices);
}
}
}
template<typename Fn> inline void IndexMask::foreach_index(Fn &&fn) const
{
this->foreach_span([&](const OffsetSpan<int64_t, int16_t> indices) {
for (const int64_t index : indices) {
fn(index);
}
});
}
template<typename Fn> inline void IndexMask::foreach_span_or_range(Fn &&fn) const
{
IndexRangeChecker is_index_mask;
this->foreach_span([&, is_index_mask](const OffsetSpan<int64_t, int16_t> indices) {
if (is_index_mask.check(indices.base_span())) {
fn(IndexRange(indices[0], indices.size()));
}
else {
fn(indices);
}
});
}
template<typename Fn> inline void IndexMask::foreach_range(Fn &&fn) const
{
this->foreach_span([&](const OffsetSpan<int64_t, int16_t> indices) {
Span<int16_t> base_indices = indices.base_span();
while (!base_indices.is_empty()) {
const int64_t next_range_size = unique_sorted_indices::find_size_of_next_range(base_indices);
fn(IndexRange(int64_t(base_indices[0]) + indices.offset(), next_range_size));
base_indices = base_indices.drop_front(next_range_size);
}
});
}
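/* Hedged usage sketch (not part of the patch): mark the selected elements of a hypothetical
 * selection array by whole ranges, so the inner fill can be vectorized when the mask is mostly
 * contiguous. */
inline void example_fill_selection(const IndexMask &mask, MutableSpan<bool> r_selection)
{
  mask.foreach_range([&](const IndexRange range) { r_selection.slice(range).fill(true); });
}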
} // namespace index_mask
} // namespace blender

View File

@ -1,631 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include <array>
#include <optional>
#include <variant>
#include "BLI_bit_span.hh"
#include "BLI_function_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_linear_allocator.hh"
#include "BLI_offset_indices.hh"
#include "BLI_offset_span.hh"
#include "BLI_span.hh"
#include "BLI_vector.hh"
namespace blender {
namespace index_mask {
class IndexMask;
/* Chunks contain up to 2^14 = 16384 indices. */
static constexpr int64_t chunk_size_shift = 14;
static constexpr int64_t chunk_mask_low = (1 << chunk_size_shift) - 1;
static constexpr int64_t chunk_mask_high = ~chunk_mask_low;
static constexpr int64_t chunk_capacity = (1 << chunk_size_shift);
std::array<int16_t, chunk_capacity> build_static_indices_array();
const IndexMask &get_static_index_mask_for_min_size(const int64_t min_size);
struct RawChunkIterator {
int16_t segment_i;
int16_t index_in_segment;
};
struct RawMaskIterator {
int64_t chunk_i;
RawChunkIterator chunk_it;
};
/**
* A #Chunk contains an ordered list of segments. Each segment is an array of 16-bit integers.
*/
struct Chunk {
int16_t segments_num;
const int16_t **indices_by_segment;
const int16_t *cumulative_segment_sizes;
RawChunkIterator end_iterator() const;
OffsetIndices<int16_t> segment_offsets() const;
RawChunkIterator index_to_iterator(const int16_t index) const;
int16_t iterator_to_index(const RawChunkIterator &it) const;
int16_t size() const;
int16_t segment_size(const int16_t segment_i) const;
bool is_full() const;
bool is_full_after_inclusive(const RawChunkIterator &it) const;
bool is_full_until_exclusive(const RawChunkIterator &it) const;
template<typename Fn> void foreach_span(Fn &&fn) const;
};
struct IndexMaskData {
int64_t chunks_num;
int64_t indices_num;
const Chunk *chunks;
const int64_t *chunk_ids;
const int64_t *cumulative_chunk_sizes;
RawChunkIterator begin_it;
RawChunkIterator end_it;
};
struct ChunkSlice {
const Chunk *chunk;
RawChunkIterator begin_it;
RawChunkIterator end_it;
template<typename Fn> void foreach_span(Fn &&fn) const;
};
struct Expr {
enum class Type {
Atomic,
Union,
Difference,
Complement,
Intersection,
};
Type type;
Expr(const Type type) : type(type)
{
}
};
struct AtomicExpr : public Expr {
const IndexMask *mask;
AtomicExpr(const IndexMask &mask) : Expr(Type::Atomic), mask(&mask)
{
}
};
struct UnionExpr : public Expr {
Vector<const Expr *> children;
UnionExpr(Vector<const Expr *> children) : Expr(Type::Union), children(std::move(children))
{
}
};
struct DifferenceExpr : public Expr {
const Expr *base = nullptr;
Vector<const Expr *> children;
DifferenceExpr(const Expr &base, Vector<const Expr *> children)
: Expr(Type::Difference), base(&base), children(std::move(children))
{
}
};
struct ComplementExpr : public Expr {
const Expr *base = nullptr;
ComplementExpr(const Expr &base) : Expr(Type::Complement), base(&base)
{
}
};
struct IntersectionExpr : public Expr {
Vector<const Expr *> children;
IntersectionExpr(Vector<const Expr *> children)
: Expr(Type::Intersection), children(std::move(children))
{
}
};
class IndexMaskMemory : public LinearAllocator<> {
};
class IndexMask {
private:
IndexMaskData data_;
public:
IndexMask();
IndexMask(int64_t size);
IndexMask(IndexRange range);
int64_t size() const;
OffsetIndices<int64_t> chunk_offsets() const;
int64_t first() const;
int64_t last() const;
int64_t min_array_size() const;
RawMaskIterator index_to_iterator(const int64_t index) const;
int64_t iterator_to_index(const RawMaskIterator &it) const;
IndexMask slice(IndexRange range) const;
void foreach_span(FunctionRef<void(OffsetSpan<int64_t, int16_t>)> fn) const;
template<typename Fn> void foreach_range(Fn &&fn) const;
template<typename Fn> void foreach_span_or_range(Fn &&fn) const;
template<typename Fn> void foreach_index(Fn &&fn) const;
template<typename T> static IndexMask from_indices(Span<T> indices, IndexMaskMemory &memory);
static IndexMask from_bits(BitSpan bits, IndexMaskMemory &memory, int64_t offset = 0);
static IndexMask from_expr(const Expr &expr, IndexRange universe, IndexMaskMemory &memory);
template<typename T> void to_indices(MutableSpan<T> r_indices) const;
void to_bits(MutableBitSpan r_bits, int64_t offset = 0) const;
std::optional<IndexRange> to_range() const;
const IndexMaskData &data() const;
IndexMaskData &data_for_inplace_construction();
};
std::ostream &operator<<(std::ostream &stream, const IndexMask &mask);
namespace unique_sorted_indices {
template<typename T> Vector<IndexRange> split_by_chunk(Span<T> indices);
template<typename T>
int64_t split_to_ranges_and_spans(Span<T> indices,
int64_t range_threshold,
Vector<std::variant<IndexRange, Span<T>>> &r_parts);
template<typename T> bool non_empty_is_range(const Span<T> indices);
template<typename T> IndexRange non_empty_as_range(const Span<T> indices);
template<typename T> int64_t find_size_of_next_range(const Span<T> indices);
template<typename T>
int64_t find_size_until_next_range(const Span<T> indices, const int64_t min_range_size);
} // namespace unique_sorted_indices
template<typename Fn>
inline IndexMask grow_indices_to_ranges(const IndexMask &mask,
const Fn &fn,
IndexMaskMemory &memory)
{
Vector<int64_t> indices;
mask.foreach_index([&](const int64_t i) {
const IndexRange new_range = fn(i);
for (const int64_t new_index : new_range) {
indices.append(new_index);
}
});
return IndexMask::from_indices<int64_t>(indices, memory);
}
/* -------------------------------------------------------------------- */
/** \name Inline Utilities
* \{ */
inline const std::array<int16_t, chunk_capacity> &get_static_indices_array()
{
alignas(64) static const std::array<int16_t, chunk_capacity> data = build_static_indices_array();
return data;
}
inline int64_t index_to_chunk_id(const int64_t i)
{
return i >> chunk_size_shift;
}
inline int64_t size_to_chunk_num(const int64_t size)
{
return (size + chunk_capacity - 1) >> chunk_size_shift;
}
class IndexRangeChecker {
private:
const int16_t *data_;
uintptr_t adder_;
public:
IndexRangeChecker() : data_(get_static_indices_array().data())
{
adder_ = std::numeric_limits<uintptr_t>::max() -
uintptr_t(get_static_indices_array().data() + chunk_capacity);
}
bool check(const Span<int16_t> indices) const;
bool check_static(const Span<int16_t> indices) const;
};
inline bool IndexRangeChecker::check(const Span<int16_t> indices) const
{
return indices.last() - indices.first() == indices.size() - 1;
}
inline bool IndexRangeChecker::check_static(const Span<int16_t> indices) const
{
const uintptr_t indices_ptr = uintptr_t(indices.data());
return indices_ptr + adder_ >
std::numeric_limits<uintptr_t>::max() - chunk_capacity * sizeof(int16_t);
}
/* -------------------------------------------------------------------- */
/** \name Unique Sorted Indices Inline Methods
* \{ */
namespace unique_sorted_indices {
template<typename T> inline bool non_empty_is_range(const Span<T> indices)
{
BLI_assert(!indices.is_empty());
return indices.last() - indices.first() == indices.size() - 1;
}
template<typename T> inline IndexRange non_empty_as_range(const Span<T> indices)
{
BLI_assert(!indices.is_empty());
BLI_assert(non_empty_is_range(indices));
return IndexRange(indices.first(), indices.size());
}
template<typename T> inline int64_t find_size_of_next_range(const Span<T> indices)
{
BLI_assert(!indices.is_empty());
return std::lower_bound(
indices.begin(),
indices.end(),
0,
[indices, offset = indices[0]](const T &element, const int64_t /*dummy*/) {
const int64_t element_index = &element - indices.begin();
return element - offset == element_index;
}) -
indices.begin();
}
template<typename T>
inline int64_t find_size_until_next_range(const Span<T> indices, const int64_t min_range_size)
{
BLI_assert(!indices.is_empty());
int64_t current_range_size = 1;
int64_t last_value = indices[0];
for (const int64_t i : indices.index_range().drop_front(1)) {
const T current_value = indices[i];
if (current_value == last_value + 1) {
current_range_size++;
if (current_range_size >= min_range_size) {
return i - min_range_size + 1;
}
}
else {
current_range_size = 1;
}
last_value = current_value;
}
return indices.size();
}
} // namespace unique_sorted_indices
/* -------------------------------------------------------------------- */
/** \name #Chunk Inline Methods
* \{ */
inline RawChunkIterator Chunk::index_to_iterator(const int16_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < this->segment_offsets().total_size());
RawChunkIterator it;
it.segment_i = this->segment_offsets().find_range_index(index);
it.index_in_segment = index - this->cumulative_segment_sizes[it.segment_i];
return it;
}
inline int16_t Chunk::iterator_to_index(const RawChunkIterator &it) const
{
BLI_assert(it.segment_i >= 0);
BLI_assert(it.segment_i < this->segments_num);
BLI_assert(it.index_in_segment >= 0);
BLI_assert(it.index_in_segment < this->segment_offsets().size(it.segment_i));
return this->cumulative_segment_sizes[it.segment_i] + it.index_in_segment;
}
/* -------------------------------------------------------------------- */
/** \name #RawChunkIterator Inline Methods
* \{ */
inline bool operator!=(const RawChunkIterator &a, const RawChunkIterator &b)
{
return a.index_in_segment != b.index_in_segment || a.segment_i != b.segment_i;
}
/* -------------------------------------------------------------------- */
/** \name #RawMaskIterator Inline Methods
* \{ */
inline bool operator!=(const RawMaskIterator &a, const RawMaskIterator &b)
{
return a.chunk_it != b.chunk_it || a.chunk_i != b.chunk_i;
}
inline bool operator==(const RawMaskIterator &a, const RawMaskIterator &b)
{
return !(a != b);
}
/* -------------------------------------------------------------------- */
/** \name #IndexMask Inline Methods
* \{ */
inline IndexMask::IndexMask()
{
static constexpr int64_t cumulative_sizes_for_empty_mask[1] = {0};
data_.chunks_num = 0;
data_.indices_num = 0;
data_.chunks = nullptr;
data_.chunk_ids = nullptr;
data_.cumulative_chunk_sizes = cumulative_sizes_for_empty_mask;
data_.begin_it.segment_i = 0;
data_.begin_it.index_in_segment = 0;
data_.end_it.segment_i = 0;
data_.end_it.index_in_segment = 0;
}
inline IndexMask::IndexMask(const int64_t size)
{
*this = get_static_index_mask_for_min_size(size);
data_.chunks_num = size_to_chunk_num(size);
data_.indices_num = size;
data_.end_it.index_in_segment = (size == chunk_capacity) ? chunk_capacity :
size & chunk_mask_low;
}
inline IndexMask::IndexMask(const IndexRange range)
{
if (range.is_empty()) {
return;
}
*this = get_static_index_mask_for_min_size(range.one_after_last());
const int64_t first_chunk_id = index_to_chunk_id(range.first());
const int64_t last_chunk_id = index_to_chunk_id(range.last());
data_.chunks_num = last_chunk_id - first_chunk_id + 1;
data_.indices_num = range.size();
data_.chunks -= first_chunk_id;
data_.chunk_ids -= first_chunk_id;
data_.cumulative_chunk_sizes -= first_chunk_id;
data_.begin_it.segment_i = 0;
data_.begin_it.index_in_segment = range.first() & chunk_mask_low;
data_.end_it.segment_i = 0;
data_.end_it.index_in_segment = range.one_after_last() & chunk_mask_low;
}
inline int64_t IndexMask::size() const
{
return data_.indices_num;
}
inline OffsetIndices<int64_t> IndexMask::chunk_offsets() const
{
return Span<int64_t>(data_.cumulative_chunk_sizes, data_.chunks_num + 1);
}
inline int64_t IndexMask::first() const
{
BLI_assert(data_.indices_num > 0);
return chunk_capacity * data_.chunk_ids[0] +
data_.chunks[0]
.indices_by_segment[data_.begin_it.segment_i][data_.begin_it.index_in_segment];
}
inline int64_t IndexMask::last() const
{
BLI_assert(data_.indices_num > 0);
const int64_t chunk_i = data_.chunks_num - 1;
return chunk_capacity * data_.chunk_ids[chunk_i] +
data_.chunks[chunk_i]
.indices_by_segment[data_.end_it.segment_i][data_.end_it.index_in_segment - 1];
}
inline int64_t IndexMask::min_array_size() const
{
if (data_.indices_num == 0) {
return 0;
}
return this->last() + 1;
}
inline RawMaskIterator IndexMask::index_to_iterator(const int64_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < data_.indices_num);
RawMaskIterator it;
const int16_t begin_index = data_.chunks[0].iterator_to_index(data_.begin_it);
it.chunk_i = this->chunk_offsets().find_range_index(index + begin_index +
data_.cumulative_chunk_sizes[0]);
const Chunk &chunk = data_.chunks[it.chunk_i];
it.chunk_it = chunk.index_to_iterator((index + begin_index) & chunk_mask_low);
return it;
}
inline int64_t IndexMask::iterator_to_index(const RawMaskIterator &it) const
{
BLI_assert(it.chunk_i >= 0);
BLI_assert(it.chunk_i < data_.chunks_num);
const int16_t begin_index = data_.chunks[0].iterator_to_index(data_.begin_it);
return data_.cumulative_chunk_sizes[it.chunk_i] - data_.cumulative_chunk_sizes[0] - begin_index;
}
inline IndexMask IndexMask::slice(const IndexRange range) const
{
if (range.is_empty()) {
return {};
}
const RawMaskIterator first_it = this->index_to_iterator(range.first());
const RawMaskIterator last_it = this->index_to_iterator(range.last());
IndexMask sliced;
sliced.data_.chunks_num = last_it.chunk_i - first_it.chunk_i + 1;
sliced.data_.indices_num = range.size();
sliced.data_.chunks = data_.chunks + first_it.chunk_i;
sliced.data_.chunk_ids = data_.chunk_ids + first_it.chunk_i;
sliced.data_.cumulative_chunk_sizes = data_.cumulative_chunk_sizes + first_it.chunk_i;
sliced.data_.begin_it = first_it.chunk_it;
sliced.data_.end_it.segment_i = last_it.chunk_it.segment_i;
sliced.data_.end_it.index_in_segment = last_it.chunk_it.index_in_segment + 1;
return sliced;
}
inline const IndexMaskData &IndexMask::data() const
{
return data_;
}
inline IndexMaskData &IndexMask::data_for_inplace_construction()
{
return const_cast<IndexMaskData &>(data_);
}
inline RawChunkIterator Chunk::end_iterator() const
{
RawChunkIterator data;
if (this->segments_num > 0) {
data.segment_i = this->segments_num - 1;
data.index_in_segment = this->segment_offsets().size(this->segments_num - 1);
}
else {
data.segment_i = 0;
data.index_in_segment = 0;
}
return data;
}
inline OffsetIndices<int16_t> Chunk::segment_offsets() const
{
return Span<int16_t>(this->cumulative_segment_sizes, this->segments_num + 1);
}
inline int16_t Chunk::size() const
{
return this->cumulative_segment_sizes[this->segments_num] - this->cumulative_segment_sizes[0];
}
inline bool Chunk::is_full() const
{
return this->size() == chunk_capacity;
}
inline bool Chunk::is_full_after_inclusive(const RawChunkIterator &it) const
{
const Span<int16_t> indices{this->indices_by_segment[it.segment_i] + it.index_in_segment,
this->segment_size(it.segment_i) - it.index_in_segment};
return unique_sorted_indices::non_empty_is_range(indices);
}
inline bool Chunk::is_full_until_exclusive(const RawChunkIterator &it) const
{
if (it.segment_i > 0) {
return false;
}
return this->indices_by_segment[0][it.index_in_segment] == it.index_in_segment;
}
inline int16_t Chunk::segment_size(const int16_t segment_i) const
{
return this->cumulative_segment_sizes[segment_i + 1] - this->cumulative_segment_sizes[segment_i];
}
template<typename Fn> inline void Chunk::foreach_span(Fn &&fn) const
{
for (const int64_t segment_i : IndexRange(this->segments_num)) {
const Span<int16_t> indices{this->indices_by_segment[segment_i],
this->segment_size(segment_i)};
fn(indices);
}
}
template<typename Fn> inline void ChunkSlice::foreach_span(Fn &&fn) const
{
if (this->begin_it.segment_i == this->end_it.segment_i) {
const int64_t segment_i = this->begin_it.segment_i;
const int64_t begin_i = this->begin_it.index_in_segment;
const int64_t end_i = this->end_it.index_in_segment;
const Span<int16_t> indices{this->chunk->indices_by_segment[segment_i] + begin_i,
end_i - begin_i};
fn(indices);
}
else {
{
const int64_t first_segment_i = this->begin_it.segment_i;
const int64_t begin_i = this->begin_it.index_in_segment;
const int64_t end_i = this->chunk->segment_size(first_segment_i);
const Span<int16_t> indices{this->chunk->indices_by_segment[first_segment_i] + begin_i,
end_i - begin_i};
fn(indices);
}
for (int64_t segment_i = this->begin_it.segment_i + 1; segment_i < this->end_it.segment_i;
segment_i++) {
const int64_t begin_i = 0;
const int64_t end_i = this->chunk->segment_size(segment_i);
const Span<int16_t> indices{this->chunk->indices_by_segment[segment_i] + begin_i,
end_i - begin_i};
fn(indices);
}
{
const int64_t last_segment_i = this->end_it.segment_i;
const int64_t begin_i = 0;
const int64_t end_i = this->end_it.index_in_segment;
const Span<int16_t> indices{this->chunk->indices_by_segment[last_segment_i] + begin_i,
end_i - begin_i};
fn(indices);
}
}
}
template<typename Fn> inline void IndexMask::foreach_index(Fn &&fn) const
{
this->foreach_span([&](const OffsetSpan<int64_t, int16_t> indices) {
for (const int64_t index : indices) {
fn(index);
}
});
}
template<typename Fn> inline void IndexMask::foreach_span_or_range(Fn &&fn) const
{
IndexRangeChecker is_index_mask;
this->foreach_span([&, is_index_mask](const OffsetSpan<int64_t, int16_t> indices) {
if (is_index_mask.check(indices.base_span())) {
fn(IndexRange(indices[0], indices.size()));
}
else {
fn(indices);
}
});
}
template<typename Fn> inline void IndexMask::foreach_range(Fn &&fn) const
{
this->foreach_span([&](const OffsetSpan<int64_t, int16_t> indices) {
Span<int16_t> base_indices = indices.base_span();
while (!base_indices.is_empty()) {
const int64_t next_range_size = unique_sorted_indices::find_size_of_next_range(base_indices);
fn(IndexRange(int64_t(base_indices[0]) + indices.offset(), next_range_size));
base_indices = base_indices.drop_front(next_range_size);
}
});
}
} // namespace index_mask
} // namespace blender

View File

@ -83,7 +83,6 @@ set(SRC
intern/hash_mm2a.c
intern/hash_mm3.c
intern/index_mask.cc
intern/index_mask2.cc
intern/jitter_2d.c
intern/kdtree_1d.c
intern/kdtree_2d.c
@ -242,7 +241,6 @@ set(SRC
BLI_heap.h
BLI_heap_simple.h
BLI_index_mask.hh
BLI_index_mask2.hh
BLI_index_mask_ops.hh
BLI_index_range.hh
BLI_inplace_priority_queue.hh
@ -492,7 +490,6 @@ if(WITH_GTESTS)
tests/BLI_heap_simple_test.cc
tests/BLI_heap_test.cc
tests/BLI_index_mask_test.cc
tests/BLI_index_mask2_test.cc
tests/BLI_index_range_test.cc
tests/BLI_inplace_priority_queue_test.cc
tests/BLI_kdopbvh_test.cc

File diff suppressed because it is too large

View File

@ -1,855 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_array.hh"
#include "BLI_bit_vector.hh"
#include "BLI_enumerable_thread_specific.hh"
#include "BLI_index_mask2.hh"
#include "BLI_set.hh"
#include "BLI_strict_flags.h"
#include "BLI_task.hh"
#include "BLI_timeit.hh"
namespace blender::index_mask {
std::array<int16_t, chunk_capacity> build_static_indices_array()
{
std::array<int16_t, chunk_capacity> data;
for (int16_t i = 0; i < chunk_capacity; i++) {
data[size_t(i)] = i;
}
return data;
}
const IndexMask &get_static_index_mask_for_min_size(const int64_t min_size)
{
static constexpr int64_t size_shift = 30;
static constexpr int64_t max_size = (1 << size_shift);
static constexpr int64_t chunks_num = max_size / chunk_capacity;
BLI_assert(min_size <= max_size);
UNUSED_VARS_NDEBUG(min_size);
static IndexMask static_mask = []() {
static Array<Chunk> chunks_array(chunks_num);
static Array<int64_t> chunk_ids_array(chunks_num);
static Array<int64_t> cumulative_chunk_sizes(chunks_num + 1);
static const int16_t *static_offsets = get_static_indices_array().data();
static const int16_t static_cumulative_segment_sizes[2] = {0, chunk_capacity};
threading::parallel_for(IndexRange(chunks_num), 1024, [&](const IndexRange range) {
for (const int64_t i : range) {
Chunk &chunk = chunks_array[i];
chunk.segments_num = 1;
chunk.indices_by_segment = &static_offsets;
chunk.cumulative_segment_sizes = static_cumulative_segment_sizes;
chunk_ids_array[i] = i;
cumulative_chunk_sizes[i] = i * chunk_capacity;
}
});
cumulative_chunk_sizes.last() = max_size;
IndexMask mask;
IndexMaskData &mask_data = mask.data_for_inplace_construction();
mask_data.chunks_num = chunks_num;
mask_data.indices_num = max_size;
mask_data.chunks = chunks_array.data();
mask_data.chunk_ids = chunk_ids_array.data();
mask_data.cumulative_chunk_sizes = cumulative_chunk_sizes.data();
mask_data.begin_it.segment_i = 0;
mask_data.begin_it.index_in_segment = 0;
mask_data.end_it.segment_i = 0;
mask_data.end_it.index_in_segment = chunk_capacity;
return mask;
}();
return static_mask;
}
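/* Worked numbers (illustrative only) for the static mask built above: max_size = 1 << 30 indices
 * are covered by (1 << 30) / (1 << 14) = 65536 chunks, all of which reference the same static
 * array of 16384 chunk-local int16_t indices. */
static_assert((int64_t(1) << 30) / (int64_t(1) << 14) == 65536);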
std::ostream &operator<<(std::ostream &stream, const IndexMask &mask)
{
Array<int64_t> indices(mask.size());
mask.to_indices<int64_t>(indices);
Vector<std::variant<IndexRange, Span<int64_t>>> segments;
unique_sorted_indices::split_to_ranges_and_spans<int64_t>(indices, 16, segments);
std::cout << "(Size: " << mask.size() << " | ";
for (const std::variant<IndexRange, Span<int64_t>> &segment : segments) {
if (std::holds_alternative<IndexRange>(segment)) {
const IndexRange range = std::get<IndexRange>(segment);
std::cout << range;
}
else {
const Span<int64_t> segment_indices = std::get<Span<int64_t>>(segment);
std::cout << "[";
for (const int64_t index : segment_indices) {
std::cout << index << ",";
}
std::cout << "]";
}
std::cout << ", ";
}
std::cout << ")";
return stream;
}
namespace unique_sorted_indices {
template<typename T>
static void split_by_chunk_recursive(const Span<T> indices,
const int64_t offset,
Vector<IndexRange> &r_chunks)
{
if (indices.is_empty()) {
return;
}
const T first_index = indices.first();
const T last_index = indices.last();
const int64_t first_chunk_id = index_to_chunk_id(first_index);
const int64_t last_chunk_id = index_to_chunk_id(last_index);
if (first_chunk_id == last_chunk_id) {
r_chunks.append_as(offset, indices.size());
return;
}
const int64_t middle_chunk_index = (first_chunk_id + last_chunk_id + 1) / 2;
const int64_t split_value = middle_chunk_index * chunk_capacity - 1;
const int64_t left_split_size = std::upper_bound(indices.begin(), indices.end(), split_value) -
indices.begin();
split_by_chunk_recursive(indices.take_front(left_split_size), offset, r_chunks);
split_by_chunk_recursive(
indices.drop_front(left_split_size), offset + left_split_size, r_chunks);
}
template<typename T> Vector<IndexRange> split_by_chunk(const Span<T> indices)
{
BLI_assert(std::is_sorted(indices.begin(), indices.end()));
Vector<IndexRange> chunks;
/* This can be too low in some cases, but it's never too large. */
chunks.reserve(size_to_chunk_num(indices.size()));
split_by_chunk_recursive(indices, 0, chunks);
return chunks;
}
template<typename T>
int64_t split_to_ranges_and_spans(const Span<T> indices,
const int64_t range_threshold,
Vector<std::variant<IndexRange, Span<T>>> &r_parts)
{
BLI_assert(range_threshold >= 1);
const int64_t old_parts_num = r_parts.size();
Span<T> remaining_indices = indices;
while (!remaining_indices.is_empty()) {
if (non_empty_is_range(remaining_indices)) {
/* All remaining indices are range. */
r_parts.append(non_empty_as_range(remaining_indices));
break;
}
if (non_empty_is_range(remaining_indices.take_front(range_threshold))) {
/* Next segment is a range. Now find the place where the range ends. */
const int64_t segment_size = find_size_of_next_range(remaining_indices);
r_parts.append(IndexRange(remaining_indices[0], segment_size));
remaining_indices = remaining_indices.drop_front(segment_size);
continue;
}
/* Next segment is just indices. Now find the place where the next range starts. */
const int64_t segment_size = find_size_until_next_range(remaining_indices, range_threshold);
const Span<T> segment_indices = remaining_indices.take_front(segment_size);
if (non_empty_is_range(segment_indices)) {
r_parts.append(non_empty_as_range(segment_indices));
}
else {
r_parts.append(segment_indices);
}
remaining_indices = remaining_indices.drop_front(segment_size);
}
return r_parts.size() - old_parts_num;
}
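/* Hedged illustration (not a test from this commit) of the splitting behavior implemented above. */
static void example_split_to_ranges_and_spans()
{
  static const int indices[] = {0, 1, 2, 3, 10, 12, 20, 21, 22, 23};
  Vector<std::variant<IndexRange, Span<int>>> parts;
  const int64_t parts_num = split_to_ranges_and_spans<int>(Span<int>(indices, 10), 4, parts);
  /* With range_threshold = 4 this is expected to append three parts: IndexRange(0, 4), then the
   * isolated indices {10, 12} as a Span, then IndexRange(20, 4). */
  BLI_assert(parts_num == 3);
  UNUSED_VARS_NDEBUG(parts_num);
}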
template<typename T> IndexMask to_index_mask(const Span<T> indices, IndexMaskMemory &memory)
{
if (indices.is_empty()) {
return {};
}
if (non_empty_is_range(indices)) {
return non_empty_as_range(indices);
}
const Vector<IndexRange> split_ranges = split_by_chunk(indices);
const int64_t chunks_num = split_ranges.size();
MutableSpan<Chunk> chunks = memory.allocate_array<Chunk>(chunks_num);
MutableSpan<int64_t> chunk_ids = memory.allocate_array<int64_t>(chunks_num);
static const int16_t *static_offsets = get_static_indices_array().data();
[[maybe_unused]] const Chunk full_chunk_template = IndexMask(chunk_capacity).data().chunks[0];
std::mutex scope_mutex;
threading::parallel_for(split_ranges.index_range(), 32, [&](const IndexRange slice) {
Vector<std::variant<IndexRange, Span<T>>> segments_in_chunks;
Vector<int64_t> segments_per_chunk_cumulative;
segments_per_chunk_cumulative.reserve(slice.size() + 1);
segments_per_chunk_cumulative.append(0);
int64_t index_allocations_num = 0;
Vector<int64_t> chunks_to_postprocess;
for (const int64_t index_in_slice : IndexRange(slice.size())) {
const int64_t chunk_i = slice[index_in_slice];
const IndexRange range_for_chunk = split_ranges[chunk_i];
const Span<T> indices_in_chunk = indices.slice(range_for_chunk);
BLI_assert(!indices_in_chunk.is_empty());
const int64_t chunk_id = index_to_chunk_id(int64_t(indices_in_chunk[0]));
BLI_assert(chunk_id == index_to_chunk_id(int64_t(indices_in_chunk.last())));
Chunk &chunk = chunks[chunk_i];
chunk_ids[chunk_i] = chunk_id;
if (indices_in_chunk.size() == chunk_capacity) {
chunk = full_chunk_template;
continue;
}
chunks_to_postprocess.append(index_in_slice);
const int16_t segments_in_chunk_num = int16_t(
split_to_ranges_and_spans(indices_in_chunk, 64, segments_in_chunks));
BLI_assert(segments_in_chunk_num > 0);
segments_per_chunk_cumulative.append(segments_per_chunk_cumulative.last() +
segments_in_chunk_num);
for (const int64_t segment_i :
segments_in_chunks.index_range().take_back(segments_in_chunk_num)) {
const std::variant<IndexRange, Span<T>> &segment = segments_in_chunks[segment_i];
if (std::holds_alternative<IndexRange>(segment)) {
/* No extra allocations necessary because static memory is used. */
}
else {
const Span<T> indices_in_segment = std::get<Span<T>>(segment);
index_allocations_num += indices_in_segment.size();
}
}
chunk.segments_num = segments_in_chunk_num;
}
if (chunks_to_postprocess.is_empty()) {
return;
}
MutableSpan<int16_t> remaining_indices;
MutableSpan<const int16_t *> remaining_indices_by_segment;
MutableSpan<int16_t> remaining_cumulative_segment_sizes;
{
std::lock_guard lock{scope_mutex};
remaining_indices_by_segment = memory.allocate_array<const int16_t *>(
segments_in_chunks.size());
remaining_indices = memory.allocate_array<int16_t>(index_allocations_num);
remaining_cumulative_segment_sizes = memory.allocate_array<int16_t>(
segments_in_chunks.size() + chunks_to_postprocess.size());
}
const OffsetIndices<int64_t> segments_by_chunk = segments_per_chunk_cumulative.as_span();
const auto take_front_and_drop = [](auto &span, const int64_t n) {
auto front = span.take_front(n);
BLI_assert(front.size() == n);
span = span.drop_front(n);
return front;
};
for (const int64_t i : chunks_to_postprocess.index_range()) {
const int64_t index_in_slice = chunks_to_postprocess[i];
const int64_t chunk_i = slice[index_in_slice];
const IndexRange segments_in_chunk = segments_by_chunk[i];
const int16_t segments_num = int16_t(segments_in_chunk.size());
Chunk &chunk = chunks[chunk_i];
const int64_t chunk_offset = chunk_ids[chunk_i] * chunk_capacity;
MutableSpan<const int16_t *> indices_by_segment = take_front_and_drop(
remaining_indices_by_segment, segments_num);
MutableSpan<int16_t> cumulative_segment_sizes = take_front_and_drop(
remaining_cumulative_segment_sizes, segments_num + 1);
int64_t cumulative_size = 0;
for (const int64_t segment_i : IndexRange(segments_num)) {
const std::variant<IndexRange, Span<T>> &segment =
segments_in_chunks[segments_in_chunk[segment_i]];
cumulative_segment_sizes[segment_i] = int16_t(cumulative_size);
if (std::holds_alternative<IndexRange>(segment)) {
const IndexRange range_in_segment = std::get<IndexRange>(segment);
indices_by_segment[segment_i] = static_offsets +
(range_in_segment.first() - chunk_offset);
cumulative_size += range_in_segment.size();
}
else {
const Span<T> indices_in_segment = std::get<Span<T>>(segment);
MutableSpan<int16_t> new_indices = take_front_and_drop(remaining_indices,
indices_in_segment.size());
for (const int64_t index_in_segment : new_indices.index_range()) {
new_indices[index_in_segment] = int16_t(indices_in_segment[index_in_segment] -
chunk_offset);
}
indices_by_segment[segment_i] = new_indices.data();
cumulative_size += indices_in_segment.size();
}
}
cumulative_segment_sizes[segments_num] = int16_t(cumulative_size);
chunk.indices_by_segment = indices_by_segment.data();
chunk.cumulative_segment_sizes = cumulative_segment_sizes.data();
}
BLI_assert(remaining_indices.is_empty());
BLI_assert(remaining_indices_by_segment.is_empty());
BLI_assert(remaining_cumulative_segment_sizes.is_empty());
});
MutableSpan<int64_t> cumulative_chunk_sizes = memory.allocate_array<int64_t>(chunks_num + 1);
int64_t cumulative_size = 0;
for (const int64_t i : chunks.index_range()) {
cumulative_chunk_sizes[i] = cumulative_size;
cumulative_size += chunks[i].size();
}
cumulative_chunk_sizes.last() = cumulative_size;
IndexMask mask;
IndexMaskData &mask_data = mask.data_for_inplace_construction();
mask_data.chunks_num = chunks_num;
mask_data.indices_num = indices.size();
mask_data.chunks = chunks.data();
mask_data.chunk_ids = chunk_ids.data();
mask_data.cumulative_chunk_sizes = cumulative_chunk_sizes.data();
mask_data.begin_it = {0, 0};
mask_data.end_it = chunks.last().end_iterator();
return mask;
}
template<typename T> void from_index_mask(const IndexMask &mask, MutableSpan<T> r_indices)
{
BLI_assert(mask.size() == r_indices.size());
int64_t current_i = 0;
mask.foreach_index([&](const int64_t index) mutable {
r_indices[current_i] = T(index);
current_i++;
});
}
} // namespace unique_sorted_indices
void IndexMask::foreach_span(FunctionRef<void(OffsetSpan<int64_t, int16_t>)> fn) const
{
if (data_.indices_num == 0) {
return;
}
int64_t chunk_i = 0;
int64_t segment_i = data_.begin_it.segment_i;
int64_t segment_drop_front = data_.begin_it.index_in_segment;
const int64_t final_drop_back = data_.chunks[data_.chunks_num - 1].segment_size(
data_.end_it.segment_i) -
data_.end_it.index_in_segment;
const int64_t final_segment_i = data_.end_it.segment_i;
const int64_t final_segments_num = data_.end_it.segment_i + 1;
while (chunk_i < data_.chunks_num) {
const Chunk &chunk = data_.chunks[chunk_i];
const int64_t chunk_id = data_.chunk_ids[chunk_i];
const bool is_last_chunk = (chunk_i == data_.chunks_num - 1);
const int64_t segments_num = is_last_chunk ? final_segments_num : chunk.segments_num;
const int64_t offset = chunk_capacity * chunk_id;
int64_t prev_cumulative_segment_size = chunk.cumulative_segment_sizes[segment_i];
while (segment_i < segments_num) {
const int64_t next_segment_i = segment_i + 1;
const int64_t cumulative_segment_size = chunk.cumulative_segment_sizes[next_segment_i];
const int64_t stored_segment_size = cumulative_segment_size - prev_cumulative_segment_size;
prev_cumulative_segment_size = cumulative_segment_size;
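/* Branchless handling of the end of the mask: only the very last visited segment may have
 * indices dropped from its back. */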
const bool is_last_segment = is_last_chunk & (segment_i == final_segment_i);
const int64_t segment_drop_back = is_last_segment * final_drop_back;
const int16_t *indices_in_segment = chunk.indices_by_segment[segment_i] + segment_drop_front;
const int64_t segment_size = stored_segment_size - segment_drop_front - segment_drop_back;
const Span<int16_t> indices_span{indices_in_segment, segment_size};
const OffsetSpan<int64_t, int16_t> segment{offset, indices_span};
fn(segment);
segment_drop_front = 0;
segment_i = next_segment_i;
}
segment_i = 0;
chunk_i++;
}
}
static IndexMask bits_to_index_mask(const BitSpan bits,
const int64_t start,
IndexMaskMemory &memory)
{
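/* Simple implementation: gather the positions of all set bits and build the mask from those
 * indices. */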
Vector<int64_t> indices;
for (const int64_t i : bits.index_range()) {
if (bits[i]) {
indices.append(i + start);
}
}
return unique_sorted_indices::to_index_mask<int64_t>(indices, memory);
}
static void index_mask_to_bits(const IndexMask &mask, const int64_t start, MutableBitSpan r_bits)
{
BLI_assert(r_bits.size() >= mask.min_array_size() - start);
r_bits.reset_all();
mask.foreach_index([&](const int64_t i) { r_bits[i - start].set(); });
}
template<typename T>
IndexMask IndexMask::from_indices(const Span<T> indices, IndexMaskMemory &memory)
{
return unique_sorted_indices::to_index_mask(indices, memory);
}
IndexMask IndexMask::from_bits(const BitSpan bits, IndexMaskMemory &memory, const int64_t offset)
{
return bits_to_index_mask(bits, offset, memory);
}
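/* Evaluates the expression over the given universe by materializing all resulting indices in a
 * set. */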
static Set<int64_t> eval_expr(const Expr &base_expr, const IndexRange universe)
{
Set<int64_t> result;
switch (base_expr.type) {
case Expr::Type::Atomic: {
const AtomicExpr &expr = static_cast<const AtomicExpr &>(base_expr);
expr.mask->foreach_index([&](const int64_t i) {
BLI_assert(universe.contains(i));
result.add_new(i);
});
break;
}
case Expr::Type::Union: {
const UnionExpr &expr = static_cast<const UnionExpr &>(base_expr);
for (const Expr *child : expr.children) {
const Set<int64_t> child_result = eval_expr(*child, universe);
for (const int64_t i : child_result) {
result.add(i);
}
}
break;
}
case Expr::Type::Difference: {
const DifferenceExpr &expr = static_cast<const DifferenceExpr &>(base_expr);
result = eval_expr(*expr.base, universe);
for (const Expr *child : expr.children) {
const Set<int64_t> child_result = eval_expr(*child, universe);
for (const int64_t i : child_result) {
result.remove(i);
}
}
break;
}
case Expr::Type::Complement: {
const ComplementExpr &expr = static_cast<const ComplementExpr &>(base_expr);
const Set<int64_t> child_result = eval_expr(*expr.base, universe);
for (const int64_t i : universe) {
if (!child_result.contains(i)) {
result.add_new(i);
}
}
break;
}
case Expr::Type::Intersection: {
const IntersectionExpr &expr = static_cast<const IntersectionExpr &>(base_expr);
BLI_assert(!expr.children.is_empty());
result = eval_expr(*expr.children.first(), universe);
for (const Expr *child : expr.children.as_span().drop_front(1)) {
const Set<int64_t> child_result = eval_expr(*child, universe);
result.remove_if([&](const int64_t i) { return !child_result.contains(i); });
}
break;
}
}
return result;
}
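/* Computes the set of chunk ids that may contain indices in the result of the expression. This
 * is conservative: a returned chunk id may still end up empty after full evaluation. */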
static Set<int64_t> find_chunk_ids_to_process(const Expr &base_expr, const IndexRange universe)
{
Set<int64_t> result;
switch (base_expr.type) {
case Expr::Type::Atomic: {
const AtomicExpr &expr = static_cast<const AtomicExpr &>(base_expr);
for (const int64_t chunk_i : IndexRange(expr.mask->data().chunks_num)) {
result.add_new(expr.mask->data().chunk_ids[chunk_i]);
}
break;
}
case Expr::Type::Union: {
const UnionExpr &expr = static_cast<const UnionExpr &>(base_expr);
for (const Expr *child : expr.children) {
const Set<int64_t> child_result = find_chunk_ids_to_process(*child, universe);
for (const int64_t chunk_id : child_result) {
result.add(chunk_id);
}
}
break;
}
case Expr::Type::Difference: {
const DifferenceExpr &expr = static_cast<const DifferenceExpr &>(base_expr);
result = find_chunk_ids_to_process(*expr.base, universe);
break;
}
case Expr::Type::Complement: {
const int64_t first_chunk_id = index_to_chunk_id(universe.first());
const int64_t last_chunk_id = index_to_chunk_id(universe.last());
for (const int64_t chunk_id :
IndexRange(first_chunk_id, last_chunk_id - first_chunk_id + 1)) {
result.add(chunk_id);
}
break;
}
case Expr::Type::Intersection: {
const IntersectionExpr &expr = static_cast<const IntersectionExpr &>(base_expr);
BLI_assert(!expr.children.is_empty());
result = find_chunk_ids_to_process(*expr.children.first(), universe);
for (const Expr *child : expr.children.as_span().drop_front(1)) {
const Set<int64_t> child_result = find_chunk_ids_to_process(*child, universe);
result.remove_if([&](const int64_t chunk_id) { return !child_result.contains(chunk_id); });
}
break;
}
}
return result;
}
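/* Variant of the function above that works on bit spans indexed by chunk id: #r_chunk_is_full
 * marks chunks that are known to be fully selected by the expression, while #r_chunk_non_empty
 * marks chunks that may contain at least one index. */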
static void find_chunk_ids_to_process(const Expr &base_expr,
const IndexRange universe,
MutableBitSpan r_chunk_is_full,
MutableBitSpan r_chunk_non_empty)
{
using TmpBitVector = BitVector<1024>;
const int64_t max_chunk_id = index_to_chunk_id(universe.last());
const int64_t r_size = max_chunk_id + 1;
BLI_assert(r_chunk_is_full.size() == r_size);
BLI_assert(r_chunk_non_empty.size() == r_size);
switch (base_expr.type) {
case Expr::Type::Atomic: {
const AtomicExpr &expr = static_cast<const AtomicExpr &>(base_expr);
const IndexMaskData &data = expr.mask->data();
if (data.chunks_num == 0) {
break;
}
if (data.chunks_num == 1) {
const int64_t chunk_id = data.chunk_ids[0];
r_chunk_non_empty[chunk_id].set();
if (data.indices_num == chunk_capacity) {
r_chunk_is_full[chunk_id].set();
}
break;
}
for (const int64_t chunk_i : IndexRange(data.chunks_num)) {
const int64_t chunk_id = data.chunk_ids[chunk_i];
const Chunk &chunk = data.chunks[chunk_i];
r_chunk_non_empty[chunk_id].set();
MutableBitRef is_full = r_chunk_is_full[chunk_id];
if (chunk.is_full()) {
/* A stored chunk is only fully contained in the mask if the mask is not truncated inside of
 * it. Only the first and the last chunk of a mask can be truncated. */
if (chunk_i == 0) {
if (data.begin_it.segment_i == 0 && data.begin_it.index_in_segment == 0) {
is_full.set();
}
}
else if (chunk_i == data.chunks_num - 1) {
if (data.end_it.segment_i == 0 && data.end_it.index_in_segment == chunk_capacity) {
is_full.set();
}
}
else {
is_full.set();
}
}
}
break;
}
case Expr::Type::Union: {
const UnionExpr &expr = static_cast<const UnionExpr &>(base_expr);
for (const Expr *child : expr.children) {
TmpBitVector child_chunk_is_full(r_size, false);
TmpBitVector child_chunk_non_empty(r_size, false);
find_chunk_ids_to_process(*child, universe, child_chunk_is_full, child_chunk_non_empty);
r_chunk_is_full |= child_chunk_is_full;
r_chunk_non_empty |= child_chunk_non_empty;
}
break;
}
case Expr::Type::Difference: {
const DifferenceExpr &expr = static_cast<const DifferenceExpr &>(base_expr);
find_chunk_ids_to_process(*expr.base, universe, r_chunk_is_full, r_chunk_non_empty);
for (const Expr *child : expr.children) {
TmpBitVector child_chunk_is_full(r_size, false);
TmpBitVector child_chunk_non_empty(r_size, false);
find_chunk_ids_to_process(*child, universe, child_chunk_is_full, child_chunk_non_empty);
r_chunk_is_full.clear_by_set_bits(child_chunk_non_empty);
r_chunk_non_empty.clear_by_set_bits(child_chunk_is_full);
}
break;
}
case Expr::Type::Complement: {
const ComplementExpr &expr = static_cast<const ComplementExpr &>(base_expr);
/* The output parameters are swapped intentionally: a chunk that is empty in the base is full in
 * the complement, and a chunk that is not fully selected in the base is non-empty in the
 * complement. The flips below complete that mapping. */
find_chunk_ids_to_process(*expr.base, universe, r_chunk_non_empty, r_chunk_is_full);
r_chunk_is_full.flip();
r_chunk_non_empty.flip();
break;
}
case Expr::Type::Intersection: {
const IntersectionExpr &expr = static_cast<const IntersectionExpr &>(base_expr);
BLI_assert(!expr.children.is_empty());
find_chunk_ids_to_process(*expr.children[0], universe, r_chunk_is_full, r_chunk_non_empty);
for (const Expr *child : expr.children.as_span().drop_front(1)) {
TmpBitVector child_chunk_is_full(r_size, false);
TmpBitVector child_chunk_non_empty(r_size, false);
find_chunk_ids_to_process(*child, universe, child_chunk_is_full, child_chunk_non_empty);
r_chunk_is_full &= child_chunk_is_full;
r_chunk_non_empty &= child_chunk_non_empty;
}
break;
}
}
}
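/* Currently returns every chunk id covered by the universe; the expression itself is not yet
 * used to narrow down the candidates. */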
static Vector<int64_t> get_chunk_ids_to_evaluate_expression_in(const Expr & /*expr*/,
const IndexRange universe)
{
const int64_t first_chunk_id = index_to_chunk_id(universe.first());
const int64_t last_chunk_id = index_to_chunk_id(universe.last());
Vector<int64_t> chunk_ids(last_chunk_id - first_chunk_id + 1);
for (const int64_t i : chunk_ids.index_range()) {
chunk_ids[i] = first_chunk_id + i;
}
return chunk_ids;
}
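/* Finds the chunk with the given id in the mask using a binary search. The returned slice
 * respects the fact that the first and last chunk of the mask may be truncated. */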
static std::optional<ChunkSlice> try_get_chunk_by_id(const IndexMask &mask, const int64_t chunk_id)
{
const IndexMaskData &data = mask.data();
const int64_t *chunk_id_iterator = std::lower_bound(
data.chunk_ids, data.chunk_ids + data.chunks_num, chunk_id);
const int64_t index = chunk_id_iterator - data.chunk_ids;
if (index == data.chunks_num) {
return std::nullopt;
}
if (data.chunk_ids[index] != chunk_id) {
return std::nullopt;
}
ChunkSlice chunk_slice;
chunk_slice.chunk = data.chunks + index;
chunk_slice.begin_it = (index == 0) ? data.begin_it : RawChunkIterator{0, 0};
chunk_slice.end_it = (index == data.chunks_num - 1) ? data.end_it :
chunk_slice.chunk->end_iterator();
return chunk_slice;
}
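/* Evaluates the expression restricted to a single chunk. The result contains chunk-local indices
 * in the range [0, #chunk_capacity). */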
static Set<int16_t> eval_expr_for_chunk_id__index_set(const Expr &base_expr,
const IndexRange universe,
const int64_t chunk_id)
{
Set<int16_t> result;
switch (base_expr.type) {
case Expr::Type::Atomic: {
const AtomicExpr &expr = static_cast<const AtomicExpr &>(base_expr);
const IndexMask &mask = *expr.mask;
const std::optional<ChunkSlice> chunk_slice = try_get_chunk_by_id(mask, chunk_id);
if (!chunk_slice.has_value()) {
break;
}
chunk_slice->foreach_span([&](const Span<int16_t> indices) {
for (const int16_t index : indices) {
result.add_new(index);
}
});
break;
}
case Expr::Type::Union: {
const UnionExpr &expr = static_cast<const UnionExpr &>(base_expr);
for (const Expr *child : expr.children) {
const Set<int16_t> child_result = eval_expr_for_chunk_id__index_set(
*child, universe, chunk_id);
for (const int16_t index : child_result) {
result.add(index);
}
}
break;
}
case Expr::Type::Difference: {
const DifferenceExpr &expr = static_cast<const DifferenceExpr &>(base_expr);
result = eval_expr_for_chunk_id__index_set(*expr.base, universe, chunk_id);
for (const Expr *child : expr.children) {
const Set<int16_t> child_result = eval_expr_for_chunk_id__index_set(
*child, universe, chunk_id);
result.remove_if([&](const int16_t index) { return child_result.contains(index); });
}
break;
}
case Expr::Type::Complement: {
const ComplementExpr &expr = static_cast<const ComplementExpr &>(base_expr);
const Set<int16_t> child_result = eval_expr_for_chunk_id__index_set(
*expr.base, universe, chunk_id);
for (const int64_t index : IndexRange(chunk_capacity)) {
if (!child_result.contains(int16_t(index))) {
result.add_new(int16_t(index));
}
}
break;
}
case Expr::Type::Intersection: {
const IntersectionExpr &expr = static_cast<const IntersectionExpr &>(base_expr);
result = eval_expr_for_chunk_id__index_set(*expr.children[0], universe, chunk_id);
for (const Expr *child : expr.children.as_span().drop_front(1)) {
const Set<int16_t> child_result = eval_expr_for_chunk_id__index_set(
*child, universe, chunk_id);
result.remove_if([&](const int16_t index) { return !child_result.contains(index); });
}
break;
}
}
return result;
}
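/* Evaluates the expression for each of the given chunk ids and fills the corresponding chunks.
 * Every resulting chunk consists of exactly one segment. */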
static void eval_expressions_for_chunk_ids(const Expr &expr,
const IndexRange universe,
const Span<int64_t> chunk_ids,
MutableSpan<Chunk> r_chunks,
IndexMaskMemory &memory,
std::mutex &memory_mutex)
{
BLI_assert(chunk_ids.size() == r_chunks.size());
for (const int64_t chunk_i : chunk_ids.index_range()) {
const int64_t chunk_id = chunk_ids[chunk_i];
Chunk &chunk = r_chunks[chunk_i];
const Set<int16_t> indices_in_chunk = eval_expr_for_chunk_id__index_set(
expr, universe, chunk_id);
MutableSpan<const int16_t *> indices_by_segment;
MutableSpan<int16_t> indices_in_segment;
MutableSpan<int16_t> cumulative_segment_sizes;
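/* The memory allocator is shared between threads, so allocations have to be protected by a
 * mutex. */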
{
std::lock_guard lock{memory_mutex};
indices_by_segment = memory.allocate_array<const int16_t *>(1);
indices_in_segment = memory.allocate_array<int16_t>(indices_in_chunk.size());
cumulative_segment_sizes = memory.allocate_array<int16_t>(2);
}
indices_by_segment[0] = indices_in_segment.data();
cumulative_segment_sizes[0] = 0;
cumulative_segment_sizes[1] = int16_t(indices_in_chunk.size());
int64_t counter = 0;
for (const int16_t index : indices_in_chunk) {
indices_in_segment[counter] = index;
counter++;
}
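/* The set iterates over the indices in an unspecified order, so sort them to get the ascending
 * order required within a segment. */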
std::sort(indices_in_segment.begin(), indices_in_segment.end());
chunk.segments_num = 1;
chunk.indices_by_segment = indices_by_segment.data();
chunk.cumulative_segment_sizes = cumulative_segment_sizes.data();
}
}
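/* Builds a mask by evaluating the expression for every chunk that intersects the universe in
 * parallel and then compacting the non-empty chunks into the final mask. */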
IndexMask IndexMask::from_expr(const Expr &expr,
const IndexRange universe,
IndexMaskMemory &memory)
{
if (universe.is_empty()) {
return {};
}
const Vector<int64_t> possible_chunk_ids = get_chunk_ids_to_evaluate_expression_in(expr,
universe);
Vector<Chunk> possible_chunks(possible_chunk_ids.size());
std::mutex memory_mutex;
threading::parallel_for(possible_chunk_ids.index_range(), 32, [&](const IndexRange range) {
eval_expressions_for_chunk_ids(expr,
universe,
possible_chunk_ids.as_span().slice(range),
possible_chunks.as_mutable_span().slice(range),
memory,
memory_mutex);
});
Vector<int64_t> non_empty_chunks;
for (const int64_t i : possible_chunks.index_range()) {
const Chunk &chunk = possible_chunks[i];
if (chunk.size() > 0) {
non_empty_chunks.append(i);
}
}
const int64_t chunks_num = non_empty_chunks.size();
if (chunks_num == 0) {
return {};
}
MutableSpan<Chunk> final_chunks = memory.allocate_array<Chunk>(chunks_num);
MutableSpan<int64_t> final_chunk_ids = memory.allocate_array<int64_t>(chunks_num);
MutableSpan<int64_t> final_cumulative_chunk_sizes = memory.allocate_array<int64_t>(chunks_num +
1);
int64_t counter = 0;
for (const int64_t i : IndexRange(chunks_num)) {
const int64_t i2 = non_empty_chunks[i];
const Chunk &chunk = possible_chunks[i2];
final_chunks[i] = chunk;
final_chunk_ids[i] = possible_chunk_ids[i2];
final_cumulative_chunk_sizes[i] = counter;
counter += chunk.size();
}
final_cumulative_chunk_sizes.last() = counter;
const Chunk &last_chunk = final_chunks.last();
IndexMask mask;
IndexMaskData &data = mask.data_for_inplace_construction();
data.chunks_num = chunks_num;
data.indices_num = counter;
data.chunks = final_chunks.data();
data.chunk_ids = final_chunk_ids.data();
data.cumulative_chunk_sizes = final_cumulative_chunk_sizes.data();
data.begin_it = RawChunkIterator{0, 0};
data.end_it = last_chunk.end_iterator();
return mask;
}
template<typename T> void IndexMask::to_indices(MutableSpan<T> r_indices) const
{
unique_sorted_indices::from_index_mask(*this, r_indices);
}
void IndexMask::to_bits(MutableBitSpan r_bits, int64_t offset) const
{
index_mask_to_bits(*this, offset, r_bits);
}
std::optional<IndexRange> IndexMask::to_range() const
{
if (data_.indices_num == 0) {
return IndexRange{};
}
const int64_t first_index = this->first();
const int64_t last_index = this->last();
if (last_index - first_index == data_.indices_num - 1) {
return IndexRange(first_index, data_.indices_num);
}
return std::nullopt;
}
template IndexMask IndexMask::from_indices(Span<int32_t>, IndexMaskMemory &);
template IndexMask IndexMask::from_indices(Span<int64_t>, IndexMaskMemory &);
template void IndexMask::to_indices(MutableSpan<int32_t>) const;
template void IndexMask::to_indices(MutableSpan<int64_t>) const;
namespace unique_sorted_indices {
template Vector<IndexRange> split_by_chunk(const Span<int32_t> indices);
template Vector<IndexRange> split_by_chunk(const Span<int64_t> indices);
} // namespace unique_sorted_indices
void do_benchmark(const int64_t total);
void do_benchmark(const int64_t /*total*/)
{
}
} // namespace blender::index_mask

View File

@ -1,221 +0,0 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "BLI_array.hh"
#include "BLI_index_mask2.hh"
#include "BLI_strict_flags.h"
#include "BLI_timeit.hh"
#include "testing/testing.h"
namespace blender::index_mask {
void do_benchmark(const int64_t total);
}
namespace blender::index_mask::tests {
TEST(index_mask2, FindRangeEnd)
{
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4}), 1);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4, 5, 6, 7}), 4);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4, 5, 6, 8, 9}), 3);
}
TEST(index_mask2, NonEmptyIsRange)
{
EXPECT_TRUE(unique_sorted_indices::non_empty_is_range<int>({0, 1, 2}));
EXPECT_TRUE(unique_sorted_indices::non_empty_is_range<int>({5}));
EXPECT_TRUE(unique_sorted_indices::non_empty_is_range<int>({7, 8, 9, 10}));
EXPECT_FALSE(unique_sorted_indices::non_empty_is_range<int>({3, 5}));
EXPECT_FALSE(unique_sorted_indices::non_empty_is_range<int>({3, 4, 5, 6, 8, 9}));
}
TEST(index_mask2, NonEmptyAsRange)
{
EXPECT_EQ(unique_sorted_indices::non_empty_as_range<int>({0, 1, 2}), IndexRange(0, 3));
EXPECT_EQ(unique_sorted_indices::non_empty_as_range<int>({5}), IndexRange(5, 1));
EXPECT_EQ(unique_sorted_indices::non_empty_as_range<int>({10, 11}), IndexRange(10, 2));
}
TEST(index_mask2, FindSizeOfNextRange)
{
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({0, 3, 4}), 1);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4, 5, 6, 7}), 4);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4}), 1);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({5, 6, 7, 10, 11, 100}), 3);
}
TEST(index_mask2, FindStartOfNextRange)
{
EXPECT_EQ(unique_sorted_indices::find_size_until_next_range<int>({4}, 3), 1);
EXPECT_EQ(unique_sorted_indices::find_size_until_next_range<int>({4, 5}, 3), 2);
EXPECT_EQ(unique_sorted_indices::find_size_until_next_range<int>({4, 5, 6}, 3), 0);
EXPECT_EQ(unique_sorted_indices::find_size_until_next_range<int>({4, 5, 6, 7}, 3), 0);
EXPECT_EQ(
unique_sorted_indices::find_size_until_next_range<int>({0, 1, 3, 5, 10, 11, 12, 20}, 3), 4);
}
TEST(index_mask2, SplitToRangesAndSpans)
{
Array<int> data = {1, 2, 3, 4, 7, 9, 10, 13, 14, 15, 20, 21, 22, 23, 24};
Vector<std::variant<IndexRange, Span<int>>> parts;
const int64_t parts_num = unique_sorted_indices::split_to_ranges_and_spans<int>(data, 3, parts);
EXPECT_EQ(parts_num, 4);
EXPECT_EQ(parts.size(), 4);
EXPECT_EQ(std::get<IndexRange>(parts[0]), IndexRange(1, 4));
EXPECT_EQ(std::get<Span<int>>(parts[1]), Span<int>({7, 9, 10}));
EXPECT_EQ(std::get<IndexRange>(parts[2]), IndexRange(13, 3));
EXPECT_EQ(std::get<IndexRange>(parts[3]), IndexRange(20, 5));
}
TEST(index_mask2, SplitByChunk)
{
Array<int> data = {5, 100, 16383, 16384, 16385, 20000, 20001, 100000, 101000};
Vector<IndexRange> ranges = unique_sorted_indices::split_by_chunk<int>(data);
EXPECT_EQ(ranges.size(), 3);
EXPECT_EQ(data.as_span().slice(ranges[0]), Span<int>({5, 100, 16383}));
EXPECT_EQ(data.as_span().slice(ranges[1]), Span<int>({16384, 16385, 20000, 20001}));
EXPECT_EQ(data.as_span().slice(ranges[2]), Span<int>({100000, 101000}));
}
TEST(index_mask2, IndicesToMask)
{
IndexMaskMemory memory;
Array<int> data = {
5, 100, 16383, 16384, 16385, 20000, 20001, 50000, 50001, 50002, 100000, 101000};
IndexMask mask = IndexMask::from_indices<int>(data, memory);
EXPECT_EQ(mask.first(), 5);
EXPECT_EQ(mask.last(), 101000);
EXPECT_EQ(mask.min_array_size(), 101001);
}
TEST(index_mask2, FromBits)
{
IndexMaskMemory memory;
const uint64_t bits =
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'0010'0000;
const IndexMask mask = IndexMask::from_bits(BitSpan(&bits, IndexRange(2, 40)), memory, 100);
Array<int> indices(5);
mask.to_indices<int>(indices);
EXPECT_EQ(indices[0], 103);
EXPECT_EQ(indices[1], 106);
EXPECT_EQ(indices[2], 107);
EXPECT_EQ(indices[3], 108);
EXPECT_EQ(indices[4], 109);
uint64_t new_bits = 0;
mask.to_bits(MutableBitSpan(&new_bits, IndexRange(5, 40)), 100);
EXPECT_EQ(new_bits, bits << 3);
}
TEST(index_mask2, FromSize)
{
{
IndexMask mask(5);
Vector<OffsetSpan<int64_t, int16_t>> segments;
mask.foreach_span(
[&](const OffsetSpan<int64_t, int16_t> segment) { segments.append(segment); });
EXPECT_EQ(segments.size(), 1);
EXPECT_EQ(segments[0].size(), 5);
EXPECT_EQ(mask.first(), 0);
EXPECT_EQ(mask.last(), 4);
EXPECT_EQ(mask.min_array_size(), 5);
}
{
IndexMask mask(chunk_capacity);
Vector<OffsetSpan<int64_t, int16_t>> segments;
mask.foreach_span(
[&](const OffsetSpan<int64_t, int16_t> segment) { segments.append(segment); });
EXPECT_EQ(segments.size(), 1);
EXPECT_EQ(segments[0].size(), chunk_capacity);
EXPECT_EQ(mask.first(), 0);
EXPECT_EQ(mask.last(), chunk_capacity - 1);
EXPECT_EQ(mask.min_array_size(), chunk_capacity);
}
}
TEST(index_mask2, DefaultConstructor)
{
IndexMask mask;
EXPECT_EQ(mask.size(), 0);
EXPECT_EQ(mask.min_array_size(), 0);
}
TEST(index_mask2, IndicesToRanges)
{
IndexMaskMemory memory;
const IndexMask mask = IndexMask::from_indices<int>({0, 1, 5}, memory);
const IndexMask new_mask = grow_indices_to_ranges(
mask, [&](const int64_t i) { return IndexRange(i * 10, 3); }, memory);
Vector<int64_t> indices(new_mask.size());
new_mask.to_indices<int64_t>(indices);
EXPECT_EQ(indices.size(), 9);
EXPECT_EQ(indices[0], 0);
EXPECT_EQ(indices[1], 1);
EXPECT_EQ(indices[2], 2);
EXPECT_EQ(indices[3], 10);
EXPECT_EQ(indices[4], 11);
EXPECT_EQ(indices[5], 12);
EXPECT_EQ(indices[6], 50);
EXPECT_EQ(indices[7], 51);
EXPECT_EQ(indices[8], 52);
}
TEST(index_mask2, ForeachRange)
{
IndexMaskMemory memory;
const IndexMask mask = IndexMask::from_indices<int>({2, 3, 4, 10, 40, 41}, memory);
Vector<IndexRange> ranges;
mask.foreach_range([&](const IndexRange range) { ranges.append(range); });
EXPECT_EQ(ranges.size(), 3);
EXPECT_EQ(ranges[0], IndexRange(2, 3));
EXPECT_EQ(ranges[1], IndexRange(10, 1));
EXPECT_EQ(ranges[2], IndexRange(40, 2));
}
TEST(index_mask2, Expr)
{
IndexMaskMemory memory;
const IndexMask mask1(IndexRange(10, 5));
const IndexMask mask2(IndexRange(40, 5));
const IndexMask mask3 = IndexMask::from_indices<int>({12, 13, 20, 21, 22}, memory);
const AtomicExpr expr1{mask1};
const AtomicExpr expr2{mask2};
const AtomicExpr expr3{mask3};
const UnionExpr union_expr({&expr1, &expr2});
const DifferenceExpr difference_expr(union_expr, {&expr3});
const IndexMask result = IndexMask::from_expr(difference_expr, IndexRange(100), memory);
std::cout << result << "\n";
}
TEST(index_mask2, ToRange)
{
IndexMaskMemory memory;
{
const IndexMask mask = IndexMask::from_indices<int>({4, 5, 6, 7}, memory);
EXPECT_TRUE(mask.to_range().has_value());
EXPECT_EQ(*mask.to_range(), IndexRange(4, 4));
}
{
const IndexMask mask = IndexMask::from_indices<int>({}, memory);
EXPECT_TRUE(mask.to_range().has_value());
EXPECT_EQ(*mask.to_range(), IndexRange());
}
{
const IndexMask mask = IndexMask::from_indices<int>({0, 1, 3, 4}, memory);
EXPECT_FALSE(mask.to_range().has_value());
}
{
const IndexRange range{16000, 40000};
const IndexMask mask{range};
EXPECT_TRUE(mask.to_range().has_value());
EXPECT_EQ(*mask.to_range(), range);
}
}
} // namespace blender::index_mask::tests

View File

@ -1,216 +1,221 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "BLI_array.hh"
#include "BLI_index_mask.hh"
#include "BLI_strict_flags.h"
#include "BLI_timeit.hh"
#include "testing/testing.h"
namespace blender::tests {
namespace blender::index_mask {
void do_benchmark(const int64_t total);
}
namespace blender::index_mask::tests {
TEST(index_mask, FindRangeEnd)
{
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4}), 1);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4, 5, 6, 7}), 4);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4, 5, 6, 8, 9}), 3);
}
TEST(index_mask, NonEmptyIsRange)
{
EXPECT_TRUE(unique_sorted_indices::non_empty_is_range<int>({0, 1, 2}));
EXPECT_TRUE(unique_sorted_indices::non_empty_is_range<int>({5}));
EXPECT_TRUE(unique_sorted_indices::non_empty_is_range<int>({7, 8, 9, 10}));
EXPECT_FALSE(unique_sorted_indices::non_empty_is_range<int>({3, 5}));
EXPECT_FALSE(unique_sorted_indices::non_empty_is_range<int>({3, 4, 5, 6, 8, 9}));
}
TEST(index_mask, NonEmptyAsRange)
{
EXPECT_EQ(unique_sorted_indices::non_empty_as_range<int>({0, 1, 2}), IndexRange(0, 3));
EXPECT_EQ(unique_sorted_indices::non_empty_as_range<int>({5}), IndexRange(5, 1));
EXPECT_EQ(unique_sorted_indices::non_empty_as_range<int>({10, 11}), IndexRange(10, 2));
}
TEST(index_mask, FindSizeOfNextRange)
{
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({0, 3, 4}), 1);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4, 5, 6, 7}), 4);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({4}), 1);
EXPECT_EQ(unique_sorted_indices::find_size_of_next_range<int>({5, 6, 7, 10, 11, 100}), 3);
}
TEST(index_mask, FindStartOfNextRange)
{
EXPECT_EQ(unique_sorted_indices::find_size_until_next_range<int>({4}, 3), 1);
EXPECT_EQ(unique_sorted_indices::find_size_until_next_range<int>({4, 5}, 3), 2);
EXPECT_EQ(unique_sorted_indices::find_size_until_next_range<int>({4, 5, 6}, 3), 0);
EXPECT_EQ(unique_sorted_indices::find_size_until_next_range<int>({4, 5, 6, 7}, 3), 0);
EXPECT_EQ(
unique_sorted_indices::find_size_until_next_range<int>({0, 1, 3, 5, 10, 11, 12, 20}, 3), 4);
}
TEST(index_mask, SplitToRangesAndSpans)
{
Array<int> data = {1, 2, 3, 4, 7, 9, 10, 13, 14, 15, 20, 21, 22, 23, 24};
Vector<std::variant<IndexRange, Span<int>>> parts;
const int64_t parts_num = unique_sorted_indices::split_to_ranges_and_spans<int>(data, 3, parts);
EXPECT_EQ(parts_num, 4);
EXPECT_EQ(parts.size(), 4);
EXPECT_EQ(std::get<IndexRange>(parts[0]), IndexRange(1, 4));
EXPECT_EQ(std::get<Span<int>>(parts[1]), Span<int>({7, 9, 10}));
EXPECT_EQ(std::get<IndexRange>(parts[2]), IndexRange(13, 3));
EXPECT_EQ(std::get<IndexRange>(parts[3]), IndexRange(20, 5));
}
TEST(index_mask, SplitByChunk)
{
Array<int> data = {5, 100, 16383, 16384, 16385, 20000, 20001, 100000, 101000};
Vector<IndexRange> ranges = unique_sorted_indices::split_by_chunk<int>(data);
EXPECT_EQ(ranges.size(), 3);
EXPECT_EQ(data.as_span().slice(ranges[0]), Span<int>({5, 100, 16383}));
EXPECT_EQ(data.as_span().slice(ranges[1]), Span<int>({16384, 16385, 20000, 20001}));
EXPECT_EQ(data.as_span().slice(ranges[2]), Span<int>({100000, 101000}));
}
TEST(index_mask, IndicesToMask)
{
IndexMaskMemory memory;
Array<int> data = {
5, 100, 16383, 16384, 16385, 20000, 20001, 50000, 50001, 50002, 100000, 101000};
IndexMask mask = IndexMask::from_indices<int>(data, memory);
EXPECT_EQ(mask.first(), 5);
EXPECT_EQ(mask.last(), 101000);
EXPECT_EQ(mask.min_array_size(), 101001);
}
TEST(index_mask, FromBits)
{
IndexMaskMemory memory;
const uint64_t bits =
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'0010'0000;
const IndexMask mask = IndexMask::from_bits(BitSpan(&bits, IndexRange(2, 40)), memory, 100);
Array<int> indices(5);
mask.to_indices<int>(indices);
EXPECT_EQ(indices[0], 103);
EXPECT_EQ(indices[1], 106);
EXPECT_EQ(indices[2], 107);
EXPECT_EQ(indices[3], 108);
EXPECT_EQ(indices[4], 109);
uint64_t new_bits = 0;
mask.to_bits(MutableBitSpan(&new_bits, IndexRange(5, 40)), 100);
EXPECT_EQ(new_bits, bits << 3);
}
TEST(index_mask, FromSize)
{
{
IndexMask mask(5);
Vector<OffsetSpan<int64_t, int16_t>> segments;
mask.foreach_span(
[&](const OffsetSpan<int64_t, int16_t> segment) { segments.append(segment); });
EXPECT_EQ(segments.size(), 1);
EXPECT_EQ(segments[0].size(), 5);
EXPECT_EQ(mask.first(), 0);
EXPECT_EQ(mask.last(), 4);
EXPECT_EQ(mask.min_array_size(), 5);
}
{
IndexMask mask(chunk_capacity);
Vector<OffsetSpan<int64_t, int16_t>> segments;
mask.foreach_span(
[&](const OffsetSpan<int64_t, int16_t> segment) { segments.append(segment); });
EXPECT_EQ(segments.size(), 1);
EXPECT_EQ(segments[0].size(), chunk_capacity);
EXPECT_EQ(mask.first(), 0);
EXPECT_EQ(mask.last(), chunk_capacity - 1);
EXPECT_EQ(mask.min_array_size(), chunk_capacity);
}
}
TEST(index_mask, DefaultConstructor)
{
IndexMask mask;
EXPECT_EQ(mask.min_array_size(), 0);
EXPECT_EQ(mask.size(), 0);
EXPECT_EQ(mask.min_array_size(), 0);
}
TEST(index_mask, ArrayConstructor)
TEST(index_mask, IndicesToRanges)
{
[](IndexMask mask) {
EXPECT_EQ(mask.size(), 4);
EXPECT_EQ(mask.min_array_size(), 8);
EXPECT_FALSE(mask.is_range());
EXPECT_EQ(mask[0], 3);
EXPECT_EQ(mask[1], 5);
EXPECT_EQ(mask[2], 6);
EXPECT_EQ(mask[3], 7);
}({3, 5, 6, 7});
IndexMaskMemory memory;
const IndexMask mask = IndexMask::from_indices<int>({0, 1, 5}, memory);
const IndexMask new_mask = grow_indices_to_ranges(
mask, [&](const int64_t i) { return IndexRange(i * 10, 3); }, memory);
Vector<int64_t> indices(new_mask.size());
new_mask.to_indices<int64_t>(indices);
EXPECT_EQ(indices.size(), 9);
EXPECT_EQ(indices[0], 0);
EXPECT_EQ(indices[1], 1);
EXPECT_EQ(indices[2], 2);
EXPECT_EQ(indices[3], 10);
EXPECT_EQ(indices[4], 11);
EXPECT_EQ(indices[5], 12);
EXPECT_EQ(indices[6], 50);
EXPECT_EQ(indices[7], 51);
EXPECT_EQ(indices[8], 52);
}
TEST(index_mask, RangeConstructor)
TEST(index_mask, ForeachRange)
{
IndexMask mask = IndexRange(3, 5);
EXPECT_EQ(mask.size(), 5);
EXPECT_EQ(mask.min_array_size(), 8);
EXPECT_EQ(mask.last(), 7);
EXPECT_TRUE(mask.is_range());
EXPECT_EQ(mask.as_range().first(), 3);
EXPECT_EQ(mask.as_range().last(), 7);
Span<int64_t> indices = mask.indices();
EXPECT_EQ(indices[0], 3);
EXPECT_EQ(indices[1], 4);
EXPECT_EQ(indices[2], 5);
IndexMaskMemory memory;
const IndexMask mask = IndexMask::from_indices<int>({2, 3, 4, 10, 40, 41}, memory);
Vector<IndexRange> ranges;
mask.foreach_range([&](const IndexRange range) { ranges.append(range); });
EXPECT_EQ(ranges.size(), 3);
EXPECT_EQ(ranges[0], IndexRange(2, 3));
EXPECT_EQ(ranges[1], IndexRange(10, 1));
EXPECT_EQ(ranges[2], IndexRange(40, 2));
}
TEST(index_mask, SliceAndOffset)
TEST(index_mask, Expr)
{
Vector<int64_t> indices;
{
IndexMask mask{IndexRange(10)};
IndexMask new_mask = mask.slice_and_offset(IndexRange(3, 5), indices);
EXPECT_TRUE(new_mask.is_range());
EXPECT_EQ(new_mask.size(), 5);
EXPECT_EQ(new_mask[0], 0);
EXPECT_EQ(new_mask[1], 1);
}
{
Vector<int64_t> original_indices = {2, 3, 5, 7, 8, 9, 10};
IndexMask mask{original_indices.as_span()};
IndexMask new_mask = mask.slice_and_offset(IndexRange(1, 4), indices);
EXPECT_FALSE(new_mask.is_range());
EXPECT_EQ(new_mask.size(), 4);
EXPECT_EQ(new_mask[0], 0);
EXPECT_EQ(new_mask[1], 2);
EXPECT_EQ(new_mask[2], 4);
EXPECT_EQ(new_mask[3], 5);
}
IndexMaskMemory memory;
const IndexMask mask1(IndexRange(10, 5));
const IndexMask mask2(IndexRange(40, 5));
const IndexMask mask3 = IndexMask::from_indices<int>({12, 13, 20, 21, 22}, memory);
const AtomicExpr expr1{mask1};
const AtomicExpr expr2{mask2};
const AtomicExpr expr3{mask3};
const UnionExpr union_expr({&expr1, &expr2});
const DifferenceExpr difference_expr(union_expr, {&expr3});
const IndexMask result = IndexMask::from_expr(difference_expr, IndexRange(100), memory);
std::cout << result << "\n";
}
TEST(index_mask, ExtractRanges)
TEST(index_mask, ToRange)
{
IndexMaskMemory memory;
{
Vector<int64_t> indices = {1, 2, 3, 5, 7, 8};
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges();
EXPECT_EQ(ranges.size(), 3);
EXPECT_EQ(ranges[0], IndexRange(1, 3));
EXPECT_EQ(ranges[1], IndexRange(5, 1));
EXPECT_EQ(ranges[2], IndexRange(7, 2));
const IndexMask mask = IndexMask::from_indices<int>({4, 5, 6, 7}, memory);
EXPECT_TRUE(mask.to_range().has_value());
EXPECT_EQ(*mask.to_range(), IndexRange(4, 4));
}
{
Vector<int64_t> indices;
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges();
EXPECT_EQ(ranges.size(), 0);
const IndexMask mask = IndexMask::from_indices<int>({}, memory);
EXPECT_TRUE(mask.to_range().has_value());
EXPECT_EQ(*mask.to_range(), IndexRange());
}
{
Vector<int64_t> indices = {5, 6, 7, 8, 9, 10};
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges();
EXPECT_EQ(ranges.size(), 1);
EXPECT_EQ(ranges[0], IndexRange(5, 6));
const IndexMask mask = IndexMask::from_indices<int>({0, 1, 3, 4}, memory);
EXPECT_FALSE(mask.to_range().has_value());
}
{
Vector<int64_t> indices = {1, 3, 6, 8};
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges();
EXPECT_EQ(ranges.size(), 4);
EXPECT_EQ(ranges[0], IndexRange(1, 1));
EXPECT_EQ(ranges[1], IndexRange(3, 1));
EXPECT_EQ(ranges[2], IndexRange(6, 1));
EXPECT_EQ(ranges[3], IndexRange(8, 1));
}
{
Vector<int64_t> indices;
IndexRange range1{4, 10};
IndexRange range2{20, 30};
IndexRange range3{100, 1};
IndexRange range4{150, 100};
for (const IndexRange &range : {range1, range2, range3, range4}) {
for (const int64_t i : range) {
indices.append(i);
}
}
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges();
EXPECT_EQ(ranges.size(), 4);
EXPECT_EQ(ranges[0], range1);
EXPECT_EQ(ranges[1], range2);
EXPECT_EQ(ranges[2], range3);
EXPECT_EQ(ranges[3], range4);
}
{
const int64_t max_test_range_size = 50;
Vector<int64_t> indices;
int64_t offset = 0;
for (const int64_t range_size : IndexRange(1, max_test_range_size)) {
for (const int i : IndexRange(range_size)) {
indices.append(offset + i);
}
offset += range_size + 1;
}
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges();
EXPECT_EQ(ranges.size(), max_test_range_size);
for (const int64_t range_size : IndexRange(1, max_test_range_size)) {
const IndexRange range = ranges[range_size - 1];
EXPECT_EQ(range.size(), range_size);
}
const IndexRange range{16000, 40000};
const IndexMask mask{range};
EXPECT_TRUE(mask.to_range().has_value());
EXPECT_EQ(*mask.to_range(), range);
}
}
TEST(index_mask, Invert)
{
{
Vector<int64_t> indices;
Vector<int64_t> new_indices;
IndexMask inverted_mask = IndexMask(indices).invert(IndexRange(10), new_indices);
EXPECT_EQ(inverted_mask.size(), 10);
EXPECT_TRUE(new_indices.is_empty());
}
{
Vector<int64_t> indices = {3, 4, 5, 6};
Vector<int64_t> new_indices;
IndexMask inverted_mask = IndexMask(indices).invert(IndexRange(3, 4), new_indices);
EXPECT_TRUE(inverted_mask.is_empty());
}
{
Vector<int64_t> indices = {5};
Vector<int64_t> new_indices;
IndexMask inverted_mask = IndexMask(indices).invert(IndexRange(10), new_indices);
EXPECT_EQ(inverted_mask.size(), 9);
EXPECT_EQ(inverted_mask.indices(), Span<int64_t>({0, 1, 2, 3, 4, 6, 7, 8, 9}));
}
{
Vector<int64_t> indices = {0, 1, 2, 6, 7, 9};
Vector<int64_t> new_indices;
IndexMask inverted_mask = IndexMask(indices).invert(IndexRange(10), new_indices);
EXPECT_EQ(inverted_mask.size(), 4);
EXPECT_EQ(inverted_mask.indices(), Span<int64_t>({3, 4, 5, 8}));
}
}
TEST(index_mask, ExtractRangesInvert)
{
{
Vector<int64_t> indices;
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges_invert(IndexRange(10), nullptr);
EXPECT_EQ(ranges.size(), 1);
EXPECT_EQ(ranges[0], IndexRange(10));
}
{
Vector<int64_t> indices = {1, 2, 3, 6, 7};
Vector<int64_t> skip_amounts;
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges_invert(IndexRange(10),
&skip_amounts);
EXPECT_EQ(ranges.size(), 3);
EXPECT_EQ(ranges[0], IndexRange(0, 1));
EXPECT_EQ(ranges[1], IndexRange(4, 2));
EXPECT_EQ(ranges[2], IndexRange(8, 2));
EXPECT_EQ(skip_amounts[0], 0);
EXPECT_EQ(skip_amounts[1], 3);
EXPECT_EQ(skip_amounts[2], 5);
}
{
Vector<int64_t> indices = {0, 1, 2, 3, 4};
Vector<int64_t> skip_amounts;
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges_invert(IndexRange(5),
&skip_amounts);
EXPECT_TRUE(ranges.is_empty());
EXPECT_TRUE(skip_amounts.is_empty());
}
{
Vector<int64_t> indices = {5, 6, 7, 10, 11};
Vector<int64_t> skip_amounts;
Vector<IndexRange> ranges = IndexMask(indices).extract_ranges_invert(IndexRange(5, 20),
&skip_amounts);
EXPECT_EQ(ranges.size(), 2);
EXPECT_EQ(ranges[0], IndexRange(8, 2));
EXPECT_EQ(ranges[1], IndexRange(12, 13));
EXPECT_EQ(skip_amounts[0], 3);
EXPECT_EQ(skip_amounts[1], 5);
}
}
TEST(index_mask, ContainedIn)
{
EXPECT_TRUE(IndexMask({3, 4, 5}).contained_in(IndexRange(10)));
EXPECT_TRUE(IndexMask().contained_in(IndexRange(5, 0)));
EXPECT_FALSE(IndexMask({3}).contained_in(IndexRange(3)));
EXPECT_FALSE(IndexMask({4, 5, 6}).contained_in(IndexRange(5, 10)));
EXPECT_FALSE(IndexMask({5, 6}).contained_in(IndexRange()));
}
} // namespace blender::tests
} // namespace blender::index_mask::tests