
BLI: generally improve C++ data structures

The main focus here was to improve the docs significantly. Furthermore,
I reimplemented `Set`, `Map` and `VectorSet`. They are now (usually)
faster, simpler and more customizable. I also rewrote `Stack` to make
it more efficient by avoiding unnecessary copies.

Thanks to everyone who helped with constructive feedback.

Approved by brecht and sybren.

Differential Revision: https://developer.blender.org/D7931
2020-06-09 10:10:56 +02:00
parent 50258d55e7
commit d8678e02ec
47 changed files with 6161 additions and 3164 deletions


@@ -257,6 +257,10 @@ ForEachMacros:
- SURFACE_QUAD_ITER_BEGIN
- foreach
- ED_screen_areas_iter
- SLOT_PROBING_BEGIN
- SET_SLOT_PROBING_BEGIN
- MAP_SLOT_PROBING_BEGIN
- VECTOR_SET_SLOT_PROBING_BEGIN
# Use once we bump the minimum version to version 8.
# # Without this string literals that in-line 'STRINGIFY' behave strangely (a bug?).


@@ -19,14 +19,23 @@
/** \file
* \ingroup bli
*
* This file offers a couple of memory allocators that can be used with containers such as Vector
* and Map. Note that these allocators are not designed to work with standard containers like
* An `Allocator` can allocate and deallocate memory. It is used by containers such as BLI::Vector.
* The allocators defined in this file do not work with standard library containers such as
* std::vector.
*
* Also see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2271.html for why the standard
* allocators are not a good fit applications like Blender. The current implementations in this
* file are fairly simple still, more complexity can be added when necessary. For now they do their
* job good enough.
* Every allocator has to implement two methods:
* void *allocate(size_t size, size_t alignment, const char *name);
* void deallocate(void *ptr);
*
* We don't use the std::allocator interface, because it does more than is really necessary for an
* allocator and has some other quirks. It mixes the concepts of allocation and construction. It is
* essentially forced to be a template, even though the allocator should not care about the type.
* Also see http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2271.html#std_allocator. Some
* of these aspects have been improved in new versions of C++, so we might have to reevaluate the
* strategy later on.
*
* The allocator interface dictated by this file is very simplistic, but for now that is all we
* need. More complexity can be added when it seems necessary.
*/
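To make the required interface concrete, a minimal allocator could look roughly like the sketch below. This is an illustration only: `TrivialAllocator` is a made-up name, it ignores the alignment parameter (acceptable only while the requested alignment does not exceed alignof(std::max_align_t)), and the usage line assumes that BLI::Vector takes the allocator as its last template argument.

  class TrivialAllocator {
   public:
    void *allocate(size_t size, size_t UNUSED(alignment), const char *UNUSED(name))
    {
      return malloc(size);
    }

    void deallocate(void *ptr)
    {
      free(ptr);
    }
  };

  /* Hypothetical usage: plug the allocator into a container. */
  BLI::Vector<int, 4, TrivialAllocator> numbers;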
#include <algorithm>
@@ -40,18 +49,14 @@
namespace BLI {
/**
* Use Blenders guarded allocator (aka MEM_malloc). This should always be used except there is a
* Use Blender's guarded allocator (aka MEM_*). This should always be used unless there is a
* good reason not to use it.
*/
class GuardedAllocator {
public:
void *allocate(uint size, const char *name)
{
return MEM_mallocN(size, name);
}
void *allocate_aligned(uint size, uint alignment, const char *name)
void *allocate(size_t size, size_t alignment, const char *name)
{
/* Should we use MEM_mallocN, when alignment is small? If yes, how small must alignment be? */
return MEM_mallocN_aligned(size, alignment, name);
}
@@ -62,8 +67,9 @@ class GuardedAllocator {
};
/**
* This is a simple wrapper around malloc/free. Only use this when the GuardedAllocator cannot be
* used. This can be the case when the allocated element might live longer than Blenders Allocator.
* This is a wrapper around malloc/free. Only use this when the GuardedAllocator cannot be
* used. This can be the case when the allocated memory might live longer than Blender's
* allocator. For example, when the memory is owned by a static variable.
*/
class RawAllocator {
private:
@@ -72,14 +78,7 @@ class RawAllocator {
};
public:
void *allocate(uint size, const char *UNUSED(name))
{
void *ptr = malloc(size + sizeof(MemHead));
((MemHead *)ptr)->offset = sizeof(MemHead);
return POINTER_OFFSET(ptr, sizeof(MemHead));
}
void *allocate_aligned(uint size, uint alignment, const char *UNUSED(name))
void *allocate(size_t size, size_t alignment, const char *UNUSED(name))
{
BLI_assert(is_power_of_2_i((int)alignment));
void *ptr = malloc(size + alignment + sizeof(MemHead));


@@ -19,8 +19,23 @@
/** \file
* \ingroup bli
*
* This is a container that contains a fixed size array. Note however, the size of the array is not
* a template argument. Instead it can be specified at the construction time.
* A `BLI::Array<T>` is a container for a fixed-size array, the size of which is NOT known at
* compile time.
*
* If the size is known at compile time, `std::array<T, N>` should be used instead.
*
* BLI::Array should usually be used instead of BLI::Vector whenever the number of elements is
* known at construction time. Note, however, that BLI::Array will default construct all elements
* when initialized with the size-constructor. For trivial types, this does nothing. In all other
* cases, this adds overhead. If this becomes a problem, a different constructor which does not do
* default construction can be added.
*
* A main benefit of using Array over Vector is that it expresses the intent of the developer
* better. It indicates that the size of the data structure is not expected to change. Furthermore,
* you can be more certain that an array does not overallocate.
*
* BLI::Array supports small object optimization to improve performance when the size turns out to
* be small at run-time.
*/
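For a rough idea of how the container described above is used, here is an illustrative sketch (not part of the diff); the names and sizes are arbitrary.

  /* 16 default-constructed ints; for a trivial type the values stay indeterminate. */
  BLI::Array<int> values(16);
  values.fill(0);

  /* The inline buffer capacity can be chosen explicitly when small sizes are expected. */
  BLI::Array<float, 32> weights(10, 1.0f);

  /* Construction from given values. */
  BLI::Array<std::string> names = {"A", "B", "C"};

  for (uint i : values.index_range()) {
    values[i] = (int)i;
  }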
#include "BLI_allocator.hh"
@@ -31,42 +46,83 @@
namespace BLI {
template<typename T, uint InlineBufferCapacity = 4, typename Allocator = GuardedAllocator>
template<
/**
* The type of the values stored in the array.
*/
typename T,
/**
* The number of values that can be stored in the array, without doing a heap allocation.
*
* When T is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
/**
* The allocator used by this array. Should rarely be changed, except when you don't want the
* MEM_* functions to be used internally.
*/
typename Allocator = GuardedAllocator>
class Array {
private:
/** The beginning of the array. It might point into the inline buffer. */
T *m_data;
/** Number of elements in the array. */
uint m_size;
/** Used for allocations when the inline buffer is too small. */
Allocator m_allocator;
AlignedBuffer<sizeof(T) * InlineBufferCapacity, alignof(T)> m_inline_storage;
/** A placeholder buffer that will remain uninitialized until it is used. */
AlignedBuffer<sizeof(T) * InlineBufferCapacity, alignof(T)> m_inline_buffer;
public:
/**
* By default an empty array is created.
*/
Array()
{
m_data = this->inline_storage();
m_data = this->inline_buffer();
m_size = 0;
}
/**
* Create a new array that contains copies of all values.
*/
Array(ArrayRef<T> values)
{
m_size = values.size();
m_data = this->get_buffer_for_size(values.size());
uninitialized_copy_n(values.begin(), m_size, m_data);
uninitialized_copy_n(values.data(), m_size, m_data);
}
/**
* Create a new array that contains copies of all values.
*/
Array(const std::initializer_list<T> &values) : Array(ArrayRef<T>(values))
{
}
/**
* Create a new array with the given size. All values will be default constructed. For trivial
* types like int, default construction does nothing.
*
* We might want another version of this in the future that does not do default construction
* even for non-trivial types. This should not be the default though, because one can easily mess
* up when dealing with uninitialized memory.
*/
explicit Array(uint size)
{
m_size = size;
m_data = this->get_buffer_for_size(size);
for (uint i = 0; i < m_size; i++) {
new (m_data + i) T();
}
default_construct_n(m_data, size);
}
/**
* Create a new array with the given size. All values will be initialized by copying the given
* default.
*/
Array(uint size, const T &value)
{
m_size = size;
@@ -74,21 +130,19 @@ class Array {
uninitialized_fill_n(m_data, m_size, value);
}
Array(const Array &other)
Array(const Array &other) : m_allocator(other.m_allocator)
{
m_size = other.size();
m_allocator = other.m_allocator;
m_data = this->get_buffer_for_size(other.size());
uninitialized_copy_n(other.begin(), m_size, m_data);
uninitialized_copy_n(other.data(), m_size, m_data);
}
Array(Array &&other) noexcept
Array(Array &&other) noexcept : m_allocator(other.m_allocator)
{
m_size = other.m_size;
m_allocator = other.m_allocator;
if (!other.uses_inline_storage()) {
if (!other.uses_inline_buffer()) {
m_data = other.m_data;
}
else {
@@ -96,14 +150,14 @@ class Array {
uninitialized_relocate_n(other.m_data, m_size, m_data);
}
other.m_data = other.inline_storage();
other.m_data = other.inline_buffer();
other.m_size = 0;
}
~Array()
{
destruct_n(m_data, m_size);
if (!this->uses_inline_storage()) {
if (!this->uses_inline_buffer()) {
m_allocator.deallocate((void *)m_data);
}
}
@@ -162,21 +216,50 @@ class Array {
return m_data[index];
}
/**
* Returns the number of elements in the array.
*/
uint size() const
{
return m_size;
}
void fill(const T &value)
/**
* Returns true when the number of elements in the array is zero.
*/
bool is_empty() const
{
MutableArrayRef<T>(*this).fill(value);
return m_size == 0;
}
/**
* Copies the value to all indices in the array.
*/
void fill(const T &value)
{
initialized_fill_n(m_data, m_size, value);
}
/**
* Copies the value to the given indices in the array.
*/
void fill_indices(ArrayRef<uint> indices, const T &value)
{
MutableArrayRef<T>(*this).fill_indices(indices, value);
}
/**
* Get a pointer to the beginning of the array.
*/
const T *data() const
{
return m_data;
}
T *data()
{
return m_data;
}
const T *begin() const
{
return m_data;
@@ -197,41 +280,64 @@ class Array {
return m_data + m_size;
}
/**
* Get an index range containing all valid indices for this array.
*/
IndexRange index_range() const
{
return IndexRange(m_size);
}
/**
* Sets the size to zero. This should only be used when you have manually destructed all elements
* in the array beforehand. Use with care.
*/
void clear_without_destruct()
{
m_size = 0;
}
/**
* Access the allocator used by this array.
*/
Allocator &allocator()
{
return m_allocator;
}
/**
* Get the value of the InlineBufferCapacity template argument. This is the number of elements
* that can be stored without doing an allocation.
*/
static uint inline_buffer_capacity()
{
return InlineBufferCapacity;
}
private:
T *get_buffer_for_size(uint size)
{
if (size <= InlineBufferCapacity) {
return this->inline_storage();
return this->inline_buffer();
}
else {
return this->allocate(size);
}
}
T *inline_storage() const
T *inline_buffer() const
{
return (T *)m_inline_storage.ptr();
return (T *)m_inline_buffer.ptr();
}
T *allocate(uint size)
{
return (T *)m_allocator.allocate_aligned(
size * sizeof(T), std::alignment_of<T>::value, __func__);
return (T *)m_allocator.allocate(size * sizeof(T), alignof(T), AT);
}
bool uses_inline_storage() const
bool uses_inline_buffer() const
{
return m_data == this->inline_storage();
return m_data == this->inline_buffer();
}
};


@@ -20,19 +20,38 @@
/** \file
* \ingroup bli
*
* These classes offer a convenient way to work with continuous chunks of memory of a certain type.
* We differentiate #ArrayRef and #MutableArrayRef. The elements in the former are const while the
* elements in the other are not.
* A `BLI::ArrayRef<T>` references an array that is owned by someone else. It is just a pointer and
* a size. Since the memory is not owned, ArrayRef should not be used to transfer ownership. The
* array cannot be modified through the ArrayRef. However, if T is a non-const pointer, the
* pointed-to elements can be modified.
*
* Passing array references as parameters has multiple benefits:
* - Less templates are used because the function does not have to work with different
* container types.
* - It encourages an Struct-of-Arrays data layout which is often beneficial when
* writing high performance code. Also it makes it easier to reuse code.
* - Array references offer convenient ways of slicing and other operations.
* There is also `BLI::MutableArrayRef<T>`. It is mostly the same as ArrayRef, but allows the array
* to be modified.
*
* The instances of #ArrayRef and #MutableArrayRef are very small and should be passed by value.
* Since array references do not own any memory, it is generally not save to store them.
* An (Mutable)ArrayRef can refer to data owned by many different data structures including
* BLI::Vector, BLI::Array, BLI::VectorSet, std::vector, std::array, std::string,
* std::initializer_list and C-style arrays.
*
* `BLI::ArrayRef<T>` should be your default choice when you have to pass a read-only array into a
* function. It is better than passing a `const Vector &`, because then the function only works for
* vectors and not for e.g. arrays. Using ArrayRef as a function parameter makes it usable in more
* contexts, better expresses the intent and does not sacrifice performance. It is also better than
* passing a raw pointer and size separately, because it is more convenient and safe.
*
* `BLI::MutableArrayRef<T>` can be used when a function is supposed to return an array, the size
* of which is known before the function is called. One advantage of this approach is that the
* caller is responsible for allocation and deallocation. Furthermore, the function can focus on
* its task, without having to worry about memory allocation. Alternatively, a function could
* return an Array or Vector.
*
* Note: When a function has a MutableArrayRef<T> output parameter and T is not a trivial type,
* then the function has to specify whether the referenced array is expected to be initialized or
* not.
*
* Since the arrays are only referenced, it is generally unsafe to store an ArrayRef. When you
* store one, you should know who owns the memory.
*
* Instances of ArrayRef and MutableArrayRef are small and should be passed by value.
*/
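As an illustrative sketch of the guidance above (the callers are made up, and the implicit conversion from BLI::Vector and the pointer-and-size constructor are assumed to exist):

  /* Works with any contiguous array of ints, regardless of who owns it. */
  int sum_values(BLI::ArrayRef<int> values)
  {
    int sum = 0;
    for (int value : values) {
      sum += value;
    }
    return sum;
  }

  void example_callers()
  {
    BLI::Vector<int> vector = {1, 2, 3};
    std::vector<int> std_vector = {4, 5, 6};
    int c_array[3] = {7, 8, 9};

    sum_values(vector);
    sum_values(std_vector);
    sum_values(BLI::ArrayRef<int>(c_array, 3));
    sum_values({10, 11, 12}); /* A temporary initializer_list as argument is fine. */
  }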
#include <algorithm>
@@ -48,7 +67,8 @@
namespace BLI {
/**
* References an array of data. The elements in the array should not be changed.
* References an array of type T that is owned by someone else. The data in the array cannot be
* modified.
*/
template<typename T> class ArrayRef {
private:
@@ -58,7 +78,6 @@ template<typename T> class ArrayRef {
public:
/**
* Create a reference to an empty array.
* The pointer is allowed to be nullptr.
*/
ArrayRef() = default;
@@ -66,11 +85,22 @@ template<typename T> class ArrayRef {
{
}
ArrayRef(const std::initializer_list<T> &list) : ArrayRef(list.begin(), list.size())
/**
* Reference an initializer_list. Note that the data in the initializer_list is only valid until
* the expression containing it is fully computed.
*
* Do:
* call_function_with_array({1, 2, 3, 4});
*
* Don't:
* ArrayRef<int> ref = {1, 2, 3, 4};
* call_function_with_array(ref);
*/
ArrayRef(const std::initializer_list<T> &list) : ArrayRef(list.begin(), (uint)list.size())
{
}
ArrayRef(const std::vector<T> &vector) : ArrayRef(vector.data(), vector.size())
ArrayRef(const std::vector<T> &vector) : ArrayRef(vector.data(), (uint)vector.size())
{
}
@@ -79,18 +109,19 @@ template<typename T> class ArrayRef {
}
/**
* ArrayRef<T *> -> ArrayRef<const T *>
* ArrayRef<Derived *> -> ArrayRef<Base *>
* Support implicit conversions like the ones below:
* ArrayRef<T *> -> ArrayRef<const T *>
* ArrayRef<Derived *> -> ArrayRef<Base *>
*/
template<typename U,
typename std::enable_if<std::is_convertible<U *, T>::value>::type * = nullptr>
ArrayRef(ArrayRef<U *> array) : ArrayRef((T *)array.begin(), array.size())
ArrayRef(ArrayRef<U *> array) : ArrayRef((T *)array.data(), array.size())
{
}
/**
* Return a continuous part of the array.
* Asserts that the slice stays within the array.
* Returns a contiguous part of the array. This invokes undefined behavior when the slice does
* not stay within the bounds of the array.
*/
ArrayRef slice(uint start, uint size) const
{
@@ -104,28 +135,28 @@ template<typename T> class ArrayRef {
}
/**
* Return a new ArrayRef with n elements removed from the beginning.
* Asserts that the array contains enough elements.
* Returns a new ArrayRef with n elements removed from the beginning. This invokes undefined
* behavior when the array is too small.
*/
ArrayRef drop_front(uint n = 1) const
ArrayRef drop_front(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(n, this->size() - n);
}
/**
* Return a new ArrayRef with n elements removed from the beginning.
* Asserts that the array contains enough elements.
* Returns a new ArrayRef with n elements removed from the end. This invokes undefined
* behavior when the array is too small.
*/
ArrayRef drop_back(uint n = 1) const
ArrayRef drop_back(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(0, this->size() - n);
}
/**
* Return a new ArrayRef that only contains the first n elements.
* Asserts that the array contains enough elements.
* Returns a new ArrayRef that only contains the first n elements. This invokes undefined
* behavior when the array is too small.
*/
ArrayRef take_front(uint n) const
{
@@ -134,8 +165,8 @@ template<typename T> class ArrayRef {
}
/**
* Return a new ArrayRef that only contains the last n elements.
* Asserts that the array contains enough elements.
* Returns a new ArrayRef that only contains the last n elements. This invokes undefined
* behavior when the array is too small.
*/
ArrayRef take_back(uint n) const
{
@@ -144,11 +175,12 @@ template<typename T> class ArrayRef {
}
/**
* Copy the values in this array to another array.
* Returns the pointer to the beginning of the referenced array. This may be nullptr when the
* size is zero.
*/
void copy_to(T *ptr) const
const T *data() const
{
BLI::copy_n(m_start, m_size, ptr);
return m_start;
}
const T *begin() const
@@ -162,8 +194,8 @@ template<typename T> class ArrayRef {
}
/**
* Access an element in the array.
* Asserts that the index is in the bounds of the array.
* Access an element in the array. This invokes undefined behavior when the index is out of
* bounds.
*/
const T &operator[](uint index) const
{
@@ -172,7 +204,7 @@ template<typename T> class ArrayRef {
}
/**
* Return the number of elements in the referenced array.
* Returns the number of elements in the referenced array.
*/
uint size() const
{
@@ -180,16 +212,24 @@ template<typename T> class ArrayRef {
}
/**
* Return the number of bytes referenced by this ArrayRef.
* Returns true if the size is zero.
*/
uint byte_size() const
bool is_empty() const
{
return m_size == 0;
}
/**
* Returns the number of bytes referenced by this ArrayRef.
*/
uint size_in_bytes() const
{
return sizeof(T) * m_size;
}
/**
* Does a linear search to see if the value is in the array.
* Return true if it is, otherwise false.
* Returns true if it is, otherwise false.
*/
bool contains(const T &value) const
{
@@ -202,7 +242,7 @@ template<typename T> class ArrayRef {
}
/**
* Does a constant time check to see if the pointer is within the referenced array.
* Does a constant time check to see if the pointer points to a value in the referenced array.
* Return true if it is, otherwise false.
*/
bool contains_ptr(const T *ptr) const
@@ -226,8 +266,8 @@ template<typename T> class ArrayRef {
}
/**
* Return a reference to the first element in the array.
* Asserts that the array is not empty.
* Return a reference to the first element in the array. This invokes undefined behavior when the
* array is empty.
*/
const T &first() const
{
@@ -236,8 +276,8 @@ template<typename T> class ArrayRef {
}
/**
* Return a reference to the last element in the array.
* Asserts that the array is not empty.
* Returns a reference to the last element in the array. This invokes undefined behavior when the
* array is empty.
*/
const T &last() const
{
@@ -246,7 +286,8 @@ template<typename T> class ArrayRef {
}
/**
* Get element at the given index. If the index is out of range, return the fallback value.
* Returns the element at the given index. If the index is out of range, return the fallback
* value.
*/
T get(uint index, const T &fallback) const
{
@@ -277,6 +318,11 @@ template<typename T> class ArrayRef {
return false;
}
/**
* Returns true when this and the other array have an element in common. This should only be
* called on small arrays, because it has a running time of O(n*m) where n and m are the sizes of
* the arrays.
*/
bool intersects__linear_search(ArrayRef other) const
{
/* The size should really be smaller than that. If it is not, the calling code should be
@@ -292,6 +338,10 @@ template<typename T> class ArrayRef {
return false;
}
/**
* Returns the index of the first occurrence of the given value. This invokes undefined behavior
* when the value is not in the array.
*/
uint first_index(const T &search_value) const
{
int index = this->first_index_try(search_value);
@@ -299,6 +349,9 @@ template<typename T> class ArrayRef {
return (uint)index;
}
/**
* Returns the index of the first occurrence of the given value or -1 if it does not exist.
*/
int first_index_try(const T &search_value) const
{
for (uint i = 0; i < m_size; i++) {
@@ -309,16 +362,6 @@ template<typename T> class ArrayRef {
return -1;
}
template<typename PredicateT> bool any(const PredicateT predicate)
{
for (uint i = 0; i < m_size; i++) {
if (predicate(m_start[i])) {
return true;
}
}
return false;
}
/**
* Utility to make it more convenient to iterate over all indices that can be used with this
* array.
@@ -329,7 +372,7 @@ template<typename T> class ArrayRef {
}
/**
* Get a new array ref to the same underlying memory buffer. No conversions are done.
* Returns a new ArrayRef to the same underlying memory buffer. No conversions are done.
*/
template<typename NewT> ArrayRef<NewT> cast() const
{
@@ -339,7 +382,7 @@ template<typename T> class ArrayRef {
}
/**
* A debug utility to print the content of the array ref. Every element will be printed on a
* A debug utility to print the content of the ArrayRef. Every element will be printed on a
* separate line using the given callback.
*/
template<typename PrintLineF> void print_as_lines(std::string name, PrintLineF print_line) const
@@ -352,6 +395,10 @@ template<typename T> class ArrayRef {
}
}
/**
* A debug utility to print the content of the ArrayRef. Every element will be printed on a
* separate line.
*/
void print_as_lines(std::string name) const
{
this->print_as_lines(name, [](const T &value) { std::cout << value; });
@@ -359,7 +406,8 @@ template<typename T> class ArrayRef {
};
/**
* Mostly the same as ArrayRef, except that one can change the array elements via this reference.
* Mostly the same as ArrayRef, except that one can change the array elements through a
* MutableArrayRef.
*/
template<typename T> class MutableArrayRef {
private:
@@ -373,6 +421,17 @@ template<typename T> class MutableArrayRef {
{
}
/**
* Reference an initializer_list. Note that the data in the initializer_list is only valid until
* the expression containing it is fully computed.
*
* Do:
* call_function_with_array({1, 2, 3, 4});
*
* Don't:
* MutableArrayRef<int> ref = {1, 2, 3, 4};
* call_function_with_array(ref);
*/
MutableArrayRef(std::initializer_list<T> &list) : MutableArrayRef(list.begin(), list.size())
{
}
@@ -392,7 +451,7 @@ template<typename T> class MutableArrayRef {
}
/**
* Get the number of elements in the array.
* Returns the number of elements in the array.
*/
uint size() const
{
@@ -402,33 +461,30 @@ template<typename T> class MutableArrayRef {
/**
* Replace all elements in the referenced array with the given value.
*/
void fill(const T &element)
void fill(const T &value)
{
std::fill_n(m_start, m_size, element);
initialized_fill_n(m_start, m_size, value);
}
/**
* Replace a subset of all elements with the given value.
* Replace a subset of all elements with the given value. This invokes undefined behavior when
* one of the indices is out of bounds.
*/
void fill_indices(ArrayRef<uint> indices, const T &element)
void fill_indices(ArrayRef<uint> indices, const T &value)
{
for (uint i : indices) {
m_start[i] = element;
BLI_assert(i < m_size);
m_start[i] = value;
}
}
/**
* Copy the values from another array into the references array.
* Returns a pointer to the beginning of the referenced array. This may be nullptr, when the size
* is zero.
*/
void copy_from(const T *ptr)
T *data() const
{
BLI::copy_n(ptr, m_size, m_start);
}
void copy_from(ArrayRef<T> other)
{
BLI_assert(this->size() == other.size());
this->copy_from(other.begin());
return m_start;
}
T *begin() const
@@ -448,8 +504,8 @@ template<typename T> class MutableArrayRef {
}
/**
* Return a continuous part of the array.
* Asserts that the slice stays in the array bounds.
* Returns a contiguous part of the array. This invokes undefined behavior when the slice would
* go out of bounds.
*/
MutableArrayRef slice(uint start, uint length) const
{
@@ -458,25 +514,28 @@ template<typename T> class MutableArrayRef {
}
/**
* Return a new MutableArrayRef with n elements removed from the beginning.
* Returns a new MutableArrayRef with n elements removed from the beginning. This invokes
* undefined behavior when the array is too small.
*/
MutableArrayRef drop_front(uint n = 1) const
MutableArrayRef drop_front(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(n, this->size() - n);
}
/**
* Return a new MutableArrayRef with n elements removed from the beginning.
* Returns a new MutableArrayRef with n elements removed from the end. This invokes undefined
* behavior when the array is too small.
*/
MutableArrayRef drop_back(uint n = 1) const
MutableArrayRef drop_back(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(0, this->size() - n);
}
/**
* Return a new MutableArrayRef that only contains the first n elements.
* Returns a new MutableArrayRef that only contains the first n elements. This invokes undefined
* behavior when the array is too small.
*/
MutableArrayRef take_front(uint n) const
{
@@ -485,7 +544,8 @@ template<typename T> class MutableArrayRef {
}
/**
* Return a new MutableArrayRef that only contains the last n elements.
* Return a new MutableArrayRef that only contains the last n elements. This invokes undefined
* behavior when the array is too small.
*/
MutableArrayRef take_back(uint n) const
{
@@ -493,16 +553,28 @@ template<typename T> class MutableArrayRef {
return this->slice(this->size() - n, n);
}
/**
* Returns an (immutable) ArrayRef that references the same array. This is usually not needed,
* due to implicit conversions. However, sometimes automatic type deduction needs some help.
*/
ArrayRef<T> as_ref() const
{
return ArrayRef<T>(m_start, m_size);
}
/**
* Utility to make it more convenient to iterate over all indices that can be used with this
* array.
*/
IndexRange index_range() const
{
return IndexRange(m_size);
}
/**
* Returns a reference to the last element. This invokes undefined behavior when the array is
* empty.
*/
const T &last() const
{
BLI_assert(m_size > 0);
@@ -510,7 +582,7 @@ template<typename T> class MutableArrayRef {
}
/**
* Get a new array ref to the same underlying memory buffer. No conversions are done.
* Returns a new array ref to the same underlying memory buffer. No conversions are done.
*/
template<typename NewT> MutableArrayRef<NewT> cast() const
{
@@ -528,6 +600,9 @@ template<typename T> ArrayRef<T> ref_c_array(const T *array, uint size)
return ArrayRef<T>(array, size);
}
/**
* Utilities to check that arrays have the same size in debug builds.
*/
template<typename T1, typename T2> void assert_same_size(const T1 &v1, const T2 &v2)
{
UNUSED_VARS_NDEBUG(v1, v2);


@@ -27,7 +27,6 @@
#include "BLI_map.hh"
#include "BLI_optional.hh"
#include "BLI_set.hh"
#include "BLI_string_map.hh"
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"
@@ -57,7 +56,7 @@ class AttributeList {
void set(StringRef key, StringRef value)
{
m_attributes.add_override(key, value);
m_attributes.add_overwrite(key, value);
}
};


@@ -20,8 +20,58 @@
/** \file
* \ingroup bli
*
* This file provides default hash functions for some primitive types. The hash functions can be
* used by containers such as Map and Set.
* A specialization of `BLI::DefaultHash<T>` provides a hash function for values of type T. This
* hash function is used by default in hash table implementations in blenlib.
*
* The actual hash function is in the `operator()` method of DefaultHash<T>. The following code
* computes the hash of some value using DefaultHash.
*
* T value = ...;
* DefaultHash<T> hash_function;
* uint32_t hash = hash_function(value);
*
* Hash table implementations like BLI::Set support heterogeneous key lookups. That means that one
* can do a lookup with a key of type A in a hash table that stores keys of type B. This is
* commonly done when B is std::string, because the conversion from e.g. a StringRef to std::string
* can be costly and is unnecessary. To make this work, values of type A and B that compare equal
* have to have the same hash value. This is achieved by defining potentially multiple `operator()`
* in a specialization of DefaultHash. All those methods have to compute the same hash for values
* that compare equal.
*
* The computed hash is an unsigned 32 bit integer. Ideally, the hash function would generate
* uniformly random hash values for a set of keys. However, in many cases trivial hash functions
* are faster and produce a good enough distribution. In general it is better when more information
* is in the lower bits of the hash. By choosing a good probing strategy, the effects of a bad hash
* function are less noticeable though. In this context a good probing strategy is one that takes
* all bits of the hash into account eventually. One has to check on a case-by-case basis to see
* whether a better but more expensive hash function or a trivial one works better.
*
* There are three main ways to provide a hash table implementation with a custom hash function.
*
* - When you want to provide a default hash function for your own custom type: Add a `hash`
* member function to it. The function should return `uint32_t` and take no arguments. This
* method will be called by the default implementation of DefaultHash. It will automatically be
* used by hash table implementations.
*
* - When you want to provide a default hash function for a type that you cannot modify: Add a new
* specialization to the DefaultHash struct. This can be done by writing code like below in
* either the global or the BLI namespace.
*
* template<> struct BLI::DefaultHash<TheType> {
* uint32_t operator()(const TheType &value) const {
* return ...;
* }
* };
*
* - When you want to provide a different hash function for a type that already has a default hash
* function: Implement a struct like the one below and pass it explicitly as a template parameter
* to the hash table.
*
* struct MyCustomHash {
* uint32_t operator()(const TheType &value) const {
* return ...;
* }
* };
*/
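As an illustrative sketch of the first strategy (the type and its members are made up):

  struct GridCell {
    int x;
    int y;

    /* Picked up automatically by the generic DefaultHash implementation below. */
    uint32_t hash() const
    {
      return (uint32_t)x * 33 ^ (uint32_t)y;
    }

    friend bool operator==(const GridCell &a, const GridCell &b)
    {
      return a.x == b.x && a.y == b.y;
    }
  };

  /* GridCell can now be used as a key without naming a hash function explicitly. */
  BLI::Set<GridCell> visited_cells;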
#include <functional>
@@ -35,7 +85,16 @@
namespace BLI {
/**
* If there is no other specialization of DefaultHash for a given type, try to call `hash()` on the
* value. If there is no such method, this will result in a compiler error. Usually that means that
* you have to implement a hash function using one of the three strategies listed above.
*/
template<typename T> struct DefaultHash {
uint32_t operator()(const T &value) const
{
return value.hash();
}
};
#define TRIVIAL_DEFAULT_INT_HASH(TYPE) \
@@ -47,9 +106,9 @@ template<typename T> struct DefaultHash {
}
/**
* Cannot make any assumptions about the distribution of keys, so use a trivial hash function by
* default. The hash table implementations are designed to take all bits of the hash into account
* to avoid really bad behavior when the lower bits are all zero. Special hash functions can be
* We cannot make any assumptions about the distribution of keys, so use a trivial hash function by
* default. The default probing strategy is designed to take all bits of the hash into account
* to avoid worst case behavior when the lower bits are all zero. Special hash functions can be
* implemented when more knowledge about a specific key distribution is available.
*/
TRIVIAL_DEFAULT_INT_HASH(int8_t);
@@ -58,9 +117,26 @@ TRIVIAL_DEFAULT_INT_HASH(int16_t);
TRIVIAL_DEFAULT_INT_HASH(uint16_t);
TRIVIAL_DEFAULT_INT_HASH(int32_t);
TRIVIAL_DEFAULT_INT_HASH(uint32_t);
TRIVIAL_DEFAULT_INT_HASH(int64_t);
TRIVIAL_DEFAULT_INT_HASH(uint64_t);
template<> struct DefaultHash<uint64_t> {
uint32_t operator()(uint64_t value) const
{
uint32_t low = (uint32_t)value;
uint32_t high = (uint32_t)(value >> 32);
return low ^ (high * 0x45d9f3b);
}
};
template<> struct DefaultHash<int64_t> {
uint32_t operator()(uint64_t value) const
{
return DefaultHash<uint64_t>{}((uint64_t)value);
}
};
/**
* One should try to avoid using floats as keys in hash tables, but sometimes it is convenient.
*/
template<> struct DefaultHash<float> {
uint32_t operator()(float value) const
{
@@ -78,35 +154,38 @@ inline uint32_t hash_string(StringRef str)
}
template<> struct DefaultHash<std::string> {
uint32_t operator()(const std::string &value) const
/**
* Take a StringRef as parameter to support heterogeneous lookups in hash table implementations
* when std::string is used as key.
*/
uint32_t operator()(StringRef value) const
{
return hash_string(value);
}
};
template<> struct DefaultHash<StringRef> {
uint32_t operator()(const StringRef &value) const
uint32_t operator()(StringRef value) const
{
return hash_string(value);
}
};
template<> struct DefaultHash<StringRefNull> {
uint32_t operator()(const StringRefNull &value) const
uint32_t operator()(StringRef value) const
{
return hash_string(value);
}
};
/**
* While we cannot guarantee that the lower 3 bits or a pointer are zero, it is safe to assume
* this in the general case. MEM_malloc only returns 8 byte aligned addresses on 64-bit systems.
* While we cannot guarantee that the lower 4 bits of a pointer are zero, it is often the case.
*/
template<typename T> struct DefaultHash<T *> {
uint32_t operator()(const T *value) const
{
uintptr_t ptr = POINTER_AS_UINT(value);
uint32_t hash = (uint32_t)(ptr >> 3);
uintptr_t ptr = (uintptr_t)value;
uint32_t hash = (uint32_t)(ptr >> 4);
return hash;
}
};


@@ -0,0 +1,350 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __BLI_OPEN_ADDRESSING_HH__
#define __BLI_OPEN_ADDRESSING_HH__
/** \file
* \ingroup bli
*
* This file contains code that can be shared between different hash table implementations.
*/
#include <cmath>
#include "BLI_allocator.hh"
#include "BLI_array.hh"
#include "BLI_math_base.h"
#include "BLI_memory_utils.hh"
#include "BLI_string.h"
#include "BLI_utildefines.h"
#include "BLI_vector.hh"
namespace BLI {
/* -------------------------------------------------------------------- */
/** \name Constexpr Utility Functions
*
* Those should eventually be deduplicated with functions in BLI_math_base.h.
* \{ */
inline constexpr int is_power_of_2_i_constexpr(int n)
{
return (n & (n - 1)) == 0;
}
inline constexpr uint32_t log2_floor_u_constexpr(uint32_t x)
{
return x <= 1 ? 0 : 1 + log2_floor_u_constexpr(x >> 1);
}
inline constexpr uint32_t log2_ceil_u_constexpr(uint32_t x)
{
return (is_power_of_2_i_constexpr((int)x)) ? log2_floor_u_constexpr(x) :
log2_floor_u_constexpr(x) + 1;
}
inline constexpr uint32_t power_of_2_max_u_constexpr(uint32_t x)
{
return 1 << log2_ceil_u_constexpr(x);
}
template<typename IntT> inline constexpr IntT ceil_division(IntT x, IntT y)
{
BLI_STATIC_ASSERT(!std::is_signed<IntT>::value, "");
return x / y + ((x % y) != 0);
}
template<typename IntT> inline constexpr IntT floor_division(IntT x, IntT y)
{
BLI_STATIC_ASSERT(!std::is_signed<IntT>::value, "");
return x / y;
}
inline constexpr uint32_t ceil_division_by_fraction(uint32_t x,
uint32_t numerator,
uint32_t denominator)
{
return (uint32_t)ceil_division((uint64_t)x * (uint64_t)denominator, (uint64_t)numerator);
}
inline constexpr uint32_t floor_multiplication_with_fraction(uint32_t x,
uint32_t numerator,
uint32_t denominator)
{
return (uint32_t)((uint64_t)x * (uint64_t)numerator / (uint64_t)denominator);
}
inline constexpr uint32_t total_slot_amount_for_usable_slots(uint32_t min_usable_slots,
uint32_t max_load_factor_numerator,
uint32_t max_load_factor_denominator)
{
return power_of_2_max_u_constexpr(ceil_division_by_fraction(
min_usable_slots, max_load_factor_numerator, max_load_factor_denominator));
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Load Factor
*
* This is an abstraction for a fractional load factor. The hash table using this class is assumed
* to use arrays with a size that is a power of two.
*
* \{ */
class LoadFactor {
private:
uint8_t m_numerator;
uint8_t m_denominator;
public:
LoadFactor(uint8_t numerator, uint8_t denominator)
: m_numerator(numerator), m_denominator(denominator)
{
BLI_assert(numerator > 0);
BLI_assert(numerator < denominator);
}
void compute_total_and_usable_slots(uint32_t min_total_slots,
uint32_t min_usable_slots,
uint32_t *r_total_slots,
uint32_t *r_usable_slots) const
{
BLI_assert(is_power_of_2_i((int)min_total_slots));
uint32_t total_slots = this->compute_total_slots(min_usable_slots, m_numerator, m_denominator);
total_slots = std::max(total_slots, min_total_slots);
uint32_t usable_slots = floor_multiplication_with_fraction(
total_slots, m_numerator, m_denominator);
BLI_assert(min_usable_slots <= usable_slots);
*r_total_slots = total_slots;
*r_usable_slots = usable_slots;
}
static constexpr uint32_t compute_total_slots(uint32_t min_usable_slots,
uint8_t numerator,
uint8_t denominator)
{
return total_slot_amount_for_usable_slots(min_usable_slots, numerator, denominator);
}
};
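A small worked example (numbers chosen for illustration): with a maximum load factor of 1/2 and a request for at least 10 usable slots, the computation resolves as follows.

  LoadFactor load_factor(1, 2);

  uint32_t total_slots, usable_slots;
  load_factor.compute_total_and_usable_slots(4, 10, &total_slots, &usable_slots);

  /* ceil(10 * 2 / 1) = 20, rounded up to the next power of two -> 32 total slots.
   * floor(32 * 1 / 2) = 16 usable slots before the table has to grow again. */
  BLI_assert(total_slots == 32);
  BLI_assert(usable_slots == 16);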
/** \} */
/* -------------------------------------------------------------------- */
/** \name Intrusive Key Info
*
* A hash table slot has to maintain state about whether the slot is empty, occupied or removed.
* Usually, this state information is stored in its own variable. While it only needs two bits in
* theory, in practice often 4 or 8 bytes are used, due to alignment requirements.
*
* One solution to deal with this problem is to embed the state information in the key. That means,
* two values of the key type are selected to indicate whether the slot is empty or removed.
*
* The classes below tell a slot implementation which special key values it can use. They can be
* used as KeyInfo in slot types like IntrusiveSetSlot and IntrusiveMapSlot.
*
* A KeyInfo type has to implement a couple of static methods that are described in
* TemplatedKeyInfo.
*
* \{ */
/**
* The template arguments EmptyValue and RemovedValue define which special values are used. This can be
* used when a hash table has integer keys and there are two specific integers that will never be
* used as keys.
*/
template<typename Key, Key EmptyValue, Key RemovedValue> struct TemplatedKeyInfo {
/**
* Get the value that indicates that the slot is empty. This is used to indicate new slots.
*/
static Key get_empty()
{
return EmptyValue;
}
/**
* Modify the given key so that it represents a removed slot.
*/
static void remove(Key &key)
{
key = RemovedValue;
}
/**
* Return true, when the given key indicates that the slot is empty.
*/
static bool is_empty(const Key &key)
{
return key == EmptyValue;
}
/**
* Return true, when the given key indicates that the slot is removed.
*/
static bool is_removed(const Key &key)
{
return key == RemovedValue;
}
/**
* Return true, when the key is valid, i.e. it can be contained in an occupied slot.
*/
static bool is_not_empty_or_removed(const Key &key)
{
return key != EmptyValue && key != RemovedValue;
}
};
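For example, if the values -1 and -2 can never occur as real keys, a KeyInfo for 32-bit integer keys could be declared like this (illustrative; the alias name is made up):

  /* -1 marks empty slots, -2 marks removed slots; neither may be used as an actual key. */
  using NonNegativeIntKeyInfo = TemplatedKeyInfo<int32_t, -1, -2>;

  /* The alias can then be passed as the KeyInfo parameter of slot types such as
   * IntrusiveSetSlot or IntrusiveMapSlot. */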
/**
* 0xffff...ffff indicates an empty slot.
* 0xffff...fffe indicates a removed slot.
*
* Those specific values are used, because with them a single comparison is enough to check whether
* a slot is occupied. The keys 0x0000...0000 and 0x0000...0001 also satisfy this constraint.
* However, nullptr is much more likely to be used as a valid key.
*/
template<typename Pointer> struct PointerKeyInfo {
static Pointer get_empty()
{
return (Pointer)UINTPTR_MAX;
}
static void remove(Pointer &pointer)
{
pointer = (Pointer)(UINTPTR_MAX - 1);
}
static bool is_empty(Pointer pointer)
{
return (uintptr_t)pointer == UINTPTR_MAX;
}
static bool is_removed(Pointer pointer)
{
return (uintptr_t)pointer == UINTPTR_MAX - 1;
}
static bool is_not_empty_or_removed(Pointer pointer)
{
return (uintptr_t)pointer < UINTPTR_MAX - 1;
}
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Hash Table Stats
*
* A utility class that makes it easier for hash table implementations to provide statistics to the
* developer. These statistics can be helpful when trying to figure out why a hash table is slow.
*
* To use this utility, a hash table has to implement various methods that are mentioned below.
*
* \{ */
class HashTableStats {
private:
Vector<uint32_t> m_keys_by_collision_count;
uint32_t m_total_collisions;
float m_average_collisions;
uint32_t m_size;
uint32_t m_capacity;
uint32_t m_removed_amount;
float m_load_factor;
float m_removed_load_factor;
uint32_t m_size_per_element;
uint32_t m_size_in_bytes;
const void *m_address;
public:
/**
* Requires that the hash table has the following methods:
* - count_collisions(key) -> uint32_t
* - size() -> uint32_t
* - capacity() -> uint32_t
* - removed_amount() -> uint32_t
* - size_per_element() -> uint32_t
* - size_in_bytes() -> uint32_t
*/
template<typename HashTable, typename Keys>
HashTableStats(const HashTable &hash_table, const Keys &keys)
{
m_total_collisions = 0;
m_size = hash_table.size();
m_capacity = hash_table.capacity();
m_removed_amount = hash_table.removed_amount();
m_size_per_element = hash_table.size_per_element();
m_size_in_bytes = hash_table.size_in_bytes();
m_address = (const void *)&hash_table;
for (const auto &key : keys) {
uint32_t collisions = hash_table.count_collisions(key);
if (m_keys_by_collision_count.size() <= collisions) {
m_keys_by_collision_count.append_n_times(
0, collisions - m_keys_by_collision_count.size() + 1);
}
m_keys_by_collision_count[collisions]++;
m_total_collisions += collisions;
}
m_average_collisions = (m_size == 0) ? 0 : (float)m_total_collisions / (float)m_size;
m_load_factor = (float)m_size / (float)m_capacity;
m_removed_load_factor = (float)m_removed_amount / (float)m_capacity;
}
void print(StringRef name = "")
{
std::cout << "Hash Table Stats: " << name << "\n";
std::cout << " Address: " << m_address << "\n";
std::cout << " Total Slots: " << m_capacity << "\n";
std::cout << " Occupied Slots: " << m_size << " (" << m_load_factor * 100.0f << " %)\n";
std::cout << " Removed Slots: " << m_removed_amount << " (" << m_removed_load_factor * 100.0f
<< " %)\n";
char memory_size_str[15];
BLI_str_format_byte_unit(memory_size_str, m_size_in_bytes, true);
std::cout << " Size: ~" << memory_size_str << "\n";
std::cout << " Size per Slot: " << m_size_per_element << " bytes\n";
std::cout << " Average Collisions: " << m_average_collisions << "\n";
for (uint32_t collision_count : m_keys_by_collision_count.index_range()) {
std::cout << " " << collision_count
<< " Collisions: " << m_keys_by_collision_count[collision_count] << "\n";
}
}
};
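An illustrative sketch of how a table could expose these statistics; the wrapper function is made up and assumes the table implements the methods listed above.

  template<typename HashTable, typename Keys>
  void print_table_stats(const HashTable &table, const Keys &keys, StringRef name)
  {
    HashTableStats stats(table, keys);
    stats.print(name);
  }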
/** \} */
/**
* This struct provides an equality operator that returns true for all objects that compare equal
* when one would use the `==` operator. This is different from std::equal_to<T>, because that
* requires the parameters to be of type T. Our hash tables support lookups using other types
* without conversion; therefore, DefaultEquality needs to be more generic.
*/
struct DefaultEquality {
template<typename T1, typename T2> bool operator()(const T1 &a, const T2 &b) const
{
return a == b;
}
};
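A quick illustration of why the generic operator matters (assuming std::string converts implicitly to StringRef for the comparison):

  DefaultEquality is_equal;
  std::string stored_key = "radius";
  StringRef query = "radius";

  /* Compiles because operator() is templated over both argument types;
   * std::equal_to<std::string> would force a conversion of the query first. */
  bool matches = is_equal(query, stored_key);
  BLI_assert(matches);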
} // namespace BLI
#endif /* __BLI_OPEN_ADDRESSING_HH__ */


@@ -20,9 +20,37 @@
/** \file
* \ingroup bli
*
* Allows passing iterators over ranges of integers without actually allocating an array or passing
* separate values. A range always has a step of one. If other step sizes are required in some
* cases, a separate data structure should be used.
* A `BLI::IndexRange` wraps an interval of non-negative integers. It can be used to reference
* consecutive elements in an array. Furthermore, it can make for loops more convenient and less
* error prone, especially when using nested loops.
*
* I'd argue that the second of the two loops below is more readable and less error prone than the
* first one. That is not necessarily always the case, but often it is.
*
* for (uint i = 0; i < 10; i++) {
* for (uint j = 0; j < 20; j++) {
* for (uint k = 0; k < 30; k++) {
*
* for (uint i : IndexRange(10)) {
* for (uint j : IndexRange(20)) {
* for (uint k : IndexRange(30)) {
*
* Some containers like BLI::Vector have an index_range() method. This will return the IndexRange
* that contains all indices that can be used to access the container. This is particularly useful
* when you want to iterate over the indices and the elements (much like Python's enumerate(), just
* worse). Again, I think the second example here is better:
*
* for (uint i = 0; i < my_vector_with_a_long_name.size(); i++) {
* do_something(i, my_vector_with_a_long_name[i]);
*
* for (uint i : my_vector_with_a_long_name.index_range()) {
* do_something(i, my_vector_with_a_long_name[i]);
*
* Ideally this could be even closer to Python's enumerate(). We might get that in the
* future with newer C++ versions.
*
* One other important feature is the as_array_ref method. This method returns an ArrayRef<uint>
* that contains the interval as individual numbers.
*/
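A short sketch of the interval semantics and the as_array_ref method mentioned above (illustrative only):

  /* The interval [3, 7): the indices 3, 4, 5, 6. */
  IndexRange range(3, 4);
  BLI_assert(range.size() == 4);
  BLI_assert(range.contains(6));

  /* The same interval as actual integers, e.g. for functions expecting ArrayRef<uint>. */
  ArrayRef<uint> indices = range.as_array_ref();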
#include <algorithm>
@@ -182,13 +210,15 @@ class IndexRange {
return value >= m_start && value < m_start + m_size;
}
/**
* Returns a new range that contains a subinterval of the current one.
*/
IndexRange slice(uint start, uint size) const
{
uint new_start = m_start + start;
BLI_assert(new_start + size <= m_start + m_size || size == 0);
return IndexRange(new_start, size);
}
IndexRange slice(IndexRange range) const
{
return this->slice(range.start(), range.size());


@@ -130,7 +130,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
template<typename T> MutableArrayRef<T> construct_array_copy(ArrayRef<T> src)
{
MutableArrayRef<T> dst = this->allocate_array<T>(src.size());
uninitialized_copy_n(src.begin(), src.size(), dst.begin());
uninitialized_copy_n(src.data(), src.size(), dst.data());
return dst;
}
@@ -186,7 +186,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
m_unused_borrowed_buffers.append(ArrayRef<char>((char *)buffer, size));
}
template<uint Size, uint Alignment>
template<size_t Size, size_t Alignment>
void provide_buffer(AlignedBuffer<Size, Alignment> &aligned_buffer)
{
this->provide_buffer(aligned_buffer.ptr(), Size);
@@ -208,7 +208,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
uint size_in_bytes = power_of_2_min_u(std::max(min_allocation_size, m_next_min_alloc_size));
m_next_min_alloc_size = size_in_bytes * 2;
void *buffer = m_allocator.allocate(size_in_bytes, __func__);
void *buffer = m_allocator.allocate(size_in_bytes, 8, AT);
m_owned_buffers.append(buffer);
m_current_begin = (uintptr_t)buffer;
m_current_end = m_current_begin + size_in_bytes;


@@ -20,8 +20,10 @@
/** \file
* \ingroup bli
*
* The purpose of this wrapper is just to make it more comfortable to iterate of ListBase
* instances, that are used in many places in Blender.
* `BLI::ListBaseWrapper` is a typed wrapper for the ListBase struct. That makes it safer and more
* convenient to use in C++ in some cases. However, if you find yourself iterating over a linked
* list a lot, consider converting it into a vector for further processing. This improves
* performance and debuggability.
*/
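A rough usage sketch (illustrative; it assumes the wrapper is constructed from a ListBase pointer and that the list links are of type Object):

  void print_object_names(ListBase *objects)
  {
    for (Object *object : BLI::ListBaseWrapper<Object>(objects)) {
      std::cout << object->id.name << "\n";
    }
  }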
#include "BLI_listbase.h"

File diff suppressed because it is too large.


@@ -0,0 +1,361 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __BLI_MAP_SLOTS_HH__
#define __BLI_MAP_SLOTS_HH__
/** \file
* \ingroup bli
*
* This file contains slot types that are supposed to be used with BLI::Map.
*
* Every slot type has to be able to hold a value of type Key, a value of type Value and state
* information. A map slot has three possible states: empty, occupied and removed.
*
* When a slot is occupied, it stores instances of type Key and Value.
*
* A map slot type has to implement a couple of methods that are explained in SimpleMapSlot.
* A slot type is assumed to be trivially destructible when it is not in the occupied state, so
* the destructor might not be called in that case.
*
* Possible Improvements:
* - Implement slot type that stores the hash.
*/
#include "BLI_memory_utils.hh"
namespace BLI {
/**
* The simplest possible map slot. It stores the slot state and the optional key and value
* instances in separate variables. Depending on the alignment requirement of the key and value,
* many bytes might be wasted.
*/
template<typename Key, typename Value> class SimpleMapSlot {
private:
enum State : uint8_t {
Empty = 0,
Occupied = 1,
Removed = 2,
};
State m_state;
AlignedBuffer<sizeof(Key), alignof(Key)> m_key_buffer;
AlignedBuffer<sizeof(Value), alignof(Value)> m_value_buffer;
public:
/**
* After the default constructor has run, the slot has to be in the empty state.
*/
SimpleMapSlot()
{
m_state = Empty;
}
/**
* The destructor also has to destruct the key and value, if the slot is currently occupied.
*/
~SimpleMapSlot()
{
if (m_state == Occupied) {
this->key()->~Key();
this->value()->~Value();
}
}
/**
* The copy constructor has to copy the state. If the other slot was occupied, copies of the key
* and value have to be made as well.
*/
SimpleMapSlot(const SimpleMapSlot &other)
{
m_state = other.m_state;
if (other.m_state == Occupied) {
new (this->key()) Key(*other.key());
new (this->value()) Value(*other.value());
}
}
/**
* The move constructor has to copy the state. If the other slot was occupied, the key and value
* from the other slot have to be moved as well. The other slot stays in the state it was in
* before. Its optionally stored key and value remain in a moved-from state.
*/
SimpleMapSlot(SimpleMapSlot &&other) noexcept
{
m_state = other.m_state;
if (other.m_state == Occupied) {
new (this->key()) Key(std::move(*other.key()));
new (this->value()) Value(std::move(*other.value()));
}
}
/**
* Returns a non-const pointer to the position where the key is stored.
*/
Key *key()
{
return (Key *)m_key_buffer.ptr();
}
/**
* Returns a const pointer to the position where the key is stored.
*/
const Key *key() const
{
return (const Key *)m_key_buffer.ptr();
}
/**
* Returns a non-const pointer to the position where the value is stored.
*/
Value *value()
{
return (Value *)m_value_buffer.ptr();
}
/**
* Returns a const pointer to the position where the value is stored.
*/
const Value *value() const
{
return (const Value *)m_value_buffer.ptr();
}
/**
* Returns true if the slot currently contains a key and a value.
*/
bool is_occupied() const
{
return m_state == Occupied;
}
/**
* Returns true if the slot is empty, i.e. it does not contain a key and is not in removed state.
*/
bool is_empty() const
{
return m_state == Empty;
}
/**
* Returns the hash of the currently stored key. In this simple map slot implementation, we just
* compute the hash here. Other implementations might store the hash in the slot instead.
*/
template<typename Hash> uint32_t get_hash(const Hash &hash)
{
BLI_assert(this->is_occupied());
return hash(*this->key());
}
/**
* Move the other slot into this slot and destruct it. We do destruction here, because this way
* we can avoid a comparison with the state, since we know the slot is occupied.
*/
void relocate_occupied_here(SimpleMapSlot &other, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
m_state = Occupied;
new (this->key()) Key(std::move(*other.key()));
new (this->value()) Value(std::move(*other.value()));
other.key()->~Key();
other.value()->~Value();
}
/**
* Returns true, when this slot is occupied and contains a key that compares equal to the given
* key. The hash can be used by other slot implementations to determine inequality faster.
*/
template<typename ForwardKey, typename IsEqual>
bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
{
if (m_state == Occupied) {
return is_equal(key, *this->key());
}
return false;
}
/**
* Change the state of this slot from empty/removed to occupied. The key/value has to be
* constructed by calling the constructor with the given key/value as parameter.
*/
template<typename ForwardKey, typename ForwardValue>
void occupy(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
{
BLI_assert(!this->is_occupied());
this->occupy_without_value(std::forward<ForwardKey>(key), hash);
new (this->value()) Value(std::forward<ForwardValue>(value));
}
/**
* Change the state of this slot from empty/removed to occupied, but leave the value
* uninitialized. The caller is responsible for constructing the value afterwards.
*/
template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
m_state = Occupied;
new (this->key()) Key(std::forward<ForwardKey>(key));
}
/**
* Change the state of this slot from occupied to removed. The key and value have to be
* destructed as well.
*/
void remove()
{
BLI_assert(this->is_occupied());
m_state = Removed;
this->key()->~Key();
this->value()->~Value();
}
};
/**
* An IntrusiveMapSlot uses two special values of the key to indicate whether the slot is empty or
* removed. This saves some memory in all cases and is more efficient in many cases. The KeyInfo
* type indicates which specific values are used. An example for a KeyInfo implementation is
* PointerKeyInfo.
*
* The special key values are expected to be trivially destructible.
*/
template<typename Key, typename Value, typename KeyInfo> class IntrusiveMapSlot {
private:
Key m_key = KeyInfo::get_empty();
AlignedBuffer<sizeof(Value), alignof(Value)> m_value_buffer;
public:
IntrusiveMapSlot() = default;
~IntrusiveMapSlot()
{
if (KeyInfo::is_not_empty_or_removed(m_key)) {
this->value()->~Value();
}
}
IntrusiveMapSlot(const IntrusiveMapSlot &other) : m_key(other.m_key)
{
if (KeyInfo::is_not_empty_or_removed(m_key)) {
new (this->value()) Value(*other.value());
}
}
IntrusiveMapSlot(IntrusiveMapSlot &&other) noexcept : m_key(other.m_key)
{
if (KeyInfo::is_not_empty_or_removed(m_key)) {
new (this->value()) Value(std::move(*other.value()));
}
}
Key *key()
{
return &m_key;
}
const Key *key() const
{
return &m_key;
}
Value *value()
{
return (Value *)m_value_buffer.ptr();
}
const Value *value() const
{
return (const Value *)m_value_buffer.ptr();
}
bool is_occupied() const
{
return KeyInfo::is_not_empty_or_removed(m_key);
}
bool is_empty() const
{
return KeyInfo::is_empty(m_key);
}
template<typename Hash> uint32_t get_hash(const Hash &hash)
{
BLI_assert(this->is_occupied());
return hash(*this->key());
}
void relocate_occupied_here(IntrusiveMapSlot &other, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
m_key = std::move(other.m_key);
new (this->value()) Value(std::move(*other.value()));
other.m_key.~Key();
other.value()->~Value();
}
template<typename ForwardKey, typename IsEqual>
bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
{
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
return is_equal(key, m_key);
}
template<typename ForwardKey, typename ForwardValue>
void occupy(ForwardKey &&key, ForwardValue &&value, uint32_t hash)
{
BLI_assert(!this->is_occupied());
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
this->occupy_without_value(std::forward<ForwardKey>(key), hash);
new (this->value()) Value(std::forward<ForwardValue>(value));
}
template<typename ForwardKey> void occupy_without_value(ForwardKey &&key, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
m_key = std::forward<ForwardKey>(key);
}
void remove()
{
BLI_assert(this->is_occupied());
KeyInfo::remove(m_key);
this->value()->~Value();
}
};
template<typename Key, typename Value> struct DefaultMapSlot;
/**
 * Use SimpleMapSlot by default, because it is the smallest slot type that works for all keys.
*/
template<typename Key, typename Value> struct DefaultMapSlot {
using type = SimpleMapSlot<Key, Value>;
};
/**
* Use a special slot type for pointer keys, because we can store whether a slot is empty or
* removed with special pointer values.
*/
template<typename Key, typename Value> struct DefaultMapSlot<Key *, Value> {
using type = IntrusiveMapSlot<Key *, Value, PointerKeyInfo<Key *>>;
};
} // namespace BLI
#endif /* __BLI_MAP_SLOTS_HH__ */

View File

@@ -23,6 +23,7 @@
*/
#include "BLI_math_inline.h"
#include "BLI_utildefines.h"
#ifdef __cplusplus
extern "C" {

View File

@@ -21,71 +21,191 @@
* \ingroup bli
*/
#include <algorithm>
#include <memory>
#include "BLI_utildefines.h"
namespace BLI {
using std::copy;
using std::copy_n;
using std::uninitialized_copy;
using std::uninitialized_copy_n;
using std::uninitialized_fill;
using std::uninitialized_fill_n;
template<typename T> void construct_default(T *ptr)
/**
* Call the default constructor on n consecutive elements. For trivially constructible types, this
* does nothing.
*
* Before:
* ptr: uninitialized
* After:
* ptr: initialized
*/
template<typename T> void default_construct_n(T *ptr, uint n)
{
new (ptr) T();
}
template<typename T> void destruct(T *ptr)
{
ptr->~T();
  /* This is not strictly necessary, because the loop below will be optimized away anyway. It is
   * nice to make this behavior explicit, though. */
if (std::is_trivially_constructible<T>::value) {
return;
}
for (uint i = 0; i < n; i++) {
new (ptr + i) T;
}
}
/**
* Call the destructor on n consecutive values. For trivially destructible types, this does
* nothing.
*
* Before:
* ptr: initialized
* After:
* ptr: uninitialized
*/
template<typename T> void destruct_n(T *ptr, uint n)
{
  /* This is not strictly necessary, because the loop below will be optimized away anyway. It is
   * nice to make this behavior explicit, though. */
if (std::is_trivially_destructible<T>::value) {
return;
}
for (uint i = 0; i < n; i++) {
ptr[i].~T();
}
}
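/* Minimal usage sketch (illustrative, not part of the header): default-construct objects into
 * raw, properly aligned storage and destroy them again afterwards. Assumes <string> is available
 * and that the storage is large and aligned enough for the constructed type. */
inline void default_construct_example()
{
  alignas(std::string) char raw[sizeof(std::string) * 3];
  std::string *strings = reinterpret_cast<std::string *>(raw);
  default_construct_n(strings, 3); /* Three empty std::string instances now live in raw. */
  strings[0] = "hello";
  destruct_n(strings, 3); /* The storage holds uninitialized bytes again. */
}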
/**
* Copy n values from src to dst.
*
* Before:
* src: initialized
* dst: initialized
* After:
* src: initialized
* dst: initialized
*/
template<typename T> void initialized_copy_n(const T *src, uint n, T *dst)
{
for (uint i = 0; i < n; i++) {
dst[i] = src[i];
}
}
/**
* Copy n values from src to dst.
*
* Before:
* src: initialized
* dst: uninitialized
* After:
* src: initialized
* dst: initialized
*/
template<typename T> void uninitialized_copy_n(const T *src, uint n, T *dst)
{
for (uint i = 0; i < n; i++) {
new (dst + i) T(src[i]);
}
}
/**
* Move n values from src to dst.
*
* Before:
* src: initialized
* dst: initialized
* After:
* src: initialized, moved-from
* dst: initialized
*/
template<typename T> void initialized_move_n(T *src, uint n, T *dst)
{
for (uint i = 0; i < n; i++) {
dst[i] = std::move(src[i]);
}
}
/**
* Move n values from src to dst.
*
* Before:
* src: initialized
* dst: uninitialized
* After:
* src: initialized, moved-from
* dst: initialized
*/
template<typename T> void uninitialized_move_n(T *src, uint n, T *dst)
{
std::uninitialized_copy_n(std::make_move_iterator(src), n, dst);
for (uint i = 0; i < n; i++) {
new (dst + i) T(std::move(src[i]));
}
}
template<typename T> void move_n(T *src, uint n, T *dst)
/**
* Relocate n values from src to dst. Relocation is a move followed by destruction of the src
* value.
*
* Before:
* src: initialized
* dst: initialized
* After:
* src: uninitialized
* dst: initialized
*/
template<typename T> void initialized_relocate_n(T *src, uint n, T *dst)
{
std::copy_n(std::make_move_iterator(src), n, dst);
}
template<typename T> void uninitialized_relocate(T *src, T *dst)
{
new (dst) T(std::move(*src));
destruct(src);
initialized_move_n(src, n, dst);
destruct_n(src, n);
}
/**
* Relocate n values from src to dst. Relocation is a move followed by destruction of the src
* value.
*
* Before:
* src: initialized
 *   dst: uninitialized
* After:
* src: uninitialized
* dst: initialized
*/
template<typename T> void uninitialized_relocate_n(T *src, uint n, T *dst)
{
uninitialized_move_n(src, n, dst);
destruct_n(src, n);
}
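/* Hedged sketch of a typical use of uninitialized_relocate_n: growing a heap buffer. The old
 * elements are moved into the new, still uninitialized storage and destructed in place, so the
 * old allocation can be freed without running destructors again. The function name is
 * illustrative; this is not meant to mirror the actual BLI::Vector code. */
template<typename T> T *grow_buffer_sketch(T *old_begin, uint old_size, uint new_capacity)
{
  T *new_begin = static_cast<T *>(::operator new(sizeof(T) * new_capacity));
  uninitialized_relocate_n(old_begin, old_size, new_begin);
  ::operator delete(old_begin);
  return new_begin;
}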
template<typename T> void relocate(T *src, T *dst)
/**
* Copy the value to n consecutive elements.
*
* Before:
* dst: initialized
* After:
* dst: initialized
*/
template<typename T> void initialized_fill_n(T *dst, uint n, const T &value)
{
*dst = std::move(*src);
destruct(src);
for (uint i = 0; i < n; i++) {
dst[i] = value;
}
}
template<typename T> void relocate_n(T *src, uint n, T *dst)
/**
* Copy the value to n consecutive elements.
*
* Before:
* dst: uninitialized
* After:
* dst: initialized
*/
template<typename T> void uninitialized_fill_n(T *dst, uint n, const T &value)
{
move_n(src, n, dst);
destruct_n(src, n);
for (uint i = 0; i < n; i++) {
new (dst + i) T(value);
}
}
/**
 * The same as std::make_unique. This can be removed when we start using C++14.
*/
template<typename T, typename... Args> std::unique_ptr<T> make_unique(Args &&... args)
{
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
@@ -98,13 +218,24 @@ template<typename T> struct DestructValueAtAddress {
}
};
/**
* A destruct_ptr is like unique_ptr, but it will only call the destructor and will not free the
* memory. This is useful when using custom allocators.
*/
template<typename T> using destruct_ptr = std::unique_ptr<T, DestructValueAtAddress<T>>;
template<uint Size, uint Alignment> class alignas(Alignment) AlignedBuffer {
/**
* An `AlignedBuffer` is simply a byte array with the given size and alignment. The buffer will
* not be initialized by the default constructor.
*
* This can be used to reserve memory for C++ objects whose lifetime is different from the
* lifetime of the object they are embedded in. It's used by containers with small buffer
* optimization and hash table implementations.
*/
template<size_t Size, size_t Alignment> class alignas(Alignment) AlignedBuffer {
private:
/* Don't create an empty array. This causes problems with some compilers. */
static constexpr uint ActualSize = (Size > 0) ? Size : 1;
char m_buffer[ActualSize];
char m_buffer[(Size > 0) ? Size : 1];
public:
void *ptr()

View File

@@ -1,316 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __BLI_OPEN_ADDRESSING_HH__
#define __BLI_OPEN_ADDRESSING_HH__
/** \file
* \ingroup bli
*
* This class offers a useful abstraction for other containers that implement hash tables using
* open addressing. It handles the following aspects:
* - Allocation and deallocation of the open addressing array.
* - Optional small object optimization.
* - Keeps track of how many elements and dummies are in the table.
*
* The nice thing about this abstraction is that it does not get in the way of any performance
* optimizations. The data that is actually stored in the table is still fully defined by the
* actual hash table implementation.
*/
#include <cmath>
#include "BLI_allocator.hh"
#include "BLI_array.hh"
#include "BLI_math_base.h"
#include "BLI_memory_utils.hh"
#include "BLI_utildefines.h"
namespace BLI {
/** \name Constexpr utility functions.
* \{ */
inline constexpr int is_power_of_2_i_constexpr(int n)
{
return (n & (n - 1)) == 0;
}
inline constexpr uint32_t log2_floor_u_constexpr(uint32_t x)
{
return x <= 1 ? 0 : 1 + log2_floor_u_constexpr(x >> 1);
}
inline constexpr uint32_t log2_ceil_u_constexpr(uint32_t x)
{
return (is_power_of_2_i_constexpr((int)x)) ? log2_floor_u_constexpr(x) :
log2_floor_u_constexpr(x) + 1;
}
template<typename IntT> inline constexpr IntT ceil_division(IntT x, IntT y)
{
BLI_STATIC_ASSERT(!std::is_signed<IntT>::value, "");
return x / y + ((x % y) != 0);
}
template<typename IntT> inline constexpr IntT floor_division(IntT x, IntT y)
{
BLI_STATIC_ASSERT(!std::is_signed<IntT>::value, "");
return x / y;
}
inline constexpr uint8_t compute_item_exponent(uint32_t min_usable_slots,
uint32_t slots_per_item,
uint32_t max_load_factor_numerator,
uint32_t max_load_factor_denominator)
{
// uint64_t min_total_slots = ceil_division((uint64_t)min_usable_slots *
// (uint64_t)max_load_factor_denominator,
// (uint64_t)max_load_factor_numerator);
// uint32_t min_total_items = (uint32_t)ceil_division(min_total_slots, (uint64_t)slots_per_item);
// uint8_t item_exponent = (uint8_t)log2_ceil_u_constexpr(min_total_items);
// return item_exponent;
return (uint8_t)log2_ceil_u_constexpr((uint32_t)ceil_division(
ceil_division((uint64_t)min_usable_slots * (uint64_t)max_load_factor_denominator,
(uint64_t)max_load_factor_numerator),
(uint64_t)slots_per_item));
}
/** \} */
template<typename Item,
uint32_t MinUsableSlotsInSmallStorage = 1,
typename Allocator = GuardedAllocator>
class OpenAddressingArray {
private:
static constexpr uint32_t s_max_load_factor_numerator = 1;
static constexpr uint32_t s_max_load_factor_denominator = 2;
static constexpr uint32_t s_slots_per_item = Item::slots_per_item;
static constexpr uint8_t s_small_storage_item_exponent = compute_item_exponent(
MinUsableSlotsInSmallStorage,
s_slots_per_item,
s_max_load_factor_numerator,
s_max_load_factor_denominator);
static constexpr uint32_t s_items_in_small_storage = 1u << s_small_storage_item_exponent;
/* Invariants:
* 2^m_item_exponent = m_item_amount
* m_item_amount * s_slots_per_item = m_slots_total
* m_slot_mask = m_slots_total - 1
* m_slots_set_or_dummy < m_slots_total
*/
/* Number of items in the hash table. Must be a power of two. */
uint32_t m_item_amount;
/* Exponent of the current item amount. */
uint8_t m_item_exponent;
/* Number of elements that could be stored in the table when the load factor is 1. */
uint32_t m_slots_total;
/* Number of elements that are not empty. */
uint32_t m_slots_set_or_dummy;
/* Number of dummy entries. */
uint32_t m_slots_dummy;
/* Max number of slots that can be non-empty according to the load factor. */
uint32_t m_slots_usable;
/* Can be used to map a hash value into the range of valid slot indices. */
uint32_t m_slot_mask;
Array<Item, s_items_in_small_storage, Allocator> m_items;
public:
explicit OpenAddressingArray(uint8_t item_exponent = s_small_storage_item_exponent)
{
m_item_exponent = item_exponent;
m_item_amount = 1u << item_exponent;
m_slots_total = m_item_amount * s_slots_per_item;
m_slot_mask = m_slots_total - 1;
m_slots_set_or_dummy = 0;
m_slots_dummy = 0;
m_slots_usable = (uint32_t)floor_division((uint64_t)m_slots_total *
(uint64_t)s_max_load_factor_numerator,
(uint64_t)s_max_load_factor_denominator);
m_items = Array<Item, s_items_in_small_storage, Allocator>(m_item_amount);
}
~OpenAddressingArray() = default;
OpenAddressingArray(const OpenAddressingArray &other) = default;
OpenAddressingArray(OpenAddressingArray &&other) noexcept
{
m_slots_total = other.m_slots_total;
m_slots_set_or_dummy = other.m_slots_set_or_dummy;
m_slots_dummy = other.m_slots_dummy;
m_slots_usable = other.m_slots_usable;
m_slot_mask = other.m_slot_mask;
m_item_amount = other.m_item_amount;
m_item_exponent = other.m_item_exponent;
m_items = std::move(other.m_items);
other.~OpenAddressingArray();
new (&other) OpenAddressingArray();
}
OpenAddressingArray &operator=(const OpenAddressingArray &other)
{
if (this == &other) {
return *this;
}
this->~OpenAddressingArray();
new (this) OpenAddressingArray(other);
return *this;
}
OpenAddressingArray &operator=(OpenAddressingArray &&other)
{
if (this == &other) {
return *this;
}
this->~OpenAddressingArray();
new (this) OpenAddressingArray(std::move(other));
return *this;
}
Allocator &allocator()
{
return m_items.allocator();
}
/* Prepare a new array that can hold a minimum of min_usable_slots elements. All entries are
* empty. */
OpenAddressingArray init_reserved(uint32_t min_usable_slots) const
{
uint8_t item_exponent = compute_item_exponent(min_usable_slots,
s_slots_per_item,
s_max_load_factor_numerator,
s_max_load_factor_denominator);
OpenAddressingArray grown(item_exponent);
grown.m_slots_set_or_dummy = this->slots_set();
return grown;
}
/**
* Amount of items in the array times the number of slots per item.
*/
uint32_t slots_total() const
{
return m_slots_total;
}
/**
* Amount of slots that are initialized with some value that is not empty or dummy.
*/
uint32_t slots_set() const
{
return m_slots_set_or_dummy - m_slots_dummy;
}
/**
* Amount of slots that can be used before the array should grow.
*/
uint32_t slots_usable() const
{
return m_slots_usable;
}
/**
* Update the counters after one empty element is used for a newly added element.
*/
void update__empty_to_set()
{
m_slots_set_or_dummy++;
}
/**
* Update the counters after one previously dummy element becomes set.
*/
void update__dummy_to_set()
{
m_slots_dummy--;
}
/**
* Update the counters after one previously set element becomes a dummy.
*/
void update__set_to_dummy()
{
m_slots_dummy++;
}
/**
* Access the current slot mask for this array.
*/
uint32_t slot_mask() const
{
return m_slot_mask;
}
/**
* Access the item for a specific item index.
* Note: The item index is not necessarily the slot index.
*/
const Item &item(uint32_t item_index) const
{
return m_items[item_index];
}
Item &item(uint32_t item_index)
{
return m_items[item_index];
}
uint8_t item_exponent() const
{
return m_item_exponent;
}
uint32_t item_amount() const
{
return m_item_amount;
}
bool should_grow() const
{
return m_slots_set_or_dummy >= m_slots_usable;
}
Item *begin()
{
return m_items.begin();
}
Item *end()
{
return m_items.end();
}
const Item *begin() const
{
return m_items.begin();
}
const Item *end() const
{
return m_items.end();
}
};
} // namespace BLI
#endif /* __BLI_OPEN_ADDRESSING_HH__ */

View File

@@ -37,16 +37,6 @@ template<typename T> class Optional {
bool m_set;
public:
static Optional FromPointer(const T *ptr)
{
if (ptr == nullptr) {
return Optional();
}
else {
return Optional(*ptr);
}
}
Optional() : m_set(false)
{
}

View File

@@ -0,0 +1,250 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __BLI_PROBING_STRATEGIES_HH__
#define __BLI_PROBING_STRATEGIES_HH__
/** \file
* \ingroup bli
*
* This file implements different probing strategies. Those can be used by different hash table
* implementations like BLI::Set and BLI::Map. A probing strategy produces a sequence of values
* based on an initial hash value.
*
* A probing strategy has to implement the following methods:
* - Constructor(uint32_t hash): Start a new probing sequence based on the given hash.
* - get() const -> uint32_t: Get the current value in the sequence.
* - next() -> void: Update the internal state, so that the next value can be accessed with get().
* - linear_steps() -> uint32_t: Returns number of linear probing steps that should be done.
*
* Using linear probing steps between larger jumps can result in better performance, due to
 * improved cache usage. It's a way of getting the benefits of linear probing without the
* clustering issues. However, more linear steps can also make things slower when the initial hash
* produces many collisions.
*
 * Every probing strategy has to guarantee that every possible uint32_t is returned eventually.
* This is necessary for correctness. If this is not the case, empty slots might not be found.
*
* The SLOT_PROBING_BEGIN and SLOT_PROBING_END macros can be used to implement a loop that iterates
* over a probing sequence.
*
* Probing strategies can be evaluated with many different criteria. Different use cases often
* have different optimal strategies. Examples:
* - If the hash function generates a well distributed initial hash value, the constructor should
* be as short as possible. This is because the hash value can be used as slot index almost
* immediately, without too many collisions. This is also a perfect use case for linear steps.
* - If the hash function is bad, it can help if the probing strategy remixes the hash value,
* before the first slot is accessed.
* - Different next() methods can remix the hash value in different ways. Depending on which bits
* of the hash value contain the most information, different rehashing strategies work best.
* - When the hash table is very small, having a trivial hash function and then doing linear
* probing might work best.
*/
#include "BLI_sys_types.h"
namespace BLI {
/**
* The simplest probing strategy. It's bad in most cases, because it produces clusters in the hash
* table, which result in many collisions. However, if the hash function is very good or the hash
* table is small, this strategy might even work best.
*/
class LinearProbingStrategy {
private:
uint32_t m_hash;
public:
LinearProbingStrategy(uint32_t hash) : m_hash(hash)
{
}
void next()
{
m_hash++;
}
uint32_t get() const
{
return m_hash;
}
uint32_t linear_steps() const
{
return UINT32_MAX;
}
};
/**
* A slightly adapted quadratic probing strategy. The distance to the original slot increases
* quadratically. This method also leads to clustering. Another disadvantage is that not all bits
* of the original hash are used.
*
 * The distance i * i is not used, because it does not guarantee that every slot is hit.
* Instead (i * i + i) / 2 is used, which has this desired property.
*
* In the first few steps, this strategy can have good cache performance. It largely depends on how
* many keys fit into a cache line in the hash table.
*/
class QuadraticProbingStrategy {
private:
uint32_t m_original_hash;
uint32_t m_current_hash;
uint32_t m_iteration;
public:
QuadraticProbingStrategy(uint32_t hash)
: m_original_hash(hash), m_current_hash(hash), m_iteration(1)
{
}
void next()
{
m_current_hash = m_original_hash + ((m_iteration * m_iteration + m_iteration) >> 1);
m_iteration++;
}
uint32_t get() const
{
return m_current_hash;
}
uint32_t linear_steps() const
{
return 1;
}
};
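/* Small self-contained check (illustrative, not part of the header) of the property mentioned
 * above: the first 2^k triangular offsets (i * i + i) / 2 land on all 2^k slots of a
 * power-of-two table, so every slot is eventually hit. Assumes <cassert> and <vector> are
 * available. */
inline void triangular_probing_covers_all_slots(uint32_t hash)
{
  const uint32_t mask = 63; /* A table with 64 slots. */
  std::vector<bool> visited(mask + 1, false);
  for (uint32_t i = 0; i <= mask; i++) {
    uint32_t slot = (hash + ((i * i + i) >> 1)) & mask;
    assert(!visited[slot]); /* No offset lands on a slot that was already visited. */
    visited[slot] = true;
  }
}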
/**
* This is the probing strategy used by CPython (in 2020).
*
* It is very fast when the original hash value is good. If there are collisions, more bits of the
* hash value are taken into account.
*
* LinearSteps: Can be set to something larger than 1 for improved cache performance in some cases.
 * PreShuffle: When true, the initial call to next() will be done by the constructor. This can help
* when the hash function has put little information into the lower bits.
*/
template<uint32_t LinearSteps = 1, bool PreShuffle = false> class PythonProbingStrategy {
private:
uint32_t m_hash;
uint32_t m_perturb;
public:
PythonProbingStrategy(uint32_t hash) : m_hash(hash), m_perturb(hash)
{
if (PreShuffle) {
this->next();
}
}
void next()
{
m_perturb >>= 5;
m_hash = 5 * m_hash + 1 + m_perturb;
}
uint32_t get() const
{
return m_hash;
}
uint32_t linear_steps() const
{
return LinearSteps;
}
};
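/* Illustrative, self-contained check that the sequence above eventually visits every slot of a
 * small power-of-two table: once the perturbation reaches zero, h = 5 * h + 1 has full period
 * modulo a power of two, so the loop below terminates. Assumes <vector> is available. */
inline void python_probing_coverage_check(uint32_t hash)
{
  const uint32_t mask = 255;
  std::vector<bool> visited(mask + 1, false);
  uint32_t remaining = mask + 1;
  PythonProbingStrategy<> strategy(hash);
  while (remaining > 0) {
    uint32_t slot = strategy.get() & mask;
    if (!visited[slot]) {
      visited[slot] = true;
      remaining--;
    }
    strategy.next();
  }
}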
/**
* Similar to the Python probing strategy. However, it does a bit more shuffling in the next()
 * method. This way, more bits are taken into account earlier. After a couple of collisions (which
 * should happen rarely), it falls back to a sequence that hits every slot.
*/
template<uint32_t LinearSteps = 2, bool PreShuffle = false> class ShuffleProbingStrategy {
private:
uint32_t m_hash;
uint32_t m_perturb;
public:
ShuffleProbingStrategy(uint32_t hash) : m_hash(hash), m_perturb(hash)
{
if (PreShuffle) {
this->next();
}
}
void next()
{
if (m_perturb != 0) {
m_perturb >>= 10;
m_hash = ((m_hash >> 16) ^ m_hash) * 0x45d9f3b + m_perturb;
}
else {
m_hash = 5 * m_hash + 1;
}
}
uint32_t get() const
{
return m_hash;
}
uint32_t linear_steps() const
{
return LinearSteps;
}
};
/**
* Having a specified default is convenient.
*/
using DefaultProbingStrategy = PythonProbingStrategy<>;
/* Turning off clang format here, because otherwise it will mess up the alignment between the
* macros. */
// clang-format off
/**
* Both macros together form a loop that iterates over slot indices in a hash table with a
* power-of-two size.
*
 * You must not `break` out of this loop; only `return` is permitted. If you never return from
 * inside the loop, it will run forever. These loops should not be nested within the same
 * function.
*
* PROBING_STRATEGY: Class describing the probing strategy.
* HASH: The initial hash as produced by a hash function.
* MASK: A bit mask such that (hash & MASK) is a valid slot index.
* R_SLOT_INDEX: Name of the variable that will contain the slot index.
*/
#define SLOT_PROBING_BEGIN(PROBING_STRATEGY, HASH, MASK, R_SLOT_INDEX) \
PROBING_STRATEGY probing_strategy(HASH); \
do { \
uint32_t linear_offset = 0; \
uint32_t current_hash = probing_strategy.get(); \
do { \
uint32_t R_SLOT_INDEX = (current_hash + linear_offset) & MASK;
#define SLOT_PROBING_END() \
} while (++linear_offset < probing_strategy.linear_steps()); \
probing_strategy.next(); \
} while (true)
// clang-format on
} // namespace BLI
#endif /* __BLI_PROBING_STRATEGIES_HH__ */
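/* Hedged usage sketch of the probing macros above (illustrative; the real probing loops live in
 * BLI::Set and BLI::Map). This prints the first slot indices that the default strategy would
 * visit for a given hash in a table with 64 slots. Note that only `return` may leave the loop. */
#include <cstdio>
#include "BLI_probing_strategies.hh"

void print_probe_sequence(uint32_t hash)
{
  const uint32_t mask = 63;
  uint32_t visited = 0;
  SLOT_PROBING_BEGIN (BLI::DefaultProbingStrategy, hash, mask, slot_index) {
    printf("%u\n", (unsigned)slot_index);
    if (++visited == 10) {
      return;
    }
  }
  SLOT_PROBING_END();
}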

File diff suppressed because it is too large.

View File

@@ -0,0 +1,415 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __BLI_SET_SLOTS_HH__
#define __BLI_SET_SLOTS_HH__
/** \file
* \ingroup bli
*
* This file contains different slot types that are supposed to be used with BLI::Set.
*
* Every slot type has to be able to hold a value of the Key type and state information.
* A set slot has three possible states: empty, occupied and removed.
*
 * A slot stores an instance of type Key only while it is occupied.
*
* A set slot type has to implement a couple of methods that are explained in SimpleSetSlot.
 * A slot type is assumed to be trivially destructible when it is not in the occupied state, so
 * the destructor might not be called in that case.
*/
#include "BLI_memory_utils.hh"
#include "BLI_string_ref.hh"
namespace BLI {
/**
* The simplest possible set slot. It stores the slot state and the optional key instance in
* separate variables. Depending on the alignment requirement of the key, many bytes might be
* wasted.
*/
template<typename Key> class SimpleSetSlot {
private:
enum State : uint8_t {
Empty = 0,
Occupied = 1,
Removed = 2,
};
State m_state;
AlignedBuffer<sizeof(Key), alignof(Key)> m_buffer;
public:
/**
* After the default constructor has run, the slot has to be in the empty state.
*/
SimpleSetSlot()
{
m_state = Empty;
}
/**
* The destructor also has to destruct the key, if the slot is currently occupied.
*/
~SimpleSetSlot()
{
if (m_state == Occupied) {
this->key()->~Key();
}
}
/**
* The copy constructor has to copy the state. If the other slot was occupied, a copy of the key
* has to be made as well.
*/
SimpleSetSlot(const SimpleSetSlot &other)
{
m_state = other.m_state;
if (other.m_state == Occupied) {
new (this->key()) Key(*other.key());
}
}
/**
* The move constructor has to copy the state. If the other slot was occupied, the key from the
* other slot has to be moved as well. The other slot stays in the state it was in before. Its
* optionally stored key remains in a moved-from state.
*/
SimpleSetSlot(SimpleSetSlot &&other) noexcept
{
m_state = other.m_state;
if (other.m_state == Occupied) {
new (this->key()) Key(std::move(*other.key()));
}
}
/**
* Get a non-const pointer to the position where the key is stored.
*/
Key *key()
{
return (Key *)m_buffer.ptr();
}
/**
* Get a const pointer to the position where the key is stored.
*/
const Key *key() const
{
return (const Key *)m_buffer.ptr();
}
/**
* Return true if the slot currently contains a key.
*/
bool is_occupied() const
{
return m_state == Occupied;
}
/**
* Return true if the slot is empty, i.e. it does not contain a key and is not in removed state.
*/
bool is_empty() const
{
return m_state == Empty;
}
/**
* Return the hash of the currently stored key. In this simple set slot implementation, we just
* compute the hash here. Other implementations might store the hash in the slot instead.
*/
template<typename Hash> uint32_t get_hash(const Hash &hash) const
{
BLI_assert(this->is_occupied());
return hash(*this->key());
}
/**
* Move the other slot into this slot and destruct it. We do destruction here, because this way
* we can avoid a comparison with the state, since we know the slot is occupied.
*/
void relocate_occupied_here(SimpleSetSlot &other, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
m_state = Occupied;
new (this->key()) Key(std::move(*other.key()));
other.key()->~Key();
}
/**
* Return true, when this slot is occupied and contains a key that compares equal to the given
* key. The hash is used by other slot implementations to determine inequality faster.
*/
template<typename ForwardKey, typename IsEqual>
bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
{
if (m_state == Occupied) {
return is_equal(key, *this->key());
}
return false;
}
/**
* Change the state of this slot from empty/removed to occupied. The key has to be constructed
* by calling the constructor with the given key as parameter.
*/
template<typename ForwardKey> void occupy(ForwardKey &&key, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
m_state = Occupied;
new (this->key()) Key(std::forward<ForwardKey>(key));
}
/**
* Change the state of this slot from occupied to removed. The key has to be destructed as well.
*/
void remove()
{
BLI_assert(this->is_occupied());
m_state = Removed;
this->key()->~Key();
}
};
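/* Illustrative lifecycle of a single slot, exercising the API documented above. In practice this
 * is driven by BLI::Set internally, not by user code. Assumes <functional> and <string> are
 * available; the hash value is arbitrary here. */
inline void simple_set_slot_lifecycle_example()
{
  SimpleSetSlot<std::string> slot;
  uint32_t hash = 42; /* Would normally come from the set's hash function. */
  BLI_assert(slot.is_empty());
  slot.occupy(std::string("key"), hash);
  BLI_assert(slot.is_occupied());
  BLI_assert(slot.contains(std::string("key"), std::equal_to<std::string>(), hash));
  slot.remove();
  BLI_assert(!slot.is_occupied() && !slot.is_empty()); /* The slot is now in the removed state. */
}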
/**
 * This set slot implementation stores the hash of the key within the slot. This helps when
 * computing the hash or comparing keys is expensive.
*/
template<typename Key> class HashedSetSlot {
private:
enum State : uint8_t {
Empty = 0,
Occupied = 1,
Removed = 2,
};
uint32_t m_hash;
State m_state;
AlignedBuffer<sizeof(Key), alignof(Key)> m_buffer;
public:
HashedSetSlot()
{
m_state = Empty;
}
~HashedSetSlot()
{
if (m_state == Occupied) {
this->key()->~Key();
}
}
HashedSetSlot(const HashedSetSlot &other)
{
m_state = other.m_state;
if (other.m_state == Occupied) {
m_hash = other.m_hash;
new (this->key()) Key(*other.key());
}
}
HashedSetSlot(HashedSetSlot &&other) noexcept
{
m_state = other.m_state;
if (other.m_state == Occupied) {
m_hash = other.m_hash;
new (this->key()) Key(std::move(*other.key()));
}
}
Key *key()
{
return (Key *)m_buffer.ptr();
}
const Key *key() const
{
return (const Key *)m_buffer.ptr();
}
bool is_occupied() const
{
return m_state == Occupied;
}
bool is_empty() const
{
return m_state == Empty;
}
template<typename Hash> uint32_t get_hash(const Hash &UNUSED(hash)) const
{
BLI_assert(this->is_occupied());
return m_hash;
}
void relocate_occupied_here(HashedSetSlot &other, uint32_t hash)
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
m_state = Occupied;
m_hash = hash;
new (this->key()) Key(std::move(*other.key()));
other.key()->~Key();
}
template<typename ForwardKey, typename IsEqual>
bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t hash) const
{
/* m_hash might be uninitialized here, but that is ok. */
if (m_hash == hash) {
if (m_state == Occupied) {
return is_equal(key, *this->key());
}
}
return false;
}
template<typename ForwardKey> void occupy(ForwardKey &&key, uint32_t hash)
{
BLI_assert(!this->is_occupied());
m_state = Occupied;
m_hash = hash;
new (this->key()) Key(std::forward<ForwardKey>(key));
}
void remove()
{
BLI_assert(this->is_occupied());
m_state = Removed;
this->key()->~Key();
}
};
/**
* An IntrusiveSetSlot uses two special values of the key to indicate whether the slot is empty or
* removed. This saves some memory in all cases and is more efficient in many cases. The KeyInfo
* type indicates which specific values are used. An example for a KeyInfo implementation is
* PointerKeyInfo.
*
* The special key values are expected to be trivially destructible.
*/
template<typename Key, typename KeyInfo> class IntrusiveSetSlot {
private:
Key m_key = KeyInfo::get_empty();
public:
IntrusiveSetSlot() = default;
~IntrusiveSetSlot() = default;
IntrusiveSetSlot(const IntrusiveSetSlot &other) = default;
IntrusiveSetSlot(IntrusiveSetSlot &&other) noexcept = default;
Key *key()
{
return &m_key;
}
const Key *key() const
{
return &m_key;
}
bool is_occupied() const
{
return KeyInfo::is_not_empty_or_removed(m_key);
}
bool is_empty() const
{
return KeyInfo::is_empty(m_key);
}
template<typename Hash> uint32_t get_hash(const Hash &hash) const
{
BLI_assert(this->is_occupied());
return hash(m_key);
}
void relocate_occupied_here(IntrusiveSetSlot &other, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
m_key = std::move(other.m_key);
other.m_key.~Key();
}
template<typename ForwardKey, typename IsEqual>
bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t UNUSED(hash)) const
{
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
return is_equal(m_key, key);
}
template<typename ForwardKey> void occupy(ForwardKey &&key, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(KeyInfo::is_not_empty_or_removed(key));
m_key = std::forward<ForwardKey>(key);
}
void remove()
{
BLI_assert(this->is_occupied());
KeyInfo::remove(m_key);
}
};
/**
* This exists just to make it more convenient to define which special integer values can be used
* to indicate an empty and removed value.
*/
template<typename Int, Int EmptyValue, Int RemovedValue>
using IntegerSetSlot = IntrusiveSetSlot<Int, TemplatedKeyInfo<Int, EmptyValue, RemovedValue>>;
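/* Illustrative sketch of what a KeyInfo type has to provide, matching the operations that
 * IntrusiveSetSlot calls above: get_empty, is_empty, is_not_empty_or_removed and remove. The real
 * TemplatedKeyInfo and PointerKeyInfo implementations live in another header and may differ in
 * detail. */
struct NegativeSentinelKeyInfo {
  static int get_empty()
  {
    return -1;
  }
  static bool is_empty(int key)
  {
    return key == -1;
  }
  static bool is_not_empty_or_removed(int key)
  {
    return key != -1 && key != -2;
  }
  static void remove(int &key)
  {
    key = -2;
  }
};

/* A slot for sets of non-negative ints; roughly what IntegerSetSlot<int, -1, -2> provides. */
using NonNegativeIntSlot = IntrusiveSetSlot<int, NegativeSentinelKeyInfo>;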
template<typename Key> struct DefaultSetSlot;
/**
* Use SimpleSetSlot by default, because it is the smallest slot type that works for all key types.
*/
template<typename Key> struct DefaultSetSlot {
using type = SimpleSetSlot<Key>;
};
/**
* Store the hash of a string in the slot by default. Recomputing the hash or doing string
* comparisons can be relatively costly.
*/
template<> struct DefaultSetSlot<std::string> {
using type = HashedSetSlot<std::string>;
};
template<> struct DefaultSetSlot<StringRef> {
using type = HashedSetSlot<StringRef>;
};
template<> struct DefaultSetSlot<StringRefNull> {
using type = HashedSetSlot<StringRefNull>;
};
/**
* Use a special slot type for pointer keys, because we can store whether a slot is empty or
* removed with special pointer values.
*/
template<typename Key> struct DefaultSetSlot<Key *> {
using type = IntrusiveSetSlot<Key *, PointerKeyInfo<Key *>>;
};
} // namespace BLI
#endif /* __BLI_SET_SLOTS_HH__ */

View File

@@ -20,48 +20,208 @@
/** \file
* \ingroup bli
*
* Basic stack implementation with support for small object optimization.
* A `BLI::Stack<T>` is a dynamically growing FILO (first-in, last-out) data structure. It is
* designed to be a more convenient and efficient replacement for `std::stack`.
*
* The improved efficiency is mainly achieved by supporting small buffer optimization. As long as
* the number of elements added to the stack stays below InlineBufferCapacity, no heap allocation
 * is done. Consequently, values stored in the stack have to be movable, and they might be moved
 * when the stack is moved.
*
* A Vector can be used to emulate a stack. However, this stack implementation is more efficient
* when all you have to do is to push and pop elements. That is because a vector guarantees that
* all elements are in a contiguous array. Therefore, it has to copy all elements to a new buffer
* when it grows. This stack implementation does not have to copy all previously pushed elements
* when it grows.
*
 * BLI::Stack is implemented using a doubly-linked list of chunks. Each chunk contains an array of
* elements. The chunk size increases exponentially with every new chunk that is required. The
* lowest chunk, i.e. the one that is used for the first few pushed elements, is embedded into the
* stack.
*/
#include "BLI_vector.hh"
#include "BLI_allocator.hh"
#include "BLI_array_ref.hh"
#include "BLI_memory_utils.hh"
namespace BLI {
template<typename T, uint InlineBufferCapacity = 4, typename Allocator = GuardedAllocator>
/**
* A StackChunk references a contiguous memory buffer. Multiple StackChunk instances are linked in
 * a doubly-linked list.
*/
template<typename T> struct StackChunk {
/** The below chunk contains the elements that have been pushed on the stack before. */
StackChunk *below;
/** The above chunk contains the elements that have been pushed on the stack afterwards. */
StackChunk *above;
/** Pointer to the first element of the referenced buffer. */
T *begin;
/** Pointer to one element past the end of the referenced buffer. */
T *capacity_end;
uint capacity() const
{
return capacity_end - begin;
}
};
template<
/** Type of the elements that are stored in the stack. */
typename T,
/**
* The number of values that can be stored in this stack, without doing a heap allocation.
* Sometimes it can make sense to increase this value a lot. The memory in the inline buffer is
* not initialized when it is not needed.
*
* When T is large, the small buffer optimization is disabled by default to avoid large
 * unexpected allocations on the stack. It can still be enabled explicitly, though.
*/
uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
/**
 * The allocator used by this stack. Should rarely be changed, except when you don't want
 * MEM_* to be used internally.
*/
typename Allocator = GuardedAllocator>
class Stack {
private:
Vector<T, InlineBufferCapacity, Allocator> m_elements;
using Chunk = StackChunk<T>;
/**
 * Points to one element after the top-most value in the stack.
*
* Invariant:
* If m_size == 0
* then: m_top == m_inline_chunk.begin
* else: &peek() == m_top - 1;
*/
T *m_top;
/** Points to the chunk that references the memory pointed to by m_top. */
Chunk *m_top_chunk;
/**
* Number of elements in the entire stack. The sum of initialized element counts in the chunks.
*/
uint m_size;
/** The buffer used to implement small object optimization. */
AlignedBuffer<sizeof(T) * InlineBufferCapacity, alignof(T)> m_inline_buffer;
/**
* A chunk referencing the inline buffer. This is always the bottom-most chunk.
* So m_inline_chunk.below == nullptr.
*/
Chunk m_inline_chunk;
/** Used for allocations when the inline buffer is not large enough. */
Allocator m_allocator;
public:
Stack() = default;
/**
* Construct a stack from an array ref. The elements will be pushed in the same order they are in
* the array.
* Initialize an empty stack. No heap allocation is done.
*/
Stack(ArrayRef<T> values) : m_elements(values)
Stack(Allocator allocator = {}) : m_allocator(allocator)
{
}
T *inline_buffer = this->inline_buffer();
operator ArrayRef<T>()
{
return m_elements;
m_inline_chunk.below = nullptr;
m_inline_chunk.above = nullptr;
m_inline_chunk.begin = inline_buffer;
m_inline_chunk.capacity_end = inline_buffer + InlineBufferCapacity;
m_top = inline_buffer;
m_top_chunk = &m_inline_chunk;
m_size = 0;
}
/**
* Return the number of elements in the stack.
* Create a new stack that contains the given elements. The values are pushed to the stack in
* the order they are in the array.
*/
uint size() const
Stack(ArrayRef<T> values) : Stack()
{
return m_elements.size();
this->push_multiple(values);
}
/**
* Return true when the stack is empty, otherwise false.
* Create a new stack that contains the given elements. The values are pushed to the stack in the
* order they are in the array.
*
* Example:
* Stack<int> stack = {4, 5, 6};
* assert(stack.pop() == 6);
* assert(stack.pop() == 5);
*/
bool is_empty() const
Stack(const std::initializer_list<T> &values) : Stack(ArrayRef<T>(values))
{
return this->size() == 0;
}
Stack(const Stack &other) : Stack(other.m_allocator)
{
for (const Chunk *chunk = &other.m_inline_chunk; chunk; chunk = chunk->above) {
const T *begin = chunk->begin;
const T *end = (chunk == other.m_top_chunk) ? other.m_top : chunk->capacity_end;
this->push_multiple(ArrayRef<T>(begin, end - begin));
}
}
Stack(Stack &&other) noexcept : Stack(other.m_allocator)
{
uninitialized_relocate_n(other.inline_buffer(),
std::min(other.m_size, InlineBufferCapacity),
this->inline_buffer());
m_inline_chunk.above = other.m_inline_chunk.above;
m_size = other.m_size;
if (m_size <= InlineBufferCapacity) {
m_top_chunk = &m_inline_chunk;
m_top = this->inline_buffer() + m_size;
}
else {
m_top_chunk = other.m_top_chunk;
m_top = other.m_top;
}
other.m_size = 0;
other.m_inline_chunk.above = nullptr;
other.m_top_chunk = &other.m_inline_chunk;
other.m_top = other.m_top_chunk->begin;
}
~Stack()
{
this->destruct_all_elements();
Chunk *above_chunk;
for (Chunk *chunk = m_inline_chunk.above; chunk; chunk = above_chunk) {
above_chunk = chunk->above;
m_allocator.deallocate(chunk);
}
}
Stack &operator=(const Stack &stack)
{
if (this == &stack) {
return *this;
}
this->~Stack();
new (this) Stack(stack);
return *this;
}
Stack &operator=(Stack &&stack)
{
if (this == &stack) {
return *this;
}
this->~Stack();
new (this) Stack(std::move(stack));
return *this;
}
/**
@@ -69,80 +229,159 @@ class Stack {
*/
void push(const T &value)
{
m_elements.append(value);
if (m_top == m_top_chunk->capacity_end) {
this->activate_next_chunk(1);
}
new (m_top) T(value);
m_top++;
m_size++;
}
void push(T &&value)
{
m_elements.append(std::move(value));
}
void push_multiple(ArrayRef<T> values)
{
m_elements.extend(values);
if (m_top == m_top_chunk->capacity_end) {
this->activate_next_chunk(1);
}
new (m_top) T(std::move(value));
m_top++;
m_size++;
}
/**
* Remove the element from the top of the stack and return it.
* This will assert when the stack is empty.
* Remove and return the top-most element from the stack. This invokes undefined behavior when
* the stack is empty.
*/
T pop()
{
return m_elements.pop_last();
BLI_assert(m_size > 0);
m_top--;
T value = std::move(*m_top);
m_top->~T();
m_size--;
if (m_top == m_top_chunk->begin) {
if (m_top_chunk->below != nullptr) {
m_top_chunk = m_top_chunk->below;
m_top = m_top_chunk->capacity_end;
}
}
return value;
}
/**
 * Return a reference to the value at the top of the stack.
* This will assert when the stack is empty.
* Get a reference to the top-most element without removing it from the stack. This invokes
* undefined behavior when the stack is empty.
*/
T &peek()
{
BLI_assert(!this->is_empty());
return m_elements[this->size() - 1];
BLI_assert(m_size > 0);
BLI_assert(m_top > m_top_chunk->begin);
return *(m_top - 1);
}
T *begin()
const T &peek() const
{
return m_elements.begin();
}
T *end()
{
return m_elements.end();
}
const T *begin() const
{
return m_elements.begin();
}
const T *end() const
{
return m_elements.end();
BLI_assert(m_size > 0);
BLI_assert(m_top > m_top_chunk->begin);
return *(m_top - 1);
}
/**
* Remove all elements from the stack but keep the memory.
* Add multiple elements to the stack. The values are pushed in the order they are in the array.
 * This method is more efficient than pushing elements individually and might cause fewer
 * heap allocations.
*/
void push_multiple(ArrayRef<T> values)
{
ArrayRef<T> remaining_values = values;
while (!remaining_values.is_empty()) {
if (m_top == m_top_chunk->capacity_end) {
this->activate_next_chunk(remaining_values.size());
}
uint remaining_capacity = m_top_chunk->capacity_end - m_top;
uint amount = std::min(remaining_values.size(), remaining_capacity);
uninitialized_copy_n(remaining_values.data(), amount, m_top);
m_top += amount;
remaining_values = remaining_values.drop_front(amount);
}
m_size += values.size();
}
/**
* Returns true when the size is zero.
*/
bool is_empty() const
{
return m_size == 0;
}
/**
* Returns the number of elements in the stack.
*/
uint size() const
{
return m_size;
}
/**
* Removes all elements from the stack. The memory is not freed, so it is more efficient to reuse
* the stack than to create a new one.
*/
void clear()
{
m_elements.clear();
this->destruct_all_elements();
m_top_chunk = &m_inline_chunk;
m_top = m_top_chunk->begin;
}
private:
T *inline_buffer() const
{
return (T *)m_inline_buffer.ptr();
}
/**
* Remove all elements and free any allocated memory.
* Changes m_top_chunk to point to a new chunk that is above the current one. The new chunk might
* be smaller than the given size_hint. This happens when a chunk that has been allocated before
* is reused. The size of the new chunk will be at least one.
*
* This invokes undefined behavior when the currently active chunk is not full.
*/
void clear_and_make_small()
void activate_next_chunk(uint size_hint)
{
m_elements.clear_and_make_small();
BLI_assert(m_top == m_top_chunk->capacity_end);
if (m_top_chunk->above == nullptr) {
uint new_capacity = std::max(size_hint, m_top_chunk->capacity() * 2 + 10);
/* Do a single memory allocation for the Chunk and the array it references. */
void *buffer = m_allocator.allocate(
sizeof(Chunk) + sizeof(T) * new_capacity + alignof(T), alignof(Chunk), AT);
void *chunk_buffer = buffer;
void *data_buffer = (void *)(((uintptr_t)buffer + sizeof(Chunk) + alignof(T) - 1) &
~(alignof(T) - 1));
Chunk *new_chunk = new (chunk_buffer) Chunk();
new_chunk->begin = (T *)data_buffer;
new_chunk->capacity_end = new_chunk->begin + new_capacity;
new_chunk->above = nullptr;
new_chunk->below = m_top_chunk;
m_top_chunk->above = new_chunk;
}
m_top_chunk = m_top_chunk->above;
m_top = m_top_chunk->begin;
}
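  /* Illustrative helper (added for documentation only): the data_buffer computation above rounds
   * an address up to a power-of-two alignment with (x + align - 1) & ~(align - 1). For example,
   * align_up_sketch(100, 16) == 112 and align_up_sketch(112, 16) == 112. */
  static uintptr_t align_up_sketch(uintptr_t x, uintptr_t align)
  {
    return (x + align - 1) & ~(align - 1);
  }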
/**
* Does a linear search to check if the value is in the stack.
*/
bool contains(const T &value)
void destruct_all_elements()
{
return m_elements.contains(value);
for (T *value = m_top_chunk->begin; value != m_top; value++) {
value->~T();
}
for (Chunk *chunk = m_top_chunk->below; chunk; chunk = chunk->below) {
for (T *value = chunk->begin; value != chunk->capacity_end; value++) {
value->~T();
}
}
}
};
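/* Usage sketch (illustrative): push a batch of values and drain the stack in LIFO order, using
 * only the methods shown above. */
inline void stack_push_multiple_example()
{
  Stack<int> stack;
  int values[3] = {1, 2, 3};
  stack.push_multiple(ArrayRef<int>(values, 3));
  while (!stack.is_empty()) {
    int top = stack.pop(); /* Yields 3, then 2, then 1. */
    (void)top;
  }
}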

View File

@@ -1,540 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __BLI_STRING_MAP_HH__
#define __BLI_STRING_MAP_HH__
/** \file
* \ingroup bli
*
* This tries to solve the issue that a normal map with std::string as key might do many
* allocations when the keys are longer than 16 bytes (the usual small string optimization size).
*
* For now this still uses std::string, but having this abstraction in place will make it easier to
* make it more efficient later on. Also, even if we will never implement this optimization, having
* a special map with string keys can be quite handy. */
#include "BLI_map.hh"
#include "BLI_optional.hh"
#include "BLI_string_ref.hh"
#include "BLI_vector.hh"
namespace BLI {
// clang-format off
#define ITER_SLOTS_BEGIN(HASH, ARRAY, OPTIONAL_CONST, R_ITEM, R_OFFSET) \
uint32_t hash_copy = HASH; \
uint32_t perturb = HASH; \
while (true) { \
uint32_t item_index = (hash_copy & ARRAY.slot_mask()) >> OFFSET_SHIFT; \
uint8_t R_OFFSET = hash_copy & OFFSET_MASK; \
uint8_t initial_offset = R_OFFSET; \
OPTIONAL_CONST Item &R_ITEM = ARRAY.item(item_index); \
do {
#define ITER_SLOTS_END(R_OFFSET) \
R_OFFSET = (R_OFFSET + 1) & OFFSET_MASK; \
} while (R_OFFSET != initial_offset); \
perturb >>= 5; \
hash_copy = hash_copy * 5 + 1 + perturb; \
} ((void)0)
// clang-format on
template<typename T, typename Allocator = GuardedAllocator> class StringMap {
private:
static constexpr uint32_t OFFSET_MASK = 3;
static constexpr uint32_t OFFSET_SHIFT = 2;
class Item {
private:
static constexpr int32_t IS_EMPTY = -1;
uint32_t m_hashes[4];
int32_t m_indices[4];
char m_values[sizeof(T) * 4];
public:
static constexpr uint slots_per_item = 4;
Item()
{
for (uint offset = 0; offset < 4; offset++) {
m_indices[offset] = IS_EMPTY;
}
}
~Item()
{
for (uint offset = 0; offset < 4; offset++) {
if (this->is_set(offset)) {
destruct(this->value(offset));
}
}
}
Item(const Item &other)
{
for (uint offset = 0; offset < 4; offset++) {
m_indices[offset] = other.m_indices[offset];
if (other.is_set(offset)) {
m_hashes[offset] = other.m_hashes[offset];
new (this->value(offset)) T(*other.value(offset));
}
}
}
Item(Item &&other) noexcept
{
for (uint offset = 0; offset < 4; offset++) {
m_indices[offset] = other.m_indices[offset];
if (other.is_set(offset)) {
m_hashes[offset] = other.m_hashes[offset];
new (this->value(offset)) T(std::move(*other.value(offset)));
}
}
}
uint32_t index(uint offset) const
{
return m_indices[offset];
}
uint32_t hash(uint offset) const
{
return m_hashes[offset];
}
T *value(uint offset) const
{
return (T *)POINTER_OFFSET(m_values, offset * sizeof(T));
}
bool is_set(uint offset) const
{
return m_indices[offset] >= 0;
}
bool is_empty(uint offset) const
{
return m_indices[offset] == IS_EMPTY;
}
bool has_hash(uint offset, uint32_t hash) const
{
BLI_assert(this->is_set(offset));
return m_hashes[offset] == hash;
}
bool has_exact_key(uint offset, StringRef key, const Vector<char> &chars) const
{
return key == this->get_key(offset, chars);
}
StringRefNull get_key(uint offset, const Vector<char> &chars) const
{
const char *ptr = chars.begin() + m_indices[offset];
uint length = *(uint *)ptr;
const char *start = ptr + sizeof(uint);
return StringRefNull(start, length);
}
template<typename ForwardT>
void store(uint offset, uint32_t hash, uint32_t index, ForwardT &&value)
{
this->store_without_value(offset, hash, index);
new (this->value(offset)) T(std::forward<ForwardT>(value));
}
void store_without_value(uint offset, uint32_t hash, uint32_t index)
{
BLI_assert(!this->is_set(offset));
m_hashes[offset] = hash;
m_indices[offset] = index;
}
};
using ArrayType = OpenAddressingArray<Item, 1, Allocator>;
ArrayType m_array;
Vector<char> m_chars;
public:
StringMap() = default;
/**
* Get the number of key-value pairs in the map.
*/
uint size() const
{
return m_array.slots_set();
}
/**
* Add a new element to the map. It is assumed that the key did not exist before.
*/
void add_new(StringRef key, const T &value)
{
this->add_new__impl(key, value);
}
void add_new(StringRef key, T &&value)
{
this->add_new__impl(key, std::move(value));
}
/**
* Add a new element to the map if the key does not exist yet.
*/
void add(StringRef key, const T &value)
{
this->add__impl(key, value);
}
void add(StringRef key, T &&value)
{
this->add__impl(key, std::move(value));
}
/**
* First, checks if the key exists in the map.
* If it does exist, call the modify function with a pointer to the corresponding value.
* If it does not exist, call the create function with a pointer to where the value should be
* created.
*
* Returns whatever is returned from one of the callback functions. Both callbacks have to return
* the same type.
*
* CreateValueF: Takes a pointer to where the value should be created.
* ModifyValueF: Takes a pointer to the value that should be modified.
*/
template<typename CreateValueF, typename ModifyValueF>
auto add_or_modify(StringRef key,
const CreateValueF &create_value,
const ModifyValueF &modify_value) -> decltype(create_value(nullptr))
{
using CreateReturnT = decltype(create_value(nullptr));
using ModifyReturnT = decltype(modify_value(nullptr));
BLI_STATIC_ASSERT((std::is_same<CreateReturnT, ModifyReturnT>::value),
"Both callbacks should return the same type.");
this->ensure_can_add();
uint32_t hash = this->compute_string_hash(key);
ITER_SLOTS_BEGIN (hash, m_array, , item, offset) {
if (item.is_empty(offset)) {
m_array.update__empty_to_set();
uint32_t index = this->save_key_in_array(key);
item.store_without_value(offset, hash, index);
T *value_ptr = item.value(offset);
return create_value(value_ptr);
}
else if (item.has_hash(offset, hash) && item.has_exact_key(offset, key, m_chars)) {
T *value_ptr = item.value(offset);
return modify_value(value_ptr);
}
}
ITER_SLOTS_END(offset);
}
/**
* Return true when the key exists in the map, otherwise false.
*/
bool contains(StringRef key) const
{
uint32_t hash = this->compute_string_hash(key);
ITER_SLOTS_BEGIN (hash, m_array, const, item, offset) {
if (item.is_empty(offset)) {
return false;
}
else if (item.has_hash(offset, hash) && item.has_exact_key(offset, key, m_chars)) {
return true;
}
}
ITER_SLOTS_END(offset);
}
/**
* Get a reference to the value corresponding to a key. It is assumed that the key does exist.
*/
const T &lookup(StringRef key) const
{
BLI_assert(this->contains(key));
T *found_value = nullptr;
uint32_t hash = this->compute_string_hash(key);
ITER_SLOTS_BEGIN (hash, m_array, const, item, offset) {
if (item.is_empty(offset)) {
return *found_value;
}
else if (item.has_hash(offset, hash)) {
if (found_value == nullptr) {
/* Common case: the first slot with the correct hash contains the key.
* However, still need to iterate until the next empty slot to make sure there is no
* other key with the exact same hash. */
/* TODO: Check if we can guarantee that every hash only exists once in some cases. */
found_value = item.value(offset);
}
else if (item.has_exact_key(offset, key, m_chars)) {
/* Found the hash more than once, now check for actual string equality. */
return *item.value(offset);
}
}
}
ITER_SLOTS_END(offset);
}
T &lookup(StringRef key)
{
return const_cast<T &>(const_cast<const StringMap *>(this)->lookup(key));
}
/**
* Get a pointer to the value corresponding to the key. Return nullptr, if the key does not
* exist.
*/
const T *lookup_ptr(StringRef key) const
{
uint32_t hash = this->compute_string_hash(key);
ITER_SLOTS_BEGIN (hash, m_array, const, item, offset) {
if (item.is_empty(offset)) {
return nullptr;
}
else if (item.has_hash(offset, hash) && item.has_exact_key(offset, key, m_chars)) {
return item.value(offset);
}
}
ITER_SLOTS_END(offset);
}
T *lookup_ptr(StringRef key)
{
return const_cast<T *>(const_cast<const StringMap *>(this)->lookup_ptr(key));
}
Optional<T> try_lookup(StringRef key) const
{
return Optional<T>::FromPointer(this->lookup_ptr(key));
}
/**
* Get a copy of the value corresponding to the key. If the key does not exist, return the
* default value.
*/
T lookup_default(StringRef key, const T &default_value) const
{
const T *ptr = this->lookup_ptr(key);
if (ptr != nullptr) {
return *ptr;
}
else {
return default_value;
}
}
/**
* Return the value that corresponds to the given key.
* If it does not exist yet, create and insert it first.
*/
template<typename CreateValueF> T &lookup_or_add(StringRef key, const CreateValueF &create_value)
{
return *this->add_or_modify(
key,
[&](T *value) { return new (value) T(create_value()); },
[](T *value) { return value; });
}
/**
* Return the value that corresponds to the given key.
* If it does not exist yet, insert a new default constructed value and return that.
*/
T &lookup_or_add_default(StringRef key)
{
return this->lookup_or_add(key, []() { return T(); });
}
/**
* Do a linear search over all items to find a key for a value.
*/
StringRefNull find_key_for_value(const T &value) const
{
for (const Item &item : m_array) {
for (uint offset = 0; offset < 4; offset++) {
if (item.is_set(offset) && value == *item.value(offset)) {
return item.get_key(offset, m_chars);
}
}
}
BLI_assert(false);
return {};
}
/**
* Run a function for every value in the map.
*/
template<typename FuncT> void foreach_value(const FuncT &func)
{
for (Item &item : m_array) {
for (uint offset = 0; offset < 4; offset++) {
if (item.is_set(offset)) {
func(*item.value(offset));
}
}
}
}
/**
* Run a function for every key in the map.
*/
template<typename FuncT> void foreach_key(const FuncT &func)
{
for (Item &item : m_array) {
for (uint offset = 0; offset < 4; offset++) {
if (item.is_set(offset)) {
StringRefNull key = item.get_key(offset, m_chars);
func(key);
}
}
}
}
/**
* Run a function for every key-value-pair in the map.
*/
template<typename FuncT> void foreach_item(const FuncT &func)
{
for (Item &item : m_array) {
for (uint offset = 0; offset < 4; offset++) {
if (item.is_set(offset)) {
StringRefNull key = item.get_key(offset, m_chars);
T &value = *item.value(offset);
func(key, value);
}
}
}
}
template<typename FuncT> void foreach_item(const FuncT &func) const
{
for (const Item &item : m_array) {
for (uint offset = 0; offset < 4; offset++) {
if (item.is_set(offset)) {
StringRefNull key = item.get_key(offset, m_chars);
const T &value = *item.value(offset);
func(key, value);
}
}
}
}
private:
uint32_t compute_string_hash(StringRef key) const
{
/* TODO: check if this can be optimized more because we know the key length already. */
uint32_t hash = 5381;
for (char c : key) {
hash = hash * 33 + c;
}
return hash;
}
uint32_t save_key_in_array(StringRef key)
{
uint index = m_chars.size();
uint string_size = key.size();
m_chars.extend(ArrayRef<char>((char *)&string_size, sizeof(uint)));
m_chars.extend(key);
m_chars.append('\0');
return index;
}
StringRefNull key_from_index(uint32_t index) const
{
const char *ptr = m_chars.begin() + index;
uint length = *(uint *)ptr;
const char *start = ptr + sizeof(uint);
return StringRefNull(start, length);
}
void ensure_can_add()
{
if (UNLIKELY(m_array.should_grow())) {
this->grow(this->size() + 1);
}
}
BLI_NOINLINE void grow(uint min_usable_slots)
{
ArrayType new_array = m_array.init_reserved(min_usable_slots);
for (Item &old_item : m_array) {
for (uint offset = 0; offset < 4; offset++) {
if (old_item.is_set(offset)) {
this->add_after_grow(
*old_item.value(offset), old_item.hash(offset), old_item.index(offset), new_array);
}
}
}
m_array = std::move(new_array);
}
void add_after_grow(T &value, uint32_t hash, uint32_t index, ArrayType &new_array)
{
ITER_SLOTS_BEGIN (hash, new_array, , item, offset) {
if (item.is_empty(offset)) {
item.store(offset, hash, index, std::move(value));
return;
}
}
ITER_SLOTS_END(offset);
}
template<typename ForwardT> bool add__impl(StringRef key, ForwardT &&value)
{
this->ensure_can_add();
uint32_t hash = this->compute_string_hash(key);
ITER_SLOTS_BEGIN (hash, m_array, , item, offset) {
if (item.is_empty(offset)) {
uint32_t index = this->save_key_in_array(key);
item.store(offset, hash, index, std::forward<ForwardT>(value));
m_array.update__empty_to_set();
return true;
}
else if (item.has_hash(offset, hash) && item.has_exact_key(offset, key, m_chars)) {
return false;
}
}
ITER_SLOTS_END(offset);
}
template<typename ForwardT> void add_new__impl(StringRef key, ForwardT &&value)
{
BLI_assert(!this->contains(key));
this->ensure_can_add();
uint32_t hash = this->compute_string_hash(key);
ITER_SLOTS_BEGIN (hash, m_array, , item, offset) {
if (item.is_empty(offset)) {
uint32_t index = this->save_key_in_array(key);
item.store(offset, hash, index, std::forward<ForwardT>(value));
m_array.update__empty_to_set();
return;
}
}
ITER_SLOTS_END(offset);
}
};
#undef ITER_SLOTS_BEGIN
#undef ITER_SLOTS_END
} // namespace BLI
#endif /* __BLI_STRING_MAP_HH__ */
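To make the iteration helpers above concrete, here is a minimal usage sketch. It is not part of the diff; it assumes the public add/add_new wrappers around add__impl/add_new__impl shown above and that <iostream> is included.

static void string_map_example()
{
  BLI::StringMap<int> vertex_counts;
  vertex_counts.add_new("Cube", 8);
  vertex_counts.add_new("Triangle", 3);
  vertex_counts.add("Cube", 100); /* Assumed to return false, the key exists already. */

  /* The key is passed as StringRefNull, the value by (mutable) reference. */
  vertex_counts.foreach_item([](BLI::StringRefNull key, int &value) {
    std::cout << key << " has " << value << " vertices\n";
  });
}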

View File

@@ -20,12 +20,26 @@
/** \file
* \ingroup bli
*
* A StringRef is a pointer to a string somewhere in memory. It should not be used to transfer
* ownership of that string. When a function gets a StringRef as input, it cannot expect, that
* the string will still exist after the function ends.
* A `BLI::StringRef` references a const char array owned by someone else. It is just a pointer and
* a size. Since the memory is not owned, StringRef should not be used to transfer ownership of the
* string. The data referenced by a StringRef cannot be mutated through it.
*
* There are two types of string references: One that guarantees null termination and one that does
* not.
* A StringRef is not guaranteed to be null-terminated. This makes it much more powerful within C++, because we can
* also cut off parts of the end without creating a copy. When interfacing with C code that expects
* null-terminated strings, `BLI::StringRefNull` can be used. It is essentially the same as
* StringRef, but with the restriction that the string has to be null-terminated.
*
* Whenever possible, string parameters should be of type StringRef and the string return type
* should be StringRefNull. Don't forget that the StringRefNull does not own the string, so don't
* return it when the string exists only in the scope of the function. This convention makes
* functions usable in most contexts.
*
* BLI::StringRef vs. std::string_view:
* Both types are certainly very similar. The main benefit of using StringRef in Blender is that
* this allows us to add convenience methods at any time. Especially when doing a lot of string
* manipulation, this helps to keep the code clean. Furthermore, we need StringRefNull anyway,
* because there is a lot of C code that expects null-terminated strings. Once we use C++17,
* implicit conversions to and from string_view can be added.
*/
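As a hedged illustration of the parameter/return-type convention described above, a minimal sketch (the functions below are hypothetical and only use methods defined further down in this header):

/* Takes StringRef, so it accepts C strings, std::string and other StringRefs without copies. */
static bool is_library_path(BLI::StringRef path)
{
  return path.startswith("//") || path.endswith(".blend");
}

/* Returns StringRefNull: string literals are null-terminated and live for the whole program. */
static BLI::StringRefNull default_object_name()
{
  return "Object";
}

/* copy() null-terminates; only use it when the destination buffer is known to be large enough. */
static void copy_name(BLI::StringRef name, char (&dst)[64])
{
  name.copy(dst);
}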
#include <cstring>
@@ -39,15 +53,16 @@ namespace BLI {
class StringRef;
/**
* A common base class for StringRef and StringRefNull. This should never be used in other files.
* It only exists to avoid some code duplication.
*/
class StringRefBase {
public:
using size_type = size_t;
protected:
const char *m_data;
size_type m_size;
uint m_size;
StringRefBase(const char *data, size_type size) : m_data(data), m_size(size)
StringRefBase(const char *data, uint size) : m_data(data), m_size(size)
{
}
@@ -55,7 +70,7 @@ class StringRefBase {
/**
* Return the (byte-)length of the referenced string, without any null-terminator.
*/
size_type size() const
uint size() const
{
return m_size;
}
@@ -68,17 +83,15 @@ class StringRefBase {
return m_data;
}
char operator[](size_type index) const
{
BLI_assert(index <= m_size);
return m_data[index];
}
operator ArrayRef<char>() const
{
return ArrayRef<char>(m_data, m_size);
}
/**
* Implicitly convert to std::string. This is convenient in most cases, but you have to be a bit
* careful not to convert to std::string accidentally.
*/
operator std::string() const
{
return std::string(m_data, m_size);
@@ -94,12 +107,21 @@ class StringRefBase {
return m_data + m_size;
}
/**
* Copy the string into a buffer. The buffer has to be one byte larger than the size of the
* string, because the copied string will be null-terminated. Only use this when you are
* absolutely sure that the buffer is large enough.
*/
void unsafe_copy(char *dst) const
{
memcpy(dst, m_data, m_size);
dst[m_size] = '\0';
}
/**
* Copy the string into a buffer. The copied string will be null-terminated. This invokes
* undefined behavior when dst_size is too small. (Should we define the behavior?)
*/
void copy(char *dst, uint dst_size) const
{
if (m_size < dst_size) {
@@ -111,6 +133,10 @@ class StringRefBase {
}
}
/**
* Copy the string into a char array. The copied string will be null-terminated. This invokes
* undefined behavior when dst is too small.
*/
template<uint N> void copy(char (&dst)[N])
{
this->copy(dst, N);
@@ -130,7 +156,7 @@ class StringRefBase {
};
/**
* References a null-terminated char array.
* References a null-terminated const char array.
*/
class StringRefNull : public StringRefBase {
@@ -139,24 +165,45 @@ class StringRefNull : public StringRefBase {
{
}
StringRefNull(const char *str) : StringRefBase(str, strlen(str))
/**
* Construct a StringRefNull from a null terminated c-string. The pointer must not point to NULL.
*/
StringRefNull(const char *str) : StringRefBase(str, (uint)strlen(str))
{
BLI_assert(str != NULL);
BLI_assert(m_data[m_size] == '\0');
}
StringRefNull(const char *str, size_type size) : StringRefBase(str, size)
/**
* Construct a StringRefNull from a null terminated c-string. This invokes undefined behavior
* when the given size is not the correct size of the string.
*/
StringRefNull(const char *str, uint size) : StringRefBase(str, size)
{
BLI_assert(str[size] == '\0');
BLI_assert((uint)strlen(str) == size);
}
/**
* Reference a std::string. Remember that when the std::string is destructed, the StringRefNull
* will point to freed memory.
*/
StringRefNull(const std::string &str) : StringRefNull(str.data())
{
}
/**
* Get the char at the given index.
*/
char operator[](uint index) const
{
/* Use '<=' instead of just '<', so that the null character can be accessed as well. */
BLI_assert(index <= m_size);
return m_data[index];
}
};
/**
* References a char array. It might not be null terminated.
* References a const char array. It might not be null terminated.
*/
class StringRef : public StringRefBase {
public:
@@ -164,19 +211,29 @@ class StringRef : public StringRefBase {
{
}
/**
* StringRefNull can be converted into StringRef, but not the other way around.
*/
StringRef(StringRefNull other) : StringRefBase(other.data(), other.size())
{
}
StringRef(const char *str) : StringRefBase(str, str ? strlen(str) : 0)
/**
* Create a StringRef from a null-terminated c-string.
*/
StringRef(const char *str) : StringRefBase(str, str ? (uint)strlen(str) : 0)
{
}
StringRef(const char *str, size_type length) : StringRefBase(str, length)
StringRef(const char *str, uint length) : StringRefBase(str, length)
{
}
StringRef(const std::string &str) : StringRefBase(str.data(), str.size())
/**
* Reference a std::string. Remember that when the std::string is destructed, the StringRef
* will point to freed memory.
*/
StringRef(const std::string &str) : StringRefBase(str.data(), (uint)str.size())
{
}
@@ -198,6 +255,15 @@ class StringRef : public StringRefBase {
BLI_assert(this->startswith(prefix));
return this->drop_prefix(prefix.size());
}
/**
* Get the char at the given index.
*/
char operator[](uint index) const
{
BLI_assert(index < m_size);
return m_data[index];
}
};
/* More inline functions
@@ -215,6 +281,10 @@ inline std::ostream &operator<<(std::ostream &stream, StringRefNull ref)
return stream;
}
/**
* Adding two StringRefs will allocate an std::string. This is not efficient, but convenient in
* most cases.
*/
inline std::string operator+(StringRef a, StringRef b)
{
return std::string(a) + std::string(b);
@@ -233,6 +303,9 @@ inline bool operator!=(StringRef a, StringRef b)
return !(a == b);
}
/**
* Return true when the string starts with the given prefix.
*/
inline bool StringRefBase::startswith(StringRef prefix) const
{
if (m_size < prefix.m_size) {
@@ -246,6 +319,9 @@ inline bool StringRefBase::startswith(StringRef prefix) const
return true;
}
/**
* Return true when the string ends with the given suffix.
*/
inline bool StringRefBase::endswith(StringRef suffix) const
{
if (m_size < suffix.m_size) {
@@ -260,6 +336,9 @@ inline bool StringRefBase::endswith(StringRef suffix) const
return true;
}
/**
* Return a new StringRef containing only a substring of the original string.
*/
inline StringRef StringRefBase::substr(uint start, uint size) const
{
BLI_assert(start + size <= m_size);

View File

@@ -23,6 +23,9 @@
namespace BLI {
/**
* A type that inherits from NonCopyable cannot be copied anymore.
*/
class NonCopyable {
public:
/* Disable copy construction and assignment. */
@@ -35,6 +38,9 @@ class NonCopyable {
NonCopyable &operator=(NonCopyable &&other) = default;
};
/**
* A type that inherits from NonMovable cannot be moved anymore.
*/
class NonMovable {
public:
/* Disable move construction and assignment. */

View File

@@ -20,9 +20,21 @@
/** \file
* \ingroup bli
*
* This vector wraps a dynamically sized array of a specific type. It supports small object
* optimization. That means, when the vector only contains a few elements, no memory allocation is
* performed. Instead, those elements are stored directly in the vector.
* A `BLI::Vector<T>` is a dynamically growing contiguous array for values of type T. It is
* designed to be a more convenient and efficient replacement for `std::vector`. Note that the term
* "vector" has nothing to do with a vector from computer graphics here.
*
* A vector supports efficient insertion and removal at the end (O(1) amortized). Removal in other
* places takes O(n) time, because all elements afterwards have to be moved. If the order of
* elements is not important, `remove_and_reorder` can be used instead of `remove` for better
* performance.
*
* The improved efficiency is mainly achieved by supporting small buffer optimization. As long as
* the number of elements in the vector does not become larger than InlineBufferCapacity, no memory
* allocation is done. As a consequence, iterators are invalidated when a BLI::Vector is moved
* (iterators of std::vector remain valid when the vector is moved).
*
* `BLI::Vector` should be your default choice for a vector data structure in Blender.
*/
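A small usage sketch for the points above (illustrative only; it assumes this header and <utility> are included):

static void vector_example()
{
  /* With an inline buffer capacity of 8, these appends never touch the heap. */
  BLI::Vector<int, 8> values;
  for (int i = 0; i < 5; i++) {
    values.append(i);
  }

  /* Moving may relocate elements out of the inline buffer, so iterators and pointers into
   * `values` must not be used afterwards. */
  BLI::Vector<int, 8> stolen = std::move(values);
  BLI_assert(stolen.size() == 5 && values.size() == 0);
}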
#include <algorithm>
@@ -37,30 +49,69 @@
#include "BLI_listbase_wrapper.hh"
#include "BLI_math_base.h"
#include "BLI_memory_utils.hh"
#include "BLI_string.h"
#include "BLI_string_ref.hh"
#include "BLI_utildefines.h"
#include "MEM_guardedalloc.h"
namespace BLI {
template<typename T, uint InlineBufferCapacity = 4, typename Allocator = GuardedAllocator>
template<
/**
* Type of the values stored in this vector. It has to be movable.
*/
typename T,
/**
* The number of values that can be stored in this vector, without doing a heap allocation.
* Sometimes it makes sense to increase this value a lot. The memory in the inline buffer is
* not initialized when it is not needed.
*
* When T is large, the small buffer optimization is disabled by default to avoid large
* unexpected allocations on the stack. It can still be enabled explicitly though.
*/
uint InlineBufferCapacity = (sizeof(T) < 100) ? 4 : 0,
/**
* The allocator used by this vector. Should rarely be changed, except when you don't want
* MEM_* to be used internally.
*/
typename Allocator = GuardedAllocator>
class Vector {
private:
/**
* Use pointers instead of storing the size explicitly. This reduces the number of instructions
* in `append`.
*
* The pointers might point to the memory in the inline buffer.
*/
T *m_begin;
T *m_end;
T *m_capacity_end;
Allocator m_allocator;
AlignedBuffer<(uint)sizeof(T) * InlineBufferCapacity, (uint)alignof(T)> m_small_buffer;
/** Used for allocations when the inline buffer is too small. */
Allocator m_allocator;
/** A placeholder buffer that will remain uninitialized until it is used. */
AlignedBuffer<(uint)sizeof(T) * InlineBufferCapacity, (uint)alignof(T)> m_inline_buffer;
/**
* Store the size of the vector explicitly in debug builds. Otherwise you'd always have to call
* the `size` function or do the math to compute it from the pointers manually. This is rather
* annoying. Knowing the size of a vector is often quite essential when debugging some code.
*/
#ifndef NDEBUG
/* Storing size in debug builds, because it makes debugging much easier sometimes. */
uint m_debug_size;
# define UPDATE_VECTOR_SIZE(ptr) (ptr)->m_debug_size = (uint)((ptr)->m_end - (ptr)->m_begin)
#else
# define UPDATE_VECTOR_SIZE(ptr) ((void)0)
#endif
template<typename OtherT, uint OtherN, typename OtherAllocator> friend class Vector;
/**
* Be a friend of other vector instantiations. This is necessary to implement some memory
* management logic.
*/
template<typename OtherT, uint OtherInlineBufferCapacity, typename OtherAllocator>
friend class Vector;
public:
/**
@@ -69,7 +120,7 @@ class Vector {
*/
Vector()
{
m_begin = this->small_buffer();
m_begin = this->inline_buffer();
m_end = m_begin;
m_capacity_end = m_begin + InlineBufferCapacity;
UPDATE_VECTOR_SIZE(this);
@@ -77,15 +128,12 @@ class Vector {
/**
* Create a vector with a specific size.
* The elements will be default initialized.
* The elements will be default constructed.
* If T is trivially constructible, the elements in the vector are not touched.
*/
explicit Vector(uint size) : Vector()
{
this->reserve(size);
this->increase_size_unchecked(size);
for (T *current = m_begin; current != m_end; current++) {
new (current) T();
}
this->resize(size);
}
/**
@@ -94,25 +142,29 @@ class Vector {
Vector(uint size, const T &value) : Vector()
{
this->reserve(size);
this->increase_size_unchecked(size);
this->increase_size_by_unchecked(size);
BLI::uninitialized_fill_n(m_begin, size, value);
}
/**
* Create a vector from an initializer list.
* Create a vector that contains copies of the values in the initializer list.
*
* This allows you to write code like:
* Vector<int> vec = {3, 4, 5};
*/
Vector(std::initializer_list<T> values) : Vector(ArrayRef<T>(values))
Vector(const std::initializer_list<T> &values) : Vector(ArrayRef<T>(values))
{
}
/**
* Create a vector from an array ref.
* Create a vector from an array ref. The values in the vector are copy constructed.
*/
Vector(ArrayRef<T> values) : Vector()
{
this->reserve(values.size());
this->increase_size_unchecked(values.size());
BLI::uninitialized_copy_n(values.begin(), values.size(), this->begin());
uint size = values.size();
this->reserve(size);
this->increase_size_by_unchecked(size);
BLI::uninitialized_copy_n(values.data(), size, m_begin);
}
/**
@@ -129,45 +181,53 @@ class Vector {
}
/**
* Create a vector from a ListBase.
* Create a vector from a ListBase. The caller has to make sure that the values in the linked
* list have the correct type.
*
* Example Usage:
* Vector<ModifierData *> modifiers(ob->modifiers);
*/
Vector(ListBase &values) : Vector()
{
for (T value : ListBaseWrapper<typename std::remove_pointer<T>::type>(values)) {
LISTBASE_FOREACH (T, value, &values) {
this->append(value);
}
}
/**
* Create a copy of another vector.
* The other vector will not be changed.
* If the other vector has less than InlineBufferCapacity elements, no allocation will be made.
* Create a copy of another vector. The other vector will not be changed. If the other vector has
* less than InlineBufferCapacity elements, no allocation will be made.
*/
Vector(const Vector &other) : m_allocator(other.m_allocator)
{
this->init_copy_from_other_vector(other);
}
template<uint OtherN>
Vector(const Vector<T, OtherN, Allocator> &other) : m_allocator(other.m_allocator)
/**
* Create a copy of a vector with a different InlineBufferCapacity. This has to be handled
* separately, so that the non-templated constructor above remains the actual copy constructor.
*/
template<uint OtherInlineBufferCapacity>
Vector(const Vector<T, OtherInlineBufferCapacity, Allocator> &other)
: m_allocator(other.m_allocator)
{
this->init_copy_from_other_vector(other);
}
/**
* Steal the elements from another vector.
* This does not do an allocation.
* The other vector will have zero elements afterwards.
* Steal the elements from another vector. This does not do an allocation. The other vector will
* have zero elements afterwards.
*/
template<uint OtherN>
Vector(Vector<T, OtherN, Allocator> &&other) noexcept : m_allocator(other.m_allocator)
template<uint OtherInlineBufferCapacity>
Vector(Vector<T, OtherInlineBufferCapacity, Allocator> &&other) noexcept
: m_allocator(other.m_allocator)
{
uint size = other.size();
if (other.is_small()) {
if (other.is_inline()) {
if (size <= InlineBufferCapacity) {
/* Copy between inline buffers. */
m_begin = this->small_buffer();
m_begin = this->inline_buffer();
m_end = m_begin + size;
m_capacity_end = m_begin + InlineBufferCapacity;
uninitialized_relocate_n(other.m_begin, size, m_begin);
@@ -175,8 +235,7 @@ class Vector {
else {
/* Copy from inline buffer to newly allocated buffer. */
uint capacity = size;
m_begin = (T *)m_allocator.allocate_aligned(
sizeof(T) * capacity, std::alignment_of<T>::value, __func__);
m_begin = (T *)m_allocator.allocate(sizeof(T) * capacity, alignof(T), AT);
m_end = m_begin + size;
m_capacity_end = m_begin + capacity;
uninitialized_relocate_n(other.m_begin, size, m_begin);
@@ -189,9 +248,9 @@ class Vector {
m_capacity_end = other.m_capacity_end;
}
other.m_begin = other.small_buffer();
other.m_begin = other.inline_buffer();
other.m_end = other.m_begin;
other.m_capacity_end = other.m_begin + OtherN;
other.m_capacity_end = other.m_begin + OtherInlineBufferCapacity;
UPDATE_VECTOR_SIZE(this);
UPDATE_VECTOR_SIZE(&other);
}
@@ -199,7 +258,7 @@ class Vector {
~Vector()
{
destruct_n(m_begin, this->size());
if (!this->is_small()) {
if (!this->is_inline()) {
m_allocator.deallocate(m_begin);
}
}
@@ -242,8 +301,8 @@ class Vector {
return *this;
}
/* This can fail, when the vector is used to build a recursive data structure.
See https://youtu.be/7Qgd9B1KuMQ?t=840. */
/* This can be incorrect, when the vector is used to build a recursive data structure. However,
we don't take care of it at this low level. See https://youtu.be/7Qgd9B1KuMQ?t=840. */
this->~Vector();
new (this) Vector(std::move(other));
@@ -251,13 +310,55 @@ class Vector {
}
/**
* Make sure that enough memory is allocated to hold size elements.
* This won't necessarily make an allocation when size is small.
* Make sure that enough memory is allocated to hold min_capacity elements.
* This won't necessarily make an allocation when min_capacity is small.
* The actual size of the vector does not change.
*/
void reserve(uint size)
void reserve(uint min_capacity)
{
this->grow(size);
if (min_capacity > this->capacity()) {
this->realloc_to_at_least(min_capacity);
}
}
/**
* Change the size of the vector so that it contains new_size elements.
* If new_size is smaller than the old size, the elements at the end of the vector are
* destructed. If new_size is larger than the old size, the new elements at the end are default
* constructed. If T is trivially constructible, the memory is not touched by this function.
*/
void resize(uint new_size)
{
uint old_size = this->size();
if (new_size > old_size) {
this->reserve(new_size);
default_construct_n(m_begin + old_size, new_size - old_size);
}
else {
destruct_n(m_begin + new_size, old_size - new_size);
}
m_end = m_begin + new_size;
UPDATE_VECTOR_SIZE(this);
}
/**
* Change the size of the vector so that it contains new_size elements.
* If new_size is smaller than the old size, the elements at the end of the vector are
* destructed. If new_size is larger than the old size, the new elements will be copy constructed
* from the given value.
*/
void resize(uint new_size, const T &value)
{
uint old_size = this->size();
if (new_size > old_size) {
this->reserve(new_size);
uninitialized_fill_n(m_begin + old_size, new_size - old_size, value);
}
else {
destruct_n(m_begin + new_size, old_size - new_size);
}
m_end = m_begin + new_size;
UPDATE_VECTOR_SIZE(this);
}
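A quick sketch of the two resize overloads above (illustrative only, assuming <string> is available):

static void resize_example()
{
  BLI::Vector<std::string> names = {"a", "b", "c"};
  names.resize(5);      /* Two default constructed (empty) strings are appended. */
  names.resize(2);      /* The elements at index 2..4 are destructed. */
  names.resize(4, "x"); /* Two copies of "x" are appended. */
}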
/**
@@ -275,14 +376,14 @@ class Vector {
* Afterwards the vector has 0 elements and any allocated memory
* will be freed.
*/
void clear_and_make_small()
void clear_and_make_inline()
{
destruct_n(m_begin, this->size());
if (!this->is_small()) {
if (!this->is_inline()) {
m_allocator.deallocate(m_begin);
}
m_begin = this->small_buffer();
m_begin = this->inline_buffer();
m_end = m_begin;
m_capacity_end = m_begin + InlineBufferCapacity;
UPDATE_VECTOR_SIZE(this);
@@ -291,19 +392,24 @@ class Vector {
/**
* Insert a new element at the end of the vector.
* This might cause a reallocation when the capacity is exceeded.
*
* This is similar to std::vector::push_back.
*/
void append(const T &value)
{
this->ensure_space_for_one();
this->append_unchecked(value);
}
void append(T &&value)
{
this->ensure_space_for_one();
this->append_unchecked(std::move(value));
}
/**
* Append the value to the vector and return the index that can be used to access the newly
* added value.
*/
uint append_and_get_index(const T &value)
{
uint index = this->size();
@@ -311,6 +417,11 @@ class Vector {
return index;
}
/**
* Append the value if it is not yet in the vector. This has to do a linear search to check if
* the value is in the vector. Therefore, this should only be called when it is known that the
* vector is small.
*/
void append_non_duplicates(const T &value)
{
if (!this->contains(value)) {
@@ -318,6 +429,11 @@ class Vector {
}
}
/**
* Append the value and assume that the vector has enough memory reserved. This invokes undefined
* behavior when not enough capacity has been reserved beforehand. Only use this in performance
* critical code.
*/
void append_unchecked(const T &value)
{
BLI_assert(m_end < m_capacity_end);
@@ -325,7 +441,6 @@ class Vector {
m_end++;
UPDATE_VECTOR_SIZE(this);
}
void append_unchecked(T &&value)
{
BLI_assert(m_end < m_capacity_end);
@@ -342,10 +457,16 @@ class Vector {
{
this->reserve(this->size() + n);
BLI::uninitialized_fill_n(m_end, n, value);
this->increase_size_unchecked(n);
this->increase_size_by_unchecked(n);
}
void increase_size_unchecked(uint n)
/**
* Enlarges the size of the internal buffer that is considered to be initialized. This invokes
* undefined behavior when the new size is larger than the capacity. The method can be
* useful when you want to call constructors in the vector yourself. This should only be done in
* very rare cases and has to be justified every time.
*/
void increase_size_by_unchecked(uint n)
{
BLI_assert(m_end + n <= m_capacity_end);
m_end += n;
@@ -354,18 +475,24 @@ class Vector {
/**
* Copy the elements of another array to the end of this vector.
*
* This can be used to emulate parts of std::vector::insert.
*/
void extend(ArrayRef<T> array)
{
this->extend(array.begin(), array.size());
this->extend(array.data(), array.size());
}
void extend(const T *start, uint amount)
{
this->reserve(this->size() + amount);
this->extend_unchecked(start, amount);
}
/**
* Adds all elements from the array that are not already in the vector. This is an expensive
* operation when the vector is large, but can be very cheap when it is known that the vector is
* small.
*/
void extend_non_duplicates(ArrayRef<T> array)
{
for (const T &value : array) {
@@ -373,11 +500,14 @@ class Vector {
}
}
/**
* Extend the vector without bounds checking. It is assumed that enough memory has been reserved
* beforehand. Only use this in performance critical code.
*/
void extend_unchecked(ArrayRef<T> array)
{
this->extend_unchecked(array.begin(), array.size());
this->extend_unchecked(array.data(), array.size());
}
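The *_unchecked methods are meant to be paired with an explicit reserve call; a hedged sketch of that pattern (not part of the commit):

static void append_squares(BLI::Vector<int> &values, uint n)
{
  /* Reserve once, then skip the per-element capacity check. */
  values.reserve(values.size() + n);
  for (uint i = 0; i < n; i++) {
    values.append_unchecked((int)(i * i));
  }
}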
void extend_unchecked(const T *start, uint amount)
{
BLI_assert(m_begin + amount <= m_capacity_end);
@@ -395,7 +525,6 @@ class Vector {
BLI_assert(this->size() > 0);
return *(m_end - 1);
}
T &last()
{
BLI_assert(this->size() > 0);
@@ -407,9 +536,12 @@ class Vector {
*/
void fill(const T &value)
{
std::fill(m_begin, m_end, value);
initialized_fill_n(m_begin, this->size(), value);
}
/**
* Copy the value to all positions specified by the indices array.
*/
void fill_indices(ArrayRef<uint> indices, const T &value)
{
MutableArrayRef<T>(*this).fill_indices(indices, value);
@@ -426,6 +558,8 @@ class Vector {
/**
* Returns true when the vector contains no elements, otherwise false.
*
* This is the same as std::vector::empty.
*/
bool is_empty() const
{
@@ -433,33 +567,36 @@ class Vector {
}
/**
* Deconstructs the last element and decreases the size by one.
* This will assert when the vector is empty.
* Destructs the last element and decreases the size by one. This invokes undefined behavior when
* the vector is empty.
*/
void remove_last()
{
BLI_assert(!this->is_empty());
m_end--;
destruct(m_end);
m_end->~T();
UPDATE_VECTOR_SIZE(this);
}
/**
* Remove the last element from the vector and return it.
* Remove the last element from the vector and return it. This invokes undefined behavior when
* the vector is empty.
*
* This is similar to std::vector::pop_back.
*/
T pop_last()
{
BLI_assert(!this->is_empty());
m_end--;
T value = std::move(*m_end);
destruct(m_end);
m_end->~T();
UPDATE_VECTOR_SIZE(this);
return value;
}
/**
* Delete any element in the vector.
* The empty space will be filled by the previously last element.
* Delete any element in the vector. The empty space will be filled by the previously last
* element. This takes O(1) time.
*/
void remove_and_reorder(uint index)
{
@@ -469,37 +606,60 @@ class Vector {
if (element_to_remove < m_end) {
*element_to_remove = std::move(*m_end);
}
destruct(m_end);
m_end->~T();
UPDATE_VECTOR_SIZE(this);
}
/**
* Finds the first occurrence of the value, removes it and moves the last element into the hole in
* the vector. This takes O(n) time.
*/
void remove_first_occurrence_and_reorder(const T &value)
{
uint index = this->index(value);
uint index = this->first_index_of(value);
this->remove_and_reorder((uint)index);
}
/**
* Remove the element at the given index and move all values coming after it one towards the
* front. This takes O(n) time. If the order is not important, remove_and_reorder should be used
* instead.
*
* This is similar to std::vector::erase.
*/
void remove(uint index)
{
BLI_assert(index < this->size());
uint last_index = this->size() - 1;
for (uint i = index; i < last_index; i++) {
m_begin[i] = std::move(m_begin[i + 1]);
}
m_begin[last_index].~T();
m_end--;
UPDATE_VECTOR_SIZE(this);
}
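To make the different removal and lookup methods above concrete, an illustrative sketch (not part of the commit):

static void removal_example()
{
  BLI::Vector<int> values = {4, 5, 6, 7};

  values.remove_and_reorder(0);                  /* O(1): values is now {7, 5, 6}. */
  values.remove(0);                              /* O(n), order kept: values is now {5, 6}. */
  values.remove_first_occurrence_and_reorder(6); /* values is now {5}. */

  BLI_assert(values.first_index_of(5) == 0);
  BLI_assert(values.first_index_of_try(6) == -1);
}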
/**
* Do a linear search to find the value in the vector.
* When found, return the first index, otherwise return -1.
*/
int index_try(const T &value) const
int first_index_of_try(const T &value) const
{
for (T *current = m_begin; current != m_end; current++) {
if (*current == value) {
return current - m_begin;
return (int)(current - m_begin);
}
}
return -1;
}
/**
* Do a linear search to find the value in the vector.
* When found, return the first index, otherwise fail.
* Do a linear search to find the value in the vector and return the found index. This invokes
* undefined behavior when the value is not in the vector.
*/
uint index(const T &value) const
uint first_index_of(const T &value) const
{
int index = this->index_try(value);
int index = this->first_index_of_try(value);
BLI_assert(index >= 0);
return (uint)index;
}
@@ -510,27 +670,13 @@ class Vector {
*/
bool contains(const T &value) const
{
return this->index_try(value) != -1;
return this->first_index_of_try(value) != -1;
}
/**
* Compare vectors element-wise.
* Return true when they have the same length and all elements
* compare equal, otherwise false.
* Get the value at the given index. This invokes undefined behavior when the index is out of
* bounds.
*/
static bool all_equal(const Vector &a, const Vector &b)
{
if (a.size() != b.size()) {
return false;
}
for (uint i = 0; i < a.size(); i++) {
if (a[i] != b[i]) {
return false;
}
}
return true;
}
const T &operator[](uint index) const
{
BLI_assert(index < this->size());
@@ -543,6 +689,22 @@ class Vector {
return m_begin[index];
}
/**
* Get access to the underlying array.
*/
T *data()
{
return m_begin;
}
/**
* Get access to the underlying array.
*/
const T *data() const
{
return m_begin;
}
T *begin()
{
return m_begin;
@@ -562,74 +724,94 @@ class Vector {
}
/**
* Get the current capacity of the vector.
* Get the current capacity of the vector, i.e. the maximum number of elements the vector can
* hold, before it has to reallocate.
*/
uint capacity() const
{
return (uint)(m_capacity_end - m_begin);
}
/**
* Get an index range that makes looping over all indices more convenient and less error prone.
* Obviously, this should only be used when you actually need the index in the loop.
*
* Example:
* for (uint i : myvector.index_range()) {
* do_something(i, my_vector[i]);
* }
*/
IndexRange index_range() const
{
return IndexRange(this->size());
}
void print_stats() const
/**
* Print some debug information about the vector.
*/
void print_stats(StringRef name = "") const
{
std::cout << "Small Vector at " << (void *)this << ":" << std::endl;
std::cout << " Elements: " << this->size() << std::endl;
std::cout << " Capacity: " << (m_capacity_end - m_begin) << std::endl;
std::cout << " Small Elements: " << InlineBufferCapacity
<< " Size on Stack: " << sizeof(*this) << std::endl;
std::cout << "Vector Stats: " << name << "\n";
std::cout << " Address: " << this << "\n";
std::cout << " Elements: " << this->size() << "\n";
std::cout << " Capacity: " << (m_capacity_end - m_begin) << "\n";
std::cout << " Inline Capacity: " << InlineBufferCapacity << "\n";
char memory_size_str[15];
BLI_str_format_byte_unit(memory_size_str, sizeof(*this), true);
std::cout << " Size on Stack: " << memory_size_str << "\n";
}
private:
T *small_buffer() const
T *inline_buffer() const
{
return (T *)m_small_buffer.ptr();
return (T *)m_inline_buffer.ptr();
}
bool is_small() const
bool is_inline() const
{
return m_begin == this->small_buffer();
return m_begin == this->inline_buffer();
}
void ensure_space_for_one()
{
if (UNLIKELY(m_end >= m_capacity_end)) {
this->grow(std::max(this->size() * 2, (uint)1));
this->realloc_to_at_least(this->size() + 1);
}
}
BLI_NOINLINE void grow(uint min_capacity)
BLI_NOINLINE void realloc_to_at_least(uint min_capacity)
{
if (this->capacity() >= min_capacity) {
return;
}
/* Round up to the next power of two. Otherwise consecutive calls to grow can cause a
* reallocation every time even though the min_capacity only increments. */
min_capacity = power_of_2_max_u(min_capacity);
/* At least double the size of the previous allocation. Otherwise consecutive calls to grow can
* cause a reallocation every time even though min_capacity only increments. */
uint min_new_capacity = this->capacity() * 2;
uint new_capacity = std::max(min_capacity, min_new_capacity);
uint size = this->size();
T *new_array = (T *)m_allocator.allocate_aligned(
min_capacity * (uint)sizeof(T), std::alignment_of<T>::value, "grow BLI::Vector");
T *new_array = (T *)m_allocator.allocate(new_capacity * (uint)sizeof(T), alignof(T), AT);
uninitialized_relocate_n(m_begin, size, new_array);
if (!this->is_small()) {
if (!this->is_inline()) {
m_allocator.deallocate(m_begin);
}
m_begin = new_array;
m_end = m_begin + size;
m_capacity_end = m_begin + min_capacity;
m_capacity_end = m_begin + new_capacity;
}
/**
* Initialize all properties, except for m_allocator, which has to be initialized beforehand.
*/
template<uint OtherN> void init_copy_from_other_vector(const Vector<T, OtherN, Allocator> &other)
template<uint OtherInlineBufferCapacity>
void init_copy_from_other_vector(const Vector<T, OtherInlineBufferCapacity, Allocator> &other)
{
m_allocator = other.m_allocator;
@@ -637,19 +819,18 @@ class Vector {
uint capacity = size;
if (size <= InlineBufferCapacity) {
m_begin = this->small_buffer();
m_begin = this->inline_buffer();
capacity = InlineBufferCapacity;
}
else {
m_begin = (T *)m_allocator.allocate_aligned(
sizeof(T) * size, std::alignment_of<T>::value, __func__);
m_begin = (T *)m_allocator.allocate(sizeof(T) * size, alignof(T), AT);
capacity = size;
}
m_end = m_begin + size;
m_capacity_end = m_begin + capacity;
uninitialized_copy(other.begin(), other.end(), m_begin);
uninitialized_copy_n(other.data(), size, m_begin);
UPDATE_VECTOR_SIZE(this);
}
};

File diff suppressed because it is too large

View File

@@ -0,0 +1,171 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __BLI_VECTOR_SET_SLOTS_HH__
#define __BLI_VECTOR_SET_SLOTS_HH__
/** \file
* \ingroup bli
*
* This file contains slot types that are supposed to be used with BLI::VectorSet.
*
* Every slot type has to be able to hold an integer index and state information.
* A vector set slot has three possible states: empty, occupied and removed.
*
* A vector set slot type has to implement a couple of methods that are explained in
* SimpleVectorSetSlot.
* A vector set slot type is assumed to be trivially destructible when it is in the empty or
* removed state.
*
* Possible Improvements:
* - Implement a slot type that stores the hash.
* - Implement a slot type that stores the key. That means that the key would be stored in two
* places: the key vector and the slot itself. Maybe storing the key in the slot as well can
* result in better performance, due to better cache utilization.
*/
#include "BLI_sys_types.h"
namespace BLI {
/**
* The simplest possible vector set slot. It stores the index and state in a signed integer. If the
* value is negative, it represents the empty or removed state. Otherwise it represents the index.
*/
template<typename Key> class SimpleVectorSetSlot {
private:
#define s_is_empty -1
#define s_is_removed -2
/**
* After the default constructor has run, the slot has to be in the empty state.
*/
int32_t m_state = s_is_empty;
public:
/**
* Return true if this slot contains an index to a key.
*/
bool is_occupied() const
{
return m_state >= 0;
}
/**
* Return true if the slot is empty, i.e. it does not contain an index.
*/
bool is_empty() const
{
return m_state == s_is_empty;
}
/**
* Return the stored index. It is assumed that the slot is occupied.
*/
uint32_t index() const
{
BLI_assert(this->is_occupied());
return (uint32_t)m_state;
}
/**
* Return true if the slot contains the given key, i.e. its index points to a key that compares
* equal to it. The hash can be used by other implementations to determine inequality faster.
*/
template<typename ForwardKey, typename IsEqual>
bool contains(const ForwardKey &key,
const IsEqual &is_equal,
uint32_t UNUSED(hash),
const Key *keys) const
{
if (m_state >= 0) {
return is_equal(key, keys[m_state]);
}
return false;
}
/**
* Move the other slot into this slot and destruct it. We do destruction here, because this way
* we can avoid a comparison with the state, since we know the slot is occupied. For this
* specific slot implementation, this does not make a difference.
*/
void relocate_occupied_here(SimpleVectorSetSlot &other, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
BLI_assert(other.is_occupied());
m_state = other.m_state;
}
/**
* Change the state of this slot from empty/removed to occupied. The hash can be used by other
* slot implementations.
*/
void occupy(uint32_t index, uint32_t UNUSED(hash))
{
BLI_assert(!this->is_occupied());
m_state = (int32_t)index;
}
/**
* The key has changed its position in the vector, so the index has to be updated. This method
* can assume that the slot is currently occupied.
*/
void update_index(uint32_t index)
{
BLI_assert(this->is_occupied());
m_state = (int32_t)index;
}
/**
* Change the state of this slot from occupied to removed.
*/
void remove()
{
BLI_assert(this->is_occupied());
m_state = s_is_removed;
}
/**
* Return true if this slot is currently occupied and its corresponding key has the given index.
*/
bool has_index(uint32_t index) const
{
return (uint32_t)m_state == index;
}
/**
* Return the hash of the currently stored key. In this simple set slot implementation, we just
* compute the hash here. Other implementations might store the hash in the slot instead.
*/
template<typename Hash> uint32_t get_hash(const Key &key, const Hash &hash) const
{
BLI_assert(this->is_occupied());
return hash(key);
}
#undef s_is_empty
#undef s_is_removed
};
template<typename Key> struct DefaultVectorSetSlot;
template<typename Key> struct DefaultVectorSetSlot {
using type = SimpleVectorSetSlot<Key>;
};
} // namespace BLI
#endif /* __BLI_VECTOR_SET_SLOTS_HH__ */
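The "Possible Improvements" note at the top of this file mentions a slot type that additionally stores the hash. Purely as a hedged sketch of what such a slot could look like under the interface documented in SimpleVectorSetSlot (hypothetical, not part of this commit; it assumes BLI_assert and the UNUSED macro from BLI_utildefines.h are available), only the methods that would differ are spelled out:

/* Hypothetical: a vector set slot that caches the hash of its key, so probing can reject
 * mismatching slots without touching the key vector. All other methods (is_empty,
 * relocate_occupied_here, update_index, remove, has_index) would mirror SimpleVectorSetSlot. */
template<typename Key> class HashCachingVectorSetSlot {
 private:
  static constexpr int32_t s_is_empty = -1;
  static constexpr int32_t s_is_removed = -2;

  int32_t m_state = s_is_empty;
  uint32_t m_hash = 0;

 public:
  bool is_occupied() const
  {
    return m_state >= 0;
  }

  void occupy(uint32_t index, uint32_t hash)
  {
    BLI_assert(!this->is_occupied());
    m_state = (int32_t)index;
    m_hash = hash;
  }

  template<typename ForwardKey, typename IsEqual>
  bool contains(const ForwardKey &key, const IsEqual &is_equal, uint32_t hash, const Key *keys) const
  {
    /* The cached hash lets us skip the potentially expensive key comparison. */
    if (m_state >= 0 && m_hash == hash) {
      return is_equal(key, keys[m_state]);
    }
    return false;
  }

  template<typename Hash> uint32_t get_hash(const Key &UNUSED(key), const Hash &UNUSED(hash)) const
  {
    BLI_assert(this->is_occupied());
    return m_hash;
  }
};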

View File

@@ -187,6 +187,7 @@ set(SRC
BLI_hash_md5.h
BLI_hash_mm2a.h
BLI_hash_mm3.h
BLI_hash_tables.hh
BLI_heap.h
BLI_heap_simple.h
BLI_index_mask.hh
@@ -204,6 +205,7 @@ set(SRC
BLI_listbase.h
BLI_listbase_wrapper.hh
BLI_map.hh
BLI_map_slots.hh
BLI_math.h
BLI_math_base.h
BLI_math_bits.h
@@ -224,16 +226,17 @@ set(SRC
BLI_memory_utils.hh
BLI_mempool.h
BLI_noise.h
BLI_open_addressing.hh
BLI_optional.hh
BLI_path_util.h
BLI_polyfill_2d.h
BLI_polyfill_2d_beautify.h
BLI_probing_strategies.hh
BLI_quadric.h
BLI_rand.h
BLI_rect.h
BLI_scanfill.h
BLI_set.hh
BLI_set_slots.hh
BLI_smallhash.h
BLI_sort.h
BLI_sort_utils.h
@@ -242,7 +245,6 @@ set(SRC
BLI_strict_flags.h
BLI_string.h
BLI_string_cursor_utf8.h
BLI_string_map.hh
BLI_string_ref.hh
BLI_string_utf8.h
BLI_string_utils.h
@@ -261,6 +263,7 @@ set(SRC
BLI_uvproject.h
BLI_vector.hh
BLI_vector_set.hh
BLI_vector_set_slots.hh
BLI_vfontdata.h
BLI_voronoi_2d.h
BLI_voxel.h

View File

@@ -50,7 +50,7 @@ ArrayRef<uint> IndexRange::as_array_ref() const
}
arrays.append(std::move(new_array));
current_array = arrays.last().begin();
current_array = arrays.last().data();
std::atomic_thread_fence(std::memory_order_seq_cst);
current_array_size = new_size;

View File

@@ -77,8 +77,7 @@ MINLINE int bitscan_reverse_i(int a)
MINLINE unsigned int bitscan_reverse_clear_uint(unsigned int *a)
{
unsigned int i = bitscan_reverse_uint(*a);
/* TODO(sergey): This could probably be optimized. */
*a &= ~(1 << (sizeof(unsigned int) * 8 - i - 1));
*a &= ~(0x80000000 >> i);
return i;
}
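A quick worked check of the new mask against the first case in the math_bits test added further below: for a = 1234 the highest set bit is bit 10, so bitscan_reverse_uint returns 31 - 10 = 21; 0x80000000 >> 21 equals 1 << 10 = 1024, and clearing it leaves 1234 - 1024 = 210. This is the same bit the old expression ~(1 << (sizeof(unsigned int) * 8 - i - 1)) cleared, just without the extra shift arithmetic.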
@@ -97,10 +96,10 @@ MINLINE unsigned int highest_order_bit_uint(unsigned int n)
MINLINE unsigned short highest_order_bit_s(unsigned short n)
{
n |= (n >> 1);
n |= (n >> 2);
n |= (n >> 4);
n |= (n >> 8);
n |= (unsigned short)(n >> 1);
n |= (unsigned short)(n >> 2);
n |= (unsigned short)(n >> 4);
n |= (unsigned short)(n >> 8);
return (unsigned short)(n - (n >> 1));
}

View File

@@ -66,6 +66,13 @@ bool IDNode::ComponentIDKey::operator==(const ComponentIDKey &other) const
return type == other.type && STREQ(name, other.name);
}
uint32_t IDNode::ComponentIDKey::hash() const
{
const int type_as_int = static_cast<int>(type);
return BLI_ghashutil_combine_hash(BLI_ghashutil_uinthash(type_as_int),
BLI_ghashutil_strhash_p(name));
}
/* Initialize 'id' node - from pointer data given. */
void IDNode::init(const ID *id, const char *UNUSED(subdata))
{

View File

@@ -50,6 +50,7 @@ const char *linkedStateAsString(eDepsNode_LinkedState_Type linked_state);
struct IDNode : public Node {
struct ComponentIDKey {
ComponentIDKey(NodeType type, const char *name = "");
uint32_t hash() const;
bool operator==(const ComponentIDKey &other) const;
NodeType type;
@@ -115,16 +116,3 @@ struct IDNode : public Node {
};
} // namespace DEG
namespace BLI {
template<> struct DefaultHash<DEG::IDNode::ComponentIDKey> {
uint32_t operator()(const DEG::IDNode::ComponentIDKey &key) const
{
const int type_as_int = static_cast<int>(key.type);
return BLI_ghashutil_combine_hash(BLI_ghashutil_uinthash(type_as_int),
BLI_ghashutil_strhash_p(key.name));
}
};
} // namespace BLI
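The DefaultHash specialization above is removed while ComponentIDKey gains a hash() method in the preceding hunks; presumably the reworked BLI::DefaultHash falls back to a member hash() method. Under that assumption, a key type only needs something like the following to be usable in BLI::Map (hypothetical sketch, not from the commit; it uses the same BLI_ghashutil_* helpers and STREQ as the code above):

/* Hypothetical key type; assumes BLI::DefaultHash dispatches to a hash() member. */
struct ComponentKey {
  int type;
  const char *name;

  uint32_t hash() const
  {
    return BLI_ghashutil_combine_hash(BLI_ghashutil_uinthash((unsigned int)type),
                                      BLI_ghashutil_strhash_p(name));
  }

  bool operator==(const ComponentKey &other) const
  {
    return type == other.type && STREQ(name, other.name);
  }
};

/* BLI::Map<ComponentKey, Node *> would then work without specializing BLI::DefaultHash (assumed). */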

View File

@@ -534,22 +534,20 @@ namespace CPPTypeUtil {
template<typename T> void construct_default_cb(void *ptr)
{
BLI::construct_default((T *)ptr);
new (ptr) T;
}
template<typename T> void construct_default_n_cb(void *ptr, uint n)
{
for (uint i = 0; i < n; i++) {
BLI::construct_default((T *)ptr + i);
}
BLI::default_construct_n((T *)ptr, n);
}
template<typename T> void construct_default_indices_cb(void *ptr, IndexMask index_mask)
{
index_mask.foreach_index([&](uint i) { BLI::construct_default((T *)ptr + i); });
index_mask.foreach_index([&](uint i) { new ((T *)ptr + i) T; });
}
template<typename T> void destruct_cb(void *ptr)
{
BLI::destruct((T *)ptr);
((T *)ptr)->~T();
}
template<typename T> void destruct_n_cb(void *ptr, uint n)
{
@@ -557,7 +555,8 @@ template<typename T> void destruct_n_cb(void *ptr, uint n)
}
template<typename T> void destruct_indices_cb(void *ptr, IndexMask index_mask)
{
index_mask.foreach_index([&](uint i) { BLI::destruct((T *)ptr + i); });
T *ptr_ = (T *)ptr;
index_mask.foreach_index([&](uint i) { ptr_[i].~T(); });
}
template<typename T> void copy_to_initialized_cb(const void *src, void *dst)
@@ -601,11 +600,15 @@ void copy_to_uninitialized_indices_cb(const void *src, void *dst, IndexMask inde
template<typename T> void relocate_to_initialized_cb(void *src, void *dst)
{
BLI::relocate((T *)src, (T *)dst);
T *src_ = (T *)src;
T *dst_ = (T *)dst;
*dst_ = std::move(*src_);
src_->~T();
}
template<typename T> void relocate_to_initialized_n_cb(void *src, void *dst, uint n)
{
BLI::relocate_n((T *)src, n, (T *)dst);
BLI::initialized_relocate_n((T *)src, n, (T *)dst);
}
template<typename T>
void relocate_to_initialized_indices_cb(void *src, void *dst, IndexMask index_mask)
@@ -621,7 +624,11 @@ void relocate_to_initialized_indices_cb(void *src, void *dst, IndexMask index_ma
template<typename T> void relocate_to_uninitialized_cb(void *src, void *dst)
{
BLI::uninitialized_relocate((T *)src, (T *)dst);
T *src_ = (T *)src;
T *dst_ = (T *)dst;
new (dst_) T(std::move(*src_));
src_->~T();
}
template<typename T> void relocate_to_uninitialized_n_cb(void *src, void *dst, uint n)
{

View File

@@ -1,4 +1,5 @@
#include "BLI_array_ref.hh"
#include "BLI_strict_flags.h"
#include "BLI_vector.hh"
#include "testing/testing.h"
@@ -136,14 +137,6 @@ TEST(array_ref, Count)
EXPECT_EQ(a_ref.count(5), 0);
}
TEST(array_ref, ToSmallVector)
{
IntVector a = {1, 2, 3, 4};
IntArrayRef a_ref = a;
IntVector b = a_ref;
IntVector::all_equal(a, b);
}
static void test_ref_from_initializer_list(IntArrayRef ref)
{
EXPECT_EQ(ref.size(), 4);
@@ -202,37 +195,12 @@ TEST(array_ref, FillIndices)
EXPECT_EQ(a[4], 0);
}
TEST(array_ref, CopyFrom)
{
std::array<int, 3> a = {3, 4, 5};
MutableIntArrayRef a_ref(a);
EXPECT_EQ(a[0], 3);
EXPECT_EQ(a[1], 4);
EXPECT_EQ(a[2], 5);
a_ref.copy_from({1, 2, 3});
EXPECT_EQ(a[0], 1);
EXPECT_EQ(a[1], 2);
EXPECT_EQ(a[2], 3);
}
TEST(array_ref, ByteSize)
TEST(array_ref, SizeInBytes)
{
std::array<int, 10> a;
IntArrayRef a_ref(a);
EXPECT_EQ(a_ref.byte_size(), sizeof(a));
EXPECT_EQ(a_ref.byte_size(), 40);
}
TEST(array_ref, CopyTo)
{
std::array<int, 3> a = {5, 6, 7};
int b[3] = {0};
IntArrayRef a_ref(a);
a_ref.copy_to(b);
EXPECT_EQ(b[0], 5);
EXPECT_EQ(b[1], 6);
EXPECT_EQ(b[2], 7);
EXPECT_EQ(a_ref.size_in_bytes(), sizeof(a));
EXPECT_EQ(a_ref.size_in_bytes(), 40);
}
TEST(array_ref, FirstLast)

View File

@@ -1,4 +1,5 @@
#include "BLI_array.hh"
#include "BLI_strict_flags.h"
#include "testing/testing.h"
using namespace BLI;
@@ -7,12 +8,14 @@ TEST(array, DefaultConstructor)
{
Array<int> array;
EXPECT_EQ(array.size(), 0);
EXPECT_TRUE(array.is_empty());
}
TEST(array, SizeConstructor)
{
Array<int> array(5);
EXPECT_EQ(array.size(), 5);
EXPECT_FALSE(array.is_empty());
}
TEST(array, FillConstructor)
@@ -55,7 +58,7 @@ TEST(array, CopyConstructor)
EXPECT_EQ(array.size(), 4);
EXPECT_EQ(new_array.size(), 4);
EXPECT_NE(array.begin(), new_array.begin());
EXPECT_NE(array.data(), new_array.data());
EXPECT_EQ(new_array[0], 5);
EXPECT_EQ(new_array[1], 6);
EXPECT_EQ(new_array[2], 7);
@@ -83,7 +86,7 @@ TEST(array, CopyAssignment)
new_array = array;
EXPECT_EQ(new_array.size(), 3);
EXPECT_EQ(array.size(), 3);
EXPECT_NE(array.begin(), new_array.begin());
EXPECT_NE(array.data(), new_array.data());
EXPECT_EQ(new_array[0], 1);
EXPECT_EQ(new_array[1], 2);
EXPECT_EQ(new_array[2], 3);
@@ -101,3 +104,23 @@ TEST(array, MoveAssignment)
EXPECT_EQ(new_array[1], 2);
EXPECT_EQ(new_array[2], 3);
}
/**
* Tests that the trivially constructible types are not zero-initialized. We do not want that for
* performance reasons.
*/
TEST(array, TrivialTypeSizeConstructor)
{
Array<char, 1> *array = new Array<char, 1>(1);
char *ptr = &(*array)[0];
array->~Array();
const char magic = 42;
*ptr = magic;
EXPECT_EQ(*ptr, magic);
new (array) Array<char, 1>(1);
EXPECT_EQ((*array)[0], magic);
EXPECT_EQ(*ptr, magic);
delete array;
}

View File

@@ -1,18 +1,17 @@
#include "BLI_index_range.hh"
#include "BLI_strict_flags.h"
#include "BLI_vector.hh"
#include "testing/testing.h"
using BLI::ArrayRef;
using BLI::IndexRange;
using IntVector = BLI::Vector<int>;
using namespace BLI;
TEST(index_range, DefaultConstructor)
{
IndexRange range;
EXPECT_EQ(range.size(), 0);
IntVector vector;
for (int value : range) {
Vector<uint> vector;
for (uint value : range) {
vector.append(value);
}
EXPECT_EQ(vector.size(), 0);
@@ -24,8 +23,8 @@ TEST(index_range, SingleElementRange)
EXPECT_EQ(range.size(), 1);
EXPECT_EQ(*range.begin(), 4);
IntVector vector;
for (int value : range) {
Vector<uint> vector;
for (uint value : range) {
vector.append(value);
}
@@ -38,8 +37,8 @@ TEST(index_range, MultipleElementRange)
IndexRange range(6, 4);
EXPECT_EQ(range.size(), 4);
IntVector vector;
for (int value : range) {
Vector<uint> vector;
for (uint value : range) {
vector.append(value);
}

View File

@@ -1,11 +1,12 @@
#include "BLI_linear_allocator.hh"
#include "BLI_strict_flags.h"
#include "testing/testing.h"
using namespace BLI;
static bool is_aligned(void *ptr, uint alignment)
{
BLI_assert(is_power_of_2_i(alignment));
BLI_assert(is_power_of_2_i((int)alignment));
return (POINTER_AS_UINT(ptr) & (alignment - 1)) == 0;
}
@@ -105,7 +106,7 @@ TEST(linear_allocator, ConstructArrayCopy)
Vector<int> values = {1, 2, 3};
MutableArrayRef<int> array1 = allocator.construct_array_copy(values.as_ref());
MutableArrayRef<int> array2 = allocator.construct_array_copy(values.as_ref());
EXPECT_NE(array1.begin(), array2.begin());
EXPECT_NE(array1.data(), array2.data());
EXPECT_EQ(array1.size(), 3);
EXPECT_EQ(array2.size(), 3);
EXPECT_EQ(array1[1], 2);

View File

@@ -1,20 +1,23 @@
#include "BLI_map.hh"
#include "BLI_rand.h"
#include "BLI_set.hh"
#include "BLI_strict_flags.h"
#include "BLI_timeit.hh"
#include "BLI_vector.hh"
#include "testing/testing.h"
using BLI::Map;
using IntFloatMap = Map<int, float>;
using namespace BLI;
TEST(map, DefaultConstructor)
{
IntFloatMap map;
Map<int, float> map;
EXPECT_EQ(map.size(), 0);
EXPECT_TRUE(map.is_empty());
}
TEST(map, AddIncreasesSize)
{
IntFloatMap map;
Map<int, float> map;
EXPECT_EQ(map.size(), 0);
EXPECT_TRUE(map.is_empty());
map.add(2, 5.0f);
@@ -27,7 +30,7 @@ TEST(map, AddIncreasesSize)
TEST(map, Contains)
{
IntFloatMap map;
Map<int, float> map;
EXPECT_FALSE(map.contains(4));
map.add(5, 6.0f);
EXPECT_FALSE(map.contains(4));
@@ -37,7 +40,7 @@ TEST(map, Contains)
TEST(map, LookupExisting)
{
IntFloatMap map;
Map<int, float> map;
map.add(2, 6.0f);
map.add(4, 1.0f);
EXPECT_EQ(map.lookup(2), 6.0f);
@@ -46,7 +49,7 @@ TEST(map, LookupExisting)
TEST(map, LookupNotExisting)
{
IntFloatMap map;
Map<int, float> map;
map.add(2, 4.0f);
map.add(1, 1.0f);
EXPECT_EQ(map.lookup_ptr(0), nullptr);
@@ -55,15 +58,16 @@ TEST(map, LookupNotExisting)
TEST(map, AddMany)
{
IntFloatMap map;
Map<int, int> map;
for (int i = 0; i < 100; i++) {
map.add(i, i);
map.add(i * 30, i);
map.add(i * 31, i);
}
}
TEST(map, PopItem)
{
IntFloatMap map;
Map<int, float> map;
map.add(2, 3.0f);
map.add(1, 9.0f);
EXPECT_TRUE(map.contains(2));
@@ -80,21 +84,21 @@ TEST(map, PopItem)
TEST(map, PopItemMany)
{
IntFloatMap map;
for (uint i = 0; i < 100; i++) {
Map<int, int> map;
for (int i = 0; i < 100; i++) {
map.add_new(i, i);
}
for (uint i = 25; i < 80; i++) {
for (int i = 25; i < 80; i++) {
EXPECT_EQ(map.pop(i), i);
}
for (uint i = 0; i < 100; i++) {
for (int i = 0; i < 100; i++) {
EXPECT_EQ(map.contains(i), i < 25 || i >= 80);
}
}
TEST(map, ValueIterator)
{
IntFloatMap map;
Map<int, float> map;
map.add(3, 5.0f);
map.add(1, 2.0f);
map.add(7, -2.0f);
@@ -115,7 +119,7 @@ TEST(map, ValueIterator)
TEST(map, KeyIterator)
{
IntFloatMap map;
Map<int, float> map;
map.add(6, 3.0f);
map.add(2, 4.0f);
map.add(1, 3.0f);
@@ -136,7 +140,7 @@ TEST(map, KeyIterator)
TEST(map, ItemIterator)
{
IntFloatMap map;
Map<int, float> map;
map.add(5, 3.0f);
map.add(2, 9.0f);
map.add(1, 0.0f);
@@ -160,6 +164,34 @@ TEST(map, ItemIterator)
EXPECT_TRUE(values.contains(0.0f));
}
TEST(map, MutableValueIterator)
{
Map<int, int> map;
map.add(3, 6);
map.add(2, 1);
for (int &value : map.values()) {
value += 10;
}
EXPECT_EQ(map.lookup(3), 16);
EXPECT_EQ(map.lookup(2), 11);
}
TEST(map, MutableItemIterator)
{
Map<int, int> map;
map.add(3, 6);
map.add(2, 1);
for (auto item : map.items()) {
item.value += item.key;
}
EXPECT_EQ(map.lookup(3), 9);
EXPECT_EQ(map.lookup(2), 3);
}
static float return_42()
{
return 42.0f;
@@ -167,14 +199,16 @@ static float return_42()
TEST(map, LookupOrAdd_SeparateFunction)
{
IntFloatMap map;
Map<int, float> map;
EXPECT_EQ(map.lookup_or_add(0, return_42), 42.0f);
EXPECT_EQ(map.lookup(0), 42);
map.keys();
}
TEST(map, LookupOrAdd_Lambdas)
{
IntFloatMap map;
Map<int, float> map;
auto lambda1 = []() { return 11.0f; };
EXPECT_EQ(map.lookup_or_add(0, lambda1), 11.0f);
auto lambda2 = []() { return 20.0f; };
@@ -186,7 +220,7 @@ TEST(map, LookupOrAdd_Lambdas)
TEST(map, AddOrModify)
{
IntFloatMap map;
Map<int, float> map;
auto create_func = [](float *value) {
*value = 10.0f;
return true;
@@ -201,13 +235,13 @@ TEST(map, AddOrModify)
EXPECT_EQ(map.lookup(1), 15.0f);
}
TEST(map, AddOverride)
TEST(map, AddOverwrite)
{
IntFloatMap map;
Map<int, float> map;
EXPECT_FALSE(map.contains(3));
EXPECT_TRUE(map.add_override(3, 6.0f));
EXPECT_TRUE(map.add_overwrite(3, 6.0f));
EXPECT_EQ(map.lookup(3), 6.0f);
EXPECT_FALSE(map.add_override(3, 7.0f));
EXPECT_FALSE(map.add_overwrite(3, 7.0f));
EXPECT_EQ(map.lookup(3), 7.0f);
EXPECT_FALSE(map.add(3, 8.0f));
EXPECT_EQ(map.lookup(3), 7.0f);
@@ -215,7 +249,7 @@ TEST(map, AddOverride)
TEST(map, LookupOrAddDefault)
{
IntFloatMap map;
Map<int, float> map;
map.lookup_or_add_default(3) = 6;
EXPECT_EQ(map.lookup(3), 6);
map.lookup_or_add_default(5) = 2;
@@ -226,10 +260,10 @@ TEST(map, LookupOrAddDefault)
TEST(map, MoveConstructorSmall)
{
IntFloatMap map1;
Map<int, float> map1;
map1.add(1, 2.0f);
map1.add(4, 1.0f);
IntFloatMap map2(std::move(map1));
Map<int, float> map2(std::move(map1));
EXPECT_EQ(map2.size(), 2);
EXPECT_EQ(map2.lookup(1), 2.0f);
EXPECT_EQ(map2.lookup(4), 1.0f);
@@ -239,24 +273,25 @@ TEST(map, MoveConstructorSmall)
TEST(map, MoveConstructorLarge)
{
IntFloatMap map1;
for (uint i = 0; i < 100; i++) {
Map<int, int> map1;
for (int i = 0; i < 100; i++) {
map1.add_new(i, i);
}
IntFloatMap map2(std::move(map1));
Map<int, int> map2(std::move(map1));
EXPECT_EQ(map2.size(), 100);
EXPECT_EQ(map2.lookup(1), 1.0f);
EXPECT_EQ(map2.lookup(4), 4.0f);
EXPECT_EQ(map2.lookup(1), 1);
EXPECT_EQ(map2.lookup(4), 4);
EXPECT_EQ(map1.size(), 0);
EXPECT_EQ(map1.lookup_ptr(4), nullptr);
}
TEST(map, MoveAssignment)
{
IntFloatMap map1;
Map<int, float> map1;
map1.add(1, 2.0f);
map1.add(4, 1.0f);
IntFloatMap map2 = std::move(map1);
Map<int, float> map2;
map2 = std::move(map1);
EXPECT_EQ(map2.size(), 2);
EXPECT_EQ(map2.lookup(1), 2.0f);
EXPECT_EQ(map2.lookup(4), 1.0f);
@@ -264,9 +299,23 @@ TEST(map, MoveAssignment)
EXPECT_EQ(map1.lookup_ptr(4), nullptr);
}
TEST(map, CopyAssignment)
{
Map<int, float> map1;
map1.add(1, 2.0f);
map1.add(4, 1.0f);
Map<int, float> map2;
map2 = map1;
EXPECT_EQ(map2.size(), 2);
EXPECT_EQ(map2.lookup(1), 2.0f);
EXPECT_EQ(map2.lookup(4), 1.0f);
EXPECT_EQ(map1.size(), 2);
EXPECT_EQ(*map1.lookup_ptr(4), 1.0f);
}
TEST(map, Clear)
{
IntFloatMap map;
Map<int, float> map;
map.add(1, 1.0f);
map.add(2, 5.0f);
@@ -292,12 +341,150 @@ TEST(map, UniquePtrValue)
Map<int, std::unique_ptr<int>> map;
map.add_new(1, std::move(value1));
map.add(2, std::move(value2));
map.add_override(3, std::move(value3));
map.add_overwrite(3, std::move(value3));
map.lookup_or_add(4, []() { return std::unique_ptr<int>(new int()); });
map.add_new(5, std::unique_ptr<int>(new int()));
map.add(6, std::unique_ptr<int>(new int()));
map.add_override(7, std::unique_ptr<int>(new int()));
map.add_overwrite(7, std::unique_ptr<int>(new int()));
EXPECT_EQ(map.lookup(1).get(), value1_ptr);
EXPECT_EQ(map.lookup_ptr(100), nullptr);
}
TEST(map, Remove)
{
Map<int, int> map;
map.add(2, 4);
EXPECT_EQ(map.size(), 1);
EXPECT_FALSE(map.remove(3));
EXPECT_EQ(map.size(), 1);
EXPECT_TRUE(map.remove(2));
EXPECT_EQ(map.size(), 0);
}
TEST(map, PointerKeys)
{
char a, b, c, d;
Map<char *, int> map;
EXPECT_TRUE(map.add(&a, 5));
EXPECT_FALSE(map.add(&a, 4));
map.add_new(&b, 1);
map.add_new(&c, 1);
EXPECT_EQ(map.size(), 3);
EXPECT_TRUE(map.remove(&b));
EXPECT_TRUE(map.add(&b, 8));
EXPECT_FALSE(map.remove(&d));
EXPECT_TRUE(map.remove(&a));
EXPECT_TRUE(map.remove(&b));
EXPECT_TRUE(map.remove(&c));
EXPECT_TRUE(map.is_empty());
}
/**
* Set this to 1 to activate the benchmark. It is disabled by default, because it prints a lot.
*/
#if 0
template<typename MapT>
BLI_NOINLINE void benchmark_random_ints(StringRef name, uint amount, uint factor)
{
RNG *rng = BLI_rng_new(0);
Vector<int> values;
for (uint i = 0; i < amount; i++) {
values.append(BLI_rng_get_int(rng) * factor);
}
BLI_rng_free(rng);
MapT map;
{
SCOPED_TIMER(name + " Add");
for (int value : values) {
map.add(value, value);
}
}
int count = 0;
{
SCOPED_TIMER(name + " Contains");
for (int value : values) {
count += map.contains(value);
}
}
{
SCOPED_TIMER(name + " Remove");
for (int value : values) {
count += map.remove(value);
}
}
/* Print the value for simple error checking and to avoid some compiler optimizations. */
std::cout << "Count: " << count << "\n";
}
TEST(map, Benchmark)
{
for (uint i = 0; i < 3; i++) {
benchmark_random_ints<BLI::Map<int, int>>("BLI::Map ", 1000000, 1);
benchmark_random_ints<BLI::StdUnorderedMapWrapper<int, int>>("std::unordered_map", 1000000, 1);
}
std::cout << "\n";
for (uint i = 0; i < 3; i++) {
uint32_t factor = (3 << 10);
benchmark_random_ints<BLI::Map<int, int>>("BLI::Map ", 1000000, factor);
benchmark_random_ints<BLI::StdUnorderedMapWrapper<int, int>>(
"std::unordered_map", 1000000, factor);
}
}
/**
* Timer 'BLI::Map Add' took 61.7616 ms
* Timer 'BLI::Map Contains' took 18.4989 ms
* Timer 'BLI::Map Remove' took 20.5864 ms
* Count: 1999755
* Timer 'std::unordered_map Add' took 188.674 ms
* Timer 'std::unordered_map Contains' took 44.3741 ms
* Timer 'std::unordered_map Remove' took 169.52 ms
* Count: 1999755
* Timer 'BLI::Map Add' took 37.9196 ms
* Timer 'BLI::Map Contains' took 16.7361 ms
* Timer 'BLI::Map Remove' took 20.9568 ms
* Count: 1999755
* Timer 'std::unordered_map Add' took 166.09 ms
* Timer 'std::unordered_map Contains' took 40.6133 ms
* Timer 'std::unordered_map Remove' took 142.85 ms
* Count: 1999755
* Timer 'BLI::Map Add' took 37.3053 ms
* Timer 'BLI::Map Contains' took 16.6731 ms
* Timer 'BLI::Map Remove' took 18.8304 ms
* Count: 1999755
* Timer 'std::unordered_map Add' took 170.964 ms
* Timer 'std::unordered_map Contains' took 38.1824 ms
* Timer 'std::unordered_map Remove' took 140.263 ms
* Count: 1999755
*
* Timer 'BLI::Map Add' took 50.1131 ms
* Timer 'BLI::Map Contains' took 25.0491 ms
* Timer 'BLI::Map Remove' took 32.4225 ms
* Count: 1889920
* Timer 'std::unordered_map Add' took 150.129 ms
* Timer 'std::unordered_map Contains' took 34.6999 ms
* Timer 'std::unordered_map Remove' took 120.907 ms
* Count: 1889920
* Timer 'BLI::Map Add' took 50.4438 ms
* Timer 'BLI::Map Contains' took 25.2677 ms
* Timer 'BLI::Map Remove' took 32.3047 ms
* Count: 1889920
* Timer 'std::unordered_map Add' took 144.015 ms
* Timer 'std::unordered_map Contains' took 36.3387 ms
* Timer 'std::unordered_map Remove' took 119.109 ms
* Count: 1889920
* Timer 'BLI::Map Add' took 48.6995 ms
* Timer 'BLI::Map Contains' took 25.1846 ms
* Timer 'BLI::Map Remove' took 33.0283 ms
* Count: 1889920
* Timer 'std::unordered_map Add' took 143.494 ms
* Timer 'std::unordered_map Contains' took 34.8905 ms
* Timer 'std::unordered_map Remove' took 122.739 ms
* Count: 1889920
*/
#endif /* Benchmark */

View File

@@ -0,0 +1,48 @@
#include "BLI_math_bits.h"
#include "testing/testing.h"
#include <iostream>
TEST(math_bits, BitscanReverseClearUint)
{
uint a = 1234;
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 21);
EXPECT_EQ(a, 210);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 24);
EXPECT_EQ(a, 82);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 25);
EXPECT_EQ(a, 18);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 27);
EXPECT_EQ(a, 2);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 30);
EXPECT_EQ(a, 0);
a = 3563987529;
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 0);
EXPECT_EQ(a, 1416503881);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 1);
EXPECT_EQ(a, 342762057);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 3);
EXPECT_EQ(a, 74326601);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 5);
EXPECT_EQ(a, 7217737);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 9);
EXPECT_EQ(a, 3023433);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 10);
EXPECT_EQ(a, 926281);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 12);
EXPECT_EQ(a, 401993);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 13);
EXPECT_EQ(a, 139849);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 14);
EXPECT_EQ(a, 8777);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 18);
EXPECT_EQ(a, 585);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 22);
EXPECT_EQ(a, 73);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 25);
EXPECT_EQ(a, 9);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 28);
EXPECT_EQ(a, 1);
EXPECT_EQ(bitscan_reverse_clear_uint(&a), 31);
EXPECT_EQ(a, 0);
}

View File

@@ -1,4 +1,5 @@
#include "BLI_optional.hh"
#include "BLI_strict_flags.h"
#include "testing/testing.h"
#include <string>
@@ -36,20 +37,6 @@ TEST(optional, Reset)
EXPECT_FALSE(a.has_value());
}
TEST(optional, FromNullPointer)
{
Optional<int> a = Optional<int>::FromPointer(nullptr);
EXPECT_FALSE(a.has_value());
}
TEST(optional, FromNonNullPointer)
{
int value = 42;
Optional<int> a = Optional<int>::FromPointer(&value);
EXPECT_TRUE(a.has_value());
EXPECT_EQ(a.value(), 42);
}
TEST(optional, Extract)
{
Optional<int> a(32);

View File

@@ -1,27 +1,32 @@
#include <set>
#include <unordered_set>
#include "BLI_ghash.h"
#include "BLI_rand.h"
#include "BLI_set.hh"
#include "BLI_strict_flags.h"
#include "BLI_timeit.hh"
#include "BLI_vector.hh"
#include "testing/testing.h"
using BLI::Set;
using BLI::Vector;
using IntSet = Set<int>;
using namespace BLI;
TEST(set, Defaultconstructor)
TEST(set, DefaultConstructor)
{
IntSet set;
Set<int> set;
EXPECT_EQ(set.size(), 0);
EXPECT_TRUE(set.is_empty());
}
TEST(set, ContainsNotExistant)
{
IntSet set;
Set<int> set;
EXPECT_FALSE(set.contains(3));
}
TEST(set, ContainsExistant)
{
IntSet set;
Set<int> set;
EXPECT_FALSE(set.contains(5));
EXPECT_TRUE(set.is_empty());
set.add(5);
@@ -31,7 +36,7 @@ TEST(set, ContainsExistant)
TEST(set, AddMany)
{
IntSet set;
Set<int> set;
for (int i = 0; i < 100; i++) {
set.add(i);
}
@@ -46,7 +51,7 @@ TEST(set, AddMany)
TEST(set, InitializerListConstructor)
{
IntSet set = {4, 5, 6};
Set<int> set = {4, 5, 6};
EXPECT_EQ(set.size(), 3);
EXPECT_TRUE(set.contains(4));
EXPECT_TRUE(set.contains(5));
@@ -57,11 +62,11 @@ TEST(set, InitializerListConstructor)
TEST(set, CopyConstructor)
{
IntSet set = {3};
Set<int> set = {3};
EXPECT_TRUE(set.contains(3));
EXPECT_FALSE(set.contains(4));
IntSet set2 = set;
Set<int> set2(set);
set2.add(4);
EXPECT_TRUE(set2.contains(3));
EXPECT_TRUE(set2.contains(4));
@@ -71,47 +76,72 @@ TEST(set, CopyConstructor)
TEST(set, MoveConstructor)
{
IntSet set = {1, 2, 3};
Set<int> set = {1, 2, 3};
EXPECT_EQ(set.size(), 3);
IntSet set2 = std::move(set);
Set<int> set2(std::move(set));
EXPECT_EQ(set.size(), 0);
EXPECT_EQ(set2.size(), 3);
}
TEST(set, Remove)
TEST(set, CopyAssignment)
{
IntSet set = {3, 4, 5};
Set<int> set = {3};
EXPECT_TRUE(set.contains(3));
EXPECT_FALSE(set.contains(4));
Set<int> set2;
set2 = set;
set2.add(4);
EXPECT_TRUE(set2.contains(3));
EXPECT_TRUE(set2.contains(4));
EXPECT_FALSE(set.contains(4));
}
TEST(set, MoveAssignment)
{
Set<int> set = {1, 2, 3};
EXPECT_EQ(set.size(), 3);
Set<int> set2;
set2 = std::move(set);
EXPECT_EQ(set.size(), 0);
EXPECT_EQ(set2.size(), 3);
}
TEST(set, RemoveContained)
{
Set<int> set = {3, 4, 5};
EXPECT_TRUE(set.contains(3));
EXPECT_TRUE(set.contains(4));
EXPECT_TRUE(set.contains(5));
set.remove(4);
set.remove_contained(4);
EXPECT_TRUE(set.contains(3));
EXPECT_FALSE(set.contains(4));
EXPECT_TRUE(set.contains(5));
set.remove(3);
set.remove_contained(3);
EXPECT_FALSE(set.contains(3));
EXPECT_FALSE(set.contains(4));
EXPECT_TRUE(set.contains(5));
set.remove(5);
set.remove_contained(5);
EXPECT_FALSE(set.contains(3));
EXPECT_FALSE(set.contains(4));
EXPECT_FALSE(set.contains(5));
}
TEST(set, RemoveMany)
TEST(set, RemoveContainedMany)
{
IntSet set;
for (uint i = 0; i < 1000; i++) {
Set<int> set;
for (int i = 0; i < 1000; i++) {
set.add(i);
}
for (uint i = 100; i < 1000; i++) {
set.remove(i);
for (int i = 100; i < 1000; i++) {
set.remove_contained(i);
}
for (uint i = 900; i < 1000; i++) {
for (int i = 900; i < 1000; i++) {
set.add(i);
}
for (uint i = 0; i < 1000; i++) {
for (int i = 0; i < 1000; i++) {
if (i < 100 || i >= 900) {
EXPECT_TRUE(set.contains(i));
}
@@ -123,23 +153,23 @@ TEST(set, RemoveMany)
TEST(set, Intersects)
{
IntSet a = {3, 4, 5, 6};
IntSet b = {1, 2, 5};
EXPECT_TRUE(IntSet::Intersects(a, b));
EXPECT_FALSE(IntSet::Disjoint(a, b));
Set<int> a = {3, 4, 5, 6};
Set<int> b = {1, 2, 5};
EXPECT_TRUE(Set<int>::Intersects(a, b));
EXPECT_FALSE(Set<int>::Disjoint(a, b));
}
TEST(set, Disjoint)
{
IntSet a = {5, 6, 7, 8};
IntSet b = {2, 3, 4, 9};
EXPECT_FALSE(IntSet::Intersects(a, b));
EXPECT_TRUE(IntSet::Disjoint(a, b));
Set<int> a = {5, 6, 7, 8};
Set<int> b = {2, 3, 4, 9};
EXPECT_FALSE(Set<int>::Intersects(a, b));
EXPECT_TRUE(Set<int>::Disjoint(a, b));
}
TEST(set, AddMultiple)
{
IntSet a;
Set<int> a;
a.add_multiple({5, 7});
EXPECT_TRUE(a.contains(5));
EXPECT_TRUE(a.contains(7));
@@ -152,7 +182,7 @@ TEST(set, AddMultiple)
TEST(set, AddMultipleNew)
{
IntSet a;
Set<int> a;
a.add_multiple_new({5, 6});
EXPECT_TRUE(a.contains(5));
EXPECT_TRUE(a.contains(6));
@@ -160,7 +190,7 @@ TEST(set, AddMultipleNew)
TEST(set, Iterator)
{
IntSet set = {1, 3, 2, 5, 4};
Set<int> set = {1, 3, 2, 5, 4};
BLI::Vector<int> vec;
for (int value : set) {
vec.append(value);
@@ -173,13 +203,13 @@ TEST(set, Iterator)
EXPECT_TRUE(vec.contains(4));
}
TEST(set, OftenAddRemove)
TEST(set, OftenAddRemoveContained)
{
IntSet set;
Set<int> set;
for (int i = 0; i < 100; i++) {
set.add(42);
EXPECT_EQ(set.size(), 1);
set.remove(42);
set.remove_contained(42);
EXPECT_EQ(set.size(), 0);
}
}
@@ -202,3 +232,282 @@ TEST(set, Clear)
set.clear();
EXPECT_EQ(set.size(), 0);
}
TEST(set, StringSet)
{
Set<std::string> set;
set.add("hello");
set.add("world");
EXPECT_EQ(set.size(), 2);
EXPECT_TRUE(set.contains("hello"));
EXPECT_TRUE(set.contains("world"));
EXPECT_FALSE(set.contains("world2"));
}
TEST(set, PointerSet)
{
int a, b, c;
Set<int *> set;
set.add(&a);
set.add(&b);
EXPECT_EQ(set.size(), 2);
EXPECT_TRUE(set.contains(&a));
EXPECT_TRUE(set.contains(&b));
EXPECT_FALSE(set.contains(&c));
}
TEST(set, Remove)
{
Set<int> set = {1, 2, 3, 4, 5, 6};
EXPECT_EQ(set.size(), 6);
EXPECT_TRUE(set.remove(2));
EXPECT_EQ(set.size(), 5);
EXPECT_FALSE(set.contains(2));
EXPECT_FALSE(set.remove(2));
EXPECT_EQ(set.size(), 5);
EXPECT_TRUE(set.remove(5));
EXPECT_EQ(set.size(), 4);
}
struct Type1 {
uint32_t value;
};
struct Type2 {
uint32_t value;
};
bool operator==(const Type1 &a, const Type1 &b)
{
return a.value == b.value;
}
bool operator==(const Type1 &a, const Type2 &b)
{
return a.value == b.value;
}
bool operator==(const Type2 &a, const Type1 &b)
{
return a.value == b.value;
}
template<> struct BLI::DefaultHash<Type1> {
uint32_t operator()(const Type1 &value) const
{
return value.value;
}
uint32_t operator()(const Type2 &value) const
{
return value.value;
}
};
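/* Note: the extra operator() overload for Type2 in DefaultHash<Type1>, together with the mixed
* Type1/Type2 equality operators above, is presumably what lets the "_as" methods below accept a
* Type2 key on a Set<Type1> without constructing a Type1 first (heterogeneous lookup). */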
TEST(set, ContainsAs)
{
Set<Type1> set;
set.add(Type1{5});
EXPECT_TRUE(set.contains_as(Type1{5}));
EXPECT_TRUE(set.contains_as(Type2{5}));
EXPECT_FALSE(set.contains_as(Type1{6}));
EXPECT_FALSE(set.contains_as(Type2{6}));
}
TEST(set, ContainsAsString)
{
Set<std::string> set;
set.add("test");
EXPECT_TRUE(set.contains_as("test"));
EXPECT_TRUE(set.contains_as(StringRef("test")));
EXPECT_FALSE(set.contains_as("string"));
EXPECT_FALSE(set.contains_as(StringRef("string")));
}
TEST(set, RemoveContainedAs)
{
Set<Type1> set;
set.add(Type1{5});
EXPECT_TRUE(set.contains_as(Type2{5}));
set.remove_contained_as(Type2{5});
EXPECT_FALSE(set.contains_as(Type2{5}));
}
TEST(set, RemoveAs)
{
Set<Type1> set;
set.add(Type1{5});
EXPECT_TRUE(set.contains_as(Type2{5}));
set.remove_as(Type2{6});
EXPECT_TRUE(set.contains_as(Type2{5}));
set.remove_as(Type2{5});
EXPECT_FALSE(set.contains_as(Type2{5}));
set.remove_as(Type2{5});
EXPECT_FALSE(set.contains_as(Type2{5}));
}
TEST(set, AddAs)
{
Set<std::string> set;
EXPECT_TRUE(set.add_as("test"));
EXPECT_TRUE(set.add_as(StringRef("qwe")));
EXPECT_FALSE(set.add_as(StringRef("test")));
EXPECT_FALSE(set.add_as("qwe"));
}
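/* The std::string tests above exercise the same mechanism: "test" and StringRef("test") are used
* as query keys directly, which presumably avoids building a temporary std::string for every
* contains_as/add_as call. */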
template<uint N> struct EqualityIntModN {
bool operator()(uint a, uint b) const
{
return (a % N) == (b % N);
}
};
template<uint N> struct HashIntModN {
uint32_t operator()(uint value) const
{
return value % N;
}
};
TEST(set, CustomizeHashAndEquality)
{
Set<uint, 0, DefaultProbingStrategy, HashIntModN<10>, EqualityIntModN<10>> set;
set.add(4);
EXPECT_TRUE(set.contains(4));
EXPECT_TRUE(set.contains(14));
EXPECT_TRUE(set.contains(104));
EXPECT_FALSE(set.contains(5));
set.add(55);
EXPECT_TRUE(set.contains(5));
EXPECT_TRUE(set.contains(14));
set.remove(1004);
EXPECT_FALSE(set.contains(14));
}
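/* With HashIntModN<10> and EqualityIntModN<10>, all keys that agree modulo 10 are treated as
* equal, which is why adding 4 makes 14 and 104 count as contained, and removing 1004 removes
* that whole equivalence class again. */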
TEST(set, IntrusiveIntKey)
{
Set<int,
2,
DefaultProbingStrategy,
DefaultHash<int>,
DefaultEquality,
IntegerSetSlot<int, 100, 200>>
set;
EXPECT_TRUE(set.add(4));
EXPECT_TRUE(set.add(3));
EXPECT_TRUE(set.add(11));
EXPECT_TRUE(set.add(8));
EXPECT_FALSE(set.add(3));
EXPECT_FALSE(set.add(4));
EXPECT_TRUE(set.remove(4));
EXPECT_FALSE(set.remove(7));
EXPECT_TRUE(set.add(4));
EXPECT_TRUE(set.remove(4));
}
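/* IntegerSetSlot<int, 100, 200> appears to reserve 100 and 200 as the "empty" and "removed"
* sentinel values so that no separate slot state has to be stored; the keys in this test are
* chosen to avoid those two values. */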
/**
* Set this to 1 to activate the benchmark. It is disabled by default, because it prints a lot.
*/
#if 0
template<typename SetT>
BLI_NOINLINE void benchmark_random_ints(StringRef name, uint amount, uint factor)
{
RNG *rng = BLI_rng_new(0);
Vector<int> values;
for (uint i = 0; i < amount; i++) {
values.append(BLI_rng_get_int(rng) * factor);
}
BLI_rng_free(rng);
SetT set;
{
SCOPED_TIMER(name + " Add");
for (int value : values) {
set.add(value);
}
}
int count = 0;
{
SCOPED_TIMER(name + " Contains");
for (int value : values) {
count += set.contains(value);
}
}
{
SCOPED_TIMER(name + " Remove");
for (int value : values) {
count += set.remove(value);
}
}
/* Print the value for simple error checking and to avoid some compiler optimizations. */
std::cout << "Count: " << count << "\n";
}
TEST(set, Benchmark)
{
for (uint i = 0; i < 3; i++) {
benchmark_random_ints<BLI::Set<int>>("BLI::Set ", 100000, 1);
benchmark_random_ints<BLI::StdUnorderedSetWrapper<int>>("std::unordered_set", 100000, 1);
}
std::cout << "\n";
for (uint i = 0; i < 3; i++) {
uint32_t factor = (3 << 10);
benchmark_random_ints<BLI::Set<int>>("BLI::Set ", 100000, factor);
benchmark_random_ints<BLI::StdUnorderedSetWrapper<int>>("std::unordered_set", 100000, factor);
}
}
/**
* Output of the rudimentary benchmark above on my hardware.
*
* Timer 'BLI::Set Add' took 5.5573 ms
* Timer 'BLI::Set Contains' took 0.807384 ms
* Timer 'BLI::Set Remove' took 0.953436 ms
* Count: 199998
* Timer 'std::unordered_set Add' took 12.551 ms
* Timer 'std::unordered_set Contains' took 2.3323 ms
* Timer 'std::unordered_set Remove' took 5.07082 ms
* Count: 199998
* Timer 'BLI::Set Add' took 2.62526 ms
* Timer 'BLI::Set Contains' took 0.407499 ms
* Timer 'BLI::Set Remove' took 0.472981 ms
* Count: 199998
* Timer 'std::unordered_set Add' took 6.26945 ms
* Timer 'std::unordered_set Contains' took 1.17236 ms
* Timer 'std::unordered_set Remove' took 3.77402 ms
* Count: 199998
* Timer 'BLI::Set Add' took 2.59152 ms
* Timer 'BLI::Set Contains' took 0.415254 ms
* Timer 'BLI::Set Remove' took 0.477559 ms
* Count: 199998
* Timer 'std::unordered_set Add' took 6.28129 ms
* Timer 'std::unordered_set Contains' took 1.17562 ms
* Timer 'std::unordered_set Remove' took 3.77811 ms
* Count: 199998
*
* Timer 'BLI::Set Add' took 3.16514 ms
* Timer 'BLI::Set Contains' took 0.732895 ms
* Timer 'BLI::Set Remove' took 1.08171 ms
* Count: 198790
* Timer 'std::unordered_set Add' took 6.57377 ms
* Timer 'std::unordered_set Contains' took 1.17008 ms
* Timer 'std::unordered_set Remove' took 3.7946 ms
* Count: 198790
* Timer 'BLI::Set Add' took 3.11439 ms
* Timer 'BLI::Set Contains' took 0.740159 ms
* Timer 'BLI::Set Remove' took 1.06749 ms
* Count: 198790
* Timer 'std::unordered_set Add' took 6.35597 ms
* Timer 'std::unordered_set Contains' took 1.17713 ms
* Timer 'std::unordered_set Remove' took 3.77826 ms
* Count: 198790
* Timer 'BLI::Set Add' took 3.09876 ms
* Timer 'BLI::Set Contains' took 0.742072 ms
* Timer 'BLI::Set Remove' took 1.06622 ms
* Count: 198790
* Timer 'std::unordered_set Add' took 6.4469 ms
* Timer 'std::unordered_set Contains' took 1.16515 ms
* Timer 'std::unordered_set Remove' took 3.80639 ms
* Count: 198790
*/
#endif /* Benchmark */

View File

@@ -1,12 +1,13 @@
#include "BLI_stack.hh"
#include "BLI_strict_flags.h"
#include "BLI_vector.hh"
#include "testing/testing.h"
using BLI::Stack;
using IntStack = Stack<int>;
using namespace BLI;
TEST(stack, DefaultConstructor)
{
IntStack stack;
Stack<int> stack;
EXPECT_EQ(stack.size(), 0);
EXPECT_TRUE(stack.is_empty());
}
@@ -14,7 +15,7 @@ TEST(stack, DefaultConstructor)
TEST(stack, ArrayRefConstructor)
{
std::array<int, 3> array = {4, 7, 2};
IntStack stack(array);
Stack<int> stack(array);
EXPECT_EQ(stack.size(), 3);
EXPECT_EQ(stack.pop(), 2);
EXPECT_EQ(stack.pop(), 7);
@@ -22,9 +23,66 @@ TEST(stack, ArrayRefConstructor)
EXPECT_TRUE(stack.is_empty());
}
TEST(stack, CopyConstructor)
{
Stack<int> stack1 = {1, 2, 3, 4, 5, 6, 7};
Stack<int> stack2 = stack1;
EXPECT_EQ(stack1.size(), 7);
EXPECT_EQ(stack2.size(), 7);
for (int i = 7; i >= 1; i--) {
EXPECT_FALSE(stack1.is_empty());
EXPECT_FALSE(stack2.is_empty());
EXPECT_EQ(stack1.pop(), i);
EXPECT_EQ(stack2.pop(), i);
}
EXPECT_TRUE(stack1.is_empty());
EXPECT_TRUE(stack2.is_empty());
}
TEST(stack, MoveConstructor)
{
Stack<int> stack1 = {1, 2, 3, 4, 5, 6, 7};
Stack<int> stack2 = std::move(stack1);
EXPECT_EQ(stack1.size(), 0);
EXPECT_EQ(stack2.size(), 7);
for (int i = 7; i >= 1; i--) {
EXPECT_EQ(stack2.pop(), i);
}
}
TEST(stack, CopyAssignment)
{
Stack<int> stack1 = {1, 2, 3, 4, 5, 6, 7};
Stack<int> stack2 = {2, 3, 4, 5, 6, 7};
stack2 = stack1;
EXPECT_EQ(stack1.size(), 7);
EXPECT_EQ(stack2.size(), 7);
for (int i = 7; i >= 1; i--) {
EXPECT_FALSE(stack1.is_empty());
EXPECT_FALSE(stack2.is_empty());
EXPECT_EQ(stack1.pop(), i);
EXPECT_EQ(stack2.pop(), i);
}
EXPECT_TRUE(stack1.is_empty());
EXPECT_TRUE(stack2.is_empty());
}
TEST(stack, MoveAssignment)
{
Stack<int> stack1 = {1, 2, 3, 4, 5, 6, 7};
Stack<int> stack2 = {5, 3, 7, 2, 2};
stack2 = std::move(stack1);
EXPECT_EQ(stack1.size(), 0);
EXPECT_EQ(stack2.size(), 7);
for (int i = 7; i >= 1; i--) {
EXPECT_EQ(stack2.pop(), i);
}
}
TEST(stack, Push)
{
IntStack stack;
Stack<int> stack;
EXPECT_EQ(stack.size(), 0);
stack.push(3);
EXPECT_EQ(stack.size(), 1);
@@ -34,7 +92,7 @@ TEST(stack, Push)
TEST(stack, PushMultiple)
{
IntStack stack;
Stack<int> stack;
EXPECT_EQ(stack.size(), 0);
stack.push_multiple({1, 2, 3});
EXPECT_EQ(stack.size(), 3);
@@ -43,9 +101,52 @@ TEST(stack, PushMultiple)
EXPECT_EQ(stack.pop(), 1);
}
TEST(stack, PushPopMany)
{
Stack<int> stack;
for (int i = 0; i < 1000; i++) {
stack.push(i);
EXPECT_EQ(stack.size(), i + 1);
}
for (int i = 999; i > 50; i--) {
EXPECT_EQ(stack.pop(), i);
EXPECT_EQ(stack.size(), i);
}
for (int i = 51; i < 5000; i++) {
stack.push(i);
EXPECT_EQ(stack.size(), i + 1);
}
for (int i = 4999; i >= 0; i--) {
EXPECT_EQ(stack.pop(), i);
EXPECT_EQ(stack.size(), i);
}
}
TEST(stack, PushMultipleAfterPop)
{
Stack<int> stack;
for (int i = 0; i < 1000; i++) {
stack.push(i);
}
for (int i = 999; i >= 0; i--) {
EXPECT_EQ(stack.pop(), i);
}
Vector<int> values;
for (int i = 0; i < 5000; i++) {
values.append(i);
}
stack.push_multiple(values);
EXPECT_EQ(stack.size(), 5000);
for (int i = 4999; i >= 0; i--) {
EXPECT_EQ(stack.pop(), i);
}
}
TEST(stack, Pop)
{
IntStack stack;
Stack<int> stack;
stack.push(4);
stack.push(6);
EXPECT_EQ(stack.pop(), 6);
@@ -54,7 +155,7 @@ TEST(stack, Pop)
TEST(stack, Peek)
{
IntStack stack;
Stack<int> stack;
stack.push(3);
stack.push(4);
EXPECT_EQ(stack.peek(), 4);
@@ -72,3 +173,12 @@ TEST(stack, UniquePtrValues)
std::unique_ptr<int> &b = stack.peek();
UNUSED_VARS(a, b);
}
TEST(stack, OveralignedValues)
{
Stack<AlignedBuffer<1, 512>, 2> stack;
for (int i = 0; i < 100; i++) {
stack.push({});
EXPECT_EQ((uintptr_t)&stack.peek() % 512, 0);
}
}
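/* AlignedBuffer<1, 512> is over-aligned on purpose: the modulo check verifies that elements keep
* their 512-byte alignment even while they live in the small inline buffer (the second template
* argument, presumably the inline capacity), not only after growing into a heap allocation. */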

View File

@@ -1,275 +0,0 @@
#include "BLI_string_map.hh"
#include "BLI_vector.hh"
#include "testing/testing.h"
using namespace BLI;
TEST(string_map, DefaultConstructor)
{
StringMap<int> map;
EXPECT_EQ(map.size(), 0);
}
TEST(string_map, CopyConstructor)
{
StringMap<Vector<int, 4>> map1;
map1.add_new("A", {1, 2, 3});
map1.add_new("B", {1, 2, 3, 4, 5, 6});
StringMap<Vector<int>> map2(map1);
EXPECT_EQ(map1.size(), 2);
EXPECT_EQ(map2.size(), 2);
EXPECT_EQ(map1.lookup("A")[1], 2);
EXPECT_EQ(map2.lookup("A")[1], 2);
EXPECT_EQ(map1.lookup("B")[5], 6);
EXPECT_EQ(map2.lookup("B")[5], 6);
}
TEST(string_map, MoveConstructor)
{
StringMap<Vector<int, 4>> map1;
map1.add_new("A", {1, 2, 3});
map1.add_new("B", {1, 2, 3, 4, 5, 6});
StringMap<Vector<int>> map2(std::move(map1));
EXPECT_EQ(map1.size(), 0);
EXPECT_FALSE(map1.contains("A"));
EXPECT_FALSE(map1.contains("B"));
EXPECT_EQ(map2.size(), 2);
EXPECT_EQ(map2.lookup("A")[1], 2);
EXPECT_EQ(map2.lookup("B")[5], 6);
}
TEST(string_map, Add)
{
StringMap<int> map;
EXPECT_EQ(map.size(), 0);
map.add("test", 1);
EXPECT_EQ(map.lookup("test"), 1);
map.add("test", 2);
EXPECT_EQ(map.lookup("test"), 1);
map.add("test2", 2);
EXPECT_EQ(map.lookup("test2"), 2);
}
TEST(string_map, AddNew)
{
StringMap<int> map;
EXPECT_EQ(map.size(), 0);
map.add_new("Why", 5);
EXPECT_EQ(map.size(), 1);
EXPECT_EQ(map.lookup("Why"), 5);
map.add_new("Where", 6);
EXPECT_EQ(map.size(), 2);
EXPECT_EQ(map.lookup("Where"), 6);
}
TEST(string_map, AddNew_Many)
{
StringMap<int> map;
for (uint i = 0; i < 100; i++) {
map.add_new(std::to_string(i), i);
}
EXPECT_EQ(map.size(), 100);
}
TEST(string_map, Contains)
{
StringMap<int> map;
map.add_new("A", 0);
map.add_new("B", 0);
EXPECT_TRUE(map.contains("A"));
EXPECT_TRUE(map.contains("B"));
EXPECT_FALSE(map.contains("C"));
}
TEST(string_map, Contains_Many)
{
StringMap<int> map;
for (uint i = 0; i < 50; i++) {
map.add_new(std::to_string(i), i);
}
for (uint i = 100; i < 200; i++) {
map.add_new(std::to_string(i), i);
}
EXPECT_EQ(map.size(), 150);
for (uint i = 0; i < 200; i++) {
if (i < 50 || i >= 100) {
EXPECT_TRUE(map.contains(std::to_string(i)));
}
else {
EXPECT_FALSE(map.contains(std::to_string(i)));
}
}
}
TEST(string_map, Lookup)
{
StringMap<int> map;
map.add_new("A", 5);
map.add_new("B", 8);
map.add_new("C", 10);
EXPECT_EQ(map.lookup("A"), 5);
EXPECT_EQ(map.lookup("B"), 8);
EXPECT_EQ(map.lookup("C"), 10);
}
TEST(string_map, LookupPtr)
{
StringMap<int> map;
map.add_new("test1", 13);
map.add_new("test2", 14);
map.add_new("test3", 15);
EXPECT_EQ(*map.lookup_ptr("test1"), 13);
EXPECT_EQ(*map.lookup_ptr("test2"), 14);
EXPECT_EQ(*map.lookup_ptr("test3"), 15);
EXPECT_EQ(map.lookup_ptr("test4"), nullptr);
}
TEST(string_map, LookupDefault)
{
StringMap<int> map;
EXPECT_EQ(map.lookup_default("test", 42), 42);
map.add_new("test", 5);
EXPECT_EQ(map.lookup_default("test", 42), 5);
}
TEST(string_map, TryLookup)
{
StringMap<int> map;
map.add_new("test", 4);
EXPECT_TRUE(map.try_lookup("test").has_value());
EXPECT_FALSE(map.try_lookup("value").has_value());
EXPECT_EQ(map.try_lookup("test").value(), 4);
}
TEST(string_map, FindKeyForValue)
{
StringMap<int> map;
map.add_new("A", 1);
map.add_new("B", 2);
map.add_new("C", 3);
EXPECT_EQ(map.find_key_for_value(1), "A");
EXPECT_EQ(map.find_key_for_value(2), "B");
EXPECT_EQ(map.find_key_for_value(3), "C");
}
TEST(string_map, ForeachValue)
{
StringMap<int> map;
map.add_new("A", 4);
map.add_new("B", 5);
map.add_new("C", 1);
Vector<int> values;
map.foreach_value([&values](int &value) { values.append(value); });
EXPECT_EQ(values.size(), 3);
EXPECT_TRUE(values.contains(1));
EXPECT_TRUE(values.contains(4));
EXPECT_TRUE(values.contains(5));
}
TEST(string_map, ForeachKey)
{
StringMap<int> map;
map.add_new("A", 4);
map.add_new("B", 5);
map.add_new("C", 1);
Vector<std::string> keys;
map.foreach_key([&keys](StringRefNull key) { keys.append(key); });
EXPECT_EQ(keys.size(), 3);
EXPECT_TRUE(keys.contains("A"));
EXPECT_TRUE(keys.contains("B"));
EXPECT_TRUE(keys.contains("C"));
}
TEST(string_map, ForeachKeyValuePair)
{
StringMap<int> map;
map.add_new("A", 4);
map.add_new("B", 5);
map.add_new("C", 1);
Vector<std::string> keys;
Vector<int> values;
map.foreach_item([&keys, &values](StringRefNull key, int value) {
keys.append(key);
values.append(value);
});
EXPECT_EQ(keys.size(), 3);
EXPECT_EQ(values[keys.index("A")], 4);
EXPECT_EQ(values[keys.index("B")], 5);
EXPECT_EQ(values[keys.index("C")], 1);
}
TEST(string_map, WithVectors)
{
StringMap<Vector<int>> map;
map.add_new("A", {1, 2, 3});
map.add_new("B", {1, 2, 3, 4, 5, 6, 7});
EXPECT_EQ(map.size(), 2);
EXPECT_EQ(map.lookup("A").size(), 3);
EXPECT_EQ(map.lookup("B").size(), 7);
}
TEST(string_map, UniquePtrValues)
{
StringMap<std::unique_ptr<int>> map;
map.add_new("A", std::unique_ptr<int>(new int()));
std::unique_ptr<int> &a = map.lookup("A");
std::unique_ptr<int> *b = map.lookup_ptr("A");
EXPECT_EQ(a.get(), b->get());
}
TEST(string_map, AddOrModify)
{
StringMap<int> map;
auto create_func = [](int *value) {
*value = 10;
return true;
};
auto modify_func = [](int *value) {
*value += 5;
return false;
};
EXPECT_TRUE(map.add_or_modify("Hello", create_func, modify_func));
EXPECT_EQ(map.lookup("Hello"), 10);
EXPECT_FALSE(map.add_or_modify("Hello", create_func, modify_func));
EXPECT_EQ(map.lookup("Hello"), 15);
}
TEST(string_map, LookupOrAdd)
{
StringMap<int> map;
auto return_5 = []() { return 5; };
auto return_8 = []() { return 8; };
int &a = map.lookup_or_add("A", return_5);
EXPECT_EQ(a, 5);
EXPECT_EQ(map.lookup_or_add("A", return_8), 5);
EXPECT_EQ(map.lookup_or_add("B", return_8), 8);
}
TEST(string_map, LookupOrAddDefault)
{
StringMap<std::string> map;
std::string &a = map.lookup_or_add_default("A");
EXPECT_EQ(a.size(), 0);
a += "Test";
EXPECT_EQ(a.size(), 4);
std::string &b = map.lookup_or_add_default("A");
EXPECT_EQ(b, "Test");
}

View File

@@ -1,3 +1,4 @@
#include "BLI_strict_flags.h"
#include "BLI_string_ref.hh"
#include "BLI_vector.hh"
#include "testing/testing.h"

View File

@@ -1,63 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __ANY_TYPE_MOCK_TEST_H__
#define __ANY_TYPE_MOCK_TEST_H__
#include "BLI_sys_types.h"
class TypeConstructMock {
public:
bool default_constructed = false;
bool copy_constructed = false;
bool move_constructed = false;
bool copy_assigned = false;
bool move_assigned = false;
TypeConstructMock() : default_constructed(true)
{
}
TypeConstructMock(const TypeConstructMock &other) : copy_constructed(true)
{
}
TypeConstructMock(TypeConstructMock &&other) : move_constructed(true)
{
}
TypeConstructMock &operator=(const TypeConstructMock &other)
{
if (this == &other) {
return *this;
}
copy_assigned = true;
return *this;
}
TypeConstructMock &operator=(TypeConstructMock &&other)
{
if (this == &other) {
return *this;
}
move_assigned = true;
return *this;
}
};
#endif /* __ANY_TYPE_MOCK_TEST_H__ */

View File

@@ -1,19 +1,19 @@
#include "BLI_strict_flags.h"
#include "BLI_vector_set.hh"
#include "testing/testing.h"
using BLI::VectorSet;
using IntVectorSet = VectorSet<int>;
TEST(vector_set, DefaultConstructor)
{
IntVectorSet set;
VectorSet<int> set;
EXPECT_EQ(set.size(), 0);
EXPECT_TRUE(set.is_empty());
}
TEST(vector_set, InitializerListConstructor_WithoutDuplicates)
{
IntVectorSet set = {1, 4, 5};
VectorSet<int> set = {1, 4, 5};
EXPECT_EQ(set.size(), 3);
EXPECT_EQ(set[0], 1);
EXPECT_EQ(set[1], 4);
@@ -22,7 +22,7 @@ TEST(vector_set, InitializerListConstructor_WithoutDuplicates)
TEST(vector_set, InitializerListConstructor_WithDuplicates)
{
IntVectorSet set = {1, 3, 3, 2, 1, 5};
VectorSet<int> set = {1, 3, 3, 2, 1, 5};
EXPECT_EQ(set.size(), 4);
EXPECT_EQ(set[0], 1);
EXPECT_EQ(set[1], 3);
@@ -32,37 +32,37 @@ TEST(vector_set, InitializerListConstructor_WithDuplicates)
TEST(vector_set, Copy)
{
IntVectorSet set1 = {1, 2, 3};
IntVectorSet set2 = set1;
VectorSet<int> set1 = {1, 2, 3};
VectorSet<int> set2 = set1;
EXPECT_EQ(set1.size(), 3);
EXPECT_EQ(set2.size(), 3);
EXPECT_EQ(set1.index(2), 1);
EXPECT_EQ(set2.index(2), 1);
EXPECT_EQ(set1.index_of(2), 1);
EXPECT_EQ(set2.index_of(2), 1);
}
TEST(vector_set, CopyAssignment)
{
IntVectorSet set1 = {1, 2, 3};
IntVectorSet set2 = {};
VectorSet<int> set1 = {1, 2, 3};
VectorSet<int> set2 = {};
set2 = set1;
EXPECT_EQ(set1.size(), 3);
EXPECT_EQ(set2.size(), 3);
EXPECT_EQ(set1.index(2), 1);
EXPECT_EQ(set2.index(2), 1);
EXPECT_EQ(set1.index_of(2), 1);
EXPECT_EQ(set2.index_of(2), 1);
}
TEST(vector_set, Move)
{
IntVectorSet set1 = {1, 2, 3};
IntVectorSet set2 = std::move(set1);
VectorSet<int> set1 = {1, 2, 3};
VectorSet<int> set2 = std::move(set1);
EXPECT_EQ(set1.size(), 0);
EXPECT_EQ(set2.size(), 3);
}
TEST(vector_set, MoveAssignment)
{
IntVectorSet set1 = {1, 2, 3};
IntVectorSet set2 = {};
VectorSet<int> set1 = {1, 2, 3};
VectorSet<int> set2 = {};
set2 = std::move(set1);
EXPECT_EQ(set1.size(), 0);
EXPECT_EQ(set2.size(), 3);
@@ -70,7 +70,7 @@ TEST(vector_set, MoveAssignment)
TEST(vector_set, AddNewIncreasesSize)
{
IntVectorSet set;
VectorSet<int> set;
EXPECT_TRUE(set.is_empty());
EXPECT_EQ(set.size(), 0);
set.add(5);
@@ -80,57 +80,81 @@ TEST(vector_set, AddNewIncreasesSize)
TEST(vector_set, AddExistingDoesNotIncreaseSize)
{
IntVectorSet set;
VectorSet<int> set;
EXPECT_EQ(set.size(), 0);
set.add(5);
EXPECT_TRUE(set.add(5));
EXPECT_EQ(set.size(), 1);
set.add(5);
EXPECT_FALSE(set.add(5));
EXPECT_EQ(set.size(), 1);
}
TEST(vector_set, Index)
{
IntVectorSet set = {3, 6, 4};
EXPECT_EQ(set.index(6), 1);
EXPECT_EQ(set.index(3), 0);
EXPECT_EQ(set.index(4), 2);
VectorSet<int> set = {3, 6, 4};
EXPECT_EQ(set.index_of(6), 1);
EXPECT_EQ(set.index_of(3), 0);
EXPECT_EQ(set.index_of(4), 2);
}
TEST(vector_set, IndexTry)
{
IntVectorSet set = {3, 6, 4};
EXPECT_EQ(set.index_try(5), -1);
EXPECT_EQ(set.index_try(3), 0);
EXPECT_EQ(set.index_try(6), 1);
EXPECT_EQ(set.index_try(2), -1);
VectorSet<int> set = {3, 6, 4};
EXPECT_EQ(set.index_of_try(5), -1);
EXPECT_EQ(set.index_of_try(3), 0);
EXPECT_EQ(set.index_of_try(6), 1);
EXPECT_EQ(set.index_of_try(2), -1);
}
TEST(vector_set, Remove)
TEST(vector_set, RemoveContained)
{
IntVectorSet set = {4, 5, 6, 7};
VectorSet<int> set = {4, 5, 6, 7};
EXPECT_EQ(set.size(), 4);
set.remove(5);
set.remove_contained(5);
EXPECT_EQ(set.size(), 3);
EXPECT_EQ(set[0], 4);
EXPECT_EQ(set[1], 7);
EXPECT_EQ(set[2], 6);
set.remove(6);
set.remove_contained(6);
EXPECT_EQ(set.size(), 2);
EXPECT_EQ(set[0], 4);
EXPECT_EQ(set[1], 7);
set.remove(4);
set.remove_contained(4);
EXPECT_EQ(set.size(), 1);
EXPECT_EQ(set[0], 7);
set.remove(7);
set.remove_contained(7);
EXPECT_EQ(set.size(), 0);
}
TEST(vector_set, AddMultipleTimes)
{
VectorSet<int> set;
for (int i = 0; i < 100; i++) {
EXPECT_FALSE(set.contains(i * 13));
set.add(i * 12);
set.add(i * 13);
EXPECT_TRUE(set.contains(i * 13));
}
}
TEST(vector_set, UniquePtrValue)
{
VectorSet<std::unique_ptr<int>> set;
set.add_new(std::unique_ptr<int>(new int()));
set.add(std::unique_ptr<int>(new int()));
set.index_try(std::unique_ptr<int>(new int()));
set.index_of_try(std::unique_ptr<int>(new int()));
std::unique_ptr<int> value = set.pop();
UNUSED_VARS(value);
}
TEST(vector_set, Remove)
{
VectorSet<int> set;
EXPECT_TRUE(set.add(5));
EXPECT_TRUE(set.contains(5));
EXPECT_FALSE(set.remove(6));
EXPECT_TRUE(set.contains(5));
EXPECT_TRUE(set.remove(5));
EXPECT_FALSE(set.contains(5));
EXPECT_FALSE(set.remove(5));
EXPECT_FALSE(set.contains(5));
}

View File

@@ -1,9 +1,9 @@
#include "BLI_type_construct_mock.hh"
#include "BLI_strict_flags.h"
#include "BLI_vector.hh"
#include "testing/testing.h"
#include <forward_list>
using BLI::Vector;
using namespace BLI;
TEST(vector, DefaultConstructor)
{
@@ -15,9 +15,26 @@ TEST(vector, SizeConstructor)
{
Vector<int> vec(3);
EXPECT_EQ(vec.size(), 3);
EXPECT_EQ(vec[0], 0);
EXPECT_EQ(vec[1], 0);
EXPECT_EQ(vec[2], 0);
}
/**
* Tests that trivially constructible types are not zero-initialized. We do not want that for
* performance reasons.
*/
TEST(vector, TrivialTypeSizeConstructor)
{
Vector<char, 1> *vec = new Vector<char, 1>(1);
char *ptr = &(*vec)[0];
vec->~Vector();
const char magic = 42;
*ptr = magic;
EXPECT_EQ(*ptr, magic);
new (vec) Vector<char, 1>(1);
EXPECT_EQ((*vec)[0], magic);
EXPECT_EQ(*ptr, magic);
delete vec;
}
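/* The destroy / write / placement-new sequence above only passes if Vector<char, 1>(1) leaves the
* char slot uninitialized: the magic byte written into the raw storage must still be visible after
* the vector has been re-constructed in place. */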
TEST(vector, SizeValueConstructor)
@@ -102,7 +119,7 @@ TEST(vector, CopyConstructor2)
EXPECT_EQ(vec1.size(), 4);
EXPECT_EQ(vec2.size(), 4);
EXPECT_NE(vec1.begin(), vec2.begin());
EXPECT_NE(vec1.data(), vec2.data());
EXPECT_EQ(vec2[0], 1);
EXPECT_EQ(vec2[1], 2);
EXPECT_EQ(vec2[2], 3);
@@ -116,7 +133,7 @@ TEST(vector, CopyConstructor3)
EXPECT_EQ(vec1.size(), 4);
EXPECT_EQ(vec2.size(), 4);
EXPECT_NE(vec1.begin(), vec2.begin());
EXPECT_NE(vec1.data(), vec2.data());
EXPECT_EQ(vec2[2], 3);
}
@@ -127,7 +144,7 @@ TEST(vector, CopyConstructor4)
EXPECT_EQ(vec1.size(), 4);
EXPECT_EQ(vec2.size(), 4);
EXPECT_NE(vec1.begin(), vec2.begin());
EXPECT_NE(vec1.data(), vec2.data());
EXPECT_EQ(vec2[3], 4);
}
@@ -288,7 +305,7 @@ TEST(vector, BecomeLarge)
vec.append(i * 5);
}
EXPECT_EQ(vec.size(), 100);
for (int i = 0; i < 100; i++) {
for (uint i = 0; i < 100; i++) {
EXPECT_EQ(vec[i], i * 5);
}
}
@@ -387,20 +404,21 @@ TEST(vector, RemoveFirstOccurrenceAndReorder)
EXPECT_EQ(vec.size(), 0);
}
TEST(vector, AllEqual_False)
TEST(vector, Remove)
{
Vector<int> a = {1, 2, 3};
Vector<int> b = {1, 2, 4};
bool result = Vector<int>::all_equal(a, b);
EXPECT_FALSE(result);
}
TEST(vector, AllEqual_True)
{
Vector<int> a = {4, 5, 6};
Vector<int> b = {4, 5, 6};
bool result = Vector<int>::all_equal(a, b);
EXPECT_TRUE(result);
Vector<int> vec = {1, 2, 3, 4, 5, 6};
vec.remove(3);
EXPECT_TRUE(std::equal(vec.begin(), vec.end(), ArrayRef<int>({1, 2, 3, 5, 6}).begin()));
vec.remove(0);
EXPECT_TRUE(std::equal(vec.begin(), vec.end(), ArrayRef<int>({2, 3, 5, 6}).begin()));
vec.remove(3);
EXPECT_TRUE(std::equal(vec.begin(), vec.end(), ArrayRef<int>({2, 3, 5}).begin()));
vec.remove(1);
EXPECT_TRUE(std::equal(vec.begin(), vec.end(), ArrayRef<int>({2, 5}).begin()));
vec.remove(1);
EXPECT_TRUE(std::equal(vec.begin(), vec.end(), ArrayRef<int>({2}).begin()));
vec.remove(0);
EXPECT_TRUE(std::equal(vec.begin(), vec.end(), ArrayRef<int>({}).begin()));
}
TEST(vector, ExtendSmallVector)
@@ -453,14 +471,59 @@ TEST(vector, UniquePtrValue)
vec.append(std::unique_ptr<int>(new int()));
vec.append(std::unique_ptr<int>(new int()));
vec.append(std::unique_ptr<int>(new int()));
vec.append(std::unique_ptr<int>(new int()));
EXPECT_EQ(vec.size(), 4);
std::unique_ptr<int> &a = vec.last();
std::unique_ptr<int> b = vec.pop_last();
vec.remove_and_reorder(0);
vec.remove(0);
EXPECT_EQ(vec.size(), 1);
UNUSED_VARS(a, b);
}
class TypeConstructMock {
public:
bool default_constructed = false;
bool copy_constructed = false;
bool move_constructed = false;
bool copy_assigned = false;
bool move_assigned = false;
TypeConstructMock() : default_constructed(true)
{
}
TypeConstructMock(const TypeConstructMock &other) : copy_constructed(true)
{
}
TypeConstructMock(TypeConstructMock &&other) : move_constructed(true)
{
}
TypeConstructMock &operator=(const TypeConstructMock &other)
{
if (this == &other) {
return *this;
}
copy_assigned = true;
return *this;
}
TypeConstructMock &operator=(TypeConstructMock &&other)
{
if (this == &other) {
return *this;
}
move_assigned = true;
return *this;
}
};
TEST(vector, SizeConstructorCallsDefaultConstructor)
{
Vector<TypeConstructMock> vec(3);
@@ -525,3 +588,51 @@ TEST(vector, LargeVectorMoveCallsNoConstructor)
EXPECT_FALSE(dst[0].move_constructed);
EXPECT_FALSE(dst[0].copy_constructed);
}
TEST(vector, Resize)
{
std::string long_string = "012345678901234567890123456789";
Vector<std::string> vec;
EXPECT_EQ(vec.size(), 0);
vec.resize(2);
EXPECT_EQ(vec.size(), 2);
EXPECT_EQ(vec[0], "");
EXPECT_EQ(vec[1], "");
vec.resize(5, long_string);
EXPECT_EQ(vec.size(), 5);
EXPECT_EQ(vec[0], "");
EXPECT_EQ(vec[1], "");
EXPECT_EQ(vec[2], long_string);
EXPECT_EQ(vec[3], long_string);
EXPECT_EQ(vec[4], long_string);
vec.resize(1);
EXPECT_EQ(vec.size(), 1);
EXPECT_EQ(vec[0], "");
}
TEST(vector, FirstIndexOf)
{
Vector<int> vec = {2, 3, 5, 7, 5, 9};
EXPECT_EQ(vec.first_index_of(2), 0);
EXPECT_EQ(vec.first_index_of(5), 2);
EXPECT_EQ(vec.first_index_of(9), 5);
}
TEST(vector, FirstIndexTryOf)
{
Vector<int> vec = {2, 3, 5, 7, 5, 9};
EXPECT_EQ(vec.first_index_of_try(2), 0);
EXPECT_EQ(vec.first_index_of_try(4), -1);
EXPECT_EQ(vec.first_index_of_try(5), 2);
EXPECT_EQ(vec.first_index_of_try(9), 5);
EXPECT_EQ(vec.first_index_of_try(1), -1);
}
TEST(vector, OveralignedValues)
{
Vector<AlignedBuffer<1, 512>, 2> vec;
for (int i = 0; i < 100; i++) {
vec.append({});
EXPECT_EQ((uintptr_t)&vec.last() % 512, 0);
}
}

View File

@@ -58,6 +58,7 @@ BLENDER_TEST(BLI_linklist_lockfree "bf_blenlib;bf_intern_numaapi")
BLENDER_TEST(BLI_listbase "bf_blenlib")
BLENDER_TEST(BLI_map "bf_blenlib")
BLENDER_TEST(BLI_math_base "bf_blenlib")
BLENDER_TEST(BLI_math_bits "bf_blenlib")
BLENDER_TEST(BLI_math_color "bf_blenlib")
BLENDER_TEST(BLI_math_geom "bf_blenlib")
BLENDER_TEST(BLI_math_vector "bf_blenlib")
@@ -69,7 +70,6 @@ BLENDER_TEST(BLI_set "bf_blenlib")
BLENDER_TEST(BLI_stack "bf_blenlib")
BLENDER_TEST(BLI_stack_cxx "bf_blenlib")
BLENDER_TEST(BLI_string "bf_blenlib")
BLENDER_TEST(BLI_string_map "bf_blenlib")
BLENDER_TEST(BLI_string_ref "bf_blenlib")
BLENDER_TEST(BLI_string_utf8 "bf_blenlib")
BLENDER_TEST(BLI_task "bf_blenlib;bf_intern_numaapi")