BLI: new bit span data structure #104671

Merged
Jacques Lucke merged 26 commits from JacquesLucke/blender:bit-span into main 2023-02-17 00:42:54 +01:00
8 changed files with 876 additions and 235 deletions


@@ -0,0 +1,234 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup bli
*
* This file provides the basis for processing "indexed bits" (i.e. every bit has an index).
* The main purpose of this file is to define how bits are indexed within a memory buffer.
* For example, one has to define whether the first bit is the least or most significant bit and
 * how endianness affects the bit order.
JacquesLucke marked this conversation as resolved
Review

"This is necessary, because there are many different ways to do it" seems unnecessary and a bit awkward. The same point is made without the sentence.
*
* The order is defined as follows:
 * - Every indexed bit is part of a #BitInt. These ints are ordered by their address as usual.
* - Within each #BitInt, the bits are ordered from least to most significant.
*/
#include "BLI_index_range.hh"
#include "BLI_utildefines.h"
#include <ostream>
namespace blender::bits {
/** Using a large integer type is better because then it's easier to process many bits at once. */
using BitInt = uint64_t;
/** Number of bits that fit into #BitInt. */
static constexpr int64_t BitsPerInt = int64_t(sizeof(BitInt) * 8);
/** Shift amount to get from a bit index to an int index. Equivalent to `log(BitsPerInt, 2)`. */
static constexpr int64_t BitToIntIndexShift = 3 + (sizeof(BitInt) >= 2) + (sizeof(BitInt) >= 4) +
(sizeof(BitInt) >= 8);
/** Bit mask containing a 1 for each of the low bits that index a bit inside a #BitInt. */
static constexpr BitInt BitIndexMask = (BitInt(1) << BitToIntIndexShift) - 1;
inline BitInt mask_first_n_bits(const int64_t n)
{
BLI_assert(n >= 0);
BLI_assert(n <= BitsPerInt);
if (n == BitsPerInt) {
return BitInt(-1);
}
return (BitInt(1) << n) - 1;
}
inline BitInt mask_last_n_bits(const int64_t n)
{
return ~mask_first_n_bits(BitsPerInt - n);
}
inline BitInt mask_range_bits(const int64_t start, const int64_t size)
{
BLI_assert(start >= 0);
BLI_assert(size >= 0);
const int64_t end = start + size;
BLI_assert(end <= BitsPerInt);
if (end == BitsPerInt) {
return mask_last_n_bits(size);
}
return ((BitInt(1) << end) - 1) & ~((BitInt(1) << start) - 1);
}
inline BitInt mask_single_bit(const int64_t bit_index)
{
BLI_assert(bit_index >= 0);
BLI_assert(bit_index < BitsPerInt);
return BitInt(1) << bit_index;
}
inline BitInt *int_containing_bit(BitInt *data, const int64_t bit_index)
{
return data + (bit_index >> BitToIntIndexShift);
}
inline const BitInt *int_containing_bit(const BitInt *data, const int64_t bit_index)
{
return data + (bit_index >> BitToIntIndexShift);
}
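
As a quick illustration of the indexing rules above (a hypothetical sketch, not part of this patch): with the 64-bit #BitInt, `BitToIntIndexShift` evaluates to 3 + 1 + 1 + 1 = 6 and `BitIndexMask` to 63, so a global bit index splits into an int index plus a bit position within that int.

void bit_index_math_example()
{
  using namespace blender::bits;
  const int64_t bit_index = 70;
  const int64_t int_index = bit_index >> BitToIntIndexShift; /* 70 / 64 == 1 */
  const int64_t bit_in_int = bit_index & BitIndexMask;       /* 70 % 64 == 6 */
  BLI_assert(int_index == 1);
  BLI_assert(bit_in_int == 6);
  BLI_assert(mask_single_bit(bit_in_int) == (BitInt(1) << 6));
}
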
/**
* This is a read-only pointer to a specific bit. The value of the bit can be retrieved, but
* not changed.
*/
class BitRef {
private:
/** Points to the exact integer that the bit is in. */
const BitInt *int_;
JacquesLucke marked this conversation as resolved
Review

What about calling this `int_` to help separate it from the `data` variables that point to a `BitInt` array?

Review

Sounds reasonable.
/** All zeros except for a single one at the bit that is referenced. */
BitInt mask_;
friend class MutableBitRef;
public:
BitRef() = default;
/**
* Reference a specific bit in an array. Note that #data does *not* have to point to the
* exact integer the bit is in.
*/
BitRef(const BitInt *data, const int64_t bit_index)
{
int_ = int_containing_bit(data, bit_index);
mask_ = mask_single_bit(bit_index & BitIndexMask);
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const BitInt value = *int_;
const BitInt masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
};
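
To illustrate the note above that #data may point at the start of a whole array (a sketch, the function name is made up): bit index 70 resolves to bit 6 of `data[1]`.

void bit_ref_across_ints_example()
{
  using namespace blender::bits;
  BitInt data[2] = {0, BitInt(1) << 6};
  BLI_assert(BitRef(data, 70).test());  /* Bit 70 is bit 6 of data[1]. */
  BLI_assert(!BitRef(data, 6).test());  /* Bit 6 of data[0] is 0. */
}
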
/**
* Similar to #BitRef, but also allows changing the referenced bit.
*/
class MutableBitRef {
private:
/** Points to the integer that the bit is in. */
BitInt *int_;
/** All zeros except for a single one at the bit that is referenced. */
BitInt mask_;
public:
MutableBitRef() = default;
/**
* Reference a specific bit in an array. Note that #data does *not* have to point to the
* exact int the bit is in.
*/
MutableBitRef(BitInt *data, const int64_t bit_index)
{
int_ = int_containing_bit(data, bit_index);
mask_ = mask_single_bit(bit_index & BitIndexMask);
}
/**
* Support implicitly casting to a read-only #BitRef.
*/
operator BitRef() const
{
BitRef bit_ref;
bit_ref.int_ = int_;
bit_ref.mask_ = mask_;
return bit_ref;
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const BitInt value = *int_;
const BitInt masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
/**
* Change the bit to a 1.
*/
void set()
{
*int_ |= mask_;
}
/**
* Change the bit to a 0.
*/
void reset()
{
*int_ &= ~mask_;
}
/**
* Change the bit to a 1 if #value is true and 0 otherwise. If the value is highly unpredictable
* by the CPU branch predictor, it can be faster to use #set_branchless instead.
*/
void set(const bool value)
{
if (value) {
this->set();
}
else {
this->reset();
}
}
/**
* Does the same as #set, but does not use a branch. This is faster when the input value is
* unpredictable for the CPU branch predictor (best case for this function is a uniform random
JacquesLucke marked this conversation as resolved
Review

Saying "worst case" here is confusing because from another perspective the uniform random distribution is the best case for the branchless version.
* distribution with 50% probability for true and false). If the value is predictable, this is
* likely slower than #set.
*/
void set_branchless(const bool value)
{
const BitInt value_int = BitInt(value);
BLI_assert(ELEM(value_int, 0, 1));
const BitInt old = *int_;
*int_ =
/* Unset bit. */
(~mask_ & old)
/* Optionally set it again. The -1 turns a 1 into `0x00...` and a 0 into `0xff...`. */
| (mask_ & ~(value_int - 1));
}
};
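
To spell out the branchless arithmetic (a sketch, not part of the patch): for `value == true`, `~(BitInt(1) - 1)` is all ones, so the mask is OR-ed back in after clearing; for `value == false`, `~(BitInt(0) - 1)` is zero, so the bit stays cleared.

void set_branchless_example()
{
  using namespace blender::bits;
  BitInt x = 0;
  MutableBitRef bit(&x, 5);
  bit.set_branchless(true);  /* x == (~mask & 0) | (mask & ~BitInt(0)) == mask. */
  BLI_assert(x == mask_single_bit(5));
  bit.set_branchless(false); /* x == (~mask & x) | (mask & BitInt(0)) == 0. */
  BLI_assert(x == 0);
}
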
inline std::ostream &operator<<(std::ostream &stream, const BitRef &bit)
{
return stream << (bit ? "1" : "0");
}
inline std::ostream &operator<<(std::ostream &stream, const MutableBitRef &bit)
{
return stream << BitRef(bit);
}
} // namespace blender::bits
namespace blender {
using bits::BitRef;
using bits::MutableBitRef;
} // namespace blender


@@ -0,0 +1,290 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include "BLI_bit_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_memory_utils.hh"
namespace blender::bits {
/** Base class for the const and non-const bit iterators. */
class BitIteratorBase {
protected:
const BitInt *data_;
int64_t bit_index_;
public:
BitIteratorBase(const BitInt *data, const int64_t bit_index) : data_(data), bit_index_(bit_index)
{
}
BitIteratorBase &operator++()
{
bit_index_++;
return *this;
}
friend bool operator!=(const BitIteratorBase &a, const BitIteratorBase &b)
{
BLI_assert(a.data_ == b.data_);
return a.bit_index_ != b.bit_index_;
}
};
/** Allows iterating over the bits in a memory buffer. */
class BitIterator : public BitIteratorBase {
public:
BitIterator(const BitInt *data, const int64_t bit_index) : BitIteratorBase(data, bit_index)
{
}
BitRef operator*() const
{
return BitRef(data_, bit_index_);
}
};
/** Same as #BitIterator, but allows modifying the referenced bits. */
class MutableBitIterator : public BitIteratorBase {
public:
MutableBitIterator(BitInt *data, const int64_t bit_index) : BitIteratorBase(data, bit_index)
{
}
MutableBitRef operator*() const
{
return MutableBitRef(const_cast<BitInt *>(data_), bit_index_);
}
};
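
A minimal sketch of iterating bits directly with these iterators (hypothetical function; the span types below wrap exactly this begin/end pattern):

void iterate_bits_example()
{
  using namespace blender::bits;
  BitInt data = 0b1010;
  MutableBitIterator it(&data, 0);
  const MutableBitIterator end(&data, 4);
  for (; it != end; ++it) {
    MutableBitRef bit = *it;
    bit.set(!bit.test()); /* Flip each of the first four bits. */
  }
  BLI_assert(data == 0b0101);
}
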
/**
* Similar to #Span, but references a range of bits instead of normal C++ types (which must be at
JacquesLucke marked this conversation as resolved
Review

`which are` -> `which must be`
* least one byte large). Use #MutableBitSpan if the values are supposed to be modified.
*
 * The beginning and end of a #BitSpan do *not* have to be at byte/int boundaries. It can start
* and end at any bit.
*/
class BitSpan {
private:
/** Base pointer to the integers containing the bits. The actual bit span might start at a much
* higher address when `bit_range_.start()` is large. */
const BitInt *data_ = nullptr;
/** The range of referenced bits. */
IndexRange bit_range_ = {0, 0};
public:
/** Construct an empty span. */
BitSpan() = default;
BitSpan(const BitInt *data, const int64_t size_in_bits) : data_(data), bit_range_(size_in_bits)
{
}
BitSpan(const BitInt *data, const IndexRange bit_range) : data_(data), bit_range_(bit_range)
{
}
/** Number of bits referenced by the span. */
int64_t size() const
{
return bit_range_.size();
}
bool is_empty() const
{
return bit_range_.is_empty();
}
IndexRange index_range() const
{
return IndexRange(bit_range_.size());
}
BitRef operator[](const int64_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < bit_range_.size());
return {data_, bit_range_.start() + index};
}
BitSpan slice(const IndexRange range) const
{
return {data_, bit_range_.slice(range)};
}
const BitInt *data() const
{
return data_;
}
const IndexRange &bit_range() const
{
return bit_range_;
}
BitIterator begin() const
{
return {data_, bit_range_.start()};
}
BitIterator end() const
{
return {data_, bit_range_.one_after_last()};
}
};
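
Since a #BitSpan may start at any bit (a sketch with a made-up function name), a span over `IndexRange(4, 3)` references bits 4..6 of the buffer and `span[0]` is bit 4:

void unaligned_span_example()
{
  using namespace blender::bits;
  BitInt data = 0b0101'0000; /* Bits 4 and 6 are set. */
  const BitSpan span(&data, IndexRange(4, 3));
  BLI_assert(span.size() == 3);
  BLI_assert(span[0] && !span[1] && span[2]); /* Bits 4, 5 and 6 of `data`. */
}
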
/** Same as #BitSpan, but also allows modifying the referenced bits. */
class MutableBitSpan {
private:
BitInt *data_ = nullptr;
IndexRange bit_range_ = {0, 0};
public:
MutableBitSpan() = default;
MutableBitSpan(BitInt *data, const int64_t size) : data_(data), bit_range_(size)
{
}
MutableBitSpan(BitInt *data, const IndexRange bit_range) : data_(data), bit_range_(bit_range)
{
}
int64_t size() const
{
return bit_range_.size();
}
bool is_empty() const
{
return bit_range_.is_empty();
}
IndexRange index_range() const
{
return IndexRange(bit_range_.size());
}
MutableBitRef operator[](const int64_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < bit_range_.size());
return {data_, bit_range_.start() + index};
}
MutableBitSpan slice(const IndexRange range) const
{
return {data_, bit_range_.slice(range)};
}
BitInt *data() const
{
return data_;
}
const IndexRange &bit_range() const
{
return bit_range_;
}
MutableBitIterator begin() const
{
return {data_, bit_range_.start()};
}
MutableBitIterator end() const
{
return {data_, bit_range_.one_after_last()};
}
operator BitSpan() const
{
return {data_, bit_range_};
}
/** Sets all referenced bits to 1. */
void set_all()
JacquesLucke marked this conversation as resolved
Review

It seems confusing to have `set()` and `reset()` methods for the whole span and for each bit. What about calling the functions on the span `set_all()` and `reset_all()`? Some name like `fill()` would be nice too since it mirrors `MutableSpan`.
{
const AlignedIndexRanges ranges = split_index_range_by_alignment(bit_range_, BitsPerInt);
{
BitInt &first_int = *int_containing_bit(data_, bit_range_.start());
const BitInt first_int_mask = mask_range_bits(ranges.prefix.start() & BitIndexMask,
ranges.prefix.size());
first_int |= first_int_mask;
}
{
BitInt *start = int_containing_bit(data_, ranges.aligned.start());
const int64_t ints_to_fill = ranges.aligned.size() / BitsPerInt;
constexpr BitInt fill_value = BitInt(-1);
initialized_fill_n(start, ints_to_fill, fill_value);
}
{
BitInt &last_int = *int_containing_bit(data_, bit_range_.one_after_last() - 1);
const BitInt last_int_mask = mask_first_n_bits(ranges.suffix.size());
last_int |= last_int_mask;
}
}
/** Sets all referenced bits to 0. */
void reset_all()
{
const AlignedIndexRanges ranges = split_index_range_by_alignment(bit_range_, BitsPerInt);
{
BitInt &first_int = *int_containing_bit(data_, bit_range_.start());
const BitInt first_int_mask = mask_range_bits(ranges.prefix.start() & BitIndexMask,
ranges.prefix.size());
first_int &= ~first_int_mask;
}
{
BitInt *start = int_containing_bit(data_, ranges.aligned.start());
const int64_t ints_to_fill = ranges.aligned.size() / BitsPerInt;
constexpr BitInt fill_value = 0;
initialized_fill_n(start, ints_to_fill, fill_value);
}
{
BitInt &last_int = *int_containing_bit(data_, bit_range_.one_after_last() - 1);
const BitInt last_int_mask = mask_first_n_bits(ranges.suffix.size());
last_int &= ~last_int_mask;
}
}
/** Sets all referenced bits to either 0 or 1. */
void set_all(const bool value)
{
if (value) {
this->set_all();
}
else {
this->reset_all();
}
}
/** Same as #set_all to mirror #MutableSpan. */
void fill(const bool value)
{
this->set_all(value);
}
};
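
The `set_all`/`reset_all` implementations above lean on `split_index_range_by_alignment` from BLI_index_range.hh to separate a partial first int, a run of whole ints, and a partial last int. A sketch (hypothetical function) of the split for the range the `SetSliced` test below fills, bits [5, 505) with 64-bit alignment:

void alignment_split_example()
{
  using namespace blender;
  const AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(5, 500), 64);
  BLI_assert(ranges.prefix == IndexRange(5, 59));    /* Bits 5..63, masked into one int. */
  BLI_assert(ranges.aligned == IndexRange(64, 384)); /* Bits 64..447, six whole ints. */
  BLI_assert(ranges.suffix == IndexRange(448, 57));  /* Bits 448..504, masked into one int. */
}
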
inline std::ostream &operator<<(std::ostream &stream, const BitSpan &span)
{
stream << "(Size: " << span.size() << ", ";
for (const BitRef bit : span) {
stream << bit;
}
stream << ")";
return stream;
}
inline std::ostream &operator<<(std::ostream &stream, const MutableBitSpan &span)
{
return stream << BitSpan(span);
}
} // namespace blender::bits
namespace blender {
using bits::BitSpan;
using bits::MutableBitSpan;
} // namespace blender


@@ -38,142 +38,11 @@
#include <cstring>
#include "BLI_allocator.hh"
#include "BLI_index_range.hh"
#include "BLI_memory_utils.hh"
#include "BLI_bit_span.hh"
#include "BLI_span.hh"
namespace blender::bits {
/**
* Using a large integer type is better because then it's easier to process many bits at once.
*/
using IntType = uint64_t;
static constexpr int64_t BitsPerInt = int64_t(sizeof(IntType) * 8);
static constexpr int64_t BitToIntIndexShift = 3 + (sizeof(IntType) >= 2) + (sizeof(IntType) >= 4) +
(sizeof(IntType) >= 8);
static constexpr IntType BitIndexMask = (IntType(1) << BitToIntIndexShift) - 1;
/**
* This is a read-only pointer to a specific bit. The value of the bit can be retrieved, but
* not changed.
*/
class BitRef {
private:
/** Points to the integer that the bit is in. */
const IntType *ptr_;
/** All zeros except for a single one at the bit that is referenced. */
IntType mask_;
friend class MutableBitRef;
public:
BitRef() = default;
/**
* Reference a specific bit in an array. Note that #ptr does *not* have to point to the
* exact integer the bit is in.
*/
BitRef(const IntType *ptr, const int64_t bit_index)
{
ptr_ = ptr + (bit_index >> BitToIntIndexShift);
mask_ = IntType(1) << (bit_index & BitIndexMask);
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const IntType value = *ptr_;
const IntType masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
};
/**
* Similar to #BitRef, but also allows changing the referenced bit.
*/
class MutableBitRef {
private:
/** Points to the integer that the bit is in. */
IntType *ptr_;
/** All zeros except for a single one at the bit that is referenced. */
IntType mask_;
public:
MutableBitRef() = default;
/**
* Reference a specific bit in an array. Note that #ptr does *not* have to point to the
* exact int the bit is in.
*/
MutableBitRef(IntType *ptr, const int64_t bit_index)
{
ptr_ = ptr + (bit_index >> BitToIntIndexShift);
mask_ = IntType(1) << IntType(bit_index & BitIndexMask);
}
/**
* Support implicitly casting to a read-only #BitRef.
*/
operator BitRef() const
{
BitRef bit_ref;
bit_ref.ptr_ = ptr_;
bit_ref.mask_ = mask_;
return bit_ref;
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const IntType value = *ptr_;
const IntType masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
/**
* Change the bit to a 1.
*/
void set()
{
*ptr_ |= mask_;
}
/**
* Change the bit to a 0.
*/
void reset()
{
*ptr_ &= ~mask_;
}
/**
* Change the bit to a 1 if #value is true and 0 otherwise.
*/
void set(const bool value)
{
if (value) {
this->set();
}
else {
this->reset();
}
}
};
template<
/**
* Number of bits that can be stored in the vector without doing an allocation.
@@ -193,13 +62,13 @@ class BitVector {
static constexpr int64_t IntsInInlineBuffer = required_ints_for_bits(InlineBufferCapacity);
static constexpr int64_t BitsInInlineBuffer = IntsInInlineBuffer * BitsPerInt;
static constexpr int64_t AllocationAlignment = alignof(IntType);
static constexpr int64_t AllocationAlignment = alignof(BitInt);
/**
* Points to the first integer used by the vector. It might point to the memory in the inline
* buffer.
*/
IntType *data_;
BitInt *data_;
/** Current size of the vector in bits. */
int64_t size_in_bits_;
@@ -211,7 +80,7 @@ class BitVector {
BLI_NO_UNIQUE_ADDRESS Allocator allocator_;
/** Contains the bits as long as the vector is small enough. */
BLI_NO_UNIQUE_ADDRESS TypedBuffer<IntType, IntsInInlineBuffer> inline_buffer_;
BLI_NO_UNIQUE_ADDRESS TypedBuffer<BitInt, IntsInInlineBuffer> inline_buffer_;
public:
BitVector(Allocator allocator = {}) noexcept : allocator_(allocator)
@@ -219,7 +88,7 @@ class BitVector {
data_ = inline_buffer_;
size_in_bits_ = 0;
capacity_in_bits_ = BitsInInlineBuffer;
uninitialized_fill_n(data_, IntsInInlineBuffer, IntType(0));
uninitialized_fill_n(data_, IntsInInlineBuffer, BitInt(0));
}
BitVector(NoExceptConstructor, Allocator allocator = {}) noexcept : BitVector(allocator)
@@ -236,8 +105,8 @@ class BitVector {
}
else {
/* Allocate a new array because the inline buffer is too small. */
data_ = static_cast<IntType *>(
allocator_.allocate(ints_to_copy * sizeof(IntType), AllocationAlignment, __func__));
data_ = static_cast<BitInt *>(
allocator_.allocate(ints_to_copy * sizeof(BitInt), AllocationAlignment, __func__));
capacity_in_bits_ = ints_to_copy * BitsPerInt;
}
size_in_bits_ = other.size_in_bits_;
@@ -303,6 +172,16 @@ class BitVector {
return move_assign_container(*this, std::move(other));
}
operator BitSpan() const
{
return {data_, IndexRange(size_in_bits_)};
}
operator MutableBitSpan()
{
return {data_, IndexRange(size_in_bits_)};
}
/**
* Number of bits in the bit vector.
*/
@@ -352,80 +231,24 @@ class BitVector {
size_in_bits_++;
}
class Iterator {
private:
const BitVector *vector_;
int64_t index_;
public:
Iterator(const BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
{
}
Iterator &operator++()
{
index_++;
return *this;
}
friend bool operator!=(const Iterator &a, const Iterator &b)
{
BLI_assert(a.vector_ == b.vector_);
return a.index_ != b.index_;
}
BitRef operator*() const
{
return (*vector_)[index_];
}
};
class MutableIterator {
private:
BitVector *vector_;
int64_t index_;
public:
MutableIterator(BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
{
}
MutableIterator &operator++()
{
index_++;
return *this;
}
friend bool operator!=(const MutableIterator &a, const MutableIterator &b)
{
BLI_assert(a.vector_ == b.vector_);
return a.index_ != b.index_;
}
MutableBitRef operator*() const
{
return (*vector_)[index_];
}
};
Iterator begin() const
BitIterator begin() const
{
return {*this, 0};
return {data_, 0};
}
Iterator end() const
BitIterator end() const
{
return {*this, size_in_bits_};
return {data_, size_in_bits_};
}
MutableIterator begin()
MutableBitIterator begin()
{
return {*this, 0};
return {data_, 0};
}
MutableIterator end()
MutableBitIterator end()
{
return {*this, size_in_bits_};
return {data_, size_in_bits_};
}
/**
@@ -441,31 +264,8 @@ class BitVector {
}
size_in_bits_ = new_size_in_bits;
if (old_size_in_bits < new_size_in_bits) {
this->fill_range(IndexRange(old_size_in_bits, new_size_in_bits - old_size_in_bits), value);
}
}
/**
* Set #value for every element in #range.
*/
void fill_range(const IndexRange range, const bool value)
{
const AlignedIndexRanges aligned_ranges = split_index_range_by_alignment(range, BitsPerInt);
/* Fill first few bits. */
for (const int64_t i : aligned_ranges.prefix) {
(*this)[i].set(value);
}
/* Fill entire ints at once. */
const int64_t start_fill_int_index = aligned_ranges.aligned.start() / BitsPerInt;
const int64_t ints_to_fill = aligned_ranges.aligned.size() / BitsPerInt;
const IntType fill_value = value ? IntType(-1) : IntType(0);
initialized_fill_n(data_ + start_fill_int_index, ints_to_fill, fill_value);
/* Fill bits in the end that don't cover a full int. */
for (const int64_t i : aligned_ranges.suffix) {
(*this)[i].set(value);
MutableBitSpan(data_, IndexRange(old_size_in_bits, new_size_in_bits - old_size_in_bits))
.set_all(value);
}
}
@@ -474,7 +274,7 @@ class BitVector {
*/
void fill(const bool value)
{
this->fill_range(IndexRange(0, size_in_bits_), value);
MutableBitSpan(data_, size_in_bits_).set_all(value);
}
/**
@@ -517,7 +317,7 @@ class BitVector {
}
BLI_NOINLINE void realloc_to_at_least(const int64_t min_capacity_in_bits,
const IntType initial_value_for_new_ints = 0x00)
const BitInt initial_value_for_new_ints = 0)
{
if (capacity_in_bits_ >= min_capacity_in_bits) {
return;
@@ -531,8 +331,8 @@ class BitVector {
const int64_t new_capacity_in_ints = std::max(min_capacity_in_ints, min_new_capacity_in_ints);
const int64_t ints_to_copy = this->used_ints_amount();
IntType *new_data = static_cast<IntType *>(allocator_.allocate(
new_capacity_in_ints * sizeof(IntType), AllocationAlignment, __func__));
BitInt *new_data = static_cast<BitInt *>(
allocator_.allocate(new_capacity_in_ints * sizeof(BitInt), AllocationAlignment, __func__));
uninitialized_copy_n(data_, ints_to_copy, new_data);
/* Always initialize new capacity even if it isn't used yet. That's necessary to avoid warnings
 * caused by using uninitialized memory. This happens when e.g. setting or clearing a bit in an
@@ -562,7 +362,5 @@ class BitVector {
} // namespace blender::bits
namespace blender {
using bits::BitRef;
using bits::BitVector;
using bits::MutableBitRef;
} // namespace blender
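
With the span conversions above, the removed `fill_range` is now expressed through spans. A usage sketch (hypothetical function, assuming the `resize(size, value)` signature shown in this diff):

void bit_vector_span_example()
{
  using namespace blender;
  BitVector<> vec;
  vec.resize(100, false);
  /* Set bits [10, 30) through the span interface. */
  MutableBitSpan(vec).slice(IndexRange(10, 20)).set_all(true);
  BLI_assert(vec[15]);
  BLI_assert(!vec[50]);
}
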


@@ -457,6 +457,8 @@ if(WITH_GTESTS)
tests/BLI_array_store_test.cc
tests/BLI_array_test.cc
tests/BLI_array_utils_test.cc
tests/BLI_bit_ref_test.cc
tests/BLI_bit_span_test.cc
tests/BLI_bit_vector_test.cc
tests/BLI_bitmap_test.cc
tests/BLI_bounds_test.cc


@@ -0,0 +1,160 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include <array>
#include "BLI_bit_ref.hh"
#include "testing/testing.h"
namespace blender::bits::tests {
TEST(bit_ref, MaskFirstNBits)
{
EXPECT_EQ(mask_first_n_bits(0), 0);
EXPECT_EQ(mask_first_n_bits(1),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001);
EXPECT_EQ(mask_first_n_bits(5),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001'1111);
EXPECT_EQ(mask_first_n_bits(63),
0b0111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
EXPECT_EQ(mask_first_n_bits(64),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
}
TEST(bit_ref, MaskLastNBits)
{
EXPECT_EQ(mask_last_n_bits(0),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_last_n_bits(1),
0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_last_n_bits(5),
0b1111'1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_last_n_bits(63),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1110);
EXPECT_EQ(mask_last_n_bits(64),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
}
TEST(bit_ref, MaskSingleBit)
{
EXPECT_EQ(mask_single_bit(0), 1);
EXPECT_EQ(mask_single_bit(1),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010);
EXPECT_EQ(mask_single_bit(5),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
EXPECT_EQ(mask_single_bit(63),
0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
}
TEST(bit_ref, IntContainingBit)
{
std::array<uint64_t, 5> array;
uint64_t *data = array.data();
EXPECT_EQ(int_containing_bit(data, 0), data);
EXPECT_EQ(int_containing_bit(data, 1), data);
EXPECT_EQ(int_containing_bit(data, 63), data);
EXPECT_EQ(int_containing_bit(data, 64), data + 1);
EXPECT_EQ(int_containing_bit(data, 65), data + 1);
EXPECT_EQ(int_containing_bit(data, 100), data + 1);
EXPECT_EQ(int_containing_bit(data, 127), data + 1);
EXPECT_EQ(int_containing_bit(data, 128), data + 2);
const uint64_t *data_const = data;
EXPECT_EQ(int_containing_bit(data_const, 0), data_const);
EXPECT_EQ(int_containing_bit(data_const, 1), data_const);
EXPECT_EQ(int_containing_bit(data_const, 63), data_const);
EXPECT_EQ(int_containing_bit(data_const, 64), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 65), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 100), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 127), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 128), data_const + 2);
}
TEST(bit_ref, Test)
{
uint64_t data = (1 << 3) | (1 << 7);
EXPECT_FALSE(BitRef(&data, 0).test());
EXPECT_FALSE(BitRef(&data, 1).test());
EXPECT_FALSE(BitRef(&data, 2).test());
EXPECT_TRUE(BitRef(&data, 3).test());
EXPECT_FALSE(BitRef(&data, 4));
EXPECT_FALSE(BitRef(&data, 5));
EXPECT_FALSE(BitRef(&data, 6));
EXPECT_TRUE(BitRef(&data, 7));
EXPECT_FALSE(MutableBitRef(&data, 0).test());
EXPECT_FALSE(MutableBitRef(&data, 1).test());
EXPECT_FALSE(MutableBitRef(&data, 2).test());
EXPECT_TRUE(MutableBitRef(&data, 3).test());
EXPECT_FALSE(MutableBitRef(&data, 4));
EXPECT_FALSE(MutableBitRef(&data, 5));
EXPECT_FALSE(MutableBitRef(&data, 6));
EXPECT_TRUE(MutableBitRef(&data, 7));
}
TEST(bit_ref, Set)
{
uint64_t data = 0;
MutableBitRef(&data, 0).set();
MutableBitRef(&data, 1).set();
MutableBitRef(&data, 1).set();
MutableBitRef(&data, 4).set();
EXPECT_EQ(data, (1 << 0) | (1 << 1) | (1 << 4));
MutableBitRef(&data, 5).set(true);
MutableBitRef(&data, 1).set(false);
EXPECT_EQ(data, (1 << 0) | (1 << 4) | (1 << 5));
}
TEST(bit_ref, Reset)
{
uint64_t data = -1;
MutableBitRef(&data, 0).reset();
MutableBitRef(&data, 2).reset();
EXPECT_EQ(data, uint64_t(-1) & ~(1 << 0) & ~(1 << 2));
}
TEST(bit_ref, SetBranchless)
{
uint64_t data = 0;
MutableBitRef(&data, 0).set_branchless(true);
EXPECT_EQ(data, 1);
MutableBitRef(&data, 0).set_branchless(false);
EXPECT_EQ(data, 0);
MutableBitRef(&data, 3).set_branchless(false);
MutableBitRef(&data, 4).set_branchless(true);
EXPECT_EQ(data, 16);
MutableBitRef(&data, 3).set_branchless(true);
MutableBitRef(&data, 4).set_branchless(true);
EXPECT_EQ(data, 24);
}
TEST(bit_ref, Cast)
{
uint64_t data = 0;
MutableBitRef mutable_ref(&data, 3);
BitRef ref = mutable_ref;
EXPECT_FALSE(ref);
mutable_ref.set();
EXPECT_TRUE(ref);
}
TEST(bit_ref, MaskRangeBits)
{
EXPECT_EQ(mask_range_bits(0, 0),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(0, 1),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001);
EXPECT_EQ(mask_range_bits(0, 5),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001'1111);
EXPECT_EQ(mask_range_bits(64, 0),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(63, 1),
0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(59, 5),
0b1111'1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(8, 3),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0111'0000'0000);
EXPECT_EQ(mask_range_bits(0, 64),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
}
} // namespace blender::bits::tests


@@ -0,0 +1,139 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include <array>
#include "BLI_bit_span.hh"
#include "testing/testing.h"
namespace blender::bits::tests {
TEST(bit_span, DefaultConstructor)
{
{
char buffer[sizeof(BitSpan)];
memset(buffer, 0xff, sizeof(BitSpan));
BitSpan &span = *new (buffer) BitSpan();
EXPECT_TRUE(span.is_empty());
EXPECT_EQ(span.size(), 0);
}
{
char buffer[sizeof(MutableBitSpan)];
memset(buffer, 0xff, sizeof(MutableBitSpan));
MutableBitSpan &span = *new (buffer) MutableBitSpan();
EXPECT_TRUE(span.is_empty());
EXPECT_EQ(span.size(), 0);
}
}
TEST(bit_span, Iteration)
{
uint64_t data = (1 << 2) | (1 << 3);
const BitSpan span(&data, 30);
EXPECT_EQ(span.size(), 30);
int index = 0;
for (const BitRef bit : span) {
EXPECT_EQ(bit.test(), ELEM(index, 2, 3));
index++;
}
}
TEST(bit_span, MutableIteration)
{
uint64_t data = 0;
MutableBitSpan span(&data, 40);
EXPECT_EQ(span.size(), 40);
int index = 0;
for (MutableBitRef bit : span) {
bit.set(index % 4 == 0);
index++;
}
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0001'0001'0001'0001'0001'0001'0001'0001'0001'0001);
}
TEST(bit_span, SubscriptOperator)
{
uint64_t data[2] = {0, 0};
MutableBitSpan mutable_span(data, 128);
BitSpan span = mutable_span;
EXPECT_EQ(mutable_span.data(), data);
EXPECT_EQ(mutable_span.bit_range(), IndexRange(128));
EXPECT_EQ(span.data(), data);
EXPECT_EQ(span.bit_range(), IndexRange(128));
EXPECT_FALSE(mutable_span[5].test());
EXPECT_FALSE(span[5].test());
mutable_span[5].set();
EXPECT_TRUE(mutable_span[5].test());
EXPECT_TRUE(span[5].test());
EXPECT_FALSE(mutable_span[120].test());
EXPECT_FALSE(span[120].test());
mutable_span[120].set();
EXPECT_TRUE(mutable_span[120].test());
EXPECT_TRUE(span[120].test());
EXPECT_EQ(data[0],
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
EXPECT_EQ(data[1],
0b0000'0001'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
}
TEST(bit_span, RangeConstructor)
{
uint64_t data = 0;
MutableBitSpan mutable_span(&data, IndexRange(4, 3));
BitSpan span = mutable_span;
EXPECT_FALSE(mutable_span[1].test());
EXPECT_FALSE(span[1].test());
mutable_span[0].set(true);
mutable_span[1].set(true);
mutable_span[2].set(true);
mutable_span[0].set(false);
mutable_span[2].set(false);
EXPECT_TRUE(mutable_span[1].test());
EXPECT_TRUE(span[1].test());
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
}
TEST(bit_span, Set)
{
uint64_t data = 0;
MutableBitSpan(&data, 64).set_all(true);
EXPECT_EQ(data, uint64_t(-1));
MutableBitSpan(&data, 64).set_all(false);
EXPECT_EQ(data, uint64_t(0));
MutableBitSpan(&data, IndexRange(4, 8)).set_all(true);
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'1111'0000);
MutableBitSpan(&data, IndexRange(8, 30)).set_all(false);
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'0000);
}
TEST(bit_span, SetSliced)
{
std::array<uint64_t, 10> data;
memset(data.data(), 0, sizeof(data));
MutableBitSpan span{data.data(), 640};
span.slice(IndexRange(5, 500)).set_all(true);
for (const int64_t i : IndexRange(640)) {
EXPECT_EQ(span[i], i >= 5 && i < 505);
}
span.slice(IndexRange(10, 190)).set_all(false);
for (const int64_t i : IndexRange(640)) {
EXPECT_EQ(span[i], (i >= 5 && i < 10) || (i >= 200 && i < 505));
}
}
} // namespace blender::bits::tests


@@ -6,7 +6,7 @@
#include "testing/testing.h"
namespace blender::tests {
namespace blender::bits::tests {
TEST(bit_vector, DefaultConstructor)
{
@@ -183,4 +183,4 @@ TEST(bit_vector, AppendMany)
EXPECT_TRUE(vec[5]);
}
} // namespace blender::tests
} // namespace blender::bits::tests


@@ -290,6 +290,24 @@ TEST(index_range, SplitByAlignment)
EXPECT_EQ(ranges.aligned, IndexRange());
EXPECT_EQ(ranges.suffix, IndexRange());
}
{
AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(64), 64);
EXPECT_EQ(ranges.prefix, IndexRange());
EXPECT_EQ(ranges.aligned, IndexRange(64));
EXPECT_EQ(ranges.suffix, IndexRange());
}
{
AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(64, 64), 64);
EXPECT_EQ(ranges.prefix, IndexRange());
EXPECT_EQ(ranges.aligned, IndexRange(64, 64));
EXPECT_EQ(ranges.suffix, IndexRange());
}
{
AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(4, 8), 64);
EXPECT_EQ(ranges.prefix, IndexRange(4, 8));
EXPECT_EQ(ranges.aligned, IndexRange());
EXPECT_EQ(ranges.suffix, IndexRange());
}
}
} // namespace blender::tests