forked from blender/blender
WIP: uv-simple-select (Version2) #2
@@ -108,7 +108,7 @@ BVHTree *bvhtree_from_editmesh_verts(
  */
 BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
 struct BMEditMesh *em,
-const blender::BitVector<> &mask,
+blender::BitSpan mask,
 int verts_num_active,
 float epsilon,
 int tree_type,
@@ -124,7 +124,7 @@ BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
 BVHTree *bvhtree_from_mesh_verts_ex(struct BVHTreeFromMesh *data,
 const float (*vert_positions)[3],
 int verts_num,
-const blender::BitVector<> &verts_mask,
+blender::BitSpan verts_mask,
 int verts_num_active,
 float epsilon,
 int tree_type,
@@ -138,7 +138,7 @@ BVHTree *bvhtree_from_editmesh_edges(
  */
 BVHTree *bvhtree_from_editmesh_edges_ex(BVHTreeFromEditMesh *data,
 struct BMEditMesh *em,
-const blender::BitVector<> &edges_mask,
+blender::BitSpan edges_mask,
 int edges_num_active,
 float epsilon,
 int tree_type,
@@ -156,7 +156,7 @@ BVHTree *bvhtree_from_mesh_edges_ex(struct BVHTreeFromMesh *data,
 const float (*vert_positions)[3],
 const struct MEdge *edge,
 int edges_num,
-const blender::BitVector<> &edges_mask,
+blender::BitSpan edges_mask,
 int edges_num_active,
 float epsilon,
 int tree_type,
@@ -170,7 +170,7 @@ BVHTree *bvhtree_from_editmesh_looptri(
  */
 BVHTree *bvhtree_from_editmesh_looptri_ex(BVHTreeFromEditMesh *data,
 struct BMEditMesh *em,
-const blender::BitVector<> &mask,
+blender::BitSpan mask,
 int looptri_num_active,
 float epsilon,
 int tree_type,
@@ -184,7 +184,7 @@ BVHTree *bvhtree_from_mesh_looptri_ex(struct BVHTreeFromMesh *data,
 const struct MLoop *mloop,
 const struct MLoopTri *looptri,
 int looptri_num,
-const blender::BitVector<> &mask,
+blender::BitSpan mask,
 int looptri_num_active,
 float epsilon,
 int tree_type,
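The signature changes above replace `const blender::BitVector<> &` mask parameters with by-value `blender::BitSpan`. Call sites that already hold a `BitVector<>` should not need changes, because `BitVector` gains implicit conversion operators to `BitSpan`/`MutableBitSpan` later in this patch. A minimal sketch of that pattern (illustrative only; the helper below is hypothetical and not part of the diff):

```cpp
#include "BLI_bit_vector.hh"

/* New-style parameter: a lightweight, non-owning view of the mask bits. */
static int count_enabled(blender::BitSpan mask)
{
  int n = 0;
  for (const blender::BitRef bit : mask) {
    n += bit ? 1 : 0;
  }
  return n;
}

static void mask_example(const blender::BitVector<> &verts_mask)
{
  /* Implicit BitVector -> BitSpan conversion; the bits are referenced, not copied. */
  const int enabled = count_enabled(verts_mask);
  (void)enabled;
}
```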
@@ -29,6 +29,7 @@
 
 #include "MEM_guardedalloc.h"
 
+using blender::BitSpan;
 using blender::BitVector;
 using blender::float3;
 using blender::IndexRange;
@@ -672,7 +673,7 @@ static BVHTree *bvhtree_from_editmesh_verts_create_tree(float epsilon,
 int tree_type,
 int axis,
 BMEditMesh *em,
-const BitVector<> &verts_mask,
+const BitSpan verts_mask,
 int verts_num_active)
 {
 BM_mesh_elem_table_ensure(em->bm, BM_VERT);
@@ -706,7 +707,7 @@ static BVHTree *bvhtree_from_mesh_verts_create_tree(float epsilon,
 int axis,
 const float (*positions)[3],
 const int verts_num,
-const BitVector<> &verts_mask,
+const BitSpan verts_mask,
 int verts_num_active)
 {
 if (!verts_mask.is_empty()) {
@@ -737,7 +738,7 @@ static BVHTree *bvhtree_from_mesh_verts_create_tree(float epsilon,
 
 BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
 BMEditMesh *em,
-const BitVector<> &verts_mask,
+const BitSpan verts_mask,
 int verts_num_active,
 float epsilon,
 int tree_type,
@@ -764,7 +765,7 @@ BVHTree *bvhtree_from_editmesh_verts(
 BVHTree *bvhtree_from_mesh_verts_ex(BVHTreeFromMesh *data,
 const float (*vert_positions)[3],
 const int verts_num,
-const BitVector<> &verts_mask,
+const BitSpan verts_mask,
 int verts_num_active,
 float epsilon,
 int tree_type,
@@ -794,7 +795,7 @@ static BVHTree *bvhtree_from_editmesh_edges_create_tree(float epsilon,
 int tree_type,
 int axis,
 BMEditMesh *em,
-const BitVector<> &edges_mask,
+const BitSpan edges_mask,
 int edges_num_active)
 {
 BM_mesh_elem_table_ensure(em->bm, BM_EDGE);
@@ -833,7 +834,7 @@ static BVHTree *bvhtree_from_editmesh_edges_create_tree(float epsilon,
 static BVHTree *bvhtree_from_mesh_edges_create_tree(const float (*positions)[3],
 const MEdge *edge,
 const int edge_num,
-const BitVector<> &edges_mask,
+const BitSpan edges_mask,
 int edges_num_active,
 float epsilon,
 int tree_type,
@@ -871,7 +872,7 @@ static BVHTree *bvhtree_from_mesh_edges_create_tree(const float (*positions)[3],
 
 BVHTree *bvhtree_from_editmesh_edges_ex(BVHTreeFromEditMesh *data,
 BMEditMesh *em,
-const BitVector<> &edges_mask,
+const BitSpan edges_mask,
 int edges_num_active,
 float epsilon,
 int tree_type,
@@ -899,7 +900,7 @@ BVHTree *bvhtree_from_mesh_edges_ex(BVHTreeFromMesh *data,
 const float (*vert_positions)[3],
 const MEdge *edge,
 const int edges_num,
-const BitVector<> &edges_mask,
+const BitSpan edges_mask,
 int edges_num_active,
 float epsilon,
 int tree_type,
@@ -931,7 +932,7 @@ static BVHTree *bvhtree_from_mesh_faces_create_tree(float epsilon,
 const float (*positions)[3],
 const MFace *face,
 const int faces_num,
-const BitVector<> &faces_mask,
+const BitSpan faces_mask,
 int faces_num_active)
 {
 if (faces_num == 0) {
@@ -984,7 +985,7 @@ static BVHTree *bvhtree_from_editmesh_looptri_create_tree(float epsilon,
 int tree_type,
 int axis,
 BMEditMesh *em,
-const BitVector<> &looptri_mask,
+const BitSpan looptri_mask,
 int looptri_num_active)
 {
 const int looptri_num = em->tottri;
@@ -1038,7 +1039,7 @@ static BVHTree *bvhtree_from_mesh_looptri_create_tree(float epsilon,
 const MLoop *mloop,
 const MLoopTri *looptri,
 const int looptri_num,
-const BitVector<> &looptri_mask,
+const BitSpan looptri_mask,
 int looptri_num_active)
 {
 if (!looptri_mask.is_empty()) {
@@ -1079,7 +1080,7 @@ static BVHTree *bvhtree_from_mesh_looptri_create_tree(float epsilon,
 
 BVHTree *bvhtree_from_editmesh_looptri_ex(BVHTreeFromEditMesh *data,
 BMEditMesh *em,
-const BitVector<> &looptri_mask,
+const BitSpan looptri_mask,
 int looptri_num_active,
 float epsilon,
 int tree_type,
@@ -1109,7 +1110,7 @@ BVHTree *bvhtree_from_mesh_looptri_ex(BVHTreeFromMesh *data,
 const struct MLoop *mloop,
 const struct MLoopTri *looptri,
 const int looptri_num,
-const BitVector<> &looptri_mask,
+const BitSpan looptri_mask,
 int looptri_num_active,
 float epsilon,
 int tree_type,
@@ -17,7 +17,6 @@
 #include "DNA_meshdata_types.h"
 #include "DNA_object_types.h"
 
-#include "BLI_bit_vector.hh"
 #include "BLI_bounds.hh"
 #include "BLI_edgehash.h"
 #include "BLI_endian_switch.h"
@@ -66,7 +65,6 @@
 
 #include "BLO_read_write.h"
 
-using blender::BitVector;
 using blender::float3;
 using blender::MutableSpan;
 using blender::Span;
@@ -316,7 +316,7 @@ void BKE_mesh_foreach_mapped_subdiv_face_center(
 BKE_mesh_vertex_normals_ensure(mesh) :
 nullptr;
 const int *index = static_cast<const int *>(CustomData_get_layer(&mesh->pdata, CD_ORIGINDEX));
-const blender::BitVector<> &facedot_tags = mesh->runtime->subsurf_face_dot_tags;
+const blender::BitSpan facedot_tags = mesh->runtime->subsurf_face_dot_tags;
 
 if (index) {
 for (int i = 0; i < mesh->totpoly; i++, mp++) {
@@ -42,6 +42,7 @@
 using blender::BitVector;
 using blender::float3;
 using blender::int2;
+using blender::MutableBitSpan;
 using blender::MutableSpan;
 using blender::short2;
 using blender::Span;
@@ -1238,7 +1239,7 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<MLoop> mloop
 const Span<int2> edge_to_loops,
 const Span<int> loop_to_poly,
 const int *e2l_prev,
-BitVector<> &skip_loops,
+MutableBitSpan skip_loops,
 const int ml_curr_index,
 const int ml_prev_index,
 const int mp_curr_index)
source/blender/blenlib/BLI_bit_ref.hh (new file, 234 lines)
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#pragma once
+
+/** \file
+ * \ingroup bli
+ *
+ * This file provides the basis for processing "indexed bits" (i.e. every bit has an index).
+ * The main purpose of this file is to define how bits are indexed within a memory buffer.
+ * For example, one has to define whether the first bit is the least or most significant bit and
+ * how endianness affects the bit order.
+ *
+ * The order is defined as follows:
+ * - Every indexed bit is part of a #BitInt. These ints are ordered by their address as usual.
+ * - Within each #BitInt, the bits are ordered from least to most significant.
+ */
+
+#include "BLI_index_range.hh"
+#include "BLI_utildefines.h"
+
+#include <ostream>
+
+namespace blender::bits {
+
+/** Using a large integer type is better because then it's easier to process many bits at once. */
+using BitInt = uint64_t;
+/** Number of bits that fit into #BitInt. */
+static constexpr int64_t BitsPerInt = int64_t(sizeof(BitInt) * 8);
+/** Shift amount to get from a bit index to an int index. Equivalent to `log(BitsPerInt, 2)`. */
+static constexpr int64_t BitToIntIndexShift = 3 + (sizeof(BitInt) >= 2) + (sizeof(BitInt) >= 4) +
+                                              (sizeof(BitInt) >= 8);
+/** Bit mask containing a 1 for the last few bits that index a bit inside of a #BitInt. */
+static constexpr BitInt BitIndexMask = (BitInt(1) << BitToIntIndexShift) - 1;
+
+inline BitInt mask_first_n_bits(const int64_t n)
+{
+  BLI_assert(n >= 0);
+  BLI_assert(n <= BitsPerInt);
+  if (n == BitsPerInt) {
+    return BitInt(-1);
+  }
+  return (BitInt(1) << n) - 1;
+}
+
+inline BitInt mask_last_n_bits(const int64_t n)
+{
+  return ~mask_first_n_bits(BitsPerInt - n);
+}
+
+inline BitInt mask_range_bits(const int64_t start, const int64_t size)
+{
+  BLI_assert(start >= 0);
+  BLI_assert(size >= 0);
+  const int64_t end = start + size;
+  BLI_assert(end <= BitsPerInt);
+  if (end == BitsPerInt) {
+    return mask_last_n_bits(size);
+  }
+  return ((BitInt(1) << end) - 1) & ~((BitInt(1) << start) - 1);
+}
+
+inline BitInt mask_single_bit(const int64_t bit_index)
+{
+  BLI_assert(bit_index >= 0);
+  BLI_assert(bit_index < BitsPerInt);
+  return BitInt(1) << bit_index;
+}
+
+inline BitInt *int_containing_bit(BitInt *data, const int64_t bit_index)
+{
+  return data + (bit_index >> BitToIntIndexShift);
+}
+
+inline const BitInt *int_containing_bit(const BitInt *data, const int64_t bit_index)
+{
+  return data + (bit_index >> BitToIntIndexShift);
+}
+
+/**
+ * This is a read-only pointer to a specific bit. The value of the bit can be retrieved, but
+ * not changed.
+ */
+class BitRef {
+ private:
+  /** Points to the exact integer that the bit is in. */
+  const BitInt *int_;
+  /** All zeros except for a single one at the bit that is referenced. */
+  BitInt mask_;
+
+  friend class MutableBitRef;
+
+ public:
+  BitRef() = default;
+
+  /**
+   * Reference a specific bit in an array. Note that #data does *not* have to point to the
+   * exact integer the bit is in.
+   */
+  BitRef(const BitInt *data, const int64_t bit_index)
+  {
+    int_ = int_containing_bit(data, bit_index);
+    mask_ = mask_single_bit(bit_index & BitIndexMask);
+  }
+
+  /**
+   * Return true when the bit is currently 1 and false otherwise.
+   */
+  bool test() const
+  {
+    const BitInt value = *int_;
+    const BitInt masked_value = value & mask_;
+    return masked_value != 0;
+  }
+
+  operator bool() const
+  {
+    return this->test();
+  }
+};
+
+/**
+ * Similar to #BitRef, but also allows changing the referenced bit.
+ */
+class MutableBitRef {
+ private:
+  /** Points to the integer that the bit is in. */
+  BitInt *int_;
+  /** All zeros except for a single one at the bit that is referenced. */
+  BitInt mask_;
+
+ public:
+  MutableBitRef() = default;
+
+  /**
+   * Reference a specific bit in an array. Note that #data does *not* have to point to the
+   * exact int the bit is in.
+   */
+  MutableBitRef(BitInt *data, const int64_t bit_index)
+  {
+    int_ = int_containing_bit(data, bit_index);
+    mask_ = mask_single_bit(bit_index & BitIndexMask);
+  }
+
+  /**
+   * Support implicitly casting to a read-only #BitRef.
+   */
+  operator BitRef() const
+  {
+    BitRef bit_ref;
+    bit_ref.int_ = int_;
+    bit_ref.mask_ = mask_;
+    return bit_ref;
+  }
+
+  /**
+   * Return true when the bit is currently 1 and false otherwise.
+   */
+  bool test() const
+  {
+    const BitInt value = *int_;
+    const BitInt masked_value = value & mask_;
+    return masked_value != 0;
+  }
+
+  operator bool() const
+  {
+    return this->test();
+  }
+
+  /**
+   * Change the bit to a 1.
+   */
+  void set()
+  {
+    *int_ |= mask_;
+  }
+
+  /**
+   * Change the bit to a 0.
+   */
+  void reset()
+  {
+    *int_ &= ~mask_;
+  }
+
+  /**
+   * Change the bit to a 1 if #value is true and 0 otherwise. If the value is highly unpredictable
+   * by the CPU branch predictor, it can be faster to use #set_branchless instead.
+   */
+  void set(const bool value)
+  {
+    if (value) {
+      this->set();
+    }
+    else {
+      this->reset();
+    }
+  }
+
+  /**
+   * Does the same as #set, but does not use a branch. This is faster when the input value is
+   * unpredictable for the CPU branch predictor (best case for this function is a uniform random
+   * distribution with 50% probability for true and false). If the value is predictable, this is
+   * likely slower than #set.
+   */
+  void set_branchless(const bool value)
+  {
+    const BitInt value_int = BitInt(value);
+    BLI_assert(ELEM(value_int, 0, 1));
+    const BitInt old = *int_;
+    *int_ =
+        /* Unset bit. */
+        (~mask_ & old)
+        /* Optionally set it again. The -1 turns a 1 into `0x00...` and a 0 into `0xff...`. */
+        | (mask_ & ~(value_int - 1));
+  }
+};
+
+inline std::ostream &operator<<(std::ostream &stream, const BitRef &bit)
+{
+  return stream << (bit ? "1" : "0");
+}
+
+inline std::ostream &operator<<(std::ostream &stream, const MutableBitRef &bit)
+{
+  return stream << BitRef(bit);
+}
+
+} // namespace blender::bits
+
+namespace blender {
+using bits::BitRef;
+using bits::MutableBitRef;
+} // namespace blender
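The header above fixes the bit-addressing model (64-bit `BitInt` words, least significant bit first within each word) and provides the mask helpers plus `BitRef`/`MutableBitRef`. A small usage sketch based on the declarations above and the tests added later in this patch (the buffer and indices are made up for illustration):

```cpp
#include "BLI_bit_ref.hh"

static void bit_ref_example()
{
  uint64_t buffer[2] = {0, 0};

  /* Bit 70 lives in buffer[1], bit 6 (70 >> 6 == 1, 70 & 63 == 6). */
  blender::MutableBitRef bit(buffer, 70);
  bit.set();
  BLI_assert(buffer[1] == (uint64_t(1) << 6));

  const blender::BitRef read_only(buffer, 70);
  BLI_assert(read_only.test());

  /* Branch-free variant, useful when the value is unpredictable for the branch predictor. */
  bit.set_branchless(false);
  BLI_assert(!read_only);
}
```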
source/blender/blenlib/BLI_bit_span.hh (new file, 290 lines)
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#pragma once
+
+#include "BLI_bit_ref.hh"
+#include "BLI_index_range.hh"
+#include "BLI_memory_utils.hh"
+
+namespace blender::bits {
+
+/** Base class for a const and non-const bit-iterator. */
+class BitIteratorBase {
+ protected:
+  const BitInt *data_;
+  int64_t bit_index_;
+
+ public:
+  BitIteratorBase(const BitInt *data, const int64_t bit_index) : data_(data), bit_index_(bit_index)
+  {
+  }
+
+  BitIteratorBase &operator++()
+  {
+    bit_index_++;
+    return *this;
+  }
+
+  friend bool operator!=(const BitIteratorBase &a, const BitIteratorBase &b)
+  {
+    BLI_assert(a.data_ == b.data_);
+    return a.bit_index_ != b.bit_index_;
+  }
+};
+
+/** Allows iterating over the bits in a memory buffer. */
+class BitIterator : public BitIteratorBase {
+ public:
+  BitIterator(const BitInt *data, const int64_t bit_index) : BitIteratorBase(data, bit_index)
+  {
+  }
+
+  BitRef operator*() const
+  {
+    return BitRef(data_, bit_index_);
+  }
+};
+
+/** Allows iterating over the bits in a memory buffer. */
+class MutableBitIterator : public BitIteratorBase {
+ public:
+  MutableBitIterator(BitInt *data, const int64_t bit_index) : BitIteratorBase(data, bit_index)
+  {
+  }
+
+  MutableBitRef operator*() const
+  {
+    return MutableBitRef(const_cast<BitInt *>(data_), bit_index_);
+  }
+};
+
+/**
+ * Similar to #Span, but references a range of bits instead of normal C++ types (which must be at
+ * least one byte large). Use #MutableBitSpan if the values are supposed to be modified.
+ *
+ * The beginning and end of a #BitSpan does *not* have to be at byte/int boundaries. It can start
+ * and end at any bit.
+ */
+class BitSpan {
+ private:
+  /** Base pointer to the integers containing the bits. The actual bit span might start at a much
+   * higher address when `bit_range_.start()` is large. */
+  const BitInt *data_ = nullptr;
+  /** The range of referenced bits. */
+  IndexRange bit_range_ = {0, 0};
+
+ public:
+  /** Construct an empty span. */
+  BitSpan() = default;
+
+  BitSpan(const BitInt *data, const int64_t size_in_bits) : data_(data), bit_range_(size_in_bits)
+  {
+  }
+
+  BitSpan(const BitInt *data, const IndexRange bit_range) : data_(data), bit_range_(bit_range)
+  {
+  }
+
+  /** Number of bits referenced by the span. */
+  int64_t size() const
+  {
+    return bit_range_.size();
+  }
+
+  bool is_empty() const
+  {
+    return bit_range_.is_empty();
+  }
+
+  IndexRange index_range() const
+  {
+    return IndexRange(bit_range_.size());
+  }
+
+  BitRef operator[](const int64_t index) const
+  {
+    BLI_assert(index >= 0);
+    BLI_assert(index < bit_range_.size());
+    return {data_, bit_range_.start() + index};
+  }
+
+  BitSpan slice(const IndexRange range) const
+  {
+    return {data_, bit_range_.slice(range)};
+  }
+
+  const BitInt *data() const
+  {
+    return data_;
+  }
+
+  const IndexRange &bit_range() const
+  {
+    return bit_range_;
+  }
+
+  BitIterator begin() const
+  {
+    return {data_, bit_range_.start()};
+  }
+
+  BitIterator end() const
+  {
+    return {data_, bit_range_.one_after_last()};
+  }
+};
+
+/** Same as #BitSpan, but also allows modifying the referenced bits. */
+class MutableBitSpan {
+ private:
+  BitInt *data_ = nullptr;
+  IndexRange bit_range_ = {0, 0};
+
+ public:
+  MutableBitSpan() = default;
+
+  MutableBitSpan(BitInt *data, const int64_t size) : data_(data), bit_range_(size)
+  {
+  }
+
+  MutableBitSpan(BitInt *data, const IndexRange bit_range) : data_(data), bit_range_(bit_range)
+  {
+  }
+
+  int64_t size() const
+  {
+    return bit_range_.size();
+  }
+
+  bool is_empty() const
+  {
+    return bit_range_.is_empty();
+  }
+
+  IndexRange index_range() const
+  {
+    return IndexRange(bit_range_.size());
+  }
+
+  MutableBitRef operator[](const int64_t index) const
+  {
+    BLI_assert(index >= 0);
+    BLI_assert(index < bit_range_.size());
+    return {data_, bit_range_.start() + index};
+  }
+
+  MutableBitSpan slice(const IndexRange range) const
+  {
+    return {data_, bit_range_.slice(range)};
+  }
+
+  BitInt *data() const
+  {
+    return data_;
+  }
+
+  const IndexRange &bit_range() const
+  {
+    return bit_range_;
+  }
+
+  MutableBitIterator begin() const
+  {
+    return {data_, bit_range_.start()};
+  }
+
+  MutableBitIterator end() const
+  {
+    return {data_, bit_range_.one_after_last()};
+  }
+
+  operator BitSpan() const
+  {
+    return {data_, bit_range_};
+  }
+
+  /** Sets all referenced bits to 1. */
+  void set_all()
+  {
+    const AlignedIndexRanges ranges = split_index_range_by_alignment(bit_range_, BitsPerInt);
+    {
+      BitInt &first_int = *int_containing_bit(data_, bit_range_.start());
+      const BitInt first_int_mask = mask_range_bits(ranges.prefix.start() & BitIndexMask,
+                                                    ranges.prefix.size());
+      first_int |= first_int_mask;
+    }
+    {
+      BitInt *start = int_containing_bit(data_, ranges.aligned.start());
+      const int64_t ints_to_fill = ranges.aligned.size() / BitsPerInt;
+      constexpr BitInt fill_value = BitInt(-1);
+      initialized_fill_n(start, ints_to_fill, fill_value);
+    }
+    {
+      BitInt &last_int = *int_containing_bit(data_, bit_range_.one_after_last() - 1);
+      const BitInt last_int_mask = mask_first_n_bits(ranges.suffix.size());
+      last_int |= last_int_mask;
+    }
+  }
+
+  /** Sets all referenced bits to 0. */
+  void reset_all()
+  {
+    const AlignedIndexRanges ranges = split_index_range_by_alignment(bit_range_, BitsPerInt);
+    {
+      BitInt &first_int = *int_containing_bit(data_, bit_range_.start());
+      const BitInt first_int_mask = mask_range_bits(ranges.prefix.start() & BitIndexMask,
+                                                    ranges.prefix.size());
+      first_int &= ~first_int_mask;
+    }
+    {
+      BitInt *start = int_containing_bit(data_, ranges.aligned.start());
+      const int64_t ints_to_fill = ranges.aligned.size() / BitsPerInt;
+      constexpr BitInt fill_value = 0;
+      initialized_fill_n(start, ints_to_fill, fill_value);
+    }
+    {
+      BitInt &last_int = *int_containing_bit(data_, bit_range_.one_after_last() - 1);
+      const BitInt last_int_mask = mask_first_n_bits(ranges.suffix.size());
+      last_int &= ~last_int_mask;
+    }
+  }
+
+  /** Sets all referenced bits to either 0 or 1. */
+  void set_all(const bool value)
+  {
+    if (value) {
+      this->set_all();
+    }
+    else {
+      this->reset_all();
+    }
+  }
+
+  /** Same as #set_all to mirror #MutableSpan. */
+  void fill(const bool value)
+  {
+    this->set_all(value);
+  }
+};
+
+inline std::ostream &operator<<(std::ostream &stream, const BitSpan &span)
+{
+  stream << "(Size: " << span.size() << ", ";
+  for (const BitRef bit : span) {
+    stream << bit;
+  }
+  stream << ")";
+  return stream;
+}
+
+inline std::ostream &operator<<(std::ostream &stream, const MutableBitSpan &span)
+{
+  return stream << BitSpan(span);
+}
+
+} // namespace blender::bits
+
+namespace blender {
+using bits::BitSpan;
+using bits::MutableBitSpan;
+} // namespace blender
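`BitSpan`/`MutableBitSpan` reference an arbitrary bit range that does not need to start or end on a word boundary; `set_all()`/`reset_all()` split the range into an unaligned prefix, whole 64-bit words, and an unaligned suffix so that the bulk of the work is done one word at a time. A small sketch of the intended use, modelled on the tests added later in this patch (buffer sizes and ranges are illustrative):

```cpp
#include "BLI_bit_span.hh"

static void bit_span_example()
{
  uint64_t data[4] = {0, 0, 0, 0};
  blender::MutableBitSpan span(data, 256);

  /* The filled sub-range crosses the boundary between data[0] and data[1]. */
  span.slice(blender::IndexRange(60, 10)).set_all(true);
  BLI_assert(!span[59].test());
  BLI_assert(span[60].test() && span[69].test());
  BLI_assert(!span[70].test());

  /* Read-only view of the same bits; MutableBitSpan converts implicitly. */
  const blender::BitSpan read_only = span;
  int set_count = 0;
  for (const blender::BitRef bit : read_only) {
    set_count += bit ? 1 : 0;
  }
  BLI_assert(set_count == 10);

  span.fill(false); /* Same as set_all(false), mirroring MutableSpan::fill. */
}
```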
@@ -38,142 +38,11 @@
 #include <cstring>
 
 #include "BLI_allocator.hh"
-#include "BLI_index_range.hh"
-#include "BLI_memory_utils.hh"
+#include "BLI_bit_span.hh"
 #include "BLI_span.hh"
 
 namespace blender::bits {
 
-/**
- * Using a large integer type is better because then it's easier to process many bits at once.
- */
-using IntType = uint64_t;
-static constexpr int64_t BitsPerInt = int64_t(sizeof(IntType) * 8);
-static constexpr int64_t BitToIntIndexShift = 3 + (sizeof(IntType) >= 2) + (sizeof(IntType) >= 4) +
-(sizeof(IntType) >= 8);
-static constexpr IntType BitIndexMask = (IntType(1) << BitToIntIndexShift) - 1;
-
-/**
- * This is a read-only pointer to a specific bit. The value of the bit can be retrieved, but
- * not changed.
- */
-class BitRef {
- private:
-/** Points to the integer that the bit is in. */
-const IntType *ptr_;
-/** All zeros except for a single one at the bit that is referenced. */
-IntType mask_;
-
-friend class MutableBitRef;
-
- public:
-BitRef() = default;
-
-/**
- * Reference a specific bit in an array. Note that #ptr does *not* have to point to the
- * exact integer the bit is in.
- */
-BitRef(const IntType *ptr, const int64_t bit_index)
-{
-ptr_ = ptr + (bit_index >> BitToIntIndexShift);
-mask_ = IntType(1) << (bit_index & BitIndexMask);
-}
-
-/**
- * Return true when the bit is currently 1 and false otherwise.
- */
-bool test() const
-{
-const IntType value = *ptr_;
-const IntType masked_value = value & mask_;
-return masked_value != 0;
-}
-
-operator bool() const
-{
-return this->test();
-}
-};
-
-/**
- * Similar to #BitRef, but also allows changing the referenced bit.
- */
-class MutableBitRef {
- private:
-/** Points to the integer that the bit is in. */
-IntType *ptr_;
-/** All zeros except for a single one at the bit that is referenced. */
-IntType mask_;
-
- public:
-MutableBitRef() = default;
-
-/**
- * Reference a specific bit in an array. Note that #ptr does *not* have to point to the
- * exact int the bit is in.
- */
-MutableBitRef(IntType *ptr, const int64_t bit_index)
-{
-ptr_ = ptr + (bit_index >> BitToIntIndexShift);
-mask_ = IntType(1) << IntType(bit_index & BitIndexMask);
-}
-
-/**
- * Support implicitly casting to a read-only #BitRef.
- */
-operator BitRef() const
-{
-BitRef bit_ref;
-bit_ref.ptr_ = ptr_;
-bit_ref.mask_ = mask_;
-return bit_ref;
-}
-
-/**
- * Return true when the bit is currently 1 and false otherwise.
- */
-bool test() const
-{
-const IntType value = *ptr_;
-const IntType masked_value = value & mask_;
-return masked_value != 0;
-}
-
-operator bool() const
-{
-return this->test();
-}
-
-/**
- * Change the bit to a 1.
- */
-void set()
-{
-*ptr_ |= mask_;
-}
-
-/**
- * Change the bit to a 0.
- */
-void reset()
-{
-*ptr_ &= ~mask_;
-}
-
-/**
- * Change the bit to a 1 if #value is true and 0 otherwise.
- */
-void set(const bool value)
-{
-if (value) {
-this->set();
-}
-else {
-this->reset();
-}
-}
-};
-
 template<
 /**
  * Number of bits that can be stored in the vector without doing an allocation.
@@ -193,13 +62,13 @@ class BitVector {
 
 static constexpr int64_t IntsInInlineBuffer = required_ints_for_bits(InlineBufferCapacity);
 static constexpr int64_t BitsInInlineBuffer = IntsInInlineBuffer * BitsPerInt;
-static constexpr int64_t AllocationAlignment = alignof(IntType);
+static constexpr int64_t AllocationAlignment = alignof(BitInt);
 
 /**
  * Points to the first integer used by the vector. It might point to the memory in the inline
  * buffer.
  */
-IntType *data_;
+BitInt *data_;
 
 /** Current size of the vector in bits. */
 int64_t size_in_bits_;
@@ -211,7 +80,7 @@ class BitVector {
 BLI_NO_UNIQUE_ADDRESS Allocator allocator_;
 
 /** Contains the bits as long as the vector is small enough. */
-BLI_NO_UNIQUE_ADDRESS TypedBuffer<IntType, IntsInInlineBuffer> inline_buffer_;
+BLI_NO_UNIQUE_ADDRESS TypedBuffer<BitInt, IntsInInlineBuffer> inline_buffer_;
 
 public:
 BitVector(Allocator allocator = {}) noexcept : allocator_(allocator)
@@ -219,7 +88,7 @@ class BitVector {
 data_ = inline_buffer_;
 size_in_bits_ = 0;
 capacity_in_bits_ = BitsInInlineBuffer;
-uninitialized_fill_n(data_, IntsInInlineBuffer, IntType(0));
+uninitialized_fill_n(data_, IntsInInlineBuffer, BitInt(0));
 }
 
 BitVector(NoExceptConstructor, Allocator allocator = {}) noexcept : BitVector(allocator)
@@ -236,8 +105,8 @@ class BitVector {
 }
 else {
 /* Allocate a new array because the inline buffer is too small. */
-data_ = static_cast<IntType *>(
-allocator_.allocate(ints_to_copy * sizeof(IntType), AllocationAlignment, __func__));
+data_ = static_cast<BitInt *>(
+allocator_.allocate(ints_to_copy * sizeof(BitInt), AllocationAlignment, __func__));
 capacity_in_bits_ = ints_to_copy * BitsPerInt;
 }
 size_in_bits_ = other.size_in_bits_;
@@ -303,6 +172,16 @@ class BitVector {
 return move_assign_container(*this, std::move(other));
 }
+
+operator BitSpan() const
+{
+return {data_, IndexRange(size_in_bits_)};
+}
+
+operator MutableBitSpan()
+{
+return {data_, IndexRange(size_in_bits_)};
+}
 
 /**
  * Number of bits in the bit vector.
 */
@@ -352,80 +231,24 @@ class BitVector {
 size_in_bits_++;
 }
 
-class Iterator {
- private:
-const BitVector *vector_;
-int64_t index_;
-
- public:
-Iterator(const BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
-{
-}
-
-Iterator &operator++()
-{
-index_++;
-return *this;
-}
-
-friend bool operator!=(const Iterator &a, const Iterator &b)
-{
-BLI_assert(a.vector_ == b.vector_);
-return a.index_ != b.index_;
-}
-
-BitRef operator*() const
-{
-return (*vector_)[index_];
-}
-};
-
-class MutableIterator {
- private:
-BitVector *vector_;
-int64_t index_;
-
- public:
-MutableIterator(BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
-{
-}
-
-MutableIterator &operator++()
-{
-index_++;
-return *this;
-}
-
-friend bool operator!=(const MutableIterator &a, const MutableIterator &b)
-{
-BLI_assert(a.vector_ == b.vector_);
-return a.index_ != b.index_;
-}
-
-MutableBitRef operator*() const
-{
-return (*vector_)[index_];
-}
-};
-
-Iterator begin() const
-{
-return {*this, 0};
-}
-
-Iterator end() const
-{
-return {*this, size_in_bits_};
-}
-
-MutableIterator begin()
-{
-return {*this, 0};
-}
-
-MutableIterator end()
-{
-return {*this, size_in_bits_};
-}
+BitIterator begin() const
+{
+return {data_, 0};
+}
+
+BitIterator end() const
+{
+return {data_, size_in_bits_};
+}
+
+MutableBitIterator begin()
+{
+return {data_, 0};
+}
+
+MutableBitIterator end()
+{
+return {data_, size_in_bits_};
+}
 
 /**
@@ -441,31 +264,8 @@ class BitVector {
 }
 size_in_bits_ = new_size_in_bits;
 if (old_size_in_bits < new_size_in_bits) {
-this->fill_range(IndexRange(old_size_in_bits, new_size_in_bits - old_size_in_bits), value);
-}
-}
-
-/**
- * Set #value for every element in #range.
- */
-void fill_range(const IndexRange range, const bool value)
-{
-const AlignedIndexRanges aligned_ranges = split_index_range_by_alignment(range, BitsPerInt);
-
-/* Fill first few bits. */
-for (const int64_t i : aligned_ranges.prefix) {
-(*this)[i].set(value);
-}
-
-/* Fill entire ints at once. */
-const int64_t start_fill_int_index = aligned_ranges.aligned.start() / BitsPerInt;
-const int64_t ints_to_fill = aligned_ranges.aligned.size() / BitsPerInt;
-const IntType fill_value = value ? IntType(-1) : IntType(0);
-initialized_fill_n(data_ + start_fill_int_index, ints_to_fill, fill_value);
-
-/* Fill bits in the end that don't cover a full int. */
-for (const int64_t i : aligned_ranges.suffix) {
-(*this)[i].set(value);
+MutableBitSpan(data_, IndexRange(old_size_in_bits, new_size_in_bits - old_size_in_bits))
+.set_all(value);
 }
 }
 
@@ -474,7 +274,7 @@ class BitVector {
  */
 void fill(const bool value)
 {
-this->fill_range(IndexRange(0, size_in_bits_), value);
+MutableBitSpan(data_, size_in_bits_).set_all(value);
 }
 
 /**
@@ -517,7 +317,7 @@ class BitVector {
 }
 
 BLI_NOINLINE void realloc_to_at_least(const int64_t min_capacity_in_bits,
-const IntType initial_value_for_new_ints = 0x00)
+const BitInt initial_value_for_new_ints = 0)
 {
 if (capacity_in_bits_ >= min_capacity_in_bits) {
 return;
@@ -531,8 +331,8 @@ class BitVector {
 const int64_t new_capacity_in_ints = std::max(min_capacity_in_ints, min_new_capacity_in_ints);
 const int64_t ints_to_copy = this->used_ints_amount();
 
-IntType *new_data = static_cast<IntType *>(allocator_.allocate(
-new_capacity_in_ints * sizeof(IntType), AllocationAlignment, __func__));
+BitInt *new_data = static_cast<BitInt *>(
+allocator_.allocate(new_capacity_in_ints * sizeof(BitInt), AllocationAlignment, __func__));
 uninitialized_copy_n(data_, ints_to_copy, new_data);
 /* Always initialize new capacity even if it isn't used yet. That's necessary to avoid warnings
  * caused by using uninitialized memory. This happens when e.g. setting a clearing a bit in an
@@ -562,7 +362,5 @@ class BitVector {
 } // namespace blender::bits
 
 namespace blender {
-using bits::BitRef;
 using bits::BitVector;
-using bits::MutableBitRef;
 } // namespace blender
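With the span types in place, `BitVector` drops its own `IntType` alias, bit references and iterator classes, and instead exposes implicit conversions to `BitSpan`/`MutableBitSpan`; `fill()` and the resize fill path now delegate to `MutableBitSpan::set_all()`. A brief sketch of the resulting usage (the helper is hypothetical, not part of the patch):

```cpp
#include "BLI_bit_vector.hh"

static void bit_vector_as_span(blender::BitVector<> &vec)
{
  vec.fill(false); /* Now implemented via MutableBitSpan::set_all(). */

  blender::MutableBitSpan bits = vec; /* Implicit conversion added above. */
  if (!bits.is_empty()) {
    bits[0].set();
  }

  const blender::BitSpan read_only = vec; /* Read-only view, no copy of the bits. */
  for (const blender::BitRef bit : read_only) {
    (void)bit;
  }
}
```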
@@ -457,6 +457,8 @@ if(WITH_GTESTS)
 tests/BLI_array_store_test.cc
 tests/BLI_array_test.cc
 tests/BLI_array_utils_test.cc
+tests/BLI_bit_ref_test.cc
+tests/BLI_bit_span_test.cc
 tests/BLI_bit_vector_test.cc
 tests/BLI_bitmap_test.cc
 tests/BLI_bounds_test.cc
source/blender/blenlib/tests/BLI_bit_ref_test.cc (new file, 160 lines)
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+
+#include <array>
+
+#include "BLI_bit_ref.hh"
+
+#include "testing/testing.h"
+
+namespace blender::bits::tests {
+
+TEST(bit_ref, MaskFirstNBits)
+{
+  EXPECT_EQ(mask_first_n_bits(0), 0);
+  EXPECT_EQ(mask_first_n_bits(1),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001);
+  EXPECT_EQ(mask_first_n_bits(5),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001'1111);
+  EXPECT_EQ(mask_first_n_bits(63),
+            0b0111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
+  EXPECT_EQ(mask_first_n_bits(64),
+            0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
+}
+
+TEST(bit_ref, MaskLastNBits)
+{
+  EXPECT_EQ(mask_last_n_bits(0),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
+  EXPECT_EQ(mask_last_n_bits(1),
+            0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
+  EXPECT_EQ(mask_last_n_bits(5),
+            0b1111'1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
+  EXPECT_EQ(mask_last_n_bits(63),
+            0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1110);
+  EXPECT_EQ(mask_last_n_bits(64),
+            0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
+}
+
+TEST(bit_ref, MaskSingleBit)
+{
+  EXPECT_EQ(mask_single_bit(0), 1);
+  EXPECT_EQ(mask_single_bit(1),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010);
+  EXPECT_EQ(mask_single_bit(5),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
+  EXPECT_EQ(mask_single_bit(63),
+            0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
+}
+
+TEST(bit_ref, IntContainingBit)
+{
+  std::array<uint64_t, 5> array;
+  uint64_t *data = array.data();
+  EXPECT_EQ(int_containing_bit(data, 0), data);
+  EXPECT_EQ(int_containing_bit(data, 1), data);
+  EXPECT_EQ(int_containing_bit(data, 63), data);
+  EXPECT_EQ(int_containing_bit(data, 64), data + 1);
+  EXPECT_EQ(int_containing_bit(data, 65), data + 1);
+  EXPECT_EQ(int_containing_bit(data, 100), data + 1);
+  EXPECT_EQ(int_containing_bit(data, 127), data + 1);
+  EXPECT_EQ(int_containing_bit(data, 128), data + 2);
+  const uint64_t *data_const = data;
+  EXPECT_EQ(int_containing_bit(data_const, 0), data_const);
+  EXPECT_EQ(int_containing_bit(data_const, 1), data_const);
+  EXPECT_EQ(int_containing_bit(data_const, 63), data_const);
+  EXPECT_EQ(int_containing_bit(data_const, 64), data_const + 1);
+  EXPECT_EQ(int_containing_bit(data_const, 65), data_const + 1);
+  EXPECT_EQ(int_containing_bit(data_const, 100), data_const + 1);
+  EXPECT_EQ(int_containing_bit(data_const, 127), data_const + 1);
+  EXPECT_EQ(int_containing_bit(data_const, 128), data_const + 2);
+}
+
+TEST(bit_ref, Test)
+{
+  uint64_t data = (1 << 3) | (1 << 7);
+  EXPECT_FALSE(BitRef(&data, 0).test());
+  EXPECT_FALSE(BitRef(&data, 1).test());
+  EXPECT_FALSE(BitRef(&data, 2).test());
+  EXPECT_TRUE(BitRef(&data, 3).test());
+  EXPECT_FALSE(BitRef(&data, 4));
+  EXPECT_FALSE(BitRef(&data, 5));
+  EXPECT_FALSE(BitRef(&data, 6));
+  EXPECT_TRUE(BitRef(&data, 7));
+
+  EXPECT_FALSE(MutableBitRef(&data, 0).test());
+  EXPECT_FALSE(MutableBitRef(&data, 1).test());
+  EXPECT_FALSE(MutableBitRef(&data, 2).test());
+  EXPECT_TRUE(MutableBitRef(&data, 3).test());
+  EXPECT_FALSE(MutableBitRef(&data, 4));
+  EXPECT_FALSE(MutableBitRef(&data, 5));
+  EXPECT_FALSE(MutableBitRef(&data, 6));
+  EXPECT_TRUE(MutableBitRef(&data, 7));
+}
+
+TEST(bit_ref, Set)
+{
+  uint64_t data = 0;
+  MutableBitRef(&data, 0).set();
+  MutableBitRef(&data, 1).set();
+  MutableBitRef(&data, 1).set();
+  MutableBitRef(&data, 4).set();
+  EXPECT_EQ(data, (1 << 0) | (1 << 1) | (1 << 4));
+  MutableBitRef(&data, 5).set(true);
+  MutableBitRef(&data, 1).set(false);
+  EXPECT_EQ(data, (1 << 0) | (1 << 4) | (1 << 5));
+}
+
+TEST(bit_ref, Reset)
+{
+  uint64_t data = -1;
+  MutableBitRef(&data, 0).reset();
+  MutableBitRef(&data, 2).reset();
+  EXPECT_EQ(data, uint64_t(-1) & ~(1 << 0) & ~(1 << 2));
+}
+
+TEST(bit_ref, SetBranchless)
+{
+  uint64_t data = 0;
+  MutableBitRef(&data, 0).set_branchless(true);
+  EXPECT_EQ(data, 1);
+  MutableBitRef(&data, 0).set_branchless(false);
+  EXPECT_EQ(data, 0);
+  MutableBitRef(&data, 3).set_branchless(false);
+  MutableBitRef(&data, 4).set_branchless(true);
+  EXPECT_EQ(data, 16);
+  MutableBitRef(&data, 3).set_branchless(true);
+  MutableBitRef(&data, 4).set_branchless(true);
+  EXPECT_EQ(data, 24);
+}
+
+TEST(bit_ref, Cast)
+{
+  uint64_t data = 0;
+  MutableBitRef mutable_ref(&data, 3);
+  BitRef ref = mutable_ref;
+  EXPECT_FALSE(ref);
+  mutable_ref.set();
+  EXPECT_TRUE(ref);
+}
+
+TEST(bit_ref, MaskRangeBits)
+{
+  EXPECT_EQ(mask_range_bits(0, 0),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
+  EXPECT_EQ(mask_range_bits(0, 1),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001);
+  EXPECT_EQ(mask_range_bits(0, 5),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001'1111);
+  EXPECT_EQ(mask_range_bits(64, 0),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
+  EXPECT_EQ(mask_range_bits(63, 1),
+            0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
+  EXPECT_EQ(mask_range_bits(59, 5),
+            0b1111'1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
+  EXPECT_EQ(mask_range_bits(8, 3),
+            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0111'0000'0000);
+  EXPECT_EQ(mask_range_bits(0, 64),
+            0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
+}
+
+} // namespace blender::bits::tests
139
source/blender/blenlib/tests/BLI_bit_span_test.cc
Normal file
139
source/blender/blenlib/tests/BLI_bit_span_test.cc
Normal file
@ -0,0 +1,139 @@
|
|||||||
|
/* SPDX-License-Identifier: Apache-2.0 */
|
||||||
|
|
||||||
|
#include <array>
|
||||||
|
|
||||||
|
#include "BLI_bit_span.hh"
|
||||||
|
|
||||||
|
#include "testing/testing.h"
|
||||||
|
|
||||||
|
namespace blender::bits::tests {
|
||||||
|
|
||||||
|
TEST(bit_span, DefaultConstructor)
|
||||||
|
{
|
||||||
|
{
|
||||||
|
char buffer[sizeof(BitSpan)];
|
||||||
|
memset(buffer, 0xff, sizeof(BitSpan));
|
||||||
|
BitSpan &span = *new (buffer) BitSpan();
|
||||||
|
EXPECT_TRUE(span.is_empty());
|
||||||
|
EXPECT_EQ(span.size(), 0);
|
||||||
|
}
|
||||||
|
{
|
||||||
|
char buffer[sizeof(MutableBitSpan)];
|
||||||
|
memset(buffer, 0xff, sizeof(MutableBitSpan));
|
||||||
|
MutableBitSpan &span = *new (buffer) MutableBitSpan();
|
||||||
|
EXPECT_TRUE(span.is_empty());
|
||||||
|
EXPECT_EQ(span.size(), 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
TEST(bit_span, Iteration)
|
||||||
|
{
|
||||||
|
uint64_t data = (1 << 2) | (1 << 3);
|
||||||
|
const BitSpan span(&data, 30);
|
||||||
|
EXPECT_EQ(span.size(), 30);
|
||||||
|
  int index = 0;
  for (const BitRef bit : span) {
    EXPECT_EQ(bit.test(), ELEM(index, 2, 3));
    index++;
  }
}

TEST(bit_span, MutableIteration)
{
  uint64_t data = 0;
  MutableBitSpan span(&data, 40);
  EXPECT_EQ(span.size(), 40);
  int index = 0;
  for (MutableBitRef bit : span) {
    bit.set(index % 4 == 0);
    index++;
  }
  EXPECT_EQ(data,
            0b0000'0000'0000'0000'0000'0000'0001'0001'0001'0001'0001'0001'0001'0001'0001'0001);
}

TEST(bit_span, SubscriptOperator)
{
  uint64_t data[2] = {0, 0};
  MutableBitSpan mutable_span(data, 128);
  BitSpan span = mutable_span;

  EXPECT_EQ(mutable_span.data(), data);
  EXPECT_EQ(mutable_span.bit_range(), IndexRange(128));
  EXPECT_EQ(span.data(), data);
  EXPECT_EQ(span.bit_range(), IndexRange(128));

  EXPECT_FALSE(mutable_span[5].test());
  EXPECT_FALSE(span[5].test());
  mutable_span[5].set(5);
  EXPECT_TRUE(mutable_span[5].test());
  EXPECT_TRUE(span[5].test());

  EXPECT_FALSE(mutable_span[120].test());
  EXPECT_FALSE(span[120].test());
  mutable_span[120].set(120);
  EXPECT_TRUE(mutable_span[120].test());
  EXPECT_TRUE(span[120].test());

  EXPECT_EQ(data[0],
            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
  EXPECT_EQ(data[1],
            0b0000'0001'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
}

TEST(bit_span, RangeConstructor)
{
  uint64_t data = 0;
  MutableBitSpan mutable_span(&data, IndexRange(4, 3));
  BitSpan span = mutable_span;

  EXPECT_FALSE(mutable_span[1].test());
  EXPECT_FALSE(span[1].test());
  mutable_span[0].set(true);
  mutable_span[1].set(true);
  mutable_span[2].set(true);
  mutable_span[0].set(false);
  mutable_span[2].set(false);
  EXPECT_TRUE(mutable_span[1].test());
  EXPECT_TRUE(span[1].test());

  EXPECT_EQ(data,
            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
}

TEST(bit_span, Set)
{
  uint64_t data = 0;
  MutableBitSpan(&data, 64).set_all(true);
  EXPECT_EQ(data, uint64_t(-1));
  MutableBitSpan(&data, 64).set_all(false);
  EXPECT_EQ(data, uint64_t(0));

  MutableBitSpan(&data, IndexRange(4, 8)).set_all(true);
  EXPECT_EQ(data,
            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'1111'0000);
  MutableBitSpan(&data, IndexRange(8, 30)).set_all(false);

  EXPECT_EQ(data,
            0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'0000);
}

TEST(bit_span, SetSliced)
{
  std::array<uint64_t, 10> data;
  memset(data.data(), 0, sizeof(data));
  MutableBitSpan span{data.data(), 640};
  span.slice(IndexRange(5, 500)).set_all(true);

  for (const int64_t i : IndexRange(640)) {
    EXPECT_EQ(span[i], i >= 5 && i < 505);
  }

  span.slice(IndexRange(10, 190)).set_all(false);

  for (const int64_t i : IndexRange(640)) {
    EXPECT_EQ(span[i], (i >= 5 && i < 10) || (i >= 200 && i < 505));
  }
}

}  // namespace blender::bits::tests
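Reviewer note: for anyone reading the tests above without the new header open, here is a minimal standalone sketch of the addressing rule a bit span over uint64_t words has to follow (bit i lives in word i >> 6, at position i & 63). This is an illustration only, not code added by this patch; every name in it is hypothetical.

    #include <cassert>
    #include <cstdint>

    namespace sketch {

    /* Read bit `i` of a span that starts at bit `start` inside `words`. */
    inline bool test_bit(const uint64_t *words, int64_t start, int64_t i)
    {
      const int64_t bit = start + i;
      return (words[bit >> 6] >> (bit & 63)) & 1;
    }

    /* Write bit `i` of the same span. */
    inline void set_bit(uint64_t *words, int64_t start, int64_t i, bool value)
    {
      const int64_t bit = start + i;
      const uint64_t mask = uint64_t(1) << (bit & 63);
      if (value) {
        words[bit >> 6] |= mask;
      }
      else {
        words[bit >> 6] &= ~mask;
      }
    }

    }  // namespace sketch

    int main()
    {
      uint64_t data[2] = {0, 0};
      sketch::set_bit(data, 0, 5, true);   /* Mirrors mutable_span[5].set(...) above. */
      sketch::set_bit(data, 0, 120, true); /* Bit 120 lands in data[1], bit 56. */
      assert(sketch::test_bit(data, 0, 5));
      assert(data[0] == (uint64_t(1) << 5));
      assert(data[1] == (uint64_t(1) << 56));
      return 0;
    }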
@ -6,7 +6,7 @@
 #include "testing/testing.h"
 
-namespace blender::tests {
+namespace blender::bits::tests {
 
 TEST(bit_vector, DefaultConstructor)
 {
@ -183,4 +183,4 @@ TEST(bit_vector, AppendMany)
   EXPECT_TRUE(vec[5]);
 }
 
-}  // namespace blender::tests
+}  // namespace blender::bits::tests
@ -290,6 +290,24 @@ TEST(index_range, SplitByAlignment)
     EXPECT_EQ(ranges.aligned, IndexRange());
     EXPECT_EQ(ranges.suffix, IndexRange());
   }
+  {
+    AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(64), 64);
+    EXPECT_EQ(ranges.prefix, IndexRange());
+    EXPECT_EQ(ranges.aligned, IndexRange(64));
+    EXPECT_EQ(ranges.suffix, IndexRange());
+  }
+  {
+    AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(64, 64), 64);
+    EXPECT_EQ(ranges.prefix, IndexRange());
+    EXPECT_EQ(ranges.aligned, IndexRange(64, 64));
+    EXPECT_EQ(ranges.suffix, IndexRange());
+  }
+  {
+    AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(4, 8), 64);
+    EXPECT_EQ(ranges.prefix, IndexRange(4, 8));
+    EXPECT_EQ(ranges.aligned, IndexRange());
+    EXPECT_EQ(ranges.suffix, IndexRange());
+  }
 }
 
 }  // namespace blender::tests
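Reviewer note: the three new cases pin down what split_index_range_by_alignment is expected to do: a range is cut into an unaligned prefix, an aligned middle whose start and end are multiples of the alignment, and an unaligned suffix; a range that never reaches an aligned boundary ends up entirely in the prefix. Below is a standalone re-statement of that rule that reproduces the expectations above; the types and names are hypothetical and it is not the implementation under test.

    #include <cassert>
    #include <cstdint>

    struct Range {
      int64_t start = 0;
      int64_t size = 0;
    };

    struct Split {
      Range prefix, aligned, suffix;
    };

    /* Split `range` at multiples of `alignment`, as the tests expect. */
    static Split split_by_alignment(const Range range, const int64_t alignment)
    {
      const int64_t end = range.start + range.size;
      const int64_t aligned_start = ((range.start + alignment - 1) / alignment) * alignment;
      const int64_t aligned_end = (end / alignment) * alignment;
      Split result;
      if (aligned_end <= aligned_start) {
        /* The range never reaches an aligned block: everything is prefix. */
        result.prefix = range;
        return result;
      }
      result.prefix = {range.start, aligned_start - range.start};
      result.aligned = {aligned_start, aligned_end - aligned_start};
      result.suffix = {aligned_end, end - aligned_end};
      return result;
    }

    int main()
    {
      /* Mirrors the new cases above. */
      assert(split_by_alignment({0, 64}, 64).aligned.size == 64);   /* IndexRange(64)     */
      assert(split_by_alignment({64, 64}, 64).aligned.start == 64); /* IndexRange(64, 64) */
      assert(split_by_alignment({4, 8}, 64).prefix.size == 8);      /* IndexRange(4, 8)   */
      return 0;
    }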
@ -211,7 +211,7 @@ void *BMO_iter_as_arrayN(BMOpSlot slot_args[BMO_OP_MAX_SLOTS],
 
 int BM_iter_mesh_bitmap_from_filter(const char itype,
                                     BMesh *bm,
-                                    blender::BitVector<> &bitmap,
+                                    blender::MutableBitSpan bitmap,
                                     bool (*test_fn)(BMElem *, void *user_data),
                                     void *user_data)
 {
@ -234,7 +234,7 @@ int BM_iter_mesh_bitmap_from_filter(const char itype,
 }
 
 int BM_iter_mesh_bitmap_from_filter_tessface(BMesh *bm,
-                                             blender::BitVector<> &bitmap,
+                                             blender::MutableBitSpan bitmap,
                                              bool (*test_fn)(BMFace *, void *user_data),
                                              void *user_data)
 {
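Reviewer note: with this change the function receives a by-value MutableBitSpan view instead of a reference to the owning BitVector. Assuming BitVector<> still converts implicitly to MutableBitSpan (which the rest of this diff appears to rely on), existing callers should not need changes. A hypothetical caller, for illustration only:

    #include "BLI_bit_vector.hh"
    #include "bmesh.h"

    /* Hypothetical filter: mark every visited element (illustration only). */
    static bool filter_keep_all(BMElem * /*ele*/, void * /*user_data*/)
    {
      return true;
    }

    static int mark_all_verts(BMesh *bm)
    {
      /* The caller still owns and sizes the storage. */
      blender::BitVector<> bitmap(bm->totvert, false);
      /* `bitmap` is passed as a MutableBitSpan view; the function writes the
       * bits in place through the span. */
      return BM_iter_mesh_bitmap_from_filter(
          BM_VERTS_OF_MESH, bm, bitmap, filter_keep_all, nullptr);
    }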
@ -21,7 +21,7 @@
 #include "BLI_mempool.h"
 
 #ifdef __cplusplus
-#  include "BLI_bit_vector.hh"
+#  include "BLI_bit_span.hh"
 #endif
 
 #ifdef __cplusplus
@ -228,14 +228,14 @@ void *BMO_iter_as_arrayN(BMOpSlot slot_args[BMO_OP_MAX_SLOTS],
 
 int BM_iter_mesh_bitmap_from_filter(char itype,
                                     BMesh *bm,
-                                    blender::BitVector<> &bitmap,
+                                    blender::MutableBitSpan bitmap,
                                     bool (*test_fn)(BMElem *, void *user_data),
                                     void *user_data);
 /**
  * Needed when we want to check faces, but return a loop aligned array.
  */
 int BM_iter_mesh_bitmap_from_filter_tessface(BMesh *bm,
-                                             blender::BitVector<> &bitmap,
+                                             blender::MutableBitSpan bitmap,
                                              bool (*test_fn)(BMFace *, void *user_data),
                                              void *user_data);
@ -557,7 +557,7 @@ static void extract_edituv_fdots_iter_poly_mesh(const MeshRenderData *mr,
   const bool mp_select = (efa) ? BM_elem_flag_test_bool(efa, BM_ELEM_SELECT) : false;
 
   if (mr->use_subsurf_fdots) {
-    const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
+    const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
 
     const MLoop *mloop = mr->mloop;
     const int ml_index_end = mp->loopstart + mp->totloop;
@ -46,7 +46,7 @@ static void extract_fdots_iter_poly_mesh(const MeshRenderData *mr,
 
   GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(_userdata);
   if (mr->use_subsurf_fdots) {
-    const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
+    const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
 
     const MLoop *mloop = mr->mloop;
     const int ml_index_end = mp->loopstart + mp->totloop;
@ -102,7 +102,7 @@ static void extract_edge_fac_iter_poly_mesh(const MeshRenderData *mr,
                                             void *_data)
 {
   MeshExtract_EdgeFac_Data *data = static_cast<MeshExtract_EdgeFac_Data *>(_data);
-  const BitVector<> &optimal_display_edges = mr->me->runtime->subsurf_optimal_display_edges;
+  const BitSpan optimal_display_edges = mr->me->runtime->subsurf_optimal_display_edges;
 
   const MLoop *mloop = mr->mloop;
   const int ml_index_end = mp->loopstart + mp->totloop;
@ -76,7 +76,7 @@ static void extract_fdots_pos_iter_poly_mesh(const MeshRenderData *mr,
   zero_v3(co);
 
   const MLoop *mloop = mr->mloop;
-  const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
+  const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
 
   const int ml_index_end = mp->loopstart + mp->totloop;
   for (int ml_index = mp->loopstart; ml_index < ml_index_end; ml_index += 1) {
@ -74,7 +74,7 @@ static void extract_fdots_uv_iter_poly_mesh(const MeshRenderData *mr,
                                            void *_data)
 {
   MeshExtract_FdotUV_Data *data = static_cast<MeshExtract_FdotUV_Data *>(_data);
-  const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
+  const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
 
   const MLoop *mloop = mr->mloop;
   const int ml_index_end = mp->loopstart + mp->totloop;
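Reviewer note: the draw-cache extractor hunks above are all the same mechanical substitution: the subdivision runtime bit arrays are consumed as read-only BitSpan views instead of `const BitVector<> &`. The lookup pattern stays the same, since indexing a span yields a bit reference that tests as a bool (as the tests earlier in this diff show). A minimal illustration of that read pattern, not taken from this patch; `tags` merely stands in for something like subsurf_face_dot_tags:

    #include "BLI_bit_span.hh"
    #include "BLI_index_range.hh"

    /* Count set bits the same way the extractors above test individual tags. */
    static int count_tagged(const blender::BitSpan tags)
    {
      int count = 0;
      for (const int64_t i : blender::IndexRange(tags.size())) {
        if (tags[i]) { /* The indexed bit reference converts to bool. */
          count++;
        }
      }
      return count;
    }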