Cleanup: unify method parameters for virtual arrays

This makes `GVArrayImpl` and `VArrayImpl` more similar.
Passing only the pointer instead of the span also improves
efficiency slightly. The downside is that a few asserts had to be
removed. In practice, however, the same checks are still performed
at a higher level (in `VArrayCommon`).
2023-01-14 19:13:51 +01:00
parent 72cc68e299
commit ff15edc6ab
3 changed files with 58 additions and 83 deletions
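
The essence of the change: the implementation-level materialize methods now take a raw
destination pointer, as `GVArrayImpl` already does, while the span-based public entry
points and their asserts live in `VArrayCommon`. A simplified sketch of the pattern,
condensed from the diff below (member declarations omitted):

/* Implementation side: the virtual interface now receives a raw pointer.
 * Before: virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const; */
template<typename T> class VArrayImpl {
 public:
  virtual void materialize(IndexMask mask, T *dst) const
  {
    mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
  }
};

/* Public side: VArrayCommon keeps the span parameter plus the size assert,
 * and forwards only the underlying pointer to the implementation. */
template<typename T> class VArrayCommon {
 public:
  void materialize(IndexMask mask, MutableSpan<T> r_span) const
  {
    BLI_assert(mask.min_array_size() <= this->size());
    impl_->materialize(mask, r_span.data());
  }
};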

File 1 of 3:

@@ -984,26 +984,26 @@ class VArrayImpl_For_VertexWeights final : public VMutableArrayImpl<float> {
     });
   }
 
-  void materialize(IndexMask mask, MutableSpan<float> r_span) const override
+  void materialize(IndexMask mask, float *dst) const override
   {
     if (dverts_ == nullptr) {
-      return r_span.fill_indices(mask, 0.0f);
+      mask.foreach_index([&](const int i) { dst[i] = 0.0f; });
     }
     threading::parallel_for(mask.index_range(), 4096, [&](const IndexRange range) {
       for (const int64_t i : mask.slice(range)) {
         if (const MDeformWeight *weight = this->find_weight_at_index(i)) {
-          r_span[i] = weight->weight;
+          dst[i] = weight->weight;
         }
         else {
-          r_span[i] = 0.0f;
+          dst[i] = 0.0f;
         }
       }
     });
   }
 
-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<float> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, float *dst) const override
   {
-    this->materialize(mask, r_span);
+    this->materialize(mask, dst);
   }
 
 private:

File 2 of 3:

@@ -388,25 +388,24 @@ template<typename T> class VArrayImpl_For_GVArray : public VArrayImpl<T> {
     return true;
   }
 
-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    varray_.materialize(mask, r_span.data());
+    varray_.materialize(mask, dst);
   }
 
-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_to_uninitialized(mask, r_span.data());
+    varray_.materialize_to_uninitialized(mask, dst);
   }
 
-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_compressed(mask, r_span.data());
+    varray_.materialize_compressed(mask, dst);
   }
 
-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
+    varray_.materialize_compressed_to_uninitialized(mask, dst);
   }
 };
@@ -539,25 +538,24 @@ template<typename T> class VMutableArrayImpl_For_GVMutableArray : public VMutabl
     return true;
   }
 
-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    varray_.materialize(mask, r_span.data());
+    varray_.materialize(mask, dst);
   }
 
-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_to_uninitialized(mask, r_span.data());
+    varray_.materialize_to_uninitialized(mask, dst);
   }
 
-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_compressed(mask, r_span.data());
+    varray_.materialize_compressed(mask, dst);
   }
 
-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
+    varray_.materialize_compressed_to_uninitialized(mask, dst);
   }
 };
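
Both wrappers forward to a type-erased `GVArray`, whose materialize methods already took
raw pointers; that is the interface the typed `VArrayImpl` is being aligned with, and it
is why the old code had to call `r_span.data()` before forwarding. Roughly, the generic
side looks like this (a sketch of the relevant signatures, not code from this commit):

/* The generic (type-erased) virtual array works with untyped buffers,
 * so its materialize methods take void * instead of a typed span. */
void materialize(IndexMask mask, void *dst) const;
void materialize_to_uninitialized(IndexMask mask, void *dst) const;
void materialize_compressed(IndexMask mask, void *dst) const;
void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;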

File 3 of 3:

@@ -107,17 +107,16 @@ template<typename T> class VArrayImpl {
    * Copy values from the virtual array into the provided span. The index of the value in the
    * virtual array is the same as the index in the span.
    */
-  virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
+  virtual void materialize(IndexMask mask, T *dst) const
   {
-    mask.foreach_index([&](const int64_t i) { r_span[i] = this->get(i); });
+    mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
   }
 
   /**
    * Same as #materialize but #r_span is expected to be uninitialized.
    */
-  virtual void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+  virtual void materialize_to_uninitialized(IndexMask mask, T *dst) const
   {
-    T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
   }
@@ -126,12 +125,11 @@ template<typename T> class VArrayImpl {
    * in virtual array is not the same as the index in the output span. Instead, the span is filled
    * without gaps.
    */
-  virtual void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
+  virtual void materialize_compressed(IndexMask mask, T *dst) const
   {
-    BLI_assert(mask.size() == r_span.size());
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
-        r_span[i] = this->get(best_mask[i]);
+        dst[i] = this->get(best_mask[i]);
       }
     });
   }
@@ -139,10 +137,8 @@ template<typename T> class VArrayImpl {
   /**
    * Same as #materialize_compressed but #r_span is expected to be uninitialized.
    */
-  virtual void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+  virtual void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const
   {
-    BLI_assert(mask.size() == r_span.size());
-    T *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         new (dst + i) T(this->get(best_mask[i]));
@@ -254,32 +250,27 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
     return data_ == static_cast<const T *>(other_info.data);
   }
 
-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    mask.foreach_index([&](const int64_t i) { r_span[i] = data_[i]; });
+    mask.foreach_index([&](const int64_t i) { dst[i] = data_[i]; });
   }
 
-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) T(data_[i]); });
   }
 
-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
-        r_span[i] = data_[best_mask[i]];
+        dst[i] = data_[best_mask[i]];
      }
     });
   }
 
-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    T *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         new (dst + i) T(data_[best_mask[i]]);
@@ -357,29 +348,24 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
     return CommonVArrayInfo(CommonVArrayInfo::Type::Single, true, &value_);
   }
 
-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    r_span.fill_indices(mask, value_);
+    mask.foreach_index([&](const int64_t i) { dst[i] = value_; });
   }
 
-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    T *dst = r_span.data();
    mask.foreach_index([&](const int64_t i) { new (dst + i) T(value_); });
   }
 
-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    UNUSED_VARS_NDEBUG(mask);
-    r_span.fill(value_);
+    initialized_fill_n(dst, mask.size(), value_);
   }
 
-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    uninitialized_fill_n(r_span.data(), mask.size(), value_);
+    uninitialized_fill_n(dst, mask.size(), value_);
   }
 };
@@ -406,22 +392,18 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public
     return get_func_(index);
   }
 
-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
   }
 
-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
   }
 
-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    T *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         dst[i] = get_func_(best_mask[i]);
@@ -429,11 +411,8 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public
     });
   }
 
-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    T *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         new (dst + i) T(get_func_(best_mask[i]));
@@ -476,22 +455,18 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
     SetFunc(data_[index], std::move(value));
   }
 
-  void materialize(IndexMask mask, MutableSpan<ElemT> r_span) const override
+  void materialize(IndexMask mask, ElemT *dst) const override
   {
-    ElemT *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
   }
 
-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<ElemT> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, ElemT *dst) const override
   {
-    ElemT *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
   }
 
-  void materialize_compressed(IndexMask mask, MutableSpan<ElemT> r_span) const override
+  void materialize_compressed(IndexMask mask, ElemT *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    ElemT *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         dst[i] = GetFunc(data_[best_mask[i]]);
@@ -499,11 +474,8 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
     });
   }
 
-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<ElemT> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, ElemT *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    ElemT *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         new (dst + i) ElemT(GetFunc(data_[best_mask[i]]));
@@ -835,7 +807,7 @@ template<typename T> class VArrayCommon {
   void materialize(IndexMask mask, MutableSpan<T> r_span) const
   {
     BLI_assert(mask.min_array_size() <= this->size());
-    impl_->materialize(mask, r_span);
+    impl_->materialize(mask, r_span.data());
   }
 
   void materialize_to_uninitialized(MutableSpan<T> r_span) const
@@ -846,18 +818,18 @@ template<typename T> class VArrayCommon {
   void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
   {
     BLI_assert(mask.min_array_size() <= this->size());
-    impl_->materialize_to_uninitialized(mask, r_span);
+    impl_->materialize_to_uninitialized(mask, r_span.data());
   }
 
   /** Copy some elements of the virtual array into a span. */
   void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
   {
-    impl_->materialize_compressed(mask, r_span);
+    impl_->materialize_compressed(mask, r_span.data());
   }
 
   void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
   {
-    impl_->materialize_compressed_to_uninitialized(mask, r_span);
+    impl_->materialize_compressed_to_uninitialized(mask, r_span.data());
   }
 
   /** See #GVArrayImpl::try_assign_GVArray. */
@@ -865,6 +837,11 @@ template<typename T> class VArrayCommon {
   {
     return impl_->try_assign_GVArray(varray);
   }
+
+  const VArrayImpl<T> *get_implementation() const
+  {
+    return impl_;
+  }
 };
 
 template<typename T> class VMutableArray;
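
For callers nothing changes: the span-based entry points on `VArrayCommon` remain, and
the size checks now happen only at that level. A usage sketch, assuming the pre-existing
`VArray<T>::ForFunc` factory and `BLI_array.hh` (neither is part of this commit):

#include "BLI_array.hh"
#include "BLI_virtual_array.hh"

using namespace blender;

static void materialize_example()
{
  /* A virtual array that computes each element on demand. */
  const VArray<int> varray = VArray<int>::ForFunc(
      10, [](const int64_t i) { return int(i * i); });

  Array<int> buffer(10);

  /* The public API still takes a span; the size assert runs here, and the
   * implementation's materialize() receives buffer.data(). */
  varray.materialize(IndexMask(IndexRange(buffer.size())), buffer.as_mutable_span());
}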