diff --git a/source/blender/blenkernel/intern/geometry_component_mesh.cc b/source/blender/blenkernel/intern/geometry_component_mesh.cc
index 7b694be324a..bdb1b0edf8b 100644
--- a/source/blender/blenkernel/intern/geometry_component_mesh.cc
+++ b/source/blender/blenkernel/intern/geometry_component_mesh.cc
@@ -984,26 +984,26 @@ class VArrayImpl_For_VertexWeights final : public VMutableArrayImpl<float> {
     });
   }

-  void materialize(IndexMask mask, MutableSpan<float> r_span) const override
+  void materialize(IndexMask mask, float *dst) const override
   {
     if (dverts_ == nullptr) {
-      return r_span.fill_indices(mask, 0.0f);
+      return mask.foreach_index([&](const int i) { dst[i] = 0.0f; });
     }
     threading::parallel_for(mask.index_range(), 4096, [&](const IndexRange range) {
       for (const int64_t i : mask.slice(range)) {
         if (const MDeformWeight *weight = this->find_weight_at_index(i)) {
-          r_span[i] = weight->weight;
+          dst[i] = weight->weight;
         }
         else {
-          r_span[i] = 0.0f;
+          dst[i] = 0.0f;
         }
       }
     });
   }

-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<float> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, float *dst) const override
   {
-    this->materialize(mask, r_span);
+    this->materialize(mask, dst);
   }

  private:
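The pattern in this first file is the one the whole patch repeats: each `materialize` override drops its `MutableSpan<T>` parameter in favor of a raw `T *dst`, because the `IndexMask` already names every index that will be written, so the span's size adds nothing at the implementation level. Below is a minimal standalone sketch of that contract; the simplified `IndexMask` alias and the `SquaresImpl` class are hypothetical stand-ins for illustration, not Blender's real BLI types.

```cpp
// Standalone sketch of the post-patch materialize contract -- not Blender's
// actual headers. IndexMask is simplified to a span of sorted indices.
#include <cstdint>
#include <iostream>
#include <span>
#include <vector>

using IndexMask = std::span<const int64_t>;

template<typename T> class VArrayImpl {
 public:
  virtual ~VArrayImpl() = default;
  virtual T get(int64_t index) const = 0;

  /* After the change: write through a raw pointer. The mask supplies the
   * indices, so the destination size is implicit (>= max index + 1). */
  virtual void materialize(IndexMask mask, T *dst) const
  {
    for (const int64_t i : mask) {
      dst[i] = this->get(i);
    }
  }
};

class SquaresImpl final : public VArrayImpl<int> {
 public:
  int get(int64_t index) const override { return int(index * index); }
};

int main()
{
  const std::vector<int64_t> indices = {0, 2, 4};
  std::vector<int> buffer(5, -1);
  const SquaresImpl squares;
  squares.materialize(IndexMask(indices), buffer.data());
  for (const int v : buffer) {
    std::cout << v << ' ';  /* 0 -1 4 -1 16: only masked slots are written */
  }
}
```

Note that `materialize` touches only the masked slots; the destination merely has to be large enough for the largest masked index.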
diff --git a/source/blender/blenlib/BLI_generic_virtual_array.hh b/source/blender/blenlib/BLI_generic_virtual_array.hh
index cba767341c1..cb45da5e495 100644
--- a/source/blender/blenlib/BLI_generic_virtual_array.hh
+++ b/source/blender/blenlib/BLI_generic_virtual_array.hh
@@ -388,25 +388,24 @@ template<typename T> class VArrayImpl_For_GVArray : public VArrayImpl<T> {
     return true;
   }

-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    varray_.materialize(mask, r_span.data());
+    varray_.materialize(mask, dst);
   }

-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_to_uninitialized(mask, r_span.data());
+    varray_.materialize_to_uninitialized(mask, dst);
   }

-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_compressed(mask, r_span.data());
+    varray_.materialize_compressed(mask, dst);
   }

-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
+    varray_.materialize_compressed_to_uninitialized(mask, dst);
   }
 };

@@ -539,25 +538,24 @@ template<typename T> class VMutableArrayImpl_For_GVMutableArray : public VMutabl
     return true;
   }

-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    varray_.materialize(mask, r_span.data());
+    varray_.materialize(mask, dst);
   }

-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_to_uninitialized(mask, r_span.data());
+    varray_.materialize_to_uninitialized(mask, dst);
   }

-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_compressed(mask, r_span.data());
+    varray_.materialize_compressed(mask, dst);
   }

-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
+    varray_.materialize_compressed_to_uninitialized(mask, dst);
   }
 };

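These two wrappers show the immediate payoff of the signature change: the type-erased `GVArray` already materializes through a raw pointer, so the typed wrapper previously had to unwrap the span with `r_span.data()` at every hop, whereas now the pointer is forwarded untouched. A compilable sketch of that forwarding layer follows; `InnerArray` and `ForwardingImpl` are hypothetical stand-ins for the GVArray machinery, again assuming the simplified `IndexMask`.

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <span>
#include <vector>

using IndexMask = std::span<const int64_t>;

/* Stand-in for the wrapped, type-erased array (GVArray in Blender). */
struct InnerArray {
  std::function<float(int64_t)> get;

  void materialize(IndexMask mask, float *dst) const
  {
    for (const int64_t i : mask) {
      dst[i] = get(i);
    }
  }
};

/* Stand-in for VArrayImpl_For_GVArray: a typed view over InnerArray. */
struct ForwardingImpl {
  InnerArray inner;

  void materialize(IndexMask mask, float *dst) const
  {
    /* Before the patch this was inner.materialize(mask, r_span.data());
     * now the pointer passes through with no re-wrapping per call. */
    inner.materialize(mask, dst);
  }
};

int main()
{
  const ForwardingImpl impl{{[](const int64_t i) { return float(i) * 0.5f; }}};
  const std::vector<int64_t> indices = {1, 3};
  std::vector<float> buffer(4, 0.0f);
  impl.materialize(IndexMask(indices), buffer.data());
  std::cout << buffer[3];  /* 1.5 */
}
```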
diff --git a/source/blender/blenlib/BLI_virtual_array.hh b/source/blender/blenlib/BLI_virtual_array.hh
index 189cb85d468..819807843df 100644
--- a/source/blender/blenlib/BLI_virtual_array.hh
+++ b/source/blender/blenlib/BLI_virtual_array.hh
@@ -107,17 +107,16 @@ template<typename T> class VArrayImpl {
    * Copy values from the virtual array into the provided span. The index of the value in the
    * virtual array is the same as the index in the span.
    */
-  virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
+  virtual void materialize(IndexMask mask, T *dst) const
   {
-    mask.foreach_index([&](const int64_t i) { r_span[i] = this->get(i); });
+    mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
   }

   /**
    * Same as #materialize but #r_span is expected to be uninitialized.
    */
-  virtual void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+  virtual void materialize_to_uninitialized(IndexMask mask, T *dst) const
   {
-    T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
   }

@@ -126,12 +125,11 @@ template<typename T> class VArrayImpl {
    * in virtual array is not the same as the index in the output span. Instead, the span is filled
    * without gaps.
    */
-  virtual void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
+  virtual void materialize_compressed(IndexMask mask, T *dst) const
   {
-    BLI_assert(mask.size() == r_span.size());
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
-        r_span[i] = this->get(best_mask[i]);
+        dst[i] = this->get(best_mask[i]);
       }
     });
   }

@@ -139,10 +137,8 @@ template<typename T> class VArrayImpl {
   /**
    * Same as #materialize_compressed but #r_span is expected to be uninitialized.
    */
-  virtual void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+  virtual void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const
   {
-    BLI_assert(mask.size() == r_span.size());
-    T *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         new (dst + i) T(this->get(best_mask[i]));
@@ -254,32 +250,27 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
     return data_ == static_cast<const T *>(other_info.data);
   }

-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    mask.foreach_index([&](const int64_t i) { r_span[i] = data_[i]; });
+    mask.foreach_index([&](const int64_t i) { dst[i] = data_[i]; });
   }

-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) T(data_[i]); });
   }

-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
-        r_span[i] = data_[best_mask[i]];
+        dst[i] = data_[best_mask[i]];
       }
     });
   }

-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    T *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         new (dst + i) T(data_[best_mask[i]]);
@@ -357,29 +348,24 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
     return CommonVArrayInfo(CommonVArrayInfo::Type::Single, true, &value_);
   }

-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    r_span.fill_indices(mask, value_);
+    mask.foreach_index([&](const int64_t i) { dst[i] = value_; });
   }

-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) T(value_); });
   }

-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    UNUSED_VARS_NDEBUG(mask);
-    r_span.fill(value_);
+    initialized_fill_n(dst, mask.size(), value_);
   }

-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    uninitialized_fill_n(r_span.data(), mask.size(), value_);
+    uninitialized_fill_n(dst, mask.size(), value_);
   }
 };

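`VArrayImpl_For_Single` makes the two output conventions easy to contrast: plain `materialize` scatters values to `dst[i]` for each masked index `i`, while `materialize_compressed` packs `mask.size()` results densely from `dst[0]` on. A short sketch of the single-value case, with the simplified `IndexMask` again; Blender's `initialized_fill_n`/`uninitialized_fill_n` are shown here via their `std::` analogues.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <span>
#include <vector>

using IndexMask = std::span<const int64_t>;

/* Scattered: dst is indexed by the mask values themselves. */
void materialize_single(IndexMask mask, float value, float *dst)
{
  for (const int64_t i : mask) {
    dst[i] = value;
  }
}

/* Compressed: dst is indexed 0..mask.size()-1, with no gaps. The
 * *_to_uninitialized variant would use std::uninitialized_fill_n to
 * construct into raw memory instead of assigning over live values. */
void materialize_compressed_single(IndexMask mask, float value, float *dst)
{
  std::fill_n(dst, mask.size(), value);
}

int main()
{
  const std::vector<int64_t> indices = {1, 5, 9};
  std::vector<float> scattered(10, 0.0f);
  std::vector<float> packed(indices.size());
  materialize_single(IndexMask(indices), 2.5f, scattered.data());
  materialize_compressed_single(IndexMask(indices), 2.5f, packed.data());
  std::cout << scattered[9] << ' ' << packed.size();  /* 2.5 3 */
}
```

This packing is also why the `BLI_assert(mask.size() == r_span.size())` checks disappear from the implementations: with a raw pointer there is no second size to validate against, and the one meaningful bounds check moves to the public entry point (see the `VArrayCommon` hunks below).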
@@ -406,22 +392,18 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public
     return get_func_(index);
   }

-  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize(IndexMask mask, T *dst) const override
   {
-    T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
   }

-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    T *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
   }

-  void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+  void materialize_compressed(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    T *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         dst[i] = get_func_(best_mask[i]);
@@ -429,11 +411,8 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public
     });
   }

-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<T> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    T *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         new (dst + i) T(get_func_(best_mask[i]));
@@ -476,22 +455,18 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
     SetFunc(data_[index], std::move(value));
   }

-  void materialize(IndexMask mask, MutableSpan<ElemT> r_span) const override
+  void materialize(IndexMask mask, ElemT *dst) const override
   {
-    ElemT *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
   }

-  void materialize_to_uninitialized(IndexMask mask, MutableSpan<ElemT> r_span) const override
+  void materialize_to_uninitialized(IndexMask mask, ElemT *dst) const override
   {
-    ElemT *dst = r_span.data();
     mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
   }

-  void materialize_compressed(IndexMask mask, MutableSpan<ElemT> r_span) const override
+  void materialize_compressed(IndexMask mask, ElemT *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    ElemT *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         dst[i] = GetFunc(data_[best_mask[i]]);
@@ -499,11 +474,8 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
     });
   }

-  void materialize_compressed_to_uninitialized(IndexMask mask,
-                                               MutableSpan<ElemT> r_span) const override
+  void materialize_compressed_to_uninitialized(IndexMask mask, ElemT *dst) const override
   {
-    BLI_assert(mask.size() == r_span.size());
-    ElemT *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         new (dst + i) ElemT(GetFunc(data_[best_mask[i]]));
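`VArrayImpl_For_Func` and `VArrayImpl_For_DerivedSpan` follow the same template: the value is computed per index, either by calling a function or by projecting one field out of a span of structs. A sketch of the derived-span projection is below; the `Vertex` type and `get_weight` function are hypothetical examples, while Blender's real class is parameterized over `GetFunc`/`SetFunc`.

```cpp
#include <cstdint>
#include <iostream>
#include <span>
#include <vector>

using IndexMask = std::span<const int64_t>;

struct Vertex {
  float position[3];
  float weight;
};

inline float get_weight(const Vertex &v)
{
  return v.weight;
}

/* Stand-in for VArrayImpl_For_DerivedSpan::materialize: the backing storage
 * is a span of structs, and GetFunc derives one element per struct. */
template<float (*GetFunc)(const Vertex &)>
void materialize_derived(std::span<const Vertex> data, IndexMask mask, float *dst)
{
  for (const int64_t i : mask) {
    dst[i] = GetFunc(data[i]);
  }
}

int main()
{
  const std::vector<Vertex> verts = {{{0, 0, 0}, 0.25f}, {{1, 0, 0}, 0.75f}};
  const std::vector<int64_t> indices = {0, 1};
  std::vector<float> weights(verts.size());
  materialize_derived<get_weight>(verts, IndexMask(indices), weights.data());
  std::cout << weights[1];  /* 0.75 */
}
```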
@@ -835,7 +807,7 @@ template<typename T> class VArrayCommon {
   void materialize(IndexMask mask, MutableSpan<T> r_span) const
   {
     BLI_assert(mask.min_array_size() <= this->size());
-    impl_->materialize(mask, r_span);
+    impl_->materialize(mask, r_span.data());
   }
   void materialize_to_uninitialized(MutableSpan<T> r_span) const
   {
@@ -846,18 +818,18 @@ template<typename T> class VArrayCommon {
   void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
   {
     BLI_assert(mask.min_array_size() <= this->size());
-    impl_->materialize_to_uninitialized(mask, r_span);
+    impl_->materialize_to_uninitialized(mask, r_span.data());
   }

   /** Copy some elements of the virtual array into a span. */
   void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
   {
-    impl_->materialize_compressed(mask, r_span);
+    impl_->materialize_compressed(mask, r_span.data());
   }

   void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
   {
-    impl_->materialize_compressed_to_uninitialized(mask, r_span);
+    impl_->materialize_compressed_to_uninitialized(mask, r_span.data());
   }

   /** See #GVArrayImpl::try_assign_GVArray. */
@@ -865,6 +837,11 @@ template<typename T> class VArrayCommon {
   {
     return impl_->try_assign_GVArray(varray);
   }
+
+  const VArrayImpl<T> *get_implementation() const
+  {
+    return impl_;
+  }
 };

 template<typename T> class VMutableArray;
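The last hunks show where the span-based interface survives: the public `VArrayCommon` methods still accept `MutableSpan<T>`, assert the bounds once, and unwrap to `r_span.data()` before crossing the virtual-call boundary, and the new `get_implementation()` accessor exposes the impl pointer. A final sketch of that boundary under the same simplified assumptions; `Impl`, `ConstantImpl`, and `VArrayFacade` are stand-ins, and `min_array_size` mimics what `IndexMask::min_array_size()` reports for a sorted mask.

```cpp
#include <cassert>
#include <cstdint>
#include <iostream>
#include <span>
#include <vector>

using IndexMask = std::span<const int64_t>;

/* Largest masked index + 1, assuming the mask is sorted ascending. */
inline int64_t min_array_size(IndexMask mask)
{
  return mask.empty() ? 0 : mask.back() + 1;
}

struct Impl {
  virtual ~Impl() = default;
  virtual void materialize(IndexMask mask, float *dst) const = 0;
};

struct ConstantImpl final : Impl {
  float value = 0.0f;
  void materialize(IndexMask mask, float *dst) const override
  {
    for (const int64_t i : mask) {
      dst[i] = value;
    }
  }
};

struct VArrayFacade {  /* stands in for VArrayCommon<T> */
  const Impl *impl = nullptr;
  int64_t size = 0;

  void materialize(IndexMask mask, std::span<float> r_span) const
  {
    assert(min_array_size(mask) <= size);  /* bounds checked once, here */
    impl->materialize(mask, r_span.data()); /* impls see only the pointer */
  }
};

int main()
{
  ConstantImpl constant;
  constant.value = 7.0f;
  const std::vector<int64_t> indices = {0, 3};
  std::vector<float> buffer(4, 0.0f);
  const VArrayFacade varray{&constant, int64_t(buffer.size())};
  varray.materialize(IndexMask(indices), buffer);
  std::cout << buffer[3];  /* 7 */
}
```

Keeping the span at the public API while passing a pointer internally preserves caller-side size intent without paying for span construction on every virtual dispatch.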