1
1

Compare commits

...

13 Commits

Author SHA1 Message Date
3bdd415254 realize non-standard virtual arrays 2021-09-26 15:07:48 +02:00
46fe816fc6 use threads 2021-09-26 15:07:27 +02:00
2dbbbc7e85 add slice method 2021-09-26 14:52:12 +02:00
34ed0de287 improve 2021-09-26 14:11:42 +02:00
3bcb30b13b cleanup 2021-09-26 13:39:27 +02:00
f55023c82a progress 2021-09-26 13:23:09 +02:00
d00c68fc7b progress 2021-09-25 16:26:56 +02:00
ab36a7de75 progress 2021-09-25 15:59:20 +02:00
42d3210681 progress 2021-09-25 15:35:43 +02:00
318e2dd00f progress 2021-09-25 15:22:07 +02:00
8fdc78678a progress 2021-09-25 15:10:42 +02:00
ad114f806b progress 2021-09-25 15:01:09 +02:00
02d9d13a83 initial commit 2021-09-25 14:37:12 +02:00
15 changed files with 660 additions and 290 deletions

View File

@@ -300,7 +300,7 @@ static bool add_custom_data_layer_from_attribute_init(CustomData &custom_data,
return false;
}
const GVArray *varray = static_cast<const AttributeInitVArray &>(initializer).varray;
varray->materialize_to_uninitialized(IndexRange(varray->size()), data);
varray->get_multiple_to_uninitialized(data);
return true;
}
case AttributeInit::Type::MoveArray: {
@@ -495,7 +495,7 @@ static bool add_custom_data_layer_from_attribute_init(const AttributeIDRef &attr
return false;
}
const GVArray *varray = static_cast<const AttributeInitVArray &>(initializer).varray;
varray->materialize_to_uninitialized(IndexRange(varray->size()), data);
varray->get_multiple_to_uninitialized(data);
return true;
}
case AttributeInit::Type::MoveArray: {
@@ -1300,7 +1300,7 @@ static OutputAttribute create_output_attribute(GeometryComponent &component,
/* Fill the temporary array with values from the existing attribute. */
GVArrayPtr old_varray = component.attribute_get_for_read(
attribute_id, domain, data_type, default_value);
old_varray->materialize_to_uninitialized(IndexRange(domain_size), data);
old_varray->get_multiple_to_uninitialized(data);
}
GVMutableArrayPtr varray = std::make_unique<GVMutableAttribute_For_OutputAttribute>(
GMutableSpan{*cpp_type, data, domain_size}, component, attribute_id);

View File

@@ -294,14 +294,15 @@ template<typename T> class VArray_For_SplineToPoint final : public VArray<T> {
return original_data_[indices.spline_index];
}
void materialize_impl(const IndexMask mask, MutableSpan<T> r_span) const final
void get_multiple_impl(VMutableArray<T> &dst_varray, const IndexMask mask) const final
{
const int total_size = offsets_.last();
if (mask.is_range() && mask.as_range() == IndexRange(total_size)) {
for (const int spline_index : original_data_.index_range()) {
const int offset = offsets_[spline_index];
const int next_offset = offsets_[spline_index + 1];
r_span.slice(offset, next_offset - offset).fill(original_data_[spline_index]);
dst_varray.set_multiple(original_data_[spline_index],
IndexRange{offset, next_offset - offset});
}
}
else {
@@ -310,14 +311,13 @@ template<typename T> class VArray_For_SplineToPoint final : public VArray<T> {
while (offsets_[spline_index] < dst_index) {
spline_index++;
}
r_span[dst_index] = original_data_[spline_index];
dst_varray.set(dst_index, original_data_[spline_index]);
}
}
}
void materialize_to_uninitialized_impl(const IndexMask mask, MutableSpan<T> r_span) const final
void get_multiple_to_uninitialized_impl(T *dst, const IndexMask mask) const final
{
T *dst = r_span.data();
const int total_size = offsets_.last();
if (mask.is_range() && mask.as_range() == IndexRange(total_size)) {
for (const int spline_index : original_data_.index_range()) {
@@ -562,34 +562,6 @@ static void point_attribute_materialize(Span<Span<T>> data,
}
}
/**
 * Copy the masked control-point values from per-spline spans into uninitialized
 * destination memory (elements are placement-new constructed, not assigned).
 *
 * \param data: One span of values per spline.
 * \param offsets: Start offset of each spline in the flattened point array;
 * `offsets[i]` is the first point of spline `i` and `offsets.last()` is the
 * total point count (one more entry than there are splines, `offsets[0] == 0`).
 * \param mask: Indices (sorted ascending) into the flattened array to copy.
 * \param r_span: Destination buffer; only masked elements are constructed.
 */
template<typename T>
static void point_attribute_materialize_to_uninitialized(Span<Span<T>> data,
                                                         Span<int> offsets,
                                                         const IndexMask mask,
                                                         MutableSpan<T> r_span)
{
  T *dst = r_span.data();
  const int total_size = offsets.last();
  if (mask.is_range() && mask.as_range() == IndexRange(total_size)) {
    /* Fast path: every point is selected, so each spline's values can be
     * copied as one contiguous block. */
    for (const int spline_index : data.index_range()) {
      const int offset = offsets[spline_index];
      const int next_offset = offsets[spline_index + 1];
      uninitialized_copy_n(data[spline_index].data(), next_offset - offset, dst + offset);
    }
  }
  else {
    /* The mask is sorted ascending, so the containing spline can be found by
     * walking the offsets forward cumulatively instead of a per-index search. */
    int spline_index = 0;
    for (const int dst_index : mask) {
      /* Advance while the *next* spline still begins at or before this index.
       * Comparing `offsets[spline_index] < dst_index` (as before) overshoots
       * the containing spline for any index that is not exactly at a spline
       * start, yielding a negative `index_in_spline`. The loop cannot read
       * past `offsets.last()` because `dst_index < total_size`. */
      while (offsets[spline_index + 1] <= dst_index) {
        spline_index++;
      }
      const int index_in_spline = dst_index - offsets[spline_index];
      new (dst + dst_index) T(data[spline_index][index_in_spline]);
    }
  }
}
/**
* Virtual array for any control point data accessed with spans and an offset array.
*/
@@ -609,16 +581,6 @@ template<typename T> class VArray_For_SplinePoints : public VArray<T> {
const PointIndices indices = lookup_point_indices(offsets_, index);
return data_[indices.spline_index][indices.point_index];
}
void materialize_impl(const IndexMask mask, MutableSpan<T> r_span) const final
{
point_attribute_materialize(data_.as_span(), offsets_, mask, r_span);
}
void materialize_to_uninitialized_impl(const IndexMask mask, MutableSpan<T> r_span) const final
{
point_attribute_materialize_to_uninitialized(data_.as_span(), offsets_, mask, r_span);
}
};
/**
@@ -646,26 +608,6 @@ template<typename T> class VMutableArray_For_SplinePoints final : public VMutabl
const PointIndices indices = lookup_point_indices(offsets_, index);
data_[indices.spline_index][indices.point_index] = value;
}
void set_all_impl(Span<T> src) final
{
for (const int spline_index : data_.index_range()) {
const int offset = offsets_[spline_index];
const int next_offsets = offsets_[spline_index + 1];
data_[spline_index].copy_from(src.slice(offset, next_offsets - offset));
}
}
void materialize_impl(const IndexMask mask, MutableSpan<T> r_span) const final
{
point_attribute_materialize({(Span<T> *)data_.data(), data_.size()}, offsets_, mask, r_span);
}
void materialize_to_uninitialized_impl(const IndexMask mask, MutableSpan<T> r_span) const final
{
point_attribute_materialize_to_uninitialized(
{(Span<T> *)data_.data(), data_.size()}, offsets_, mask, r_span);
}
};
template<typename T> GVArrayPtr point_data_gvarray(Array<Span<T>> spans, Array<int> offsets)
@@ -722,29 +664,6 @@ class VMutableArray_For_SplinePosition final : public VMutableArray<float3> {
}
}
/* Write new positions for every control point of every spline.
 * `offsets_[i]` is the first flattened index of spline `i`, so spline `i`
 * reads from `src[offsets_[i] .. offsets_[i + 1])`. */
void set_all_impl(Span<float3> src) final
{
for (const int spline_index : splines_.index_range()) {
Spline &spline = *splines_[spline_index];
const int offset = offsets_[spline_index];
const int next_offset = offsets_[spline_index + 1];
if (BezierSpline *bezier_spline = dynamic_cast<BezierSpline *>(&spline)) {
/* Bezier points carry two handles that must follow the control point:
 * translate both handles by the same delta the point moves by, so the
 * curve shape around the point is preserved. */
MutableSpan<float3> positions = bezier_spline->positions();
MutableSpan<float3> handle_positions_left = bezier_spline->handle_positions_left();
MutableSpan<float3> handle_positions_right = bezier_spline->handle_positions_right();
for (const int i : IndexRange(next_offset - offset)) {
const float3 delta = src[offset + i] - positions[i];
handle_positions_left[i] += delta;
handle_positions_right[i] += delta;
positions[i] = src[offset + i];
}
}
else {
/* Non-Bezier splines have no handles; bulk-copy the position range. */
spline.positions().copy_from(src.slice(offset, next_offset - offset));
}
}
}
/** Utility so we can pass positions to the materialize functions above. */
Array<Span<float3>> get_position_spans() const
{
@@ -754,19 +673,6 @@ class VMutableArray_For_SplinePosition final : public VMutableArray<float3> {
}
return spans;
}
void materialize_impl(const IndexMask mask, MutableSpan<float3> r_span) const final
{
Array<Span<float3>> spans = this->get_position_spans();
point_attribute_materialize(spans.as_span(), offsets_, mask, r_span);
}
void materialize_to_uninitialized_impl(const IndexMask mask,
MutableSpan<float3> r_span) const final
{
Array<Span<float3>> spans = this->get_position_spans();
point_attribute_materialize_to_uninitialized(spans.as_span(), offsets_, mask, r_span);
}
};
/**
@@ -1115,10 +1021,9 @@ class DynamicPointAttributeProvider final : public DynamicAttributesProvider {
const int total_size = curve->control_point_offsets().last();
GVArrayPtr source_varray = varray_from_initializer(initializer, data_type, total_size);
/* TODO: When we can call a variant of #set_all with a virtual array argument,
/* TODO: When we can call a variant of #set_multiple_by_copy with a virtual array argument,
* this theoretically unnecessary materialize step could be removed. */
GVArray_GSpan source_varray_span{*source_varray};
write_attribute.varray->set_all(source_varray_span.data());
write_attribute.varray->set_multiple_by_copy(*source_varray);
if (initializer.type == AttributeInit::Type::MoveArray) {
MEM_freeN(static_cast<const AttributeInitMove &>(initializer).data);

View File

@@ -449,7 +449,7 @@ Span<float3> NURBSpline::evaluated_positions() const
/* TODO: Avoid copying the evaluated data from the temporary array. */
GVArray_Typed<float3> evaluated = Spline::interpolate_to_evaluated(positions_.as_span());
evaluated->materialize(evaluated_position_cache_);
evaluated->get_multiple(evaluated_position_cache_);
position_cache_dirty_ = false;
return evaluated_position_cache_;

View File

@@ -223,6 +223,11 @@ class IndexMask {
return indices_.is_empty();
}
/* Return a mask for the indices at the given positions within this mask.
 * The parameter selects positions in this mask, not index values. Presumably
 * a non-owning view into the same underlying index buffer (relies on an
 * implicit Span -> IndexMask conversion declared elsewhere) — no copy. */
IndexMask slice(IndexRange slice) const
{
return indices_.slice(slice);
}
IndexMask slice_and_offset(IndexRange slice, Vector<int64_t> &r_new_indices) const;
};

View File

@@ -43,6 +43,11 @@
namespace blender {
template<typename T> class VMutableArray;
template<typename T> class VArray_For_Span;
template<typename T> class VMutableArray_For_MutableSpan;
template<typename T> class VArray_For_Single;
/* An immutable virtual array. */
template<typename T> class VArray {
protected:
@@ -63,6 +68,60 @@ template<typename T> class VArray {
return this->get_impl(index);
}
/* Copy every element of this virtual array into the destination virtual
 * array (convenience overload over the masked variant below). */
void get_multiple(VMutableArray<T> &dst_varray) const
{
this->get_multiple(dst_varray, IndexMask(size_));
}
/* Copy every element of this virtual array into the destination span. */
void get_multiple(MutableSpan<T> dst) const
{
return this->get_multiple(dst, IndexMask(size_));
}
/* Copy the masked elements into the destination span by wrapping it in a
 * temporary span-backed virtual array and delegating to the generic overload
 * (which can then take the span fast paths). */
void get_multiple(MutableSpan<T> dst, IndexMask mask) const
{
VMutableArray_For_MutableSpan<T> varray(dst);
return this->get_multiple(varray, mask);
}
/* Copy the masked elements into `dst_varray`. Double-dispatch: first ask the
 * destination whether it has an efficient way to pull from this source (e.g.
 * it wraps a plain span); if so, let the destination drive the copy,
 * otherwise fall back to this array's own `get_multiple_impl`. */
void get_multiple(VMutableArray<T> &dst_varray, const IndexMask mask) const
{
BLI_assert(mask.min_array_size() <= size_);
BLI_assert(mask.min_array_size() <= dst_varray.size());
if (dst_varray._can_set_multiple_efficiently(*this)) {
dst_varray._set_multiple(*this, mask);
}
else {
this->_get_multiple(dst_varray, mask);
}
}
/* Internal dispatch helper: invoke this array's copy implementation directly,
 * bypassing the efficiency negotiation in `get_multiple` (called from the
 * destination side to avoid mutual recursion). */
void _get_multiple(VMutableArray<T> &dst_varray, const IndexMask mask) const
{
this->get_multiple_impl(dst_varray, mask);
}
/* Internal dispatch helper: whether this array has a specialized fast copy
 * into the given destination (queried by `VMutableArray::set_multiple`). */
bool _can_get_multiple_efficiently(const VMutableArray<T> &dst_varray) const
{
return this->can_get_multiple_efficiently_impl(dst_varray);
}
/* Construct copies of all elements into uninitialized memory at `dst`
 * (convenience overload over the masked variant below). */
void get_multiple_to_uninitialized(T *dst) const
{
this->get_multiple_to_uninitialized(dst, IndexMask(size_));
}
/* Construct copies of the masked elements into uninitialized memory at `dst`.
 * For trivial types, plain assignment into the raw buffer is equivalent to
 * placement-new construction, so the regular (and potentially faster)
 * `get_multiple` path can be reused; otherwise elements must be constructed
 * via `get_multiple_to_uninitialized_impl`. */
void get_multiple_to_uninitialized(T *dst, IndexMask mask) const
{
BLI_assert(mask.min_array_size() <= size_);
if constexpr (std::is_trivial_v<T>) {
this->get_multiple(MutableSpan(dst, mask.min_array_size()), mask);
}
else {
this->get_multiple_to_uninitialized_impl(dst, mask);
}
}
int64_t size() const
{
return size_;
@@ -125,33 +184,25 @@ template<typename T> class VArray {
return this->get(index);
}
/* Copy the entire virtual array into a span. */
void materialize(MutableSpan<T> r_span) const
{
this->materialize(IndexMask(size_), r_span);
}
/* Copy some indices of the virtual array into a span. */
void materialize(IndexMask mask, MutableSpan<T> r_span) const
{
BLI_assert(mask.min_array_size() <= size_);
this->materialize_impl(mask, r_span);
}
void materialize_to_uninitialized(MutableSpan<T> r_span) const
{
this->materialize_to_uninitialized(IndexMask(size_), r_span);
}
void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
{
BLI_assert(mask.min_array_size() <= size_);
this->materialize_to_uninitialized_impl(mask, r_span);
}
protected:
virtual T get_impl(const int64_t index) const = 0;
virtual void get_multiple_impl(VMutableArray<T> &dst_varray, IndexMask mask) const
{
mask.foreach_index([&](const int64_t i) { dst_varray.set(i, this->get(i)); });
}
virtual void get_multiple_to_uninitialized_impl(T *dst, IndexMask mask) const
{
mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
}
virtual bool can_get_multiple_efficiently_impl(const VMutableArray<T> &dst_varray) const
{
UNUSED_VARS(dst_varray);
return false;
}
virtual bool is_span_impl() const
{
return false;
@@ -175,38 +226,6 @@ template<typename T> class VArray {
BLI_assert_unreachable();
return T();
}
virtual void materialize_impl(IndexMask mask, MutableSpan<T> r_span) const
{
T *dst = r_span.data();
if (this->is_span()) {
const T *src = this->get_internal_span().data();
mask.foreach_index([&](const int64_t i) { dst[i] = src[i]; });
}
else if (this->is_single()) {
const T single = this->get_internal_single();
mask.foreach_index([&](const int64_t i) { dst[i] = single; });
}
else {
mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
}
}
virtual void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<T> r_span) const
{
T *dst = r_span.data();
if (this->is_span()) {
const T *src = this->get_internal_span().data();
mask.foreach_index([&](const int64_t i) { new (dst + i) T(src[i]); });
}
else if (this->is_single()) {
const T single = this->get_internal_single();
mask.foreach_index([&](const int64_t i) { new (dst + i) T(single); });
}
else {
mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
}
}
};
/* Similar to VArray, but the elements are mutable. */
@@ -223,11 +242,41 @@ template<typename T> class VMutableArray : public VArray<T> {
this->set_impl(index, std::move(value));
}
/* Copy the values from the source span to all elements in the virtual array. */
void set_all(Span<T> src)
void set_multiple(const VArray<T> &src_varray)
{
BLI_assert(src.size() == this->size_);
this->set_all_impl(src);
this->set_multiple(src_varray, IndexMask(this->size_));
}
void set_multiple(const Span<T> src)
{
this->set_multiple(VArray_For_Span<T>{src});
}
void set_multiple(const T &value, const IndexMask mask)
{
this->set_multiple(VArray_For_Single<T>{value, this->size_}, mask);
}
/* Copy the masked elements from `src_varray` into this array. Mirror image of
 * `VArray::get_multiple`: first ask the source whether it has an efficient
 * specialized copy into this destination (e.g. it wraps a span or a single
 * value); if so, let the source drive the copy, otherwise fall back to this
 * array's own `set_multiple_impl`. */
void set_multiple(const VArray<T> &src_varray, const IndexMask mask)
{
BLI_assert(mask.min_array_size() <= this->size_);
BLI_assert(mask.min_array_size() <= src_varray.size());
if (src_varray._can_get_multiple_efficiently(*this)) {
src_varray._get_multiple(*this, mask);
}
else {
this->_set_multiple(src_varray, mask);
}
}
void _set_multiple(const VArray<T> &src_varray, const IndexMask mask)
{
this->set_multiple_impl(src_varray, mask);
}
bool _can_set_multiple_efficiently(const VArray<T> &src_varray) const
{
return this->can_set_multiple_efficiently_impl(src_varray);
}
MutableSpan<T> get_internal_span()
@@ -240,18 +289,15 @@ template<typename T> class VMutableArray : public VArray<T> {
protected:
virtual void set_impl(const int64_t index, T value) = 0;
virtual void set_all_impl(Span<T> src)
virtual void set_multiple_impl(const VArray<T> &src_varray, IndexMask mask)
{
if (this->is_span()) {
const MutableSpan<T> span = this->get_internal_span();
initialized_copy_n(src.data(), this->size_, span.data());
}
else {
const int64_t size = this->size_;
for (int64_t i = 0; i < size; i++) {
this->set(i, src[i]);
}
}
mask.foreach_index([&](const int64_t i) { this->set(i, src_varray.get(i)); });
}
virtual bool can_set_multiple_efficiently_impl(const VArray<T> &src_varray) const
{
UNUSED_VARS(src_varray);
return false;
}
};
@@ -290,6 +336,27 @@ template<typename T> class VArray_For_Span : public VArray<T> {
{
return Span<T>(data_, this->size_);
}
void get_multiple_impl(VMutableArray<T> &dst_varray, IndexMask mask) const
{
if (dst_varray.is_span()) {
T *dst_ptr = dst_varray.get_internal_span().data();
mask.foreach_index([&](const int64_t i) { dst_ptr[i] = data_[i]; });
}
else {
mask.foreach_index([&](const int64_t i) { dst_varray.set(i, data_[i]); });
}
}
void get_multiple_to_uninitialized_impl(T *dst, IndexMask mask) const
{
mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
}
bool can_get_multiple_efficiently_impl(const VMutableArray<T> &dst_varray) const
{
return dst_varray.is_span();
}
};
template<typename T> class VMutableArray_For_MutableSpan : public VMutableArray<T> {
@@ -312,11 +379,52 @@ template<typename T> class VMutableArray_For_MutableSpan : public VMutableArray<
return data_[index];
}
void get_multiple_impl(VMutableArray<T> &dst_varray, IndexMask mask) const
{
if (dst_varray.is_span()) {
T *dst_ptr = dst_varray.get_internal_span().data();
mask.foreach_index([&](const int64_t i) { dst_ptr[i] = data_[i]; });
}
else {
mask.foreach_index([&](const int64_t i) { dst_varray.set(i, data_[i]); });
}
}
void get_multiple_to_uninitialized_impl(T *dst, IndexMask mask) const
{
mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
}
bool can_get_multiple_efficiently_impl(const VMutableArray<T> &dst_varray) const
{
return dst_varray.is_span();
}
void set_impl(const int64_t index, T value) final
{
data_[index] = value;
}
void set_multiple_impl(const VArray<T> &src_varray, IndexMask mask)
{
if (src_varray.is_span()) {
const T *src_ptr = src_varray.get_internal_span().data();
mask.foreach_index([&](const int64_t i) { data_[i] = src_ptr[i]; });
}
else if (src_varray.is_single()) {
const T src_value = src_varray.get_internal_single();
mask.foreach_index([&](const int64_t i) { data_[i] = src_value; });
}
else {
mask.foreach_index([&](const int64_t i) { data_[i] = src_varray.get(i); });
}
}
bool can_set_multiple_efficiently_impl(const VArray<T> &src_varray) const
{
return src_varray.is_span() || src_varray.is_single();
}
bool is_span_impl() const override
{
return true;
@@ -367,6 +475,27 @@ template<typename T> class VArray_For_Single final : public VArray<T> {
return value_;
}
void get_multiple_impl(VMutableArray<T> &dst_varray, IndexMask mask) const
{
if (dst_varray.is_span()) {
T *dst_ptr = dst_varray.get_internal_span().data();
mask.foreach_index([&](const int64_t i) { dst_ptr[i] = value_; });
}
else {
mask.foreach_index([&](const int64_t i) { dst_varray.set(i, value_); });
}
}
void get_multiple_to_uninitialized_impl(T *dst, IndexMask mask) const
{
mask.foreach_index([&](const int64_t i) { new (dst + i) T(value_); });
}
bool can_get_multiple_efficiently_impl(const VMutableArray<T> &dst_varray) const
{
return dst_varray.is_span();
}
bool is_span_impl() const override
{
return this->size_ == 1;
@@ -414,7 +543,7 @@ template<typename T> class VArray_Span final : public Span<T> {
else {
owned_data_.~Array();
new (&owned_data_) Array<T>(varray_.size(), NoInitialization{});
varray_.materialize_to_uninitialized(owned_data_);
varray_.get_multiple_to_uninitialized(owned_data_.data());
this->data_ = owned_data_.data();
}
}
@@ -448,7 +577,7 @@ template<typename T> class VMutableArray_Span final : public MutableSpan<T> {
if (copy_values_to_span) {
owned_data_.~Array();
new (&owned_data_) Array<T>(varray_.size(), NoInitialization{});
varray_.materialize_to_uninitialized(owned_data_);
varray_.get_multiple_to_uninitialized(owned_data_.data());
}
else {
owned_data_.reinitialize(varray_.size());
@@ -473,7 +602,7 @@ template<typename T> class VMutableArray_Span final : public MutableSpan<T> {
if (this->data_ != owned_data_.data()) {
return;
}
varray_.set_all(owned_data_);
varray_.set_multiple(owned_data_);
}
void disable_not_applied_warning()
@@ -502,17 +631,26 @@ template<typename T, typename GetFunc> class VArray_For_Func final : public VArr
return get_func_(index);
}
void materialize_impl(IndexMask mask, MutableSpan<T> r_span) const override
void get_multiple_impl(VMutableArray<T> &dst_varray, IndexMask mask) const
{
T *dst = r_span.data();
mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
if (dst_varray.is_span()) {
T *dst_ptr = dst_varray.get_internal_span().data();
mask.foreach_index([&](const int64_t i) { dst_ptr[i] = get_func_(i); });
}
else {
mask.foreach_index([&](const int64_t i) { dst_varray.set(i, get_func_(i)); });
}
}
void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<T> r_span) const override
void get_multiple_to_uninitialized_impl(T *dst, IndexMask mask) const
{
T *dst = r_span.data();
mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
}
bool can_get_multiple_efficiently_impl(const VMutableArray<T> &dst_varray) const
{
return dst_varray.is_span();
}
};
template<typename StructT, typename ElemT, ElemT (*GetFunc)(const StructT &)>
@@ -520,6 +658,12 @@ class VArray_For_DerivedSpan : public VArray<ElemT> {
private:
const StructT *data_;
template<typename OtherStructT,
typename OtherElemT,
OtherElemT (*OtherGetFunc)(const OtherStructT &),
void (*OtherSetFunc)(OtherStructT &, OtherElemT)>
friend class VMutableArray_For_DerivedSpan;
public:
VArray_For_DerivedSpan(const Span<StructT> data) : VArray<ElemT>(data.size()), data_(data.data())
{
@@ -531,17 +675,26 @@ class VArray_For_DerivedSpan : public VArray<ElemT> {
return GetFunc(data_[index]);
}
void materialize_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
void get_multiple_impl(VMutableArray<ElemT> &dst_varray, IndexMask mask) const
{
ElemT *dst = r_span.data();
mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
if (dst_varray.is_span()) {
ElemT *dst_ptr = dst_varray.get_internal_span().data();
mask.foreach_index([&](const int64_t i) { dst_ptr[i] = GetFunc(data_[i]); });
}
else {
mask.foreach_index([&](const int64_t i) { dst_varray.set(i, GetFunc(data_[i])); });
}
}
void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
void get_multiple_to_uninitialized_impl(ElemT *dst, IndexMask mask) const
{
ElemT *dst = r_span.data();
mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
}
bool can_get_multiple_efficiently_impl(const VMutableArray<ElemT> &dst_varray) const
{
return dst_varray.is_span();
}
};
template<typename StructT,
@@ -552,6 +705,9 @@ class VMutableArray_For_DerivedSpan : public VMutableArray<ElemT> {
private:
StructT *data_;
using SelfT = VMutableArray_For_DerivedSpan;
using ConstSelfT = const VArray_For_DerivedSpan<StructT, ElemT, GetFunc>;
public:
VMutableArray_For_DerivedSpan(const MutableSpan<StructT> data)
: VMutableArray<ElemT>(data.size()), data_(data.data())
@@ -569,17 +725,64 @@ class VMutableArray_For_DerivedSpan : public VMutableArray<ElemT> {
SetFunc(data_[index], std::move(value));
}
void materialize_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
void get_multiple_impl(VMutableArray<ElemT> &dst_varray, IndexMask mask) const
{
ElemT *dst = r_span.data();
mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
if (dst_varray.is_span()) {
ElemT *dst_ptr = dst_varray.get_internal_span().data();
mask.foreach_index([&](const int64_t i) { dst_ptr[i] = GetFunc(data_[i]); });
}
else {
mask.foreach_index([&](const int64_t i) { dst_varray.set(i, GetFunc(data_[i])); });
}
}
void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
void get_multiple_to_uninitialized_impl(ElemT *dst, IndexMask mask) const
{
ElemT *dst = r_span.data();
mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
}
bool can_get_multiple_efficiently_impl(const VMutableArray<ElemT> &dst_varray) const
{
return dst_varray.is_span();
}
virtual void set_multiple_impl(const VArray<ElemT> &src_varray, IndexMask mask)
{
if (src_varray.is_span()) {
const ElemT *src_ptr = src_varray.get_internal_span().data();
mask.foreach_index([&](const int64_t i) { SetFunc(data_[i], src_ptr[i]); });
}
else if (src_varray.is_single()) {
const ElemT src_value = src_varray.get_internal_single();
mask.foreach_index([&](const int64_t i) { SetFunc(data_[i], src_value); });
}
else if (const SelfT *src_varray_typed = dynamic_cast<const SelfT *>(&src_varray)) {
if (src_varray_typed->data_ == data_) {
/* Nothing to do. */
return;
}
mask.foreach_index(
[&](const int64_t i) { SetFunc(data_[i], GetFunc(src_varray_typed->data_[i])); });
}
else if (const ConstSelfT *src_varray_typed = dynamic_cast<const ConstSelfT *>(&src_varray)) {
if (src_varray_typed->data_ == data_) {
/* Nothing to do. */
return;
}
mask.foreach_index(
[&](const int64_t i) { SetFunc(data_[i], GetFunc(src_varray_typed->data_[i])); });
}
else {
mask.foreach_index([&](const int64_t i) { SetFunc(data_[i], src_varray.get(i)); });
}
}
virtual bool can_set_multiple_efficiently_impl(const VArray<ElemT> &src_varray) const
{
return src_varray.is_span() || src_varray.is_single() ||
dynamic_cast<const SelfT *>(&src_varray) != nullptr ||
dynamic_cast<const ConstSelfT *>(&src_varray) != nullptr;
}
};
/**

View File

@@ -86,6 +86,15 @@ class GVArray {
this->get_to_uninitialized_impl(index, r_value);
}
void get_multiple(GVMutableArray &dst_varray) const;
void get_multiple(GMutableSpan dst) const;
void get_multiple(GVMutableArray &dst_varray, IndexMask mask) const;
void _get_multiple(GVMutableArray &dst_varray, IndexMask mask) const;
bool _can_get_multiple_efficiently(const GVMutableArray &dst_varray) const;
void get_multiple_to_uninitialized(void *dst) const;
void get_multiple_to_uninitialized(void *dst, IndexMask mask) const;
/* Returns true when the virtual array is stored as a span internally. */
bool is_span() const
{
@@ -135,12 +144,6 @@ class GVArray {
this->get_internal_single(r_value);
}
void materialize(void *dst) const;
void materialize(const IndexMask mask, void *dst) const;
void materialize_to_uninitialized(void *dst) const;
void materialize_to_uninitialized(const IndexMask mask, void *dst) const;
template<typename T> const VArray<T> *try_get_internal_varray() const
{
BLI_assert(type_->is<T>());
@@ -159,15 +162,17 @@ class GVArray {
virtual void get_impl(const int64_t index, void *r_value) const;
virtual void get_to_uninitialized_impl(const int64_t index, void *r_value) const = 0;
virtual void get_multiple_impl(GVMutableArray &dst_varray, IndexMask mask) const;
virtual bool can_get_multiple_efficiently_impl(const GVMutableArray &dst_varray) const;
virtual void get_multiple_to_uninitialized_impl(void *dst, IndexMask mask) const;
virtual bool is_span_impl() const;
virtual GSpan get_internal_span_impl() const;
virtual bool is_single_impl() const;
virtual void get_internal_single_impl(void *UNUSED(r_value)) const;
virtual void materialize_impl(const IndexMask mask, void *dst) const;
virtual void materialize_to_uninitialized_impl(const IndexMask mask, void *dst) const;
virtual const void *try_get_internal_varray_impl() const;
};
@@ -199,6 +204,12 @@ class GVMutableArray : public GVArray {
this->set_by_relocate_impl(index, value);
}
void set_multiple_by_copy(const GVArray &src_varray);
void set_multiple_by_copy(const GSpan &src);
void set_multiple_by_copy(const GVArray &src_varray, const IndexMask mask);
void _set_multiple_by_copy(const GVArray &src_varray, const IndexMask mask);
bool _can_set_multiple_efficiently(const GVArray &src_varray) const;
GMutableSpan get_internal_span()
{
BLI_assert(this->is_span());
@@ -220,18 +231,13 @@ class GVMutableArray : public GVArray {
void fill(const void *value);
/* Copy the values from the source buffer to all elements in the virtual array. */
void set_all(const void *src)
{
this->set_all_impl(src);
}
protected:
virtual void set_by_copy_impl(const int64_t index, const void *value);
virtual void set_by_relocate_impl(const int64_t index, void *value);
virtual void set_by_move_impl(const int64_t index, void *value) = 0;
virtual void set_all_impl(const void *src);
virtual void set_multiple_by_copy_impl(const GVArray &src_varray, IndexMask mask);
virtual bool can_set_multiple_efficiently_impl(const GVArray &src_varray) const;
virtual void *try_get_internal_mutable_varray_impl();
};
@@ -256,6 +262,10 @@ class GVArray_For_GSpan : public GVArray {
void get_impl(const int64_t index, void *r_value) const override;
void get_to_uninitialized_impl(const int64_t index, void *r_value) const override;
void get_multiple_impl(GVMutableArray &dst_varray, IndexMask mask) const override;
void get_multiple_to_uninitialized_impl(void *dst, IndexMask mask) const override;
bool can_get_multiple_efficiently_impl(const GVMutableArray &dst_varray) const override;
bool is_span_impl() const override;
GSpan get_internal_span_impl() const override;
};
@@ -295,10 +305,17 @@ class GVMutableArray_For_GMutableSpan : public GVMutableArray {
void get_impl(const int64_t index, void *r_value) const override;
void get_to_uninitialized_impl(const int64_t index, void *r_value) const override;
void get_multiple_impl(GVMutableArray &dst_varray, IndexMask mask) const override;
void get_multiple_to_uninitialized_impl(void *dst, IndexMask mask) const override;
bool can_get_multiple_efficiently_impl(const GVMutableArray &dst_varray) const override;
void set_by_copy_impl(const int64_t index, const void *value) override;
void set_by_move_impl(const int64_t index, void *value) override;
void set_by_relocate_impl(const int64_t index, void *value) override;
void set_multiple_by_copy_impl(const GVArray &src_varray, IndexMask mask) override;
bool can_set_multiple_efficiently_impl(const GVArray &src_varray) const override;
bool is_span_impl() const override;
GSpan get_internal_span_impl() const override;
};
@@ -322,6 +339,10 @@ class GVArray_For_SingleValueRef : public GVArray {
void get_impl(const int64_t index, void *r_value) const override;
void get_to_uninitialized_impl(const int64_t index, void *r_value) const override;
void get_multiple_impl(GVMutableArray &dst_varray, IndexMask mask) const override;
void get_multiple_to_uninitialized_impl(void *dst, IndexMask mask) const override;
bool can_get_multiple_efficiently_impl(const GVMutableArray &dst_varray) const override;
bool is_span_impl() const override;
GSpan get_internal_span_impl() const override;
@@ -362,6 +383,25 @@ template<typename T> class GVArray_For_VArray : public GVArray {
new (r_value) T(varray_->get(index));
}
void get_multiple_impl(GVMutableArray &dst_varray, IndexMask mask) const override
{
/* `const_cast` is ok because the data is not actually modified. */
GVMutableArray_Typed<T> dst_typed{const_cast<GVMutableArray &>(dst_varray)};
varray_->get_multiple(*dst_typed, mask);
}
void get_multiple_to_uninitialized_impl(void *dst, IndexMask mask) const override
{
varray_->get_multiple_to_uninitialized((T *)dst, mask);
}
bool can_get_multiple_efficiently_impl(const GVMutableArray &dst_varray) const override
{
/* `const_cast` is ok because the data is not actually modified. */
GVMutableArray_Typed<T> dst_typed{const_cast<GVMutableArray &>(dst_varray)};
return varray_->_can_get_multiple_efficiently(*dst_typed);
}
bool is_span_impl() const override
{
return varray_->is_span();
@@ -382,22 +422,14 @@ template<typename T> class GVArray_For_VArray : public GVArray {
*(T *)r_value = varray_->get_internal_single();
}
void materialize_impl(const IndexMask mask, void *dst) const override
{
varray_->materialize(mask, MutableSpan((T *)dst, mask.min_array_size()));
}
void materialize_to_uninitialized_impl(const IndexMask mask, void *dst) const override
{
varray_->materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
}
const void *try_get_internal_varray_impl() const override
{
return varray_;
}
};
template<typename T> class GVMutableArray_For_VMutableArray;
/* Used to convert any generic virtual array into a typed one. */
template<typename T> class VArray_For_GVArray : public VArray<T> {
protected:
@@ -421,6 +453,23 @@ template<typename T> class VArray_For_GVArray : public VArray<T> {
return value;
}
void get_multiple_impl(VMutableArray<T> &dst_varray, IndexMask mask) const override
{
GVMutableArray_For_VMutableArray<T> generic_dst{dst_varray};
varray_->get_multiple(generic_dst, mask);
}
void get_multiple_to_uninitialized_impl(T *dst, IndexMask mask) const override
{
varray_->get_multiple_to_uninitialized(dst, mask);
}
bool can_get_multiple_efficiently_impl(const VMutableArray<T> &dst_varray) const override
{
GVMutableArray_For_VMutableArray<T> generic_dst{const_cast<VMutableArray<T> &>(dst_varray)};
return varray_->_can_get_multiple_efficiently(generic_dst);
}
bool is_span_impl() const override
{
return varray_->is_span();
@@ -468,11 +517,40 @@ template<typename T> class VMutableArray_For_GVMutableArray : public VMutableArr
return value;
}
void get_multiple_impl(VMutableArray<T> &dst_varray, IndexMask mask) const override
{
  /* Adapt the typed destination to the generic interface before delegating. */
  GVMutableArray_For_VMutableArray<T> generic_destination{dst_varray};
  varray_->get_multiple(generic_destination, mask);
}
/* Delegate the masked copy into an uninitialized T buffer to the wrapped generic array. */
void get_multiple_to_uninitialized_impl(T *dst, IndexMask mask) const override
{
  varray_->get_multiple_to_uninitialized(dst, mask);
}
bool can_get_multiple_efficiently_impl(const VMutableArray<T> &dst_varray) const override
{
GVMutableArray_For_VMutableArray<T> generic_dst{const_cast<VMutableArray<T> &>(dst_varray)};
return varray_->_can_get_multiple_efficiently(generic_dst);
}
/* Store one element; `value` is a local copy, so its contents can be relocated
 * (moved + destructed) into the wrapped generic array. */
void set_impl(const int64_t index, T value) override
{
  varray_->set_by_relocate(index, &value);
}
void set_multiple_impl(const VArray<T> &src_varray, IndexMask mask) override
{
GVArray_For_VArray<T> generic_src{src_varray};
varray_->set_multiple_by_copy(generic_src, mask);
}
bool can_set_multiple_efficiently_impl(const VArray<T> &src_varray) const override
{
GVArray_For_VArray<T> generic_src{src_varray};
return varray_->_can_set_multiple_efficiently(generic_src);
}
bool is_span_impl() const override
{
return varray_->is_span();
@@ -522,6 +600,25 @@ template<typename T> class GVMutableArray_For_VMutableArray : public GVMutableAr
new (r_value) T(varray_->get(index));
}
/* Copy the masked elements into the (typed) destination virtual array. */
void get_multiple_impl(GVMutableArray &dst_varray, IndexMask mask) const override
{
  /* `dst_varray` is already a non-const reference here, so no const_cast is needed
   * (unlike `can_get_multiple_efficiently_impl`, which receives a const reference). */
  GVMutableArray_Typed<T> dst_typed{dst_varray};
  varray_->get_multiple(*dst_typed, mask);
}
void get_multiple_to_uninitialized_impl(void *dst, IndexMask mask) const override
{
varray_->get_multiple_to_uninitialized((T *)dst, mask);
}
bool can_get_multiple_efficiently_impl(const GVMutableArray &dst_varray) const override
{
/* `const_cast` is ok because the data is not actually modified. */
GVMutableArray_Typed<T> dst_typed{const_cast<GVMutableArray &>(dst_varray)};
return varray_->_can_get_multiple_efficiently(*dst_typed);
}
bool is_span_impl() const override
{
return varray_->is_span();
@@ -562,19 +659,16 @@ template<typename T> class GVMutableArray_For_VMutableArray : public GVMutableAr
varray_->set(index, std::move(value_));
}
void set_all_impl(const void *src) override
void set_multiple_by_copy_impl(const GVArray &src_varray, IndexMask mask) override
{
varray_->set_all(Span((T *)src, size_));
GVArray_Typed<T> src_typed{src_varray};
varray_->set_multiple(*src_typed, mask);
}
void materialize_impl(const IndexMask mask, void *dst) const override
bool can_set_multiple_efficiently_impl(const GVArray &src_varray) const override
{
varray_->materialize(mask, MutableSpan((T *)dst, mask.min_array_size()));
}
void materialize_to_uninitialized_impl(const IndexMask mask, void *dst) const override
{
varray_->materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
GVArray_Typed<T> src_typed{src_varray};
return varray_->_can_set_multiple_efficiently(*src_typed);
}
const void *try_get_internal_varray_impl() const override

View File

@@ -18,6 +18,7 @@
#include "BLI_multi_value_map.hh"
#include "BLI_set.hh"
#include "BLI_stack.hh"
#include "BLI_task.hh"
#include "BLI_vector_set.hh"
#include "FN_field.hh"
@@ -468,21 +469,10 @@ Vector<const GVArray *> evaluate_fields(ResourceScope &scope,
/* The result has been written into the destination provided by the caller already. */
continue;
}
/* Still have to copy over the data in the destination provided by the caller. */
if (output_varray->is_span()) {
/* Materialize into a span. */
computed_varray->materialize_to_uninitialized(mask,
output_varray->get_internal_span().data());
}
else {
/* Slower materialize into a different structure. */
const CPPType &type = computed_varray->type();
BUFFER_FOR_CPP_TYPE_VALUE(type, buffer);
for (const int i : mask) {
computed_varray->get_to_uninitialized(i, buffer);
output_varray->set_by_relocate(i, buffer);
}
}
threading::parallel_for(mask.index_range(), 1024, [&](IndexRange range) {
const IndexMask sub_mask = mask.slice(range);
output_varray->set_multiple_by_copy(*computed_varray, sub_mask);
});
r_varrays[out_index] = output_varray;
}
}

View File

@@ -42,52 +42,77 @@ class GVArray_For_ShallowCopy : public GVArray {
{
varray_.get_to_uninitialized(index, r_value);
}
void materialize_to_uninitialized_impl(const IndexMask mask, void *dst) const override
{
varray_.materialize_to_uninitialized(mask, dst);
}
};
/* --------------------------------------------------------------------
* GVArray.
*/
void GVArray::materialize(void *dst) const
void GVArray::get_multiple(GVMutableArray &dst_varray) const
{
this->materialize(IndexMask(size_), dst);
this->get_multiple(dst_varray, IndexMask(size_));
}
void GVArray::materialize(const IndexMask mask, void *dst) const
void GVArray::get_multiple(GMutableSpan dst) const
{
this->materialize_impl(mask, dst);
GVMutableArray_For_GMutableSpan varray(dst);
this->get_multiple(varray);
}
void GVArray::materialize_impl(const IndexMask mask, void *dst) const
void GVArray::get_multiple(GVMutableArray &dst_varray, const IndexMask mask) const
{
for (const int64_t i : mask) {
void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
this->get(i, elem_dst);
if (dst_varray._can_set_multiple_efficiently(*this)) {
dst_varray._set_multiple_by_copy(*this, mask);
}
else {
this->_get_multiple(dst_varray, mask);
}
}
void GVArray::materialize_to_uninitialized(void *dst) const
bool GVArray::_can_get_multiple_efficiently(const GVMutableArray &dst_varray) const
{
this->materialize_to_uninitialized(IndexMask(size_), dst);
return this->can_get_multiple_efficiently_impl(dst_varray);
}
void GVArray::materialize_to_uninitialized(const IndexMask mask, void *dst) const
/* Internal entry point: validate the destination and mask, then dispatch to the
 * subclass implementation. Callers go through `get_multiple()` instead. */
void GVArray::_get_multiple(GVMutableArray &dst_varray, const IndexMask mask) const
{
  BLI_assert(dst_varray.type() == *type_);
  BLI_assert(mask.min_array_size() <= size_);
  BLI_assert(mask.min_array_size() <= dst_varray.size());
  this->get_multiple_impl(dst_varray, mask);
}
void GVArray::get_multiple_impl(GVMutableArray &dst_varray, const IndexMask mask) const
{
  /* Generic fallback: move each element through a stack-allocated temporary. */
  BUFFER_FOR_CPP_TYPE_VALUE(*type_, buffer);
  mask.foreach_index([&](const int64_t i) {
    this->get_to_uninitialized(i, buffer);
    dst_varray.set_by_relocate(i, buffer);
  });
}
/* Default: a plain virtual array has no fast bulk path; subclasses override this. */
bool GVArray::can_get_multiple_efficiently_impl(const GVMutableArray &UNUSED(dst_varray)) const
{
  return false;
}
/* Convenience overload: copy all elements (full index range) into uninitialized memory. */
void GVArray::get_multiple_to_uninitialized(void *dst) const
{
  this->get_multiple_to_uninitialized(dst, IndexMask(size_));
}
void GVArray::get_multiple_to_uninitialized(void *dst, IndexMask mask) const
{
BLI_assert(mask.min_array_size() <= size_);
this->materialize_to_uninitialized_impl(mask, dst);
this->get_multiple_to_uninitialized_impl(dst, mask);
}
void GVArray::materialize_to_uninitialized_impl(const IndexMask mask, void *dst) const
void GVArray::get_multiple_to_uninitialized_impl(void *dst, IndexMask mask) const
{
for (const int64_t i : mask) {
mask.foreach_index([&](const int64_t i) {
void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
this->get_to_uninitialized(i, elem_dst);
}
});
}
void GVArray::get_impl(const int64_t index, void *r_value) const
@@ -160,19 +185,53 @@ void GVMutableArray::set_by_relocate_impl(const int64_t index, void *value)
type_->destruct(value);
}
void GVMutableArray::set_all_impl(const void *src)
void GVMutableArray::set_multiple_by_copy(const GVArray &src_varray)
{
if (this->is_span()) {
const GMutableSpan span = this->get_internal_span();
type_->copy_assign_n(src, span.data(), size_);
this->set_multiple_by_copy(src_varray, IndexMask(size_));
}
/* Convenience overload: copy all elements from a generic span into this array. */
void GVMutableArray::set_multiple_by_copy(const GSpan &src)
{
  this->set_multiple_by_copy(GVArray_For_GSpan{src});
}
void GVMutableArray::set_multiple_by_copy(const GVArray &src_varray, const IndexMask mask)
{
if (src_varray._can_get_multiple_efficiently(*this)) {
src_varray._get_multiple(*this, mask);
}
else {
for (int64_t i : IndexRange(size_)) {
this->set_by_copy(i, POINTER_OFFSET(src, type_->size() * i));
}
this->_set_multiple_by_copy(src_varray, mask);
}
}
/* Internal entry point: validate the source and mask, then dispatch to the subclass
 * implementation. Callers go through `set_multiple_by_copy()` instead. */
void GVMutableArray::_set_multiple_by_copy(const GVArray &src_varray, const IndexMask mask)
{
  BLI_assert(src_varray.type() == *type_);
  BLI_assert(mask.min_array_size() <= size_);
  BLI_assert(mask.min_array_size() <= src_varray.size());
  this->set_multiple_by_copy_impl(src_varray, mask);
}
/* Non-virtual wrapper: ask the subclass whether it has a fast bulk-assign path. */
bool GVMutableArray::_can_set_multiple_efficiently(const GVArray &src_varray) const
{
  return this->can_set_multiple_efficiently_impl(src_varray);
}
void GVMutableArray::set_multiple_by_copy_impl(const GVArray &src_varray, const IndexMask mask)
{
BUFFER_FOR_CPP_TYPE_VALUE(*type_, buffer);
for (const int64_t i : mask) {
src_varray.get_to_uninitialized(i, buffer);
this->set_by_relocate(i, buffer);
}
}
/* Default: no fast bulk-assign path; subclasses with span/single storage override this. */
bool GVMutableArray::can_set_multiple_efficiently_impl(const GVArray &UNUSED(src_varray)) const
{
  return false;
}
void *GVMutableArray::try_get_internal_mutable_varray_impl()
{
return nullptr;
@@ -205,6 +264,23 @@ void GVArray_For_GSpan::get_to_uninitialized_impl(const int64_t index, void *r_v
type_->copy_construct(POINTER_OFFSET(data_, element_size_ * index), r_value);
}
void GVArray_For_GSpan::get_multiple_impl(GVMutableArray &dst_varray, IndexMask mask) const
{
  /* Every source element lives at a fixed byte offset in the underlying span. */
  for (const int64_t i : mask) {
    dst_varray.set_by_copy(i, POINTER_OFFSET(data_, element_size_ * i));
  }
}
/* Bulk copy-construct the selected span elements directly into uninitialized memory. */
void GVArray_For_GSpan::get_multiple_to_uninitialized_impl(void *dst, IndexMask mask) const
{
  type_->copy_construct_indices(data_, dst, mask);
}
/* Span-to-span copies can use the fast bulk path. */
bool GVArray_For_GSpan::can_get_multiple_efficiently_impl(const GVMutableArray &dst_varray) const
{
  return dst_varray.is_span();
}
bool GVArray_For_GSpan::is_span_impl() const
{
return true;
@@ -230,6 +306,26 @@ void GVMutableArray_For_GMutableSpan::get_to_uninitialized_impl(const int64_t in
type_->copy_construct(POINTER_OFFSET(data_, element_size_ * index), r_value);
}
void GVMutableArray_For_GMutableSpan::get_multiple_impl(GVMutableArray &dst_varray,
                                                        IndexMask mask) const
{
  /* Elements are stored contiguously; copy each masked one from its byte offset. */
  for (const int64_t i : mask) {
    dst_varray.set_by_copy(i, POINTER_OFFSET(data_, element_size_ * i));
  }
}
/* Bulk copy-construct the selected span elements directly into uninitialized memory. */
void GVMutableArray_For_GMutableSpan::get_multiple_to_uninitialized_impl(void *dst,
                                                                         IndexMask mask) const
{
  type_->copy_construct_indices(data_, dst, mask);
}
/* Span-to-span copies can use the fast bulk path. */
bool GVMutableArray_For_GMutableSpan::can_get_multiple_efficiently_impl(
    const GVMutableArray &dst_varray) const
{
  return dst_varray.is_span();
}
void GVMutableArray_For_GMutableSpan::set_by_copy_impl(const int64_t index, const void *value)
{
type_->copy_assign(value, POINTER_OFFSET(data_, element_size_ * index));
@@ -245,6 +341,34 @@ void GVMutableArray_For_GMutableSpan::set_by_relocate_impl(const int64_t index,
type_->relocate_assign(value, POINTER_OFFSET(data_, element_size_ * index));
}
void GVMutableArray_For_GMutableSpan::set_multiple_by_copy_impl(const GVArray &src_varray,
IndexMask mask)
{
if (src_varray.is_span()) {
const void *src_ptr = src_varray.get_internal_span().data();
type_->copy_assign_indices(src_ptr, data_, mask);
}
else if (src_varray.is_single()) {
BUFFER_FOR_CPP_TYPE_VALUE(*type_, buffer);
src_varray.get_internal_single(buffer);
type_->fill_assign_indices(buffer, data_, mask);
type_->destruct(buffer);
}
else {
BUFFER_FOR_CPP_TYPE_VALUE(*type_, buffer);
mask.foreach_index([&](const int64_t i) {
src_varray.get_to_uninitialized(i, buffer);
type_->relocate_assign(buffer, POINTER_OFFSET(data_, element_size_ * i));
});
}
}
/* Span and single-value sources both have dedicated fast assignment paths above. */
bool GVMutableArray_For_GMutableSpan::can_set_multiple_efficiently_impl(
    const GVArray &src_varray) const
{
  return src_varray.is_span() || src_varray.is_single();
}
bool GVMutableArray_For_GMutableSpan::is_span_impl() const
{
return true;
@@ -270,6 +394,30 @@ void GVArray_For_SingleValueRef::get_to_uninitialized_impl(const int64_t UNUSED(
type_->copy_construct(value_, r_value);
}
void GVArray_For_SingleValueRef::get_multiple_impl(GVMutableArray &dst_varray,
                                                   IndexMask mask) const
{
  /* Fast path: the destination is a span, so fill all selected slots in one call. */
  if (dst_varray.is_span()) {
    void *dst_data = dst_varray.get_internal_span().data();
    type_->fill_assign_indices(value_, dst_data, mask);
    return;
  }
  /* Otherwise copy the single value into each selected index individually. */
  for (const int64_t i : mask) {
    dst_varray.set_by_copy(i, value_);
  }
}
/* Fill-construct the single value into every selected slot of uninitialized memory. */
void GVArray_For_SingleValueRef::get_multiple_to_uninitialized_impl(void *dst,
                                                                    IndexMask mask) const
{
  type_->fill_construct_indices(value_, dst, mask);
}
/* Splatting a single value into a span destination uses the fast fill path. */
bool GVArray_For_SingleValueRef::can_get_multiple_efficiently_impl(
    const GVMutableArray &dst_varray) const
{
  return dst_varray.is_span();
}
bool GVArray_For_SingleValueRef::is_span_impl() const
{
return size_ == 1;
@@ -321,7 +469,7 @@ GVArray_GSpan::GVArray_GSpan(const GVArray &varray) : GSpan(varray.type()), varr
}
else {
owned_data_ = MEM_mallocN_aligned(type_->size() * size_, type_->alignment(), __func__);
varray_.materialize_to_uninitialized(IndexRange(size_), owned_data_);
varray_.get_multiple_to_uninitialized(owned_data_);
data_ = owned_data_;
}
}
@@ -348,7 +496,7 @@ GVMutableArray_GSpan::GVMutableArray_GSpan(GVMutableArray &varray, const bool co
else {
owned_data_ = MEM_mallocN_aligned(type_->size() * size_, type_->alignment(), __func__);
if (copy_values_to_span) {
varray_.materialize_to_uninitialized(IndexRange(size_), owned_data_);
varray_.get_multiple_to_uninitialized(owned_data_);
}
else {
type_->default_construct_n(owned_data_, size_);

View File

@@ -156,7 +156,7 @@ void CustomMF_GenericCopy::call(IndexMask mask, MFParams params, MFContext UNUSE
case MFDataType::Single: {
const GVArray &inputs = params.readonly_single_input(0, "Input");
GMutableSpan outputs = params.uninitialized_single_output(1, "Output");
inputs.materialize_to_uninitialized(mask, outputs.data());
inputs.get_multiple_to_uninitialized(outputs.data(), mask);
break;
}
case MFDataType::Vector: {

View File

@@ -53,7 +53,7 @@ void ParallelMultiFunction::call(IndexMask full_mask, MFParams params, MFContext
const int64_t input_slice_size = full_mask[mask_slice.last()] - input_slice_start + 1;
const IndexRange input_slice_range{input_slice_start, input_slice_size};
MFParamsBuilder sub_params{fn_, sub_mask.min_array_size()};
MFParamsBuilder sub_params{fn_, &sub_mask};
ResourceScope &scope = sub_params.resource_scope();
/* All parameters are sliced so that the wrapped multi-function does not have to take care of
@@ -63,8 +63,32 @@ void ParallelMultiFunction::call(IndexMask full_mask, MFParams params, MFContext
switch (param_type.category()) {
case MFParamType::SingleInput: {
const GVArray &varray = params.readonly_single_input(param_index);
const GVArray &sliced_varray = scope.construct<GVArray_Slice>(varray, input_slice_range);
sub_params.add_readonly_single_input(sliced_varray);
if (varray.is_single()) {
sub_params.add_readonly_single_input(varray);
}
else if (varray.is_span()) {
const GSpan span = varray.get_internal_span();
const GVArray &sliced_varray = scope.construct<GVArray_For_GSpan>(
span.slice(input_slice_start, input_slice_size));
sub_params.add_readonly_single_input(sliced_varray);
}
else {
/* Copy non-standard virtual arrays into array for faster access. */
const CPPType &type = param_type.data_type().single_type();
void *buffer = scope.linear_allocator().allocate(
sub_mask.min_array_size() * type.size(), type.alignment());
const IndexMask full_mask_slice = full_mask.slice(mask_slice);
void *fake_buffer_start = POINTER_OFFSET(buffer, -type.size() * full_mask_slice[0]);
varray.get_multiple_to_uninitialized(fake_buffer_start, full_mask_slice);
sub_params.add_readonly_single_input(GSpan(type, buffer, sub_mask.min_array_size()));
if (!type.is_trivially_destructible()) {
scope.add_destruct_call(
[&type, buffer, &sub_mask]() { type.destruct_indices(buffer, sub_mask); });
}
}
break;
}
case MFParamType::SingleMutable: {

View File

@@ -413,8 +413,8 @@ class VariableState : NonCopyable, NonMovable {
}
if (value_->type == ValueType::GVArray) {
/* Fill new buffer with data from virtual array. */
this->value_as<VariableValue_GVArray>()->data.materialize_to_uninitialized(
full_mask, new_value->data);
this->value_as<VariableValue_GVArray>()->data.get_multiple_to_uninitialized(
new_value->data, full_mask);
}
else if (value_->type == ValueType::OneSingle) {
auto *old_value_typed_ = this->value_as<VariableValue_OneSingle>();

View File

@@ -202,7 +202,7 @@ static Mesh *compute_hull(const GeometrySet &geometry_set)
const MeshComponent *component = geometry_set.get_component_for_read<MeshComponent>();
GVArray_Typed<float3> varray = component->attribute_get_for_read<float3>(
"position", ATTR_DOMAIN_POINT, {0, 0, 0});
varray->materialize(positions.as_mutable_span().slice(offset, varray.size()));
varray->get_multiple(positions.as_mutable_span().slice(offset, varray.size()));
offset += varray.size();
}
@@ -211,7 +211,7 @@ static Mesh *compute_hull(const GeometrySet &geometry_set)
geometry_set.get_component_for_read<PointCloudComponent>();
GVArray_Typed<float3> varray = component->attribute_get_for_read<float3>(
"position", ATTR_DOMAIN_POINT, {0, 0, 0});
varray->materialize(positions.as_mutable_span().slice(offset, varray.size()));
varray->get_multiple(positions.as_mutable_span().slice(offset, varray.size()));
offset += varray.size();
}

View File

@@ -141,8 +141,8 @@ static SplinePtr resample_spline_evaluated(const Spline &src)
dst->positions().copy_from(src.evaluated_positions());
dst->positions().copy_from(src.evaluated_positions());
src.interpolate_to_evaluated(src.radii())->materialize(dst->radii());
src.interpolate_to_evaluated(src.tilts())->materialize(dst->tilts());
src.interpolate_to_evaluated(src.radii())->get_multiple(dst->radii());
src.interpolate_to_evaluated(src.tilts())->get_multiple(dst->tilts());
src.attributes.foreach_attribute(
[&](const AttributeIDRef &attribute_id, const AttributeMetaData &meta_data) {
@@ -150,7 +150,7 @@ static SplinePtr resample_spline_evaluated(const Spline &src)
if (dst->attributes.create(attribute_id, meta_data.data_type)) {
std::optional<GMutableSpan> dst_attribute = dst->attributes.get_for_write(attribute_id);
if (dst_attribute) {
src.interpolate_to_evaluated(*src_attribute)->materialize(dst_attribute->data());
src.interpolate_to_evaluated(*src_attribute)->get_multiple(*dst_attribute);
return true;
}
}

View File

@@ -177,8 +177,10 @@ static void copy_evaluated_point_attributes(Span<SplinePtr> splines,
const int size = offsets[i + 1] - offsets[i];
data.positions.slice(offset, size).copy_from(spline.evaluated_positions());
spline.interpolate_to_evaluated(spline.radii())->materialize(data.radii.slice(offset, size));
spline.interpolate_to_evaluated(spline.tilts())->materialize(data.tilts.slice(offset, size));
spline.interpolate_to_evaluated(spline.radii())
->get_multiple(data.radii.slice(offset, size));
spline.interpolate_to_evaluated(spline.tilts())
->get_multiple(data.tilts.slice(offset, size));
for (const Map<AttributeIDRef, GMutableSpan>::Item &item : data.point_attributes.items()) {
const AttributeIDRef attribute_id = item.key;
@@ -187,8 +189,7 @@ static void copy_evaluated_point_attributes(Span<SplinePtr> splines,
BLI_assert(spline.attributes.get_for_read(attribute_id));
GSpan spline_span = *spline.attributes.get_for_read(attribute_id);
spline.interpolate_to_evaluated(spline_span)
->materialize(point_span.slice(offset, size).data());
spline.interpolate_to_evaluated(spline_span)->get_multiple(point_span.slice(offset, size));
}
data.tangents.slice(offset, size).copy_from(spline.evaluated_tangents());

View File

@@ -321,7 +321,7 @@ static void ensure_control_point_attribute(const AttributeIDRef &attribute_id,
const DataTypeConversions &conversions = blender::nodes::get_implicit_type_conversions();
conversions.try_convert(std::make_unique<GVArray_For_GSpan>(*attribute), type)
->materialize(converted_buffer);
->get_multiple(GMutableSpan{type, converted_buffer, spline->size()});
spline->attributes.remove(attribute_id);
spline->attributes.create_by_move(attribute_id, data_type, converted_buffer);