
Compare commits


11 Commits

SHA1 Message Date
cf059c950e cleanup 2022-04-06 16:13:37 +02:00
b21856dbee Merge branch 'master' into varray-improve 2022-04-06 15:51:18 +02:00
0d66f3ae0d progress 2022-04-06 15:47:58 +02:00
55661f7c21 progress 2022-04-06 15:28:36 +02:00
1777772a17 progress 2022-04-06 15:27:06 +02:00
8897e4a696 improve ownership hint 2022-04-06 15:11:46 +02:00
64018d55cf progress 2022-04-06 14:01:53 +02:00
6b2c1016df progress 2022-04-06 13:50:59 +02:00
55b130f0f3 progress 2022-04-06 13:13:27 +02:00
8c46337e47 materialize compressed 2022-04-06 12:40:24 +02:00
0098cf57d0 add execute_SI_SI_SI_SI_SO 2022-04-06 12:24:17 +02:00
9 changed files with 466 additions and 25 deletions

View File

@@ -118,9 +118,11 @@ class CPPType : NonCopyable, NonMovable {
void (*copy_assign_)(const void *src, void *dst) = nullptr;
void (*copy_assign_indices_)(const void *src, void *dst, IndexMask mask) = nullptr;
void (*copy_assign_compressed_)(const void *src, void *dst, IndexMask mask) = nullptr;
void (*copy_construct_)(const void *src, void *dst) = nullptr;
void (*copy_construct_indices_)(const void *src, void *dst, IndexMask mask) = nullptr;
void (*copy_construct_compressed_)(const void *src, void *dst, IndexMask mask) = nullptr;
void (*move_assign_)(void *src, void *dst) = nullptr;
void (*move_assign_indices_)(void *src, void *dst, IndexMask mask) = nullptr;
@@ -408,6 +410,15 @@ class CPPType : NonCopyable, NonMovable {
copy_assign_indices_(src, dst, mask);
}
void copy_assign_compressed(const void *src, void *dst, IndexMask mask) const
{
BLI_assert(mask.size() == 0 || src != dst);
BLI_assert(mask.size() == 0 || this->pointer_can_point_to_instance(src));
BLI_assert(mask.size() == 0 || this->pointer_can_point_to_instance(dst));
copy_assign_compressed_(src, dst, mask);
}
/**
* Copy an instance of this type from src to dst.
*
@@ -439,6 +450,15 @@ class CPPType : NonCopyable, NonMovable {
copy_construct_indices_(src, dst, mask);
}
void copy_construct_compressed(const void *src, void *dst, IndexMask mask) const
{
BLI_assert(mask.size() == 0 || src != dst);
BLI_assert(mask.size() == 0 || this->pointer_can_point_to_instance(src));
BLI_assert(mask.size() == 0 || this->pointer_can_point_to_instance(dst));
copy_construct_compressed_(src, dst, mask);
}
/**
* Move an instance of this type from src to dst.
*
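For clarity, the new copy_assign_compressed and copy_construct_compressed callbacks implement a gather: the source element at mask[i] lands at index i of the destination, with no gaps. A minimal standalone sketch of that semantic (plain C++, hypothetical helper, not part of this patch):

#include <cstdint>
#include <vector>

/* The i-th selected source element is written densely at index i. */
template<typename T>
void gather_compressed(const T *src, T *dst, const std::vector<int64_t> &mask)
{
  for (int64_t i = 0; i < int64_t(mask.size()); i++) {
    dst[i] = src[mask[i]]; /* Read at mask[i], write at i. */
  }
}

With src = {0, 10, 20, 30} and mask = {1, 3}, dst receives {10, 30}; the existing copy_assign_indices callbacks would instead write to dst[1] and dst[3].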

View File

@@ -51,6 +51,17 @@ template<typename T> void copy_assign_indices_cb(const void *src, void *dst, IndexMask mask)
mask.foreach_index([&](int64_t i) { dst_[i] = src_[i]; });
}
template<typename T> void copy_assign_compressed_cb(const void *src, void *dst, IndexMask mask)
{
const T *src_ = static_cast<const T *>(src);
T *dst_ = static_cast<T *>(dst);
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
dst_[i] = src_[best_mask[i]];
}
});
}
template<typename T> void copy_construct_cb(const void *src, void *dst)
{
@@ -63,6 +74,17 @@ template<typename T> void copy_construct_indices_cb(const void *src, void *dst, IndexMask mask)
mask.foreach_index([&](int64_t i) { new (dst_ + i) T(src_[i]); });
}
template<typename T> void copy_construct_compressed_cb(const void *src, void *dst, IndexMask mask)
{
const T *src_ = static_cast<const T *>(src);
T *dst_ = static_cast<T *>(dst);
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
new (dst_ + i) T(src_[best_mask[i]]);
}
});
}
template<typename T> void move_assign_cb(void *src, void *dst)
{
@@ -208,10 +230,12 @@ CPPType::CPPType(CPPTypeParam<T, Flags> /* unused */, StringRef debug_name)
if constexpr (std::is_copy_assignable_v<T>) {
copy_assign_ = copy_assign_cb<T>;
copy_assign_indices_ = copy_assign_indices_cb<T>;
copy_assign_compressed_ = copy_assign_compressed_cb<T>;
}
if constexpr (std::is_copy_constructible_v<T>) {
copy_construct_ = copy_construct_cb<T>;
copy_construct_indices_ = copy_construct_indices_cb<T>;
copy_construct_compressed_ = copy_construct_compressed_cb<T>;
}
if constexpr (std::is_move_assignable_v<T>) {
move_assign_ = move_assign_cb<T>;
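A note on the loop shape used by these callbacks: to_best_mask_type invokes the lambda with either an IndexRange (contiguous mask) or a raw span of indices, so the gather loop is compiled once per mask representation and best_mask[i] stays cheap in both. The same pattern in isolation, as a hedged sketch assuming this branch's BLI headers:

#include <cstdint>

#include "BLI_index_mask.hh"
#include "BLI_index_range.hh"

/* Dispatch on the mask representation, then run a tight gather loop. */
template<typename T>
void gather(const T *src, T *dst, const blender::IndexMask mask)
{
  mask.to_best_mask_type([&](auto best_mask) {
    for (const int64_t i : blender::IndexRange(best_mask.size())) {
      dst[i] = src[best_mask[i]];
    }
  });
}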

View File

@@ -51,6 +51,9 @@ class GVArrayImpl {
virtual void materialize(const IndexMask mask, void *dst) const;
virtual void materialize_to_uninitialized(const IndexMask mask, void *dst) const;
virtual void materialize_compressed(IndexMask mask, void *dst) const;
virtual void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;
virtual bool try_assign_VArray(void *varray) const;
virtual bool may_have_ownership() const;
};
@@ -133,6 +136,9 @@ class GVArrayCommon {
void materialize_to_uninitialized(void *dst) const;
void materialize_to_uninitialized(const IndexMask mask, void *dst) const;
void materialize_compressed(IndexMask mask, void *dst) const;
void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;
/**
* Returns true when the virtual array is stored as a span internally.
*/
@@ -336,6 +342,16 @@ template<typename T> class GVArrayImpl_For_VArray : public GVArrayImpl {
varray_.materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
}
void materialize_compressed(const IndexMask mask, void *dst) const override
{
varray_.materialize_compressed(mask, MutableSpan((T *)dst, mask.size()));
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
{
varray_.materialize_compressed_to_uninitialized(mask, MutableSpan((T *)dst, mask.size()));
}
bool try_assign_VArray(void *varray) const override
{
*(VArray<T> *)varray = varray_;
@@ -400,6 +416,27 @@ template<typename T> class VArrayImpl_For_GVArray : public VArrayImpl<T> {
{
return varray_.may_have_ownership();
}
void materialize(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize(mask, r_span.data());
}
void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize_to_uninitialized(mask, r_span.data());
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
varray_.materialize_compressed(mask, r_span.data());
}
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<T> r_span) const override
{
varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
}
};
/* Used to convert any typed virtual mutable array into a generic one. */
@@ -479,6 +516,16 @@ template<typename T> class GVMutableArrayImpl_For_VMutableArray : public GVMutableArrayImpl {
varray_.materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
}
void materialize_compressed(const IndexMask mask, void *dst) const override
{
varray_.materialize_compressed(mask, MutableSpan((T *)dst, mask.size()));
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
{
varray_.materialize_compressed_to_uninitialized(mask, MutableSpan((T *)dst, mask.size()));
}
bool try_assign_VArray(void *varray) const override
{
*(VArray<T> *)varray = varray_;
@@ -590,6 +637,13 @@ class GVArrayImpl_For_GSpan : public GVMutableArrayImpl {
bool is_span() const override;
GSpan get_internal_span() const override;
virtual void materialize(const IndexMask mask, void *dst) const override;
virtual void materialize_to_uninitialized(const IndexMask mask, void *dst) const override;
virtual void materialize_compressed(const IndexMask mask, void *dst) const override;
virtual void materialize_compressed_to_uninitialized(const IndexMask mask,
void *dst) const override;
};
/** \} */
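From the caller's side, the new generic entry points mirror the typed ones: the destination must provide storage for exactly mask.size() elements of varray.type(). A hedged usage sketch (the helper function is illustrative, not part of this patch):

#include "BLI_generic_virtual_array.hh"

/* Gather elements 2, 5 and 7 of a type-erased array densely into dst. dst must
 * point to uninitialized storage for 3 elements of varray.type(); the caller is
 * responsible for destructing them later. */
static void gather_three(const blender::GVArray &varray, void *dst)
{
  varray.materialize_compressed_to_uninitialized({2, 5, 7}, dst);
}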

View File

@@ -107,7 +107,7 @@ template<typename T> class VArrayImpl {
/**
* Copy values from the virtual array into the provided span. The index of the value in the
* virtual is the same as the index in the span.
* virtual array is the same as the index in the span.
*/
virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
{
@@ -146,6 +146,35 @@ template<typename T> class VArrayImpl {
}
}
/**
* Copy values from the virtual array into the provided span. Contrary to #materialize, the index
in the virtual array is not the same as the index in the output span. Instead, the span is filled
* without gaps.
*/
virtual void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
{
BLI_assert(mask.size() == r_span.size());
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
r_span[i] = this->get(best_mask[i]);
}
});
}
/**
* Same as #materialize_compressed but #r_span is expected to be uninitialized.
*/
virtual void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
{
BLI_assert(mask.size() == r_span.size());
T *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
new (dst + i) T(this->get(best_mask[i]));
}
});
}
/**
* If this virtual array wraps another #GVArray, this method should assign the wrapped array to the
* provided reference. This allows losslessly converting between generic and typed virtual
@@ -265,6 +294,25 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
const Span<T> other_span = other.get_internal_span();
return data_ == other_span.data();
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
r_span[i] = data_[best_mask[i]];
}
});
}
void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
{
T *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
new (dst + i) T(data_[best_mask[i]]);
}
});
}
};
/**
@@ -341,6 +389,20 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
{
return value_;
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
UNUSED_VARS_NDEBUG(mask);
r_span.fill(value_);
}
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<T> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
uninitialized_fill_n(r_span.data(), mask.size(), value_);
}
};
/**
@@ -374,6 +436,34 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public VArrayImpl<T> {
T *dst = r_span.data();
mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
}
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
T *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
dst[i] = get_func_(best_mask[i]);
}
});
}
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<T> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
T *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
new (dst + i) T(get_func_(best_mask[i]));
}
});
}
bool may_have_ownership() const override
{
return !std::is_trivially_destructible_v<GetFunc>;
}
};
/**
@@ -422,6 +512,29 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
}
void materialize_compressed(IndexMask mask, MutableSpan<ElemT> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
ElemT *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
dst[i] = GetFunc(data_[best_mask[i]]);
}
});
}
void materialize_compressed_to_uninitialized(IndexMask mask,
MutableSpan<ElemT> r_span) const override
{
BLI_assert(mask.size() == r_span.size());
ElemT *dst = r_span.data();
mask.to_best_mask_type([&](auto best_mask) {
for (const int64_t i : IndexRange(best_mask.size())) {
new (dst + i) ElemT(GetFunc(data_[best_mask[i]]));
}
});
}
bool may_have_ownership() const override
{
return false;
@@ -740,6 +853,17 @@ template<typename T> class VArrayCommon {
impl_->materialize_to_uninitialized(mask, r_span);
}
/** Copy some elements of the virtual array into a span. */
void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
{
impl_->materialize_compressed(mask, r_span);
}
void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
{
impl_->materialize_compressed_to_uninitialized(mask, r_span);
}
/** See #GVArrayImpl::try_assign_GVArray. */
bool try_assign_GVArray(GVArray &varray) const
{
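The difference from the plain materialize methods is only the output layout: materialize writes r_span[i] for every masked index i and leaves the gaps untouched, while materialize_compressed packs results densely. A hedged sketch using the same factory functions as the tests below:

#include <array>

#include "BLI_virtual_array.hh"

static void layout_example()
{
  using namespace blender;
  std::array<int, 5> data = {0, 10, 20, 30, 40};
  VArray<int> varray = VArray<int>::ForSpan(data);

  std::array<int, 5> sparse = {-1, -1, -1, -1, -1};
  varray.materialize({1, 3}, sparse); /* sparse == {-1, 10, -1, 30, -1} */

  std::array<int, 2> dense;
  varray.materialize_compressed({1, 3}, dense); /* dense == {10, 30} */
}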

View File

@@ -24,6 +24,22 @@ void GVArrayImpl::materialize_to_uninitialized(const IndexMask mask, void *dst) const
}
}
void GVArrayImpl::materialize_compressed(IndexMask mask, void *dst) const
{
for (const int64_t i : mask.index_range()) {
void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
this->get(mask[i], elem_dst);
}
}
void GVArrayImpl::materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const
{
for (const int64_t i : mask.index_range()) {
void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
this->get_to_uninitialized(mask[i], elem_dst);
}
}
void GVArrayImpl::get(const int64_t index, void *r_value) const
{
type_->destruct(r_value);
@@ -172,6 +188,27 @@ GSpan GVArrayImpl_For_GSpan::get_internal_span() const
return GSpan(*type_, data_, size_);
}
void GVArrayImpl_For_GSpan::materialize(const IndexMask mask, void *dst) const
{
type_->copy_assign_indices(data_, dst, mask);
}
void GVArrayImpl_For_GSpan::materialize_to_uninitialized(const IndexMask mask, void *dst) const
{
type_->copy_construct_indices(data_, dst, mask);
}
void GVArrayImpl_For_GSpan::materialize_compressed(const IndexMask mask, void *dst) const
{
type_->copy_assign_compressed(data_, dst, mask);
}
void GVArrayImpl_For_GSpan::materialize_compressed_to_uninitialized(const IndexMask mask,
void *dst) const
{
type_->copy_construct_compressed(data_, dst, mask);
}
class GVArrayImpl_For_GSpan_final final : public GVArrayImpl_For_GSpan {
public:
using GVArrayImpl_For_GSpan::GVArrayImpl_For_GSpan;
@@ -231,6 +268,26 @@ class GVArrayImpl_For_SingleValueRef : public GVArrayImpl {
{
type_->copy_assign(value_, r_value);
}
void materialize(const IndexMask mask, void *dst) const override
{
type_->fill_assign_indices(value_, dst, mask);
}
void materialize_to_uninitialized(const IndexMask mask, void *dst) const override
{
type_->fill_construct_indices(value_, dst, mask);
}
void materialize_compressed(const IndexMask mask, void *dst) const override
{
type_->fill_assign_n(value_, dst, mask.size());
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
{
type_->fill_construct_n(value_, dst, mask.size());
}
};
class GVArrayImpl_For_SingleValueRef_final final : public GVArrayImpl_For_SingleValueRef {
@@ -448,6 +505,22 @@ class GVArrayImpl_For_SlicedGVArray : public GVArrayImpl {
{
varray_.get_internal_single(r_value);
}
void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
{
if (mask.is_range()) {
const IndexRange mask_range = mask.as_range();
const IndexRange offset_mask_range{mask_range.start() + offset_, mask_range.size()};
varray_.materialize_compressed_to_uninitialized(offset_mask_range, dst);
}
else {
Vector<int64_t, 32> offset_mask_indices(mask.size());
for (const int64_t i : mask.index_range()) {
offset_mask_indices[i] = mask[i] + offset_;
}
varray_.materialize_compressed_to_uninitialized(offset_mask_indices.as_span(), dst);
}
}
};
/** \} */
@@ -505,6 +578,16 @@ void GVArrayCommon::materialize_to_uninitialized(const IndexMask mask, void *dst) const
impl_->materialize_to_uninitialized(mask, dst);
}
void GVArrayCommon::materialize_compressed(IndexMask mask, void *dst) const
{
impl_->materialize_compressed(mask, dst);
}
void GVArrayCommon::materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const
{
impl_->materialize_compressed_to_uninitialized(mask, dst);
}
bool GVArrayCommon::may_have_ownership() const
{
return impl_->may_have_ownership();
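The base-class fallbacks above address elements by raw byte offsets, stepping in type_->size() strides. A standalone illustration of that addressing for a trivially copyable element type (memcpy stands in for CPPType's copy semantics; this is not the patch's code):

#include <cstdint>
#include <cstring>

void gather_bytes(const void *src, void *dst, const int64_t *mask_indices,
                  const int64_t mask_size, const int64_t elem_size)
{
  for (int64_t i = 0; i < mask_size; i++) {
    const void *elem_src = static_cast<const char *>(src) + elem_size * mask_indices[i];
    void *elem_dst = static_cast<char *>(dst) + elem_size * i;
    std::memcpy(elem_dst, elem_src, size_t(elem_size));
  }
}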

View File

@@ -180,4 +180,46 @@ TEST(virtual_array, MutableToImmutable)
}
}
TEST(virtual_array, MaterializeCompressed)
{
{
std::array<int, 10> array = {0, 10, 20, 30, 40, 50, 60, 70, 80, 90};
VArray<int> varray = VArray<int>::ForSpan(array);
std::array<int, 3> compressed_array;
varray.materialize_compressed({3, 6, 7}, compressed_array);
EXPECT_EQ(compressed_array[0], 30);
EXPECT_EQ(compressed_array[1], 60);
EXPECT_EQ(compressed_array[2], 70);
varray.materialize_compressed_to_uninitialized({2, 8, 9}, compressed_array);
EXPECT_EQ(compressed_array[0], 20);
EXPECT_EQ(compressed_array[1], 80);
EXPECT_EQ(compressed_array[2], 90);
}
{
VArray<int> varray = VArray<int>::ForSingle(4, 10);
std::array<int, 3> compressed_array;
varray.materialize_compressed({2, 6, 7}, compressed_array);
EXPECT_EQ(compressed_array[0], 4);
EXPECT_EQ(compressed_array[1], 4);
EXPECT_EQ(compressed_array[2], 4);
compressed_array.fill(0);
varray.materialize_compressed_to_uninitialized({0, 1, 2}, compressed_array);
EXPECT_EQ(compressed_array[0], 4);
EXPECT_EQ(compressed_array[1], 4);
EXPECT_EQ(compressed_array[2], 4);
}
{
VArray<int> varray = VArray<int>::ForFunc(10, [](const int64_t i) { return (int)(i * i); });
std::array<int, 3> compressed_array;
varray.materialize_compressed({5, 7, 8}, compressed_array);
EXPECT_EQ(compressed_array[0], 25);
EXPECT_EQ(compressed_array[1], 49);
EXPECT_EQ(compressed_array[2], 64);
varray.materialize_compressed_to_uninitialized({1, 2, 3}, compressed_array);
EXPECT_EQ(compressed_array[0], 1);
EXPECT_EQ(compressed_array[1], 4);
EXPECT_EQ(compressed_array[2], 9);
}
}
} // namespace blender::tests

View File

@@ -47,11 +47,53 @@ template<typename In1, typename Out1> class CustomMF_SI_SO : public MultiFunction {
template<typename ElementFuncT> static FunctionT create_function(ElementFuncT element_fn)
{
return [=](IndexMask mask, const VArray<In1> &in1, MutableSpan<Out1> out1) {
/* Devirtualization results in a 2-3x speedup for some simple functions. */
devirtualize_varray(in1, [&](const auto &in1) {
mask.to_best_mask_type(
[&](const auto &mask) { execute_SI_SO(element_fn, mask, in1, out1.data()); });
});
const int64_t mask_size = mask.size();
const bool in1_is_single = in1.is_single();
const bool in1_is_span = in1.is_span();
static constexpr int64_t MaxChunkSize = 32;
/* TODO: Elements constructed in this buffer are never destructed, which is only correct for
 * trivially destructible types so far. */
TypedBuffer<In1, MaxChunkSize> in1_buffer_owner;
MutableSpan<In1> in1_buffer{in1_buffer_owner.ptr(), MaxChunkSize};
if (in1_is_single) {
const In1 in1_single = in1.get_internal_single();
/* The buffer starts out uninitialized, so construct the values instead of assigning them. */
uninitialized_fill_n(in1_buffer.data(), MaxChunkSize, in1_single);
}
Span<In1> in1_span;
if (in1_is_span) {
in1_span = in1.get_internal_span();
}
for (int64_t chunk_start = 0; chunk_start < mask_size; chunk_start += MaxChunkSize) {
const int64_t chunk_size = std::min(mask_size - chunk_start, MaxChunkSize);
const IndexMask sliced_mask = mask.slice(chunk_start, chunk_size);
if (sliced_mask.is_range()) {
const IndexRange sliced_mask_range = sliced_mask.as_range();
Span<In1> in1_chunk;
if (in1_is_single) {
in1_chunk = in1_buffer;
}
else if (in1_is_span) {
in1_chunk = in1_span.slice(sliced_mask_range);
}
else {
in1.materialize_compressed_to_uninitialized(sliced_mask,
in1_buffer.take_front(chunk_size));
in1_chunk = in1_buffer;
}
/* Results are written to the original (masked) indices, not to compressed positions. */
execute_SI_SO(element_fn, IndexRange(chunk_size), in1_chunk, out1.data() + sliced_mask_range.start());
}
else {
/* Non-range mask: gather the inputs compressed, then scatter the results back to the
 * masked output indices. */
const Span<int64_t> sliced_mask_indices = sliced_mask.indices();
if (!in1_is_single) {
in1.materialize_compressed_to_uninitialized(sliced_mask, in1_buffer.take_front(chunk_size));
}
for (const int64_t i : IndexRange(chunk_size)) {
new (static_cast<void *>(out1.data() + sliced_mask_indices[i])) Out1(element_fn(in1_buffer[i]));
}
}
}
};
}
@@ -185,12 +227,27 @@ class CustomMF_SI_SI_SI_SO : public MultiFunction {
MutableSpan<Out1> out1) {
/* Virtual arrays are not devirtualized yet, to avoid generating lots of code without further
* consideration. */
for (const int64_t i : mask) {
new (static_cast<void *>(&out1[i])) Out1(element_fn(in1[i], in2[i], in3[i]));
}
execute_SI_SI_SI_SO(element_fn, mask, in1, in2, in3, out1.data());
};
}
template<typename ElementFuncT,
typename MaskT,
typename In1Array,
typename In2Array,
typename In3Array>
BLI_NOINLINE static void execute_SI_SI_SI_SO(const ElementFuncT &element_fn,
MaskT mask,
const In1Array &in1,
const In2Array &in2,
const In3Array &in3,
Out1 *__restrict r_out)
{
for (const int64_t i : mask) {
new (r_out + i) Out1(element_fn(in1[i], in2[i], in3[i]));
}
}
void call(IndexMask mask, MFParams params, MFContext UNUSED(context)) const override
{
const VArray<In1> &in1 = params.readonly_single_input<In1>(0);
@@ -250,12 +307,29 @@ class CustomMF_SI_SI_SI_SI_SO : public MultiFunction {
MutableSpan<Out1> out1) {
/* Virtual arrays are not devirtualized yet, to avoid generating lots of code without further
* consideration. */
for (const int64_t i : mask) {
new (static_cast<void *>(&out1[i])) Out1(element_fn(in1[i], in2[i], in3[i], in4[i]));
}
execute_SI_SI_SI_SI_SO(element_fn, mask, in1, in2, in3, in4, out1.data());
};
}
template<typename ElementFuncT,
typename MaskT,
typename In1Array,
typename In2Array,
typename In3Array,
typename In4Array>
BLI_NOINLINE static void execute_SI_SI_SI_SI_SO(const ElementFuncT &element_fn,
MaskT mask,
const In1Array &in1,
const In2Array &in2,
const In3Array &in3,
const In4Array &in4,
Out1 *__restrict r_out)
{
for (const int64_t i : mask) {
new (r_out + i) Out1(element_fn(in1[i], in2[i], in3[i], in4[i]));
}
}
void call(IndexMask mask, MFParams params, MFContext UNUSED(context)) const override
{
const VArray<In1> &in1 = params.readonly_single_input<In1>(0);
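The chunked create_function above trades per-element virtual calls for one gather into a small stack buffer plus a tight scalar loop. The strategy in isolation, as a standalone sketch with hypothetical names (no Blender types):

#include <algorithm>
#include <array>
#include <cstdint>
#include <vector>

/* read_fn stands in for a virtual-array element access. Inputs are gathered
 * densely (compressed) into a stack buffer, results are scattered back to the
 * original masked indices. */
template<typename T, typename ReadFn, typename ElementFn>
void process_in_chunks(const ReadFn &read_fn, const std::vector<int64_t> &mask,
                       T *r_out, const ElementFn &element_fn)
{
  constexpr int64_t MaxChunkSize = 32;
  std::array<T, MaxChunkSize> buffer;
  const int64_t mask_size = int64_t(mask.size());
  for (int64_t chunk_start = 0; chunk_start < mask_size; chunk_start += MaxChunkSize) {
    const int64_t chunk_size = std::min(mask_size - chunk_start, MaxChunkSize);
    for (int64_t i = 0; i < chunk_size; i++) {
      buffer[i] = read_fn(mask[chunk_start + i]); /* gather */
    }
    for (int64_t i = 0; i < chunk_size; i++) {
      r_out[mask[chunk_start + i]] = element_fn(buffer[i]); /* scatter */
    }
  }
}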

View File

@@ -468,16 +468,21 @@ Vector<GVArray> evaluate_fields(ResourceScope &scope,
/* Still have to copy over the data in the destination provided by the caller. */
if (dst_varray.is_span()) {
/* Materialize into a span. */
computed_varray.materialize_to_uninitialized(mask, dst_varray.get_internal_span().data());
threading::parallel_for(mask.index_range(), 2048, [&](const IndexRange range) {
computed_varray.materialize_to_uninitialized(mask.slice(range),
dst_varray.get_internal_span().data());
});
}
else {
/* Slower materialize into a different structure. */
const CPPType &type = computed_varray.type();
BUFFER_FOR_CPP_TYPE_VALUE(type, buffer);
for (const int i : mask) {
computed_varray.get_to_uninitialized(i, buffer);
dst_varray.set_by_relocate(i, buffer);
}
threading::parallel_for(mask.index_range(), 2048, [&](const IndexRange range) {
BUFFER_FOR_CPP_TYPE_VALUE(type, buffer);
for (const int i : mask.slice(range)) {
computed_varray.get_to_uninitialized(i, buffer);
dst_varray.set_by_relocate(i, buffer);
}
});
}
r_varrays[out_index] = dst_varray;
}
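A hedged sketch of the threading pattern introduced here (assuming BLI_task.hh): each worker handles a disjoint slice of the index range, so the destination needs no locking, and ranges below the grain size of 2048 stay single-threaded.

#include "BLI_span.hh"
#include "BLI_task.hh"

static void parallel_fill(blender::MutableSpan<int> dst)
{
  using namespace blender;
  threading::parallel_for(dst.index_range(), 2048, [&](const IndexRange range) {
    for (const int64_t i : range) {
      dst[i] = int(i); /* Each worker writes only its own slice. */
    }
  });
}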

View File

@@ -91,18 +91,33 @@ static void try_capture_field_on_geometry(GeometryComponent &component,
const int domain_size = component.attribute_domain_size(domain);
const IndexMask mask{IndexRange(domain_size)};
const CustomDataType data_type = bke::cpp_type_to_custom_data_type(field.cpp_type());
const CPPType &type = field.cpp_type();
const CustomDataType data_type = bke::cpp_type_to_custom_data_type(type);
void *buffer = MEM_mallocN_aligned(type.size() * domain_size, type.alignment(), __func__);
/* Don't evaluate directly into the attribute with #add_with_destination, because the field
 * might depend on an attribute with that name, and changing it during evaluation might affect
 * the result. Instead, evaluate into a separate buffer first. */
fn::FieldEvaluator evaluator{field_context, &mask};
evaluator.add(field);
evaluator.add_with_destination(field, GMutableSpan{type, buffer, domain_size});
evaluator.evaluate();
const GVArray &result = evaluator.get_evaluated(0);
OutputAttribute attribute = component.attribute_try_get_for_output_only(name, domain, data_type);
if (attribute) {
result.materialize(attribute.as_span().data());
attribute.save();
component.attribute_try_delete(name);
if (component.attribute_exists(name)) {
WriteAttributeLookup write_attribute = component.attribute_try_get_for_write(name);
if (write_attribute && write_attribute.domain == domain &&
write_attribute.varray.type() == type) {
write_attribute.varray.set_all(buffer);
write_attribute.tag_modified_fn();
}
else {
/* Cannot change the type of a built-in attribute; the computed result is discarded. */
}
type.destruct_n(buffer, domain_size);
MEM_freeN(buffer);
}
else {
component.attribute_try_create(name, domain, data_type, AttributeInitMove{buffer});
}
}
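The temporary buffer exists because the field may read the very attribute it is captured into; writing results directly would let later elements observe partially updated input. The evaluate-then-commit pattern in isolation (standalone sketch, std types only):

#include <cstddef>
#include <utility>
#include <vector>

/* Evaluate a function of the old values into scratch storage first, then
 * commit in one step, so the computation never observes its own output. */
template<typename T, typename Fn>
void evaluate_then_commit(std::vector<T> &attribute, const Fn &field_fn)
{
  std::vector<T> buffer(attribute.size());
  for (size_t i = 0; i < attribute.size(); i++) {
    buffer[i] = field_fn(attribute, i); /* Reads only the old values. */
  }
  attribute = std::move(buffer); /* Commit. */
}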