BLI: refactor IndexMask for better performance and memory usage #104629

Merged
Jacques Lucke merged 254 commits from JacquesLucke/blender:index-mask-refactor into main 2023-05-24 18:11:47 +02:00
12 changed files with 91 additions and 78 deletions
Showing only changes of commit 0fb6d648cc
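At a glance, the change to every call site in this commit is mechanical: the index type of `foreach_index_optimized` becomes an explicit template parameter, so each caller chooses the integer width the loop runs on. A minimal before/after sketch of the calling convention, using stand-in `src`/`dst` arrays rather than any real call site:

/* Before this commit: the index passed to the lambda was always int64_t. */
mask.foreach_index_optimized([&](const int64_t i) { dst[i] = src[i]; });

/* After: the caller names the index type. A 32-bit index, where it is
 * known to fit, gives the O3-compiled range loop a better shot at
 * vectorizing. */
mask.foreach_index_optimized<int>([&](const int i) { dst[i] = src[i]; });

/* The two-argument form also receives the index's position within the
 * mask, which compressed writes use. */
mask.foreach_index_optimized<int64_t>(
    [&](const int64_t i, const int64_t pos) { dst[pos] = src[i]; });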

View File

@@ -20,7 +20,7 @@ template<typename T> void default_construct_indices_cb(void *ptr, const IndexMas
   if constexpr (std::is_trivially_constructible_v<T>) {
     return;
   }
-  mask.foreach_index_optimized([&](int64_t i) { new (static_cast<T *>(ptr) + i) T; });
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) { new (static_cast<T *>(ptr) + i) T; });
 }

 template<typename T> void value_initialize_cb(void *ptr)
@@ -30,7 +30,7 @@ template<typename T> void value_initialize_cb(void *ptr)
 template<typename T> void value_initialize_indices_cb(void *ptr, const IndexMask &mask)
 {
-  mask.foreach_index_optimized([&](int64_t i) { new (static_cast<T *>(ptr) + i) T(); });
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) { new (static_cast<T *>(ptr) + i) T(); });
 }

 template<typename T> void destruct_cb(void *ptr)
@@ -43,7 +43,7 @@ template<typename T> void destruct_indices_cb(void *ptr, const IndexMask &mask)
     return;
   }
   T *ptr_ = static_cast<T *>(ptr);
-  mask.foreach_index_optimized([&](int64_t i) { ptr_[i].~T(); });
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) { ptr_[i].~T(); });
 }

 template<typename T> void copy_assign_cb(const void *src, void *dst)
@@ -55,7 +55,7 @@ template<typename T> void copy_assign_indices_cb(const void *src, void *dst, con
   const T *src_ = static_cast<const T *>(src);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized([&](int64_t i) { dst_[i] = src_[i]; });
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) { dst_[i] = src_[i]; });
 }

 template<typename T>
 void copy_assign_compressed_cb(const void *src, void *dst, const IndexMask &mask)
@@ -63,7 +63,8 @@ void copy_assign_compressed_cb(const void *src, void *dst, const IndexMask &mask
   const T *src_ = static_cast<const T *>(src);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized([&](const int64_t i, const int64_t pos) { dst_[pos] = src_[i]; });
+  mask.foreach_index_optimized<int64_t>(
+      [&](const int64_t i, const int64_t pos) { dst_[pos] = src_[i]; });
 }

 template<typename T> void copy_construct_cb(const void *src, void *dst)
@@ -76,7 +77,7 @@ void copy_construct_indices_cb(const void *src, void *dst, const IndexMask &mask
   const T *src_ = static_cast<const T *>(src);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized([&](int64_t i) { new (dst_ + i) T(src_[i]); });
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) { new (dst_ + i) T(src_[i]); });
 }

 template<typename T>
 void copy_construct_compressed_cb(const void *src, void *dst, const IndexMask &mask)
@@ -84,7 +85,7 @@ void copy_construct_compressed_cb(const void *src, void *dst, const IndexMask &m
   const T *src_ = static_cast<const T *>(src);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized(
+  mask.foreach_index_optimized<int64_t>(
       [&](const int64_t i, const int64_t pos) { new (dst_ + pos) T(src_[i]); });
 }
@@ -97,7 +98,7 @@ template<typename T> void move_assign_indices_cb(void *src, void *dst, const Ind
   T *src_ = static_cast<T *>(src);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized([&](int64_t i) { dst_[i] = std::move(src_[i]); });
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) { dst_[i] = std::move(src_[i]); });
 }

 template<typename T> void move_construct_cb(void *src, void *dst)
@@ -109,7 +110,7 @@ template<typename T> void move_construct_indices_cb(void *src, void *dst, const
   T *src_ = static_cast<T *>(src);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized([&](int64_t i) { new (dst_ + i) T(std::move(src_[i])); });
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) { new (dst_ + i) T(std::move(src_[i])); });
 }

 template<typename T> void relocate_assign_cb(void *src, void *dst)
@@ -125,7 +126,7 @@ template<typename T> void relocate_assign_indices_cb(void *src, void *dst, const
   T *src_ = static_cast<T *>(src);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized([&](int64_t i) {
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) {
     dst_[i] = std::move(src_[i]);
     src_[i].~T();
   });
@@ -145,7 +146,7 @@ void relocate_construct_indices_cb(void *src, void *dst, const IndexMask &mask)
   T *src_ = static_cast<T *>(src);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized([&](int64_t i) {
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) {
     new (dst_ + i) T(std::move(src_[i]));
     src_[i].~T();
   });
@@ -166,7 +167,7 @@ void fill_assign_indices_cb(const void *value, void *dst, const IndexMask &mask)
   const T &value_ = *static_cast<const T *>(value);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized([&](int64_t i) { dst_[i] = value_; });
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) { dst_[i] = value_; });
 }

 template<typename T> void fill_construct_cb(const void *value, void *dst, int64_t n)
@@ -184,7 +185,7 @@ void fill_construct_indices_cb(const void *value, void *dst, const IndexMask &ma
   const T &value_ = *static_cast<const T *>(value);
   T *dst_ = static_cast<T *>(dst);
-  mask.foreach_index_optimized([&](int64_t i) { new (dst_ + i) T(value_); });
+  mask.foreach_index_optimized<int64_t>([&](int64_t i) { new (dst_ + i) T(value_); });
 }

 template<typename T> void print_cb(const void *value, std::stringstream &ss)

View File

@@ -295,8 +295,9 @@ class IndexMask : private IndexMaskData {
    * when segments are a range internally. Use this only when the function itself is doing very
    * little work and will likely be called many times.
    */
-  template<typename Fn> void foreach_index_optimized(Fn &&fn) const;
-  template<typename Fn> void foreach_index_optimized(GrainSize grain_size, Fn &&fn) const;
+  template<typename IndexT, typename Fn> void foreach_index_optimized(Fn &&fn) const;
+  template<typename IndexT, typename Fn>
+  void foreach_index_optimized(GrainSize grain_size, Fn &&fn) const;

   /**
    * Calls the function once for every range. Note that this might call the function for each index
@@ -409,7 +410,7 @@ inline const std::array<int16_t, max_segment_size> &get_static_indices_array()
 template<typename T>
 inline void masked_fill(MutableSpan<T> data, const T &value, const IndexMask &mask)
 {
-  mask.foreach_index_optimized([&](const int64_t i) { data[i] = value; });
+  mask.foreach_index_optimized<int64_t>([&](const int64_t i) { data[i] = value; });
 }

 /* -------------------------------------------------------------------- */
@@ -621,52 +622,57 @@ inline void IndexMask::foreach_index(const GrainSize grain_size, Fn &&fn) const
   });
 }

-template<typename Fn>
+template<typename T, typename Fn>
 #if (defined(__GNUC__) && !defined(__clang__))
-[[gnu::optimize("-funroll-loops")]] [[gnu::optimize("O3")]]
+[[gnu::optimize("O3")]]
 #endif
 inline void
-foreach_index_in_range(const IndexRange range, Fn &&fn)
+optimized_foreach_index_in_range(const IndexRange segment, const Fn fn)
 {
-  const int64_t start = range.start();
-  const int64_t end = range.one_after_last();
-  for (int64_t i = start; i < end; i++) {
+  BLI_assert(segment.one_after_last() <= std::numeric_limits<T>::max());
+  const T start = T(segment.start());
+  const T end = T(segment.one_after_last());
+  for (T i = start; i < end; i++) {
     fn(i);
   }
 }

-template<typename Fn>
+template<typename T, typename Fn>
 #if (defined(__GNUC__) && !defined(__clang__))
-[[gnu::optimize("-funroll-loops")]] [[gnu::optimize("O3")]]
+[[gnu::optimize("O3")]]
 #endif
 inline void
-foreach_index_in_range(const IndexRange range, const int64_t start_mask_position, Fn &&fn)
+optimized_foreach_index_in_range_with_pos(const IndexRange segment,
+                                          const T segment_pos,
+                                          const Fn fn)
 {
-  const int64_t start = range.start();
-  const int64_t end = range.one_after_last();
-  for (int64_t i = start, mask_position = start_mask_position; i < end; i++, mask_position++) {
-    fn(i, mask_position);
+  BLI_assert(segment.one_after_last() <= std::numeric_limits<T>::max());
+  const T start = T(segment.start());
+  const T end = T(segment.one_after_last());
+  for (T i = start, pos = segment_pos; i < end; i++, pos++) {
+    fn(i, pos);
   }
 }

-template<typename Fn> inline void IndexMask::foreach_index_optimized(Fn &&fn) const
+template<typename IndexT, typename Fn>
+inline void IndexMask::foreach_index_optimized(Fn &&fn) const
 {
   this->foreach_segment_optimized(
-      [&](const auto segment, [[maybe_unused]] const int64_t start_mask_position) {
+      [fn](const auto segment, [[maybe_unused]] const int64_t segment_pos) {
         constexpr bool is_range = std::is_same_v<std::decay_t<decltype(segment)>, IndexRange>;
-        if constexpr (std::is_invocable_r_v<void, Fn, int64_t, int64_t>) {
+        if constexpr (std::is_invocable_r_v<void, Fn, IndexT, IndexT>) {
           if constexpr (is_range) {
-            foreach_index_in_range(segment, start_mask_position, fn);
+            optimized_foreach_index_in_range_with_pos<IndexT>(segment, IndexT(segment_pos), fn);
           }
           else {
            for (const int64_t i : segment.index_range()) {
-              fn(segment[i], start_mask_position + i);
+              fn(segment[i], segment_pos + i);
            }
           }
         }
         else {
           if constexpr (is_range) {
-            foreach_index_in_range(segment, fn);
+            optimized_foreach_index_in_range<IndexT>(segment, fn);
           }
           else {
            for (const int64_t index : segment) {
@@ -677,19 +683,19 @@ template<typename Fn> inline void IndexMask::foreach_index_optimized(Fn &&fn) co
       });
 }

-template<typename Fn>
+template<typename IndexT, typename Fn>
 inline void IndexMask::foreach_index_optimized(const GrainSize grain_size, Fn &&fn) const
 {
   threading::parallel_for(this->index_range(), grain_size.value, [&](const IndexRange range) {
     const IndexMask sub_mask = this->slice(range);
-    if constexpr (std::is_invocable_r_v<void, Fn, int64_t, int64_t>) {
-      sub_mask.foreach_index_optimized(
-          [&fn, range_start = range.start()](const int64_t i, const int64_t mask_position) {
-            fn(i, mask_position + range_start);
+    if constexpr (std::is_invocable_r_v<void, Fn, IndexT, IndexT>) {
+      sub_mask.foreach_index_optimized<IndexT>(
+          [fn, range_start = IndexT(range.start())](const IndexT i, const IndexT pos) {
+            fn(i, pos + range_start);
           });
     }
     else {
-      sub_mask.foreach_index_optimized(fn);
+      sub_mask.foreach_index_optimized<IndexT>(fn);
     }
   });
 }
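For reference, both lambda arities stay supported after this change; the implementation above picks the inner loop at compile time with `std::is_invocable_r_v<void, Fn, IndexT, IndexT>`. A short usage sketch under that contract (the `gather_floats` helper and its spans are hypothetical, not part of the patch):

/* Unary form: the lambda sees each masked index. */
mask.foreach_index_optimized<int>([&](const int i) { counts[i]++; });

/* Binary form: the lambda also sees the index's position within the
 * mask, so it can write a densely packed output. */
void gather_floats(const IndexMask &mask, const Span<float> src, MutableSpan<float> dst)
{
  mask.foreach_index_optimized<int64_t>(
      [&](const int64_t i, const int64_t pos) { dst[pos] = src[i]; });
}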

View File

@@ -127,7 +127,7 @@ template<typename T> class VArrayImpl {
    */
   virtual void materialize_compressed(const IndexMask &mask, T *dst) const
   {
-    mask.foreach_index_optimized(
+    mask.foreach_index_optimized<int64_t>(
         [&](const int64_t i, const int64_t pos) { dst[pos] = this->get(i); });
   }
@@ -136,7 +136,7 @@ template<typename T> class VArrayImpl {
    */
   virtual void materialize_compressed_to_uninitialized(const IndexMask &mask, T *dst) const
   {
-    mask.foreach_index_optimized(
+    mask.foreach_index_optimized<int64_t>(
         [&](const int64_t i, const int64_t pos) { new (dst + pos) T(this->get(i)); });
   }
@@ -223,22 +223,23 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
   void materialize(const IndexMask &mask, T *dst) const override
   {
-    mask.foreach_index_optimized([&](const int64_t i) { dst[i] = data_[i]; });
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) { dst[i] = data_[i]; });
   }

   void materialize_to_uninitialized(const IndexMask &mask, T *dst) const override
   {
-    mask.foreach_index_optimized([&](const int64_t i) { new (dst + i) T(data_[i]); });
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) { new (dst + i) T(data_[i]); });
   }

   void materialize_compressed(const IndexMask &mask, T *dst) const override
   {
-    mask.foreach_index_optimized([&](const int64_t i, const int64_t pos) { dst[pos] = data_[i]; });
+    mask.foreach_index_optimized<int64_t>(
+        [&](const int64_t i, const int64_t pos) { dst[pos] = data_[i]; });
   }

   void materialize_compressed_to_uninitialized(const IndexMask &mask, T *dst) const override
   {
-    mask.foreach_index_optimized(
+    mask.foreach_index_optimized<int64_t>(
         [&](const int64_t i, const int64_t pos) { new (dst + pos) T(data_[i]); });
   }
 };
@@ -358,23 +359,23 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public
   void materialize(const IndexMask &mask, T *dst) const override
   {
-    mask.foreach_index_optimized([&](const int64_t i) { dst[i] = get_func_(i); });
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) { dst[i] = get_func_(i); });
   }

   void materialize_to_uninitialized(const IndexMask &mask, T *dst) const override
   {
-    mask.foreach_index_optimized([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
   }

   void materialize_compressed(const IndexMask &mask, T *dst) const override
   {
-    mask.foreach_index_optimized(
+    mask.foreach_index_optimized<int64_t>(
         [&](const int64_t i, const int64_t pos) { dst[pos] = get_func_(i); });
   }

   void materialize_compressed_to_uninitialized(const IndexMask &mask, T *dst) const override
   {
-    mask.foreach_index_optimized(
+    mask.foreach_index_optimized<int64_t>(
         [&](const int64_t i, const int64_t pos) { new (dst + pos) T(get_func_(i)); });
   }
 };
@@ -415,23 +416,24 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
   void materialize(const IndexMask &mask, ElemT *dst) const override
   {
-    mask.foreach_index_optimized([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
   }

   void materialize_to_uninitialized(const IndexMask &mask, ElemT *dst) const override
   {
-    mask.foreach_index_optimized([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
+    mask.foreach_index_optimized<int64_t>(
+        [&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
   }

   void materialize_compressed(const IndexMask &mask, ElemT *dst) const override
   {
-    mask.foreach_index_optimized(
+    mask.foreach_index_optimized<int64_t>(
         [&](const int64_t i, const int64_t pos) { dst[pos] = GetFunc(data_[i]); });
   }

   void materialize_compressed_to_uninitialized(const IndexMask &mask, ElemT *dst) const override
   {
-    mask.foreach_index_optimized(
+    mask.foreach_index_optimized<int64_t>(
         [&](const int64_t i, const int64_t pos) { new (dst + pos) ElemT(GetFunc(data_[i])); });
   }
 };

View File

@@ -10,7 +10,7 @@ namespace blender {

 void GVArrayImpl::materialize(const IndexMask &mask, void *dst) const
 {
-  mask.foreach_index_optimized([&](const int64_t i) {
+  mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
     void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
     this->get(i, elem_dst);
   });
@@ -18,7 +18,7 @@ void GVArrayImpl::materialize(const IndexMask &mask, void *dst) const

 void GVArrayImpl::materialize_to_uninitialized(const IndexMask &mask, void *dst) const
 {
-  mask.foreach_index_optimized([&](const int64_t i) {
+  mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
     void *elem_dst = POINTER_OFFSET(dst, type_->size() * i);
     this->get_to_uninitialized(i, elem_dst);
   });
@@ -26,7 +26,7 @@ void GVArrayImpl::materialize_to_uninitialized(const IndexMask &mask, void *dst)

 void GVArrayImpl::materialize_compressed(const IndexMask &mask, void *dst) const
 {
-  mask.foreach_index_optimized([&](const int64_t i, const int64_t pos) {
+  mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
     void *elem_dst = POINTER_OFFSET(dst, type_->size() * pos);
     this->get(i, elem_dst);
   });
@@ -34,7 +34,7 @@ void GVArrayImpl::materialize_compressed(const IndexMask &mask, void *dst) const

 void GVArrayImpl::materialize_compressed_to_uninitialized(const IndexMask &mask, void *dst) const
 {
-  mask.foreach_index_optimized([&](const int64_t i, const int64_t pos) {
+  mask.foreach_index_optimized<int64_t>([&](const int64_t i, const int64_t pos) {
     void *elem_dst = POINTER_OFFSET(dst, type_->size() * pos);
     this->get_to_uninitialized(i, elem_dst);
   });

View File

@@ -407,8 +407,10 @@ IndexMask IndexMask::from_bools(const IndexMask &universe,
 template<typename T> void IndexMask::to_indices(MutableSpan<T> r_indices) const
 {
   BLI_assert(this->size() == r_indices.size());
-  this->foreach_index_optimized(
-      GrainSize(1024), [&](const int64_t i, const int64_t pos) mutable { r_indices[pos] = T(i); });
+  this->foreach_index_optimized<int64_t>(
+      GrainSize(1024), [r_indices = r_indices.data()](const int64_t i, const int64_t pos) {
+        r_indices[pos] = T(i);
+      });
 }

 void IndexMask::to_bits(MutableBitSpan r_bits) const
@@ -432,7 +434,8 @@ void IndexMask::to_bools(MutableSpan<bool> r_bools) const
 {
   BLI_assert(r_bools.size() >= this->min_array_size());
   r_bools.fill(false);
-  this->foreach_index_optimized(GrainSize(2048), [&](const int64_t i) { r_bools[i] = true; });
+  this->foreach_index_optimized<int64_t>(GrainSize(2048),
                                         [&](const int64_t i) { r_bools[i] = true; });
 }

 Vector<IndexRange> IndexMask::to_ranges() const
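A side note on the capture changes in this file (`[&] ... mutable` becomes a by-value capture of `r_indices.data()`): capturing the raw destination pointer by value lets the loop body address it directly rather than going through a captured reference on every iteration. The same pattern, sketched with hypothetical names:

/* Capture the pointer, not a reference to the span object. */
int *dst = r_values.data();
mask.foreach_index_optimized<int64_t>(
    GrainSize(1024), [dst](const int64_t i, const int64_t pos) { dst[pos] = int(i); });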

View File

@@ -704,7 +704,7 @@ template<typename T> class CustomMF_Constant : public MultiFunction {
   void call(const IndexMask &mask, Params params, Context /*context*/) const override
   {
     MutableSpan<T> output = params.uninitialized_single_output<T>(0);
-    mask.foreach_index_optimized([&](const int64_t i) { new (&output[i]) T(value_); });
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) { new (&output[i]) T(value_); });
   }

   uint64_t hash() const override

View File

@@ -26,7 +26,7 @@ PointCloud *point_merge_by_distance(const PointCloud &src_points,
   /* Create the KD tree based on only the selected points, to speed up merge detection and
    * balancing. */
   KDTree_3d *tree = BLI_kdtree_3d_new(selection.size());
-  selection.foreach_index_optimized(
+  selection.foreach_index_optimized<int64_t>(
       [&](const int64_t i, const int64_t pos) { BLI_kdtree_3d_insert(tree, pos, positions[i]); });
   BLI_kdtree_3d_balance(tree);

View File

@@ -122,12 +122,12 @@ class SeparateHSVAFunction : public mf::MultiFunction {
     MutableSpan<float> value = params.uninitialized_single_output<float>(3, "Value");
     MutableSpan<float> alpha = params.uninitialized_single_output_if_required<float>(4, "Alpha");

-    mask.foreach_index_optimized([&](const int64_t i) {
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
       rgb_to_hsv(colors[i].r, colors[i].g, colors[i].b, &hue[i], &saturation[i], &value[i]);
     });

     if (!alpha.is_empty()) {
-      mask.foreach_index_optimized([&](const int64_t i) { alpha[i] = colors[i].a; });
+      mask.foreach_index_optimized<int64_t>([&](const int64_t i) { alpha[i] = colors[i].a; });
     }
   }
 };
@@ -158,12 +158,12 @@ class SeparateHSLAFunction : public mf::MultiFunction {
     MutableSpan<float> lightness = params.uninitialized_single_output<float>(3, "Lightness");
     MutableSpan<float> alpha = params.uninitialized_single_output_if_required<float>(4, "Alpha");

-    mask.foreach_index_optimized([&](const int64_t i) {
+    mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
       rgb_to_hsl(colors[i].r, colors[i].g, colors[i].b, &hue[i], &saturation[i], &lightness[i]);
     });

     if (!alpha.is_empty()) {
-      mask.foreach_index_optimized([&](const int64_t i) { alpha[i] = colors[i].a; });
+      mask.foreach_index_optimized<int64_t>([&](const int64_t i) { alpha[i] = colors[i].a; });
     }
   }
 };

View File

@@ -136,7 +136,7 @@ static void sample_indices_and_lengths(const Span<float> accumulated_lengths,
   const float total_length = accumulated_lengths.last();
   length_parameterize::SampleSegmentHint hint;

-  mask.foreach_index_optimized([&](const int i) {
+  mask.foreach_index_optimized<int>([&](const int i) {
     const float sample_length = length_mode == GEO_NODE_CURVE_SAMPLE_FACTOR ?
                                     sample_lengths[i] * total_length :
                                     sample_lengths[i];
@@ -168,7 +168,7 @@ static void sample_indices_and_factors_to_compressed(const Span<float> accumulat
   switch (length_mode) {
     case GEO_NODE_CURVE_SAMPLE_FACTOR:
-      mask.foreach_index_optimized([&](const int i, const int pos) {
+      mask.foreach_index_optimized<int>([&](const int i, const int pos) {
         const float length = sample_lengths[i] * total_length;
         length_parameterize::sample_at_length(accumulated_lengths,
                                               std::clamp(length, 0.0f, total_length),
@@ -178,7 +178,7 @@ static void sample_indices_and_factors_to_compressed(const Span<float> accumulat
       });
       break;
     case GEO_NODE_CURVE_SAMPLE_LENGTH:
-      mask.foreach_index_optimized([&](const int i, const int pos) {
+      mask.foreach_index_optimized<int>([&](const int i, const int pos) {
         const float length = sample_lengths[i];
         length_parameterize::sample_at_length(accumulated_lengths,
                                               std::clamp(length, 0.0f, total_length),

View File

@@ -62,7 +62,7 @@ static void set_computed_position_and_offset(GeometryComponent &component,
       MutableVArraySpan<float3> out_positions_span = positions.varray;
       devirtualize_varray2(
           in_positions, in_offsets, [&](const auto in_positions, const auto in_offsets) {
-            selection.foreach_index_optimized(grain_size, [&](const int i) {
+            selection.foreach_index_optimized<int>(grain_size, [&](const int i) {
              const float3 new_position = in_positions[i] + in_offsets[i];
              const float3 delta = new_position - out_positions_span[i];
              handle_right_attribute.span[i] += delta;
@@ -87,14 +87,14 @@ static void set_computed_position_and_offset(GeometryComponent &component,
       MutableVArraySpan<float3> out_positions_span = positions.varray;
       if (positions_are_original) {
         devirtualize_varray(in_offsets, [&](const auto in_offsets) {
-          selection.foreach_index_optimized(
+          selection.foreach_index_optimized<int>(
               grain_size, [&](const int i) { out_positions_span[i] += in_offsets[i]; });
         });
       }
       else {
         devirtualize_varray2(
             in_positions, in_offsets, [&](const auto in_positions, const auto in_offsets) {
-              selection.foreach_index_optimized(grain_size, [&](const int i) {
+              selection.foreach_index_optimized<int>(grain_size, [&](const int i) {
                out_positions_span[i] = in_positions[i] + in_offsets[i];
              });
            });

View File

@@ -154,7 +154,7 @@ class ClampWrapperFunction : public mf::MultiFunction {
     /* This has actually been initialized in the call above. */
     MutableSpan<float> results = params.uninitialized_single_output<float>(output_param_index);

-    mask.foreach_index_optimized([&](const int i) {
+    mask.foreach_index_optimized<int>([&](const int i) {
       float &value = results[i];
       CLAMP(value, 0.0f, 1.0f);
     });

View File

@@ -403,20 +403,21 @@ class MixColorFunction : public mf::MultiFunction {
         3, "Result");

     if (clamp_factor_) {
-      mask.foreach_index_optimized([&](const int64_t i) {
+      mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
         results[i] = col1[i];
         ramp_blend(blend_type_, results[i], std::clamp(fac[i], 0.0f, 1.0f), col2[i]);
       });
     }
     else {
-      mask.foreach_index_optimized([&](const int64_t i) {
+      mask.foreach_index_optimized<int64_t>([&](const int64_t i) {
         results[i] = col1[i];
         ramp_blend(blend_type_, results[i], fac[i], col2[i]);
       });
     }

     if (clamp_result_) {
-      mask.foreach_index_optimized([&](const int64_t i) { clamp_v3(results[i], 0.0f, 1.0f); });
+      mask.foreach_index_optimized<int64_t>(
+          [&](const int64_t i) { clamp_v3(results[i], 0.0f, 1.0f); });
     }
   }
 };
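Finally, a note on picking the index type: the range fast path asserts `segment.one_after_last() <= std::numeric_limits<T>::max()`, so `<int>` is only valid when every index in the mask fits in 32 bits, as at the geometry call sites above where element counts are `int`-bounded. When in doubt, `<int64_t>` is the safe default. A minimal illustration (the `positions` and `flags` arrays are stand-ins, not from the patch):

/* Indices known to fit in 32 bits: a narrower loop variable that the
 * O3-compiled range loop can vectorize more easily. */
selection.foreach_index_optimized<int>([&](const int i) { positions[i].z = 0.0f; });

/* Indices that may exceed INT32_MAX: keep the 64-bit form. */
selection.foreach_index_optimized<int64_t>([&](const int64_t i) { flags[i] = true; });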