Alternative Upload geometry data in parallel to multiple GPUs using the "Multi-Device" #107552

Open
William Leeson wants to merge 137 commits from leesonw/blender-cluster:upload_changed into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
39 changed files with 491 additions and 434 deletions
Showing only changes of commit 2485460fb6 - Show all commits

View File

@ -5782,8 +5782,14 @@ GHOST_TSuccess GHOST_SystemWayland::getModifierKeys(GHOST_ModifierKeys &keys) co
}
const GWL_ModifierInfo &mod_info = g_modifier_info_table[i];
const bool val = (state & (1 << seat->xkb_keymap_mod_index[i])) != 0;
bool val_l = seat->key_depressed.mods[GHOST_KEY_MODIFIER_TO_INDEX(mod_info.key_l)] > 0;
bool val_r = seat->key_depressed.mods[GHOST_KEY_MODIFIER_TO_INDEX(mod_info.key_r)] > 0;
/* NOTE(@ideasman42): it's important to write the XKB state back to #GWL_KeyboardDepressedState
* otherwise changes to modifiers in the future won't generate events.
* This can cause modifiers to be stuck when switching between windows in GNOME because
* window activation is handled before the keyboard enter callback runs, see: #107314. */
int16_t &depressed_l = seat->key_depressed.mods[GHOST_KEY_MODIFIER_TO_INDEX(mod_info.key_l)];
int16_t &depressed_r = seat->key_depressed.mods[GHOST_KEY_MODIFIER_TO_INDEX(mod_info.key_r)];
bool val_l = depressed_l > 0;
bool val_r = depressed_r > 0;
/* This shouldn't be needed, but guard against any possibility of modifiers being stuck.
* Warn so if this happens it can be investigated. */
@ -5796,6 +5802,7 @@ GHOST_TSuccess GHOST_SystemWayland::getModifierKeys(GHOST_ModifierKeys &keys) co
}
/* Picking the left is arbitrary. */
val_l = true;
depressed_l = 1;
}
}
else {
@ -5807,6 +5814,8 @@ GHOST_TSuccess GHOST_SystemWayland::getModifierKeys(GHOST_ModifierKeys &keys) co
}
val_l = false;
val_r = false;
depressed_l = 0;
depressed_r = 0;
}
}

View File

@ -43,9 +43,10 @@ def get_context_modifier(context):
if context.area.type == 'PROPERTIES':
modifier = context.modifier
else:
if context.object is None:
ob = context.object
if ob is None:
return False
modifier = context.object.modifiers.active
modifier = ob.modifiers.active
if modifier is None or modifier.type != 'NODES':
return None
return modifier
@ -207,7 +208,8 @@ class NewGeometryNodesModifier(Operator):
return geometry_modifier_poll(context)
def execute(self, context):
modifier = context.object.modifiers.new(data_("GeometryNodes"), "NODES")
ob = context.object
modifier = ob.modifiers.new(data_("GeometryNodes"), 'NODES')
if not modifier:
return {'CANCELLED'}

View File

@ -1345,6 +1345,7 @@ rna_custom_property_name = StringProperty(
maxlen=63,
)
# Most useful entries of rna_enum_property_subtype_items:
rna_custom_property_type_items = (
('FLOAT', "Float", "A single floating-point value"),
('FLOAT_ARRAY', "Float Array", "An array of floating-point values"),
@ -1356,9 +1357,22 @@ rna_custom_property_type_items = (
('PYTHON', "Python", "Edit a python value directly, for unsupported property types"),
)
# Most useful entries of rna_enum_property_subtype_items for number arrays:
rna_vector_subtype_items = (
('NONE', "Plain Data", "Data values without special behavior"),
# Shared "no special behavior" entry, reused by the number and vector subtype tuples.
rna_custom_property_subtype_none_item = ('NONE', "Plain Data", "Data values without special behavior")
# Subtype choices offered for single-number custom properties
# (subset of rna_enum_property_subtype_items relevant to scalars).
rna_custom_property_subtype_number_items = (
rna_custom_property_subtype_none_item,
('PIXEL', "Pixel", ""),
('PERCENTAGE', "Percentage", ""),
('FACTOR', "Factor", ""),
('ANGLE', "Angle", ""),
('TIME_ABSOLUTE', "Time", "Time specified in seconds"),
('DISTANCE', "Distance", ""),
('POWER', "Power", ""),
('TEMPERATURE', "Temperature", ""),
)
rna_custom_property_subtype_vector_items = (
rna_custom_property_subtype_none_item,
('COLOR', "Linear Color", "Color in the linear space"),
('COLOR_GAMMA', "Gamma-Corrected Color", "Color in the gamma corrected space"),
('EULER', "Euler Angles", "Euler rotation angles in radians"),
@ -1373,6 +1387,17 @@ class WM_OT_properties_edit(Operator):
# register only because invoke_props_popup requires.
bl_options = {'REGISTER', 'INTERNAL'}
def subtype_items_cb(self, context):
    """Dynamic ``items`` callback for the subtype enum.

    Only float properties currently expose subtypes: scalar floats get the
    number subtypes, float arrays get the vector subtypes, and every other
    property type gets an empty item list.
    """
    property_type = self.property_type
    if property_type == 'FLOAT':
        return rna_custom_property_subtype_number_items
    if property_type == 'FLOAT_ARRAY':
        return rna_custom_property_subtype_vector_items
    return ()
def property_type_update_cb(self, context):
    """Update callback: reset the subtype to a safe default whenever the
    property type changes, since the previous subtype may not be valid for
    the newly selected type."""
    self.subtype = 'NONE'
# Common settings used for all property types. Generally, separate properties are used for each
# type to improve the experience when choosing UI data values.
@ -1381,6 +1406,7 @@ class WM_OT_properties_edit(Operator):
property_type: EnumProperty(
name="Type",
items=rna_custom_property_type_items,
update=property_type_update_cb
)
is_overridable_library: BoolProperty(
name="Library Overridable",
@ -1481,7 +1507,7 @@ class WM_OT_properties_edit(Operator):
)
subtype: EnumProperty(
name="Subtype",
items=WM_OT_properties_edit.subtype_items,
items=subtype_items_cb,
)
# String properties.
@ -1497,9 +1523,6 @@ class WM_OT_properties_edit(Operator):
description="Python value for unsupported custom property types",
)
type_items = rna_custom_property_type_items
subtype_items = rna_vector_subtype_items
# Helper method to avoid repetitive code to retrieve a single value from sequences and non-sequences.
@staticmethod
def _convert_new_value_single(old_value, new_type):
@ -1567,15 +1590,7 @@ class WM_OT_properties_edit(Operator):
return 'PYTHON'
def _init_subtype(self, subtype):
subtype = subtype or 'NONE'
subtype_items = rna_vector_subtype_items
# Add a temporary enum entry to preserve unknown subtypes
if not any(subtype == item[0] for item in subtype_items):
subtype_items += ((subtype, subtype, ""),)
WM_OT_properties_edit.subtype_items = subtype_items
self.subtype = subtype
self.subtype = subtype or 'NONE'
# Fill the operator's properties with the UI data properties from the existing custom property.
# Note that if the UI data doesn't exist yet, the access will create it and use those default values.
@ -1904,9 +1919,7 @@ class WM_OT_properties_edit(Operator):
layout.prop(self, "step_float")
layout.prop(self, "precision")
# Subtype is only supported for float properties currently.
if self.property_type != 'FLOAT':
layout.prop(self, "subtype")
layout.prop(self, "subtype")
elif self.property_type in {'INT', 'INT_ARRAY'}:
if self.property_type == 'INT_ARRAY':
layout.prop(self, "array_length")

View File

@ -110,7 +110,8 @@ class DATA_PT_bone_groups(ArmatureButtonsPanel, Panel):
@classmethod
def poll(cls, context):
return (context.object and context.object.type == 'ARMATURE' and context.object.pose)
ob = context.object
return (ob and ob.type == 'ARMATURE' and ob.pose)
def draw(self, context):
layout = self.layout

View File

@ -276,7 +276,7 @@ class OBJECT_PT_instancing_size(ObjectButtonsPanel, Panel):
@classmethod
def poll(cls, context):
ob = context.object
return ob.instance_type == 'FACES'
return (ob is not None) and (ob.instance_type == 'FACES')
def draw_header(self, context):
@ -304,7 +304,8 @@ class OBJECT_PT_lineart(ObjectButtonsPanel, Panel):
def draw(self, context):
layout = self.layout
lineart = context.object.lineart
ob = context.object
lineart = ob.lineart
layout.use_property_split = True
@ -385,7 +386,7 @@ class OBJECT_PT_visibility(ObjectButtonsPanel, Panel):
col.prop(ob, "hide_viewport", text="Viewports", toggle=False, invert_checkbox=True)
col.prop(ob, "hide_render", text="Renders", toggle=False, invert_checkbox=True)
if context.object.type == 'GPENCIL':
if ob.type == 'GPENCIL':
col = layout.column(heading="Grease Pencil")
col.prop(ob, "use_grease_pencil_lights", toggle=False)

View File

@ -653,6 +653,11 @@ class MutableAttributeAccessor : public AttributeAccessor {
return {};
}
/**
* Replace the existing attribute with a new one with a different name.
*/
bool rename(const AttributeIDRef &old_attribute_id, const AttributeIDRef &new_attribute_id);
/**
* Create a new attribute.
* \return True, when a new attribute has been created. False, when it's not possible to create

View File

@ -16,7 +16,6 @@
#include "BKE_customdata.h"
#include "BKE_mesh_types.h"
struct BLI_Stack;
struct BMesh;
struct BMeshCreateParams;
struct BMeshFromMeshParams;
@ -44,13 +43,6 @@ struct Scene;
extern "C" {
#endif
/* setting zero so we can catch bugs in OpenMP/BMesh */
#ifdef DEBUG
# define BKE_MESH_OMP_LIMIT 0
#else
# define BKE_MESH_OMP_LIMIT 10000
#endif
/* mesh_runtime.cc */
/**
@ -397,9 +389,6 @@ typedef struct MLoopNorSpace {
* - BMLoop pointers. */
struct LinkNode *loops;
char flags;
/** To be used for extended processing related to loop normal spaces (aka smooth fans). */
void *user_data;
} MLoopNorSpace;
/**
* MLoopNorSpace.flags
@ -449,6 +438,9 @@ void BKE_lnor_spacearr_tls_join(MLoopNorSpaceArray *lnors_spacearr,
MLoopNorSpaceArray *lnors_spacearr_tls);
MLoopNorSpace *BKE_lnor_space_create(MLoopNorSpaceArray *lnors_spacearr);
#ifdef __cplusplus
/**
* Should only be called once.
* Beware, this modifies ref_vec and other_vec in place!
@ -459,7 +451,10 @@ void BKE_lnor_space_define(MLoopNorSpace *lnor_space,
const float lnor[3],
float vec_ref[3],
float vec_other[3],
struct BLI_Stack *edge_vectors);
blender::Span<blender::float3> edge_vectors);
#endif
/**
* Add a new given loop to given lnor_space.
* Depending on \a lnor_space->data_type, we expect \a bm_loop to be a pointer to BMLoop struct

View File

@ -911,6 +911,41 @@ GSpanAttributeWriter MutableAttributeAccessor::lookup_or_add_for_write_only_span
return {};
}
bool MutableAttributeAccessor::rename(const AttributeIDRef &old_attribute_id,
const AttributeIDRef &new_attribute_id)
{
if (old_attribute_id == new_attribute_id) {
return true;
}
if (this->contains(new_attribute_id)) {
return false;
}
const GAttributeReader old_attribute = this->lookup(old_attribute_id);
if (!old_attribute) {
return false;
}
const eCustomDataType type = cpp_type_to_custom_data_type(old_attribute.varray.type());
if (old_attribute.sharing_info != nullptr && old_attribute.varray.is_span()) {
if (!this->add(new_attribute_id,
old_attribute.domain,
type,
AttributeInitShared{old_attribute.varray.get_internal_span().data(),
*old_attribute.sharing_info})) {
return false;
}
}
else {
if (!this->add(new_attribute_id,
old_attribute.domain,
type,
AttributeInitVArray{old_attribute.varray})) {
return false;
}
}
this->remove(old_attribute_id);
return true;
}
fn::GField AttributeValidator::validate_field_if_necessary(const fn::GField &field) const
{
if (function) {

View File

@ -281,7 +281,7 @@ void CurvesGeometry::fill_curve_types(const IndexMask selection, const CurveType
}
}
/* A potential performance optimization is only counting the changed indices. */
this->curve_types_for_write().fill_indices(selection, type);
this->curve_types_for_write().fill_indices(selection.indices(), type);
this->update_curve_types();
this->tag_topology_changed();
}

View File

@ -19,13 +19,10 @@
#include "BLI_array_utils.hh"
#include "BLI_bit_vector.hh"
#include "BLI_linklist.h"
#include "BLI_linklist_stack.h"
#include "BLI_math.h"
#include "BLI_math_vector.hh"
#include "BLI_memarena.h"
#include "BLI_span.hh"
#include "BLI_stack.h"
#include "BLI_task.h"
#include "BLI_task.hh"
#include "BLI_timeit.hh"
#include "BLI_utildefines.h"
@ -474,7 +471,7 @@ void BKE_lnor_space_define(MLoopNorSpace *lnor_space,
const float lnor[3],
float vec_ref[3],
float vec_other[3],
BLI_Stack *edge_vectors)
const blender::Span<blender::float3> edge_vectors)
{
const float pi2 = float(M_PI) * 2.0f;
float tvec[3], dtp;
@ -486,31 +483,24 @@ void BKE_lnor_space_define(MLoopNorSpace *lnor_space,
/* If vec_ref or vec_other are too much aligned with lnor, we can't build lnor space,
* tag it as invalid and abort. */
lnor_space->ref_alpha = lnor_space->ref_beta = 0.0f;
if (edge_vectors) {
BLI_stack_clear(edge_vectors);
}
return;
}
copy_v3_v3(lnor_space->vec_lnor, lnor);
/* Compute ref alpha, average angle of all available edge vectors to lnor. */
if (edge_vectors) {
if (!edge_vectors.is_empty()) {
float alpha = 0.0f;
int count = 0;
while (!BLI_stack_is_empty(edge_vectors)) {
const float *vec = (const float *)BLI_stack_peek(edge_vectors);
for (const blender::float3 &vec : edge_vectors) {
alpha += saacosf(dot_v3v3(vec, lnor));
BLI_stack_discard(edge_vectors);
count++;
}
/* This piece of code shall only be called for more than one loop. */
/* NOTE: In theory, this could be `count > 2`,
* but there is one case where we only have two edges for two loops:
* a smooth vertex with only two edges and two faces (our Monkey's nose has that, e.g.).
*/
BLI_assert(count >= 2); /* This piece of code shall only be called for more than one loop. */
lnor_space->ref_alpha = alpha / float(count);
BLI_assert(edge_vectors.size() >= 2);
lnor_space->ref_alpha = alpha / float(edge_vectors.size());
}
else {
lnor_space->ref_alpha = (saacosf(dot_v3v3(vec_ref, lnor)) +
@ -666,23 +656,6 @@ void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space,
namespace blender::bke::mesh {
#define LOOP_SPLIT_TASK_BLOCK_SIZE 1024
struct LoopSplitTaskData {
enum class Type : int8_t {
BlockEnd = 0, /* Set implicitly by calloc. */
Fan = 1,
Single = 2,
};
/** We have to create those outside of tasks, since #MemArena is not thread-safe. */
MLoopNorSpace *lnor_space;
int ml_curr_index;
int poly_index;
Type flag;
};
struct LoopSplitTaskDataCommon {
/* Read/write.
* Note we do not need to protect it, though, since two different tasks will *always* affect
@ -855,54 +828,39 @@ static void loop_manifold_fan_around_vert_next(const Span<int> corner_verts,
}
}
static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopSplitTaskData *data)
static void lnor_space_for_single_fan(LoopSplitTaskDataCommon *common_data,
const int ml_curr_index,
MLoopNorSpace *lnor_space)
{
MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
const Span<short2> clnors_data = common_data->clnors_data;
const Span<float3> positions = common_data->positions;
const Span<int2> edges = common_data->edges;
const OffsetIndices polys = common_data->polys;
const Span<int> corner_verts = common_data->corner_verts;
const Span<int> corner_edges = common_data->corner_edges;
const Span<int> loop_to_poly = common_data->loop_to_poly;
const Span<float3> poly_normals = common_data->poly_normals;
MutableSpan<float3> loop_normals = common_data->loop_normals;
MLoopNorSpace *lnor_space = data->lnor_space;
const int ml_curr_index = data->ml_curr_index;
const int poly_index = data->poly_index;
loop_normals[ml_curr_index] = poly_normals[loop_to_poly[ml_curr_index]];
/* Simple case (both edges around that vertex are sharp in current polygon),
* this loop just takes its poly normal.
*/
loop_normals[ml_curr_index] = poly_normals[poly_index];
if (MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr) {
const Span<float3> positions = common_data->positions;
const Span<int2> edges = common_data->edges;
const OffsetIndices polys = common_data->polys;
const Span<int> corner_verts = common_data->corner_verts;
const Span<int> corner_edges = common_data->corner_edges;
const Span<short2> clnors_data = common_data->clnors_data;
#if 0
printf("BASIC: handling loop %d / edge %d / vert %d / poly %d\n",
ml_curr_index,
loops[ml_curr_index].e,
loops[ml_curr_index].v,
poly_index);
#endif
/* If needed, generate this (simple!) lnor space. */
if (lnors_spacearr) {
float vec_curr[3], vec_prev[3];
const int poly_index = loop_to_poly[ml_curr_index];
const int ml_prev_index = mesh::poly_corner_prev(polys[poly_index], ml_curr_index);
/* The vertex we are "fanning" around. */
const int vert_pivot = corner_verts[ml_curr_index];
const int2 &edge = edges[corner_edges[ml_curr_index]];
const int vert_2 = edge_other_vert(edge, vert_pivot);
const int2 &edge_prev = edges[corner_edges[ml_prev_index]];
const int vert_3 = edge_other_vert(edge_prev, vert_pivot);
const int vert_2 = edge_other_vert(edges[corner_edges[ml_curr_index]], vert_pivot);
const int vert_3 = edge_other_vert(edges[corner_edges[ml_prev_index]], vert_pivot);
sub_v3_v3v3(vec_curr, positions[vert_2], positions[vert_pivot]);
normalize_v3(vec_curr);
sub_v3_v3v3(vec_prev, positions[vert_3], positions[vert_pivot]);
normalize_v3(vec_prev);
BKE_lnor_space_define(lnor_space, loop_normals[ml_curr_index], vec_curr, vec_prev, nullptr);
BKE_lnor_space_define(lnor_space, loop_normals[ml_curr_index], vec_curr, vec_prev, {});
/* We know there is only one loop in this space, no need to create a link-list in this case. */
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, ml_curr_index, nullptr, true);
@ -914,8 +872,9 @@ static void split_loop_nor_single_do(LoopSplitTaskDataCommon *common_data, LoopS
}
static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
LoopSplitTaskData *data,
BLI_Stack *edge_vectors)
const int ml_curr_index,
MLoopNorSpace *lnor_space,
Vector<float3> *edge_vectors)
{
MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
MutableSpan<float3> loop_normals = common_data->loop_normals;
@ -930,12 +889,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
const Span<int> loop_to_poly = common_data->loop_to_poly;
const Span<float3> poly_normals = common_data->poly_normals;
MLoopNorSpace *lnor_space = data->lnor_space;
#if 0 /* Not needed for 'fan' loops. */
float(*lnor)[3] = data->lnor;
#endif
const int ml_curr_index = data->ml_curr_index;
const int poly_index = data->poly_index;
const int poly_index = loop_to_poly[ml_curr_index];
const int ml_prev_index = poly_corner_prev(polys[poly_index], ml_curr_index);
/* Sigh! we have to fan around current vertex, until we find the other non-smooth edge,
@ -950,7 +904,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
const int2 &edge_orig = edges[corner_edges[ml_curr_index]];
float vec_curr[3], vec_prev[3], vec_org[3];
float lnor[3] = {0.0f, 0.0f, 0.0f};
float3 lnor(0.0f);
/* We validate clnors data on the fly - cheapest way to do! */
int clnors_avg[2] = {0, 0};
@ -958,10 +912,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
int clnors_count = 0;
bool clnors_invalid = false;
/* Temp loop normal stack. */
BLI_SMALLSTACK_DECLARE(normal, float *);
/* Temp clnors stack. */
BLI_SMALLSTACK_DECLARE(clnors, short *);
Vector<int, 8> processed_corners;
/* `mlfan_vert_index` the loop of our current edge might not be the loop of our current vertex!
*/
@ -981,7 +932,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
copy_v3_v3(vec_prev, vec_org);
if (lnors_spacearr) {
BLI_stack_push(edge_vectors, vec_org);
edge_vectors->append(vec_org);
}
}
@ -1021,20 +972,17 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
clnors_avg[0] += (*clnor)[0];
clnors_avg[1] += (*clnor)[1];
clnors_count++;
/* We store here a pointer to all custom loop_normals processed. */
BLI_SMALLSTACK_PUSH(clnors, (short *)*clnor);
}
}
/* We store here a pointer to all loop-normals processed. */
BLI_SMALLSTACK_PUSH(normal, (float *)(loop_normals[mlfan_vert_index]));
processed_corners.append(mlfan_vert_index);
if (lnors_spacearr) {
/* Assign current lnor space to current 'vertex' loop. */
BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, mlfan_vert_index, nullptr, false);
if (edge != edge_orig) {
/* We store here all edges-normalized vectors processed. */
BLI_stack_push(edge_vectors, vec_curr);
edge_vectors->append(vec_curr);
}
}
@ -1071,23 +1019,19 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
lnor_len = 1.0f;
}
BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_curr, edge_vectors);
BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_curr, *edge_vectors);
edge_vectors->clear();
if (!clnors_data.is_empty()) {
if (clnors_invalid) {
short *clnor;
clnors_avg[0] /= clnors_count;
clnors_avg[1] /= clnors_count;
/* Fix/update all clnors of this fan with computed average value. */
if (G.debug & G_DEBUG) {
printf("Invalid clnors in this fan!\n");
}
while ((clnor = (short *)BLI_SMALLSTACK_POP(clnors))) {
// print_v2("org clnor", clnor);
clnor[0] = short(clnors_avg[0]);
clnor[1] = short(clnors_avg[1]);
}
clnors_data.fill_indices(processed_corners.as_span(),
short2(clnors_avg[0], clnors_avg[1]));
// print_v2("new clnors", clnors_avg);
}
/* Extra bonus: since small-stack is local to this function,
@ -1100,50 +1044,8 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data,
/* In case we get a zero normal here, just use vertex normal already set! */
if (LIKELY(lnor_len != 0.0f)) {
/* Copy back the final computed normal into all related loop-normals. */
float *nor;
while ((nor = (float *)BLI_SMALLSTACK_POP(normal))) {
copy_v3_v3(nor, lnor);
}
loop_normals.fill_indices(processed_corners.as_span(), lnor);
}
/* Extra bonus: since small-stack is local to this function,
* no more need to empty it at all cost! */
}
}
static void loop_split_worker_do(LoopSplitTaskDataCommon *common_data,
LoopSplitTaskData *data,
BLI_Stack *edge_vectors)
{
if (data->flag == LoopSplitTaskData::Type::Fan) {
BLI_assert((edge_vectors == nullptr) || BLI_stack_is_empty(edge_vectors));
split_loop_nor_fan_do(common_data, data, edge_vectors);
}
else {
/* No need for edge_vectors for 'single' case! */
split_loop_nor_single_do(common_data, data);
}
}
static void loop_split_worker(TaskPool *__restrict pool, void *taskdata)
{
LoopSplitTaskDataCommon *common_data = (LoopSplitTaskDataCommon *)BLI_task_pool_user_data(pool);
LoopSplitTaskData *data = (LoopSplitTaskData *)taskdata;
/* Temp edge vectors stack, only used when computing lnor spacearr. */
BLI_Stack *edge_vectors = common_data->lnors_spacearr ?
BLI_stack_new(sizeof(float[3]), __func__) :
nullptr;
for (int i = 0; i < LOOP_SPLIT_TASK_BLOCK_SIZE; i++, data++) {
if (data->flag == LoopSplitTaskData::Type::BlockEnd) {
break;
}
loop_split_worker_do(common_data, data, edge_vectors);
}
if (edge_vectors) {
BLI_stack_free(edge_vectors);
}
}
@ -1218,10 +1120,10 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<int> corner_
}
}
static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common_data)
static void loop_split_generator(LoopSplitTaskDataCommon *common_data,
Vector<int> &r_single_corners,
Vector<int> &r_fan_corners)
{
MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr;
const Span<int> corner_verts = common_data->corner_verts;
const Span<int> corner_edges = common_data->corner_edges;
const OffsetIndices polys = common_data->polys;
@ -1230,23 +1132,10 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
BitVector<> skip_loops(corner_verts.size(), false);
LoopSplitTaskData *data_buff = nullptr;
int data_idx = 0;
/* Temp edge vectors stack, only used when computing lnor spacearr
* (and we are not multi-threading). */
BLI_Stack *edge_vectors = nullptr;
#ifdef DEBUG_TIME
SCOPED_TIMER_AVERAGED(__func__);
#endif
if (!pool) {
if (lnors_spacearr) {
edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
}
}
/* We now know edges that can be smoothed (with their vector, and their two loops),
* and edges that will be hard! Now, time to generate the normals.
*/
@ -1290,30 +1179,11 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
// printf("SKIPPING!\n");
}
else {
LoopSplitTaskData *data, data_local;
// printf("PROCESSING!\n");
if (pool) {
if (data_idx == 0) {
data_buff = (LoopSplitTaskData *)MEM_calloc_arrayN(
LOOP_SPLIT_TASK_BLOCK_SIZE, sizeof(*data_buff), __func__);
}
data = &data_buff[data_idx];
}
else {
data = &data_local;
memset(data, 0, sizeof(*data));
}
if (IS_EDGE_SHARP(edge_to_loops[corner_edges[ml_curr_index]]) &&
IS_EDGE_SHARP(edge_to_loops[corner_edges[ml_prev_index]])) {
data->ml_curr_index = ml_curr_index;
data->flag = LoopSplitTaskData::Type::Single;
data->poly_index = poly_index;
if (lnors_spacearr) {
data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
}
/* Simple case (both edges around that vertex are sharp in current polygon),
* this corner just takes its poly normal. */
r_single_corners.append(ml_curr_index);
}
else {
/* We do not need to check/tag loops as already computed. Due to the fact that a loop
@ -1323,35 +1193,11 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
* current edge, smooth previous edge), and not the alternative (smooth current edge,
* sharp previous edge). All this due/thanks to the link between normals and loop
* ordering (i.e. winding). */
data->ml_curr_index = ml_curr_index;
data->flag = LoopSplitTaskData::Type::Fan;
data->poly_index = poly_index;
if (lnors_spacearr) {
data->lnor_space = BKE_lnor_space_create(lnors_spacearr);
}
}
if (pool) {
data_idx++;
if (data_idx == LOOP_SPLIT_TASK_BLOCK_SIZE) {
BLI_task_pool_push(pool, loop_split_worker, data_buff, true, nullptr);
data_idx = 0;
}
}
else {
loop_split_worker_do(common_data, data, edge_vectors);
r_fan_corners.append(ml_curr_index);
}
}
}
}
if (pool && data_idx) {
BLI_task_pool_push(pool, loop_split_worker, data_buff, true, nullptr);
}
if (edge_vectors) {
BLI_stack_free(edge_vectors);
}
}
void normals_calc_loop(const Span<float3> vert_positions,
@ -1472,19 +1318,34 @@ void normals_calc_loop(const Span<float3> vert_positions,
edge_to_loops,
{});
if (corner_verts.size() < LOOP_SPLIT_TASK_BLOCK_SIZE * 8) {
/* Not enough loops to be worth the whole threading overhead. */
loop_split_generator(nullptr, &common_data);
Vector<int> single_corners;
Vector<int> fan_corners;
loop_split_generator(&common_data, single_corners, fan_corners);
MLoopNorSpace *lnor_spaces = nullptr;
if (r_lnors_spacearr) {
r_lnors_spacearr->spaces_num = single_corners.size() + fan_corners.size();
lnor_spaces = static_cast<MLoopNorSpace *>(BLI_memarena_calloc(
r_lnors_spacearr->mem, sizeof(MLoopNorSpace) * r_lnors_spacearr->spaces_num));
}
else {
TaskPool *task_pool = BLI_task_pool_create(&common_data, TASK_PRIORITY_HIGH);
loop_split_generator(task_pool, &common_data);
threading::parallel_for(single_corners.index_range(), 1024, [&](const IndexRange range) {
for (const int i : range) {
const int corner = single_corners[i];
lnor_space_for_single_fan(&common_data, corner, lnor_spaces ? &lnor_spaces[i] : nullptr);
}
});
BLI_task_pool_work_and_wait(task_pool);
BLI_task_pool_free(task_pool);
}
threading::parallel_for(fan_corners.index_range(), 1024, [&](const IndexRange range) {
Vector<float3> edge_vectors;
for (const int i : range) {
const int corner = fan_corners[i];
split_loop_nor_fan_do(&common_data,
corner,
lnor_spaces ? &lnor_spaces[single_corners.size() + i] : nullptr,
&edge_vectors);
}
});
if (r_lnors_spacearr) {
if (r_lnors_spacearr == &_lnors_spacearr) {
@ -1534,7 +1395,7 @@ static void mesh_normals_loop_custom_set(Span<float3> positions,
const bool use_split_normals = true;
const float split_angle = float(M_PI);
BLI_SMALLSTACK_DECLARE(clnors_data, short *);
Vector<short *> clnors_data;
/* Compute current lnor spacearr. */
normals_calc_loop(positions,
@ -1708,7 +1569,7 @@ static void mesh_normals_loop_custom_set(Span<float3> positions,
else {
int avg_nor_count = 0;
float avg_nor[3];
short clnor_data_tmp[2], *clnor_data;
short clnor_data_tmp[2];
zero_v3(avg_nor);
while (loop_link) {
@ -1718,7 +1579,7 @@ static void mesh_normals_loop_custom_set(Span<float3> positions,
avg_nor_count++;
add_v3_v3(avg_nor, nor);
BLI_SMALLSTACK_PUSH(clnors_data, (short *)r_clnors_data[lidx]);
clnors_data.append(r_clnors_data[lidx]);
loop_link = loop_link->next;
done_loops[lidx].reset();
@ -1727,7 +1588,8 @@ static void mesh_normals_loop_custom_set(Span<float3> positions,
mul_v3_fl(avg_nor, 1.0f / float(avg_nor_count));
BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], avg_nor, clnor_data_tmp);
while ((clnor_data = (short *)BLI_SMALLSTACK_POP(clnors_data))) {
while (!clnors_data.is_empty()) {
short *clnor_data = clnors_data.pop_last();
clnor_data[0] = clnor_data_tmp[0];
clnor_data[1] = clnor_data_tmp[1];
}

View File

@ -412,7 +412,7 @@ static void shrinkwrap_calc_nearest_vertex(ShrinkwrapCalcData *calc)
data.tree = calc->tree;
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = (calc->numVerts > BKE_MESH_OMP_LIMIT);
settings.use_threading = (calc->numVerts > 10000);
settings.userdata_chunk = &nearest;
settings.userdata_chunk_size = sizeof(nearest);
BLI_task_parallel_range(
@ -691,7 +691,7 @@ static void shrinkwrap_calc_normal_projection(ShrinkwrapCalcData *calc)
data.local2aux = &local2aux;
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = (calc->numVerts > BKE_MESH_OMP_LIMIT);
settings.use_threading = (calc->numVerts > 10000);
settings.userdata_chunk = &hit;
settings.userdata_chunk_size = sizeof(hit);
BLI_task_parallel_range(
@ -1363,7 +1363,7 @@ static void shrinkwrap_calc_nearest_surface_point(ShrinkwrapCalcData *calc)
data.tree = calc->tree;
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = (calc->numVerts > BKE_MESH_OMP_LIMIT);
settings.use_threading = (calc->numVerts > 10000);
settings.userdata_chunk = &nearest;
settings.userdata_chunk_size = sizeof(nearest);
BLI_task_parallel_range(

View File

@ -1269,15 +1269,21 @@ int BKE_unit_base_of_type_get(int system, int type)
const char *BKE_unit_name_get(const void *usys_pt, int index)
{
return ((bUnitCollection *)usys_pt)->units[index].name;
const bUnitCollection *usys = usys_pt;
BLI_assert((uint)index < (uint)usys->length);
return usys->units[index].name;
}
const char *BKE_unit_display_name_get(const void *usys_pt, int index)
{
return ((bUnitCollection *)usys_pt)->units[index].name_display;
const bUnitCollection *usys = usys_pt;
BLI_assert((uint)index < (uint)usys->length);
return usys->units[index].name_display;
}
const char *BKE_unit_identifier_get(const void *usys_pt, int index)
{
const bUnitDef *unit = ((const bUnitCollection *)usys_pt)->units + index;
const bUnitCollection *usys = usys_pt;
BLI_assert((uint)index < (uint)usys->length);
const bUnitDef *unit = &usys->units[index];
if (unit->identifier == NULL) {
BLI_assert_msg(0, "identifier for this unit is not specified yet");
}
@ -1286,10 +1292,14 @@ const char *BKE_unit_identifier_get(const void *usys_pt, int index)
double BKE_unit_scalar_get(const void *usys_pt, int index)
{
return ((bUnitCollection *)usys_pt)->units[index].scalar;
const bUnitCollection *usys = usys_pt;
BLI_assert((uint)index < (uint)usys->length);
return usys->units[index].scalar;
}
bool BKE_unit_is_suppressed(const void *usys_pt, int index)
{
return (((bUnitCollection *)usys_pt)->units[index].flag & B_UNIT_DEF_SUPPRESS) != 0;
const bUnitCollection *usys = usys_pt;
BLI_assert((uint)index < (uint)usys->length);
return (usys->units[index].flag & B_UNIT_DEF_SUPPRESS) != 0;
}

View File

@ -810,7 +810,7 @@ static bool vfont_to_curve(Object *ob,
VChar *che;
struct CharTrans *chartransdata = NULL, *ct;
struct TempLineInfo *lineinfo;
float *f, xof, yof, xtrax, linedist;
float xof, yof, xtrax, linedist;
float twidth = 0, maxlen = 0;
int i, slen, j;
int curbox;
@ -1536,25 +1536,59 @@ static bool vfont_to_curve(Object *ob,
/* Cursor first. */
if (ef) {
float si, co;
ct = &chartransdata[ef->pos];
si = sinf(ct->rot);
co = cosf(ct->rot);
const float cursor_width = 0.04f;
const float cursor_half = 0.02f;
const float xoffset = ct->xof;
const float yoffset = ct->yof;
f = ef->textcurs[0];
/* By default the cursor is exactly between the characters
* and matches the rotation of the character to the right. */
float cursor_left = 0.0f - cursor_half;
float rotation = ct->rot;
f[0] = font_size * (-0.02f * co + ct->xof);
f[1] = font_size * (0.1f * si - (0.25f * co) + ct->yof);
if (ef->selboxes) {
if (ef->selend >= ef->selstart) {
/* Cursor at right edge of a text selection. Match rotation to the character at the
* end of selection. Cursor is further right to show the selected characters better. */
rotation = chartransdata[ef->selend - 1].rot;
cursor_left = 0.0f;
}
else {
/* Cursor at the left edge of a text selection. Cursor
* is further left to show the selected characters better. */
cursor_left = 0.0f - cursor_width;
}
}
else if ((ef->pos == ef->len) && (ef->len > 0)) {
/* Nothing selected, but at the end of the string. Match rotation to previous character. */
rotation = chartransdata[ef->len - 1].rot;
}
f[2] = font_size * (0.02f * co + ct->xof);
f[3] = font_size * (-0.1f * si - (0.25f * co) + ct->yof);
/* We need the rotation to be around the bottom-left corner. So we make
* that the zero point before rotation, rotate, then apply offsets afterward. */
f[4] = font_size * (0.02f * co + 0.8f * si + ct->xof);
f[5] = font_size * (-0.1f * si + 0.75f * co + ct->yof);
/* Bottom left. */
ef->textcurs[0][0] = cursor_left;
ef->textcurs[0][1] = 0.0f;
/* Bottom right. */
ef->textcurs[1][0] = cursor_left + cursor_width;
ef->textcurs[1][1] = 0.0f;
/* Top left. */
ef->textcurs[3][0] = cursor_left;
ef->textcurs[3][1] = 1.0f;
/* Top right. */
ef->textcurs[2][0] = cursor_left + cursor_width;
ef->textcurs[2][1] = 1.0f;
f[6] = font_size * (-0.02f * co + 0.8f * si + ct->xof);
f[7] = font_size * (0.1f * si + 0.75f * co + ct->yof);
for (int vert = 0; vert < 4; vert++) {
float temp_fl[2];
/* Rotate around the cursor's bottom-left corner. */
rotate_v2_v2fl(temp_fl, &ef->textcurs[vert][0], -rotation);
ef->textcurs[vert][0] = font_size * (xoffset + temp_fl[0]);
/* Shift down vertically so we are 25% below and 75% above baseline. */
ef->textcurs[vert][1] = font_size * (yoffset + temp_fl[1] - 0.25f);
}
}
if (mode == FO_SELCHANGE) {

View File

@ -321,12 +321,25 @@ void BLI_path_sequence_encode(
char *string, const char *head, const char *tail, unsigned short numlen, int pic);
/**
* Remove redundant characters from \a path and optionally make absolute.
* Remove redundant characters from \a path.
*
* \param path: Can be any input, and this function converts it to a regular full path.
* Also removes garbage from directory paths, like `/../` or double slashes etc.
* The following operations are performed:
* - Redundant path components such as `//`, `/./` & `./` (prefix) are stripped.
* (with the exception of `//` prefix used for blend-file relative paths).
* - `..` are resolved so `<parent>/../<child>/` resolves to `<child>/`.
* Note that the resulting path may begin with `..` if it's relative.
*
* \note \a path isn't protected for max string names.
* Details:
* - The slash direction is expected to be native (see #SEP).
* When calculating a canonical paths you may need to run #BLI_path_slash_native first.
* #BLI_path_cmp_normalized can be used for canonical path comparison.
* - Trailing slashes are left intact (unlike Python which strips them).
* - Handling paths beginning with `..` depends on them being absolute or relative.
* For absolute paths they are removed (e.g. `/../path` becomes `/path`).
* For relative paths they are kept as it's valid to reference paths above a relative location
* such as `//../parent` or `../parent`.
*
* \param path: The path to a file or directory which can be absolute or relative.
*/
void BLI_path_normalize(char *path) ATTR_NONNULL(1);
/**

View File

@ -551,9 +551,10 @@ template<typename T> class MutableSpan {
* Replace a subset of all elements with the given value. This invokes undefined behavior when
* one of the indices is out of bounds.
*/
constexpr void fill_indices(Span<int64_t> indices, const T &value)
template<typename IndexT> constexpr void fill_indices(Span<IndexT> indices, const T &value)
{
for (int64_t i : indices) {
static_assert(std::is_integral_v<IndexT>);
for (IndexT i : indices) {
BLI_assert(i < size_);
data_[i] = value;
}

View File

@ -106,8 +106,8 @@ typedef struct BLI_mempool_chunk {
* The mempool, stores and tracks memory \a chunks and elements within those chunks \a free.
*/
struct BLI_mempool {
/* Serialize access to mempools when debugging with ASAN. */
#ifdef WITH_ASAN
/** Serialize access to memory-pools when debugging with ASAN. */
ThreadMutex mutex;
#endif
/** Single linked list of allocated chunks. */

View File

@ -115,13 +115,12 @@ void BLI_path_sequence_encode(
void BLI_path_normalize(char *path)
{
const char *path_orig = path;
int path_len;
ptrdiff_t a;
char *start, *eind;
path_len = strlen(path);
int path_len = strlen(path);
/*
* Skip absolute prefix.
* ---------------------
*/
if (path[0] == '/' && path[1] == '/') {
path = path + 2; /* Leave the initial `//` untouched. */
path_len -= 2;
@ -157,10 +156,14 @@ void BLI_path_normalize(char *path)
}
}
#endif /* WIN32 */
/* Works on WIN32 as well, because the drive component is skipped. */
const bool is_relative = path[0] && (path[0] != SEP);
/*
* Strip redundant path components.
* --------------------------------
*/
/* NOTE(@ideasman42):
* `memmove(start, eind, strlen(eind) + 1);`
* is the same as
@ -189,7 +192,6 @@ void BLI_path_normalize(char *path)
else {
break;
}
} while (i > 0);
if (i < i_end) {
@ -200,8 +202,7 @@ void BLI_path_normalize(char *path)
}
}
}
/* Remove redundant `./` prefix, while it could be kept, it confuses the loop below. */
/* Remove redundant `./` prefix as it's redundant & complicates collapsing directories. */
if (is_relative) {
if ((path_len > 2) && (path[0] == '.') && (path[1] == SEP)) {
memmove(path, path + 2, (path_len - 2) + 1);
@ -209,69 +210,127 @@ void BLI_path_normalize(char *path)
}
}
const ptrdiff_t a_start = is_relative ? 0 : 1;
start = path;
while ((start = strstr(start, SEP_STR ".."))) {
if (!ELEM(start[3], SEP, '\0')) {
start += 3;
continue;
}
/*
* Collapse Parent Directories.
* ----------------------------
*
* Example: `<parent>/<child>/../` -> `<parent>/`
*
* Notes:
* - Leading `../` are skipped as they cannot be collapsed (see `start_base`).
* - Multiple parent directories are handled at once to reduce number of `memmove` calls.
*/
a = (start - path) - 1;
if (a >= a_start) {
/* `<prefix>/<parent>/../<postfix> => <prefix>/<postfix>`. */
eind = start + (4 - 1) /* `strlen("/../") - 1` */; /* Strip "/.." and keep the char after. */
while (a > 0 && path[a] != SEP) { /* Find start of `<parent>`. */
a--;
}
#define IS_PARENT_DIR(p) ((p)[0] == '.' && (p)[1] == '.' && ELEM((p)[2], SEP, '\0'))
if (is_relative && (a == 0) && *eind) {
/* When the path does not start with a slash, don't copy the first `/` to the destination
* as it will make a relative path into an absolute path. */
eind += 1;
}
const size_t eind_len = path_len - (eind - path);
BLI_assert(eind_len == strlen(eind));
/* Only remove the parent if it's not also a `..`. */
if (is_relative && STRPREFIX(path + ((path[a] == SEP) ? a + 1 : a), ".." SEP_STR)) {
start += 3 /* `strlen("/..")` */;
}
else {
start = path + a;
BLI_assert(start < eind);
memmove(start, eind, eind_len + 1);
path_len -= (eind - start);
BLI_assert(strlen(path) == path_len);
BLI_assert(!is_relative || (path[0] != SEP));
}
}
else {
/* Support for odd paths: eg `/../home/me` --> `/home/me`
* this is a valid path in blender but we can't handle this the usual way below
* simply strip this prefix then evaluate the path as usual.
* Python's `os.path.normpath()` does this. */
/* NOTE: previous version of following call used an offset of 3 instead of 4,
* which meant that the `/../home/me` example actually became `home/me`.
* Using offset of 3 gives behavior consistent with the aforementioned
* Python routine. */
eind = start + 3;
const size_t eind_len = path_len - (eind - path);
memmove(start, eind, eind_len + 1);
path_len -= 3;
BLI_assert(strlen(path) == path_len);
BLI_assert(!is_relative || (path[0] != SEP));
}
/* First non prefix path component. */
char *path_first_non_slash_part = path;
while (*path_first_non_slash_part && *path_first_non_slash_part == SEP) {
path_first_non_slash_part++;
}
if (is_relative && path_len == 0 && (path == path_orig)) {
path[0] = '.';
path[1] = '\0';
path_len += 1;
/* Maintain a pointer to the end of leading `..` component.
* Skip leading parent directories because logically they cannot be collapsed. */
char *start_base = path_first_non_slash_part;
while (IS_PARENT_DIR(start_base)) {
start_base += 3;
}
/* It's possible the entire path is made of up `../`,
* in this case there is nothing to do. */
if (start_base < path + path_len) {
/* Step over directories, always starting out on the character after the slash. */
char *start = start_base;
char *start_temp;
while (((start_temp = strstr(start, SEP_STR ".." SEP_STR)) ||
/* Check if the string ends with `/..` & assign when found, else NULL. */
(start_temp = ((start <= &path[path_len - 3]) &&
STREQ(&path[path_len - 3], SEP_STR "..")) ?
&path[path_len - 3] :
NULL))) {
start = start_temp + 1; /* Skip the `/`. */
BLI_assert(start_base != start);
/* Step `end_all` forwards (over all `..`). */
char *end_all = start;
do {
BLI_assert(IS_PARENT_DIR(end_all));
end_all += 3;
BLI_assert(end_all <= path + path_len + 1);
} while (IS_PARENT_DIR(end_all));
/* Step `start` backwards (until `end` meets `end_all` or `start` meets `start_base`). */
char *end = start;
do {
BLI_assert(start_base < start);
BLI_assert(*(start - 1) == SEP);
/* Step `start` backwards one. */
do {
start--;
} while (start_base < start && *(start - 1) != SEP);
BLI_assert(*start != SEP); /* Ensure the loop ran at least once. */
BLI_assert(!IS_PARENT_DIR(start)); /* Clamping by `start_base` prevents this. */
end += 3;
} while ((start != start_base) && (end < end_all));
if (end > path + path_len) {
BLI_assert(*(end - 1) == '\0');
end--;
end_all--;
}
BLI_assert(start < end && start >= start_base);
const size_t start_len = path_len - (end - path);
memmove(start, end, start_len + 1);
path_len -= end - start;
BLI_assert(strlen(path) == path_len);
/* Other `..` directories may have been moved to the front, step `start_base` past them. */
if (UNLIKELY(start == start_base && (end != end_all))) {
start_base += (end_all - end);
start = (start_base < path + path_len) ? start_base : start_base - 1;
}
}
}
BLI_assert(strlen(path) == path_len);
/* Characters before the `start_base` must *only* be `../../../` (multiples of 3). */
BLI_assert((start_base - path_first_non_slash_part) % 3 == 0);
/* All `..` ahead of `start_base` were collapsed (including trailing `/..`). */
BLI_assert(!(start_base < path + path_len) ||
(!strstr(start_base, SEP_STR ".." SEP_STR) &&
!(path_len >= 3 && STREQ(&path[path_len - 3], SEP_STR ".."))));
/*
* Final Prefix Cleanup.
* ---------------------
*/
if (is_relative) {
if (path_len == 0 && (path == path_orig)) {
path[0] = '.';
path[1] = '\0';
path_len = 1;
}
}
else {
/* Support for odd paths: eg `/../home/me` --> `/home/me`
* this is a valid path in blender but we can't handle this the usual way below
* simply strip this prefix then evaluate the path as usual.
* Python's `os.path.normpath()` does this. */
if (start_base != path_first_non_slash_part) {
char *start = start_base > path + path_len ? start_base - 1 : start_base;
/* As long as `start` is set correctly, it should never begin with `../`
* as these directories are expected to be skipped. */
BLI_assert(!IS_PARENT_DIR(start));
const size_t start_len = path_len - (start - path);
memmove(path_first_non_slash_part, start, start_len + 1);
BLI_assert(strlen(start) == start_len);
path_len -= start - path_first_non_slash_part;
BLI_assert(strlen(path) == path_len);
}
}
BLI_assert(strlen(path) == path_len);
#undef IS_PARENT_DIR
}
void BLI_path_normalize_dir(char *dir, size_t dir_maxlen)

View File

@ -84,11 +84,16 @@ TEST(path_util, Normalize_Dot)
NORMALIZE("/a/./././b/", "/a/b/");
}
/* #BLI_path_normalize: complex "/./" -> "/", "//" -> "/", "./path/../" -> "./"
 * (absolute input paths). */
TEST(path_util, Normalize_ComplexAbsolute)
{
  NORMALIZE("/a/./b/./c/./.././.././", "/a/");
  NORMALIZE("/a//.//b//.//c//.//..//.//..//.//", "/a/");
}
/* #BLI_path_normalize: relative paths where every component is eventually
 * collapsed by a `..`, leaving only the current directory `.`. */
TEST(path_util, Normalize_ComplexRelative)
{
NORMALIZE("a/b/c/d/e/f/g/../a/../b/../../c/../../../d/../../../..", ".");
NORMALIZE("a/b/c/d/e/f/g/../a/../../../../b/../../../c/../../d/..", ".");
}
/* #BLI_path_normalize: "//" -> "/" */
TEST(path_util, Normalize_DoubleSlash)
{

View File

@ -225,7 +225,7 @@ TEST(span, FillIndices)
{
std::array<int, 5> a = {0, 0, 0, 0, 0};
MutableSpan<int> a_span(a);
a_span.fill_indices({0, 2, 3}, 1);
a_span.fill_indices(Span({0, 2, 3}), 1);
EXPECT_EQ(a[0], 1);
EXPECT_EQ(a[1], 0);
EXPECT_EQ(a[2], 1);

View File

@ -15,9 +15,9 @@
#include "BLI_bitmap.h"
#include "BLI_linklist_stack.h"
#include "BLI_math.h"
#include "BLI_stack.h"
#include "BLI_task.h"
#include "BLI_utildefines.h"
#include "BLI_vector.hh"
#include "BKE_customdata.h"
#include "BKE_editmesh.h"
@ -470,7 +470,7 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
const int cd_loop_clnors_offset,
const bool has_clnors,
/* Cache. */
BLI_Stack *edge_vectors,
blender::Vector<blender::float3, 16> *edge_vectors,
/* Iterate. */
BMLoop *l_curr,
/* Result. */
@ -534,7 +534,7 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
normalize_v3(vec_prev);
}
BKE_lnor_space_define(lnor_space, r_lnos[l_curr_index], vec_curr, vec_prev, nullptr);
BKE_lnor_space_define(lnor_space, r_lnos[l_curr_index], vec_curr, vec_prev, {});
/* We know there is only one loop in this space,
* no need to create a linklist in this case... */
BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, l_curr_index, l_curr, true);
@ -586,7 +586,7 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
MLoopNorSpace *lnor_space = r_lnors_spacearr ? BKE_lnor_space_create(r_lnors_spacearr) :
nullptr;
BLI_assert((edge_vectors == nullptr) || BLI_stack_is_empty(edge_vectors));
BLI_assert((edge_vectors == nullptr) || edge_vectors->is_empty());
lfan_pivot = l_curr;
lfan_pivot_index = BM_elem_index_get(lfan_pivot);
@ -605,7 +605,7 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
copy_v3_v3(vec_curr, vec_org);
if (r_lnors_spacearr) {
BLI_stack_push(edge_vectors, vec_org);
edge_vectors->append(vec_org);
}
}
@ -671,7 +671,7 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
BKE_lnor_space_add_loop(r_lnors_spacearr, lnor_space, lfan_pivot_index, lfan_pivot, false);
if (e_next != e_org) {
/* We store here all edges-normalized vectors processed. */
BLI_stack_push(edge_vectors, vec_next);
edge_vectors->append(vec_next);
}
}
@ -700,7 +700,7 @@ static int bm_mesh_loops_calc_normals_for_loop(BMesh *bm,
lnor_len = 1.0f;
}
BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_next, edge_vectors);
BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_next, *edge_vectors);
if (has_clnors) {
if (clnors_invalid) {
@ -863,19 +863,20 @@ static void bm_edge_tag_from_smooth_and_set_sharp(const float (*fnos)[3],
* operating on vertices this is needed for multi-threading
* so there is a guarantee that each thread has isolated loops.
*/
static void bm_mesh_loops_calc_normals_for_vert_with_clnors(BMesh *bm,
const float (*vcos)[3],
const float (*fnos)[3],
float (*r_lnos)[3],
const short (*clnors_data)[2],
const int cd_loop_clnors_offset,
const bool do_rebuild,
const float split_angle_cos,
/* TLS */
MLoopNorSpaceArray *r_lnors_spacearr,
BLI_Stack *edge_vectors,
/* Iterate over. */
BMVert *v)
static void bm_mesh_loops_calc_normals_for_vert_with_clnors(
BMesh *bm,
const float (*vcos)[3],
const float (*fnos)[3],
float (*r_lnos)[3],
const short (*clnors_data)[2],
const int cd_loop_clnors_offset,
const bool do_rebuild,
const float split_angle_cos,
/* TLS */
MLoopNorSpaceArray *r_lnors_spacearr,
blender::Vector<blender::float3, 16> *edge_vectors,
/* Iterate over. */
BMVert *v)
{
/* Respecting face order is necessary so the initial starting loop is consistent
* with looping over loops of all faces.
@ -992,7 +993,7 @@ static void bm_mesh_loops_calc_normals_for_vert_without_clnors(
const float split_angle_cos,
/* TLS */
MLoopNorSpaceArray *r_lnors_spacearr,
BLI_Stack *edge_vectors,
blender::Vector<blender::float3, 16> *edge_vectors,
/* Iterate over. */
BMVert *v)
{
@ -1078,7 +1079,7 @@ static void bm_mesh_loops_calc_normals__single_threaded(BMesh *bm,
MLoopNorSpaceArray _lnors_spacearr = {nullptr};
BLI_Stack *edge_vectors = nullptr;
std::unique_ptr<blender::Vector<blender::float3, 16>> edge_vectors = nullptr;
{
char htype = 0;
@ -1095,7 +1096,7 @@ static void bm_mesh_loops_calc_normals__single_threaded(BMesh *bm,
}
if (r_lnors_spacearr) {
BKE_lnor_spacearr_init(r_lnors_spacearr, bm->totloop, MLNOR_SPACEARR_BMLOOP_PTR);
edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
edge_vectors = std::make_unique<blender::Vector<blender::float3, 16>>();
}
/* Clear all loops' tags (means none are to be skipped for now). */
@ -1138,7 +1139,7 @@ static void bm_mesh_loops_calc_normals__single_threaded(BMesh *bm,
clnors_data,
cd_loop_clnors_offset,
has_clnors,
edge_vectors,
edge_vectors.get(),
l_curr,
r_lnos,
r_lnors_spacearr);
@ -1146,7 +1147,6 @@ static void bm_mesh_loops_calc_normals__single_threaded(BMesh *bm,
}
if (r_lnors_spacearr) {
BLI_stack_free(edge_vectors);
if (r_lnors_spacearr == &_lnors_spacearr) {
BKE_lnor_spacearr_free(r_lnors_spacearr);
}
@ -1169,7 +1169,7 @@ typedef struct BMLoopsCalcNormalsWithCoordsData {
} BMLoopsCalcNormalsWithCoordsData;
typedef struct BMLoopsCalcNormalsWithCoords_TLS {
BLI_Stack *edge_vectors;
blender::Vector<blender::float3, 16> *edge_vectors;
/** Copied from #BMLoopsCalcNormalsWithCoordsData.r_lnors_spacearr when it's not nullptr. */
MLoopNorSpaceArray *lnors_spacearr;
@ -1182,7 +1182,7 @@ static void bm_mesh_loops_calc_normals_for_vert_init_fn(const void *__restrict u
auto *data = static_cast<const BMLoopsCalcNormalsWithCoordsData *>(userdata);
auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
if (data->r_lnors_spacearr) {
tls_data->edge_vectors = BLI_stack_new(sizeof(float[3]), __func__);
tls_data->edge_vectors = MEM_new<blender::Vector<blender::float3, 16>>(__func__);
BKE_lnor_spacearr_tls_init(data->r_lnors_spacearr, &tls_data->lnors_spacearr_buf);
tls_data->lnors_spacearr = &tls_data->lnors_spacearr_buf;
}
@ -1210,7 +1210,7 @@ static void bm_mesh_loops_calc_normals_for_vert_free_fn(const void *__restrict u
auto *tls_data = static_cast<BMLoopsCalcNormalsWithCoords_TLS *>(chunk);
if (data->r_lnors_spacearr) {
BLI_stack_free(tls_data->edge_vectors);
MEM_delete(tls_data->edge_vectors);
}
}

View File

@ -11,6 +11,7 @@
#include "GPU_texture.h"
#include "BKE_image.h"
#include "BKE_texture.h"
#include "DNA_ID.h"
@ -50,6 +51,9 @@ bool operator==(const CachedTextureKey &a, const CachedTextureKey &b)
CachedTexture::CachedTexture(
Tex *texture, const Scene *scene, int2 size, float2 offset, float2 scale)
{
ImagePool *image_pool = BKE_image_pool_new();
BKE_texture_fetch_images_for_pool(texture, image_pool);
Array<float4> color_pixels(size.x * size.y);
Array<float> value_pixels(size.x * size.y);
threading::parallel_for(IndexRange(size.y), 1, [&](const IndexRange sub_y_range) {
@ -61,7 +65,7 @@ CachedTexture::CachedTexture(
/* Note that it is expected that the offset is scaled by the scale. */
coordinates = (coordinates + offset) * scale;
TexResult texture_result;
BKE_texture_get_value(scene, texture, coordinates, &texture_result, true);
BKE_texture_get_value_ex(scene, texture, coordinates, &texture_result, image_pool, true);
color_pixels[y * size.x + x] = float4(texture_result.trgba);
value_pixels[y * size.x + x] = texture_result.talpha ? texture_result.trgba[3] :
texture_result.tin;
@ -69,6 +73,8 @@ CachedTexture::CachedTexture(
}
});
BKE_image_pool_free(image_pool);
color_texture_ = GPU_texture_create_2d("Cached Color Texture",
size.x,
size.y,

View File

@ -71,18 +71,12 @@ static const DTreeContext *find_active_context(const DerivedNodeTree &tree)
}
/* Return the output node which is marked as NODE_DO_OUTPUT. If multiple types of output nodes are
* marked, then the preference will be CMP_NODE_COMPOSITE > CMP_NODE_VIEWER > CMP_NODE_SPLITVIEWER.
* marked, then the preference will be CMP_NODE_VIEWER > CMP_NODE_SPLITVIEWER > CMP_NODE_COMPOSITE.
* If no output node exists, a null node will be returned. */
static DNode find_output_in_context(const DTreeContext *context)
{
const bNodeTree &tree = context->btree();
for (const bNode *node : tree.nodes_by_type("CompositorNodeComposite")) {
if (node->flag & NODE_DO_OUTPUT) {
return DNode(context, node);
}
}
for (const bNode *node : tree.nodes_by_type("CompositorNodeViewer")) {
if (node->flag & NODE_DO_OUTPUT) {
return DNode(context, node);
@ -95,6 +89,12 @@ static DNode find_output_in_context(const DTreeContext *context)
}
}
for (const bNode *node : tree.nodes_by_type("CompositorNodeComposite")) {
if (node->flag & NODE_DO_OUTPUT) {
return DNode(context, node);
}
}
return DNode();
}

View File

@ -34,6 +34,7 @@ void node_composite_separate_rgba(vec4 color, out float r, out float g, out floa
/* Combine H, S, V, A channels into an RGBA color. */
void node_composite_combine_hsva(float h, float s, float v, float a, out vec4 color)
{
hsv_to_rgb(vec4(h, s, v, a), color);
/* Clamp each RGB channel to be non-negative; the conversion can yield negative
 * components for out-of-range HSV inputs. Alpha is left untouched. */
color.rgb = max(color.rgb, vec3(0.0));
}
void node_composite_separate_hsva(vec4 color, out float h, out float s, out float v, out float a)
@ -51,6 +52,7 @@ void node_composite_separate_hsva(vec4 color, out float h, out float s, out floa
/* Combine H, S, L, A channels into an RGBA color. */
void node_composite_combine_hsla(float h, float s, float l, float a, out vec4 color)
{
hsl_to_rgb(vec4(h, s, l, a), color);
/* Clamp each RGB channel to be non-negative; the conversion can yield negative
 * components for out-of-range HSL inputs. Alpha is left untouched. */
color.rgb = max(color.rgb, vec3(0.0));
}
void node_composite_separate_hsla(vec4 color, out float h, out float s, out float l, out float a)

View File

@ -184,29 +184,29 @@ BLI_INLINE const CustomData *mesh_cd_vdata_get_from_mesh(const Mesh *me)
/**
 * Return the original #BMFace for the final face index \a idx using the
 * `p_origindex` mapping, or null when there is no edit-mesh or no mapping
 * (#ORIGINDEX_NONE).
 */
BLI_INLINE BMFace *bm_original_face_get(const MeshRenderData *mr, int idx)
{
  return ((mr->p_origindex != nullptr) && (mr->p_origindex[idx] != ORIGINDEX_NONE) && mr->bm) ?
             BM_face_at_index(mr->bm, mr->p_origindex[idx]) :
             nullptr;
}
/**
 * Return the original #BMEdge for the final edge index \a idx using the
 * `e_origindex` mapping, or null when there is no edit-mesh or no mapping
 * (#ORIGINDEX_NONE).
 */
BLI_INLINE BMEdge *bm_original_edge_get(const MeshRenderData *mr, int idx)
{
  return ((mr->e_origindex != nullptr) && (mr->e_origindex[idx] != ORIGINDEX_NONE) && mr->bm) ?
             BM_edge_at_index(mr->bm, mr->e_origindex[idx]) :
             nullptr;
}
/**
 * Return the original #BMVert for the final vertex index \a idx using the
 * `v_origindex` mapping, or null when there is no edit-mesh or no mapping
 * (#ORIGINDEX_NONE).
 */
BLI_INLINE BMVert *bm_original_vert_get(const MeshRenderData *mr, int idx)
{
  return ((mr->v_origindex != nullptr) && (mr->v_origindex[idx] != ORIGINDEX_NONE) && mr->bm) ?
             BM_vert_at_index(mr->bm, mr->v_origindex[idx]) :
             nullptr;
}
BLI_INLINE const float *bm_vert_co_get(const MeshRenderData *mr, const BMVert *eve)
{
const float(*vert_coords)[3] = mr->bm_vert_coords;
if (vert_coords != NULL) {
if (vert_coords != nullptr) {
return vert_coords[BM_elem_index_get(eve)];
}
@ -217,7 +217,7 @@ BLI_INLINE const float *bm_vert_co_get(const MeshRenderData *mr, const BMVert *e
BLI_INLINE const float *bm_vert_no_get(const MeshRenderData *mr, const BMVert *eve)
{
const float(*vert_normals)[3] = mr->bm_vert_normals;
if (vert_normals != NULL) {
if (vert_normals != nullptr) {
return vert_normals[BM_elem_index_get(eve)];
}
@ -228,7 +228,7 @@ BLI_INLINE const float *bm_vert_no_get(const MeshRenderData *mr, const BMVert *e
BLI_INLINE const float *bm_face_no_get(const MeshRenderData *mr, const BMFace *efa)
{
const float(*poly_normals)[3] = mr->bm_poly_normals;
if (poly_normals != NULL) {
if (poly_normals != nullptr) {
return poly_normals[BM_elem_index_get(efa)];
}

View File

@ -926,8 +926,7 @@ float ED_view3d_grid_view_scale(Scene *scene,
const void *usys;
int len;
BKE_unit_system_get(scene->unit.system, B_UNIT_LENGTH, &usys, &len);
if (usys) {
if (usys && i < len) {
*r_grid_unit = IFACE_(BKE_unit_display_name_get(usys, len - i - 1));
}
}

View File

@ -178,7 +178,7 @@ class OptionalOutputsFunction : public MultiFunction {
{
if (params.single_output_is_required(0, "Out 1")) {
MutableSpan<int> values = params.uninitialized_single_output<int>(0, "Out 1");
values.fill_indices(mask, 5);
values.fill_indices(mask.indices(), 5);
}
MutableSpan<std::string> values = params.uninitialized_single_output<std::string>(1, "Out 2");
for (const int i : mask) {

View File

@ -518,8 +518,9 @@ static bke::CurvesGeometry convert_curves_to_nurbs(
};
auto catmull_rom_to_nurbs = [&](IndexMask selection) {
dst_curves.nurbs_orders_for_write().fill_indices(selection, 4);
dst_curves.nurbs_knots_modes_for_write().fill_indices(selection, NURBS_KNOT_MODE_BEZIER);
dst_curves.nurbs_orders_for_write().fill_indices(selection.indices(), 4);
dst_curves.nurbs_knots_modes_for_write().fill_indices(selection.indices(),
NURBS_KNOT_MODE_BEZIER);
fill_weights_if_necessary(selection);
threading::parallel_for(selection.index_range(), 512, [&](IndexRange range) {
@ -544,7 +545,7 @@ static bke::CurvesGeometry convert_curves_to_nurbs(
};
auto poly_to_nurbs = [&](IndexMask selection) {
dst_curves.nurbs_orders_for_write().fill_indices(selection, 4);
dst_curves.nurbs_orders_for_write().fill_indices(selection.indices(), 4);
bke::curves::copy_point_data(
src_points_by_curve, dst_points_by_curve, selection, src_positions, dst_positions);
fill_weights_if_necessary(selection);
@ -553,7 +554,7 @@ static bke::CurvesGeometry convert_curves_to_nurbs(
* start/end. */
if (src_cyclic.is_single()) {
dst_curves.nurbs_knots_modes_for_write().fill_indices(
selection,
selection.indices(),
src_cyclic.get_internal_single() ? NURBS_KNOT_MODE_NORMAL : NURBS_KNOT_MODE_ENDPOINT);
}
else {
@ -576,8 +577,9 @@ static bke::CurvesGeometry convert_curves_to_nurbs(
const Span<float3> src_handles_l = src_curves.handle_positions_left();
const Span<float3> src_handles_r = src_curves.handle_positions_right();
dst_curves.nurbs_orders_for_write().fill_indices(selection, 4);
dst_curves.nurbs_knots_modes_for_write().fill_indices(selection, NURBS_KNOT_MODE_BEZIER);
dst_curves.nurbs_orders_for_write().fill_indices(selection.indices(), 4);
dst_curves.nurbs_knots_modes_for_write().fill_indices(selection.indices(),
NURBS_KNOT_MODE_BEZIER);
fill_weights_if_necessary(selection);
threading::parallel_for(selection.index_range(), 512, [&](IndexRange range) {

View File

@ -1065,7 +1065,7 @@ bke::CurvesGeometry trim_curves(const bke::CurvesGeometry &src_curves,
else {
/* Only trimmed curves are no longer cyclic. */
if (bke::SpanAttributeWriter cyclic = dst_attributes.lookup_for_write_span<bool>("cyclic")) {
cyclic.span.fill_indices(selection, false);
cyclic.span.fill_indices(selection.indices(), false);
cyclic.finish();
}

View File

@ -17,6 +17,7 @@
#include "BLI_linklist.h"
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_memory_utils.hh"
#include "BLI_path_util.h"
#include "BLI_string.h"
@ -378,6 +379,7 @@ static std::string get_in_memory_texture_filename(Image *ima)
ImageFormatData imageFormat;
BKE_image_format_from_imbuf(&imageFormat, imbuf);
BKE_image_release_ibuf(ima, imbuf, nullptr);
char file_name[FILE_MAX];
/* Use the image name for the file name. */
@ -405,6 +407,7 @@ static void export_in_memory_texture(Image *ima,
}
ImBuf *imbuf = BKE_image_acquire_ibuf(ima, nullptr, nullptr);
BLI_SCOPED_DEFER([&]() { BKE_image_release_ibuf(ima, imbuf, nullptr); });
if (!imbuf) {
return;
}

View File

@ -687,10 +687,12 @@ typedef struct FluidDomainSettings {
int viewsettings;
char _pad12[4]; /* Unused. */
/* Pointcache options. */
/* Smoke uses only one cache from now on (index [0]), but keeping the array for now for reading
* old files. */
struct PointCache *point_cache[2]; /* Definition is in DNA_object_force_types.h. */
/**
* Point-cache options.
* Smoke uses only one cache from now on (index [0]),
* but keeping the array for now for reading old files.
*/
struct PointCache *point_cache[2];
struct ListBase ptcaches[2];
int cache_comp;
int cache_high_comp;

View File

@ -214,6 +214,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
blender::Array<blender::float3> loop_normals;
WeightedNormalDataAggregateItem *items_data = nullptr;
Array<int> item_index_per_corner(corner_verts.size(), 0);
int items_num = 0;
if (keep_sharp) {
BLI_bitmap *done_loops = BLI_BITMAP_NEW(corner_verts.size(), __func__);
@ -255,7 +256,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
itdt->curr_strength = FACE_STRENGTH_WEAK;
MLoopNorSpace *lnor_space = lnors_spacearr.lspacearr[ml_index];
lnor_space->user_data = itdt;
item_index_per_corner[ml_index] = item_index;
if (!(lnor_space->flags & MLNOR_SPACE_IS_SINGLE)) {
for (LinkNode *lnode = lnor_space->loops; lnode; lnode = lnode->next) {
@ -294,9 +295,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
for (const int ml_index : polys[poly_index]) {
const int mv_index = corner_verts[ml_index];
WeightedNormalDataAggregateItem *item_data =
keep_sharp ? static_cast<WeightedNormalDataAggregateItem *>(
lnors_spacearr.lspacearr[ml_index]->user_data) :
&items_data[mv_index];
keep_sharp ? &items_data[item_index_per_corner[ml_index]] : &items_data[mv_index];
aggregate_item_normal(
wnmd, wn_data, item_data, mv_index, poly_index, mp_val, use_face_influence);
@ -312,9 +311,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
const int poly_index = loop_to_poly[ml_index];
const int mv_index = corner_verts[ml_index];
WeightedNormalDataAggregateItem *item_data =
keep_sharp ? static_cast<WeightedNormalDataAggregateItem *>(
lnors_spacearr.lspacearr[ml_index]->user_data) :
&items_data[mv_index];
keep_sharp ? &items_data[item_index_per_corner[ml_index]] : &items_data[mv_index];
aggregate_item_normal(
wnmd, wn_data, item_data, mv_index, poly_index, ml_val, use_face_influence);
@ -337,8 +334,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
* (before this modifier is applied, at start of this function),
* so no need to recompute them here. */
for (int ml_index = 0; ml_index < corner_verts.size(); ml_index++) {
WeightedNormalDataAggregateItem *item_data = static_cast<WeightedNormalDataAggregateItem *>(
lnors_spacearr.lspacearr[ml_index]->user_data);
WeightedNormalDataAggregateItem *item_data = &items_data[item_index_per_corner[ml_index]];
if (!is_zero_v3(item_data->normal)) {
copy_v3_v3(loop_normals[ml_index], item_data->normal);
}
@ -366,7 +362,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd,
/* NOTE: in theory, we could avoid this extra allocation & copying...
* But think we can live with it for now,
* and it makes code simpler & cleaner. */
blender::Array<blender::float3> vert_normals(verts_num, float3(0));
blender::Array<blender::float3> vert_normals(verts_num, float3(0.0f));
for (int ml_index = 0; ml_index < corner_verts.size(); ml_index++) {
const int mv_index = corner_verts[ml_index];

View File

@ -282,13 +282,13 @@ class SampleCurveFunction : public mf::MultiFunction {
auto return_default = [&]() {
if (!sampled_positions.is_empty()) {
sampled_positions.fill_indices(mask, {0, 0, 0});
sampled_positions.fill_indices(mask.indices(), {0, 0, 0});
}
if (!sampled_tangents.is_empty()) {
sampled_tangents.fill_indices(mask, {0, 0, 0});
sampled_tangents.fill_indices(mask.indices(), {0, 0, 0});
}
if (!sampled_normals.is_empty()) {
sampled_normals.fill_indices(mask, {0, 0, 0});
sampled_normals.fill_indices(mask.indices(), {0, 0, 0});
}
};
@ -325,18 +325,18 @@ class SampleCurveFunction : public mf::MultiFunction {
auto fill_invalid = [&](const IndexMask mask) {
if (!sampled_positions.is_empty()) {
sampled_positions.fill_indices(mask, float3(0));
sampled_positions.fill_indices(mask.indices(), float3(0));
}
if (!sampled_tangents.is_empty()) {
sampled_tangents.fill_indices(mask, float3(0));
sampled_tangents.fill_indices(mask.indices(), float3(0));
}
if (!sampled_normals.is_empty()) {
sampled_normals.fill_indices(mask, float3(0));
sampled_normals.fill_indices(mask.indices(), float3(0));
}
if (!sampled_values.is_empty()) {
attribute_math::convert_to_static_type(source_data_->type(), [&](auto dummy) {
using T = decltype(dummy);
sampled_values.typed<T>().fill_indices(mask, {});
sampled_values.typed<T>().fill_indices(mask.indices(), {});
});
}
};

View File

@ -63,10 +63,10 @@ static void set_handle_type(bke::CurvesGeometry &curves,
const IndexMask selection = evaluator.get_evaluated_selection_as_mask();
if (mode & GEO_NODE_CURVE_HANDLE_LEFT) {
curves.handle_types_left_for_write().fill_indices(selection, new_handle_type);
curves.handle_types_left_for_write().fill_indices(selection.indices(), new_handle_type);
}
if (mode & GEO_NODE_CURVE_HANDLE_RIGHT) {
curves.handle_types_right_for_write().fill_indices(selection, new_handle_type);
curves.handle_types_right_for_write().fill_indices(selection.indices(), new_handle_type);
}
/* Eagerly calculate automatically derived handle positions if necessary. */

View File

@ -82,7 +82,7 @@ static void save_selection_as_attribute(Mesh &mesh,
attribute.span.slice(selection.as_range()).fill(true);
}
else {
attribute.span.fill_indices(selection, true);
attribute.span.fill_indices(selection.indices(), true);
}
attribute.finish();

View File

@ -162,7 +162,7 @@ class ProximityFunction : public mf::MultiFunction {
* comparison per vertex, so it's likely not worth it. */
MutableSpan<float> distances = params.uninitialized_single_output<float>(2, "Distance");
distances.fill_indices(mask, FLT_MAX);
distances.fill_indices(mask.indices(), FLT_MAX);
bool success = false;
if (target_.has_mesh()) {
@ -177,10 +177,10 @@ class ProximityFunction : public mf::MultiFunction {
if (!success) {
if (!positions.is_empty()) {
positions.fill_indices(mask, float3(0));
positions.fill_indices(mask.indices(), float3(0));
}
if (!distances.is_empty()) {
distances.fill_indices(mask, 0.0f);
distances.fill_indices(mask.indices(), 0.0f);
}
return;
}

View File

@ -255,7 +255,7 @@ class SampleNearestFunction : public mf::MultiFunction {
const VArray<float3> &positions = params.readonly_single_input<float3>(0, "Position");
MutableSpan<int> indices = params.uninitialized_single_output<int>(1, "Index");
if (!src_component_) {
indices.fill_indices(mask, 0);
indices.fill_indices(mask.indices(), 0);
return;
}

View File

@ -35,7 +35,7 @@ static void set_normal_mode(bke::CurvesGeometry &curves,
evaluator.set_selection(selection_field);
evaluator.evaluate();
const IndexMask selection = evaluator.get_evaluated_selection_as_mask();
curves.normal_mode_for_write().fill_indices(selection, mode);
curves.normal_mode_for_write().fill_indices(selection.indices(), mode);
curves.tag_normals_changed();
}

View File

@ -53,7 +53,7 @@ static void assign_material_to_faces(Mesh &mesh, const IndexMask selection, Mate
MutableAttributeAccessor attributes = mesh.attributes_for_write();
SpanAttributeWriter<int> material_indices = attributes.lookup_or_add_for_write_span<int>(
"material_index", ATTR_DOMAIN_FACE);
material_indices.span.fill_indices(selection, new_material_index);
material_indices.span.fill_indices(selection.indices(), new_material_index);
material_indices.finish();
}

View File

@ -79,6 +79,7 @@ dict_custom = {
"decrement",
"decrementing",
"deduplicate",
"deduplicates",
"deduplicating",
"deduplication",
"defocus",
@ -88,6 +89,7 @@ dict_custom = {
"denoised",
"denoiser",
"denoising",
"denormalized",
"dereference",
"dereferenced",
"dereferences",