diff --git a/source/blender/blenkernel/BKE_mesh.hh b/source/blender/blenkernel/BKE_mesh.hh index 95c67822020..d2e03ee3755 100644 --- a/source/blender/blenkernel/BKE_mesh.hh +++ b/source/blender/blenkernel/BKE_mesh.hh @@ -79,6 +79,52 @@ void normals_calc_poly_vert(Span vert_positions, MutableSpan poly_normals, MutableSpan vert_normals); +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name Face Corner Normal Calculation + * \{ */ + +/** + * Combined with the automatically calculated face corner normal, this gives a 3D + * coordinate space used to convert normals between the "custom normal" #short2 representation and + * a regular #float3 format. + */ +struct CornerNormalSpace { + /** Reference vector, orthogonal to corner normal. */ + float3 vec_ref; + /** Third vector, orthogonal to corner normal and #vec_ref. */ + float3 vec_ortho; + /** Reference angle around #vec_ortho, in [0, pi] range (0.0 marks space as invalid). */ + float ref_alpha; + /** Reference angle around corner normal, in [0, 2pi] range (0.0 marks space as invalid). */ + float ref_beta; +}; + +/** + * Storage for corner fan coordinate spaces for an entire mesh. + */ +struct CornerNormalSpaceArray { + /** + * The normal coordinate spaces, potentially shared between multiple face corners in a smooth fan + * connected to a vertex (and not per face corner). Depending on the mesh (the amount of sharing + * / number of sharp edges / size of each fan), there may be many fewer spaces than face corners, + * so they are stored in a separate array. + */ + Array spaces; + + /** + * The index of the data in the #spaces array for each face corner (the array size is the + * same as #Mesh::totloop). Rare -1 values define face corners without a coordinate space. 
+ */ + Array corner_space_indices; +}; + +void lnor_space_custom_normal_to_data(const CornerNormalSpace *lnor_space, + float3 lnor_no_custom, + const float custom_lnor[3], + short r_clnor_data[2]); + /** * Compute split normals, i.e. vertex normals associated with each poly (hence 'loop normals'). * Useful to materialize sharp edges (or non-smooth faces) without actually modifying the geometry @@ -87,6 +133,8 @@ void normals_calc_poly_vert(Span vert_positions, * \param loop_to_poly_map: Optional pre-created map from corners to their polygon. * \param sharp_edges: Optional array of sharp edge tags, used to split the evaluated normals on * each side of the edge. + * \param r_lnors_spacearr: Optional return data filled with information about the custom + * normals spaces for each grouped fan of face corners. */ void normals_calc_loop(Span vert_positions, Span edges, @@ -101,7 +149,7 @@ void normals_calc_loop(Span vert_positions, bool use_split_normals, float split_angle, short2 *clnors_data, - MLoopNorSpaceArray *r_lnors_spacearr, + CornerNormalSpaceArray *r_lnors_spacearr, MutableSpan r_loop_normals); void normals_loop_custom_set(Span vert_positions, diff --git a/source/blender/blenkernel/intern/mesh_mirror.cc b/source/blender/blenkernel/intern/mesh_mirror.cc index 6308a582767..103947f91c2 100644 --- a/source/blender/blenkernel/intern/mesh_mirror.cc +++ b/source/blender/blenkernel/intern/mesh_mirror.cc @@ -390,7 +390,7 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd, CustomData *ldata = &result->ldata; blender::short2 *clnors = static_cast( CustomData_get_layer_for_write(ldata, CD_CUSTOMLOOPNORMAL, result->totloop)); - MLoopNorSpaceArray lnors_spacearr = {nullptr}; + blender::bke::mesh::CornerNormalSpaceArray lnors_spacearr; /* The transform matrix of a normal must be * the transpose of inverse of transform matrix of the geometry... 
*/ @@ -432,12 +432,14 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd, } copy_v3_v3(loop_normals[mirrorj], loop_normals[j]); mul_m4_v3(mtx_nor, loop_normals[mirrorj]); - BKE_lnor_space_custom_normal_to_data( - lnors_spacearr.lspacearr[mirrorj], loop_normals[mirrorj], clnors[mirrorj]); + + const int space_index = lnors_spacearr.corner_space_indices[mirrorj]; + blender::bke::mesh::lnor_space_custom_normal_to_data(&lnors_spacearr.spaces[space_index], + loop_normals[mirrorj], + loop_normals[mirrorj], + clnors[mirrorj]); } } - - BKE_lnor_spacearr_free(&lnors_spacearr); } /* handle vgroup stuff */ diff --git a/source/blender/blenkernel/intern/mesh_normals.cc b/source/blender/blenkernel/intern/mesh_normals.cc index 107681a558a..78efd3e2256 100644 --- a/source/blender/blenkernel/intern/mesh_normals.cc +++ b/source/blender/blenkernel/intern/mesh_normals.cc @@ -467,11 +467,12 @@ MLoopNorSpace *BKE_lnor_space_create(MLoopNorSpaceArray *lnors_spacearr) /* This threshold is a bit touchy (usual float precision issue), this value seems OK. */ #define LNOR_SPACE_TRIGO_THRESHOLD (1.0f - 1e-4f) -void BKE_lnor_space_define(MLoopNorSpace *lnor_space, - const float lnor[3], - float vec_ref[3], - float vec_other[3], - const blender::Span edge_vectors) +namespace blender::bke::mesh { +static void lnor_space_define(CornerNormalSpace *lnor_space, + const float lnor[3], + float vec_ref[3], + float vec_other[3], + const blender::Span edge_vectors) { const float pi2 = float(M_PI) * 2.0f; float tvec[3], dtp; @@ -487,8 +488,6 @@ void BKE_lnor_space_define(MLoopNorSpace *lnor_space, return; } - copy_v3_v3(lnor_space->vec_lnor, lnor); - /* Compute ref alpha, average angle of all available edge vectors to lnor. 
*/ if (!edge_vectors.is_empty()) { float alpha = 0.0f; @@ -532,6 +531,23 @@ void BKE_lnor_space_define(MLoopNorSpace *lnor_space, lnor_space->ref_beta = pi2; } } +} // namespace blender::bke::mesh + +void BKE_lnor_space_define(MLoopNorSpace *lnor_space, + const float lnor[3], + float vec_ref[3], + float vec_other[3], + const blender::Span edge_vectors) +{ + using namespace blender::bke::mesh; + CornerNormalSpace space{}; + lnor_space_define(&space, lnor, vec_ref, vec_other, edge_vectors); + copy_v3_v3(lnor_space->vec_lnor, lnor); + copy_v3_v3(lnor_space->vec_ref, space.vec_ref); + copy_v3_v3(lnor_space->vec_ortho, space.vec_ortho); + lnor_space->ref_alpha = space.ref_alpha; + lnor_space->ref_beta = space.ref_beta; +} void BKE_lnor_space_add_loop(MLoopNorSpaceArray *lnors_spacearr, MLoopNorSpace *lnor_space, @@ -568,13 +584,15 @@ MINLINE short unit_float_to_short(const float val) return short(floorf(val * float(SHRT_MAX) + 0.5f)); } -void BKE_lnor_space_custom_data_to_normal(const MLoopNorSpace *lnor_space, - const short clnor_data[2], - float r_custom_lnor[3]) +namespace blender::bke::mesh { +static void lnor_space_custom_data_to_normal(const CornerNormalSpace *lnor_space, + const float3 lnor_no_custom, + const short clnor_data[2], + float r_custom_lnor[3]) { /* NOP custom normal data or invalid lnor space, return. 
*/ if (clnor_data[0] == 0 || lnor_space->ref_alpha == 0.0f || lnor_space->ref_beta == 0.0f) { - copy_v3_v3(r_custom_lnor, lnor_space->vec_lnor); + copy_v3_v3(r_custom_lnor, lnor_no_custom); return; } @@ -587,7 +605,7 @@ void BKE_lnor_space_custom_data_to_normal(const MLoopNorSpace *lnor_space, alphafac; const float betafac = unit_short_to_float(clnor_data[1]); - mul_v3_v3fl(r_custom_lnor, lnor_space->vec_lnor, cosf(alpha)); + mul_v3_v3fl(r_custom_lnor, lnor_no_custom, cosf(alpha)); if (betafac == 0.0f) { madd_v3_v3fl(r_custom_lnor, lnor_space->vec_ref, sinf(alpha)); @@ -601,21 +619,37 @@ void BKE_lnor_space_custom_data_to_normal(const MLoopNorSpace *lnor_space, } } } +} // namespace blender::bke::mesh -void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space, - const float custom_lnor[3], - short r_clnor_data[2]) +void BKE_lnor_space_custom_data_to_normal(const MLoopNorSpace *lnor_space, + const short clnor_data[2], + float r_custom_lnor[3]) +{ + using namespace blender::bke::mesh; + CornerNormalSpace space; + space.vec_ref = lnor_space->vec_ref; + space.vec_ortho = lnor_space->vec_ortho; + space.ref_alpha = lnor_space->ref_alpha; + space.ref_beta = lnor_space->ref_beta; + lnor_space_custom_data_to_normal(&space, lnor_space->vec_lnor, clnor_data, r_custom_lnor); +} + +namespace blender::bke::mesh { +void lnor_space_custom_normal_to_data(const CornerNormalSpace *lnor_space, + const float3 lnor_no_custom, + const float custom_lnor[3], + short r_clnor_data[2]) { /* We use nullptr vector as NOP custom normal (can be simpler than giving auto-computed `lnor`). 
*/ - if (is_zero_v3(custom_lnor) || compare_v3v3(lnor_space->vec_lnor, custom_lnor, 1e-4f)) { + if (is_zero_v3(custom_lnor) || compare_v3v3(lnor_no_custom, custom_lnor, 1e-4f)) { r_clnor_data[0] = r_clnor_data[1] = 0; return; } { const float pi2 = float(M_PI * 2.0); - const float cos_alpha = dot_v3v3(lnor_space->vec_lnor, custom_lnor); + const float cos_alpha = dot_v3v3(lnor_no_custom, custom_lnor); float vec[3], cos_beta; float alpha; @@ -630,7 +664,7 @@ void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space, } /* Project custom lnor on (vec_ref, vec_ortho) plane. */ - mul_v3_v3fl(vec, lnor_space->vec_lnor, -cos_alpha); + mul_v3_v3fl(vec, lnor_no_custom, -cos_alpha); add_v3_v3(vec, custom_lnor); normalize_v3(vec); @@ -654,6 +688,20 @@ void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space, } } } +} // namespace blender::bke::mesh + +void BKE_lnor_space_custom_normal_to_data(const MLoopNorSpace *lnor_space, + const float custom_lnor[3], + short r_clnor_data[2]) +{ + using namespace blender::bke::mesh; + CornerNormalSpace space; + space.vec_ref = lnor_space->vec_ref; + space.vec_ortho = lnor_space->vec_ortho; + space.ref_alpha = lnor_space->ref_alpha; + space.ref_beta = lnor_space->ref_beta; + lnor_space_custom_normal_to_data(&space, lnor_space->vec_lnor, custom_lnor, r_clnor_data); +} namespace blender::bke::mesh { @@ -661,7 +709,7 @@ struct LoopSplitTaskDataCommon { /* Read/write. * Note we do not need to protect it, though, since two different tasks will *always* affect * different elements in the arrays. 
*/ - MLoopNorSpaceArray *lnors_spacearr; + CornerNormalSpaceArray *lnors_spacearr; MutableSpan loop_normals; MutableSpan clnors_data; @@ -833,7 +881,7 @@ static void loop_manifold_fan_around_vert_next(const Span corner_verts, static void lnor_space_for_single_fan(LoopSplitTaskDataCommon *common_data, const int ml_curr_index, - MLoopNorSpace *lnor_space) + const int space_index) { const Span loop_to_poly = common_data->loop_to_poly; const Span poly_normals = common_data->poly_normals; @@ -841,7 +889,7 @@ static void lnor_space_for_single_fan(LoopSplitTaskDataCommon *common_data, loop_normals[ml_curr_index] = poly_normals[loop_to_poly[ml_curr_index]]; - if (MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr) { + if (CornerNormalSpaceArray *lnors_spacearr = common_data->lnors_spacearr) { const Span positions = common_data->positions; const Span edges = common_data->edges; const OffsetIndices polys = common_data->polys; @@ -864,23 +912,25 @@ static void lnor_space_for_single_fan(LoopSplitTaskDataCommon *common_data, sub_v3_v3v3(vec_prev, positions[vert_3], positions[vert_pivot]); normalize_v3(vec_prev); - BKE_lnor_space_define(lnor_space, loop_normals[ml_curr_index], vec_curr, vec_prev, {}); - /* We know there is only one loop in this space, no need to create a link-list in this case. 
*/ - BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, ml_curr_index, nullptr, true); + CornerNormalSpace *lnor_space = &lnors_spacearr->spaces[space_index]; + lnor_space_define(lnor_space, loop_normals[ml_curr_index], vec_curr, vec_prev, {}); + lnors_spacearr->corner_space_indices[ml_curr_index] = space_index; if (!clnors_data.is_empty()) { - BKE_lnor_space_custom_data_to_normal( - lnor_space, clnors_data[ml_curr_index], loop_normals[ml_curr_index]); + lnor_space_custom_data_to_normal(lnor_space, + loop_normals[ml_curr_index], + clnors_data[ml_curr_index], + loop_normals[ml_curr_index]); } } } static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, const int ml_curr_index, - MLoopNorSpace *lnor_space, + const int space_index, Vector *edge_vectors) { - MLoopNorSpaceArray *lnors_spacearr = common_data->lnors_spacearr; + CornerNormalSpaceArray *lnors_spacearr = common_data->lnors_spacearr; MutableSpan loop_normals = common_data->loop_normals; MutableSpan clnors_data = common_data->clnors_data; @@ -984,8 +1034,6 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, processed_corners.append(mlfan_vert_index); if (lnors_spacearr) { - /* Assign current lnor space to current 'vertex' loop. */ - BKE_lnor_space_add_loop(lnors_spacearr, lnor_space, mlfan_vert_index, nullptr, false); if (edge != edge_orig) { /* We store here all edges-normalized vectors processed. 
*/ edge_vectors->append(vec_curr); @@ -1024,7 +1072,10 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, length = 1.0f; } - BKE_lnor_space_define(lnor_space, lnor, vec_org, vec_curr, *edge_vectors); + CornerNormalSpace *lnor_space = &lnors_spacearr->spaces[space_index]; + lnor_space_define(lnor_space, lnor, vec_org, vec_curr, *edge_vectors); + lnors_spacearr->corner_space_indices.as_mutable_span().fill_indices( + processed_corners.as_span(), space_index); edge_vectors->clear(); if (!clnors_data.is_empty()) { @@ -1042,7 +1093,7 @@ static void split_loop_nor_fan_do(LoopSplitTaskDataCommon *common_data, /* Extra bonus: since small-stack is local to this function, * no more need to empty it at all cost! */ - BKE_lnor_space_custom_data_to_normal(lnor_space, *clnor_ref, lnor); + lnor_space_custom_data_to_normal(lnor_space, lnor, *clnor_ref, lnor); } } @@ -1219,7 +1270,7 @@ void normals_calc_loop(const Span vert_positions, bool use_split_normals, float split_angle, short2 *clnors_data, - MLoopNorSpaceArray *r_lnors_spacearr, + CornerNormalSpaceArray *r_lnors_spacearr, MutableSpan r_loop_normals) { /* For now this is not supported. @@ -1277,7 +1328,7 @@ void normals_calc_loop(const Span vert_positions, /* When using custom loop normals, disable the angle feature! */ const bool check_angle = (split_angle < float(M_PI)) && (clnors_data == nullptr); - MLoopNorSpaceArray _lnors_spacearr = {nullptr}; + CornerNormalSpaceArray _lnors_spacearr; #ifdef DEBUG_TIME SCOPED_TIMER_AVERAGED(__func__); @@ -1287,9 +1338,6 @@ void normals_calc_loop(const Span vert_positions, /* We need to compute lnor spacearr if some custom lnor data are given to us! */ r_lnors_spacearr = &_lnors_spacearr; } - if (r_lnors_spacearr) { - BKE_lnor_spacearr_init(r_lnors_spacearr, corner_verts.size(), MLNOR_SPACEARR_LOOP_INDEX); - } /* Init data common to all tasks. 
*/ LoopSplitTaskDataCommon common_data; @@ -1328,19 +1376,15 @@ void normals_calc_loop(const Span vert_positions, Vector fan_corners; loop_split_generator(&common_data, single_corners, fan_corners); - MLoopNorSpace *lnor_spaces = nullptr; if (r_lnors_spacearr) { - r_lnors_spacearr->spaces_num = single_corners.size() + fan_corners.size(); - if (r_lnors_spacearr->spaces_num > 0) { - lnor_spaces = static_cast(BLI_memarena_calloc( - r_lnors_spacearr->mem, sizeof(MLoopNorSpace) * r_lnors_spacearr->spaces_num)); - } + r_lnors_spacearr->spaces.reinitialize(single_corners.size() + fan_corners.size()); + r_lnors_spacearr->corner_space_indices = Array(corner_verts.size(), -1); } threading::parallel_for(single_corners.index_range(), 1024, [&](const IndexRange range) { for (const int i : range) { const int corner = single_corners[i]; - lnor_space_for_single_fan(&common_data, corner, lnor_spaces ? &lnor_spaces[i] : nullptr); + lnor_space_for_single_fan(&common_data, corner, i); } }); @@ -1348,24 +1392,48 @@ void normals_calc_loop(const Span vert_positions, Vector edge_vectors; for (const int i : range) { const int corner = fan_corners[i]; - split_loop_nor_fan_do(&common_data, - corner, - lnor_spaces ? &lnor_spaces[single_corners.size() + i] : nullptr, - &edge_vectors); + const int space_index = single_corners.size() + i; + split_loop_nor_fan_do(&common_data, corner, space_index, &edge_vectors); } }); - - if (r_lnors_spacearr) { - if (r_lnors_spacearr == &_lnors_spacearr) { - BKE_lnor_spacearr_free(r_lnors_spacearr); - } - } } #undef INDEX_UNSET #undef INDEX_INVALID #undef IS_EDGE_SHARP +/** + * Take an array of N indices that points to \a items_num items, where multiple indices map to the + * same item, and reverse the indices to create an array of all the indices that reference each + * item. Group each item's indices together consecutively, encoding the grouping in #r_offsets, + * which is meant to be used by #OffsetIndices. 
+ * + * \param r_offsets: An array to be filled with the first index of each item in + * \a r_reverse_indices, used to split the indices into chunks by item. (See #OffsetIndices). + * \param r_reverse_indices: The indices into \a item_indices that point to each item, split by \a + * r_offsets. + */ +static void reverse_index_array(const Span item_indices, + const int items_num, + Array &r_offsets, + Array &r_reverse_indices) +{ + r_offsets = Array(items_num + 1, 0); + for (const int index : item_indices) { + r_offsets[index]++; + } + + offset_indices::accumulate_counts_to_offsets(r_offsets); + r_reverse_indices.reinitialize(r_offsets.last()); + + Array count_per_item(items_num, 0); + for (const int corner : item_indices.index_range()) { + const int space = item_indices[corner]; + r_reverse_indices[r_offsets[space] + count_per_item[space]] = corner; + count_per_item[space]++; + } +} + /** * Compute internal representation of given custom normals (as an array of float[2]). * It also makes sure the mesh matches those custom normals, by setting sharp edges flag as needed @@ -1394,7 +1462,7 @@ static void mesh_normals_loop_custom_set(Span positions, * function *is not* performance-critical, since it is mostly expected to be called by io add-ons * when importing custom normals, and modifier (and perhaps from some editing tools later?). So * better to keep some simplicity here, and just call #bke::mesh::normals_calc_loop() twice! 
*/ - MLoopNorSpaceArray lnors_spacearr = {nullptr}; + CornerNormalSpaceArray lnors_spacearr; BitVector<> done_loops(corner_verts.size(), false); Array loop_normals(corner_verts.size()); const Array loop_to_poly = mesh_topology::build_loop_to_poly_map(polys); @@ -1436,8 +1504,6 @@ static void mesh_normals_loop_custom_set(Span positions, } } - BLI_assert(lnors_spacearr.data_type == MLNOR_SPACEARR_LOOP_INDEX); - /* Now, check each current smooth fan (one lnor space per smooth fan!), * and if all its matching custom loop_normals are not (enough) equal, add sharp edges as needed. * This way, next time we run bke::mesh::normals_calc_loop(), we'll get lnor spacearr/smooth fans @@ -1448,10 +1514,18 @@ static void mesh_normals_loop_custom_set(Span positions, done_loops.fill(true); } else { + Array fan_corners_from_space_offset_indices; + Array fan_corners_from_space_data; + reverse_index_array(lnors_spacearr.corner_space_indices, + lnors_spacearr.spaces.size(), + fan_corners_from_space_offset_indices, + fan_corners_from_space_data); + const OffsetIndices fan_corner_offsets(fan_corners_from_space_offset_indices); + for (const int i : corner_verts.index_range()) { - if (!lnors_spacearr.lspacearr[i]) { + if (lnors_spacearr.corner_space_indices[i] == -1) { /* This should not happen in theory, but in some rare case (probably ugly geometry) - * we can get some nullptr loopspacearr at this point. :/ + * we can get some missing loopspacearr at this point. :/ * Maybe we should set those loops' edges as sharp? */ done_loops[i].set(); if (G.debug & G_DEBUG) { @@ -1463,6 +1537,10 @@ static void mesh_normals_loop_custom_set(Span positions, continue; } + const int space_index = lnors_spacearr.corner_space_indices[i]; + const Span fan_corners = fan_corners_from_space_data.as_span().slice( + fan_corner_offsets[space_index]); + /* Notes: * - In case of mono-loop smooth fan, we have nothing to do. 
* - Loops in this linklist are ordered (in reversed order compared to how they were @@ -1472,17 +1550,15 @@ static void mesh_normals_loop_custom_set(Span positions, * - In smooth fan case, we compare each clnor against a ref one, * to avoid small differences adding up into a real big one in the end! */ - if (lnors_spacearr.lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) { + if (fan_corners.is_empty()) { done_loops[i].set(); continue; } - LinkNode *loop_link = lnors_spacearr.lspacearr[i]->loops; int prev_corner = -1; const float *org_nor = nullptr; - while (loop_link) { - const int lidx = POINTER_AS_INT(loop_link->link); + for (const int lidx : fan_corners) { float *nor = r_custom_loop_normals[lidx]; if (!org_nor) { @@ -1504,7 +1580,6 @@ static void mesh_normals_loop_custom_set(Span positions, } prev_corner = lidx; - loop_link = loop_link->next; done_loops[lidx].set(); } @@ -1512,9 +1587,8 @@ static void mesh_normals_loop_custom_set(Span positions, * otherwise we may miss some sharp edges here! * This is just a simplified version of above while loop. * See #45984. */ - loop_link = lnors_spacearr.lspacearr[i]->loops; - if (loop_link && org_nor) { - const int lidx = POINTER_AS_INT(loop_link->link); + if (fan_corners.size() > 1 && org_nor) { + const int lidx = fan_corners.last(); float *nor = r_custom_loop_normals[lidx]; if (dot_v3v3(org_nor, nor) < LNOR_SPACE_TRIGO_THRESHOLD) { @@ -1529,7 +1603,7 @@ static void mesh_normals_loop_custom_set(Span positions, } /* And now, recompute our new auto `loop_normals` and lnor spacearr! 
*/ - BKE_lnor_spacearr_clear(&lnors_spacearr); + lnors_spacearr = {}; normals_calc_loop(positions, edges, polys, @@ -1547,12 +1621,18 @@ static void mesh_normals_loop_custom_set(Span positions, loop_normals); } - Vector processed_corners; + Array fan_corners_from_space_offset_indices; + Array fan_corners_from_space_data; + reverse_index_array(lnors_spacearr.corner_space_indices, + lnors_spacearr.spaces.size(), + fan_corners_from_space_offset_indices, + fan_corners_from_space_data); + const OffsetIndices fan_corner_offsets(fan_corners_from_space_offset_indices); /* And we just have to convert plain object-space custom normals to our * lnor space-encoded ones. */ for (const int i : corner_verts.index_range()) { - if (!lnors_spacearr.lspacearr[i]) { + if (lnors_spacearr.corner_space_indices[i] == -1) { done_loops[i].reset(); if (G.debug & G_DEBUG) { printf("WARNING! Still getting invalid nullptr loop space in second loop for loop %d!\n", @@ -1564,44 +1644,41 @@ static void mesh_normals_loop_custom_set(Span positions, continue; } + const int space_index = lnors_spacearr.corner_space_indices[i]; + const Span fan_corners = fan_corners_from_space_data.as_span().slice( + fan_corner_offsets[space_index]); + /* Note we accumulate and average all custom normals in current smooth fan, * to avoid getting different clnors data (tiny differences in plain custom normals can * give rather huge differences in computed 2D factors). */ - LinkNode *loop_link = lnors_spacearr.lspacearr[i]->loops; - if (lnors_spacearr.lspacearr[i]->flags & MLNOR_SPACE_IS_SINGLE) { - BLI_assert(POINTER_AS_INT(loop_link) == i); + if (fan_corners.size() < 2) { const int nidx = use_vertices ? 
corner_verts[i] : i; float *nor = r_custom_loop_normals[nidx]; - BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], nor, r_clnors_data[i]); + const int space_index = lnors_spacearr.corner_space_indices[i]; + lnor_space_custom_normal_to_data( + &lnors_spacearr.spaces[space_index], loop_normals[i], nor, r_clnors_data[i]); done_loops[i].reset(); } else { - int avg_nor_count = 0; float3 avg_nor(0.0f); - while (loop_link) { - const int lidx = POINTER_AS_INT(loop_link->link); + for (const int lidx : fan_corners) { const int nidx = use_vertices ? corner_verts[lidx] : lidx; float *nor = r_custom_loop_normals[nidx]; - avg_nor_count++; add_v3_v3(avg_nor, nor); - processed_corners.append(lidx); - loop_link = loop_link->next; done_loops[lidx].reset(); } - mul_v3_fl(avg_nor, 1.0f / float(avg_nor_count)); + mul_v3_fl(avg_nor, 1.0f / float(fan_corners.size())); short2 clnor_data_tmp; - BKE_lnor_space_custom_normal_to_data(lnors_spacearr.lspacearr[i], avg_nor, clnor_data_tmp); + lnor_space_custom_normal_to_data( + &lnors_spacearr.spaces[space_index], loop_normals[i], avg_nor, clnor_data_tmp); - r_clnors_data.fill_indices(processed_corners.as_span(), clnor_data_tmp); - processed_corners.clear(); + r_clnors_data.fill_indices(fan_corners, clnor_data_tmp); } } - - BKE_lnor_spacearr_free(&lnors_spacearr); } void normals_loop_custom_set(const Span vert_positions, diff --git a/source/blender/modifiers/intern/MOD_weighted_normal.cc b/source/blender/modifiers/intern/MOD_weighted_normal.cc index efa96443076..ac09aa22568 100644 --- a/source/blender/modifiers/intern/MOD_weighted_normal.cc +++ b/source/blender/modifiers/intern/MOD_weighted_normal.cc @@ -205,7 +205,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd, const bool has_clnors = wn_data->has_clnors; const float split_angle = wn_data->split_angle; - MLoopNorSpaceArray lnors_spacearr = {nullptr}; + bke::mesh::CornerNormalSpaceArray lnors_spacearr; const bool keep_sharp = (wnmd->flag & 
MOD_WEIGHTEDNORMAL_KEEP_SHARP) != 0; const bool use_face_influence = (wnmd->flag & MOD_WEIGHTEDNORMAL_FACE_INFLUENCE) != 0 && @@ -215,10 +215,7 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd, blender::Array loop_normals; Array items_data; - Array item_index_per_corner; if (keep_sharp) { - BLI_bitmap *done_loops = BLI_BITMAP_NEW(corner_verts.size(), __func__); - /* This will give us loop normal spaces, * we do not actually care about computed loop_normals for now... */ loop_normals.reinitialize(corner_verts.size()); @@ -238,51 +235,14 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd, &lnors_spacearr, loop_normals); - item_index_per_corner.reinitialize(corner_verts.size()); - items_data = Array(lnors_spacearr.spaces_num, - WeightedNormalDataAggregateItem{}); - - /* In this first loop, we assign each WeightedNormalDataAggregateItem - * to its smooth fan of loops (aka lnor space). */ - int item_index = 0; - for (const int i : polys.index_range()) { - for (const int ml_index : polys[i]) { - if (BLI_BITMAP_TEST(done_loops, ml_index)) { - /* Smooth fan of this loop has already been processed, skip it. 
*/ - continue; - } - - WeightedNormalDataAggregateItem *itdt = &items_data[item_index]; - itdt->curr_strength = FACE_STRENGTH_WEAK; - - MLoopNorSpace *lnor_space = lnors_spacearr.lspacearr[ml_index]; - item_index_per_corner[ml_index] = item_index; - - if (!(lnor_space->flags & MLNOR_SPACE_IS_SINGLE)) { - for (LinkNode *lnode = lnor_space->loops; lnode; lnode = lnode->next) { - const int ml_fan_index = POINTER_AS_INT(lnode->link); - item_index_per_corner[ml_fan_index] = item_index; - BLI_BITMAP_ENABLE(done_loops, ml_fan_index); - } - } - else { - BLI_BITMAP_ENABLE(done_loops, ml_index); - } - - item_index++; - } - } - - MEM_freeN(done_loops); + WeightedNormalDataAggregateItem start_item{}; + start_item.curr_strength = FACE_STRENGTH_WEAK; + items_data = Array(lnors_spacearr.spaces.size(), start_item); } else { - items_data = Array(verts_num, - WeightedNormalDataAggregateItem{}); - if (use_face_influence) { - for (int item_index : items_data.index_range()) { - items_data[item_index].curr_strength = FACE_STRENGTH_WEAK; - } - } + WeightedNormalDataAggregateItem start_item{}; + start_item.curr_strength = FACE_STRENGTH_WEAK; + items_data = Array(verts_num, start_item); } wn_data->items_data = items_data; @@ -294,8 +254,10 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd, for (const int ml_index : polys[poly_index]) { const int mv_index = corner_verts[ml_index]; - WeightedNormalDataAggregateItem *item_data = - keep_sharp ? &items_data[item_index_per_corner[ml_index]] : &items_data[mv_index]; + const int space_index = lnors_spacearr.corner_space_indices[ml_index]; + + WeightedNormalDataAggregateItem *item_data = keep_sharp ? 
&items_data[space_index] : + &items_data[mv_index]; aggregate_item_normal( wnmd, wn_data, item_data, mv_index, poly_index, mp_val, use_face_influence); @@ -307,11 +269,12 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd, for (int i = 0; i < corner_verts.size(); i++) { const int ml_index = mode_pair[i].index; const float ml_val = mode_pair[i].val; + const int space_index = lnors_spacearr.corner_space_indices[ml_index]; const int poly_index = loop_to_poly[ml_index]; const int mv_index = corner_verts[ml_index]; - WeightedNormalDataAggregateItem *item_data = - keep_sharp ? &items_data[item_index_per_corner[ml_index]] : &items_data[mv_index]; + WeightedNormalDataAggregateItem *item_data = keep_sharp ? &items_data[space_index] : + &items_data[mv_index]; aggregate_item_normal( wnmd, wn_data, item_data, mv_index, poly_index, ml_val, use_face_influence); @@ -334,7 +297,8 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd, * (before this modifier is applied, at start of this function), * so no need to recompute them here. */ for (int ml_index = 0; ml_index < corner_verts.size(); ml_index++) { - WeightedNormalDataAggregateItem *item_data = &items_data[item_index_per_corner[ml_index]]; + const int space_index = lnors_spacearr.corner_space_indices[ml_index]; + WeightedNormalDataAggregateItem *item_data = &items_data[space_index]; if (!is_zero_v3(item_data->normal)) { copy_v3_v3(loop_normals[ml_index], item_data->normal); } @@ -418,10 +382,6 @@ static void apply_weights_vertex_normal(WeightedNormalModifierData *wnmd, clnors); } } - - if (keep_sharp) { - BKE_lnor_spacearr_free(&lnors_spacearr); - } } static void wn_face_area(WeightedNormalModifierData *wnmd, WeightedNormalData *wn_data)