Mesh: Rewrite split edges algorithm #110661
|
@ -362,6 +362,8 @@ class IndexMask : private IndexMaskData {
|
|||
template<int64_t N = 4>
|
||||
Vector<std::variant<IndexRange, IndexMaskSegment>, N> to_spans_and_ranges() const;
|
||||
|
||||
template<typename T> void to_reverse_map(MutableSpan<T> r_indices) const;
|
||||
|
||||
/**
|
||||
* Is used by some functions to get low level access to the mask in order to construct it.
|
||||
*/
|
||||
|
|
|
@ -617,6 +617,22 @@ Vector<IndexRange> IndexMask::to_ranges_invert(const IndexRange universe) const
|
|||
return this->complement(universe, memory).to_ranges();
|
||||
}
|
||||
|
||||
/**
 * Fill \a r_map so that `r_map[index_in_mask] == position_in_mask` for every index
 * contained in this mask. Entries for indices not in the mask are left untouched
 * (poisoned with -1 in debug builds to make stale reads obvious).
 */
template<typename T> void IndexMask::to_reverse_map(MutableSpan<T> r_map) const
{
#ifdef DEBUG
  /* Make accidental lookups of unmapped indices easy to spot while debugging. */
  r_map.fill(-1);
#endif
  /* Each visited pair is (value stored in the mask, its position within the mask). */
  const auto write_entry = [&](const T index_value, const T position) {
    r_map[index_value] = position;
  };
  this->foreach_index_optimized<T>(GrainSize(4096), write_entry);
}
|
||||
|
||||
/**
 * Return the index ranges covering every index of \a universe that is NOT in this
 * mask, i.e. the ranges of the mask's complement within the universe.
 */
Vector<IndexRange> IndexMask::to_ranges_invert(const IndexRange universe) const
{
  /* Temporary storage for the complement mask; freed when this function returns. */
  IndexMaskMemory scratch;
  const IndexMask inverted = this->complement(universe, scratch);
  return inverted.to_ranges();
}
|
||||
|
||||
namespace detail {
|
||||
|
||||
/**
|
||||
|
@ -727,5 +743,7 @@ template IndexMask IndexMask::from_indices(Span<int32_t>, IndexMaskMemory &);
|
|||
template IndexMask IndexMask::from_indices(Span<int64_t>, IndexMaskMemory &);
|
||||
template void IndexMask::to_indices(MutableSpan<int32_t>) const;
|
||||
template void IndexMask::to_indices(MutableSpan<int64_t>) const;
|
||||
template void IndexMask::to_reverse_map(MutableSpan<int32_t>) const;
|
||||
template void IndexMask::to_reverse_map(MutableSpan<int64_t>) const;
|
||||
|
||||
} // namespace blender::index_mask
|
||||
|
|
|
@ -15,15 +15,6 @@
|
|||
|
||||
namespace blender::geometry {
|
||||
|
||||
static void create_reverse_map(const IndexMask &mask, MutableSpan<int> r_map)
|
||||
{
|
||||
#ifdef DEBUG
|
||||
r_map.fill(-1);
|
||||
#endif
|
||||
mask.foreach_index_optimized<int>(
|
||||
GrainSize(4096), [&](const int src_i, const int dst_i) { r_map[src_i] = dst_i; });
|
||||
}
|
||||
|
||||
static void remap_verts(const OffsetIndices<int> src_polys,
|
||||
const OffsetIndices<int> dst_polys,
|
||||
const int src_verts_num,
|
||||
|
@ -36,7 +27,7 @@ static void remap_verts(const OffsetIndices<int> src_polys,
|
|||
MutableSpan<int> dst_corner_verts)
|
||||
{
|
||||
Array<int> map(src_verts_num);
|
||||
create_reverse_map(vert_mask, map);
|
||||
vert_mask.to_reverse_map(map);
|
||||
threading::parallel_invoke(
|
||||
vert_mask.size() > 1024,
|
||||
[&]() {
|
||||
|
@ -65,7 +56,7 @@ static void remap_edges(const OffsetIndices<int> src_polys,
|
|||
MutableSpan<int> dst_corner_edges)
|
||||
{
|
||||
Array<int> map(src_edges_num);
|
||||
create_reverse_map(edge_mask, map);
|
||||
edge_mask.to_reverse_map(map);
|
||||
poly_mask.foreach_index(GrainSize(512), [&](const int64_t src_i, const int64_t dst_i) {
|
||||
const IndexRange src_poly = src_polys[src_i];
|
||||
const IndexRange dst_poly = dst_polys[dst_i];
|
||||
|
|
|
@ -443,21 +443,30 @@ static void swap_edge_vert(int2 &edge, const int old_vert, const int new_vert)
|
|||
static void reassign_loose_edge_verts(const IndexMask &affected_verts,
|
||||
const Span<Vector<Fan>> &vert_fans,
|
||||
const OffsetIndices<int> new_verts_by_affected_vert,
|
||||
const int orig_edges_num,
|
||||
const IndexMask &unselected_edges,
|
||||
const IndexMask &selected_loose_edges,
|
||||
const int selected_loose_edge_start,
|
||||
MutableSpan<int2> edges)
|
||||
{
|
||||
/* This map is only useful because loose edges are not duplicated. Non-loose
|
||||
* edges can potentially be duplicated into multiple final edges. */
|
||||
Array<int> old_to_new_loose_edge_map(orig_edges_num);
|
||||
unselected_edges.to_reverse_map<int>(old_to_new_loose_edge_map);
|
||||
selected_loose_edges.to_reverse_map<int>(old_to_new_loose_edge_map);
|
||||
|
||||
affected_verts.foreach_index(GrainSize(1024), [&](const int vert, const int mask) {
|
||||
const Vector<Fan> &fans = vert_fans[mask];
|
||||
const IndexRange new_verts = new_verts_by_affected_vert[mask];
|
||||
wannes.malfait marked this conversation as resolved
Outdated
|
||||
for (const int i : fans.index_range().drop_back(1)) {
|
||||
if (std::holds_alternative<SplitLooseEdgeFan>(fans[i])) {
|
||||
const int orig_edge = std::get<SplitLooseEdgeFan>(fans[i]);
|
||||
const int new_edge = selected_loose_edge_start + 0; // TODO!!!!
|
||||
const int new_edge = old_to_new_loose_edge_map[orig_edge];
|
||||
swap_edge_vert(edges[new_edge], vert, new_verts[i]);
|
||||
}
|
||||
else if (std::holds_alternative<LooseEdgeGroupfan>(fans[i])) {
|
||||
for (const int orig_edge : std::get<LooseEdgeGroupfan>(fans[i])) {
|
||||
const int new_edge = selected_loose_edge_start + 0; // TODO!!!!
|
||||
const int new_edge = old_to_new_loose_edge_map[orig_edge] + selected_loose_edge_start;
|
||||
swap_edge_vert(edges[new_edge], vert, new_verts[i]);
|
||||
}
|
||||
}
|
||||
|
@ -552,10 +561,14 @@ void split_edges(Mesh &mesh,
|
|||
Array<int2> result_edges = combine_all_final_edges(
|
||||
orig_edges, unselected_edges, duplicate_edges, selected_loose_edges);
|
||||
if (loose_edge_cache.count > 0) {
|
||||
const int selected_loose_edge_start = unselected_edges.size() + duplicate_edges.size();
|
||||
reassign_loose_edge_verts(affected_verts,
|
||||
vert_fans,
|
||||
new_verts_by_affected_vert,
|
||||
0, // TODO
|
||||
orig_edges.size(),
|
||||
unselected_edges,
|
||||
selected_loose_edges,
|
||||
selected_loose_edge_start,
|
||||
result_edges);
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue
These functions take up a significant part of the total runtime of the node. Have you tried looking into invoking them in parallel, or are all the threads already saturated by the functions individually? Of course, these might just become look-ups if a topology cache is available
Yes, I hope this can be improved in the future. Currently their work is mostly single-threaded, but I'd like to change that (Iliya looked into it in !110707). Once they are multithreaded themselves, running them in parallel with each other won't help as much — but it is definitely worth investigating.
Probably best to keep that for a separate patch indeed, since this is really supposed to be a bugfix