From 4ba30ee25797d392b509beebd4a51a0055035692 Mon Sep 17 00:00:00 2001 From: Hans Goudey Date: Wed, 3 May 2023 23:20:34 -0400 Subject: [PATCH 1/4] WIP: Simulation Nodes: Implement subframe mixing Add an implementation for the placeholder to subframe mixing. The purpose of subframe mixing is higher quality motion blur, without requiring baking the entire geometry output for every substep. Linear mixing can fill the gaps while maintaining lower memory usage. All attributes are mixed, besides builtin topology storage attributes. Mixing is only supported when the domain size is unchanged, or when an `id` attribute gives a mapping between items when the topology is changed. Attributes that are ignored for better performance. TODO? - [ ] ID attribute based mixing on domains besides points - [ ] Mixing geometries from nested instances - [ ] Testing and demo file --- .../nodes/node_geo_simulation_output.cc | 314 +++++++++++++++++- 1 file changed, 311 insertions(+), 3 deletions(-) diff --git a/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc b/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc index ee15aa2f1d2..efb170a1892 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc @@ -1,7 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ +#include "BLI_math_matrix.hh" #include "BLI_string_utils.h" +#include "BLI_task.hh" +#include "BKE_attribute_math.hh" #include "BKE_compute_contexts.hh" #include "BKE_curves.hh" #include "BKE_instances.hh" @@ -366,6 +369,295 @@ struct EvalData { bool is_first_evaluation = true; }; +static bool sharing_info_equal(const ImplicitSharingInfo *a, const ImplicitSharingInfo *b) +{ + if (!a || !b) { + return false; + } + if (a != b) { + return false; + } + if (a->version() != b->version()) { + return false; + } + return true; +} + +template +void mix_with_indices(MutableSpan prev, + const VArray &next, + const Span index_map, + const float factor) +{ + threading::parallel_for(prev.index_range(), 1024, [&](const IndexRange range) { + devirtualize_varray(next, [&](const auto next) { + for (const int i : range) { + if (index_map[i] != -1) { + prev[i] = bke::attribute_math::mix2(factor, prev[i], next[index_map[i]]); + } + } + }); + }); +} + +static void mix_with_indices(GMutableSpan prev, + const GVArray &next, + const Span index_map, + const float factor) +{ + bke::attribute_math::convert_to_static_type(prev.type(), [&](auto dummy) { + using T = decltype(dummy); + mix_with_indices(prev.typed(), next.typed(), index_map, factor); + }); +} + +template void mix(MutableSpan prev, const VArray &next, const float factor) +{ + threading::parallel_for(prev.index_range(), 1024, [&](const IndexRange range) { + devirtualize_varray(next, [&](const auto next) { + for (const int i : range) { + prev[i] = bke::attribute_math::mix2(factor, prev[i], next[i]); + } + }); + }); +} + +static void mix(GMutableSpan prev, const GVArray &next, const float factor) +{ + bke::attribute_math::convert_to_static_type(prev.type(), [&](auto dummy) { + using T = decltype(dummy); + mix(prev.typed(), next.typed(), factor); + }); +} + +static void mix(MutableSpan prev, const Span next, const float factor) +{ + threading::parallel_for(prev.index_range(), 1024, [&](const IndexRange range) { + for (const int i : range) { + prev[i] = math::interpolate(prev[i], next[i], factor); + } + }); +} + +static void mix_with_indices(MutableSpan prev, + const Span next, + const Span index_map, + 
const float factor) +{ + threading::parallel_for(prev.index_range(), 1024, [&](const IndexRange range) { + for (const int i : range) { + if (index_map[i] != -1) { + prev[i] = math::interpolate(prev[i], next[index_map[i]], factor); + } + } + }); +} + +static void mix_attributes(MutableAttributeAccessor prev_attributes, + const AttributeAccessor next_attributes, + const std::array, ATTR_DOMAIN_NUM> &index_maps, + const float factor, + const Set &names_to_skip = {}) +{ + Set ids = prev_attributes.all_ids(); + ids.remove("id"); + for (const StringRef name : names_to_skip) { + ids.remove(name); + } + + for (const AttributeIDRef &id : ids) { + const GAttributeReader prev = prev_attributes.lookup(id); + const eAttrDomain domain = prev.domain; + const eCustomDataType type = bke::cpp_type_to_custom_data_type(prev.varray.type()); + if (ELEM(type, CD_PROP_STRING, CD_PROP_BOOL)) { + /* String attributes can't be mixed, and there's no point in mixing boolean attributes. */ + continue; + } + const GAttributeReader next = next_attributes.lookup(id, prev.domain, type); + if (sharing_info_equal(prev.sharing_info, next.sharing_info)) { + continue; + } + GSpanAttributeWriter dst = prev_attributes.lookup_for_write_span(id); + const Span index_map = index_maps[domain]; + if (!index_map.is_empty()) { + /* If there's an ID attribute, use its values to mix with potentially changed indices. */ + mix_with_indices(dst.span, *next, index_map, factor); + } + else if (prev_attributes.domain_size(domain) == next_attributes.domain_size(domain)) { + /* With no ID attribute to find matching elements, we can only support mixing when the domain + * size (topology) is the same. Other options like mixing just the start of arrays might work + * too, but give bad results too. */ + mix(dst.span, next.varray, factor); + } + dst.finish(); + } +} + +static Map create_value_to_first_index_map(const Span values) +{ + Map map; + map.reserve(values.size()); + for (const int i : values.index_range()) { + map.add(values[i], i); + } + return map; +} + +static Array create_id_index_map(const AttributeAccessor prev_attributes, + const AttributeAccessor next_attributes) +{ + const AttributeReader prev_ids = prev_attributes.lookup("id"); + const AttributeReader next_ids = next_attributes.lookup("id"); + if (!prev_ids || !next_ids) { + return; + } + if (sharing_info_equal(prev_ids.sharing_info, next_ids.sharing_info)) { + return {}; + } + + const VArraySpan prev(*prev_ids); + const VArraySpan next(*next_ids); + + const Map next_id_map = create_value_to_first_index_map(VArraySpan(*next_ids)); + Array index_map(prev.size()); + threading::parallel_for(prev.index_range(), 1024, [&](const IndexRange range) { + for (const int i : range) { + index_map[i] = next_id_map.lookup_default(prev[i], -1); + } + }); + return index_map; +} + +static void create_mesh_index_maps(const Mesh &prev_mesh, + const Mesh &next_mesh, + Array &vert_map, + Array &edge_map, + Array &face_map) +{ + const AttributeReader prev_ids = prev_mesh.attributes().lookup("id", + ATTR_DOMAIN_POINT); + const AttributeReader next_ids = next_mesh.attributes().lookup("id", + ATTR_DOMAIN_POINT); + if (!prev_ids || !next_ids) { + return; + } + if (sharing_info_equal(prev_ids.sharing_info, next_ids.sharing_info)) { + return; + } + + const VArraySpan prev_ids_span(*prev_ids); + const VArraySpan next_ids_span(*next_ids); + + const Map vert_hash_map = create_value_to_first_index_map(VArraySpan(*next_ids)); + + vert_map.reinitialize(prev_mesh.totvert); + 
threading::parallel_for(prev_ids_span.index_range(), 1024, [&](const IndexRange range) { + for (const int i : range) { + vert_map[i] = vert_hash_map.lookup_default(prev_ids_span[i], -1); + } + }); +} + +static void mix_mesh_attributes(Mesh &prev_mesh, const Mesh &next_mesh, const float factor) +{ + Array vert_map; + Array edge_map; + Array face_map; + create_mesh_index_maps(prev_mesh, next_mesh, vert_map, edge_map, face_map); + mix_attributes(prev_mesh.attributes_for_write(), + next_mesh.attributes(), + {vert_map, edge_map, face_map}, + factor, + {".edge_verts", ".corner_vert", ".corner_edge"}); +} + +static void mix_geometries(GeometrySet &prev, const GeometrySet &next, const float factor) +{ + if (Mesh *mesh_prev = prev.get_mesh_for_write()) { + if (const Mesh *mesh_next = next.get_mesh_for_read()) { + mix_mesh_attributes(*mesh_prev, *mesh_next, factor); + } + } + if (PointCloud *points_prev = prev.get_pointcloud_for_write()) { + if (const PointCloud *points_next = next.get_pointcloud_for_read()) { + const Array index_map = create_id_index_map(points_prev->attributes(), + points_next->attributes()); + mix_attributes( + points_prev->attributes_for_write(), points_next->attributes(), index_map, factor); + } + } + if (Curves *curves_prev = prev.get_curves_for_write()) { + if (const Curves *curves_next = next.get_curves_for_read()) { + MutableAttributeAccessor prev = curves_prev->geometry.wrap().attributes_for_write(); + const AttributeAccessor next = curves_next->geometry.wrap().attributes(); + const Array index_map = create_id_index_map(prev, next); + mix_attributes( + prev, + next, + index_map, + factor, + {"curve_type", "normal_mode", "handle_type_left", "handle_type_right", "knots_mode"}); + } + } + if (bke::Instances *instances_prev = prev.get_instances_for_write()) { + if (const bke::Instances *instances_next = next.get_instances_for_read()) { + const Array index_map = create_id_index_map(instances_prev->attributes(), + instances_next->attributes()); + mix_attributes(instances_prev->attributes_for_write(), + instances_next->attributes(), + index_map, + factor, + {"position"}); + if (index_map.is_empty()) { + mix(instances_prev->transforms(), instances_next->transforms(), factor); + } + else { + mix_with_indices(instances_prev->transforms(), instances_next->transforms(), map, factor); + } + + // TODO: Mix nested instance geometries. Try to match instance geometries by id. + } + } +} + +static void mix_simulation_state(const NodeSimulationItem &item, + void *prev, + const void *next, + const float factor) +{ + switch (eNodeSocketDatatype(item.socket_type)) { + case SOCK_GEOMETRY: { + mix_geometries( + *static_cast(prev), *static_cast(next), factor); + break; + } + case SOCK_FLOAT: + case SOCK_VECTOR: + case SOCK_INT: + case SOCK_BOOLEAN: + case SOCK_RGBA: { + const CPPType &type = get_simulation_item_cpp_type(item); + const fn::ValueOrFieldCPPType &value_or_field_type = *fn::ValueOrFieldCPPType::get_from_self( + type); + if (value_or_field_type.is_field(prev)) { + /* Fields are evaluated on geometries and are mixed there. 
*/ + break; + } + + void *prev = value_or_field_type.get_value_ptr(prev); + const void *next = value_or_field_type.get_value_ptr(next); + bke::attribute_math::convert_to_static_type(value_or_field_type.value, [&](auto dummy) { + using T = decltype(dummy); + *static_cast(prev) = bke::attribute_math::mix2( + factor, *static_cast(prev), *static_cast(next)); + }); + break; + } + default: + break; + } +} + class LazyFunctionForSimulationOutputNode final : public LazyFunction { const bNode &node_; Span simulation_items_; @@ -499,9 +791,25 @@ class LazyFunctionForSimulationOutputNode final : public LazyFunction { const bke::sim::SimulationZoneState &next_state, const float mix_factor) const { - /* TODO: Implement subframe mixing. */ - this->output_cached_state(params, self_object, compute_context, prev_state); - UNUSED_VARS(next_state, mix_factor); + Array output_values(simulation_items_.size()); + for (const int i : simulation_items_.index_range()) { + output_values[i] = params.get_output_data_ptr(i); + } + simulation_state_to_values( + simulation_items_, prev_state, self_object, compute_context, node_, output_values); + + Array next_values(simulation_items_.size()); + LinearAllocator<> allocator; + for (const int i : simulation_items_.index_range()) { + const CPPType &type = *outputs_[i].type; + next_values[i] = allocator.allocate(type.size(), type.alignment()); + } + simulation_state_to_values( + simulation_items_, next_state, self_object, compute_context, node_, next_values); + + for (const int i : simulation_items_.index_range()) { + mix_simulation_state(simulation_items_[i], output_values[i], next_values[i], mix_factor); + } } }; -- 2.30.2 From db523f23479843a41d14adc477db2b9efd1f3088 Mon Sep 17 00:00:00 2001 From: Hans Goudey Date: Tue, 9 May 2023 14:56:10 -0400 Subject: [PATCH 2/4] Fix sharing info equal --- .../nodes/geometry/nodes/node_geo_simulation_output.cc | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc b/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc index f7281de49ce..be8c4da1f4b 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc @@ -374,13 +374,7 @@ static bool sharing_info_equal(const ImplicitSharingInfo *a, const ImplicitShari if (!a || !b) { return false; } - if (a != b) { - return false; - } - if (a->version() != b->version()) { - return false; - } - return true; + return a == b; } template -- 2.30.2 From 9b66d473b1688135cb00fb8fe8538920b3bcb101 Mon Sep 17 00:00:00 2001 From: Hans Goudey Date: Tue, 9 May 2023 15:49:15 -0400 Subject: [PATCH 3/4] Fixes, cleanup --- .../nodes/node_geo_simulation_output.cc | 94 ++++++++----------- 1 file changed, 37 insertions(+), 57 deletions(-) diff --git a/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc b/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc index be8c4da1f4b..2d027fe72d3 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc @@ -449,7 +449,8 @@ static void mix_with_indices(MutableSpan prev, static void mix_attributes(MutableAttributeAccessor prev_attributes, const AttributeAccessor next_attributes, - const std::array, ATTR_DOMAIN_NUM> &index_maps, + const Span index_map, + const eAttrDomain mix_domain, const float factor, const Set &names_to_skip = {}) { @@ -462,6 +463,9 @@ static void 
mix_attributes(MutableAttributeAccessor prev_attributes, for (const AttributeIDRef &id : ids) { const GAttributeReader prev = prev_attributes.lookup(id); const eAttrDomain domain = prev.domain; + if (domain != mix_domain) { + continue; + } const eCustomDataType type = bke::cpp_type_to_custom_data_type(prev.varray.type()); if (ELEM(type, CD_PROP_STRING, CD_PROP_BOOL)) { /* String attributes can't be mixed, and there's no point in mixing boolean attributes. */ @@ -472,7 +476,6 @@ static void mix_attributes(MutableAttributeAccessor prev_attributes, continue; } GSpanAttributeWriter dst = prev_attributes.lookup_for_write_span(id); - const Span index_map = index_maps[domain]; if (!index_map.is_empty()) { /* If there's an ID attribute, use its values to mix with potentially changed indices. */ mix_with_indices(dst.span, *next, index_map, factor); @@ -503,7 +506,7 @@ static Array create_id_index_map(const AttributeAccessor prev_attributes, const AttributeReader prev_ids = prev_attributes.lookup("id"); const AttributeReader next_ids = next_attributes.lookup("id"); if (!prev_ids || !next_ids) { - return; + return {}; } if (sharing_info_equal(prev_ids.sharing_info, next_ids.sharing_info)) { return {}; @@ -522,62 +525,28 @@ static Array create_id_index_map(const AttributeAccessor prev_attributes, return index_map; } -static void create_mesh_index_maps(const Mesh &prev_mesh, - const Mesh &next_mesh, - Array &vert_map, - Array &edge_map, - Array &face_map) -{ - const AttributeReader prev_ids = prev_mesh.attributes().lookup("id", - ATTR_DOMAIN_POINT); - const AttributeReader next_ids = next_mesh.attributes().lookup("id", - ATTR_DOMAIN_POINT); - if (!prev_ids || !next_ids) { - return; - } - if (sharing_info_equal(prev_ids.sharing_info, next_ids.sharing_info)) { - return; - } - - const VArraySpan prev_ids_span(*prev_ids); - const VArraySpan next_ids_span(*next_ids); - - const Map vert_hash_map = create_value_to_first_index_map(VArraySpan(*next_ids)); - - vert_map.reinitialize(prev_mesh.totvert); - threading::parallel_for(prev_ids_span.index_range(), 1024, [&](const IndexRange range) { - for (const int i : range) { - vert_map[i] = vert_hash_map.lookup_default(prev_ids_span[i], -1); - } - }); -} - -static void mix_mesh_attributes(Mesh &prev_mesh, const Mesh &next_mesh, const float factor) -{ - Array vert_map; - Array edge_map; - Array face_map; - create_mesh_index_maps(prev_mesh, next_mesh, vert_map, edge_map, face_map); - mix_attributes(prev_mesh.attributes_for_write(), - next_mesh.attributes(), - {vert_map, edge_map, face_map}, - factor, - {".edge_verts", ".corner_vert", ".corner_edge"}); -} - static void mix_geometries(GeometrySet &prev, const GeometrySet &next, const float factor) { if (Mesh *mesh_prev = prev.get_mesh_for_write()) { if (const Mesh *mesh_next = next.get_mesh_for_read()) { - mix_mesh_attributes(*mesh_prev, *mesh_next, factor); + Array vert_map = create_id_index_map(mesh_prev->attributes(), mesh_next->attributes()); + mix_attributes(mesh_prev->attributes_for_write(), + mesh_next->attributes(), + vert_map, + ATTR_DOMAIN_POINT, + factor, + {}); } } if (PointCloud *points_prev = prev.get_pointcloud_for_write()) { if (const PointCloud *points_next = next.get_pointcloud_for_read()) { const Array index_map = create_id_index_map(points_prev->attributes(), points_next->attributes()); - mix_attributes( - points_prev->attributes_for_write(), points_next->attributes(), index_map, factor); + mix_attributes(points_prev->attributes_for_write(), + points_next->attributes(), + index_map, + 
ATTR_DOMAIN_POINT, + factor); } } if (Curves *curves_prev = prev.get_curves_for_write()) { @@ -585,12 +554,12 @@ static void mix_geometries(GeometrySet &prev, const GeometrySet &next, const flo MutableAttributeAccessor prev = curves_prev->geometry.wrap().attributes_for_write(); const AttributeAccessor next = curves_next->geometry.wrap().attributes(); const Array index_map = create_id_index_map(prev, next); - mix_attributes( - prev, - next, - index_map, - factor, - {"curve_type", "normal_mode", "handle_type_left", "handle_type_right", "knots_mode"}); + mix_attributes(prev, + next, + index_map, + ATTR_DOMAIN_POINT, + factor, + {"handle_type_left", "handle_type_right"}); } } if (bke::Instances *instances_prev = prev.get_instances_for_write()) { @@ -600,13 +569,15 @@ static void mix_geometries(GeometrySet &prev, const GeometrySet &next, const flo mix_attributes(instances_prev->attributes_for_write(), instances_next->attributes(), index_map, + ATTR_DOMAIN_INSTANCE, factor, {"position"}); if (index_map.is_empty()) { mix(instances_prev->transforms(), instances_next->transforms(), factor); } else { - mix_with_indices(instances_prev->transforms(), instances_next->transforms(), map, factor); + mix_with_indices( + instances_prev->transforms(), instances_next->transforms(), index_map, factor); } // TODO: Mix nested instance geometries. Try to match instance geometries by id. @@ -633,7 +604,7 @@ static void mix_simulation_state(const NodeSimulationItem &item, const CPPType &type = get_simulation_item_cpp_type(item); const fn::ValueOrFieldCPPType &value_or_field_type = *fn::ValueOrFieldCPPType::get_from_self( type); - if (value_or_field_type.is_field(prev)) { + if (value_or_field_type.is_field(prev) || value_or_field_type.is_field(next)) { /* Fields are evaluated on geometries and are mixed there. */ break; } @@ -804,6 +775,15 @@ class LazyFunctionForSimulationOutputNode final : public LazyFunction { for (const int i : simulation_items_.index_range()) { mix_simulation_state(simulation_items_[i], output_values[i], next_values[i], mix_factor); } + + for (const int i : simulation_items_.index_range()) { + const CPPType &type = *outputs_[i].type; + type.destruct(next_values[i]); + } + + for (const int i : simulation_items_.index_range()) { + params.output_set(i); + } } }; -- 2.30.2 From 3b55a0ea898d3156f78b515cbd8195e0543a6f24 Mon Sep 17 00:00:00 2001 From: Hans Goudey Date: Tue, 9 May 2023 15:51:01 -0400 Subject: [PATCH 4/4] Cleanup --- .../nodes/node_geo_simulation_output.cc | 369 +++++++++--------- 1 file changed, 182 insertions(+), 187 deletions(-) diff --git a/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc b/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc index 2d027fe72d3..5398b5f5fbe 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_simulation_output.cc @@ -579,213 +579,208 @@ static void mix_geometries(GeometrySet &prev, const GeometrySet &next, const flo mix_with_indices( instances_prev->transforms(), instances_next->transforms(), index_map, factor); } - - // TODO: Mix nested instance geometries. Try to match instance geometries by id. 
} } -} -static void mix_simulation_state(const NodeSimulationItem &item, - void *prev, - const void *next, - const float factor) -{ - switch (eNodeSocketDatatype(item.socket_type)) { - case SOCK_GEOMETRY: { - mix_geometries( - *static_cast(prev), *static_cast(next), factor); - break; - } - case SOCK_FLOAT: - case SOCK_VECTOR: - case SOCK_INT: - case SOCK_BOOLEAN: - case SOCK_RGBA: { - const CPPType &type = get_simulation_item_cpp_type(item); - const fn::ValueOrFieldCPPType &value_or_field_type = *fn::ValueOrFieldCPPType::get_from_self( - type); - if (value_or_field_type.is_field(prev) || value_or_field_type.is_field(next)) { - /* Fields are evaluated on geometries and are mixed there. */ + static void mix_simulation_state( + const NodeSimulationItem &item, void *prev, const void *next, const float factor) + { + switch (eNodeSocketDatatype(item.socket_type)) { + case SOCK_GEOMETRY: { + mix_geometries( + *static_cast(prev), *static_cast(next), factor); break; } + case SOCK_FLOAT: + case SOCK_VECTOR: + case SOCK_INT: + case SOCK_BOOLEAN: + case SOCK_RGBA: { + const CPPType &type = get_simulation_item_cpp_type(item); + const fn::ValueOrFieldCPPType &value_or_field_type = + *fn::ValueOrFieldCPPType::get_from_self(type); + if (value_or_field_type.is_field(prev) || value_or_field_type.is_field(next)) { + /* Fields are evaluated on geometries and are mixed there. */ + break; + } - void *prev = value_or_field_type.get_value_ptr(prev); - const void *next = value_or_field_type.get_value_ptr(next); - bke::attribute_math::convert_to_static_type(value_or_field_type.value, [&](auto dummy) { - using T = decltype(dummy); - *static_cast(prev) = bke::attribute_math::mix2( - factor, *static_cast(prev), *static_cast(next)); - }); - break; - } - default: - break; - } -} - -class LazyFunctionForSimulationOutputNode final : public LazyFunction { - const bNode &node_; - Span simulation_items_; - - public: - LazyFunctionForSimulationOutputNode(const bNode &node, - GeometryNodesLazyFunctionGraphInfo &own_lf_graph_info) - : node_(node) - { - debug_name_ = "Simulation Output"; - const NodeGeometrySimulationOutput &storage = node_storage(node); - simulation_items_ = {storage.items, storage.items_num}; - - MutableSpan lf_index_by_bsocket = own_lf_graph_info.mapping.lf_index_by_bsocket; - - for (const int i : simulation_items_.index_range()) { - const NodeSimulationItem &item = simulation_items_[i]; - const bNodeSocket &input_bsocket = node.input_socket(i); - const bNodeSocket &output_bsocket = node.output_socket(i); - - const CPPType &type = get_simulation_item_cpp_type(item); - - lf_index_by_bsocket[input_bsocket.index_in_tree()] = inputs_.append_and_get_index_as( - item.name, type, lf::ValueUsage::Maybe); - lf_index_by_bsocket[output_bsocket.index_in_tree()] = outputs_.append_and_get_index_as( - item.name, type); - } - } - - void *init_storage(LinearAllocator<> &allocator) const - { - return allocator.construct().release(); - } - - void destruct_storage(void *storage) const - { - std::destroy_at(static_cast(storage)); - } - - void execute_impl(lf::Params ¶ms, const lf::Context &context) const final - { - GeoNodesLFUserData &user_data = *static_cast(context.user_data); - GeoNodesModifierData &modifier_data = *user_data.modifier_data; - EvalData &eval_data = *static_cast(context.storage); - BLI_SCOPED_DEFER([&]() { eval_data.is_first_evaluation = false; }); - - const bke::sim::SimulationZoneID zone_id = get_simulation_zone_id(*user_data.compute_context, - node_.identifier); - - const bke::sim::SimulationZoneState 
*current_zone_state = - modifier_data.current_simulation_state ? - modifier_data.current_simulation_state->get_zone_state(zone_id) : - nullptr; - if (eval_data.is_first_evaluation && current_zone_state != nullptr) { - /* Common case when data is cached already. */ - this->output_cached_state( - params, *modifier_data.self_object, *user_data.compute_context, *current_zone_state); - return; - } - - if (modifier_data.current_simulation_state_for_write == nullptr) { - const bke::sim::SimulationZoneState *prev_zone_state = - modifier_data.prev_simulation_state ? - modifier_data.prev_simulation_state->get_zone_state(zone_id) : - nullptr; - if (prev_zone_state == nullptr) { - /* There is no previous simulation state and we also don't create a new one, so just output - * defaults. */ - params.set_default_remaining_outputs(); - return; + void *prev = value_or_field_type.get_value_ptr(prev); + const void *next = value_or_field_type.get_value_ptr(next); + bke::attribute_math::convert_to_static_type(value_or_field_type.value, [&](auto dummy) { + using T = decltype(dummy); + *static_cast(prev) = bke::attribute_math::mix2( + factor, *static_cast(prev), *static_cast(next)); + }); + break; } - const bke::sim::SimulationZoneState *next_zone_state = - modifier_data.next_simulation_state ? - modifier_data.next_simulation_state->get_zone_state(zone_id) : + default: + break; + } + } + + class LazyFunctionForSimulationOutputNode final : public LazyFunction { + const bNode &node_; + Span simulation_items_; + + public: + LazyFunctionForSimulationOutputNode(const bNode &node, + GeometryNodesLazyFunctionGraphInfo &own_lf_graph_info) + : node_(node) + { + debug_name_ = "Simulation Output"; + const NodeGeometrySimulationOutput &storage = node_storage(node); + simulation_items_ = {storage.items, storage.items_num}; + + MutableSpan lf_index_by_bsocket = own_lf_graph_info.mapping.lf_index_by_bsocket; + + for (const int i : simulation_items_.index_range()) { + const NodeSimulationItem &item = simulation_items_[i]; + const bNodeSocket &input_bsocket = node.input_socket(i); + const bNodeSocket &output_bsocket = node.output_socket(i); + + const CPPType &type = get_simulation_item_cpp_type(item); + + lf_index_by_bsocket[input_bsocket.index_in_tree()] = inputs_.append_and_get_index_as( + item.name, type, lf::ValueUsage::Maybe); + lf_index_by_bsocket[output_bsocket.index_in_tree()] = outputs_.append_and_get_index_as( + item.name, type); + } + } + + void *init_storage(LinearAllocator<> &allocator) const + { + return allocator.construct().release(); + } + + void destruct_storage(void *storage) const + { + std::destroy_at(static_cast(storage)); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &context) const final + { + GeoNodesLFUserData &user_data = *static_cast(context.user_data); + GeoNodesModifierData &modifier_data = *user_data.modifier_data; + EvalData &eval_data = *static_cast(context.storage); + BLI_SCOPED_DEFER([&]() { eval_data.is_first_evaluation = false; }); + + const bke::sim::SimulationZoneID zone_id = get_simulation_zone_id(*user_data.compute_context, + node_.identifier); + + const bke::sim::SimulationZoneState *current_zone_state = + modifier_data.current_simulation_state ? + modifier_data.current_simulation_state->get_zone_state(zone_id) : nullptr; - if (next_zone_state == nullptr) { - /* Output the last cached simulation state. */ + if (eval_data.is_first_evaluation && current_zone_state != nullptr) { + /* Common case when data is cached already. 
*/ this->output_cached_state( - params, *modifier_data.self_object, *user_data.compute_context, *prev_zone_state); + params, *modifier_data.self_object, *user_data.compute_context, *current_zone_state); return; } - /* A previous and next frame is cached already, but the current frame is not. */ - this->output_mixed_cached_state(params, - *modifier_data.self_object, - *user_data.compute_context, - *prev_zone_state, - *next_zone_state, - modifier_data.simulation_state_mix_factor); - return; + + if (modifier_data.current_simulation_state_for_write == nullptr) { + const bke::sim::SimulationZoneState *prev_zone_state = + modifier_data.prev_simulation_state ? + modifier_data.prev_simulation_state->get_zone_state(zone_id) : + nullptr; + if (prev_zone_state == nullptr) { + /* There is no previous simulation state and we also don't create a new one, so just + * output defaults. */ + params.set_default_remaining_outputs(); + return; + } + const bke::sim::SimulationZoneState *next_zone_state = + modifier_data.next_simulation_state ? + modifier_data.next_simulation_state->get_zone_state(zone_id) : + nullptr; + if (next_zone_state == nullptr) { + /* Output the last cached simulation state. */ + this->output_cached_state( + params, *modifier_data.self_object, *user_data.compute_context, *prev_zone_state); + return; + } + /* A previous and next frame is cached already, but the current frame is not. */ + this->output_mixed_cached_state(params, + *modifier_data.self_object, + *user_data.compute_context, + *prev_zone_state, + *next_zone_state, + modifier_data.simulation_state_mix_factor); + return; + } + + bke::sim::SimulationZoneState &new_zone_state = + modifier_data.current_simulation_state_for_write->get_zone_state_for_write(zone_id); + if (eval_data.is_first_evaluation) { + new_zone_state.item_by_identifier.clear(); + } + + Array input_values(simulation_items_.size(), nullptr); + for (const int i : simulation_items_.index_range()) { + input_values[i] = params.try_get_input_data_ptr_or_request(i); + } + if (input_values.as_span().contains(nullptr)) { + /* Wait until all inputs are available. */ + return; + } + values_to_simulation_state(simulation_items_, input_values, new_zone_state); + this->output_cached_state( + params, *modifier_data.self_object, *user_data.compute_context, new_zone_state); } - bke::sim::SimulationZoneState &new_zone_state = - modifier_data.current_simulation_state_for_write->get_zone_state_for_write(zone_id); - if (eval_data.is_first_evaluation) { - new_zone_state.item_by_identifier.clear(); + void output_cached_state(lf::Params ¶ms, + const Object &self_object, + const ComputeContext &compute_context, + const bke::sim::SimulationZoneState &state) const + { + Array output_values(simulation_items_.size()); + for (const int i : simulation_items_.index_range()) { + output_values[i] = params.get_output_data_ptr(i); + } + simulation_state_to_values( + simulation_items_, state, self_object, compute_context, node_, output_values); + for (const int i : simulation_items_.index_range()) { + params.output_set(i); + } } - Array input_values(simulation_items_.size(), nullptr); - for (const int i : simulation_items_.index_range()) { - input_values[i] = params.try_get_input_data_ptr_or_request(i); - } - if (input_values.as_span().contains(nullptr)) { - /* Wait until all inputs are available. 
*/ - return; - } - values_to_simulation_state(simulation_items_, input_values, new_zone_state); - this->output_cached_state( - params, *modifier_data.self_object, *user_data.compute_context, new_zone_state); - } + void output_mixed_cached_state(lf::Params ¶ms, + const Object &self_object, + const ComputeContext &compute_context, + const bke::sim::SimulationZoneState &prev_state, + const bke::sim::SimulationZoneState &next_state, + const float mix_factor) const + { + Array output_values(simulation_items_.size()); + for (const int i : simulation_items_.index_range()) { + output_values[i] = params.get_output_data_ptr(i); + } + simulation_state_to_values( + simulation_items_, prev_state, self_object, compute_context, node_, output_values); - void output_cached_state(lf::Params ¶ms, - const Object &self_object, - const ComputeContext &compute_context, - const bke::sim::SimulationZoneState &state) const - { - Array output_values(simulation_items_.size()); - for (const int i : simulation_items_.index_range()) { - output_values[i] = params.get_output_data_ptr(i); - } - simulation_state_to_values( - simulation_items_, state, self_object, compute_context, node_, output_values); - for (const int i : simulation_items_.index_range()) { - params.output_set(i); - } - } + Array next_values(simulation_items_.size()); + LinearAllocator<> allocator; + for (const int i : simulation_items_.index_range()) { + const CPPType &type = *outputs_[i].type; + next_values[i] = allocator.allocate(type.size(), type.alignment()); + } + simulation_state_to_values( + simulation_items_, next_state, self_object, compute_context, node_, next_values); - void output_mixed_cached_state(lf::Params ¶ms, - const Object &self_object, - const ComputeContext &compute_context, - const bke::sim::SimulationZoneState &prev_state, - const bke::sim::SimulationZoneState &next_state, - const float mix_factor) const - { - Array output_values(simulation_items_.size()); - for (const int i : simulation_items_.index_range()) { - output_values[i] = params.get_output_data_ptr(i); - } - simulation_state_to_values( - simulation_items_, prev_state, self_object, compute_context, node_, output_values); + for (const int i : simulation_items_.index_range()) { + mix_simulation_state(simulation_items_[i], output_values[i], next_values[i], mix_factor); + } - Array next_values(simulation_items_.size()); - LinearAllocator<> allocator; - for (const int i : simulation_items_.index_range()) { - const CPPType &type = *outputs_[i].type; - next_values[i] = allocator.allocate(type.size(), type.alignment()); - } - simulation_state_to_values( - simulation_items_, next_state, self_object, compute_context, node_, next_values); + for (const int i : simulation_items_.index_range()) { + const CPPType &type = *outputs_[i].type; + type.destruct(next_values[i]); + } - for (const int i : simulation_items_.index_range()) { - mix_simulation_state(simulation_items_[i], output_values[i], next_values[i], mix_factor); + for (const int i : simulation_items_.index_range()) { + params.output_set(i); + } } - - for (const int i : simulation_items_.index_range()) { - const CPPType &type = *outputs_[i].type; - type.destruct(next_values[i]); - } - - for (const int i : simulation_items_.index_range()) { - params.output_set(i); - } - } -}; + }; } // namespace blender::nodes::node_geo_simulation_output_cc -- 2.30.2
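Note for reviewers: below is a minimal standalone sketch of the interpolation scheme this series implements, for reference only (it is not part of any patch). It assumes plain std containers in place of Blender's Span/VArray/Map types, is restricted to a single float attribute, and runs single-threaded; the actual code parallelizes with threading::parallel_for and dispatches over attribute types via convert_to_static_type. All names in the sketch are hypothetical.

```cpp
#include <cstddef>
#include <unordered_map>
#include <vector>

/* Build a map from an element's "id" value to its index in the next state,
 * mirroring create_value_to_first_index_map() in the patch. */
static std::unordered_map<int, int> value_to_first_index_map(const std::vector<int> &ids)
{
  std::unordered_map<int, int> map;
  map.reserve(ids.size());
  for (int i = 0; i < int(ids.size()); i++) {
    map.emplace(ids[i], i); /* Keep the first occurrence of each id. */
  }
  return map;
}

/* For every element in the previous state, find the index of the element with
 * the same id in the next state, or -1 if that id disappeared. */
static std::vector<int> create_id_index_map(const std::vector<int> &prev_ids,
                                            const std::vector<int> &next_ids)
{
  const std::unordered_map<int, int> next_map = value_to_first_index_map(next_ids);
  std::vector<int> index_map(prev_ids.size(), -1);
  for (int i = 0; i < int(prev_ids.size()); i++) {
    const auto it = next_map.find(prev_ids[i]);
    index_map[i] = (it == next_map.end()) ? -1 : it->second;
  }
  return index_map;
}

/* Linear mix of a float attribute when the domain size is unchanged; "prev"
 * is modified in place, matching how mix_attributes() writes back into the
 * previous state's attributes. */
static void mix(std::vector<float> &prev, const std::vector<float> &next, const float factor)
{
  for (std::size_t i = 0; i < prev.size(); i++) {
    prev[i] = (1.0f - factor) * prev[i] + factor * next[i];
  }
}

/* The same mix routed through the id-based index map, so elements are matched
 * by id rather than by position; elements without a match keep their value. */
static void mix_with_indices(std::vector<float> &prev,
                             const std::vector<float> &next,
                             const std::vector<int> &index_map,
                             const float factor)
{
  for (std::size_t i = 0; i < prev.size(); i++) {
    if (index_map[i] != -1) {
      prev[i] = (1.0f - factor) * prev[i] + factor * next[index_map[i]];
    }
  }
}
```

Usage follows the same shape as the patch: when both states expose an "id" attribute, build the index map once and call mix_with_indices() per attribute; otherwise, mix positionally only if the domain sizes match, and skip the attribute when they differ.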