Simulation Nodes: bake simulation states to disk #106937

Merged
Jacques Lucke merged 116 commits from JacquesLucke/blender:sim-bake into geometry-nodes-simulation 2023-04-22 14:48:56 +02:00
33 changed files with 2204 additions and 104 deletions

View File

@ -236,6 +236,17 @@ class OBJECT_PT_display(ObjectButtonsPanel, Panel):
row.prop_decorator(obj, "display_bounds_type")
class OBJECT_PT_baking(ObjectButtonsPanel, Panel):
bl_label = "Baking"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
col = layout.column()
col.label(text="Simulation Nodes Cache:")
col.operator("object.simulation_nodes_cache_bake", text="Bake").selected = True
col.operator("object.simulation_nodes_cache_delete", text="Delete").selected = True
class OBJECT_PT_instancing(ObjectButtonsPanel, Panel):
bl_label = "Instancing"
bl_options = {'DEFAULT_CLOSED'}
@ -412,6 +423,7 @@ classes = (
OBJECT_PT_motion_paths,
OBJECT_PT_motion_paths_display,
OBJECT_PT_display,
OBJECT_PT_baking,
OBJECT_PT_visibility,
OBJECT_PT_lineart,
OBJECT_PT_custom_props,

View File

@ -5,9 +5,13 @@
#include "BKE_geometry_set.hh"
#include "BLI_map.hh"
#include "BLI_sub_frame.hh"
namespace blender::bke::sim {
class BDataSharing;
class ModifierSimulationCache;
class SimulationStateItem {
public:
virtual ~SimulationStateItem() = default;
@ -47,10 +51,15 @@ struct SimulationZoneID {
class ModifierSimulationState {
private:
mutable std::mutex mutex_;
Map<SimulationZoneID, std::unique_ptr<SimulationZoneState>> zone_states_;
mutable bool bake_loaded_;
public:
ModifierSimulationCache *owner_;
mutable std::mutex mutex_;
Map<SimulationZoneID, std::unique_ptr<SimulationZoneState>> zone_states_;
std::optional<std::string> meta_path_;
std::optional<std::string> bdata_dir_;
const SimulationZoneState *get_zone_state(const SimulationZoneID &zone_id) const
{
std::lock_guard lock{mutex_};
@ -66,62 +75,112 @@ class ModifierSimulationState {
return *zone_states_.lookup_or_add_cb(
zone_id, []() { return std::make_unique<SimulationZoneState>(); });
}
void ensure_bake_loaded() const;
};
struct ModifierSimulationStateAtFrame {
SubFrame frame;
ModifierSimulationState state;
};
enum class CacheState {
Valid,
Invalid,
Baked,
};
struct StatesAroundFrame {
const ModifierSimulationStateAtFrame *prev = nullptr;
const ModifierSimulationStateAtFrame *current = nullptr;
const ModifierSimulationStateAtFrame *next = nullptr;
};
class ModifierSimulationCache {
private:
Map<float, std::unique_ptr<ModifierSimulationState>> states_by_time_;
bool invalid_ = false;
Vector<std::unique_ptr<ModifierSimulationStateAtFrame>> states_at_frames_;
std::unique_ptr<BDataSharing> bdata_sharing_;
friend ModifierSimulationState;
public:
bool has_state_at_time(const float time) const
CacheState cache_state_ = CacheState::Valid;
bool failed_finding_bake_ = false;
void try_discover_bake(StringRefNull meta_dir, StringRefNull bdata_dir);
bool has_state_at_frame(const SubFrame &frame) const
{
return states_by_time_.contains(time);
for (const auto &item : states_at_frames_) {
if (item->frame == frame) {
return true;
}
}
return false;
}
const ModifierSimulationState *get_state_at_time(const float time) const
bool has_states() const
{
if (auto *ptr = states_by_time_.lookup_ptr(time)) {
return ptr->get();
return !states_at_frames_.is_empty();
}
const ModifierSimulationState *get_state_at_exact_frame(const SubFrame &frame) const
{
for (const auto &item : states_at_frames_) {
if (item->frame == frame) {
return &item->state;
}
}
return nullptr;
}
ModifierSimulationState &get_state_for_write(const float time)
ModifierSimulationState &get_state_at_frame_for_write(const SubFrame &frame)
{
return *states_by_time_.lookup_or_add_cb(
time, []() { return std::make_unique<ModifierSimulationState>(); });
}
std::pair<float, const ModifierSimulationState *> try_get_last_state_before(
const float time) const
{
float last_time = -FLT_MAX;
const ModifierSimulationState *last_state = nullptr;
for (const auto &item : states_by_time_.items()) {
if (item.key < time && item.key > last_time) {
last_time = item.key;
last_state = item.value.get();
for (const auto &item : states_at_frames_) {
if (item->frame == frame) {
return item->state;
}
}
return {last_time, last_state};
states_at_frames_.append(std::make_unique<ModifierSimulationStateAtFrame>());
states_at_frames_.last()->frame = frame;
states_at_frames_.last()->state.owner_ = this;
return states_at_frames_.last()->state;
}
StatesAroundFrame get_states_around_frame(const SubFrame &frame) const
{
StatesAroundFrame states_around_frame;
for (const auto &item : states_at_frames_) {
if (item->frame < frame) {
if (states_around_frame.prev == nullptr || item->frame > states_around_frame.prev->frame) {
states_around_frame.prev = item.get();
}
}
if (item->frame == frame) {
if (states_around_frame.current == nullptr) {
states_around_frame.current = item.get();
}
}
if (item->frame > frame) {
if (states_around_frame.next == nullptr || item->frame < states_around_frame.next->frame) {
states_around_frame.next = item.get();
}
}
}
return states_around_frame;
}
void invalidate()
{
invalid_ = true;
cache_state_ = CacheState::Invalid;
}
bool is_invalid() const
CacheState cache_state() const
{
return invalid_;
return cache_state_;
}
void reset()
{
states_by_time_.clear();
invalid_ = false;
}
void reset();
};
} // namespace blender::bke::sim
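
To illustrate the frame-indexed lookup API above, here is a minimal sketch of how a caller might query the cache; `cache` and `frame` are assumed to come from the modifier evaluation code, and the function name is made up:

namespace blender::bke::sim {

/* Sketch only: query the cache around the current frame. */
static void example_query(ModifierSimulationCache &cache, const SubFrame &frame)
{
  if (const ModifierSimulationState *state = cache.get_state_at_exact_frame(frame)) {
    /* An exact state exists; zone states can then be looked up with state->get_zone_state(). */
    return;
  }
  const StatesAroundFrame states = cache.get_states_around_frame(frame);
  if (states.prev != nullptr) {
    /* Continue simulating from the closest previous state; states.next, if set, could be used
     * for interpolation while scrubbing a baked cache. */
  }
}

}  // namespace blender::bke::sim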

View File

@ -0,0 +1,170 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include "BKE_simulation_state.hh"
#include "BLI_serialize.hh"
struct Main;
struct ModifierData;
namespace blender {
class fstream;
}
namespace blender::bke::sim {
using DictionaryValue = io::serialize::DictionaryValue;
using DictionaryValuePtr = std::shared_ptr<DictionaryValue>;
/**
* Reference to a slice of memory typically stored on disk.
*/
struct BDataSlice {
std::string name;
Review

`name` -> `file_name` maybe? It could be anything right now

Review

Could be `file_name`, but it's somewhat intentionally not, because it's not strictly necessary that this refers to a file (even though currently it always does).
IndexRange range;
DictionaryValuePtr serialize() const;
static std::optional<BDataSlice> deserialize(const io::serialize::DictionaryValue &io_slice);
};
/**
* Abstract base class for loading binary data.
*/
class BDataReader {
public:
/**
* Read the data from the given slice into the provided memory buffer.
* \return True on success, otherwise false.
*/
[[nodiscard]] virtual bool read(const BDataSlice &slice, void *r_data) const = 0;
};
/**
* Abstract base class for writing binary data.
*/
class BDataWriter {
public:
/**
* Write the provided binary data.
* \return Slice where the data has been written to.
*/
virtual BDataSlice write(const void *data, int64_t size) = 0;
};
/**
* Allows for simple data deduplication when writing or reading data by making use of implicit
* sharing.
*/
class BDataSharing {
private:
struct StoredByRuntimeValue {
/**
* Version of the shared data that was written before. This is needed because the data might
* be changed later without changing the #ImplicitSharingInfo pointer.
*/
int64_t sharing_info_version;
/**
* Identifier of the stored data. This includes information for where the data is stored (a

A couple more words after "Identifier" could help the reader figure out the type of this value, something like "Identifier file name"
* #BDataSlice) and optionally information for how it is loaded (e.g. endian information).
*/
DictionaryValuePtr io_data;
};
/**
HooglyBoogly marked this conversation as resolved Outdated

Simpler wording suggestion:

Map used to detect when some data has already been written. It keeps a weak reference to #ImplicitSharingInfo, allowing it to check for equality of two arrays just by comparing the sharing info's pointer and version.

* Map used to detect when some data has already been written. It keeps a weak reference to
* #ImplicitSharingInfo, allowing it to check for equality of two arrays just by comparing the
* sharing info's pointer and version.
*/
Map<const ImplicitSharingInfo *, StoredByRuntimeValue> stored_by_runtime_;
Review

A comment mentioning the need for a mutex might be nice here. Also fine to skip if you think it's obvious or not necessary for the user of the API

/**
* Use a mutex so that #read_shared can be implemented in a thread-safe way.
HooglyBoogly marked this conversation as resolved Outdated

`Map that is used` -> `Map used`
*/
mutable std::mutex mutex_;
/**
* Map used to detect when some data has been previously loaded. This keeps strong
* references to #ImplicitSharingInfo.
*/
mutable Map<std::string, ImplicitSharingInfoAndData> runtime_by_stored_;
public:
~BDataSharing();
/**
* Check if the data referenced by `sharing_info` has been written before. If yes, return the
* identifier for the previously written data. Otherwise, write the data now and store the
* identifier for later use.
* \return Identifier that indicates where the data has been written.
*/
[[nodiscard]] DictionaryValuePtr write_shared(const ImplicitSharingInfo *sharing_info,
FunctionRef<DictionaryValuePtr()> write_fn);
/**
* Check if the data identified by `io_data` has been read before or load it now.
* \return Shared ownership to the read data, or none if there was an error.
*/
[[nodiscard]] std::optional<ImplicitSharingInfoAndData> read_shared(
const DictionaryValue &io_data,
FunctionRef<std::optional<ImplicitSharingInfoAndData>()> read_fn) const;
};
/**
* A specific #BDataReader that reads from disk.
*/
class DiskBDataReader : public BDataReader {
private:
const std::string bdata_dir_;
mutable std::mutex mutex_;
mutable Map<std::string, std::unique_ptr<fstream>> open_input_streams_;
public:
DiskBDataReader(std::string bdata_dir);
[[nodiscard]] bool read(const BDataSlice &slice, void *r_data) const override;
};
/**
* A specific #BDataWriter that writes to a file on disk.
*/
class DiskBDataWriter : public BDataWriter {
private:
/** Name of the file that data is written to. */
std::string bdata_name_;
/** File handle. */
std::ostream &bdata_file_;
/** Current position in the file. */
int64_t current_offset_;
public:
DiskBDataWriter(std::string bdata_name, std::ostream &bdata_file, int64_t current_offset);
BDataSlice write(const void *data, int64_t size) override;
};
/**
* Get the directory that contains all baked simulation data for the given modifier. This is a
* parent directory of the two directories below.
*/
std::string get_bake_directory(const Main &bmain, const Object &object, const ModifierData &md);
std::string get_bdata_directory(const Main &bmain, const Object &object, const ModifierData &md);
std::string get_meta_directory(const Main &bmain, const Object &object, const ModifierData &md);
/**
* Encode the simulation state in a #DictionaryValue which also contains references to external
* binary data that has been written using #bdata_writer.
*/
void serialize_modifier_simulation_state(const ModifierSimulationState &state,
BDataWriter &bdata_writer,
BDataSharing &bdata_sharing,
DictionaryValue &r_io_root);
/**
* Fill the simulation state by parsing the provided #DictionaryValue which also contains
* references to external binary data that is read using #bdata_reader.
*/
void deserialize_modifier_simulation_state(const DictionaryValue &io_root,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing,
ModifierSimulationState &r_state);
} // namespace blender::bke::sim
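
As an illustration of the deduplication contract above, a hedged sketch of how an array write can go through #BDataSharing. It mirrors the helpers in the implementation file further down, minus the endian handling; the function name is made up and includes are omitted:

namespace blender::bke::sim {

/* Sketch only: write an array once per (sharing info, version) pair. */
static DictionaryValuePtr write_array_deduplicated(BDataWriter &writer,
                                                   BDataSharing &sharing,
                                                   const GSpan data,
                                                   const ImplicitSharingInfo *sharing_info)
{
  return sharing.write_shared(sharing_info, [&]() {
    /* Only invoked if this exact array has not been written before. */
    return writer.write(data.data(), data.size_in_bytes()).serialize();
  });
}

}  // namespace blender::bke::sim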

View File

@ -266,6 +266,8 @@ set(SRC
intern/shader_fx.c
intern/shrinkwrap.cc
intern/simulation.cc
intern/simulation_state.cc
intern/simulation_state_serialize.cc
intern/softbody.c
intern/sound.c
intern/speaker.c
@ -467,6 +469,7 @@ set(SRC
BKE_shrinkwrap.h
BKE_simulation.h
BKE_simulation_state.hh
BKE_simulation_state_serialize.hh
BKE_softbody.h
BKE_sound.h
BKE_speaker.h

View File

@ -2358,12 +2358,12 @@ CustomData CustomData_shallow_copy_remove_non_bmesh_attributes(const CustomData
class CustomDataLayerImplicitSharing : public ImplicitSharingInfo {
private:
const void *data_;
const int totelem_;
int totelem_;
const eCustomDataType type_;
public:
CustomDataLayerImplicitSharing(const void *data, const int totelem, const eCustomDataType type)
: ImplicitSharingInfo(1), data_(data), totelem_(totelem), type_(type)
: ImplicitSharingInfo(), data_(data), totelem_(totelem), type_(type)
{
}
@ -2373,6 +2373,13 @@ class CustomDataLayerImplicitSharing : public ImplicitSharingInfo {
free_layer_data(type_, data_, totelem_);
MEM_delete(this);
}
void delete_data_only() override
{
free_layer_data(type_, data_, totelem_);
data_ = nullptr;
totelem_ = 0;
}
};
/** Create a #ImplicitSharingInfo that takes ownership of the data. */
@ -2404,6 +2411,9 @@ static void ensure_layer_data_is_mutable(CustomDataLayer &layer, const int totel
layer.sharing_info->remove_user_and_delete_if_last();
layer.sharing_info = make_implicit_sharing_info_for_layer(type, layer.data, totelem);
}
else {
layer.sharing_info->tag_ensured_mutable();
}
}
void CustomData_realloc(CustomData *data, const int old_size, const int new_size)

View File

@ -372,7 +372,7 @@ static ComponentAttributeProviders create_attribute_providers_for_curve()
ATTR_DOMAIN_POINT,
CD_PROP_FLOAT3,
CD_PROP_FLOAT3,
BuiltinAttributeProvider::NonCreatable,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::NonDeletable,
point_access,
tag_component_positions_changed);

View File

@ -1153,7 +1153,7 @@ static ComponentAttributeProviders create_attribute_providers_for_mesh()
ATTR_DOMAIN_EDGE,
CD_PROP_INT32_2D,
CD_PROP_INT32_2D,
BuiltinAttributeProvider::NonCreatable,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::NonDeletable,
edge_access,
nullptr,
@ -1169,7 +1169,7 @@ static ComponentAttributeProviders create_attribute_providers_for_mesh()
ATTR_DOMAIN_CORNER,
CD_PROP_INT32,
CD_PROP_INT32,
BuiltinAttributeProvider::NonCreatable,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::NonDeletable,
corner_access,
nullptr,
@ -1178,7 +1178,7 @@ static ComponentAttributeProviders create_attribute_providers_for_mesh()
ATTR_DOMAIN_CORNER,
CD_PROP_INT32,
CD_PROP_INT32,
BuiltinAttributeProvider::NonCreatable,
BuiltinAttributeProvider::Creatable,
BuiltinAttributeProvider::NonDeletable,
corner_access,
nullptr,

View File

@ -120,6 +120,7 @@ GeometryComponent &GeometrySet::get_component_for_write(GeometryComponentType co
}
if (component_ptr->is_mutable()) {
/* If the referenced component is already mutable, return it directly. */
component_ptr->tag_ensured_mutable();
return *component_ptr;
}
/* If the referenced component is shared, make a copy. The copy is not shared and is

View File

@ -0,0 +1,93 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BKE_simulation_state.hh"
#include "BKE_simulation_state_serialize.hh"
#include "BLI_fileops.hh"
#include "BLI_path_util.h"
namespace blender::bke::sim {
void ModifierSimulationCache::try_discover_bake(const StringRefNull meta_dir,
const StringRefNull bdata_dir)
{
if (failed_finding_bake_) {
return;
}
if (!BLI_is_dir(meta_dir.c_str()) || !BLI_is_dir(bdata_dir.c_str())) {
failed_finding_bake_ = true;
return;
}
direntry *dir_entries = nullptr;
const int dir_entries_num = BLI_filelist_dir_contents(meta_dir.c_str(), &dir_entries);
BLI_SCOPED_DEFER([&]() { BLI_filelist_free(dir_entries, dir_entries_num); });
if (dir_entries_num == 0) {
failed_finding_bake_ = true;
return;
}
this->reset();
for (const int i : IndexRange(dir_entries_num)) {
const direntry &dir_entry = dir_entries[i];
const StringRefNull dir_entry_path = dir_entry.path;
if (!dir_entry_path.endswith(".json")) {
continue;
}
char modified_file_name[FILENAME_MAX];
BLI_strncpy(modified_file_name, dir_entry.relname, sizeof(modified_file_name));
BLI_str_replace_char(modified_file_name, '_', '.');
const SubFrame frame = std::stof(modified_file_name);
auto new_state_at_frame = std::make_unique<ModifierSimulationStateAtFrame>();
new_state_at_frame->frame = frame;
new_state_at_frame->state.bdata_dir_ = bdata_dir;
new_state_at_frame->state.meta_path_ = dir_entry.path;
new_state_at_frame->state.owner_ = this;
states_at_frames_.append(std::move(new_state_at_frame));
}
bdata_sharing_ = std::make_unique<BDataSharing>();
cache_state_ = CacheState::Baked;
}
void ModifierSimulationState::ensure_bake_loaded() const
{
std::scoped_lock lock{mutex_};
if (bake_loaded_) {
return;
}
if (!meta_path_ || !bdata_dir_) {
return;
}
const std::shared_ptr<io::serialize::Value> io_root_value = io::serialize::read_json_file(
*meta_path_);
if (!io_root_value) {
return;
}
const DictionaryValue *io_root = io_root_value->as_dictionary_value();
if (!io_root) {
return;
}
const DiskBDataReader bdata_reader{*bdata_dir_};
deserialize_modifier_simulation_state(*io_root,
bdata_reader,
*owner_->bdata_sharing_,
const_cast<ModifierSimulationState &>(*this));
bake_loaded_ = true;
}
void ModifierSimulationCache::reset()
{
states_at_frames_.clear();
bdata_sharing_.reset();
cache_state_ = CacheState::Valid;
}
} // namespace blender::bke::sim
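
A hedged sketch of how the discovery entry point above is expected to be wired up by the modifier code, using the path helpers from BKE_simulation_state_serialize.hh; the wrapper function and the availability of `bmain`/`object`/`md` are assumptions:

namespace blender::bke::sim {

/* Sketch only: point the cache at an existing bake on disk. */
static void example_discover_bake(ModifierSimulationCache &cache,
                                  const Main &bmain,
                                  const Object &object,
                                  const ModifierData &md)
{
  const std::string meta_dir = get_meta_directory(bmain, object, md);
  const std::string bdata_dir = get_bdata_directory(bmain, object, md);
  cache.try_discover_bake(meta_dir, bdata_dir);
  if (cache.cache_state() == CacheState::Baked) {
    /* Meta files were found; per-frame states are loaded lazily through
     * ModifierSimulationState::ensure_bake_loaded(). */
  }
}

}  // namespace blender::bke::sim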

View File

@ -0,0 +1,945 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BKE_curves.hh"
#include "BKE_instances.hh"
#include "BKE_lib_id.h"
#include "BKE_main.h"
#include "BKE_mesh.hh"
#include "BKE_pointcloud.h"
#include "BKE_simulation_state_serialize.hh"
#include "DNA_material_types.h"
#include "DNA_modifier_types.h"
#include "DNA_object_types.h"
#include "BLI_endian_defines.h"
#include "BLI_endian_switch.h"
#include "BLI_fileops.hh"
#include "BLI_math_matrix_types.hh"
#include "BLI_path_util.h"
#include "RNA_access.h"
#include "RNA_enum_types.h"
namespace blender::bke::sim {
/**
* Turn the name into something that can be used as a file name. It does not necessarily have to
* be human readable, but it can help if it is at least partially readable.
*/
static std::string escape_name(const StringRef name)
{
std::stringstream ss;
for (const char c : name) {
/* Only some letters allowed. Digits are not because they could lead to name collisions. */
if (('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')) {
ss << c;
}
else {
ss << int(c);
}
}
return ss.str();
}
static std::string get_blendcache_directory(const Main &bmain)
{
StringRefNull blend_file_path = BKE_main_blendfile_path(&bmain);
char blend_directory[FILE_MAX];
char blend_name[FILE_MAX];
BLI_split_dirfile(blend_file_path.c_str(),
blend_directory,
blend_name,
sizeof(blend_directory),
sizeof(blend_name));
blend_name[StringRef(blend_name).rfind(".")] = '\0';
const std::string blendcache_name = "blendcache_" + StringRef(blend_name);
char blendcache_dir[FILE_MAX];
BLI_path_join(blendcache_dir, sizeof(blendcache_dir), blend_directory, blendcache_name.c_str());
return blendcache_dir;
}
static std::string get_modifier_sim_name(const Object &object, const ModifierData &md)
{
const std::string object_name_escaped = escape_name(object.id.name + 2);
const std::string modifier_name_escaped = escape_name(md.name);
return "sim_" + object_name_escaped + "_" + modifier_name_escaped;
}
std::string get_bake_directory(const Main &bmain, const Object &object, const ModifierData &md)
{
char bdata_dir[FILE_MAX];
BLI_path_join(bdata_dir,
sizeof(bdata_dir),
get_blendcache_directory(bmain).c_str(),
get_modifier_sim_name(object, md).c_str());
return bdata_dir;
}
std::string get_bdata_directory(const Main &bmain, const Object &object, const ModifierData &md)
{
char bdata_dir[FILE_MAX];
BLI_path_join(
bdata_dir, sizeof(bdata_dir), get_bake_directory(bmain, object, md).c_str(), "bdata");
return bdata_dir;
}
std::string get_meta_directory(const Main &bmain, const Object &object, const ModifierData &md)
{
char meta_dir[FILE_MAX];
BLI_path_join(meta_dir, sizeof(meta_dir), get_bake_directory(bmain, object, md).c_str(), "meta");
return meta_dir;
}
std::shared_ptr<DictionaryValue> BDataSlice::serialize() const
{
auto io_slice = std::make_shared<DictionaryValue>();
io_slice->append_str("name", this->name);
io_slice->append_int("start", range.start());
io_slice->append_int("size", range.size());
return io_slice;
}
std::optional<BDataSlice> BDataSlice::deserialize(const DictionaryValue &io_slice)
{
const std::optional<StringRefNull> name = io_slice.lookup_str("name");
const std::optional<int64_t> start = io_slice.lookup_int("start");
const std::optional<int64_t> size = io_slice.lookup_int("size");
if (!name || !start || !size) {
return std::nullopt;
}
return BDataSlice{*name, {*start, *size}};
}
static StringRefNull get_endian_io_name(const int endian)
{
if (endian == L_ENDIAN) {
return "little";
}
BLI_assert(endian == B_ENDIAN);
return "big";
}
static StringRefNull get_domain_io_name(const eAttrDomain domain)
{
const char *io_name = "unknown";
RNA_enum_id_from_value(rna_enum_attribute_domain_items, domain, &io_name);
return io_name;
}
static StringRefNull get_data_type_io_name(const eCustomDataType data_type)
{
const char *io_name = "unknown";
RNA_enum_id_from_value(rna_enum_attribute_type_items, data_type, &io_name);
return io_name;
}
static std::optional<eAttrDomain> get_domain_from_io_name(const StringRefNull io_name)
{
int domain;
if (!RNA_enum_value_from_identifier(rna_enum_attribute_domain_items, io_name.c_str(), &domain)) {
return std::nullopt;
}
return eAttrDomain(domain);
}
static std::optional<eCustomDataType> get_data_type_from_io_name(const StringRefNull io_name)
{
int domain;
if (!RNA_enum_value_from_identifier(rna_enum_attribute_type_items, io_name.c_str(), &domain)) {
return std::nullopt;
}
return eCustomDataType(domain);
}
/**
* Write the data and remember which endianness the data had.
*/
static std::shared_ptr<DictionaryValue> write_bdata_raw_data_with_endian(
BDataWriter &bdata_writer, const void *data, const int64_t size_in_bytes)
{
auto io_data = bdata_writer.write(data, size_in_bytes).serialize();
if (ENDIAN_ORDER == B_ENDIAN) {
io_data->append_str("endian", get_endian_io_name(ENDIAN_ORDER));
}
return io_data;
}
/**
* Read data into an array and perform an endian switch if necessary.
*/
[[nodiscard]] static bool read_bdata_raw_data_with_endian(const BDataReader &bdata_reader,
const DictionaryValue &io_data,
const int64_t element_size,
const int64_t elements_num,
void *r_data)
{
const std::optional<BDataSlice> slice = BDataSlice::deserialize(io_data);
if (!slice) {
return false;
}
if (slice->range.size() != element_size * elements_num) {
return false;
}
if (!bdata_reader.read(*slice, r_data)) {
return false;
}
const StringRefNull stored_endian = io_data.lookup_str("endian").value_or("little");
const StringRefNull current_endian = get_endian_io_name(ENDIAN_ORDER);
const bool need_endian_switch = stored_endian != current_endian;
if (need_endian_switch) {
switch (element_size) {
case 1:
break;
case 2:
BLI_endian_switch_uint16_array(static_cast<uint16_t *>(r_data), elements_num);
break;
case 4:
BLI_endian_switch_uint32_array(static_cast<uint32_t *>(r_data), elements_num);
break;
case 8:
BLI_endian_switch_uint64_array(static_cast<uint64_t *>(r_data), elements_num);
break;
default:
return false;
}
}
return true;
}
/** Write bytes ignoring endianness. */
static std::shared_ptr<DictionaryValue> write_bdata_raw_bytes(BDataWriter &bdata_writer,
const void *data,
const int64_t size_in_bytes)
{
return bdata_writer.write(data, size_in_bytes).serialize();
}
/** Read bytes ignoring endianness. */
[[nodiscard]] static bool read_bdata_raw_bytes(const BDataReader &bdata_reader,
const DictionaryValue &io_data,
const int64_t bytes_num,
void *r_data)
{
const std::optional<BDataSlice> slice = BDataSlice::deserialize(io_data);
if (!slice) {
return false;
}
if (slice->range.size() != bytes_num) {
return false;
}
return bdata_reader.read(*slice, r_data);
}
static std::shared_ptr<DictionaryValue> write_bdata_simple_gspan(BDataWriter &bdata_writer,
const GSpan data)
{
const CPPType &type = data.type();
BLI_assert(type.is_trivial());
if (type.size() == 1 || type.is<ColorGeometry4b>()) {
Review

No need to do this now necessarily, but I wonder if a `CPPType` should know if its values need endian switches

Review

Hm it could. Only makes sense for trivial types of course.
return write_bdata_raw_bytes(bdata_writer, data.data(), data.size_in_bytes());
}
return write_bdata_raw_data_with_endian(bdata_writer, data.data(), data.size_in_bytes());
}
[[nodiscard]] static bool read_bdata_simple_gspan(const BDataReader &bdata_reader,
const DictionaryValue &io_data,
GMutableSpan r_data)
{
const CPPType &type = r_data.type();
BLI_assert(type.is_trivial());
if (type.size() == 1 || type.is<ColorGeometry4b>()) {
return read_bdata_raw_bytes(bdata_reader, io_data, r_data.size_in_bytes(), r_data.data());
}
if (type.is_any<int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t, float>()) {
return read_bdata_raw_data_with_endian(
bdata_reader, io_data, type.size(), r_data.size(), r_data.data());
}
if (type.is_any<float2, int2>()) {
return read_bdata_raw_data_with_endian(
bdata_reader, io_data, sizeof(int32_t), r_data.size() * 2, r_data.data());

Maybe replace `float` here with `int32_t` or `4`?

Voice against the magic four.
}
if (type.is<float3>()) {
return read_bdata_raw_data_with_endian(
bdata_reader, io_data, sizeof(float), r_data.size() * 3, r_data.data());
HooglyBoogly marked this conversation as resolved
Review

Better to add a `size_in_bytes()` method to `GMutableSpan` or `GSpan` IMO

Review

Oops, never mind I misunderstood this.
}
if (type.is<float4x4>()) {
return read_bdata_raw_data_with_endian(
bdata_reader, io_data, sizeof(float), r_data.size() * 16, r_data.data());
}
if (type.is<ColorGeometry4f>()) {
return read_bdata_raw_data_with_endian(
bdata_reader, io_data, sizeof(float), r_data.size() * 4, r_data.data());
}
return false;
}
static std::shared_ptr<DictionaryValue> write_bdata_shared_simple_gspan(
BDataWriter &bdata_writer,
BDataSharing &bdata_sharing,
const GSpan data,
const ImplicitSharingInfo *sharing_info)
{
return bdata_sharing.write_shared(
sharing_info, [&]() { return write_bdata_simple_gspan(bdata_writer, data); });
}
[[nodiscard]] static const void *read_bdata_shared_simple_gspan(

When I was working on the stuff in the `implicit_sharing` namespace, I found `void **` to be quite annoying, it's also not portable technically. Something like this avoids that issue, though you might not like it better, not sure:

[[nodiscard]] static const void *read_bdata_shared_simple_gspan(
    const DictionaryValue &io_data,
    const BDataReader &bdata_reader,
    const BDataSharing &bdata_sharing,
    const CPPType &cpp_type,
    const int size,
    const ImplicitSharingInfo **r_sharing_info)
{
  const std::optional<ImplicitSharingInfoAndData> sharing_info_and_data =
      bdata_sharing.read_shared(io_data, [&]() -> std::optional<ImplicitSharingInfoAndData> {
        void *data_mem = MEM_mallocN_aligned(
            size * cpp_type.size(), cpp_type.alignment(), __func__);
        if (!read_bdata_simple_gspan(bdata_reader, io_data, {cpp_type, data_mem, size})) {
          MEM_freeN(data_mem);
          return std::nullopt;
        }
        return ImplicitSharingInfoAndData{implicit_sharing::info_for_mem_free(data_mem), data_mem};
      });
  if (!sharing_info_and_data) {
    *r_sharing_info = nullptr;
    return nullptr;
  }
  *r_sharing_info = sharing_info_and_data->sharing_info;
  return sharing_info_and_data->data;
}

template<typename T>
[[nodiscard]] static bool read_bdata_shared_simple_span(const DictionaryValue &io_data,
                                                        const BDataReader &bdata_reader,
                                                        const BDataSharing &bdata_sharing,
                                                        const int size,
                                                        T **r_data,
                                                        ImplicitSharingInfo **r_sharing_info)
{
  *r_data = read_bdata_shared_simple_gspan(io_data,
                                           bdata_reader,
                                           bdata_sharing,
                                           CPPType::get<T>(),
                                           size,
                                           (const ImplicitSharingInfo **)r_sharing_info);
  return *r_data != nullptr;
}
const DictionaryValue &io_data,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing,
const CPPType &cpp_type,
const int size,
const ImplicitSharingInfo **r_sharing_info)
{
const std::optional<ImplicitSharingInfoAndData> sharing_info_and_data =
bdata_sharing.read_shared(io_data, [&]() -> std::optional<ImplicitSharingInfoAndData> {
void *data_mem = MEM_mallocN_aligned(
size * cpp_type.size(), cpp_type.alignment(), __func__);
if (!read_bdata_simple_gspan(bdata_reader, io_data, {cpp_type, data_mem, size})) {
MEM_freeN(data_mem);
return std::nullopt;
}
return ImplicitSharingInfoAndData{implicit_sharing::info_for_mem_free(data_mem), data_mem};
});
if (!sharing_info_and_data) {
*r_sharing_info = nullptr;
return nullptr;
}
*r_sharing_info = sharing_info_and_data->sharing_info;
return sharing_info_and_data->data;
}
template<typename T>
[[nodiscard]] static bool read_bdata_shared_simple_span(const DictionaryValue &io_data,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing,
const int size,
T **r_data,
const ImplicitSharingInfo **r_sharing_info)
{
*r_data = const_cast<T *>(static_cast<const T *>(read_bdata_shared_simple_gspan(
io_data, bdata_reader, bdata_sharing, CPPType::get<T>(), size, r_sharing_info)));
return *r_data != nullptr;
}
[[nodiscard]] static bool load_attributes(const io::serialize::ArrayValue &io_attributes,
bke::MutableAttributeAccessor &attributes,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing)
{
for (const auto &io_attribute_value : io_attributes.elements()) {
const auto *io_attribute = io_attribute_value->as_dictionary_value();
if (!io_attribute) {
return false;
}
const std::optional<StringRefNull> name = io_attribute->lookup_str("name");
const std::optional<StringRefNull> domain_str = io_attribute->lookup_str("domain");
const std::optional<StringRefNull> type_str = io_attribute->lookup_str("type");
auto io_data = io_attribute->lookup_dict("data");
if (!name || !domain_str || !type_str || !io_data) {
return false;
}
const std::optional<eAttrDomain> domain = get_domain_from_io_name(*domain_str);
const std::optional<eCustomDataType> data_type = get_data_type_from_io_name(*type_str);
if (!domain || !data_type) {
return false;
}
const CPPType *cpp_type = custom_data_type_to_cpp_type(*data_type);
if (!cpp_type) {
return false;
}
const int domain_size = attributes.domain_size(*domain);
const ImplicitSharingInfo *attribute_sharing_info;
const void *attribute_data = read_bdata_shared_simple_gspan(
*io_data, bdata_reader, bdata_sharing, *cpp_type, domain_size, &attribute_sharing_info);
if (!attribute_data) {
return false;
}
BLI_SCOPED_DEFER([&]() { attribute_sharing_info->remove_user_and_delete_if_last(); });
if (attributes.contains(*name)) {
/* If the attribute exists already, copy the values over to the existing array. */
bke::GSpanAttributeWriter attribute = attributes.lookup_or_add_for_write_only_span(
*name, *domain, *data_type);
if (!attribute) {
return false;
}
cpp_type->copy_assign_n(attribute_data, attribute.span.data(), domain_size);
attribute.finish();
}
else {
/* Add a new attribute that shares the data. */
if (!attributes.add(*name,
*domain,
*data_type,
AttributeInitShared(attribute_data, *attribute_sharing_info))) {
return false;
}
}
}
return true;
}
static PointCloud *try_load_pointcloud(const DictionaryValue &io_geometry,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing)
{
const DictionaryValue *io_pointcloud = io_geometry.lookup_dict("pointcloud");
if (!io_pointcloud) {
return nullptr;
}
const io::serialize::ArrayValue *io_attributes = io_pointcloud->lookup_array("attributes");
if (!io_attributes) {
return nullptr;
}
PointCloud *pointcloud = BKE_pointcloud_new_nomain(0);
CustomData_free_layer_named(&pointcloud->pdata, "position", 0);
pointcloud->totpoint = io_pointcloud->lookup_int("num_points").value_or(0);
auto cancel = [&]() {
BKE_id_free(nullptr, pointcloud);
return nullptr;
};
bke::MutableAttributeAccessor attributes = pointcloud->attributes_for_write();
if (!load_attributes(*io_attributes, attributes, bdata_reader, bdata_sharing)) {
return cancel();
}
return pointcloud;
}
static Curves *try_load_curves(const DictionaryValue &io_geometry,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing)
{
const DictionaryValue *io_curves = io_geometry.lookup_dict("curves");
if (!io_curves) {
return nullptr;
}
const io::serialize::ArrayValue *io_attributes = io_curves->lookup_array("attributes");
if (!io_attributes) {
return nullptr;
}
Curves *curves_id = bke::curves_new_nomain(0, 0);
bke::CurvesGeometry &curves = curves_id->geometry.wrap();
CustomData_free_layer_named(&curves.point_data, "position", 0);
HooglyBoogly marked this conversation as resolved Outdated

It's probably worth creating empty curves first, then assigning the offsets, to avoid allocating them and then freeing them.

This could be a TODO comment too I guess

curves.point_num = io_curves->lookup_int("num_points").value_or(0);
curves.curve_num = io_curves->lookup_int("num_curves").value_or(0);
auto cancel = [&]() {
BKE_id_free(nullptr, curves_id);
return nullptr;
};
if (curves.curves_num() > 0) {
const auto io_curve_offsets = io_curves->lookup_dict("curve_offsets");
if (!io_curve_offsets) {
return cancel();
}
if (!read_bdata_shared_simple_span(*io_curve_offsets,
bdata_reader,
bdata_sharing,
curves.curves_num() + 1,
&curves.curve_offsets,
&curves.runtime->curve_offsets_sharing_info)) {
return cancel();
}
}
bke::MutableAttributeAccessor attributes = curves.attributes_for_write();
if (!load_attributes(*io_attributes, attributes, bdata_reader, bdata_sharing)) {
return cancel();
}
return curves_id;
}
static Mesh *try_load_mesh(const DictionaryValue &io_geometry,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing)
{
const DictionaryValue *io_mesh = io_geometry.lookup_dict("mesh");
if (!io_mesh) {
return nullptr;
}
const io::serialize::ArrayValue *io_attributes = io_mesh->lookup_array("attributes");
if (!io_attributes) {
return nullptr;
}
Mesh *mesh = BKE_mesh_new_nomain(0, 0, 0, 0);
CustomData_free_layer_named(&mesh->vdata, "position", 0);
CustomData_free_layer_named(&mesh->edata, ".edge_verts", 0);
CustomData_free_layer_named(&mesh->ldata, ".corner_vert", 0);
CustomData_free_layer_named(&mesh->ldata, ".corner_edge", 0);
mesh->totvert = io_mesh->lookup_int("num_vertices").value_or(0);
mesh->totedge = io_mesh->lookup_int("num_edges").value_or(0);
mesh->totpoly = io_mesh->lookup_int("num_polygons").value_or(0);
mesh->totloop = io_mesh->lookup_int("num_corners").value_or(0);
auto cancel = [&]() {
BKE_id_free(nullptr, mesh);
return nullptr;
};
if (mesh->totpoly > 0) {
const auto io_poly_offsets = io_mesh->lookup_dict("poly_offsets");
if (!io_poly_offsets) {
return cancel();
}
if (!read_bdata_shared_simple_span(*io_poly_offsets,
bdata_reader,
bdata_sharing,
mesh->totpoly + 1,
&mesh->poly_offset_indices,
&mesh->runtime->poly_offsets_sharing_info)) {
return cancel();
}
}
bke::MutableAttributeAccessor attributes = mesh->attributes_for_write();
if (!load_attributes(*io_attributes, attributes, bdata_reader, bdata_sharing)) {
return cancel();
}
return mesh;
}
static GeometrySet load_geometry(const DictionaryValue &io_geometry,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing);
static std::unique_ptr<bke::Instances> try_load_instances(const DictionaryValue &io_geometry,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing)
{
const DictionaryValue *io_instances = io_geometry.lookup_dict("instances");
if (!io_instances) {
return nullptr;
}
const int num_instances = io_instances->lookup_int("num_instances").value_or(0);
if (num_instances == 0) {
return nullptr;
}
const io::serialize::ArrayValue *io_attributes = io_instances->lookup_array("attributes");
if (!io_attributes) {
return nullptr;
}
const io::serialize::ArrayValue *io_references = io_instances->lookup_array("references");
if (!io_references) {
return nullptr;
}
HooglyBoogly marked this conversation as resolved
Review

Using `unique_ptr<bke::Instances>` at least within this function should remove the need for this
std::unique_ptr<bke::Instances> instances = std::make_unique<bke::Instances>();
instances->resize(num_instances);
for (const auto &io_reference_value : io_references->elements()) {
const DictionaryValue *io_reference = io_reference_value->as_dictionary_value();
GeometrySet reference_geometry;
if (io_reference) {
reference_geometry = load_geometry(*io_reference, bdata_reader, bdata_sharing);
}
instances->add_reference(std::move(reference_geometry));
}
const auto io_transforms = io_instances->lookup_dict("transforms");
if (!io_transforms) {
return {};
}
if (!read_bdata_simple_gspan(bdata_reader, *io_transforms, instances->transforms())) {
return {};
}
const auto io_handles = io_instances->lookup_dict("handles");
if (!io_handles) {
return {};
}
if (!read_bdata_simple_gspan(bdata_reader, *io_handles, instances->reference_handles())) {
return {};
}
bke::MutableAttributeAccessor attributes = instances->attributes_for_write();
if (!load_attributes(*io_attributes, attributes, bdata_reader, bdata_sharing)) {
return {};
}
return instances;
}
static GeometrySet load_geometry(const DictionaryValue &io_geometry,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing)
{
HooglyBoogly marked this conversation as resolved Outdated

These functions handle null inputs already, this is a bit prettier :)

  GeometrySet geometry;
  geometry.replace_mesh(try_load_mesh(io_geometry, bdata_reader, bdata_sharing));
  geometry.replace_pointcloud(try_load_pointcloud(io_geometry, bdata_reader, bdata_sharing));
  geometry.replace_curves(try_load_curves(io_geometry, bdata_reader, bdata_sharing));
  geometry.replace_instances(try_load_instances(io_geometry, bdata_reader, bdata_sharing));
  return geometry;
GeometrySet geometry;
geometry.replace_mesh(try_load_mesh(io_geometry, bdata_reader, bdata_sharing));
geometry.replace_pointcloud(try_load_pointcloud(io_geometry, bdata_reader, bdata_sharing));
geometry.replace_curves(try_load_curves(io_geometry, bdata_reader, bdata_sharing));
geometry.replace_instances(
try_load_instances(io_geometry, bdata_reader, bdata_sharing).release());
return geometry;
}
static std::shared_ptr<io::serialize::ArrayValue> serialize_material_slots(
const Span<const Material *> material_slots)
{
auto io_materials = std::make_shared<io::serialize::ArrayValue>();
for (const Material *material : material_slots) {
if (material == nullptr) {
io_materials->append_null();
}
else {
auto io_material = io_materials->append_dict();
io_material->append_str("name", material->id.name + 2);
if (material->id.lib != nullptr) {
io_material->append_str("lib_name", material->id.lib->id.name + 2);
}
}
}
return io_materials;
}
static std::shared_ptr<io::serialize::ArrayValue> serialize_attributes(
const bke::AttributeAccessor &attributes,
BDataWriter &bdata_writer,
BDataSharing &bdata_sharing,
const Set<std::string> &attributes_to_ignore)
{
auto io_attributes = std::make_shared<io::serialize::ArrayValue>();
attributes.for_all(
[&](const bke::AttributeIDRef &attribute_id, const bke::AttributeMetaData &meta_data) {
if (attributes_to_ignore.contains_as(attribute_id.name())) {
return true;
}
auto io_attribute = io_attributes->append_dict();
io_attribute->append_str("name", attribute_id.name());
const StringRefNull domain_name = get_domain_io_name(meta_data.domain);
io_attribute->append_str("domain", domain_name);
const StringRefNull type_name = get_data_type_io_name(meta_data.data_type);
io_attribute->append_str("type", type_name);
const bke::GAttributeReader attribute = attributes.lookup(attribute_id);
const GVArraySpan attribute_span(attribute.varray);
io_attribute->append("data",
write_bdata_shared_simple_gspan(
bdata_writer,
bdata_sharing,
attribute_span,
attribute.varray.is_span() ? attribute.sharing_info : nullptr));
return true;
});
return io_attributes;
}
static std::shared_ptr<DictionaryValue> serialize_geometry_set(const GeometrySet &geometry,
BDataWriter &bdata_writer,
BDataSharing &bdata_sharing)
{
auto io_geometry = std::make_shared<DictionaryValue>();
if (geometry.has_mesh()) {
const Mesh &mesh = *geometry.get_mesh_for_read();
auto io_mesh = io_geometry->append_dict("mesh");
io_mesh->append_int("num_vertices", mesh.totvert);
io_mesh->append_int("num_edges", mesh.totedge);
io_mesh->append_int("num_polygons", mesh.totpoly);
io_mesh->append_int("num_corners", mesh.totloop);
if (mesh.totpoly > 0) {
io_mesh->append("poly_offsets",
write_bdata_shared_simple_gspan(bdata_writer,
bdata_sharing,
mesh.poly_offsets(),
mesh.runtime->poly_offsets_sharing_info));
}
auto io_materials = serialize_material_slots({mesh.mat, mesh.totcol});
io_mesh->append("materials", io_materials);
auto io_attributes = serialize_attributes(mesh.attributes(), bdata_writer, bdata_sharing, {});
io_mesh->append("attributes", io_attributes);
}
if (geometry.has_pointcloud()) {
const PointCloud &pointcloud = *geometry.get_pointcloud_for_read();
auto io_pointcloud = io_geometry->append_dict("pointcloud");
io_pointcloud->append_int("num_points", pointcloud.totpoint);
auto io_materials = serialize_material_slots({pointcloud.mat, pointcloud.totcol});
io_pointcloud->append("materials", io_materials);
auto io_attributes = serialize_attributes(
pointcloud.attributes(), bdata_writer, bdata_sharing, {});
io_pointcloud->append("attributes", io_attributes);
}
if (geometry.has_curves()) {
const Curves &curves_id = *geometry.get_curves_for_read();
const bke::CurvesGeometry &curves = curves_id.geometry.wrap();
auto io_curves = io_geometry->append_dict("curves");
io_curves->append_int("num_points", curves.point_num);
io_curves->append_int("num_curves", curves.curve_num);
if (curves.curve_num > 0) {
io_curves->append(
"curve_offsets",
write_bdata_shared_simple_gspan(bdata_writer,
bdata_sharing,
curves.offsets(),
curves.runtime->curve_offsets_sharing_info));
}
auto io_materials = serialize_material_slots({curves_id.mat, curves_id.totcol});
io_curves->append("materials", io_materials);
auto io_attributes = serialize_attributes(
curves.attributes(), bdata_writer, bdata_sharing, {});
io_curves->append("attributes", io_attributes);
}
if (geometry.has_instances()) {
const bke::Instances &instances = *geometry.get_instances_for_read();
auto io_instances = io_geometry->append_dict("instances");
io_instances->append_int("num_instances", instances.instances_num());
auto io_references = io_instances->append_array("references");
for (const bke::InstanceReference &reference : instances.references()) {
BLI_assert(reference.type() == bke::InstanceReference::Type::GeometrySet);
io_references->append(
serialize_geometry_set(reference.geometry_set(), bdata_writer, bdata_sharing));
}
io_instances->append("transforms",
write_bdata_simple_gspan(bdata_writer, instances.transforms()));
io_instances->append("handles",
write_bdata_simple_gspan(bdata_writer, instances.reference_handles()));
auto io_attributes = serialize_attributes(
instances.attributes(), bdata_writer, bdata_sharing, {"position"});
io_instances->append("attributes", io_attributes);
}
return io_geometry;
}
void serialize_modifier_simulation_state(const ModifierSimulationState &state,
BDataWriter &bdata_writer,
BDataSharing &bdata_sharing,
DictionaryValue &r_io_root)
{
r_io_root.append_int("version", 1);
auto io_zones = r_io_root.append_array("zones");
for (const auto item : state.zone_states_.items()) {
const SimulationZoneID &zone_id = item.key;
const SimulationZoneState &zone_state = *item.value;
auto io_zone = io_zones->append_dict();
auto io_zone_id = io_zone->append_array("zone_id");
for (const int node_id : zone_id.node_ids) {
io_zone_id->append_int(node_id);
}
auto io_state_items = io_zone->append_array("state_items");
for (const std::unique_ptr<SimulationStateItem> &state_item : zone_state.items) {
/* TODO: Use better id. */
const std::string state_item_id = std::to_string(&state_item - zone_state.items.begin());
auto io_state_item = io_state_items->append_dict();
io_state_item->append_str("id", state_item_id);
if (const GeometrySimulationStateItem *geometry_state_item =
dynamic_cast<const GeometrySimulationStateItem *>(state_item.get())) {
io_state_item->append_str("type", "geometry");
const GeometrySet &geometry = geometry_state_item->geometry();
auto io_geometry = serialize_geometry_set(geometry, bdata_writer, bdata_sharing);
io_state_item->append("data", io_geometry);
}
}
}
}
void deserialize_modifier_simulation_state(const DictionaryValue &io_root,
const BDataReader &bdata_reader,
const BDataSharing &bdata_sharing,
ModifierSimulationState &r_state)
{
io::serialize::JsonFormatter formatter;
const std::optional<int> version = io_root.lookup_int("version");
if (!version) {
return;
}
if (*version != 1) {
return;
}
const io::serialize::ArrayValue *io_zones = io_root.lookup_array("zones");
if (!io_zones) {
return;
}
for (const auto &io_zone_value : io_zones->elements()) {
const DictionaryValue *io_zone = io_zone_value->as_dictionary_value();
if (!io_zone) {
continue;
}
const io::serialize::ArrayValue *io_zone_id = io_zone->lookup_array("zone_id");
bke::sim::SimulationZoneID zone_id;
for (const auto &io_zone_id_element : io_zone_id->elements()) {
const io::serialize::IntValue *io_node_id = io_zone_id_element->as_int_value();
if (!io_node_id) {
continue;
}
zone_id.node_ids.append(io_node_id->value());
}
const io::serialize::ArrayValue *io_state_items = io_zone->lookup_array("state_items");
if (!io_state_items) {
continue;
}
auto zone_state = std::make_unique<bke::sim::SimulationZoneState>();
for (const auto &io_state_item_value : io_state_items->elements()) {
const DictionaryValue *io_state_item = io_state_item_value->as_dictionary_value();
if (!io_state_item) {
continue;
}
const std::optional<StringRefNull> state_item_type = io_state_item->lookup_str("type");
if (!state_item_type) {
continue;
}
if (*state_item_type == StringRef("geometry")) {
const DictionaryValue *io_geometry = io_state_item->lookup_dict("data");
if (!io_geometry) {
continue;
}
GeometrySet geometry = load_geometry(*io_geometry, bdata_reader, bdata_sharing);
auto state_item = std::make_unique<bke::sim::GeometrySimulationStateItem>(
std::move(geometry));
zone_state->items.append(std::move(state_item));
}
}
r_state.zone_states_.add_overwrite(zone_id, std::move(zone_state));
}
}
DiskBDataReader::DiskBDataReader(std::string bdata_dir) : bdata_dir_(std::move(bdata_dir)) {}
[[nodiscard]] bool DiskBDataReader::read(const BDataSlice &slice, void *r_data) const
{
if (slice.range.is_empty()) {
return true;
}
char bdata_path[FILE_MAX];
BLI_path_join(bdata_path, sizeof(bdata_path), bdata_dir_.c_str(), slice.name.c_str());
std::lock_guard lock{mutex_};
std::unique_ptr<fstream> &bdata_file = open_input_streams_.lookup_or_add_cb_as(
bdata_path,
[&]() { return std::make_unique<fstream>(bdata_path, std::ios::in | std::ios::binary); });
bdata_file->seekg(slice.range.start());
bdata_file->read(static_cast<char *>(r_data), slice.range.size());
if (bdata_file->gcount() != slice.range.size()) {
return false;
}
return true;
}
DiskBDataWriter::DiskBDataWriter(std::string bdata_name,
std::ostream &bdata_file,
const int64_t current_offset)
: bdata_name_(std::move(bdata_name)), bdata_file_(bdata_file), current_offset_(current_offset)
{
}
BDataSlice DiskBDataWriter::write(const void *data, const int64_t size)
{
const int64_t old_offset = current_offset_;
bdata_file_.write(static_cast<const char *>(data), size);
current_offset_ += size;
return {bdata_name_, {old_offset, size}};
}
BDataSharing::~BDataSharing()
{
for (const ImplicitSharingInfo *sharing_info : stored_by_runtime_.keys()) {
sharing_info->remove_weak_user_and_delete_if_last();
}
for (const ImplicitSharingInfoAndData &value : runtime_by_stored_.values()) {
if (value.sharing_info) {
value.sharing_info->remove_user_and_delete_if_last();
}
}
}
DictionaryValuePtr BDataSharing::write_shared(const ImplicitSharingInfo *sharing_info,
FunctionRef<DictionaryValuePtr()> write_fn)
{
if (sharing_info == nullptr) {
return write_fn();
}
return stored_by_runtime_.add_or_modify(
sharing_info,
/* Create new value. */
[&](StoredByRuntimeValue *value) {
new (value) StoredByRuntimeValue();
value->io_data = write_fn();
value->sharing_info_version = sharing_info->version();
sharing_info->add_weak_user();
return value->io_data;
},
/* Potentially modify existing value. */
[&](StoredByRuntimeValue *value) {
const int64_t new_version = sharing_info->version();
BLI_assert(value->sharing_info_version <= new_version);
if (value->sharing_info_version < new_version) {
value->io_data = write_fn();
value->sharing_info_version = new_version;
}
return value->io_data;
});
}
std::optional<ImplicitSharingInfoAndData> BDataSharing::read_shared(
const DictionaryValue &io_data,
FunctionRef<std::optional<ImplicitSharingInfoAndData>()> read_fn) const
{
std::lock_guard lock{mutex_};
io::serialize::JsonFormatter formatter;
std::stringstream ss;
formatter.serialize(ss, io_data);
const std::string key = ss.str();
if (const ImplicitSharingInfoAndData *shared_data = runtime_by_stored_.lookup_ptr(key)) {
shared_data->sharing_info->add_user();
return *shared_data;
}
std::optional<ImplicitSharingInfoAndData> data = read_fn();
if (!data) {
return std::nullopt;
}
if (data->sharing_info != nullptr) {
data->sharing_info->add_user();
runtime_by_stored_.add_new(key, *data);
}
return data;
}
} // namespace blender::bke::sim
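
For context, a hedged sketch of the write path that the bake operator (not shown in this file) is expected to follow when storing one frame: one binary `.bdata` file plus one `.json` meta file. The file naming and the raw path concatenation here are simplifications; the real operator presumably uses BLI_path_join and the frame-based names that try_discover_bake() parses.

#include <fstream>

namespace blender::bke::sim {

/* Sketch only: serialize one frame's state to a bdata file and a JSON meta file. */
static void example_write_frame(const ModifierSimulationState &state,
                                BDataSharing &bdata_sharing,
                                const std::string &bdata_dir,
                                const std::string &meta_dir,
                                const std::string &frame_name)
{
  const std::string bdata_name = frame_name + ".bdata";
  std::fstream bdata_file(bdata_dir + "/" + bdata_name, std::ios::out | std::ios::binary);
  DiskBDataWriter bdata_writer(bdata_name, bdata_file, 0);

  io::serialize::DictionaryValue io_root;
  serialize_modifier_simulation_state(state, bdata_writer, bdata_sharing, io_root);

  std::fstream meta_file(meta_dir + "/" + frame_name + ".json", std::ios::out);
  io::serialize::JsonFormatter formatter;
  formatter.serialize(meta_file, io_root);
}

}  // namespace blender::bke::sim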

View File

@ -705,6 +705,11 @@ class CPPType : NonCopyable, NonMovable {
return this == &CPPType::get<std::decay_t<T>>();
}
template<typename... T> bool is_any() const
{
return (this->is<T>() || ...);
}
/**
* Convert a #CPPType that is only known at run-time, to a static type that is known at
* compile-time. This allows the compiler to optimize a function for specific types, while all
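
A small illustration of the new `is_any()` helper, mirroring how the bake deserialization code above decides whether a type can be endian-switched as plain scalars; the helper name is made up:

/* Sketch only. */
static bool endian_switchable_as_scalars(const blender::CPPType &type)
{
  return type.is_any<int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t, float>();
}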

View File

@ -65,6 +65,11 @@ class GSpan {
return size_;
}
int64_t size_in_bytes() const
{
return type_->size() * size_;
}
const void *data() const
{
return data_;
@ -186,6 +191,11 @@ class GMutableSpan {
return size_;
}
int64_t size_in_bytes() const
{
return type_->size() * size_;
}
void *data() const
{
return data_;

View File

@ -37,11 +37,25 @@ namespace blender {
*/
class ImplicitSharingInfo : NonCopyable, NonMovable {
private:
mutable std::atomic<int> users_;
/**
* Number of users that want to own the shared data.
*/
mutable std::atomic<int> strong_users_ = 1;
/**
* Number of users that only keep a reference to the `ImplicitSharingInfo` but don't need to own
* the shared data. One additional weak user is added as long as there is at least one strong
* user. Together with the version below this adds an efficient way to detect if data has been
* changed.
*/
mutable std::atomic<int> weak_users_ = 1;
/**
* The data referenced by an #ImplicitSharingInfo can change over time. This version is
* incremented whenever the referenced data is about to be changed. This allows weak users to
* detect if the data has changed since the weak user was created.
*/
mutable std::atomic<int64_t> version_ = 0;
public:
ImplicitSharingInfo(const int initial_users) : users_(initial_users) {}
virtual ~ImplicitSharingInfo()
{
BLI_assert(this->is_mutable());
@ -50,7 +64,7 @@ class ImplicitSharingInfo : NonCopyable, NonMovable {
/** True if there are other const references to the resource, meaning it cannot be modified. */
bool is_shared() const
{
return users_.load(std::memory_order_relaxed) >= 2;
return strong_users_.load(std::memory_order_relaxed) >= 2;
}
/** Whether the resource can be modified without a copy because there is only one owner. */
@ -59,10 +73,56 @@ class ImplicitSharingInfo : NonCopyable, NonMovable {
return !this->is_shared();
}
/**
* Weak users don't protect the referenced data from being freed. If the data is freed while
* there is still a weak reference, this returns true.
*/
bool expired() const
{
return strong_users_.load(std::memory_order_acquire) == 0;
}
/** Call this when the data has a new additional owner. */
void add_user() const
{
users_.fetch_add(1, std::memory_order_relaxed);
strong_users_.fetch_add(1, std::memory_order_relaxed);
}
/**
* Adding a weak owner prevents the #ImplicitSharingInfo from being freed but not the referenced
* data.
*
* \note Unlike std::shared_ptr a weak user cannot be turned into a strong user. This is

Maybe "Unlike std::shared_ptr" instead of "Other than with std::shared_ptr"

Maybe "Unlike std::shared_ptr" instead of "Other than with std::shared_ptr"
* because some code might change the referenced data assuming that there is only one strong user
* while a new strong user is added by another thread.
*/
void add_weak_user() const
{
weak_users_.fetch_add(1, std::memory_order_relaxed);
}
/**
* Call this when making sure that the referenced data is mutable, which also implies that it is
* about to be modified. This allows other code to efficiently detect whether the data has been
* changed.
*/
void tag_ensured_mutable() const
{
BLI_assert(this->is_mutable());
/* This might not need an atomic increment when the #version method below is only called when

"Doesn't need an atomic increment" but it seems to do one anyway, am I missing something?

"Doesn't need an atomic increment" but it seems to do one anyway, am I missing something?

Oops, yeah. I thought this wouldn't need one but then wasn't so sure anymore.

Oops, yeah. I thought this wouldn't need one but then wasn't so sure anymore.
* the code calling it is a strong user of this sharing info. Better be safe and use an atomic
* for now. */
version_.fetch_add(1, std::memory_order_acq_rel);
}
/**
* Get a version number that is increased when the data is modified. If the version is the same
* at two points in time on the same #ImplicitSharingInfo, one can be sure that the referenced
* data has not been modified.
*/
int64_t version() const
{
return version_.load(std::memory_order_acquire);
}
/**
@ -71,10 +131,31 @@ class ImplicitSharingInfo : NonCopyable, NonMovable {
*/
void remove_user_and_delete_if_last() const
{
const int old_user_count = users_.fetch_sub(1, std::memory_order_acq_rel);
const int old_user_count = strong_users_.fetch_sub(1, std::memory_order_acq_rel);
BLI_assert(old_user_count >= 1);
const bool was_last_user = old_user_count == 1;
if (was_last_user) {
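/* The last strong user is gone, so the referenced data can be freed. The sharing info itself
 * has to stay alive while other weak users still reference it. */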
const int old_weak_user_count = weak_users_.load(std::memory_order_acquire);
if (old_weak_user_count == 1) {
const_cast<ImplicitSharingInfo *>(this)->delete_self_with_data();
}
else {
const_cast<ImplicitSharingInfo *>(this)->delete_data_only();
this->remove_weak_user_and_delete_if_last();
}
}
}
/**
* This might just decrement the weak user count or might delete the data. Should be used in
* conjunction with #add_weak_user.
*/
void remove_weak_user_and_delete_if_last() const
{
const int old_weak_user_count = weak_users_.fetch_sub(1, std::memory_order_acq_rel);
BLI_assert(old_weak_user_count >= 1);
const bool was_last_weak_user = old_weak_user_count == 1;
if (was_last_weak_user) {
const_cast<ImplicitSharingInfo *>(this)->delete_self_with_data();
}
}
@ -82,6 +163,8 @@ class ImplicitSharingInfo : NonCopyable, NonMovable {
private:
/** Has to free the #ImplicitSharingInfo and the referenced data. */
virtual void delete_self_with_data() = 0;
/** Can free the referenced data but the #ImplicitSharingInfo still has to be kept alive. */
virtual void delete_data_only() {}
};
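
To illustrate the weak-user and version mechanism described in the comments above, here is a minimal sketch of how shared data could be observed without keeping it alive. The `ChangeDetector` type and its members are hypothetical and only demonstrate the API added in this file.

```cpp
#include "BLI_implicit_sharing.hh"

namespace blender {

/* Hypothetical helper that observes shared data without owning it.
 * Copy/move handling is omitted for brevity. */
struct ChangeDetector {
  const ImplicitSharingInfo *sharing_info = nullptr;
  int64_t last_seen_version = 0;

  void observe(const ImplicitSharingInfo &info)
  {
    /* Keep the sharing info alive, but not the data it refers to. */
    info.add_weak_user();
    sharing_info = &info;
    last_seen_version = info.version();
  }

  bool changed_or_freed() const
  {
    /* Either the data was freed by the last strong user, or it was tagged mutable since
     * this detector last looked at it. */
    return sharing_info->expired() || sharing_info->version() != last_seen_version;
  }

  ~ChangeDetector()
  {
    if (sharing_info != nullptr) {
      sharing_info->remove_weak_user_and_delete_if_last();
    }
  }
};

}  // namespace blender
```

This is the kind of pattern the weak user count and version counter are meant to enable, e.g. caches that invalidate themselves lazily instead of owning the data.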
/**
@ -89,8 +172,6 @@ class ImplicitSharingInfo : NonCopyable, NonMovable {
* class can be used with #ImplicitSharingPtr.
*/
class ImplicitSharingMixin : public ImplicitSharingInfo {
public:
ImplicitSharingMixin() : ImplicitSharingInfo(1) {}
private:
void delete_self_with_data() override
@ -102,6 +183,14 @@ class ImplicitSharingMixin : public ImplicitSharingInfo {
virtual void delete_self() = 0;
};
/**
* Utility that contains sharing information and the data that is shared.
*/
struct ImplicitSharingInfoAndData {
const ImplicitSharingInfo *sharing_info = nullptr;
const void *data = nullptr;
};
namespace implicit_sharing {
namespace detail {

View File

@ -87,12 +87,7 @@ template<typename T, eValueType V> class PrimitiveValue;
using IntValue = PrimitiveValue<int64_t, eValueType::Int>;
using DoubleValue = PrimitiveValue<double, eValueType::Double>;
using BooleanValue = PrimitiveValue<bool, eValueType::Boolean>;
template<typename Container, eValueType V, typename ContainerItem = typename Container::value_type>
class ContainerValue;
/* ArrayValue stores its items as shared pointer as it shares data with a lookup table that can
* be created by calling `create_lookup`. */
using ArrayValue = ContainerValue<Vector<std::shared_ptr<Value>>, eValueType::Array>;
class ArrayValue;
/**
* Class containing a (de)serializable value.
@ -214,7 +209,7 @@ template<
eValueType V,
/** Type of the data inside the container. */
typename ContainerItem>
typename ContainerItem = typename Container::value_type>
class ContainerValue : public Value {
public:
using Items = Container;
@ -237,6 +232,20 @@ class ContainerValue : public Value {
}
};
class ArrayValue : public ContainerValue<Vector<std::shared_ptr<Value>>, eValueType::Array> {
public:
void append(std::shared_ptr<Value> value)
{
this->elements().append(std::move(value));
}
void append_int(int value);
void append_str(std::string value);
void append_null();
std::shared_ptr<DictionaryValue> append_dict();
std::shared_ptr<ArrayValue> append_array();
};
/**
* Internal storage type for DictionaryValue.
*
@ -268,6 +277,62 @@ class DictionaryValue
}
return result;
}
const std::shared_ptr<Value> *lookup_value(const StringRef key) const
{
for (const auto &item : this->elements()) {
if (item.first == key) {
return &item.second;
}
}
return nullptr;
}
std::optional<StringRefNull> lookup_str(const StringRef key) const
{
if (const std::shared_ptr<Value> *value = this->lookup_value(key)) {
if (const StringValue *str_value = (*value)->as_string_value()) {
return StringRefNull(str_value->value());
}
}
return std::nullopt;
}
std::optional<int64_t> lookup_int(const StringRef key) const
{
if (const std::shared_ptr<Value> *value = this->lookup_value(key)) {
if (const IntValue *int_value = (*value)->as_int_value()) {
return int_value->value();
}
}
return std::nullopt;
}
const DictionaryValue *lookup_dict(const StringRef key) const
{
if (const std::shared_ptr<Value> *value = this->lookup_value(key)) {
return (*value)->as_dictionary_value();
}
return nullptr;
}
const ArrayValue *lookup_array(const StringRef key) const
{
if (const std::shared_ptr<Value> *value = this->lookup_value(key)) {
return (*value)->as_array_value();
}
return nullptr;
}
void append(std::string key, std::shared_ptr<Value> value)
{
this->elements().append({std::move(key), std::move(value)});
}
void append_int(std::string key, int64_t value);
void append_str(std::string key, std::string value);
std::shared_ptr<DictionaryValue> append_dict(std::string key);
std::shared_ptr<ArrayValue> append_array(std::string key);
};
/**
@ -300,4 +365,7 @@ class JsonFormatter : public Formatter {
std::unique_ptr<Value> deserialize(std::istream &is) override;
};
void write_json_file(StringRef path, const Value &value);
[[nodiscard]] std::shared_ptr<Value> read_json_file(StringRef path);
} // namespace blender::io::serialize
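
As a quick illustration of the append/lookup helpers added above, the following sketch builds a small metadata dictionary, writes it to disk, and reads one value back. The file path and keys are made up for the example; it assumes only the declarations in this header.

```cpp
#include "BLI_serialize.hh"

static void serialize_example()
{
  using namespace blender::io::serialize;

  DictionaryValue root;
  root.append_int("version", 1);
  root.append_str("name", "example");
  std::shared_ptr<ArrayValue> frames = root.append_array("frames");
  frames->append_int(1);
  frames->append_int(2);

  /* Hypothetical path, purely for illustration. */
  write_json_file("/tmp/example_meta.json", root);

  const std::shared_ptr<Value> read_back = read_json_file("/tmp/example_meta.json");
  if (const DictionaryValue *dict = read_back->as_dictionary_value()) {
    /* Returns 1 for the file written above, or std::nullopt if the key is missing. */
    const std::optional<int64_t> version = dict->lookup_int("version");
    (void)version;
  }
}
```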

View File

@ -0,0 +1,93 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include "BLI_assert.h"
#include "BLI_math_base.h"
namespace blender {
/**
* Contains an integer frame number and a subframe float in the range [0, 1).
*/
struct SubFrame {
private:
int frame_;
float subframe_;
public:
SubFrame(const int frame = 0, float subframe = 0.0f) : frame_(frame), subframe_(subframe)
{
BLI_assert(subframe >= 0.0f);
BLI_assert(subframe < 1.0f);
}
SubFrame(const float frame) : SubFrame(int(floorf(frame)), fractf(frame)) {}
int frame() const
{
return frame_;
}
float subframe() const
{
return subframe_;
}
explicit operator float() const
{
return float(frame_) + float(subframe_);
}
explicit operator double() const
{
return double(frame_) + double(subframe_);
}
static SubFrame min()
{
return {INT32_MIN, 0.0f};
}
static SubFrame max()
{
return {INT32_MAX, std::nexttowardf(1.0f, 0.0)};
}
friend bool operator==(const SubFrame &a, const SubFrame &b)
{
return a.frame_ == b.frame_ && a.subframe_ == b.subframe_;
}
friend bool operator!=(const SubFrame &a, const SubFrame &b)
{
return !(a == b);
}
friend bool operator<(const SubFrame &a, const SubFrame &b)
{
return a.frame_ < b.frame_ || (a.frame_ == b.frame_ && a.subframe_ < b.subframe_);
}
friend bool operator<=(const SubFrame &a, const SubFrame &b)
{
return a.frame_ < b.frame_ || (a.frame_ == b.frame_ && a.subframe_ <= b.subframe_);
}
friend bool operator>(const SubFrame &a, const SubFrame &b)
{
return a.frame_ > b.frame_ || (a.frame_ == b.frame_ && a.subframe_ > b.subframe_);
}
friend bool operator>=(const SubFrame &a, const SubFrame &b)
{
return a.frame_ > b.frame_ || (a.frame_ == b.frame_ && a.subframe_ >= b.subframe_);
}
friend std::ostream &operator<<(std::ostream &stream, const SubFrame &a)
{
return stream << float(a);
}
};
} // namespace blender
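
A brief usage sketch for the new SubFrame type, with illustrative values; the asserts only document the expected results.

```cpp
#include "BLI_sub_frame.hh"

static void sub_frame_example()
{
  using blender::SubFrame;

  const SubFrame a{12, 0.25f}; /* Frame 12, a quarter of the way to frame 13. */
  const SubFrame b{12.75f};    /* Split into frame 12 and subframe 0.75. */

  BLI_assert(a < b);
  BLI_assert(b.frame() == 12);
  BLI_assert(b.subframe() == 0.75f);
  BLI_assert(float(b) == 12.75f);

  /* Keep the compiler quiet in builds without asserts. */
  (void)a;
  (void)b;
}
```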

View File

@ -352,6 +352,7 @@ set(SRC
BLI_string_search.h
BLI_string_utf8.h
BLI_string_utils.h
BLI_sub_frame.hh
BLI_sys_types.h
BLI_system.h
BLI_task.h

View File

@ -13,7 +13,7 @@ class MEMFreeImplicitSharing : public ImplicitSharingInfo {
public:
void *data;
MEMFreeImplicitSharing(void *data) : ImplicitSharingInfo(1), data(data)
MEMFreeImplicitSharing(void *data) : data(data)
{
BLI_assert(data != nullptr);
}
@ -51,6 +51,9 @@ void *make_trivial_data_mutable_impl(void *old_data,
*sharing_info = info_for_mem_free(new_data);
return new_data;
}
else {
(*sharing_info)->tag_ensured_mutable();
}
return old_data;
}
@ -85,6 +88,7 @@ void *resize_trivial_array_impl(void *old_data,
* could theoretically give better performance if the data can be reused in place. */
void *new_data = static_cast<int *>(MEM_reallocN(old_data, new_size));
info->data = new_data;
(*sharing_info)->tag_ensured_mutable();
return new_data;
}
}

View File

@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_fileops.hh"
#include "BLI_serialize.hh"
#include "json.hpp"
@ -197,6 +198,59 @@ static std::unique_ptr<Value> convert_from_json(const nlohmann::ordered_json &j)
return std::make_unique<NullValue>();
}
void ArrayValue::append_int(const int value)
{
this->append(std::make_shared<IntValue>(value));
}
void ArrayValue::append_str(std::string value)
{
this->append(std::make_shared<StringValue>(std::move(value)));
}
void ArrayValue::append_null()
{
this->append(std::make_shared<NullValue>());
}
std::shared_ptr<DictionaryValue> ArrayValue::append_dict()
{
auto value = std::make_shared<DictionaryValue>();
this->append(value);
return value;
}
std::shared_ptr<ArrayValue> ArrayValue::append_array()
{
auto value = std::make_shared<ArrayValue>();
this->append(value);
return value;
}
void DictionaryValue::append_int(std::string key, const int64_t value)
{
this->append(std::move(key), std::make_shared<IntValue>(value));
}
void DictionaryValue::append_str(std::string key, const std::string value)
{
this->append(std::move(key), std::make_shared<StringValue>(value));
}
std::shared_ptr<DictionaryValue> DictionaryValue::append_dict(std::string key)
{
auto value = std::make_shared<DictionaryValue>();
this->append(std::move(key), value);
return value;
}
std::shared_ptr<ArrayValue> DictionaryValue::append_array(std::string key)
{
auto value = std::make_shared<ArrayValue>();
this->append(std::move(key), value);
return value;
}
void JsonFormatter::serialize(std::ostream &os, const Value &value)
{
nlohmann::ordered_json j;
@ -216,4 +270,18 @@ std::unique_ptr<Value> JsonFormatter::deserialize(std::istream &is)
return convert_from_json(j);
}
void write_json_file(const StringRef path, const Value &value)
{
JsonFormatter formatter;
fstream stream(path, std::ios::out);
formatter.serialize(stream, value);
}
std::shared_ptr<Value> read_json_file(const StringRef path)
{
JsonFormatter formatter;
fstream stream(path, std::ios::in);
return formatter.deserialize(stream);
}
} // namespace blender::io::serialize

View File

@ -39,6 +39,7 @@ class SharedDataContainer {
return nullptr;
}
if (data_->is_mutable()) {
data_->tag_ensured_mutable();
return data_.get();
}
data_ = data_->copy();

View File

@ -911,7 +911,8 @@ void DepsgraphNodeBuilder::build_object_modifiers(Object *object)
return;
}
if (modifier_node->flag & DEPSOP_FLAG_USER_MODIFIED) {
if (nmd->simulation_cache) {
if (nmd->simulation_cache &&
nmd->simulation_cache->cache_state() == bke::sim::CacheState::Valid) {
nmd->simulation_cache->invalidate();
}
}

View File

@ -36,6 +36,7 @@ set(SRC
object_add.cc
object_bake.cc
object_bake_api.cc
object_bake_simulation.cc
object_collection.c
object_constraint.c
object_data_transfer.c

View File

@ -0,0 +1,345 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <fstream>
#include <iomanip>
#include <random>
#include "BLI_endian_defines.h"
#include "BLI_endian_switch.h"
#include "BLI_fileops.hh"
#include "BLI_path_util.h"
#include "BLI_serialize.hh"
#include "BLI_vector.hh"
#include "PIL_time.h"
#include "WM_api.h"
#include "WM_types.h"
#include "ED_screen.h"
#include "DNA_curves_types.h"
#include "DNA_material_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_modifier_types.h"
#include "DNA_pointcloud_types.h"
#include "DNA_windowmanager_types.h"
#include "BKE_context.h"
#include "BKE_curves.hh"
#include "BKE_global.h"
#include "BKE_instances.hh"
#include "BKE_lib_id.h"
#include "BKE_main.h"
#include "BKE_mesh.hh"
#include "BKE_object.h"
#include "BKE_pointcloud.h"
#include "BKE_report.h"
#include "BKE_scene.h"
#include "BKE_simulation_state.hh"
#include "BKE_simulation_state_serialize.hh"
#include "RNA_access.h"
#include "RNA_define.h"
#include "RNA_enum_types.h"
#include "DEG_depsgraph.h"
#include "DEG_depsgraph_build.h"
#include "object_intern.h"
namespace blender::ed::object::bake_simulation {
static bool bake_simulation_poll(bContext *C)
{
if (!ED_operator_object_active(C)) {
return false;
}
Main *bmain = CTX_data_main(C);
const StringRefNull path = BKE_main_blendfile_path(bmain);
if (path.is_empty()) {
CTX_wm_operator_poll_msg_set(C, "File has to be saved");
return false;
}
return true;
}
struct ModifierBakeData {
NodesModifierData *nmd;
std::string meta_dir;
std::string bdata_dir;
std::unique_ptr<bke::sim::BDataSharing> bdata_sharing;
};
struct ObjectBakeData {
Object *object;
Vector<ModifierBakeData> modifiers;
};
struct BakeSimulationJob {
wmWindowManager *wm;
Main *bmain;
Depsgraph *depsgraph;
Scene *scene;
Vector<Object *> objects;
};
static void bake_simulation_job_startjob(void *customdata,
bool *stop,
bool *do_update,
float *progress)
{
using namespace bke::sim;
BakeSimulationJob &job = *static_cast<BakeSimulationJob *>(customdata);
G.is_rendering = true;
G.is_break = false;
WM_set_locked_interface(job.wm, true);
Vector<ObjectBakeData> objects_to_bake;
for (Object *object : job.objects) {
if (!BKE_id_is_editable(job.bmain, &object->id)) {
continue;
}
ObjectBakeData bake_data;
bake_data.object = object;
LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) {
if (md->type == eModifierType_Nodes) {
NodesModifierData *nmd = reinterpret_cast<NodesModifierData *>(md);
if (nmd->simulation_cache != nullptr) {
nmd->simulation_cache->reset();
}
bake_data.modifiers.append({nmd,
bke::sim::get_meta_directory(*job.bmain, *object, *md),
bke::sim::get_bdata_directory(*job.bmain, *object, *md),
std::make_unique<BDataSharing>()});
}
}
objects_to_bake.append(std::move(bake_data));
}
*progress = 0.0f;
*do_update = true;
const float frame_step_size = 1.0f;
const float progress_per_frame = 1.0f / (float(job.scene->r.efra - job.scene->r.sfra + 1) /
frame_step_size);
const int old_frame = job.scene->r.cfra;
for (float frame_f = job.scene->r.sfra; frame_f <= job.scene->r.efra;
frame_f += frame_step_size) {
const SubFrame frame{frame_f};
if (G.is_break || (stop != nullptr && *stop)) {
break;
}
job.scene->r.cfra = frame.frame();
job.scene->r.subframe = frame.subframe();
char frame_file_c_str[64];
BLI_snprintf(frame_file_c_str, sizeof(frame_file_c_str), "%011.5f", double(frame));
BLI_str_replace_char(frame_file_c_str, '.', '_');
const StringRefNull frame_file_str = frame_file_c_str;
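/* For example, frame 12.25 becomes "00012_25000", so the files written below end up named
 * "00012_25000.bdata" and "00012_25000.json". */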
BKE_scene_graph_update_for_newframe(job.depsgraph);
for (ObjectBakeData &object_bake_data : objects_to_bake) {
for (ModifierBakeData &modifier_bake_data : object_bake_data.modifiers) {
NodesModifierData &nmd = *modifier_bake_data.nmd;
if (nmd.simulation_cache == nullptr) {
continue;
}
ModifierSimulationCache &sim_cache = *nmd.simulation_cache;
const ModifierSimulationState *sim_state = sim_cache.get_state_at_exact_frame(frame);
if (sim_state == nullptr) {
continue;
}
const std::string bdata_file_name = frame_file_str + ".bdata";
const std::string meta_file_name = frame_file_str + ".json";
char bdata_path[FILE_MAX];
BLI_path_join(bdata_path,
sizeof(bdata_path),
modifier_bake_data.bdata_dir.c_str(),
bdata_file_name.c_str());
char meta_path[FILE_MAX];
BLI_path_join(meta_path,
sizeof(meta_path),
modifier_bake_data.meta_dir.c_str(),
meta_file_name.c_str());
BLI_make_existing_file(bdata_path);
fstream bdata_file{bdata_path, std::ios::out | std::ios::binary};
bke::sim::DiskBDataWriter bdata_writer{bdata_file_name, bdata_file, 0};
io::serialize::DictionaryValue io_root;
bke::sim::serialize_modifier_simulation_state(
*sim_state, bdata_writer, *modifier_bake_data.bdata_sharing, io_root);
BLI_make_existing_file(meta_path);
io::serialize::write_json_file(meta_path, io_root);
}
}
*progress += progress_per_frame;
*do_update = true;
}
for (ObjectBakeData &object_bake_data : objects_to_bake) {
for (ModifierBakeData &modifier_bake_data : object_bake_data.modifiers) {
NodesModifierData &nmd = *modifier_bake_data.nmd;
if (nmd.simulation_cache) {
/* Tag the caches as being baked so that they are not changed anymore. */
nmd.simulation_cache->cache_state_ = CacheState::Baked;
}
}
DEG_id_tag_update(&object_bake_data.object->id, ID_RECALC_GEOMETRY);
}
job.scene->r.cfra = old_frame;
DEG_time_tag_update(job.bmain);
*progress = 1.0f;
*do_update = true;
}
static void bake_simulation_job_endjob(void *customdata)
{
BakeSimulationJob &job = *static_cast<BakeSimulationJob *>(customdata);
WM_set_locked_interface(job.wm, false);
G.is_rendering = false;
WM_main_add_notifier(NC_OBJECT | ND_MODIFIER, nullptr);
}
static int bake_simulation_invoke(bContext *C, wmOperator *op, const wmEvent * /*event*/)
{
wmWindowManager *wm = CTX_wm_manager(C);
Scene *scene = CTX_data_scene(C);
Depsgraph *depsgraph = CTX_data_depsgraph_pointer(C);
Main *bmain = CTX_data_main(C);
BakeSimulationJob *job = MEM_new<BakeSimulationJob>(__func__);
job->wm = wm;
job->bmain = bmain;
job->depsgraph = depsgraph;
job->scene = scene;
if (RNA_boolean_get(op->ptr, "selected")) {
CTX_DATA_BEGIN (C, Object *, object, selected_objects) {
job->objects.append(object);
}
CTX_DATA_END;
}
else {
if (Object *object = CTX_data_active_object(C)) {
job->objects.append(object);
}
}
wmJob *wm_job = WM_jobs_get(wm,
CTX_wm_window(C),
CTX_data_scene(C),
"Bake Simulation Nodes",
WM_JOB_PROGRESS,
WM_JOB_TYPE_BAKE_SIMULATION_NODES);
WM_jobs_customdata_set(
wm_job, job, [](void *job) { MEM_delete(static_cast<BakeSimulationJob *>(job)); });
WM_jobs_timer(wm_job, 0.1, NC_OBJECT | ND_MODIFIER, NC_OBJECT | ND_MODIFIER);
WM_jobs_callbacks(
wm_job, bake_simulation_job_startjob, nullptr, nullptr, bake_simulation_job_endjob);
WM_jobs_start(CTX_wm_manager(C), wm_job);
WM_event_add_modal_handler(C, op);
return OPERATOR_RUNNING_MODAL;
}
static int bake_simulation_modal(bContext *C, wmOperator * /*op*/, const wmEvent * /*event*/)
{
if (!WM_jobs_test(CTX_wm_manager(C), CTX_data_scene(C), WM_JOB_TYPE_BAKE_SIMULATION_NODES)) {
return OPERATOR_FINISHED | OPERATOR_PASS_THROUGH;
}
return OPERATOR_PASS_THROUGH;
}
static int delete_baked_simulation_exec(bContext *C, wmOperator *op)
{
Main *bmain = CTX_data_main(C);
Vector<Object *> objects;
if (RNA_boolean_get(op->ptr, "selected")) {
CTX_DATA_BEGIN (C, Object *, object, selected_objects) {
objects.append(object);
}
CTX_DATA_END;
}
else {
if (Object *object = CTX_data_active_object(C)) {
objects.append(object);
}
}
if (objects.is_empty()) {
return OPERATOR_CANCELLED;
}
for (Object *object : objects) {

An error message when this fails maybe?

        if (BLI_exists(bake_directory.c_str())) {
          if (!BLI_delete(bake_directory.c_str(), true, true)) {
            BKE_reportf(op->reports,
                        RPT_ERROR,
                        "Failed to remove bake directory %s",
                        bake_directory.c_str());
          }
        }
LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) {
if (md->type == eModifierType_Nodes) {
NodesModifierData *nmd = reinterpret_cast<NodesModifierData *>(md);
const std::string bake_directory = bke::sim::get_bake_directory(*bmain, *object, *md);
if (BLI_exists(bake_directory.c_str())) {
if (BLI_delete(bake_directory.c_str(), true, true)) {
BKE_reportf(op->reports,
RPT_ERROR,
"Failed to remove bake directory %s",
bake_directory.c_str());
}
}
if (nmd->simulation_cache != nullptr) {
nmd->simulation_cache->reset();
}
}
}
DEG_id_tag_update(&object->id, ID_RECALC_GEOMETRY);
}
WM_event_add_notifier(C, NC_OBJECT | ND_MODIFIER, nullptr);
return OPERATOR_FINISHED;
}
} // namespace blender::ed::object::bake_simulation
void OBJECT_OT_simulation_nodes_cache_bake(wmOperatorType *ot)
{
using namespace blender::ed::object::bake_simulation;
ot->name = "Bake Simulation";
ot->description = "Bake simulations in geometry nodes modifiers";
ot->idname = __func__;
ot->invoke = bake_simulation_invoke;
ot->modal = bake_simulation_modal;
ot->poll = bake_simulation_poll;
RNA_def_boolean(ot->srna, "selected", false, "Selected", "Bake cache on all selected objects");
}
void OBJECT_OT_simulation_nodes_cache_delete(wmOperatorType *ot)
{
using namespace blender::ed::object::bake_simulation;
ot->name = "Delete Cached Simulation";
ot->description = "Delete cached/baked simulations in geometry nodes modifiers";
ot->idname = __func__;
ot->exec = delete_baked_simulation_exec;
ot->poll = ED_operator_object_active;
RNA_def_boolean(ot->srna, "selected", false, "Selected", "Delete cache on all selected objects");
}

View File

@ -338,6 +338,11 @@ void OBJECT_OT_collection_objects_select(struct wmOperatorType *ot);
void OBJECT_OT_bake_image(wmOperatorType *ot);
void OBJECT_OT_bake(wmOperatorType *ot);
/* object_bake_simulation.cc */
void OBJECT_OT_simulation_nodes_cache_bake(wmOperatorType *ot);
void OBJECT_OT_simulation_nodes_cache_delete(wmOperatorType *ot);
/* object_random.c */
void TRANSFORM_OT_vertex_random(struct wmOperatorType *ot);

View File

@ -260,6 +260,8 @@ void ED_operatortypes_object(void)
WM_operatortype_append(OBJECT_OT_bake_image);
WM_operatortype_append(OBJECT_OT_bake);
WM_operatortype_append(OBJECT_OT_simulation_nodes_cache_bake);
WM_operatortype_append(OBJECT_OT_simulation_nodes_cache_delete);
WM_operatortype_append(OBJECT_OT_drop_named_material);
WM_operatortype_append(OBJECT_OT_drop_geometry_nodes);
WM_operatortype_append(OBJECT_OT_unlink_data);

View File

@ -688,10 +688,21 @@ static void timeline_cache_draw_simulation_nodes(
GPU_matrix_scale_2f(1.0, height);
float color[4];
copy_v4_fl4(color, 0.8, 0.8, 0.2, 1.0);
if (cache.is_invalid()) {
color[3] = 0.3f;
switch (cache.cache_state()) {
case blender::bke::sim::CacheState::Invalid: {
copy_v4_fl4(color, 0.8, 0.8, 0.2, 0.3);
break;
}
case blender::bke::sim::CacheState::Valid: {
copy_v4_fl4(color, 0.8, 0.8, 0.2, 1.0);
break;
}
case blender::bke::sim::CacheState::Baked: {
copy_v4_fl4(color, 1.0, 0.6, 0.2, 1.0);
break;
}
}
immUniformColor4fv(color);
const int start_frame = scene.r.sfra;
@ -701,7 +712,7 @@ static void timeline_cache_draw_simulation_nodes(
immBeginAtMost(GPU_PRIM_TRIS, frames_num * 6);
for (const int frame : frames_range) {
if (cache.has_state_at_time(float(frame))) {
if (cache.has_state_at_frame(frame)) {
immRectf_fast(pos_id, frame - 0.5f, 0, frame + 0.5f, 1.0f);
}
}

View File

@ -34,6 +34,7 @@
#include "BKE_compute_contexts.hh"
#include "BKE_context.h"
#include "BKE_curves.hh"
#include "BKE_global.h"
#include "BKE_idtype.h"
#include "BKE_lib_id.h"
#include "BKE_main.h"
@ -3403,6 +3404,9 @@ static void draw_background_color(const SpaceNode &snode)
void node_draw_space(const bContext &C, ARegion &region)
{
if (G.is_rendering) {
return;
}
wmWindow *win = CTX_wm_window(&C);
SpaceNode &snode = *CTX_wm_space_node(&C);
View2D &v2d = region.v2d;

View File

@ -216,6 +216,9 @@ bool RNA_enum_name(const EnumPropertyItem *item, int value, const char **r_name)
bool RNA_enum_description(const EnumPropertyItem *item, int value, const char **description);
int RNA_enum_from_value(const EnumPropertyItem *item, int value);
int RNA_enum_from_identifier(const EnumPropertyItem *item, const char *identifier);
bool RNA_enum_value_from_identifier(const EnumPropertyItem *item,
const char *identifier,
int *r_value);
/**
* Take care using this with translated enums,
* prefer #RNA_enum_from_identifier where possible.

View File

@ -1754,6 +1754,18 @@ int RNA_enum_from_identifier(const EnumPropertyItem *item, const char *identifie
return -1;
}
bool RNA_enum_value_from_identifier(const EnumPropertyItem *item,
const char *identifier,
int *r_value)
{
const int i = RNA_enum_from_identifier(item, identifier);
if (i == -1) {
return false;
}
*r_value = item[i].value;
return true;
}
int RNA_enum_from_name(const EnumPropertyItem *item, const char *name)
{
int i = 0;

View File

@ -55,6 +55,7 @@
#include "BKE_screen.h"
#include "BKE_simulation.h"
#include "BKE_simulation_state.hh"
#include "BKE_simulation_state_serialize.hh"
#include "BKE_workspace.h"
#include "BLO_read_write.h"
@ -1160,6 +1161,94 @@ static void store_output_attributes(GeometrySet &geometry,
store_computed_output_attributes(geometry, attributes_to_store);
}
static void prepare_simulation_states_for_evaluation(
const NodesModifierData &nmd,
NodesModifierData &nmd_orig,

Pass by reference and maybe const reference?

const ModifierEvalContext &ctx,
nodes::GeoNodesModifierData &geo_nodes_modifier_data)
{
const Main *bmain = DEG_get_bmain(ctx.depsgraph);
const SubFrame current_frame = DEG_get_ctime(ctx.depsgraph);
const Scene *scene = DEG_get_input_scene(ctx.depsgraph);
const SubFrame start_frame = scene->r.sfra;
const bool is_start_frame = current_frame == start_frame;
if (DEG_is_active(ctx.depsgraph)) {
if (nmd_orig.simulation_cache == nullptr) {
nmd_orig.simulation_cache = MEM_new<bke::sim::ModifierSimulationCache>(__func__);
}
{
/* Try to use baked data. */
const StringRefNull bmain_path = BKE_main_blendfile_path(bmain);
if (nmd_orig.simulation_cache->cache_state() != bke::sim::CacheState::Baked &&
!bmain_path.is_empty()) {
nmd_orig.simulation_cache->try_discover_bake(
bke::sim::get_meta_directory(*bmain, *ctx.object, nmd.modifier),
bke::sim::get_bdata_directory(*bmain, *ctx.object, nmd.modifier));
}
}
{
/* Reset cached data if necessary. */
const bke::sim::StatesAroundFrame sim_states =
nmd_orig.simulation_cache->get_states_around_frame(current_frame);
if (nmd_orig.simulation_cache->cache_state() == bke::sim::CacheState::Invalid &&
(current_frame == start_frame ||
(sim_states.current == nullptr && sim_states.prev == nullptr &&
sim_states.next != nullptr))) {
nmd_orig.simulation_cache->reset();
}
}

Do you think you'd be able to split any of this new code in `compute_geometry` to a separate function(s)? I think that would go a long way toward making it more readable and less overwhelming
/* Decide if a new simulation state should be created in this evaluation. */
const bke::sim::StatesAroundFrame sim_states =
nmd_orig.simulation_cache->get_states_around_frame(current_frame);
if (nmd_orig.simulation_cache->cache_state() != bke::sim::CacheState::Baked) {
if (sim_states.current == nullptr) {
if (is_start_frame || !nmd_orig.simulation_cache->has_states()) {
bke::sim::ModifierSimulationState &current_sim_state =
nmd_orig.simulation_cache->get_state_at_frame_for_write(current_frame);
geo_nodes_modifier_data.current_simulation_state_for_write = &current_sim_state;
geo_nodes_modifier_data.simulation_time_delta = 0.0f;
if (!is_start_frame) {
/* When starting a new simulation at a frame other than the start frame, it can't match
* what would be baked, so invalidate it immediately. */
nmd_orig.simulation_cache->invalidate();
}
}
else if (sim_states.prev != nullptr && sim_states.next == nullptr) {
const float delta_frames = float(current_frame) - float(sim_states.prev->frame);
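/* Only continue the simulation when the previous state is at most one frame old; a larger
 * gap would mean skipping simulation steps, so no new state is written in that case. */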
if (delta_frames <= 1.0f) {
bke::sim::ModifierSimulationState &current_sim_state =
nmd_orig.simulation_cache->get_state_at_frame_for_write(current_frame);
geo_nodes_modifier_data.current_simulation_state_for_write = &current_sim_state;
const float delta_seconds = delta_frames / FPS;
geo_nodes_modifier_data.simulation_time_delta = delta_seconds;
}
}
}
}
}
/* Load read-only states to give nodes access to cached data. */
const bke::sim::StatesAroundFrame sim_states =
nmd_orig.simulation_cache->get_states_around_frame(current_frame);
if (sim_states.current) {

`Decide of` -> `Decide if`

sim_states.current->state.ensure_bake_loaded();
geo_nodes_modifier_data.current_simulation_state = &sim_states.current->state;
}
if (sim_states.prev) {
sim_states.prev->state.ensure_bake_loaded();
geo_nodes_modifier_data.prev_simulation_state = &sim_states.prev->state;
if (sim_states.next) {
sim_states.next->state.ensure_bake_loaded();
geo_nodes_modifier_data.next_simulation_state = &sim_states.next->state;
geo_nodes_modifier_data.simulation_state_mix_factor =
(float(current_frame) - float(sim_states.prev->frame)) /
(float(sim_states.next->frame) - float(sim_states.prev->frame));
}
}
}
/**
* Evaluate a node group to compute the output geometry.
*/
@ -1198,39 +1287,7 @@ static GeometrySet compute_geometry(const bNodeTree &btree,
geo_nodes_modifier_data.self_object = ctx->object;
auto eval_log = std::make_unique<geo_log::GeoModifierLog>();
const float current_time = DEG_get_ctime(ctx->depsgraph);
if (DEG_is_active(ctx->depsgraph)) {
const Scene *scene = DEG_get_input_scene(ctx->depsgraph);
const int start_frame = scene->r.sfra;
if (nmd_orig->simulation_cache == nullptr) {
nmd_orig->simulation_cache = MEM_new<blender::bke::sim::ModifierSimulationCache>(__func__);
}
if (nmd_orig->simulation_cache->is_invalid() && current_time == start_frame) {
nmd_orig->simulation_cache->reset();
}
std::pair<float, const blender::bke::sim::ModifierSimulationState *> prev_sim_state =
nmd_orig->simulation_cache->try_get_last_state_before(current_time);
if (prev_sim_state.second != nullptr) {
geo_nodes_modifier_data.prev_simulation_state = prev_sim_state.second;
geo_nodes_modifier_data.simulation_time_delta = current_time - prev_sim_state.first;
if (geo_nodes_modifier_data.simulation_time_delta > 1.0f) {
nmd_orig->simulation_cache->invalidate();
}
}
geo_nodes_modifier_data.current_simulation_state_for_write =
&nmd_orig->simulation_cache->get_state_for_write(current_time);
geo_nodes_modifier_data.current_simulation_state =
geo_nodes_modifier_data.current_simulation_state_for_write;
}
else {
/* TODO: Should probably only access baked data that is not modified in the original data
* anymore. */
if (nmd_orig->simulation_cache != nullptr) {
geo_nodes_modifier_data.current_simulation_state =
nmd_orig->simulation_cache->get_state_at_time(current_time);
}
}
prepare_simulation_states_for_evaluation(*nmd, *nmd_orig, *ctx, geo_nodes_modifier_data);
Set<blender::ComputeContextHash> socket_log_contexts;
if (logging_enabled(ctx)) {

View File

@ -47,8 +47,10 @@ struct GeoNodesModifierData {
/** Optional logger. */
geo_eval_log::GeoModifierLog *eval_log = nullptr;
const bke::sim::ModifierSimulationState *prev_simulation_state = nullptr;
const bke::sim::ModifierSimulationState *current_simulation_state = nullptr;
const bke::sim::ModifierSimulationState *prev_simulation_state = nullptr;
const bke::sim::ModifierSimulationState *next_simulation_state = nullptr;
float simulation_state_mix_factor = 0.0f;
bke::sim::ModifierSimulationState *current_simulation_state_for_write = nullptr;
float simulation_time_delta = 0.0f;

View File

@ -41,7 +41,7 @@ class LazyFunctionForSimulationInputNode final : public LazyFunction {
GeoNodesLFUserData &user_data = *static_cast<GeoNodesLFUserData *>(context.user_data);
GeoNodesModifierData &modifier_data = *user_data.modifier_data;
if (modifier_data.current_simulation_state == nullptr) {
if (modifier_data.current_simulation_state_for_write == nullptr) {
params.set_default_remaining_outputs();
return;
}

View File

@ -147,23 +147,37 @@ class LazyFunctionForSimulationOutputNode final : public LazyFunction {
EvalData &eval_data = *static_cast<EvalData *>(context.storage);
BLI_SCOPED_DEFER([&]() { eval_data.is_first_evaluation = false; });
if (modifier_data.current_simulation_state == nullptr) {
params.set_default_remaining_outputs();
return;
}
const bke::sim::SimulationZoneID zone_id = get_simulation_zone_id(*user_data.compute_context,
node_id_);
const bke::sim::SimulationZoneState *cached_zone_state =
modifier_data.current_simulation_state->get_zone_state(zone_id);
if (cached_zone_state != nullptr && eval_data.is_first_evaluation) {
this->output_cached_state(params, *cached_zone_state);
const bke::sim::SimulationZoneState *current_zone_state =
modifier_data.current_simulation_state ?
modifier_data.current_simulation_state->get_zone_state(zone_id) :
nullptr;
if (eval_data.is_first_evaluation && current_zone_state != nullptr) {
this->output_cached_state(params, *current_zone_state);
return;
}
if (modifier_data.current_simulation_state_for_write == nullptr) {
params.set_default_remaining_outputs();
const bke::sim::SimulationZoneState *prev_zone_state =
modifier_data.prev_simulation_state ?
modifier_data.prev_simulation_state->get_zone_state(zone_id) :
nullptr;
if (prev_zone_state == nullptr) {
params.set_default_remaining_outputs();
return;
}
const bke::sim::SimulationZoneState *next_zone_state =
modifier_data.next_simulation_state ?
modifier_data.next_simulation_state->get_zone_state(zone_id) :
nullptr;
if (next_zone_state == nullptr) {
this->output_cached_state(params, *prev_zone_state);
return;
}
this->output_mixed_cached_state(
params, *prev_zone_state, *next_zone_state, modifier_data.simulation_state_mix_factor);
return;
}
@ -205,6 +219,16 @@ class LazyFunctionForSimulationOutputNode final : public LazyFunction {
}
params.set_default_remaining_outputs();
}
void output_mixed_cached_state(lf::Params &params,
const bke::sim::SimulationZoneState &prev_state,
const bke::sim::SimulationZoneState &next_state,
const float mix_factor) const
{
/* TODO: Implement subframe mixing. */
this->output_cached_state(params, prev_state);
UNUSED_VARS(next_state, mix_factor);
}
};
} // namespace blender::nodes::node_geo_simulation_output_cc

View File

@ -1516,6 +1516,7 @@ typedef enum eWM_JobType {
WM_JOB_TYPE_LINEART,
WM_JOB_TYPE_SEQ_DRAW_THUMBNAIL,
WM_JOB_TYPE_SEQ_DRAG_DROP_PREVIEW,
WM_JOB_TYPE_BAKE_SIMULATION_NODES,
/* add as needed, bake, seq proxy build
* if having hard coded values is a problem */
} eWM_JobType;