Cleanup: Correcting default cases in node.cc #107339

Merged
Hans Goudey merged 10 commits from mod_moder/blender:tmp_cleanup_ensure_default_cases into main 2023-06-06 19:19:59 +02:00
37 changed files with 1048 additions and 563 deletions
Showing only changes of commit 3adfd8e58e - Show all commits

View File

@ -1,3 +1,5 @@
#!/bin/sh
# Disable ALSA and OSS as they are not available, and trying to initialize them
# breaks sound in other apps. Use PulseAudio instead.
export ALSOFT_DRIVERS=-oss,-alsa,

View File

@ -887,21 +887,33 @@ ListBase BKE_collection_object_cache_instanced_get(Collection *collection)
/* Free only this collection's own object caches and clear the matching flags.
 * Parent collections are handled separately by
 * #collection_object_cache_free_parent_recursive. */
static void collection_object_cache_free(Collection *collection)
{
collection->flag &= ~(COLLECTION_HAS_OBJECT_CACHE | COLLECTION_HAS_OBJECT_CACHE_INSTANCED);
BLI_freelistN(&collection->runtime.object_cache);
BLI_freelistN(&collection->runtime.object_cache_instanced);
}
void BKE_collection_object_cache_free(Collection *collection)
/* Free the object caches of the given collection, and of all of its parents recursively,
 * since parent caches are affected by changes in their children as well. */
static void collection_object_cache_free_parent_recursive(Collection *collection)
{
collection_object_cache_free(collection);
/* Clear cache in all parents recursively, since those are affected by changes as well. */
LISTBASE_FOREACH (CollectionParent *, parent, &collection->runtime.parents) {
/* In theory there should be no NULL pointer here. However, this code can be called from
* non-valid temporary states (e.g. indirectly from #BKE_collections_object_remove_invalids
* as part of ID remapping process). */
if (parent->collection == NULL) {
continue;
}
collection_object_cache_free_parent_recursive(parent->collection);
}
}
/* Public entry point: free the object caches of the given collection and of all of its
 * parents recursively. The collection must not be NULL. */
void BKE_collection_object_cache_free(Collection *collection)
{
BLI_assert(collection != NULL);
collection_object_cache_free_parent_recursive(collection);
}
void BKE_main_collections_object_cache_free(const Main *bmain)
{
for (Scene *scene = bmain->scenes.first; scene != NULL; scene = scene->id.next) {

View File

@ -2076,6 +2076,7 @@ static bool lib_override_library_resync(Main *bmain,
}
BKE_id_remapper_free(id_remapper);
BLI_linklist_free(id_override_old_list, nullptr);
id_override_old_list = nullptr;
/* Delete old override IDs.
* Note that we have to use tagged group deletion here, since ID deletion also uses
@ -2083,40 +2084,55 @@ static bool lib_override_library_resync(Main *bmain,
int user_edited_overrides_deletion_count = 0;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
if (id->tag & LIB_TAG_DOIT) {
/* Note that this works because linked IDs are always after local ones (including
* overrides), so we will only ever tag an old override ID after we have already checked it
* in this loop, hence we cannot untag it later. */
/* Since this code can also be called on linked liboverride now (during recursive resync),
* order of processing cannot guarantee anymore that the old liboverride won't be tagged for
* deletion before being processed by this loop (which would then untag it again).
*
* So instead store old liboverrides in Main into a temp list again, and do the tagging
* separately once this loop over all IDs in main is done. */
if (id->newid != nullptr && id->lib == id_root_reference->lib) {
ID *id_override_old = static_cast<ID *>(BLI_ghash_lookup(linkedref_to_old_override, id));
if (id_override_old != nullptr) {
id->newid->tag &= ~LIB_TAG_DOIT;
id_override_old->tag |= LIB_TAG_DOIT;
if (id_override_old->tag & LIB_TAG_NO_MAIN) {
id_override_old->tag |= LIB_TAG_DOIT;
BLI_assert(BLI_findindex(no_main_ids_list, id_override_old) != -1);
}
else {
/* Defer tagging. */
BLI_linklist_prepend(&id_override_old_list, id_override_old);
}
}
}
id->tag &= ~LIB_TAG_DOIT;
}
/* Also deal with old overrides that went missing in new linked data - only for real local
* overrides for now, not those who are linked. */
else if (id->tag & LIB_TAG_MISSING && !ID_IS_LINKED(id) && ID_IS_OVERRIDE_LIBRARY(id)) {
if (ID_IS_OVERRIDE_LIBRARY_REAL(id) &&
id->override_library->reference->lib->id.tag & LIB_TAG_MISSING) {
else if (id->tag & LIB_TAG_MISSING && !ID_IS_LINKED(id) && ID_IS_OVERRIDE_LIBRARY_REAL(id)) {
bool do_delete;
ID *hierarchy_root = id->override_library->hierarchy_root;
if (id->override_library->reference->lib->id.tag & LIB_TAG_MISSING) {
/* Do not delete overrides which reference is missing because the library itself is missing
* (ref. #100586). */
do_delete = false;
}
else if (hierarchy_root != nullptr &&
hierarchy_root->override_library->reference->tag & LIB_TAG_MISSING) {
/* Do not delete overrides which root hierarchy reference is missing. This would typically
* cause more harm than good. */
do_delete = false;
}
else if (!BKE_lib_override_library_is_user_edited(id)) {
/* If user never edited them, we can delete them. */
id->tag |= LIB_TAG_DOIT;
id->tag &= ~LIB_TAG_MISSING;
do_delete = true;
CLOG_INFO(&LOG, 2, "Old override %s is being deleted", id->name);
}
#if 0
else {
/* Otherwise, keep them; the user needs to decide what to do with them. */
BLI_assert((id->tag & LIB_TAG_DOIT) == 0);
do_delete = false;
id_fake_user_set(id);
id->flag |= LIB_LIB_OVERRIDE_RESYNC_LEFTOVER;
CLOG_INFO(&LOG, 2, "Old override %s is being kept around as it was user-edited", id->name);
@ -2125,17 +2141,27 @@ static bool lib_override_library_resync(Main *bmain,
else {
/* Delete them nevertheless, with fat warning, user needs to decide whether they want to
* save that version of the file (and accept the loss), or not. */
id->tag |= LIB_TAG_DOIT;
id->tag &= ~LIB_TAG_MISSING;
do_delete = true;
CLOG_WARN(
&LOG, "Old override %s is being deleted even though it was user-edited", id->name);
user_edited_overrides_deletion_count++;
}
#endif
if (do_delete) {
id->tag |= LIB_TAG_DOIT;
id->tag &= ~LIB_TAG_MISSING;
}
}
}
FOREACH_MAIN_ID_END;
/* Finalize tagging old liboverrides for deletion. */
for (LinkNode *ln_iter = id_override_old_list; ln_iter != nullptr; ln_iter = ln_iter->next) {
ID *id_override_old = static_cast<ID *>(ln_iter->link);
id_override_old->tag |= LIB_TAG_DOIT;
}
BLI_linklist_free(id_override_old_list, nullptr);
/* Cleanup, many pointers in this GHash are already invalid now. */
BLI_ghash_free(linkedref_to_old_override, nullptr, nullptr);
@ -2375,6 +2401,41 @@ static bool lib_override_resync_tagging_finalize_recurse(
return is_ancestor_tagged_for_resync;
}
/* Check whether the given ID must be ignored by the resync process in the current context.
 * Returns true when the ID is to be skipped. */
static bool lib_override_library_main_resync_id_skip_check(ID *id,
                                                           const int library_indirect_level)
{
  if (!ID_IS_OVERRIDE_LIBRARY_REAL(id)) {
    return true;
  }

  if (!lib_override_resync_id_lib_level_is_valid(id, library_indirect_level, true)) {
    return true;
  }

  /* Resyncing from missing data is never attempted. */
  if (((id->tag | id->override_library->reference->tag) & LIB_TAG_MISSING) != 0) {
    return true;
  }

  if (id->override_library->flag & IDOVERRIDE_LIBRARY_FLAG_NO_HIERARCHY) {
    /* Such an ID does not belong to any override hierarchy, so it cannot need a resync. */
    BLI_assert((id->tag & LIB_TAG_LIB_OVERRIDE_NEED_RESYNC) == 0);
    return true;
  }

  /* A missing (or absent) hierarchy root reference makes a resync typically do more harm than
   * good, so skip in that case too. */
  ID *root = id->override_library->hierarchy_root;
  if (root == nullptr ||
      ((root->tag | root->override_library->reference->tag) & LIB_TAG_MISSING) != 0) {
    return true;
  }

  return false;
}
/* Ensure resync of all overrides at one level of indirect usage.
*
* We need to handle each level independently, since an override at level n may be affected by
@ -2411,11 +2472,7 @@ static void lib_override_library_main_resync_on_library_indirect_level(
lib_override_group_tag_data_object_to_collection_init(&data);
ID *id;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
if (!ID_IS_OVERRIDE_LIBRARY_REAL(id)) {
continue;
}
if (!lib_override_resync_id_lib_level_is_valid(id, library_indirect_level, true)) {
if (lib_override_library_main_resync_id_skip_check(id, library_indirect_level)) {
continue;
}
@ -2424,16 +2481,6 @@ static void lib_override_library_main_resync_on_library_indirect_level(
continue;
}
/* Do not attempt to resync from missing data. */
if (((id->tag | id->override_library->reference->tag) & LIB_TAG_MISSING) != 0) {
continue;
}
if (id->override_library->flag & IDOVERRIDE_LIBRARY_FLAG_NO_HIERARCHY) {
/* This ID is not part of an override hierarchy. */
continue;
}
data.id_root = id->override_library->reference;
lib_override_linked_group_tag(&data);
BKE_main_relations_tag_set(bmain, MAINIDRELATIONS_ENTRY_TAGS_PROCESSED, false);
@ -2449,22 +2496,7 @@ static void lib_override_library_main_resync_on_library_indirect_level(
* such, or the one using linked data that is now tagged as needing override. */
BKE_main_relations_tag_set(bmain, MAINIDRELATIONS_ENTRY_TAGS_PROCESSED, false);
FOREACH_MAIN_ID_BEGIN (bmain, id) {
if (!ID_IS_OVERRIDE_LIBRARY_REAL(id)) {
continue;
}
if (!lib_override_resync_id_lib_level_is_valid(id, library_indirect_level, true)) {
continue;
}
/* Do not attempt to resync from missing data. */
if (((id->tag | id->override_library->reference->tag) & LIB_TAG_MISSING) != 0) {
continue;
}
if (id->override_library->flag & IDOVERRIDE_LIBRARY_FLAG_NO_HIERARCHY) {
/* This ID is not part of an override hierarchy. */
BLI_assert((id->tag & LIB_TAG_LIB_OVERRIDE_NEED_RESYNC) == 0);
if (lib_override_library_main_resync_id_skip_check(id, library_indirect_level)) {
continue;
}
@ -2687,6 +2719,9 @@ static int lib_override_libraries_index_define(Main *bmain)
do_continue = false;
ID *id;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
/* NOTE: In theory all non-liboverride IDs could be skipped here. This does not give any
* performance boost though, so for now keep it as is (i.e. also consider non-liboverride
* relationships to establish the hierarchy of libraries). */
BKE_library_foreach_ID_link(
bmain, id, lib_override_sort_libraries_func, &do_continue, IDWALK_READONLY);
}

File diff suppressed because it is too large Load Diff

View File

@ -2,16 +2,7 @@
#pragma once
#include <memory>
#include "BLI_map.hh"
#include "BLI_math_vector_types.hh"
#include "DNA_scene_types.h"
#include "DNA_texture_types.h"
#include "COM_cached_texture.hh"
#include "COM_context.hh"
#include "COM_morphological_distance_feather_weights.hh"
#include "COM_smaa_precomputed_textures.hh"
#include "COM_symmetric_blur_weights.hh"
@ -19,15 +10,14 @@
namespace blender::realtime_compositor {
class Context;
/* -------------------------------------------------------------------------------------------------
* Static Cache Manager
*
* A static cache manager is a collection of cached resources that can be retrieved when needed and
* created if not already available. In particular, each cached resource type has its own Map in
* the class, where all instances of that cached resource type are stored and tracked. See the
* CachedResource class for more information.
* created if not already available. In particular, each cached resource type has its own instance
* of a container derived from the CachedResourceContainer type in the class. All instances of that
* cached resource type are stored and tracked in the container. See the CachedResource and
* CachedResourceContainer classes for more information.
*
* The manager deletes the cached resources that are no longer needed. A cached resource is said to
* be not needed when it was not used in the previous evaluation. This is done through the
@ -43,65 +33,18 @@ class Context;
* evaluation will be deleted before the next evaluation. This mechanism is implemented in the
* reset() method of the class, which should be called before every evaluation. */
class StaticCacheManager {
private:
/* A map that stores all SymmetricBlurWeights cached resources. */
Map<SymmetricBlurWeightsKey, std::unique_ptr<SymmetricBlurWeights>> symmetric_blur_weights_;
/* A map that stores all SymmetricSeparableBlurWeights cached resources. */
Map<SymmetricSeparableBlurWeightsKey, std::unique_ptr<SymmetricSeparableBlurWeights>>
symmetric_separable_blur_weights_;
/* A map that stores all MorphologicalDistanceFeatherWeights cached resources. */
Map<MorphologicalDistanceFeatherWeightsKey, std::unique_ptr<MorphologicalDistanceFeatherWeights>>
morphological_distance_feather_weights_;
/* A nested map that stores all CachedTexture cached resources. The outer map identifies the
* textures using their ID name, while the inner map identifies the textures using their
* parameters. */
Map<std::string, Map<CachedTextureKey, std::unique_ptr<CachedTexture>>> cached_textures_;
/* A unique pointers that stores the cached SMAAPrecomputedTextures, if one is cached. */
std::unique_ptr<SMAAPrecomputedTextures> smaa_precomputed_textures_;
public:
SymmetricBlurWeightsContainer symmetric_blur_weights;
SymmetricSeparableBlurWeightsContainer symmetric_separable_blur_weights;
MorphologicalDistanceFeatherWeightsContainer morphological_distance_feather_weights;
SMAAPrecomputedTexturesContainer smaa_precomputed_textures;
CachedTextureContainer cached_textures;
/* Reset the cache manager by deleting the cached resources that are no longer needed because
* they weren't used in the last evaluation and prepare the remaining cached resources to track
* their needed status in the next evaluation. See the class description for more information.
* This should be called before every evaluation. */
void reset();
/* Check if there is an available SymmetricBlurWeights cached resource with the given parameters
* in the manager, if one exists, return it, otherwise, return a newly created one and add it to
* the manager. In both cases, tag the cached resource as needed to keep it cached for the next
* evaluation. */
SymmetricBlurWeights &get_symmetric_blur_weights(int type, float2 radius);
/* Check if there is an available SymmetricSeparableBlurWeights cached resource with the given
* parameters in the manager, if one exists, return it, otherwise, return a newly created one and
* add it to the manager. In both cases, tag the cached resource as needed to keep it cached for
* the next evaluation. */
SymmetricSeparableBlurWeights &get_symmetric_separable_blur_weights(int type, float radius);
/* Check if there is an available MorphologicalDistanceFeatherWeights cached resource with the
* given parameters in the manager, if one exists, return it, otherwise, return a newly created
* one and add it to the manager. In both cases, tag the cached resource as needed to keep it
* cached for the next evaluation. */
MorphologicalDistanceFeatherWeights &get_morphological_distance_feather_weights(int type,
int radius);
/* Check if the given texture ID has changed since the last time it was retrieved through its
* recalculate flag, and if so, invalidate its corresponding cached textures and reset the
* recalculate flag to ready it to track the next change. Then, check if there is an available
* CachedTexture cached resource with the given parameters in the manager, if one exists, return
* it, otherwise, return a newly created one and add it to the manager. In both cases, tag the
* cached resource as needed to keep it cached for the next evaluation. */
CachedTexture &get_cached_texture(
Context &context, Tex *texture, const Scene *scene, int2 size, float2 offset, float2 scale);
/* Check if a cached SMAA precomputed texture exists, if it does, return it, otherwise, return
* a newly created one and store it in the manager. In both cases, tag the cached resource as
* needed to keep it cached for the next evaluation. */
SMAAPrecomputedTextures &get_smaa_precomputed_textures();
};
} // namespace blender::realtime_compositor

View File

@ -73,7 +73,7 @@ static Result calculate_blending_weights(Context &context, Result &edges, int co
edges.bind_as_texture(shader, "edges_tx");
const SMAAPrecomputedTextures &smaa_precomputed_textures =
context.cache_manager().get_smaa_precomputed_textures();
context.cache_manager().smaa_precomputed_textures.get();
smaa_precomputed_textures.bind_area_texture(shader, "area_tx");
smaa_precomputed_textures.bind_search_texture(shader, "search_tx");

View File

@ -33,7 +33,7 @@ static Result horizontal_pass(Context &context,
input.bind_as_texture(shader, "input_tx");
const SymmetricSeparableBlurWeights &weights =
context.cache_manager().get_symmetric_separable_blur_weights(filter_type, radius);
context.cache_manager().symmetric_separable_blur_weights.get(filter_type, radius);
weights.bind_as_texture(shader, "weights_tx");
Domain domain = input.domain();
@ -84,7 +84,7 @@ static void vertical_pass(Context &context,
horizontal_pass_result.bind_as_texture(shader, "input_tx");
const SymmetricSeparableBlurWeights &weights =
context.cache_manager().get_symmetric_separable_blur_weights(filter_type, radius.y);
context.cache_manager().symmetric_separable_blur_weights.get(filter_type, radius.y);
weights.bind_as_texture(shader, "weights_tx");
Domain domain = original_input.domain();

View File

@ -8,17 +8,17 @@ namespace blender::realtime_compositor {
* Cached Resource.
*
* A cached resource is any resource that can be cached across compositor evaluations and across
* multiple operations. Cached resources are managed by an instance of a StaticCacheManager and are
* freed when they are no longer needed, a state which is represented by the `needed` member in the
* class. For more information on the caching mechanism, see the StaticCacheManager class.
* multiple operations. Cached resources are managed by an instance of a StaticCacheManager, stored
* in an instance of a CachedResourceContainer, and are freed when they are no longer needed, a
* state which is represented by the `needed` member in the class. For more information on the
* caching mechanism, see the StaticCacheManager class.
*
* To add a new cached resource:
*
* - Create a key class that can be used to identify the resource in a Map if needed.
* - Create a derived class from CachedResource to represent the resource.
* - Create a key class that can be used in a Map to identify the resource.
* - Add a new Map to StaticCacheManager mapping the key to the resource.
* - Reset the contents of the added map in StaticCacheManager::reset.
* - Add an appropriate getter method in StaticCacheManager.
* - Create a derived class from CachedResourceContainer to store the resources.
* - Add an instance of the container to StaticCacheManager and call its reset method.
*
* See the existing cached resources for reference. */
class CachedResource {
@ -28,4 +28,23 @@ class CachedResource {
bool needed = true;
};
/* -------------------------------------------------------------------------------------------------
 * Cached Resource Container.
 *
 * A cached resource container stores all the cached resources for a specific cached resource type.
 * The cached resources are typically stored in a map identified by a key type. The reset method
 * should be implemented as described in StaticCacheManager::reset. An appropriate getter method
 * should be provided that properly sets the CachedResource::needed flag as described in the
 * description of the StaticCacheManager class.
 *
 * See the existing cached resources for reference. */
class CachedResourceContainer {
 public:
  /* Containers are used polymorphically through this interface, so destruction through a base
   * pointer or reference must dispatch to the derived destructor. */
  virtual ~CachedResourceContainer() = default;

  /* Reset the container by deleting the cached resources that are no longer needed because they
   * weren't used in the last evaluation and prepare the remaining cached resources to track their
   * needed status in the next evaluation. See the description of the StaticCacheManager class for
   * more information. This should be called in StaticCacheManager::reset. */
  virtual void reset() = 0;
};
} // namespace blender::realtime_compositor

View File

@ -3,7 +3,10 @@
#pragma once
#include <cstdint>
#include <memory>
#include <string>
#include "BLI_map.hh"
#include "BLI_math_vector_types.hh"
#include "GPU_texture.h"
@ -15,6 +18,8 @@
namespace blender::realtime_compositor {
class Context;
/* ------------------------------------------------------------------------------------------------
* Cached Texture Key.
*/
@ -52,4 +57,24 @@ class CachedTexture : public CachedResource {
GPUTexture *value_texture();
};
/* ------------------------------------------------------------------------------------------------
 * Cached Texture Container.
 */
/* Inherit publicly so this container can be used through the CachedResourceContainer interface,
 * consistently with the other container types. */
class CachedTextureContainer : public CachedResourceContainer {
 private:
  /* A nested map: the outer map identifies textures by their ID name, while the inner map
   * identifies them by their parameters. */
  Map<std::string, Map<CachedTextureKey, std::unique_ptr<CachedTexture>>> map_;

 public:
  void reset() override;

  /* Check if the given texture ID has changed since the last time it was retrieved through its
   * recalculate flag, and if so, invalidate its corresponding cached textures and reset the
   * recalculate flag to ready it to track the next change. Then, check if there is an available
   * CachedTexture cached resource with the given parameters in the container, if one exists,
   * return it, otherwise, return a newly created one and add it to the container. In both cases,
   * tag the cached resource as needed to keep it cached for the next evaluation. */
  CachedTexture &get(
      Context &context, Tex *texture, const Scene *scene, int2 size, float2 offset, float2 scale);
};
} // namespace blender::realtime_compositor

View File

@ -3,6 +3,9 @@
#pragma once
#include <cstdint>
#include <memory>
#include "BLI_map.hh"
#include "GPU_shader.h"
#include "GPU_texture.h"
@ -58,4 +61,22 @@ class MorphologicalDistanceFeatherWeights : public CachedResource {
void unbind_distance_falloffs_as_texture() const;
};
/* ------------------------------------------------------------------------------------------------
 * Morphological Distance Feather Weights Container.
 */
/* Inherit publicly so this container can be used through the CachedResourceContainer interface,
 * consistently with the other container types. */
class MorphologicalDistanceFeatherWeightsContainer : public CachedResourceContainer {
 private:
  Map<MorphologicalDistanceFeatherWeightsKey, std::unique_ptr<MorphologicalDistanceFeatherWeights>>
      map_;

 public:
  void reset() override;

  /* Check if there is an available MorphologicalDistanceFeatherWeights cached resource with the
   * given parameters in the container, if one exists, return it, otherwise, return a newly created
   * one and add it to the container. In both cases, tag the cached resource as needed to keep it
   * cached for the next evaluation. */
  MorphologicalDistanceFeatherWeights &get(int type, int radius);
};
} // namespace blender::realtime_compositor

View File

@ -2,6 +2,8 @@
#pragma once
#include <memory>
#include "GPU_shader.h"
#include "GPU_texture.h"
@ -33,4 +35,20 @@ class SMAAPrecomputedTextures : public CachedResource {
void unbind_area_texture() const;
};
/* ------------------------------------------------------------------------------------------------
 * SMAA Precomputed Textures Container.
 */
class SMAAPrecomputedTexturesContainer : public CachedResourceContainer {
private:
/* The single cached instance of the precomputed textures, or null if nothing is cached. */
std::unique_ptr<SMAAPrecomputedTextures> textures_;
public:
/* Delete the cached textures if they went unused in the last evaluation, otherwise clear
 * their needed flag. See CachedResourceContainer::reset. */
void reset() override;
/* Check if a cached SMAA precomputed texture exists, if it does, return it, otherwise, return
* a newly created one and store it in the container. In both cases, tag the cached resource as
* needed to keep it cached for the next evaluation. */
SMAAPrecomputedTextures &get();
};
} // namespace blender::realtime_compositor

View File

@ -3,7 +3,9 @@
#pragma once
#include <cstdint>
#include <memory>
#include "BLI_map.hh"
#include "BLI_math_vector_types.hh"
#include "GPU_shader.h"
@ -49,4 +51,21 @@ class SymmetricBlurWeights : public CachedResource {
void unbind_as_texture() const;
};
/* ------------------------------------------------------------------------------------------------
 * Symmetric Blur Weights Container.
 */
class SymmetricBlurWeightsContainer : public CachedResourceContainer {
private:
/* Maps each (type, radius) key to its cached weights resource. */
Map<SymmetricBlurWeightsKey, std::unique_ptr<SymmetricBlurWeights>> map_;
public:
/* Delete the weights that went unused in the last evaluation and clear the needed flag on the
 * rest. See CachedResourceContainer::reset. */
void reset() override;
/* Check if there is an available SymmetricBlurWeights cached resource with the given parameters
* in the container, if one exists, return it, otherwise, return a newly created one and add it
* to the container. In both cases, tag the cached resource as needed to keep it cached for the
* next evaluation. */
SymmetricBlurWeights &get(int type, float2 radius);
};
} // namespace blender::realtime_compositor

View File

@ -3,7 +3,9 @@
#pragma once
#include <cstdint>
#include <memory>
#include "BLI_map.hh"
#include "BLI_math_vector_types.hh"
#include "GPU_shader.h"
@ -50,4 +52,22 @@ class SymmetricSeparableBlurWeights : public CachedResource {
void unbind_as_texture() const;
};
/* ------------------------------------------------------------------------------------------------
 * Symmetric Separable Blur Weights Container.
 */
class SymmetricSeparableBlurWeightsContainer : public CachedResourceContainer {
private:
/* Maps each (type, radius) key to its cached weights resource. */
Map<SymmetricSeparableBlurWeightsKey, std::unique_ptr<SymmetricSeparableBlurWeights>> map_;
public:
/* Delete the weights that went unused in the last evaluation and clear the needed flag on the
 * rest. See CachedResourceContainer::reset. */
void reset() override;
/* Check if there is an available SymmetricSeparableBlurWeights cached resource with the given
* parameters in the container, if one exists, return it, otherwise, return a newly created one
* and add it to the container. In both cases, tag the cached resource as needed to keep it
* cached for the next evaluation. */
SymmetricSeparableBlurWeights &get(int type, float radius);
};
} // namespace blender::realtime_compositor

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <cstdint>
#include <memory>
#include "BLI_array.hh"
#include "BLI_hash.hh"
@ -12,12 +13,14 @@
#include "BKE_texture.h"
#include "DNA_ID.h"
#include "DNA_scene_types.h"
#include "DNA_texture_types.h"
#include "RE_texture.h"
#include "COM_cached_texture.hh"
#include "COM_context.hh"
namespace blender::realtime_compositor {
@ -99,4 +102,44 @@ GPUTexture *CachedTexture::value_texture()
return value_texture_;
}
/* --------------------------------------------------------------------
* Cached Texture Container.
*/
/* Drop the cached textures that went unused in the previous evaluation, then prepare the
 * survivors for usage tracking in the next one. See CachedResourceContainer::reset. */
void CachedTextureContainer::reset()
{
  /* First pass: remove every cached texture that was not needed, then remove the per-ID entries
   * that became empty as a result. */
  for (auto &textures_of_id : map_.values()) {
    textures_of_id.remove_if([](auto item) { return !item.value->needed; });
  }
  map_.remove_if([](auto item) { return item.value.is_empty(); });

  /* Second pass: mark the remaining textures as unneeded so that their usage can be tracked
   * afresh during the next evaluation. */
  for (auto &textures_of_id : map_.values()) {
    for (auto &cached_texture : textures_of_id.values()) {
      cached_texture->needed = false;
    }
  }
}
/* Return the cached texture for the given texture ID and parameters, creating and caching it on
 * first use, and invalidating stale entries when the ID was edited. */
CachedTexture &CachedTextureContainer::get(
    Context &context, Tex *texture, const Scene *scene, int2 size, float2 offset, float2 scale)
{
  auto &textures_of_id = map_.lookup_or_add_default(texture->id.name);

  /* The texture ID was changed since the last evaluation, so its cached variants are stale:
   * throw them away. Querying the flag also resets it, readying it to track the next change. */
  if (context.query_id_recalc_flag(reinterpret_cast<ID *>(texture)) & ID_RECALC_ALL) {
    textures_of_id.clear();
  }

  const CachedTextureKey key(size, offset, scale);
  auto &cached_texture = *textures_of_id.lookup_or_add_cb(
      key, [&]() { return std::make_unique<CachedTexture>(texture, scene, size, offset, scale); });

  /* Tag as used so the next reset() call keeps this resource cached. */
  cached_texture.needed = true;
  return cached_texture;
}
} // namespace blender::realtime_compositor

View File

@ -2,6 +2,7 @@
#include <cmath>
#include <cstdint>
#include <memory>
#include "BLI_array.hh"
#include "BLI_hash.hh"
@ -157,4 +158,32 @@ void MorphologicalDistanceFeatherWeights::unbind_distance_falloffs_as_texture()
GPU_texture_unbind(distance_falloffs_texture_);
}
/* --------------------------------------------------------------------
* Morphological Distance Feather Weights Container.
*/
/* Drop the weights that went unused in the previous evaluation and prepare the survivors for
 * usage tracking in the next one. See CachedResourceContainer::reset. */
void MorphologicalDistanceFeatherWeightsContainer::reset()
{
  /* First pass: evict every resource that was not needed. */
  map_.remove_if([](auto item) { return !item.value->needed; });

  /* Second pass: mark the remaining resources as unneeded so that their usage can be tracked
   * afresh during the next evaluation. */
  for (auto &weights : map_.values()) {
    weights->needed = false;
  }
}
/* Return the cached weights for the given parameters, creating and caching them on first use. */
MorphologicalDistanceFeatherWeights &MorphologicalDistanceFeatherWeightsContainer::get(int type,
                                                                                       int radius)
{
  const MorphologicalDistanceFeatherWeightsKey key(type, radius);

  auto &resource = *map_.lookup_or_add_cb(
      key, [&]() { return std::make_unique<MorphologicalDistanceFeatherWeights>(type, radius); });

  /* Tag as used so the next reset() call keeps this resource cached. */
  resource.needed = true;
  return resource;
}
} // namespace blender::realtime_compositor

View File

@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <memory>
#include "BLI_smaa_textures.h"
#include "GPU_shader.h"
@ -9,6 +11,10 @@
namespace blender::realtime_compositor {
/* ------------------------------------------------------------------------------------------------
* SMAA Precomputed Textures.
*/
SMAAPrecomputedTextures::SMAAPrecomputedTextures()
{
search_texture_ = GPU_texture_create_2d("SMAA Search",
@ -61,4 +67,32 @@ void SMAAPrecomputedTextures::unbind_area_texture() const
GPU_texture_unbind(area_texture_);
}
/* ------------------------------------------------------------------------------------------------
* SMAA Precomputed Textures Container.
*/
/* Free the textures if they went unused in the previous evaluation, otherwise ready them for
 * usage tracking in the next one. See CachedResourceContainer::reset. */
void SMAAPrecomputedTexturesContainer::reset()
{
  /* Delete the cached textures when the previous evaluation did not use them. */
  if (textures_ && !textures_->needed) {
    textures_.reset();
  }

  /* If they survived, clear the needed flag so their usage can be tracked afresh during the next
   * evaluation. */
  if (textures_) {
    textures_->needed = false;
  }
}
/* Return the cached precomputed textures, creating and caching them on first use. */
SMAAPrecomputedTextures &SMAAPrecomputedTexturesContainer::get()
{
  if (!textures_) {
    textures_ = std::make_unique<SMAAPrecomputedTextures>();
  }

  /* Tag as used so the next reset() call keeps the textures cached. */
  textures_->needed = true;
  return *textures_;
}
} // namespace blender::realtime_compositor

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <cstdint>
#include <memory>
#include "BLI_array.hh"
#include "BLI_hash.hh"
@ -113,4 +114,31 @@ void SymmetricBlurWeights::unbind_as_texture() const
GPU_texture_unbind(texture_);
}
/* --------------------------------------------------------------------
* Symmetric Blur Weights Container.
*/
/* Drop the weights that went unused in the previous evaluation and prepare the survivors for
 * usage tracking in the next one. See CachedResourceContainer::reset. */
void SymmetricBlurWeightsContainer::reset()
{
  /* First pass: evict every resource that was not needed. */
  map_.remove_if([](auto item) { return !item.value->needed; });

  /* Second pass: mark the remaining resources as unneeded so that their usage can be tracked
   * afresh during the next evaluation. */
  for (auto &weights : map_.values()) {
    weights->needed = false;
  }
}
/* Return the cached weights for the given parameters, creating and caching them on first use. */
SymmetricBlurWeights &SymmetricBlurWeightsContainer::get(int type, float2 radius)
{
  const SymmetricBlurWeightsKey key(type, radius);

  auto &resource = *map_.lookup_or_add_cb(
      key, [&]() { return std::make_unique<SymmetricBlurWeights>(type, radius); });

  /* Tag as used so the next reset() call keeps this resource cached. */
  resource.needed = true;
  return resource;
}
} // namespace blender::realtime_compositor

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <cstdint>
#include <memory>
#include "BLI_array.hh"
#include "BLI_hash.hh"
@ -91,4 +92,31 @@ void SymmetricSeparableBlurWeights::unbind_as_texture() const
GPU_texture_unbind(texture_);
}
/* --------------------------------------------------------------------
* Symmetric Separable Blur Weights Container.
*/
/* Drop the weights that went unused in the previous evaluation and prepare the survivors for
 * usage tracking in the next one. See CachedResourceContainer::reset. */
void SymmetricSeparableBlurWeightsContainer::reset()
{
  /* First pass: evict every resource that was not needed. */
  map_.remove_if([](auto item) { return !item.value->needed; });

  /* Second pass: mark the remaining resources as unneeded so that their usage can be tracked
   * afresh during the next evaluation. */
  for (auto &weights : map_.values()) {
    weights->needed = false;
  }
}
/* Return the cached weights for the given parameters, creating and caching them on first use. */
SymmetricSeparableBlurWeights &SymmetricSeparableBlurWeightsContainer::get(int type, float radius)
{
  const SymmetricSeparableBlurWeightsKey key(type, radius);

  auto &resource = *map_.lookup_or_add_cb(
      key, [&]() { return std::make_unique<SymmetricSeparableBlurWeights>(type, radius); });

  /* Tag as used so the next reset() call keeps this resource cached. */
  resource.needed = true;
  return resource;
}
} // namespace blender::realtime_compositor

View File

@ -1,125 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <memory>
#include "BLI_math_vector_types.hh"
#include "DNA_ID.h"
#include "DNA_scene_types.h"
#include "DNA_texture_types.h"
#include "COM_context.hh"
#include "COM_morphological_distance_feather_weights.hh"
#include "COM_smaa_precomputed_textures.hh"
#include "COM_symmetric_blur_weights.hh"
#include "COM_symmetric_separable_blur_weights.hh"
#include "COM_static_cache_manager.hh"
namespace blender::realtime_compositor {
/* --------------------------------------------------------------------
* Static Cache Manager.
*/
void StaticCacheManager::reset()
{
/* First, delete all resources that are no longer needed. */
symmetric_blur_weights_.remove_if([](auto item) { return !item.value->needed; });
symmetric_separable_blur_weights_.remove_if([](auto item) { return !item.value->needed; });
morphological_distance_feather_weights_.remove_if([](auto item) { return !item.value->needed; });
for (auto &cached_textures_for_id : cached_textures_.values()) {
cached_textures_for_id.remove_if([](auto item) { return !item.value->needed; });
}
cached_textures_.remove_if([](auto item) { return item.value.is_empty(); });
if (smaa_precomputed_textures_ && !smaa_precomputed_textures_->needed) {
smaa_precomputed_textures_.reset();
}
/* Second, reset the needed status of the remaining resources to false to ready them to track
* their needed status for the next evaluation. */
for (auto &value : symmetric_blur_weights_.values()) {
value->needed = false;
}
for (auto &value : symmetric_separable_blur_weights_.values()) {
value->needed = false;
}
for (auto &value : morphological_distance_feather_weights_.values()) {
value->needed = false;
}
for (auto &cached_textures_for_id : cached_textures_.values()) {
for (auto &value : cached_textures_for_id.values()) {
value->needed = false;
}
}
if (smaa_precomputed_textures_) {
smaa_precomputed_textures_->needed = false;
}
}
SymmetricBlurWeights &StaticCacheManager::get_symmetric_blur_weights(int type, float2 radius)
{
const SymmetricBlurWeightsKey key(type, radius);
auto &weights = *symmetric_blur_weights_.lookup_or_add_cb(
key, [&]() { return std::make_unique<SymmetricBlurWeights>(type, radius); });
weights.needed = true;
return weights;
}
SymmetricSeparableBlurWeights &StaticCacheManager::get_symmetric_separable_blur_weights(
int type, float radius)
{
const SymmetricSeparableBlurWeightsKey key(type, radius);
auto &weights = *symmetric_separable_blur_weights_.lookup_or_add_cb(
key, [&]() { return std::make_unique<SymmetricSeparableBlurWeights>(type, radius); });
weights.needed = true;
return weights;
}
MorphologicalDistanceFeatherWeights &StaticCacheManager::
get_morphological_distance_feather_weights(int type, int radius)
{
const MorphologicalDistanceFeatherWeightsKey key(type, radius);
auto &weights = *morphological_distance_feather_weights_.lookup_or_add_cb(
key, [&]() { return std::make_unique<MorphologicalDistanceFeatherWeights>(type, radius); });
weights.needed = true;
return weights;
}
CachedTexture &StaticCacheManager::get_cached_texture(
Context &context, Tex *texture, const Scene *scene, int2 size, float2 offset, float2 scale)
{
const CachedTextureKey key(size, offset, scale);
auto &cached_textures_for_id = cached_textures_.lookup_or_add_default(texture->id.name);
if (context.query_id_recalc_flag(reinterpret_cast<ID *>(texture)) & ID_RECALC_ALL) {
cached_textures_for_id.clear();
}
auto &cached_texture = *cached_textures_for_id.lookup_or_add_cb(
key, [&]() { return std::make_unique<CachedTexture>(texture, scene, size, offset, scale); });
cached_texture.needed = true;
return cached_texture;
}
SMAAPrecomputedTextures &StaticCacheManager::get_smaa_precomputed_textures()
{
if (!smaa_precomputed_textures_) {
smaa_precomputed_textures_ = std::make_unique<SMAAPrecomputedTextures>();
}
smaa_precomputed_textures_->needed = true;
return *smaa_precomputed_textures_;
symmetric_blur_weights.reset();
symmetric_separable_blur_weights.reset();
morphological_distance_feather_weights.reset();
cached_textures.reset();
smaa_precomputed_textures.reset();
}
} // namespace blender::realtime_compositor

View File

@ -395,8 +395,7 @@ static int wm_usd_import_exec(bContext *C, wmOperator *op)
const bool create_collection = RNA_boolean_get(op->ptr, "create_collection");
char prim_path_mask[1024];
RNA_string_get(op->ptr, "prim_path_mask", prim_path_mask);
char *prim_path_mask = RNA_string_get_alloc(op->ptr, "prim_path_mask", NULL, 0, NULL);
const bool import_guide = RNA_boolean_get(op->ptr, "import_guide");
const bool import_proxy = RNA_boolean_get(op->ptr, "import_proxy");
@ -448,6 +447,7 @@ static int wm_usd_import_exec(bContext *C, wmOperator *op)
.import_meshes = import_meshes,
.import_volumes = import_volumes,
.import_shapes = import_shapes,
.prim_path_mask = prim_path_mask,
.import_subdiv = import_subdiv,
.import_instance_proxies = import_instance_proxies,
.create_collection = create_collection,
@ -464,7 +464,6 @@ static int wm_usd_import_exec(bContext *C, wmOperator *op)
.tex_name_collision_mode = tex_name_collision_mode,
.import_all_materials = import_all_materials};
STRNCPY(params.prim_path_mask, prim_path_mask);
STRNCPY(params.import_textures_dir, import_textures_dir);
const bool ok = USD_import(C, filename, &params, as_background_job);
@ -620,12 +619,14 @@ void WM_OT_usd_import(struct wmOperatorType *ot)
RNA_def_boolean(
ot->srna, "read_mesh_colors", true, "Color Attributes", "Read mesh color attributes");
RNA_def_string(ot->srna,
"prim_path_mask",
NULL,
1024,
"Path Mask",
"Import only the subset of the USD scene rooted at the given primitive");
RNA_def_string(
ot->srna,
"prim_path_mask",
NULL,
0,
"Path Mask",
"Import only the primitive at the given path and its descendents. "
"Multiple paths may be specified in a list delimited by commas or semicolons");
RNA_def_boolean(ot->srna, "import_guide", false, "Guide", "Import guide geometry");

View File

@ -16,21 +16,15 @@
namespace blender::ed::outliner {
static void outliner_context_selected_ids_recursive(const SpaceOutliner &space_outliner,
bContextDataResult *result)
static void outliner_context_selected_ids(const SpaceOutliner *space_outliner,
bContextDataResult *result)
{
tree_iterator::all(space_outliner, [&](const TreeElement *te) {
tree_iterator::all(*space_outliner, [&](const TreeElement *te) {
const TreeStoreElem *tse = TREESTORE(te);
if ((tse->flag & TSE_SELECTED) && ELEM(tse->type, TSE_SOME_ID, TSE_LAYER_COLLECTION)) {
CTX_data_id_list_add(result, tse->id);
}
});
}
static void outliner_context_selected_ids(const SpaceOutliner *space_outliner,
bContextDataResult *result)
{
outliner_context_selected_ids_recursive(*space_outliner, result);
CTX_data_type_set(result, CTX_DATA_TYPE_COLLECTION);
}

View File

@ -25,9 +25,8 @@ static VmaAllocationCreateFlagBits vma_allocation_flags(GPUUsageType usage)
{
switch (usage) {
case GPU_USAGE_STATIC:
return static_cast<VmaAllocationCreateFlagBits>(
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT);
case GPU_USAGE_DYNAMIC:
case GPU_USAGE_STREAM:
return static_cast<VmaAllocationCreateFlagBits>(
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT);
case GPU_USAGE_DEVICE_ONLY:
@ -35,7 +34,6 @@ static VmaAllocationCreateFlagBits vma_allocation_flags(GPUUsageType usage)
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT);
case GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY:
case GPU_USAGE_STREAM:
break;
}
BLI_assert_msg(false, "Unimplemented GPUUsageType");
@ -83,6 +81,10 @@ void VKBuffer::update(const void *data) const
{
BLI_assert_msg(is_mapped(), "Cannot update a non-mapped buffer.");
memcpy(mapped_memory_, data, size_in_bytes_);
VKContext &context = *VKContext::get();
VmaAllocator mem_allocator = context.mem_allocator_get();
vmaFlushAllocation(mem_allocator, allocation_, 0, VK_WHOLE_SIZE);
}
void VKBuffer::clear(VKContext &context, uint32_t clear_value)

View File

@ -63,4 +63,5 @@ class VKBuffer {
bool map(VKContext &context);
void unmap(VKContext &context);
};
} // namespace blender::gpu

View File

@ -12,6 +12,7 @@
#include "vk_memory.hh"
#include "vk_pipeline.hh"
#include "vk_texture.hh"
#include "vk_vertex_buffer.hh"
#include "BLI_assert.h"
@ -34,30 +35,42 @@ void VKCommandBuffer::init(const VkDevice vk_device,
vk_queue_ = vk_queue;
vk_command_buffer_ = vk_command_buffer;
submission_id_.reset();
state.stage = Stage::Initial;
if (vk_fence_ == VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
VkFenceCreateInfo fenceInfo{};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
vkCreateFence(vk_device_, &fenceInfo, vk_allocation_callbacks, &vk_fence_);
}
else {
vkResetFences(vk_device_, 1, &vk_fence_);
}
}
void VKCommandBuffer::begin_recording()
{
vkWaitForFences(vk_device_, 1, &vk_fence_, VK_TRUE, UINT64_MAX);
vkResetFences(vk_device_, 1, &vk_fence_);
vkResetCommandBuffer(vk_command_buffer_, 0);
if (is_in_stage(Stage::Submitted)) {
vkWaitForFences(vk_device_, 1, &vk_fence_, VK_TRUE, FenceTimeout);
vkResetFences(vk_device_, 1, &vk_fence_);
stage_transfer(Stage::Submitted, Stage::Executed);
}
if (is_in_stage(Stage::Executed)) {
vkResetCommandBuffer(vk_command_buffer_, 0);
stage_transfer(Stage::Executed, Stage::Initial);
}
VkCommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(vk_command_buffer_, &begin_info);
stage_transfer(Stage::Initial, Stage::Recording);
}
/* Finish recording: suspend any active render pass, close the Vulkan command buffer and move the
 * tracked stage from Recording to BetweenRecordingAndSubmitting. */
void VKCommandBuffer::end_recording()
{
  /* A render pass may still be open from a previous draw; it must be ended before
   * `vkEndCommandBuffer`. */
  ensure_no_active_framebuffer();
  vkEndCommandBuffer(vk_command_buffer_);
  stage_transfer(Stage::Recording, Stage::BetweenRecordingAndSubmitting);
}
void VKCommandBuffer::bind(const VKPipeline &pipeline, VkPipelineBindPoint bind_point)
@ -74,19 +87,35 @@ void VKCommandBuffer::bind(const VKDescriptorSet &descriptor_set,
vk_command_buffer_, bind_point, vk_pipeline_layout, 0, 1, &vk_descriptor_set, 0, 0);
}
void VKCommandBuffer::begin_render_pass(const VKFrameBuffer &framebuffer)
void VKCommandBuffer::bind(const uint32_t binding,
const VKVertexBuffer &vertex_buffer,
const VkDeviceSize offset)
{
VkRenderPassBeginInfo render_pass_begin_info = {};
render_pass_begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
render_pass_begin_info.renderPass = framebuffer.vk_render_pass_get();
render_pass_begin_info.framebuffer = framebuffer.vk_framebuffer_get();
render_pass_begin_info.renderArea = framebuffer.vk_render_area_get();
vkCmdBeginRenderPass(vk_command_buffer_, &render_pass_begin_info, VK_SUBPASS_CONTENTS_INLINE);
bind(binding, vertex_buffer.vk_handle(), offset);
}
void VKCommandBuffer::end_render_pass(const VKFrameBuffer & /*framebuffer*/)
void VKCommandBuffer::bind(const uint32_t binding,
const VkBuffer &vk_vertex_buffer,
const VkDeviceSize offset)
{
vkCmdEndRenderPass(vk_command_buffer_);
validate_framebuffer_exists();
ensure_active_framebuffer();
vkCmdBindVertexBuffers(vk_command_buffer_, binding, 1, &vk_vertex_buffer, &offset);
}
/* Start tracking the given framebuffer. The actual `vkCmdBeginRenderPass` is deferred until a
 * command that requires an active framebuffer is recorded (see `ensure_active_framebuffer`). */
void VKCommandBuffer::begin_render_pass(const VKFrameBuffer &framebuffer)
{
  validate_framebuffer_not_exists();
  state.framebuffer_ = &framebuffer;
}
/* Stop tracking the given framebuffer, ending the render pass if one is currently active. In
 * debug builds asserts that it is the same framebuffer that `begin_render_pass` was called with.
 */
void VKCommandBuffer::end_render_pass(const VKFrameBuffer &framebuffer)
{
  /* `framebuffer` is only read by the assert below, hence unused in release builds. */
  UNUSED_VARS_NDEBUG(framebuffer)
  validate_framebuffer_exists();
  BLI_assert(state.framebuffer_ == &framebuffer);
  ensure_no_active_framebuffer();
  state.framebuffer_ = nullptr;
}
void VKCommandBuffer::push_constants(const VKPushConstants &push_constants,
@ -105,6 +134,7 @@ void VKCommandBuffer::push_constants(const VKPushConstants &push_constants,
void VKCommandBuffer::fill(VKBuffer &buffer, uint32_t clear_data)
{
ensure_no_active_framebuffer();
vkCmdFillBuffer(vk_command_buffer_, buffer.vk_handle(), 0, buffer.size_in_bytes(), clear_data);
}
@ -112,6 +142,7 @@ void VKCommandBuffer::copy(VKBuffer &dst_buffer,
VKTexture &src_texture,
Span<VkBufferImageCopy> regions)
{
ensure_no_active_framebuffer();
vkCmdCopyImageToBuffer(vk_command_buffer_,
src_texture.vk_image_handle(),
src_texture.current_layout_get(),
@ -123,6 +154,7 @@ void VKCommandBuffer::copy(VKTexture &dst_texture,
VKBuffer &src_buffer,
Span<VkBufferImageCopy> regions)
{
ensure_no_active_framebuffer();
vkCmdCopyBufferToImage(vk_command_buffer_,
src_buffer.vk_handle(),
dst_texture.vk_image_handle(),
@ -130,12 +162,27 @@ void VKCommandBuffer::copy(VKTexture &dst_texture,
regions.size(),
regions.data());
}
void VKCommandBuffer::blit(VKTexture &dst_texture,
VKTexture &src_buffer,
Span<VkImageBlit> regions)
{
ensure_no_active_framebuffer();
vkCmdBlitImage(vk_command_buffer_,
src_buffer.vk_image_handle(),
src_buffer.current_layout_get(),
dst_texture.vk_image_handle(),
dst_texture.current_layout_get(),
regions.size(),
regions.data(),
VK_FILTER_NEAREST);
}
void VKCommandBuffer::clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges)
{
ensure_no_active_framebuffer();
vkCmdClearColorImage(vk_command_buffer_,
vk_image,
vk_image_layout,
@ -146,13 +193,36 @@ void VKCommandBuffer::clear(VkImage vk_image,
/* Record clearing of framebuffer attachments inside the active render pass. Requires a tracked
 * framebuffer; binds it if it is not currently active. */
void VKCommandBuffer::clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas)
{
  validate_framebuffer_exists();
  /* `vkCmdClearAttachments` must be recorded inside a render pass. */
  ensure_active_framebuffer();
  vkCmdClearAttachments(
      vk_command_buffer_, attachments.size(), attachments.data(), areas.size(), areas.data());
}
/* Record a non-indexed draw call. Requires a tracked framebuffer; binds it if not active. */
void VKCommandBuffer::draw(int v_first, int v_count, int i_first, int i_count)
{
  validate_framebuffer_exists();
  ensure_active_framebuffer();
  vkCmdDraw(vk_command_buffer_, v_count, i_count, v_first, i_first);
  /* Track the number of recorded draw commands for state bookkeeping. */
  state.draw_counts++;
}
/* Record an indexed draw call. Requires a tracked framebuffer; binds it if not active. */
void VKCommandBuffer::draw(
    int index_count, int instance_count, int first_index, int vertex_offset, int first_instance)
{
  validate_framebuffer_exists();
  ensure_active_framebuffer();
  vkCmdDrawIndexed(
      vk_command_buffer_, index_count, instance_count, first_index, vertex_offset, first_instance);
  /* Track the number of recorded draw commands for state bookkeeping. */
  state.draw_counts++;
}
void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages)
{
if (state.framebuffer_) {
ensure_active_framebuffer();
}
vkCmdPipelineBarrier(vk_command_buffer_,
source_stages,
destination_stages,
@ -167,6 +237,7 @@ void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
void VKCommandBuffer::pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers)
{
ensure_no_active_framebuffer();
vkCmdPipelineBarrier(vk_command_buffer_,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
@ -181,11 +252,13 @@ void VKCommandBuffer::pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_b
/* Record a compute dispatch. Dispatches are not allowed inside a render pass, so any active one
 * is suspended first. */
void VKCommandBuffer::dispatch(int groups_x_len, int groups_y_len, int groups_z_len)
{
  ensure_no_active_framebuffer();
  vkCmdDispatch(vk_command_buffer_, groups_x_len, groups_y_len, groups_z_len);
}
void VKCommandBuffer::submit()
{
ensure_no_active_framebuffer();
end_recording();
encode_recorded_commands();
submit_encoded_commands();
@ -208,6 +281,55 @@ void VKCommandBuffer::submit_encoded_commands()
vkQueueSubmit(vk_queue_, 1, &submit_info, vk_fence_);
submission_id_.next();
stage_transfer(Stage::BetweenRecordingAndSubmitting, Stage::Submitted);
}
/* -------------------------------------------------------------------- */
/** \name Framebuffer/RenderPass state tracking
* \{ */
/* Debug check: assert that no framebuffer is being tracked (bound or not). */
void VKCommandBuffer::validate_framebuffer_not_exists()
{
  BLI_assert_msg(state.framebuffer_ == nullptr && state.framebuffer_active_ == false,
                 "State error: expected no framebuffer being tracked.");
}
/* Debug check: assert that a framebuffer is being tracked (bound or not). */
void VKCommandBuffer::validate_framebuffer_exists()
{
  BLI_assert_msg(state.framebuffer_, "State error: expected framebuffer being tracked.");
}
/* Make sure no render pass is active: if the tracked framebuffer is currently bound, end its
 * render pass. The framebuffer stays tracked so it can be re-bound later. */
void VKCommandBuffer::ensure_no_active_framebuffer()
{
  /* checks_/switches_ are counters for how often state was queried vs. actually changed. */
  state.checks_++;
  if (state.framebuffer_ && state.framebuffer_active_) {
    vkCmdEndRenderPass(vk_command_buffer_);
    state.framebuffer_active_ = false;
    state.switches_++;
  }
}
/* Make sure the tracked framebuffer is bound: if its render pass is not active yet, record
 * `vkCmdBeginRenderPass` for it. A framebuffer must already be tracked (asserted). */
void VKCommandBuffer::ensure_active_framebuffer()
{
  BLI_assert(state.framebuffer_);
  /* checks_/switches_ are counters for how often state was queried vs. actually changed. */
  state.checks_++;
  if (!state.framebuffer_active_) {
    VkRenderPassBeginInfo render_pass_begin_info = {};
    render_pass_begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    render_pass_begin_info.renderPass = state.framebuffer_->vk_render_pass_get();
    render_pass_begin_info.framebuffer = state.framebuffer_->vk_framebuffer_get();
    render_pass_begin_info.renderArea = state.framebuffer_->vk_render_area_get();
    /* We don't use clear ops, but vulkan wants to have at least one. */
    VkClearValue clear_value = {};
    render_pass_begin_info.clearValueCount = 1;
    render_pass_begin_info.pClearValues = &clear_value;
    vkCmdBeginRenderPass(vk_command_buffer_, &render_pass_begin_info, VK_SUBPASS_CONTENTS_INLINE);
    state.framebuffer_active_ = true;
    state.switches_++;
  }
}
/** \} */
} // namespace blender::gpu

View File

@ -16,21 +16,116 @@ namespace blender::gpu {
class VKBuffer;
class VKDescriptorSet;
class VKFrameBuffer;
class VKIndexBuffer;
class VKPipeline;
class VKPushConstants;
class VKTexture;
class VKVertexBuffer;
/** Command buffer to keep track of the life-time of a command buffer. */
class VKCommandBuffer : NonCopyable, NonMovable {
/** None owning handle to the command buffer and device. Handle is owned by `GHOST_ContextVK`. */
/** Not owning handle to the command buffer and device. Handle is owned by `GHOST_ContextVK`. */
VkDevice vk_device_ = VK_NULL_HANDLE;
VkCommandBuffer vk_command_buffer_ = VK_NULL_HANDLE;
VkQueue vk_queue_ = VK_NULL_HANDLE;
/**
* Timeout to use when waiting for fences in nanoseconds.
*
* Currently added as the fence will halt when there are no commands in the command buffer for
* the second time. This should be solved and this timeout should be removed.
*/
static constexpr uint64_t FenceTimeout = UINT64_MAX;
/** Owning handles */
VkFence vk_fence_ = VK_NULL_HANDLE;
VKSubmissionID submission_id_;
private:
enum class Stage {
Initial,
Recording,
BetweenRecordingAndSubmitting,
Submitted,
Executed,
};
/*
 * Some Vulkan commands require an active framebuffer, others require no active framebuffer. As
 * our current API does not provide a solution for this, we need to keep track of the actual state
 * and apply the changes when recording the next command.
 *
 * This is a temporary solution to get things rolling.
 * TODO: In a future solution we should decide the scope of a command buffer.
 *
 * - command buffer per draw command.
 * - minimize command buffers and track render passes.
 * - add custom encoder to also track resource usages.
 *
 * Currently I expect the custom encoder has to be done eventually. But I want to keep postponing
 * the custom encoder for now to collect more use cases it should solve. (first pixel drawn on
 * screen).
 *
 * Some commands can also be encoded in another way when encoded as a first command. For example,
 * clearing a framebuffer's textures isn't allowed inside a render pass, but clearing the
 * framebuffer textures via ops is allowed. A clear of a framebuffer texture directly after
 * beginning a render pass could be re-encoded to do this in the same command.
 *
 * So for now we track the state and temporarily switch to another state if the command requires
 * it.
 */
struct {
/* Reference to the last_framebuffer where begin_render_pass was called for. */
const VKFrameBuffer *framebuffer_ = nullptr;
/* Is last_framebuffer_ currently bound. Each call should ensure the correct state. */
bool framebuffer_active_ = false;
/* Amount of times a check has been requested. */
uint64_t checks_ = 0;
/* Amount of times a check required to change the render pass. */
uint64_t switches_ = 0;
/* Number of times a vkDraw command has been recorded. */
uint64_t draw_counts = 0;
/**
* Current stage of the command buffer to keep track of inconsistencies & incorrect usage.
*/
Stage stage = Stage::Initial;
} state;
/* Return true when the command buffer is currently in the given stage. */
bool is_in_stage(Stage stage)
{
  return state.stage == stage;
}
/* Unconditionally set the tracked stage. Prefer `stage_transfer` which asserts the
 * current stage first. */
void stage_set(Stage stage)
{
  state.stage = stage;
}
/* Human-readable name of a command buffer stage, used when logging stage transfers. */
std::string to_string(Stage stage)
{
  const char *name = "UNKNOWN";
  switch (stage) {
    case Stage::Initial:
      name = "INITIAL";
      break;
    case Stage::Recording:
      name = "RECORDING";
      break;
    case Stage::BetweenRecordingAndSubmitting:
      name = "BEFORE_SUBMIT";
      break;
    case Stage::Submitted:
      name = "SUBMITTED";
      break;
    case Stage::Executed:
      name = "EXECUTED";
      break;
  }
  return name;
}
/* Move the command buffer from `stage_from` to `stage_to`, asserting the current stage matches
 * `stage_from`. The disabled printf can be enabled to trace stage transitions when debugging. */
void stage_transfer(Stage stage_from, Stage stage_to)
{
  BLI_assert(is_in_stage(stage_from));
#if 0
  printf(" *** Transfer stage from %s to %s\n",
         to_string(stage_from).c_str(),
         to_string(stage_to).c_str());
#endif
  stage_set(stage_to);
}
public:
virtual ~VKCommandBuffer();
void init(const VkDevice vk_device, const VkQueue vk_queue, VkCommandBuffer vk_command_buffer);
@ -40,6 +135,12 @@ class VKCommandBuffer : NonCopyable, NonMovable {
void bind(const VKDescriptorSet &descriptor_set,
const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point);
void bind(const uint32_t binding,
const VKVertexBuffer &vertex_buffer,
const VkDeviceSize offset);
/* Bind the given buffer as a vertex buffer. */
void bind(const uint32_t binding, const VkBuffer &vk_vertex_buffer, const VkDeviceSize offset);
void begin_render_pass(const VKFrameBuffer &framebuffer);
void end_render_pass(const VKFrameBuffer &framebuffer);
@ -55,6 +156,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
/** Copy the contents of a texture MIP level to the dst buffer. */
void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span<VkBufferImageCopy> regions);
void copy(VKTexture &dst_texture, VKBuffer &src_buffer, Span<VkBufferImageCopy> regions);
void blit(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageBlit> regions);
void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages);
void pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers);
@ -72,6 +174,10 @@ class VKCommandBuffer : NonCopyable, NonMovable {
void clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas);
void fill(VKBuffer &buffer, uint32_t data);
void draw(int v_first, int v_count, int i_first, int i_count);
void draw(
int index_count, int instance_count, int first_index, int vertex_offset, int first_instance);
/**
* Stop recording commands, encode + send the recordings to Vulkan, wait for the until the
* commands have been executed and start the command buffer to accept recordings again.
@ -86,6 +192,30 @@ class VKCommandBuffer : NonCopyable, NonMovable {
private:
void encode_recorded_commands();
void submit_encoded_commands();
/**
* Validate that there isn't a framebuffer being tracked (bound or not bound).
*
* Raises an assert in debug when a framebuffer is being tracked.
*/
void validate_framebuffer_not_exists();
/**
* Validate that there is a framebuffer being tracked (bound or not bound).
*
* Raises an assert in debug when no framebuffer is being tracked.
*/
void validate_framebuffer_exists();
/**
* Ensure that there is no framebuffer being tracked or the tracked framebuffer isn't bound.
*/
void ensure_no_active_framebuffer();
/**
* Ensure that the tracked framebuffer is bound.
*/
void ensure_active_framebuffer();
};
} // namespace blender::gpu

View File

@ -203,17 +203,22 @@ class VKPushConstants : VKResourceTracker<VKUniformBuffer> {
const T *input_data)
{
const Layout::PushConstant *push_constant_layout = layout_->find(location);
BLI_assert(push_constant_layout);
if (push_constant_layout == nullptr) {
/* Legacy code can still try to update push constants when they don't exist. For example
* `immDrawPixelsTexSetup` will bind an image slot manually. This works in OpenGL, but in
* vulkan images aren't stored as push constants. */
return;
}
uint8_t *bytes = static_cast<uint8_t *>(data_);
T *dst = static_cast<T *>(static_cast<void *>(&bytes[push_constant_layout->offset]));
const bool is_tightly_std140_packed = (comp_len % 4) == 0;
if (layout_->storage_type_get() == StorageType::PUSH_CONSTANTS || array_size == 0 ||
is_tightly_std140_packed) {
BLI_assert_msg(push_constant_layout->offset + comp_len * array_size * sizeof(T) <=
layout_->size_in_bytes(),
push_constant_layout->array_size == 0 || is_tightly_std140_packed) {
const size_t copy_size_in_bytes = comp_len * max_ii(array_size, 1) * sizeof(T);
BLI_assert_msg(push_constant_layout->offset + copy_size_in_bytes <= layout_->size_in_bytes(),
"Tried to write outside the push constant allocated memory.");
memcpy(dst, input_data, comp_len * array_size * sizeof(T));
memcpy(dst, input_data, copy_size_in_bytes);
is_dirty_ = true;
return;
}

View File

@ -984,12 +984,14 @@ void VKShader::unbind()
void VKShader::uniform_float(int location, int comp_len, int array_size, const float *data)
{
pipeline_get().push_constants_get().push_constant_set(location, comp_len, array_size, data);
VKPushConstants &push_constants = pipeline_get().push_constants_get();
push_constants.push_constant_set(location, comp_len, array_size, data);
}
void VKShader::uniform_int(int location, int comp_len, int array_size, const int *data)
{
pipeline_get().push_constants_get().push_constant_set(location, comp_len, array_size, data);
VKPushConstants &push_constants = pipeline_get().push_constants_get();
push_constants.push_constant_set(location, comp_len, array_size, data);
}
std::string VKShader::resources_declare(const shader::ShaderCreateInfo &info) const
@ -1183,6 +1185,7 @@ std::string VKShader::geometry_layout_declare(const shader::ShaderCreateInfo &in
}
ss << "\n";
location = 0;
for (const StageInterfaceInfo *iface : info.geometry_out_interfaces_) {
bool has_matching_input_iface = find_interface_by_name(info.vertex_out_interfaces_,
iface->instance_name) != nullptr;

View File

@ -17,7 +17,7 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
using namespace blender::gpu::shader;
attr_len_ = 0;
attr_len_ = info.vertex_inputs_.size();
uniform_len_ = info.push_constants_.size();
ssbo_len_ = 0;
ubo_len_ = 0;
@ -58,7 +58,7 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
/* Make sure that the image slots don't overlap with the sampler slots. */
image_offset_++;
int32_t input_tot_len = ubo_len_ + uniform_len_ + ssbo_len_;
int32_t input_tot_len = attr_len_ + ubo_len_ + uniform_len_ + ssbo_len_;
inputs_ = static_cast<ShaderInput *>(
MEM_calloc_arrayN(input_tot_len, sizeof(ShaderInput), __func__));
ShaderInput *input = inputs_;
@ -66,6 +66,20 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
name_buffer_ = (char *)MEM_mallocN(names_size, "name_buffer");
uint32_t name_buffer_offset = 0;
/* Attributes */
for (const ShaderCreateInfo::VertIn &attr : info.vertex_inputs_) {
copy_input_name(input, attr.name, name_buffer_, name_buffer_offset);
input->location = input->binding = attr.index;
if (input->location != -1) {
enabled_attr_mask_ |= (1 << input->location);
/* Used in `GPU_shader_get_attribute_info`. */
attr_types_[input->location] = uint8_t(attr.type);
}
input++;
}
/* Uniform blocks */
for (const ShaderCreateInfo::Resource &res : all_resources) {
if (res.bind_type == ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER) {
@ -131,7 +145,9 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
}
/* Determine the descriptor set locations after the inputs have been sorted. */
descriptor_set_locations_ = Array<VKDescriptorSet::Location>(input_tot_len);
/* Note: input_tot_len is sometimes more than we need. */
const uint32_t resources_len = input_tot_len;
descriptor_set_locations_ = Array<VKDescriptorSet::Location>(resources_len);
uint32_t descriptor_set_location = 0;
for (ShaderCreateInfo::Resource &res : all_resources) {
const ShaderInput *input = shader_input_get(res);

View File

@ -7,6 +7,8 @@
#include "vk_uniform_buffer.hh"
#include "vk_context.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
namespace blender::gpu {
@ -22,13 +24,42 @@ void VKUniformBuffer::update(const void *data)
/* Create the backing GPU buffer and attach a debug label with the UBO's name. */
void VKUniformBuffer::allocate(VKContext &context)
{
  buffer_.create(context, size_in_bytes_, GPU_USAGE_STATIC, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
  debug::object_label(&context, buffer_.vk_handle(), name_);
}
void VKUniformBuffer::clear_to_zero() {}
/* Fill the buffer with zeros, lazily allocating the backing buffer first if needed. */
void VKUniformBuffer::clear_to_zero()
{
  VKContext &context = *VKContext::get();
  if (!buffer_.is_allocated()) {
    allocate(context);
  }
  buffer_.clear(context, 0);
}
void VKUniformBuffer::bind(int /*slot*/) {}
void VKUniformBuffer::bind(int slot, shader::ShaderCreateInfo::Resource::BindType bind_type)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}
void VKUniformBuffer::bind_as_ssbo(int /*slot*/) {}
VKShader *shader = static_cast<VKShader *>(context.shader);
const VKShaderInterface &shader_interface = shader->interface_get();
const VKDescriptorSet::Location location = shader_interface.descriptor_set_location(bind_type,
slot);
VKDescriptorSetTracker &descriptor_set = shader->pipeline_get().descriptor_set_get();
descriptor_set.bind(*this, location);
}
/* Bind as a uniform buffer; forwards to the bind-type overload. */
void VKUniformBuffer::bind(int slot)
{
  bind(slot, shader::ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER);
}
/* Bind as a shader storage buffer; forwards to the bind-type overload. */
void VKUniformBuffer::bind_as_ssbo(int slot)
{
  bind(slot, shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER);
}
void VKUniformBuffer::unbind() {}

View File

@ -39,6 +39,7 @@ class VKUniformBuffer : public UniformBuf, NonCopyable {
private:
void allocate(VKContext &context);
void bind(int slot, shader::ShaderCreateInfo::Resource::BindType bind_type);
};
} // namespace blender::gpu

View File

@ -202,7 +202,21 @@ static void import_startjob(void *customdata, bool *stop, bool *do_update, float
*data->do_update = true;
*data->progress = 0.1f;
pxr::UsdStageRefPtr stage = pxr::UsdStage::Open(data->filepath);
std::string prim_path_mask(data->params.prim_path_mask);
pxr::UsdStagePopulationMask pop_mask;
if (!prim_path_mask.empty()) {
const std::vector<std::string> mask_tokens = pxr::TfStringTokenize(prim_path_mask, ",;");
for (const std::string &tok : mask_tokens) {
pxr::SdfPath prim_path(tok);
if (!prim_path.IsEmpty()) {
pop_mask.Add(prim_path);
}
}
}
pxr::UsdStageRefPtr stage = pop_mask.IsEmpty() ?
pxr::UsdStage::Open(data->filepath) :
pxr::UsdStage::OpenMasked(data->filepath, pop_mask);
if (!stage) {
WM_reportf(RPT_ERROR, "USD Import: unable to open stage to read %s", data->filepath);
@ -376,6 +390,8 @@ static void import_endjob(void *customdata)
break;
}
MEM_SAFE_FREE(data->params.prim_path_mask);
WM_main_add_notifier(NC_SCENE | ND_FRAME, data->scene);
report_job_duration(data);
}

View File

@ -312,19 +312,6 @@ void USDStageReader::collect_readers(Main *bmain)
/* Iterate through the stage. */
pxr::UsdPrim root = stage_->GetPseudoRoot();
std::string prim_path_mask(params_.prim_path_mask);
if (!prim_path_mask.empty()) {
pxr::UsdPrim prim = stage_->GetPrimAtPath(pxr::SdfPath(prim_path_mask));
if (prim.IsValid()) {
root = prim;
}
else {
std::cerr << "WARNING: Prim Path Mask " << prim_path_mask
<< " does not specify a valid prim.\n";
}
}
stage_->SetInterpolationType(pxr::UsdInterpolationType::UsdInterpolationTypeHeld);
collect_readers(bmain, root);
}

View File

@ -67,7 +67,7 @@ struct USDImportParams {
bool import_meshes;
bool import_volumes;
bool import_shapes;
char prim_path_mask[1024];
char *prim_path_mask;
bool import_subdiv;
bool import_instance_proxies;
bool create_collection;

View File

@ -136,7 +136,7 @@ class BlurOperation : public NodeOperation {
const float2 blur_radius = compute_blur_radius();
const SymmetricBlurWeights &weights = context().cache_manager().get_symmetric_blur_weights(
const SymmetricBlurWeights &weights = context().cache_manager().symmetric_blur_weights.get(
node_storage(bnode()).filtertype, blur_radius);
weights.bind_as_texture(shader, "weights_tx");
@ -171,7 +171,7 @@ class BlurOperation : public NodeOperation {
const float2 blur_radius = compute_blur_radius();
const SymmetricBlurWeights &weights = context().cache_manager().get_symmetric_blur_weights(
const SymmetricBlurWeights &weights = context().cache_manager().symmetric_blur_weights.get(
node_storage(bnode()).filtertype, blur_radius);
weights.bind_as_texture(shader, "weights_tx");

View File

@ -256,7 +256,7 @@ class DilateErodeOperation : public NodeOperation {
input_image.bind_as_texture(shader, "input_tx");
const MorphologicalDistanceFeatherWeights &weights =
context().cache_manager().get_morphological_distance_feather_weights(
context().cache_manager().morphological_distance_feather_weights.get(
node_storage(bnode()).falloff, math::abs(get_distance()));
weights.bind_weights_as_texture(shader, "weights_tx");
weights.bind_distance_falloffs_as_texture(shader, "falloffs_tx");
@ -297,7 +297,7 @@ class DilateErodeOperation : public NodeOperation {
GPU_texture_bind(horizontal_pass_result, texture_image_unit);
const MorphologicalDistanceFeatherWeights &weights =
context().cache_manager().get_morphological_distance_feather_weights(
context().cache_manager().morphological_distance_feather_weights.get(
node_storage(bnode()).falloff, math::abs(get_distance()));
weights.bind_weights_as_texture(shader, "weights_tx");
weights.bind_distance_falloffs_as_texture(shader, "falloffs_tx");

View File

@ -55,7 +55,7 @@ class TextureOperation : public NodeOperation {
}
const Domain domain = compute_domain();
CachedTexture &cached_texture = context().cache_manager().get_cached_texture(
CachedTexture &cached_texture = context().cache_manager().cached_textures.get(
context(),
get_texture(),
context().get_scene(),

View File

@ -568,6 +568,7 @@ void register_node_type_group_input()
node_type_base(ntype, NODE_GROUP_INPUT, "Group Input", NODE_CLASS_INTERFACE);
node_type_size(ntype, 140, 80, 400);
ntype->gather_add_node_search_ops = blender::nodes::search_node_add_ops_for_basic_node;
ntype->declare_dynamic = blender::nodes::group_input_declare_dynamic;
ntype->insert_link = blender::nodes::group_input_insert_link;
@ -593,6 +594,7 @@ void register_node_type_group_output()
node_type_base(ntype, NODE_GROUP_OUTPUT, "Group Output", NODE_CLASS_INTERFACE);
node_type_size(ntype, 140, 80, 400);
ntype->gather_add_node_search_ops = blender::nodes::search_node_add_ops_for_basic_node;
ntype->declare_dynamic = blender::nodes::group_output_declare_dynamic;
ntype->insert_link = blender::nodes::group_output_insert_link;