Animation: Match Slope slider #110567

Merged
Christoph Lendenfeld merged 24 commits from ChrisLend/blender:blend_to_infinity_slider into main 2023-08-17 10:28:56 +02:00
43 changed files with 1432 additions and 337 deletions
Showing only changes of commit 232dc85eea

View File

@ -1300,16 +1300,29 @@ macro(windows_install_shared_manifest)
endif()
if(WINDOWS_INSTALL_DEBUG)
set(WINDOWS_CONFIGURATIONS "${WINDOWS_CONFIGURATIONS};Debug")
list(APPEND WINDOWS_SHARED_MANIFEST_DEBUG ${WINDOWS_INSTALL_FILES})
endif()
if(WINDOWS_INSTALL_RELEASE)
list(APPEND WINDOWS_SHARED_MANIFEST_RELEASE ${WINDOWS_INSTALL_FILES})
set(WINDOWS_CONFIGURATIONS "${WINDOWS_CONFIGURATIONS};Release;RelWithDebInfo;MinSizeRel")
endif()
install(FILES ${WINDOWS_INSTALL_FILES}
CONFIGURATIONS ${WINDOWS_CONFIGURATIONS}
DESTINATION "./blender.shared"
)
if(NOT WITH_PYTHON_MODULE)
# Blender executable with manifest.
if(WINDOWS_INSTALL_DEBUG)
list(APPEND WINDOWS_SHARED_MANIFEST_DEBUG ${WINDOWS_INSTALL_FILES})
endif()
if(WINDOWS_INSTALL_RELEASE)
list(APPEND WINDOWS_SHARED_MANIFEST_RELEASE ${WINDOWS_INSTALL_FILES})
endif()
install(FILES ${WINDOWS_INSTALL_FILES}
CONFIGURATIONS ${WINDOWS_CONFIGURATIONS}
DESTINATION "./blender.shared"
)
else()
# Python module without manifest.
install(FILES ${WINDOWS_INSTALL_FILES}
CONFIGURATIONS ${WINDOWS_CONFIGURATIONS}
DESTINATION "./bpy"
)
endif()
endmacro()
macro(windows_generate_manifest)
@ -1326,24 +1339,28 @@ macro(windows_generate_manifest)
endmacro()
macro(windows_generate_shared_manifest)
windows_generate_manifest(
FILES "${WINDOWS_SHARED_MANIFEST_DEBUG}"
OUTPUT "${CMAKE_BINARY_DIR}/Debug/blender.shared.manifest"
NAME "blender.shared"
)
windows_generate_manifest(
FILES "${WINDOWS_SHARED_MANIFEST_RELEASE}"
OUTPUT "${CMAKE_BINARY_DIR}/Release/blender.shared.manifest"
NAME "blender.shared"
)
install(
FILES ${CMAKE_BINARY_DIR}/Release/blender.shared.manifest
DESTINATION "./blender.shared"
CONFIGURATIONS Release;RelWithDebInfo;MinSizeRel
)
install(
FILES ${CMAKE_BINARY_DIR}/Debug/blender.shared.manifest
DESTINATION "./blender.shared"
CONFIGURATIONS Debug
)
if(WINDOWS_SHARED_MANIFEST_DEBUG)
windows_generate_manifest(
FILES "${WINDOWS_SHARED_MANIFEST_DEBUG}"
OUTPUT "${CMAKE_BINARY_DIR}/Debug/blender.shared.manifest"
NAME "blender.shared"
)
install(
FILES ${CMAKE_BINARY_DIR}/Debug/blender.shared.manifest
DESTINATION "./blender.shared"
CONFIGURATIONS Debug
)
endif()
if(WINDOWS_SHARED_MANIFEST_RELEASE)
windows_generate_manifest(
FILES "${WINDOWS_SHARED_MANIFEST_RELEASE}"
OUTPUT "${CMAKE_BINARY_DIR}/Release/blender.shared.manifest"
NAME "blender.shared"
)
install(
FILES ${CMAKE_BINARY_DIR}/Release/blender.shared.manifest
DESTINATION "./blender.shared"
CONFIGURATIONS Release;RelWithDebInfo;MinSizeRel
)
endif()
endmacro()

View File

@ -114,12 +114,13 @@ add_definitions(-D_WIN32_WINNT=0x603)
# First generate the manifest for tests since it will not need the dependency on the CRT.
configure_file(${CMAKE_SOURCE_DIR}/release/windows/manifest/blender.exe.manifest.in ${CMAKE_CURRENT_BINARY_DIR}/tests.exe.manifest @ONLY)
if(WITH_WINDOWS_BUNDLE_CRT)
set(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP TRUE)
set(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
set(CMAKE_INSTALL_OPENMP_LIBRARIES ${WITH_OPENMP})
include(InstallRequiredSystemLibraries)
# Always detect CRT paths, but only manually install with WITH_WINDOWS_BUNDLE_CRT.
set(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_SKIP TRUE)
set(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
set(CMAKE_INSTALL_OPENMP_LIBRARIES ${WITH_OPENMP})
include(InstallRequiredSystemLibraries)
if(WITH_WINDOWS_BUNDLE_CRT)
# ucrtbase(d).dll cannot be in the manifest, due to the way windows 10 handles
# redirects for this dll, for details see #88813.
foreach(lib ${CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS})
@ -141,7 +142,9 @@ if(WITH_WINDOWS_BUNDLE_CRT)
install(FILES ${CMAKE_BINARY_DIR}/blender.crt.manifest DESTINATION ./blender.crt)
set(BUNDLECRT "<dependency><dependentAssembly><assemblyIdentity type=\"win32\" name=\"blender.crt\" version=\"1.0.0.0\" /></dependentAssembly></dependency>")
endif()
set(BUNDLECRT "${BUNDLECRT}<dependency><dependentAssembly><assemblyIdentity type=\"win32\" name=\"blender.shared\" version=\"1.0.0.0\" /></dependentAssembly></dependency>")
if(NOT WITH_PYTHON_MODULE)
set(BUNDLECRT "${BUNDLECRT}<dependency><dependentAssembly><assemblyIdentity type=\"win32\" name=\"blender.shared\" version=\"1.0.0.0\" /></dependentAssembly></dependency>")
endif()
configure_file(${CMAKE_SOURCE_DIR}/release/windows/manifest/blender.exe.manifest.in ${CMAKE_CURRENT_BINARY_DIR}/blender.exe.manifest @ONLY)

View File

@ -231,3 +231,22 @@ index 355ee008246..a770bbee60c 100644
}
allocator.deallocate(values, capacity);
capacity = 0;
diff --git a/extern/quadriflow/src/hierarchy.cpp b/extern/quadriflow/src/hierarchy.cpp
index 8cc41da23d0..70a9628320f 100644
--- a/extern/quadriflow/src/hierarchy.cpp
+++ b/extern/quadriflow/src/hierarchy.cpp
@@ -269,7 +269,13 @@ void Hierarchy::DownsampleGraph(const AdjacentMatrix adj, const MatrixXd& V, con
for (auto it = ad.begin(); it != ad.end(); ++it, ++entry_it) {
int k = it->id;
double dp = N.col(i).dot(N.col(k));
- double ratio = A[i] > A[k] ? (A[i] / A[k]) : (A[k] / A[i]);
+ double ratio;
+ if (A[i] > A[k]) {
+ ratio = (A[k] == 0.0f) ? 1.0f : A[i] / A[k];
+ }
+ else {
+ ratio = (A[i] == 0.0f) ? 1.0f : A[k] / A[i];
+ }
*entry_it = Entry(i, k, dp * ratio);
}
}

View File

@ -269,7 +269,13 @@ void Hierarchy::DownsampleGraph(const AdjacentMatrix adj, const MatrixXd& V, con
for (auto it = ad.begin(); it != ad.end(); ++it, ++entry_it) {
int k = it->id;
double dp = N.col(i).dot(N.col(k));
double ratio = A[i] > A[k] ? (A[i] / A[k]) : (A[k] / A[i]);
double ratio;
if (A[i] > A[k]) {
ratio = (A[k] == 0.0f) ? 1.0f : A[i] / A[k];
}
else {
ratio = (A[i] == 0.0f) ? 1.0f : A[k] / A[i];
}
*entry_it = Entry(i, k, dp * ratio);
}
}
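The guard above avoids a division by zero when one of the accumulated areas is zero (degenerate geometry), falling back to a neutral ratio of 1.0. A standalone sketch of the same logic, with a hypothetical helper name:

#include <cassert>

/* Ratio of the larger value to the smaller one; returns 1.0 when the smaller
 * value is zero so degenerate input cannot produce inf or NaN. */
static double safe_ratio(double a, double b)
{
  if (a > b) {
    return (b == 0.0) ? 1.0 : a / b;
  }
  return (a == 0.0) ? 1.0 : b / a;
}

int main()
{
  assert(safe_ratio(4.0, 2.0) == 2.0);
  assert(safe_ratio(2.0, 4.0) == 2.0);
  assert(safe_ratio(0.0, 2.0) == 1.0); /* Previously divided by zero. */
  return 0;
}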

View File

@ -619,7 +619,12 @@ ccl_device_forceinline void volume_integrate_heterogeneous(
const Spectrum emission = volume_emission_integrate(
&coeff, closure_flag, transmittance, dt);
accum_emission += result.indirect_throughput * emission;
guiding_record_volume_emission(kg, state, emission);
# if OPENPGL_VERSION_MINOR < 5 // WORKAROUND #104329
if (kernel_data.integrator.max_volume_bounce > 1)
# endif
{
guiding_record_volume_emission(kg, state, emission);
}
}
}
@ -961,9 +966,13 @@ ccl_device_forceinline bool integrate_volume_phase_scatter(
const Spectrum phase_weight = bsdf_eval_sum(&phase_eval) / phase_pdf;
/* Add phase function sampling data to the path segment. */
guiding_record_volume_bounce(
kg, state, sd, phase_weight, phase_pdf, normalize(phase_wo), sampled_roughness);
# if OPENPGL_VERSION_MINOR < 5 // WORKAROUND #104329
if (kernel_data.integrator.max_volume_bounce > 1)
# endif
{
guiding_record_volume_bounce(
kg, state, sd, phase_weight, phase_pdf, normalize(phase_wo), sampled_roughness);
}
/* Update throughput. */
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
const Spectrum throughput_phase = throughput * phase_weight;
@ -1058,7 +1067,11 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
const float3 direct_P = ray->P + result.direct_t * ray->D;
# ifdef __PATH_GUIDING__
# if OPENPGL_VERSION_MINOR < 5 // WORKAROUND #104329
if (kernel_data.integrator.use_guiding && kernel_data.integrator.max_volume_bounce > 1) {
# else
if (kernel_data.integrator.use_guiding) {
# endif
# if PATH_GUIDING_LEVEL >= 1
if (result.direct_sample_method == VOLUME_SAMPLE_DISTANCE) {
/* If the direct scatter event is generated using VOLUME_SAMPLE_DISTANCE the direct event
@ -1131,7 +1144,12 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
# if defined(__PATH_GUIDING__)
# if PATH_GUIDING_LEVEL >= 1
if (!guiding_generated_new_segment) {
guiding_record_volume_segment(kg, state, sd.P, sd.wi);
# if OPENPGL_VERSION_MINOR < 5 // WORKAROUND #104329
if (kernel_data.integrator.max_volume_bounce > 1)
# endif
{
guiding_record_volume_segment(kg, state, sd.P, sd.wi);
}
}
# endif
# if PATH_GUIDING_LEVEL >= 4
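The conditional compilation above is easy to misread: when OPENPGL_VERSION_MINOR < 5 the `if` guards the braced block that follows it, and with newer OpenPGL the `if` is compiled out entirely so the block runs unconditionally. A self-contained sketch of the idiom (macro value and function name are placeholders, not part of this patch):

#include <cstdio>

#define OPENPGL_VERSION_MINOR 4 /* Placeholder; with >= 5 the guard vanishes. */

static bool volume_bounces_above_one()
{
  return false;
}

int main()
{
  /* With the workaround compiled in, the braced block is the body of the
   * `if`; without it, the block always executes. */
#if OPENPGL_VERSION_MINOR < 5 /* Mirrors WORKAROUND #104329 above. */
  if (volume_bounces_above_one())
#endif
  {
    std::puts("record guiding data");
  }
  return 0;
}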

View File

@ -23,6 +23,9 @@
#include <cstdio>
#include <cstring>
#include <iostream>
#include <sstream>
#include <sys/stat.h>
/* Set to 0 to allow devices that do not have the required features.
* This allows development on OSX until we really need these features. */
@ -80,6 +83,21 @@ static const char *vulkan_error_as_string(VkResult result)
}
}
enum class VkLayer : uint8_t { KHRONOS_validation };
static bool vklayer_config_exist(const char *vk_extension_config)
{
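/* The Vulkan loader reads VK_LAYER_PATH to locate explicit layer manifests;
 * only report a layer as usable when its JSON manifest is actually there. */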
const char *ev_val = getenv("VK_LAYER_PATH");
if (ev_val == nullptr) {
return false;
}
std::stringstream filename;
filename << ev_val;
filename << "/" << vk_extension_config;
struct stat buffer;
return (stat(filename.str().c_str(), &buffer) == 0);
}
#define __STR(A) "" #A
#define VK_CHECK(__expression) \
do { \
@ -401,16 +419,38 @@ static bool checkLayerSupport(vector<VkLayerProperties> &layers_available, const
static void enableLayer(vector<VkLayerProperties> &layers_available,
vector<const char *> &layers_enabled,
const char *layer_name,
const bool debug)
const VkLayer layer,
const bool display_warning)
{
if (checkLayerSupport(layers_available, layer_name)) {
layers_enabled.push_back(layer_name);
#define PUSH_VKLAYER(name, name2) \
if (vklayer_config_exist("VkLayer_" #name ".json") && \
checkLayerSupport(layers_available, "VK_LAYER_" #name2)) { \
layers_enabled.push_back("VK_LAYER_" #name2); \
enabled = true; \
} \
else { \
warnings << "VK_LAYER_" #name2; \
}
else if (debug) {
fprintf(
stderr, "Warning: Layer requested, but not supported by the platform. [%s]\n", layer_name);
bool enabled = false;
std::stringstream warnings;
switch (layer) {
case VkLayer::KHRONOS_validation:
PUSH_VKLAYER(khronos_validation, KHRONOS_validation);
};
if (enabled) {
return;
}
if (display_warning) {
fprintf(stderr,
"Warning: Layer requested, but not supported by the platform. [%s] \n",
warnings.str().c_str());
}
#undef PUSH_VKLAYER
}
static bool device_extensions_support(VkPhysicalDevice device, vector<const char *> required_exts)
@ -864,7 +904,7 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
vector<const char *> layers_enabled;
if (m_debug) {
enableLayer(layers_available, layers_enabled, "VK_LAYER_KHRONOS_validation", m_debug);
enableLayer(layers_available, layers_enabled, VkLayer::KHRONOS_validation, m_debug);
}
vector<const char *> extensions_device;

View File

@ -398,7 +398,7 @@ Copyright Contributors to the OpenColorIO Project.
** OpenEXR; version 3.1.5 --
https://github.com/AcademySoftwareFoundation/openexr
Copyright Contributors to the OpenEXR Project. All rights reserved.
** OpenImageIO; version 2.4.6.0 -- http://www.openimageio.org
** OpenImageIO; version 2.4.9.0 -- http://www.openimageio.org
Copyright (c) 2008-present by Contributors to the OpenImageIO project. All
Rights Reserved.
** Pystring; version 1.1.3 -- https://github.com/imageworks/pystring

View File

@ -318,9 +318,9 @@ class NODE_MT_node(Menu):
layout.separator()
layout.operator("node.clipboard_copy", text="Copy")
row = layout.row()
row.operator_context = 'EXEC_DEFAULT'
row.operator("node.clipboard_paste", text="Paste")
layout.operator_context = 'EXEC_DEFAULT'
layout.operator("node.clipboard_paste", text="Paste")
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("node.duplicate_move")
layout.operator("node.duplicate_move_linked")
layout.operator("node.delete")

View File

@ -4,9 +4,9 @@
#include <atomic>
#include "BLI_implicit_sharing_ptr.hh"
#include "BLI_set.hh"
#include "BLI_string_ref.hh"
#include "BLI_user_counter.hh"
namespace blender::bke {
@ -32,10 +32,7 @@ namespace blender::bke {
* because that is not available in C code. If possible, the #AutoAnonymousAttributeID wrapper
* should be used to avoid manual reference counting in C++ code.
*/
class AnonymousAttributeID {
private:
mutable std::atomic<int> users_ = 1;
class AnonymousAttributeID : public ImplicitSharingMixin {
protected:
std::string name_;
@ -49,22 +46,15 @@ class AnonymousAttributeID {
virtual std::string user_name() const;
void user_add() const
private:
void delete_self() override
{
users_.fetch_add(1);
}
void user_remove() const
{
const int new_users = users_.fetch_sub(1) - 1;
if (new_users == 0) {
MEM_delete(this);
}
MEM_delete(this);
}
};
/** Wrapper for #AnonymousAttributeID that avoids manual reference counting. */
using AutoAnonymousAttributeID = UserCounter<const AnonymousAttributeID>;
using AutoAnonymousAttributeID = ImplicitSharingPtr<const AnonymousAttributeID>;
/**
* A set of anonymous attribute names that is passed around in geometry nodes.

View File

@ -12,8 +12,6 @@
#include "BLI_function_ref.hh"
#include "BLI_map.hh"
#include "BLI_math_vector_types.hh"
#include "BLI_user_counter.hh"
#include "BLI_vector_set.hh"
#include "BKE_attribute.hh"
@ -40,18 +38,13 @@ class CurvesEditHints;
class Instances;
} // namespace blender::bke
class GeometryComponent;
/**
* This is the base class for specialized geometry component types. A geometry component handles
* a user count to allow avoiding duplication when it is wrapped with #UserCounter. It also handles
* the attribute API, which generalizes storing and modifying generic information on a geometry.
* This is the base class for specialized geometry component types. A geometry component uses
* implicit sharing to avoid read-only copies. It also integrates with the attribute API, which
* generalizes storing and modifying generic information on a geometry.
*/
class GeometryComponent {
class GeometryComponent : public blender::ImplicitSharingMixin {
private:
/* The reference count has two purposes. When it becomes zero, the component is freed. When it is
* larger than one, the component becomes immutable. */
mutable std::atomic<int> users_ = 1;
GeometryComponentType type_;
public:
@ -77,13 +70,12 @@ class GeometryComponent {
virtual bool owns_direct_data() const = 0;
virtual void ensure_owns_direct_data() = 0;
void user_add() const;
void user_remove() const;
bool is_mutable() const;
GeometryComponentType type() const;
virtual bool is_empty() const;
private:
void delete_self() override;
};
template<typename T>
@ -109,7 +101,7 @@ inline constexpr bool is_geometry_component_v = std::is_base_of_v<GeometryCompon
*/
struct GeometrySet {
private:
using GeometryComponentPtr = blender::UserCounter<class GeometryComponent>;
using GeometryComponentPtr = blender::ImplicitSharingPtr<class GeometryComponent>;
/* Indexed by #GeometryComponentType. */
std::array<GeometryComponentPtr, GEO_COMPONENT_TYPE_ENUM_SIZE> components_;

View File

@ -2273,7 +2273,7 @@ bool CustomData_merge(const CustomData *source,
layer->anonymous_id = nullptr;
}
else {
layer->anonymous_id->user_add();
layer->anonymous_id->add_user();
}
}
if (alloctype == CD_ASSIGN) {
@ -2365,7 +2365,7 @@ static void customData_free_layer__internal(CustomDataLayer *layer, const int to
const LayerTypeInfo *typeInfo;
if (layer->anonymous_id != nullptr) {
layer->anonymous_id->user_remove();
layer->anonymous_id->remove_user_and_delete_if_last();
layer->anonymous_id = nullptr;
}
if (!(layer->flag & CD_FLAG_NOFREE) && layer->data) {
@ -2956,7 +2956,7 @@ void *CustomData_add_layer_anonymous(CustomData *data,
return nullptr;
}
anonymous_id->user_add();
anonymous_id->add_user();
layer->anonymous_id = anonymous_id;
return layer->data;
}

View File

@ -84,26 +84,6 @@ std::optional<blender::bke::MutableAttributeAccessor> GeometryComponent::attribu
return std::nullopt;
}
void GeometryComponent::user_add() const
{
users_.fetch_add(1);
}
void GeometryComponent::user_remove() const
{
const int new_users = users_.fetch_sub(1) - 1;
if (new_users == 0) {
delete this;
}
}
bool GeometryComponent::is_mutable() const
{
/* If the item is shared, it is read-only. */
/* The user count can be 0, when this is called from the destructor. */
return users_ <= 1;
}
GeometryComponentType GeometryComponent::type() const
{
return type_;
@ -114,6 +94,11 @@ bool GeometryComponent::is_empty() const
return false;
}
void GeometryComponent::delete_self()
{
delete this;
}
/** \} */
/* -------------------------------------------------------------------- */
@ -198,7 +183,7 @@ void GeometrySet::remove_geometry_during_modify()
void GeometrySet::add(const GeometryComponent &component)
{
BLI_assert(!components_[component.type()]);
component.user_add();
component.add_user();
components_[component.type()] = const_cast<GeometryComponent *>(&component);
}

View File

@ -0,0 +1,109 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup bli
*/
#include <atomic>
#include "BLI_compiler_attrs.h"
#include "BLI_utildefines.h"
#include "BLI_utility_mixins.hh"
namespace blender {
/**
* #ImplicitSharingInfo is the core data structure for implicit sharing in Blender. Implicit
* sharing is a technique that avoids copying data when it is not necessary. This results in better
* memory usage and performance. Only read-only data can be shared, because otherwise multiple
* owners might want to change the data in conflicting ways.
*
* To determine whether data is shared, #ImplicitSharingInfo keeps a user count. If the count is 1,
* the data only has a single owner and is therefore mutable. If some code wants to modify data
* that is currently shared, it has to make a copy first.
* This behavior is also called "copy on write".
*
* In addition to containing the reference count, #ImplicitSharingInfo also knows how to destruct
* the referenced data. This is important because the code freeing the data in the end might not
* know how it was allocated (for example, it doesn't know whether an array was allocated using the
* system or guarded allocator).
*
* #ImplicitSharingInfo can be used in two ways:
* - It can be allocated separately from the referenced data. This is used when the shared data is
* e.g. a plain data array.
* - It can be embedded into another struct. For that it's best to use #ImplicitSharingMixin.
*/
class ImplicitSharingInfo : NonCopyable, NonMovable {
private:
mutable std::atomic<int> users_;
public:
ImplicitSharingInfo(const int initial_users) : users_(initial_users)
{
}
virtual ~ImplicitSharingInfo()
{
BLI_assert(this->is_mutable());
}
/** True if there are other const references to the resource, meaning it cannot be modified. */
bool is_shared() const
{
return users_.load(std::memory_order_relaxed) >= 2;
}
/** Whether the resource can be modified without a copy because there is only one owner. */
bool is_mutable() const
{
return !this->is_shared();
}
/** Call when the data has a new additional owner. */
void add_user() const
{
users_.fetch_add(1, std::memory_order_relaxed);
}
/**
* Call when the data is no longer needed. This might just decrement the user count, or it might
* also delete the data if this was the last user.
*/
void remove_user_and_delete_if_last() const
{
const int old_user_count = users_.fetch_sub(1, std::memory_order_acq_rel);
BLI_assert(old_user_count >= 1);
const bool was_last_user = old_user_count == 1;
if (was_last_user) {
const_cast<ImplicitSharingInfo *>(this)->delete_self_with_data();
}
}
private:
/** Has to free the #ImplicitSharingInfo and the referenced data. */
virtual void delete_self_with_data() = 0;
};
/**
* Makes it easy to embed implicit-sharing behavior into a struct. Structs that derive from this
* class can be used with #ImplicitSharingPtr.
*/
class ImplicitSharingMixin : public ImplicitSharingInfo {
public:
ImplicitSharingMixin() : ImplicitSharingInfo(1)
{
}
private:
void delete_self_with_data() override
{
/* Can't use `delete this` here, because we don't know what allocator was used. */
this->delete_self();
}
virtual void delete_self() = 0;
};
} // namespace blender
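The mixin case is what this patch and its test exercise; the first usage mode described above (an #ImplicitSharingInfo allocated separately from a plain data array) would look roughly like the sketch below. The subclass name and the guarded-allocator calls are assumptions, not part of this commit:

#include "BLI_implicit_sharing.hh"
#include "MEM_guardedalloc.h"

namespace blender {

/* Sharing info that lives separately from the plain array it guards. */
class ArraySharingInfo : public ImplicitSharingInfo {
 private:
  void *data_;

 public:
  ArraySharingInfo(void *data) : ImplicitSharingInfo(1), data_(data) {}

 private:
  void delete_self_with_data() override
  {
    /* Only this subclass knows how the array was allocated, so it can free
     * both the array and itself correctly. */
    MEM_freeN(data_);
    MEM_delete(this);
  }
};

}  // namespace blender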

View File

@ -6,59 +6,60 @@
* \ingroup bli
*/
#include <atomic>
#include "BLI_implicit_sharing.hh"
namespace blender {
/**
* A simple automatic reference counter. It is similar to std::shared_ptr, but expects that the
* reference count is inside the object.
* #ImplicitSharingPtr is a smart pointer that manages implicit sharing. It's designed to work with
* types that derive from #ImplicitSharingMixin. It is fairly similar to #std::shared_ptr but
* requires the reference count to be embedded in the data.
*/
template<typename T> class UserCounter {
template<typename T> class ImplicitSharingPtr {
private:
T *data_ = nullptr;
public:
UserCounter() = default;
ImplicitSharingPtr() = default;
UserCounter(T *data) : data_(data)
ImplicitSharingPtr(T *data) : data_(data)
{
}
UserCounter(const UserCounter &other) : data_(other.data_)
ImplicitSharingPtr(const ImplicitSharingPtr &other) : data_(other.data_)
{
this->user_add(data_);
this->add_user(data_);
}
UserCounter(UserCounter &&other) : data_(other.data_)
ImplicitSharingPtr(ImplicitSharingPtr &&other) : data_(other.data_)
{
other.data_ = nullptr;
}
~UserCounter()
~ImplicitSharingPtr()
{
this->user_remove(data_);
this->remove_user_and_delete_if_last(data_);
}
UserCounter &operator=(const UserCounter &other)
ImplicitSharingPtr &operator=(const ImplicitSharingPtr &other)
{
if (this == &other) {
return *this;
}
this->user_remove(data_);
this->remove_user_and_delete_if_last(data_);
data_ = other.data_;
this->user_add(data_);
this->add_user(data_);
return *this;
}
UserCounter &operator=(UserCounter &&other)
ImplicitSharingPtr &operator=(ImplicitSharingPtr &&other)
{
if (this == &other) {
return *this;
}
this->user_remove(data_);
this->remove_user_and_delete_if_last(data_);
data_ = other.data_;
other.data_ = nullptr;
return *this;
@ -112,7 +113,7 @@ template<typename T> class UserCounter {
void reset()
{
this->user_remove(data_);
this->remove_user_and_delete_if_last(data_);
data_ = nullptr;
}
@ -126,29 +127,23 @@ template<typename T> class UserCounter {
return get_default_hash(data_);
}
friend bool operator==(const UserCounter &a, const UserCounter &b)
friend bool operator==(const ImplicitSharingPtr &a, const ImplicitSharingPtr &b)
{
return a.data_ == b.data_;
}
friend std::ostream &operator<<(std::ostream &stream, const UserCounter &value)
{
stream << value.data_;
return stream;
}
private:
static void user_add(T *data)
static void add_user(T *data)
{
if (data != nullptr) {
data->user_add();
data->add_user();
}
}
static void user_remove(T *data)
static void remove_user_and_delete_if_last(T *data)
{
if (data != nullptr) {
data->user_remove();
data->remove_user_and_delete_if_last();
}
}
};

View File

@ -646,6 +646,24 @@ class Set {
return !Intersects(a, b);
}
friend bool operator==(const Set &a, const Set &b)
{
if (a.size() != b.size()) {
return false;
}
for (const Key &key : a) {
if (!b.contains(key)) {
return false;
}
}
return true;
}
friend bool operator!=(const Set &a, const Set &b)
{
return !(a == b);
}
private:
BLI_NOINLINE void realloc_and_reinsert(const int64_t min_usable_slots)
{

View File

@ -243,6 +243,8 @@ set(SRC
BLI_hash_tables.hh
BLI_heap.h
BLI_heap_simple.h
BLI_implicit_sharing.hh
BLI_implicit_sharing_ptr.hh
BLI_index_mask.hh
BLI_index_mask_ops.hh
BLI_index_range.hh
@ -355,7 +357,6 @@ set(SRC
BLI_timecode.h
BLI_timeit.hh
BLI_timer.h
BLI_user_counter.hh
BLI_utildefines.h
BLI_utildefines_iter.h
BLI_utildefines_stack.h
@ -493,6 +494,7 @@ if(WITH_GTESTS)
tests/BLI_hash_mm2a_test.cc
tests/BLI_heap_simple_test.cc
tests/BLI_heap_test.cc
tests/BLI_implicit_sharing_test.cc
tests/BLI_index_mask_test.cc
tests/BLI_index_range_test.cc
tests/BLI_inplace_priority_queue_test.cc

View File

@ -0,0 +1,85 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "MEM_guardedalloc.h"
#include "BLI_implicit_sharing_ptr.hh"
#include "testing/testing.h"
namespace blender::tests {
class ImplicitlySharedData : public ImplicitSharingMixin {
public:
ImplicitSharingPtr<ImplicitlySharedData> copy() const
{
return MEM_new<ImplicitlySharedData>(__func__);
}
void delete_self() override
{
MEM_delete(this);
}
};
class SharedDataContainer {
private:
ImplicitSharingPtr<ImplicitlySharedData> data_;
public:
SharedDataContainer() : data_(MEM_new<ImplicitlySharedData>(__func__))
{
}
const ImplicitlySharedData *get_for_read() const
{
return data_.get();
}
ImplicitlySharedData *get_for_write()
{
if (!data_) {
return nullptr;
}
if (data_->is_mutable()) {
return data_.get();
}
data_ = data_->copy();
return data_.get();
}
};
TEST(implicit_sharing, CopyOnWriteAccess)
{
/* Create the initial data. */
SharedDataContainer a;
EXPECT_NE(a.get_for_read(), nullptr);
/* a and b share the same underlying data now. */
SharedDataContainer b = a;
EXPECT_EQ(a.get_for_read(), b.get_for_read());
/* c now shares the data with a and b. */
SharedDataContainer c = a;
EXPECT_EQ(b.get_for_read(), c.get_for_read());
/* Retrieving write access on b should make a copy because the data is shared. */
ImplicitlySharedData *data_b1 = b.get_for_write();
EXPECT_NE(data_b1, nullptr);
EXPECT_EQ(data_b1, b.get_for_read());
EXPECT_NE(data_b1, a.get_for_read());
EXPECT_NE(data_b1, c.get_for_read());
/* Retrieving the same write access again should *not* make another copy. */
ImplicitlySharedData *data_b2 = b.get_for_write();
EXPECT_EQ(data_b1, data_b2);
/* Moving b should also move the data. b then does not have ownership anymore. Since the data in
* b only had one owner, the data is still mutable now that d is the owner. */
SharedDataContainer d = std::move(b);
EXPECT_EQ(b.get_for_read(), nullptr);
EXPECT_EQ(b.get_for_write(), nullptr);
EXPECT_EQ(d.get_for_read(), data_b1);
EXPECT_EQ(d.get_for_write(), data_b1);
}
} // namespace blender::tests

View File

@ -600,6 +600,28 @@ TEST(set, RemoveUniquePtrWithRaw)
EXPECT_TRUE(set.is_empty());
}
TEST(set, Equality)
{
const Set<int> a = {1, 2, 3, 4, 5};
const Set<int> b = {5, 2, 3, 1, 4};
const Set<int> c = {1, 2, 3};
const Set<int> d = {1, 2, 3, 4, 5, 6};
const Set<int> e = {};
const Set<int> f = {10, 11, 12, 13, 14};
EXPECT_EQ(a, a);
EXPECT_EQ(a, b);
EXPECT_EQ(b, a);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(a, e);
EXPECT_NE(a, f);
EXPECT_NE(c, a);
EXPECT_NE(d, a);
EXPECT_NE(e, a);
EXPECT_NE(f, a);
}
/**
* Set this to 1 to activate the benchmark. It is disabled by default, because it prints a lot.
*/

View File

@ -1141,7 +1141,7 @@ static bool write_file_handle(Main *mainvar,
* asap afterward. */
id_lib_extern(id_iter);
}
else if (ID_FAKE_USERS(id_iter) > 0) {
else if (ID_FAKE_USERS(id_iter) > 0 && id_iter->asset_data == nullptr) {
/* Even though fake user is not directly editable by the user on linked data, it is a
* common 'work-around' to set it in library files on data-blocks that need to be linked
* but typically do not have an actual real user (e.g. texts, etc.).

View File

@ -3,6 +3,7 @@
#include "COM_MaskNode.h"
#include "COM_MaskOperation.h"
#include "COM_ScaleOperation.h"
namespace blender::compositor {
@ -50,7 +51,21 @@ void MaskNode::convert_to_operations(NodeConverter &converter,
}
converter.add_operation(operation);
converter.map_output_socket(output_mask, operation->get_output_socket());
ScaleFixedSizeOperation *scale_operation = new ScaleFixedSizeOperation();
scale_operation->set_variable_size(true);
/* Consider aspect ratio from scene. */
const int new_height = rd->xasp / rd->yasp * operation->get_mask_height();
scale_operation->set_new_height(new_height);
scale_operation->set_new_width(operation->get_mask_width());
scale_operation->set_is_aspect(false);
scale_operation->set_is_crop(false);
scale_operation->set_scale_canvas_max_size({float(data->size_x), float(data->size_y)});
converter.add_operation(scale_operation);
converter.add_link(operation->get_output_socket(0), scale_operation->get_input_socket(0));
converter.map_output_socket(output_mask, scale_operation->get_output_socket(0));
}
} // namespace blender::compositor

View File

@ -63,6 +63,14 @@ class MaskOperation : public MultiThreadedOperation {
mask_height_inv_ = 1.0f / (float)height;
mask_px_ofs_[1] = mask_height_inv_ * 0.5f;
}
int get_mask_width()
{
return mask_width_;
}
int get_mask_height()
{
return mask_height_;
}
void set_framenumber(int frame_number)
{
frame_number_ = frame_number;

View File

@ -4949,28 +4949,31 @@ static void draw_setting_widget(bAnimContext *ac,
bAnimListElem *ale,
const bAnimChannelType *acf,
uiBlock *block,
int xpos,
int ypos,
int setting)
const int xpos,
const int ypos,
const eAnimChannel_Settings setting)
{
short ptrsize, butType;
bool negflag;
bool usetoggle = true;
int flag, icon;
void *ptr;
int icon;
const char *tooltip;
uiBut *but = NULL;
bool enabled;
/* get the flag and the pointer to that flag */
flag = acf->setting_flag(ac, setting, &negflag);
ptr = acf->setting_ptr(ale, setting, &ptrsize);
enabled = ANIM_channel_setting_get(ac, ale, setting);
bool negflag;
const int flag = acf->setting_flag(ac, setting, &negflag);
short ptrsize;
void *ptr = acf->setting_ptr(ale, setting, &ptrsize);
if (!ptr || !flag) {
return;
}
const bool enabled = ANIM_channel_setting_get(ac, ale, setting);
/* get the base icon for the setting */
switch (setting) {
case ACHANNEL_SETTING_VISIBLE: /* visibility eyes */
// icon = ((enabled) ? ICON_HIDE_OFF : ICON_HIDE_ON);
// icon = (enabled ? ICON_HIDE_OFF : ICON_HIDE_ON);
icon = ICON_HIDE_ON;
if (ELEM(ale->type, ANIMTYPE_FCURVE, ANIMTYPE_NLACURVE)) {
@ -4995,13 +4998,13 @@ static void draw_setting_widget(bAnimContext *ac,
break;
case ACHANNEL_SETTING_EXPAND: /* expanded triangle */
// icon = ((enabled) ? ICON_TRIA_DOWN : ICON_TRIA_RIGHT);
// icon = (enabled ? ICON_TRIA_DOWN : ICON_TRIA_RIGHT);
icon = ICON_TRIA_RIGHT;
tooltip = TIP_("Make channels grouped under this channel visible");
break;
case ACHANNEL_SETTING_SOLO: /* NLA Tracks only */
// icon = ((enabled) ? ICON_SOLO_OFF : ICON_SOLO_ON);
// icon = (enabled ? ICON_SOLO_OFF : ICON_SOLO_ON);
icon = ICON_SOLO_OFF;
tooltip = TIP_(
"NLA Track is the only one evaluated in this animation data-block, with all others "
@ -5012,7 +5015,7 @@ static void draw_setting_widget(bAnimContext *ac,
case ACHANNEL_SETTING_PROTECT: /* protected lock */
/* TODO: what about when there's no protect needed? */
// icon = ((enabled) ? ICON_LOCKED : ICON_UNLOCKED);
// icon = (enabled ? ICON_LOCKED : ICON_UNLOCKED);
icon = ICON_UNLOCKED;
if (ale->datatype != ALE_NLASTRIP) {
@ -5024,7 +5027,7 @@ static void draw_setting_widget(bAnimContext *ac,
break;
case ACHANNEL_SETTING_MUTE: /* muted speaker */
icon = ((enabled) ? ICON_CHECKBOX_DEHLT : ICON_CHECKBOX_HLT);
icon = (enabled ? ICON_CHECKBOX_DEHLT : ICON_CHECKBOX_HLT);
usetoggle = false;
if (ELEM(ale->type, ANIMTYPE_FCURVE, ANIMTYPE_NLACURVE)) {
@ -5045,7 +5048,7 @@ static void draw_setting_widget(bAnimContext *ac,
break;
case ACHANNEL_SETTING_PINNED: /* pin icon */
// icon = ((enabled) ? ICON_PINNED : ICON_UNPINNED);
// icon = (enabled ? ICON_PINNED : ICON_UNPINNED);
icon = ICON_UNPINNED;
if (ale->type == ANIMTYPE_NLAACTION) {
@ -5064,6 +5067,7 @@ static void draw_setting_widget(bAnimContext *ac,
}
/* type of button */
short butType;
if (usetoggle) {
if (negflag) {
butType = UI_BTYPE_ICON_TOGGLE_N;
@ -5080,100 +5084,99 @@ static void draw_setting_widget(bAnimContext *ac,
butType = UI_BTYPE_TOGGLE;
}
}
/* draw button for setting */
if (ptr && flag) {
switch (ptrsize) {
case sizeof(int): /* integer pointer for setting */
but = uiDefIconButBitI(block,
butType,
flag,
0,
icon,
xpos,
ypos,
ICON_WIDTH,
ICON_WIDTH,
ptr,
0,
0,
0,
0,
tooltip);
break;
uiBut *but = NULL;
switch (ptrsize) {
case sizeof(int): /* integer pointer for setting */
but = uiDefIconButBitI(block,
butType,
flag,
0,
icon,
xpos,
ypos,
ICON_WIDTH,
ICON_WIDTH,
ptr,
0,
0,
0,
0,
tooltip);
break;
case sizeof(short): /* short pointer for setting */
but = uiDefIconButBitS(block,
butType,
flag,
0,
icon,
xpos,
ypos,
ICON_WIDTH,
ICON_WIDTH,
ptr,
0,
0,
0,
0,
tooltip);
break;
case sizeof(short): /* short pointer for setting */
but = uiDefIconButBitS(block,
butType,
flag,
0,
icon,
xpos,
ypos,
ICON_WIDTH,
ICON_WIDTH,
ptr,
0,
0,
0,
0,
tooltip);
break;
case sizeof(char): /* char pointer for setting */
but = uiDefIconButBitC(block,
butType,
flag,
0,
icon,
xpos,
ypos,
ICON_WIDTH,
ICON_WIDTH,
ptr,
0,
0,
0,
0,
tooltip);
break;
}
case sizeof(char): /* char pointer for setting */
but = uiDefIconButBitC(block,
butType,
flag,
0,
icon,
xpos,
ypos,
ICON_WIDTH,
ICON_WIDTH,
ptr,
0,
0,
0,
0,
tooltip);
break;
}
if (!but) {
return;
}
/* set call to send relevant notifiers and/or perform type-specific updates */
if (but) {
switch (setting) {
/* Settings needing flushing up/down hierarchy. */
case ACHANNEL_SETTING_VISIBLE: /* Graph Editor - 'visibility' toggles */
case ACHANNEL_SETTING_PROTECT: /* General - protection flags */
case ACHANNEL_SETTING_MUTE: /* General - muting flags */
case ACHANNEL_SETTING_PINNED: /* NLA Actions - 'map/nomap' */
case ACHANNEL_SETTING_MOD_OFF:
case ACHANNEL_SETTING_ALWAYS_VISIBLE:
UI_but_funcN_set(but,
achannel_setting_flush_widget_cb,
MEM_dupallocN(ale),
POINTER_FROM_INT(setting));
break;
/* set call to send relevant notifiers and/or perform type-specific updates */
switch (setting) {
/* Settings needing flushing up/down hierarchy. */
case ACHANNEL_SETTING_VISIBLE: /* Graph Editor - 'visibility' toggles */
case ACHANNEL_SETTING_PROTECT: /* General - protection flags */
case ACHANNEL_SETTING_MUTE: /* General - muting flags */
case ACHANNEL_SETTING_PINNED: /* NLA Actions - 'map/nomap' */
case ACHANNEL_SETTING_MOD_OFF:
case ACHANNEL_SETTING_ALWAYS_VISIBLE:
UI_but_funcN_set(
but, achannel_setting_flush_widget_cb, MEM_dupallocN(ale), POINTER_FROM_INT(setting));
break;
/* settings needing special attention */
case ACHANNEL_SETTING_SOLO: /* NLA Tracks - Solo toggle */
UI_but_funcN_set(but, achannel_nlatrack_solo_widget_cb, MEM_dupallocN(ale), NULL);
break;
/* settings needing special attention */
case ACHANNEL_SETTING_SOLO: /* NLA Tracks - Solo toggle */
UI_but_funcN_set(but, achannel_nlatrack_solo_widget_cb, MEM_dupallocN(ale), NULL);
break;
/* no flushing */
case ACHANNEL_SETTING_EXPAND: /* expanding - cannot flush,
* otherwise all would open/close at once */
default:
UI_but_func_set(but, achannel_setting_widget_cb, NULL, NULL);
break;
}
/* no flushing */
case ACHANNEL_SETTING_EXPAND: /* expanding - cannot flush,
* otherwise all would open/close at once */
default:
UI_but_func_set(but, achannel_setting_widget_cb, NULL, NULL);
break;
}
if ((ale->fcurve_owner_id != NULL && !BKE_id_is_editable(ac->bmain, ale->fcurve_owner_id)) ||
(ale->fcurve_owner_id == NULL && ale->id != NULL &&
!BKE_id_is_editable(ac->bmain, ale->id))) {
if (setting != ACHANNEL_SETTING_EXPAND) {
UI_but_disable(but, TIP_("Can't edit this property from a linked data-block"));
}
}
if ((ale->fcurve_owner_id != NULL && !BKE_id_is_editable(ac->bmain, ale->fcurve_owner_id)) ||
(ale->fcurve_owner_id == NULL && ale->id != NULL &&
!BKE_id_is_editable(ac->bmain, ale->id))) {
if (setting != ACHANNEL_SETTING_EXPAND) {
UI_but_disable(but, TIP_("Can't edit this property from a linked data-block"));
}
}
}

View File

@ -127,6 +127,7 @@ class AbstractViewItem {
* If this wasn't done, the behavior of items is undefined.
*/
AbstractView *view_ = nullptr;
bool is_interactive_ = true;
bool is_active_ = false;
bool is_renaming_ = false;
@ -171,6 +172,11 @@ class AbstractViewItem {
/** Get the view this item is registered for using #AbstractView::register_item(). */
AbstractView &get_view() const;
/** Disable interaction with this item: the buttons drawn will be disabled and there
 * will be no mouse-hover feedback for the view row. */
void disable_interaction();
bool is_interactive() const;
/**
* Requires the view to have completed reconstruction, see #is_reconstructed(). Otherwise we
* can't be sure about the item state.

View File

@ -3254,6 +3254,7 @@ void UI_interface_tag_script_reload(void);
/* Support click-drag motion which presses the button and closes a popover (like a menu). */
#define USE_UI_POPOVER_ONCE
bool UI_view_item_is_interactive(const uiViewItemHandle *item_handle);
bool UI_view_item_is_active(const uiViewItemHandle *item_handle);
bool UI_view_item_matches(const uiViewItemHandle *a_handle, const uiViewItemHandle *b_handle);
/**

View File

@ -108,6 +108,8 @@ using TreeViewOrItem = TreeViewItemContainer;
* \{ */
class AbstractTreeView : public AbstractView, public TreeViewItemContainer {
int min_rows_ = 0;
friend class AbstractTreeViewItem;
friend class TreeViewBuilder;
@ -116,6 +118,12 @@ class AbstractTreeView : public AbstractView, public TreeViewItemContainer {
void foreach_item(ItemIterFn iter_fn, IterOptions options = IterOptions::None) const;
/** Visual feature: Define a number of item rows the view will always show at minimum. If there
* are fewer items, empty dummy items will be added. These contribute to the view bounds, so the
* drop target of the view includes them, but they are not interactive (e.g. no mouse-hover
* highlight). */
void set_min_rows(int min_rows);
protected:
virtual void build_tree() = 0;
@ -308,6 +316,9 @@ class BasicTreeViewItem : public AbstractTreeViewItem {
class TreeViewBuilder {
public:
static void build_tree_view(AbstractTreeView &tree_view, uiLayout &layout);
private:
static void ensure_min_rows_items(AbstractTreeView &tree_view);
};
/** \} */
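A minimal sketch of a view opting into the minimum-row behavior (class name and item label are hypothetical; assumes Blender's UI_tree_view.hh):

#include "UI_tree_view.hh"

namespace blender::ui {

class PaddedTreeView : public AbstractTreeView {
 public:
  PaddedTreeView()
  {
    /* TreeViewBuilder::ensure_min_rows_items() pads the view with empty,
     * non-interactive dummy items whenever fewer than 5 rows are visible. */
    this->set_min_rows(5);
  }

 protected:
  void build_tree() override
  {
    this->add_tree_item<BasicTreeViewItem>("Only item");
  }
};

}  // namespace blender::ui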

View File

@ -90,6 +90,10 @@ bool ui_but_is_interactive_ex(const uiBut *but, const bool labeledit, const bool
if ((but->type == UI_BTYPE_LISTROW) && labeledit) {
return false;
}
if (but->type == UI_BTYPE_VIEW_ITEM) {
const uiButViewItem *but_item = static_cast<const uiButViewItem *>(but);
return UI_view_item_is_interactive(but_item->view_item);
}
return true;
}

View File

@ -208,6 +208,16 @@ AbstractView &AbstractViewItem::get_view() const
return *view_;
}
void AbstractViewItem::disable_interaction()
{
is_interactive_ = false;
}
bool AbstractViewItem::is_interactive() const
{
return is_interactive_;
}
bool AbstractViewItem::is_active() const
{
BLI_assert_msg(get_view().is_reconstructed(),
@ -282,6 +292,12 @@ class ViewItemAPIWrapper {
using namespace blender::ui;
bool UI_view_item_is_interactive(const uiViewItemHandle *item_handle)
{
const AbstractViewItem &item = reinterpret_cast<const AbstractViewItem &>(*item_handle);
return item.is_interactive();
}
bool UI_view_item_is_active(const uiViewItemHandle *item_handle)
{
const AbstractViewItem &item = reinterpret_cast<const AbstractViewItem &>(*item_handle);

View File

@ -70,6 +70,11 @@ void AbstractTreeView::foreach_item(ItemIterFn iter_fn, IterOptions options) con
foreach_item_recursive(iter_fn, options);
}
void AbstractTreeView::set_min_rows(int min_rows)
{
min_rows_ = min_rows;
}
void AbstractTreeView::update_children_from_old(const AbstractView &old_view)
{
const AbstractTreeView &old_tree_view = dynamic_cast<const AbstractTreeView &>(old_view);
@ -455,6 +460,10 @@ void TreeViewLayoutBuilder::build_row(AbstractTreeViewItem &item) const
uiLayout *overlap = uiLayoutOverlap(&prev_layout);
if (!item.is_interactive_) {
uiLayoutSetActive(overlap, false);
}
uiLayoutRow(overlap, false);
/* Every item gets one! Other buttons can be overlapped on top. */
item.add_treerow_button(block_);
@ -490,6 +499,23 @@ uiLayout &TreeViewLayoutBuilder::current_layout() const
/* ---------------------------------------------------------------------- */
void TreeViewBuilder::ensure_min_rows_items(AbstractTreeView &tree_view)
{
int tot_visible_items = 0;
tree_view.foreach_item(
[&tot_visible_items](AbstractTreeViewItem & /*item*/) { tot_visible_items++; },
AbstractTreeView::IterOptions::SkipCollapsed);
if (tot_visible_items >= tree_view.min_rows_) {
return;
}
for (int i = 0; i < (tree_view.min_rows_ - tot_visible_items); i++) {
BasicTreeViewItem &new_item = tree_view.add_tree_item<BasicTreeViewItem>("");
new_item.disable_interaction();
}
}
void TreeViewBuilder::build_tree_view(AbstractTreeView &tree_view, uiLayout &layout)
{
uiBlock &block = *uiLayoutGetBlock(&layout);
@ -498,6 +524,8 @@ void TreeViewBuilder::build_tree_view(AbstractTreeView &tree_view, uiLayout &lay
tree_view.update_from_old(block);
tree_view.change_state_delayed();
ensure_min_rows_items(tree_view);
/* Ensure the given layout is actually active. */
UI_block_layout_set_current(&block, &layout);

View File

@ -2,7 +2,6 @@
#include "BLI_array_utils.hh"
#include "BLI_index_mask.hh"
#include "BLI_user_counter.hh"
#include "BKE_attribute.hh"
#include "BKE_attribute_math.hh"
@ -69,7 +68,7 @@ static void add_new_edges(Mesh &mesh,
/* Store a copy of the IDs locally, since removing the existing attributes can also free the
 * names; the attribute API does not provide pointer stability. */
Vector<std::string> named_ids;
Vector<UserCounter<const bke::AnonymousAttributeID>> anonymous_ids;
Vector<bke::AutoAnonymousAttributeID> anonymous_ids;
for (const bke::AttributeIDRef &id : attributes.all_ids()) {
if (attributes.lookup_meta_data(id)->domain != ATTR_DOMAIN_EDGE) {
continue;
@ -82,14 +81,14 @@ static void add_new_edges(Mesh &mesh,
}
else {
anonymous_ids.append(&id.anonymous_id());
id.anonymous_id().user_add();
id.anonymous_id().add_user();
}
}
Vector<bke::AttributeIDRef> local_edge_ids;
for (const StringRef name : named_ids) {
local_edge_ids.append(name);
}
for (const UserCounter<const bke::AnonymousAttributeID> &id : anonymous_ids) {
for (const bke::AutoAnonymousAttributeID &id : anonymous_ids) {
local_edge_ids.append(*id);
}

View File

@ -229,8 +229,8 @@ struct GatherTasks {
/* Volumes only have very simple support currently. Only the first found volume is put into the
* output. */
UserCounter<const VolumeComponent> first_volume;
UserCounter<const GeometryComponentEditData> first_edit_data;
ImplicitSharingPtr<const VolumeComponent> first_volume;
ImplicitSharingPtr<const GeometryComponentEditData> first_edit_data;
};
/** Current offsets during the gather operation. */
@ -611,7 +611,7 @@ static void gather_realize_tasks_recursive(GatherTasksInfo &gather_info,
case GEO_COMPONENT_TYPE_VOLUME: {
const VolumeComponent *volume_component = static_cast<const VolumeComponent *>(component);
if (!gather_info.r_tasks.first_volume) {
volume_component->user_add();
volume_component->add_user();
gather_info.r_tasks.first_volume = volume_component;
}
break;
@ -620,7 +620,7 @@ static void gather_realize_tasks_recursive(GatherTasksInfo &gather_info,
const GeometryComponentEditData *edit_component =
static_cast<const GeometryComponentEditData *>(component);
if (!gather_info.r_tasks.first_edit_data) {
edit_component->user_add();
edit_component->add_user();
gather_info.r_tasks.first_edit_data = edit_component;
}
break;

View File

@ -832,6 +832,7 @@ if(WITH_GTESTS)
set(TEST_SRC
tests/gpu_testing.cc
tests/framebuffer_test.cc
tests/index_buffer_test.cc
tests/push_constants_test.cc
tests/shader_test.cc

View File

@ -0,0 +1,200 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "testing/testing.h"
#include "GPU_framebuffer.h"
#include "gpu_testing.hh"
#include "BLI_math_vector.hh"
namespace blender::gpu::tests {
static void test_framebuffer_clear_color_single_attachment()
{
const int2 size(10, 10);
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture = GPU_texture_create_2d(
__func__, UNPACK2(size), 1, GPU_RGBA32F, usage, nullptr);
GPUFrameBuffer *framebuffer = GPU_framebuffer_create(__func__);
GPU_framebuffer_ensure_config(&framebuffer,
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(texture)});
GPU_framebuffer_bind(framebuffer);
const float4 clear_color(0.1f, 0.2f, 0.5f, 1.0f);
GPU_framebuffer_clear_color(framebuffer, clear_color);
GPU_finish();
float4 *read_data = static_cast<float4 *>(GPU_texture_read(texture, GPU_DATA_FLOAT, 0));
for (float4 pixel_color : Span<float4>(read_data, size.x * size.y)) {
EXPECT_EQ(pixel_color, clear_color);
}
MEM_freeN(read_data);
GPU_framebuffer_free(framebuffer);
GPU_texture_free(texture);
}
GPU_TEST(framebuffer_clear_color_single_attachment);
static void test_framebuffer_clear_color_multiple_attachments()
{
const int2 size(10, 10);
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture1 = GPU_texture_create_2d(
__func__, UNPACK2(size), 1, GPU_RGBA32F, usage, nullptr);
GPUTexture *texture2 = GPU_texture_create_2d(
__func__, UNPACK2(size), 1, GPU_RGBA32UI, usage, nullptr);
GPUFrameBuffer *framebuffer = GPU_framebuffer_create(__func__);
GPU_framebuffer_ensure_config(
&framebuffer,
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(texture1), GPU_ATTACHMENT_TEXTURE(texture2)});
GPU_framebuffer_bind(framebuffer);
const float4 clear_color(0.1f, 0.2f, 0.5f, 1.0f);
GPU_framebuffer_clear_color(framebuffer, clear_color);
GPU_finish();
float4 *read_data1 = static_cast<float4 *>(GPU_texture_read(texture1, GPU_DATA_FLOAT, 0));
for (float4 pixel_color : Span<float4>(read_data1, size.x * size.y)) {
EXPECT_EQ(pixel_color, clear_color);
}
MEM_freeN(read_data1);
uint4 *read_data2 = static_cast<uint4 *>(GPU_texture_read(texture2, GPU_DATA_UINT, 0));
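/* Bit patterns of 0.1f, 0.2f, 0.5f and 1.0f: texture2 is GPU_RGBA32UI, so the
 * float clear color is expected to be reinterpreted rather than converted. */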
uint4 clear_color_uint(1036831949, 1045220557, 1056964608, 1065353216);
for (uint4 pixel_color : Span<uint4>(read_data2, size.x * size.y)) {
EXPECT_EQ(pixel_color, clear_color_uint);
}
MEM_freeN(read_data2);
GPU_framebuffer_free(framebuffer);
GPU_texture_free(texture1);
GPU_texture_free(texture2);
}
GPU_TEST(framebuffer_clear_color_multiple_attachments);
static void test_framebuffer_clear_multiple_color_multiple_attachments()
{
const int2 size(10, 10);
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture1 = GPU_texture_create_2d(
__func__, UNPACK2(size), 1, GPU_RGBA32F, usage, nullptr);
GPUTexture *texture2 = GPU_texture_create_2d(
__func__, UNPACK2(size), 1, GPU_RGBA32F, usage, nullptr);
GPUFrameBuffer *framebuffer = GPU_framebuffer_create(__func__);
GPU_framebuffer_ensure_config(
&framebuffer,
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(texture1), GPU_ATTACHMENT_TEXTURE(texture2)});
GPU_framebuffer_bind(framebuffer);
const float4 clear_color[2] = {float4(0.1f, 0.2f, 0.5f, 1.0f), float4(0.5f, 0.2f, 0.1f, 1.0f)};
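/* float4 has the memory layout of float[4], so the two-element array can be
 * passed to the C-style `const float (*)[4]` parameter via the void cast. */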
GPU_framebuffer_multi_clear(
framebuffer, static_cast<const float(*)[4]>(static_cast<const void *>(clear_color)));
GPU_finish();
float4 *read_data1 = static_cast<float4 *>(GPU_texture_read(texture1, GPU_DATA_FLOAT, 0));
for (float4 pixel_color : Span<float4>(read_data1, size.x * size.y)) {
EXPECT_EQ(pixel_color, clear_color[0]);
}
MEM_freeN(read_data1);
float4 *read_data2 = static_cast<float4 *>(GPU_texture_read(texture2, GPU_DATA_FLOAT, 0));
for (float4 pixel_color : Span<float4>(read_data2, size.x * size.y)) {
EXPECT_EQ(pixel_color, clear_color[1]);
}
MEM_freeN(read_data2);
GPU_framebuffer_free(framebuffer);
GPU_texture_free(texture1);
GPU_texture_free(texture2);
}
GPU_TEST(framebuffer_clear_multiple_color_multiple_attachments);
static void test_framebuffer_clear_depth()
{
const int2 size(10, 10);
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture = GPU_texture_create_2d(
__func__, UNPACK2(size), 1, GPU_DEPTH_COMPONENT32F, usage, nullptr);
GPUFrameBuffer *framebuffer = GPU_framebuffer_create(__func__);
GPU_framebuffer_ensure_config(&framebuffer, {GPU_ATTACHMENT_TEXTURE(texture)});
GPU_framebuffer_bind(framebuffer);
const float clear_depth = 0.5f;
GPU_framebuffer_clear_depth(framebuffer, clear_depth);
GPU_finish();
float *read_data = static_cast<float *>(GPU_texture_read(texture, GPU_DATA_FLOAT, 0));
for (float pixel_depth : Span<float>(read_data, size.x * size.y)) {
EXPECT_EQ(pixel_depth, clear_depth);
}
MEM_freeN(read_data);
GPU_framebuffer_free(framebuffer);
GPU_texture_free(texture);
}
GPU_TEST(framebuffer_clear_depth);
static void test_framebuffer_scissor_test()
{
const int2 size(128, 128);
const int bar_size = 16;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture = GPU_texture_create_2d(
__func__, UNPACK2(size), 1, GPU_RGBA32F, usage, nullptr);
GPUFrameBuffer *framebuffer = GPU_framebuffer_create(__func__);
GPU_framebuffer_ensure_config(&framebuffer,
{GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(texture)});
GPU_framebuffer_bind(framebuffer);
const float4 color1(0.0f);
const float4 color2(0.5f);
const float4 color3(1.0f);
GPU_framebuffer_clear_color(framebuffer, color1);
GPU_scissor_test(true);
for (int x = 0; x < size.x; x += 2 * bar_size) {
GPU_scissor(x, 0, bar_size, size.y);
GPU_framebuffer_clear_color(framebuffer, color2);
}
for (int y = 0; y < size.y; y += 2 * bar_size) {
GPU_scissor(0, y, size.x, bar_size);
GPU_framebuffer_clear_color(framebuffer, color3);
}
GPU_scissor_test(false);
GPU_finish();
float4 *read_data = static_cast<float4 *>(GPU_texture_read(texture, GPU_DATA_FLOAT, 0));
int offset = 0;
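/* Expected pattern: the horizontal bars (color3) were cleared last, so they
 * overwrite the vertical bars (color2) where they cross; the remaining cells
 * keep the base clear color (color1). */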
for (float4 pixel_color : Span<float4>(read_data, size.x * size.y)) {
int x = offset % size.x;
int y = offset / size.x;
int bar_x = x / bar_size;
int bar_y = y / bar_size;
if (bar_y % 2 == 0) {
EXPECT_EQ(pixel_color, color3);
}
else {
if (bar_x % 2 == 0) {
EXPECT_EQ(pixel_color, color2);
}
else {
EXPECT_EQ(pixel_color, color1);
}
}
offset++;
}
MEM_freeN(read_data);
GPU_framebuffer_free(framebuffer);
GPU_texture_free(texture);
}
GPU_TEST(framebuffer_scissor_test);
} // namespace blender::gpu::tests

View File

@ -8,6 +8,7 @@
#include "vk_command_buffer.hh"
#include "vk_buffer.hh"
#include "vk_context.hh"
#include "vk_framebuffer.hh"
#include "vk_memory.hh"
#include "vk_pipeline.hh"
#include "vk_texture.hh"
@ -73,6 +74,21 @@ void VKCommandBuffer::bind(const VKDescriptorSet &descriptor_set,
vk_command_buffer_, bind_point, vk_pipeline_layout, 0, 1, &vk_descriptor_set, 0, 0);
}
void VKCommandBuffer::begin_render_pass(const VKFrameBuffer &framebuffer)
{
VkRenderPassBeginInfo render_pass_begin_info = {};
render_pass_begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
render_pass_begin_info.renderPass = framebuffer.vk_render_pass_get();
render_pass_begin_info.framebuffer = framebuffer.vk_framebuffer_get();
render_pass_begin_info.renderArea = framebuffer.vk_render_area_get();
vkCmdBeginRenderPass(vk_command_buffer_, &render_pass_begin_info, VK_SUBPASS_CONTENTS_INLINE);
}
void VKCommandBuffer::end_render_pass(const VKFrameBuffer & /*framebuffer*/)
{
vkCmdEndRenderPass(vk_command_buffer_);
}
void VKCommandBuffer::push_constants(const VKPushConstants &push_constants,
const VkPipelineLayout vk_pipeline_layout,
const VkShaderStageFlags vk_shader_stages)
@ -98,7 +114,7 @@ void VKCommandBuffer::copy(VKBuffer &dst_buffer,
{
vkCmdCopyImageToBuffer(vk_command_buffer_,
src_texture.vk_image_handle(),
VK_IMAGE_LAYOUT_GENERAL,
src_texture.current_layout_get(),
dst_buffer.vk_handle(),
regions.size(),
regions.data());
@ -110,7 +126,7 @@ void VKCommandBuffer::copy(VKTexture &dst_texture,
vkCmdCopyBufferToImage(vk_command_buffer_,
src_buffer.vk_handle(),
dst_texture.vk_image_handle(),
VK_IMAGE_LAYOUT_GENERAL,
dst_texture.current_layout_get(),
regions.size(),
regions.data());
}
@ -128,6 +144,12 @@ void VKCommandBuffer::clear(VkImage vk_image,
ranges.data());
}
void VKCommandBuffer::clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas)
{
vkCmdClearAttachments(
vk_command_buffer_, attachments.size(), attachments.data(), areas.size(), areas.data());
}
void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages)
{

View File

@ -14,10 +14,11 @@
namespace blender::gpu {
class VKBuffer;
class VKTexture;
class VKPushConstants;
class VKPipeline;
class VKDescriptorSet;
class VKFrameBuffer;
class VKPipeline;
class VKPushConstants;
class VKTexture;
/** Wrapper that keeps track of the life-time of a Vulkan command buffer. */
class VKCommandBuffer : NonCopyable, NonMovable {
@ -39,6 +40,9 @@ class VKCommandBuffer : NonCopyable, NonMovable {
void bind(const VKDescriptorSet &descriptor_set,
const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point);
void begin_render_pass(const VKFrameBuffer &framebuffer);
void end_render_pass(const VKFrameBuffer &framebuffer);
/**
* Add a push constant command to the command buffer.
*
@ -61,6 +65,11 @@ class VKCommandBuffer : NonCopyable, NonMovable {
VkImageLayout vk_image_layout,
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges);
/**
* Clear attachments of the active framebuffer.
*/
void clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas);
void fill(VKBuffer &buffer, uint32_t data);
/**

View File

@ -307,4 +307,46 @@ VkComponentMapping to_vk_component_mapping(const eGPUTextureFormat /*format*/)
return component_mapping;
}
template<typename T> void copy_color(T dst[4], const T *src)
{
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
VkClearColorValue to_vk_clear_color_value(const eGPUDataFormat format, const void *data)
{
VkClearColorValue result = {0.0f};
switch (format) {
case GPU_DATA_FLOAT: {
const float *float_data = static_cast<const float *>(data);
copy_color<float>(result.float32, float_data);
break;
}
case GPU_DATA_INT: {
const int32_t *int_data = static_cast<const int32_t *>(data);
copy_color<int32_t>(result.int32, int_data);
break;
}
case GPU_DATA_UINT: {
const uint32_t *uint_data = static_cast<const uint32_t *>(data);
copy_color<uint32_t>(result.uint32, uint_data);
break;
}
case GPU_DATA_HALF_FLOAT:
case GPU_DATA_UBYTE:
case GPU_DATA_UINT_24_8:
case GPU_DATA_10_11_11_REV:
case GPU_DATA_2_10_10_10_REV: {
BLI_assert_unreachable();
break;
}
}
return result;
}
} // namespace blender::gpu

View File

@ -24,5 +24,6 @@ VkFormat to_vk_format(const eGPUTextureFormat format);
VkComponentMapping to_vk_component_mapping(const eGPUTextureFormat format);
VkImageViewType to_vk_image_view_type(const eGPUTextureType type);
VkImageType to_vk_image_type(const eGPUTextureType type);
VkClearColorValue to_vk_clear_color_value(const eGPUDataFormat format, const void *data);
} // namespace blender::gpu

View File

@ -51,7 +51,7 @@ VKContext::VKContext(void *ghost_window, void *ghost_context)
VKBackend::capabilities_init(*this);
/* For off-screen contexts. Default frame-buffer is empty. */
active_fb = back_left = new VKFrameBuffer("back_left");
back_left = new VKFrameBuffer("back_left");
}
VKContext::~VKContext()
@ -71,19 +71,24 @@ void VKContext::activate()
{
if (ghost_window_) {
VkImage image; /* TODO will be used for reading later... */
VkFramebuffer framebuffer;
VkFramebuffer vk_framebuffer;
VkRenderPass render_pass;
VkExtent2D extent;
uint32_t fb_id;
GHOST_GetVulkanBackbuffer(
(GHOST_WindowHandle)ghost_window_, &image, &framebuffer, &render_pass, &extent, &fb_id);
(GHOST_WindowHandle)ghost_window_, &image, &vk_framebuffer, &render_pass, &extent, &fb_id);
/* Recreate the gpu::VKFrameBuffer wrapper after every swap. */
if (has_active_framebuffer()) {
deactivate_framebuffer();
}
delete back_left;
back_left = new VKFrameBuffer("back_left", framebuffer, render_pass, extent);
active_fb = back_left;
VKFrameBuffer *framebuffer = new VKFrameBuffer(
"back_left", vk_framebuffer, render_pass, extent);
back_left = framebuffer;
framebuffer->bind(false);
}
}
@ -113,6 +118,9 @@ void VKContext::flush()
void VKContext::finish()
{
if (has_active_framebuffer()) {
deactivate_framebuffer();
}
command_buffer_.submit();
}
@ -120,4 +128,28 @@ void VKContext::memory_statistics_get(int * /*total_mem*/, int * /*free_mem*/)
{
}
void VKContext::activate_framebuffer(VKFrameBuffer &framebuffer)
{
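/* Only one render pass can be active at a time: end the previous one before
 * beginning a render pass for the newly activated framebuffer. */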
if (has_active_framebuffer()) {
deactivate_framebuffer();
}
BLI_assert(active_fb == nullptr);
active_fb = &framebuffer;
command_buffer_.begin_render_pass(framebuffer);
}
bool VKContext::has_active_framebuffer() const
{
return active_fb != nullptr;
}
void VKContext::deactivate_framebuffer()
{
BLI_assert(active_fb != nullptr);
VKFrameBuffer *framebuffer = unwrap(active_fb);
command_buffer_.end_render_pass(*framebuffer);
active_fb = nullptr;
}
} // namespace blender::gpu

View File

@ -13,6 +13,7 @@
#include "vk_descriptor_pools.hh"
namespace blender::gpu {
class VKFrameBuffer;
class VKContext : public Context {
private:
@ -55,6 +56,9 @@ class VKContext : public Context {
bool debug_capture_scope_begin(void *scope) override;
void debug_capture_scope_end(void *scope) override;
void activate_framebuffer(VKFrameBuffer &framebuffer);
void deactivate_framebuffer();
static VKContext *get(void)
{
return static_cast<VKContext *>(Context::get());
@ -102,6 +106,8 @@ class VKContext : public Context {
private:
void init_physical_device_limits();
bool has_active_framebuffer() const;
};
} // namespace blender::gpu

View File

@ -6,6 +6,8 @@
*/
#include "vk_framebuffer.hh"
#include "vk_memory.hh"
#include "vk_texture.hh"
namespace blender::gpu {
@ -20,7 +22,7 @@ VKFrameBuffer::VKFrameBuffer(const char *name) : FrameBuffer(name)
VKFrameBuffer::VKFrameBuffer(const char *name,
VkFramebuffer vk_framebuffer,
VkRenderPass /*vk_render_pass*/,
VkRenderPass vk_render_pass,
VkExtent2D vk_extent)
: FrameBuffer(name)
{
@ -30,6 +32,7 @@ VKFrameBuffer::VKFrameBuffer(const char *name,
width_ = vk_extent.width;
height_ = vk_extent.height;
vk_framebuffer_ = vk_framebuffer;
vk_render_pass_ = vk_render_pass;
viewport_[0] = scissor_[0] = 0;
viewport_[1] = scissor_[1] = 0;
@ -39,8 +42,8 @@ VKFrameBuffer::VKFrameBuffer(const char *name,
VKFrameBuffer::~VKFrameBuffer()
{
if (!immutable_ && vk_framebuffer_ != VK_NULL_HANDLE) {
vkDestroyFramebuffer(vk_device_, vk_framebuffer_, NULL);
if (!immutable_) {
render_pass_free();
}
}
@ -48,6 +51,32 @@ VKFrameBuffer::~VKFrameBuffer()
void VKFrameBuffer::bind(bool /*enabled_srgb*/)
{
update_attachments();
VKContext &context = *VKContext::get();
context.activate_framebuffer(*this);
}
VkRect2D VKFrameBuffer::vk_render_area_get() const
{
VkRect2D render_area = {};
if (scissor_test_get()) {
int scissor_rect[4];
scissor_get(scissor_rect);
render_area.offset.x = scissor_rect[0];
render_area.offset.y = scissor_rect[1];
render_area.extent.width = scissor_rect[2];
render_area.extent.height = scissor_rect[3];
}
else {
render_area.offset.x = 0;
render_area.offset.y = 0;
render_area.extent.width = width_;
render_area.extent.height = height_;
}
return render_area;
}
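A worked example of the render-area logic above (values hypothetical):

/* Scissor test enabled, scissor_get() yielding {10, 20, 256, 128}:
 *   render_area.offset = {10, 20}, render_area.extent = {256, 128}.
 * Scissor test disabled: offset = {0, 0}, extent = {width_, height_}. */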
bool VKFrameBuffer::check(char /*err_out*/[256])
@ -55,29 +84,110 @@ bool VKFrameBuffer::check(char /*err_out*/[256])
return false;
}
void VKFrameBuffer::clear(eGPUFrameBufferBits /*buffers*/,
const float /*clear_col*/[4],
float /*clear_depth*/,
uint /*clear_stencil*/)
void VKFrameBuffer::build_clear_attachments_depth_stencil(
const eGPUFrameBufferBits buffers,
float clear_depth,
uint32_t clear_stencil,
Vector<VkClearAttachment> &r_attachments) const
{
VkClearAttachment clear_attachment = {};
clear_attachment.aspectMask = (buffers & GPU_DEPTH_BIT ? VK_IMAGE_ASPECT_DEPTH_BIT : 0) |
(buffers & GPU_STENCIL_BIT ? VK_IMAGE_ASPECT_STENCIL_BIT : 0);
clear_attachment.clearValue.depthStencil.depth = clear_depth;
clear_attachment.clearValue.depthStencil.stencil = clear_stencil;
r_attachments.append(clear_attachment);
}
void VKFrameBuffer::clear_multi(const float (*/*clear_col*/)[4])
void VKFrameBuffer::build_clear_attachments_color(const float (*clear_colors)[4],
const bool multi_clear_colors,
Vector<VkClearAttachment> &r_attachments) const
{
int color_index = 0;
for (int color_slot = 0; color_slot < GPU_FB_MAX_COLOR_ATTACHMENT; color_slot++) {
const GPUAttachment &attachment = attachments_[GPU_FB_COLOR_ATTACHMENT0 + color_slot];
if (attachment.tex == nullptr) {
continue;
}
VkClearAttachment clear_attachment = {};
clear_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
clear_attachment.colorAttachment = color_slot;
eGPUDataFormat data_format = to_data_format(GPU_texture_format(attachment.tex));
clear_attachment.clearValue.color = to_vk_clear_color_value(data_format,
&clear_colors[color_index]);
r_attachments.append(clear_attachment);
color_index += multi_clear_colors ? 1 : 0;
}
}
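The color_index stepping above means a single clear color is reused for every bound attachment, while per-attachment colors advance in lock-step with the color slots. A hedged usage sketch (color values arbitrary, two bound color attachments assumed):

const float colors[2][4] = {{1.0f, 0.0f, 0.0f, 1.0f}, {0.0f, 1.0f, 0.0f, 1.0f}};
framebuffer.clear_multi(colors); /* multi_clear_colors == true: colors[i] clears the i-th bound slot. */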
/* -------------------------------------------------------------------- */
/** \name Clear
* \{ */
void VKFrameBuffer::clear(const Vector<VkClearAttachment> &attachments) const
{
VkClearRect clear_rect = {};
clear_rect.rect = vk_render_area_get();
clear_rect.baseArrayLayer = 0;
clear_rect.layerCount = 1;
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.clear(attachments, Span<VkClearRect>(&clear_rect, 1));
}
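VKCommandBuffer::clear() isn't shown in this diff; presumably it records the standard Vulkan attachment clear, which is only valid inside a render pass instance, hence the framebuffer must be bound first:

/* Assumed underlying call (core Vulkan API):
 * vkCmdClearAttachments(VkCommandBuffer commandBuffer,
 *                       uint32_t attachmentCount, const VkClearAttachment *pAttachments,
 *                       uint32_t rectCount, const VkClearRect *pRects); */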
void VKFrameBuffer::clear(const eGPUFrameBufferBits buffers,
const float clear_color[4],
float clear_depth,
uint clear_stencil)
{
Vector<VkClearAttachment> attachments;
if (buffers & (GPU_DEPTH_BIT | GPU_STENCIL_BIT)) {
build_clear_attachments_depth_stencil(buffers, clear_depth, clear_stencil, attachments);
}
if (buffers & GPU_COLOR_BIT) {
float clear_color_single[4];
copy_v4_v4(clear_color_single, clear_color);
build_clear_attachments_color(&clear_color_single, false, attachments);
}
clear(attachments);
}
void VKFrameBuffer::clear_multi(const float (*clear_color)[4])
{
Vector<VkClearAttachment> attachments;
build_clear_attachments_color(clear_color, true, attachments);
clear(attachments);
}
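Both overrides funnel into the private clear() above. A minimal usage sketch (values hypothetical):

const float color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
framebuffer.clear(GPU_COLOR_BIT | GPU_DEPTH_BIT, color, 1.0f, 0); /* One color for all slots. */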
void VKFrameBuffer::clear_attachment(GPUAttachmentType /*type*/,
eGPUDataFormat /*data_format*/,
const void * /*clear_value*/)
{
/* Clearing a single attachment was added to implement `clear_multi` in OpenGL. As
 * `clear_multi` is supported natively in Vulkan, this method doesn't need to be
 * implemented. */
BLI_assert_unreachable();
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Load/Store operations
* \{ */
void VKFrameBuffer::attachment_set_loadstore_op(GPUAttachmentType /*type*/,
eGPULoadOp /*load_action*/,
eGPUStoreOp /*store_action*/)
{
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Read back
* \{ */
void VKFrameBuffer::read(eGPUFrameBufferBits /*planes*/,
eGPUDataFormat /*format*/,
const int /*area*/[4],
@ -87,6 +197,12 @@ void VKFrameBuffer::read(eGPUFrameBufferBits /*planes*/,
{
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Blit operations
* \{ */
void VKFrameBuffer::blit_to(eGPUFrameBufferBits /*planes*/,
int /*src_slot*/,
FrameBuffer * /*dst*/,
@ -96,4 +212,169 @@ void VKFrameBuffer::blit_to(eGPUFrameBufferBits /*planes*/,
{
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Update attachments
* \{ */
void VKFrameBuffer::update_attachments()
{
if (immutable_) {
return;
}
if (!dirty_attachments_) {
return;
}
render_pass_free();
render_pass_create();
dirty_attachments_ = false;
}
void VKFrameBuffer::render_pass_create()
{
BLI_assert(!immutable_);
BLI_assert(vk_render_pass_ == VK_NULL_HANDLE);
BLI_assert(vk_framebuffer_ == VK_NULL_HANDLE);
VK_ALLOCATION_CALLBACKS
/* Track the first attachment for size. */
GPUAttachmentType first_attachment = GPU_FB_MAX_ATTACHMENT;
std::array<VkAttachmentDescription, GPU_FB_MAX_ATTACHMENT> attachment_descriptions;
std::array<VkImageView, GPU_FB_MAX_ATTACHMENT> image_views;
std::array<VkAttachmentReference, GPU_FB_MAX_ATTACHMENT> attachment_references;
bool has_depth_attachment = false;
bool found_attachment = false;
int depth_location = -1;
for (int type = GPU_FB_MAX_ATTACHMENT - 1; type >= 0; type--) {
GPUAttachment &attachment = attachments_[type];
if (attachment.tex == nullptr && !found_attachment) {
/* Move the depth texture to the next binding point after all color textures. The binding
* location of the color textures should be kept in sync between ShaderCreateInfos and the
* framebuffer attachments. The depth buffer should be the last slot. */
depth_location = max_ii(type - GPU_FB_COLOR_ATTACHMENT0, 0);
continue;
}
found_attachment |= attachment.tex != nullptr;
/* Set the first attachment to the first color attachment, or to the depth buffer when
 * there is no color attachment. */
if (attachment.tex != nullptr &&
(first_attachment == GPU_FB_MAX_ATTACHMENT || type >= GPU_FB_COLOR_ATTACHMENT0)) {
first_attachment = static_cast<GPUAttachmentType>(type);
}
int attachment_location = type >= GPU_FB_COLOR_ATTACHMENT0 ? type - GPU_FB_COLOR_ATTACHMENT0 :
depth_location;
if (attachment.tex) {
/* Ensure the texture is allocated so that its image view exists. */
VKTexture &texture = *static_cast<VKTexture *>(unwrap(attachment.tex));
texture.ensure_allocated();
image_views[attachment_location] = texture.vk_image_view_handle();
VkAttachmentDescription &attachment_description =
attachment_descriptions[attachment_location];
attachment_description.flags = 0;
attachment_description.format = to_vk_format(texture.format_get());
attachment_description.samples = VK_SAMPLE_COUNT_1_BIT;
attachment_description.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachment_description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
attachment_description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachment_description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachment_description.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
attachment_description.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
/* Create the attachment reference. */
const bool is_depth_attachment = ELEM(
type, GPU_FB_DEPTH_ATTACHMENT, GPU_FB_DEPTH_STENCIL_ATTACHMENT);
BLI_assert_msg(!is_depth_attachment || !has_depth_attachment,
"There can only be one depth/stencil attachment.");
has_depth_attachment |= is_depth_attachment;
VkAttachmentReference &attachment_reference = attachment_references[attachment_location];
attachment_reference.attachment = attachment_location;
attachment_reference.layout = is_depth_attachment ?
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL :
VK_IMAGE_LAYOUT_GENERAL;
}
}
/* Update the size, viewport & scissor based on the first attachment. */
if (first_attachment != GPU_FB_MAX_ATTACHMENT) {
GPUAttachment &attachment = attachments_[first_attachment];
BLI_assert(attachment.tex);
int size[3];
GPU_texture_get_mipmap_size(attachment.tex, attachment.mip, size);
size_set(size[0], size[1]);
}
else {
this->size_set(0, 0);
}
viewport_reset();
scissor_reset();
/* Create render pass. */
const int attachment_len = has_depth_attachment ? depth_location + 1 : depth_location;
const int color_attachment_len = depth_location;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = color_attachment_len;
subpass.pColorAttachments = attachment_references.begin();
if (has_depth_attachment) {
subpass.pDepthStencilAttachment = &attachment_references[depth_location];
}
VkRenderPassCreateInfo render_pass_info = {};
render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
render_pass_info.attachmentCount = attachment_len;
render_pass_info.pAttachments = attachment_descriptions.data();
render_pass_info.subpassCount = 1;
render_pass_info.pSubpasses = &subpass;
VKContext &context = *VKContext::get();
vkCreateRenderPass(
context.device_get(), &render_pass_info, vk_allocation_callbacks, &vk_render_pass_);
/* We might want to split framebuffer and render pass creation. */
VkFramebufferCreateInfo framebuffer_create_info = {};
framebuffer_create_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebuffer_create_info.renderPass = vk_render_pass_;
framebuffer_create_info.attachmentCount = attachment_len;
framebuffer_create_info.pAttachments = image_views.begin();
framebuffer_create_info.width = width_;
framebuffer_create_info.height = height_;
framebuffer_create_info.layers = 1;
vkCreateFramebuffer(
context.device_get(), &framebuffer_create_info, vk_allocation_callbacks, &vk_framebuffer_);
}
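The reverse iteration and depth_location bookkeeping are easiest to follow with a worked example (attachment set hypothetical):

/* Color textures bound in slots 0 and 1, plus a depth attachment:
 *   color slots 0..1     -> attachment locations 0..1
 *   depth_location       = 2 (first slot after the last color attachment)
 *   color_attachment_len = 2
 *   attachment_len       = 3 (depth_location + 1)
 * Without a depth attachment, attachment_len == depth_location == the color count. */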
void VKFrameBuffer::render_pass_free()
{
BLI_assert(!immutable_);
if (vk_render_pass_ == VK_NULL_HANDLE) {
return;
}
VK_ALLOCATION_CALLBACKS
VKContext &context = *VKContext::get();
vkDestroyRenderPass(context.device_get(), vk_render_pass_, vk_allocation_callbacks);
vkDestroyFramebuffer(context.device_get(), vk_framebuffer_, vk_allocation_callbacks);
vk_render_pass_ = VK_NULL_HANDLE;
vk_framebuffer_ = VK_NULL_HANDLE;
}
/** \} */
} // namespace blender::gpu

View File

@ -7,6 +7,10 @@
#pragma once
#include "BLI_math_vector.hh"
#include "BLI_span.hh"
#include "BLI_vector.hh"
#include "gpu_framebuffer_private.hh"
#include "vk_common.hh"
@ -20,7 +24,7 @@ class VKFrameBuffer : public FrameBuffer {
/* Vulkan device that created the handle. */
VkDevice vk_device_ = VK_NULL_HANDLE;
/* Base render pass used for framebuffer creation. */
VkRenderPass render_pass_ = VK_NULL_HANDLE;
VkRenderPass vk_render_pass_ = VK_NULL_HANDLE;
/* Number of layers if the attachments are layered textures. */
int depth_ = 1;
/** Internal frame-buffers are immutable. */
@ -46,10 +50,10 @@ class VKFrameBuffer : public FrameBuffer {
void bind(bool enabled_srgb) override;
bool check(char err_out[256]) override;
void clear(eGPUFrameBufferBits buffers,
const float clear_col[4],
const float clear_color[4],
float clear_depth,
uint clear_stencil) override;
void clear_multi(const float (*clear_col)[4]) override;
void clear_multi(const float (*clear_color)[4]) override;
void clear_attachment(GPUAttachmentType type,
eGPUDataFormat data_format,
const void *clear_value) override;
@ -71,6 +75,39 @@ class VKFrameBuffer : public FrameBuffer {
int dst_slot,
int dst_offset_x,
int dst_offset_y) override;
VkFramebuffer vk_framebuffer_get() const
{
BLI_assert(vk_framebuffer_ != VK_NULL_HANDLE);
return vk_framebuffer_;
}
VkRenderPass vk_render_pass_get() const
{
BLI_assert(vk_render_pass_ != VK_NULL_HANDLE);
return vk_render_pass_;
}
VkRect2D vk_render_area_get() const;
private:
void update_attachments();
void render_pass_free();
void render_pass_create();
/* Clearing attachments */
void build_clear_attachments_depth_stencil(eGPUFrameBufferBits buffers,
float clear_depth,
uint32_t clear_stencil,
Vector<VkClearAttachment> &r_attachments) const;
void build_clear_attachments_color(const float (*clear_colors)[4],
const bool multi_clear_colors,
Vector<VkClearAttachment> &r_attachments) const;
void clear(const Vector<VkClearAttachment> &attachments) const;
};
static inline VKFrameBuffer *unwrap(FrameBuffer *framebuffer)
{
return static_cast<VKFrameBuffer *>(framebuffer);
}
} // namespace blender::gpu

View File

@ -37,48 +37,6 @@ void VKTexture::copy_to(Texture * /*tex*/)
{
}
template<typename T> void copy_color(T dst[4], const T *src)
{
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static VkClearColorValue to_vk_clear_color_value(eGPUDataFormat format, const void *data)
{
VkClearColorValue result = {{0.0f}};
switch (format) {
case GPU_DATA_FLOAT: {
const float *float_data = static_cast<const float *>(data);
copy_color<float>(result.float32, float_data);
break;
}
case GPU_DATA_INT: {
const int32_t *int_data = static_cast<const int32_t *>(data);
copy_color<int32_t>(result.int32, int_data);
break;
}
case GPU_DATA_UINT: {
const uint32_t *uint_data = static_cast<const uint32_t *>(data);
copy_color<uint32_t>(result.uint32, uint_data);
break;
}
case GPU_DATA_HALF_FLOAT:
case GPU_DATA_UBYTE:
case GPU_DATA_UINT_24_8:
case GPU_DATA_10_11_11_REV:
case GPU_DATA_2_10_10_10_REV: {
BLI_assert_unreachable();
break;
}
}
return result;
}
void VKTexture::clear(eGPUDataFormat format, const void *data)
{
if (!is_allocated()) {
@ -92,9 +50,10 @@ void VKTexture::clear(eGPUDataFormat format, const void *data)
range.aspectMask = to_vk_image_aspect_flag_bits(format_);
range.levelCount = VK_REMAINING_MIP_LEVELS;
range.layerCount = VK_REMAINING_ARRAY_LAYERS;
layout_ensure(context, VK_IMAGE_LAYOUT_GENERAL);
command_buffer.clear(
vk_image_, VK_IMAGE_LAYOUT_GENERAL, clear_color, Span<VkImageSubresourceRange>(&range, 1));
vk_image_, current_layout_get(), clear_color, Span<VkImageSubresourceRange>(&range, 1));
}
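The layout_ensure() call added above matters because of the layout constraint on the assumed underlying clear command:

/* Assumed underlying call (core Vulkan API):
 * vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
 *                      VkImageLayout imageLayout, const VkClearColorValue *pColor,
 *                      uint32_t rangeCount, const VkImageSubresourceRange *pRanges);
 * imageLayout must be VK_IMAGE_LAYOUT_GENERAL or VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
 * which is why the texture is transitioned to GENERAL before the clear is recorded. */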
void VKTexture::swizzle_set(const char /*swizzle_mask*/[4])
@ -111,8 +70,10 @@ void VKTexture::mip_range_set(int /*min*/, int /*max*/)
void *VKTexture::read(int mip, eGPUDataFormat format)
{
/* Vulkan images cannot be directly mapped to host memory and require a staging buffer. */
VKContext &context = *VKContext::get();
layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
/* Vulkan images cannot be directly mapped to host memory and require a staging buffer. */
VKBuffer staging_buffer;
/* NOTE: mip_size_get() won't override any dimension that is equal to 0. */
@ -170,6 +131,7 @@ void VKTexture::update_sub(
region.imageSubresource.mipLevel = mip;
region.imageSubresource.layerCount = 1;
layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(*this, staging_buffer, Span<VkBufferImageCopy>(&region, 1));
command_buffer.submit();
@ -208,11 +170,51 @@ bool VKTexture::init_internal(const GPUTexture * /*src*/, int /*mip_offset*/, in
return false;
}
bool VKTexture::is_allocated()
void VKTexture::ensure_allocated()
{
if (!is_allocated()) {
allocate();
}
}
bool VKTexture::is_allocated() const
{
return vk_image_ != VK_NULL_HANDLE && allocation_ != VK_NULL_HANDLE;
}
static VkImageUsageFlagBits to_vk_image_usage(const eGPUTextureUsage usage,
const eGPUTextureFormatFlag format_flag)
{
VkImageUsageFlagBits result = static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT);
if (usage & GPU_TEXTURE_USAGE_SHADER_READ) {
result = static_cast<VkImageUsageFlagBits>(result | VK_IMAGE_USAGE_STORAGE_BIT);
}
if (usage & GPU_TEXTURE_USAGE_SHADER_WRITE) {
result = static_cast<VkImageUsageFlagBits>(result | VK_IMAGE_USAGE_STORAGE_BIT);
}
if (usage & GPU_TEXTURE_USAGE_ATTACHMENT) {
if (format_flag & (GPU_FORMAT_NORMALIZED_INTEGER | GPU_FORMAT_COMPRESSED)) {
/* These formats aren't supported as attachments. With GPU_TEXTURE_USAGE_DEFAULT they are
 * still evaluated as attachable, so we need to skip them. */
}
else {
if (format_flag & (GPU_FORMAT_DEPTH | GPU_FORMAT_STENCIL)) {
result = static_cast<VkImageUsageFlagBits>(result |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
}
else {
result = static_cast<VkImageUsageFlagBits>(result | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
}
}
}
if (usage & GPU_TEXTURE_USAGE_HOST_READ) {
result = static_cast<VkImageUsageFlagBits>(result | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
}
return result;
}
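A worked example of the usage mapping above (flag combination hypothetical):

/* to_vk_image_usage(GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT,
 *                   <float color format>):
 *   base flags:  TRANSFER_DST | SAMPLED
 *   SHADER_READ: adds STORAGE
 *   ATTACHMENT:  adds COLOR_ATTACHMENT (DEPTH_STENCIL_ATTACHMENT for depth/stencil
 *                formats; nothing for normalized-integer or compressed formats). */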
bool VKTexture::allocate()
{
BLI_assert(!is_allocated());
@ -230,10 +232,14 @@ bool VKTexture::allocate()
image_info.mipLevels = 1;
image_info.arrayLayers = 1;
image_info.format = to_vk_format(format_);
image_info.tiling = VK_IMAGE_TILING_LINEAR;
/* Some platforms (NVIDIA) require that attached textures are always tiled optimal.
 *
 * As image data is always accessed via a staging buffer, we can enable optimal tiling for all
 * textures. Tiling based on actual usage should be done in `VKFramebuffer`.
 */
image_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT;
image_info.usage = to_vk_image_usage(gpu_image_usage_flags_, format_flag_);
image_info.samples = VK_SAMPLE_COUNT_1_BIT;
VkResult result;
@ -254,8 +260,6 @@ bool VKTexture::allocate()
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = static_cast<VmaAllocationCreateFlagBits>(
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT);
allocCreateInfo.priority = 1.0f;
result = vmaCreateImage(context.mem_allocator_get(),
&image_info,
@ -268,15 +272,7 @@ bool VKTexture::allocate()
}
/* Promote image to the correct layout. */
VkImageMemoryBarrier barrier{};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
barrier.image = vk_image_;
barrier.subresourceRange.aspectMask = to_vk_image_aspect_flag_bits(format_);
barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
context.command_buffer_get().pipeline_barrier(Span<VkImageMemoryBarrier>(&barrier, 1));
layout_ensure(context, VK_IMAGE_LAYOUT_GENERAL);
VK_ALLOCATION_CALLBACKS
VkImageViewCreateInfo image_view_info = {};
@ -307,4 +303,37 @@ void VKTexture::image_bind(int binding)
shader->pipeline_get().descriptor_set_get().image_bind(*this, location);
}
/* -------------------------------------------------------------------- */
/** \name Image Layout
* \{ */
VkImageLayout VKTexture::current_layout_get() const
{
return current_layout_;
}
void VKTexture::current_layout_set(const VkImageLayout new_layout)
{
current_layout_ = new_layout;
}
void VKTexture::layout_ensure(VKContext &context, const VkImageLayout requested_layout)
{
const VkImageLayout current_layout = current_layout_get();
if (current_layout == requested_layout) {
return;
}
VkImageMemoryBarrier barrier{};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = current_layout;
barrier.newLayout = requested_layout;
barrier.image = vk_image_;
barrier.subresourceRange.aspectMask = to_vk_image_aspect_flag_bits(format_);
barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
context.command_buffer_get().pipeline_barrier(Span<VkImageMemoryBarrier>(&barrier, 1));
current_layout_set(requested_layout);
}
/** \} */
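For reference, a sketch of the raw Vulkan call that pipeline_barrier() presumably records; the stage masks are an assumption, as the wrapper's choice isn't visible in this diff:

vkCmdPipelineBarrier(command_buffer,
                     VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, /* srcStageMask, assumed. */
                     VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, /* dstStageMask, assumed. */
                     0, 0, nullptr, 0, nullptr, 1, &barrier);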
} // namespace blender::gpu

View File

@ -17,6 +17,12 @@ class VKTexture : public Texture {
VkImageView vk_image_view_ = VK_NULL_HANDLE;
VmaAllocation allocation_ = VK_NULL_HANDLE;
/* Last known image layout of the texture. Framebuffers and barriers can alter, or require
 * altering, the actual layout of the image. #current_layout_ keeps track of the last known
 * layout so the correct transition can be recorded. */
VkImageLayout current_layout_ = VK_IMAGE_LAYOUT_UNDEFINED;
public:
VKTexture(const char *name) : Texture(name)
{
@ -43,13 +49,17 @@ class VKTexture : public Texture {
void image_bind(int location);
VkImage vk_image_handle() const
{
BLI_assert(is_allocated());
return vk_image_;
}
VkImageView vk_image_view_handle() const
{
BLI_assert(is_allocated());
return vk_image_view_;
}
void ensure_allocated();
protected:
bool init_internal() override;
bool init_internal(GPUVertBuf *vbo) override;
@ -57,7 +67,8 @@ class VKTexture : public Texture {
private:
/** Is this texture already allocated on the device? */
bool is_allocated();
bool is_allocated() const;
/**
* Allocate the texture on the device. Returns `true` when the texture is successfully
* allocated on the device.
@ -65,6 +76,36 @@ class VKTexture : public Texture {
bool allocate();
VkImageViewType vk_image_view_type() const;
/* -------------------------------------------------------------------- */
/** \name Image Layout
* \{ */
public:
/**
* Update the current layout attribute, without actually changing the layout.
*
* Vulkan can change the layout of an image while a command is being executed.
* The start or end of a render pass can also alter the actual layout of the
* image. This method updates the last known layout of the image.
*
* NOTE: When we add command encoding, this should partly be done inside the
* command encoder, as it can determine layout transitions more accurately.
* Only the final transition should then be stored in the texture, to be used
* as the initial layout for the next set of commands.
*/
void current_layout_set(VkImageLayout new_layout);
VkImageLayout current_layout_get() const;
/**
* Ensure the layout of the texture by adding a memory barrier to the active
* command buffer to perform the transition.
*
* When the texture is already in the requested layout, nothing is done.
*/
void layout_ensure(VKContext &context, VkImageLayout requested_layout);
/** \} */
};
static inline VKTexture *unwrap(Texture *tex)

View File

@ -352,10 +352,12 @@ elseif(WIN32)
set(TARGETDIR_VER ${CMAKE_INSTALL_PREFIX_WITH_CONFIG}/bpy/${BLENDER_VERSION})
# Important: the DLLs must be next to `__init__.pyd`, otherwise it won't load.
set(TARGETDIR_LIB ${CMAKE_INSTALL_PREFIX_WITH_CONFIG}/bpy)
set(TARGETDIR_EXE ${CMAKE_INSTALL_PREFIX_WITH_CONFIG}/bpy)
else()
set(TARGETDIR_VER ${BLENDER_VERSION})
set(TARGETDIR_TEXT .)
set(TARGETDIR_LIB .)
set(TARGETDIR_LIB ./blender.shared)
set(TARGETDIR_EXE .)
endif()
elseif(APPLE)
if(WITH_PYTHON_MODULE)
@ -1005,7 +1007,7 @@ elseif(WIN32)
FILES
${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/bin/python${_PYTHON_VERSION_NO_DOTS}.dll
${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/bin/python3.dll
DESTINATION ${TARGETDIR_LIB}
DESTINATION ${TARGETDIR_EXE}
CONFIGURATIONS Release;RelWithDebInfo;MinSizeRel
)
@ -1013,7 +1015,7 @@ elseif(WIN32)
FILES
${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/bin/python${_PYTHON_VERSION_NO_DOTS}_d.dll
${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/bin/python3_d.dll
DESTINATION ${TARGETDIR_LIB}
DESTINATION ${TARGETDIR_EXE}
CONFIGURATIONS Debug
)
endif()
@ -1261,7 +1263,7 @@ elseif(WIN32)
${CMAKE_SOURCE_DIR}/release/windows/batch/blender_factory_startup.cmd
${CMAKE_SOURCE_DIR}/release/windows/batch/blender_oculus.cmd
${CMAKE_SOURCE_DIR}/release/windows/batch/oculus.json
DESTINATION ${TARGETDIR_LIB}
DESTINATION ${TARGETDIR_EXE}
)
endif()
@ -1289,7 +1291,7 @@ elseif(WIN32)
if(PLATFORM_BUNDLED_LIBRARIES)
install(
FILES ${PLATFORM_BUNDLED_LIBRARIES}
DESTINATION ${TARGETDIR_LIB}
DESTINATION ${TARGETDIR_EXE}
)
endif()
elseif(APPLE)
@ -1522,7 +1524,7 @@ if((DEFINED LIBDIR) AND TARGETDIR_LIB)
)
install(DIRECTORY
${USD_LIBRARY_DIR}/usd
DESTINATION "./blender.shared"
DESTINATION ${TARGETDIR_LIB}
)
elseif(USD_PYTHON_SUPPORT)
install(DIRECTORY