WIP: Brush assets project #106303

Draft
Julian Eisel wants to merge 351 commits from brush-assets-project into main

79 changed files with 1093 additions and 575 deletions
Showing only changes of commit fc91ee9092

View File

@ -247,11 +247,13 @@ class GHOST_DeviceVK {
void *device_create_info_p_next = nullptr;
/* Enable optional vulkan 12 features when supported on physical device. */
/* Enable optional vulkan 12 features when supported on physical device.
* Support level for timelineSemaphores is 99%+. */
VkPhysicalDeviceVulkan12Features vulkan_12_features = {};
vulkan_12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
vulkan_12_features.shaderOutputLayer = features_12.shaderOutputLayer;
vulkan_12_features.shaderOutputViewportIndex = features_12.shaderOutputViewportIndex;
vulkan_12_features.timelineSemaphore = VK_TRUE;
vulkan_12_features.pNext = device_create_info_p_next;
device_create_info_p_next = &vulkan_12_features;
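Editorial note (not part of the diff): enabling `timelineSemaphore` lets the backend synchronize on a monotonically increasing 64-bit counter instead of binary semaphores. A minimal host-side wait sketch, assuming standard Vulkan 1.2 headers; `device`, `semaphore` and `value` are placeholders:

#include <vulkan/vulkan.h>

/* Sketch only: `semaphore` is assumed to be created with VK_SEMAPHORE_TYPE_TIMELINE. */
static void wait_timeline_value(VkDevice device, VkSemaphore semaphore, uint64_t value)
{
  VkSemaphoreWaitInfo wait_info = {};
  wait_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO;
  wait_info.semaphoreCount = 1;
  wait_info.pSemaphores = &semaphore;
  wait_info.pValues = &value;
  /* Blocks until the semaphore counter reaches `value`. */
  vkWaitSemaphores(device, &wait_info, UINT64_MAX);
}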
@ -999,7 +1001,6 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
}
extensions_device.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
extensions_device.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
requireExtension(extensions_available, extensions_enabled, VK_EXT_MEMORY_BUDGET_EXTENSION_NAME);
/* Enable MoltenVK required instance extensions. */
#ifdef VK_MVK_MOLTENVK_EXTENSION_NAME

View File

@ -471,11 +471,6 @@ class RENDER_PT_eevee_next_volumes_lighting(RenderButtonsPanel, Panel):
bl_parent_id = "RENDER_PT_eevee_next_volumes"
COMPAT_ENGINES = {'BLENDER_EEVEE_NEXT'}
def draw_header(self, context):
scene = context.scene
props = scene.eevee
self.layout.prop(props, "use_volumetric_lights", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
@ -483,7 +478,6 @@ class RENDER_PT_eevee_next_volumes_lighting(RenderButtonsPanel, Panel):
scene = context.scene
props = scene.eevee
layout.active = props.use_volumetric_lights
layout.prop(props, "volumetric_light_clamp", text="Light Clamping")

View File

@ -95,10 +95,6 @@ class SceneKeyingSetsPanel:
"use_insertkey_override_visual", "use_insertkey_visual",
userpref_fallback="use_visual_keying",
)
SceneKeyingSetsPanel._draw_keyframing_setting(
context, layout, ks, ksp, iface_("XYZ to RGB"),
"use_insertkey_override_xyz_to_rgb", "use_insertkey_xyz_to_rgb",
)
@staticmethod
def _draw_keyframing_setting(context, layout, ks, ksp, label, toggle_prop, prop, userpref_fallback=None):

View File

@ -17,6 +17,7 @@ namespace blender::animrig {
/**
* Get (or add relevant data to be able to do so) F-Curve from the given Action,
* for the given Animation Data block. This assumes that all the destinations are valid.
* \param ptr can be a null pointer.
*/
FCurve *action_fcurve_ensure(Main *bmain,
bAction *act,

View File

@ -194,4 +194,16 @@ int insert_key_action(Main *bmain,
eInsertKeyFlags insert_key_flag,
eBezTriple_KeyframeType key_type);
/** Insert keys on the ID of the given PointerRNA for the given RNA paths. Tries to create an
* action if none exists yet.
* \param scene_frame is expected to not be NLA-mapped yet, as the remapping happens within
* the function.
*/
void insert_key_rna(PointerRNA *rna_pointer,
const blender::Span<std::string> rna_paths,
float scene_frame,
eInsertKeyFlags insert_key_flags,
eBezTriple_KeyframeType key_type,
Main *bmain,
ReportList *reports);
} // namespace blender::animrig
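Editorial note (not part of the diff): a hedged usage sketch of the new API; `ob`, `scene`, `bmain` and `reports` are assumed from the calling context, and the scene frame is passed un-remapped as documented above:

PointerRNA id_ptr;
RNA_id_pointer_create(&ob->id, &id_ptr);
blender::Vector<std::string> paths = {"location"};
blender::animrig::insert_key_rna(&id_ptr,
                                 paths.as_span(),
                                 float(scene->r.cfra), /* Not NLA mapped. */
                                 INSERTKEY_NOFLAGS,
                                 BEZT_KEYTYPE_KEYFRAME,
                                 bmain,
                                 reports);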

View File

@ -13,6 +13,9 @@
#include "BLI_string.h"
#include "DEG_depsgraph_build.hh"
#include "DNA_anim_types.h"
#include "RNA_access.hh"
#include "RNA_path.hh"
#include "RNA_prototypes.h"
namespace blender::animrig {
@ -57,6 +60,24 @@ FCurve *action_fcurve_ensure(Main *bmain,
fcu->rna_path = BLI_strdup(rna_path);
fcu->array_index = array_index;
if (U.autokey_flag & AUTOKEY_FLAG_XYZ2RGB && ptr != nullptr) {
/* For Loc/Rot/Scale and also Color F-Curves, the color of the F-Curve in the Graph Editor
* is determined by the array index of the F-Curve (e.g. indices 0/1/2 display as R/G/B).
*/
PropertyRNA *prop;
PointerRNA r_ptr;
const bool resolved = RNA_path_resolve_property(ptr, rna_path, &r_ptr, &prop);
if (resolved) {
PropertySubType prop_subtype = RNA_property_subtype(prop);
if (ELEM(prop_subtype, PROP_TRANSLATION, PROP_XYZ, PROP_EULER, PROP_COLOR, PROP_COORDS)) {
fcu->color_mode = FCURVE_COLOR_AUTO_RGB;
}
else if (ELEM(prop_subtype, PROP_QUATERNION)) {
fcu->color_mode = FCURVE_COLOR_AUTO_YRGB;
}
}
}
if (group) {
bActionGroup *agrp = BKE_action_group_find_name(act, group);

View File

@ -37,10 +37,8 @@
#include "ED_keyframing.hh"
#include "MEM_guardedalloc.h"
#include "RNA_access.hh"
#include "RNA_define.hh"
#include "RNA_path.hh"
#include "RNA_prototypes.h"
#include "RNA_types.hh"
#include "WM_api.hh"
#include "WM_types.hh"
@ -547,20 +545,6 @@ static bool insert_keyframe_fcurve_value(Main *bmain,
const bool is_new_curve = (fcu->totvert == 0);
/* Set color mode if the F-Curve is new (i.e. without any keyframes). */
if (is_new_curve && (flag & INSERTKEY_XYZ2RGB)) {
/* For Loc/Rot/Scale and also Color F-Curves, the color of the F-Curve in the Graph Editor
* is determined by the array index of the F-Curve.
*/
PropertySubType prop_subtype = RNA_property_subtype(prop);
if (ELEM(prop_subtype, PROP_TRANSLATION, PROP_XYZ, PROP_EULER, PROP_COLOR, PROP_COORDS)) {
fcu->color_mode = FCURVE_COLOR_AUTO_RGB;
}
else if (ELEM(prop_subtype, PROP_QUATERNION)) {
fcu->color_mode = FCURVE_COLOR_AUTO_YRGB;
}
}
/* If the curve has only one key, make it cyclic if appropriate. */
const bool is_cyclic_action = (flag & INSERTKEY_CYCLE_AWARE) && BKE_action_is_cyclic(act);
@ -1003,4 +987,81 @@ int insert_key_action(Main *bmain,
return inserted_keys;
}
static blender::Vector<float> get_keyframe_values(PointerRNA *ptr,
PropertyRNA *prop,
const bool visual_key)
{
Vector<float> values;
if (visual_key && visualkey_can_use(ptr, prop)) {
/* Visual-keying is only available for object and pchan datablocks, as
* it works by keyframing using a value extracted from the final matrix
* instead of using the kt system to extract a value.
*/
values = visualkey_get_values(ptr, prop);
}
else {
values = get_rna_values(ptr, prop);
}
return values;
}
void insert_key_rna(PointerRNA *rna_pointer,
const blender::Span<std::string> rna_paths,
const float scene_frame,
const eInsertKeyFlags insert_key_flags,
const eBezTriple_KeyframeType key_type,
Main *bmain,
ReportList *reports)
{
ID *id = rna_pointer->owner_id;
bAction *action = ED_id_action_ensure(bmain, id);
if (action == nullptr) {
BKE_reportf(reports,
RPT_ERROR,
"Could not insert keyframe, as this type does not support animation data (ID = "
"%s)",
id->name);
return;
}
AnimData *adt = BKE_animdata_from_id(id);
const float nla_frame = BKE_nla_tweakedit_remap(adt, scene_frame, NLATIME_CONVERT_UNMAP);
const bool visual_keyframing = insert_key_flags & INSERTKEY_MATRIX;
int insert_key_count = 0;
for (const std::string &rna_path : rna_paths) {
PointerRNA ptr;
PropertyRNA *prop = nullptr;
const bool path_resolved = RNA_path_resolve_property(
rna_pointer, rna_path.c_str(), &ptr, &prop);
if (!path_resolved) {
BKE_reportf(reports,
RPT_ERROR,
"Could not insert keyframe, as this property does not exist (ID = "
"%s, path = %s)",
id->name,
rna_path.c_str());
continue;
}
char *rna_path_id_to_prop = RNA_path_from_ID_to_property(&ptr, prop);
Vector<float> rna_values = get_keyframe_values(&ptr, prop, visual_keyframing);
insert_key_count += insert_key_action(bmain,
action,
rna_pointer,
rna_path_id_to_prop,
nla_frame,
rna_values.as_span(),
insert_key_flags,
key_type);
MEM_freeN(rna_path_id_to_prop);
}
if (insert_key_count == 0) {
BKE_reportf(reports, RPT_ERROR, "Failed to insert any keys");
}
}
} // namespace blender::animrig

View File

@ -115,6 +115,14 @@ bool BKE_lib_override_library_property_is_animated(
*/
bool BKE_lib_override_library_is_hierarchy_leaf(Main *bmain, ID *id);
/**
* Tag the liboverride ID for auto-refresh when it gets tagged for depsgraph update.
*
* NOTE: This should only handle direct user editing; it is assumed that indirect updates
* should never require an update of the liboverride diffing info.
*/
void BKE_lib_override_id_tag_on_deg_tag_from_user(ID *id);
/**
* Create an overridden local copy of linked reference.
*

View File

@ -457,6 +457,19 @@ bool BKE_lib_override_library_is_hierarchy_leaf(Main *bmain, ID *id)
return false;
}
void BKE_lib_override_id_tag_on_deg_tag_from_user(ID *id)
{
/* Only local liboverrides need to be tagged for refresh; linked ones should not be editable. */
if (ID_IS_LINKED(id) || !ID_IS_OVERRIDE_LIBRARY(id)) {
return;
}
/* NOTE: Valid relationships between IDs here (especially the beloved ObData <-> ShapeKey special
* case) cannot always be expected when IDs get tagged. So for now, embedded IDs and similar also
* get tagged, and the 'liboverride refresh' code is responsible to properly propagate the update
* to the owner ID when needed (see #BKE_lib_override_library_main_operations_create). */
id->tag |= LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
}
ID *BKE_lib_override_library_create_from_id(Main *bmain,
ID *reference_id,
const bool do_tagged_remap)
@ -4403,9 +4416,33 @@ void BKE_lib_override_library_main_operations_create(Main *bmain,
TaskPool *task_pool = BLI_task_pool_create(&create_pool_data, TASK_PRIORITY_HIGH);
FOREACH_MAIN_ID_BEGIN (bmain, id) {
if (!ID_IS_LINKED(id) && ID_IS_OVERRIDE_LIBRARY_REAL(id) &&
(force_auto || (id->tag & LIB_TAG_LIBOVERRIDE_AUTOREFRESH)))
{
if (ID_IS_LINKED(id) || !ID_IS_OVERRIDE_LIBRARY_REAL(id)) {
continue;
}
/* Propagate potential embedded data tag to the owner ID (see also
* #BKE_lib_override_id_tag_on_deg_tag_from_user). */
if (Key *key = BKE_key_from_id(id)) {
if (key->id.tag & LIB_TAG_LIBOVERRIDE_AUTOREFRESH) {
key->id.tag &= ~LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
id->tag |= LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
}
}
if (bNodeTree *ntree = ntreeFromID(id)) {
if (ntree->id.tag & LIB_TAG_LIBOVERRIDE_AUTOREFRESH) {
ntree->id.tag &= ~LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
id->tag |= LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
}
}
if (GS(id->name) == ID_SCE) {
if (Collection *scene_collection = reinterpret_cast<Scene *>(id)->master_collection) {
if (scene_collection->id.tag & LIB_TAG_LIBOVERRIDE_AUTOREFRESH) {
scene_collection->id.tag &= ~LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
id->tag |= LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
}
}
}
if (force_auto || (id->tag & LIB_TAG_LIBOVERRIDE_AUTOREFRESH)) {
/* Usual issue with poses: it's quite rare, but sometimes they may not be up to date when this
* function is called. */
if (GS(id->name) == ID_OB) {
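Editorial note (not part of the diff): the three embedded-ID checks above repeat one pattern; a hedged sketch of a helper that could factor it (the helper name is hypothetical):

static void propagate_autorefresh_tag(ID *owner_id, ID *embedded_id)
{
  if (embedded_id && (embedded_id->tag & LIB_TAG_LIBOVERRIDE_AUTOREFRESH)) {
    /* Move the tag from the embedded ID to its owner. */
    embedded_id->tag &= ~LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
    owner_id->tag |= LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
  }
}

/* Usage, matching the three cases above:
 *   propagate_autorefresh_tag(id, key ? &key->id : nullptr);
 *   propagate_autorefresh_tag(id, ntree ? &ntree->id : nullptr);
 *   propagate_autorefresh_tag(id, scene_collection ? &scene_collection->id : nullptr); */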

View File

@ -31,6 +31,7 @@
#include "BKE_anim_data.h"
#include "BKE_global.h"
#include "BKE_idtype.h"
#include "BKE_lib_override.hh"
#include "BKE_node.hh"
#include "BKE_scene.h"
#include "BKE_screen.hh"
@ -636,6 +637,10 @@ void id_tag_update(Main *bmain, ID *id, uint flags, eUpdateSource update_source)
graph_id_tag_update(bmain, depsgraph, id, flags, update_source);
}
if (update_source & DEG_UPDATE_SOURCE_USER_EDIT) {
BKE_lib_override_id_tag_on_deg_tag_from_user(id);
}
/* Accumulate all tags for an ID between two undo steps, so they can be
* replayed for undo. */
id->recalc_after_undo_push |= deg_recalc_flags_effective(nullptr, flags);

View File

@ -248,30 +248,6 @@ void flush_editors_id_update(Depsgraph *graph, const DEGEditorUpdateContext *upd
if (deg_copy_on_write_is_expanded(id_cow)) {
if (graph->is_active && id_node->is_user_modified) {
deg_editors_id_update(update_ctx, id_orig);
/* We only want to tag an ID for lib-override auto-refresh if it was actually tagged as
* changed. CoW IDs indirectly modified because of changes in other IDs should never
* require a lib-override diffing. */
if (ID_IS_OVERRIDE_LIBRARY_REAL(id_orig)) {
id_orig->tag |= LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
}
else if (ID_IS_OVERRIDE_LIBRARY_VIRTUAL(id_orig)) {
switch (GS(id_orig->name)) {
case ID_KE:
((Key *)id_orig)->from->tag |= LIB_TAG_LIBOVERRIDE_AUTOREFRESH;
break;
case ID_GR:
BLI_assert(id_orig->flag & LIB_EMBEDDED_DATA);
/* TODO. */
break;
case ID_NT:
BLI_assert(id_orig->flag & LIB_EMBEDDED_DATA);
/* TODO. */
break;
default:
BLI_assert(0);
}
}
}
/* Inform draw engines that something was changed. */
flush_engine_data_update(id_cow);

View File

@ -86,8 +86,7 @@ int EEVEE_bloom_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
effects->downsamp_texel_size[i][1] = 1.0f / float(texsize[1]);
eGPUTextureUsage downsample_usage = GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
GPU_TEXTURE_USAGE_ATTACHMENT;
effects->bloom_downsample[i] = DRW_texture_pool_query_2d_ex(
texsize[0], texsize[1], GPU_R11F_G11F_B10F, downsample_usage, &draw_engine_eevee_type);
GPU_framebuffer_ensure_config(
@ -105,8 +104,7 @@ int EEVEE_bloom_init(EEVEE_ViewLayerData * /*sldata*/, EEVEE_Data *vedata)
texsize[1] = std::max(texsize[1], 2);
eGPUTextureUsage upsample_usage = GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
GPU_TEXTURE_USAGE_ATTACHMENT;
effects->bloom_upsample[i] = DRW_texture_pool_query_2d_ex(
texsize[0], texsize[1], GPU_R11F_G11F_B10F, upsample_usage, &draw_engine_eevee_type);

View File

@ -578,8 +578,7 @@ static void dof_reduce_pass_init(EEVEE_FramebufferList *fbl,
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
void *owner = (void *)&EEVEE_depth_of_field_init;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
fx->dof_downsample_tx = DRW_texture_pool_query_2d_ex(
UNPACK2(quater_res), COLOR_FORMAT, usage, static_cast<DrawEngineType *>(owner));
@ -643,8 +642,7 @@ static void dof_reduce_pass_init(EEVEE_FramebufferList *fbl,
if (txl->dof_reduced_color == nullptr) {
/* Color needs to be signed format here. See note in shader for explanation. */
/* Do not use the texture pool because mipmaps are needed. */
eGPUTextureUsage tex_flags = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage tex_flags = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
txl->dof_reduced_color = GPU_texture_create_2d(
"dof_reduced_color", UNPACK2(res), mip_count, GPU_RGBA16F, tex_flags, nullptr);
txl->dof_reduced_coc = GPU_texture_create_2d(

View File

@ -120,8 +120,7 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
});
}
else {
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
DRW_texture_ensure_2d_ex(&txl->maxzbuffer,
UNPACK2(effects->hiz_size),
GPU_DEPTH_COMPONENT24,
@ -153,8 +152,7 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
* Used for SSReflections & SSRefractions.
*/
if ((effects->enabled_effects & EFFECT_RADIANCE_BUFFER) != 0) {
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
DRW_texture_ensure_2d_ex(&txl->filtered_radiance,
UNPACK2(effects->hiz_size),
GPU_R11F_G11F_B10F,

View File

@ -686,8 +686,7 @@ static void eevee_lightbake_count_probes(EEVEE_LightBake *lbake)
static void eevee_lightbake_create_render_target(EEVEE_LightBake *lbake, int rt_res)
{
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
lbake->rt_depth = DRW_texture_create_cube_ex(
rt_res, GPU_DEPTH_COMPONENT24, usage, DRWTextureFlag(0), nullptr);
lbake->rt_color = DRW_texture_create_cube_ex(

View File

@ -103,8 +103,7 @@ static void planar_pool_ensure_alloc(EEVEE_Data *vedata, int num_planar_ref)
/* We need an array texture, so allocate it ourselves. */
if (!txl->planar_pool) {
eGPUTextureUsage planar_usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage planar_usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
eGPUTextureUsage planar_usage_depth = GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_SHADER_READ;
if (num_planar_ref > 0) {
@ -203,8 +202,7 @@ void EEVEE_lightprobes_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
/* Placeholder planar pool: used when rendering planar reflections (avoid dependency loop). */
if (!e_data.planar_pool_placeholder) {
eGPUTextureUsage planar_usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage planar_usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
e_data.planar_pool_placeholder = DRW_texture_create_2d_array_ex(
1, 1, 1, GPU_RGBA8, planar_usage, DRW_TEX_FILTER, nullptr);
}

View File

@ -168,7 +168,7 @@ void DepthOfField::sync()
* the reduced buffers. Color needs to be signed format here. See note in shader for
* explanation. Do not use the texture pool because mipmaps are needed. */
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW | GPU_TEXTURE_USAGE_SHADER_WRITE;
GPU_TEXTURE_USAGE_SHADER_WRITE;
reduced_color_tx_.ensure_2d(GPU_RGBA16F, reduce_size, usage, nullptr, DOF_MIP_COUNT);
reduced_coc_tx_.ensure_2d(GPU_R16F, reduce_size, usage, nullptr, DOF_MIP_COUNT);
reduced_color_tx_.ensure_mip_views();

View File

@ -22,8 +22,7 @@ void HiZBuffer::sync()
int2 hiz_extent = math::ceil_to_multiple(render_extent, int2(1u << (HIZ_MIP_COUNT - 1)));
int2 dispatch_size = math::divide_ceil(hiz_extent, int2(HIZ_GROUP_SIZE));
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE;
hiz_tx_.ensure_2d(GPU_R32F, hiz_extent, usage, nullptr, HIZ_MIP_COUNT);
hiz_tx_.ensure_mip_views();
GPU_texture_mipmap_mode(hiz_tx_, true, false);

View File

@ -269,7 +269,7 @@ void IrradianceCache::set_view(View & /*view*/)
draw::Texture irradiance_d_tx = {"irradiance_d_tx"};
draw::Texture validity_tx = {"validity_tx"};
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ;
int3 grid_size = int3(cache->size);
if (cache->baking.L0) {
irradiance_a_tx.ensure_3d(GPU_RGBA16F, grid_size, usage, (float *)cache->baking.L0);

View File

@ -259,6 +259,9 @@ LightModule::~LightModule()
void LightModule::begin_sync()
{
use_scene_lights_ = inst_.use_scene_lights();
/* Disable sunlight if the world has a volume shader, as we consider that light cannot travel
* through an infinite opaque medium. */
use_sun_lights_ = (inst_.world.has_volume_absorption() == false);
/* In begin_sync so it can be animated. */
if (assign_if_different(light_threshold_, max_ff(1e-16f, inst_.scene->eevee.light_threshold))) {
@ -274,6 +277,13 @@ void LightModule::sync_light(const Object *ob, ObjectHandle &handle)
if (use_scene_lights_ == false) {
return;
}
if (use_sun_lights_ == false) {
if (static_cast<const ::Light *>(ob->data)->type == LA_SUN) {
return;
}
}
Light &light = light_map_.lookup_or_add_default(handle.object_key);
light.used = true;
if (handle.recalc != 0 || !light.initialized) {

View File

@ -118,8 +118,10 @@ class LightModule {
int64_t light_map_size_ = 0;
/** Luminous intensity to consider the light boundary at. Used for culling. */
float light_threshold_ = 0.01f;
/** If false, will prevent all scene light from being synced. */
/** If false, will prevent all scene lights from being synced. */
bool use_scene_lights_ = false;
/** If false, will prevent all sun lights from being synced. */
bool use_sun_lights_ = false;
/** Number of sun lights synced during the last sync. Used as offset. */
int sun_lights_len_ = 0;
int local_lights_len_ = 0;

View File

@ -21,10 +21,9 @@ namespace blender::eevee {
/** \name Viewport Override Node-Tree
* \{ */
LookdevWorldNodeTree::LookdevWorldNodeTree()
LookdevWorld::LookdevWorld()
{
bNodeTree *ntree = ntreeAddTree(nullptr, "Lookdev World Nodetree", ntreeType_Shader->idname);
ntree_ = ntree;
bNode *coordinate = nodeAddStaticNode(nullptr, ntree, SH_NODE_TEX_COORD);
bNodeSocket *coordinate_out = nodeFindSocket(coordinate, SOCK_OUT, "Generated");
@ -71,36 +70,46 @@ LookdevWorldNodeTree::LookdevWorldNodeTree()
* overwrite the set GPU texture. A better solution would be to use image data-blocks as part of
* the studio-lights, but that requires a larger refactoring. */
BKE_image_get_gpu_texture(&image, &environment_storage->iuser, nullptr);
/* Create a dummy image data block to hold GPU textures generated by studio-lights. */
STRNCPY(world.id.name, "WOLookdev");
BKE_libblock_init_empty(&world.id);
world.use_nodes = true;
world.nodetree = ntree;
}
LookdevWorldNodeTree::~LookdevWorldNodeTree()
LookdevWorld::~LookdevWorld()
{
ntreeFreeEmbeddedTree(ntree_);
MEM_SAFE_FREE(ntree_);
BKE_libblock_free_datablock(&image.id, 0);
BKE_libblock_free_datablock(&world.id, 0);
}
bNodeTree *LookdevWorldNodeTree::nodetree_get(const LookdevParameters &parameters)
bool LookdevWorld::sync(const LookdevParameters &new_parameters)
{
intensity_socket_->value = parameters.intensity;
angle_socket_->value = parameters.rot_z;
const bool parameters_changed = assign_if_different(parameters_, new_parameters);
GPU_TEXTURE_FREE_SAFE(image.gputexture[TEXTARGET_2D][0]);
environment_node_->id = nullptr;
if (parameters_changed) {
intensity_socket_->value = parameters_.intensity;
angle_socket_->value = parameters_.rot_z;
StudioLight *sl = BKE_studiolight_find(parameters.hdri.c_str(),
STUDIOLIGHT_ORIENTATIONS_MATERIAL_MODE);
if (sl) {
BKE_studiolight_ensure_flag(sl, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
GPUTexture *texture = sl->equirect_radiance_gputexture;
if (texture != nullptr) {
GPU_texture_ref(texture);
image.gputexture[TEXTARGET_2D][0] = texture;
environment_node_->id = &image.id;
GPU_TEXTURE_FREE_SAFE(image.gputexture[TEXTARGET_2D][0]);
environment_node_->id = nullptr;
StudioLight *sl = BKE_studiolight_find(parameters_.hdri.c_str(),
STUDIOLIGHT_ORIENTATIONS_MATERIAL_MODE);
if (sl) {
BKE_studiolight_ensure_flag(sl, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
GPUTexture *texture = sl->equirect_radiance_gputexture;
if (texture != nullptr) {
GPU_texture_ref(texture);
image.gputexture[TEXTARGET_2D][0] = texture;
environment_node_->id = &image.id;
}
}
}
return ntree_;
GPU_material_free(&world.gpumaterial);
}
return parameters_changed;
}
/** \} */
@ -110,43 +119,7 @@ bNodeTree *LookdevWorldNodeTree::nodetree_get(const LookdevParameters &parameter
*
* \{ */
LookdevModule::~LookdevModule()
{
GPU_material_free(&gpu_materials_);
}
bool LookdevModule::sync_world()
{
/* Check based on the v3d if the world is overridden. */
LookdevParameters new_parameters(inst_.v3d);
const bool parameters_changed = parameters_ != new_parameters;
const bool gpu_parameters_changed = parameters_.gpu_parameters_changed(new_parameters);
if (gpu_parameters_changed) {
GPU_material_free(&gpu_materials_);
}
if (parameters_changed) {
parameters_ = new_parameters;
inst_.sampling.reset();
}
if (parameters_.show_scene_world) {
return false;
}
::bNodeTree *node_tree = world_override_tree.nodetree_get(parameters_);
GPUMaterial *gpu_material = inst_.shaders.material_shader_get(
"EEVEE Lookdev Background", gpu_materials_, node_tree, MAT_PIPE_DEFERRED, MAT_GEOM_WORLD);
if (gpu_parameters_changed) {
inst_.reflection_probes.sync_world_lookdev();
}
inst_.pipelines.world.sync(gpu_material);
inst_.pipelines.background.sync(gpu_material, parameters_.background_opacity);
return true;
}
/* TODO(fclem): This is where the lookdev balls display should go. */
/** \} */
@ -181,12 +154,6 @@ bool LookdevParameters::operator==(const LookdevParameters &other) const
intensity == other.intensity && show_scene_world == other.show_scene_world;
}
bool LookdevParameters::gpu_parameters_changed(const LookdevParameters &other) const
{
return !(hdri == other.hdri && rot_z == other.rot_z && blur == other.blur &&
intensity == other.intensity);
}
bool LookdevParameters::operator!=(const LookdevParameters &other) const
{
return !(*this == other);

View File

@ -35,38 +35,48 @@ struct LookdevParameters {
LookdevParameters(const ::View3D *v3d);
bool operator==(const LookdevParameters &other) const;
bool operator!=(const LookdevParameters &other) const;
bool gpu_parameters_changed(const LookdevParameters &other) const;
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Viewport Override Node-Tree
/** \name Viewport Override World
*
* In a viewport the world can be overridden by a custom HDRI and some settings.
* \{ */
class LookdevWorldNodeTree {
class LookdevWorld {
private:
bNodeTree *ntree_ = nullptr;
bNode *environment_node_ = nullptr;
bNodeSocketValueFloat *intensity_socket_ = nullptr;
bNodeSocketValueFloat *angle_socket_ = nullptr;
::Image image = {};
::World world = {};
LookdevParameters parameters_;
public:
LookdevWorldNodeTree();
~LookdevWorldNodeTree();
LookdevWorld();
~LookdevWorld();
bNodeTree *nodetree_get(const LookdevParameters &parameters);
/* Returns true if an update was detected. */
bool sync(const LookdevParameters &new_parameters);
::World *world_get()
{
return &world;
}
float background_opacity_get()
{
return parameters_.background_opacity;
}
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Lookdev
*
* Look Development can override the world.
*
* \{ */
@ -75,19 +85,10 @@ class LookdevModule {
private:
Instance &inst_;
LookdevWorldNodeTree world_override_tree;
LookdevParameters parameters_;
ListBase gpu_materials_ = {nullptr, nullptr};
public:
LookdevModule(Instance &inst) : inst_(inst){};
~LookdevModule();
bool sync_world();
private:
::World *get_world(::bNodeTree *node_tree);
/* TODO(fclem): This is where the lookdev balls display should go. */
};
/** \} */

View File

@ -31,8 +31,6 @@ void BackgroundPipeline::sync(GPUMaterial *gpumat, const float background_opacit
Manager &manager = *inst_.manager;
RenderBuffers &rbufs = inst_.render_buffers;
ResourceHandle handle = manager.resource_handle(float4x4::identity());
world_ps_.init();
world_ps_.state_set(DRW_STATE_WRITE_COLOR);
world_ps_.material_set(manager, gpumat);
@ -44,10 +42,8 @@ void BackgroundPipeline::sync(GPUMaterial *gpumat, const float background_opacit
world_ps_.bind_image("rp_cryptomatte_img", &rbufs.cryptomatte_tx);
/* Required by validation layers. */
inst_.cryptomatte.bind_resources(world_ps_);
inst_.bind_uniform_data(&world_ps_);
world_ps_.draw(DRW_cache_fullscreen_quad_get(), handle);
world_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
/* To allow opaque pass rendering over it. */
world_ps_.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS);
}
@ -78,12 +74,9 @@ void WorldPipeline::sync(GPUMaterial *gpumat)
pass.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_ALWAYS);
Manager &manager = *inst_.manager;
ResourceHandle handle = manager.resource_handle(float4x4::identity());
pass.material_set(manager, gpumat);
pass.push_constant("world_opacity_fade", 1.0f);
pass.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);
inst_.bind_uniform_data(&pass);
pass.bind_image("rp_normal_img", dummy_renderpass_tx_);
pass.bind_image("rp_light_img", dummy_renderpass_tx_);
pass.bind_image("rp_diffuse_color_img", dummy_renderpass_tx_);
@ -92,14 +85,13 @@ void WorldPipeline::sync(GPUMaterial *gpumat)
pass.bind_image("rp_cryptomatte_img", dummy_cryptomatte_tx_);
pass.bind_image("rp_color_img", dummy_aov_color_tx_);
pass.bind_image("rp_value_img", dummy_aov_value_tx_);
/* Required by validation layers. */
inst_.cryptomatte.bind_resources(pass);
pass.bind_image("aov_color_img", dummy_aov_color_tx_);
pass.bind_image("aov_value_img", dummy_aov_value_tx_);
pass.bind_ssbo("aov_buf", &inst_.film.aovs_info);
pass.draw(DRW_cache_fullscreen_quad_get(), handle);
/* Required by validation layers. */
inst_.cryptomatte.bind_resources(pass);
inst_.bind_uniform_data(&pass);
pass.draw_procedural(GPU_PRIM_TRIS, 1, 3);
}
void WorldPipeline::render(View &view)
@ -116,7 +108,7 @@ void WorldPipeline::render(View &view)
void WorldVolumePipeline::sync(GPUMaterial *gpumat)
{
is_valid_ = GPU_material_status(gpumat) == GPU_MAT_SUCCESS;
is_valid_ = (gpumat != nullptr) && (GPU_material_status(gpumat) == GPU_MAT_SUCCESS);
if (!is_valid_) {
/* Skip if the material has not compiled yet. */
return;
@ -139,7 +131,11 @@ void WorldVolumePipeline::sync(GPUMaterial *gpumat)
void WorldVolumePipeline::render(View &view)
{
if (!is_valid_) {
/* Skip if the material has not compiled yet. */
/* Clear the properties buffer instead of rendering if there is no valid shader. */
inst_.volume.prop_scattering_tx_.clear(float4(0.0f));
inst_.volume.prop_extinction_tx_.clear(float4(0.0f));
inst_.volume.prop_emission_tx_.clear(float4(0.0f));
inst_.volume.prop_phase_tx_.clear(float4(0.0f));
return;
}
@ -844,6 +840,8 @@ void VolumeLayer::render(View &view, Texture &occupancy_tx)
void VolumePipeline::sync()
{
enabled_ = false;
has_scatter_ = false;
has_absorption_ = false;
for (auto &layer : layers_) {
layer->sync();
}
@ -950,6 +948,12 @@ void VolumePipeline::material_call(MaterialPass &volume_material_pass,
object_pass->dispatch(math::divide_ceil(visible_aabb.extent(), int3(VOLUME_GROUP_SIZE)));
/* Notify the volume module to enable itself. */
enabled_ = true;
if (GPU_material_flag_get(volume_material_pass.gpumat, GPU_MATFLAG_VOLUME_SCATTER)) {
has_scatter_ = true;
}
if (GPU_material_flag_get(volume_material_pass.gpumat, GPU_MATFLAG_VOLUME_ABSORPTION)) {
has_absorption_ = true;
}
}
}

View File

@ -89,11 +89,6 @@ class WorldVolumePipeline {
void sync(GPUMaterial *gpumat);
void render(View &view);
bool is_valid()
{
return is_valid_;
}
};
/** \} */
@ -354,6 +349,9 @@ class VolumePipeline {
/* True if any volume (any object type) creates a volume draw-call. Enables the volume module. */
bool enabled_ = false;
/* Aggregated properties of all volume objects. */
bool has_scatter_ = false;
bool has_absorption_ = false;
public:
VolumePipeline(Instance &inst) : inst_(inst){};
@ -378,6 +376,14 @@ class VolumePipeline {
{
return enabled_;
}
bool has_scatter() const
{
return has_scatter_;
}
bool has_absorption() const
{
return has_absorption_;
}
/* Returns true if any volume layer uses the hit list. */
bool use_hit_list() const;

View File

@ -238,7 +238,7 @@ static int layer_subdivision_for(const int max_resolution,
return max_ii(int(log2(max_resolution)) - i_probe_resolution, 0);
}
void ReflectionProbeModule::sync_world(::World *world, WorldHandle & /*ob_handle*/)
void ReflectionProbeModule::sync_world(::World *world)
{
ReflectionProbe &probe = probes_.lookup(world_object_key_);
@ -254,6 +254,7 @@ void ReflectionProbeModule::sync_world(::World *world, WorldHandle & /*ob_handle
void ReflectionProbeModule::sync_world_lookdev()
{
ReflectionProbe &probe = probes_.lookup(world_object_key_);
const eLightProbeResolution resolution = reflection_probe_resolution();
int layer_subdivision = layer_subdivision_for(max_resolution_, resolution);
if (layer_subdivision != probe.atlas_coord.layer_subdivision) {

View File

@ -137,7 +137,7 @@ class ReflectionProbeModule {
void init();
void begin_sync();
void sync_world(::World *world, WorldHandle &ob_handle);
void sync_world(::World *world);
void sync_world_lookdev();
void sync_object(Object *ob, ObjectHandle &ob_handle);
void end_sync();

View File

@ -69,7 +69,7 @@ void RenderBuffers::acquire(int2 extent)
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
/* Depth and combined are always needed. */
depth_tx.ensure_2d(GPU_DEPTH24_STENCIL8, extent, usage | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW);
depth_tx.ensure_2d(GPU_DEPTH24_STENCIL8, extent, usage | GPU_TEXTURE_USAGE_FORMAT_VIEW);
/* TODO(fclem): depth_tx should ideally be a texture from pool but we need stencil_view
* which is currently unsupported by pool textures. */
// depth_tx.acquire(extent, GPU_DEPTH24_STENCIL8);
@ -80,9 +80,7 @@ void RenderBuffers::acquire(int2 extent)
GPU_TEXTURE_USAGE_SHADER_WRITE;
/* TODO(fclem): Make vector pass allocation optional if no TAA or motion blur is needed. */
vector_tx.acquire(extent,
vector_tx_format(),
usage_attachment_read_write | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW);
vector_tx.acquire(extent, vector_tx_format(), usage_attachment_read_write);
int color_len = data.color_len + data.aovs.color_len;
int value_len = data.value_len + data.aovs.value_len;

View File

@ -486,12 +486,12 @@ struct VolumesInfoData {
int tile_size;
int tile_size_lod;
float shadow_steps;
bool1 use_lights;
bool1 use_soft_shadows;
float depth_near;
float depth_far;
float depth_distribution;
float _pad0;
float _pad1;
float _pad2;
};
BLI_STATIC_ASSERT_ALIGN(VolumesInfoData, 16)
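Editorial note (not part of the diff): the `_pad` floats keep the UBO layout a 16-byte multiple after the two `bool1` members are removed; `BLI_STATIC_ASSERT_ALIGN` is roughly equivalent to:

static_assert(sizeof(VolumesInfoData) % 16 == 0,
              "GPU uniform/storage structs must be sized in 16-byte multiples");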

View File

@ -59,9 +59,6 @@ void VolumeModule::init()
data_.shadow_steps = float(scene_eval->eevee.volumetric_shadow_samples);
}
data_.use_lights = (scene_eval->eevee.flag & SCE_EEVEE_VOLUMETRIC_LIGHTS) != 0;
data_.use_soft_shadows = (scene_eval->eevee.flag & SCE_EEVEE_SHADOW_SOFT) != 0;
data_.light_clamp = scene_eval->eevee.volumetric_light_clamp;
}
@ -94,13 +91,11 @@ void VolumeModule::begin_sync()
data_.depth_far = integration_end;
data_.depth_distribution = 1.0f / (integration_end - integration_start);
}
enabled_ = inst_.world.has_volume();
}
void VolumeModule::end_sync()
{
enabled_ = enabled_ || inst_.pipelines.volume.is_enabled();
enabled_ = inst_.world.has_volume() || inst_.pipelines.volume.is_enabled();
if (!enabled_) {
occupancy_tx_.free();
@ -119,6 +114,13 @@ void VolumeModule::end_sync()
return;
}
bool has_scatter = inst_.world.has_volume_scatter() || inst_.pipelines.volume.has_scatter();
bool has_absorption = inst_.world.has_volume_absorption() ||
inst_.pipelines.volume.has_absorption();
use_lights_ = has_scatter;
/* TODO(fclem): Allocate extinction texture as dummy (1px^3) if has_absorption is false. */
UNUSED_VARS(has_absorption);
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE |
GPU_TEXTURE_USAGE_ATTACHMENT;
@ -159,13 +161,6 @@ void VolumeModule::end_sync()
occupancy_fb_.ensure(data_.tex_size.xy());
}
if (!inst_.pipelines.world_volume.is_valid()) {
prop_scattering_tx_.clear(float4(0.0f));
prop_extinction_tx_.clear(float4(0.0f));
prop_emission_tx_.clear(float4(0.0f));
prop_phase_tx_.clear(float4(0.0f));
}
scatter_tx_.ensure_3d(GPU_R11F_G11F_B10F, data_.tex_size, usage);
extinction_tx_.ensure_3d(GPU_R11F_G11F_B10F, data_.tex_size, usage);
@ -176,8 +171,8 @@ void VolumeModule::end_sync()
transparent_pass_transmit_tx_ = integrated_transmit_tx_;
scatter_ps_.init();
scatter_ps_.shader_set(inst_.shaders.static_shader_get(
data_.use_lights ? VOLUME_SCATTER_WITH_LIGHTS : VOLUME_SCATTER));
scatter_ps_.shader_set(
inst_.shaders.static_shader_get(use_lights_ ? VOLUME_SCATTER_WITH_LIGHTS : VOLUME_SCATTER));
inst_.lights.bind_resources(scatter_ps_);
inst_.reflection_probes.bind_resources(scatter_ps_);
inst_.irradiance_cache.bind_resources(scatter_ps_);

View File

@ -41,14 +41,17 @@ namespace blender::eevee {
class Instance;
class VolumePipeline;
class WorldVolumePipeline;
class VolumeModule {
friend VolumePipeline;
friend WorldVolumePipeline;
private:
Instance &inst_;
bool enabled_;
bool use_lights_;
VolumesInfoData &data_;
@ -130,7 +133,7 @@ class VolumeModule {
bool needs_shadow_tagging()
{
return enabled_ && data_.use_lights;
return enabled_ && use_lights_;
}
int3 grid_size()
@ -142,10 +145,6 @@ class VolumeModule {
void begin_sync();
void sync_world();
void material_call(MaterialPass &material_pass, Object *ob, ResourceHandle res_handle);
void end_sync();
/* Render material properties. */

View File

@ -68,7 +68,7 @@ World::~World()
{
if (default_world_ == nullptr) {
default_world_ = static_cast<::World *>(BKE_id_new_nomain(ID_WO, "EEVEE default world"));
copy_v3_fl(&default_world_->horr, 0.0f);
default_world_->horr = default_world_->horg = default_world_->horb = 0.0f;
default_world_->use_nodes = 0;
default_world_->nodetree = nullptr;
BLI_listbase_clear(&default_world_->gpumaterial);
@ -76,42 +76,57 @@ World::~World()
return default_world_;
}
void World::world_and_ntree_get(::World *&world, bNodeTree *&ntree)
{
world = inst_.scene->world;
if (world == nullptr) {
world = default_world_get();
}
ntree = (world->nodetree && world->use_nodes && !inst_.use_studio_light()) ?
world->nodetree :
default_tree.nodetree_get(world);
}
void World::sync()
{
::World *bl_world;
bNodeTree *ntree;
world_and_ntree_get(bl_world, ntree);
::World *bl_world = inst_.use_studio_light() ? nullptr : inst_.scene->world;
GPUMaterial *volume_gpumat = inst_.shaders.world_shader_get(
bl_world, ntree, MAT_PIPE_VOLUME_MATERIAL);
inst_.pipelines.world_volume.sync(volume_gpumat);
bool has_update = false;
if (inst_.lookdev.sync_world()) {
return;
if (bl_world) {
/* Detect world update before overriding it. */
WorldHandle &wo_handle = inst_.sync.sync_world(bl_world);
has_update = (wo_handle.recalc != 0);
wo_handle.reset_recalc_flag();
}
WorldHandle &wo_handle = inst_.sync.sync_world(bl_world);
inst_.reflection_probes.sync_world(bl_world, wo_handle);
if (wo_handle.recalc != 0) {
/* Sync volume first since its result can override the surface world. */
sync_volume();
if (inst_.use_studio_light()) {
has_update = lookdev_world_.sync(LookdevParameters(inst_.v3d));
bl_world = lookdev_world_.world_get();
}
else if (has_volume_absorption_) {
bl_world = default_world_get();
}
else if (inst_.scene->world != nullptr) {
bl_world = inst_.scene->world;
}
else {
bl_world = default_world_get();
}
bNodeTree *ntree = (bl_world->nodetree && bl_world->use_nodes) ?
bl_world->nodetree :
default_tree.nodetree_get(bl_world);
{
if (has_volume_absorption_) {
/* Replace world by black world. */
bl_world = default_world_get();
}
}
inst_.reflection_probes.sync_world(bl_world);
if (has_update) {
inst_.reflection_probes.do_world_update_set(true);
inst_.sampling.reset();
}
wo_handle.reset_recalc_flag();
/* TODO(fclem): This should be detected at the scene level. */
/* We have to manually test here because we have overrides. */
::World *orig_world = (::World *)DEG_get_original_id(&bl_world->id);
if (assign_if_different(prev_original_world, orig_world)) {
inst_.reflection_probes.do_world_update_set(true);
inst_.sampling.reset();
}
@ -119,18 +134,36 @@ void World::sync()
inst_.manager->register_layer_attributes(gpumat);
inst_.pipelines.background.sync(gpumat, inst_.film.background_opacity_get());
float opacity = inst_.use_studio_light() ? lookdev_world_.background_opacity_get() :
inst_.film.background_opacity_get();
inst_.pipelines.background.sync(gpumat, opacity);
inst_.pipelines.world.sync(gpumat);
}
bool World::has_volume()
void World::sync_volume()
{
::World *bl_world;
bNodeTree *ntree;
world_and_ntree_get(bl_world, ntree);
/* Studio lights have no volume shader. */
::World *world = inst_.use_studio_light() ? nullptr : inst_.scene->world;
GPUMaterial *gpumat = inst_.shaders.world_shader_get(bl_world, ntree, MAT_PIPE_VOLUME_MATERIAL);
return GPU_material_has_volume_output(gpumat);
GPUMaterial *gpumat = nullptr;
/* Only the scene world nodetree can have a volume shader. */
if (world && world->nodetree && world->use_nodes) {
gpumat = inst_.shaders.world_shader_get(world, world->nodetree, MAT_PIPE_VOLUME_MATERIAL);
}
if (gpumat && (GPU_material_status(gpumat) == GPU_MAT_SUCCESS)) {
has_volume_ = true;
has_volume_scatter_ = GPU_material_flag_get(gpumat, GPU_MATFLAG_VOLUME_SCATTER);
has_volume_absorption_ = GPU_material_flag_get(gpumat, GPU_MATFLAG_VOLUME_ABSORPTION);
}
else {
has_volume_ = has_volume_absorption_ = has_volume_scatter_ = false;
}
/* The world volume always needs to be synced for correct clearing of the parameter buffers. */
inst_.pipelines.world_volume.sync(gpumat);
}
/** \} */

View File

@ -13,6 +13,8 @@
#include "DNA_world_types.h"
#include "eevee_lookdev.hh"
namespace blender::eevee {
class Instance;
@ -56,9 +58,14 @@ class World {
/* Used when the scene doesn't have a world. */
::World *default_world_ = nullptr;
::World *default_world_get();
/* Is true if the world has a valid volume shader compiled. */
bool has_volume_ = false;
/* Is true if the volume shader has absorption. Disables distant lights. */
bool has_volume_absorption_ = false;
/* Is true if the volume shader has scattering. */
bool has_volume_scatter_ = false;
void world_and_ntree_get(::World *&world, bNodeTree *&ntree);
LookdevWorld lookdev_world_;
public:
World(Instance &inst) : inst_(inst){};
@ -66,7 +73,27 @@ class World {
void sync();
bool has_volume();
bool has_volume() const
{
return has_volume_;
}
bool has_volume_absorption() const
{
return has_volume_absorption_;
}
bool has_volume_scatter() const
{
return has_volume_scatter_;
}
private:
void sync_volume();
/* Returns a dummy black world for when a valid world isn't present or when we want to suppress
* any light coming from the world. */
::World *default_world_get();
};
/** \} */

View File

@ -77,9 +77,8 @@ void main()
/* Divide by phase total weight, to compute the mean anisotropy. */
float s_anisotropy = phase.x / max(1.0, phase.y);
scattering += volume_irradiance(P) * s_scattering * volume_phase_function_isotropic();
#ifdef VOLUME_LIGHTING
scattering += volume_irradiance(P) * s_scattering * volume_phase_function_isotropic();
LIGHT_FOREACH_BEGIN_DIRECTIONAL (light_cull_buf, l_idx) {
scattering += volume_scatter_light_eval(true, P, V, l_idx, s_anisotropy) * s_scattering;

View File

@ -124,7 +124,7 @@ void DofPass::init(const SceneState &scene_state)
int2 half_res = scene_state.resolution / 2;
half_res = {max_ii(half_res.x, 1), max_ii(half_res.y, 1)};
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ;
source_tx_.ensure_2d(GPU_RGBA16F, half_res, usage, nullptr, 3);
source_tx_.ensure_mip_views();
source_tx_.filter_mode(true);

View File

@ -199,7 +199,7 @@ void OpaquePass::draw(Manager &manager,
resolution,
GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW);
GPU_TEXTURE_USAGE_FORMAT_VIEW);
GPU_texture_copy(shadow_depth_stencil_tx, resources.depth_tx);
clear_fb.ensure(GPU_ATTACHMENT_TEXTURE(shadow_depth_stencil_tx));

View File

@ -312,8 +312,7 @@ static DRWVolumeGrid *volume_grid_cache_get(const Volume *volume,
UNPACK3(dense_grid.resolution),
1,
format,
GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
GPU_TEXTURE_USAGE_SHADER_READ,
dense_grid.voxels);
/* The texture can be null if the resolution along one axis is larger than
* GL_MAX_3D_TEXTURE_SIZE. */

View File

@ -179,12 +179,8 @@ static GPUTexture *create_volume_texture(const int dim[3],
}
while (true) {
tex = GPU_texture_create_3d("volume",
UNPACK3(final_dim),
1,
texture_format,
GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
nullptr);
tex = GPU_texture_create_3d(
"volume", UNPACK3(final_dim), 1, texture_format, GPU_TEXTURE_USAGE_SHADER_READ, nullptr);
if (tex != nullptr) {
break;

View File

@ -91,11 +91,6 @@ eInsertKeyFlags ANIM_get_keyframing_flags(Scene *scene, const bool use_autokey_m
if (is_autokey_flag(scene, AUTOKEY_FLAG_INSERTNEEDED)) {
flag |= INSERTKEY_NEEDED;
}
/* default F-Curve color mode - RGB from XYZ indices */
if (is_autokey_flag(scene, AUTOKEY_FLAG_XYZ2RGB)) {
flag |= INSERTKEY_XYZ2RGB;
}
}
/* only if including settings from the autokeying mode... */
@ -395,84 +390,6 @@ static blender::Vector<std::string> construct_rna_paths(PointerRNA *ptr)
return paths;
}
static blender::Vector<float> get_keyframe_values(PointerRNA *ptr,
PropertyRNA *prop,
const bool visual_key)
{
using namespace blender;
Vector<float> values;
if (visual_key && animrig::visualkey_can_use(ptr, prop)) {
/* Visual-keying is only available for object and pchan datablocks, as
* it works by keyframing using a value extracted from the final matrix
* instead of using the kt system to extract a value.
*/
values = animrig::visualkey_get_values(ptr, prop);
}
else {
values = animrig::get_rna_values(ptr, prop);
}
return values;
}
static void insert_key_rna(PointerRNA *rna_pointer,
const blender::Span<std::string> rna_paths,
const float scene_frame,
const eInsertKeyFlags insert_key_flags,
const eBezTriple_KeyframeType key_type,
Main *bmain,
ReportList *reports)
{
using namespace blender;
ID *id = rna_pointer->owner_id;
bAction *action = ED_id_action_ensure(bmain, id);
if (action == nullptr) {
BKE_reportf(reports,
RPT_ERROR,
"Could not insert keyframe, as this type does not support animation data (ID = "
"%s)",
id->name);
return;
}
AnimData *adt = BKE_animdata_from_id(id);
const float nla_frame = BKE_nla_tweakedit_remap(adt, scene_frame, NLATIME_CONVERT_UNMAP);
const bool visual_keyframing = insert_key_flags & INSERTKEY_MATRIX;
int insert_key_count = 0;
for (const std::string &rna_path : rna_paths) {
PointerRNA ptr;
PropertyRNA *prop = nullptr;
const bool path_resolved = RNA_path_resolve_property(
rna_pointer, rna_path.c_str(), &ptr, &prop);
if (!path_resolved) {
BKE_reportf(reports,
RPT_ERROR,
"Could not insert keyframe, as this property does not exist (ID = "
"%s, path = %s)",
id->name,
rna_path.c_str());
continue;
}
std::string rna_path_id_to_prop = RNA_path_from_ID_to_property(&ptr, prop);
Vector<float> rna_values = get_keyframe_values(&ptr, prop, visual_keyframing);
insert_key_count += animrig::insert_key_action(bmain,
action,
rna_pointer,
rna_path_id_to_prop,
nla_frame,
rna_values.as_span(),
insert_key_flags,
key_type);
}
if (insert_key_count == 0) {
BKE_reportf(reports, RPT_ERROR, "Failed to insert any keys");
}
}
/* Fill the list with CollectionPointerLink depending on the mode of the context. */
static bool get_selection(bContext *C, ListBase *r_selection)
{
@ -523,7 +440,7 @@ static int insert_key(bContext *C, wmOperator *op)
PointerRNA id_ptr = collection_ptr_link->ptr;
Vector<std::string> rna_paths = construct_rna_paths(&collection_ptr_link->ptr);
insert_key_rna(
animrig::insert_key_rna(
&id_ptr, rna_paths.as_span(), scene_frame, insert_key_flags, key_type, bmain, op->reports);
}

View File

@ -294,10 +294,6 @@ static int add_keyingset_button_exec(bContext *C, wmOperator *op)
keyingflag |= ANIM_get_keyframing_flags(scene, false);
if (blender::animrig::is_autokey_flag(scene, AUTOKEY_FLAG_XYZ2RGB)) {
keyingflag |= INSERTKEY_XYZ2RGB;
}
/* call the API func, and set the active keyingset index */
ks = BKE_keyingset_add(
&scene->keyingsets, "ButtonKeyingSet", "Button Keying Set", flag, keyingflag);
@ -1011,7 +1007,6 @@ static eInsertKeyFlags keyingset_apply_keying_flags(const eInsertKeyFlags base_f
*/
APPLY_KEYINGFLAG_OVERRIDE(INSERTKEY_NEEDED)
APPLY_KEYINGFLAG_OVERRIDE(INSERTKEY_MATRIX)
APPLY_KEYINGFLAG_OVERRIDE(INSERTKEY_XYZ2RGB)
#undef APPLY_KEYINGFLAG_OVERRIDE

View File

@ -20,6 +20,8 @@
#include "UI_resources.hh"
#include "RNA_define.hh"
#include "RNA_enum_types.hh"
#include "RNA_prototypes.h"
#include "ED_asset_library.h"
@ -78,25 +80,16 @@ const EnumPropertyItem *ED_asset_library_reference_to_rna_enum_itemf(const bool
int totitem = 0;
if (include_generated) {
const EnumPropertyItem generated_items[] = {
{ASSET_LIBRARY_ALL, "ALL", 0, "All", "Show assets from all of the listed asset libraries"},
RNA_ENUM_ITEM_SEPR,
{ASSET_LIBRARY_LOCAL,
"LOCAL",
ICON_CURRENT_FILE,
"Current File",
"Show the assets currently available in this Blender session"},
{ASSET_LIBRARY_ESSENTIALS,
"ESSENTIALS",
0,
"Essentials",
"Show the basic building blocks and utilities coming with Blender"},
{0, nullptr, 0, nullptr, nullptr},
};
/* Add predefined libraries that are generated and not simple directories that can be written
* to. */
RNA_enum_items_add(&item, &totitem, generated_items);
BLI_assert(rna_enum_asset_library_type_items[0].value == ASSET_LIBRARY_ALL);
RNA_enum_item_add(&item, &totitem, &rna_enum_asset_library_type_items[0]);
RNA_enum_item_add_separator(&item, &totitem);
BLI_assert(rna_enum_asset_library_type_items[1].value == ASSET_LIBRARY_LOCAL);
RNA_enum_item_add(&item, &totitem, &rna_enum_asset_library_type_items[1]);
BLI_assert(rna_enum_asset_library_type_items[2].value == ASSET_LIBRARY_ESSENTIALS);
RNA_enum_item_add(&item, &totitem, &rna_enum_asset_library_type_items[2]);
}
/* Add separator if needed. */

View File

@ -38,7 +38,7 @@ void operator_asset_reference_props_register(StructRNA &srna)
PropertyRNA *prop;
prop = RNA_def_enum(&srna,
"asset_library_type",
rna_enum_aset_library_type_items,
rna_enum_asset_library_type_items,
ASSET_LIBRARY_LOCAL,
"Asset Library Type",
"");

View File

@ -345,8 +345,7 @@ static int load_tex(Brush *br, ViewContext *vc, float zoom, bool col, bool prima
if (!target->overlay_texture) {
eGPUTextureFormat format = col ? GPU_RGBA8 : GPU_R8;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
target->overlay_texture = GPU_texture_create_2d(
"paint_cursor_overlay", size, size, 1, format, usage, nullptr);
GPU_texture_update(target->overlay_texture, GPU_DATA_UBYTE, buffer);
@ -465,8 +464,7 @@ static int load_tex_cursor(Brush *br, ViewContext *vc, float zoom)
BLI_task_parallel_range(0, size, &data, load_tex_cursor_task_cb, &settings);
if (!cursor_snap.overlay_texture) {
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
cursor_snap.overlay_texture = GPU_texture_create_2d(
"cursor_snap_overaly", size, size, 1, GPU_R8, usage, nullptr);
GPU_texture_update(cursor_snap.overlay_texture, GPU_DATA_UBYTE, buffer);

View File

@ -475,8 +475,7 @@ static void unlink_object_fn(bContext *C,
else if (GS(tsep->id->name) == ID_SCE) {
/* Following execution is expected to happen exclusively in the Outliner scene view. */
#ifdef NDEBUG
SpaceOutliner *space_outliner = CTX_wm_space_outliner(C);
BLI_assert(space_outliner->outlinevis == SO_SCENES);
BLI_assert(CTX_wm_space_outliner(C)->outlinevis == SO_SCENES);
#endif
Scene *scene = (Scene *)tsep->id;

View File

@ -227,9 +227,11 @@ set(VULKAN_SRC
vulkan/vk_shader.cc
vulkan/vk_shader_interface.cc
vulkan/vk_shader_log.cc
vulkan/vk_staging_buffer.cc
vulkan/vk_state_manager.cc
vulkan/vk_storage_buffer.cc
vulkan/vk_texture.cc
vulkan/vk_timeline_semaphore.cc
vulkan/vk_uniform_buffer.cc
vulkan/vk_vertex_attribute_object.cc
vulkan/vk_vertex_buffer.cc
@ -266,9 +268,11 @@ set(VULKAN_SRC
vulkan/vk_shader.hh
vulkan/vk_shader_interface.hh
vulkan/vk_shader_log.hh
vulkan/vk_staging_buffer.hh
vulkan/vk_state_manager.hh
vulkan/vk_storage_buffer.hh
vulkan/vk_texture.hh
vulkan/vk_timeline_semaphore.hh
vulkan/vk_uniform_buffer.hh
vulkan/vk_vertex_attribute_object.hh
vulkan/vk_vertex_buffer.hh
@ -875,8 +879,12 @@ endif()
if(WITH_GTESTS)
set(TEST_SRC)
set(TEST_INC)
set(TEST_LIB)
if(WITH_OPENGL_DRAW_TESTS)
set(TEST_SRC
list(APPEND TEST_SRC
tests/gpu_testing.cc
tests/buffer_texture_test.cc
@ -893,17 +901,16 @@ if(WITH_GTESTS)
tests/gpu_testing.hh
)
if(WITH_VULKAN_BACKEND)
list(APPEND TEST_SRC
tests/memory_layout_test.cc
vulkan/vk_data_conversion_test.cc
)
endif()
endif()
set(TEST_INC
)
set(TEST_LIB
if(WITH_VULKAN_BACKEND)
list(APPEND TEST_SRC
vulkan/vk_memory_layout_test.cc
vulkan/vk_data_conversion_test.cc
)
endif()
if (TEST_SRC)
include(GTestTesting)
blender_add_test_lib(bf_gpu_tests "${TEST_SRC}" "${INC};${TEST_INC}" "${INC_SYS}" "${LIB};${TEST_LIB}")
endif()

View File

@ -539,10 +539,9 @@ typedef enum eGPUTextureUsage {
GPU_TEXTURE_USAGE_SHADER_WRITE = (1 << 1),
/* Whether a texture is used as an attachment in a frame-buffer. */
GPU_TEXTURE_USAGE_ATTACHMENT = (1 << 2),
/* Whether the texture is used as a texture view, uses mip-map layer adjustment,
* OR, uses swizzle access masks. Mip-map base layer adjustment and texture channel swizzling
* requires a texture view under-the-hood. */
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW = (1 << 3),
/* Whether a texture is used to create a texture view utilising a different texture format to the
* source textures format. This includes the use of stencil views. */
GPU_TEXTURE_USAGE_FORMAT_VIEW = (1 << 3),
/* Whether the texture needs to be read from by the CPU. */
GPU_TEXTURE_USAGE_HOST_READ = (1 << 4),
/* When used, the texture will not have any backing storage and can solely exist as a virtual

View File

@ -469,9 +469,13 @@ GPUTexture *GPU_texture_create_view(const char *name,
{
BLI_assert(mip_len > 0);
BLI_assert(layer_len > 0);
BLI_assert_msg(
GPU_texture_usage(src) & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
"Source texture of TextureView must have GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW usage flag.");
BLI_assert_msg(use_stencil == false || (GPU_texture_usage(src) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
"Source texture of TextureView must have GPU_TEXTURE_USAGE_FORMAT_VIEW usage "
"flag if view texture uses stencil texturing.");
BLI_assert_msg((format == GPU_texture_format(src)) ||
(GPU_texture_usage(src) & GPU_TEXTURE_USAGE_FORMAT_VIEW),
"Source texture of TextureView must have GPU_TEXTURE_USAGE_FORMAT_VIEW usage "
"flag if view texture format is different.");
Texture *view = GPUBackend::get()->texture_alloc(name);
view->init_view(src,
format,
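Editorial note (not part of the diff): a hedged sketch of the case these asserts guard; `extent` is assumed, and the view itself would then be created via GPU_texture_create_view() with `use_stencil` set:

/* A source texture must now carry GPU_TEXTURE_USAGE_FORMAT_VIEW before a
 * stencil view (or any format-reinterpreting view) may be created from it. */
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ |
                         GPU_TEXTURE_USAGE_FORMAT_VIEW;
GPUTexture *depth_tx = GPU_texture_create_2d(
    "depth", UNPACK2(extent), 1, GPU_DEPTH24_STENCIL8, usage, nullptr);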

View File

@ -168,7 +168,7 @@ static void gpu_viewport_textures_create(GPUViewport *viewport)
1,
GPU_DEPTH24_STENCIL8,
usage | GPU_TEXTURE_USAGE_HOST_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
GPU_TEXTURE_USAGE_FORMAT_VIEW,
nullptr);
if (GPU_clear_viewport_workaround()) {
static int depth_clear = 0;

View File

@ -620,7 +620,7 @@ inline MTLTextureUsage mtl_usage_from_gpu(eGPUTextureUsage usage)
if (usage & GPU_TEXTURE_USAGE_ATTACHMENT) {
mtl_usage = mtl_usage | MTLTextureUsageRenderTarget;
}
if (usage & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW) {
if (usage & GPU_TEXTURE_USAGE_FORMAT_VIEW) {
mtl_usage = mtl_usage | MTLTextureUsagePixelFormatView;
}
#if defined(MAC_OS_VERSION_14_0)
@ -650,7 +650,7 @@ inline eGPUTextureUsage gpu_usage_from_mtl(MTLTextureUsage mtl_usage)
usage = usage | GPU_TEXTURE_USAGE_ATTACHMENT;
}
if (mtl_usage & MTLTextureUsagePixelFormatView) {
usage = usage | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
usage = usage | GPU_TEXTURE_USAGE_FORMAT_VIEW;
}
return usage;
}

View File

@ -131,29 +131,38 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
}
}
/* Ensure we have texture view usage flagged. */
BLI_assert(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW);
/* if a texture view was previously created we release it. */
if (mip_swizzle_view_ != nil) {
[mip_swizzle_view_ release];
mip_swizzle_view_ = nil;
}
/* Use source texture to determine range limits. If we are using a GPU texture view, the range
* check should only validate the range of the view. */
const gpu::Texture *tex_view_src = this;
if (resource_mode_ == MTL_TEXTURE_MODE_TEXTURE_VIEW) {
tex_view_src = unwrap(source_texture_);
}
/* Determine num slices */
int max_slices = 1;
int num_slices = 1;
switch (type_) {
case GPU_TEXTURE_1D_ARRAY:
max_slices = tex_view_src->height_get();
num_slices = h_;
break;
case GPU_TEXTURE_2D_ARRAY:
max_slices = tex_view_src->depth_get();
num_slices = d_;
break;
case GPU_TEXTURE_CUBE:
max_slices = 6;
num_slices = 6;
break;
case GPU_TEXTURE_CUBE_ARRAY:
/* d_ is equal to array levels * 6, including face count. */
max_slices = tex_view_src->depth_get();
num_slices = d_;
break;
default:
@ -163,7 +172,7 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
/* Determine texture view format. If texture view is used as a stencil view, we instead provide
* the equivalent format for performing stencil reads/samples. */
MTLPixelFormat texture_view_pixel_format = texture_.pixelFormat;
MTLPixelFormat texture_view_pixel_format = gpu_texture_format_to_metal(format_);
if (texture_view_stencil_) {
switch (texture_view_pixel_format) {
case MTLPixelFormatDepth24Unorm_Stencil8:
@ -182,11 +191,21 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
* via modifying this textures type flags. */
MTLTextureType texture_view_texture_type = to_metal_type(type_);
/* Ensure we have texture view usage flagged.
* NOTE: This check exists in the high-level GPU API; however, it does not cover internal Metal
* backend uses of texture views, such as when required to support the SRGB enablement toggle
* during rendering. */
BLI_assert_msg(
(texture_view_pixel_format == texture_.pixelFormat) ||
(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_FORMAT_VIEW),
"Usage Flag GPU_TEXTURE_USAGE_FORMAT_VIEW must be specified if a texture view is "
"created with a different format to its source texture.");
int range_len = min_ii((mip_texture_max_level_ - mip_texture_base_level_) + 1,
(int)texture_.mipmapLevelCount - mip_texture_base_level_);
BLI_assert(range_len > 0);
BLI_assert(mip_texture_base_level_ < texture_.mipmapLevelCount);
BLI_assert(mip_texture_base_layer_ < num_slices);
BLI_assert(mip_texture_base_layer_ < max_slices);
mip_swizzle_view_ = [texture_
newTextureViewWithPixelFormat:texture_view_pixel_format
textureType:texture_view_texture_type
@ -672,7 +691,7 @@ void gpu::MTLTexture::update_sub(
* format is unwritable and our texture has not been initialized with
* texture view support, use a staging texture. */
if ((compatible_write_format != destination_format) &&
!(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW))
!(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_FORMAT_VIEW))
{
use_staging_texture = true;
}
@ -688,7 +707,7 @@ void gpu::MTLTexture::update_sub(
use_staging_texture = true;
}
if (compatible_write_format != destination_format) {
if (!(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW)) {
if (!(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_FORMAT_VIEW)) {
use_staging_texture = true;
}
}
@ -713,7 +732,7 @@ void gpu::MTLTexture::update_sub(
else {
/* Use texture view. */
if (compatible_write_format != destination_format) {
BLI_assert(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW);
BLI_assert(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_FORMAT_VIEW);
texture_handle = [texture_ newTextureViewWithPixelFormat:compatible_write_format];
}
else {
@ -1440,8 +1459,6 @@ void gpu::MTLTexture::swizzle_set(const char swizzle_mask[4])
swizzle_to_mtl(swizzle_mask[2]),
swizzle_to_mtl(swizzle_mask[3]));
BLI_assert_msg(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
"Texture view support is required to change swizzle parameters.");
mtl_swizzle_mask_ = new_swizzle_mask;
texture_view_dirty_flags_ |= TEXTURE_VIEW_SWIZZLE_DIRTY;
}
@ -1645,7 +1662,7 @@ void gpu::MTLTexture::read_internal(int mip,
/* Texture View for SRGB special case. */
id<MTLTexture> read_texture = texture_;
if (format_ == GPU_SRGB8_A8) {
BLI_assert(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW);
BLI_assert(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_FORMAT_VIEW);
read_texture = [texture_ newTextureViewWithPixelFormat:MTLPixelFormatRGBA8Unorm];
}
@ -2098,9 +2115,6 @@ bool gpu::MTLTexture::init_internal(GPUTexture *src,
/* Assign usage. */
gpu_image_usage_flags_ = GPU_texture_usage(src);
BLI_assert_msg(
gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
"Source texture of TextureView must have GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW usage flag.");
/* Assign texture as view. */
gpu::MTLTexture *mtltex = static_cast<gpu::MTLTexture *>(unwrap(src));
@ -2187,7 +2201,7 @@ void gpu::MTLTexture::ensure_baked()
* disabled. Enabling the texture_view or texture_read usage flags disables lossless
* compression, so the situations in which it is used should be limited. */
if (format_ == GPU_SRGB8_A8) {
gpu_image_usage_flags_ = gpu_image_usage_flags_ | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
gpu_image_usage_flags_ = gpu_image_usage_flags_ | GPU_TEXTURE_USAGE_FORMAT_VIEW;
}
/* Create texture descriptor. */

View File

@ -26,20 +26,24 @@ void VKBatch::draw_setup()
/* Finalize graphics pipeline */
VKContext &context = *VKContext::get();
VKStateManager &state_manager = context.state_manager_get();
state_manager.apply_state();
state_manager.apply_bindings();
VKVertexAttributeObject vao;
vao.update_bindings(context, *this);
context.bind_graphics_pipeline(prim_type, vao);
/* Bind geometry resources. */
vao.bind(context);
VKIndexBuffer *index_buffer = index_buffer_get();
const bool draw_indexed = index_buffer != nullptr;
state_manager.apply_state();
state_manager.apply_bindings();
/*
* The next statements are order-dependent. VBOs and IBOs must be uploaded before resources can
* be bound. Uploading device-local buffers flushes the graphics pipeline, so already bound
* resources would be unbound.
*/
VKVertexAttributeObject vao;
vao.update_bindings(context, *this);
vao.ensure_vbos_uploaded();
if (draw_indexed) {
index_buffer->upload_data();
index_buffer->bind(context);
}
vao.bind(context);
context.bind_graphics_pipeline(prim_type, vao);
}
void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int instance_count)

View File

@ -28,12 +28,11 @@ static VmaAllocationCreateFlags vma_allocation_flags(GPUUsageType usage)
{
switch (usage) {
case GPU_USAGE_STATIC:
case GPU_USAGE_DEVICE_ONLY:
return 0;
case GPU_USAGE_DYNAMIC:
case GPU_USAGE_STREAM:
return VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
case GPU_USAGE_DEVICE_ONLY:
return VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
case GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY:
break;
}
@ -41,7 +40,21 @@ static VmaAllocationCreateFlags vma_allocation_flags(GPUUsageType usage)
return VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
}
bool VKBuffer::create(int64_t size_in_bytes, GPUUsageType usage, VkBufferUsageFlags buffer_usage)
static VkMemoryPropertyFlags vma_preferred_flags(const bool is_host_visible)
{
return is_host_visible ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
}
/*
* TODO: Check which memory type is selected and adjust the creation flags to add mapping. This
* way the staging buffer can be skipped, or in the case of a vertex buffer an intermediate
* buffer can be removed.
*/
bool VKBuffer::create(int64_t size_in_bytes,
GPUUsageType usage,
VkBufferUsageFlags buffer_usage,
const bool is_host_visible)
{
BLI_assert(!is_allocated());
BLI_assert(vk_buffer_ == VK_NULL_HANDLE);
@ -70,6 +83,7 @@ bool VKBuffer::create(int64_t size_in_bytes, GPUUsageType usage, VkBufferUsageFl
VmaAllocationCreateInfo vma_create_info = {};
vma_create_info.flags = vma_allocation_flags(usage);
vma_create_info.priority = 1.0f;
vma_create_info.preferredFlags = vma_preferred_flags(is_host_visible);
vma_create_info.usage = VMA_MEMORY_USAGE_AUTO;
VkResult result = vmaCreateBuffer(
@ -78,8 +92,10 @@ bool VKBuffer::create(int64_t size_in_bytes, GPUUsageType usage, VkBufferUsageFl
return false;
}
/* Host-visible buffers are mapped into virtual memory. */
return map();
if (is_host_visible) {
return map();
}
return true;
}
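For reference, a minimal sketch of what the `GPU_USAGE_DEVICE_ONLY` path above asks VMA to do, using only documented VMA calls; `allocator` and `size_in_bytes` are illustrative placeholders, not the exact Blender call site:

/* Hedged sketch: allocate a device-local buffer without host access,
 * mirroring vma_allocation_flags(GPU_USAGE_DEVICE_ONLY) == 0 and
 * vma_preferred_flags(false) above. */
VkBufferCreateInfo buffer_info = {};
buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_info.size = size_in_bytes;
buffer_info.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo alloc_info = {};
alloc_info.flags = 0; /* No host access requested, so no persistent mapping. */
alloc_info.usage = VMA_MEMORY_USAGE_AUTO;
alloc_info.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

VkBuffer vk_buffer = VK_NULL_HANDLE;
VmaAllocation allocation = nullptr;
vmaCreateBuffer(allocator, &buffer_info, &alloc_info, &vk_buffer, &allocation, nullptr);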
void VKBuffer::update(const void *data) const

View File

@ -31,8 +31,10 @@ class VKBuffer {
/** Has this buffer been allocated? */
bool is_allocated() const;
bool create(int64_t size, GPUUsageType usage, VkBufferUsageFlags buffer_usage);
bool create(int64_t size,
GPUUsageType usage,
VkBufferUsageFlags buffer_usage,
bool is_host_visible = true);
void clear(VKContext &context, uint32_t clear_value);
void update(const void *data) const;
void flush() const;
@ -56,9 +58,13 @@ class VKBuffer {
*/
void *mapped_memory_get() const;
/**
* Is this buffer mapped (visible on host)
*/
bool is_mapped() const;
private:
/** Check if this buffer is mapped. */
bool is_mapped() const;
bool map();
void unmap();
};

View File

@ -28,11 +28,6 @@ VKCommandBuffers::~VKCommandBuffers()
VK_ALLOCATION_CALLBACKS;
const VKDevice &device = VKBackend::get().device_get();
if (vk_fence_ != VK_NULL_HANDLE) {
vkDestroyFence(device.device_get(), vk_fence_, vk_allocation_callbacks);
vk_fence_ = VK_NULL_HANDLE;
}
if (vk_command_pool_ != VK_NULL_HANDLE) {
vkDestroyCommandPool(device.device_get(), vk_command_pool_, vk_allocation_callbacks);
vk_command_pool_ = VK_NULL_HANDLE;
@ -54,7 +49,6 @@ void VKCommandBuffers::init(const VKDevice &device)
}
init_command_pool(device);
init_command_buffers(device);
init_fence(device);
submission_id_.reset();
}
@ -103,38 +97,44 @@ void VKCommandBuffers::init_command_buffers(const VKDevice &device)
"Graphics Command Buffer");
}
void VKCommandBuffers::init_fence(const VKDevice &device)
void VKCommandBuffers::submit_command_buffers(VKDevice &device,
MutableSpan<VKCommandBuffer *> command_buffers)
{
if (vk_fence_ == VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
VkFenceCreateInfo fenceInfo{};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(device.device_get(), &fenceInfo, vk_allocation_callbacks, &vk_fence_);
}
}
VKTimelineSemaphore &timeline_semaphore = device.timeline_semaphore_get();
VkSemaphore timeline_handle = timeline_semaphore.vk_handle();
VKTimelineSemaphore::Value wait_value = timeline_semaphore.value_get();
last_signal_value_ = timeline_semaphore.value_increase();
static void submit_command_buffers(const VKDevice &device,
MutableSpan<VKCommandBuffer *> command_buffers,
VkFence vk_fence,
uint64_t timeout)
{
BLI_assert(ELEM(command_buffers.size(), 1, 2));
VkCommandBuffer handles[2];
int num_command_buffers = 0;
for (VKCommandBuffer *command_buffer : command_buffers) {
command_buffer->end_recording();
handles[num_command_buffers++] = command_buffer->vk_command_buffer();
}
VkTimelineSemaphoreSubmitInfo timelineInfo;
timelineInfo.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
timelineInfo.pNext = nullptr;
timelineInfo.waitSemaphoreValueCount = 1;
timelineInfo.pWaitSemaphoreValues = wait_value;
timelineInfo.signalSemaphoreValueCount = 1;
timelineInfo.pSignalSemaphoreValues = last_signal_value_;
VkPipelineStageFlags wait_stages = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = num_command_buffers;
submit_info.pCommandBuffers = handles;
submit_info.pNext = &timelineInfo;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &timeline_handle;
submit_info.pWaitDstStageMask = &wait_stages;
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &timeline_handle;
vkQueueSubmit(device.queue_get(), 1, &submit_info, vk_fence);
vkWaitForFences(device.device_get(), 1, &vk_fence, VK_TRUE, timeout);
vkResetFences(device.device_get(), 1, &vk_fence);
vkQueueSubmit(device.queue_get(), 1, &submit_info, VK_NULL_HANDLE);
finish();
for (VKCommandBuffer *command_buffer : command_buffers) {
command_buffer->commands_submitted();
@ -144,7 +144,7 @@ static void submit_command_buffers(const VKDevice &device,
void VKCommandBuffers::submit()
{
const VKDevice &device = VKBackend::get().device_get();
VKDevice &device = VKBackend::get().device_get();
VKCommandBuffer &data_transfer_compute = command_buffer_get(Type::DataTransferCompute);
VKCommandBuffer &graphics = command_buffer_get(Type::Graphics);
@ -163,22 +163,21 @@ void VKCommandBuffers::submit()
end_render_pass(*framebuffer);
command_buffers[command_buffer_index++] = &graphics;
submit_command_buffers(device,
MutableSpan<VKCommandBuffer *>(command_buffers, command_buffer_index),
vk_fence_,
FenceTimeout);
MutableSpan<VKCommandBuffer *>(command_buffers, command_buffer_index));
begin_render_pass(*framebuffer);
}
else if (has_data_transfer_compute_work) {
submit_command_buffers(device,
MutableSpan<VKCommandBuffer *>(command_buffers, command_buffer_index),
vk_fence_,
FenceTimeout);
MutableSpan<VKCommandBuffer *>(command_buffers, command_buffer_index));
}
}
const bool reset_submission_id = has_data_transfer_compute_work || has_graphics_work;
if (reset_submission_id) {
submission_id_.next();
}
void VKCommandBuffers::finish()
{
VKDevice &device = VKBackend::get().device_get();
VKTimelineSemaphore &timeline_semaphore = device.timeline_semaphore_get();
timeline_semaphore.wait(device, last_signal_value_);
submission_id_.next();
}
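As a hedged aside: besides the blocking wait used in `finish()` above, the timeline API also allows polling the current counter value, which could be used to check submission progress without stalling. A sketch, with `device` and `semaphore` as illustrative handles:

/* Hedged sketch: non-blocking progress check via vkGetSemaphoreCounterValue. */
uint64_t completed = 0;
vkGetSemaphoreCounterValue(device.device_get(), semaphore.vk_handle(), &completed);
/* If `completed` is at least the value signaled for a given submission, all
 * command buffers of that submission have finished and can be reused. */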
void VKCommandBuffers::ensure_no_draw_commands()
@ -415,7 +414,9 @@ void VKCommandBuffers::copy(VKTexture &dst_texture,
command_buffer.command_recorded();
}
void VKCommandBuffers::copy(VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBufferCopy> regions)
void VKCommandBuffers::copy(const VKBuffer &dst_buffer,
VkBuffer src_buffer,
Span<VkBufferCopy> regions)
{
VKCommandBuffer &command_buffer = command_buffer_get(Type::DataTransferCompute);
vkCmdCopyBuffer(command_buffer.vk_command_buffer(),

View File

@ -9,6 +9,7 @@
#pragma once
#include "vk_command_buffer.hh"
#include "vk_timeline_semaphore.hh"
namespace blender::gpu {
class VKFrameBuffer;
@ -31,16 +32,12 @@ class VKCommandBuffers : public NonCopyable, NonMovable {
};
bool initialized_ = false;
/**
* Timeout to use when waiting for fences in nanoseconds.
*
* Currently added as the fence will halt when there are no commands in the command buffer for
* the second time. This should be solved and this timeout should be removed.
*/
static constexpr uint64_t FenceTimeout = UINT64_MAX;
/* Fence for CPU GPU synchronization when submitting the command buffers. */
VkFence vk_fence_ = VK_NULL_HANDLE;
/**
* Last signaled timeline value, which can be used to validate that all commands submitted by
* these command buffers have finished.
*/
VKTimelineSemaphore::Value last_signal_value_;
/**
* Active framebuffer for graphics command buffer.
@ -95,7 +92,7 @@ class VKCommandBuffers : public NonCopyable, NonMovable {
void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span<VkBufferImageCopy> regions);
void copy(VKTexture &dst_texture, VKBuffer &src_buffer, Span<VkBufferImageCopy> regions);
void copy(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageCopy> regions);
void copy(VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBufferCopy> regions);
void copy(const VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBufferCopy> regions);
void blit(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageBlit> regions);
void blit(VKTexture &dst_texture,
VkImageLayout dst_layout,
@ -142,6 +139,7 @@ class VKCommandBuffers : public NonCopyable, NonMovable {
uint32_t stride);
void submit();
void finish();
const VKSubmissionID &submission_id_get() const
{
@ -149,10 +147,11 @@ class VKCommandBuffers : public NonCopyable, NonMovable {
}
private:
void init_fence(const VKDevice &device);
void init_command_pool(const VKDevice &device);
void init_command_buffers(const VKDevice &device);
void submit_command_buffers(VKDevice &device, MutableSpan<VKCommandBuffer *> command_buffers);
VKCommandBuffer &command_buffer_get(Type type)
{
return buffers_[(int)type];

View File

@ -127,7 +127,7 @@ void VKContext::flush()
void VKContext::finish()
{
command_buffers_.submit();
command_buffers_.finish();
}
void VKContext::memory_statistics_get(int *r_total_mem_kb, int *r_free_mem_kb)

View File

@ -23,10 +23,12 @@ namespace blender::gpu {
void VKDevice::deinit()
{
VK_ALLOCATION_CALLBACKS
if (!is_initialized()) {
return;
}
timeline_semaphore_.free(*this);
dummy_buffer_.free();
if (dummy_color_attachment_.has_value()) {
delete &(*dummy_color_attachment_).get();
@ -34,8 +36,10 @@ void VKDevice::deinit()
}
samplers_.free();
destroy_discarded_resources();
vkDestroyPipelineCache(vk_device_, vk_pipeline_cache_, vk_allocation_callbacks);
vmaDestroyAllocator(mem_allocator_);
mem_allocator_ = VK_NULL_HANDLE;
debugging_tools_.deinit(vk_instance_);
vk_instance_ = VK_NULL_HANDLE;
@ -68,8 +72,10 @@ void VKDevice::init(void *ghost_context)
VKBackend::capabilities_init(*this);
init_debug_callbacks();
init_memory_allocator();
init_pipeline_cache();
samplers_.init();
timeline_semaphore_.init(*this);
debug::object_label(device_get(), "LogicalDevice");
debug::object_label(queue_get(), "GenericQueue");
@ -122,6 +128,14 @@ void VKDevice::init_memory_allocator()
vmaCreateAllocator(&info, &mem_allocator_);
}
void VKDevice::init_pipeline_cache()
{
VK_ALLOCATION_CALLBACKS;
VkPipelineCacheCreateInfo create_info = {};
create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
vkCreatePipelineCache(vk_device_, &create_info, vk_allocation_callbacks, &vk_pipeline_cache_);
}
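Note that a cache created this way starts empty each session. A possible follow-up (not part of this commit) would be to persist it between runs via the standard Vulkan calls; a sketch, assuming the device handles above and using std::vector for brevity:

/* Hedged sketch: serialize the pipeline cache (error handling omitted). */
size_t data_size = 0;
vkGetPipelineCacheData(vk_device_, vk_pipeline_cache_, &data_size, nullptr);
std::vector<uint8_t> data(data_size);
vkGetPipelineCacheData(vk_device_, vk_pipeline_cache_, &data_size, data.data());
/* On the next startup the bytes could be fed back through
 * create_info.initialDataSize / create_info.pInitialData before
 * vkCreatePipelineCache. */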
void VKDevice::init_dummy_buffer(VKContext &context)
{
if (dummy_buffer_.is_allocated()) {

View File

@ -16,6 +16,7 @@
#include "vk_debug.hh"
#include "vk_descriptor_pools.hh"
#include "vk_samplers.hh"
#include "vk_timeline_semaphore.hh"
namespace blender::gpu {
class VKBackend;
@ -61,6 +62,9 @@ class VKDevice : public NonCopyable {
VKSamplers samplers_;
/* Semaphore for CPU/GPU synchronization when submitting commands to the queue. */
VKTimelineSemaphore timeline_semaphore_;
/**
* Available Contexts for this device.
*
@ -74,6 +78,7 @@ class VKDevice : public NonCopyable {
/** Allocator used for texture and buffers and other resources. */
VmaAllocator mem_allocator_ = VK_NULL_HANDLE;
VkPipelineCache vk_pipeline_cache_ = VK_NULL_HANDLE;
/** Limits of the device linked to this context. */
VkPhysicalDeviceProperties vk_physical_device_properties_ = {};
@ -150,6 +155,11 @@ class VKDevice : public NonCopyable {
return mem_allocator_;
}
VkPipelineCache vk_pipeline_cache_get() const
{
return vk_pipeline_cache_;
}
debug::VKDebuggingTools &debugging_tools_get()
{
return debugging_tools_;
@ -216,12 +226,28 @@ class VKDevice : public NonCopyable {
/** \} */
/* -------------------------------------------------------------------- */
/** \name Queue management
* \{ */
VKTimelineSemaphore &timeline_semaphore_get()
{
return timeline_semaphore_;
}
const VKTimelineSemaphore &timeline_semaphore_get() const
{
return timeline_semaphore_;
}
/** \} */
private:
void init_physical_device_properties();
void init_physical_device_memory_properties();
void init_physical_device_features();
void init_debug_callbacks();
void init_memory_allocator();
void init_pipeline_cache();
/* During initialization the backend requires access to update the workarounds. */
friend VKBackend;

View File

@ -9,6 +9,7 @@
#include "vk_index_buffer.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_staging_buffer.hh"
#include "vk_state_manager.hh"
namespace blender::gpu {
@ -24,10 +25,15 @@ void VKIndexBuffer::ensure_updated()
allocate();
}
if (data_ != nullptr) {
buffer_.update(data_);
MEM_SAFE_FREE(data_);
if (data_ == nullptr) {
return;
}
VKContext &context = *VKContext::get();
VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::HostToDevice);
staging_buffer.host_buffer_get().update(data_);
staging_buffer.copy_to_device(context);
MEM_SAFE_FREE(data_);
}
void VKIndexBuffer::upload_data()
@ -65,9 +71,9 @@ void VKIndexBuffer::bind(int binding,
void VKIndexBuffer::read(uint32_t *data) const
{
VKContext &context = *VKContext::get();
context.flush();
buffer_.read(data);
VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::DeviceToHost);
staging_buffer.copy_from_device(context);
staging_buffer.host_buffer_get().read(data);
}
void VKIndexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*data*/)
@ -83,8 +89,11 @@ void VKIndexBuffer::strip_restart_indices()
void VKIndexBuffer::allocate()
{
GPUUsageType usage = data_ == nullptr ? GPU_USAGE_DEVICE_ONLY : GPU_USAGE_STATIC;
buffer_.create(
size_get(), usage, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT);
buffer_.create(size_get(),
usage,
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
false);
debug::object_label(buffer_.vk_handle(), "IndexBuffer");
}

View File

@ -4,7 +4,7 @@
#include "testing/testing.h"
#include "../vulkan/vk_memory_layout.hh"
#include "vk_memory_layout.hh"
namespace blender::gpu {

View File

@ -57,7 +57,7 @@ VKPipeline VKPipeline::create_compute_pipeline(
VkPipeline vk_pipeline;
if (vkCreateComputePipelines(device.device_get(),
nullptr,
device.vk_pipeline_cache_get(),
1,
&pipeline_info,
vk_allocation_callbacks,
@ -182,7 +182,7 @@ void VKPipeline::finalize(VKContext &context,
const VKDevice &device = VKBackend::get().device_get();
vkCreateGraphicsPipelines(device.device_get(),
VK_NULL_HANDLE,
device.vk_pipeline_cache_get(),
1,
&pipeline_create_info,
vk_allocation_callbacks,

View File

@ -0,0 +1,57 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#include "vk_staging_buffer.hh"
#include "vk_command_buffers.hh"
#include "vk_context.hh"
namespace blender::gpu {
VKStagingBuffer::VKStagingBuffer(const VKBuffer &device_buffer, Direction direction)
: device_buffer_(device_buffer)
{
VkBufferUsageFlags usage;
switch (direction) {
case Direction::HostToDevice:
usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
break;
case Direction::DeviceToHost:
usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
host_buffer_.create(device_buffer.size_in_bytes(), GPU_USAGE_STREAM, usage, true);
}
void VKStagingBuffer::copy_to_device(VKContext &context)
{
BLI_assert(host_buffer_.is_allocated() && host_buffer_.is_mapped());
VkBufferCopy buffer_copy = {};
buffer_copy.size = device_buffer_.size_in_bytes();
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.copy(
device_buffer_, host_buffer_.vk_handle(), Span<VkBufferCopy>(&buffer_copy, 1));
command_buffers.submit();
}
void VKStagingBuffer::copy_from_device(VKContext &context)
{
BLI_assert(host_buffer_.is_allocated() && host_buffer_.is_mapped());
VkBufferCopy buffer_copy = {};
buffer_copy.size = device_buffer_.size_in_bytes();
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.copy(
host_buffer_, device_buffer_.vk_handle(), Span<VkBufferCopy>(&buffer_copy, 1));
command_buffers.submit();
}
void VKStagingBuffer::free()
{
host_buffer_.free();
}
} // namespace blender::gpu

View File

@ -0,0 +1,76 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#pragma once
#include "vk_buffer.hh"
#include "vk_common.hh"
namespace blender::gpu {
/**
* Utility class to copy data from host to device and vice versa.
*
* This is a common pattern, as device-local buffers perform better for GPU access than buffers
* located in host memory.
*/
class VKStagingBuffer {
public:
/**
* Direction of the transfer.
*/
enum class Direction {
/**
* Transferring data from host to device.
*/
HostToDevice,
/**
* Transferring data from device to host.
*/
DeviceToHost,
};
private:
/**
* Reference to the device buffer.
*/
const VKBuffer &device_buffer_;
/**
* The temporary buffer on host for the transfer. Also called the staging buffer.
*/
VKBuffer host_buffer_;
public:
VKStagingBuffer(const VKBuffer &device_buffer, Direction direction);
/**
* Copy the content of the host buffer to the device buffer.
*/
void copy_to_device(VKContext &context);
/**
* Copy the content of the device buffer to the host buffer.
*/
void copy_from_device(VKContext &context);
/**
* Get the reference to the host buffer to update/load the data.
*/
const VKBuffer &host_buffer_get() const
{
return host_buffer_;
}
/**
* Free the host memory.
*
* Use when a reference to the staging buffer is kept, but the host-side resource isn't needed
* anymore.
*/
void free();
};
} // namespace blender::gpu
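A minimal usage sketch, mirroring how the index-, storage- and vertex-buffer code in this commit drives the class; `device_buffer`, `data` and `r_data` are illustrative:

/* Upload host data into a device-local buffer. */
VKContext &context = *VKContext::get();
VKStagingBuffer staging(device_buffer, VKStagingBuffer::Direction::HostToDevice);
staging.host_buffer_get().update(data); /* Fill the mapped host buffer. */
staging.copy_to_device(context);        /* Record and submit the transfer. */

/* Read a device-local buffer back to the host. */
VKStagingBuffer read_back(device_buffer, VKStagingBuffer::Direction::DeviceToHost);
read_back.copy_from_device(context);
read_back.host_buffer_get().read(r_data);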

View File

@ -7,6 +7,7 @@
*/
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_staging_buffer.hh"
#include "vk_state_manager.hh"
#include "vk_vertex_buffer.hh"
@ -21,8 +22,11 @@ VKStorageBuffer::VKStorageBuffer(int size, GPUUsageType usage, const char *name)
void VKStorageBuffer::update(const void *data)
{
VKContext &context = *VKContext::get();
ensure_allocated();
buffer_.update(data);
VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::HostToDevice);
staging_buffer.host_buffer_get().update(data);
staging_buffer.copy_to_device(context);
}
void VKStorageBuffer::ensure_allocated()
@ -34,10 +38,12 @@ void VKStorageBuffer::ensure_allocated()
void VKStorageBuffer::allocate()
{
const bool is_host_visible = false;
buffer_.create(size_in_bytes_,
usage_,
VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT);
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
is_host_visible);
debug::object_label(buffer_.vk_handle(), name_);
}
@ -104,7 +110,9 @@ void VKStorageBuffer::read(void *data)
VKContext &context = *VKContext::get();
context.flush();
buffer_.read(data);
VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::DeviceToHost);
staging_buffer.copy_from_device(context);
staging_buffer.host_buffer_get().read(data);
}
} // namespace blender::gpu

View File

@ -0,0 +1,86 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#include "vk_timeline_semaphore.hh"
#include "vk_backend.hh"
#include "vk_device.hh"
#include "vk_memory.hh"
namespace blender::gpu {
VKTimelineSemaphore::~VKTimelineSemaphore()
{
const VKDevice &device = VKBackend::get().device_get();
free(device);
}
void VKTimelineSemaphore::init(const VKDevice &device)
{
if (vk_semaphore_ != VK_NULL_HANDLE) {
return;
}
VK_ALLOCATION_CALLBACKS;
VkSemaphoreTypeCreateInfo semaphore_type_create_info = {};
semaphore_type_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO;
semaphore_type_create_info.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE;
semaphore_type_create_info.initialValue = 0;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
semaphore_create_info.pNext = &semaphore_type_create_info;
vkCreateSemaphore(
device.device_get(), &semaphore_create_info, vk_allocation_callbacks, &vk_semaphore_);
debug::object_label(vk_semaphore_, "TimelineSemaphore");
value_.reset();
}
void VKTimelineSemaphore::free(const VKDevice &device)
{
if (vk_semaphore_ == VK_NULL_HANDLE) {
return;
}
VK_ALLOCATION_CALLBACKS;
vkDestroySemaphore(device.device_get(), vk_semaphore_, vk_allocation_callbacks);
vk_semaphore_ = VK_NULL_HANDLE;
value_.reset();
}
void VKTimelineSemaphore::wait(const VKDevice &device, const Value &wait_value)
{
BLI_assert(vk_semaphore_ != VK_NULL_HANDLE);
VkSemaphoreWaitInfo wait_info = {};
wait_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO;
wait_info.semaphoreCount = 1;
wait_info.pSemaphores = &vk_semaphore_;
wait_info.pValues = wait_value;
vkWaitSemaphores(device.device_get(), &wait_info, UINT64_MAX);
last_completed_ = wait_value;
}
VKTimelineSemaphore::Value VKTimelineSemaphore::value_increase()
{
value_.increase();
return value_;
}
VKTimelineSemaphore::Value VKTimelineSemaphore::value_get() const
{
return value_;
}
VKTimelineSemaphore::Value VKTimelineSemaphore::last_completed_value_get() const
{
return last_completed_;
}
} // namespace blender::gpu

View File

@ -0,0 +1,109 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#pragma once
#include "vk_common.hh"
namespace blender::gpu {
class VKDevice;
/**
* A timeline semaphore is a special semaphore type used to synchronize commands and
* resource usage in a time-aware fashion.
*
* Synchronization is a core part of Vulkan, and the timeline semaphore is a utility that
* facilitates its implementation in Blender.
*
* Some resources need to be tracked over time in order to know when to submit, free or
* reuse them. Some use cases are:
*
* - Command buffers can only be reset or freed after they have been executed on the device.
*   While command buffers are still pending execution they may not be reused or freed.
* - Buffers may only be re-uploaded when the device is not currently using them.
*   This CPU/GPU synchronization can be guarded by a timeline semaphore. Barriers cannot be
*   used in this case, as they don't cover CPU synchronization for host-allocated buffers.
*
* Usage:
*
* For each device queue a timeline semaphore should be constructed. Every time a command
* buffer is submitted, the submission waits for the current timeline value to be completed.
* Locally the command buffer can keep track of the timeline value at submission time, so
* `gpuFinish` can be implemented in a context-aware fashion.
*
* #VKTimelineSemaphore::Value can be stored locally. By calling the wait function you can
* ensure that at least the given value has been finished.
*/
class VKTimelineSemaphore {
public:
/**
* VKTimelineSemaphore::Value is used to track the timeline semaphore value.
*/
class Value {
uint64_t value_ = 0;
public:
operator const uint64_t *() const
{
return &value_;
}
bool operator<(const Value &other) const
{
return this->value_ < other.value_;
}
bool operator==(const Value &other) const
{
return this->value_ == other.value_;
}
private:
void reset()
{
value_ = 0;
}
void increase()
{
value_++;
}
friend class VKTimelineSemaphore;
};
private:
VkSemaphore vk_semaphore_ = VK_NULL_HANDLE;
Value value_;
Value last_completed_;
public:
~VKTimelineSemaphore();
void init(const VKDevice &device);
void free(const VKDevice &device);
/**
* Wait for semaphore completion.
*
* Ensures all commands queued before and including the given value have finished.
*/
void wait(const VKDevice &device, const Value &value);
Value value_increase();
Value value_get() const;
Value last_completed_value_get() const;
VkSemaphore vk_handle() const
{
BLI_assert(vk_semaphore_ != VK_NULL_HANDLE);
return vk_semaphore_;
}
};
} // namespace blender::gpu
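A short sketch of the tracking pattern described above, matching how `VKCommandBuffers::submit()` and `finish()` use this class in this commit; `device` is an illustrative `VKDevice` reference:

VKTimelineSemaphore &semaphore = device.timeline_semaphore_get();
VKTimelineSemaphore::Value wait_value = semaphore.value_get();
VKTimelineSemaphore::Value signal_value = semaphore.value_increase();
/* ... vkQueueSubmit() waits on `wait_value` and signals `signal_value` on
 * semaphore.vk_handle() via VkTimelineSemaphoreSubmitInfo ... */
semaphore.wait(device, signal_value); /* Blocks until the GPU reaches the value. */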

View File

@ -10,6 +10,7 @@
#include "vk_context.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_staging_buffer.hh"
#include "vk_state_manager.hh"
namespace blender::gpu {
@ -19,15 +20,30 @@ void VKUniformBuffer::update(const void *data)
if (!buffer_.is_allocated()) {
allocate();
}
buffer_.update(data);
VKContext &context = *VKContext::get();
if (buffer_.is_mapped()) {
buffer_.update(data);
}
else {
VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::HostToDevice);
staging_buffer.host_buffer_get().update(data);
staging_buffer.copy_to_device(context);
}
}
void VKUniformBuffer::allocate()
{
/*
* TODO: make uniform buffers device local. In order to do that we should remove the upload
* during binding, as that will reset the graphics pipeline and already attached resources would
* not be bound anymore.
*/
const bool is_host_visible = true;
buffer_.create(size_in_bytes_,
GPU_USAGE_STATIC,
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT);
VK_BUFFER_USAGE_TRANSFER_DST_BIT,
is_host_visible);
debug::object_label(buffer_.vk_handle(), name_);
}

View File

@ -114,6 +114,15 @@ void VKVertexAttributeObject::bind_buffers(VKContext &context)
}
}
void VKVertexAttributeObject::ensure_vbos_uploaded() const
{
for (VKVertexBuffer *vbo : vbos) {
if (vbo) {
vbo->upload();
}
}
}
/** \} */
/* -------------------------------------------------------------------- */

View File

@ -47,6 +47,14 @@ class VKVertexAttributeObject {
void update_bindings(const VKContext &context, VKBatch &batch);
void update_bindings(VKImmediate &immediate);
/**
* Ensure that all Vertex Buffers are uploaded to the GPU.
*
* This is a separate step, as uploading could flush the graphics pipeline, making the state
* inconsistent.
*/
void ensure_vbos_uploaded() const;
void debug_print() const;
private:

View File

@ -12,6 +12,7 @@
#include "vk_memory.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_staging_buffer.hh"
#include "vk_state_manager.hh"
#include "vk_vertex_buffer.hh"
@ -92,7 +93,14 @@ void VKVertexBuffer::read(void *data) const
{
VKContext &context = *VKContext::get();
context.flush();
buffer_.read(data);
if (buffer_.is_mapped()) {
buffer_.read(data);
return;
}
VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::DeviceToHost);
staging_buffer.copy_from_device(context);
staging_buffer.host_buffer_get().read(data);
}
void VKVertexBuffer::acquire_data()
@ -128,6 +136,25 @@ void VKVertexBuffer::release_data()
MEM_SAFE_FREE(data);
}
void VKVertexBuffer::upload_data_direct(const VKBuffer &host_buffer)
{
device_format_ensure();
if (vertex_format_converter.needs_conversion()) {
vertex_format_converter.convert(host_buffer.mapped_memory_get(), data, vertex_len);
host_buffer.flush();
}
else {
host_buffer.update(data);
}
}
void VKVertexBuffer::upload_data_via_staging_buffer(VKContext &context)
{
VKStagingBuffer staging_buffer(buffer_, VKStagingBuffer::Direction::HostToDevice);
upload_data_direct(staging_buffer.host_buffer_get());
staging_buffer.copy_to_device(context);
}
void VKVertexBuffer::upload_data()
{
if (!buffer_.is_allocated()) {
@ -139,12 +166,12 @@ void VKVertexBuffer::upload_data()
if (flag & GPU_VERTBUF_DATA_DIRTY) {
device_format_ensure();
if (vertex_format_converter.needs_conversion()) {
vertex_format_converter.convert(buffer_.mapped_memory_get(), data, vertex_len);
buffer_.flush();
if (buffer_.is_mapped()) {
upload_data_direct(buffer_);
}
else {
buffer_.update(data);
VKContext &context = *VKContext::get();
upload_data_via_staging_buffer(context);
}
if (usage_ == GPU_USAGE_STATIC) {
MEM_SAFE_FREE(data);
@ -175,10 +202,15 @@ const GPUVertFormat &VKVertexBuffer::device_format_get() const
void VKVertexBuffer::allocate()
{
buffer_.create(size_alloc_get(),
usage_,
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT);
const bool is_host_visible = ELEM(usage_, GPU_USAGE_DYNAMIC, GPU_USAGE_STREAM);
VkBufferUsageFlags vk_buffer_usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
if (!is_host_visible) {
vk_buffer_usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
buffer_.create(size_alloc_get(), usage_, vk_buffer_usage, is_host_visible);
debug::object_label(buffer_.vk_handle(), "VertexBuffer");
}

View File

@ -61,6 +61,9 @@ class VKVertexBuffer : public VertBuf, public VKBindableResource {
private:
void allocate();
void upload_data_direct(const VKBuffer &host_buffer);
void upload_data_via_staging_buffer(VKContext &context);
/* VKTexture requires access to `buffer_` to convert a vertex buffer to a texture. */
friend class VKTexture;
};

View File

@ -269,24 +269,12 @@ GPUTexture *IMB_touch_gpu_texture(const char *name,
GPUTexture *tex;
if (layers > 0) {
tex = GPU_texture_create_2d_array(name,
w,
h,
layers,
9999,
tex_format,
GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
nullptr);
tex = GPU_texture_create_2d_array(
name, w, h, layers, 9999, tex_format, GPU_TEXTURE_USAGE_SHADER_READ, nullptr);
}
else {
tex = GPU_texture_create_2d(name,
w,
h,
9999,
tex_format,
GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
nullptr);
tex = GPU_texture_create_2d(
name, w, h, 9999, tex_format, GPU_TEXTURE_USAGE_SHADER_READ, nullptr);
}
GPU_texture_swizzle_set(tex, imb_gpu_get_swizzle(ibuf));
@ -379,21 +367,13 @@ GPUTexture *IMB_create_gpu_texture(const char *name,
bool freebuf = false;
/* Create Texture. */
tex = GPU_texture_create_2d(name,
UNPACK2(size),
9999,
tex_format,
GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
nullptr);
tex = GPU_texture_create_2d(
name, UNPACK2(size), 9999, tex_format, GPU_TEXTURE_USAGE_SHADER_READ, nullptr);
if (tex == nullptr) {
size[0] = max_ii(1, size[0] / 2);
size[1] = max_ii(1, size[1] / 2);
tex = GPU_texture_create_2d(name,
UNPACK2(size),
9999,
tex_format,
GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
nullptr);
tex = GPU_texture_create_2d(
name, UNPACK2(size), 9999, tex_format, GPU_TEXTURE_USAGE_SHADER_READ, nullptr);
do_rescale = true;
}
BLI_assert(tex != nullptr);

View File

@ -1035,8 +1035,6 @@ typedef enum eInsertKeyFlags {
/* INSERTKEY_FASTR = (1 << 3), */ /* UNUSED */
/** only replace an existing keyframe (this overrides INSERTKEY_NEEDED) */
INSERTKEY_REPLACE = (1 << 4),
/** transform F-Curves should have XYZ->RGB color mode */
INSERTKEY_XYZ2RGB = (1 << 5),
/** ignore user-prefs (needed for predictable API use) */
INSERTKEY_NO_USERPREF = (1 << 6),
/**

View File

@ -273,7 +273,7 @@ DEF_ENUM(rna_enum_nla_mode_extend_items)
DEF_ENUM(rna_enum_nla_mode_blend_items)
DEF_ENUM(rna_enum_keyblock_type_items)
DEF_ENUM(rna_enum_aset_library_type_items)
DEF_ENUM(rna_enum_asset_library_type_items)
#endif

View File

@ -51,12 +51,6 @@ const EnumPropertyItem rna_enum_keying_flag_items[] = {
0,
"Visual Keying",
"Insert keyframes based on 'visual transforms'"},
{INSERTKEY_XYZ2RGB,
"INSERTKEY_XYZ_TO_RGB",
0,
"XYZ=RGB Colors",
"Color for newly added transformation F-Curves (Location, Rotation, Scale) "
"and also Color is based on the transform axis"},
{0, nullptr, 0, nullptr, nullptr},
};
@ -72,12 +66,6 @@ const EnumPropertyItem rna_enum_keying_flag_api_items[] = {
0,
"Visual Keying",
"Insert keyframes based on 'visual transforms'"},
{INSERTKEY_XYZ2RGB,
"INSERTKEY_XYZ_TO_RGB",
0,
"XYZ=RGB Colors",
"Color for newly added transformation F-Curves (Location, Rotation, Scale) "
"and also Color is based on the transform axis"},
{INSERTKEY_REPLACE,
"INSERTKEY_REPLACE",
0,
@ -877,17 +865,6 @@ static void rna_def_common_keying_flags(StructRNA *srna, short reg)
RNA_def_property_flag(prop, PROP_REGISTER_OPTIONAL);
}
prop = RNA_def_property(srna, "use_insertkey_override_xyz_to_rgb", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, nullptr, "keyingoverride", INSERTKEY_XYZ2RGB);
RNA_def_property_ui_text(
prop,
"Override F-Curve Colors - XYZ to RGB",
"Override default setting to set color for newly added transformation F-Curves "
"(Location, Rotation, Scale) to be based on the transform axis");
if (reg) {
RNA_def_property_flag(prop, PROP_REGISTER_OPTIONAL);
}
/* value to override defaults with */
prop = RNA_def_property(srna, "use_insertkey_needed", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, nullptr, "keyingflag", INSERTKEY_NEEDED);
@ -905,16 +882,6 @@ static void rna_def_common_keying_flags(StructRNA *srna, short reg)
if (reg) {
RNA_def_property_flag(prop, PROP_REGISTER_OPTIONAL);
}
prop = RNA_def_property(srna, "use_insertkey_xyz_to_rgb", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, nullptr, "keyingflag", INSERTKEY_XYZ2RGB);
RNA_def_property_ui_text(prop,
"F-Curve Colors - XYZ to RGB",
"Color for newly added transformation F-Curves (Location, Rotation, "
"Scale) is based on the transform axis");
if (reg) {
RNA_def_property_flag(prop, PROP_REGISTER_OPTIONAL);
}
}
/* --- */

View File

@ -19,11 +19,19 @@
#include "rna_internal.h"
const EnumPropertyItem rna_enum_aset_library_type_items[] = {
{ASSET_LIBRARY_LOCAL, "LOCAL", 0, "Local", ""},
{ASSET_LIBRARY_ALL, "ALL", 0, "All", ""},
{ASSET_LIBRARY_ESSENTIALS, "ESSENTIALS", 0, "Essentials", ""},
{ASSET_LIBRARY_CUSTOM, "CUSTOM", 0, "Custom", ""},
const EnumPropertyItem rna_enum_asset_library_type_items[] = {
{ASSET_LIBRARY_ALL, "ALL", 0, "All", "Show assets from all of the listed asset libraries"},
{ASSET_LIBRARY_LOCAL,
"LOCAL",
0,
"Current File",
"Show the assets currently available in this Blender session"},
{ASSET_LIBRARY_ESSENTIALS,
"ESSENTIALS",
0,
"Essentials",
"Show the basic building blocks and utilities coming with Blender"},
{ASSET_LIBRARY_CUSTOM, "CUSTOM", 0, "Custom", "All external 'normal' asset repositories"},
{0, nullptr, 0, nullptr, nullptr},
};
@ -701,12 +709,31 @@ PropertyRNA *rna_def_asset_library_reference_common(StructRNA *srna,
const char *set)
{
PropertyRNA *prop = RNA_def_property(srna, "asset_library_reference", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_items(prop, rna_enum_dummy_NULL_items);
RNA_def_property_enum_items(prop, rna_enum_asset_library_type_items);
RNA_def_property_enum_funcs(prop, get, set, "rna_asset_library_reference_itemf");
return prop;
}
static void rna_def_asset_weak_reference(BlenderRNA *brna)
{
StructRNA *srna;
PropertyRNA *prop;
srna = RNA_def_struct(brna, "AssetWeakReference", nullptr);
RNA_def_struct_ui_text(srna, "Asset Weak Reference", "Weak reference to some asset");
prop = RNA_def_property(srna, "asset_library_type", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_items(prop, rna_enum_asset_library_type_items);
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
prop = RNA_def_property(srna, "asset_library_identifier", PROP_STRING, PROP_NONE);
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
prop = RNA_def_property(srna, "relative_asset_identifier", PROP_STRING, PROP_NONE);
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
}
void RNA_def_asset(BlenderRNA *brna)
{
RNA_define_animate_sdna(false);
@ -717,6 +744,7 @@ void RNA_def_asset(BlenderRNA *brna)
rna_def_asset_handle(brna);
rna_def_asset_representation(brna);
rna_def_asset_catalog_path(brna);
rna_def_asset_weak_reference(brna);
RNA_define_animate_sdna(true);
}

View File

@ -194,15 +194,13 @@ static void rna_NodeTreeInterfaceSocket_init_socket_custom(
PointerRNA ptr = RNA_pointer_create(
id, &RNA_NodeTreeInterfaceSocket, const_cast<bNodeTreeInterfaceSocket *>(interface_socket));
PointerRNA node_ptr = RNA_pointer_create(id, &RNA_Node, node);
PointerRNA socket_ptr = RNA_pointer_create(id, &RNA_NodeSocket, socket);
FunctionRNA *func = &rna_NodeTreeInterfaceSocket_init_socket_func;
ParameterList list;
RNA_parameter_list_create(&list, &ptr, func);
RNA_parameter_set_lookup(&list, "node", &node_ptr);
RNA_parameter_set_lookup(&list, "socket", &socket_ptr);
RNA_parameter_set_lookup(&list, "node", node);
RNA_parameter_set_lookup(&list, "socket", socket);
RNA_parameter_set_lookup(&list, "data_path", &data_path);
typeinfo->ext_interface.call(nullptr, &ptr, func, &list);
@ -230,16 +228,13 @@ static void rna_NodeTreeInterfaceSocket_from_socket_custom(
}
PointerRNA ptr = RNA_pointer_create(id, &RNA_NodeTreeInterfaceSocket, interface_socket);
PointerRNA node_ptr = RNA_pointer_create(id, &RNA_Node, const_cast<bNode *>(node));
PointerRNA socket_ptr = RNA_pointer_create(
id, &RNA_NodeSocket, const_cast<bNodeSocket *>(socket));
FunctionRNA *func = &rna_NodeTreeInterfaceSocket_from_socket_func;
ParameterList list;
RNA_parameter_list_create(&list, &ptr, func);
RNA_parameter_set_lookup(&list, "node", &node_ptr);
RNA_parameter_set_lookup(&list, "socket", &socket_ptr);
RNA_parameter_set_lookup(&list, "node", node);
RNA_parameter_set_lookup(&list, "socket", socket);
typeinfo->ext_interface.call(nullptr, &ptr, func, &list);
RNA_parameter_list_free(&list);

View File

@ -627,6 +627,13 @@ static void rna_def_paint(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "Brush", "Active Brush");
RNA_def_property_update(prop, 0, "rna_Paint_brush_update");
prop = RNA_def_property(srna, "brush_asset_reference", PROP_POINTER, PROP_NONE);
RNA_def_property_clear_flag(prop, PROP_EDITABLE);
RNA_def_property_ui_text(prop,
"Brush Asset Reference",
"A weak reference to the matching brush asset, used e.g. to restore "
"the last used brush on file load");
/* paint_tool_slots */
prop = RNA_def_property(srna, "tool_slots", PROP_COLLECTION, PROP_NONE);
RNA_def_property_collection_sdna(prop, nullptr, "tool_slots", "tool_slots_len");

View File

@ -2384,8 +2384,7 @@ static void radial_control_set_tex(RadialControl *rc)
ibuf->y,
1,
GPU_R8,
GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
GPU_TEXTURE_USAGE_SHADER_READ,
ibuf->float_buffer.data);
GPU_texture_filter_mode(rc->texture, true);