WIP: Basic support for registering asset shelf as a type in BPY #104991

Closed
Julian Eisel wants to merge 73 commits from JulianEisel:temp-asset-shelf-type-bpy into asset-shelf

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
106 changed files with 11798 additions and 385 deletions
Showing only changes of commit d6c4ca72dd

View File

@ -1,9 +1,9 @@
name: Bug Report
about: File a bug report
labels:
- "type::Report"
- "status::Needs Triage"
- "priority::Normal"
- "Type/Report"
- "Status/Needs Triage"
- "Priority/Normal"
body:
- type: markdown
attributes:

View File

@ -1,7 +1,7 @@
name: Design
about: Create a design task (for developers only)
labels:
- "type::Design"
- "Type/Design"
body:
- type: textarea
id: body

View File

@ -1,7 +1,7 @@
name: To Do
about: Create a to do task (for developers only)
labels:
- "type::To Do"
- "Type/To Do"
body:
- type: textarea
id: body

View File

@ -44,13 +44,21 @@ set(OPENVDB_EXTRA_ARGS
# -DLLVM_DIR=${LIBDIR}/llvm/lib/cmake/llvm
)
set(OPENVDB_PATCH ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/openvdb/src/openvdb < ${PATCH_DIR}/openvdb.diff)
if(APPLE)
set(OPENVDB_PATCH
${OPENVDB_PATCH} &&
${PATCH_CMD} -p 0 -d ${BUILD_DIR}/openvdb/src/openvdb < ${PATCH_DIR}/openvdb_metal.diff
)
endif()
ExternalProject_Add(openvdb
URL file://${PACKAGE_DIR}/${OPENVDB_FILE}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
URL_HASH ${OPENVDB_HASH_TYPE}=${OPENVDB_HASH}
CMAKE_GENERATOR ${PLATFORM_ALT_GENERATOR}
PREFIX ${BUILD_DIR}/openvdb
PATCH_COMMAND ${PATCH_CMD} -p 1 -d ${BUILD_DIR}/openvdb/src/openvdb < ${PATCH_DIR}/openvdb.diff
PATCH_COMMAND ${OPENVDB_PATCH}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${LIBDIR}/openvdb ${DEFAULT_CMAKE_FLAGS} ${OPENVDB_EXTRA_ARGS}
INSTALL_DIR ${LIBDIR}/openvdb
)

File diff suppressed because it is too large

View File

@ -55,9 +55,8 @@ void device_metal_info(vector<DeviceInfo> &devices)
info.denoisers = DENOISER_NONE;
info.id = id;
if (MetalInfo::get_device_vendor(device) == METAL_GPU_AMD) {
info.has_light_tree = false;
}
info.has_nanovdb = MetalInfo::get_device_vendor(device) == METAL_GPU_APPLE;
info.has_light_tree = MetalInfo::get_device_vendor(device) != METAL_GPU_AMD;
devices.push_back(info);
device_index++;

View File

@ -67,9 +67,12 @@ class MetalDevice : public Device {
std::recursive_mutex metal_mem_map_mutex;
/* Bindless Textures */
bool is_texture(const TextureInfo &tex);
device_vector<TextureInfo> texture_info;
bool need_texture_info;
id<MTLArgumentEncoder> mtlTextureArgEncoder = nil;
id<MTLArgumentEncoder> mtlBufferArgEncoder = nil;
id<MTLBuffer> buffer_bindings_1d = nil;
id<MTLBuffer> texture_bindings_2d = nil;
id<MTLBuffer> texture_bindings_3d = nil;
std::vector<id<MTLTexture>> texture_slot_map;

View File

@ -91,11 +91,6 @@ MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
}
}
texture_bindings_2d = [mtlDevice newBufferWithLength:4096 options:default_storage_mode];
texture_bindings_3d = [mtlDevice newBufferWithLength:4096 options:default_storage_mode];
stats.mem_alloc(texture_bindings_2d.allocatedSize + texture_bindings_3d.allocatedSize);
switch (device_vendor) {
default:
break;
@ -156,6 +151,16 @@ MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
arg_desc_texture.dataType = MTLDataTypeTexture;
arg_desc_texture.access = MTLArgumentAccessReadOnly;
mtlTextureArgEncoder = [mtlDevice newArgumentEncoderWithArguments:@[ arg_desc_texture ]];
MTLArgumentDescriptor *arg_desc_buffer = [[MTLArgumentDescriptor alloc] init];
arg_desc_buffer.dataType = MTLDataTypePointer;
arg_desc_buffer.access = MTLArgumentAccessReadOnly;
mtlBufferArgEncoder = [mtlDevice newArgumentEncoderWithArguments:@[ arg_desc_buffer ]];
buffer_bindings_1d = [mtlDevice newBufferWithLength:8192 options:default_storage_mode];
texture_bindings_2d = [mtlDevice newBufferWithLength:8192 options:default_storage_mode];
texture_bindings_3d = [mtlDevice newBufferWithLength:8192 options:default_storage_mode];
stats.mem_alloc(buffer_bindings_1d.allocatedSize + texture_bindings_2d.allocatedSize +
texture_bindings_3d.allocatedSize);
/* command queue for non-tracing work on the GPU */
mtlGeneralCommandQueue = [mtlDevice newCommandQueue];
@ -180,6 +185,8 @@ MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
arg_desc_tex.dataType = MTLDataTypePointer;
arg_desc_tex.access = MTLArgumentAccessReadOnly;
arg_desc_tex.index = index++;
[ancillary_desc addObject:[arg_desc_tex copy]]; /* metal_buf_1d */
arg_desc_tex.index = index++;
[ancillary_desc addObject:[arg_desc_tex copy]]; /* metal_tex_2d */
arg_desc_tex.index = index++;
@ -225,11 +232,15 @@ MetalDevice::MetalDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
mtlAncillaryArgEncoder = [mtlDevice newArgumentEncoderWithArguments:ancillary_desc];
// preparing the blas arg encoder
MTLArgumentDescriptor *arg_desc_blas = [[MTLArgumentDescriptor alloc] init];
arg_desc_blas.dataType = MTLDataTypeInstanceAccelerationStructure;
arg_desc_blas.access = MTLArgumentAccessReadOnly;
mtlBlasArgEncoder = [mtlDevice newArgumentEncoderWithArguments:@[ arg_desc_blas ]];
[arg_desc_blas release];
if (@available(macos 11.0, *)) {
if (use_metalrt) {
MTLArgumentDescriptor *arg_desc_blas = [[MTLArgumentDescriptor alloc] init];
arg_desc_blas.dataType = MTLDataTypeInstanceAccelerationStructure;
arg_desc_blas.access = MTLArgumentAccessReadOnly;
mtlBlasArgEncoder = [mtlDevice newArgumentEncoderWithArguments:@[ arg_desc_blas ]];
[arg_desc_blas release];
}
}
for (int i = 0; i < ancillary_desc.count; i++) {
[ancillary_desc[i] release];
@ -249,22 +260,26 @@ MetalDevice::~MetalDevice()
* existing_devices_mutex). */
thread_scoped_lock lock(existing_devices_mutex);
for (auto &tex : texture_slot_map) {
if (tex) {
[tex release];
tex = nil;
int num_resources = texture_info.size();
for (int res = 0; res < num_resources; res++) {
if (is_texture(texture_info[res])) {
[texture_slot_map[res] release];
texture_slot_map[res] = nil;
}
}
flush_delayed_free_list();
if (texture_bindings_2d) {
stats.mem_free(texture_bindings_2d.allocatedSize + texture_bindings_3d.allocatedSize);
stats.mem_free(buffer_bindings_1d.allocatedSize + texture_bindings_2d.allocatedSize +
texture_bindings_3d.allocatedSize);
[buffer_bindings_1d release];
[texture_bindings_2d release];
[texture_bindings_3d release];
}
[mtlTextureArgEncoder release];
[mtlBufferKernelParamsEncoder release];
[mtlBufferArgEncoder release];
[mtlASArgEncoder release];
[mtlAncillaryArgEncoder release];
[mtlGeneralCommandQueue release];
@ -328,6 +343,9 @@ void MetalDevice::make_source(MetalPipelineType pso_type, const uint kernel_feat
break;
case METAL_GPU_APPLE:
global_defines += "#define __KERNEL_METAL_APPLE__\n";
# ifdef WITH_NANOVDB
global_defines += "#define WITH_NANOVDB\n";
# endif
break;
}
@ -542,6 +560,11 @@ void MetalDevice::compile_and_load(int device_id, MetalPipelineType pso_type)
}
}
bool MetalDevice::is_texture(const TextureInfo &tex)
{
return (tex.depth > 0 || tex.height > 0);
}
void MetalDevice::load_texture_info()
{
if (need_texture_info) {
@ -553,21 +576,20 @@ void MetalDevice::load_texture_info()
for (int tex = 0; tex < num_textures; tex++) {
uint64_t offset = tex * sizeof(void *);
id<MTLTexture> metal_texture = texture_slot_map[tex];
if (!metal_texture) {
[mtlTextureArgEncoder setArgumentBuffer:texture_bindings_2d offset:offset];
[mtlTextureArgEncoder setTexture:nil atIndex:0];
[mtlTextureArgEncoder setArgumentBuffer:texture_bindings_3d offset:offset];
[mtlTextureArgEncoder setTexture:nil atIndex:0];
}
else {
if (is_texture(texture_info[tex]) && texture_slot_map[tex]) {
id<MTLTexture> metal_texture = texture_slot_map[tex];
MTLTextureType type = metal_texture.textureType;
[mtlTextureArgEncoder setArgumentBuffer:texture_bindings_2d offset:offset];
[mtlTextureArgEncoder setTexture:type == MTLTextureType2D ? metal_texture : nil atIndex:0];
[mtlTextureArgEncoder setArgumentBuffer:texture_bindings_3d offset:offset];
[mtlTextureArgEncoder setTexture:type == MTLTextureType3D ? metal_texture : nil atIndex:0];
}
else {
[mtlTextureArgEncoder setArgumentBuffer:texture_bindings_2d offset:offset];
[mtlTextureArgEncoder setTexture:nil atIndex:0];
[mtlTextureArgEncoder setArgumentBuffer:texture_bindings_3d offset:offset];
[mtlTextureArgEncoder setTexture:nil atIndex:0];
}
}
if (default_storage_mode == MTLResourceStorageModeManaged) {
[texture_bindings_2d didModifyRange:NSMakeRange(0, num_textures * sizeof(void *))];
@ -740,7 +762,6 @@ void MetalDevice::generic_free(device_memory &mem)
mem.shared_pointer = 0;
/* Free device memory. */
delayed_free_list.push_back(mmem.mtlBuffer);
mmem.mtlBuffer = nil;
}
@ -975,7 +996,7 @@ void MetalDevice::global_free(device_memory &mem)
void MetalDevice::tex_alloc_as_buffer(device_texture &mem)
{
generic_alloc(mem);
MetalDevice::MetalMem *mmem = generic_alloc(mem);
generic_copy_to(mem);
/* Resize once */
@ -984,27 +1005,32 @@ void MetalDevice::tex_alloc_as_buffer(device_texture &mem)
/* Allocate some slots in advance, to reduce amount
* of re-allocations. */
texture_info.resize(round_up(slot + 1, 128));
texture_slot_map.resize(round_up(slot + 1, 128));
}
mem.info.data = (uint64_t)mem.device_pointer;
/* Set Mapping and tag that we need to (re-)upload to device */
texture_info[slot] = mem.info;
uint64_t offset = slot * sizeof(void *);
[mtlBufferArgEncoder setArgumentBuffer:buffer_bindings_1d offset:offset];
[mtlBufferArgEncoder setBuffer:mmem->mtlBuffer offset:0 atIndex:0];
texture_info[slot].data = *(uint64_t *)((uint64_t)buffer_bindings_1d.contents + offset);
texture_slot_map[slot] = nil;
need_texture_info = true;
}
void MetalDevice::tex_alloc(device_texture &mem)
{
/* Check that dimensions fit within maximum allowable size.
* If a 1D texture is allocated, use a 1D buffer.
* See: https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf */
if (mem.data_width > 16384 || mem.data_height > 16384) {
set_error(string_printf(
"Texture exceeds maximum allowed size of 16384 x 16384 (requested: %zu x %zu)",
mem.data_width,
mem.data_height));
return;
if (mem.data_height > 0) {
if (mem.data_width > 16384 || mem.data_height > 16384) {
set_error(string_printf(
"Texture exceeds maximum allowed size of 16384 x 16384 (requested: %zu x %zu)",
mem.data_width,
mem.data_height));
return;
}
}
MTLStorageMode storage_mode = MTLStorageModeManaged;
if (@available(macos 10.15, *)) {
if ([mtlDevice hasUnifiedMemory] &&
@ -1144,8 +1170,9 @@ void MetalDevice::tex_alloc(device_texture &mem)
bytesPerRow:src_pitch];
}
else {
assert(0);
/* 1D texture, using linear memory. */
tex_alloc_as_buffer(mem);
return;
}
mem.device_pointer = (device_ptr)mtlTexture;
@ -1169,17 +1196,22 @@ void MetalDevice::tex_alloc(device_texture &mem)
ssize_t min_buffer_length = sizeof(void *) * texture_info.size();
if (!texture_bindings_2d || (texture_bindings_2d.length < min_buffer_length)) {
if (texture_bindings_2d) {
delayed_free_list.push_back(buffer_bindings_1d);
delayed_free_list.push_back(texture_bindings_2d);
delayed_free_list.push_back(texture_bindings_3d);
stats.mem_free(texture_bindings_2d.allocatedSize + texture_bindings_3d.allocatedSize);
stats.mem_free(buffer_bindings_1d.allocatedSize + texture_bindings_2d.allocatedSize +
texture_bindings_3d.allocatedSize);
}
buffer_bindings_1d = [mtlDevice newBufferWithLength:min_buffer_length
options:default_storage_mode];
texture_bindings_2d = [mtlDevice newBufferWithLength:min_buffer_length
options:default_storage_mode];
texture_bindings_3d = [mtlDevice newBufferWithLength:min_buffer_length
options:default_storage_mode];
stats.mem_alloc(texture_bindings_2d.allocatedSize + texture_bindings_3d.allocatedSize);
stats.mem_alloc(buffer_bindings_1d.allocatedSize + texture_bindings_2d.allocatedSize +
texture_bindings_3d.allocatedSize);
}
}
@ -1206,12 +1238,18 @@ void MetalDevice::tex_alloc(device_texture &mem)
void MetalDevice::tex_free(device_texture &mem)
{
if (mem.data_depth == 0 && mem.data_height == 0) {
generic_free(mem);
return;
}
if (metal_mem_map.count(&mem)) {
std::lock_guard<std::recursive_mutex> lock(metal_mem_map_mutex);
MetalMem &mmem = *metal_mem_map.at(&mem);
assert(texture_slot_map[mem.slot] == mmem.mtlTexture);
texture_slot_map[mem.slot] = nil;
if (texture_slot_map[mem.slot] == mmem.mtlTexture)
texture_slot_map[mem.slot] = nil;
if (mmem.mtlTexture) {
/* Free bindless texture. */

View File

@ -477,17 +477,21 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
[metal_device_->mtlAncillaryArgEncoder setBuffer:metal_device_->texture_bindings_3d
offset:0
atIndex:1];
[metal_device_->mtlAncillaryArgEncoder setBuffer:metal_device_->buffer_bindings_1d
offset:0
atIndex:2];
if (@available(macos 12.0, *)) {
if (metal_device_->use_metalrt) {
if (metal_device_->bvhMetalRT) {
id<MTLAccelerationStructure> accel_struct = metal_device_->bvhMetalRT->accel_struct;
[metal_device_->mtlAncillaryArgEncoder setAccelerationStructure:accel_struct atIndex:2];
[metal_device_->mtlAncillaryArgEncoder setAccelerationStructure:accel_struct atIndex:3];
[metal_device_->mtlAncillaryArgEncoder setBuffer:metal_device_->blas_buffer
offset:0
atIndex:7];
atIndex:8];
[metal_device_->mtlAncillaryArgEncoder setBuffer:metal_device_->blas_lookup_buffer
offset:0
atIndex:8];
atIndex:9];
}
for (int table = 0; table < METALRT_TABLE_NUM; table++) {
@ -497,13 +501,13 @@ bool MetalDeviceQueue::enqueue(DeviceKernel kernel,
atIndex:1];
[metal_device_->mtlAncillaryArgEncoder
setIntersectionFunctionTable:metal_kernel_pso->intersection_func_table[table]
atIndex:3 + table];
atIndex:4 + table];
[mtlComputeCommandEncoder useResource:metal_kernel_pso->intersection_func_table[table]
usage:MTLResourceUsageRead];
}
else {
[metal_device_->mtlAncillaryArgEncoder setIntersectionFunctionTable:nil
atIndex:3 + table];
atIndex:4 + table];
}
}
}
@ -874,6 +878,7 @@ void MetalDeviceQueue::prepare_resources(DeviceKernel kernel)
/* ancillaries */
[mtlComputeEncoder_ useResource:metal_device_->texture_bindings_2d usage:MTLResourceUsageRead];
[mtlComputeEncoder_ useResource:metal_device_->texture_bindings_3d usage:MTLResourceUsageRead];
[mtlComputeEncoder_ useResource:metal_device_->buffer_bindings_1d usage:MTLResourceUsageRead];
}
id<MTLComputeCommandEncoder> MetalDeviceQueue::get_compute_encoder(DeviceKernel kernel)

View File

@ -5,13 +5,14 @@
CCL_NAMESPACE_BEGIN
#ifdef WITH_NANOVDB
# define NDEBUG /* Disable "assert" in device code */
# define NANOVDB_USE_INTRINSICS
# include "nanovdb/NanoVDB.h"
# include "nanovdb/util/SampleFromVoxels.h"
#if !defined __KERNEL_METAL__
# ifdef WITH_NANOVDB
# define NDEBUG /* Disable "assert" in device code */
# define NANOVDB_USE_INTRINSICS
# include "nanovdb/NanoVDB.h"
# include "nanovdb/util/SampleFromVoxels.h"
# endif
#endif
/* w0, w1, w2, and w3 are the four cubic B-spline basis functions. */
ccl_device float cubic_w0(float a)
{
@ -126,7 +127,7 @@ kernel_tex_image_interp_tricubic(ccl_global const TextureInfo &info, float x, fl
#ifdef WITH_NANOVDB
template<typename T, typename S>
ccl_device typename nanovdb::NanoGrid<T>::ValueType kernel_tex_image_interp_tricubic_nanovdb(
S &s, float x, float y, float z)
ccl_private S &s, float x, float y, float z)
{
float px = floorf(x);
float py = floorf(y);
@ -157,13 +158,19 @@ ccl_device typename nanovdb::NanoGrid<T>::ValueType kernel_tex_image_interp_tric
g1y * (g0x * s(Vec3f(x0, y1, z1)) + g1x * s(Vec3f(x1, y1, z1))));
}
# if defined(__KERNEL_METAL__)
template<typename T>
__attribute__((noinline)) typename nanovdb::NanoGrid<T>::ValueType kernel_tex_image_interp_nanovdb(
ccl_global const TextureInfo &info, float x, float y, float z, uint interpolation)
# else
template<typename T>
ccl_device_noinline typename nanovdb::NanoGrid<T>::ValueType kernel_tex_image_interp_nanovdb(
ccl_global const TextureInfo &info, float x, float y, float z, uint interpolation)
# endif
{
using namespace nanovdb;
NanoGrid<T> *const grid = (NanoGrid<T> *)info.data;
ccl_global NanoGrid<T> *const grid = (ccl_global NanoGrid<T> *)info.data;
typedef typename nanovdb::NanoGrid<T>::AccessorType AccessorType;
AccessorType acc = grid->getAccessor();

View File

@ -290,6 +290,10 @@ typedef metal::raytracing::intersector<triangle_data> metalrt_blas_intersector_t
/* texture bindings and sampler setup */
struct Buffer1DParamsMetal {
device float *buf;
};
struct Texture2DParamsMetal {
texture2d<float, access::sample> tex;
};
@ -306,6 +310,7 @@ struct MetalRTBlasWrapper {
struct MetalAncillaries {
device Texture2DParamsMetal *textures_2d;
device Texture3DParamsMetal *textures_3d;
device Buffer1DParamsMetal *buffers;
#ifdef __METALRT__
metalrt_as_type accel_struct;

View File

@ -3,6 +3,13 @@
// clang-format off
#ifdef WITH_NANOVDB
# define NDEBUG /* Disable "assert" in device code */
# define NANOVDB_USE_INTRINSICS
# include "nanovdb/NanoVDB.h"
# include "nanovdb/util/SampleFromVoxels.h"
#endif
/* Open the Metal kernel context class
* Necessary to access resource bindings */
class MetalKernelContext {

View File

@ -1195,24 +1195,89 @@ int GHOST_XrGetControllerModelData(GHOST_XrContextHandle xr_context,
#ifdef WITH_VULKAN_BACKEND
/**
* Return VULKAN handles for the given context.
* Get Vulkan handles for the given context.
*
* These handles are the same for a given context.
Should only be called when using a Vulkan context.
Other contexts will not return any handles and will leave the
handles the parameters refer to unmodified.
*
* \param context: GHOST context handle of a vulkan context to
* get the Vulkan handles from.
* \param r_instance: After calling this function the VkInstance
* referenced by this parameter will contain the VKInstance handle
* of the context associated with the `context` parameter.
* \param r_physical_device: After calling this function the VkPhysicalDevice
* referenced by this parameter will contain the VKPhysicalDevice handle
* of the context associated with the `context` parameter.
* \param r_device: After calling this function the VkDevice
* referenced by this parameter will contain the VKDevice handle
* of the context associated with the `context` parameter.
* \param r_graphic_queue_family: After calling this function the uint32_t
* referenced by this parameter will contain the graphic queue family id
* of the context associated with the `context` parameter.
* \param r_queue: After calling this function the VkQueue
* referenced by this parameter will contain the VKQueue handle
* of the context associated with the `context` parameter.
*/
void GHOST_GetVulkanHandles(GHOST_ContextHandle context,
void *r_instance,
void *r_physical_device,
void *r_device,
uint32_t *r_graphic_queue_family);
uint32_t *r_graphic_queue_family,
void *r_queue);
/**
* Return VULKAN back-buffer resources handles for the given window.
* Return Vulkan command buffer.
*
* Command buffers are different for each image in the swap chain.
* At the start of each frame the correct command buffer should be
* retrieved with this function.
*
Should only be called when using a Vulkan context.
Other contexts will not return any handles and will leave the
handles the parameters refer to unmodified.
*
* \param context: GHOST context handle to a vulkan context to get the
* command queue from.
* \param r_command_buffer: After calling this function the VkCommandBuffer
* referenced by this parameter will contain the VKCommandBuffer handle
of the current back buffer (when swap chains are enabled), or
a general command buffer when no swap chain is in use.
*/
void GHOST_GetVulkanCommandBuffer(GHOST_ContextHandle context, void *r_command_buffer);
/**
* Gets the Vulkan backbuffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the backbuffer will change.
*
Should only be called when using a Vulkan context with an active swap chain.
Other contexts will not return any handles and will leave the
handles the parameters refer to unmodified.
*
* \param windowhandle: GHOST window handle to a window to get the resource from.
* \param r_image: After calling this function the VkImage
* referenced by this parameter will contain the VKImage handle
* of the current back buffer.
* \param r_framebuffer: After calling this function the VkFramebuffer
* referenced by this parameter will contain the VKFramebuffer handle
* of the current back buffer.
* \param r_render_pass: After calling this function the VkRenderPass
* referenced by this parameter will contain the VKRenderPass handle
* of the current back buffer.
* \param r_extent: After calling this function the VkExtent2D
* referenced by this parameter will contain the size of the
* frame buffer and image in pixels.
* \param r_fb_id: After calling this function the uint32_t
* referenced by this parameter will contain the id of the
* framebuffer of the current back buffer.
*/
void GHOST_GetVulkanBackbuffer(GHOST_WindowHandle windowhandle,
void *image,
void *framebuffer,
void *command_buffer,
void *render_pass,
void *extent,
uint32_t *fb_id);
void *r_image,
void *r_framebuffer,
void *r_render_pass,
void *r_extent,
uint32_t *r_fb_id);
#endif
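A minimal caller-side sketch of the updated GHOST Vulkan C API, based on the signatures above. The names `ghost_ctx` and `ghost_window` are assumed to be a valid Vulkan context handle and window handle owned by the caller; error handling is omitted:

/* Context-wide handles: these stay the same for the lifetime of the context. */
VkInstance instance = VK_NULL_HANDLE;
VkPhysicalDevice physical_device = VK_NULL_HANDLE;
VkDevice device = VK_NULL_HANDLE;
uint32_t graphic_queue_family = 0;
VkQueue queue = VK_NULL_HANDLE;
GHOST_GetVulkanHandles(ghost_ctx, &instance, &physical_device, &device, &graphic_queue_family, &queue);

/* Per frame: the command buffer and back-buffer resources change after each swap. */
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
GHOST_GetVulkanCommandBuffer(ghost_ctx, &command_buffer);

VkImage image;
VkFramebuffer framebuffer;
VkRenderPass render_pass;
VkExtent2D extent;
uint32_t fb_id;
GHOST_GetVulkanBackbuffer(ghost_window, &image, &framebuffer, &render_pass, &extent, &fb_id);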

View File

@ -40,19 +40,84 @@ class GHOST_IContext {
virtual unsigned int getDefaultFramebuffer() = 0;
virtual GHOST_TSuccess getVulkanHandles(void *, void *, void *, uint32_t *) = 0;
/**
* Get Vulkan handles for the given context.
*
* These handles are the same for a given context.
Should only be called when using a Vulkan context.
Other contexts will not return any handles and will leave the
handles the parameters refer to unmodified.
*
* \param r_instance: After calling this function the VkInstance
* referenced by this parameter will contain the VKInstance handle
* of the context associated with the `context` parameter.
* \param r_physical_device: After calling this function the VkPhysicalDevice
* referenced by this parameter will contain the VKPhysicalDevice handle
* of the context associated with the `context` parameter.
* \param r_device: After calling this function the VkDevice
* referenced by this parameter will contain the VKDevice handle
* of the context associated with the `context` parameter.
* \param r_graphic_queue_family: After calling this function the uint32_t
* referenced by this parameter will contain the graphic queue family id
* of the context associated with the `context` parameter.
* \param r_queue: After calling this function the VkQueue
* referenced by this parameter will contain the VKQueue handle
* of the context associated with the `context` parameter.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanHandles(void *r_instance,
void *r_physical_device,
void *r_device,
uint32_t *r_graphic_queue_family,
void *r_queue) = 0;
/**
* Gets the Vulkan framebuffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap events as the framebuffer will change.
* \return A boolean success indicator.
* Return Vulkan command buffer.
*
* Command buffers are different for each image in the swap chain.
* At the start of each frame the correct command buffer should be
* retrieved with this function.
*
* \param r_command_buffer: After calling this function the VkCommandBuffer
* referenced by this parameter will contain the VKCommandBuffer handle
of the current back buffer (when swap chains are enabled), or
a general command buffer when no swap chain is in use.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanBackbuffer(void *image,
void *framebuffer,
void *command_buffer,
void *render_pass,
void *extent,
uint32_t *fb_id) = 0;
virtual GHOST_TSuccess getVulkanCommandBuffer(void *r_command_buffer) = 0;
/**
* Gets the Vulkan backbuffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the backbuffer will change.
*
* \param r_image: After calling this function the VkImage
* referenced by this parameter will contain the VKImage handle
* of the current back buffer.
* \param r_framebuffer: After calling this function the VkFramebuffer
* referenced by this parameter will contain the VKFramebuffer handle
* of the current back buffer.
* \param r_render_pass: After calling this function the VkRenderPass
* referenced by this parameter will contain the VKRenderPass handle
* of the current back buffer.
* \param r_extent: After calling this function the VkExtent2D
* referenced by this parameter will contain the size of the
* frame buffer and image in pixels.
* \param r_fb_id: After calling this function the uint32_t
* referenced by this parameter will contain the id of the
* framebuffer of the current back buffer.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanBackbuffer(void *r_image,
void *r_framebuffer,
void *r_render_pass,
void *r_extent,
uint32_t *r_fb_id) = 0;
virtual GHOST_TSuccess swapBuffers() = 0;

View File

@ -217,7 +217,6 @@ class GHOST_IWindow {
*/
virtual GHOST_TSuccess getVulkanBackbuffer(void *image,
void *framebuffer,
void *command_buffer,
void *render_pass,
void *extent,
uint32_t *fb_id) = 0;

View File

@ -1203,22 +1203,29 @@ void GHOST_GetVulkanHandles(GHOST_ContextHandle contexthandle,
void *r_instance,
void *r_physical_device,
void *r_device,
uint32_t *r_graphic_queue_family)
uint32_t *r_graphic_queue_family,
void *r_queue)
{
GHOST_IContext *context = (GHOST_IContext *)contexthandle;
context->getVulkanHandles(r_instance, r_physical_device, r_device, r_graphic_queue_family);
context->getVulkanHandles(
r_instance, r_physical_device, r_device, r_graphic_queue_family, r_queue);
}
void GHOST_GetVulkanCommandBuffer(GHOST_ContextHandle contexthandle, void *r_command_buffer)
{
GHOST_IContext *context = (GHOST_IContext *)contexthandle;
context->getVulkanCommandBuffer(r_command_buffer);
}
void GHOST_GetVulkanBackbuffer(GHOST_WindowHandle windowhandle,
void *image,
void *framebuffer,
void *command_buffer,
void *render_pass,
void *extent,
uint32_t *fb_id)
{
GHOST_IWindow *window = (GHOST_IWindow *)windowhandle;
window->getVulkanBackbuffer(image, framebuffer, command_buffer, render_pass, extent, fb_id);
window->getVulkanBackbuffer(image, framebuffer, render_pass, extent, fb_id);
}
#endif /* WITH_VULKAN */

View File

@ -136,27 +136,88 @@ class GHOST_Context : public GHOST_IContext {
}
/**
* Gets the Vulkan context related resource handles.
* \return A boolean success indicator.
* Get Vulkan handles for the given context.
*
* These handles are the same for a given context.
Should only be called when using a Vulkan context.
Other contexts will not return any handles and will leave the
handles the parameters refer to unmodified.
*
* \param r_instance: After calling this function the VkInstance
* referenced by this parameter will contain the VKInstance handle
* of the context associated with the `context` parameter.
* \param r_physical_device: After calling this function the VkPhysicalDevice
* referenced by this parameter will contain the VKPhysicalDevice handle
* of the context associated with the `context` parameter.
* \param r_device: After calling this function the VkDevice
* referenced by this parameter will contain the VKDevice handle
* of the context associated with the `context` parameter.
* \param r_graphic_queue_family: After calling this function the uint32_t
* referenced by this parameter will contain the graphic queue family id
* of the context associated with the `context` parameter.
* \param r_queue: After calling this function the VkQueue
* referenced by this parameter will contain the VKQueue handle
* of the context associated with the `context` parameter.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanHandles(void * /*r_instance*/,
void * /*r_physical_device*/,
void * /*r_device*/,
uint32_t * /*r_graphic_queue_family*/) override
uint32_t * /*r_graphic_queue_family*/,
void * /*r_queue*/) override
{
return GHOST_kFailure;
};
/**
* Gets the Vulkan framebuffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap events as the framebuffer will change.
* \return A boolean success indicator.
* Return Vulkan command buffer.
*
* Command buffers are different for each image in the swap chain.
* At the start of each frame the correct command buffer should be
* retrieved with this function.
*
* \param r_command_buffer: After calling this function the VkCommandBuffer
* referenced by this parameter will contain the VKCommandBuffer handle
of the current back buffer (when swap chains are enabled), or
a general command buffer when no swap chain is in use.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanBackbuffer(void * /*image*/,
void * /*framebuffer*/,
void * /*command_buffer*/,
void * /*render_pass*/,
void * /*extent*/,
virtual GHOST_TSuccess getVulkanCommandBuffer(void * /*r_command_buffer*/) override
{
return GHOST_kFailure;
};
/**
* Gets the Vulkan backbuffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the backbuffer will change.
*
* \param r_image: After calling this function the VkImage
* referenced by this parameter will contain the VKImage handle
* of the current back buffer.
* \param r_framebuffer: After calling this function the VkFramebuffer
* referenced by this parameter will contain the VKFramebuffer handle
* of the current back buffer.
* \param r_render_pass: After calling this function the VkRenderPass
* referenced by this parameter will contain the VKRenderPass handle
* of the current back buffer.
* \param r_extent: After calling this function the VkExtent2D
* referenced by this parameter will contain the size of the
* frame buffer and image in pixels.
* \param r_fb_id: After calling this function the uint32_t
* referenced by this parameter will contain the id of the
* framebuffer of the current back buffer.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanBackbuffer(void * /*r_image*/,
void * /*r_framebuffer*/,
void * /*r_render_pass*/,
void * /*r_extent*/,
uint32_t * /*fb_id*/) override
{
return GHOST_kFailure;

View File

@ -288,19 +288,14 @@ GHOST_TSuccess GHOST_ContextVK::swapBuffers()
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::getVulkanBackbuffer(void *image,
void *framebuffer,
void *command_buffer,
void *render_pass,
void *extent,
uint32_t *fb_id)
GHOST_TSuccess GHOST_ContextVK::getVulkanBackbuffer(
void *image, void *framebuffer, void *render_pass, void *extent, uint32_t *fb_id)
{
if (m_swapchain == VK_NULL_HANDLE) {
return GHOST_kFailure;
}
*((VkImage *)image) = m_swapchain_images[m_currentImage];
*((VkFramebuffer *)framebuffer) = m_swapchain_framebuffers[m_currentImage];
*((VkCommandBuffer *)command_buffer) = m_command_buffers[m_currentImage];
*((VkRenderPass *)render_pass) = m_render_pass;
*((VkExtent2D *)extent) = m_render_extent;
*fb_id = m_swapchain_id * 10 + m_currentFrame;
@ -311,12 +306,30 @@ GHOST_TSuccess GHOST_ContextVK::getVulkanBackbuffer(void *image,
GHOST_TSuccess GHOST_ContextVK::getVulkanHandles(void *r_instance,
void *r_physical_device,
void *r_device,
uint32_t *r_graphic_queue_family)
uint32_t *r_graphic_queue_family,
void *r_queue)
{
*((VkInstance *)r_instance) = m_instance;
*((VkPhysicalDevice *)r_physical_device) = m_physical_device;
*((VkDevice *)r_device) = m_device;
*r_graphic_queue_family = m_queue_family_graphic;
*((VkQueue *)r_queue) = m_graphic_queue;
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::getVulkanCommandBuffer(void *r_command_buffer)
{
if (m_command_buffers.empty()) {
return GHOST_kFailure;
}
if (m_swapchain == VK_NULL_HANDLE) {
*((VkCommandBuffer *)r_command_buffer) = m_command_buffers[0];
}
else {
*((VkCommandBuffer *)r_command_buffer) = m_command_buffers[m_currentImage];
}
return GHOST_kSuccess;
}
@ -520,6 +533,9 @@ static GHOST_TSuccess getGraphicQueueFamily(VkPhysicalDevice device, uint32_t *r
*r_queue_index = 0;
for (const auto &queue_family : queue_families) {
/* Every Vulkan implementation must by spec have one queue family that supports both graphics
* and compute pipelines. We select this one; a compute-only queue family hints at an async
* compute implementation. */
if ((queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) &&
(queue_family.queueFlags & VK_QUEUE_COMPUTE_BIT)) {
return GHOST_kSuccess;
@ -619,16 +635,36 @@ static GHOST_TSuccess selectPresentMode(VkPhysicalDevice device,
return GHOST_kFailure;
}
GHOST_TSuccess GHOST_ContextVK::createCommandBuffers()
GHOST_TSuccess GHOST_ContextVK::createCommandPools()
{
m_command_buffers.resize(m_swapchain_image_views.size());
VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
poolInfo.queueFamilyIndex = m_queue_family_graphic;
VK_CHECK(vkCreateCommandPool(m_device, &poolInfo, NULL, &m_command_pool));
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::createGraphicsCommandBuffer()
{
assert(m_command_pool != VK_NULL_HANDLE);
assert(m_command_buffers.size() == 0);
m_command_buffers.resize(1);
VkCommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.commandPool = m_command_pool;
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = static_cast<uint32_t>(m_command_buffers.size());
VK_CHECK(vkAllocateCommandBuffers(m_device, &alloc_info, m_command_buffers.data()));
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::createGraphicsCommandBuffers()
{
assert(m_command_pool != VK_NULL_HANDLE);
m_command_buffers.resize(m_swapchain_image_views.size());
VkCommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
@ -637,7 +673,6 @@ GHOST_TSuccess GHOST_ContextVK::createCommandBuffers()
alloc_info.commandBufferCount = static_cast<uint32_t>(m_command_buffers.size());
VK_CHECK(vkAllocateCommandBuffers(m_device, &alloc_info, m_command_buffers.data()));
return GHOST_kSuccess;
}
@ -776,7 +811,7 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
VK_CHECK(vkCreateFence(m_device, &fence_info, NULL, &m_in_flight_fences[i]));
}
createCommandBuffers();
createGraphicsCommandBuffers();
return GHOST_kSuccess;
}
@ -841,6 +876,13 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
extensions_device.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
}
extensions_device.push_back("VK_KHR_dedicated_allocation");
extensions_device.push_back("VK_KHR_get_memory_requirements2");
/* Enable instance extensions required by MoltenVK. */
#ifdef VK_MVK_MOLTENVK_EXTENSION_NAME
requireExtension(
extensions_available, extensions_enabled, "VK_KHR_get_physical_device_properties2");
#endif
VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
@ -903,6 +945,15 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
return GHOST_kFailure;
}
#ifdef VK_MVK_MOLTENVK_EXTENSION_NAME
/* According to the Vulkan specs, when `VK_KHR_portability_subset` is available it should be
* enabled. See
* https://vulkan.lunarg.com/doc/view/1.2.198.1/mac/1.2-extensions/vkspec.html#VUID-VkDeviceCreateInfo-pProperties-04451*/
if (device_extensions_support(m_physical_device, {VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME})) {
extensions_device.push_back(VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME);
}
#endif
vector<VkDeviceQueueCreateInfo> queue_create_infos;
{
@ -962,11 +1013,14 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
vkGetDeviceQueue(m_device, m_queue_family_graphic, 0, &m_graphic_queue);
createCommandPools();
if (use_window_surface) {
vkGetDeviceQueue(m_device, m_queue_family_present, 0, &m_present_queue);
createSwapchain();
}
else {
createGraphicsCommandBuffer();
}
return GHOST_kSuccess;
}

View File

@ -113,18 +113,17 @@ class GHOST_ContextVK : public GHOST_Context {
GHOST_TSuccess getVulkanHandles(void *r_instance,
void *r_physical_device,
void *r_device,
uint32_t *r_graphic_queue_family);
uint32_t *r_graphic_queue_family,
void *r_queue);
GHOST_TSuccess getVulkanCommandBuffer(void *r_command_buffer);
/**
* Gets the Vulkan framebuffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap events as the framebuffer will change.
* \return A boolean success indicator.
*/
GHOST_TSuccess getVulkanBackbuffer(void *image,
void *framebuffer,
void *command_buffer,
void *render_pass,
void *extent,
uint32_t *fb_id);
GHOST_TSuccess getVulkanBackbuffer(
void *image, void *framebuffer, void *render_pass, void *extent, uint32_t *fb_id);
/**
* Sets the swap interval for swapBuffers.
@ -200,6 +199,8 @@ class GHOST_ContextVK : public GHOST_Context {
GHOST_TSuccess pickPhysicalDevice(std::vector<const char *> required_exts);
GHOST_TSuccess createSwapchain();
GHOST_TSuccess destroySwapchain();
GHOST_TSuccess createCommandBuffers();
GHOST_TSuccess createCommandPools();
GHOST_TSuccess createGraphicsCommandBuffers();
GHOST_TSuccess createGraphicsCommandBuffer();
GHOST_TSuccess recordCommandBuffers();
};

View File

@ -109,13 +109,12 @@ uint GHOST_Window::getDefaultFramebuffer()
GHOST_TSuccess GHOST_Window::getVulkanBackbuffer(void *image,
void *framebuffer,
void *command_buffer,
void *render_pass,
void *extent,
uint32_t *fb_id)
{
return m_context->getVulkanBackbuffer(
image, framebuffer, command_buffer, render_pass, extent, fb_id);
image, framebuffer, render_pass, extent, fb_id);
}
GHOST_TSuccess GHOST_Window::activateDrawingContext()

View File

@ -274,12 +274,8 @@ class GHOST_Window : public GHOST_IWindow {
* Needs to be called after each swap events as the framebuffer will change.
* \return A boolean success indicator.
*/
virtual GHOST_TSuccess getVulkanBackbuffer(void *image,
void *framebuffer,
void *command_buffer,
void *render_pass,
void *extent,
uint32_t *fb_id) override;
virtual GHOST_TSuccess getVulkanBackbuffer(
void *image, void *framebuffer, void *render_pass, void *extent, uint32_t *fb_id) override;
/**
* Returns the window user data.

View File

@ -214,6 +214,13 @@ std::string AssetLibraryService::root_path_from_library_ref(
if (ELEM(library_reference.type, ASSET_LIBRARY_ALL, ASSET_LIBRARY_LOCAL)) {
return "";
}
if (ELEM(library_reference.type, ASSET_LIBRARY_ESSENTIALS)) {
return essentials_directory_path();
}
bUserAssetLibrary *custom_library = find_custom_asset_library_from_library_ref(
library_reference);

View File

@ -98,6 +98,7 @@ class NodeMultiFunctionBuilder;
class GeoNodeExecParams;
class NodeDeclaration;
class NodeDeclarationBuilder;
class GatherAddNodeSearchParams;
class GatherLinkSearchOpParams;
} // namespace nodes
namespace realtime_compositor {
@ -122,6 +123,10 @@ using SocketGetGeometryNodesCPPValueFunction = void (*)(const struct bNodeSocket
using NodeGatherSocketLinkOperationsFunction =
void (*)(blender::nodes::GatherLinkSearchOpParams &params);
/* Adds node add menu operations that are specific to this node type. */
using NodeGatherAddOperationsFunction =
void (*)(blender::nodes::GatherAddNodeSearchParams &params);
using NodeGetCompositorOperationFunction = blender::realtime_compositor::NodeOperation
*(*)(blender::realtime_compositor::Context &context, blender::nodes::DNode node);
using NodeGetCompositorShaderNodeFunction =
@ -135,6 +140,7 @@ typedef void *NodeGeometryExecFunction;
typedef void *NodeDeclareFunction;
typedef void *NodeDeclareDynamicFunction;
typedef void *NodeGatherSocketLinkOperationsFunction;
typedef void *NodeGatherAddOperationsFunction;
typedef void *SocketGetCPPTypeFunction;
typedef void *SocketGetGeometryNodesCPPTypeFunction;
typedef void *SocketGetGeometryNodesCPPValueFunction;
@ -353,6 +359,13 @@ typedef struct bNodeType {
*/
NodeGatherSocketLinkOperationsFunction gather_link_search_ops;
/**
* Add to the list of search items gathered by the add-node search. The default behavior of
* adding a single item with the node name is usually enough, but node types can have any number
* of custom search items.
*/
NodeGatherAddOperationsFunction gather_add_node_search_ops;
/** True when the node cannot be muted. */
bool no_muting;

View File

@ -263,6 +263,8 @@ static void brush_blend_write(BlendWriter *writer, ID *id, const void *id_addres
if (brush->gradient) {
BLO_write_struct(writer, ColorBand, brush->gradient);
}
BKE_previewimg_blend_write(writer, brush->preview);
}
static void brush_blend_read_data(BlendDataReader *reader, ID *id)
@ -348,7 +350,9 @@ static void brush_blend_read_data(BlendDataReader *reader, ID *id)
}
}
brush->preview = nullptr;
BLO_read_data_address(reader, &brush->preview);
BKE_previewimg_blend_read(reader, brush->preview);
brush->icon_imbuf = nullptr;
}

View File

@ -504,8 +504,9 @@ void _va_mul_m3_series_4(float r[3][3],
const float m2[3][3],
const float m3[3][3])
{
mul_m3_m3m3(r, m1, m2);
mul_m3_m3m3(r, r, m3);
float s[3][3];
mul_m3_m3m3(s, m1, m2);
mul_m3_m3m3(r, s, m3);
}
void _va_mul_m3_series_5(float r[3][3],
const float m1[3][3],
@ -513,9 +514,11 @@ void _va_mul_m3_series_5(float r[3][3],
const float m3[3][3],
const float m4[3][3])
{
mul_m3_m3m3(r, m1, m2);
mul_m3_m3m3(r, r, m3);
mul_m3_m3m3(r, r, m4);
float s[3][3];
float t[3][3];
mul_m3_m3m3(s, m1, m2);
mul_m3_m3m3(t, s, m3);
mul_m3_m3m3(r, t, m4);
}
void _va_mul_m3_series_6(float r[3][3],
const float m1[3][3],
@ -524,10 +527,12 @@ void _va_mul_m3_series_6(float r[3][3],
const float m4[3][3],
const float m5[3][3])
{
mul_m3_m3m3(r, m1, m2);
mul_m3_m3m3(r, r, m3);
mul_m3_m3m3(r, r, m4);
mul_m3_m3m3(r, r, m5);
float s[3][3];
float t[3][3];
mul_m3_m3m3(s, m1, m2);
mul_m3_m3m3(t, s, m3);
mul_m3_m3m3(s, t, m4);
mul_m3_m3m3(r, s, m5);
}
void _va_mul_m3_series_7(float r[3][3],
const float m1[3][3],
@ -537,11 +542,13 @@ void _va_mul_m3_series_7(float r[3][3],
const float m5[3][3],
const float m6[3][3])
{
mul_m3_m3m3(r, m1, m2);
mul_m3_m3m3(r, r, m3);
mul_m3_m3m3(r, r, m4);
mul_m3_m3m3(r, r, m5);
mul_m3_m3m3(r, r, m6);
float s[3][3];
float t[3][3];
mul_m3_m3m3(s, m1, m2);
mul_m3_m3m3(t, s, m3);
mul_m3_m3m3(s, t, m4);
mul_m3_m3m3(t, s, m5);
mul_m3_m3m3(r, t, m6);
}
void _va_mul_m3_series_8(float r[3][3],
const float m1[3][3],
@ -552,12 +559,14 @@ void _va_mul_m3_series_8(float r[3][3],
const float m6[3][3],
const float m7[3][3])
{
mul_m3_m3m3(r, m1, m2);
mul_m3_m3m3(r, r, m3);
mul_m3_m3m3(r, r, m4);
mul_m3_m3m3(r, r, m5);
mul_m3_m3m3(r, r, m6);
mul_m3_m3m3(r, r, m7);
float s[3][3];
float t[3][3];
mul_m3_m3m3(s, m1, m2);
mul_m3_m3m3(t, s, m3);
mul_m3_m3m3(s, t, m4);
mul_m3_m3m3(t, s, m5);
mul_m3_m3m3(s, t, m6);
mul_m3_m3m3(r, s, m7);
}
void _va_mul_m3_series_9(float r[3][3],
const float m1[3][3],
@ -569,13 +578,15 @@ void _va_mul_m3_series_9(float r[3][3],
const float m7[3][3],
const float m8[3][3])
{
mul_m3_m3m3(r, m1, m2);
mul_m3_m3m3(r, r, m3);
mul_m3_m3m3(r, r, m4);
mul_m3_m3m3(r, r, m5);
mul_m3_m3m3(r, r, m6);
mul_m3_m3m3(r, r, m7);
mul_m3_m3m3(r, r, m8);
float s[3][3];
float t[3][3];
mul_m3_m3m3(s, m1, m2);
mul_m3_m3m3(t, s, m3);
mul_m3_m3m3(s, t, m4);
mul_m3_m3m3(t, s, m5);
mul_m3_m3m3(s, t, m6);
mul_m3_m3m3(t, s, m7);
mul_m3_m3m3(r, t, m8);
}
/** \} */
@ -593,8 +604,9 @@ void _va_mul_m4_series_4(float r[4][4],
const float m2[4][4],
const float m3[4][4])
{
mul_m4_m4m4(r, m1, m2);
mul_m4_m4m4(r, r, m3);
float s[4][4];
mul_m4_m4m4(s, m1, m2);
mul_m4_m4m4(r, s, m3);
}
void _va_mul_m4_series_5(float r[4][4],
const float m1[4][4],
@ -602,9 +614,11 @@ void _va_mul_m4_series_5(float r[4][4],
const float m3[4][4],
const float m4[4][4])
{
mul_m4_m4m4(r, m1, m2);
mul_m4_m4m4(r, r, m3);
mul_m4_m4m4(r, r, m4);
float s[4][4];
float t[4][4];
mul_m4_m4m4(s, m1, m2);
mul_m4_m4m4(t, s, m3);
mul_m4_m4m4(r, t, m4);
}
void _va_mul_m4_series_6(float r[4][4],
const float m1[4][4],
@ -613,10 +627,12 @@ void _va_mul_m4_series_6(float r[4][4],
const float m4[4][4],
const float m5[4][4])
{
mul_m4_m4m4(r, m1, m2);
mul_m4_m4m4(r, r, m3);
mul_m4_m4m4(r, r, m4);
mul_m4_m4m4(r, r, m5);
float s[4][4];
float t[4][4];
mul_m4_m4m4(s, m1, m2);
mul_m4_m4m4(t, s, m3);
mul_m4_m4m4(s, t, m4);
mul_m4_m4m4(r, s, m5);
}
void _va_mul_m4_series_7(float r[4][4],
const float m1[4][4],
@ -626,11 +642,13 @@ void _va_mul_m4_series_7(float r[4][4],
const float m5[4][4],
const float m6[4][4])
{
mul_m4_m4m4(r, m1, m2);
mul_m4_m4m4(r, r, m3);
mul_m4_m4m4(r, r, m4);
mul_m4_m4m4(r, r, m5);
mul_m4_m4m4(r, r, m6);
float s[4][4];
float t[4][4];
mul_m4_m4m4(s, m1, m2);
mul_m4_m4m4(t, s, m3);
mul_m4_m4m4(s, t, m4);
mul_m4_m4m4(t, s, m5);
mul_m4_m4m4(r, t, m6);
}
void _va_mul_m4_series_8(float r[4][4],
const float m1[4][4],
@ -641,12 +659,14 @@ void _va_mul_m4_series_8(float r[4][4],
const float m6[4][4],
const float m7[4][4])
{
mul_m4_m4m4(r, m1, m2);
mul_m4_m4m4(r, r, m3);
mul_m4_m4m4(r, r, m4);
mul_m4_m4m4(r, r, m5);
mul_m4_m4m4(r, r, m6);
mul_m4_m4m4(r, r, m7);
float s[4][4];
float t[4][4];
mul_m4_m4m4(s, m1, m2);
mul_m4_m4m4(t, s, m3);
mul_m4_m4m4(s, t, m4);
mul_m4_m4m4(t, s, m5);
mul_m4_m4m4(s, t, m6);
mul_m4_m4m4(r, s, m7);
}
void _va_mul_m4_series_9(float r[4][4],
const float m1[4][4],
@ -658,13 +678,15 @@ void _va_mul_m4_series_9(float r[4][4],
const float m7[4][4],
const float m8[4][4])
{
mul_m4_m4m4(r, m1, m2);
mul_m4_m4m4(r, r, m3);
mul_m4_m4m4(r, r, m4);
mul_m4_m4m4(r, r, m5);
mul_m4_m4m4(r, r, m6);
mul_m4_m4m4(r, r, m7);
mul_m4_m4m4(r, r, m8);
float s[4][4];
float t[4][4];
mul_m4_m4m4(s, m1, m2);
mul_m4_m4m4(t, s, m3);
mul_m4_m4m4(s, t, m4);
mul_m4_m4m4(t, s, m5);
mul_m4_m4m4(s, t, m6);
mul_m4_m4m4(t, s, m7);
mul_m4_m4m4(r, t, m8);
}
/** \} */

View File

@ -15,7 +15,6 @@
#include "BLI_path_util.h"
#include "BLI_string.h"
/* Extended file attribute used by OneDrive to mark placeholder files. */
static const char *ONEDRIVE_RECALLONOPEN_ATTRIBUTE = "com.microsoft.OneDrive.RecallOnOpen";
@ -188,7 +187,8 @@ const char *BLI_expand_tilde(const char *path_with_tilde)
return path_expanded;
}
char *BLI_current_working_dir(char *dir, const size_t maxncpy) {
char *BLI_current_working_dir(char *dir, const size_t maxncpy)
{
/* Can't just copy to the *dir pointer, as [path getCString] gets grumpy. */
static char path_expanded[PATH_MAX];
@autoreleasepool {
@ -200,10 +200,11 @@ char *BLI_current_working_dir(char *dir, const size_t maxncpy) {
}
}
bool BLI_change_working_dir(const char* dir) {
bool BLI_change_working_dir(const char *dir)
{
@autoreleasepool {
NSString* path = [[NSString alloc] initWithUTF8String: dir];
if ([[NSFileManager defaultManager] changeCurrentDirectoryPath: path] == YES) {
NSString *path = [[NSString alloc] initWithUTF8String:dir];
if ([[NSFileManager defaultManager] changeCurrentDirectoryPath:path] == YES) {
return false;
}
else {

View File

@ -100,6 +100,40 @@ TEST(math_matrix, interp_m3_m3m3_singularity)
EXPECT_M3_NEAR(result, expect, 1e-5);
}
TEST(math_matrix, mul_m3_series)
{
float matrix[3][3] = {
{2.0f, 0.0f, 0.0f},
{0.0f, 3.0f, 0.0f},
{0.0f, 0.0f, 5.0f},
};
mul_m3_series(matrix, matrix, matrix, matrix);
float expect[3][3] = {
{8.0f, 0.0f, 0.0f},
{0.0f, 27.0f, 0.0f},
{0.0f, 0.0f, 125.0f},
};
EXPECT_M3_NEAR(matrix, expect, 1e-5);
}
TEST(math_matrix, mul_m4_series)
{
float matrix[4][4] = {
{2.0f, 0.0f, 0.0f, 0.0f},
{0.0f, 3.0f, 0.0f, 0.0f},
{0.0f, 0.0f, 5.0f, 0.0f},
{0.0f, 0.0f, 0.0f, 7.0f},
};
mul_m4_series(matrix, matrix, matrix, matrix);
float expect[4][4] = {
{8.0f, 0.0f, 0.0f, 0.0f},
{0.0f, 27.0f, 0.0f, 0.0f},
{0.0f, 0.0f, 125.0f, 0.0f},
{0.0f, 0.0f, 0.0f, 343.0f},
};
EXPECT_M4_NEAR(matrix, expect, 1e-5);
}
namespace blender::tests {
using namespace blender::math;

View File

@ -126,7 +126,7 @@ const char *BLT_translate_do_new_dataname(const char *msgctxt, const char *msgid
#define BLT_I18NCONTEXT_EDITOR_VIEW3D "View3D"
#define BLT_I18NCONTEXT_EDITOR_FILEBROWSER "File browser"
/* Generic contexts. */
/* Generic contexts. */
#define BLT_I18NCONTEXT_VIRTUAL_REALITY "Virtual reality"
#define BLT_I18NCONTEXT_CONSTRAINT "Constraint"
@ -194,7 +194,7 @@ typedef struct {
BLT_I18NCONTEXTS_ITEM(BLT_I18NCONTEXT_ID_WINDOWMANAGER, "id_windowmanager"), \
BLT_I18NCONTEXTS_ITEM(BLT_I18NCONTEXT_EDITOR_VIEW3D, "editor_view3d"), \
BLT_I18NCONTEXTS_ITEM(BLT_I18NCONTEXT_EDITOR_FILEBROWSER, "editor_filebrowser"), \
BLT_I18NCONTEXTS_ITEM(BLT_I18NCONTEXT_VIRTUAL_REALITY, "virtual_reality"), \
BLT_I18NCONTEXTS_ITEM(BLT_I18NCONTEXT_VIRTUAL_REALITY, "virtual_reality"), \
BLT_I18NCONTEXTS_ITEM(BLT_I18NCONTEXT_CONSTRAINT, "constraint"), \
{ \
NULL, NULL, NULL \

View File

@ -1191,7 +1191,7 @@ void BM_mesh_bm_to_me(Main *bmain, BMesh *bm, Mesh *me, const struct BMeshToMesh
need_edgesel |= BM_ELEM_CD_GET_BOOL(l, edgesel_offset);
}
}
if (pin_layer_index) {
if (pin_layer_index >= 0) {
BM_ITER_ELEM (l, &liter, f, BM_LOOPS_OF_FACE) {
need_pin |= BM_ELEM_CD_GET_BOOL(l, pin_offset);
}

View File

@ -3,7 +3,36 @@
#include "BKE_curves.hh"
#include "BKE_geometry_fields.hh"
#include "BLI_task.hh"
#include "DNA_object_types.h"
#include "ED_curves.h"
#include "ED_transverts.h"
namespace blender::ed::curves {
void transverts_from_curves_positions_create(bke::CurvesGeometry &curves, TransVertStore *tvs)
{
Vector<int64_t> selected_indices;
IndexMask selection = retrieve_selected_points(curves, selected_indices);
MutableSpan<float3> positions = curves.positions_for_write();
tvs->transverts = static_cast<TransVert *>(
MEM_calloc_arrayN(selection.size(), sizeof(TransVert), __func__));
tvs->transverts_tot = selection.size();
threading::parallel_for(selection.index_range(), 1024, [&](const IndexRange selection_range) {
for (const int point_i : selection_range) {
TransVert &tv = tvs->transverts[point_i];
tv.loc = positions[selection[point_i]];
tv.flag = SELECT;
copy_v3_v3(tv.oldloc, tv.loc);
}
});
}
} // namespace blender::ed::curves
float (*ED_curves_point_normals_array_create(const Curves *curves_id))[3]
{
@ -21,3 +50,10 @@ float (*ED_curves_point_normals_array_create(const Curves *curves_id))[3]
return reinterpret_cast<float(*)[3]>(data);
}
void ED_curves_transverts_create(Curves *curves_id, TransVertStore *tvs)
{
using namespace blender;
bke::CurvesGeometry &curves = curves_id->geometry.wrap();
ed::curves::transverts_from_curves_positions_create(curves, tvs);
}
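A hedged usage sketch of the new wrapper: `curves_id` is assumed to be a `Curves *` in edit mode and `snap_location` a hypothetical target position, both provided by the caller. The `TransVert` elements alias the curves' positions, so writing through `loc` moves the selected points, mirroring how consumers of `ED_transverts_create_from_obedit` operate:

TransVertStore tvs = {};
ED_curves_transverts_create(curves_id, &tvs);
for (int i = 0; i < tvs.transverts_tot; i++) {
  TransVert *tv = &tvs.transverts[i];
  /* `tv->loc` points into the curves' position array; `tv->oldloc` keeps the original value. */
  copy_v3_v3(tv->loc, snap_location);
}
/* The array was allocated with MEM_calloc_arrayN, so it is freed with MEM_freeN. */
MEM_freeN(tvs.transverts);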

View File

@ -123,7 +123,7 @@ void fill_selection_true(GMutableSpan selection)
}
}
static bool contains(const VArray<bool> &varray, const bool value)
static bool contains(const VArray<bool> &varray, const IndexRange range_to_check, const bool value)
{
const CommonVArrayInfo info = varray.common_info();
if (info.type == CommonVArrayInfo::Type::Single) {
@ -132,7 +132,7 @@ static bool contains(const VArray<bool> &varray, const bool value)
if (info.type == CommonVArrayInfo::Type::Span) {
const Span<bool> span(static_cast<const bool *>(info.data), varray.size());
return threading::parallel_reduce(
span.index_range(),
range_to_check,
4096,
false,
[&](const IndexRange range, const bool init) {
@ -141,7 +141,7 @@ static bool contains(const VArray<bool> &varray, const bool value)
[&](const bool a, const bool b) { return a || b; });
}
return threading::parallel_reduce(
varray.index_range(),
range_to_check,
2048,
false,
[&](const IndexRange range, const bool init) {
@ -159,10 +159,15 @@ static bool contains(const VArray<bool> &varray, const bool value)
[&](const bool a, const bool b) { return a || b; });
}
bool has_anything_selected(const VArray<bool> &varray, const IndexRange range_to_check)
{
return contains(varray, range_to_check, true);
}
bool has_anything_selected(const bke::CurvesGeometry &curves)
{
const VArray<bool> selection = curves.attributes().lookup<bool>(".selection");
return !selection || contains(selection, true);
return !selection || contains(selection, curves.curves_range(), true);
}
bool has_anything_selected(const GSpan selection)
@ -581,7 +586,7 @@ static bool find_closest_curve_to_screen_co(const Depsgraph &depsgraph,
return b;
});
if (closest_data.index > 0) {
if (closest_data.index >= 0) {
return true;
}

View File

@ -12,6 +12,7 @@ struct UndoType;
struct SelectPick_Params;
struct ViewContext;
struct rcti;
struct TransVertStore;
#ifdef __cplusplus
extern "C" {
@ -32,6 +33,11 @@ void ED_keymap_curves(struct wmKeyConfig *keyconf);
*/
float (*ED_curves_point_normals_array_create(const struct Curves *curves_id))[3];
/**
* Wrapper for `transverts_from_curves_positions_create`.
*/
void ED_curves_transverts_create(struct Curves *curves_id, struct TransVertStore *tvs);
/** \} */
#ifdef __cplusplus
@ -56,6 +62,13 @@ bke::CurvesGeometry primitive_random_sphere(int curves_size, int points_per_curv
VectorSet<Curves *> get_unique_editable_curves(const bContext &C);
void ensure_surface_deformation_node_exists(bContext &C, Object &curves_ob);
/**
* Allocate an array of `TransVert` for cursor/selection snapping (see
* `ED_transverts_create_from_obedit` in `view3d_snap.c`).
* \note The `TransVert` elements in \a tvs write into the positions of \a curves.
*/
void transverts_from_curves_positions_create(bke::CurvesGeometry &curves, TransVertStore *tvs);
/* -------------------------------------------------------------------- */
/** \name Poll Functions
* \{ */
@ -93,6 +106,7 @@ bool has_anything_selected(const bke::CurvesGeometry &curves);
* Return true if any element in the span is selected, on either domain with either type.
*/
bool has_anything_selected(GSpan selection);
bool has_anything_selected(const VArray<bool> &varray, IndexRange range_to_check);
/**
* Find curves that have any point selected (a selection factor greater than zero),

View File

@ -164,7 +164,7 @@ DEF_ICON(NLA)
DEF_ICON(PREFERENCES)
DEF_ICON(TIME)
DEF_ICON(NODETREE)
DEF_ICON(GEOMETRY_NODES)
DEF_ICON_MODIFIER(GEOMETRY_NODES)
DEF_ICON(CONSOLE)
DEF_ICON_BLANK(183)
DEF_ICON(TRACKER)

View File

@ -195,7 +195,7 @@ static bool SCULPT_automasking_needs_factors_cache(const Sculpt *sd, const Brush
const int automasking_flags = sculpt_automasking_mode_effective_bits(sd, brush);
if (automasking_flags & BRUSH_AUTOMASKING_TOPOLOGY &&
if (automasking_flags & BRUSH_AUTOMASKING_TOPOLOGY && brush &&
sculpt_automasking_is_constrained_by_radius(brush)) {
return true;
}

View File

@ -796,6 +796,40 @@ static void sculpt_mesh_filter_end(bContext *C, wmOperator * /*op*/)
SCULPT_flush_update_done(C, ob, SCULPT_UPDATE_COORDS);
}
static void sculpt_mesh_filter_cancel(bContext *C, wmOperator *op)
{
Object *ob = CTX_data_active_object(C);
SculptSession *ss = ob->sculpt;
PBVHNode **nodes;
int nodes_num;
if (!ss || !ss->pbvh) {
return;
}
/* Gather all PBVH leaf nodes. */
BKE_pbvh_search_gather(ss->pbvh, nullptr, nullptr, &nodes, &nodes_num);
for (int i : IndexRange(nodes_num)) {
PBVHNode *node = nodes[i];
PBVHVertexIter vd;
SculptOrigVertData orig_data;
SCULPT_orig_vert_data_init(&orig_data, ob, nodes[i], SCULPT_UNDO_COORDS);
BKE_pbvh_vertex_iter_begin (ss->pbvh, node, vd, PBVH_ITER_UNIQUE) {
SCULPT_orig_vert_data_update(&orig_data, &vd);
copy_v3_v3(vd.co, orig_data.co);
}
BKE_pbvh_vertex_iter_end;
BKE_pbvh_node_mark_update(node);
}
BKE_pbvh_update_bounds(ss->pbvh, PBVH_UpdateBB);
}
static int sculpt_mesh_filter_modal(bContext *C, wmOperator *op, const wmEvent *event)
{
Object *ob = CTX_data_active_object(C);

View File

@ -61,7 +61,8 @@ void draw_channel_names(bContext *C, bAnimContext *ac, ARegion *region)
items = ANIM_animdata_filter(ac, &anim_data, filter, ac->data, ac->datatype);
const int height = ANIM_UI_get_channels_total_height(v2d, items);
v2d->tot.ymin = -height;
const float pad_bottom = BLI_listbase_is_empty(ac->markers) ? 0 : UI_MARKER_MARGIN_Y;
v2d->tot.ymin = -(height + pad_bottom);
/* need to do a view-sync here, so that the keys area doesn't jump around (it must copy this) */
UI_view2d_sync(NULL, ac->area, v2d, V2D_LOCK_COPY);
@ -195,7 +196,8 @@ void draw_channel_strips(bAnimContext *ac, SpaceAction *saction, ARegion *region
size_t items = ANIM_animdata_filter(ac, &anim_data, filter, ac->data, ac->datatype);
const int height = ANIM_UI_get_channels_total_height(v2d, items);
v2d->tot.ymin = -height;
const float pad_bottom = BLI_listbase_is_empty(ac->markers) ? 0 : UI_MARKER_MARGIN_Y;
v2d->tot.ymin = -(height + pad_bottom);
/* Draw the manual frame ranges for actions in the background of the dopesheet.
* The action editor has already drawn the range for its action so it's not needed. */

View File

@ -26,6 +26,8 @@
#include "WM_api.h"
#include "NOD_add_node_search.hh"
#include "ED_asset.h"
#include "ED_node.h"
@ -36,12 +38,9 @@ struct bContext;
namespace blender::ed::space_node {
struct AddNodeItem {
std::string ui_name;
nodes::AddNodeInfo info;
std::string identifier;
std::string description;
std::optional<AssetHandle> asset;
std::function<void(const bContext &, bNodeTree &, bNode &)> after_add_fn;
int weight = 0;
};
struct AddNodeSearchStorage {
@ -77,11 +76,11 @@ static void search_items_for_asset_metadata(const bNodeTree &node_tree,
}
AddNodeItem item{};
item.ui_name = ED_asset_handle_get_name(&asset);
item.info.ui_name = ED_asset_handle_get_name(&asset);
item.identifier = node_tree.typeinfo->group_idname;
item.description = asset_data.description == nullptr ? "" : asset_data.description;
item.info.description = asset_data.description == nullptr ? "" : asset_data.description;
item.asset = asset;
item.after_add_fn = [asset](const bContext &C, bNodeTree &node_tree, bNode &node) {
item.info.after_add_fn = [asset](const bContext &C, bNodeTree &node_tree, bNode &node) {
Main &bmain = *CTX_data_main(&C);
node.flag &= ~NODE_OPTIONS;
node.id = asset::get_local_id_from_asset_or_append_and_reuse(bmain, asset);
@ -139,9 +138,9 @@ static void gather_search_items_for_node_groups(const bContext &C,
continue;
}
AddNodeItem item{};
item.ui_name = node_group->id.name + 2;
item.info.ui_name = node_group->id.name + 2;
item.identifier = node_tree.typeinfo->group_idname;
item.after_add_fn = [node_group](const bContext &C, bNodeTree &node_tree, bNode &node) {
item.info.after_add_fn = [node_group](const bContext &C, bNodeTree &node_tree, bNode &node) {
Main &bmain = *CTX_data_main(&C);
node.id = &node_group->id;
id_us_plus(node.id);
@ -161,19 +160,18 @@ static void gather_add_node_operations(const bContext &C,
if (!(node_type->poll && node_type->poll(node_type, &node_tree, &disabled_hint))) {
continue;
}
if (StringRefNull(node_tree.typeinfo->group_idname) == node_type->idname) {
/* Skip the empty group type. */
if (!node_type->gather_add_node_search_ops) {
continue;
}
if (StringRefNull(node_type->ui_name).endswith("(Legacy)")) {
continue;
Vector<nodes::AddNodeInfo> info_items;
nodes::GatherAddNodeSearchParams params(*node_type, node_tree, info_items);
node_type->gather_add_node_search_ops(params);
for (nodes::AddNodeInfo &info : info_items) {
AddNodeItem item{};
item.info = std::move(info);
item.identifier = node_type->idname;
r_search_items.append(item);
}
AddNodeItem item{};
item.ui_name = IFACE_(node_type->ui_name);
item.identifier = node_type->idname;
item.description = TIP_(node_type->ui_description);
r_search_items.append(std::move(item));
}
NODE_TYPES_END;
@ -199,7 +197,7 @@ static void add_node_search_update_fn(
StringSearch *search = BLI_string_search_new();
for (AddNodeItem &item : storage.search_add_items) {
BLI_string_search_add(search, item.ui_name.c_str(), &item, item.weight);
BLI_string_search_add(search, item.info.ui_name.c_str(), &item, item.info.weight);
}
/* Don't filter when the menu is first opened, but still run the search
@ -210,7 +208,7 @@ static void add_node_search_update_fn(
for (const int i : IndexRange(filtered_amount)) {
AddNodeItem &item = *filtered_items[i];
if (!UI_search_item_add(items, item.ui_name.c_str(), &item, ICON_NONE, 0, 0)) {
if (!UI_search_item_add(items, item.info.ui_name.c_str(), &item, ICON_NONE, 0, 0)) {
break;
}
}
@ -234,8 +232,8 @@ static void add_node_search_exec_fn(bContext *C, void *arg1, void *arg2)
bNode *new_node = nodeAddNode(C, &node_tree, item->identifier.c_str());
BLI_assert(new_node != nullptr);
if (item->after_add_fn) {
item->after_add_fn(*C, node_tree, *new_node);
if (item->info.after_add_fn) {
item->info.after_add_fn(*C, node_tree, *new_node);
}
new_node->locx = storage.cursor.x / UI_DPI_FAC;
@ -266,7 +264,7 @@ static ARegion *add_node_search_tooltip_fn(
uiSearchItemTooltipData tooltip_data{};
BLI_strncpy(tooltip_data.description,
item->asset ? item->description.c_str() : TIP_(item->description.c_str()),
item->asset ? item->info.description.c_str() : TIP_(item->info.description.c_str()),
sizeof(tooltip_data.description));
return UI_tooltip_create_from_search_item_generic(C, region, item_rect, &tooltip_data);

View File

@ -253,11 +253,20 @@ static void gather_search_link_ops_for_all_assets(const bContext &C,
C, node_tree, socket, library_ref, true, search_link_ops);
}
AssetLibraryReference library_ref{};
library_ref.custom_library_index = -1;
library_ref.type = ASSET_LIBRARY_LOCAL;
gather_search_link_ops_for_asset_library(
C, node_tree, socket, library_ref, false, search_link_ops);
{
AssetLibraryReference library_ref{};
library_ref.custom_library_index = -1;
library_ref.type = ASSET_LIBRARY_ESSENTIALS;
gather_search_link_ops_for_asset_library(
C, node_tree, socket, library_ref, true, search_link_ops);
}
{
AssetLibraryReference library_ref{};
library_ref.custom_library_index = -1;
library_ref.type = ASSET_LIBRARY_LOCAL;
gather_search_link_ops_for_asset_library(
C, node_tree, socket, library_ref, false, search_link_ops);
}
}
/**

View File

@ -758,8 +758,8 @@ static void init_proportional_edit(TransInfo *t)
else if (t->data_type == &TransConvertType_MeshUV && t->flag & T_PROP_CONNECTED) {
/* Already calculated by uv_set_connectivity_distance. */
}
else if (t->data_type == &TransConvertType_Curve) {
BLI_assert(t->obedit_type == OB_CURVES_LEGACY);
else if (ELEM(t->data_type, &TransConvertType_Curve, &TransConvertType_Curves)) {
BLI_assert(t->obedit_type == OB_CURVES_LEGACY || t->obedit_type == OB_CURVES);
set_prop_dist(t, false);
}
else {

View File

@ -6,6 +6,7 @@
#include "BLI_array.hh"
#include "BLI_index_mask_ops.hh"
#include "BLI_inplace_priority_queue.hh"
#include "BLI_span.hh"
#include "BKE_curves.hh"
@ -23,11 +24,46 @@
namespace blender::ed::transform::curves {
static void calculate_curve_point_distances_for_proportional_editing(
const Span<float3> positions, MutableSpan<float> r_distances)
{
Array<bool, 32> visited(positions.size(), false);
InplacePriorityQueue<float, std::less<float>> queue(r_distances);
while (!queue.is_empty()) {
int64_t index = queue.pop_index();
if (visited[index]) {
continue;
}
visited[index] = true;
/* TODO(Falk): Handle cyclic curves here. */
if (index > 0 && !visited[index - 1]) {
int adjacent = index - 1;
float dist = r_distances[index] + math::distance(positions[index], positions[adjacent]);
if (dist < r_distances[adjacent]) {
r_distances[adjacent] = dist;
queue.priority_changed(adjacent);
}
}
if (index < positions.size() - 1 && !visited[index + 1]) {
int adjacent = index + 1;
float dist = r_distances[index] + math::distance(positions[index], positions[adjacent]);
if (dist < r_distances[adjacent]) {
r_distances[adjacent] = dist;
queue.priority_changed(adjacent);
}
}
}
}
static void createTransCurvesVerts(bContext * /*C*/, TransInfo *t)
{
MutableSpan<TransDataContainer> trans_data_contrainers(t->data_container, t->data_container_len);
Array<Vector<int64_t>> selected_indices_per_object(t->data_container_len);
Array<IndexMask> selection_per_object(t->data_container_len);
const bool use_proportional_edit = (t->flag & T_PROP_EDIT_ALL) != 0;
const bool use_connected_only = (t->flag & T_PROP_CONNECTED) != 0;
/* Count selected elements per object and create TransData structs. */
for (const int i : trans_data_contrainers.index_range()) {
@ -35,10 +71,15 @@ static void createTransCurvesVerts(bContext * /*C*/, TransInfo *t)
Curves *curves_id = static_cast<Curves *>(tc.obedit->data);
bke::CurvesGeometry &curves = curves_id->geometry.wrap();
selection_per_object[i] = ed::curves::retrieve_selected_points(curves,
selected_indices_per_object[i]);
if (use_proportional_edit) {
tc.data_len = curves.point_num;
}
else {
selection_per_object[i] = ed::curves::retrieve_selected_points(
curves, selected_indices_per_object[i]);
tc.data_len = selection_per_object[i].size();
}
tc.data_len = selection_per_object[i].size();
if (tc.data_len > 0) {
tc.data = MEM_cnew_array<TransData>(tc.data_len, __func__);
}
@ -52,34 +93,92 @@ static void createTransCurvesVerts(bContext * /*C*/, TransInfo *t)
}
Curves *curves_id = static_cast<Curves *>(tc.obedit->data);
bke::CurvesGeometry &curves = curves_id->geometry.wrap();
IndexMask selected_indices = selection_per_object[i];
float mtx[3][3], smtx[3][3];
copy_m3_m4(mtx, tc.obedit->object_to_world);
pseudoinverse_m3_m3(smtx, mtx, PSEUDOINVERSE_EPSILON);
MutableSpan<float3> positions = curves.positions_for_write();
threading::parallel_for(selected_indices.index_range(), 1024, [&](const IndexRange range) {
for (const int selection_i : range) {
TransData *td = &tc.data[selection_i];
float *elem = reinterpret_cast<float *>(&positions[selected_indices[selection_i]]);
copy_v3_v3(td->iloc, elem);
copy_v3_v3(td->center, td->iloc);
td->loc = elem;
if (use_proportional_edit) {
const OffsetIndices<int> points_by_curve = curves.points_by_curve();
const VArray<bool> selection = curves.attributes().lookup_or_default<bool>(
".selection", ATTR_DOMAIN_POINT, true);
threading::parallel_for(curves.curves_range(), 512, [&](const IndexRange range) {
Vector<float> closest_distances;
for (const int curve_i : range) {
const IndexRange points = points_by_curve[curve_i];
const bool has_any_selected = ed::curves::has_anything_selected(selection, points);
if (!has_any_selected) {
for (const int point_i : points) {
TransData &td = tc.data[point_i];
td.flag |= TD_NOTCONNECTED;
td.dist = FLT_MAX;
}
if (use_connected_only) {
continue;
}
}
td->flag = TD_SELECTED;
td->ext = nullptr;
closest_distances.reinitialize(points.size());
closest_distances.fill(std::numeric_limits<float>::max());
copy_m3_m3(td->smtx, smtx);
copy_m3_m3(td->mtx, mtx);
}
});
for (const int i : IndexRange(points.size())) {
const int point_i = points[i];
TransData &td = tc.data[point_i];
float3 *elem = &positions[point_i];
copy_v3_v3(td.iloc, *elem);
copy_v3_v3(td.center, td.iloc);
td.loc = *elem;
td.flag = 0;
if (selection[point_i]) {
closest_distances[i] = 0.0f;
td.flag = TD_SELECTED;
}
td.ext = nullptr;
copy_m3_m3(td.smtx, smtx);
copy_m3_m3(td.mtx, mtx);
}
if (use_connected_only) {
calculate_curve_point_distances_for_proportional_editing(
positions.slice(points), closest_distances.as_mutable_span());
for (const int i : IndexRange(points.size())) {
TransData &td = tc.data[points[i]];
td.dist = closest_distances[i];
}
}
}
});
}
else {
const IndexMask selected_indices = selection_per_object[i];
threading::parallel_for(selected_indices.index_range(), 1024, [&](const IndexRange range) {
for (const int selection_i : range) {
TransData *td = &tc.data[selection_i];
float3 *elem = &positions[selected_indices[selection_i]];
copy_v3_v3(td->iloc, *elem);
copy_v3_v3(td->center, td->iloc);
td->loc = *elem;
td->flag = TD_SELECTED;
td->ext = nullptr;
copy_m3_m3(td->smtx, smtx);
copy_m3_m3(td->mtx, mtx);
}
});
}
}
}
static void recalcData_curves(TransInfo *t)
{
Span<TransDataContainer> trans_data_contrainers(t->data_container, t->data_container_len);
const Span<TransDataContainer> trans_data_contrainers(t->data_container, t->data_container_len);
for (const TransDataContainer &tc : trans_data_contrainers) {
Curves *curves_id = static_cast<Curves *>(tc.obedit->data);
bke::CurvesGeometry &curves = curves_id->geometry.wrap();
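The distance propagation in calculate_curve_point_distances_for_proportional_editing above is essentially Dijkstra's algorithm restricted to the chain of points of one curve: selected points start at distance zero and distances grow outward along the segment lengths. A minimal standalone sketch of the same idea, assuming 1D positions and a plain std::priority_queue in place of InplacePriorityQueue (all names are illustrative, not part of the patch):

#include <cmath>
#include <cstdio>
#include <queue>
#include <vector>

struct QueueItem {
  float dist;
  int index;
  bool operator>(const QueueItem &other) const
  {
    return dist > other.dist;
  }
};

/* Propagate distances from points that start at 0.0 (the "selected" ones) to their
 * neighbors along the chain, always expanding the closest unvisited point first. */
static void propagate_distances(const std::vector<float> &positions, std::vector<float> &r_distances)
{
  std::vector<bool> visited(positions.size(), false);
  std::priority_queue<QueueItem, std::vector<QueueItem>, std::greater<QueueItem>> queue;
  for (int i = 0; i < int(positions.size()); i++) {
    queue.push({r_distances[i], i});
  }
  while (!queue.empty()) {
    const int index = queue.top().index;
    queue.pop();
    if (visited[index]) {
      continue; /* Stale entry, a shorter distance was already processed. */
    }
    visited[index] = true;
    for (const int adjacent : {index - 1, index + 1}) {
      if (adjacent < 0 || adjacent >= int(positions.size()) || visited[adjacent]) {
        continue;
      }
      const float dist = r_distances[index] + std::abs(positions[index] - positions[adjacent]);
      if (dist < r_distances[adjacent]) {
        r_distances[adjacent] = dist;
        queue.push({dist, adjacent});
      }
    }
  }
}

int main()
{
  /* Five points on a line; only the middle point (index 2) is selected. */
  const std::vector<float> positions = {0.0f, 1.0f, 2.0f, 4.0f, 7.0f};
  std::vector<float> distances(positions.size(), 1e30f);
  distances[2] = 0.0f;
  propagate_distances(positions, distances);
  for (const float d : distances) {
    printf("%.1f\n", d); /* Prints 2.0, 1.0, 0.0, 2.0, 5.0. */
  }
  return 0;
}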

View File

@ -9,6 +9,7 @@
#include "DNA_armature_types.h"
#include "DNA_curve_types.h"
#include "DNA_curves_types.h"
#include "DNA_lattice_types.h"
#include "DNA_meta_types.h"
#include "DNA_object_types.h"
@ -30,6 +31,7 @@
#include "DEG_depsgraph.h"
#include "ED_armature.h"
#include "ED_curves.h"
#include "ED_transverts.h" /* own include */
@ -181,8 +183,14 @@ static void set_mapped_co(void *vuserdata, int index, const float co[3], const f
bool ED_transverts_check_obedit(const Object *obedit)
{
return (
ELEM(obedit->type, OB_ARMATURE, OB_LATTICE, OB_MESH, OB_SURF, OB_CURVES_LEGACY, OB_MBALL));
return (ELEM(obedit->type,
OB_ARMATURE,
OB_LATTICE,
OB_MESH,
OB_SURF,
OB_CURVES_LEGACY,
OB_MBALL,
OB_CURVES));
}
void ED_transverts_create_from_obedit(TransVertStore *tvs, const Object *obedit, const int mode)
@ -481,6 +489,10 @@ void ED_transverts_create_from_obedit(TransVertStore *tvs, const Object *obedit,
bp++;
}
}
else if (obedit->type == OB_CURVES) {
Curves *curves_id = obedit->data;
ED_curves_transverts_create(curves_id, tvs);
}
if (!tvs->transverts_tot && tvs->transverts) {
/* Prevent memory leak. happens for curves/lattices due to

View File

@ -17,6 +17,7 @@
#include "DNA_gpencil_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_object_types.h"
#include "DNA_scene_types.h"
#include "DNA_screen_types.h"
#include "BKE_gpencil.h"
@ -42,6 +43,7 @@
#include "MOD_gpencil_util.h"
#include "DEG_depsgraph.h"
#include "DEG_depsgraph_query.h"
#include "WM_api.h"
@ -254,11 +256,11 @@ static bool isDisabled(GpencilModifierData *md, int UNUSED(userRenderParams))
/* Generic "generateStrokes" callback */
static void generateStrokes(GpencilModifierData *md, Depsgraph *depsgraph, Object *ob)
{
Scene *scene = DEG_get_evaluated_scene(depsgraph);
bGPdata *gpd = ob->data;
LISTBASE_FOREACH (bGPDlayer *, gpl, &gpd->layers) {
BKE_gpencil_frame_active_set(depsgraph, gpd);
bGPDframe *gpf = gpl->actframe;
bGPDframe *gpf = BKE_gpencil_frame_retime_get(depsgraph, scene, ob, gpl);
if (gpf == NULL) {
continue;
}

View File

@ -191,16 +191,24 @@ set(OPENGL_SRC
set(VULKAN_SRC
vulkan/vk_backend.cc
vulkan/vk_batch.cc
vulkan/vk_buffer.cc
vulkan/vk_context.cc
vulkan/vk_command_buffer.cc
vulkan/vk_common.cc
vulkan/vk_descriptor_pools.cc
vulkan/vk_descriptor_set.cc
vulkan/vk_drawlist.cc
vulkan/vk_fence.cc
vulkan/vk_framebuffer.cc
vulkan/vk_index_buffer.cc
vulkan/vk_pipeline.cc
vulkan/vk_memory.cc
vulkan/vk_pixel_buffer.cc
vulkan/vk_query.cc
vulkan/vk_shader.cc
vulkan/vk_shader_interface.cc
vulkan/vk_shader_log.cc
vulkan/vk_state_manager.cc
vulkan/vk_storage_buffer.cc
vulkan/vk_texture.cc
vulkan/vk_uniform_buffer.cc
@ -208,16 +216,24 @@ set(VULKAN_SRC
vulkan/vk_backend.hh
vulkan/vk_batch.hh
vulkan/vk_buffer.hh
vulkan/vk_context.hh
vulkan/vk_command_buffer.hh
vulkan/vk_common.hh
vulkan/vk_descriptor_pools.hh
vulkan/vk_descriptor_set.hh
vulkan/vk_drawlist.hh
vulkan/vk_fence.hh
vulkan/vk_framebuffer.hh
vulkan/vk_index_buffer.hh
vulkan/vk_pipeline.hh
vulkan/vk_memory.hh
vulkan/vk_pixel_buffer.hh
vulkan/vk_query.hh
vulkan/vk_shader.hh
vulkan/vk_shader_interface.hh
vulkan/vk_shader_log.hh
vulkan/vk_state_manager.hh
vulkan/vk_storage_buffer.hh
vulkan/vk_texture.hh
vulkan/vk_uniform_buffer.hh
@ -510,6 +526,7 @@ set(GLSL_SRC_TEST
tests/shaders/gpu_compute_1d_test.glsl
tests/shaders/gpu_compute_2d_test.glsl
tests/shaders/gpu_compute_ibo_test.glsl
tests/shaders/gpu_compute_ssbo_test.glsl
tests/shaders/gpu_compute_vbo_test.glsl
tests/shaders/gpu_compute_dummy_test.glsl
)
@ -787,6 +804,7 @@ if(WITH_GTESTS)
tests/gpu_index_buffer_test.cc
tests/gpu_shader_builtin_test.cc
tests/gpu_shader_test.cc
tests/gpu_storage_buffer_test.cc
tests/gpu_testing.hh
)

View File

@ -26,6 +26,14 @@ namespace blender::gpu {
typedef struct ShaderInput {
uint32_t name_offset;
uint32_t name_hash;
/**
* Location is an OpenGL legacy concept and its legacy usages should be phased out in Blender 3.7.
*
* The Vulkan backend uses the location to encode the descriptor set binding. This binding is
* different from the binding stored in the `binding` attribute. In Vulkan the binding inside a
* descriptor set must be unique. In the future the location will also be used to select the
* right descriptor set.
*/
int32_t location;
/** Defined at interface creation or in shader. Only for Samplers, UBOs and Vertex Attributes. */
int32_t binding;
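Since `location` here carries the descriptor set binding used by the Vulkan backend while `binding` keeps the value defined at interface creation or in the shader, the two values can differ for the same resource. The sketch below shows one possible way a set index and a binding could later be packed into the single `location` integer once multiple descriptor sets are supported; the packing scheme and the helper names are assumptions for illustration, not the actual backend code.

#include <cstdint>
#include <cstdio>

struct DescriptorLocation {
  uint32_t set;     /* Descriptor set index. */
  uint32_t binding; /* Binding inside that descriptor set (unique per set). */
};

/* Hypothetical packing: the top 8 bits select the descriptor set, the rest the binding. */
static int32_t location_encode(const DescriptorLocation loc)
{
  return int32_t((loc.set << 24) | (loc.binding & 0xFFFFFF));
}

static DescriptorLocation location_decode(const int32_t location)
{
  DescriptorLocation loc;
  loc.set = uint32_t(location) >> 24;
  loc.binding = uint32_t(location) & 0xFFFFFF;
  return loc;
}

int main()
{
  const int32_t location = location_encode({1, 5});
  const DescriptorLocation loc = location_decode(location);
  printf("set=%u binding=%u\n", loc.set, loc.binding); /* Prints "set=1 binding=5". */
  return 0;
}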

View File

@ -14,6 +14,8 @@ struct GPUStorageBuf;
namespace blender {
namespace gpu {
class VertBuf;
#ifdef DEBUG
# define DEBUG_NAME_LEN 64
#else

View File

@ -42,6 +42,12 @@ GPU_SHADER_CREATE_INFO(gpu_compute_vbo_test)
.compute_source("gpu_compute_vbo_test.glsl")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(gpu_compute_ssbo_test)
.local_group_size(1)
.storage_buf(0, Qualifier::WRITE, "int", "data_out[]")
.compute_source("gpu_compute_ssbo_test.glsl")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(gpu_compute_ssbo_binding_test)
.local_group_size(1)
.storage_buf(0, Qualifier::WRITE, "int", "data0[]")

View File

@ -212,7 +212,7 @@ GPU_TEST(gpu_shader_compute_ibo)
static void test_gpu_shader_compute_ssbo()
{
if (!GPU_compute_shader_support()) {
if (!GPU_compute_shader_support() && !GPU_shader_storage_buffer_objects_support()) {
/* We can't test because the platform does not support compute shaders. */
std::cout << "Skipping compute shader test: platform not supported";
return;
@ -221,14 +221,14 @@ static void test_gpu_shader_compute_ssbo()
static constexpr uint SIZE = 128;
/* Build compute shader. */
GPUShader *shader = GPU_shader_create_from_info_name("gpu_compute_ibo_test");
GPUShader *shader = GPU_shader_create_from_info_name("gpu_compute_ssbo_test");
EXPECT_NE(shader, nullptr);
GPU_shader_bind(shader);
/* Construct SSBO. */
GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
SIZE * sizeof(uint32_t), nullptr, GPU_USAGE_DEVICE_ONLY, __func__);
GPU_storagebuf_bind(ssbo, GPU_shader_get_ssbo_binding(shader, "out_indices"));
GPU_storagebuf_bind(ssbo, GPU_shader_get_ssbo_binding(shader, "data_out"));
/* Dispatch compute task. */
GPU_compute_dispatch(shader, SIZE, 1, 1);
@ -240,7 +240,7 @@ static void test_gpu_shader_compute_ssbo()
uint32_t data[SIZE];
GPU_storagebuf_read(ssbo, data);
for (int index = 0; index < SIZE; index++) {
uint32_t expected = index;
uint32_t expected = index * 4;
EXPECT_EQ(data[index], expected);
}

View File

@ -0,0 +1,50 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "testing/testing.h"
#include "GPU_storage_buffer.h"
#include "BLI_vector.hh"
#include "gpu_testing.hh"
namespace blender::gpu::tests {
constexpr size_t SIZE = 128;
constexpr size_t SIZE_IN_BYTES = SIZE * sizeof(int);
static Vector<int32_t> test_data()
{
Vector<int32_t> data;
for (int i : IndexRange(SIZE)) {
data.append(i);
}
return data;
}
static void test_gpu_storage_buffer_create_update_read()
{
GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
EXPECT_NE(ssbo, nullptr);
/* Upload some dummy data. */
const Vector<int32_t> data = test_data();
GPU_storagebuf_update(ssbo, data.data());
/* Read back data from SSBO. */
Vector<int32_t> read_data;
read_data.resize(SIZE, 0);
GPU_storagebuf_read(ssbo, read_data.data());
/* Check if the data is the same. */
for (int i : IndexRange(SIZE)) {
EXPECT_EQ(data[i], read_data[i]);
}
GPU_storagebuf_free(ssbo);
}
GPU_TEST(gpu_storage_buffer_create_update_read);
} // namespace blender::gpu::tests

View File

@ -15,18 +15,21 @@ namespace blender::gpu {
void GPUTest::SetUp()
{
GPU_backend_type_selection_set(gpu_backend_type);
GHOST_GLSettings glSettings = {0};
GHOST_GLSettings glSettings = {};
glSettings.context_type = draw_context_type;
glSettings.flags = GHOST_glDebugContext;
CLG_init();
ghost_system = GHOST_CreateSystem();
ghost_context = GHOST_CreateOpenGLContext(ghost_system, glSettings);
GHOST_ActivateOpenGLContext(ghost_context);
context = GPU_context_create(nullptr, ghost_context);
GPU_init();
GPU_context_begin_frame(context);
}
void GPUTest::TearDown()
{
GPU_context_end_frame(context);
GPU_exit();
GPU_context_discard(context);
GHOST_DisposeOpenGLContext(ghost_system, ghost_context);

View File

@ -0,0 +1,5 @@
void main()
{
int store_index = int(gl_GlobalInvocationID.x);
data_out[store_index] = store_index * 4;
}

View File

@ -60,8 +60,17 @@ void VKBackend::samplers_update()
{
}
void VKBackend::compute_dispatch(int /*groups_x_len*/, int /*groups_y_len*/, int /*groups_z_len*/)
void VKBackend::compute_dispatch(int groups_x_len, int groups_y_len, int groups_z_len)
{
VKContext &context = *VKContext::get();
VKShader *shader = static_cast<VKShader *>(context.shader);
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKPipeline &pipeline = shader->pipeline_get();
VKDescriptorSet &descriptor_set = pipeline.descriptor_set_get();
descriptor_set.update(context.device_get());
command_buffer.bind(
descriptor_set, shader->vk_pipeline_layout_get(), VK_PIPELINE_BIND_POINT_COMPUTE);
command_buffer.dispatch(groups_x_len, groups_y_len, groups_z_len);
}
void VKBackend::compute_dispatch_indirect(StorageBuf * /*indirect_buf*/)
@ -123,9 +132,9 @@ UniformBuf *VKBackend::uniformbuf_alloc(int size, const char *name)
return new VKUniformBuffer(size, name);
}
StorageBuf *VKBackend::storagebuf_alloc(int size, GPUUsageType /*usage*/, const char *name)
StorageBuf *VKBackend::storagebuf_alloc(int size, GPUUsageType usage, const char *name)
{
return new VKStorageBuffer(size, name);
return new VKStorageBuffer(size, usage, name);
}
VertBuf *VKBackend::vertbuf_alloc()

View File

@ -9,11 +9,8 @@
#include "gpu_backend.hh"
#ifdef __APPLE__
# include <MoltenVK/vk_mvk_moltenvk.h>
#else
# include <vulkan/vulkan.h>
#endif
#include "vk_common.hh"
#include "shaderc/shaderc.hpp"
namespace blender::gpu {

View File

@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_buffer.hh"
namespace blender::gpu {
VKBuffer::~VKBuffer()
{
VKContext &context = *VKContext::get();
free(context);
}
bool VKBuffer::is_allocated() const
{
return allocation_ != VK_NULL_HANDLE;
}
static VmaAllocationCreateFlagBits vma_allocation_flags(GPUUsageType usage)
{
switch (usage) {
case GPU_USAGE_STATIC:
return static_cast<VmaAllocationCreateFlagBits>(
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT);
case GPU_USAGE_DYNAMIC:
return static_cast<VmaAllocationCreateFlagBits>(
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT);
case GPU_USAGE_DEVICE_ONLY:
return static_cast<VmaAllocationCreateFlagBits>(
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT);
case GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY:
case GPU_USAGE_STREAM:
break;
}
BLI_assert_msg(false, "Unimplemented GPUUsageType");
return static_cast<VmaAllocationCreateFlagBits>(VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT);
}
bool VKBuffer::create(VKContext &context,
int64_t size_in_bytes,
GPUUsageType usage,
VkBufferUsageFlagBits buffer_usage)
{
BLI_assert(!is_allocated());
size_in_bytes_ = size_in_bytes;
VmaAllocator allocator = context.mem_allocator_get();
VkBufferCreateInfo create_info = {};
create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
create_info.flags = 0;
create_info.size = size_in_bytes;
create_info.usage = buffer_usage;
/* We use the same command queue for the compute and graphics pipeline, so it is safe to use
* exclusive resource handling. */
create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
create_info.queueFamilyIndexCount = 1;
create_info.pQueueFamilyIndices = context.queue_family_ptr_get();
VmaAllocationCreateInfo vma_create_info = {};
vma_create_info.flags = vma_allocation_flags(usage);
vma_create_info.priority = 1.0f;
vma_create_info.usage = VMA_MEMORY_USAGE_AUTO;
VkResult result = vmaCreateBuffer(
allocator, &create_info, &vma_create_info, &vk_buffer_, &allocation_, nullptr);
return result == VK_SUCCESS;
}
bool VKBuffer::update(VKContext &context, const void *data)
{
void *mapped_memory;
bool result = map(context, &mapped_memory);
if (result) {
memcpy(mapped_memory, data, size_in_bytes_);
unmap(context);
}
return result;
}
bool VKBuffer::map(VKContext &context, void **r_mapped_memory) const
{
VmaAllocator allocator = context.mem_allocator_get();
VkResult result = vmaMapMemory(allocator, allocation_, r_mapped_memory);
return result == VK_SUCCESS;
}
void VKBuffer::unmap(VKContext &context) const
{
VmaAllocator allocator = context.mem_allocator_get();
vmaUnmapMemory(allocator, allocation_);
}
bool VKBuffer::free(VKContext &context)
{
VmaAllocator allocator = context.mem_allocator_get();
vmaDestroyBuffer(allocator, vk_buffer_, allocation_);
return true;
}
} // namespace blender::gpu

View File

@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "gpu_context_private.hh"
#include "vk_common.hh"
#include "vk_context.hh"
#include "vk_mem_alloc.h"
namespace blender::gpu {
/**
* Class for handling Vulkan buffers (allocation/updating/binding).
*/
class VKBuffer {
int64_t size_in_bytes_;
VkBuffer vk_buffer_ = VK_NULL_HANDLE;
VmaAllocation allocation_ = VK_NULL_HANDLE;
public:
VKBuffer() = default;
virtual ~VKBuffer();
/** Has this buffer been allocated? */
bool is_allocated() const;
bool create(VKContext &context,
int64_t size,
GPUUsageType usage,
VkBufferUsageFlagBits buffer_usage);
bool update(VKContext &context, const void *data);
bool free(VKContext &context);
bool map(VKContext &context, void **r_mapped_memory) const;
void unmap(VKContext &context) const;
int64_t size_in_bytes() const
{
return size_in_bytes_;
}
VkBuffer vk_handle() const
{
return vk_buffer_;
}
};
} // namespace blender::gpu

View File

@ -0,0 +1,144 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_command_buffer.hh"
#include "vk_buffer.hh"
#include "vk_context.hh"
#include "vk_memory.hh"
#include "vk_texture.hh"
#include "BLI_assert.h"
namespace blender::gpu {
VKCommandBuffer::~VKCommandBuffer()
{
if (vk_device_ != VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
vkDestroyFence(vk_device_, vk_fence_, vk_allocation_callbacks);
vk_fence_ = VK_NULL_HANDLE;
}
}
void VKCommandBuffer::init(const VkDevice vk_device,
const VkQueue vk_queue,
VkCommandBuffer vk_command_buffer)
{
vk_device_ = vk_device;
vk_queue_ = vk_queue;
vk_command_buffer_ = vk_command_buffer;
if (vk_fence_ == VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
VkFenceCreateInfo fenceInfo{};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
vkCreateFence(vk_device_, &fenceInfo, vk_allocation_callbacks, &vk_fence_);
}
}
void VKCommandBuffer::begin_recording()
{
vkWaitForFences(vk_device_, 1, &vk_fence_, VK_TRUE, UINT64_MAX);
vkResetFences(vk_device_, 1, &vk_fence_);
vkResetCommandBuffer(vk_command_buffer_, 0);
VkCommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(vk_command_buffer_, &begin_info);
}
void VKCommandBuffer::end_recording()
{
vkEndCommandBuffer(vk_command_buffer_);
}
void VKCommandBuffer::bind(const VKPipeline &pipeline, VkPipelineBindPoint bind_point)
{
vkCmdBindPipeline(vk_command_buffer_, bind_point, pipeline.vk_handle());
}
void VKCommandBuffer::bind(const VKDescriptorSet &descriptor_set,
const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point)
{
VkDescriptorSet vk_descriptor_set = descriptor_set.vk_handle();
vkCmdBindDescriptorSets(
vk_command_buffer_, bind_point, vk_pipeline_layout, 0, 1, &vk_descriptor_set, 0, 0);
}
void VKCommandBuffer::copy(VKBuffer &dst_buffer,
VKTexture &src_texture,
Span<VkBufferImageCopy> regions)
{
vkCmdCopyImageToBuffer(vk_command_buffer_,
src_texture.vk_image_handle(),
VK_IMAGE_LAYOUT_GENERAL,
dst_buffer.vk_handle(),
regions.size(),
regions.data());
}
void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages)
{
vkCmdPipelineBarrier(vk_command_buffer_,
source_stages,
destination_stages,
0,
0,
nullptr,
0,
nullptr,
0,
nullptr);
}
void VKCommandBuffer::pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers)
{
vkCmdPipelineBarrier(vk_command_buffer_,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_DEPENDENCY_BY_REGION_BIT,
0,
nullptr,
0,
nullptr,
image_memory_barriers.size(),
image_memory_barriers.data());
}
void VKCommandBuffer::dispatch(int groups_x_len, int groups_y_len, int groups_z_len)
{
vkCmdDispatch(vk_command_buffer_, groups_x_len, groups_y_len, groups_z_len);
}
void VKCommandBuffer::submit()
{
end_recording();
encode_recorded_commands();
submit_encoded_commands();
begin_recording();
}
void VKCommandBuffer::encode_recorded_commands()
{
/* Intentionally not implemented. For the graphics pipeline we want to extract the
* resources and their usages so we can encode multiple commands in the same command buffer with
* the correct synchronizations. */
}
void VKCommandBuffer::submit_encoded_commands()
{
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &vk_command_buffer_;
vkQueueSubmit(vk_queue_, 1, &submit_info, vk_fence_);
}
} // namespace blender::gpu

View File

@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "vk_common.hh"
#include "vk_pipeline.hh"
namespace blender::gpu {
class VKBuffer;
class VKTexture;
/** Wrapper around a command buffer to keep track of its life-time. */
class VKCommandBuffer : NonCopyable, NonMovable {
/** Non-owning handles to the command buffer and device. The handles are owned by `GHOST_ContextVK`. */
VkDevice vk_device_ = VK_NULL_HANDLE;
VkCommandBuffer vk_command_buffer_ = VK_NULL_HANDLE;
VkQueue vk_queue_ = VK_NULL_HANDLE;
/** Owning handles */
VkFence vk_fence_ = VK_NULL_HANDLE;
public:
virtual ~VKCommandBuffer();
void init(const VkDevice vk_device, const VkQueue vk_queue, VkCommandBuffer vk_command_buffer);
void begin_recording();
void end_recording();
void bind(const VKPipeline &vk_pipeline, VkPipelineBindPoint bind_point);
void bind(const VKDescriptorSet &descriptor_set,
const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point);
void dispatch(int groups_x_len, int groups_y_len, int groups_z_len);
/* Copy the contents of a texture mip level to the dst buffer. */
void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span<VkBufferImageCopy> regions);
void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages);
void pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers);
/**
* Stop recording commands, encode + send the recordings to Vulkan, wait until the
* commands have been executed and restart the command buffer to accept new recordings.
*/
void submit();
private:
void encode_recorded_commands();
void submit_encoded_commands();
};
} // namespace blender::gpu
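The submit() documented above cycles the command buffer through end recording, execute, wait, and begin recording again, so callers can keep recording without touching the fence themselves. A standalone sketch of that life-cycle as a plain state machine, with printed strings standing in for real Vulkan commands (all names are illustrative, not part of the patch):

#include <cassert>
#include <cstdio>
#include <string>
#include <vector>

class RecordingLifecycle {
  enum class State { Initial, Recording, Submitted } state_ = State::Initial;
  std::vector<std::string> commands_;

 public:
  void begin_recording()
  {
    assert(state_ != State::Recording);
    commands_.clear();
    state_ = State::Recording;
  }

  void record(const std::string &command)
  {
    assert(state_ == State::Recording);
    commands_.push_back(command);
  }

  /* Mirrors VKCommandBuffer::submit(): end recording, "execute", then start recording again. */
  void submit()
  {
    assert(state_ == State::Recording);
    for (const std::string &command : commands_) {
      printf("executing: %s\n", command.c_str());
    }
    state_ = State::Submitted;
    begin_recording();
  }
};

int main()
{
  RecordingLifecycle buffer;
  buffer.begin_recording();
  buffer.record("bind compute pipeline");
  buffer.record("dispatch 128x1x1");
  buffer.submit(); /* Executes both commands and is immediately ready for more. */
  buffer.record("dispatch 64x1x1");
  buffer.submit();
  return 0;
}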

View File

@ -0,0 +1,197 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_common.hh"
namespace blender::gpu {
VkImageAspectFlagBits to_vk_image_aspect_flag_bits(const eGPUTextureFormat format)
{
switch (format) {
case GPU_RGBA32F:
case GPU_RGBA8UI:
case GPU_RGBA8I:
case GPU_RGBA8:
case GPU_RGBA32UI:
case GPU_RGBA32I:
case GPU_RGBA16UI:
case GPU_RGBA16I:
case GPU_RGBA16F:
case GPU_RGBA16:
case GPU_RG8UI:
case GPU_RG8I:
case GPU_RG8:
case GPU_RG32UI:
case GPU_RG32I:
case GPU_RG32F:
case GPU_RG16UI:
case GPU_RG16I:
case GPU_RG16F:
case GPU_RG16:
case GPU_R8UI:
case GPU_R8I:
case GPU_R8:
case GPU_R32UI:
case GPU_R32I:
case GPU_R32F:
case GPU_R16UI:
case GPU_R16I:
case GPU_R16F:
case GPU_R16:
case GPU_RGB10_A2:
case GPU_R11F_G11F_B10F:
case GPU_SRGB8_A8:
case GPU_RGB16F:
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
return VK_IMAGE_ASPECT_COLOR_BIT;
case GPU_DEPTH32F_STENCIL8:
case GPU_DEPTH24_STENCIL8:
return static_cast<VkImageAspectFlagBits>(VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT);
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
return VK_IMAGE_ASPECT_DEPTH_BIT;
case GPU_DEPTH_COMPONENT32F:
/* Not supported by Vulkan. */
BLI_assert_unreachable();
}
return static_cast<VkImageAspectFlagBits>(0);
}
VkFormat to_vk_format(const eGPUTextureFormat format)
{
switch (format) {
case GPU_RGBA32F:
return VK_FORMAT_R32G32B32A32_SFLOAT;
case GPU_RGBA8UI:
case GPU_RGBA8I:
case GPU_RGBA8:
case GPU_RGBA32UI:
case GPU_RGBA32I:
case GPU_RGBA16UI:
case GPU_RGBA16I:
case GPU_RGBA16F:
case GPU_RGBA16:
case GPU_RG8UI:
case GPU_RG8I:
case GPU_RG8:
case GPU_RG32UI:
case GPU_RG32I:
case GPU_RG32F:
case GPU_RG16UI:
case GPU_RG16I:
case GPU_RG16F:
case GPU_RG16:
case GPU_R8UI:
case GPU_R8I:
case GPU_R8:
case GPU_R32UI:
case GPU_R32I:
case GPU_R32F:
case GPU_R16UI:
case GPU_R16I:
case GPU_R16F:
case GPU_R16:
case GPU_RGB10_A2:
case GPU_R11F_G11F_B10F:
case GPU_DEPTH32F_STENCIL8:
case GPU_DEPTH24_STENCIL8:
case GPU_SRGB8_A8:
/* Texture only format */
case GPU_RGB16F:
/* Special formats texture only */
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
/* Depth Formats */
case GPU_DEPTH_COMPONENT32F:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
BLI_assert_unreachable();
}
return VK_FORMAT_UNDEFINED;
}
VkImageType to_vk_image_type(const eGPUTextureType type)
{
switch (type) {
case GPU_TEXTURE_1D:
case GPU_TEXTURE_BUFFER:
case GPU_TEXTURE_1D_ARRAY:
return VK_IMAGE_TYPE_1D;
case GPU_TEXTURE_2D:
case GPU_TEXTURE_2D_ARRAY:
return VK_IMAGE_TYPE_2D;
case GPU_TEXTURE_3D:
case GPU_TEXTURE_CUBE:
case GPU_TEXTURE_CUBE_ARRAY:
return VK_IMAGE_TYPE_3D;
case GPU_TEXTURE_ARRAY:
/* GPU_TEXTURE_ARRAY should always be used together with 1D, 2D, or CUBE. */
BLI_assert_unreachable();
break;
}
return VK_IMAGE_TYPE_1D;
}
VkImageViewType to_vk_image_view_type(const eGPUTextureType type)
{
switch (type) {
case GPU_TEXTURE_1D:
case GPU_TEXTURE_BUFFER:
return VK_IMAGE_VIEW_TYPE_1D;
case GPU_TEXTURE_2D:
return VK_IMAGE_VIEW_TYPE_2D;
case GPU_TEXTURE_3D:
return VK_IMAGE_VIEW_TYPE_3D;
case GPU_TEXTURE_CUBE:
return VK_IMAGE_VIEW_TYPE_CUBE;
case GPU_TEXTURE_1D_ARRAY:
return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
case GPU_TEXTURE_2D_ARRAY:
return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
case GPU_TEXTURE_CUBE_ARRAY:
return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case GPU_TEXTURE_ARRAY:
/* GPU_TEXTURE_ARRAY should always be used together with 1D, 2D, or CUBE. */
BLI_assert_unreachable();
break;
}
return VK_IMAGE_VIEW_TYPE_1D;
}
VkComponentMapping to_vk_component_mapping(const eGPUTextureFormat /*format*/)
{
/* TODO: this should map to OpenGL defaults based on the eGPUTextureFormat. This will be
* implemented when adding other parts of VKTexture. */
VkComponentMapping component_mapping;
component_mapping.r = VK_COMPONENT_SWIZZLE_R;
component_mapping.g = VK_COMPONENT_SWIZZLE_G;
component_mapping.b = VK_COMPONENT_SWIZZLE_B;
component_mapping.a = VK_COMPONENT_SWIZZLE_A;
return component_mapping;
}
} // namespace blender::gpu

View File

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#ifdef __APPLE__
# include <MoltenVK/vk_mvk_moltenvk.h>
#else
# include <vulkan/vulkan.h>
#endif
#include "gpu_texture_private.hh"
namespace blender::gpu {
VkImageAspectFlagBits to_vk_image_aspect_flag_bits(const eGPUTextureFormat format);
VkFormat to_vk_format(const eGPUTextureFormat format);
VkComponentMapping to_vk_component_mapping(const eGPUTextureFormat format);
VkImageViewType to_vk_image_view_type(const eGPUTextureType type);
VkImageType to_vk_image_type(const eGPUTextureType type);
} // namespace blender::gpu

View File

@ -8,6 +8,8 @@
#include "vk_context.hh"
#include "vk_backend.hh"
#include "vk_memory.hh"
#include "vk_state_manager.hh"
#include "GHOST_C-api.h"
@ -15,25 +17,34 @@ namespace blender::gpu {
VKContext::VKContext(void *ghost_window, void *ghost_context)
{
VK_ALLOCATION_CALLBACKS;
ghost_window_ = ghost_window;
if (ghost_window) {
ghost_context = GHOST_GetDrawingContext((GHOST_WindowHandle)ghost_window);
}
ghost_context_ = ghost_context;
GHOST_GetVulkanHandles((GHOST_ContextHandle)ghost_context,
&instance_,
&physical_device_,
&device_,
&graphic_queue_family_);
&vk_instance_,
&vk_physical_device_,
&vk_device_,
&vk_queue_family_,
&vk_queue_);
/* Initialize the memory allocator. */
VmaAllocatorCreateInfo info = {};
/* Should use same vulkan version as GHOST. */
info.vulkanApiVersion = VK_API_VERSION_1_2;
info.physicalDevice = physical_device_;
info.device = device_;
info.instance = instance_;
/* Should use the same Vulkan version as GHOST (1.2), but it is set to 1.0 because 1.2 requires
* the correct extensions and functions to be found by VMA, which isn't working as expected and
* requires more research. To continue development we lower the API to version 1.0. */
info.vulkanApiVersion = VK_API_VERSION_1_0;
info.physicalDevice = vk_physical_device_;
info.device = vk_device_;
info.instance = vk_instance_;
info.pAllocationCallbacks = vk_allocation_callbacks;
vmaCreateAllocator(&info, &mem_allocator_);
descriptor_pools_.init(vk_device_);
state_manager = new VKStateManager();
VKBackend::capabilities_init(*this);
}
@ -53,18 +64,27 @@ void VKContext::deactivate()
void VKContext::begin_frame()
{
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
GHOST_GetVulkanCommandBuffer(static_cast<GHOST_ContextHandle>(ghost_context_), &command_buffer);
command_buffer_.init(vk_device_, vk_queue_, command_buffer);
command_buffer_.begin_recording();
descriptor_pools_.reset();
}
void VKContext::end_frame()
{
command_buffer_.end_recording();
}
void VKContext::flush()
{
command_buffer_.submit();
}
void VKContext::finish()
{
command_buffer_.submit();
}
void VKContext::memory_statistics_get(int * /*total_mem*/, int * /*free_mem*/)

View File

@ -9,26 +9,28 @@
#include "gpu_context_private.hh"
#include "vk_mem_alloc.h"
#include "vk_command_buffer.hh"
#include "vk_descriptor_pools.hh"
#ifdef __APPLE__
# include <MoltenVK/vk_mvk_moltenvk.h>
#else
# include <vulkan/vulkan.h>
#endif
#include "vk_mem_alloc.h"
namespace blender::gpu {
class VKContext : public Context {
private:
/** Copies of the handles owned by the GHOST context. */
VkInstance instance_ = VK_NULL_HANDLE;
VkPhysicalDevice physical_device_ = VK_NULL_HANDLE;
VkDevice device_ = VK_NULL_HANDLE;
uint32_t graphic_queue_family_ = 0;
VkInstance vk_instance_ = VK_NULL_HANDLE;
VkPhysicalDevice vk_physical_device_ = VK_NULL_HANDLE;
VkDevice vk_device_ = VK_NULL_HANDLE;
VKCommandBuffer command_buffer_;
uint32_t vk_queue_family_ = 0;
VkQueue vk_queue_ = VK_NULL_HANDLE;
/** Allocator used for texture and buffers and other resources. */
VmaAllocator mem_allocator_ = VK_NULL_HANDLE;
VKDescriptorPools descriptor_pools_;
void *ghost_context_;
public:
VKContext(void *ghost_window, void *ghost_context);
@ -52,9 +54,34 @@ class VKContext : public Context {
return static_cast<VKContext *>(Context::get());
}
VkPhysicalDevice physical_device_get() const
{
return vk_physical_device_;
}
VkDevice device_get() const
{
return device_;
return vk_device_;
}
VKCommandBuffer &command_buffer_get()
{
return command_buffer_;
}
VkQueue queue_get() const
{
return vk_queue_;
}
const uint32_t *queue_family_ptr_get() const
{
return &vk_queue_family_;
}
VKDescriptorPools &descriptor_pools_get()
{
return descriptor_pools_;
}
VmaAllocator mem_allocator_get() const

View File

@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_descriptor_pools.hh"
#include "vk_memory.hh"
namespace blender::gpu {
VKDescriptorPools::VKDescriptorPools()
{
}
VKDescriptorPools::~VKDescriptorPools()
{
VK_ALLOCATION_CALLBACKS
for (const VkDescriptorPool vk_descriptor_pool : pools_) {
BLI_assert(vk_device_ != VK_NULL_HANDLE);
vkDestroyDescriptorPool(vk_device_, vk_descriptor_pool, vk_allocation_callbacks);
}
vk_device_ = VK_NULL_HANDLE;
}
void VKDescriptorPools::init(const VkDevice vk_device)
{
BLI_assert(vk_device_ == VK_NULL_HANDLE);
vk_device_ = vk_device;
add_new_pool();
}
void VKDescriptorPools::reset()
{
active_pool_index_ = 0;
}
void VKDescriptorPools::add_new_pool()
{
VK_ALLOCATION_CALLBACKS
Vector<VkDescriptorPoolSize> pool_sizes = {
{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, POOL_SIZE_STORAGE_BUFFER},
{VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, POOL_SIZE_STORAGE_IMAGE},
{VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, POOL_SIZE_COMBINED_IMAGE_SAMPLER},
{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, POOL_SIZE_UNIFORM_BUFFER},
};
VkDescriptorPoolCreateInfo pool_info = {};
pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
pool_info.maxSets = POOL_SIZE_DESCRIPTOR_SETS;
pool_info.poolSizeCount = pool_sizes.size();
pool_info.pPoolSizes = pool_sizes.data();
VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
VkResult result = vkCreateDescriptorPool(
vk_device_, &pool_info, vk_allocation_callbacks, &descriptor_pool);
UNUSED_VARS(result);
pools_.append(descriptor_pool);
}
VkDescriptorPool VKDescriptorPools::active_pool_get()
{
BLI_assert(!pools_.is_empty());
return pools_[active_pool_index_];
}
void VKDescriptorPools::activate_next_pool()
{
BLI_assert(!is_last_pool_active());
active_pool_index_ += 1;
}
void VKDescriptorPools::activate_last_pool()
{
active_pool_index_ = pools_.size() - 1;
}
bool VKDescriptorPools::is_last_pool_active()
{
return active_pool_index_ == pools_.size() - 1;
}
VKDescriptorSet VKDescriptorPools::allocate(const VkDescriptorSetLayout &descriptor_set_layout)
{
VkDescriptorSetAllocateInfo allocate_info = {};
VkDescriptorPool pool = active_pool_get();
allocate_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocate_info.descriptorPool = pool;
allocate_info.descriptorSetCount = 1;
allocate_info.pSetLayouts = &descriptor_set_layout;
VkDescriptorSet vk_descriptor_set = VK_NULL_HANDLE;
VkResult result = vkAllocateDescriptorSets(vk_device_, &allocate_info, &vk_descriptor_set);
if (result == VK_ERROR_OUT_OF_POOL_MEMORY) {
if (is_last_pool_active()) {
add_new_pool();
activate_last_pool();
}
else {
activate_next_pool();
}
return allocate(descriptor_set_layout);
}
return VKDescriptorSet(pool, vk_descriptor_set);
}
void VKDescriptorPools::free(VKDescriptorSet &descriptor_set)
{
VkDescriptorSet vk_descriptor_set = descriptor_set.vk_handle();
VkDescriptorPool vk_descriptor_pool = descriptor_set.vk_pool_handle();
BLI_assert(pools_.contains(vk_descriptor_pool));
vkFreeDescriptorSets(vk_device_, vk_descriptor_pool, 1, &vk_descriptor_set);
descriptor_set.mark_freed();
}
} // namespace blender::gpu

View File

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "BLI_vector.hh"
#include "vk_descriptor_set.hh"
namespace blender::gpu {
/**
* List of VkDescriptorPools.
*
* In Vulkan a pool is constructed with a fixed size per resource type. When more resources are
* needed, a new pool should be created. VKDescriptorPools keeps track of those pools and
* constructs a new pool when the previous one is exhausted.
*
* At the beginning of a new frame the descriptor pools are reset. This will start allocating
* again from the first descriptor pool in order to use freed space from previous pools.
*/
class VKDescriptorPools {
/**
* Pool sizes to use. When one descriptor pool is requested to allocate a descriptor but isn't
* able to do so, it will fail.
*
* Better defaults should be set later on, when we know more about our resource usage.
*/
static constexpr uint32_t POOL_SIZE_STORAGE_BUFFER = 1000;
static constexpr uint32_t POOL_SIZE_DESCRIPTOR_SETS = 1000;
static constexpr uint32_t POOL_SIZE_STORAGE_IMAGE = 1000;
static constexpr uint32_t POOL_SIZE_COMBINED_IMAGE_SAMPLER = 1000;
static constexpr uint32_t POOL_SIZE_UNIFORM_BUFFER = 1000;
VkDevice vk_device_ = VK_NULL_HANDLE;
Vector<VkDescriptorPool> pools_;
int64_t active_pool_index_ = 0;
public:
VKDescriptorPools();
~VKDescriptorPools();
void init(const VkDevice vk_device);
VKDescriptorSet allocate(const VkDescriptorSetLayout &descriptor_set_layout);
void free(VKDescriptorSet &descriptor_set);
/**
* Reset the pools to start looking for free space from the first descriptor pool.
*/
void reset();
private:
VkDescriptorPool active_pool_get();
void activate_next_pool();
void activate_last_pool();
bool is_last_pool_active();
void add_new_pool();
};
} // namespace blender::gpu
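The grow-on-exhaustion behaviour described above (and implemented in VKDescriptorPools::allocate()) can be shown with a standalone sketch where pools are just counters: when the active pool is full the next one is activated, a new pool is appended if none is left, and reset() starts the next frame from the first pool again. The pool capacity and the mock allocation below are assumptions for the example only.

#include <cstdio>
#include <vector>

class GrowingPools {
  static constexpr int POOL_CAPACITY = 2; /* Real pools would hold many more descriptors. */
  std::vector<int> used_per_pool_ = {0};
  int active_pool_ = 0;

 public:
  /* Called at the start of a frame: reuse the pools from the beginning again. */
  void reset()
  {
    active_pool_ = 0;
    for (int &used : used_per_pool_) {
      used = 0;
    }
  }

  /* Returns the index of the pool that served the allocation. */
  int allocate()
  {
    if (used_per_pool_[active_pool_] == POOL_CAPACITY) {
      /* Active pool is exhausted: activate the next pool, creating one when needed. */
      if (active_pool_ == int(used_per_pool_.size()) - 1) {
        used_per_pool_.push_back(0);
      }
      active_pool_ += 1;
      return allocate();
    }
    used_per_pool_[active_pool_] += 1;
    return active_pool_;
  }
};

int main()
{
  GrowingPools pools;
  for (int i = 0; i < 5; i++) {
    printf("allocation %d -> pool %d\n", i, pools.allocate()); /* Pools 0, 0, 1, 1, 2. */
  }
  pools.reset(); /* The next frame starts allocating from pool 0 again. */
  return 0;
}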

View File

@ -0,0 +1,128 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_descriptor_set.hh"
#include "vk_index_buffer.hh"
#include "vk_storage_buffer.hh"
#include "vk_texture.hh"
#include "vk_vertex_buffer.hh"
#include "BLI_assert.h"
namespace blender::gpu {
VKDescriptorSet::~VKDescriptorSet()
{
if (vk_descriptor_set_ != VK_NULL_HANDLE) {
/* Handle should be given back to the pool. */
VKContext &context = *VKContext::get();
context.descriptor_pools_get().free(*this);
BLI_assert(vk_descriptor_set_ == VK_NULL_HANDLE);
}
}
void VKDescriptorSet::mark_freed()
{
vk_descriptor_set_ = VK_NULL_HANDLE;
vk_descriptor_pool_ = VK_NULL_HANDLE;
}
void VKDescriptorSet::bind(VKStorageBuffer &buffer, const Location location)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
binding.vk_buffer = buffer.vk_handle();
binding.buffer_size = buffer.size_in_bytes();
}
void VKDescriptorSet::bind_as_ssbo(VKVertexBuffer &buffer, const Location location)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
binding.vk_buffer = buffer.vk_handle();
binding.buffer_size = buffer.size_used_get();
}
void VKDescriptorSet::bind_as_ssbo(VKIndexBuffer &buffer, const Location location)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
binding.vk_buffer = buffer.vk_handle();
binding.buffer_size = buffer.size_get();
}
void VKDescriptorSet::image_bind(VKTexture &texture, const Location location)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
binding.vk_image_view = texture.vk_image_view_handle();
}
VKDescriptorSet::Binding &VKDescriptorSet::ensure_location(const Location location)
{
for (Binding &binding : bindings_) {
if (binding.location == location) {
return binding;
}
}
Binding binding = {};
binding.location = location;
bindings_.append(binding);
return bindings_.last();
}
void VKDescriptorSet::update(VkDevice vk_device)
{
Vector<VkDescriptorBufferInfo> buffer_infos;
Vector<VkWriteDescriptorSet> descriptor_writes;
for (const Binding &binding : bindings_) {
if (!binding.is_buffer()) {
continue;
}
VkDescriptorBufferInfo buffer_info = {};
buffer_info.buffer = binding.vk_buffer;
buffer_info.range = binding.buffer_size;
buffer_infos.append(buffer_info);
VkWriteDescriptorSet write_descriptor = {};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = vk_descriptor_set_;
write_descriptor.dstBinding = binding.location;
write_descriptor.descriptorCount = 1;
write_descriptor.descriptorType = binding.type;
write_descriptor.pBufferInfo = &buffer_infos.last();
descriptor_writes.append(write_descriptor);
}
Vector<VkDescriptorImageInfo> image_infos;
for (const Binding &binding : bindings_) {
if (!binding.is_image()) {
continue;
}
VkDescriptorImageInfo image_info = {};
image_info.imageView = binding.vk_image_view;
image_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
image_infos.append(image_info);
VkWriteDescriptorSet write_descriptor = {};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = vk_descriptor_set_;
write_descriptor.dstBinding = binding.location;
write_descriptor.descriptorCount = 1;
write_descriptor.descriptorType = binding.type;
write_descriptor.pImageInfo = &image_infos.last();
descriptor_writes.append(write_descriptor);
}
vkUpdateDescriptorSets(
vk_device, descriptor_writes.size(), descriptor_writes.data(), 0, nullptr);
bindings_.clear();
}
} // namespace blender::gpu

View File

@ -0,0 +1,147 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"
#include "gpu_shader_private.hh"
#include "vk_common.hh"
namespace blender::gpu {
class VKStorageBuffer;
class VKVertexBuffer;
class VKIndexBuffer;
class VKTexture;
/**
* In Vulkan, shader resources (images and buffers) are grouped in descriptor sets.
*
* The resources inside a descriptor set can be updated and bound per set.
*
* Currently Blender only supports a single descriptor set per shader, but it is planned to support
* two descriptor sets per shader, one for each #blender::gpu::shader::Frequency.
*/
class VKDescriptorSet : NonCopyable {
struct Binding;
public:
/**
* Binding location of a resource in a descriptor set.
*
* Locations and bindings are used for different reasons. In the Vulkan backend we use
* ShaderInput.location to store the descriptor set + the resource binding inside the descriptor
* set. To ease development, VKDescriptorSet::Location is used to hide this
* distinction.
*
* NOTE: [future development] When supporting multiple descriptor sets the encoding/decoding can
* be centralized here. Location will then also contain the descriptor set index.
*/
struct Location {
private:
/**
* Reference to a binding in the descriptor set.
*/
uint32_t binding;
Location() = default;
public:
Location(const ShaderInput *shader_input) : binding(shader_input->location)
{
}
bool operator==(const Location &other) const
{
return binding == other.binding;
}
operator uint32_t() const
{
return binding;
}
friend struct Binding;
};
private:
struct Binding {
Location location;
VkDescriptorType type;
VkBuffer vk_buffer = VK_NULL_HANDLE;
VkDeviceSize buffer_size = 0;
VkImageView vk_image_view = VK_NULL_HANDLE;
Binding()
{
location.binding = 0;
}
bool is_buffer() const
{
return ELEM(type, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
}
bool is_image() const
{
return ELEM(type, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
}
};
VkDescriptorPool vk_descriptor_pool_ = VK_NULL_HANDLE;
VkDescriptorSet vk_descriptor_set_ = VK_NULL_HANDLE;
/** A list of bindings that need to be updated. */
Vector<Binding> bindings_;
public:
VKDescriptorSet() = default;
VKDescriptorSet(VkDescriptorPool vk_descriptor_pool, VkDescriptorSet vk_descriptor_set)
: vk_descriptor_pool_(vk_descriptor_pool), vk_descriptor_set_(vk_descriptor_set)
{
}
virtual ~VKDescriptorSet();
VKDescriptorSet &operator=(VKDescriptorSet &&other)
{
vk_descriptor_set_ = other.vk_descriptor_set_;
vk_descriptor_pool_ = other.vk_descriptor_pool_;
other.mark_freed();
return *this;
}
VkDescriptorSet vk_handle() const
{
return vk_descriptor_set_;
}
VkDescriptorPool vk_pool_handle() const
{
return vk_descriptor_pool_;
}
void bind_as_ssbo(VKVertexBuffer &buffer, Location location);
void bind_as_ssbo(VKIndexBuffer &buffer, Location location);
void bind(VKStorageBuffer &buffer, Location location);
void image_bind(VKTexture &texture, Location location);
/**
* Update the descriptor set on the device.
*/
void update(VkDevice vk_device);
void mark_freed();
private:
Binding &ensure_location(Location location);
};
} // namespace blender::gpu
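The bindings_ list above acts as a small staging area: bind(), bind_as_ssbo() and image_bind() record the desired state per Location through ensure_location(), and update() flushes everything to the device and clears the list. A standalone sketch of that de-duplicate-then-flush pattern, with plain strings standing in for the Vulkan resources (names are illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>
#include <vector>

struct Binding {
  uint32_t location = 0;
  const char *resource = nullptr;
};

class Bindings {
  std::vector<Binding> bindings_;

 public:
  /* Return the existing entry for this location, or append a new one. */
  Binding &ensure_location(const uint32_t location)
  {
    for (Binding &binding : bindings_) {
      if (binding.location == location) {
        return binding;
      }
    }
    Binding binding;
    binding.location = location;
    bindings_.push_back(binding);
    return bindings_.back();
  }

  void bind(const uint32_t location, const char *resource)
  {
    ensure_location(location).resource = resource;
  }

  /* Mirrors update(): flush all pending bindings, then clear the list. */
  void update()
  {
    for (const Binding &binding : bindings_) {
      printf("location %u -> %s\n", binding.location, binding.resource);
    }
    bindings_.clear();
  }
};

int main()
{
  Bindings bindings;
  bindings.bind(0, "storage buffer A");
  bindings.bind(1, "image B");
  bindings.bind(0, "storage buffer C"); /* Re-binding location 0 overwrites the first entry. */
  bindings.update(); /* Prints: location 0 -> storage buffer C, location 1 -> image B. */
  return 0;
}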

View File

@ -6,6 +6,8 @@
*/
#include "vk_index_buffer.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
namespace blender::gpu {
@ -13,12 +15,31 @@ void VKIndexBuffer::upload_data()
{
}
void VKIndexBuffer::bind_as_ssbo(uint /*binding*/)
void VKIndexBuffer::bind_as_ssbo(uint binding)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}
VKShader *shader = static_cast<VKShader *>(context.shader);
const VKShaderInterface &shader_interface = shader->interface_get();
const ShaderInput *shader_input = shader_interface.shader_input_get(
shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER, binding);
shader->pipeline_get().descriptor_set_get().bind_as_ssbo(*this, shader_input);
}
void VKIndexBuffer::read(uint32_t * /*data*/) const
void VKIndexBuffer::read(uint32_t *data) const
{
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();
void *mapped_memory;
if (buffer_.map(context, &mapped_memory)) {
memcpy(data, mapped_memory, size_get());
buffer_.unmap(context);
}
}
void VKIndexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*data*/)
@ -29,4 +50,14 @@ void VKIndexBuffer::strip_restart_indices()
{
}
void VKIndexBuffer::allocate(VKContext &context)
{
GPUUsageType usage = data_ == nullptr ? GPU_USAGE_DEVICE_ONLY : GPU_USAGE_STATIC;
buffer_.create(context,
size_get(),
usage,
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT));
}
} // namespace blender::gpu

View File

@ -9,9 +9,13 @@
#include "gpu_index_buffer_private.hh"
#include "vk_buffer.hh"
namespace blender::gpu {
class VKIndexBuffer : public IndexBuf {
VKBuffer buffer_;
public:
void upload_data() override;
@ -21,8 +25,14 @@ class VKIndexBuffer : public IndexBuf {
void update_sub(uint start, uint len, const void *data) override;
VkBuffer vk_handle()
{
return buffer_.vk_handle();
}
private:
void strip_restart_indices() override;
void allocate(VKContext &context);
};
} // namespace blender::gpu

View File

@ -37,7 +37,9 @@ void *vk_memory_reallocation(void *user_data,
void vk_memory_free(void * /*user_data*/, void *memory)
{
MEM_freeN(memory);
if (memory != nullptr) {
MEM_freeN(memory);
}
}
#endif

View File

@ -7,11 +7,7 @@
#pragma once
#ifdef __APPLE__
# include <MoltenVK/vk_mvk_moltenvk.h>
#else
# include <vulkan/vulkan.h>
#endif
#include "vk_common.hh"
namespace blender::gpu {

View File

@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_pipeline.hh"
#include "vk_context.hh"
#include "vk_memory.hh"
namespace blender::gpu {
VKPipeline::VKPipeline(VkPipeline vk_pipeline, VKDescriptorSet &&vk_descriptor_set)
: vk_pipeline_(vk_pipeline)
{
descriptor_set_ = std::move(vk_descriptor_set);
}
VKPipeline::~VKPipeline()
{
VK_ALLOCATION_CALLBACKS
VkDevice vk_device = VKContext::get()->device_get();
if (vk_pipeline_ != VK_NULL_HANDLE) {
vkDestroyPipeline(vk_device, vk_pipeline_, vk_allocation_callbacks);
}
}
VKPipeline VKPipeline::create_compute_pipeline(VKContext &context,
VkShaderModule compute_module,
VkDescriptorSetLayout &descriptor_set_layout,
VkPipelineLayout &pipeline_layout)
{
VK_ALLOCATION_CALLBACKS
VkDevice vk_device = context.device_get();
VkComputePipelineCreateInfo pipeline_info = {};
pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
pipeline_info.flags = 0;
pipeline_info.stage = {};
pipeline_info.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
pipeline_info.stage.flags = 0;
pipeline_info.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
pipeline_info.stage.module = compute_module;
pipeline_info.layout = pipeline_layout;
pipeline_info.stage.pName = "main";
VkPipeline pipeline;
if (vkCreateComputePipelines(
vk_device, nullptr, 1, &pipeline_info, vk_allocation_callbacks, &pipeline) !=
VK_SUCCESS) {
return VKPipeline();
}
VKDescriptorSet descriptor_set = context.descriptor_pools_get().allocate(descriptor_set_layout);
return VKPipeline(pipeline, std::move(descriptor_set));
}
VkPipeline VKPipeline::vk_handle() const
{
return vk_pipeline_;
}
bool VKPipeline::is_valid() const
{
return vk_pipeline_ != VK_NULL_HANDLE;
}
} // namespace blender::gpu

View File

@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"
#include "vk_common.hh"
#include "vk_descriptor_set.hh"
namespace blender::gpu {
class VKContext;
class VKPipeline : NonCopyable {
VKDescriptorSet descriptor_set_;
VkPipeline vk_pipeline_ = VK_NULL_HANDLE;
public:
VKPipeline() = default;
virtual ~VKPipeline();
VKPipeline(VkPipeline vk_pipeline, VKDescriptorSet &&vk_descriptor_set);
VKPipeline &operator=(VKPipeline &&other)
{
vk_pipeline_ = other.vk_pipeline_;
other.vk_pipeline_ = VK_NULL_HANDLE;
descriptor_set_ = std::move(other.descriptor_set_);
return *this;
}
static VKPipeline create_compute_pipeline(VKContext &context,
VkShaderModule compute_module,
VkDescriptorSetLayout &descriptor_set_layout,
VkPipelineLayout &pipeline_layouts);
VKDescriptorSet &descriptor_set_get()
{
return descriptor_set_;
}
VkPipeline vk_handle() const;
bool is_valid() const;
};
} // namespace blender::gpu

View File

@ -9,6 +9,7 @@
#include "vk_backend.hh"
#include "vk_memory.hh"
#include "vk_shader_interface.hh"
#include "vk_shader_log.hh"
#include "BLI_string_utils.h"
@ -324,9 +325,11 @@ static std::ostream &print_qualifier(std::ostream &os, const Qualifier &qualifie
return os;
}
static void print_resource(std::ostream &os, const ShaderCreateInfo::Resource &res)
static void print_resource(std::ostream &os,
const ShaderInput &shader_input,
const ShaderCreateInfo::Resource &res)
{
os << "layout(binding = " << res.slot;
os << "layout(binding = " << shader_input.location;
if (res.bind_type == ShaderCreateInfo::Resource::BindType::IMAGE) {
os << ", " << to_string(res.image.format);
}
@ -372,6 +375,18 @@ static void print_resource(std::ostream &os, const ShaderCreateInfo::Resource &r
}
}
static void print_resource(std::ostream &os,
const VKShaderInterface &shader_interface,
const ShaderCreateInfo::Resource &res)
{
const ShaderInput *shader_input = shader_interface.shader_input_get(res);
if (shader_input == nullptr) {
BLI_assert_msg(shader_input, "Cannot find shader input for resource");
return;
}
print_resource(os, *shader_input, res);
}
static void print_resource_alias(std::ostream &os, const ShaderCreateInfo::Resource &res)
{
int64_t array_offset;
@ -603,6 +618,14 @@ VKShader::~VKShader()
vkDestroyShaderModule(device, compute_module_, vk_allocation_callbacks);
compute_module_ = VK_NULL_HANDLE;
}
if (pipeline_layout_ != VK_NULL_HANDLE) {
vkDestroyPipelineLayout(device, pipeline_layout_, vk_allocation_callbacks);
pipeline_layout_ = VK_NULL_HANDLE;
}
if (layout_ != VK_NULL_HANDLE) {
vkDestroyDescriptorSetLayout(device, layout_, vk_allocation_callbacks);
layout_ = VK_NULL_HANDLE;
}
}
void VKShader::build_shader_module(MutableSpan<const char *> sources,
@ -650,52 +673,181 @@ bool VKShader::finalize(const shader::ShaderCreateInfo *info)
return false;
}
if (vertex_module_ != VK_NULL_HANDLE) {
VKShaderInterface *vk_interface = new VKShaderInterface();
vk_interface->init(*info);
VkDevice vk_device = context_->device_get();
if (!finalize_descriptor_set_layouts(vk_device, *vk_interface, *info)) {
return false;
}
if (!finalize_pipeline_layout(vk_device, *info)) {
return false;
}
/* TODO: we might need to move the actual pipeline construction to a later stage, as the graphics
 * pipeline requires more data before it can be constructed. */
bool result;
if (is_graphics_shader()) {
BLI_assert((fragment_module_ != VK_NULL_HANDLE && info->tf_type_ == GPU_SHADER_TFB_NONE) ||
(fragment_module_ == VK_NULL_HANDLE && info->tf_type_ != GPU_SHADER_TFB_NONE));
BLI_assert(compute_module_ == VK_NULL_HANDLE);
VkPipelineShaderStageCreateInfo vertex_stage_info = {};
vertex_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertex_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertex_stage_info.module = vertex_module_;
vertex_stage_info.pName = "main";
pipeline_infos_.append(vertex_stage_info);
if (geometry_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo geo_stage_info = {};
geo_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
geo_stage_info.stage = VK_SHADER_STAGE_GEOMETRY_BIT;
geo_stage_info.module = geometry_module_;
geo_stage_info.pName = "main";
pipeline_infos_.append(geo_stage_info);
}
if (fragment_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo fragment_stage_info = {};
fragment_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragment_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragment_stage_info.module = fragment_module_;
fragment_stage_info.pName = "main";
pipeline_infos_.append(fragment_stage_info);
}
result = finalize_graphics_pipeline(vk_device);
}
else {
BLI_assert(vertex_module_ == VK_NULL_HANDLE);
BLI_assert(geometry_module_ == VK_NULL_HANDLE);
BLI_assert(fragment_module_ == VK_NULL_HANDLE);
BLI_assert(compute_module_ != VK_NULL_HANDLE);
VkPipelineShaderStageCreateInfo compute_stage_info = {};
compute_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
compute_stage_info.stage = VK_SHADER_STAGE_COMPUTE_BIT;
compute_stage_info.module = compute_module_;
compute_stage_info.pName = "main";
pipeline_infos_.append(compute_stage_info);
compute_pipeline_ = VKPipeline::create_compute_pipeline(
*context_, compute_module_, layout_, pipeline_layout_);
result = compute_pipeline_.is_valid();
}
#ifdef NDEBUG
UNUSED_VARS(info);
#endif
if (result) {
interface = vk_interface;
}
else {
delete vk_interface;
}
return result;
}
bool VKShader::finalize_graphics_pipeline(VkDevice /*vk_device */)
{
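/* NOTE: Only the shader stage infos are collected here; the actual graphics pipeline isn't created yet. */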
Vector<VkPipelineShaderStageCreateInfo> pipeline_stages;
VkPipelineShaderStageCreateInfo vertex_stage_info = {};
vertex_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertex_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertex_stage_info.module = vertex_module_;
vertex_stage_info.pName = "main";
pipeline_stages.append(vertex_stage_info);
if (geometry_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo geo_stage_info = {};
geo_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
geo_stage_info.stage = VK_SHADER_STAGE_GEOMETRY_BIT;
geo_stage_info.module = geometry_module_;
geo_stage_info.pName = "main";
pipeline_stages.append(geo_stage_info);
}
if (fragment_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo fragment_stage_info = {};
fragment_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragment_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragment_stage_info.module = fragment_module_;
fragment_stage_info.pName = "main";
pipeline_stages.append(fragment_stage_info);
}
return true;
}
bool VKShader::finalize_pipeline_layout(VkDevice vk_device,
const shader::ShaderCreateInfo & /*info*/)
{
VK_ALLOCATION_CALLBACKS
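/* A pipeline layout is still required when no descriptor set layout exists; in that case the set layout count is zero. */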
const uint32_t layout_count = layout_ == VK_NULL_HANDLE ? 0 : 1;
VkPipelineLayoutCreateInfo pipeline_info = {};
pipeline_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_info.flags = 0;
pipeline_info.setLayoutCount = layout_count;
pipeline_info.pSetLayouts = &layout_;
if (vkCreatePipelineLayout(
vk_device, &pipeline_info, vk_allocation_callbacks, &pipeline_layout_) != VK_SUCCESS) {
return false;
};
return true;
}
static VkDescriptorType descriptor_type(
const shader::ShaderCreateInfo::Resource::BindType bind_type)
{
switch (bind_type) {
case shader::ShaderCreateInfo::Resource::BindType::IMAGE:
return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
case shader::ShaderCreateInfo::Resource::BindType::SAMPLER:
return VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
case shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER:
return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
case shader::ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER:
return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
}
BLI_assert_unreachable();
return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
}
static VkDescriptorSetLayoutBinding create_descriptor_set_layout_binding(
const ShaderInput &shader_input, const shader::ShaderCreateInfo::Resource &resource)
{
VkDescriptorSetLayoutBinding binding = {};
binding.binding = shader_input.location;
binding.descriptorType = descriptor_type(resource.bind_type);
binding.descriptorCount = 1;
binding.stageFlags = VK_SHADER_STAGE_ALL;
binding.pImmutableSamplers = nullptr;
return binding;
}
static void add_descriptor_set_layout_bindings(
const VKShaderInterface &interface,
const Vector<shader::ShaderCreateInfo::Resource> &resources,
Vector<VkDescriptorSetLayoutBinding> &r_bindings)
{
for (const shader::ShaderCreateInfo::Resource &resource : resources) {
const ShaderInput *shader_input = interface.shader_input_get(resource);
if (shader_input == nullptr) {
BLI_assert_msg(shader_input, "Cannot find shader input for resource.");
continue;
}
r_bindings.append(create_descriptor_set_layout_binding(*shader_input, resource));
}
}
static VkDescriptorSetLayoutCreateInfo create_descriptor_set_layout(
const VKShaderInterface &interface,
const Vector<shader::ShaderCreateInfo::Resource> &resources,
Vector<VkDescriptorSetLayoutBinding> &r_bindings)
{
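/* NOTE: `r_bindings` is owned by the caller; the returned create info keeps a pointer into it, so
 * it must stay alive until `vkCreateDescriptorSetLayout` has been called. */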
add_descriptor_set_layout_bindings(interface, resources, r_bindings);
VkDescriptorSetLayoutCreateInfo set_info = {};
set_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
set_info.flags = 0;
set_info.pNext = nullptr;
set_info.bindingCount = r_bindings.size();
set_info.pBindings = r_bindings.data();
return set_info;
}
bool VKShader::finalize_descriptor_set_layouts(VkDevice vk_device,
const VKShaderInterface &shader_interface,
const shader::ShaderCreateInfo &info)
{
if (info.pass_resources_.is_empty() && info.batch_resources_.is_empty()) {
return true;
}
VK_ALLOCATION_CALLBACKS
/* Currently we create a single descriptor set. The goal would be to create one descriptor set
 * per Frequency::PASS/BATCH. This isn't possible yet, because some areas expect the binding
 * location to be static and predictable (eevee-next) or to be mappable to a single number
 * (python). */
Vector<ShaderCreateInfo::Resource> all_resources;
all_resources.extend(info.pass_resources_);
all_resources.extend(info.batch_resources_);
Vector<VkDescriptorSetLayoutBinding> bindings;
VkDescriptorSetLayoutCreateInfo layout_info = create_descriptor_set_layout(
shader_interface, all_resources, bindings);
if (vkCreateDescriptorSetLayout(vk_device, &layout_info, vk_allocation_callbacks, &layout_) !=
VK_SUCCESS) {
return false;
};
return true;
}
@ -716,10 +868,23 @@ void VKShader::transform_feedback_disable()
void VKShader::bind()
{
VKContext *context = VKContext::get();
if (is_compute_shader()) {
context->command_buffer_get().bind(compute_pipeline_, VK_PIPELINE_BIND_POINT_COMPUTE);
}
else {
BLI_assert_unreachable();
}
}
void VKShader::unbind()
{
if (is_compute_shader()) {
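/* Nothing to unbind for compute shaders at the moment. */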
}
else {
BLI_assert_unreachable();
}
}
void VKShader::uniform_float(int /*location*/,
@ -737,11 +902,13 @@ void VKShader::uniform_int(int /*location*/,
std::string VKShader::resources_declare(const shader::ShaderCreateInfo &info) const
{
VKShaderInterface interface;
interface.init(info);
std::stringstream ss;
ss << "\n/* Pass Resources. */\n";
for (const ShaderCreateInfo::Resource &res : info.pass_resources_) {
print_resource(ss, res);
print_resource(ss, interface, res);
}
for (const ShaderCreateInfo::Resource &res : info.pass_resources_) {
print_resource_alias(ss, res);
@ -749,7 +916,7 @@ std::string VKShader::resources_declare(const shader::ShaderCreateInfo &info) co
ss << "\n/* Batch Resources. */\n";
for (const ShaderCreateInfo::Resource &res : info.batch_resources_) {
print_resource(ss, res);
print_resource(ss, interface, res);
}
for (const ShaderCreateInfo::Resource &res : info.batch_resources_) {
print_resource_alias(ss, res);
@ -958,4 +1125,14 @@ int VKShader::program_handle_get() const
return -1;
}
VKPipeline &VKShader::pipeline_get()
{
return compute_pipeline_;
}
const VKShaderInterface &VKShader::interface_get() const
{
return *static_cast<const VKShaderInterface *>(interface);
}
} // namespace blender::gpu

View File

@ -15,6 +15,7 @@
#include "BLI_string_ref.hh"
namespace blender::gpu {
class VKShaderInterface;
class VKShader : public Shader {
private:
@ -24,7 +25,9 @@ class VKShader : public Shader {
VkShaderModule fragment_module_ = VK_NULL_HANDLE;
VkShaderModule compute_module_ = VK_NULL_HANDLE;
bool compilation_failed_ = false;
Vector<VkPipelineShaderStageCreateInfo> pipeline_infos_;
VkDescriptorSetLayout layout_ = VK_NULL_HANDLE;
VkPipelineLayout pipeline_layout_ = VK_NULL_HANDLE;
VKPipeline compute_pipeline_;
public:
VKShader(const char *name);
@ -58,12 +61,35 @@ class VKShader : public Shader {
/* DEPRECATED: Kept only because of BGL API. */
int program_handle_get() const override;
VKPipeline &pipeline_get();
VkPipelineLayout vk_pipeline_layout_get() const
{
return pipeline_layout_;
}
const VKShaderInterface &interface_get() const;
private:
Vector<uint32_t> compile_glsl_to_spirv(Span<const char *> sources, shaderc_shader_kind kind);
void build_shader_module(Span<uint32_t> spirv_module, VkShaderModule *r_shader_module);
void build_shader_module(MutableSpan<const char *> sources,
shaderc_shader_kind stage,
VkShaderModule *r_shader_module);
bool finalize_descriptor_set_layouts(VkDevice vk_device,
const VKShaderInterface &shader_interface,
const shader::ShaderCreateInfo &info);
bool finalize_pipeline_layout(VkDevice vk_device, const shader::ShaderCreateInfo &info);
bool finalize_graphics_pipeline(VkDevice vk_device);
bool is_graphics_shader() const
{
return !is_compute_shader();
}
bool is_compute_shader() const
{
return compute_module_ != VK_NULL_HANDLE;
}
};
} // namespace blender::gpu

View File

@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_shader_interface.hh"
namespace blender::gpu {
void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
{
using namespace blender::gpu::shader;
attr_len_ = 0;
uniform_len_ = 0;
ssbo_len_ = 0;
ubo_len_ = 0;
image_offset_ = -1;
Vector<ShaderCreateInfo::Resource> all_resources;
all_resources.extend(info.pass_resources_);
all_resources.extend(info.batch_resources_);
for (ShaderCreateInfo::Resource &res : all_resources) {
switch (res.bind_type) {
case ShaderCreateInfo::Resource::BindType::IMAGE:
uniform_len_++;
break;
case ShaderCreateInfo::Resource::BindType::SAMPLER:
image_offset_ = max_ii(image_offset_, res.slot);
uniform_len_++;
break;
case ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER:
ubo_len_++;
break;
case ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER:
ssbo_len_++;
break;
}
}
/* Make sure that the image slots don't overlap with the sampler slots. */
image_offset_ += 1;
int32_t input_tot_len = ubo_len_ + uniform_len_ + ssbo_len_;
inputs_ = static_cast<ShaderInput *>(
MEM_calloc_arrayN(input_tot_len, sizeof(ShaderInput), __func__));
ShaderInput *input = inputs_;
name_buffer_ = (char *)MEM_mallocN(info.interface_names_size_, "name_buffer");
uint32_t name_buffer_offset = 0;
int location = 0;
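/* Inputs are added per bind type: uniform buffers first, then samplers and images, then storage
 * buffers. Locations are assigned sequentially; bindings keep the slot from the create info
 * (image bindings are offset past the samplers). */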
/* Uniform blocks */
for (const ShaderCreateInfo::Resource &res : all_resources) {
if (res.bind_type == ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER) {
copy_input_name(input, res.uniformbuf.name, name_buffer_, name_buffer_offset);
input->location = location++;
input->binding = res.slot;
input++;
}
}
/* Images, Samplers and buffers. */
for (const ShaderCreateInfo::Resource &res : all_resources) {
if (res.bind_type == ShaderCreateInfo::Resource::BindType::SAMPLER) {
copy_input_name(input, res.sampler.name, name_buffer_, name_buffer_offset);
input->location = location++;
input->binding = res.slot;
input++;
}
else if (res.bind_type == ShaderCreateInfo::Resource::BindType::IMAGE) {
copy_input_name(input, res.image.name, name_buffer_, name_buffer_offset);
input->location = location++;
input->binding = res.slot + image_offset_;
input++;
}
}
/* Storage buffers */
for (const ShaderCreateInfo::Resource &res : all_resources) {
if (res.bind_type == ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER) {
copy_input_name(input, res.storagebuf.name, name_buffer_, name_buffer_offset);
input->location = location++;
input->binding = res.slot;
input++;
}
}
sort_inputs();
}
const ShaderInput *VKShaderInterface::shader_input_get(
const shader::ShaderCreateInfo::Resource &resource) const
{
return shader_input_get(resource.bind_type, resource.slot);
}
const ShaderInput *VKShaderInterface::shader_input_get(
const shader::ShaderCreateInfo::Resource::BindType &bind_type, int binding) const
{
switch (bind_type) {
case shader::ShaderCreateInfo::Resource::BindType::IMAGE:
return texture_get(binding + image_offset_);
case shader::ShaderCreateInfo::Resource::BindType::SAMPLER:
return texture_get(binding);
case shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER:
return ssbo_get(binding);
case shader::ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER:
return ubo_get(binding);
}
return nullptr;
}
} // namespace blender::gpu

View File

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "gpu_shader_create_info.hh"
#include "gpu_shader_interface.hh"
namespace blender::gpu {
class VKShaderInterface : public ShaderInterface {
private:
/**
* Offset when searching for a shader input based on a binding number.
*
* When shaders combine images and samplers, the images have to be offset to find the correct
* shader input. Both textures and images are stored in the uniform list and their IDs can
* overlap.
*/
uint32_t image_offset_ = 0;
public:
VKShaderInterface() = default;
void init(const shader::ShaderCreateInfo &info);
/**
* Retrieve the shader input for the given resource.
*
* Returns nullptr when the resource could not be found.
* This should only happen while the Vulkan shader is still being developed.
*/
const ShaderInput *shader_input_get(const shader::ShaderCreateInfo::Resource &resource) const;
const ShaderInput *shader_input_get(
const shader::ShaderCreateInfo::Resource::BindType &bind_type, int binding) const;
};
} // namespace blender::gpu

View File

@ -0,0 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_state_manager.hh"
#include "vk_texture.hh"
namespace blender::gpu {
void VKStateManager::apply_state()
{
}
void VKStateManager::force_state()
{
}
void VKStateManager::issue_barrier(eGPUBarrier /*barrier_bits*/)
{
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
/* TODO: Pipeline barriers should be added. We might be able to extract them from
 * the actual pipeline later on, but for now we submit the work as a barrier. */
command_buffer.submit();
}
void VKStateManager::texture_bind(Texture * /*tex*/, eGPUSamplerState /*sampler*/, int /*unit*/)
{
}
void VKStateManager::texture_unbind(Texture * /*tex*/)
{
}
void VKStateManager::texture_unbind_all()
{
}
void VKStateManager::image_bind(Texture *tex, int binding)
{
VKTexture *texture = unwrap(tex);
texture->image_bind(binding);
}
void VKStateManager::image_unbind(Texture * /*tex*/)
{
}
void VKStateManager::image_unbind_all()
{
}
void VKStateManager::texture_unpack_row_length_set(uint /*len*/)
{
}
} // namespace blender::gpu

View File

@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "gpu_state_private.hh"
namespace blender::gpu {
class VKStateManager : public StateManager {
public:
void apply_state() override;
void force_state() override;
void issue_barrier(eGPUBarrier barrier_bits) override;
void texture_bind(Texture *tex, eGPUSamplerState sampler, int unit) override;
void texture_unbind(Texture *tex) override;
void texture_unbind_all() override;
void image_bind(Texture *tex, int unit) override;
void image_unbind(Texture *tex) override;
void image_unbind_all() override;
void texture_unpack_row_length_set(uint len) override;
};
} // namespace blender::gpu

View File

@ -4,19 +4,39 @@
/** \file
* \ingroup gpu
*/
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_vertex_buffer.hh"
#include "vk_storage_buffer.hh"
namespace blender::gpu {
void VKStorageBuffer::update(const void * /*data*/)
void VKStorageBuffer::update(const void *data)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}
buffer_.update(context, data);
}
void VKStorageBuffer::bind(int /*slot*/)
void VKStorageBuffer::allocate(VKContext &context)
{
buffer_.create(context, size_in_bytes_, usage_, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
}
void VKStorageBuffer::bind(int slot)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}
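/* Resolve the descriptor set location for this binding through the shader interface and bind the buffer there. */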
VKShader *shader = static_cast<VKShader *>(context.shader);
const VKShaderInterface &shader_interface = shader->interface_get();
const ShaderInput *shader_input = shader_interface.shader_input_get(
shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER, slot);
shader->pipeline_get().descriptor_set_get().bind(*this, shader_input);
}
void VKStorageBuffer::unbind()
@ -35,8 +55,21 @@ void VKStorageBuffer::copy_sub(VertBuf * /*src*/,
{
}
void VKStorageBuffer::read(void * /*data*/)
void VKStorageBuffer::read(void *data)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}
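/* Submit pending commands so prior GPU writes to the buffer are finished before it is mapped for read-back. */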
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();
void *mapped_memory;
if (buffer_.map(context, &mapped_memory)) {
memcpy(data, mapped_memory, size_in_bytes_);
buffer_.unmap(context);
}
}
} // namespace blender::gpu

View File

@ -10,12 +10,20 @@
#include "GPU_texture.h"
#include "gpu_storage_buffer_private.hh"
#include "gpu_vertex_buffer_private.hh"
#include "vk_buffer.hh"
namespace blender::gpu {
class VertBuf;
class VKStorageBuffer : public StorageBuf {
GPUUsageType usage_;
VKBuffer buffer_;
public:
VKStorageBuffer(int size, const char *name) : StorageBuf(size, name)
VKStorageBuffer(int size, GPUUsageType usage, const char *name)
: StorageBuf(size, name), usage_(usage)
{
}
@ -25,6 +33,19 @@ class VKStorageBuffer : public StorageBuf {
void clear(eGPUTextureFormat internal_format, eGPUDataFormat data_format, void *data) override;
void copy_sub(VertBuf *src, uint dst_offset, uint src_offset, uint copy_size) override;
void read(void *data) override;
VkBuffer vk_handle() const
{
return buffer_.vk_handle();
}
int64_t size_in_bytes() const
{
return buffer_.size_in_bytes();
}
private:
void allocate(VKContext &context);
};
} // namespace blender::gpu

View File

@ -7,8 +7,25 @@
#include "vk_texture.hh"
#include "vk_buffer.hh"
#include "vk_context.hh"
#include "vk_memory.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "BKE_global.h"
namespace blender::gpu {
VKTexture::~VKTexture()
{
VK_ALLOCATION_CALLBACKS
VKContext &context = *VKContext::get();
vmaDestroyImage(context.mem_allocator_get(), vk_image_, allocation_);
vkDestroyImageView(context.device_get(), vk_image_view_, vk_allocation_callbacks);
}
void VKTexture::generate_mipmap()
{
}
@ -33,9 +50,49 @@ void VKTexture::mip_range_set(int /*min*/, int /*max*/)
{
}
void *VKTexture::read(int /*mip*/, eGPUDataFormat /*format*/)
void *VKTexture::read(int mip, eGPUDataFormat format)
{
return nullptr;
/* Vulkan images cannot be directly mapped to host memory and require a staging buffer. */
VKContext &context = *VKContext::get();
VKBuffer staging_buffer;
/* NOTE: mip_size_get() won't override any dimension that is equal to 0. */
int extent[3] = {1, 1, 1};
mip_size_get(mip, extent);
size_t sample_len = extent[0] * extent[1] * extent[2];
/* NOTE: to_bytesize returns number of bits. */
size_t device_memory_size = sample_len * to_component_len(format_) * to_bytesize(format_) / 8;
/* NOTE: to_bytesize returns number of bytes here. */
size_t host_memory_size = sample_len * to_bytesize(format_, format);
staging_buffer.create(
context, device_memory_size, GPU_USAGE_DEVICE_ONLY, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
VkBufferImageCopy region = {};
region.imageExtent.width = extent[0];
region.imageExtent.height = extent[1];
region.imageExtent.depth = extent[2];
region.imageSubresource.aspectMask = to_vk_image_aspect_flag_bits(format_);
region.imageSubresource.mipLevel = mip;
region.imageSubresource.layerCount = 1;
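/* Copy the requested mip level into the staging buffer and submit so the transfer has finished before mapping. */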
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(staging_buffer, *this, Span<VkBufferImageCopy>(&region, 1));
command_buffer.submit();
void *mapped_data;
staging_buffer.map(context, &mapped_data);
void *data = MEM_mallocN(host_memory_size, __func__);
/* TODO: add conversion when the data format is different. */
BLI_assert_msg(device_memory_size == host_memory_size,
"Memory data conversions not implemented yet");
memcpy(data, mapped_data, host_memory_size);
staging_buffer.unmap(context);
return data;
}
void VKTexture::update_sub(int /*mip*/,
@ -61,7 +118,10 @@ uint VKTexture::gl_bindcode_get() const
bool VKTexture::init_internal()
{
return false;
/* Initialization can only happen after the usage is known. With the current API the usage isn't
 * set at this point, so we cannot initialize here. Initialization is postponed until the
 * texture is allocated on the device. */
return true;
}
bool VKTexture::init_internal(GPUVertBuf * /*vbo*/)
@ -74,4 +134,102 @@ bool VKTexture::init_internal(const GPUTexture * /*src*/, int /*mip_offset*/, in
return false;
}
bool VKTexture::is_allocated()
{
return vk_image_ != VK_NULL_HANDLE && allocation_ != VK_NULL_HANDLE;
}
bool VKTexture::allocate()
{
BLI_assert(!is_allocated());
int extent[3] = {1, 1, 1};
mip_size_get(0, extent);
VKContext &context = *VKContext::get();
VkImageCreateInfo image_info = {};
image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_info.imageType = to_vk_image_type(type_);
image_info.extent.width = extent[0];
image_info.extent.height = extent[1];
image_info.extent.depth = extent[2];
image_info.mipLevels = 1;
image_info.arrayLayers = 1;
image_info.format = to_vk_format(format_);
image_info.tiling = VK_IMAGE_TILING_LINEAR;
image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT |
VK_IMAGE_USAGE_STORAGE_BIT;
image_info.samples = VK_SAMPLE_COUNT_1_BIT;
VkResult result;
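/* When GPU debugging is enabled, verify that the device supports this image configuration before allocating. */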
if (G.debug & G_DEBUG_GPU) {
VkImageFormatProperties image_format = {};
result = vkGetPhysicalDeviceImageFormatProperties(context.physical_device_get(),
image_info.format,
image_info.imageType,
image_info.tiling,
image_info.usage,
image_info.flags,
&image_format);
if (result != VK_SUCCESS) {
printf("Image type not supported on device.\n");
return false;
}
}
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.flags = static_cast<VmaAllocationCreateFlagBits>(
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT);
allocCreateInfo.priority = 1.0f;
result = vmaCreateImage(context.mem_allocator_get(),
&image_info,
&allocCreateInfo,
&vk_image_,
&allocation_,
nullptr);
if (result != VK_SUCCESS) {
return false;
}
/* Promote the image to the correct layout. */
VkImageMemoryBarrier barrier{};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
barrier.image = vk_image_;
barrier.subresourceRange.aspectMask = to_vk_image_aspect_flag_bits(format_);
barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
context.command_buffer_get().pipeline_barrier(Span<VkImageMemoryBarrier>(&barrier, 1));
VK_ALLOCATION_CALLBACKS
VkImageViewCreateInfo image_view_info = {};
image_view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_info.image = vk_image_;
image_view_info.viewType = to_vk_image_view_type(type_);
image_view_info.format = to_vk_format(format_);
image_view_info.components = to_vk_component_mapping(format_);
image_view_info.subresourceRange.aspectMask = to_vk_image_aspect_flag_bits(format_);
image_view_info.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
image_view_info.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
result = vkCreateImageView(
context.device_get(), &image_view_info, vk_allocation_callbacks, &vk_image_view_);
return result == VK_SUCCESS;
}
void VKTexture::image_bind(int binding)
{
if (!is_allocated()) {
allocate();
}
VKContext &context = *VKContext::get();
VKShader *shader = static_cast<VKShader *>(context.shader);
VKDescriptorSet::Location location(shader->interface_get().shader_input_get(
shader::ShaderCreateInfo::Resource::BindType::IMAGE, binding));
shader->pipeline_get().descriptor_set_get().image_bind(*this, location);
}
} // namespace blender::gpu

View File

@ -8,14 +8,22 @@
#pragma once
#include "gpu_texture_private.hh"
#include "vk_context.hh"
#include "vk_mem_alloc.h"
namespace blender::gpu {
class VKTexture : public Texture {
VkImage vk_image_ = VK_NULL_HANDLE;
VkImageView vk_image_view_ = VK_NULL_HANDLE;
VmaAllocation allocation_ = VK_NULL_HANDLE;
public:
VKTexture(const char *name) : Texture(name)
{
}
virtual ~VKTexture() override;
void generate_mipmap() override;
void copy_to(Texture *tex) override;
@ -34,10 +42,36 @@ class VKTexture : public Texture {
/* TODO(fclem): Legacy. Should be removed at some point. */
uint gl_bindcode_get() const override;
void image_bind(int location);
VkImage vk_image_handle() const
{
return vk_image_;
}
VkImageView vk_image_view_handle() const
{
return vk_image_view_;
}
protected:
bool init_internal() override;
bool init_internal(GPUVertBuf *vbo) override;
bool init_internal(const GPUTexture *src, int mip_offset, int layer_offset) override;
private:
/** Is this texture already allocated on the device? */
bool is_allocated();
/**
 * Allocate the texture on the device. Returns `true` when the texture was successfully
 * allocated on the device.
 */
bool allocate();
VkImageViewType vk_image_view_type() const;
};
static inline VKTexture *unwrap(Texture *tex)
{
return static_cast<VKTexture *>(tex);
}
} // namespace blender::gpu

View File

@ -7,6 +7,8 @@
#include "MEM_guardedalloc.h"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_vertex_buffer.hh"
namespace blender::gpu {
@ -16,8 +18,18 @@ VKVertexBuffer::~VKVertexBuffer()
release_data();
}
void VKVertexBuffer::bind_as_ssbo(uint /*binding*/)
void VKVertexBuffer::bind_as_ssbo(uint binding)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}
VKShader *shader = static_cast<VKShader *>(context.shader);
const VKShaderInterface &shader_interface = shader->interface_get();
const ShaderInput *shader_input = shader_interface.shader_input_get(
shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER, binding);
shader->pipeline_get().descriptor_set_get().bind_as_ssbo(*this, shader_input);
}
void VKVertexBuffer::bind_as_texture(uint /*binding*/)
@ -32,8 +44,17 @@ void VKVertexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*dat
{
}
void VKVertexBuffer::read(void * /*data*/) const
void VKVertexBuffer::read(void *data) const
{
VKContext &context = *VKContext::get();
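/* Submit pending commands so the buffer contents are up to date before mapping it for read-back. */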
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();
void *mapped_memory;
if (buffer_.map(context, &mapped_memory)) {
memcpy(data, mapped_memory, size_used_get());
buffer_.unmap(context);
}
}
void VKVertexBuffer::acquire_data()
@ -64,4 +85,13 @@ void VKVertexBuffer::duplicate_data(VertBuf * /*dst*/)
{
}
void VKVertexBuffer::allocate(VKContext &context)
{
buffer_.create(context,
size_used_get(),
usage_,
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
}
} // namespace blender::gpu

View File

@ -9,9 +9,13 @@
#include "gpu_vertex_buffer_private.hh"
#include "vk_buffer.hh"
namespace blender::gpu {
class VKVertexBuffer : public VertBuf {
VKBuffer buffer_;
public:
~VKVertexBuffer();
@ -22,12 +26,20 @@ class VKVertexBuffer : public VertBuf {
void update_sub(uint start, uint len, const void *data) override;
void read(void *data) const override;
VkBuffer vk_handle() const
{
return buffer_.vk_handle();
}
protected:
void acquire_data() override;
void resize_data() override;
void release_data() override;
void upload_data() override;
void duplicate_data(VertBuf *dst) override;
private:
void allocate(VKContext &context);
};
} // namespace blender::gpu

View File

@ -53,7 +53,8 @@ class USDShapeReader : public USDGeomReader {
const char ** /*err_str*/) override;
bool is_time_varying();
virtual bool topology_changed(const Mesh * /*existing_mesh*/, double /*motionSampleTime*/)
virtual bool topology_changed(const Mesh * /*existing_mesh*/,
double /*motionSampleTime*/) override
{
return false;
};

View File

@ -39,6 +39,7 @@ set(INC
set(SRC
intern/add_node_search.cc
intern/derived_node_tree.cc
intern/geometry_nodes_lazy_function.cc
intern/geometry_nodes_log.cc
@ -54,6 +55,7 @@ set(SRC
intern/node_util.cc
intern/socket_search_link.cc
NOD_add_node_search.hh
NOD_common.h
NOD_composite.h
NOD_derived_node_tree.hh

View File

@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include <functional>
#include "BLI_function_ref.hh"
#include "BLI_string_ref.hh"
#include "BLI_vector.hh"
#include "DNA_node_types.h" /* Necessary for eNodeSocketInOut. */
#include "NOD_node_declaration.hh"
struct bContext;
namespace blender::nodes {
struct AddNodeInfo {
using AfterAddFn = std::function<void(const bContext &C, bNodeTree &node_tree, bNode &node)>;
std::string ui_name;
std::string description;
AfterAddFn after_add_fn;
int weight = 0;
};
class GatherAddNodeSearchParams {
const bNodeType &node_type_;
const bNodeTree &node_tree_;
Vector<AddNodeInfo> &r_items;
public:
GatherAddNodeSearchParams(const bNodeType &node_type,
const bNodeTree &node_tree,
Vector<AddNodeInfo> &r_items)
: node_type_(node_type), node_tree_(node_tree), r_items(r_items)
{
}
const bNodeTree &node_tree() const
{
return node_tree_;
}
const bNodeType &node_type() const
{
return node_type_;
}
/**
* \param weight: Used to customize the order when multiple search items match.
*/
void add_item(std::string ui_name,
std::string description,
AddNodeInfo::AfterAddFn fn = {},
int weight = 0);
};
void search_node_add_ops_for_basic_node(GatherAddNodeSearchParams &params);
} // namespace blender::nodes

View File

@ -7,6 +7,7 @@
#include "BKE_node_runtime.hh"
#include "NOD_add_node_search.hh"
#include "NOD_socket_search_link.hh"
#include "node_composite_util.hh"
@ -35,4 +36,5 @@ void cmp_node_type_base(bNodeType *ntype, int type, const char *name, short ncla
ntype->updatefunc = cmp_node_update_default;
ntype->insert_link = node_insert_link_default;
ntype->gather_link_search_ops = blender::nodes::search_link_ops_for_basic_node;
ntype->gather_add_node_search_ops = blender::nodes::search_node_add_ops_for_basic_node;
}

View File

@ -428,6 +428,7 @@ void register_node_type_cmp_cryptomatte_legacy()
node_type_storage(
&ntype, "NodeCryptomatte", file_ns::node_free_cryptomatte, file_ns::node_copy_cryptomatte);
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.get_compositor_operation = legacy_file_ns::get_compositor_operation;
ntype.realtime_compositor_unsupported_message = N_(
"Node not supported in the Viewport compositor");

View File

@ -58,6 +58,7 @@ void register_node_type_cmp_sephsva()
&ntype, CMP_NODE_SEPHSVA_LEGACY, "Separate HSVA (Legacy)", NODE_CLASS_CONVERTER);
ntype.declare = file_ns::cmp_node_sephsva_declare;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node;
nodeRegisterType(&ntype);
@ -112,6 +113,7 @@ void register_node_type_cmp_combhsva()
&ntype, CMP_NODE_COMBHSVA_LEGACY, "Combine HSVA (Legacy)", NODE_CLASS_CONVERTER);
ntype.declare = file_ns::cmp_node_combhsva_declare;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node;
nodeRegisterType(&ntype);

View File

@ -58,6 +58,7 @@ void register_node_type_cmp_seprgba()
&ntype, CMP_NODE_SEPRGBA_LEGACY, "Separate RGBA (Legacy)", NODE_CLASS_CONVERTER);
ntype.declare = file_ns::cmp_node_seprgba_declare;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node;
nodeRegisterType(&ntype);
@ -112,6 +113,7 @@ void register_node_type_cmp_combrgba()
&ntype, CMP_NODE_COMBRGBA_LEGACY, "Combine RGBA (Legacy)", NODE_CLASS_CONVERTER);
ntype.declare = file_ns::cmp_node_combrgba_declare;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node;
nodeRegisterType(&ntype);

View File

@ -86,6 +86,7 @@ void register_node_type_cmp_sepycca()
ntype.declare = file_ns::cmp_node_sepycca_declare;
ntype.initfunc = file_ns::node_composit_init_mode_sepycca;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node;
nodeRegisterType(&ntype);
@ -174,6 +175,7 @@ void register_node_type_cmp_combycca()
ntype.declare = file_ns::cmp_node_combycca_declare;
ntype.initfunc = file_ns::node_composit_init_mode_combycca;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node;
nodeRegisterType(&ntype);

View File

@ -58,6 +58,7 @@ void register_node_type_cmp_sepyuva()
&ntype, CMP_NODE_SEPYUVA_LEGACY, "Separate YUVA (Legacy)", NODE_CLASS_CONVERTER);
ntype.declare = file_ns::cmp_node_sepyuva_declare;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node;
nodeRegisterType(&ntype);
@ -112,6 +113,7 @@ void register_node_type_cmp_combyuva()
&ntype, CMP_NODE_COMBYUVA_LEGACY, "Combine YUVA (Legacy)", NODE_CLASS_CONVERTER);
ntype.declare = file_ns::cmp_node_combyuva_declare;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node;
nodeRegisterType(&ntype);

View File

@ -3,6 +3,7 @@
#include "node_function_util.hh"
#include "node_util.h"
#include "NOD_add_node_search.hh"
#include "NOD_socket_search_link.hh"
static bool fn_node_poll_default(const bNodeType * /*ntype*/,
@ -23,4 +24,5 @@ void fn_node_type_base(bNodeType *ntype, int type, const char *name, short nclas
ntype->poll = fn_node_poll_default;
ntype->insert_link = node_insert_link_default;
ntype->gather_link_search_ops = blender::nodes::search_link_ops_for_basic_node;
ntype->gather_add_node_search_ops = blender::nodes::search_node_add_ops_for_basic_node;
}

View File

@ -10,6 +10,7 @@
#include "BKE_mesh_runtime.h"
#include "BKE_pointcloud.h"
#include "NOD_add_node_search.hh"
#include "NOD_socket_search_link.hh"
namespace blender::nodes {
@ -58,4 +59,5 @@ void geo_node_type_base(bNodeType *ntype, int type, const char *name, short ncla
ntype->poll = geo_node_poll_default;
ntype->insert_link = node_insert_link_default;
ntype->gather_link_search_ops = blender::nodes::search_link_ops_for_basic_node;
ntype->gather_add_node_search_ops = blender::nodes::search_node_add_ops_for_basic_node;
}

View File

@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BKE_node.h"
#include "BLT_translation.h"
#include "NOD_add_node_search.hh"
#include "NOD_node_declaration.hh"
namespace blender::nodes {
void GatherAddNodeSearchParams::add_item(std::string ui_name,
std::string description,
AddNodeInfo::AfterAddFn fn,
int weight)
{
r_items.append(AddNodeInfo{std::move(ui_name), std::move(description), std::move(fn), weight});
}
void search_node_add_ops_for_basic_node(GatherAddNodeSearchParams &params)
{
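/* Default behavior: expose a single search entry that uses the node type's UI name and description. */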
params.add_item(IFACE_(params.node_type().ui_name), TIP_(params.node_type().ui_description));
}
} // namespace blender::nodes

View File

@ -11,6 +11,7 @@
#include "node_shader_util.hh"
#include "NOD_add_node_search.hh"
#include "NOD_socket_search_link.hh"
#include "node_exec.h"
@ -44,6 +45,7 @@ void sh_node_type_base(struct bNodeType *ntype, int type, const char *name, shor
ntype->poll = sh_node_poll_default;
ntype->insert_link = node_insert_link_default;
ntype->gather_link_search_ops = blender::nodes::search_link_ops_for_basic_node;
ntype->gather_add_node_search_ops = blender::nodes::search_node_add_ops_for_basic_node;
}
void sh_fn_node_type_base(bNodeType *ntype, int type, const char *name, short nclass)
@ -51,6 +53,7 @@ void sh_fn_node_type_base(bNodeType *ntype, int type, const char *name, short nc
sh_node_type_base(ntype, type, name, nclass);
ntype->poll = sh_fn_poll_default;
ntype->gather_link_search_ops = blender::nodes::search_link_ops_for_basic_node;
ntype->gather_add_node_search_ops = blender::nodes::search_node_add_ops_for_basic_node;
}
/* ****** */

View File

@ -12,7 +12,9 @@
#include "node_shader_util.hh"
#include "NOD_add_node_search.hh"
#include "NOD_socket_search_link.hh"
#include "RNA_enum_types.h"
namespace blender::nodes::node_sh_mix_cc {
@ -223,6 +225,16 @@ static void node_mix_gather_link_searches(GatherLinkSearchOpParams &params)
}
}
static void gather_add_node_searches(GatherAddNodeSearchParams &params)
{
params.add_item(IFACE_("Mix"), params.node_type().ui_description);
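/* Second entry adds the node pre-configured for color mixing. */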
params.add_item(IFACE_("Mix Color"),
params.node_type().ui_description,
[](const bContext & /*C*/, bNodeTree & /*node_tree*/, bNode &node) {
node_storage(node).data_type = SOCK_RGBA;
});
}
static void node_mix_init(bNodeTree * /*tree*/, bNode *node)
{
NodeShaderMix *data = MEM_cnew<NodeShaderMix>(__func__);
@ -497,5 +509,6 @@ void register_node_type_sh_mix()
ntype.draw_buttons = file_ns::sh_node_mix_layout;
ntype.labelfunc = file_ns::sh_node_mix_label;
ntype.gather_link_search_ops = file_ns::node_mix_gather_link_searches;
ntype.gather_add_node_search_ops = file_ns::gather_add_node_searches;
nodeRegisterType(&ntype);
}

View File

@ -156,5 +156,6 @@ void register_node_type_sh_mix_rgb()
ntype.gpu_fn = file_ns::gpu_shader_mix_rgb;
ntype.build_multi_function = file_ns::sh_node_mix_rgb_build_multi_function;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
nodeRegisterType(&ntype);
}

View File

@ -40,6 +40,7 @@ void register_node_type_sh_sephsv()
ntype.declare = file_ns::node_declare_sephsv;
ntype.gpu_fn = file_ns::gpu_shader_sephsv;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
nodeRegisterType(&ntype);
}
@ -77,6 +78,7 @@ void register_node_type_sh_combhsv()
ntype.declare = file_ns::node_declare_combhsv;
ntype.gpu_fn = file_ns::gpu_shader_combhsv;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
nodeRegisterType(&ntype);
}

View File

@ -80,6 +80,7 @@ void register_node_type_sh_seprgb()
ntype.gpu_fn = file_ns::gpu_shader_seprgb;
ntype.build_multi_function = file_ns::sh_node_seprgb_build_multi_function;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
nodeRegisterType(&ntype);
}
@ -125,6 +126,7 @@ void register_node_type_sh_combrgb()
ntype.gpu_fn = file_ns::gpu_shader_combrgb;
ntype.build_multi_function = file_ns::sh_node_combrgb_build_multi_function;
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
nodeRegisterType(&ntype);
}

View File

@ -36,6 +36,7 @@ void register_node_type_sh_squeeze()
sh_node_type_base(&ntype, SH_NODE_SQUEEZE, "Squeeze Value (Legacy)", NODE_CLASS_CONVERTER);
ntype.gather_link_search_ops = nullptr;
ntype.gather_add_node_search_ops = nullptr;
ntype.declare = file_ns::node_declare;
ntype.gpu_fn = file_ns::gpu_shader_squeeze;

Some files were not shown because too many files have changed in this diff.