Compare commits
14 Commits
tmp-volume ... temp-vulka

Commits (SHA1):
ec40ef19c7
6078fe34f9
79a7030da5
fbd004f570
3f3648300d
404ed5a6ea
e766dcc333
48e4a417a3
e17eb27747
fc834ee79f
b83d03677e
8b079a4888
a1ce423ae5
2474810aa0
@@ -1201,6 +1201,7 @@ void GHOST_GetVulkanHandles(GHOST_ContextHandle context,
void *r_instance,
void *r_physical_device,
void *r_device,
void *r_compute_command_buffer,
uint32_t *r_graphic_queue_family);

/**
@@ -40,7 +40,7 @@ class GHOST_IContext {
virtual unsigned int getDefaultFramebuffer() = 0;

virtual GHOST_TSuccess getVulkanHandles(void *, void *, void *, uint32_t *) = 0;
virtual GHOST_TSuccess getVulkanHandles(void *, void *, void *, void *, uint32_t *) = 0;

/**
* Gets the Vulkan framebuffer related resource handles associated with the Vulkan context.
@@ -1203,10 +1203,12 @@ void GHOST_GetVulkanHandles(GHOST_ContextHandle contexthandle,
void *r_instance,
void *r_physical_device,
void *r_device,
void *r_compute_command_buffer,
uint32_t *r_graphic_queue_family)
{
GHOST_IContext *context = (GHOST_IContext *)contexthandle;
context->getVulkanHandles(r_instance, r_physical_device, r_device, r_graphic_queue_family);
context->getVulkanHandles(
r_instance, r_physical_device, r_device, r_compute_command_buffer, r_graphic_queue_family);
}

void GHOST_GetVulkanBackbuffer(GHOST_WindowHandle windowhandle,
@@ -142,6 +142,7 @@ class GHOST_Context : public GHOST_IContext {
virtual GHOST_TSuccess getVulkanHandles(void * /*r_instance*/,
void * /*r_physical_device*/,
void * /*r_device*/,
void * /*r_compute_command_buffer*/,
uint32_t * /*r_graphic_queue_family*/) override
{
return GHOST_kFailure;
@@ -192,6 +192,9 @@ GHOST_TSuccess GHOST_ContextVK::destroySwapchain()
if (m_render_pass != VK_NULL_HANDLE) {
vkDestroyRenderPass(m_device, m_render_pass, NULL);
}
if (m_compute_command_buffer != VK_NULL_HANDLE) {
vkFreeCommandBuffers(m_device, m_command_pool, 1, &m_compute_command_buffer);
}
for (auto command_buffer : m_command_buffers) {
vkFreeCommandBuffers(m_device, m_command_pool, 1, &command_buffer);
}
@@ -311,11 +314,13 @@ GHOST_TSuccess GHOST_ContextVK::getVulkanBackbuffer(void *image,
GHOST_TSuccess GHOST_ContextVK::getVulkanHandles(void *r_instance,
void *r_physical_device,
void *r_device,
void *r_compute_command_buffer,
uint32_t *r_graphic_queue_family)
{
*((VkInstance *)r_instance) = m_instance;
*((VkPhysicalDevice *)r_physical_device) = m_physical_device;
*((VkDevice *)r_device) = m_device;
*((VkCommandBuffer *)r_compute_command_buffer) = m_compute_command_buffer;
*r_graphic_queue_family = m_queue_family_graphic;

return GHOST_kSuccess;
@@ -619,16 +624,34 @@ static GHOST_TSuccess selectPresentMode(VkPhysicalDevice device,
return GHOST_kFailure;
}

GHOST_TSuccess GHOST_ContextVK::createCommandBuffers()
GHOST_TSuccess GHOST_ContextVK::createCommandPools()
{
m_command_buffers.resize(m_swapchain_image_views.size());

VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
poolInfo.queueFamilyIndex = m_queue_family_graphic;

VK_CHECK(vkCreateCommandPool(m_device, &poolInfo, NULL, &m_command_pool));
return GHOST_kSuccess;
}

GHOST_TSuccess GHOST_ContextVK::createComputeCommandBuffer()
{
assert(m_command_pool != VK_NULL_HANDLE);
VkCommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.commandPool = m_command_pool;
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = 1;

VK_CHECK(vkAllocateCommandBuffers(m_device, &alloc_info, &m_compute_command_buffer));
return GHOST_kSuccess;
}

GHOST_TSuccess GHOST_ContextVK::createGraphicsCommandBuffers()
{
assert(m_command_pool != VK_NULL_HANDLE);
m_command_buffers.resize(m_swapchain_image_views.size());

VkCommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
@@ -637,7 +660,6 @@ GHOST_TSuccess GHOST_ContextVK::createCommandBuffers()
alloc_info.commandBufferCount = static_cast<uint32_t>(m_command_buffers.size());

VK_CHECK(vkAllocateCommandBuffers(m_device, &alloc_info, m_command_buffers.data()));

return GHOST_kSuccess;
}
@@ -776,7 +798,7 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
VK_CHECK(vkCreateFence(m_device, &fence_info, NULL, &m_in_flight_fences[i]));
}

createCommandBuffers();
createGraphicsCommandBuffers();

return GHOST_kSuccess;
}
@@ -841,6 +863,13 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
extensions_device.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
}
extensions_device.push_back("VK_KHR_dedicated_allocation");
extensions_device.push_back("VK_KHR_get_memory_requirements2");
/* Enable MoltenVK required instance extensions.*/
#ifdef VK_MVK_MOLTENVK_EXTENSION_NAME
requireExtension(
extensions_available, extensions_enabled, "VK_KHR_get_physical_device_properties2");
#endif

VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
@@ -903,6 +932,15 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
return GHOST_kFailure;
}

#ifdef VK_MVK_MOLTENVK_EXTENSION_NAME
/* According to the Vulkan specs, when `VK_KHR_portability_subset` is available it should be
* enabled. See
* https://vulkan.lunarg.com/doc/view/1.2.198.1/mac/1.2-extensions/vkspec.html#VUID-VkDeviceCreateInfo-pProperties-04451*/
if (device_extensions_support(m_physical_device, {VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME})) {
extensions_device.push_back(VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME);
}
#endif

vector<VkDeviceQueueCreateInfo> queue_create_infos;

{
@@ -960,6 +998,9 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
VK_CHECK(vkCreateDevice(m_physical_device, &device_create_info, NULL, &m_device));

createCommandPools();
createComputeCommandBuffer();

vkGetDeviceQueue(m_device, m_queue_family_graphic, 0, &m_graphic_queue);

if (use_window_surface) {
@@ -113,6 +113,7 @@ class GHOST_ContextVK : public GHOST_Context {
GHOST_TSuccess getVulkanHandles(void *r_instance,
void *r_physical_device,
void *r_device,
void *r_compute_command_buffer,
uint32_t *r_graphic_queue_family);
/**
* Gets the Vulkan framebuffer related resource handles associated with the Vulkan context.
@@ -182,6 +183,7 @@ class GHOST_ContextVK : public GHOST_Context {
std::vector<VkImage> m_swapchain_images;
std::vector<VkImageView> m_swapchain_image_views;
std::vector<VkFramebuffer> m_swapchain_framebuffers;
VkCommandBuffer m_compute_command_buffer;
std::vector<VkCommandBuffer> m_command_buffers;
VkRenderPass m_render_pass;
VkExtent2D m_render_extent;
@@ -200,6 +202,8 @@ class GHOST_ContextVK : public GHOST_Context {
GHOST_TSuccess pickPhysicalDevice(std::vector<const char *> required_exts);
GHOST_TSuccess createSwapchain();
GHOST_TSuccess destroySwapchain();
GHOST_TSuccess createCommandBuffers();
GHOST_TSuccess createCommandPools();
GHOST_TSuccess createGraphicsCommandBuffers();
GHOST_TSuccess createComputeCommandBuffer();
GHOST_TSuccess recordCommandBuffers();
};
@@ -190,7 +190,9 @@ set(OPENGL_SRC
set(VULKAN_SRC
vulkan/vk_backend.cc
vulkan/vk_batch.cc
vulkan/vk_buffer.cc
vulkan/vk_context.cc
vulkan/vk_descriptor_pools.cc
vulkan/vk_drawlist.cc
vulkan/vk_fence.cc
vulkan/vk_framebuffer.cc
@@ -198,6 +200,7 @@ set(VULKAN_SRC
vulkan/vk_pixel_buffer.cc
vulkan/vk_query.cc
vulkan/vk_shader.cc
vulkan/vk_shader_interface.cc
vulkan/vk_shader_log.cc
vulkan/vk_storage_buffer.cc
vulkan/vk_texture.cc
@@ -206,7 +209,9 @@ set(VULKAN_SRC
vulkan/vk_backend.hh
vulkan/vk_batch.hh
vulkan/vk_buffer.hh
vulkan/vk_context.hh
vulkan/vk_descriptor_pools.hh
vulkan/vk_drawlist.hh
vulkan/vk_fence.hh
vulkan/vk_framebuffer.hh
@@ -214,6 +219,7 @@ set(VULKAN_SRC
vulkan/vk_pixel_buffer.hh
vulkan/vk_query.hh
vulkan/vk_shader.hh
vulkan/vk_shader_interface.hh
vulkan/vk_shader_log.hh
vulkan/vk_storage_buffer.hh
vulkan/vk_texture.hh
@@ -504,6 +510,7 @@ set(GLSL_SRC_TEST
tests/shaders/gpu_compute_1d_test.glsl
tests/shaders/gpu_compute_2d_test.glsl
tests/shaders/gpu_compute_ibo_test.glsl
tests/shaders/gpu_compute_ssbo_test.glsl
tests/shaders/gpu_compute_vbo_test.glsl
tests/shaders/gpu_compute_dummy_test.glsl
)
@@ -780,6 +787,7 @@ if(WITH_GTESTS)
tests/gpu_index_buffer_test.cc
tests/gpu_shader_builtin_test.cc
tests/gpu_shader_test.cc
tests/gpu_storage_buffer_test.cc

tests/gpu_testing.hh
)
@@ -42,6 +42,12 @@ GPU_SHADER_CREATE_INFO(gpu_compute_vbo_test)
.compute_source("gpu_compute_vbo_test.glsl")
.do_static_compilation(true);

GPU_SHADER_CREATE_INFO(gpu_compute_ssbo_test)
.local_group_size(1)
.storage_buf(0, Qualifier::WRITE, "int", "data_out[]")
.compute_source("gpu_compute_ssbo_test.glsl")
.do_static_compilation(true);

GPU_SHADER_CREATE_INFO(gpu_compute_ssbo_binding_test)
.local_group_size(1)
.storage_buf(0, Qualifier::WRITE, "int", "data0[]")
@@ -212,7 +212,7 @@ GPU_TEST(gpu_shader_compute_ibo)
static void test_gpu_shader_compute_ssbo()
{
if (!GPU_compute_shader_support()) {
if (!GPU_compute_shader_support() && !GPU_shader_storage_buffer_objects_support()) {
/* We can't test as the platform does not support compute shaders. */
std::cout << "Skipping compute shader test: platform not supported";
return;
@@ -221,14 +221,14 @@ static void test_gpu_shader_compute_ssbo()
static constexpr uint SIZE = 128;

/* Build compute shader. */
GPUShader *shader = GPU_shader_create_from_info_name("gpu_compute_ibo_test");
GPUShader *shader = GPU_shader_create_from_info_name("gpu_compute_ssbo_test");
EXPECT_NE(shader, nullptr);
GPU_shader_bind(shader);

/* Construct IBO. */
GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
SIZE * sizeof(uint32_t), nullptr, GPU_USAGE_DEVICE_ONLY, __func__);
GPU_storagebuf_bind(ssbo, GPU_shader_get_ssbo(shader, "out_indices"));
GPU_storagebuf_bind(ssbo, GPU_shader_get_ssbo(shader, "data_out"));

/* Dispatch compute task. */
GPU_compute_dispatch(shader, SIZE, 1, 1);
@@ -240,7 +240,7 @@ static void test_gpu_shader_compute_ssbo()
uint32_t data[SIZE];
GPU_storagebuf_read(ssbo, data);
for (int index = 0; index < SIZE; index++) {
uint32_t expected = index;
uint32_t expected = index * 4;
EXPECT_EQ(data[index], expected);
}
source/blender/gpu/tests/gpu_storage_buffer_test.cc (new file, 50 lines)
@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: Apache-2.0 */

#include "testing/testing.h"

#include "GPU_storage_buffer.h"

#include "BLI_vector.hh"

#include "gpu_testing.hh"

namespace blender::gpu::tests {

constexpr size_t SIZE = 128;
constexpr size_t SIZE_IN_BYTES = SIZE * sizeof(int);

static Vector<int32_t> test_data()
{
Vector<int32_t> data;
for (int i : IndexRange(SIZE)) {
data.append(i);
}
return data;
}

static void test_gpu_storage_buffer_create_update_read()
{
GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(
SIZE_IN_BYTES, nullptr, GPU_USAGE_STATIC, __func__);
EXPECT_NE(ssbo, nullptr);

/* Upload some dummy data. */
const Vector<int32_t> data = test_data();
GPU_storagebuf_update(ssbo, data.data());

/* Read back data from SSBO. */
Vector<int32_t> read_data;
read_data.resize(SIZE, 0);
GPU_storagebuf_read(ssbo, read_data.data());

/* Check if data is the same.*/
for (int i : IndexRange(SIZE)) {
EXPECT_EQ(data[i], read_data[i]);
}

GPU_storagebuf_free(ssbo);
}

GPU_TEST(gpu_storage_buffer_create_update_read);

} // namespace blender::gpu::tests
@@ -15,8 +15,9 @@ namespace blender::gpu {
void GPUTest::SetUp()
{
GPU_backend_type_selection_set(gpu_backend_type);
GHOST_GLSettings glSettings = {0};
GHOST_GLSettings glSettings = {};
glSettings.context_type = draw_context_type;
glSettings.flags = GHOST_glDebugContext;
CLG_init();
ghost_system = GHOST_CreateSystem();
ghost_context = GHOST_CreateOpenGLContext(ghost_system, glSettings);
@@ -0,0 +1,5 @@
void main()
{
int store_index = int(gl_GlobalInvocationID.x);
data_out[store_index] = store_index * 4;
}
@@ -123,9 +123,9 @@ UniformBuf *VKBackend::uniformbuf_alloc(int size, const char *name)
return new VKUniformBuffer(size, name);
}

StorageBuf *VKBackend::storagebuf_alloc(int size, GPUUsageType /*usage*/, const char *name)
StorageBuf *VKBackend::storagebuf_alloc(int size, GPUUsageType usage, const char *name)
{
return new VKStorageBuffer(size, name);
return new VKStorageBuffer(size, usage, name);
}

VertBuf *VKBackend::vertbuf_alloc()
source/blender/gpu/vulkan/vk_buffer.cc (new file, 104 lines)
@@ -0,0 +1,104 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */

/** \file
* \ingroup gpu
*/

#include "vk_buffer.hh"

namespace blender::gpu {

VKBuffer::~VKBuffer()
{
VKContext &context = *VKContext::get();
free(context);
}

bool VKBuffer::is_allocated()
{
return allocation_ != VK_NULL_HANDLE;
}

static VmaAllocationCreateFlagBits vma_allocation_flags(GPUUsageType usage)
{
switch (usage) {
case GPU_USAGE_STATIC:
return static_cast<VmaAllocationCreateFlagBits>(
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT);
case GPU_USAGE_DYNAMIC:
return static_cast<VmaAllocationCreateFlagBits>(
VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT);
case GPU_USAGE_DEVICE_ONLY:
return VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
case GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY:
case GPU_USAGE_STREAM:
break;
}
BLI_assert_msg(false, "Incorrect GPUUsageType");
return static_cast<VmaAllocationCreateFlagBits>(VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
VMA_ALLOCATION_CREATE_MAPPED_BIT);
}

bool VKBuffer::create(VKContext &context,
int64_t size_in_bytes,
GPUUsageType usage,
VkBufferUsageFlagBits buffer_usage)
{
BLI_assert(!is_allocated());

size_in_bytes_ = size_in_bytes;

VmaAllocator allocator = context.mem_allocator_get();
VkBufferCreateInfo create_info = {};
create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
create_info.flags = 0;
create_info.size = size_in_bytes;
create_info.usage = buffer_usage;
/* For now the compute and graphics command queues are the same, so we can safely assume
* exclusive mode.*/
create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
create_info.queueFamilyIndexCount = 1;
create_info.pQueueFamilyIndices = context.queue_family_ptr_get();

VmaAllocationCreateInfo vma_create_info = {};
vma_create_info.flags = vma_allocation_flags(usage);
vma_create_info.priority = 1.0f;
vma_create_info.usage = VMA_MEMORY_USAGE_AUTO;

VkResult result = vmaCreateBuffer(
allocator, &create_info, &vma_create_info, &vk_buffer_, &allocation_, nullptr);
return result == VK_SUCCESS;
}

bool VKBuffer::update(VKContext &context, const void *data)
{
void *mapped_memory;
bool result = map(context, &mapped_memory);
if (result) {
memcpy(mapped_memory, data, size_in_bytes_);
unmap(context);
}
return result;
}

bool VKBuffer::map(VKContext &context, void **r_mapped_memory)
{
VmaAllocator allocator = context.mem_allocator_get();
VkResult result = vmaMapMemory(allocator, allocation_, r_mapped_memory);
return result == VK_SUCCESS;
}

void VKBuffer::unmap(VKContext &context)
{
VmaAllocator allocator = context.mem_allocator_get();
vmaUnmapMemory(allocator, allocation_);
}

bool VKBuffer::free(VKContext &context)
{
VmaAllocator allocator = context.mem_allocator_get();
vmaDestroyBuffer(allocator, vk_buffer_, allocation_);
return true;
}

} // namespace blender::gpu
source/blender/gpu/vulkan/vk_buffer.hh (new file, 45 lines)
@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */

/** \file
* \ingroup gpu
*/

#pragma once

#include "gpu_context_private.hh"
#include "vk_context.hh"

#ifdef __APPLE__
# include <MoltenVK/vk_mvk_moltenvk.h>
#else
# include <vulkan/vulkan.h>
#endif

namespace blender::gpu {

/**
* Class for handling vulkan buffers (allocation/updating/binding).
*/
class VKBuffer {
int64_t size_in_bytes_;
VkBuffer vk_buffer_ = VK_NULL_HANDLE;
VmaAllocation allocation_ = VK_NULL_HANDLE;

public:
VKBuffer() = default;
virtual ~VKBuffer();

/** Has this buffer been allocated? */
bool is_allocated();

bool create(VKContext &context,
int64_t size,
GPUUsageType usage,
VkBufferUsageFlagBits buffer_usage);
bool update(VKContext &context, const void *data);
bool free(VKContext &context);
bool map(VKContext &context, void **r_mapped_memory);
void unmap(VKContext &context);
};
} // namespace blender::gpu
@@ -24,12 +24,15 @@ VKContext::VKContext(void *ghost_window, void *ghost_context)
&instance_,
&physical_device_,
&device_,
&compute_command_buffer_,
&graphic_queue_family_);

/* Initialize the memory allocator. */
VmaAllocatorCreateInfo info = {};
/* Should use same vulkan version as GHOST. */
info.vulkanApiVersion = VK_API_VERSION_1_2;
/* Should use same vulkan version as GHOST, but set to 1.0 for now. Raising it to 1.2 requires
* correct extensions and functions to be found, which doesn't work out-of-the-box. We should fix
* this, but to continue the development at hand we lower the API to 1.0.*/
info.vulkanApiVersion = VK_API_VERSION_1_0;
info.physicalDevice = physical_device_;
info.device = device_;
info.instance = instance_;
@@ -9,6 +9,8 @@
#include "gpu_context_private.hh"

#include "vk_descriptor_pools.hh"

#include "vk_mem_alloc.h"

#ifdef __APPLE__
@@ -25,10 +27,12 @@ class VKContext : public Context {
VkInstance instance_ = VK_NULL_HANDLE;
VkPhysicalDevice physical_device_ = VK_NULL_HANDLE;
VkDevice device_ = VK_NULL_HANDLE;
VkCommandBuffer compute_command_buffer_ = VK_NULL_HANDLE;
uint32_t graphic_queue_family_ = 0;

/** Allocator used for texture and buffers and other resources. */
VmaAllocator mem_allocator_ = VK_NULL_HANDLE;
VKDescriptorPools descriptor_pools_;

public:
VKContext(void *ghost_window, void *ghost_context);
@@ -56,6 +60,16 @@ class VKContext : public Context {
{
return device_;
}

VkCommandBuffer compute_command_buffer_get() const
{
return compute_command_buffer_;
}

const uint32_t *queue_family_ptr_get() const
{
return &graphic_queue_family_;
}

VmaAllocator mem_allocator_get() const
{
source/blender/gpu/vulkan/vk_descriptor_pools.cc (new file, 36 lines)
@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2022 Blender Foundation. All rights reserved. */

/** \file
* \ingroup gpu
*/

#include "vk_descriptor_pools.hh"

namespace blender::gpu {
VKDescriptorPools::VKDescriptorPools()
{
}

VKDescriptorPools::~VKDescriptorPools()
{
}

void VKDescriptorPools::new_pool(VkDevice vk_device)
{
Vector<VkDescriptorPoolSize> pool_sizes = {
{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, POOL_SIZE_UNIFORM_BUFFER},
};
VkDescriptorPoolCreateInfo pool_info = {};
pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_info.flags = 0;
pool_info.maxSets = POOL_SIZE_DESCRIPTOR_SETS;
pool_info.poolSizeCount = pool_sizes.size();
pool_info.pPoolSizes = pool_sizes.data();
VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
VkResult result = vkCreateDescriptorPool(vk_device, &pool_info, nullptr, &descriptor_pool);
UNUSED_VARS(result);
pools_.append(descriptor_pool);
}

} // namespace blender::gpu
source/blender/gpu/vulkan/vk_descriptor_pools.hh (new file, 41 lines)
@@ -0,0 +1,41 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2022 Blender Foundation. All rights reserved. */

/** \file
* \ingroup gpu
*/

#pragma once

#include "BLI_vector.hh"

#ifdef __APPLE__
# include <MoltenVK/vk_mvk_moltenvk.h>
#else
# include <vulkan/vulkan.h>
#endif

namespace blender::gpu {

/**
* List of VkDescriptorPools.
*
* In Vulkan a pool is constructed with a certain size. When more is needed it is advised to
* construct a second pool. VKDescriptorPools will keep track of those pools and construct
* new pools when the previous one is exhausted.
*/
class VKDescriptorPools {
/** Number of pool sizes */
static constexpr uint32_t POOL_SIZE_UNIFORM_BUFFER = 1000;
static constexpr uint32_t POOL_SIZE_DESCRIPTOR_SETS = 1000;

Vector<VkDescriptorPool> pools_;

public:
VKDescriptorPools();
~VKDescriptorPools();

private:
void new_pool(VkDevice vk_device);
};
} // namespace blender::gpu
@@ -8,6 +8,7 @@
#include "vk_shader.hh"

#include "vk_backend.hh"
#include "vk_shader_interface.hh"
#include "vk_shader_log.hh"

#include "BLI_string_utils.h"
@@ -598,6 +599,15 @@ VKShader::~VKShader()
vkDestroyShaderModule(device, compute_module_, nullptr);
compute_module_ = VK_NULL_HANDLE;
}
if (pipeline_layout_ != VK_NULL_HANDLE) {
vkDestroyPipelineLayout(device, pipeline_layout_, nullptr);
}
for (VkDescriptorSetLayout &layout : layouts_) {
vkDestroyDescriptorSetLayout(device, layout, nullptr);
}
if (compute_pipeline_ != VK_NULL_HANDLE) {
vkDestroyPipeline(device, compute_pipeline_, nullptr);
}
}

void VKShader::build_shader_module(MutableSpan<const char *> sources,
@@ -641,52 +651,184 @@ bool VKShader::finalize(const shader::ShaderCreateInfo *info)
return false;
}

if (vertex_module_ != VK_NULL_HANDLE) {
VkDevice vk_device = context_->device_get();
if (!finalize_descriptor_set_layouts(vk_device, *info)) {
return false;
}
if (!finalize_pipeline_layout(vk_device, *info)) {
return false;
}

/* TODO we might need to move the actual pipeline construction to a later stage as the graphics
* pipeline requires more data before it can be constructed.*/
bool result;
if (is_graphics_shader()) {
BLI_assert((fragment_module_ != VK_NULL_HANDLE && info->tf_type_ == GPU_SHADER_TFB_NONE) ||
(fragment_module_ == VK_NULL_HANDLE && info->tf_type_ != GPU_SHADER_TFB_NONE));
BLI_assert(compute_module_ == VK_NULL_HANDLE);

VkPipelineShaderStageCreateInfo vertex_stage_info = {};
vertex_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertex_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertex_stage_info.module = vertex_module_;
vertex_stage_info.pName = "main";
pipeline_infos_.append(vertex_stage_info);

if (geometry_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo geo_stage_info = {};
geo_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
geo_stage_info.stage = VK_SHADER_STAGE_GEOMETRY_BIT;
geo_stage_info.module = geometry_module_;
geo_stage_info.pName = "main";
pipeline_infos_.append(geo_stage_info);
}
if (fragment_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo fragment_stage_info = {};
fragment_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragment_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragment_stage_info.module = fragment_module_;
fragment_stage_info.pName = "main";
pipeline_infos_.append(fragment_stage_info);
}
result = finalize_graphics_pipeline(vk_device);
}
else {
BLI_assert(vertex_module_ == VK_NULL_HANDLE);
BLI_assert(geometry_module_ == VK_NULL_HANDLE);
BLI_assert(fragment_module_ == VK_NULL_HANDLE);
BLI_assert(compute_module_ != VK_NULL_HANDLE);

VkPipelineShaderStageCreateInfo compute_stage_info = {};
compute_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
compute_stage_info.stage = VK_SHADER_STAGE_GEOMETRY_BIT;
compute_stage_info.module = compute_module_;
compute_stage_info.pName = "main";
pipeline_infos_.append(compute_stage_info);
result = bake_compute_pipeline(vk_device);
}

#ifdef NDEBUG
UNUSED_VARS(info);
#endif
if (result) {
VKShaderInterface *vk_interface = new VKShaderInterface();
vk_interface->init(*info);
interface = vk_interface;
}
return result;
}

bool VKShader::finalize_graphics_pipeline(VkDevice /*vk_device */)
{
Vector<VkPipelineShaderStageCreateInfo> pipeline_stages;
VkPipelineShaderStageCreateInfo vertex_stage_info = {};
vertex_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertex_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertex_stage_info.module = vertex_module_;
vertex_stage_info.pName = "main";
pipeline_stages.append(vertex_stage_info);

if (geometry_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo geo_stage_info = {};
geo_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
geo_stage_info.stage = VK_SHADER_STAGE_GEOMETRY_BIT;
geo_stage_info.module = geometry_module_;
geo_stage_info.pName = "main";
pipeline_stages.append(geo_stage_info);
}
if (fragment_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo fragment_stage_info = {};
fragment_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragment_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragment_stage_info.module = fragment_module_;
fragment_stage_info.pName = "main";
pipeline_stages.append(fragment_stage_info);
}

return true;
}

bool VKShader::bake_compute_pipeline(VkDevice vk_device)
{
VkComputePipelineCreateInfo pipeline_info = {};
pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
pipeline_info.flags = 0;
pipeline_info.stage = {};
pipeline_info.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
pipeline_info.stage.flags = 0;
pipeline_info.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
pipeline_info.stage.module = compute_module_;
pipeline_info.layout = pipeline_layout_;
pipeline_info.stage.pName = "main";

if (vkCreateComputePipelines(
vk_device, nullptr, 1, &pipeline_info, nullptr, &compute_pipeline_) != VK_SUCCESS) {
return false;
}

return true;
}

bool VKShader::finalize_pipeline_layout(VkDevice vk_device,
const shader::ShaderCreateInfo & /*info*/)
{
VkPipelineLayoutCreateInfo pipeline_info = {};
pipeline_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_info.flags = 0;
pipeline_info.setLayoutCount = layouts_.size();
pipeline_info.pSetLayouts = layouts_.data();

if (vkCreatePipelineLayout(vk_device, &pipeline_info, nullptr, &pipeline_layout_) !=
VK_SUCCESS) {
return false;
};

return true;
}

static VkDescriptorType descriptor_type(
const shader::ShaderCreateInfo::Resource::BindType bind_type)
{
switch (bind_type) {
case shader::ShaderCreateInfo::Resource::BindType::IMAGE:
return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
case shader::ShaderCreateInfo::Resource::BindType::SAMPLER:
return VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
case shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER:
return VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
case shader::ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER:
return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
}
BLI_assert_unreachable();
return VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
}

static VkDescriptorSetLayoutBinding create_descriptor_set_layout_binding(
const shader::ShaderCreateInfo::Resource &resource)
{
VkDescriptorSetLayoutBinding binding = {};
binding.binding = resource.slot;
binding.descriptorType = descriptor_type(resource.bind_type);
binding.descriptorCount = 1;
binding.stageFlags = VK_SHADER_STAGE_ALL;
binding.pImmutableSamplers = nullptr;

return binding;
}

static void add_descriptor_set_layout_bindings(
const Vector<shader::ShaderCreateInfo::Resource> &resources,
Vector<VkDescriptorSetLayoutBinding> &r_bindings)
{
for (const shader::ShaderCreateInfo::Resource &resource : resources) {
r_bindings.append(create_descriptor_set_layout_binding(resource));
}
}

static VkDescriptorSetLayoutCreateInfo create_descriptor_set_layout(
const Vector<shader::ShaderCreateInfo::Resource> &resources,
Vector<VkDescriptorSetLayoutBinding> &r_bindings)
{
add_descriptor_set_layout_bindings(resources, r_bindings);
VkDescriptorSetLayoutCreateInfo set_info = {};
set_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
set_info.flags = 0;
set_info.pNext = nullptr;
set_info.bindingCount = r_bindings.size();
set_info.pBindings = r_bindings.data();
return set_info;
}

bool VKShader::finalize_descriptor_set_layouts(VkDevice vk_device,
const shader::ShaderCreateInfo &info)
{
if (info.pass_resources_.is_empty() && info.batch_resources_.is_empty()) {
return true;
}

/* Currently we create a single descriptor set. The goal would be to create one descriptor set
* for Frequency::PASS/BATCH. This isn't possible as areas expect that the binding location is
* static and predictable (eevee-next) or the binding location can be mapped to a single number
* (python). */
Vector<ShaderCreateInfo::Resource> all_resources;
all_resources.extend(info.pass_resources_);
all_resources.extend(info.batch_resources_);

Vector<VkDescriptorSetLayoutBinding> bindings;
VkDescriptorSetLayoutCreateInfo layout_info = create_descriptor_set_layout(all_resources,
bindings);
VkDescriptorSetLayout layout = VK_NULL_HANDLE;
if (vkCreateDescriptorSetLayout(vk_device, &layout_info, nullptr, &layout) != VK_SUCCESS) {
return false;
};
layouts_.append(layout);

return true;
}
@@ -707,6 +849,15 @@ void VKShader::transform_feedback_disable()

void VKShader::bind()
{
VKContext *context = VKContext::get();

if (is_compute_shader()) {
vkCmdBindPipeline(
context->compute_command_buffer_get(), VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline_);
}
else {
BLI_assert_unreachable();
}
}

void VKShader::unbind()
@@ -24,7 +24,9 @@ class VKShader : public Shader {
VkShaderModule fragment_module_ = VK_NULL_HANDLE;
VkShaderModule compute_module_ = VK_NULL_HANDLE;
bool compilation_failed_ = false;
Vector<VkPipelineShaderStageCreateInfo> pipeline_infos_;
Vector<VkDescriptorSetLayout> layouts_;
VkPipelineLayout pipeline_layout_ = VK_NULL_HANDLE;
VkPipeline compute_pipeline_ = VK_NULL_HANDLE;

public:
VKShader(const char *name);
@@ -63,6 +65,20 @@ class VKShader : public Shader {
void build_shader_module(MutableSpan<const char *> sources,
shaderc_shader_kind stage,
VkShaderModule *r_shader_module);
bool finalize_descriptor_set_layouts(VkDevice vk_device, const shader::ShaderCreateInfo &info);
bool finalize_pipeline_layout(VkDevice vk_device, const shader::ShaderCreateInfo &info);
bool finalize_graphics_pipeline(VkDevice vk_device);
bool bake_compute_pipeline(VkDevice vk_device);

bool is_graphics_shader() const
{
return !is_compute_shader();
}

bool is_compute_shader() const
{
return compute_module_ != VK_NULL_HANDLE;
}
};

} // namespace blender::gpu
source/blender/gpu/vulkan/vk_shader_interface.cc (new file, 55 lines)
@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */

/** \file
* \ingroup gpu
*/

#include "vk_shader_interface.hh"

namespace blender::gpu {

void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
{
using namespace blender::gpu::shader;

ssbo_len_ = 0;

Vector<ShaderCreateInfo::Resource> all_resources;
all_resources.extend(info.pass_resources_);
all_resources.extend(info.batch_resources_);

for (ShaderCreateInfo::Resource &res : all_resources) {
switch (res.bind_type) {
case ShaderCreateInfo::Resource::BindType::IMAGE:
case ShaderCreateInfo::Resource::BindType::SAMPLER:
case ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER:
// BLI_assert_msg(false, "not implemented yet");
break;
case ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER:
ssbo_len_++;
break;
}
}

int32_t input_tot_len = ssbo_len_;
inputs_ = static_cast<ShaderInput *>(
MEM_calloc_arrayN(input_tot_len, sizeof(ShaderInput), __func__));
ShaderInput *input = inputs_;

name_buffer_ = (char *)MEM_mallocN(info.interface_names_size_, "name_buffer");
uint32_t name_buffer_offset = 0;

for (const ShaderCreateInfo::Resource &res : all_resources) {
if (res.bind_type == ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER) {
copy_input_name(input, res.storagebuf.name, name_buffer_, name_buffer_offset);
input->location = input->binding = res.slot;
enabled_ssbo_mask_ |= (1 << input->binding);
input++;
}
}

sort_inputs();
}

} // namespace blender::gpu
source/blender/gpu/vulkan/vk_shader_interface.hh (new file, 20 lines)
@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */

/** \file
* \ingroup gpu
*/

#pragma once

#include "gpu_shader_create_info.hh"
#include "gpu_shader_interface.hh"

namespace blender::gpu {
class VKShaderInterface : public ShaderInterface {
 public:
VKShaderInterface() = default;

void init(const shader::ShaderCreateInfo &info);
};
} // namespace blender::gpu
@@ -11,8 +11,18 @@
namespace blender::gpu {

void VKStorageBuffer::update(const void * /*data*/)
void VKStorageBuffer::update(const void *data)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}
buffer_.update(context, data);
}

void VKStorageBuffer::allocate(VKContext &context)
{
buffer_.create(context, size_in_bytes_, usage_, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
}

void VKStorageBuffer::bind(int /*slot*/)
@@ -35,8 +45,18 @@ void VKStorageBuffer::copy_sub(VertBuf * /*src*/,
{
}

void VKStorageBuffer::read(void * /*data*/)
void VKStorageBuffer::read(void *data)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}

void *mapped_memory;
if (buffer_.map(context, &mapped_memory)) {
memcpy(data, mapped_memory, size_in_bytes_);
buffer_.unmap(context);
}
}

} // namespace blender::gpu
@@ -11,11 +11,17 @@
#include "gpu_storage_buffer_private.hh"

#include "vk_buffer.hh"

namespace blender::gpu {

class VKStorageBuffer : public StorageBuf {
GPUUsageType usage_;
VKBuffer buffer_;

public:
VKStorageBuffer(int size, const char *name) : StorageBuf(size, name)
VKStorageBuffer(int size, GPUUsageType usage, const char *name)
: StorageBuf(size, name), usage_(usage)
{
}

@@ -25,6 +31,9 @@ class VKStorageBuffer : public StorageBuf {
void clear(eGPUTextureFormat internal_format, eGPUDataFormat data_format, void *data) override;
void copy_sub(VertBuf *src, uint dst_offset, uint src_offset, uint copy_size) override;
void read(void *data) override;

private:
void allocate(VKContext &context);
};

} // namespace blender::gpu