WIP: Vulkan: Workbench #107886

Closed
Jeroen Bakker wants to merge 88 commits from Jeroen-Bakker:vulkan-draw-manager-workbench into main

9 changed files with 92 additions and 20 deletions
Showing only changes of commit 4a10381faa

extern/vulkan_memory_allocator/vk_mem_alloc.cc

@@ -8,5 +8,8 @@
 #endif
 
 #define VMA_IMPLEMENTATION
+#ifdef DEBUG
+#define VMA_ASSERT(test)
+#endif
 #include "vk_mem_alloc.h"

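The empty VMA_ASSERT(test) above silences the Vulkan Memory Allocator's internal asserts in debug builds. A note on why the ordering matters, as a minimal sketch paraphrasing vk_mem_alloc.h's documented fallback (the exact guard in the header may differ): the single-header library only maps VMA_ASSERT to the CRT assert when the including code has not defined the macro first, so the override has to precede the implementation include.

/* Paraphrased from vk_mem_alloc.h's configuration conventions: user-provided
 * macros win; otherwise VMA falls back to <cassert>'s assert(). */
#ifndef VMA_ASSERT
#  include <cassert>
#  define VMA_ASSERT(expr) assert(expr)
#endif
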
source/blender/gpu/vulkan/vk_batch.cc

@@ -10,12 +10,12 @@
 #include "vk_context.hh"
 #include "vk_index_buffer.hh"
 #include "vk_state_manager.hh"
+#include "vk_storage_buffer.hh"
 #include "vk_vertex_attribute_object.hh"
 #include "vk_vertex_buffer.hh"
 
 namespace blender::gpu {
 
-void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int instance_count)
+void VKBatch::draw_setup()
 {
   /* Currently the pipeline is rebuilt on each draw command. Clearing the dirty flag for
    * consistency with the internals of the GPU module. */
@@ -37,29 +37,53 @@ void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int i
   if (draw_indexed) {
     index_buffer->upload_data();
     index_buffer->bind(context);
-    context.command_buffer_get().draw(index_buffer->index_len_get(),
-                                      instance_count,
-                                      index_buffer->index_start_get(),
-                                      vertex_first,
-                                      instance_first);
   }
 }
 
+void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int instance_count)
+{
+  draw_setup();
+
+  VKContext &context = *VKContext::get();
+  VKCommandBuffer &command_buffer = context.command_buffer_get();
+  VKIndexBuffer *index_buffer = index_buffer_get();
+  const bool draw_indexed = index_buffer != nullptr;
+  if (draw_indexed) {
+    command_buffer.draw_indexed(index_buffer->index_len_get(),
+                                instance_count,
+                                index_buffer->index_start_get(),
+                                vertex_first,
+                                instance_first);
+  }
   else {
-    context.command_buffer_get().draw(vertex_first, vertex_count, instance_first, instance_count);
+    command_buffer.draw(vertex_first, vertex_count, instance_first, instance_count);
   }
-  context.command_buffer_get().submit();
+  command_buffer.submit();
 }
 
-void VKBatch::draw_indirect(GPUStorageBuf * /*indirect_buf*/, intptr_t /*offset*/)
+void VKBatch::draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
 {
-  NOT_YET_IMPLEMENTED;
+  multi_draw_indirect(indirect_buf, 1, offset, 0);
 }
 
-void VKBatch::multi_draw_indirect(GPUStorageBuf * /*indirect_buf*/,
-                                  int /*count*/,
-                                  intptr_t /*offset*/,
-                                  intptr_t /*stride*/)
+void VKBatch::multi_draw_indirect(GPUStorageBuf *indirect_buf,
+                                  int count,
+                                  intptr_t offset,
+                                  intptr_t stride)
 {
+  draw_setup();
+
+  VKStorageBuffer &indirect_buffer = *unwrap(unwrap(indirect_buf));
+  VKContext &context = *VKContext::get();
+  const bool draw_indexed = index_buffer_get() != nullptr;
+  VKCommandBuffer &command_buffer = context.command_buffer_get();
+  if (draw_indexed) {
+    command_buffer.draw_indexed_indirect(indirect_buffer, offset, count, stride);
+  }
+  else {
+    command_buffer.draw_indirect(indirect_buffer, offset, count, stride);
+  }
+  command_buffer.submit();
 }
 
 VKVertexBuffer *VKBatch::vertex_buffer_get(int index)

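A note on the *unwrap(unwrap(indirect_buf)) chain in multi_draw_indirect() above: two different overloads are involved. A minimal self-contained sketch, where the first overload follows the GPU module's usual opaque-handle convention and is an assumption, while the second is the helper this commit adds to vk_storage_buffer.hh (see below):

struct GPUStorageBuf; /* Opaque handle exposed by the public GPU C API. */

namespace blender::gpu {

class StorageBuf {};                          /* Backend-agnostic base (stub). */
class VKStorageBuffer : public StorageBuf {}; /* Vulkan implementation (stub). */

/* First unwrap: opaque handle -> generic base class (assumed convention,
 * not part of this commit). */
static inline StorageBuf *unwrap(GPUStorageBuf *storage_buf)
{
  return reinterpret_cast<StorageBuf *>(storage_buf);
}

/* Second unwrap, added by this commit: base class -> Vulkan subclass. */
static inline VKStorageBuffer *unwrap(StorageBuf *storage_buffer)
{
  return static_cast<VKStorageBuffer *>(storage_buffer);
}

}  // namespace blender::gpu
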
source/blender/gpu/vulkan/vk_batch.hh

@@ -25,6 +25,9 @@ class VKBatch : public Batch {
   VKVertexBuffer *vertex_buffer_get(int index);
   VKVertexBuffer *instance_buffer_get(int index);
   VKIndexBuffer *index_buffer_get();
+
+ private:
+  void draw_setup();
 };
 
 }  // namespace blender::gpu

source/blender/gpu/vulkan/vk_command_buffer.cc

@@ -12,6 +12,7 @@
 #include "vk_index_buffer.hh"
 #include "vk_memory.hh"
 #include "vk_pipeline.hh"
+#include "vk_storage_buffer.hh"
 #include "vk_texture.hh"
 #include "vk_vertex_buffer.hh"
@@ -244,7 +245,7 @@ void VKCommandBuffer::draw(int v_first, int v_count, int i_first, int i_count)
   state.draw_counts++;
 }
 
-void VKCommandBuffer::draw(
+void VKCommandBuffer::draw_indexed(
     int index_count, int instance_count, int first_index, int vertex_offset, int first_instance)
 {
   validate_framebuffer_exists();
@@ -254,6 +255,28 @@ void VKCommandBuffer::draw(
   state.draw_counts++;
 }
 
+void VKCommandBuffer::draw_indirect(const VKStorageBuffer &buffer,
+                                    VkDeviceSize offset,
+                                    uint32_t draw_count,
+                                    uint32_t stride)
+{
+  validate_framebuffer_exists();
+  ensure_active_framebuffer();
+  vkCmdDrawIndirect(vk_command_buffer_, buffer.vk_handle(), offset, draw_count, stride);
+  state.draw_counts++;
+}
+
+void VKCommandBuffer::draw_indexed_indirect(const VKStorageBuffer &buffer,
+                                            VkDeviceSize offset,
+                                            uint32_t draw_count,
+                                            uint32_t stride)
+{
+  validate_framebuffer_exists();
+  ensure_active_framebuffer();
+  vkCmdDrawIndexedIndirect(vk_command_buffer_, buffer.vk_handle(), offset, draw_count, stride);
+  state.draw_counts++;
+}
+
 void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
                                        VkPipelineStageFlags destination_stages)
 {

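The new indirect wrappers pass offset, draw_count, and stride straight through to Vulkan, so the storage buffer is expected to contain the spec-defined indirect command records shown below. Per the Vulkan specification, stride is ignored when draw_count is at most one, which is why VKBatch::draw_indirect() can forward a stride of 0.

/* From <vulkan/vulkan_core.h>: the records vkCmdDrawIndirect() and
 * vkCmdDrawIndexedIndirect() read from the buffer at `offset`. */
typedef struct VkDrawIndirectCommand {
  uint32_t vertexCount;
  uint32_t instanceCount;
  uint32_t firstVertex;
  uint32_t firstInstance;
} VkDrawIndirectCommand;

typedef struct VkDrawIndexedIndirectCommand {
  uint32_t indexCount;
  uint32_t instanceCount;
  uint32_t firstIndex;
  int32_t vertexOffset;
  uint32_t firstInstance;
} VkDrawIndexedIndirectCommand;
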
source/blender/gpu/vulkan/vk_command_buffer.hh

@@ -22,6 +22,7 @@ class VKPipeline;
 class VKPushConstants;
 class VKTexture;
 class VKVertexBuffer;
+class VKStorageBuffer;
 
 /** Command buffer to keep track of the life-time of a command buffer. */
 class VKCommandBuffer : NonCopyable, NonMovable {
@@ -182,8 +183,16 @@ class VKCommandBuffer : NonCopyable, NonMovable {
   void fill(VKBuffer &buffer, uint32_t data);
   void draw(int v_first, int v_count, int i_first, int i_count);
-  void draw(
+  void draw_indexed(
       int index_count, int instance_count, int first_index, int vertex_offset, int first_instance);
+  void draw_indirect(const VKStorageBuffer &buffer,
+                     VkDeviceSize offset,
+                     uint32_t draw_count,
+                     uint32_t stride);
+  void draw_indexed_indirect(const VKStorageBuffer &buffer,
+                             VkDeviceSize offset,
+                             uint32_t draw_count,
+                             uint32_t stride);
 
   /**
    * Stop recording commands, encode + send the recordings to Vulkan, wait until the

source/blender/gpu/vulkan/vk_device.hh

@@ -38,6 +38,7 @@ class VKDevice : public NonCopyable {
   /* Workarounds */
   struct {
     bool depth_component_24 = false;
+    bool texture_format_rgb16f = false;
   } workarounds_;

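The new texture_format_rgb16f flag presumably parallels the existing depth_component_24 entry: 3-component 16-bit float images are optional in Vulkan and unsupported on much hardware, so RGB16F textures need a padded RGBA16F fallback. This commit only adds the flag; a hypothetical detection sketch (function name and placement assumed, the Vulkan calls are real API) could look like:

#include <vulkan/vulkan.h>

/* Hypothetical: record the workaround when RGB16F cannot be sampled with
 * optimal tiling, so texture allocation can fall back to RGBA16F. */
static bool detect_texture_format_rgb16f_workaround(VkPhysicalDevice physical_device)
{
  VkFormatProperties properties = {};
  vkGetPhysicalDeviceFormatProperties(
      physical_device, VK_FORMAT_R16G16B16_SFLOAT, &properties);
  return (properties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == 0;
}
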
source/blender/gpu/vulkan/vk_storage_buffer.cc

@@ -31,6 +31,7 @@ void VKStorageBuffer::allocate()
 void VKStorageBuffer::bind(int slot)
 {
+  /* TODO: Move storage buffer bindings to the state manager to reuse bindings. */
   VKContext &context = *VKContext::get();
   if (!buffer_.is_allocated()) {
     allocate();
@@ -40,8 +41,9 @@ void VKStorageBuffer::bind(int slot)
   const std::optional<VKDescriptorSet::Location> location =
       shader_interface.descriptor_set_location(
           shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER, slot);
-  BLI_assert_msg(location, "Locations to SSBOs should always exist.");
-  shader->pipeline_get().descriptor_set_get().bind(*this, *location);
+  if (location) {
+    shader->pipeline_get().descriptor_set_get().bind(*this, *location);
+  }
 }
 
 void VKStorageBuffer::unbind() {}

source/blender/gpu/vulkan/vk_storage_buffer.hh

@@ -48,4 +48,9 @@ class VKStorageBuffer : public StorageBuf {
   void allocate();
 };
 
+static inline VKStorageBuffer *unwrap(StorageBuf *storage_buffer)
+{
+  return static_cast<VKStorageBuffer *>(storage_buffer);
+}
+
 }  // namespace blender::gpu

source/blender/gpu/vulkan/vk_texture.cc

@@ -53,6 +53,7 @@ void VKTexture::copy_to(Texture *tex)
   UNUSED_VARS_NDEBUG(src);
 
   VKContext &context = *VKContext::get();
   ensure_allocated();
+  layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
   dst->ensure_allocated();
   dst->layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
@@ -450,6 +451,7 @@ void VKTexture::current_layout_set(const VkImageLayout new_layout)
 void VKTexture::layout_ensure(VKContext &context, const VkImageLayout requested_layout)
 {
+  BLI_assert(is_allocated());
   const VkImageLayout current_layout = current_layout_get();
   if (current_layout == requested_layout) {
     return;