Vulkan: Separate DataTransfer, Compute, Graphics Commands #114104

Merged
Jeroen Bakker merged 23 commits from Jeroen-Bakker/blender:vulkan/specialized-command-buffers into main 2023-10-30 14:21:24 +01:00
24 changed files with 894 additions and 216 deletions
Showing only changes of commit b2e333fb5d
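
For orientation, a hedged sketch of the call pattern this change introduces (summarizing the diff below; `groups_x/y/z` are placeholder values): callers now record through a `VKCommandBuffers` front-end that keeps one command buffer per command class, and submit via the context instead of submitting a single command buffer directly.

VKContext &context = *VKContext::get();
VKCommandBuffers &command_buffers = context.command_buffers_get();
/* Recorded on the compute command buffer; pending draw commands are submitted first. */
command_buffers.dispatch(groups_x, groups_y, groups_z);
/* Submits the pending data-transfer, compute and graphics command buffers, in that order. */
context.flush();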

View File

@ -200,6 +200,7 @@ set(VULKAN_SRC
vulkan/vk_bindable_resource.cc
vulkan/vk_buffer.cc
vulkan/vk_command_buffer.cc
vulkan/vk_command_buffers.cc
vulkan/vk_common.cc
vulkan/vk_context.cc
vulkan/vk_data_conversion.cc
@ -237,6 +238,8 @@ set(VULKAN_SRC
vulkan/vk_bindable_resource.hh
vulkan/vk_buffer.hh
vulkan/vk_command_buffer.hh
vulkan/vk_command_buffers.hh
vulkan/vk_commands.hh
vulkan/vk_common.hh
vulkan/vk_context.hh
vulkan/vk_data_conversion.hh

View File

@ -129,9 +129,9 @@ void VKBackend::compute_dispatch(int groups_x_len, int groups_y_len, int groups_
VKContext &context = *VKContext::get();
context.state_manager_get().apply_bindings();
context.bind_compute_pipeline();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.dispatch(groups_x_len, groups_y_len, groups_z_len);
command_buffer.submit();
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.dispatch(groups_x_len, groups_y_len, groups_z_len);
context.flush();
}
void VKBackend::compute_dispatch_indirect(StorageBuf *indirect_buf)
@ -141,9 +141,9 @@ void VKBackend::compute_dispatch_indirect(StorageBuf *indirect_buf)
context.state_manager_get().apply_bindings();
context.bind_compute_pipeline();
VKStorageBuffer &indirect_buffer = *unwrap(indirect_buf);
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.dispatch(indirect_buffer);
command_buffer.submit();
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.dispatch(indirect_buffer);
context.flush();
}
Context *VKBackend::context_alloc(void *ghost_window, void *ghost_context)

View File

@ -47,21 +47,21 @@ void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int i
draw_setup();
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKCommandBuffers &command_buffers = context.command_buffers_get();
VKIndexBuffer *index_buffer = index_buffer_get();
const bool draw_indexed = index_buffer != nullptr;
if (draw_indexed) {
command_buffer.draw_indexed(index_buffer->index_len_get(),
instance_count,
index_buffer->index_start_get(),
vertex_first,
instance_first);
command_buffers.draw_indexed(index_buffer->index_len_get(),
instance_count,
index_buffer->index_start_get(),
vertex_first,
instance_first);
}
else {
command_buffer.draw(vertex_first, vertex_count, instance_first, instance_count);
command_buffers.draw(vertex_first, vertex_count, instance_first, instance_count);
}
command_buffer.submit();
context.flush();
}
void VKBatch::draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset)
@ -79,14 +79,14 @@ void VKBatch::multi_draw_indirect(GPUStorageBuf *indirect_buf,
VKStorageBuffer &indirect_buffer = *unwrap(unwrap(indirect_buf));
VKContext &context = *VKContext::get();
const bool draw_indexed = index_buffer_get() != nullptr;
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKCommandBuffers &command_buffers = context.command_buffers_get();
if (draw_indexed) {
command_buffer.draw_indexed_indirect(indirect_buffer, offset, count, stride);
command_buffers.draw_indexed_indirect(indirect_buffer, offset, count, stride);
}
else {
command_buffer.draw_indirect(indirect_buffer, offset, count, stride);
command_buffers.draw_indirect(indirect_buffer, offset, count, stride);
}
command_buffer.submit();
context.flush();
}
VKVertexBuffer *VKBatch::vertex_buffer_get(int index)

View File

@ -98,8 +98,8 @@ void VKBuffer::update(const void *data) const
void VKBuffer::clear(VKContext &context, uint32_t clear_value)
{
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.fill(*this, clear_value);
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.fill(*this, clear_value);
}
void VKBuffer::read(void *data) const

View File

@ -7,6 +7,7 @@
*/
#include "vk_command_buffer.hh"
#include "vk_backend.hh"
#include "vk_buffer.hh"
#include "vk_context.hh"
#include "vk_device.hh"
@ -24,10 +25,11 @@ namespace blender::gpu {
VKCommandBuffer::~VKCommandBuffer()
{
if (vk_device_ != VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
vkDestroyFence(vk_device_, vk_fence_, vk_allocation_callbacks);
vk_fence_ = VK_NULL_HANDLE;
if (vk_command_buffer_ != VK_NULL_HANDLE) {
VKDevice &device = VKBackend::get().device_get();
vkFreeCommandBuffers(
device.device_get(), device.vk_command_pool_get(), 1, &vk_command_buffer_);
vk_command_buffer_ = VK_NULL_HANDLE;
}
}
@ -36,49 +38,22 @@ bool VKCommandBuffer::is_initialized() const
return vk_command_buffer_ != VK_NULL_HANDLE;
}
void VKCommandBuffer::init(const VKDevice &device)
void VKCommandBuffer::init(const VKDevice &device, VkCommandBuffer vk_command_buffer)
{
if (is_initialized()) {
return;
}
vk_device_ = device.device_get();
vk_queue_ = device.queue_get();
vk_command_buffer_ = vk_command_buffer;
/* When the last GHOST context is destroyed, the device is deallocated. A moment later the GPU
* context is destroyed. The first step is to activate it. Activating would retrieve the device
* from GHOST, which in that case is a #VK_NULL_HANDLE. */
if (vk_device_ == VK_NULL_HANDLE) {
return;
}
VkCommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.commandPool = device.vk_command_pool_get();
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = 1;
vkAllocateCommandBuffers(vk_device_, &alloc_info, &vk_command_buffer_);
submission_id_.reset();
state.stage = Stage::Initial;
if (vk_fence_ == VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
VkFenceCreateInfo fenceInfo{};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(vk_device_, &fenceInfo, vk_allocation_callbacks, &vk_fence_);
}
else {
vkResetFences(vk_device_, 1, &vk_fence_);
}
}
void VKCommandBuffer::begin_recording()
{
ensure_no_active_framebuffer();
if (is_in_stage(Stage::Submitted)) {
vkWaitForFences(vk_device_, 1, &vk_fence_, VK_TRUE, FenceTimeout);
vkResetFences(vk_device_, 1, &vk_fence_);
stage_transfer(Stage::Submitted, Stage::Executed);
}
if (is_in_stage(Stage::Executed)) {
@ -90,6 +65,7 @@ void VKCommandBuffer::begin_recording()
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(vk_command_buffer_, &begin_info);
stage_transfer(Stage::Initial, Stage::Recording);
state.recorded_command_counts = 0;
}
void VKCommandBuffer::end_recording()
@ -102,6 +78,7 @@ void VKCommandBuffer::end_recording()
void VKCommandBuffer::bind(const VKPipeline &pipeline, VkPipelineBindPoint bind_point)
{
vkCmdBindPipeline(vk_command_buffer_, bind_point, pipeline.vk_handle());
state.recorded_command_counts++;
}
void VKCommandBuffer::bind(const VKDescriptorSet &descriptor_set,
@ -111,6 +88,7 @@ void VKCommandBuffer::bind(const VKDescriptorSet &descriptor_set,
VkDescriptorSet vk_descriptor_set = descriptor_set.vk_handle();
vkCmdBindDescriptorSets(
vk_command_buffer_, bind_point, vk_pipeline_layout, 0, 1, &vk_descriptor_set, 0, 0);
state.recorded_command_counts++;
}
void VKCommandBuffer::bind(const uint32_t binding,
@ -132,6 +110,7 @@ void VKCommandBuffer::bind(const uint32_t binding,
validate_framebuffer_exists();
ensure_active_framebuffer();
vkCmdBindVertexBuffers(vk_command_buffer_, binding, 1, &vk_vertex_buffer, &offset);
state.recorded_command_counts++;
}
void VKCommandBuffer::bind(const VKBufferWithOffset &index_buffer, VkIndexType index_type)
@ -140,6 +119,7 @@ void VKCommandBuffer::bind(const VKBufferWithOffset &index_buffer, VkIndexType i
ensure_active_framebuffer();
vkCmdBindIndexBuffer(
vk_command_buffer_, index_buffer.buffer.vk_handle(), index_buffer.offset, index_type);
state.recorded_command_counts++;
}
void VKCommandBuffer::begin_render_pass(VKFrameBuffer &framebuffer)
@ -168,12 +148,14 @@ void VKCommandBuffer::push_constants(const VKPushConstants &push_constants,
push_constants.offset(),
push_constants.layout_get().size_in_bytes(),
push_constants.data());
state.recorded_command_counts++;
}
void VKCommandBuffer::fill(VKBuffer &buffer, uint32_t clear_data)
{
ensure_no_active_framebuffer();
vkCmdFillBuffer(vk_command_buffer_, buffer.vk_handle(), 0, buffer.size_in_bytes(), clear_data);
state.recorded_command_counts++;
}
void VKCommandBuffer::copy(VKBuffer &dst_buffer,
@ -187,6 +169,7 @@ void VKCommandBuffer::copy(VKBuffer &dst_buffer,
dst_buffer.vk_handle(),
regions.size(),
regions.data());
state.recorded_command_counts++;
}
void VKCommandBuffer::copy(VKTexture &dst_texture,
@ -200,6 +183,7 @@ void VKCommandBuffer::copy(VKTexture &dst_texture,
dst_texture.current_layout_get(),
regions.size(),
regions.data());
state.recorded_command_counts++;
}
void VKCommandBuffer::copy(VKTexture &dst_texture,
@ -214,6 +198,7 @@ void VKCommandBuffer::copy(VKTexture &dst_texture,
dst_texture.current_layout_get(),
regions.size(),
regions.data());
state.recorded_command_counts++;
}
void VKCommandBuffer::copy(VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBufferCopy> regions)
@ -221,6 +206,7 @@ void VKCommandBuffer::copy(VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBuf
ensure_no_active_framebuffer();
vkCmdCopyBuffer(
vk_command_buffer_, src_buffer, dst_buffer.vk_handle(), regions.size(), regions.data());
state.recorded_command_counts++;
}
void VKCommandBuffer::blit(VKTexture &dst_texture,
@ -249,6 +235,7 @@ void VKCommandBuffer::blit(VKTexture &dst_texture,
regions.size(),
regions.data(),
VK_FILTER_NEAREST);
state.recorded_command_counts++;
}
void VKCommandBuffer::clear(VkImage vk_image,
@ -263,6 +250,7 @@ void VKCommandBuffer::clear(VkImage vk_image,
&vk_clear_color,
ranges.size(),
ranges.data());
state.recorded_command_counts++;
}
void VKCommandBuffer::clear(VkImage vk_image,
@ -277,6 +265,7 @@ void VKCommandBuffer::clear(VkImage vk_image,
&vk_clear_value,
ranges.size(),
ranges.data());
state.recorded_command_counts++;
}
void VKCommandBuffer::clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas)
@ -285,6 +274,7 @@ void VKCommandBuffer::clear(Span<VkClearAttachment> attachments, Span<VkClearRec
ensure_active_framebuffer();
vkCmdClearAttachments(
vk_command_buffer_, attachments.size(), attachments.data(), areas.size(), areas.data());
state.recorded_command_counts++;
}
void VKCommandBuffer::draw(int v_first, int v_count, int i_first, int i_count)
@ -293,6 +283,7 @@ void VKCommandBuffer::draw(int v_first, int v_count, int i_first, int i_count)
ensure_active_framebuffer();
vkCmdDraw(vk_command_buffer_, v_count, i_count, v_first, i_first);
state.draw_counts++;
state.recorded_command_counts++;
}
void VKCommandBuffer::draw_indexed(
@ -303,6 +294,7 @@ void VKCommandBuffer::draw_indexed(
vkCmdDrawIndexed(
vk_command_buffer_, index_count, instance_count, first_index, vertex_offset, first_instance);
state.draw_counts++;
state.recorded_command_counts++;
}
void VKCommandBuffer::draw_indirect(const VKStorageBuffer &buffer,
@ -314,6 +306,7 @@ void VKCommandBuffer::draw_indirect(const VKStorageBuffer &buffer,
ensure_active_framebuffer();
vkCmdDrawIndirect(vk_command_buffer_, buffer.vk_handle(), offset, draw_count, stride);
state.draw_counts++;
state.recorded_command_counts++;
}
void VKCommandBuffer::draw_indexed_indirect(const VKStorageBuffer &buffer,
@ -326,6 +319,7 @@ void VKCommandBuffer::draw_indexed_indirect(const VKStorageBuffer &buffer,
ensure_active_framebuffer();
vkCmdDrawIndexedIndirect(vk_command_buffer_, buffer.vk_handle(), offset, draw_count, stride);
state.draw_counts++;
state.recorded_command_counts++;
}
void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
@ -344,6 +338,7 @@ void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
nullptr,
0,
nullptr);
state.recorded_command_counts++;
}
void VKCommandBuffer::pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers)
@ -359,37 +354,25 @@ void VKCommandBuffer::pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_b
nullptr,
image_memory_barriers.size(),
image_memory_barriers.data());
state.recorded_command_counts++;
}
void VKCommandBuffer::dispatch(int groups_x_len, int groups_y_len, int groups_z_len)
{
ensure_no_active_framebuffer();
vkCmdDispatch(vk_command_buffer_, groups_x_len, groups_y_len, groups_z_len);
state.recorded_command_counts++;
}
void VKCommandBuffer::dispatch(VKStorageBuffer &command_buffer)
{
ensure_no_active_framebuffer();
vkCmdDispatchIndirect(vk_command_buffer_, command_buffer.vk_handle(), 0);
state.recorded_command_counts++;
}
void VKCommandBuffer::submit()
void VKCommandBuffer::commands_submitted()
{
ensure_no_active_framebuffer();
end_recording();
submit_commands();
begin_recording();
}
void VKCommandBuffer::submit_commands()
{
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &vk_command_buffer_;
vkQueueSubmit(vk_queue_, 1, &submit_info, vk_fence_);
submission_id_.next();
stage_transfer(Stage::BetweenRecordingAndSubmitting, Stage::Submitted);
}
@ -415,6 +398,7 @@ void VKCommandBuffer::ensure_no_active_framebuffer()
vkCmdEndRenderPass(vk_command_buffer_);
state.framebuffer_active_ = false;
state.switches_++;
state.recorded_command_counts++;
}
}
@ -437,6 +421,7 @@ void VKCommandBuffer::ensure_active_framebuffer()
vkCmdBeginRenderPass(vk_command_buffer_, &render_pass_begin_info, VK_SUBPASS_CONTENTS_INLINE);
state.framebuffer_active_ = true;
state.switches_++;
state.recorded_command_counts++;
}
}

View File

@ -8,41 +8,20 @@
#pragma once
#include "vk_commands.hh"
#include "vk_common.hh"
#include "vk_resource_tracker.hh"
#include "BLI_utility_mixins.hh"
namespace blender::gpu {
class VKBuffer;
struct VKBufferWithOffset;
class VKDescriptorSet;
class VKFrameBuffer;
class VKIndexBuffer;
class VKPipeline;
class VKPushConstants;
class VKStorageBuffer;
class VKTexture;
class VKVertexBuffer;
class VKDevice;
/** Wrapper to keep track of the life-time and recording state of a Vulkan command buffer. */
class VKCommandBuffer : NonCopyable, NonMovable {
class VKCommandBuffer : VKCommands, NonCopyable, NonMovable {
/** Non-owning handles to the command buffer and device. The handles are owned by `GHOST_ContextVK`. */
VkDevice vk_device_ = VK_NULL_HANDLE;
VkCommandBuffer vk_command_buffer_ = VK_NULL_HANDLE;
VkQueue vk_queue_ = VK_NULL_HANDLE;
/**
* Timeout to use when waiting for fences, in nanoseconds.
*
* Currently needed because the fence wait would stall the second time a command buffer is
* submitted without any recorded commands. This should be fixed and the timeout removed.
*/
static constexpr uint64_t FenceTimeout = UINT64_MAX;
/** Owning handles */
VkFence vk_fence_ = VK_NULL_HANDLE;
VKSubmissionID submission_id_;
private:
enum class Stage {
@ -94,6 +73,11 @@ class VKCommandBuffer : NonCopyable, NonMovable {
*/
Stage stage = Stage::Initial;
/**
* The number of commands added to the command buffer since the last submission.
*/
uint64_t recorded_command_counts = 0;
} state;
bool is_in_stage(Stage stage)
{
@ -134,25 +118,27 @@ class VKCommandBuffer : NonCopyable, NonMovable {
public:
virtual ~VKCommandBuffer();
bool is_initialized() const;
void init(const VKDevice &vk_device);
void init(const VKDevice &vk_device, VkCommandBuffer vk_command_buffer);
void begin_recording();
void end_recording();
void bind(const VKPipeline &vk_pipeline, VkPipelineBindPoint bind_point);
void bind(const VKPipeline &vk_pipeline, VkPipelineBindPoint bind_point) override;
void bind(const VKDescriptorSet &descriptor_set,
const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point);
VkPipelineBindPoint bind_point) override;
void bind(const uint32_t binding,
const VKVertexBuffer &vertex_buffer,
const VkDeviceSize offset);
const VkDeviceSize offset) override;
/* Bind the given buffer as a vertex buffer. */
void bind(const uint32_t binding, const VKBufferWithOffset &vertex_buffer);
void bind(const uint32_t binding, const VkBuffer &vk_vertex_buffer, const VkDeviceSize offset);
void bind(const uint32_t binding, const VKBufferWithOffset &vertex_buffer) override;
void bind(const uint32_t binding,
const VkBuffer &vk_vertex_buffer,
const VkDeviceSize offset) override;
/* Bind the given buffer as an index buffer. */
void bind(const VKBufferWithOffset &index_buffer, VkIndexType index_type);
void bind(const VKBufferWithOffset &index_buffer, VkIndexType index_type) override;
void begin_render_pass(VKFrameBuffer &framebuffer);
void end_render_pass(const VKFrameBuffer &framebuffer);
void begin_render_pass(VKFrameBuffer &framebuffer) override;
void end_render_pass(const VKFrameBuffer &framebuffer) override;
/**
* Add a push constant command to the command buffer.
@ -161,23 +147,27 @@ class VKCommandBuffer : NonCopyable, NonMovable {
*/
void push_constants(const VKPushConstants &push_constants,
const VkPipelineLayout vk_pipeline_layout,
const VkShaderStageFlags vk_shader_stages);
void dispatch(int groups_x_len, int groups_y_len, int groups_z_len);
void dispatch(VKStorageBuffer &command_buffer);
const VkShaderStageFlags vk_shader_stages) override;
void dispatch(int groups_x_len, int groups_y_len, int groups_z_len) override;
void dispatch(VKStorageBuffer &command_buffer) override;
/** Copy the contents of a texture MIP level to the dst buffer. */
void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span<VkBufferImageCopy> regions);
void copy(VKTexture &dst_texture, VKBuffer &src_buffer, Span<VkBufferImageCopy> regions);
void copy(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageCopy> regions);
void copy(VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBufferCopy> regions);
void blit(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageBlit> regions);
void copy(VKBuffer &dst_buffer,
VKTexture &src_texture,
Span<VkBufferImageCopy> regions) override;
void copy(VKTexture &dst_texture,
VKBuffer &src_buffer,
Span<VkBufferImageCopy> regions) override;
void copy(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageCopy> regions) override;
void copy(VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBufferCopy> regions) override;
void blit(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageBlit> regions) override;
void blit(VKTexture &dst_texture,
VkImageLayout dst_layout,
VKTexture &src_texture,
VkImageLayout src_layout,
Span<VkImageBlit> regions);
Span<VkImageBlit> regions) override;
void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages);
void pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers);
VkPipelineStageFlags destination_stages) override;
void pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers) override;
/**
* Clear color image resource.
@ -185,7 +175,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
void clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges);
Span<VkImageSubresourceRange> ranges) override;
/**
* Clear depth/stencil aspect of an image resource.
@ -193,41 +183,46 @@ class VKCommandBuffer : NonCopyable, NonMovable {
void clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearDepthStencilValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges);
Span<VkImageSubresourceRange> ranges) override;
/**
* Clear attachments of the active framebuffer.
*/
void clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas);
void fill(VKBuffer &buffer, uint32_t data);
void clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas) override;
void fill(VKBuffer &buffer, uint32_t data) override;
void draw(int v_first, int v_count, int i_first, int i_count);
void draw_indexed(
int index_count, int instance_count, int first_index, int vertex_offset, int first_instance);
void draw(int v_first, int v_count, int i_first, int i_count) override;
void draw_indexed(int index_count,
int instance_count,
int first_index,
int vertex_offset,
int first_instance) override;
void draw_indirect(const VKStorageBuffer &buffer,
VkDeviceSize offset,
uint32_t draw_count,
uint32_t stride);
uint32_t stride) override;
void draw_indexed_indirect(const VKStorageBuffer &buffer,
VkDeviceSize offset,
uint32_t draw_count,
uint32_t stride);
uint32_t stride) override;
/**
* Stop recording commands, encode + send the recordings to Vulkan, wait until the commands
* have been executed, and start the command buffer to accept recordings again.
* Return the Vulkan handle of the command buffer.
*/
void submit();
const VKSubmissionID &submission_id_get() const
VkCommandBuffer vk_command_buffer() const
{
return submission_id_;
return vk_command_buffer_;
}
private:
void submit_commands();
bool has_recorded_commands() const
{
return state.recorded_command_counts != 0;
}
void commands_submitted();
private:
/**
* Validate that there isn't a framebuffer being tracked (bound or not bound).
*

View File

@ -0,0 +1,398 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#include "vk_command_buffers.hh"
#include "vk_device.hh"
#include "vk_memory.hh"
#include "BLI_assert.h"
namespace blender::gpu {
VKCommandBuffers::~VKCommandBuffers()
{
if (vk_fence_ != VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
vkDestroyFence(vk_device_, vk_fence_, vk_allocation_callbacks);
vk_fence_ = VK_NULL_HANDLE;
}
}
void VKCommandBuffers::init(const VKDevice &device)
{
if (initialized_) {
return;
}
initialized_ = true;
vk_device_ = device.device_get();
vk_queue_ = device.queue_get();
/* When the last GHOST context is destroyed, the device is deallocated. A moment later the GPU
* context is destroyed. The first step is to activate it. Activating would retrieve the device
* from GHOST, which in that case is a #VK_NULL_HANDLE. */
if (vk_device_ == VK_NULL_HANDLE) {
return;
}
init_command_buffers(device);
init_fence();
submission_id_.reset();
}
static void init_command_buffer(const VKDevice &device,
VKCommandBuffer &command_buffer,
VkCommandBuffer vk_command_buffer,
const char *name)
{
command_buffer.init(device, vk_command_buffer);
command_buffer.begin_recording();
debug::object_label(vk_command_buffer, name);
}
void VKCommandBuffers::init_command_buffers(const VKDevice &device)
{
VkCommandBuffer vk_command_buffers[4] = {VK_NULL_HANDLE};
VkCommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.commandPool = device.vk_command_pool_get();
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = (uint32_t)Type::Max;
vkAllocateCommandBuffers(vk_device_, &alloc_info, vk_command_buffers);
init_command_buffer(device,
command_buffer_get(Type::DataTransfer),
vk_command_buffers[(int)Type::DataTransfer],
"Data Transfer Command Buffer");
init_command_buffer(device,
command_buffer_get(Type::Compute),
vk_command_buffers[(int)Type::Compute],
"Compute Command Buffer");
init_command_buffer(device,
command_buffer_get(Type::Graphics),
vk_command_buffers[(int)Type::Graphics],
"Graphics Command Buffer");
}
void VKCommandBuffers::init_fence()
{
if (vk_fence_ == VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
VkFenceCreateInfo fenceInfo{};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(vk_device_, &fenceInfo, vk_allocation_callbacks, &vk_fence_);
}
}
static void submit_command_buffer(VkDevice vk_device,
VkQueue vk_queue,
VKCommandBuffer &command_buffer,
VkFence vk_fence,
uint64_t timeout)
{
VkCommandBuffer handles[1];
command_buffer.end_recording();
handles[0] = command_buffer.vk_command_buffer();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = handles;
vkQueueSubmit(vk_queue, 1, &submit_info, vk_fence);
command_buffer.commands_submitted();
vkWaitForFences(vk_device, 1, &vk_fence, VK_TRUE, timeout);
vkResetFences(vk_device, 1, &vk_fence);
command_buffer.begin_recording();
}
void VKCommandBuffers::submit()
{
bool work_submitted = false;
/* TODO: Data transfers should be queued together with compute or draw commands. */
VKCommandBuffer &data_transfer = command_buffer_get(Type::DataTransfer);
if (data_transfer.has_recorded_commands()) {
submit_command_buffer(vk_device_, vk_queue_, data_transfer, vk_fence_, FenceTimeout);
work_submitted = true;
}
VKCommandBuffer &compute = command_buffer_get(Type::Compute);
if (compute.has_recorded_commands()) {
submit_command_buffer(vk_device_, vk_queue_, compute, vk_fence_, FenceTimeout);
work_submitted = true;
}
VKCommandBuffer &graphics = command_buffer_get(Type::Graphics);
if (graphics.has_recorded_commands()) {
submit_command_buffer(vk_device_, vk_queue_, graphics, vk_fence_, FenceTimeout);
work_submitted = true;
}
if (work_submitted) {
submission_id_.next();
}
}
void VKCommandBuffers::ensure_no_compute_or_draw_commands()
{
if (command_buffer_get(Type::Compute).has_recorded_commands() ||
command_buffer_get(Type::Graphics).has_recorded_commands())
{
submit();
}
}
void VKCommandBuffers::ensure_no_compute_commands()
{
if (command_buffer_get(Type::Compute).has_recorded_commands()) {
submit();
}
}
void VKCommandBuffers::ensure_no_draw_commands()
{
if (command_buffer_get(Type::Graphics).has_recorded_commands()) {
submit();
}
}
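
A hedged illustration of the ordering these guards enforce (hypothetical caller sequence; the buffer, texture and region names are placeholders):

/* The dispatch is recorded on the compute command buffer. */
command_buffers.dispatch(64, 1, 1);
/* copy() is a data-transfer command: ensure_no_compute_or_draw_commands() sees the pending
 * dispatch and calls submit(), so the compute work reaches the queue before the copy is
 * recorded on the data-transfer command buffer. */
command_buffers.copy(dst_buffer, src_texture, regions);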
/**
* \name Vulkan commands
* \{
*/
void VKCommandBuffers::bind(const VKPipeline &vk_pipeline, VkPipelineBindPoint bind_point)
{
Type type;
if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
ensure_no_draw_commands();
type = Type::Compute;
}
if (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) {
ensure_no_compute_commands();
type = Type::Graphics;
}
command_buffer_get(type).bind(vk_pipeline, bind_point);
}
void VKCommandBuffers::bind(const VKDescriptorSet &descriptor_set,
const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point)
{
Type type;
if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
ensure_no_draw_commands();
type = Type::Compute;
}
if (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) {
ensure_no_compute_commands();
type = Type::Graphics;
}
command_buffer_get(type).bind(descriptor_set, vk_pipeline_layout, bind_point);
}
void VKCommandBuffers::bind(const uint32_t binding,
const VKVertexBuffer &vertex_buffer,
const VkDeviceSize offset)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).bind(binding, vertex_buffer, offset);
}
void VKCommandBuffers::bind(const uint32_t binding, const VKBufferWithOffset &vertex_buffer)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).bind(binding, vertex_buffer);
}
void VKCommandBuffers::bind(const uint32_t binding,
const VkBuffer &vk_vertex_buffer,
const VkDeviceSize offset)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).bind(binding, vk_vertex_buffer, offset);
}
void VKCommandBuffers::bind(const VKBufferWithOffset &index_buffer, VkIndexType index_type)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).bind(index_buffer, index_type);
}
void VKCommandBuffers::begin_render_pass(VKFrameBuffer &framebuffer)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).begin_render_pass(framebuffer);
}
void VKCommandBuffers::end_render_pass(const VKFrameBuffer &framebuffer)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).end_render_pass(framebuffer);
}
void VKCommandBuffers::push_constants(const VKPushConstants &push_constants,
const VkPipelineLayout vk_pipeline_layout,
const VkShaderStageFlags vk_shader_stages)
{
Type type;
if (vk_shader_stages == VK_SHADER_STAGE_COMPUTE_BIT) {
ensure_no_draw_commands();
type = Type::Compute;
}
else {
ensure_no_compute_commands();
type = Type::Graphics;
}
command_buffer_get(type).push_constants(push_constants, vk_pipeline_layout, vk_shader_stages);
}
void VKCommandBuffers::dispatch(int groups_x_len, int groups_y_len, int groups_z_len)
{
ensure_no_draw_commands();
command_buffer_get(Type::Compute).dispatch(groups_x_len, groups_y_len, groups_z_len);
}
void VKCommandBuffers::dispatch(VKStorageBuffer &command_buffer)
{
ensure_no_draw_commands();
command_buffer_get(Type::Compute).dispatch(command_buffer);
}
void VKCommandBuffers::copy(VKBuffer &dst_buffer,
VKTexture &src_texture,
Span<VkBufferImageCopy> regions)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer).copy(dst_buffer, src_texture, regions);
}
void VKCommandBuffers::copy(VKTexture &dst_texture,
VKBuffer &src_buffer,
Span<VkBufferImageCopy> regions)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer).copy(dst_texture, src_buffer, regions);
}
void VKCommandBuffers::copy(VKTexture &dst_texture,
VKTexture &src_texture,
Span<VkImageCopy> regions)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer).copy(dst_texture, src_texture, regions);
}
void VKCommandBuffers::copy(VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBufferCopy> regions)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer).copy(dst_buffer, src_buffer, regions);
}
void VKCommandBuffers::blit(VKTexture &dst_texture,
VKTexture &src_texture,
Span<VkImageBlit> regions)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer).blit(dst_texture, src_texture, regions);
}
void VKCommandBuffers::blit(VKTexture &dst_texture,
VkImageLayout dst_layout,
VKTexture &src_texture,
VkImageLayout src_layout,
Span<VkImageBlit> regions)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer)
.blit(dst_texture, dst_layout, src_texture, src_layout, regions);
}
void VKCommandBuffers::pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages)
{
/* TODO: Command isn't used. */
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer).pipeline_barrier(source_stages, destination_stages);
}
void VKCommandBuffers::pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer).pipeline_barrier(image_memory_barriers);
}
void VKCommandBuffers::clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer).clear(vk_image, vk_image_layout, vk_clear_color, ranges);
}
void VKCommandBuffers::clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearDepthStencilValue &vk_clear_depth_stencil,
Span<VkImageSubresourceRange> ranges)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer)
.clear(vk_image, vk_image_layout, vk_clear_depth_stencil, ranges);
}
void VKCommandBuffers::clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).clear(attachments, areas);
}
void VKCommandBuffers::fill(VKBuffer &buffer, uint32_t data)
{
ensure_no_compute_or_draw_commands();
command_buffer_get(Type::DataTransfer).fill(buffer, data);
}
void VKCommandBuffers::draw(int v_first, int v_count, int i_first, int i_count)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).draw(v_first, v_count, i_first, i_count);
}
void VKCommandBuffers::draw_indexed(
int index_count, int instance_count, int first_index, int vertex_offset, int first_instance)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics)
.draw_indexed(index_count, instance_count, first_index, vertex_offset, first_instance);
}
void VKCommandBuffers::draw_indirect(const VKStorageBuffer &buffer,
VkDeviceSize offset,
uint32_t draw_count,
uint32_t stride)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).draw_indirect(buffer, offset, draw_count, stride);
}
void VKCommandBuffers::draw_indexed_indirect(const VKStorageBuffer &buffer,
VkDeviceSize offset,
uint32_t draw_count,
uint32_t stride)
{
ensure_no_compute_commands();
command_buffer_get(Type::Graphics).draw_indexed_indirect(buffer, offset, draw_count, stride);
}
/** \} */
} // namespace blender::gpu

View File

@ -0,0 +1,184 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#pragma once
#include "vk_command_buffer.hh"
namespace blender::gpu {
class VKCommandBuffers : public VKCommands, NonCopyable, NonMovable {
enum class Type {
DataTransfer = 0,
Compute = 1,
Graphics = 2,
Max = 3,
};
bool initialized_ = false;
/**
* Timeout to use when waiting for fences, in nanoseconds.
*
* Currently needed because the fence wait would stall the second time a command buffer is
* submitted without any recorded commands. This should be fixed and the timeout removed.
*/
static constexpr uint64_t FenceTimeout = UINT64_MAX;
/* Non-owning handles */
/** Handles to the command queue and device. Both are owned by `GHOST_ContextVK`. */
VkDevice vk_device_ = VK_NULL_HANDLE;
VkQueue vk_queue_ = VK_NULL_HANDLE;
/* Owning handles */
/* Fence for CPU-GPU synchronization when submitting the command buffers. */
VkFence vk_fence_ = VK_NULL_HANDLE;
/* TODO: General command buffer should not be used, but is added to help during the transition. */
VKCommandBuffer buffers_[(int)Type::Max];
VKSubmissionID submission_id_;
public:
~VKCommandBuffers();
void init(const VKDevice &device);
/**
* Have these command buffers already been initialized?
*/
bool is_initialized() const
{
return initialized_;
}
void bind(const VKPipeline &vk_pipeline, VkPipelineBindPoint bind_point) override;
void bind(const VKDescriptorSet &descriptor_set,
const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point) override;
void bind(const uint32_t binding,
const VKVertexBuffer &vertex_buffer,
const VkDeviceSize offset) override;
/* Bind the given buffer as a vertex buffer. */
void bind(const uint32_t binding, const VKBufferWithOffset &vertex_buffer) override;
void bind(const uint32_t binding,
const VkBuffer &vk_vertex_buffer,
const VkDeviceSize offset) override;
/* Bind the given buffer as an index buffer. */
void bind(const VKBufferWithOffset &index_buffer, VkIndexType index_type) override;
void begin_render_pass(VKFrameBuffer &framebuffer) override;
void end_render_pass(const VKFrameBuffer &framebuffer) override;
/**
* Add a push constant command to the command buffer.
*
* Only valid when the storage type of push_constants is StorageType::PUSH_CONSTANTS.
*/
void push_constants(const VKPushConstants &push_constants,
const VkPipelineLayout vk_pipeline_layout,
const VkShaderStageFlags vk_shader_stages) override;
void dispatch(int groups_x_len, int groups_y_len, int groups_z_len) override;
void dispatch(VKStorageBuffer &command_buffer) override;
/** Copy the contents of a texture MIP level to the dst buffer. */
void copy(VKBuffer &dst_buffer,
VKTexture &src_texture,
Span<VkBufferImageCopy> regions) override;
void copy(VKTexture &dst_texture,
VKBuffer &src_buffer,
Span<VkBufferImageCopy> regions) override;
void copy(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageCopy> regions) override;
void copy(VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBufferCopy> regions) override;
void blit(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageBlit> regions) override;
void blit(VKTexture &dst_texture,
VkImageLayout dst_layout,
VKTexture &src_texture,
VkImageLayout src_layout,
Span<VkImageBlit> regions) override;
void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages) override;
void pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers) override;
/**
* Clear color image resource.
*/
void clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges) override;
/**
* Clear depth/stencil aspect of an image resource.
*/
void clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearDepthStencilValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges) override;
/**
* Clear attachments of the active framebuffer.
*/
void clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas) override;
void fill(VKBuffer &buffer, uint32_t data) override;
void draw(int v_first, int v_count, int i_first, int i_count) override;
void draw_indexed(int index_count,
int instance_count,
int first_index,
int vertex_offset,
int first_instance) override;
void draw_indirect(const VKStorageBuffer &buffer,
VkDeviceSize offset,
uint32_t draw_count,
uint32_t stride) override;
void draw_indexed_indirect(const VKStorageBuffer &buffer,
VkDeviceSize offset,
uint32_t draw_count,
uint32_t stride) override;
void submit();
const VKSubmissionID &submission_id_get() const
{
return submission_id_;
}
private:
void init_fence();
void init_command_buffers(const VKDevice &device);
VKCommandBuffer &command_buffer_get(Type type)
{
return buffers_[(int)type];
}
/**
* Ensure that no compute or draw commands are scheduled.
*
* To ensure correct operation all compute and draw commands should be flushed when adding a new
* data transfer command.
*/
void ensure_no_compute_or_draw_commands();
/**
* Ensure that no compute commands are scheduled.
*
* To ensure correct operation all compute commands should be flushed when adding a new draw
* command.
*/
void ensure_no_compute_commands();
/**
* Ensure that no draw commands are scheduled.
*
* To ensure correct operation all draw commands should be flushed when adding a new compute
* command.
*/
void ensure_no_draw_commands();
};
} // namespace blender::gpu

View File

@ -0,0 +1,115 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#pragma once
#include "vk_common.hh"
namespace blender::gpu {
class VKPipeline;
class VKDescriptorSet;
class VKVertexBuffer;
class VKBufferWithOffset;
class VKFrameBuffer;
class VKPushConstants;
class VKStorageBuffer;
class VKBuffer;
class VKTexture;
/**
* Interface containing all supported commands so they can be shared between a single
* #VKCommandBuffer and #VKCommandBuffers.
*/
class VKCommands {
virtual void bind(const VKPipeline &vk_pipeline, VkPipelineBindPoint bind_point) = 0;
virtual void bind(const VKDescriptorSet &descriptor_set,
const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point) = 0;
virtual void bind(const uint32_t binding,
const VKVertexBuffer &vertex_buffer,
const VkDeviceSize offset) = 0;
/* Bind the given buffer as a vertex buffer. */
virtual void bind(const uint32_t binding, const VKBufferWithOffset &vertex_buffer) = 0;
virtual void bind(const uint32_t binding,
const VkBuffer &vk_vertex_buffer,
const VkDeviceSize offset) = 0;
/* Bind the given buffer as an index buffer. */
virtual void bind(const VKBufferWithOffset &index_buffer, VkIndexType index_type) = 0;
virtual void begin_render_pass(VKFrameBuffer &framebuffer) = 0;
virtual void end_render_pass(const VKFrameBuffer &framebuffer) = 0;
/**
* Add a push constant command to the command buffer.
*
* Only valid when the storage type of push_constants is StorageType::PUSH_CONSTANTS.
*/
virtual void push_constants(const VKPushConstants &push_constants,
const VkPipelineLayout vk_pipeline_layout,
const VkShaderStageFlags vk_shader_stages) = 0;
virtual void dispatch(int groups_x_len, int groups_y_len, int groups_z_len) = 0;
virtual void dispatch(VKStorageBuffer &command_buffer) = 0;
/** Copy the contents of a texture MIP level to the dst buffer. */
virtual void copy(VKBuffer &dst_buffer,
VKTexture &src_texture,
Span<VkBufferImageCopy> regions) = 0;
virtual void copy(VKTexture &dst_texture,
VKBuffer &src_buffer,
Span<VkBufferImageCopy> regions) = 0;
virtual void copy(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageCopy> regions) = 0;
virtual void copy(VKBuffer &dst_buffer, VkBuffer src_buffer, Span<VkBufferCopy> regions) = 0;
virtual void blit(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageBlit> regions) = 0;
virtual void blit(VKTexture &dst_texture,
VkImageLayout dst_layout,
VKTexture &src_texture,
VkImageLayout src_layout,
Span<VkImageBlit> regions) = 0;
virtual void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages) = 0;
virtual void pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers) = 0;
/**
* Clear color image resource.
*/
virtual void clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges) = 0;
/**
* Clear depth/stencil aspect of an image resource.
*/
virtual void clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearDepthStencilValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges) = 0;
/**
* Clear attachments of the active framebuffer.
*/
virtual void clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas) = 0;
virtual void fill(VKBuffer &buffer, uint32_t data) = 0;
virtual void draw(int v_first, int v_count, int i_first, int i_count) = 0;
virtual void draw_indexed(int index_count,
int instance_count,
int first_index,
int vertex_offset,
int first_instance) = 0;
virtual void draw_indirect(const VKStorageBuffer &buffer,
VkDeviceSize offset,
uint32_t draw_count,
uint32_t stride) = 0;
virtual void draw_indexed_indirect(const VKStorageBuffer &buffer,
VkDeviceSize offset,
uint32_t draw_count,
uint32_t stride) = 0;
};
} // namespace blender::gpu
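
A small sketch of what the interface buys (hypothetical subclass, not part of this patch): every implementer must match the exact signatures above, which is how #VKCommandBuffer and #VKCommandBuffers are kept in sync at compile time.

/* Hypothetical implementer: `override` fails to compile if the signature drifts from the
 * pure virtual declared in VKCommands. The class stays abstract while the remaining
 * commands are unimplemented; the point is the signature check. */
class VKCommandsCheck : VKCommands {
  void fill(VKBuffer & /*buffer*/, uint32_t /*data*/) override {}
};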

View File

@ -51,9 +51,8 @@ void VKContext::sync_backbuffer()
{
if (ghost_context_) {
VKDevice &device = VKBackend::get().device_;
if (!command_buffer_.is_initialized()) {
command_buffer_.init(device);
command_buffer_.begin_recording();
if (!command_buffers_.is_initialized()) {
command_buffers_.init(device);
device.init_dummy_buffer(*this);
device.init_dummy_color_attachment();
}
@ -122,12 +121,12 @@ void VKContext::end_frame()
void VKContext::flush()
{
command_buffer_.submit();
command_buffers_.submit();
}
void VKContext::finish()
{
command_buffer_.submit();
command_buffers_.submit();
}
void VKContext::memory_statistics_get(int * /*total_mem*/, int * /*free_mem*/) {}
@ -157,7 +156,7 @@ void VKContext::activate_framebuffer(VKFrameBuffer &framebuffer)
active_fb = &framebuffer;
framebuffer.update_size();
framebuffer.update_srgb();
command_buffer_.begin_render_pass(framebuffer);
command_buffers_get().begin_render_pass(framebuffer);
}
VKFrameBuffer *VKContext::active_framebuffer_get() const
@ -174,7 +173,7 @@ void VKContext::deactivate_framebuffer()
{
VKFrameBuffer *framebuffer = active_framebuffer_get();
BLI_assert(framebuffer != nullptr);
command_buffer_.end_render_pass(*framebuffer);
command_buffers_get().end_render_pass(*framebuffer);
active_fb = nullptr;
}
@ -266,13 +265,13 @@ void VKContext::swap_buffers_pre_handler(const GHOST_VulkanSwapChainData &swap_c
image_blit.dstSubresource.baseArrayLayer = 0;
image_blit.dstSubresource.layerCount = 1;
command_buffer_.blit(wrapper,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
*color_attachment,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
Span<VkImageBlit>(&image_blit, 1));
command_buffers_get().blit(wrapper,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
*color_attachment,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
Span<VkImageBlit>(&image_blit, 1));
wrapper.layout_ensure(*this, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
command_buffer_.submit();
command_buffers_get().submit();
}
void VKContext::swap_buffers_post_handler()

View File

@ -12,7 +12,7 @@
#include "GHOST_Types.h"
#include "vk_command_buffer.hh"
#include "vk_command_buffers.hh"
#include "vk_common.hh"
#include "vk_debug.hh"
#include "vk_descriptor_pools.hh"
@ -25,7 +25,7 @@ class VKStateManager;
class VKContext : public Context, NonCopyable {
private:
VKCommandBuffer command_buffer_;
VKCommandBuffers command_buffers_;
VkExtent2D vk_extent_ = {};
VkFormat swap_chain_format_ = {};
@ -69,9 +69,9 @@ class VKContext : public Context, NonCopyable {
return static_cast<VKContext *>(Context::get());
}
VKCommandBuffer &command_buffer_get()
VKCommandBuffers &command_buffers_get()
{
return command_buffer_;
return command_buffers_;
}
VKStateManager &state_manager_get() const;

View File

@ -146,8 +146,8 @@ void VKFrameBuffer::clear(const Vector<VkClearAttachment> &attachments) const
clear_rect.layerCount = 1;
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.clear(attachments, Span<VkClearRect>(&clear_rect, 1));
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.clear(attachments, Span<VkClearRect>(&clear_rect, 1));
}
void VKFrameBuffer::clear(const eGPUFrameBufferBits buffers,
@ -270,7 +270,7 @@ void VKFrameBuffer::read(eGPUFrameBufferBits plane,
/** \name Blit operations
* \{ */
static void blit_aspect(VKCommandBuffer &command_buffer,
static void blit_aspect(VKCommandBuffers &command_buffer,
VKTexture &dst_texture,
VKTexture &src_texture,
int dst_offset_x,
@ -329,7 +329,7 @@ void VKFrameBuffer::blit_to(eGPUFrameBufferBits planes,
UNUSED_VARS_NDEBUG(planes);
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKCommandBuffers &command_buffers = context.command_buffers_get();
if (!context.has_active_framebuffer()) {
BLI_assert_unreachable();
return;
@ -347,7 +347,7 @@ void VKFrameBuffer::blit_to(eGPUFrameBufferBits planes,
dst_framebuffer.color_attachment_layout_ensure(
context, dst_slot, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
blit_aspect(command_buffer,
blit_aspect(command_buffers,
dst_texture,
src_texture,
dst_offset_x,
@ -372,7 +372,7 @@ void VKFrameBuffer::blit_to(eGPUFrameBufferBits planes,
dst_framebuffer.depth_attachment_layout_ensure(context,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
blit_aspect(command_buffer,
blit_aspect(command_buffers,
dst_texture,
src_texture,
dst_offset_x,
@ -380,7 +380,7 @@ void VKFrameBuffer::blit_to(eGPUFrameBufferBits planes,
VK_IMAGE_ASPECT_DEPTH_BIT);
}
}
command_buffer.submit();
context.flush();
}
/** \} */

View File

@ -56,7 +56,7 @@ void VKImmediate::end()
context.bind_graphics_pipeline(prim_type, vertex_attributes_);
vertex_attributes_.bind(context);
context.command_buffer_get().draw(0, vertex_idx, 0, 1);
context.command_buffers_get().draw(0, vertex_idx, 0, 1);
buffer_offset_ += current_subbuffer_len_;
current_subbuffer_len_ = 0;

View File

@ -37,7 +37,7 @@ void VKIndexBuffer::upload_data()
void VKIndexBuffer::bind(VKContext &context)
{
context.command_buffer_get().bind(buffer_with_offset(), to_vk_index_type(index_type_));
context.command_buffers_get().bind(buffer_with_offset(), to_vk_index_type(index_type_));
}
void VKIndexBuffer::bind_as_ssbo(uint binding)
@ -63,8 +63,7 @@ void VKIndexBuffer::bind(int binding, shader::ShaderCreateInfo::Resource::BindTy
void VKIndexBuffer::read(uint32_t *data) const
{
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();
context.flush();
buffer_.read(data);
}

View File

@ -205,12 +205,12 @@ void VKPipeline::update_and_bind(VKContext &context,
VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint vk_pipeline_bind_point)
{
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.bind(*this, vk_pipeline_bind_point);
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.bind(*this, vk_pipeline_bind_point);
push_constants_.update(context);
if (descriptor_set_.has_layout()) {
descriptor_set_.update(context);
command_buffer.bind(
command_buffers.bind(
*descriptor_set_.active_descriptor_set(), vk_pipeline_layout, vk_pipeline_bind_point);
}
}

View File

@ -147,7 +147,7 @@ VKPushConstants &VKPushConstants::operator=(VKPushConstants &&other)
void VKPushConstants::update(VKContext &context)
{
VKShader *shader = static_cast<VKShader *>(context.shader);
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKCommandBuffers &command_buffers = context.command_buffers_get();
VKPipeline &pipeline = shader->pipeline_get();
BLI_assert_msg(&pipeline.push_constants_get() == this,
"Invalid state detected. Push constants doesn't belong to the active shader of "
@ -159,7 +159,10 @@ void VKPushConstants::update(VKContext &context)
break;
case VKPushConstants::StorageType::PUSH_CONSTANTS:
command_buffer.push_constants(*this, shader->vk_pipeline_layout_get(), VK_SHADER_STAGE_ALL);
command_buffers.push_constants(*this,
shader->vk_pipeline_layout_get(),
shader->is_graphics_shader() ? VK_SHADER_STAGE_ALL_GRAPHICS :
VK_SHADER_STAGE_COMPUTE_BIT);
break;
case VKPushConstants::StorageType::UNIFORM_BUFFER:

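The stage-flag change above follows from a Vulkan rule worth stating (background, not part of the patch): the stage flags passed to vkCmdPushConstants() must match a push-constant range declared in the pipeline layout, so a compute-only layout rejects VK_SHADER_STAGE_ALL. A sketch with hypothetical values:

/* The push-constant range in the pipeline layout determines the allowed stage flags. */
VkPushConstantRange range = {};
range.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; /* Compute-only layout. */
range.offset = 0;
range.size = 64;
/* vkCmdPushConstants(..., VK_SHADER_STAGE_ALL, ...) against this layout would trip
 * validation; VK_SHADER_STAGE_COMPUTE_BIT matches the declared range. */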
View File

@ -12,8 +12,8 @@
namespace blender::gpu {
bool VKSubmissionTracker::is_changed(VKContext &context)
{
VKCommandBuffer &command_buffer = context.command_buffer_get();
const VKSubmissionID &current_id = command_buffer.submission_id_get();
VKCommandBuffers &command_buffers = context.command_buffers_get();
const VKSubmissionID &current_id = command_buffers.submission_id_get();
if (last_known_id_ != current_id) {
last_known_id_ = current_id;
return true;

View File

@ -15,7 +15,7 @@
namespace blender::gpu {
class VKContext;
class VKCommandBuffer;
class VKCommandBuffers;
/**
* In Vulkan, multiple commands can be in flight simultaneously.
@ -81,7 +81,7 @@ struct VKSubmissionID {
return id_ != other.id_;
}
friend class VKCommandBuffer;
friend class VKCommandBuffers;
};
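
For context, a hedged sketch of how the submission id is typically consumed (mirroring VKSubmissionTracker::is_changed() earlier in this diff; the resource type and member names are hypothetical):

/* Hypothetical resource method: detect whether a new submission happened since the last
 * check, e.g. to decide that GPU-side data must be refreshed. */
void VKMyResource::ensure_updated(VKContext &context)
{
  if (submission_tracker_.is_changed(context)) {
    /* First use since the previous queue submission: refresh data here. */
  }
}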
/**

View File

@ -85,6 +85,16 @@ class VKShader : public Shader {
const GPUPrimType prim_type,
const VKVertexAttributeObject &vertex_attribute_object);
bool is_graphics_shader() const
{
return !is_compute_shader();
}
bool is_compute_shader() const
{
return compute_module_ != VK_NULL_HANDLE;
}
private:
Vector<uint32_t> compile_glsl_to_spirv(Span<const char *> sources, shaderc_shader_kind kind);
void build_shader_module(Span<uint32_t> spirv_module, VkShaderModule *r_shader_module);
@ -96,16 +106,6 @@ class VKShader : public Shader {
const shader::ShaderCreateInfo &info);
bool finalize_pipeline_layout(VkDevice vk_device, const VKShaderInterface &shader_interface);
bool is_graphics_shader() const
{
return !is_compute_shader();
}
bool is_compute_shader() const
{
return compute_module_ != VK_NULL_HANDLE;
}
/**
* \brief Features available on newer implementations, such as native barycentric coordinates
* and layered rendering, necessitate a geometry shader to work on older hardware.

View File

@ -52,10 +52,9 @@ void VKStateManager::force_state()
void VKStateManager::issue_barrier(eGPUBarrier /*barrier_bits*/)
{
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
/* TODO: Pipeline barriers should be added. We might be able to extract them from
* the actual pipeline later on, but for now we submit the work as a barrier. */
command_buffer.submit();
context.flush();
}
void VKStateManager::texture_bind(Texture *tex, GPUSamplerState /*sampler*/, int unit)

View File

@ -86,9 +86,9 @@ void VKStorageBuffer::copy_sub(VertBuf *src, uint dst_offset, uint src_offset, u
region.size = copy_size;
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(buffer_, src_vertex_buffer.vk_handle(), Span<VkBufferCopy>(&region, 1));
command_buffer.submit();
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.copy(buffer_, src_vertex_buffer.vk_handle(), Span<VkBufferCopy>(&region, 1));
context.flush();
}
void VKStorageBuffer::async_flush_to_host()
@ -100,8 +100,7 @@ void VKStorageBuffer::read(void *data)
{
ensure_allocated();
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();
context.flush();
buffer_.read(data);
}

View File

@ -49,7 +49,7 @@ void VKTexture::generate_mipmap()
}
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKCommandBuffers &command_buffers = context.command_buffers_get();
layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
for (int src_mipmap : IndexRange(mipmaps_ - 1)) {
@ -92,14 +92,14 @@ void VKTexture::generate_mipmap()
image_blit.dstSubresource.baseArrayLayer = 0;
image_blit.dstSubresource.layerCount = vk_layer_count(1);
command_buffer.blit(*this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
*this,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
Span<VkImageBlit>(&image_blit, 1));
command_buffers.blit(*this,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
*this,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
Span<VkImageBlit>(&image_blit, 1));
/* TODO: Until we do actual command encoding we need to submit each transfer operation
* individually. */
command_buffer.submit();
context.flush();
}
/* Ensure that all mipmap levels are in `VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL`.
* All MIP-levels are except the last one. */
@ -125,9 +125,9 @@ void VKTexture::copy_to(VKTexture &dst_texture, VkImageAspectFlagBits vk_image_a
region.dstSubresource.layerCount = vk_layer_count(1);
region.extent = vk_extent_3d(0);
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(dst_texture, *this, Span<VkImageCopy>(&region, 1));
command_buffer.submit();
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.copy(dst_texture, *this, Span<VkImageCopy>(&region, 1));
context.flush();
}
void VKTexture::copy_to(Texture *tex)
@ -148,7 +148,7 @@ void VKTexture::clear(eGPUDataFormat format, const void *data)
BLI_assert(!is_texture_view());
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKCommandBuffers &command_buffers = context.command_buffers_get();
VkClearColorValue clear_color = to_vk_clear_color_value(format, data);
VkImageSubresourceRange range = {0};
range.aspectMask = to_vk_image_aspect_flag_bits(format_);
@ -156,7 +156,7 @@ void VKTexture::clear(eGPUDataFormat format, const void *data)
range.layerCount = VK_REMAINING_ARRAY_LAYERS;
layout_ensure(context, VK_IMAGE_LAYOUT_GENERAL);
command_buffer.clear(
command_buffers.clear(
vk_image_, current_layout_get(), clear_color, Span<VkImageSubresourceRange>(&range, 1));
}
@ -167,7 +167,7 @@ void VKTexture::clear_depth_stencil(const eGPUFrameBufferBits buffers,
BLI_assert(buffers & (GPU_DEPTH_BIT | GPU_STENCIL_BIT));
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKCommandBuffers &command_buffers = context.command_buffers_get();
VkClearDepthStencilValue clear_depth_stencil;
clear_depth_stencil.depth = clear_depth;
clear_depth_stencil.stencil = clear_stencil;
@ -177,10 +177,10 @@ void VKTexture::clear_depth_stencil(const eGPUFrameBufferBits buffers,
range.layerCount = VK_REMAINING_ARRAY_LAYERS;
layout_ensure(context, VK_IMAGE_LAYOUT_GENERAL);
command_buffer.clear(vk_image_,
current_layout_get(),
clear_depth_stencil,
Span<VkImageSubresourceRange>(&range, 1));
command_buffers.clear(vk_image_,
current_layout_get(),
clear_depth_stencil,
Span<VkImageSubresourceRange>(&range, 1));
}
void VKTexture::swizzle_set(const char swizzle_mask[4])
@ -225,9 +225,9 @@ void VKTexture::read_sub(int mip, eGPUDataFormat format, const int area[4], void
region.imageSubresource.mipLevel = mip;
region.imageSubresource.layerCount = vk_layer_count(1);
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(staging_buffer, *this, Span<VkBufferImageCopy>(&region, 1));
command_buffer.submit();
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.copy(staging_buffer, *this, Span<VkBufferImageCopy>(&region, 1));
context.flush();
convert_device_to_host(r_data, staging_buffer.mapped_memory_get(), sample_len, format, format_);
}
@ -283,9 +283,9 @@ void VKTexture::update_sub(
region.imageSubresource.layerCount = layers;
layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(*this, staging_buffer, Span<VkBufferImageCopy>(&region, 1));
command_buffer.submit();
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.copy(*this, staging_buffer, Span<VkBufferImageCopy>(&region, 1));
context.flush();
}
void VKTexture::update_sub(int /*offset*/[3],
@ -339,9 +339,9 @@ bool VKTexture::init_internal(GPUVertBuf *vbo)
VKContext &context = *VKContext::get();
layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(*this, vertex_buffer->buffer_, Span<VkBufferImageCopy>(&region, 1));
command_buffer.submit();
VKCommandBuffers &command_buffers = context.command_buffers_get();
command_buffers.copy(*this, vertex_buffer->buffer_, Span<VkBufferImageCopy>(&region, 1));
context.flush();
return true;
}
@ -557,7 +557,7 @@ void VKTexture::layout_ensure(VKContext &context,
barrier.subresourceRange.levelCount = uint32_t(mipmap_range.size());
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
context.command_buffer_get().pipeline_barrier(Span<VkImageMemoryBarrier>(&barrier, 1));
context.command_buffers_get().pipeline_barrier(Span<VkImageMemoryBarrier>(&barrier, 1));
}
/** \} */

View File

@ -80,12 +80,12 @@ void VKVertexAttributeObject::bind_vbos(VKContext &context)
BLI_assert(vbos[attribute.binding]);
VKVertexBuffer &vbo = *vbos[attribute.binding];
vbo.upload();
context.command_buffer_get().bind(attribute.binding, vbo, 0);
context.command_buffers_get().bind(attribute.binding, vbo, 0);
}
else {
const VKBuffer &buffer = VKBackend::get().device_get().dummy_buffer_get();
const VKBufferWithOffset buffer_with_offset = {buffer, 0};
context.command_buffer_get().bind(attribute.binding, buffer_with_offset);
context.command_buffers_get().bind(attribute.binding, buffer_with_offset);
}
}
}
@ -104,12 +104,12 @@ void VKVertexAttributeObject::bind_buffers(VKContext &context)
if (attribute.binding < buffers.size()) {
VKBufferWithOffset &buffer = buffers[attribute.binding];
context.command_buffer_get().bind(attribute.binding, buffer);
context.command_buffers_get().bind(attribute.binding, buffer);
}
else {
const VKBuffer &buffer = VKBackend::get().device_get().dummy_buffer_get();
const VKBufferWithOffset buffer_with_offset = {buffer, 0};
context.command_buffer_get().bind(attribute.binding, buffer_with_offset);
context.command_buffers_get().bind(attribute.binding, buffer_with_offset);
}
}
}

View File

@ -89,8 +89,7 @@ void VKVertexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*dat
void VKVertexBuffer::read(void *data) const
{
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();
context.flush();
buffer_.read(data);
}