Vulkan: Rewrite GHOST_ContextVK #111473

Merged
Jeroen Bakker merged 21 commits from Jeroen-Bakker/blender:vulkan/swap-chain-callbacks into main 2023-08-29 15:05:16 +02:00
25 changed files with 594 additions and 612 deletions

View File

@ -1260,56 +1260,35 @@ void GHOST_GetVulkanHandles(GHOST_ContextHandle context,
void *r_queue);
/**
* Return Vulkan command buffer.
* Set the pre and post callbacks for vulkan swap chain in the given context.
*
* Command buffers are different for each image in the swap chain.
* At the start of each frame the correct command buffer should be
* retrieved with this function.
*
* Should only be called when using a Vulkan context.
* Other contexts will not return any handles and leave the
* handles where the parameters are referring to unmodified.
*
* \param context: GHOST context handle to a vulkan context to get the
* command queue from.
* \param r_command_buffer: After calling this function the VkCommandBuffer
* referenced by this parameter will contain the VKCommandBuffer handle
* of the current back buffer (when swap chains are enabled) or
* it will contain a general VkCommandQueue.
* \param context: GHOST context handle of a vulkan context to
* get the Vulkan handles from.
* \param swap_buffers_pre_callback: Function pointer to be called at the beginning of swapBuffers.
* Inside this callback the next swap chain image needs to be acquired and filled.
* \param swap_buffers_post_callback: Function to be called at the end of swapBuffers. swapBuffers
* can recreate the swap chain. When this happens the application should be informed about those
* changes.
*/
void GHOST_GetVulkanCommandBuffer(GHOST_ContextHandle context, void *r_command_buffer);
void GHOST_SetVulkanSwapBuffersCallbacks(
GHOST_ContextHandle context,
void (*swap_buffers_pre_callback)(const GHOST_VulkanSwapChainData *),
void (*swap_buffers_post_callback)(void));
/**
* Gets the Vulkan back-buffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the back-buffer will change.
*
* Should only be called when using a Vulkan context with an active swap chain.
* Other contexts will not return any handles and leave the
* handles where the parameters are referring to unmodified.
* Acquire the current swap chain format.
*
* \param windowhandle: GHOST window handle to a window to get the resource from.
* \param r_image: After calling this function the VkImage
* referenced by this parameter will contain the VKImage handle
* of the current back buffer.
* \param r_framebuffer: After calling this function the VkFramebuffer
* referenced by this parameter will contain the VKFramebuffer handle
* of the current back buffer.
* \param r_render_pass: After calling this function the VkRenderPass
* referenced by this parameter will contain the VKRenderPass handle
* of the current back buffer.
* \param r_surface_format: After calling this function the VkSurfaceFormatKHR
* referenced by this parameter will contain the surface format of the
* surface. The format is the same as the image returned in the r_image
* parameter.
* \param r_extent: After calling this function the VkExtent2D
* referenced by this parameter will contain the size of the
* frame buffer and image in pixels.
* \param r_fb_id: After calling this function the uint32_t
* referenced by this parameter will contain the id of the
* framebuffer of the current back buffer.
*/
void GHOST_GetVulkanBackbuffer(GHOST_WindowHandle windowhandle,
void *r_image,
void *r_framebuffer,
void *r_render_pass,
void *r_extent,
uint32_t *r_fb_id);
void GHOST_GetVulkanSwapChainFormat(GHOST_WindowHandle windowhandle,
GHOST_VulkanSwapChainData *r_swap_chain_data);
#endif

View File

@ -9,6 +9,8 @@
#pragma once
#include <functional>
#include "GHOST_Types.h"
/**
@ -38,7 +40,9 @@ class GHOST_IContext {
virtual GHOST_TSuccess releaseDrawingContext() = 0;
virtual unsigned int getDefaultFramebuffer() = 0;
virtual GHOST_TSuccess swapBuffers() = 0;
#ifdef WITH_VULKAN_BACKEND
/**
* Get Vulkan handles for the given context.
*
@ -47,6 +51,8 @@ class GHOST_IContext {
* Other contexts will not return any handles and leave the
* handles where the parameters are referring to unmodified.
*
* \param context: GHOST context handle of a vulkan context to
* get the Vulkan handles from.
* \param r_instance: After calling this function the VkInstance
* referenced by this parameter will contain the VKInstance handle
* of the context associated with the `context` parameter.
@ -62,9 +68,6 @@ class GHOST_IContext {
* \param r_queue: After calling this function the VkQueue
* referenced by this parameter will contain the VKQueue handle
* of the context associated with the `context` parameter.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanHandles(void *r_instance,
void *r_physical_device,
@ -73,52 +76,35 @@ class GHOST_IContext {
void *r_queue) = 0;
/**
* Return Vulkan command buffer.
* Acquire the current swap chain format.
*
* Command buffers are different for each image in the swap chain.
* At the start of each frame the correct command buffer should be
* retrieved with this function.
*
* \param r_command_buffer: After calling this function the VkCommandBuffer
* referenced by this parameter will contain the VKCommandBuffer handle
* of the current back buffer (when swap chains are enabled) or
* it will contain a general VkCommandQueue.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanCommandBuffer(void *r_command_buffer) = 0;
/**
* Gets the Vulkan back-buffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the back-buffer will change.
*
* \param r_image: After calling this function the VkImage
* referenced by this parameter will contain the VKImage handle
* of the current back buffer.
* \param r_framebuffer: After calling this function the VkFramebuffer
* referenced by this parameter will contain the VKFramebuffer handle
* of the current back buffer.
* \param r_render_pass: After calling this function the VkRenderPass
* referenced by this parameter will contain the VKRenderPass handle
* of the current back buffer.
* \param windowhandle: GHOST window handle to a window to get the resource from.
* \param r_surface_format: After calling this function the VkSurfaceFormatKHR
* referenced by this parameter will contain the surface format of the
* surface. The format is the same as the image returned in the r_image
* parameter.
* \param r_extent: After calling this function the VkExtent2D
* referenced by this parameter will contain the size of the
* frame buffer and image in pixels.
* \param r_fb_id: After calling this function the uint32_t
* referenced by this parameter will contain the id of the
* framebuffer of the current back buffer.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanBackbuffer(void *r_image,
void *r_framebuffer,
void *r_render_pass,
void *r_extent,
uint32_t *r_fb_id) = 0;
virtual GHOST_TSuccess getVulkanSwapChainFormat(
GHOST_VulkanSwapChainData *r_swap_chain_data) = 0;
virtual GHOST_TSuccess swapBuffers() = 0;
/**
* Set the pre and post callbacks for vulkan swap chain in the given context.
*
* \param context: GHOST context handle of a vulkan context to
* get the Vulkan handles from.
* \param swap_buffers_pre_callback: Function pointer to be called at the beginning of
* swapBuffers. Inside this callback the next swap chain image needs to be acquired and filled.
* \param swap_buffers_post_callback: Function to be called at the end of swapBuffers. swapBuffers
* can recreate the swap chain. When this happens the application should be informed about those
* changes.
*/
virtual GHOST_TSuccess setVulkanSwapBuffersCallbacks(
std::function<void(const GHOST_VulkanSwapChainData *)> swap_buffers_pre_callback,
std::function<void(void)> swap_buffers_post_callback) = 0;
#endif
#ifdef WITH_CXX_GUARDEDALLOC
MEM_CXX_CLASS_ALLOC_FUNCS("GHOST:GHOST_IContext")

View File

@ -209,13 +209,10 @@ class GHOST_IWindow {
*/
virtual unsigned int getDefaultFramebuffer() = 0;
/**
* Gets the Vulkan framebuffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap events as the framebuffer will change.
* \return A boolean success indicator.
*/
virtual GHOST_TSuccess getVulkanBackbuffer(
void *image, void *framebuffer, void *render_pass, void *extent, uint32_t *fb_id) = 0;
#ifdef WITH_VULKAN_BACKEND
virtual GHOST_TSuccess getVulkanSwapChainFormat(
GHOST_VulkanSwapChainData *r_swap_chain_data) = 0;
#endif
/**
* Invalidates the contents of this window.

View File

@ -10,6 +10,14 @@
#include <stdint.h>
#ifdef WITH_VULKAN_BACKEND
# ifdef __APPLE__
# include <MoltenVK/vk_mvk_moltenvk.h>
# else
# include <vulkan/vulkan.h>
# endif
#endif
/* This is used by `GHOST_C-api.h` too, cannot use C++ conventions. */
// NOLINTBEGIN: modernize-use-using
@ -691,6 +699,17 @@ typedef struct {
GHOST_TDrawingContextType context_type;
} GHOST_GPUSettings;
#ifdef WITH_VULKAN_BACKEND
/**
 * Data of a swap chain image, handed to the swap-buffers pre-callback so the
 * application can fill the image before it is presented.
 */
typedef struct {
  /** Image handle to the image that will be presented to the user. */
  VkImage image;
  /** Format of the image. */
  VkFormat format;
  /** Resolution of the image. */
  VkExtent2D extent;
} GHOST_VulkanSwapChainData;
#endif
typedef enum {
/** Axis that cursor grab will wrap. */
GHOST_kDebugDefault = (1 << 1),

View File

@ -1237,21 +1237,20 @@ void GHOST_GetVulkanHandles(GHOST_ContextHandle contexthandle,
r_instance, r_physical_device, r_device, r_graphic_queue_family, r_queue);
}
void GHOST_GetVulkanCommandBuffer(GHOST_ContextHandle contexthandle, void *r_command_buffer)
void GHOST_SetVulkanSwapBuffersCallbacks(
GHOST_ContextHandle contexthandle,
void (*swap_buffers_pre_callback)(const GHOST_VulkanSwapChainData *),
void (*swap_buffers_post_callback)(void))
{
GHOST_IContext *context = (GHOST_IContext *)contexthandle;
context->getVulkanCommandBuffer(r_command_buffer);
context->setVulkanSwapBuffersCallbacks(swap_buffers_pre_callback, swap_buffers_post_callback);
}
void GHOST_GetVulkanBackbuffer(GHOST_WindowHandle windowhandle,
void *image,
void *framebuffer,
void *render_pass,
void *extent,
uint32_t *fb_id)
void GHOST_GetVulkanSwapChainFormat(GHOST_WindowHandle windowhandle,
GHOST_VulkanSwapChainData *r_swap_chain_data)
{
GHOST_IWindow *window = (GHOST_IWindow *)windowhandle;
window->getVulkanBackbuffer(image, framebuffer, render_pass, extent, fb_id);
window->getVulkanSwapChainFormat(r_swap_chain_data);
}
#endif /* WITH_VULKAN_BACKEND */

View File

@ -130,6 +130,7 @@ class GHOST_Context : public GHOST_IContext {
return 0;
}
#ifdef WITH_VULKAN_BACKEND
/**
* Get Vulkan handles for the given context.
*
@ -166,58 +167,20 @@ class GHOST_Context : public GHOST_IContext {
return GHOST_kFailure;
};
/**
* Return Vulkan command buffer.
*
* Command buffers are different for each image in the swap chain.
* At the start of each frame the correct command buffer should be
* retrieved with this function.
*
* \param r_command_buffer: After calling this function the VkCommandBuffer
* referenced by this parameter will contain the VKCommandBuffer handle
* of the current back buffer (when swap chains are enabled) or
* it will contain a general VkCommandQueue.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanCommandBuffer(void * /*r_command_buffer*/) override
{
return GHOST_kFailure;
};
/**
* Gets the Vulkan back-buffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the back-buffer will change.
*
* \param r_image: After calling this function the VkImage
* referenced by this parameter will contain the VKImage handle
* of the current back buffer.
* \param r_framebuffer: After calling this function the VkFramebuffer
* referenced by this parameter will contain the VKFramebuffer handle
* of the current back buffer.
* \param r_render_pass: After calling this function the VkRenderPass
* referenced by this parameter will contain the VKRenderPass handle
* of the current back buffer.
* \param r_extent: After calling this function the VkExtent2D
* referenced by this parameter will contain the size of the
* frame buffer and image in pixels.
* \param r_fb_id: After calling this function the uint32_t
* referenced by this parameter will contain the id of the
* framebuffer of the current back buffer.
* \returns GHOST_kFailure when context isn't a Vulkan context.
* GHOST_kSuccess when the context is a Vulkan context and the
* handles have been set.
*/
virtual GHOST_TSuccess getVulkanBackbuffer(void * /*r_image*/,
void * /*r_framebuffer*/,
void * /*r_render_pass*/,
void * /*r_extent*/,
uint32_t * /*fb_id*/) override
virtual GHOST_TSuccess getVulkanSwapChainFormat(
GHOST_VulkanSwapChainData * /*r_swap_chain_data */) override
{
return GHOST_kFailure;
}
virtual GHOST_TSuccess setVulkanSwapBuffersCallbacks(
std::function<void(const GHOST_VulkanSwapChainData *)> /*swap_buffers_pre_callback*/,
std::function<void(void)> /*swap_buffers_post_callback*/) override
{
return GHOST_kFailure;
}
#endif
protected:
bool m_stereoVisual;

View File

@ -222,7 +222,12 @@ class GHOST_DeviceVK {
device_features.multiViewport = VK_TRUE;
#endif
VkPhysicalDeviceMaintenance4FeaturesKHR maintenance_4 = {};
maintenance_4.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_4_FEATURES_KHR;
maintenance_4.maintenance4 = VK_TRUE;
VkDeviceCreateInfo device_create_info = {};
device_create_info.pNext = &maintenance_4;
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.queueCreateInfoCount = static_cast<uint32_t>(queue_create_infos.size());
device_create_info.pQueueCreateInfos = queue_create_infos.data();
@ -390,9 +395,10 @@ GHOST_ContextVK::GHOST_ContextVK(bool stereoVisual,
m_context_minor_version(contextMinorVersion),
m_debug(debug),
m_command_pool(VK_NULL_HANDLE),
m_command_buffer(VK_NULL_HANDLE),
m_surface(VK_NULL_HANDLE),
m_swapchain(VK_NULL_HANDLE),
m_render_pass(VK_NULL_HANDLE)
m_fence(VK_NULL_HANDLE)
{
}
@ -404,6 +410,10 @@ GHOST_ContextVK::~GHOST_ContextVK()
destroySwapchain();
if (m_command_buffer != VK_NULL_HANDLE) {
vkFreeCommandBuffers(device_vk.device, m_command_pool, 1, &m_command_buffer);
m_command_buffer = VK_NULL_HANDLE;
}
if (m_command_pool != VK_NULL_HANDLE) {
vkDestroyCommandPool(device_vk.device, m_command_pool, nullptr);
}
@ -423,30 +433,13 @@ GHOST_TSuccess GHOST_ContextVK::destroySwapchain()
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
VkDevice device = vulkan_device->device;
for (auto semaphore : m_image_available_semaphores) {
vkDestroySemaphore(device, semaphore, nullptr);
}
for (auto semaphore : m_render_finished_semaphores) {
vkDestroySemaphore(device, semaphore, nullptr);
}
for (auto fence : m_in_flight_fences) {
vkDestroyFence(device, fence, nullptr);
}
for (auto framebuffer : m_swapchain_framebuffers) {
vkDestroyFramebuffer(device, framebuffer, nullptr);
}
if (m_render_pass != VK_NULL_HANDLE) {
vkDestroyRenderPass(device, m_render_pass, nullptr);
}
for (auto command_buffer : m_command_buffers) {
vkFreeCommandBuffers(device, m_command_pool, 1, &command_buffer);
}
for (auto imageView : m_swapchain_image_views) {
vkDestroyImageView(device, imageView, nullptr);
}
if (m_swapchain != VK_NULL_HANDLE) {
vkDestroySwapchainKHR(device, m_swapchain, nullptr);
}
if (m_fence != VK_NULL_HANDLE) {
vkDestroyFence(device, m_fence, nullptr);
m_fence = VK_NULL_HANDLE;
}
return GHOST_kSuccess;
}
@ -456,109 +449,65 @@ GHOST_TSuccess GHOST_ContextVK::swapBuffers()
return GHOST_kFailure;
}
if (m_lastFrame != m_currentFrame) {
return GHOST_kSuccess;
}
VkPipelineStageFlags wait_stages[] = {VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT};
/* Image should be in present src layout before presenting to screen. */
VkCommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
VK_CHECK(vkBeginCommandBuffer(m_command_buffers[m_currentImage], &begin_info));
VkImageMemoryBarrier barrier{};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
barrier.image = m_swapchain_images[m_currentImage];
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
vkCmdPipelineBarrier(m_command_buffers[m_currentImage],
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_DEPENDENCY_BY_REGION_BIT,
0,
nullptr,
0,
nullptr,
1,
&barrier);
VK_CHECK(vkEndCommandBuffer(m_command_buffers[m_currentImage]));
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pWaitDstStageMask = wait_stages;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_command_buffers[m_currentImage];
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &m_render_finished_semaphores[m_currentFrame];
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
VkDevice device = vulkan_device->device;
vkAcquireNextImageKHR(device, m_swapchain, UINT64_MAX, VK_NULL_HANDLE, m_fence, &m_currentImage);
VK_CHECK(vkWaitForFences(device, 1, &m_fence, VK_TRUE, UINT64_MAX));
VK_CHECK(vkResetFences(device, 1, &m_fence));
VkResult result;
VK_CHECK(vkQueueSubmit(m_graphic_queue, 1, &submit_info, m_in_flight_fences[m_currentFrame]));
do {
result = vkWaitForFences(device, 1, &m_in_flight_fences[m_currentFrame], VK_TRUE, 10000);
} while (result == VK_TIMEOUT);
GHOST_VulkanSwapChainData swap_chain_data;
swap_chain_data.image = m_swapchain_images[m_currentImage];
swap_chain_data.format = m_surface_format.format;
swap_chain_data.extent = m_render_extent;
VK_CHECK(vkQueueWaitIdle(m_graphic_queue));
if (swap_buffers_pre_callback_) {
swap_buffers_pre_callback_(&swap_chain_data);
}
VkPresentInfoKHR present_info = {};
present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present_info.waitSemaphoreCount = 1;
present_info.pWaitSemaphores = &m_render_finished_semaphores[m_currentFrame];
present_info.waitSemaphoreCount = 0;
present_info.pWaitSemaphores = nullptr;
present_info.swapchainCount = 1;
present_info.pSwapchains = &m_swapchain;
present_info.pImageIndices = &m_currentImage;
present_info.pResults = nullptr;
result = vkQueuePresentKHR(m_present_queue, &present_info);
VkResult result = vkQueuePresentKHR(m_present_queue, &present_info);
if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) {
/* Swap-chain is out of date. Recreate swap-chain and skip this frame. */
destroySwapchain();
createSwapchain();
if (swap_buffers_post_callback_) {
swap_buffers_post_callback_();
}
return GHOST_kSuccess;
}
else if (result != VK_SUCCESS) {
fprintf(stderr,
"Error: Failed to present swap chain image : %s\n",
vulkan_error_as_string(result));
if (swap_buffers_post_callback_) {
swap_buffers_post_callback_();
}
return GHOST_kFailure;
}
m_currentFrame = (m_currentFrame + 1) % MAX_FRAMES_IN_FLIGHT;
vkResetFences(device, 1, &m_in_flight_fences[m_currentFrame]);
if (swap_buffers_post_callback_) {
swap_buffers_post_callback_();
}
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::getVulkanBackbuffer(
void *image, void *framebuffer, void *render_pass, void *extent, uint32_t *fb_id)
GHOST_TSuccess GHOST_ContextVK::getVulkanSwapChainFormat(
GHOST_VulkanSwapChainData *r_swap_chain_data)
{
if (m_swapchain == VK_NULL_HANDLE) {
return GHOST_kFailure;
}
if (m_currentFrame != m_lastFrame) {
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
VkDevice device = vulkan_device->device;
vkAcquireNextImageKHR(device,
m_swapchain,
UINT64_MAX,
m_image_available_semaphores[m_currentFrame],
VK_NULL_HANDLE,
&m_currentImage);
m_lastFrame = m_currentFrame;
}
*((VkImage *)image) = m_swapchain_images[m_currentImage];
*((VkFramebuffer *)framebuffer) = m_swapchain_framebuffers[m_currentImage];
*((VkRenderPass *)render_pass) = m_render_pass;
*((VkExtent2D *)extent) = m_render_extent;
*fb_id = m_swapchain_id * 10 + m_currentFrame;
r_swap_chain_data->image = VK_NULL_HANDLE;
r_swap_chain_data->format = m_surface_format.format;
r_swap_chain_data->extent = m_render_extent;
return GHOST_kSuccess;
}
@ -585,19 +534,12 @@ GHOST_TSuccess GHOST_ContextVK::getVulkanHandles(void *r_instance,
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::getVulkanCommandBuffer(void *r_command_buffer)
GHOST_TSuccess GHOST_ContextVK::setVulkanSwapBuffersCallbacks(
std::function<void(const GHOST_VulkanSwapChainData *)> swap_buffers_pre_callback,
std::function<void(void)> swap_buffers_post_callback)
{
if (m_command_buffers.empty()) {
return GHOST_kFailure;
}
if (m_swapchain == VK_NULL_HANDLE) {
*((VkCommandBuffer *)r_command_buffer) = m_command_buffers[0];
}
else {
*((VkCommandBuffer *)r_command_buffer) = m_command_buffers[m_currentImage];
}
swap_buffers_pre_callback_ = swap_buffers_pre_callback;
swap_buffers_post_callback_ = swap_buffers_post_callback;
return GHOST_kSuccess;
}
@ -703,41 +645,6 @@ static void enableLayer(vector<VkLayerProperties> &layers_available,
#undef PUSH_VKLAYER
}
/**
 * Create a minimal render pass with a single color attachment.
 *
 * The attachment loads and stores its contents (no clear) and stays in
 * `VK_IMAGE_LAYOUT_GENERAL` before and after the pass.
 *
 * \param device: Logical device to create the render pass on.
 * \param format: Format of the single color attachment.
 * \param r_renderPass: Receives the created `VkRenderPass` handle.
 */
static GHOST_TSuccess create_render_pass(VkDevice device,
                                         VkFormat format,
                                         VkRenderPass *r_renderPass)
{
  /* Single color attachment whose contents are preserved across the pass. */
  VkAttachmentDescription attachment_description = {};
  attachment_description.format = format;
  attachment_description.samples = VK_SAMPLE_COUNT_1_BIT;
  attachment_description.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
  attachment_description.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
  attachment_description.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
  attachment_description.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
  attachment_description.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
  attachment_description.finalLayout = VK_IMAGE_LAYOUT_GENERAL;

  VkAttachmentReference attachment_reference = {};
  attachment_reference.attachment = 0;
  attachment_reference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

  /* One graphics sub-pass writing to the color attachment above. */
  VkSubpassDescription subpass_description = {};
  subpass_description.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
  subpass_description.colorAttachmentCount = 1;
  subpass_description.pColorAttachments = &attachment_reference;

  VkRenderPassCreateInfo render_pass_info = {};
  render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
  render_pass_info.attachmentCount = 1;
  render_pass_info.pAttachments = &attachment_description;
  render_pass_info.subpassCount = 1;
  render_pass_info.pSubpasses = &subpass_description;

  VK_CHECK(vkCreateRenderPass(device, &render_pass_info, nullptr, r_renderPass));

  return GHOST_kSuccess;
}
static GHOST_TSuccess selectPresentMode(VkPhysicalDevice device,
VkSurfaceKHR surface,
VkPresentModeKHR *r_presentMode)
@ -783,31 +690,14 @@ GHOST_TSuccess GHOST_ContextVK::createGraphicsCommandBuffer()
{
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
assert(m_command_pool != VK_NULL_HANDLE);
assert(m_command_buffers.size() == 0);
m_command_buffers.resize(1);
assert(m_command_buffer == VK_NULL_HANDLE);
VkCommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.commandPool = m_command_pool;
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = static_cast<uint32_t>(m_command_buffers.size());
alloc_info.commandBufferCount = 1;
VK_CHECK(vkAllocateCommandBuffers(vulkan_device->device, &alloc_info, m_command_buffers.data()));
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::createGraphicsCommandBuffers()
{
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
assert(m_command_pool != VK_NULL_HANDLE);
m_command_buffers.resize(m_swapchain_image_views.size());
VkCommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.commandPool = m_command_pool;
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = static_cast<uint32_t>(m_command_buffers.size());
VK_CHECK(vkAllocateCommandBuffers(vulkan_device->device, &alloc_info, m_command_buffers.data()));
VK_CHECK(vkAllocateCommandBuffers(vulkan_device->device, &alloc_info, &m_command_buffer));
return GHOST_kSuccess;
}
@ -857,17 +747,16 @@ static bool selectSurfaceFormat(const VkPhysicalDevice physical_device,
GHOST_TSuccess GHOST_ContextVK::createSwapchain()
{
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
m_swapchain_id++;
VkPhysicalDevice physical_device = vulkan_device->physical_device;
VkSurfaceFormatKHR format = {};
m_surface_format = {};
#if SELECT_COMPATIBLE_SURFACES_ONLY
if (!selectSurfaceFormat(physical_device, m_surface, format)) {
if (!selectSurfaceFormat(physical_device, m_surface, m_surface_format)) {
return GHOST_kFailure;
}
#else
selectSurfaceFormat(physical_device, m_surface, format);
selectSurfaceFormat(physical_device, m_surface, m_surface_format);
#endif
VkPresentModeKHR present_mode;
@ -903,8 +792,8 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
create_info.surface = m_surface;
create_info.minImageCount = image_count;
create_info.imageFormat = format.format;
create_info.imageColorSpace = format.colorSpace;
create_info.imageFormat = m_surface_format.format;
create_info.imageColorSpace = m_surface_format.colorSpace;
create_info.imageExtent = m_render_extent;
create_info.imageArrayLayers = 1;
create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
@ -920,70 +809,56 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
VkDevice device = vulkan_device->device;
VK_CHECK(vkCreateSwapchainKHR(device, &create_info, nullptr, &m_swapchain));
create_render_pass(device, format.format, &m_render_pass);
/* image_count may not be what we requested! Getter for final value. */
vkGetSwapchainImagesKHR(device, m_swapchain, &image_count, nullptr);
m_swapchain_images.resize(image_count);
vkGetSwapchainImagesKHR(device, m_swapchain, &image_count, m_swapchain_images.data());
m_swapchain_image_views.resize(image_count);
m_swapchain_framebuffers.resize(image_count);
for (int i = 0; i < image_count; i++) {
VkImageViewCreateInfo view_create_info = {};
view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
view_create_info.image = m_swapchain_images[i];
view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
view_create_info.format = format.format;
view_create_info.components = {
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY,
};
view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
view_create_info.subresourceRange.baseMipLevel = 0;
view_create_info.subresourceRange.levelCount = 1;
view_create_info.subresourceRange.baseArrayLayer = 0;
view_create_info.subresourceRange.layerCount = 1;
VK_CHECK(vkCreateImageView(device, &view_create_info, nullptr, &m_swapchain_image_views[i]));
VkImageView attachments[] = {m_swapchain_image_views[i]};
VkFramebufferCreateInfo fb_create_info = {};
fb_create_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fb_create_info.renderPass = m_render_pass;
fb_create_info.attachmentCount = 1;
fb_create_info.pAttachments = attachments;
fb_create_info.width = m_render_extent.width;
fb_create_info.height = m_render_extent.height;
fb_create_info.layers = 1;
VK_CHECK(vkCreateFramebuffer(device, &fb_create_info, nullptr, &m_swapchain_framebuffers[i]));
}
m_image_available_semaphores.resize(MAX_FRAMES_IN_FLIGHT);
m_render_finished_semaphores.resize(MAX_FRAMES_IN_FLIGHT);
m_in_flight_fences.resize(MAX_FRAMES_IN_FLIGHT);
VkSemaphoreCreateInfo semaphore_info = {};
semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkFenceCreateInfo fence_info = {};
fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
VK_CHECK(vkCreateFence(device, &fence_info, nullptr, &m_fence));
for (int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) {
/* Change image layout from VK_IMAGE_LAYOUT_UNDEFINED to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. */
VkCommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
VK_CHECK(vkBeginCommandBuffer(m_command_buffer, &begin_info));
VkImageMemoryBarrier *barriers = new VkImageMemoryBarrier[image_count];
for (int i = 0; i < image_count; i++) {
VkImageMemoryBarrier &barrier = barriers[i];
barrier = {};
VK_CHECK(
vkCreateSemaphore(device, &semaphore_info, nullptr, &m_image_available_semaphores[i]));
VK_CHECK(
vkCreateSemaphore(device, &semaphore_info, nullptr, &m_render_finished_semaphores[i]));
VK_CHECK(vkCreateFence(device, &fence_info, nullptr, &m_in_flight_fences[i]));
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
barrier.image = m_swapchain_images[i];
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
}
vkCmdPipelineBarrier(m_command_buffer,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_DEPENDENCY_BY_REGION_BIT,
0,
nullptr,
0,
nullptr,
image_count,
barriers);
VK_CHECK(vkEndCommandBuffer(m_command_buffer));
createGraphicsCommandBuffers();
VkPipelineStageFlags wait_stages[] = {VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT};
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pWaitDstStageMask = wait_stages;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_command_buffer;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
VK_CHECK(vkQueueSubmit(m_graphic_queue, 1, &submit_info, nullptr));
VK_CHECK(vkQueueWaitIdle(m_graphic_queue));
delete barriers;
return GHOST_kSuccess;
}
@ -1051,6 +926,8 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
}
extensions_device.push_back("VK_KHR_dedicated_allocation");
extensions_device.push_back("VK_KHR_get_memory_requirements2");
/* Allow relaxed interface matching between shader stages. */
extensions_device.push_back("VK_KHR_maintenance4");
/* Enable MoltenVK required instance extensions. */
#ifdef VK_MVK_MOLTENVK_EXTENSION_NAME
requireExtension(
@ -1138,14 +1015,13 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
vulkan_device->device, vulkan_device->generic_queue_family, 0, &m_graphic_queue);
createCommandPools();
createGraphicsCommandBuffer();
if (use_window_surface) {
vkGetDeviceQueue(
vulkan_device->device, vulkan_device->generic_queue_family, 0, &m_present_queue);
createSwapchain();
}
else {
createGraphicsCommandBuffer();
}
return GHOST_kSuccess;
}

View File

@ -26,12 +26,6 @@
#include <vector>
#ifdef __APPLE__
# include <MoltenVK/vk_mvk_moltenvk.h>
#else
# include <vulkan/vulkan.h>
#endif
#ifndef GHOST_OPENGL_VK_CONTEXT_FLAGS
/* leave as convenience define for the future */
# define GHOST_OPENGL_VK_CONTEXT_FLAGS 0
@ -117,15 +111,12 @@ class GHOST_ContextVK : public GHOST_Context {
void *r_device,
uint32_t *r_graphic_queue_family,
void *r_queue);
GHOST_TSuccess getVulkanCommandBuffer(void *r_command_buffer);
/**
* Gets the Vulkan framebuffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap events as the framebuffer will change.
* \return A boolean success indicator.
*/
GHOST_TSuccess getVulkanBackbuffer(
void *image, void *framebuffer, void *render_pass, void *extent, uint32_t *fb_id);
GHOST_TSuccess getVulkanSwapChainFormat(GHOST_VulkanSwapChainData *r_swap_chain_data) override;
GHOST_TSuccess setVulkanSwapBuffersCallbacks(
std::function<void(const GHOST_VulkanSwapChainData *)> swap_buffers_pre_callback,
std::function<void(void)> swap_buffers_post_callback) override;
/**
* Sets the swap interval for `swapBuffers`.
@ -167,6 +158,7 @@ class GHOST_ContextVK : public GHOST_Context {
const int m_debug;
VkCommandPool m_command_pool;
VkCommandBuffer m_command_buffer;
VkQueue m_graphic_queue;
VkQueue m_present_queue;
@ -175,29 +167,18 @@ class GHOST_ContextVK : public GHOST_Context {
VkSurfaceKHR m_surface;
VkSwapchainKHR m_swapchain;
std::vector<VkImage> m_swapchain_images;
std::vector<VkImageView> m_swapchain_image_views;
std::vector<VkFramebuffer> m_swapchain_framebuffers;
std::vector<VkCommandBuffer> m_command_buffers;
VkRenderPass m_render_pass;
VkExtent2D m_render_extent;
std::vector<VkSemaphore> m_image_available_semaphores;
std::vector<VkSemaphore> m_render_finished_semaphores;
std::vector<VkFence> m_in_flight_fences;
VkSurfaceFormatKHR m_surface_format;
VkFence m_fence;
/** frame modulo swapchain_len. Used as index for sync objects. */
int m_currentFrame = 0;
/**
* Last frame where the vulkan handles were retrieved from. This attribute is used to determine
* if a new image from the swap chain needs to be acquired.
*
* In a regular vulkan application this is done in the same method, but due to GHOST API this
* isn't possible. Swap chains are triggered by the window manager and the GPUBackend isn't
* informed about these changes.
*/
int m_lastFrame = -1;
/** Image index in the swapchain. Used as index for render objects. */
uint32_t m_currentImage = 0;
/** Used to unique framebuffer ids to return when swapchain is recreated. */
uint32_t m_swapchain_id = 0;
std::function<void(const GHOST_VulkanSwapChainData *)> swap_buffers_pre_callback_;
std::function<void(void)> swap_buffers_post_callback_;
const char *getPlatformSpecificSurfaceExtension() const;
GHOST_TSuccess createSwapchain();

View File

@ -107,11 +107,12 @@ uint GHOST_Window::getDefaultFramebuffer()
return (m_context) ? m_context->getDefaultFramebuffer() : 0;
}
GHOST_TSuccess GHOST_Window::getVulkanBackbuffer(
void *image, void *framebuffer, void *render_pass, void *extent, uint32_t *fb_id)
#ifdef WITH_VULKAN_BACKEND
GHOST_TSuccess GHOST_Window::getVulkanSwapChainFormat(GHOST_VulkanSwapChainData *r_swap_chain_data)
{
return m_context->getVulkanBackbuffer(image, framebuffer, render_pass, extent, fb_id);
return m_context->getVulkanSwapChainFormat(r_swap_chain_data);
}
#endif
GHOST_TSuccess GHOST_Window::activateDrawingContext()
{

View File

@ -270,13 +270,10 @@ class GHOST_Window : public GHOST_IWindow {
*/
virtual unsigned int getDefaultFramebuffer() override;
/**
* Gets the Vulkan framebuffer related resource handles associated with the Vulkan context.
* Needs to be called after each swap event as the framebuffer will change.
* \return A boolean success indicator.
*/
virtual GHOST_TSuccess getVulkanBackbuffer(
void *image, void *framebuffer, void *render_pass, void *extent, uint32_t *fb_id) override;
#ifdef WITH_VULKAN_BACKEND
virtual GHOST_TSuccess getVulkanSwapChainFormat(
GHOST_VulkanSwapChainData *r_swap_chain_data) override;
#endif
/**
* Returns the window user data.

View File

@ -134,6 +134,9 @@ Context *VKBackend::context_alloc(void *ghost_window, void *ghost_context)
VKContext *context = new VKContext(ghost_window, ghost_context);
device_.context_register(*context);
GHOST_SetVulkanSwapBuffersCallbacks((GHOST_ContextHandle)ghost_context,
VKContext::swap_buffers_pre_callback,
VKContext::swap_buffers_post_callback);
return context;
}

View File

@ -9,6 +9,7 @@
#include "vk_command_buffer.hh"
#include "vk_buffer.hh"
#include "vk_context.hh"
#include "vk_device.hh"
#include "vk_framebuffer.hh"
#include "vk_index_buffer.hh"
#include "vk_memory.hh"
@ -30,23 +31,37 @@ VKCommandBuffer::~VKCommandBuffer()
}
}
void VKCommandBuffer::init(const VkDevice vk_device,
const VkQueue vk_queue,
VkCommandBuffer vk_command_buffer)
bool VKCommandBuffer::is_initialized() const
{
vk_device_ = vk_device;
vk_queue_ = vk_queue;
vk_command_buffer_ = vk_command_buffer;
submission_id_.reset();
state.stage = Stage::Initial;
return vk_command_buffer_ != VK_NULL_HANDLE;
}
void VKCommandBuffer::init(const VKDevice &device)
{
if (is_initialized()) {
return;
}
vk_device_ = device.device_get();
vk_queue_ = device.queue_get();
/* When the last GHOST context is destroyed the device is deallocated. A moment later the GPU
 * context is destroyed. The first step is to activate it. Activating would retrieve the device
 * from GHOST which in that case is a #VK_NULL_HANDLE. */
if (vk_device == VK_NULL_HANDLE) {
if (vk_device_ == VK_NULL_HANDLE) {
return;
}
VkCommandBufferAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.commandPool = device.vk_command_pool_get();
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = 1;
vkAllocateCommandBuffers(vk_device_, &alloc_info, &vk_command_buffer_);
submission_id_.reset();
state.stage = Stage::Initial;
if (vk_fence_ == VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
VkFenceCreateInfo fenceInfo{};
@ -60,6 +75,7 @@ void VKCommandBuffer::init(const VkDevice vk_device,
void VKCommandBuffer::begin_recording()
{
ensure_no_active_framebuffer();
if (is_in_stage(Stage::Submitted)) {
vkWaitForFences(vk_device_, 1, &vk_fence_, VK_TRUE, FenceTimeout);
vkResetFences(vk_device_, 1, &vk_fence_);
@ -126,7 +142,7 @@ void VKCommandBuffer::bind(const VKBufferWithOffset &index_buffer, VkIndexType i
vk_command_buffer_, index_buffer.buffer.vk_handle(), index_buffer.offset, index_type);
}
void VKCommandBuffer::begin_render_pass(const VKFrameBuffer &framebuffer)
void VKCommandBuffer::begin_render_pass(VKFrameBuffer &framebuffer)
{
validate_framebuffer_not_exists();
state.framebuffer_ = &framebuffer;
@ -135,8 +151,7 @@ void VKCommandBuffer::begin_render_pass(const VKFrameBuffer &framebuffer)
void VKCommandBuffer::end_render_pass(const VKFrameBuffer &framebuffer)
{
UNUSED_VARS_NDEBUG(framebuffer);
validate_framebuffer_exists();
BLI_assert(state.framebuffer_ == &framebuffer);
BLI_assert(state.framebuffer_ == nullptr || state.framebuffer_ == &framebuffer);
ensure_no_active_framebuffer();
state.framebuffer_ = nullptr;
}
@ -243,6 +258,20 @@ void VKCommandBuffer::clear(VkImage vk_image,
ranges.data());
}
/**
 * Record a clear of the depth/stencil aspect of the given image.
 *
 * Any tracked render pass is ended first (`ensure_no_active_framebuffer`), as the clear is
 * recorded as a stand-alone transfer-style command on the image itself.
 *
 * \param vk_image: Image whose depth/stencil data will be cleared.
 * \param vk_image_layout: Current layout of `vk_image`.
 * \param vk_clear_value: Depth and stencil values to clear to.
 * \param ranges: Sub-resource ranges of the image to clear.
 */
void VKCommandBuffer::clear(VkImage vk_image,
                            VkImageLayout vk_image_layout,
                            const VkClearDepthStencilValue &vk_clear_value,
                            Span<VkImageSubresourceRange> ranges)
{
  ensure_no_active_framebuffer();
  const VkImageSubresourceRange *first_range = ranges.data();
  vkCmdClearDepthStencilImage(
      vk_command_buffer_, vk_image, vk_image_layout, &vk_clear_value, ranges.size(), first_range);
}
void VKCommandBuffer::clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas)
{
validate_framebuffer_exists();
@ -341,19 +370,11 @@ void VKCommandBuffer::submit()
{
ensure_no_active_framebuffer();
end_recording();
encode_recorded_commands();
submit_encoded_commands();
submit_commands();
begin_recording();
}
/* Placeholder step in the submit pipeline; currently commands are submitted exactly as they
 * were recorded. */
void VKCommandBuffer::encode_recorded_commands()
{
  /* Intentionally not implemented. For the graphics pipeline we want to extract the
   * resources and their usages so we can encode multiple commands in the same command buffer
   * with the correct synchronizations. */
}
void VKCommandBuffer::submit_encoded_commands()
void VKCommandBuffer::submit_commands()
{
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
@ -397,6 +418,7 @@ void VKCommandBuffer::ensure_active_framebuffer()
if (!state.framebuffer_active_) {
VkRenderPassBeginInfo render_pass_begin_info = {};
render_pass_begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
state.framebuffer_->vk_render_pass_ensure();
render_pass_begin_info.renderPass = state.framebuffer_->vk_render_pass_get();
render_pass_begin_info.framebuffer = state.framebuffer_->vk_framebuffer_get();
render_pass_begin_info.renderArea = state.framebuffer_->vk_render_areas_get()[0];

View File

@ -24,6 +24,7 @@ class VKPushConstants;
class VKStorageBuffer;
class VKTexture;
class VKVertexBuffer;
class VKDevice;
/** Command buffer to keep track of the life-time of a command buffer. */
class VKCommandBuffer : NonCopyable, NonMovable {
@ -77,7 +78,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
*/
struct {
/* Reference to the last_framebuffer where begin_render_pass was called for. */
const VKFrameBuffer *framebuffer_ = nullptr;
VKFrameBuffer *framebuffer_ = nullptr;
/* Is last_framebuffer_ currently bound. Each call should ensure the correct state. */
bool framebuffer_active_ = false;
/* Amount of times a check has been requested. */
@ -132,7 +133,8 @@ class VKCommandBuffer : NonCopyable, NonMovable {
public:
virtual ~VKCommandBuffer();
void init(const VkDevice vk_device, const VkQueue vk_queue, VkCommandBuffer vk_command_buffer);
bool is_initialized() const;
void init(const VKDevice &vk_device);
void begin_recording();
void end_recording();
@ -149,7 +151,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
/* Bind the given buffer as an index buffer. */
void bind(const VKBufferWithOffset &index_buffer, VkIndexType index_type);
void begin_render_pass(const VKFrameBuffer &framebuffer);
void begin_render_pass(VKFrameBuffer &framebuffer);
void end_render_pass(const VKFrameBuffer &framebuffer);
/**
@ -175,6 +177,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages);
void pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers);
/**
* Clear color image resource.
*/
@ -183,6 +186,14 @@ class VKCommandBuffer : NonCopyable, NonMovable {
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges);
/**
* Clear depth/stencil aspect of an image resource.
*/
void clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearDepthStencilValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges);
/**
* Clear attachments of the active framebuffer.
*/
@ -214,8 +225,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
}
private:
void encode_recorded_commands();
void submit_encoded_commands();
void submit_commands();
/**
* Validate that there isn't a framebuffer being tracked (bound or not bound).

View File

@ -101,6 +101,34 @@ VkImageAspectFlagBits to_vk_image_aspect_flag_bits(const eGPUTextureFormat forma
return static_cast<VkImageAspectFlagBits>(0);
}
/* Convert GPU frame-buffer bits to the matching Vulkan image aspect flags.
 * Color, depth and stencil bits are translated independently and combined. */
VkImageAspectFlagBits to_vk_image_aspect_flag_bits(const eGPUFrameBufferBits buffers)
{
  VkImageAspectFlags flags = 0;
  if (buffers & GPU_COLOR_BIT) {
    flags |= VK_IMAGE_ASPECT_COLOR_BIT;
  }
  if (buffers & GPU_DEPTH_BIT) {
    flags |= VK_IMAGE_ASPECT_DEPTH_BIT;
  }
  if (buffers & GPU_STENCIL_BIT) {
    flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
  }
  return static_cast<VkImageAspectFlagBits>(flags);
}
/* Map a Vulkan (swap-chain) format to the GPU module texture format.
 * Only the two 8-bit RGBA/BGRA formats are handled; any other input asserts in debug builds
 * and falls back to GPU_RGBA32F. */
eGPUTextureFormat to_gpu_format(const VkFormat format)
{
  if (format == VK_FORMAT_R8G8B8A8_UNORM || format == VK_FORMAT_B8G8R8A8_UNORM) {
    return GPU_RGBA8;
  }
  BLI_assert_unreachable();
  return GPU_RGBA32F;
}
VkFormat to_vk_format(const eGPUTextureFormat format)
{
switch (format) {
@ -394,8 +422,8 @@ static VkFormat to_vk_format_float(const GPUVertCompType type, const uint32_t si
case GPU_COMP_I32:
case GPU_COMP_U32:
/* NOTE: GPU_COMP_I32/U32 using GPU_FETCH_INT_TO_FLOAT isn't natively supported. These are
* converted on host-side to signed floats. */
/* NOTE: GPU_COMP_I32/U32 using GPU_FETCH_INT_TO_FLOAT isn't natively supported. These
* are converted on host-side to signed floats. */
switch (size) {
case 4:
return VK_FORMAT_R32_SFLOAT;
@ -695,8 +723,9 @@ VkImageViewType to_vk_image_view_type(const eGPUTextureType type, const eImageVi
VkComponentMapping to_vk_component_mapping(const eGPUTextureFormat /*format*/)
{
/* TODO: this should map to OpenGL defaults based on the eGPUTextureFormat. The implementation of
* this function will be implemented when implementing other parts of VKTexture. */
/* TODO: this should map to OpenGL defaults based on the eGPUTextureFormat. The
* implementation of this function will be implemented when implementing other parts of
* VKTexture. */
VkComponentMapping component_mapping;
component_mapping.r = VK_COMPONENT_SWIZZLE_R;
component_mapping.g = VK_COMPONENT_SWIZZLE_G;

View File

@ -39,7 +39,9 @@ enum class eImageViewUsage {
};
VkImageAspectFlagBits to_vk_image_aspect_flag_bits(const eGPUTextureFormat format);
VkImageAspectFlagBits to_vk_image_aspect_flag_bits(const eGPUFrameBufferBits buffers);
VkFormat to_vk_format(const eGPUTextureFormat format);
eGPUTextureFormat to_gpu_format(const VkFormat format);
VkFormat to_vk_format(const GPUVertCompType type,
const uint32_t size,
const GPUVertFetchMode fetch_mode);

View File

@ -36,6 +36,10 @@ VKContext::VKContext(void *ghost_window, void *ghost_context)
VKContext::~VKContext()
{
if (surface_texture_) {
GPU_texture_free(surface_texture_);
surface_texture_ = nullptr;
}
VKBackend::get().device_.context_unregister(*this);
delete imm;
@ -44,41 +48,47 @@ VKContext::~VKContext()
void VKContext::sync_backbuffer()
{
if (ghost_window_) {
VkImage vk_image;
VkFramebuffer vk_framebuffer;
VkRenderPass render_pass;
VkExtent2D extent;
uint32_t fb_id;
GHOST_GetVulkanBackbuffer((GHOST_WindowHandle)ghost_window_,
&vk_image,
&vk_framebuffer,
&render_pass,
&extent,
&fb_id);
/* Recreate the gpu::VKFrameBuffer wrapper after every swap. */
if (has_active_framebuffer()) {
deactivate_framebuffer();
if (ghost_context_) {
VKDevice &device = VKBackend::get().device_;
if (!command_buffer_.is_initialized()) {
command_buffer_.init(device);
command_buffer_.begin_recording();
device.init_dummy_buffer(*this);
}
delete back_left;
VKFrameBuffer *framebuffer = new VKFrameBuffer(
"back_left", vk_image, vk_framebuffer, render_pass, extent);
back_left = framebuffer;
back_left->bind(false);
device.descriptor_pools_get().reset();
}
if (ghost_context_) {
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
GHOST_GetVulkanCommandBuffer(static_cast<GHOST_ContextHandle>(ghost_context_),
&command_buffer);
VKDevice &device = VKBackend::get().device_;
command_buffer_.init(device.device_get(), device.queue_get(), command_buffer);
command_buffer_.begin_recording();
device.descriptor_pools_get().reset();
device.init_dummy_buffer(*this);
if (ghost_window_) {
GHOST_VulkanSwapChainData swap_chain_data = {};
GHOST_GetVulkanSwapChainFormat((GHOST_WindowHandle)ghost_window_, &swap_chain_data);
const bool reset_framebuffer = swap_chain_format_ != swap_chain_data.format ||
vk_extent_.width != swap_chain_data.extent.width ||
vk_extent_.height != swap_chain_data.extent.height;
if (reset_framebuffer) {
if (has_active_framebuffer()) {
deactivate_framebuffer();
}
if (surface_texture_) {
GPU_texture_free(surface_texture_);
surface_texture_ = nullptr;
}
surface_texture_ = GPU_texture_create_2d("back-left",
swap_chain_data.extent.width,
swap_chain_data.extent.height,
1,
to_gpu_format(swap_chain_data.format),
GPU_TEXTURE_USAGE_ATTACHMENT,
nullptr);
back_left->attachment_set(GPU_FB_COLOR_ATTACHMENT0,
GPU_ATTACHMENT_TEXTURE(surface_texture_));
back_left->bind(false);
swap_chain_format_ = swap_chain_data.format;
vk_extent_ = swap_chain_data.extent;
}
}
}
@ -100,15 +110,9 @@ void VKContext::deactivate()
is_active_ = false;
}
void VKContext::begin_frame()
{
sync_backbuffer();
}
void VKContext::begin_frame() {}
void VKContext::end_frame()
{
command_buffer_.end_recording();
}
void VKContext::end_frame() {}
void VKContext::flush()
{
@ -145,6 +149,7 @@ void VKContext::activate_framebuffer(VKFrameBuffer &framebuffer)
BLI_assert(active_fb == nullptr);
active_fb = &framebuffer;
framebuffer.update_size();
command_buffer_.begin_render_pass(framebuffer);
}
@ -162,9 +167,7 @@ void VKContext::deactivate_framebuffer()
{
VKFrameBuffer *framebuffer = active_framebuffer_get();
BLI_assert(framebuffer != nullptr);
if (framebuffer->is_valid()) {
command_buffer_.end_render_pass(*framebuffer);
}
command_buffer_.end_render_pass(*framebuffer);
active_fb = nullptr;
}
@ -203,4 +206,67 @@ void VKContext::bind_graphics_pipeline(const GPUPrimType prim_type,
/** \} */
/* -------------------------------------------------------------------- */
/** \name Graphics pipeline
* \{ */
void VKContext::swap_buffers_pre_callback(const GHOST_VulkanSwapChainData *swap_chain_data)
{
VKContext *context = VKContext::get();
BLI_assert(context);
context->swap_buffers_pre_handler(*swap_chain_data);
}
void VKContext::swap_buffers_post_callback()
{
VKContext *context = VKContext::get();
BLI_assert(context);
context->swap_buffers_post_handler();
}
/**
 * Copy the rendered frame into the swap-chain image just before GHOST presents it.
 *
 * Wraps the GHOST-owned swap-chain image in a temporary non-owning #VKTexture, transitions both
 * images to transfer layouts, blits the first color attachment of `back_left` into the
 * swap-chain image, restores the present layout and submits the command buffer.
 *
 * \param swap_chain_data: Image, format and extent of the acquired swap-chain image.
 */
void VKContext::swap_buffers_pre_handler(const GHOST_VulkanSwapChainData &swap_chain_data)
{
  VKFrameBuffer &framebuffer = *unwrap(back_left);

  /* Non-owning wrapper so the swap-chain image can be used through the texture API. */
  VKTexture wrapper("display_texture");
  wrapper.init(swap_chain_data.image,
               VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
               to_gpu_format(swap_chain_data.format));
  wrapper.layout_ensure(*this, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

  framebuffer.color_attachment_layout_ensure(*this, 0, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
  VKTexture *color_attachment = unwrap(unwrap(framebuffer.color_tex(0)));
  color_attachment->layout_ensure(*this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

  /* Source Y offsets run from `height - 1` down to 0, flipping the image vertically —
   * presumably to convert between Blender's bottom-left and Vulkan's top-left origin. */
  VkImageBlit image_blit = {};
  image_blit.srcOffsets[0] = {0, int32_t(swap_chain_data.extent.height) - 1, 0};
  image_blit.srcOffsets[1] = {int32_t(swap_chain_data.extent.width), 0, 1};
  image_blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
  image_blit.srcSubresource.mipLevel = 0;
  image_blit.srcSubresource.baseArrayLayer = 0;
  image_blit.srcSubresource.layerCount = 1;

  image_blit.dstOffsets[0] = {0, 0, 0};
  image_blit.dstOffsets[1] = {
      int32_t(swap_chain_data.extent.width), int32_t(swap_chain_data.extent.height), 1};
  image_blit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
  image_blit.dstSubresource.mipLevel = 0;
  image_blit.dstSubresource.baseArrayLayer = 0;
  image_blit.dstSubresource.layerCount = 1;

  command_buffer_.blit(wrapper,
                       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                       *color_attachment,
                       VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                       Span<VkImageBlit>(&image_blit, 1));
  /* Return the swap-chain image to the layout GHOST expects for presenting. */
  wrapper.layout_ensure(*this, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
  command_buffer_.submit();
}
/**
 * Handle the post swap-buffers event. GHOST may have recreated the swap chain during
 * `swapBuffers`, so re-synchronize the back-buffer (surface texture, format and extent).
 */
void VKContext::swap_buffers_post_handler()
{
  sync_backbuffer();
}
/** \} */
} // namespace blender::gpu

View File

@ -9,6 +9,9 @@
#pragma once
#include "gpu_context_private.hh"
#include "GHOST_Types.h"
#include "vk_command_buffer.hh"
#include "vk_common.hh"
#include "vk_debug.hh"
@ -24,6 +27,9 @@ class VKContext : public Context, NonCopyable {
private:
VKCommandBuffer command_buffer_;
VkExtent2D vk_extent_ = {};
VkFormat swap_chain_format_ = {};
GPUTexture *surface_texture_ = nullptr;
void *ghost_context_;
public:
@ -69,6 +75,13 @@ class VKContext : public Context, NonCopyable {
}
VKStateManager &state_manager_get() const;
static void swap_buffers_pre_callback(const GHOST_VulkanSwapChainData *data);
static void swap_buffers_post_callback();
private:
void swap_buffers_pre_handler(const GHOST_VulkanSwapChainData &data);
void swap_buffers_post_handler();
};
BLI_INLINE bool operator==(const VKContext &a, const VKContext &b)

View File

@ -23,6 +23,9 @@ namespace blender::gpu {
void VKDevice::deinit()
{
VK_ALLOCATION_CALLBACKS;
vkDestroyCommandPool(vk_device_, vk_command_pool_, vk_allocation_callbacks);
dummy_buffer_.free();
sampler_.free();
vmaDestroyAllocator(mem_allocator_);
@ -57,6 +60,7 @@ void VKDevice::init(void *ghost_context)
VKBackend::capabilities_init(*this);
init_debug_callbacks();
init_memory_allocator();
init_command_pools();
init_descriptor_pools();
sampler_.create();
@ -88,6 +92,17 @@ void VKDevice::init_memory_allocator()
vmaCreateAllocator(&info, &mem_allocator_);
}
void VKDevice::init_command_pools()
{
VK_ALLOCATION_CALLBACKS;
VkCommandPoolCreateInfo command_pool_info = {};
command_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
command_pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
command_pool_info.queueFamilyIndex = vk_queue_family_;
vkCreateCommandPool(vk_device_, &command_pool_info, vk_allocation_callbacks, &vk_command_pool_);
}
void VKDevice::init_descriptor_pools()
{
descriptor_pools_.init(vk_device_);
@ -99,7 +114,10 @@ void VKDevice::init_dummy_buffer(VKContext &context)
return;
}
dummy_buffer_.create(sizeof(float4x4), GPU_USAGE_DEVICE_ONLY, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
dummy_buffer_.create(sizeof(float4x4),
GPU_USAGE_DEVICE_ONLY,
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT));
dummy_buffer_.clear(context, 0);
}

View File

@ -38,6 +38,7 @@ class VKDevice : public NonCopyable {
VkDevice vk_device_ = VK_NULL_HANDLE;
uint32_t vk_queue_family_ = 0;
VkQueue vk_queue_ = VK_NULL_HANDLE;
VkCommandPool vk_command_pool_ = VK_NULL_HANDLE;
/* Dummy sampler for now. */
VKSampler sampler_;
@ -125,6 +126,11 @@ class VKDevice : public NonCopyable {
return sampler_;
}
/** Command pool that command buffers for this device are allocated from. */
VkCommandPool vk_command_pool_get() const
{
  /* NOTE: top-level `const` on a by-value return type is meaningless and was dropped
   * (clang-tidy `readability-const-return-type`). Callers are unaffected. */
  return vk_command_pool_;
}
bool is_initialized() const;
void init(void *ghost_context);
/**
@ -164,6 +170,7 @@ class VKDevice : public NonCopyable {
void init_physical_device_properties();
void init_debug_callbacks();
void init_memory_allocator();
void init_command_pools();
void init_descriptor_pools();
/* During initialization the backend requires access to update the workarounds. */

View File

@ -10,6 +10,7 @@
#include "vk_backend.hh"
#include "vk_context.hh"
#include "vk_memory.hh"
#include "vk_state_manager.hh"
#include "vk_texture.hh"
namespace blender::gpu {
@ -20,36 +21,12 @@ namespace blender::gpu {
VKFrameBuffer::VKFrameBuffer(const char *name) : FrameBuffer(name)
{
immutable_ = false;
flip_viewport_ = false;
size_set(1, 1);
}
VKFrameBuffer::VKFrameBuffer(const char *name,
VkImage vk_image,
VkFramebuffer vk_framebuffer,
VkRenderPass vk_render_pass,
VkExtent2D vk_extent)
: FrameBuffer(name)
{
immutable_ = true;
flip_viewport_ = true;
/* Never update an internal frame-buffer. */
dirty_attachments_ = false;
vk_image_ = vk_image;
vk_framebuffer_ = vk_framebuffer;
vk_render_pass_ = vk_render_pass;
size_set(vk_extent.width, vk_extent.height);
viewport_reset();
scissor_reset();
}
VKFrameBuffer::~VKFrameBuffer()
{
if (!immutable_) {
render_pass_free();
}
render_pass_free();
}
/** \} */
@ -66,8 +43,6 @@ void VKFrameBuffer::bind(bool /*enabled_srgb*/)
context.deactivate_framebuffer();
}
update_attachments();
context.activate_framebuffer(*this);
}
@ -83,15 +58,6 @@ Array<VkViewport, 16> VKFrameBuffer::vk_viewports_get() const
viewport.height = viewport_[index][3];
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
/*
* Vulkan has origin to the top left, Blender bottom left. We counteract this by using a
* negative viewport when flip_viewport_ is set. This flips the viewport making any draw/blit
* use the correct orientation.
*/
if (flip_viewport_) {
viewport.y = height_ - viewport_[index][1];
viewport.height = -viewport_[index][3];
}
index++;
}
return viewports;
@ -187,14 +153,35 @@ void VKFrameBuffer::clear(const eGPUFrameBufferBits buffers,
{
Vector<VkClearAttachment> attachments;
if (buffers & (GPU_DEPTH_BIT | GPU_STENCIL_BIT)) {
build_clear_attachments_depth_stencil(buffers, clear_depth, clear_stencil, attachments);
VKContext &context = *VKContext::get();
/* Clearing depth via vkCmdClearAttachments requires a render pass with write depth enabled.
* When not enabled, clearing should be done via texture directly. */
if (context.state_manager_get().state.write_mask & GPU_WRITE_DEPTH) {
build_clear_attachments_depth_stencil(buffers, clear_depth, clear_stencil, attachments);
}
else {
VKTexture *depth_texture = unwrap(unwrap(depth_tex()));
if (depth_texture != nullptr) {
if (G.debug & G_DEBUG_GPU) {
std::cout
<< "PERFORMANCE: impact clearing depth texture in render pass that doesn't allow "
"depth writes.\n";
}
depth_texture->ensure_allocated();
depth_attachment_layout_ensure(context, VK_IMAGE_LAYOUT_GENERAL);
depth_texture->clear_depth_stencil(buffers, clear_depth, clear_stencil);
}
}
}
if (buffers & GPU_COLOR_BIT) {
float clear_color_single[4];
copy_v4_v4(clear_color_single, clear_color);
build_clear_attachments_color(&clear_color_single, false, attachments);
}
clear(attachments);
if (!attachments.is_empty()) {
clear(attachments);
}
}
void VKFrameBuffer::clear_multi(const float (*clear_color)[4])
@ -224,6 +211,7 @@ void VKFrameBuffer::attachment_set_loadstore_op(GPUAttachmentType /*type*/,
eGPULoadOp /*load_action*/,
eGPUStoreOp /*store_action*/)
{
NOT_YET_IMPLEMENTED;
}
/** \} */
@ -288,23 +276,19 @@ void VKFrameBuffer::blit_to(eGPUFrameBufferBits planes,
if (src_attachment.tex == nullptr) {
return;
}
color_attachment_layout_ensure(context, src_slot, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
VKTexture &src_texture = *unwrap(unwrap(src_attachment.tex));
src_texture.layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
/* Retrieve destination texture. */
const VKFrameBuffer &dst_framebuffer = *unwrap(dst);
VKFrameBuffer &dst_framebuffer = *unwrap(dst);
dst_framebuffer.color_attachment_layout_ensure(
context, dst_slot, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
const GPUAttachment &dst_attachment =
dst_framebuffer.attachments_[GPU_FB_COLOR_ATTACHMENT0 + dst_slot];
VKTexture *dst_texture = nullptr;
VKTexture tmp_texture("FramebufferTexture");
if (dst_attachment.tex) {
dst_texture = unwrap(unwrap(dst_attachment.tex));
dst_texture->layout_ensure(context, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
}
else {
tmp_texture.init(dst_framebuffer.vk_image_get(), VK_IMAGE_LAYOUT_GENERAL);
dst_texture = &tmp_texture;
if (dst_attachment.tex == nullptr) {
return;
}
VKTexture &dst_texture = *unwrap(unwrap(dst_attachment.tex));
VkImageBlit image_blit = {};
image_blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
@ -326,16 +310,10 @@ void VKFrameBuffer::blit_to(eGPUFrameBufferBits planes,
image_blit.dstOffsets[0].y = dst_offset_y;
image_blit.dstOffsets[0].z = 0;
image_blit.dstOffsets[1].x = dst_offset_x + src_texture.width_get();
image_blit.dstOffsets[1].y = dst_offset_x + src_texture.height_get();
image_blit.dstOffsets[1].y = dst_offset_y + src_texture.height_get();
image_blit.dstOffsets[1].z = 1;
const bool should_flip = flip_viewport_ != dst_framebuffer.flip_viewport_;
if (should_flip) {
image_blit.dstOffsets[0].y = dst_framebuffer.height_ - dst_offset_y;
image_blit.dstOffsets[1].y = dst_framebuffer.height_ - dst_offset_y - src_texture.height_get();
}
context.command_buffer_get().blit(*dst_texture, src_texture, Span<VkImageBlit>(&image_blit, 1));
context.command_buffer_get().blit(dst_texture, src_texture, Span<VkImageBlit>(&image_blit, 1));
}
/** \} */
@ -344,15 +322,11 @@ void VKFrameBuffer::blit_to(eGPUFrameBufferBits planes,
/** \name Update attachments
* \{ */
void VKFrameBuffer::update_attachments()
void VKFrameBuffer::vk_render_pass_ensure()
{
if (immutable_) {
return;
}
if (!dirty_attachments_) {
return;
}
render_pass_free();
render_pass_create();
@ -361,7 +335,6 @@ void VKFrameBuffer::update_attachments()
void VKFrameBuffer::render_pass_create()
{
BLI_assert(!immutable_);
BLI_assert(vk_render_pass_ == VK_NULL_HANDLE);
BLI_assert(vk_framebuffer_ == VK_NULL_HANDLE);
@ -494,7 +467,6 @@ void VKFrameBuffer::render_pass_create()
void VKFrameBuffer::render_pass_free()
{
BLI_assert(!immutable_);
if (vk_render_pass_ == VK_NULL_HANDLE) {
return;
}
@ -510,6 +482,56 @@ void VKFrameBuffer::render_pass_free()
vk_framebuffer_ = VK_NULL_HANDLE;
}
/**
 * Ensure the texture bound to the given color attachment slot is in the requested image layout.
 *
 * Does nothing when the slot has no texture or the texture is already in that layout.
 * Marks the attachments dirty when a transition was performed.
 */
void VKFrameBuffer::color_attachment_layout_ensure(VKContext &context,
                                                   int color_attachment,
                                                   VkImageLayout requested_layout)
{
  VKTexture *texture = unwrap(unwrap(color_tex(color_attachment)));
  if (texture == nullptr || texture->current_layout_get() == requested_layout) {
    return;
  }
  texture->layout_ensure(context, requested_layout);
  dirty_attachments_ = true;
}
/**
 * Ensure the depth attachment texture is in the requested image layout.
 *
 * Does nothing when no depth texture is attached or it is already in that layout.
 * Marks the attachments dirty when a transition was performed.
 */
void VKFrameBuffer::depth_attachment_layout_ensure(VKContext &context,
                                                   VkImageLayout requested_layout)
{
  VKTexture *texture = unwrap(unwrap(depth_tex()));
  if (texture == nullptr || texture->current_layout_get() == requested_layout) {
    return;
  }
  texture->layout_ensure(context, requested_layout);
  dirty_attachments_ = true;
}
void VKFrameBuffer::update_size()
{
if (!dirty_attachments_) {
return;
}
for (int i = 0; i < GPU_FB_MAX_ATTACHMENT; i++) {
GPUAttachment &attachment = attachments_[i];
if (attachment.tex) {
int size[3];
GPU_texture_get_mipmap_size(attachment.tex, attachment.mip, size);
size_set(size[0], size[1]);
return;
}
}
size_set(1, 1);
}
/** \} */
} // namespace blender::gpu

View File

@ -19,6 +19,7 @@
#include "vk_image_view.hh"
namespace blender::gpu {
class VKContext;
class VKFrameBuffer : public FrameBuffer {
private:
@ -28,20 +29,8 @@ class VKFrameBuffer : public FrameBuffer {
VkDevice vk_device_ = VK_NULL_HANDLE;
/* Base render pass used for framebuffer creation. */
VkRenderPass vk_render_pass_ = VK_NULL_HANDLE;
VkImage vk_image_ = VK_NULL_HANDLE;
/* Number of layers if the attachments are layered textures. */
int depth_ = 1;
/** Internal frame-buffers are immutable. */
bool immutable_;
/**
* Should we flip the viewport to match Blenders coordinate system. We flip the viewport for
* off-screen frame-buffers.
*
* When two frame-buffers are blitted we also check if the coordinate system should be flipped
* during blitting.
*/
bool flip_viewport_ = false;
Vector<VKImageView, GPU_FB_MAX_ATTACHMENT> image_views_;
@ -51,16 +40,6 @@ class VKFrameBuffer : public FrameBuffer {
**/
VKFrameBuffer(const char *name);
/**
* Special frame-buffer encapsulating internal window frame-buffer.
* This just act as a wrapper, the actual allocations are done by GHOST_ContextVK.
**/
VKFrameBuffer(const char *name,
VkImage vk_image,
VkFramebuffer vk_framebuffer,
VkRenderPass vk_render_pass,
VkExtent2D vk_extent);
~VKFrameBuffer();
void bind(bool enabled_srgb) override;
@ -103,30 +82,31 @@ class VKFrameBuffer : public FrameBuffer {
return vk_framebuffer_;
}
void vk_render_pass_ensure();
/* Render pass handle; only valid after the render pass has been created and while the
 * attachments are not dirty (both enforced by the asserts below). */
VkRenderPass vk_render_pass_get() const
{
  BLI_assert(vk_render_pass_ != VK_NULL_HANDLE);
  BLI_assert(!dirty_attachments_);
  return vk_render_pass_;
}
Array<VkViewport, 16> vk_viewports_get() const;
Array<VkRect2D, 16> vk_render_areas_get() const;
/* Image handle associated with this frame-buffer; asserts when no image has been assigned. */
VkImage vk_image_get() const
{
  BLI_assert(vk_image_ != VK_NULL_HANDLE);
  return vk_image_;
}
void depth_attachment_layout_ensure(VKContext &context, VkImageLayout requested_layout);
void color_attachment_layout_ensure(VKContext &context,
int color_attachment,
VkImageLayout requested_layout);
/**
* Is this frame-buffer immutable?
* Ensure that the size of the framebuffer matches the first attachment resolution.
*
* Frame-buffers that are owned by GHOST are immutable and
* don't have any attachments assigned. It should be assumed that there is a single color texture
* in slot 0.
* Frame buffers attachments are updated when actually used as the image layout has to be
* correct. After binding framebuffers the layout of images can still be modified.
*
* But for correct behavior of blit/clear operation the size of the framebuffer should be
* set, when activating the frame buffer.
*/
/* True for frame-buffers owned by GHOST that wrap the window back-buffer; those have no
 * attachments assigned and must never be recreated by the GPU module. */
bool is_immutable() const
{
  return immutable_;
}
void update_size();
private:
void update_attachments();

View File

@ -135,6 +135,7 @@ void VKPipeline::finalize(VKContext &context,
}
VKFrameBuffer &framebuffer = *context.active_framebuffer_get();
framebuffer.vk_render_pass_ensure();
VkGraphicsPipelineCreateInfo pipeline_create_info = {};
pipeline_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;

View File

@ -78,28 +78,11 @@ void VKPipelineStateManager::force_state(const GPUState &state,
void VKPipelineStateManager::finalize_color_blend_state(const VKFrameBuffer &framebuffer)
{
color_blend_attachments.clear();
if (framebuffer.is_immutable()) {
/* Immutable frame-buffers are owned by GHOST and don't have any attachments assigned. In this
* case we assume that there is a single color texture assigned. */
color_blend_attachments.append(color_blend_attachment_template);
}
else {
bool is_sequential = true;
for (int color_slot = 0; color_slot < GPU_FB_MAX_COLOR_ATTACHMENT; color_slot++) {
VKTexture *texture = unwrap(unwrap(framebuffer.color_tex(color_slot)));
if (texture) {
BLI_assert(is_sequential);
color_blend_attachments.append(color_blend_attachment_template);
}
else {
/* Test to detect if all color textures are sequential attached from the first slot. We
* assume at this moment that this is the case. Otherwise we need to rewire how attachments
* and bindings work. */
is_sequential = false;
}
for (int color_slot = 0; color_slot < GPU_FB_MAX_COLOR_ATTACHMENT; color_slot++) {
VKTexture *texture = unwrap(unwrap(framebuffer.color_tex(color_slot)));
if (texture) {
color_blend_attachments.append(color_blend_attachment_template);
}
UNUSED_VARS_NDEBUG(is_sequential);
}
pipeline_color_blend_state.attachmentCount = color_blend_attachments.size();

View File

@ -31,10 +31,11 @@ VKTexture::~VKTexture()
}
}
void VKTexture::init(VkImage vk_image, VkImageLayout layout)
void VKTexture::init(VkImage vk_image, VkImageLayout layout, eGPUTextureFormat texture_format)
{
vk_image_ = vk_image;
current_layout_ = layout;
format_ = texture_format;
}
void VKTexture::generate_mipmap()
@ -144,6 +145,32 @@ void VKTexture::clear(eGPUDataFormat format, const void *data)
vk_image_, current_layout_get(), clear_color, Span<VkImageSubresourceRange>(&range, 1));
}
void VKTexture::clear_depth_stencil(const eGPUFrameBufferBits buffers,
float clear_depth,
uint clear_stencil)
{
BLI_assert(buffers & (GPU_DEPTH_BIT | GPU_STENCIL_BIT));
if (!is_allocated()) {
allocate();
}
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
VkClearDepthStencilValue clear_depth_stencil;
clear_depth_stencil.depth = clear_depth;
clear_depth_stencil.stencil = clear_stencil;
VkImageSubresourceRange range = {0};
range.aspectMask = to_vk_image_aspect_flag_bits(buffers & (GPU_DEPTH_BIT | GPU_STENCIL_BIT));
range.levelCount = VK_REMAINING_MIP_LEVELS;
range.layerCount = VK_REMAINING_ARRAY_LAYERS;
layout_ensure(context, VK_IMAGE_LAYOUT_GENERAL);
command_buffer.clear(vk_image_,
current_layout_get(),
clear_depth_stencil,
Span<VkImageSubresourceRange>(&range, 1));
}
void VKTexture::swizzle_set(const char /*swizzle_mask*/[4])
{
NOT_YET_IMPLEMENTED;
@ -499,7 +526,6 @@ void VKTexture::current_layout_set(const VkImageLayout new_layout)
void VKTexture::layout_ensure(VKContext &context, const VkImageLayout requested_layout)
{
BLI_assert(is_allocated());
const VkImageLayout current_layout = current_layout_get();
if (current_layout == requested_layout) {
return;
@ -513,7 +539,6 @@ void VKTexture::layout_ensure(VKContext &context,
const VkImageLayout current_layout,
const VkImageLayout requested_layout)
{
BLI_assert(is_allocated());
VkImageMemoryBarrier barrier{};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = current_layout;

View File

@ -42,11 +42,14 @@ class VKTexture : public Texture, public VKBindableResource {
virtual ~VKTexture() override;
void init(VkImage vk_image, VkImageLayout layout);
void init(VkImage vk_image, VkImageLayout layout, eGPUTextureFormat texture_format);
void generate_mipmap() override;
void copy_to(Texture *tex) override;
void clear(eGPUDataFormat format, const void *data) override;
void clear_depth_stencil(const eGPUFrameBufferBits buffer,
float clear_depth,
uint clear_stencil);
void swizzle_set(const char swizzle_mask[4]) override;
void mip_range_set(int min, int max) override;
void *read(int mip, eGPUDataFormat format) override;