Vulkan: Share Device Between Contexts #107606

Merged
Jeroen Bakker merged 2 commits from Jeroen-Bakker/blender:vulkan-share-device into main 2023-05-04 10:06:54 +02:00
29 changed files with 773 additions and 658 deletions

View File

@@ -23,6 +23,7 @@
#include <cstdio>
#include <cstring>
#include <iostream>
#include <optional>
#include <sstream>
#include <sys/stat.h>
@@ -121,6 +122,224 @@ static bool vklayer_config_exist(const char *vk_extension_config)
/* Triple buffering. */
const int MAX_FRAMES_IN_FLIGHT = 2;
/* -------------------------------------------------------------------- */
/** \name Vulkan Device
* \{ */
class GHOST_DeviceVK {
public:
VkInstance instance = VK_NULL_HANDLE;
VkPhysicalDevice physical_device = VK_NULL_HANDLE;
VkDevice device = VK_NULL_HANDLE;
uint32_t generic_queue_family = 0;
VkPhysicalDeviceProperties properties = {};
VkPhysicalDeviceFeatures features = {};
int users = 0;
public:
GHOST_DeviceVK(VkInstance vk_instance, VkPhysicalDevice vk_physical_device)
: instance(vk_instance), physical_device(vk_physical_device)
{
vkGetPhysicalDeviceProperties(physical_device, &properties);
vkGetPhysicalDeviceFeatures(physical_device, &features);
}
~GHOST_DeviceVK()
{
if (device != VK_NULL_HANDLE) {
vkDestroyDevice(device, NULL);
}
}
void wait_idle()
{
if (device) {
vkDeviceWaitIdle(device);
}
}
bool extensions_support(const vector<const char *> &required_extensions)
{
uint32_t ext_count;
vkEnumerateDeviceExtensionProperties(physical_device, NULL, &ext_count, NULL);
vector<VkExtensionProperties> available_exts(ext_count);
vkEnumerateDeviceExtensionProperties(physical_device, NULL, &ext_count, available_exts.data());
for (const auto &extension_needed : required_extensions) {
bool found = false;
for (const auto &extension : available_exts) {
if (strcmp(extension_needed, extension.extensionName) == 0) {
found = true;
break;
}
}
if (!found) {
return false;
}
}
return true;
}
void ensure_device(vector<const char *> &layers_enabled, vector<const char *> &extensions_device)
{
if (device != VK_NULL_HANDLE) {
return;
}
init_generic_queue_family();
vector<VkDeviceQueueCreateInfo> queue_create_infos;
float queue_priorities[] = {1.0f};
VkDeviceQueueCreateInfo graphic_queue_create_info = {};
graphic_queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
graphic_queue_create_info.queueFamilyIndex = generic_queue_family;
graphic_queue_create_info.queueCount = 1;
graphic_queue_create_info.pQueuePriorities = queue_priorities;
queue_create_infos.push_back(graphic_queue_create_info);
VkPhysicalDeviceFeatures device_features = {};
#if STRICT_REQUIREMENTS
device_features.geometryShader = VK_TRUE;
device_features.dualSrcBlend = VK_TRUE;
device_features.logicOp = VK_TRUE;
#endif
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.queueCreateInfoCount = static_cast<uint32_t>(queue_create_infos.size());
device_create_info.pQueueCreateInfos = queue_create_infos.data();
/* layers_enabled are the same as the instance layers.
* This is only needed for Vulkan 1.0 implementations. */
device_create_info.enabledLayerCount = static_cast<uint32_t>(layers_enabled.size());
device_create_info.ppEnabledLayerNames = layers_enabled.data();
device_create_info.enabledExtensionCount = static_cast<uint32_t>(extensions_device.size());
device_create_info.ppEnabledExtensionNames = extensions_device.data();
device_create_info.pEnabledFeatures = &device_features;
vkCreateDevice(physical_device, &device_create_info, NULL, &device);
}
void init_generic_queue_family()
{
uint32_t queue_family_count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &queue_family_count, NULL);
vector<VkQueueFamilyProperties> queue_families(queue_family_count);
vkGetPhysicalDeviceQueueFamilyProperties(
physical_device, &queue_family_count, queue_families.data());
generic_queue_family = 0;
for (const auto &queue_family : queue_families) {
/* By spec, every Vulkan implementation must have at least one queue family that supports both
* graphics and compute pipelines. We select this one; a compute-only queue family hints at an
* async compute implementation. */
if ((queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) &&
(queue_family.queueFlags & VK_QUEUE_COMPUTE_BIT))
{
return;
}
generic_queue_family++;
}
fprintf(stderr, "Couldn't find any Graphic queue family on selected device\n");
return;
}
};
/**
* A shared device between multiple contexts.
*
* The logical device needs to be shared because multiple contexts can be created, and they must
* all use the same logical Vulkan device; otherwise memory operations might be performed on the
* incorrect device.
*/
static std::optional<GHOST_DeviceVK> vulkan_device;
static GHOST_TSuccess ensure_vulkan_device(VkInstance vk_instance,
VkSurfaceKHR vk_surface,
vector<const char *> required_extensions)
{
if (vulkan_device.has_value()) {
return GHOST_kSuccess;
}
VkPhysicalDevice best_physical_device = VK_NULL_HANDLE;
uint32_t device_count = 0;
vkEnumeratePhysicalDevices(vk_instance, &device_count, NULL);
vector<VkPhysicalDevice> physical_devices(device_count);
vkEnumeratePhysicalDevices(vk_instance, &device_count, physical_devices.data());
int best_device_score = -1;
for (const auto &physical_device : physical_devices) {
GHOST_DeviceVK device_vk(vk_instance, physical_device);
if (!device_vk.extensions_support(required_extensions)) {
continue;
}
if (vk_surface != VK_NULL_HANDLE) {
uint32_t format_count;
vkGetPhysicalDeviceSurfaceFormatsKHR(
device_vk.physical_device, vk_surface, &format_count, NULL);
uint32_t present_count;
vkGetPhysicalDeviceSurfacePresentModesKHR(
device_vk.physical_device, vk_surface, &present_count, NULL);
/* For now anything will do. */
if (format_count == 0 || present_count == 0) {
continue;
}
}
#if STRICT_REQUIREMENTS
if (!device_vk.features.geometryShader || !device_vk.features.dualSrcBlend ||
!device_vk.features.logicOp)
{
continue;
}
#endif
int device_score = 0;
switch (device_vk.properties.deviceType) {
case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
device_score = 400;
break;
case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
device_score = 300;
break;
case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
device_score = 200;
break;
case VK_PHYSICAL_DEVICE_TYPE_CPU:
device_score = 100;
break;
default:
break;
}
if (device_score > best_device_score) {
best_physical_device = physical_device;
best_device_score = device_score;
}
}
if (best_physical_device == VK_NULL_HANDLE) {
fprintf(stderr, "Error: No suitable Vulkan Device found!\n");
return GHOST_kFailure;
}
vulkan_device = std::make_optional<GHOST_DeviceVK>(vk_instance, best_physical_device);
return GHOST_kSuccess;
}
/** \} */
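Aside for readers: the reference counting above is easier to follow outside of the diff, so here is a minimal stand-alone sketch of the sharing pattern. It is illustrative only; `SharedDevice`, `acquire_device` and `release_device` are hypothetical names, not part of this patch.

#include <cassert>
#include <optional>

/* Hypothetical stand-in for GHOST_DeviceVK, reduced to the user counter. */
struct SharedDevice {
  int users = 0;
};

static std::optional<SharedDevice> shared_device;

/* Mirrors context creation: the first caller creates the device, later
 * callers only bump the user count and reuse it. */
static SharedDevice &acquire_device()
{
  if (!shared_device.has_value()) {
    shared_device.emplace();
  }
  shared_device->users++;
  return *shared_device;
}

/* Mirrors `~GHOST_ContextVK`: the last user tears the device down. */
static void release_device()
{
  assert(shared_device.has_value() && shared_device->users > 0);
  shared_device->users--;
  if (shared_device->users == 0) {
    shared_device.reset();
  }
}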
GHOST_ContextVK::GHOST_ContextVK(bool stereoVisual,
#ifdef _WIN32
HWND hwnd,
@@ -155,9 +374,6 @@ GHOST_ContextVK::GHOST_ContextVK(bool stereoVisual,
m_context_major_version(contextMajorVersion),
m_context_minor_version(contextMinorVersion),
m_debug(debug),
m_instance(VK_NULL_HANDLE),
m_physical_device(VK_NULL_HANDLE),
m_device(VK_NULL_HANDLE),
m_command_pool(VK_NULL_HANDLE),
m_surface(VK_NULL_HANDLE),
m_swapchain(VK_NULL_HANDLE),
@@ -167,57 +383,55 @@ GHOST_ContextVK::GHOST_ContextVK(bool stereoVisual,
GHOST_ContextVK::~GHOST_ContextVK()
{
if (m_device) {
vkDeviceWaitIdle(m_device);
}
if (vulkan_device.has_value()) {
GHOST_DeviceVK &device_vk = *vulkan_device;
device_vk.wait_idle();
destroySwapchain();
destroySwapchain();
if (m_command_pool != VK_NULL_HANDLE) {
vkDestroyCommandPool(m_device, m_command_pool, NULL);
}
if (m_device != VK_NULL_HANDLE) {
vkDestroyDevice(m_device, NULL);
}
if (m_surface != VK_NULL_HANDLE) {
vkDestroySurfaceKHR(m_instance, m_surface, NULL);
}
if (m_instance != VK_NULL_HANDLE) {
vkDestroyInstance(m_instance, NULL);
if (m_command_pool != VK_NULL_HANDLE) {
vkDestroyCommandPool(device_vk.device, m_command_pool, NULL);
}
if (m_surface != VK_NULL_HANDLE) {
vkDestroySurfaceKHR(device_vk.instance, m_surface, NULL);
}
device_vk.users--;
if (device_vk.users == 0) {
vulkan_device.reset();
}
}
}
GHOST_TSuccess GHOST_ContextVK::destroySwapchain()
{
if (m_device != VK_NULL_HANDLE) {
vkDeviceWaitIdle(m_device);
}
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
m_in_flight_images.resize(0);
VkDevice device = vulkan_device->device;
for (auto semaphore : m_image_available_semaphores) {
vkDestroySemaphore(m_device, semaphore, NULL);
vkDestroySemaphore(device, semaphore, NULL);
}
for (auto semaphore : m_render_finished_semaphores) {
vkDestroySemaphore(m_device, semaphore, NULL);
vkDestroySemaphore(device, semaphore, NULL);
}
for (auto fence : m_in_flight_fences) {
vkDestroyFence(m_device, fence, NULL);
vkDestroyFence(device, fence, NULL);
}
for (auto framebuffer : m_swapchain_framebuffers) {
vkDestroyFramebuffer(m_device, framebuffer, NULL);
vkDestroyFramebuffer(device, framebuffer, NULL);
}
if (m_render_pass != VK_NULL_HANDLE) {
vkDestroyRenderPass(m_device, m_render_pass, NULL);
vkDestroyRenderPass(device, m_render_pass, NULL);
}
for (auto command_buffer : m_command_buffers) {
vkFreeCommandBuffers(m_device, m_command_pool, 1, &command_buffer);
vkFreeCommandBuffers(device, m_command_pool, 1, &command_buffer);
}
for (auto imageView : m_swapchain_image_views) {
vkDestroyImageView(m_device, imageView, NULL);
vkDestroyImageView(device, imageView, NULL);
}
if (m_swapchain != VK_NULL_HANDLE) {
vkDestroySwapchainKHR(m_device, m_swapchain, NULL);
vkDestroySwapchainKHR(device, m_swapchain, NULL);
}
return GHOST_kSuccess;
}
@@ -228,9 +442,12 @@ GHOST_TSuccess GHOST_ContextVK::swapBuffers()
return GHOST_kFailure;
}
vkWaitForFences(m_device, 1, &m_in_flight_fences[m_currentFrame], VK_TRUE, UINT64_MAX);
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
VkDevice device = vulkan_device->device;
VkResult result = vkAcquireNextImageKHR(m_device,
vkWaitForFences(device, 1, &m_in_flight_fences[m_currentFrame], VK_TRUE, UINT64_MAX);
VkResult result = vkAcquireNextImageKHR(device,
m_swapchain,
UINT64_MAX,
m_image_available_semaphores[m_currentFrame],
@@ -252,12 +469,36 @@ GHOST_TSuccess GHOST_ContextVK::swapBuffers()
/* Check if a previous frame is using this image (i.e. there is a fence to wait on). */
if (m_in_flight_images[m_currentImage] != VK_NULL_HANDLE) {
vkWaitForFences(m_device, 1, &m_in_flight_images[m_currentImage], VK_TRUE, UINT64_MAX);
vkWaitForFences(device, 1, &m_in_flight_images[m_currentImage], VK_TRUE, UINT64_MAX);
}
m_in_flight_images[m_currentImage] = m_in_flight_fences[m_currentFrame];
VkPipelineStageFlags wait_stages[] = {VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT};
/* Image should be in present src layout before presenting to screen. */
VkCommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
VK_CHECK(vkBeginCommandBuffer(m_command_buffers[m_currentImage], &begin_info));
VkImageMemoryBarrier barrier{};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
barrier.image = m_swapchain_images[m_currentImage];
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
vkCmdPipelineBarrier(m_command_buffers[m_currentImage],
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_DEPENDENCY_BY_REGION_BIT,
0,
nullptr,
0,
nullptr,
1,
&barrier);
VK_CHECK(vkEndCommandBuffer(m_command_buffers[m_currentImage]));
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.waitSemaphoreCount = 1;
@@ -268,11 +509,11 @@ GHOST_TSuccess GHOST_ContextVK::swapBuffers()
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &m_render_finished_semaphores[m_currentFrame];
vkResetFences(m_device, 1, &m_in_flight_fences[m_currentFrame]);
vkResetFences(device, 1, &m_in_flight_fences[m_currentFrame]);
VK_CHECK(vkQueueSubmit(m_graphic_queue, 1, &submit_info, m_in_flight_fences[m_currentFrame]));
do {
result = vkWaitForFences(m_device, 1, &m_in_flight_fences[m_currentFrame], VK_TRUE, 10000);
result = vkWaitForFences(device, 1, &m_in_flight_fences[m_currentFrame], VK_TRUE, 10000);
} while (result == VK_TIMEOUT);
VK_CHECK(vkQueueWaitIdle(m_graphic_queue));
@@ -327,10 +568,17 @@ GHOST_TSuccess GHOST_ContextVK::getVulkanHandles(void *r_instance,
uint32_t *r_graphic_queue_family,
void *r_queue)
{
*((VkInstance *)r_instance) = m_instance;
*((VkPhysicalDevice *)r_physical_device) = m_physical_device;
*((VkDevice *)r_device) = m_device;
*r_graphic_queue_family = m_queue_family_graphic;
*((VkInstance *)r_instance) = VK_NULL_HANDLE;
*((VkPhysicalDevice *)r_physical_device) = VK_NULL_HANDLE;
*((VkDevice *)r_device) = VK_NULL_HANDLE;
if (vulkan_device.has_value()) {
*((VkInstance *)r_instance) = vulkan_device->instance;
*((VkPhysicalDevice *)r_physical_device) = vulkan_device->physical_device;
*((VkDevice *)r_device) = vulkan_device->device;
*r_graphic_queue_family = vulkan_device->generic_queue_family;
}
*((VkQueue *)r_queue) = m_graphic_queue;
return GHOST_kSuccess;
@@ -454,168 +702,6 @@ static void enableLayer(vector<VkLayerProperties> &layers_available,
#undef PUSH_VKLAYER
}
static bool device_extensions_support(VkPhysicalDevice device, vector<const char *> required_exts)
{
uint32_t ext_count;
vkEnumerateDeviceExtensionProperties(device, NULL, &ext_count, NULL);
vector<VkExtensionProperties> available_exts(ext_count);
vkEnumerateDeviceExtensionProperties(device, NULL, &ext_count, available_exts.data());
for (const auto &extension_needed : required_exts) {
bool found = false;
for (const auto &extension : available_exts) {
if (strcmp(extension_needed, extension.extensionName) == 0) {
found = true;
break;
}
}
if (!found) {
return false;
}
}
return true;
}
GHOST_TSuccess GHOST_ContextVK::pickPhysicalDevice(vector<const char *> required_exts)
{
m_physical_device = VK_NULL_HANDLE;
uint32_t device_count = 0;
vkEnumeratePhysicalDevices(m_instance, &device_count, NULL);
vector<VkPhysicalDevice> physical_devices(device_count);
vkEnumeratePhysicalDevices(m_instance, &device_count, physical_devices.data());
int best_device_score = -1;
for (const auto &physical_device : physical_devices) {
VkPhysicalDeviceProperties device_properties;
vkGetPhysicalDeviceProperties(physical_device, &device_properties);
VkPhysicalDeviceFeatures features;
vkGetPhysicalDeviceFeatures(physical_device, &features);
DEBUG_PRINTF("%s : \n", device_properties.deviceName);
if (!device_extensions_support(physical_device, required_exts)) {
DEBUG_PRINTF(" - Device does not support required device extensions.\n");
continue;
}
if (m_surface != VK_NULL_HANDLE) {
uint32_t format_count;
vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, m_surface, &format_count, NULL);
uint32_t present_count;
vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, m_surface, &present_count, NULL);
/* For now anything will do. */
if (format_count == 0 || present_count == 0) {
DEBUG_PRINTF(" - Device does not support presentation.\n");
continue;
}
}
if (!features.geometryShader) {
/* Needed for wide lines emulation and barycentric coords and a few others. */
DEBUG_PRINTF(" - Device does not support geometryShader.\n");
}
if (!features.dualSrcBlend) {
DEBUG_PRINTF(" - Device does not support dualSrcBlend.\n");
}
if (!features.logicOp) {
/* Needed by UI. */
DEBUG_PRINTF(" - Device does not support logicOp.\n");
}
#if STRICT_REQUIREMENTS
if (!features.geometryShader || !features.dualSrcBlend || !features.logicOp) {
continue;
}
#endif
int device_score = 0;
switch (device_properties.deviceType) {
case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
device_score = 400;
break;
case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
device_score = 300;
break;
case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
device_score = 200;
break;
case VK_PHYSICAL_DEVICE_TYPE_CPU:
device_score = 100;
break;
default:
break;
}
if (device_score > best_device_score) {
m_physical_device = physical_device;
best_device_score = device_score;
}
DEBUG_PRINTF(" - Device suitable.\n");
}
if (m_physical_device == VK_NULL_HANDLE) {
fprintf(stderr, "Error: No suitable Vulkan Device found!\n");
return GHOST_kFailure;
}
return GHOST_kSuccess;
}
static GHOST_TSuccess getGraphicQueueFamily(VkPhysicalDevice device, uint32_t *r_queue_index)
{
uint32_t queue_family_count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, NULL);
vector<VkQueueFamilyProperties> queue_families(queue_family_count);
vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, queue_families.data());
*r_queue_index = 0;
for (const auto &queue_family : queue_families) {
/* By spec, every Vulkan implementation must have at least one queue family that supports both
* graphics and compute pipelines. We select this one; a compute-only queue family hints at an
* async compute implementation. */
if ((queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) &&
(queue_family.queueFlags & VK_QUEUE_COMPUTE_BIT))
{
return GHOST_kSuccess;
}
(*r_queue_index)++;
}
fprintf(stderr, "Couldn't find any Graphic queue family on selected device\n");
return GHOST_kFailure;
}
static GHOST_TSuccess getPresetQueueFamily(VkPhysicalDevice device,
VkSurfaceKHR surface,
uint32_t *r_queue_index)
{
uint32_t queue_family_count = 0;
vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, NULL);
vector<VkQueueFamilyProperties> queue_families(queue_family_count);
vkGetPhysicalDeviceQueueFamilyProperties(device, &queue_family_count, queue_families.data());
*r_queue_index = 0;
for (int i = 0; i < queue_family_count; i++) {
VkBool32 present_support = false;
vkGetPhysicalDeviceSurfaceSupportKHR(device, *r_queue_index, surface, &present_support);
if (present_support) {
return GHOST_kSuccess;
}
(*r_queue_index)++;
}
fprintf(stderr, "Couldn't find any Present queue family on selected device\n");
return GHOST_kFailure;
}
static GHOST_TSuccess create_render_pass(VkDevice device,
VkFormat format,
VkRenderPass *r_renderPass)
@@ -623,7 +709,7 @@ static GHOST_TSuccess create_render_pass(VkDevice device,
VkAttachmentDescription colorAttachment = {};
colorAttachment.format = format;
colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
@@ -681,17 +767,19 @@ static GHOST_TSuccess selectPresentMode(VkPhysicalDevice device,
GHOST_TSuccess GHOST_ContextVK::createCommandPools()
{
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
poolInfo.queueFamilyIndex = m_queue_family_graphic;
poolInfo.queueFamilyIndex = vulkan_device->generic_queue_family;
VK_CHECK(vkCreateCommandPool(m_device, &poolInfo, NULL, &m_command_pool));
VK_CHECK(vkCreateCommandPool(vulkan_device->device, &poolInfo, NULL, &m_command_pool));
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::createGraphicsCommandBuffer()
{
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
assert(m_command_pool != VK_NULL_HANDLE);
assert(m_command_buffers.size() == 0);
m_command_buffers.resize(1);
@@ -701,12 +789,13 @@ GHOST_TSuccess GHOST_ContextVK::createGraphicsCommandBuffer()
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = static_cast<uint32_t>(m_command_buffers.size());
VK_CHECK(vkAllocateCommandBuffers(m_device, &alloc_info, m_command_buffers.data()));
VK_CHECK(vkAllocateCommandBuffers(vulkan_device->device, &alloc_info, m_command_buffers.data()));
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::createGraphicsCommandBuffers()
{
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
assert(m_command_pool != VK_NULL_HANDLE);
m_command_buffers.resize(m_swapchain_image_views.size());
@@ -716,31 +805,32 @@ GHOST_TSuccess GHOST_ContextVK::createGraphicsCommandBuffers()
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = static_cast<uint32_t>(m_command_buffers.size());
VK_CHECK(vkAllocateCommandBuffers(m_device, &alloc_info, m_command_buffers.data()));
VK_CHECK(vkAllocateCommandBuffers(vulkan_device->device, &alloc_info, m_command_buffers.data()));
return GHOST_kSuccess;
}
GHOST_TSuccess GHOST_ContextVK::createSwapchain()
{
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
m_swapchain_id++;
VkPhysicalDevice device = m_physical_device;
VkPhysicalDevice physical_device = vulkan_device->physical_device;
uint32_t format_count;
vkGetPhysicalDeviceSurfaceFormatsKHR(device, m_surface, &format_count, NULL);
vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, m_surface, &format_count, NULL);
vector<VkSurfaceFormatKHR> formats(format_count);
vkGetPhysicalDeviceSurfaceFormatsKHR(device, m_surface, &format_count, formats.data());
vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, m_surface, &format_count, formats.data());
/* TODO choose appropriate format. */
VkSurfaceFormatKHR format = formats[0];
VkPresentModeKHR present_mode;
if (!selectPresentMode(device, m_surface, &present_mode)) {
if (!selectPresentMode(physical_device, m_surface, &present_mode)) {
return GHOST_kFailure;
}
VkSurfaceCapabilitiesKHR capabilities;
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, m_surface, &capabilities);
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, m_surface, &capabilities);
m_render_extent = capabilities.currentExtent;
if (m_render_extent.width == UINT32_MAX) {
@@ -771,34 +861,25 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
create_info.imageColorSpace = format.colorSpace;
create_info.imageExtent = m_render_extent;
create_info.imageArrayLayers = 1;
create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
create_info.preTransform = capabilities.currentTransform;
create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
create_info.presentMode = present_mode;
create_info.clipped = VK_TRUE;
create_info.oldSwapchain = VK_NULL_HANDLE; /* TODO Window resize */
create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
create_info.queueFamilyIndexCount = 0;
create_info.pQueueFamilyIndices = NULL;
uint32_t queueFamilyIndices[] = {m_queue_family_graphic, m_queue_family_present};
VkDevice device = vulkan_device->device;
VK_CHECK(vkCreateSwapchainKHR(device, &create_info, NULL, &m_swapchain));
if (m_queue_family_graphic != m_queue_family_present) {
create_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
create_info.queueFamilyIndexCount = 2;
create_info.pQueueFamilyIndices = queueFamilyIndices;
}
else {
create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
create_info.queueFamilyIndexCount = 0;
create_info.pQueueFamilyIndices = NULL;
}
VK_CHECK(vkCreateSwapchainKHR(m_device, &create_info, NULL, &m_swapchain));
create_render_pass(m_device, format.format, &m_render_pass);
create_render_pass(device, format.format, &m_render_pass);
/* image_count may not be what we requested! Query the final value. */
vkGetSwapchainImagesKHR(m_device, m_swapchain, &image_count, NULL);
vkGetSwapchainImagesKHR(device, m_swapchain, &image_count, NULL);
m_swapchain_images.resize(image_count);
vkGetSwapchainImagesKHR(m_device, m_swapchain, &image_count, m_swapchain_images.data());
vkGetSwapchainImagesKHR(device, m_swapchain, &image_count, m_swapchain_images.data());
m_in_flight_images.resize(image_count, VK_NULL_HANDLE);
m_swapchain_image_views.resize(image_count);
@@ -821,7 +902,7 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
view_create_info.subresourceRange.baseArrayLayer = 0;
view_create_info.subresourceRange.layerCount = 1;
VK_CHECK(vkCreateImageView(m_device, &view_create_info, NULL, &m_swapchain_image_views[i]));
VK_CHECK(vkCreateImageView(device, &view_create_info, NULL, &m_swapchain_image_views[i]));
VkImageView attachments[] = {m_swapchain_image_views[i]};
@@ -834,7 +915,7 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
fb_create_info.height = m_render_extent.height;
fb_create_info.layers = 1;
VK_CHECK(vkCreateFramebuffer(m_device, &fb_create_info, NULL, &m_swapchain_framebuffers[i]));
VK_CHECK(vkCreateFramebuffer(device, &fb_create_info, NULL, &m_swapchain_framebuffers[i]));
}
m_image_available_semaphores.resize(MAX_FRAMES_IN_FLIGHT);
@@ -845,14 +926,14 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
VkSemaphoreCreateInfo semaphore_info = {};
semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VK_CHECK(vkCreateSemaphore(m_device, &semaphore_info, NULL, &m_image_available_semaphores[i]));
VK_CHECK(vkCreateSemaphore(m_device, &semaphore_info, NULL, &m_render_finished_semaphores[i]));
VK_CHECK(vkCreateSemaphore(device, &semaphore_info, NULL, &m_image_available_semaphores[i]));
VK_CHECK(vkCreateSemaphore(device, &semaphore_info, NULL, &m_render_finished_semaphores[i]));
VkFenceCreateInfo fence_info = {};
fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
VK_CHECK(vkCreateFence(m_device, &fence_info, NULL, &m_in_flight_fences[i]));
VK_CHECK(vkCreateFence(device, &fence_info, NULL, &m_in_flight_fences[i]));
}
createGraphicsCommandBuffers();
@@ -929,23 +1010,28 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
extensions_available, extensions_enabled, "VK_KHR_get_physical_device_properties2");
#endif
VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
app_info.pApplicationName = "Blender";
app_info.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
app_info.pEngineName = "Blender";
app_info.engineVersion = VK_MAKE_VERSION(1, 0, 0);
app_info.apiVersion = VK_MAKE_VERSION(m_context_major_version, m_context_minor_version, 0);
VkInstance instance = VK_NULL_HANDLE;
if (!vulkan_device.has_value()) {
VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
app_info.pApplicationName = "Blender";
app_info.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
app_info.pEngineName = "Blender";
app_info.engineVersion = VK_MAKE_VERSION(1, 0, 0);
app_info.apiVersion = VK_MAKE_VERSION(m_context_major_version, m_context_minor_version, 0);
VkInstanceCreateInfo create_info = {};
create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
create_info.pApplicationInfo = &app_info;
create_info.enabledLayerCount = static_cast<uint32_t>(layers_enabled.size());
create_info.ppEnabledLayerNames = layers_enabled.data();
create_info.enabledExtensionCount = static_cast<uint32_t>(extensions_enabled.size());
create_info.ppEnabledExtensionNames = extensions_enabled.data();
VK_CHECK(vkCreateInstance(&create_info, NULL, &m_instance));
VkInstanceCreateInfo create_info = {};
create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
create_info.pApplicationInfo = &app_info;
create_info.enabledLayerCount = static_cast<uint32_t>(layers_enabled.size());
create_info.ppEnabledLayerNames = layers_enabled.data();
create_info.enabledExtensionCount = static_cast<uint32_t>(extensions_enabled.size());
create_info.ppEnabledExtensionNames = extensions_enabled.data();
VK_CHECK(vkCreateInstance(&create_info, NULL, &instance));
}
else {
instance = vulkan_device->instance;
}
if (use_window_surface) {
#ifdef _WIN32
@@ -953,14 +1039,14 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
surface_create_info.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
surface_create_info.hinstance = GetModuleHandle(NULL);
surface_create_info.hwnd = m_hwnd;
VK_CHECK(vkCreateWin32SurfaceKHR(m_instance, &surface_create_info, NULL, &m_surface));
VK_CHECK(vkCreateWin32SurfaceKHR(instance, &surface_create_info, NULL, &m_surface));
#elif defined(__APPLE__)
VkMetalSurfaceCreateInfoEXT info = {};
info.sType = VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT;
info.pNext = NULL;
info.flags = 0;
info.pLayer = m_metal_layer;
VK_CHECK(vkCreateMetalSurfaceEXT(m_instance, &info, nullptr, &m_surface));
VK_CHECK(vkCreateMetalSurfaceEXT(instance, &info, nullptr, &m_surface));
#else
switch (m_platform) {
case GHOST_kVulkanPlatformX11: {
@@ -968,7 +1054,7 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
surface_create_info.sType = VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR;
surface_create_info.dpy = m_display;
surface_create_info.window = m_window;
VK_CHECK(vkCreateXlibSurfaceKHR(m_instance, &surface_create_info, NULL, &m_surface));
VK_CHECK(vkCreateXlibSurfaceKHR(instance, &surface_create_info, NULL, &m_surface));
break;
}
# ifdef WITH_GHOST_WAYLAND
@@ -977,7 +1063,7 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
surface_create_info.sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR;
surface_create_info.display = m_wayland_display;
surface_create_info.surface = m_wayland_surface;
VK_CHECK(vkCreateWaylandSurfaceKHR(m_instance, &surface_create_info, NULL, &m_surface));
VK_CHECK(vkCreateWaylandSurfaceKHR(instance, &surface_create_info, NULL, &m_surface));
break;
}
# endif
@@ -986,7 +1072,7 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
#endif
}
if (!pickPhysicalDevice(extensions_device)) {
if (!ensure_vulkan_device(instance, m_surface, extensions_device)) {
return GHOST_kFailure;
}
@@ -994,73 +1080,22 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
/* According to the Vulkan specs, when `VK_KHR_portability_subset` is available it should be
* enabled. See
* https://vulkan.lunarg.com/doc/view/1.2.198.1/mac/1.2-extensions/vkspec.html#VUID-VkDeviceCreateInfo-pProperties-04451*/
if (device_extensions_support(m_physical_device, {VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME})) {
if (device_extensions_support(vulkan_device->physical_device,
{VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME}))
{
extensions_device.push_back(VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME);
}
#endif
vulkan_device->users++;
vulkan_device->ensure_device(layers_enabled, extensions_device);
vector<VkDeviceQueueCreateInfo> queue_create_infos;
{
/* A graphic queue is required to draw anything. */
if (!getGraphicQueueFamily(m_physical_device, &m_queue_family_graphic)) {
return GHOST_kFailure;
}
float queue_priorities[] = {1.0f};
VkDeviceQueueCreateInfo graphic_queue_create_info = {};
graphic_queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
graphic_queue_create_info.queueFamilyIndex = m_queue_family_graphic;
graphic_queue_create_info.queueCount = 1;
graphic_queue_create_info.pQueuePriorities = queue_priorities;
queue_create_infos.push_back(graphic_queue_create_info);
}
if (use_window_surface) {
/* A present queue is required only if we render to a window. */
if (!getPresetQueueFamily(m_physical_device, m_surface, &m_queue_family_present)) {
return GHOST_kFailure;
}
float queue_priorities[] = {1.0f};
VkDeviceQueueCreateInfo present_queue_create_info = {};
present_queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
present_queue_create_info.queueFamilyIndex = m_queue_family_present;
present_queue_create_info.queueCount = 1;
present_queue_create_info.pQueuePriorities = queue_priorities;
/* Each queue must be unique. */
if (m_queue_family_graphic != m_queue_family_present) {
queue_create_infos.push_back(present_queue_create_info);
}
}
VkPhysicalDeviceFeatures device_features = {};
#if STRICT_REQUIREMENTS
device_features.geometryShader = VK_TRUE;
device_features.dualSrcBlend = VK_TRUE;
device_features.logicOp = VK_TRUE;
#endif
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.queueCreateInfoCount = static_cast<uint32_t>(queue_create_infos.size());
device_create_info.pQueueCreateInfos = queue_create_infos.data();
/* layers_enabled are the same as the instance layers.
* This is only needed for Vulkan 1.0 implementations. */
device_create_info.enabledLayerCount = static_cast<uint32_t>(layers_enabled.size());
device_create_info.ppEnabledLayerNames = layers_enabled.data();
device_create_info.enabledExtensionCount = static_cast<uint32_t>(extensions_device.size());
device_create_info.ppEnabledExtensionNames = extensions_device.data();
device_create_info.pEnabledFeatures = &device_features;
VK_CHECK(vkCreateDevice(m_physical_device, &device_create_info, NULL, &m_device));
vkGetDeviceQueue(m_device, m_queue_family_graphic, 0, &m_graphic_queue);
vkGetDeviceQueue(
vulkan_device->device, vulkan_device->generic_queue_family, 0, &m_graphic_queue);
createCommandPools();
if (use_window_surface) {
vkGetDeviceQueue(m_device, m_queue_family_present, 0, &m_present_queue);
vkGetDeviceQueue(
vulkan_device->device, vulkan_device->generic_queue_family, 0, &m_present_queue);
createSwapchain();
}
else {

View File

@@ -164,14 +164,8 @@ class GHOST_ContextVK : public GHOST_Context {
const int m_context_minor_version;
const int m_debug;
VkInstance m_instance;
VkPhysicalDevice m_physical_device;
VkDevice m_device;
VkCommandPool m_command_pool;
uint32_t m_queue_family_graphic;
uint32_t m_queue_family_present;
VkQueue m_graphic_queue;
VkQueue m_present_queue;
@@ -196,7 +190,6 @@ class GHOST_ContextVK : public GHOST_Context {
uint32_t m_swapchain_id = 0;
const char *getPlatformSpecificSurfaceExtension() const;
GHOST_TSuccess pickPhysicalDevice(std::vector<const char *> required_exts);
GHOST_TSuccess createSwapchain();
GHOST_TSuccess destroySwapchain();
GHOST_TSuccess createCommandPools();

View File

@@ -208,6 +208,7 @@ set(VULKAN_SRC
vulkan/vk_debug.cc
vulkan/vk_descriptor_pools.cc
vulkan/vk_descriptor_set.cc
vulkan/vk_device.cc
vulkan/vk_drawlist.cc
vulkan/vk_fence.cc
vulkan/vk_framebuffer.cc
@@ -238,6 +239,7 @@ set(VULKAN_SRC
vulkan/vk_debug.hh
vulkan/vk_descriptor_pools.hh
vulkan/vk_descriptor_set.hh
vulkan/vk_device.hh
vulkan/vk_drawlist.hh
vulkan/vk_fence.hh
vulkan/vk_framebuffer.hh

View File

@@ -52,7 +52,12 @@ void VKBackend::platform_exit()
GPG.clear();
}
void VKBackend::delete_resources() {}
void VKBackend::delete_resources()
{
if (device_.is_initialized()) {
device_.deinit();
}
}
void VKBackend::samplers_update() {}
@@ -151,9 +156,10 @@ shaderc::Compiler &VKBackend::get_shaderc_compiler()
return shaderc_compiler_;
}
void VKBackend::capabilities_init(VKContext &context)
void VKBackend::capabilities_init()
{
const VkPhysicalDeviceLimits limits = context.physical_device_limits_get();
const VkPhysicalDeviceLimits &limits =
VKBackend::get().device_get().physical_device_limits_get();
/* Reset all capabilities from previous context. */
GCaps = {};

View File

@@ -14,12 +14,15 @@
#endif
#include "vk_common.hh"
#include "vk_device.hh"
#include "shaderc/shaderc.hpp"
namespace blender::gpu {
class VKContext;
class VKDescriptorSet;
class VKDescriptorSetTracker;
class VKBackend : public GPUBackend {
private:
@@ -27,6 +30,8 @@ class VKBackend : public GPUBackend {
#ifdef WITH_RENDERDOC
renderdoc::api::Renderdoc renderdoc_api_;
#endif
/* Globally shared instance and device handles. */
VKDevice device_;
public:
VKBackend()
@@ -66,21 +71,31 @@ class VKBackend : public GPUBackend {
void render_end() override;
void render_step() override;
bool debug_capture_begin(VkInstance vk_instance);
void debug_capture_end(VkInstance vk_instance);
bool debug_capture_begin();
void debug_capture_end();
shaderc::Compiler &get_shaderc_compiler();
static void capabilities_init(VKContext &context);
static void capabilities_init();
static VKBackend &get()
{
return *static_cast<VKBackend *>(GPUBackend::get());
}
const VKDevice &device_get() const
{
return device_;
}
private:
static void init_platform();
static void platform_exit();
/* These classes are allowed to modify the global device. */
friend class VKContext;
friend class VKDescriptorSet;
friend class VKDescriptorSetTracker;
};
} // namespace blender::gpu
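As a rough usage sketch (not part of this patch; `upload_example` is a hypothetical caller), code in the GPU module is expected to reach the shared handles through the backend, with mutable access reserved for the friend classes listed above:

/* Hypothetical caller somewhere in the GPU module. */
void upload_example()
{
  const VKDevice &device = VKBackend::get().device_get();
  VkDevice vk_device = device.device_get();
  const VkPhysicalDeviceLimits &limits = device.physical_device_limits_get();
  /* ... create Vulkan resources with `vk_device`, clamp sizes against `limits`. */
  UNUSED_VARS(vk_device, limits);
}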

View File

@@ -6,14 +6,14 @@
*/
#include "vk_buffer.hh"
#include "vk_backend.hh"
#include "vk_context.hh"
namespace blender::gpu {
VKBuffer::~VKBuffer()
{
VKContext &context = *VKContext::get();
free(context);
free();
}
bool VKBuffer::is_allocated() const
@@ -41,16 +41,16 @@ static VmaAllocationCreateFlagBits vma_allocation_flags(GPUUsageType usage)
VMA_ALLOCATION_CREATE_MAPPED_BIT);
}
bool VKBuffer::create(VKContext &context,
int64_t size_in_bytes,
bool VKBuffer::create(int64_t size_in_bytes,
GPUUsageType usage,
VkBufferUsageFlagBits buffer_usage)
{
BLI_assert(!is_allocated());
size_in_bytes_ = size_in_bytes;
const VKDevice &device = VKBackend::get().device_get();
VmaAllocator allocator = context.mem_allocator_get();
VmaAllocator allocator = device.mem_allocator_get();
VkBufferCreateInfo create_info = {};
create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
create_info.flags = 0;
@@ -60,7 +60,7 @@ bool VKBuffer::create(VKContext &context,
* exclusive resource handling. */
create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
create_info.queueFamilyIndexCount = 1;
create_info.pQueueFamilyIndices = context.queue_family_ptr_get();
create_info.pQueueFamilyIndices = device.queue_family_ptr_get();
VmaAllocationCreateInfo vma_create_info = {};
vma_create_info.flags = vma_allocation_flags(usage);
@@ -74,7 +74,7 @@ bool VKBuffer::create(VKContext &context,
}
/* All buffers are mapped to virtual memory. */
return map(context);
return map();
}
void VKBuffer::update(const void *data) const
@@ -82,9 +82,9 @@ void VKBuffer::update(const void *data) const
BLI_assert_msg(is_mapped(), "Cannot update a non-mapped buffer.");
memcpy(mapped_memory_, data, size_in_bytes_);
VKContext &context = *VKContext::get();
VmaAllocator mem_allocator = context.mem_allocator_get();
vmaFlushAllocation(mem_allocator, allocation_, 0, VK_WHOLE_SIZE);
const VKDevice &device = VKBackend::get().device_get();
VmaAllocator allocator = device.mem_allocator_get();
vmaFlushAllocation(allocator, allocation_, 0, VK_WHOLE_SIZE);
}
void VKBuffer::clear(VKContext &context, uint32_t clear_value)
@@ -110,29 +110,32 @@ bool VKBuffer::is_mapped() const
return mapped_memory_ != nullptr;
}
bool VKBuffer::map(VKContext &context)
bool VKBuffer::map()
{
BLI_assert(!is_mapped());
VmaAllocator allocator = context.mem_allocator_get();
const VKDevice &device = VKBackend::get().device_get();
VmaAllocator allocator = device.mem_allocator_get();
VkResult result = vmaMapMemory(allocator, allocation_, &mapped_memory_);
return result == VK_SUCCESS;
}
void VKBuffer::unmap(VKContext &context)
void VKBuffer::unmap()
{
BLI_assert(is_mapped());
VmaAllocator allocator = context.mem_allocator_get();
const VKDevice &device = VKBackend::get().device_get();
VmaAllocator allocator = device.mem_allocator_get();
vmaUnmapMemory(allocator, allocation_);
mapped_memory_ = nullptr;
}
bool VKBuffer::free(VKContext &context)
bool VKBuffer::free()
{
if (is_mapped()) {
unmap(context);
unmap();
}
VmaAllocator allocator = context.mem_allocator_get();
const VKDevice &device = VKBackend::get().device_get();
VmaAllocator allocator = device.mem_allocator_get();
vmaDestroyBuffer(allocator, vk_buffer_, allocation_);
return true;
}
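With the `VKContext` parameter gone from the entire buffer API, a typical call site reduces to something like the following sketch (hedged: the size, usage flags and `data` are placeholders, not taken from this patch):

{
  VKBuffer buffer;
  /* Allocates through the device-owned VMA allocator and maps persistently. */
  buffer.create(1024, GPU_USAGE_STATIC, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
  float data[256] = {};
  buffer.update(data); /* memcpy into the mapped pointer, then flush. */
} /* ~VKBuffer() calls free(), which unmaps and releases the allocation. */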

View File

@@ -31,14 +31,11 @@ class VKBuffer {
/** Has this buffer been allocated? */
bool is_allocated() const;
bool create(VKContext &context,
int64_t size,
GPUUsageType usage,
VkBufferUsageFlagBits buffer_usage);
bool create(int64_t size, GPUUsageType usage, VkBufferUsageFlagBits buffer_usage);
void clear(VKContext &context, uint32_t clear_value);
void update(const void *data) const;
void read(void *data) const;
bool free(VKContext &context);
bool free();
int64_t size_in_bytes() const
{
@@ -60,8 +57,8 @@ class VKBuffer {
private:
/** Check if this buffer is mapped. */
bool is_mapped() const;
bool map(VKContext &context);
void unmap(VKContext &context);
bool map();
void unmap();
};
} // namespace blender::gpu

View File

@@ -18,56 +18,23 @@ namespace blender::gpu {
VKContext::VKContext(void *ghost_window, void *ghost_context)
{
VK_ALLOCATION_CALLBACKS;
ghost_window_ = ghost_window;
if (ghost_window) {
ghost_context = GHOST_GetDrawingContext((GHOST_WindowHandle)ghost_window);
}
ghost_context_ = ghost_context;
GHOST_GetVulkanHandles((GHOST_ContextHandle)ghost_context,
&vk_instance_,
&vk_physical_device_,
&vk_device_,
&vk_queue_family_,
&vk_queue_);
debug::init_callbacks(this, vkGetInstanceProcAddr);
init_physical_device_limits();
debug::object_label(this, vk_device_, "LogicalDevice");
debug::object_label(this, vk_queue_, "GenericQueue");
/* Initialize the memory allocator. */
VmaAllocatorCreateInfo info = {};
info.vulkanApiVersion = VK_API_VERSION_1_2;
info.physicalDevice = vk_physical_device_;
info.device = vk_device_;
info.instance = vk_instance_;
info.pAllocationCallbacks = vk_allocation_callbacks;
vmaCreateAllocator(&info, &mem_allocator_);
descriptor_pools_.init(vk_device_);
VKDevice &device = VKBackend::get().device_;
if (!device.is_initialized()) {
device.init(ghost_context);
}
state_manager = new VKStateManager();
VKBackend::capabilities_init(*this);
/* For off-screen contexts. Default frame-buffer is empty. */
back_left = new VKFrameBuffer("back_left");
}
VKContext::~VKContext()
{
vmaDestroyAllocator(mem_allocator_);
debug::destroy_callbacks(this);
}
void VKContext::init_physical_device_limits()
{
BLI_assert(vk_physical_device_ != VK_NULL_HANDLE);
VkPhysicalDeviceProperties properties = {};
vkGetPhysicalDeviceProperties(vk_physical_device_, &properties);
vk_physical_device_limits_ = properties.limits;
}
VKContext::~VKContext() {}
void VKContext::activate()
{
@@ -100,10 +67,10 @@ void VKContext::begin_frame()
{
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
GHOST_GetVulkanCommandBuffer(static_cast<GHOST_ContextHandle>(ghost_context_), &command_buffer);
command_buffer_.init(vk_device_, vk_queue_, command_buffer);
VKDevice &device = VKBackend::get().device_;
command_buffer_.init(device.device_get(), device.queue_get(), command_buffer);
command_buffer_.begin_recording();
descriptor_pools_.reset();
device.descriptor_pools_get().reset();
}
void VKContext::end_frame()

View File

@@ -19,23 +19,7 @@ class VKStateManager;
class VKContext : public Context, NonCopyable {
private:
/** Copies of the handles owned by the GHOST context. */
VkInstance vk_instance_ = VK_NULL_HANDLE;
VkPhysicalDevice vk_physical_device_ = VK_NULL_HANDLE;
VkDevice vk_device_ = VK_NULL_HANDLE;
VKCommandBuffer command_buffer_;
uint32_t vk_queue_family_ = 0;
VkQueue vk_queue_ = VK_NULL_HANDLE;
/** Allocator used for texture and buffers and other resources. */
VmaAllocator mem_allocator_ = VK_NULL_HANDLE;
VKDescriptorPools descriptor_pools_;
/** Limits of the device linked to this context. */
VkPhysicalDeviceLimits vk_physical_device_limits_;
/** Functions of vk_ext_debugutils to use in this context. */
debug::VKDebuggingTools debugging_tools_;
void *ghost_context_;
@@ -69,66 +53,15 @@ class VKContext : public Context, NonCopyable {
return static_cast<VKContext *>(Context::get());
}
VkPhysicalDevice physical_device_get() const
{
return vk_physical_device_;
}
const VkPhysicalDeviceLimits &physical_device_limits_get() const
{
return vk_physical_device_limits_;
}
VkInstance instance_get() const
{
return vk_instance_;
};
VkDevice device_get() const
{
return vk_device_;
}
VKCommandBuffer &command_buffer_get()
{
return command_buffer_;
}
VkQueue queue_get() const
{
return vk_queue_;
}
const uint32_t *queue_family_ptr_get() const
{
return &vk_queue_family_;
}
VKDescriptorPools &descriptor_pools_get()
{
return descriptor_pools_;
}
const VKStateManager &state_manager_get() const;
VmaAllocator mem_allocator_get() const
{
return mem_allocator_;
}
debug::VKDebuggingTools &debugging_tools_get()
{
return debugging_tools_;
}
const debug::VKDebuggingTools &debugging_tools_get() const
{
return debugging_tools_;
}
private:
void init_physical_device_limits();
bool has_active_framebuffer() const;
};

View File

@@ -14,40 +14,39 @@
namespace blender::gpu {
void VKContext::debug_group_begin(const char *name, int)
{
debug::push_marker(this, vk_queue_, name);
const VKDevice &device = VKBackend::get().device_get();
debug::push_marker(device, name);
}
void VKContext::debug_group_end()
{
debug::pop_marker(this, vk_queue_);
const VKDevice &device = VKBackend::get().device_get();
debug::pop_marker(device);
}
bool VKContext::debug_capture_begin()
{
return VKBackend::get().debug_capture_begin(vk_instance_);
return VKBackend::get().debug_capture_begin();
}
bool VKBackend::debug_capture_begin(VkInstance vk_instance)
bool VKBackend::debug_capture_begin()
{
#ifdef WITH_RENDERDOC
return renderdoc_api_.start_frame_capture(vk_instance, nullptr);
return renderdoc_api_.start_frame_capture(device_get().instance_get(), nullptr);
#else
UNUSED_VARS(vk_instance);
return false;
#endif
}
void VKContext::debug_capture_end()
{
VKBackend::get().debug_capture_end(vk_instance_);
VKBackend::get().debug_capture_end();
}
void VKBackend::debug_capture_end(VkInstance vk_instance)
void VKBackend::debug_capture_end()
{
#ifdef WITH_RENDERDOC
renderdoc_api_.end_frame_capture(vk_instance, nullptr);
#else
UNUSED_VARS(vk_instance);
renderdoc_api_.end_frame_capture(device_get().instance_get(), nullptr);
#endif
}
@@ -66,96 +65,74 @@ void VKContext::debug_capture_scope_end(void * /*scope*/) {}
namespace blender::gpu::debug {
static void load_dynamic_functions(VKContext *context,
PFN_vkGetInstanceProcAddr instance_proc_addr)
void VKDebuggingTools::init(VkInstance vk_instance)
{
VKDebuggingTools &debugging_tools = context->debugging_tools_get();
VkInstance vk_instance = context->instance_get();
if (instance_proc_addr) {
debugging_tools.enabled = false;
debugging_tools.vkCmdBeginDebugUtilsLabelEXT_r = (PFN_vkCmdBeginDebugUtilsLabelEXT)
instance_proc_addr(vk_instance, "vkCmdBeginDebugUtilsLabelEXT");
debugging_tools.vkCmdEndDebugUtilsLabelEXT_r = (PFN_vkCmdEndDebugUtilsLabelEXT)
instance_proc_addr(vk_instance, "vkCmdEndDebugUtilsLabelEXT");
debugging_tools.vkCmdInsertDebugUtilsLabelEXT_r = (PFN_vkCmdInsertDebugUtilsLabelEXT)
instance_proc_addr(vk_instance, "vkCmdInsertDebugUtilsLabelEXT");
debugging_tools.vkCreateDebugUtilsMessengerEXT_r = (PFN_vkCreateDebugUtilsMessengerEXT)
instance_proc_addr(vk_instance, "vkCreateDebugUtilsMessengerEXT");
debugging_tools.vkDestroyDebugUtilsMessengerEXT_r = (PFN_vkDestroyDebugUtilsMessengerEXT)
instance_proc_addr(vk_instance, "vkDestroyDebugUtilsMessengerEXT");
debugging_tools.vkQueueBeginDebugUtilsLabelEXT_r = (PFN_vkQueueBeginDebugUtilsLabelEXT)
instance_proc_addr(vk_instance, "vkQueueBeginDebugUtilsLabelEXT");
debugging_tools.vkQueueEndDebugUtilsLabelEXT_r = (PFN_vkQueueEndDebugUtilsLabelEXT)
instance_proc_addr(vk_instance, "vkQueueEndDebugUtilsLabelEXT");
debugging_tools.vkQueueInsertDebugUtilsLabelEXT_r = (PFN_vkQueueInsertDebugUtilsLabelEXT)
instance_proc_addr(vk_instance, "vkQueueInsertDebugUtilsLabelEXT");
debugging_tools.vkSetDebugUtilsObjectNameEXT_r = (PFN_vkSetDebugUtilsObjectNameEXT)
instance_proc_addr(vk_instance, "vkSetDebugUtilsObjectNameEXT");
debugging_tools.vkSetDebugUtilsObjectTagEXT_r = (PFN_vkSetDebugUtilsObjectTagEXT)
instance_proc_addr(vk_instance, "vkSetDebugUtilsObjectTagEXT");
debugging_tools.vkSubmitDebugUtilsMessageEXT_r = (PFN_vkSubmitDebugUtilsMessageEXT)
instance_proc_addr(vk_instance, "vkSubmitDebugUtilsMessageEXT");
if (debugging_tools.vkCmdBeginDebugUtilsLabelEXT_r) {
debugging_tools.enabled = true;
}
}
else {
debugging_tools.vkCmdBeginDebugUtilsLabelEXT_r = nullptr;
debugging_tools.vkCmdEndDebugUtilsLabelEXT_r = nullptr;
debugging_tools.vkCmdInsertDebugUtilsLabelEXT_r = nullptr;
debugging_tools.vkCreateDebugUtilsMessengerEXT_r = nullptr;
debugging_tools.vkDestroyDebugUtilsMessengerEXT_r = nullptr;
debugging_tools.vkQueueBeginDebugUtilsLabelEXT_r = nullptr;
debugging_tools.vkQueueEndDebugUtilsLabelEXT_r = nullptr;
debugging_tools.vkQueueInsertDebugUtilsLabelEXT_r = nullptr;
debugging_tools.vkSetDebugUtilsObjectNameEXT_r = nullptr;
debugging_tools.vkSetDebugUtilsObjectTagEXT_r = nullptr;
debugging_tools.vkSubmitDebugUtilsMessageEXT_r = nullptr;
debugging_tools.enabled = false;
PFN_vkGetInstanceProcAddr instance_proc_addr = vkGetInstanceProcAddr;
enabled = false;
vkCmdBeginDebugUtilsLabelEXT_r = (PFN_vkCmdBeginDebugUtilsLabelEXT)instance_proc_addr(
vk_instance, "vkCmdBeginDebugUtilsLabelEXT");
vkCmdEndDebugUtilsLabelEXT_r = (PFN_vkCmdEndDebugUtilsLabelEXT)instance_proc_addr(
vk_instance, "vkCmdEndDebugUtilsLabelEXT");
vkCmdInsertDebugUtilsLabelEXT_r = (PFN_vkCmdInsertDebugUtilsLabelEXT)instance_proc_addr(
vk_instance, "vkCmdInsertDebugUtilsLabelEXT");
vkCreateDebugUtilsMessengerEXT_r = (PFN_vkCreateDebugUtilsMessengerEXT)instance_proc_addr(
vk_instance, "vkCreateDebugUtilsMessengerEXT");
vkDestroyDebugUtilsMessengerEXT_r = (PFN_vkDestroyDebugUtilsMessengerEXT)instance_proc_addr(
vk_instance, "vkDestroyDebugUtilsMessengerEXT");
vkQueueBeginDebugUtilsLabelEXT_r = (PFN_vkQueueBeginDebugUtilsLabelEXT)instance_proc_addr(
vk_instance, "vkQueueBeginDebugUtilsLabelEXT");
vkQueueEndDebugUtilsLabelEXT_r = (PFN_vkQueueEndDebugUtilsLabelEXT)instance_proc_addr(
vk_instance, "vkQueueEndDebugUtilsLabelEXT");
vkQueueInsertDebugUtilsLabelEXT_r = (PFN_vkQueueInsertDebugUtilsLabelEXT)instance_proc_addr(
vk_instance, "vkQueueInsertDebugUtilsLabelEXT");
vkSetDebugUtilsObjectNameEXT_r = (PFN_vkSetDebugUtilsObjectNameEXT)instance_proc_addr(
vk_instance, "vkSetDebugUtilsObjectNameEXT");
vkSetDebugUtilsObjectTagEXT_r = (PFN_vkSetDebugUtilsObjectTagEXT)instance_proc_addr(
vk_instance, "vkSetDebugUtilsObjectTagEXT");
vkSubmitDebugUtilsMessageEXT_r = (PFN_vkSubmitDebugUtilsMessageEXT)instance_proc_addr(
vk_instance, "vkSubmitDebugUtilsMessageEXT");
if (vkCmdBeginDebugUtilsLabelEXT_r) {
enabled = true;
}
}
bool init_callbacks(VKContext *context, PFN_vkGetInstanceProcAddr instance_proc_addr)
void VKDebuggingTools::deinit()
{
if (instance_proc_addr) {
load_dynamic_functions(context, instance_proc_addr);
return true;
};
return false;
vkCmdBeginDebugUtilsLabelEXT_r = nullptr;
vkCmdEndDebugUtilsLabelEXT_r = nullptr;
vkCmdInsertDebugUtilsLabelEXT_r = nullptr;
vkCreateDebugUtilsMessengerEXT_r = nullptr;
vkDestroyDebugUtilsMessengerEXT_r = nullptr;
vkQueueBeginDebugUtilsLabelEXT_r = nullptr;
vkQueueEndDebugUtilsLabelEXT_r = nullptr;
vkQueueInsertDebugUtilsLabelEXT_r = nullptr;
vkSetDebugUtilsObjectNameEXT_r = nullptr;
vkSetDebugUtilsObjectTagEXT_r = nullptr;
vkSubmitDebugUtilsMessageEXT_r = nullptr;
enabled = false;
}
void destroy_callbacks(VKContext *context)
{
VKDebuggingTools &debugging_tools = context->debugging_tools_get();
if (debugging_tools.enabled) {
load_dynamic_functions(context, nullptr);
}
}
void object_label(VKContext *context,
VkObjectType vk_object_type,
uint64_t object_handle,
const char *name)
void object_label(VkObjectType vk_object_type, uint64_t object_handle, const char *name)
{
if (G.debug & G_DEBUG_GPU) {
const VKDebuggingTools &debugging_tools = context->debugging_tools_get();
const VKDevice &device = VKBackend::get().device_get();
const VKDebuggingTools &debugging_tools = device.debugging_tools_get();
if (debugging_tools.enabled) {
VkDebugUtilsObjectNameInfoEXT info = {};
info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
info.objectType = vk_object_type;
info.objectHandle = object_handle;
info.pObjectName = name;
debugging_tools.vkSetDebugUtilsObjectNameEXT_r(context->device_get(), &info);
debugging_tools.vkSetDebugUtilsObjectNameEXT_r(device.device_get(), &info);
}
}
}
void push_marker(VKContext *context, VkCommandBuffer vk_command_buffer, const char *name)
void push_marker(VkCommandBuffer vk_command_buffer, const char *name)
{
if (G.debug & G_DEBUG_GPU) {
const VKDebuggingTools &debugging_tools = context->debugging_tools_get();
const VKDevice &device = VKBackend::get().device_get();
const VKDebuggingTools &debugging_tools = device.debugging_tools_get();
if (debugging_tools.enabled) {
VkDebugUtilsLabelEXT info = {};
info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
@@ -165,10 +142,11 @@ void push_marker(VKContext *context, VkCommandBuffer vk_command_buffer, const ch
}
}
void set_marker(VKContext *context, VkCommandBuffer vk_command_buffer, const char *name)
void set_marker(VkCommandBuffer vk_command_buffer, const char *name)
{
if (G.debug & G_DEBUG_GPU) {
const VKDebuggingTools &debugging_tools = context->debugging_tools_get();
const VKDevice &device = VKBackend::get().device_get();
const VKDebuggingTools &debugging_tools = device.debugging_tools_get();
if (debugging_tools.enabled) {
VkDebugUtilsLabelEXT info = {};
info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
@@ -178,49 +156,51 @@ void set_marker(VKContext *context, VkCommandBuffer vk_command_buffer, const cha
}
}
void pop_marker(VKContext *context, VkCommandBuffer vk_command_buffer)
void pop_marker(VkCommandBuffer vk_command_buffer)
{
if (G.debug & G_DEBUG_GPU) {
const VKDebuggingTools &debugging_tools = context->debugging_tools_get();
const VKDevice &device = VKBackend::get().device_get();
const VKDebuggingTools &debugging_tools = device.debugging_tools_get();
if (debugging_tools.enabled) {
debugging_tools.vkCmdEndDebugUtilsLabelEXT_r(vk_command_buffer);
}
}
}
void push_marker(VKContext *context, VkQueue vk_queue, const char *name)
void push_marker(const VKDevice &device, const char *name)
{
if (G.debug & G_DEBUG_GPU) {
const VKDebuggingTools &debugging_tools = context->debugging_tools_get();
const VKDebuggingTools &debugging_tools = device.debugging_tools_get();
if (debugging_tools.enabled) {
VkDebugUtilsLabelEXT info = {};
info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
info.pLabelName = name;
debugging_tools.vkQueueBeginDebugUtilsLabelEXT_r(vk_queue, &info);
debugging_tools.vkQueueBeginDebugUtilsLabelEXT_r(device.queue_get(), &info);
}
}
}
void set_marker(VKContext *context, VkQueue vk_queue, const char *name)
void set_marker(const VKDevice &device, const char *name)
{
if (G.debug & G_DEBUG_GPU) {
const VKDebuggingTools &debugging_tools = context->debugging_tools_get();
const VKDebuggingTools &debugging_tools = device.debugging_tools_get();
if (debugging_tools.enabled) {
VkDebugUtilsLabelEXT info = {};
info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
info.pLabelName = name;
debugging_tools.vkQueueInsertDebugUtilsLabelEXT_r(vk_queue, &info);
debugging_tools.vkQueueInsertDebugUtilsLabelEXT_r(device.queue_get(), &info);
}
}
}
void pop_marker(VKContext *context, VkQueue vk_queue)
void pop_marker(const VKDevice &device)
{
if (G.debug & G_DEBUG_GPU) {
const VKDebuggingTools &debugging_tools = context->debugging_tools_get();
const VKDebuggingTools &debugging_tools = device.debugging_tools_get();
if (debugging_tools.enabled) {
debugging_tools.vkQueueEndDebugUtilsLabelEXT_r(vk_queue);
debugging_tools.vkQueueEndDebugUtilsLabelEXT_r(device.queue_get());
}
}
}
} // namespace blender::gpu::debug
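A short usage sketch of the reworked marker API (assumptions: `vk_command_buffer` stands for whatever command buffer is currently being recorded, and the call site is hypothetical):

const VKDevice &device = VKBackend::get().device_get();

/* Queue-level markers now take the device instead of a context plus VkQueue. */
debug::push_marker(device, "Frame");
/* Command-buffer level markers only need the VkCommandBuffer itself. */
debug::push_marker(vk_command_buffer, "UploadPass");
/* ... record commands ... */
debug::pop_marker(vk_command_buffer);
debug::pop_marker(device);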

View File

@@ -15,8 +15,10 @@
namespace blender::gpu {
class VKContext;
class VKDevice;
namespace debug {
typedef struct VKDebuggingTools {
struct VKDebuggingTools {
bool enabled = false;
/* Function pointer definitions. */
PFN_vkCreateDebugUtilsMessengerEXT vkCreateDebugUtilsMessengerEXT_r = nullptr;
@@ -31,16 +33,12 @@ typedef struct VKDebuggingTools {
PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT_r = nullptr;
PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT_r = nullptr;
} VKDebuggingTools;
void init(VkInstance vk_instance);
void deinit();
};
bool init_callbacks(VKContext *context, PFN_vkGetInstanceProcAddr instance_proc_addr);
void destroy_callbacks(VKContext *context);
void object_label(VKContext *context,
VkObjectType vk_object_type,
uint64_t object_handle,
const char *name);
template<typename T> void object_label(VKContext *context, T vk_object_type, const char *name)
void object_label(VkObjectType vk_object_type, uint64_t object_handle, const char *name);
template<typename T> void object_label(T vk_object_type, const char *name)
{
if (!(G.debug & G_DEBUG_GPU)) {
return;
@@ -50,15 +48,14 @@ template<typename T> void object_label(VKContext *context, T vk_object_type, con
memset(label, 0, label_size);
static int stats = 0;
SNPRINTF(label, "%s_%d", name, stats++);
object_label(
context, to_vk_object_type(vk_object_type), (uint64_t)vk_object_type, (const char *)label);
object_label(to_vk_object_type(vk_object_type), (uint64_t)vk_object_type, (const char *)label);
};
void push_marker(VKContext *context, VkCommandBuffer vk_command_buffer, const char *name);
void set_marker(VKContext *context, VkCommandBuffer vk_command_buffer, const char *name);
void pop_marker(VKContext *context, VkCommandBuffer vk_command_buffer);
void push_marker(VKContext *context, VkQueue vk_queue, const char *name);
void set_marker(VKContext *context, VkQueue vk_queue, const char *name);
void pop_marker(VKContext *context, VkQueue vk_queue);
void push_marker(VkCommandBuffer vk_command_buffer, const char *name);
void set_marker(VkCommandBuffer vk_command_buffer, const char *name);
void pop_marker(VkCommandBuffer vk_command_buffer);
void push_marker(const VKDevice &device, const char *name);
void set_marker(const VKDevice &device, const char *name);
void pop_marker(const VKDevice &device);
} // namespace debug
} // namespace blender::gpu

View File

@ -27,8 +27,9 @@ VKDescriptorSet::~VKDescriptorSet()
{
if (vk_descriptor_set_ != VK_NULL_HANDLE) {
/* Handle should be given back to the pool. */
VKContext &context = *VKContext::get();
context.descriptor_pools_get().free(*this);
BLI_assert(VKContext::get());
VKDevice &device = VKBackend::get().device_;
device.descriptor_pools_get().free(*this);
BLI_assert(vk_descriptor_set_ == VK_NULL_HANDLE);
}
}
@ -150,16 +151,17 @@ void VKDescriptorSetTracker::update(VKContext &context)
"Not all changes have been converted to a write descriptor. Check "
"`Binding::is_buffer` and `Binding::is_image`.");
VkDevice vk_device = context.device_get();
const VKDevice &device = VKBackend::get().device_get();
vkUpdateDescriptorSets(
vk_device, descriptor_writes.size(), descriptor_writes.data(), 0, nullptr);
device.device_get(), descriptor_writes.size(), descriptor_writes.data(), 0, nullptr);
bindings_.clear();
}
std::unique_ptr<VKDescriptorSet> VKDescriptorSetTracker::create_resource(VKContext &context)
{
return context.descriptor_pools_get().allocate(layout_);
VKDevice &device = VKBackend::get().device_;
return device.descriptor_pools_get().allocate(layout_);
}
} // namespace blender::gpu
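
A sketch of the new ownership flow: descriptor sets are allocated from the backend-owned pools and hand themselves back on destruction. The `layout` variable stands in for a previously created `VkDescriptorSetLayout` and is an assumption, not part of this diff:

VKDevice &device = VKBackend::get().device_;
std::unique_ptr<VKDescriptorSet> descriptor_set =
    device.descriptor_pools_get().allocate(layout);
/* ... bind and use the set ... */
/* When `descriptor_set` goes out of scope, ~VKDescriptorSet() returns the
 * handle to the same device-owned pools. */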

View File

@ -0,0 +1,90 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation */
/** \file
* \ingroup gpu
*/
#include "vk_device.hh"
#include "vk_backend.hh"
#include "vk_memory.hh"
#include "GHOST_C-api.h"
namespace blender::gpu {
void VKDevice::deinit()
{
vmaDestroyAllocator(mem_allocator_);
mem_allocator_ = VK_NULL_HANDLE;
debugging_tools_.deinit();
vk_instance_ = VK_NULL_HANDLE;
vk_physical_device_ = VK_NULL_HANDLE;
vk_device_ = VK_NULL_HANDLE;
vk_queue_family_ = 0;
vk_queue_ = VK_NULL_HANDLE;
vk_physical_device_limits_ = {};
}
bool VKDevice::is_initialized() const
{
return vk_device_ != VK_NULL_HANDLE;
}
void VKDevice::init(void *ghost_context)
{
BLI_assert(!is_initialized());
GHOST_GetVulkanHandles((GHOST_ContextHandle)ghost_context,
&vk_instance_,
&vk_physical_device_,
&vk_device_,
&vk_queue_family_,
&vk_queue_);
init_physical_device_limits();
init_capabilities();
init_debug_callbacks();
init_memory_allocator();
init_descriptor_pools();
debug::object_label(device_get(), "LogicalDevice");
debug::object_label(queue_get(), "GenericQueue");
}
void VKDevice::init_debug_callbacks()
{
debugging_tools_.init(vk_instance_);
}
void VKDevice::init_physical_device_limits()
{
BLI_assert(vk_physical_device_ != VK_NULL_HANDLE);
VkPhysicalDeviceProperties properties = {};
vkGetPhysicalDeviceProperties(vk_physical_device_, &properties);
vk_physical_device_limits_ = properties.limits;
}
void VKDevice::init_capabilities()
{
VKBackend::capabilities_init();
}
void VKDevice::init_memory_allocator()
{
VK_ALLOCATION_CALLBACKS;
VmaAllocatorCreateInfo info = {};
info.vulkanApiVersion = VK_API_VERSION_1_2;
info.physicalDevice = vk_physical_device_;
info.device = vk_device_;
info.instance = vk_instance_;
info.pAllocationCallbacks = vk_allocation_callbacks;
vmaCreateAllocator(&info, &mem_allocator_);
}
void VKDevice::init_descriptor_pools()
{
descriptor_pools_.init(vk_device_);
}
} // namespace blender::gpu
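
A sketch of the intended call site, assuming the backend initializes its device from the first GHOST context it sees; the guard and variable names here are illustrative:

/* Illustrative wiring; `ghost_context` is the void * handed to the backend. */
VKDevice &device = VKBackend::get().device_;
if (!device.is_initialized()) {
  /* Copies the instance/physical-device/device/queue handles from GHOST and
   * sets up VMA, the descriptor pools and the debug callbacks. */
  device.init(ghost_context);
}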

View File

@ -0,0 +1,100 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation */
/** \file
* \ingroup gpu
*/
#pragma once
#include "BLI_utility_mixins.hh"
#include "vk_common.hh"
#include "vk_debug.hh"
#include "vk_descriptor_pools.hh"
namespace blender::gpu {
class VKDevice : public NonCopyable {
private:
/** Copies of the handles owned by the GHOST context. */
VkInstance vk_instance_ = VK_NULL_HANDLE;
VkPhysicalDevice vk_physical_device_ = VK_NULL_HANDLE;
VkDevice vk_device_ = VK_NULL_HANDLE;
uint32_t vk_queue_family_ = 0;
VkQueue vk_queue_ = VK_NULL_HANDLE;
/** Allocator used for textures, buffers and other resources. */
VmaAllocator mem_allocator_ = VK_NULL_HANDLE;
VKDescriptorPools descriptor_pools_;
/** Limits of the physical device. */
VkPhysicalDeviceLimits vk_physical_device_limits_;
/** Function pointers of VK_EXT_debug_utils for this device/instance. */
debug::VKDebuggingTools debugging_tools_;
public:
VkPhysicalDevice physical_device_get() const
{
return vk_physical_device_;
}
const VkPhysicalDeviceLimits &physical_device_limits_get() const
{
return vk_physical_device_limits_;
}
VkInstance instance_get() const
{
return vk_instance_;
};
VkDevice device_get() const
{
return vk_device_;
}
VkQueue queue_get() const
{
return vk_queue_;
}
VKDescriptorPools &descriptor_pools_get()
{
return descriptor_pools_;
}
const uint32_t *queue_family_ptr_get() const
{
return &vk_queue_family_;
}
VmaAllocator mem_allocator_get() const
{
return mem_allocator_;
}
debug::VKDebuggingTools &debugging_tools_get()
{
return debugging_tools_;
}
const debug::VKDebuggingTools &debugging_tools_get() const
{
return debugging_tools_;
}
bool is_initialized() const;
void init(void *ghost_context);
void deinit();
private:
void init_physical_device_limits();
void init_capabilities();
void init_debug_callbacks();
void init_memory_allocator();
void init_descriptor_pools();
};
} // namespace blender::gpu
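
Call sites throughout this patch reach the shared device via the backend singleton; a minimal sketch using only the accessors declared above:

const VKDevice &device = VKBackend::get().device_get();
VkDevice vk_device = device.device_get();            /* For vkCreate/vkDestroy calls. */
VmaAllocator allocator = device.mem_allocator_get(); /* For VMA allocations. */
const VkPhysicalDeviceLimits &limits = device.physical_device_limits_get();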

View File

@ -6,6 +6,7 @@
*/
#include "vk_framebuffer.hh"
#include "vk_backend.hh"
#include "vk_memory.hh"
#include "vk_texture.hh"
@ -344,9 +345,9 @@ void VKFrameBuffer::render_pass_create()
render_pass_info.subpassCount = 1;
render_pass_info.pSubpasses = &subpass;
VKContext &context = *VKContext::get();
const VKDevice &device = VKBackend::get().device_get();
vkCreateRenderPass(
context.device_get(), &render_pass_info, vk_allocation_callbacks, &vk_render_pass_);
device.device_get(), &render_pass_info, vk_allocation_callbacks, &vk_render_pass_);
/* We might want to split frame-buffer and render pass. */
VkFramebufferCreateInfo framebuffer_create_info = {};
@ -359,7 +360,7 @@ void VKFrameBuffer::render_pass_create()
framebuffer_create_info.layers = 1;
vkCreateFramebuffer(
context.device_get(), &framebuffer_create_info, vk_allocation_callbacks, &vk_framebuffer_);
device.device_get(), &framebuffer_create_info, vk_allocation_callbacks, &vk_framebuffer_);
}
void VKFrameBuffer::render_pass_free()
@ -370,9 +371,9 @@ void VKFrameBuffer::render_pass_free()
}
VK_ALLOCATION_CALLBACKS
VKContext &context = *VKContext::get();
vkDestroyRenderPass(context.device_get(), vk_render_pass_, vk_allocation_callbacks);
vkDestroyFramebuffer(context.device_get(), vk_framebuffer_, vk_allocation_callbacks);
const VKDevice &device = VKBackend::get().device_get();
vkDestroyRenderPass(device.device_get(), vk_render_pass_, vk_allocation_callbacks);
vkDestroyFramebuffer(device.device_get(), vk_framebuffer_, vk_allocation_callbacks);
vk_render_pass_ = VK_NULL_HANDLE;
vk_framebuffer_ = VK_NULL_HANDLE;
}

View File

@ -18,9 +18,8 @@ void VKIndexBuffer::ensure_updated()
return;
}
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
allocate();
}
if (data_ != nullptr) {
@ -64,15 +63,14 @@ void VKIndexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*data
void VKIndexBuffer::strip_restart_indices() {}
void VKIndexBuffer::allocate(VKContext &context)
void VKIndexBuffer::allocate()
{
GPUUsageType usage = data_ == nullptr ? GPU_USAGE_DEVICE_ONLY : GPU_USAGE_STATIC;
buffer_.create(context,
size_get(),
buffer_.create(size_get(),
usage,
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT));
debug::object_label(&context, buffer_.vk_handle(), "IndexBuffer");
debug::object_label(buffer_.vk_handle(), "IndexBuffer");
}
} // namespace blender::gpu

View File

@ -33,7 +33,7 @@ class VKIndexBuffer : public IndexBuf {
private:
void strip_restart_indices() override;
void allocate(VKContext &context);
void allocate();
void ensure_updated();
};

View File

@ -6,6 +6,7 @@
*/
#include "vk_pipeline.hh"
#include "vk_backend.hh"
#include "vk_context.hh"
#include "vk_memory.hh"
@ -23,21 +24,20 @@ VKPipeline::VKPipeline(VkPipeline vk_pipeline,
VKPipeline::~VKPipeline()
{
VK_ALLOCATION_CALLBACKS
VkDevice vk_device = VKContext::get()->device_get();
const VKDevice &device = VKBackend::get().device_get();
if (vk_pipeline_ != VK_NULL_HANDLE) {
vkDestroyPipeline(vk_device, vk_pipeline_, vk_allocation_callbacks);
vkDestroyPipeline(device.device_get(), vk_pipeline_, vk_allocation_callbacks);
}
}
VKPipeline VKPipeline::create_compute_pipeline(
VKContext &context,
VkShaderModule compute_module,
VkDescriptorSetLayout &descriptor_set_layout,
VkPipelineLayout &pipeline_layout,
const VKPushConstants::Layout &push_constants_layout)
{
VK_ALLOCATION_CALLBACKS
VkDevice vk_device = context.device_get();
const VKDevice &device = VKBackend::get().device_get();
VkComputePipelineCreateInfo pipeline_info = {};
pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
pipeline_info.flags = 0;
@ -50,9 +50,12 @@ VKPipeline VKPipeline::create_compute_pipeline(
pipeline_info.stage.pName = "main";
VkPipeline vk_pipeline;
if (vkCreateComputePipelines(
vk_device, nullptr, 1, &pipeline_info, vk_allocation_callbacks, &vk_pipeline) !=
VK_SUCCESS)
if (vkCreateComputePipelines(device.device_get(),
nullptr,
1,
&pipeline_info,
vk_allocation_callbacks,
&vk_pipeline) != VK_SUCCESS)
{
return VKPipeline();
}

View File

@ -40,8 +40,7 @@ class VKPipeline : NonCopyable {
return *this;
}
static VKPipeline create_compute_pipeline(VKContext &context,
VkShaderModule compute_module,
static VKPipeline create_compute_pipeline(VkShaderModule compute_module,
VkDescriptorSetLayout &descriptor_set_layout,
VkPipelineLayout &pipeline_layout,
const VKPushConstants::Layout &push_constants_layout);

View File

@ -13,12 +13,11 @@ namespace blender::gpu {
VKPixelBuffer::VKPixelBuffer(int64_t size) : PixelBuffer(size)
{
VKContext &context = *VKContext::get();
buffer_.create(context,
size,
buffer_.create(size,
GPU_USAGE_STATIC,
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT));
debug::object_label(buffer_.vk_handle(), "PixelBuffer");
}
void *VKPixelBuffer::map()

View File

@ -561,10 +561,9 @@ void VKShader::build_shader_module(Span<uint32_t> spirv_module, VkShaderModule *
create_info.codeSize = spirv_module.size() * sizeof(uint32_t);
create_info.pCode = spirv_module.data();
VKContext &context = *static_cast<VKContext *>(VKContext::get());
const VKDevice &device = VKBackend::get().device_get();
VkResult result = vkCreateShaderModule(
context.device_get(), &create_info, vk_allocation_callbacks, r_shader_module);
device.device_get(), &create_info, vk_allocation_callbacks, r_shader_module);
if (result != VK_SUCCESS) {
compilation_failed_ = true;
*r_shader_module = VK_NULL_HANDLE;
@ -580,29 +579,29 @@ VKShader::~VKShader()
{
VK_ALLOCATION_CALLBACKS
VkDevice device = context_->device_get();
const VKDevice &device = VKBackend::get().device_get();
if (vertex_module_ != VK_NULL_HANDLE) {
vkDestroyShaderModule(device, vertex_module_, vk_allocation_callbacks);
vkDestroyShaderModule(device.device_get(), vertex_module_, vk_allocation_callbacks);
vertex_module_ = VK_NULL_HANDLE;
}
if (geometry_module_ != VK_NULL_HANDLE) {
vkDestroyShaderModule(device, geometry_module_, vk_allocation_callbacks);
vkDestroyShaderModule(device.device_get(), geometry_module_, vk_allocation_callbacks);
geometry_module_ = VK_NULL_HANDLE;
}
if (fragment_module_ != VK_NULL_HANDLE) {
vkDestroyShaderModule(device, fragment_module_, vk_allocation_callbacks);
vkDestroyShaderModule(device.device_get(), fragment_module_, vk_allocation_callbacks);
fragment_module_ = VK_NULL_HANDLE;
}
if (compute_module_ != VK_NULL_HANDLE) {
vkDestroyShaderModule(device, compute_module_, vk_allocation_callbacks);
vkDestroyShaderModule(device.device_get(), compute_module_, vk_allocation_callbacks);
compute_module_ = VK_NULL_HANDLE;
}
if (pipeline_layout_ != VK_NULL_HANDLE) {
vkDestroyPipelineLayout(device, pipeline_layout_, vk_allocation_callbacks);
vkDestroyPipelineLayout(device.device_get(), pipeline_layout_, vk_allocation_callbacks);
pipeline_layout_ = VK_NULL_HANDLE;
}
if (layout_ != VK_NULL_HANDLE) {
vkDestroyDescriptorSetLayout(device, layout_, vk_allocation_callbacks);
vkDestroyDescriptorSetLayout(device.device_get(), layout_, vk_allocation_callbacks);
layout_ = VK_NULL_HANDLE;
}
}
@ -653,11 +652,11 @@ bool VKShader::finalize(const shader::ShaderCreateInfo *info)
VKShaderInterface *vk_interface = new VKShaderInterface();
vk_interface->init(*info);
VkDevice vk_device = context_->device_get();
if (!finalize_descriptor_set_layouts(vk_device, *vk_interface, *info)) {
const VKDevice &device = VKBackend::get().device_get();
if (!finalize_descriptor_set_layouts(device.device_get(), *vk_interface, *info)) {
return false;
}
if (!finalize_pipeline_layout(vk_device, *vk_interface)) {
if (!finalize_pipeline_layout(device.device_get(), *vk_interface)) {
return false;
}
@ -668,7 +667,7 @@ bool VKShader::finalize(const shader::ShaderCreateInfo *info)
BLI_assert((fragment_module_ != VK_NULL_HANDLE && info->tf_type_ == GPU_SHADER_TFB_NONE) ||
(fragment_module_ == VK_NULL_HANDLE && info->tf_type_ != GPU_SHADER_TFB_NONE));
BLI_assert(compute_module_ == VK_NULL_HANDLE);
result = finalize_graphics_pipeline(vk_device);
result = finalize_graphics_pipeline(device.device_get());
}
else {
BLI_assert(vertex_module_ == VK_NULL_HANDLE);
@ -676,11 +675,7 @@ bool VKShader::finalize(const shader::ShaderCreateInfo *info)
BLI_assert(fragment_module_ == VK_NULL_HANDLE);
BLI_assert(compute_module_ != VK_NULL_HANDLE);
compute_pipeline_ = VKPipeline::create_compute_pipeline(
*context_,
compute_module_,
layout_,
pipeline_layout_,
vk_interface->push_constants_layout_get());
compute_module_, layout_, pipeline_layout_, vk_interface->push_constants_layout_get());
result = compute_pipeline_.is_valid();
}

View File

@ -6,6 +6,7 @@
*/
#include "vk_shader_interface.hh"
#include "vk_backend.hh"
#include "vk_context.hh"
namespace blender::gpu {
@ -47,9 +48,9 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
/* Reserve 1 uniform buffer for push constants fallback. */
size_t names_size = info.interface_names_size_;
VKContext &context = *VKContext::get();
const VKDevice &device = VKBackend::get().device_get();
const VKPushConstants::StorageType push_constants_storage_type =
VKPushConstants::Layout::determine_storage_type(info, context.physical_device_limits_get());
VKPushConstants::Layout::determine_storage_type(info, device.physical_device_limits_get());
if (push_constants_storage_type == VKPushConstants::StorageType::UNIFORM_BUFFER) {
ubo_len_++;
names_size += PUSH_CONSTANTS_FALLBACK_NAME_LEN + 1;

View File

@ -15,26 +15,25 @@ namespace blender::gpu {
void VKStorageBuffer::update(const void *data)
{
if (!buffer_.is_allocated()) {
VKContext &context = *VKContext::get();
allocate(context);
allocate();
}
buffer_.update(data);
}
void VKStorageBuffer::allocate(VKContext &context)
void VKStorageBuffer::allocate()
{
buffer_.create(context,
size_in_bytes_,
buffer_.create(size_in_bytes_,
usage_,
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT));
debug::object_label(buffer_.vk_handle(), name_);
}
void VKStorageBuffer::bind(int slot)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
allocate();
}
VKShader *shader = static_cast<VKShader *>(context.shader);
const VKShaderInterface &shader_interface = shader->interface_get();
@ -49,7 +48,7 @@ void VKStorageBuffer::clear(uint32_t clear_value)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
allocate();
}
buffer_.clear(context, clear_value);
}
@ -63,11 +62,11 @@ void VKStorageBuffer::copy_sub(VertBuf * /*src*/,
void VKStorageBuffer::read(void *data)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
allocate();
}
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.submit();

View File

@ -45,7 +45,7 @@ class VKStorageBuffer : public StorageBuf {
}
private:
void allocate(VKContext &context);
void allocate();
};
} // namespace blender::gpu
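
These buffer call sites, here and in the uniform and vertex buffers below, assume a matching `VKBuffer::create()` overload without the context parameter; the signature below is inferred from the calls in this diff, not shown in it:

/* Inferred signature; the implementation would resolve the allocator via
 * VKBackend::get().device_get().mem_allocator_get(). */
bool VKBuffer::create(int64_t size_in_bytes,
                      GPUUsageType usage,
                      VkBufferUsageFlagBits buffer_usage);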

View File

@ -25,9 +25,9 @@ VKTexture::~VKTexture()
{
VK_ALLOCATION_CALLBACKS
VKContext &context = *VKContext::get();
vmaDestroyImage(context.mem_allocator_get(), vk_image_, allocation_);
vkDestroyImageView(context.device_get(), vk_image_view_, vk_allocation_callbacks);
const VKDevice &device = VKBackend::get().device_get();
vmaDestroyImage(device.mem_allocator_get(), vk_image_, allocation_);
vkDestroyImageView(device.device_get(), vk_image_view_, vk_allocation_callbacks);
}
void VKTexture::generate_mipmap() {}
@ -75,7 +75,7 @@ void *VKTexture::read(int mip, eGPUDataFormat format)
size_t host_memory_size = sample_len * to_bytesize(format_, format);
staging_buffer.create(
context, device_memory_size, GPU_USAGE_DEVICE_ONLY, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
device_memory_size, GPU_USAGE_DEVICE_ONLY, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
VkBufferImageCopy region = {};
region.imageExtent.width = extent[0];
@ -113,7 +113,7 @@ void VKTexture::update_sub(
size_t device_memory_size = sample_len * to_bytesize(format_);
staging_buffer.create(
context, device_memory_size, GPU_USAGE_DEVICE_ONLY, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
device_memory_size, GPU_USAGE_DEVICE_ONLY, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
uint buffer_row_length = context.state_manager_get().texture_unpack_row_length_get();
if (buffer_row_length) {
@ -234,6 +234,7 @@ bool VKTexture::allocate()
mip_size_get(0, extent);
VKContext &context = *VKContext::get();
const VKDevice &device = VKBackend::get().device_get();
VkImageCreateInfo image_info = {};
image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_info.imageType = to_vk_image_type(type_);
@ -256,7 +257,7 @@ bool VKTexture::allocate()
VkResult result;
if (G.debug & G_DEBUG_GPU) {
VkImageFormatProperties image_format = {};
result = vkGetPhysicalDeviceImageFormatProperties(context.physical_device_get(),
result = vkGetPhysicalDeviceImageFormatProperties(device.physical_device_get(),
image_info.format,
image_info.imageType,
image_info.tiling,
@ -272,7 +273,7 @@ bool VKTexture::allocate()
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
allocCreateInfo.priority = 1.0f;
result = vmaCreateImage(context.mem_allocator_get(),
result = vmaCreateImage(device.mem_allocator_get(),
&image_info,
&allocCreateInfo,
&vk_image_,
@ -281,7 +282,7 @@ bool VKTexture::allocate()
if (result != VK_SUCCESS) {
return false;
}
debug::object_label(&context, vk_image_, name_);
debug::object_label(vk_image_, name_);
/* Promote image to the correct layout. */
layout_ensure(context, VK_IMAGE_LAYOUT_GENERAL);
@ -298,8 +299,8 @@ bool VKTexture::allocate()
image_view_info.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
result = vkCreateImageView(
context.device_get(), &image_view_info, vk_allocation_callbacks, &vk_image_view_);
debug::object_label(&context, vk_image_view_, name_);
device.device_get(), &image_view_info, vk_allocation_callbacks, &vk_image_view_);
debug::object_label(vk_image_view_, name_);
return result == VK_SUCCESS;
}

View File

@ -15,34 +15,33 @@ namespace blender::gpu {
void VKUniformBuffer::update(const void *data)
{
if (!buffer_.is_allocated()) {
VKContext &context = *VKContext::get();
allocate(context);
allocate();
}
buffer_.update(data);
}
void VKUniformBuffer::allocate(VKContext &context)
void VKUniformBuffer::allocate()
{
buffer_.create(context, size_in_bytes_, GPU_USAGE_STATIC, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
debug::object_label(&context, buffer_.vk_handle(), name_);
buffer_.create(size_in_bytes_, GPU_USAGE_STATIC, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
debug::object_label(buffer_.vk_handle(), name_);
}
void VKUniformBuffer::clear_to_zero()
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
allocate();
}
VKContext &context = *VKContext::get();
buffer_.clear(context, 0);
}
void VKUniformBuffer::bind(int slot, shader::ShaderCreateInfo::Resource::BindType bind_type)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
allocate();
}
VKContext &context = *VKContext::get();
VKShader *shader = static_cast<VKShader *>(context.shader);
const VKShaderInterface &shader_interface = shader->interface_get();
const VKDescriptorSet::Location location = shader_interface.descriptor_set_location(bind_type,

View File

@ -38,7 +38,7 @@ class VKUniformBuffer : public UniformBuf, NonCopyable {
}
private:
void allocate(VKContext &context);
void allocate();
void bind(int slot, shader::ShaderCreateInfo::Resource::BindType bind_type);
};

View File

@ -20,11 +20,11 @@ VKVertexBuffer::~VKVertexBuffer()
void VKVertexBuffer::bind_as_ssbo(uint binding)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
allocate();
}
VKContext &context = *VKContext::get();
VKShader *shader = static_cast<VKShader *>(context.shader);
const VKShaderInterface &shader_interface = shader->interface_get();
const VKDescriptorSet::Location location = shader_interface.descriptor_set_location(
@ -68,13 +68,13 @@ void VKVertexBuffer::upload_data() {}
void VKVertexBuffer::duplicate_data(VertBuf * /*dst*/) {}
void VKVertexBuffer::allocate(VKContext &context)
void VKVertexBuffer::allocate()
{
buffer_.create(context,
size_used_get(),
buffer_.create(size_used_get(),
usage_,
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
debug::object_label(buffer_.vk_handle(), "VertexBuffer");
}
} // namespace blender::gpu

View File

@ -39,7 +39,7 @@ class VKVertexBuffer : public VertBuf {
void duplicate_data(VertBuf *dst) override;
private:
void allocate(VKContext &context);
void allocate();
};
} // namespace blender::gpu