diff --git a/source/blender/gpu/vulkan/vk_context.cc b/source/blender/gpu/vulkan/vk_context.cc
index e9b23d0fd61..c2d8890eef2 100644
--- a/source/blender/gpu/vulkan/vk_context.cc
+++ b/source/blender/gpu/vulkan/vk_context.cc
@@ -126,6 +126,17 @@ void VKContext::finish()
 
 void VKContext::memory_statistics_get(int * /*total_mem*/, int * /*free_mem*/) {}
 
+/* -------------------------------------------------------------------- */
+/** \name State manager
+ * \{ */
+
+const VKStateManager &VKContext::state_manager_get() const
+{
+  return *static_cast<const VKStateManager *>(state_manager);
+}
+
+/** \} */
+
 void VKContext::activate_framebuffer(VKFrameBuffer &framebuffer)
 {
   if (has_active_framebuffer()) {
diff --git a/source/blender/gpu/vulkan/vk_context.hh b/source/blender/gpu/vulkan/vk_context.hh
index b65fa13edd7..35980051386 100644
--- a/source/blender/gpu/vulkan/vk_context.hh
+++ b/source/blender/gpu/vulkan/vk_context.hh
@@ -15,8 +15,9 @@
 namespace blender::gpu {
 
 class VKFrameBuffer;
+class VKStateManager;
 
-class VKContext : public Context {
+class VKContext : public Context, NonCopyable {
  private:
   /** Copies of the handles owned by the GHOST context. */
   VkInstance vk_instance_ = VK_NULL_HANDLE;
@@ -108,6 +109,8 @@ class VKContext : public Context {
     return descriptor_pools_;
   }
 
+  const VKStateManager &state_manager_get() const;
+
   VmaAllocator mem_allocator_get() const
   {
     return mem_allocator_;
diff --git a/source/blender/gpu/vulkan/vk_data_conversion.cc b/source/blender/gpu/vulkan/vk_data_conversion.cc
index 5522e79c45b..c44e6b6f2d2 100644
--- a/source/blender/gpu/vulkan/vk_data_conversion.cc
+++ b/source/blender/gpu/vulkan/vk_data_conversion.cc
@@ -764,6 +764,28 @@ void convert_host_to_device(void *dst_buffer,
   convert_buffer(dst_buffer, src_buffer, buffer_size, device_format, conversion_type);
 }
 
+void convert_host_to_device(void *dst_buffer,
+                            const void *src_buffer,
+                            uint2 src_size,
+                            uint src_row_length,
+                            eGPUDataFormat host_format,
+                            eGPUTextureFormat device_format)
+{
+  const uint8_t *src = static_cast<const uint8_t *>(src_buffer);
+  uint8_t *dst = static_cast<uint8_t *>(dst_buffer);
+  ConversionType conversion_type = host_to_device(host_format, device_format);
+  size_t src_row_len = src_row_length * to_bytesize(device_format, host_format);
+  size_t dst_row_len = src_size.x * to_bytesize(device_format);
+
+  for (uint row : IndexRange(src_size.y)) {
+    convert_buffer(&dst[dst_row_len * row],
+                   &src[src_row_len * row],
+                   src_size.x,
+                   device_format,
+                   conversion_type);
+  }
+}
+
 void convert_device_to_host(void *dst_buffer,
                             const void *src_buffer,
                             size_t buffer_size,
diff --git a/source/blender/gpu/vulkan/vk_data_conversion.hh b/source/blender/gpu/vulkan/vk_data_conversion.hh
index 1ca3111d49d..f9fe41da782 100644
--- a/source/blender/gpu/vulkan/vk_data_conversion.hh
+++ b/source/blender/gpu/vulkan/vk_data_conversion.hh
@@ -7,6 +7,8 @@
 
 #pragma once
 
+#include "BLI_math_vector_types.hh"
+
 #include "gpu_texture_private.hh"
 
 namespace blender::gpu {
@@ -17,7 +19,7 @@ namespace blender::gpu {
  * \param dst_buffer: device buffer.
  * \param src_buffer: host buffer.
  * \param buffer_size: number of pixels to convert from the start of the given buffer.
- * \param host_format: format of the host buffer
+ * \param host_format: format of the host buffer.
 * \param device_format: format of the device buffer.
 *
 * \note Will assert when the host_format/device_format combination isn't valid
@@ -30,6 +32,27 @@ void convert_host_to_device(void *dst_buffer,
                             eGPUDataFormat host_format,
                             eGPUTextureFormat device_format);
 
+/**
+ * Convert host buffer to device buffer with row length.
+ *
+ * \param dst_buffer: device buffer.
+ * \param src_buffer: host buffer.
+ * \param src_size: size of the host buffer.
+ * \param src_row_length: Length of a single row of the buffer (in pixels).
+ * \param host_format: format of the host buffer.
+ * \param device_format: format of the device buffer.
+ *
+ * \note Will assert when the host_format/device_format combination isn't valid
+ * (#validate_data_format) or supported. Some combinations aren't supported in Vulkan due to
+ * platform incompatibility.
+ */
+void convert_host_to_device(void *dst_buffer,
+                            const void *src_buffer,
+                            uint2 src_size,
+                            uint src_row_length,
+                            eGPUDataFormat host_format,
+                            eGPUTextureFormat device_format);
+
 /**
  * Convert device buffer to host buffer.
  *
diff --git a/source/blender/gpu/vulkan/vk_state_manager.cc b/source/blender/gpu/vulkan/vk_state_manager.cc
index 3c284e07a61..5815b9cae6a 100644
--- a/source/blender/gpu/vulkan/vk_state_manager.cc
+++ b/source/blender/gpu/vulkan/vk_state_manager.cc
@@ -38,6 +38,14 @@ void VKStateManager::image_unbind(Texture * /*tex*/) {}
 
 void VKStateManager::image_unbind_all() {}
 
-void VKStateManager::texture_unpack_row_length_set(uint /*len*/) {}
+void VKStateManager::texture_unpack_row_length_set(uint len)
+{
+  texture_unpack_row_length_ = len;
+}
+
+uint VKStateManager::texture_unpack_row_length_get() const
+{
+  return texture_unpack_row_length_;
+}
 
 }  // namespace blender::gpu
diff --git a/source/blender/gpu/vulkan/vk_state_manager.hh b/source/blender/gpu/vulkan/vk_state_manager.hh
index f6aae2db04f..de12c0b5457 100644
--- a/source/blender/gpu/vulkan/vk_state_manager.hh
+++ b/source/blender/gpu/vulkan/vk_state_manager.hh
@@ -11,6 +11,8 @@
 namespace blender::gpu {
 
 class VKStateManager : public StateManager {
+  uint texture_unpack_row_length_ = 0;
+
  public:
   void apply_state() override;
   void force_state() override;
@@ -26,5 +28,12 @@ class VKStateManager : public StateManager {
   void image_unbind_all() override;
 
   void texture_unpack_row_length_set(uint len) override;
+
+  /**
+   * Row length for unpacking host data when uploading texture data.
+   *
+   * When set to zero (0), host data can be assumed to be stored sequentially.
+   */
+  uint texture_unpack_row_length_get() const;
 };
 }  // namespace blender::gpu
diff --git a/source/blender/gpu/vulkan/vk_texture.cc b/source/blender/gpu/vulkan/vk_texture.cc
index c17dcd9484c..35cf9847b46 100644
--- a/source/blender/gpu/vulkan/vk_texture.cc
+++ b/source/blender/gpu/vulkan/vk_texture.cc
@@ -13,6 +13,7 @@
 #include "vk_memory.hh"
 #include "vk_shader.hh"
 #include "vk_shader_interface.hh"
+#include "vk_state_manager.hh"
 
 #include "BLI_math_vector.hh"
 
@@ -94,8 +95,12 @@ void *VKTexture::read(int mip, eGPUDataFormat format)
 }
 
 void VKTexture::update_sub(
-    int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data)
+    int mip, int offset[3], int extent_[3], eGPUDataFormat format, const void *data)
 {
+  if (mip != 0) {
+    /* TODO: not implemented yet. */
+    return;
+  }
   if (!is_allocated()) {
     allocate();
   }
@@ -103,17 +108,31 @@ void VKTexture::update_sub(
   /* Vulkan images cannot be directly mapped to host memory and requires a staging buffer.
    */
   VKContext &context = *VKContext::get();
   VKBuffer staging_buffer;
-  size_t sample_len = extent[0] * extent[1] * extent[2];
+  int3 extent = int3(extent_[0], max_ii(extent_[1], 1), max_ii(extent_[2], 1));
+  size_t sample_len = extent.x * extent.y * extent.z;
   size_t device_memory_size = sample_len * to_bytesize(format_);
   staging_buffer.create(
       context, device_memory_size, GPU_USAGE_DEVICE_ONLY, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
-  convert_host_to_device(staging_buffer.mapped_memory_get(), data, sample_len, format, format_);
+
+  uint buffer_row_length = context.state_manager_get().texture_unpack_row_length_get();
+  if (buffer_row_length) {
+    /* Use custom row length #GPU_texture_unpack_row_length */
+    convert_host_to_device(staging_buffer.mapped_memory_get(),
+                           data,
+                           uint2(extent),
+                           buffer_row_length,
+                           format,
+                           format_);
+  }
+  else {
+    convert_host_to_device(staging_buffer.mapped_memory_get(), data, sample_len, format, format_);
+  }
 
   VkBufferImageCopy region = {};
-  region.imageExtent.width = extent[0];
-  region.imageExtent.height = extent[1];
-  region.imageExtent.depth = extent[2];
+  region.imageExtent.width = extent.x;
+  region.imageExtent.height = extent.y;
+  region.imageExtent.depth = extent.z;
   region.imageOffset.x = offset[0];
   region.imageOffset.y = offset[1];
   region.imageOffset.z = offset[2];
@@ -175,7 +194,8 @@ bool VKTexture::is_allocated() const
 static VkImageUsageFlagBits to_vk_image_usage(const eGPUTextureUsage usage,
                                               const eGPUTextureFormatFlag format_flag)
 {
-  VkImageUsageFlagBits result = static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+  VkImageUsageFlagBits result = static_cast<VkImageUsageFlagBits>(VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
+                                                                  VK_IMAGE_USAGE_TRANSFER_DST_BIT |
                                                                   VK_IMAGE_USAGE_SAMPLED_BIT);
   if (usage & GPU_TEXTURE_USAGE_SHADER_READ) {
     result = static_cast<VkImageUsageFlagBits>(result | VK_IMAGE_USAGE_STORAGE_BIT);
@@ -184,7 +204,7 @@ static VkImageUsageFlagBits to_vk_image_usage(const eGPUTextureUsage usage,
     result = static_cast<VkImageUsageFlagBits>(result | VK_IMAGE_USAGE_STORAGE_BIT);
   }
   if (usage & GPU_TEXTURE_USAGE_ATTACHMENT) {
-    if (format_flag & (GPU_FORMAT_NORMALIZED_INTEGER | GPU_FORMAT_COMPRESSED)) {
+    if (format_flag & GPU_FORMAT_COMPRESSED) {
       /* These formats aren't supported as an attachment. When using GPU_TEXTURE_USAGE_DEFAULT they
        * are still being evaluated to be attachable. So we need to skip them. */
     }
@@ -207,6 +227,7 @@ static VkImageUsageFlagBits to_vk_image_usage(const eGPUTextureUsage usage,
 
 bool VKTexture::allocate()
 {
+  BLI_assert(vk_image_ == VK_NULL_HANDLE);
   BLI_assert(!is_allocated());
 
   int extent[3] = {1, 1, 1};
@@ -260,6 +281,7 @@ bool VKTexture::allocate()
   if (result != VK_SUCCESS) {
     return false;
   }
+  debug::object_label(&context, vk_image_, name_);
 
   /* Promote image to the correct layout. */
   layout_ensure(context, VK_IMAGE_LAYOUT_GENERAL);
@@ -277,6 +299,7 @@ bool VKTexture::allocate()
 
   result = vkCreateImageView(
       context.device_get(), &image_view_info, vk_allocation_callbacks, &vk_image_view_);
+  debug::object_label(&context, vk_image_view_, name_);
   return result == VK_SUCCESS;
 }
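
Editorial note (not part of the patch): a minimal standalone C++ sketch of what the new row-length overload of convert_host_to_device does. When the unpack row length is non-zero, the host rows are src_row_length pixels apart while the staging buffer expects tightly packed rows, so every row is converted individually. The helper name copy_rows_with_row_length is hypothetical, and std::memcpy stands in for the patch's per-row convert_buffer()/to_bytesize() calls.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

/* Copy a `width` x `height` pixel region out of a host image whose rows are
 * `src_row_length` pixels apart, into a tightly packed destination buffer. */
static void copy_rows_with_row_length(uint8_t *dst,
                                      const uint8_t *src,
                                      uint32_t width,
                                      uint32_t height,
                                      uint32_t src_row_length,
                                      uint32_t pixel_size)
{
  const uint64_t src_row_len = uint64_t(src_row_length) * pixel_size;
  const uint64_t dst_row_len = uint64_t(width) * pixel_size;
  for (uint32_t row = 0; row < height; row++) {
    /* Each row is handled independently; rows are tightly packed in `dst`. */
    std::memcpy(&dst[dst_row_len * row], &src[src_row_len * row], dst_row_len);
  }
}

int main()
{
  /* Host image of 8x4 single-byte pixels; upload only the left 5x4 region. */
  const uint32_t row_length = 8, width = 5, height = 4;
  std::vector<uint8_t> host(row_length * height);
  for (uint32_t i = 0; i < host.size(); i++) {
    host[i] = uint8_t(i);
  }
  std::vector<uint8_t> staging(width * height);
  copy_rows_with_row_length(staging.data(), host.data(), width, height, row_length, 1);
  /* Row 1 of the staging buffer starts at host pixel 8 (the second host row),
   * not at pixel 5 as it would with tightly packed input. */
  std::printf("staging[5] = %d (expected 8)\n", int(staging[5]));
  return 0;
}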
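
A second, hedged sketch of how the state stored in VKStateManager is meant to be driven from the caller side. It assumes the existing GPU_unpack_row_length_set() and GPU_texture_update_sub() declarations from GPU_state.h and GPU_texture.h; verify the exact signatures against those headers. upload_subregion is a hypothetical helper, not Blender code.

/* Hypothetical caller-side helper: upload a `width` x `height` sub-rectangle that
 * lives inside a larger RGBA-float host image whose rows are `full_width` pixels
 * apart. Assumes the GPU_unpack_row_length_set() / GPU_texture_update_sub() API. */
#include <cstddef>

#include "GPU_state.h"
#include "GPU_texture.h"

static void upload_subregion(GPUTexture *texture,
                             const float *host_pixels,
                             int full_width,
                             int x,
                             int y,
                             int width,
                             int height)
{
  /* First pixel of the region; the remaining rows stay `full_width` pixels apart. */
  const float *region = host_pixels + (size_t(y) * full_width + x) * 4;

  /* Row stride of the host data in pixels; consumed by VKTexture::update_sub(). */
  GPU_unpack_row_length_set((unsigned int)full_width);
  GPU_texture_update_sub(texture, GPU_DATA_FLOAT, region, x, y, 0, width, height, 1);
  /* Restore the default: 0 means the host rows are tightly packed. */
  GPU_unpack_row_length_set(0);
}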