Vulkan: Index Buffer #107358

Merged
Jeroen Bakker merged 4 commits from Jeroen-Bakker/blender:vulkan-index-buffer into main 2023-04-26 08:09:34 +02:00
8 changed files with 603 additions and 286 deletions
Showing only changes of commit 610f48f655 - Show all commits

File diff suppressed because it is too large Load Diff

View File

@ -12,6 +12,7 @@
#include "vk_memory.hh"
#include "vk_pipeline.hh"
#include "vk_texture.hh"
#include "vk_vertex_buffer.hh"
#include "BLI_assert.h"
@ -34,30 +35,42 @@ void VKCommandBuffer::init(const VkDevice vk_device,
vk_queue_ = vk_queue;
vk_command_buffer_ = vk_command_buffer;
submission_id_.reset();
state.stage = Stage::Initial;
if (vk_fence_ == VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
VkFenceCreateInfo fenceInfo{};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
vkCreateFence(vk_device_, &fenceInfo, vk_allocation_callbacks, &vk_fence_);
}
else {
vkResetFences(vk_device_, 1, &vk_fence_);
}
}
void VKCommandBuffer::begin_recording()
{
vkWaitForFences(vk_device_, 1, &vk_fence_, VK_TRUE, UINT64_MAX);
vkResetFences(vk_device_, 1, &vk_fence_);
vkResetCommandBuffer(vk_command_buffer_, 0);
if (is_in_stage(Stage::Submitted)) {
vkWaitForFences(vk_device_, 1, &vk_fence_, VK_TRUE, FenceTimeout);
vkResetFences(vk_device_, 1, &vk_fence_);
stage_transfer(Stage::Submitted, Stage::Executed);
}
if (is_in_stage(Stage::Executed)) {
vkResetCommandBuffer(vk_command_buffer_, 0);
stage_transfer(Stage::Executed, Stage::Initial);
}
VkCommandBufferBeginInfo begin_info = {};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(vk_command_buffer_, &begin_info);
stage_transfer(Stage::Initial, Stage::Recording);
}
/** Finish recording: close any still-active render pass, end the Vulkan command buffer and
 * move the tracked life-time stage from Recording to BetweenRecordingAndSubmitting. */
void VKCommandBuffer::end_recording()
{
ensure_no_active_framebuffer();
vkEndCommandBuffer(vk_command_buffer_);
stage_transfer(Stage::Recording, Stage::BetweenRecordingAndSubmitting);
}
void VKCommandBuffer::bind(const VKPipeline &pipeline, VkPipelineBindPoint bind_point)
@ -74,19 +87,35 @@ void VKCommandBuffer::bind(const VKDescriptorSet &descriptor_set,
vk_command_buffer_, bind_point, vk_pipeline_layout, 0, 1, &vk_descriptor_set, 0, 0);
}
void VKCommandBuffer::begin_render_pass(const VKFrameBuffer &framebuffer)
void VKCommandBuffer::bind(const uint32_t binding,
const VKVertexBuffer &vertex_buffer,
const VkDeviceSize offset)
{
VkRenderPassBeginInfo render_pass_begin_info = {};
render_pass_begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
render_pass_begin_info.renderPass = framebuffer.vk_render_pass_get();
render_pass_begin_info.framebuffer = framebuffer.vk_framebuffer_get();
render_pass_begin_info.renderArea = framebuffer.vk_render_area_get();
vkCmdBeginRenderPass(vk_command_buffer_, &render_pass_begin_info, VK_SUBPASS_CONTENTS_INLINE);
bind(binding, vertex_buffer.vk_handle(), offset);
}
void VKCommandBuffer::end_render_pass(const VKFrameBuffer & /*framebuffer*/)
void VKCommandBuffer::bind(const uint32_t binding,
const VkBuffer &vk_vertex_buffer,
const VkDeviceSize offset)
{
vkCmdEndRenderPass(vk_command_buffer_);
validate_framebuffer_exists();
ensure_active_framebuffer();
vkCmdBindVertexBuffers(vk_command_buffer_, binding, 1, &vk_vertex_buffer, &offset);
}
/** Start tracking the given framebuffer. The actual `vkCmdBeginRenderPass` is deferred until a
 * command that needs an active render pass is recorded (see `ensure_active_framebuffer`). */
void VKCommandBuffer::begin_render_pass(const VKFrameBuffer &framebuffer)
{
validate_framebuffer_not_exists();
state.framebuffer_ = &framebuffer;
}
/** Stop tracking the given framebuffer, ending its render pass when it was actually begun. */
void VKCommandBuffer::end_render_pass(const VKFrameBuffer &framebuffer)
{
/* The parameter is only read by the debug-build consistency check below. */
UNUSED_VARS_NDEBUG(framebuffer)
validate_framebuffer_exists();
BLI_assert(state.framebuffer_ == &framebuffer);
ensure_no_active_framebuffer();
state.framebuffer_ = nullptr;
}
void VKCommandBuffer::push_constants(const VKPushConstants &push_constants,
@ -105,6 +134,7 @@ void VKCommandBuffer::push_constants(const VKPushConstants &push_constants,
/** Fill the whole buffer with a repeated 32-bit value. `vkCmdFillBuffer` isn't allowed inside a
 * render pass, so an active one is ended first. */
void VKCommandBuffer::fill(VKBuffer &buffer, uint32_t clear_data)
{
ensure_no_active_framebuffer();
vkCmdFillBuffer(vk_command_buffer_, buffer.vk_handle(), 0, buffer.size_in_bytes(), clear_data);
}
@ -112,6 +142,7 @@ void VKCommandBuffer::copy(VKBuffer &dst_buffer,
VKTexture &src_texture,
Span<VkBufferImageCopy> regions)
{
ensure_no_active_framebuffer();
vkCmdCopyImageToBuffer(vk_command_buffer_,
src_texture.vk_image_handle(),
src_texture.current_layout_get(),
@ -123,6 +154,7 @@ void VKCommandBuffer::copy(VKTexture &dst_texture,
VKBuffer &src_buffer,
Span<VkBufferImageCopy> regions)
{
ensure_no_active_framebuffer();
vkCmdCopyBufferToImage(vk_command_buffer_,
src_buffer.vk_handle(),
dst_texture.vk_image_handle(),
@ -130,12 +162,27 @@ void VKCommandBuffer::copy(VKTexture &dst_texture,
regions.size(),
regions.data());
}
void VKCommandBuffer::blit(VKTexture &dst_texture,
VKTexture &src_buffer,
Span<VkImageBlit> regions)
{
ensure_no_active_framebuffer();
vkCmdBlitImage(vk_command_buffer_,
src_buffer.vk_image_handle(),
src_buffer.current_layout_get(),
dst_texture.vk_image_handle(),
dst_texture.current_layout_get(),
regions.size(),
regions.data(),
VK_FILTER_NEAREST);
}
void VKCommandBuffer::clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges)
{
ensure_no_active_framebuffer();
vkCmdClearColorImage(vk_command_buffer_,
vk_image,
vk_image_layout,
@ -146,13 +193,36 @@ void VKCommandBuffer::clear(VkImage vk_image,
/** Clear the given attachments of the tracked framebuffer. `vkCmdClearAttachments` is only
 * valid inside a render pass, so the tracked framebuffer is bound first. */
void VKCommandBuffer::clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas)
{
validate_framebuffer_exists();
ensure_active_framebuffer();
vkCmdClearAttachments(
vk_command_buffer_, attachments.size(), attachments.data(), areas.size(), areas.data());
}
/** Record a non-indexed draw. Requires a framebuffer to be tracked; begins its render pass if
 * it isn't bound yet. Note the (first, count) argument order here versus Vulkan's
 * (count, first). */
void VKCommandBuffer::draw(int v_first, int v_count, int i_first, int i_count)
{
validate_framebuffer_exists();
ensure_active_framebuffer();
vkCmdDraw(vk_command_buffer_, v_count, i_count, v_first, i_first);
state.draw_counts++;
}
/** Record an indexed draw (`vkCmdDrawIndexed`). Requires a framebuffer to be tracked; begins
 * its render pass if it isn't bound yet. */
void VKCommandBuffer::draw(
int index_count, int instance_count, int first_index, int vertex_offset, int first_instance)
{
validate_framebuffer_exists();
ensure_active_framebuffer();
vkCmdDrawIndexed(
vk_command_buffer_, index_count, instance_count, first_index, vertex_offset, first_instance);
state.draw_counts++;
}
void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages)
{
if (state.framebuffer_) {
ensure_active_framebuffer();
}
vkCmdPipelineBarrier(vk_command_buffer_,
source_stages,
destination_stages,
@ -167,6 +237,7 @@ void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
void VKCommandBuffer::pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers)
{
ensure_no_active_framebuffer();
vkCmdPipelineBarrier(vk_command_buffer_,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
@ -181,11 +252,13 @@ void VKCommandBuffer::pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_b
/** Record a compute dispatch. Dispatches aren't allowed inside a render pass, so an active one
 * is ended first. */
void VKCommandBuffer::dispatch(int groups_x_len, int groups_y_len, int groups_z_len)
{
ensure_no_active_framebuffer();
vkCmdDispatch(vk_command_buffer_, groups_x_len, groups_y_len, groups_z_len);
}
void VKCommandBuffer::submit()
{
ensure_no_active_framebuffer();
end_recording();
encode_recorded_commands();
submit_encoded_commands();
@ -208,6 +281,55 @@ void VKCommandBuffer::submit_encoded_commands()
vkQueueSubmit(vk_queue_, 1, &submit_info, vk_fence_);
submission_id_.next();
stage_transfer(Stage::BetweenRecordingAndSubmitting, Stage::Submitted);
}
/* -------------------------------------------------------------------- */
/** \name Framebuffer/RenderPass state tracking
* \{ */
/** Debug check: assert that no framebuffer is currently tracked (neither bound nor unbound). */
void VKCommandBuffer::validate_framebuffer_not_exists()
{
BLI_assert_msg(state.framebuffer_ == nullptr && state.framebuffer_active_ == false,
"State error: expected no framebuffer being tracked.");
}
/** Debug check: assert that a framebuffer is currently tracked (bound or not). */
void VKCommandBuffer::validate_framebuffer_exists()
{
BLI_assert_msg(state.framebuffer_, "State error: expected framebuffer being tracked.");
}
/** End the render pass of the tracked framebuffer when it is currently bound. Called before
 * recording commands that are illegal inside a render pass. */
void VKCommandBuffer::ensure_no_active_framebuffer()
{
state.checks_++;
if (state.framebuffer_ && state.framebuffer_active_) {
vkCmdEndRenderPass(vk_command_buffer_);
state.framebuffer_active_ = false;
/* Statistic: count how often the render pass state had to be switched. */
state.switches_++;
}
}
/** Begin the render pass of the tracked framebuffer when it isn't bound yet. Called before
 * recording commands that must happen inside a render pass (draws, attachment clears). */
void VKCommandBuffer::ensure_active_framebuffer()
{
BLI_assert(state.framebuffer_);
state.checks_++;
if (!state.framebuffer_active_) {
VkRenderPassBeginInfo render_pass_begin_info = {};
render_pass_begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
render_pass_begin_info.renderPass = state.framebuffer_->vk_render_pass_get();
render_pass_begin_info.framebuffer = state.framebuffer_->vk_framebuffer_get();
render_pass_begin_info.renderArea = state.framebuffer_->vk_render_area_get();
/* We don't use clear ops, but vulkan wants to have at least one. */
VkClearValue clear_value = {};
render_pass_begin_info.clearValueCount = 1;
render_pass_begin_info.pClearValues = &clear_value;
vkCmdBeginRenderPass(vk_command_buffer_, &render_pass_begin_info, VK_SUBPASS_CONTENTS_INLINE);
state.framebuffer_active_ = true;
/* Statistic: count how often the render pass state had to be switched. */
state.switches_++;
}
}
/** \} */
} // namespace blender::gpu

View File

@ -16,21 +16,116 @@ namespace blender::gpu {
class VKBuffer;
class VKDescriptorSet;
class VKFrameBuffer;
class VKIndexBuffer;
class VKPipeline;
class VKPushConstants;
class VKTexture;
class VKVertexBuffer;
/** Command buffer to keep track of the life-time of a command buffer. */
class VKCommandBuffer : NonCopyable, NonMovable {
/** None owning handle to the command buffer and device. Handle is owned by `GHOST_ContextVK`. */
/** Not owning handle to the command buffer and device. Handle is owned by `GHOST_ContextVK`. */
VkDevice vk_device_ = VK_NULL_HANDLE;
VkCommandBuffer vk_command_buffer_ = VK_NULL_HANDLE;
VkQueue vk_queue_ = VK_NULL_HANDLE;
/**
* Timeout to use when waiting for fences in nanoseconds.
*
* Currently added as the fence will halt when there are no commands in the command buffer for
* the second time. This should be solved and this timeout should be removed.
*/
static constexpr uint64_t FenceTimeout = UINT64_MAX;
/** Owning handles */
VkFence vk_fence_ = VK_NULL_HANDLE;
VKSubmissionID submission_id_;
private:
enum class Stage {
Initial,
Recording,
BetweenRecordingAndSubmitting,
Submitted,
Executed,
};
/*
* Some Vulkan commands require an active frame buffer, while others require no active frame
* buffer. As our current API does not provide a solution for this, we keep track of the actual
* state and apply the changes when recording the next command.
*
* This is a temporary solution to get things rolling.
* TODO: In a future solution we should decide the scope of a command buffer.
*
* - command buffer per draw command.
* - minimize command buffers and track render passes.
* - add custom encoder to also track resource usages.
*
* Currently I expect that the custom encoder will have to be implemented eventually. But I want
* to keep postponing the custom encoder for now in order to collect more use cases it should
* solve (first pixel drawn on screen).
*
* Some commands can also be encoded in another way when encoded as the first command. For
* example, clearing a framebuffer texture isn't allowed inside a render pass, but clearing the
* framebuffer textures via ops is allowed. A clear of a framebuffer texture issued directly
* after beginning a render pass could be re-encoded to do this in the same command.
*
* So for now we track the state and temporary switch to another state if the command requires
* it.
*/
struct {
/* Reference to the last framebuffer for which begin_render_pass was called. */
const VKFrameBuffer *framebuffer_ = nullptr;
/* Is last_framebuffer_ currently bound. Each call should ensure the correct state. */
bool framebuffer_active_ = false;
/* Amount of times a check has been requested. */
uint64_t checks_ = 0;
/* Amount of times a check required to change the render pass. */
uint64_t switches_ = 0;
/* Number of times a vkDraw command has been recorded. */
uint64_t draw_counts = 0;
/**
* Current stage of the command buffer to keep track of inconsistencies & incorrect usage.
*/
Stage stage = Stage::Initial;
} state;
/** Return true when the command buffer is currently in the given life-time stage. */
bool is_in_stage(Stage stage)
{
return state.stage == stage;
}
/** Set the current life-time stage directly, without validating the transition
 * (prefer `stage_transfer` which asserts the expected source stage). */
void stage_set(Stage stage)
{
state.stage = stage;
}
/** Human-readable name for a command-buffer life-time stage (debugging aid). */
std::string to_string(Stage stage)
{
const char *name = "UNKNOWN";
switch (stage) {
case Stage::Initial:
name = "INITIAL";
break;
case Stage::Recording:
name = "RECORDING";
break;
case Stage::BetweenRecordingAndSubmitting:
name = "BEFORE_SUBMIT";
break;
case Stage::Submitted:
name = "SUBMITTED";
break;
case Stage::Executed:
name = "EXECUTED";
break;
}
return name;
}
/** Move the command buffer from `stage_from` to `stage_to`; asserts that the current stage is
 * `stage_from`. Flip the `#if 0` below to log transitions while debugging. */
void stage_transfer(Stage stage_from, Stage stage_to)
{
BLI_assert(is_in_stage(stage_from));
#if 0
printf(" *** Transfer stage from %s to %s\n",
to_string(stage_from).c_str(),
to_string(stage_to).c_str());
#endif
stage_set(stage_to);
}
public:
virtual ~VKCommandBuffer();
void init(const VkDevice vk_device, const VkQueue vk_queue, VkCommandBuffer vk_command_buffer);
@ -40,6 +135,12 @@ class VKCommandBuffer : NonCopyable, NonMovable {
void bind(const VKDescriptorSet &descriptor_set,
const VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint bind_point);
void bind(const uint32_t binding,
const VKVertexBuffer &vertex_buffer,
const VkDeviceSize offset);
/* Bind the given buffer as a vertex buffer. */
void bind(const uint32_t binding, const VkBuffer &vk_vertex_buffer, const VkDeviceSize offset);
void begin_render_pass(const VKFrameBuffer &framebuffer);
void end_render_pass(const VKFrameBuffer &framebuffer);
@ -55,6 +156,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
/** Copy the contents of a texture MIP level to the dst buffer. */
void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span<VkBufferImageCopy> regions);
void copy(VKTexture &dst_texture, VKBuffer &src_buffer, Span<VkBufferImageCopy> regions);
void blit(VKTexture &dst_texture, VKTexture &src_texture, Span<VkImageBlit> regions);
void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages);
void pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers);
@ -72,6 +174,10 @@ class VKCommandBuffer : NonCopyable, NonMovable {
void clear(Span<VkClearAttachment> attachments, Span<VkClearRect> areas);
void fill(VKBuffer &buffer, uint32_t data);
void draw(int v_first, int v_count, int i_first, int i_count);
void draw(
int index_count, int instance_count, int first_index, int vertex_offset, int first_instance);
/**
* Stop recording commands, encode + send the recordings to Vulkan, wait until the
* commands have been executed and start the command buffer to accept recordings again.
@ -86,6 +192,30 @@ class VKCommandBuffer : NonCopyable, NonMovable {
private:
void encode_recorded_commands();
void submit_encoded_commands();
/**
* Validate that there isn't a framebuffer being tracked (bound or not bound).
*
* Raises an assert in debug when a framebuffer is being tracked.
*/
void validate_framebuffer_not_exists();
/**
* Validate that there is a framebuffer being tracked (bound or not bound).
*
* Raises an assert in debug when no framebuffer is being tracked.
*/
void validate_framebuffer_exists();
/**
* Ensure that there is no framebuffer being tracked or the tracked framebuffer isn't bound.
*/
void ensure_no_active_framebuffer();
/**
* Ensure that the tracked framebuffer is bound.
*/
void ensure_active_framebuffer();
};
} // namespace blender::gpu

View File

@ -203,17 +203,22 @@ class VKPushConstants : VKResourceTracker<VKUniformBuffer> {
const T *input_data)
{
const Layout::PushConstant *push_constant_layout = layout_->find(location);
BLI_assert(push_constant_layout);
if (push_constant_layout == nullptr) {
/* Legacy code can still try to update push constants when they don't exist. For example
* `immDrawPixelsTexSetup` will bind an image slot manually. This works in OpenGL, but in
* vulkan images aren't stored as push constants. */
return;
}
uint8_t *bytes = static_cast<uint8_t *>(data_);
T *dst = static_cast<T *>(static_cast<void *>(&bytes[push_constant_layout->offset]));
const bool is_tightly_std140_packed = (comp_len % 4) == 0;
if (layout_->storage_type_get() == StorageType::PUSH_CONSTANTS || array_size == 0 ||
is_tightly_std140_packed) {
BLI_assert_msg(push_constant_layout->offset + comp_len * array_size * sizeof(T) <=
layout_->size_in_bytes(),
push_constant_layout->array_size == 0 || is_tightly_std140_packed) {
const size_t copy_size_in_bytes = comp_len * max_ii(array_size, 1) * sizeof(T);
BLI_assert_msg(push_constant_layout->offset + copy_size_in_bytes <= layout_->size_in_bytes(),
"Tried to write outside the push constant allocated memory.");
memcpy(dst, input_data, comp_len * array_size * sizeof(T));
memcpy(dst, input_data, copy_size_in_bytes);
is_dirty_ = true;
return;
}

View File

@ -984,12 +984,14 @@ void VKShader::unbind()
void VKShader::uniform_float(int location, int comp_len, int array_size, const float *data)
{
pipeline_get().push_constants_get().push_constant_set(location, comp_len, array_size, data);
VKPushConstants &push_constants = pipeline_get().push_constants_get();
push_constants.push_constant_set(location, comp_len, array_size, data);
}
void VKShader::uniform_int(int location, int comp_len, int array_size, const int *data)
{
pipeline_get().push_constants_get().push_constant_set(location, comp_len, array_size, data);
VKPushConstants &push_constants = pipeline_get().push_constants_get();
push_constants.push_constant_set(location, comp_len, array_size, data);
}
std::string VKShader::resources_declare(const shader::ShaderCreateInfo &info) const
@ -1183,6 +1185,7 @@ std::string VKShader::geometry_layout_declare(const shader::ShaderCreateInfo &in
}
ss << "\n";
location = 0;
for (const StageInterfaceInfo *iface : info.geometry_out_interfaces_) {
bool has_matching_input_iface = find_interface_by_name(info.vertex_out_interfaces_,
iface->instance_name) != nullptr;

View File

@ -17,7 +17,7 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
using namespace blender::gpu::shader;
attr_len_ = 0;
attr_len_ = info.vertex_inputs_.size();
uniform_len_ = info.push_constants_.size();
ssbo_len_ = 0;
ubo_len_ = 0;
@ -58,7 +58,7 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
/* Make sure that the image slots don't overlap with the sampler slots. */
image_offset_++;
int32_t input_tot_len = ubo_len_ + uniform_len_ + ssbo_len_;
int32_t input_tot_len = attr_len_ + ubo_len_ + uniform_len_ + ssbo_len_;
inputs_ = static_cast<ShaderInput *>(
MEM_calloc_arrayN(input_tot_len, sizeof(ShaderInput), __func__));
ShaderInput *input = inputs_;
@ -66,6 +66,20 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
name_buffer_ = (char *)MEM_mallocN(names_size, "name_buffer");
uint32_t name_buffer_offset = 0;
/* Attributes */
for (const ShaderCreateInfo::VertIn &attr : info.vertex_inputs_) {
copy_input_name(input, attr.name, name_buffer_, name_buffer_offset);
input->location = input->binding = attr.index;
if (input->location != -1) {
enabled_attr_mask_ |= (1 << input->location);
/* Used in `GPU_shader_get_attribute_info`. */
attr_types_[input->location] = uint8_t(attr.type);
}
input++;
}
/* Uniform blocks */
for (const ShaderCreateInfo::Resource &res : all_resources) {
if (res.bind_type == ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER) {
@ -131,7 +145,9 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
}
/* Determine the descriptor set locations after the inputs have been sorted. */
descriptor_set_locations_ = Array<VKDescriptorSet::Location>(input_tot_len);
/* Note: input_tot_len is sometimes more than we need. */
const uint32_t resources_len = input_tot_len;
descriptor_set_locations_ = Array<VKDescriptorSet::Location>(resources_len);
uint32_t descriptor_set_location = 0;
for (ShaderCreateInfo::Resource &res : all_resources) {
const ShaderInput *input = shader_input_get(res);

View File

@ -7,6 +7,8 @@
#include "vk_uniform_buffer.hh"
#include "vk_context.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
namespace blender::gpu {
@ -22,13 +24,42 @@ void VKUniformBuffer::update(const void *data)
/** Create the backing GPU buffer for this uniform buffer and attach a debug label. */
void VKUniformBuffer::allocate(VKContext &context)
{
buffer_.create(context, size_in_bytes_, GPU_USAGE_STATIC, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
debug::object_label(&context, buffer_.vk_handle(), name_);
}
void VKUniformBuffer::clear_to_zero() {}
/** Fill the buffer with zeros, lazily allocating the backing buffer first when needed. */
void VKUniformBuffer::clear_to_zero()
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}
buffer_.clear(context, 0);
}
void VKUniformBuffer::bind(int /*slot*/) {}
void VKUniformBuffer::bind(int slot, shader::ShaderCreateInfo::Resource::BindType bind_type)
{
VKContext &context = *VKContext::get();
if (!buffer_.is_allocated()) {
allocate(context);
}
void VKUniformBuffer::bind_as_ssbo(int /*slot*/) {}
VKShader *shader = static_cast<VKShader *>(context.shader);
const VKShaderInterface &shader_interface = shader->interface_get();
const VKDescriptorSet::Location location = shader_interface.descriptor_set_location(bind_type,
slot);
VKDescriptorSetTracker &descriptor_set = shader->pipeline_get().descriptor_set_get();
descriptor_set.bind(*this, location);
}
/** Bind this buffer to the given slot as a uniform buffer. */
void VKUniformBuffer::bind(int slot)
{
bind(slot, shader::ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER);
}
/** Bind this buffer to the given slot as a storage buffer. */
void VKUniformBuffer::bind_as_ssbo(int slot)
{
bind(slot, shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER);
}
/* Unbinding is currently a no-op in the Vulkan backend. */
void VKUniformBuffer::unbind() {}

View File

@ -39,6 +39,7 @@ class VKUniformBuffer : public UniformBuf, NonCopyable {
private:
void allocate(VKContext &context);
void bind(int slot, shader::ShaderCreateInfo::Resource::BindType bind_type);
};
} // namespace blender::gpu