Vulkan: Convert VertexBuffer to Contain Supported Attributes #107733
|
@ -882,4 +882,56 @@ void convert_device_to_host(void *dst_buffer,
|
|||
|
||||
/* \} */
|
||||
|
||||
/* -------------------------------------------------------------------- */
|
||||
/** \name Vertex Attributes
|
||||
* \{ */
|
||||
|
||||
static bool conversion_needed(const GPUVertAttr &vertex_attribute)
|
||||
{
|
||||
return (vertex_attribute.fetch_mode == GPU_FETCH_INT_TO_FLOAT &&
|
||||
ELEM(vertex_attribute.comp_type, GPU_COMP_I32, GPU_COMP_U32));
|
||||
}
|
||||
|
||||
bool conversion_needed(const GPUVertFormat &vertex_format)
|
||||
{
|
||||
for (int attr_index : IndexRange(vertex_format.attr_len)) {
|
||||
const GPUVertAttr &vert_attr = vertex_format.attrs[attr_index];
|
||||
if (conversion_needed(vert_attr)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void convert_in_place(void *data, const GPUVertFormat &vertex_format, const uint vertex_len)
|
||||
{
|
||||
BLI_assert(vertex_format.deinterleaved == false);
|
||||
for (int attr_index : IndexRange(vertex_format.attr_len)) {
|
||||
const GPUVertAttr &vert_attr = vertex_format.attrs[attr_index];
|
||||
if (!conversion_needed(vert_attr)) {
|
||||
continue;
|
||||
}
|
||||
void *row_data = static_cast<uint8_t *>(data) + vert_attr.offset;
|
||||
for (int vert_index = 0; vert_index < vertex_len; vert_index++) {
|
||||
if (vert_attr.comp_type == GPU_COMP_I32) {
|
||||
for (int component : IndexRange(vert_attr.comp_len)) {
|
||||
int32_t *component_in = static_cast<int32_t *>(row_data) + component;
|
||||
float *component_out = static_cast<float *>(row_data) + component;
|
||||
*component_out = float(*component_in);
|
||||
}
|
||||
}
|
||||
else if (vert_attr.comp_type == GPU_COMP_U32) {
|
||||
for (int component : IndexRange(vert_attr.comp_len)) {
|
||||
uint32_t *component_in = static_cast<uint32_t *>(row_data) + component;
|
||||
float *component_out = static_cast<float *>(row_data) + component;
|
||||
*component_out = float(*component_in);
|
||||
}
|
||||
}
|
||||
row_data = static_cast<uint8_t *>(row_data) + vertex_format.stride;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* \} */
|
||||
|
||||
} // namespace blender::gpu
|
||||
|
|
|
@ -72,4 +72,28 @@ void convert_device_to_host(void *dst_buffer,
|
|||
eGPUDataFormat host_format,
|
||||
eGPUTextureFormat device_format);
|
||||
|
||||
/**
|
||||
 * Are all attributes of the given vertex format natively supported, or does conversion need to
 * happen.
|
||||
*
|
||||
* \param vertex_format: the vertex format to check if an associated buffer requires conversion
|
||||
* being done on the host.
|
||||
*/
|
||||
bool conversion_needed(const GPUVertFormat &vertex_format);
|
||||
|
||||
/**
|
||||
* Convert the given `data` to contain Vulkan natively supported data formats.
|
||||
*
|
||||
 * When, for a vertex attribute, the fetch mode is set to GPU_FETCH_INT_TO_FLOAT and the attribute
 * is an int32_t or uint32_t, the conversion is performed on the host. Attributes of 16 or 8 bits
 * are supported natively and are converted by Vulkan itself.
|
||||
*
|
||||
* \param data: Buffer to convert. Data will be converted in place.
|
||||
* \param vertex_format: Vertex format of the given data. Attributes that aren't supported will be
|
||||
* converted to a supported one.
|
||||
 * \param vertex_len: Number of vertices in the given data buffer; the number of vertices to
 * convert.
|
||||
*/
|
||||
void convert_in_place(void *data, const GPUVertFormat &vertex_format, const uint vertex_len);
|
||||
|
||||
}; // namespace blender::gpu
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
|
||||
#include "MEM_guardedalloc.h"
|
||||
|
||||
#include "vk_data_conversion.hh"
|
||||
#include "vk_shader.hh"
|
||||
#include "vk_shader_interface.hh"
|
||||
#include "vk_vertex_buffer.hh"
|
||||
|
@ -64,13 +65,51 @@ void VKVertexBuffer::release_data()
|
|||
MEM_SAFE_FREE(data);
|
||||
}
|
||||
|
||||
void VKVertexBuffer::upload_data() {}
|
||||
static bool inplace_conversion_supported(const GPUUsageType &usage)
|
||||
{
|
||||
return ELEM(usage, GPU_USAGE_STATIC, GPU_USAGE_STREAM);
|
||||
}
|
||||
|
||||
void *VKVertexBuffer::convert() const
|
||||
{
|
||||
void *out_data = data;
|
||||
if (!inplace_conversion_supported(usage_)) {
|
||||
out_data = MEM_dupallocN(out_data);
|
||||
}
|
||||
BLI_assert(format.deinterleaved);
|
||||
convert_in_place(out_data, format, vertex_len);
|
||||
return out_data;
|
||||
}
|
||||
|
||||
void VKVertexBuffer::upload_data()
|
||||
{
|
||||
if (!buffer_.is_allocated()) {
|
||||
allocate();
|
||||
}
|
||||
|
||||
if (flag &= GPU_VERTBUF_DATA_DIRTY) {
|
||||
void *data_to_upload = data;
|
||||
if (conversion_needed(format)) {
|
||||
data_to_upload = convert();
|
||||
}
|
||||
buffer_.update(data_to_upload);
|
||||
if (data_to_upload != data) {
|
||||
MEM_SAFE_FREE(data_to_upload);
|
||||
}
|
||||
if (usage_ == GPU_USAGE_STATIC) {
|
||||
MEM_SAFE_FREE(data);
|
||||
}
|
||||
|
||||
flag &= ~GPU_VERTBUF_DATA_DIRTY;
|
||||
flag |= GPU_VERTBUF_DATA_UPLOADED;
|
||||
}
|
||||
}
|
||||
|
||||
void VKVertexBuffer::duplicate_data(VertBuf * /*dst*/) {}
|
||||
|
||||
void VKVertexBuffer::allocate()
|
||||
{
|
||||
buffer_.create(size_used_get(),
|
||||
buffer_.create(size_alloc_get(),
|
||||
usage_,
|
||||
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
|
||||
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
|
||||
|
|
|
@ -40,6 +40,12 @@ class VKVertexBuffer : public VertBuf {
|
|||
|
||||
private:
|
||||
void allocate();
|
||||
void *convert() const;
|
||||
};
|
||||
|
||||
/** Downcast a generic #VertBuf to the Vulkan implementation.
 * Unchecked static_cast — assumes the buffer was created by the Vulkan backend. */
static inline VKVertexBuffer *unwrap(VertBuf *vertex_buffer)
{
  return static_cast<VKVertexBuffer *>(vertex_buffer);
}
|
||||
|
||||
} // namespace blender::gpu
|
||||
|
|
Loading…
Reference in New Issue