Vulkan: Clearing Framebuffer + Scissors #106044

Merged
Jeroen Bakker merged 49 commits from Jeroen-Bakker/blender:vulkan-framebuffer-clear into main 2023-03-28 11:51:45 +02:00
5 changed files with 1066 additions and 204 deletions
Showing only changes of commit bc1aa48ae9

View File

@@ -759,7 +759,8 @@ inline size_t to_bytesize(eGPUTextureFormat tex_format, eGPUDataFormat data_form
}
/* Definitely not complete, edit according to the GL specification. */
inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
constexpr inline bool validate_data_format(eGPUTextureFormat tex_format,
eGPUDataFormat data_format)
{
switch (tex_format) {
/* Formats texture & render-buffer */
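Making the validator constexpr lets it run at compile time; the new test template later in this commit relies on that to reject unsupported pairings via static_assert. A reduced sketch of the pattern (simplified signature and placeholder rule, not the real format table):
// Reduced sketch: a constexpr predicate feeding a static_assert turns an
// unsupported <device, host> pairing into a compile error instead of a
// runtime failure.
constexpr bool validate(int device_format, int host_format)
{
  return device_format != 0 && host_format != 0; /* Placeholder rule. */
}
template<int DeviceFormat, int HostFormat> static void roundtrip()
{
  static_assert(validate(DeviceFormat, HostFormat), "unsupported pairing");
}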

View File

@@ -3,10 +3,13 @@
#include "MEM_guardedalloc.h"
#include "BLI_math_vector.hh"
#include "BLI_vector.hh"
#include "GPU_context.h"
#include "GPU_texture.h"
#include "gpu_texture_private.hh"
namespace blender::gpu::tests {
static void test_texture_read()
@@ -46,196 +49,772 @@ static void test_texture_read()
}
GPU_TEST(texture_read)
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing 32F
* \{ */
static float *generate_test_data_float(size_t data_len)
template<typename DataType> static DataType *generate_test_data(size_t data_len)
{
float *data = static_cast<float *>(MEM_mallocN(data_len * sizeof(float), __func__));
DataType *data = static_cast<DataType *>(MEM_mallocN(data_len * sizeof(DataType), __func__));
for (int i : IndexRange(data_len)) {
data[i] = 8.0 / max_ff(i % 8, 0.5f);
data[i] = (DataType)(i % 8);
}
return data;
}
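The i % 8 pattern presumably keeps every generated value in 0..7, small enough to be exactly representable in each host type the tests instantiate (float, half, int32_t, uint32_t, uint8_t), so exact-equality roundtrip checks are meaningful. A usage sketch:
// Usage sketch: 16 floats cycling 0.0f..7.0f; the caller owns the allocation.
float *data = generate_test_data<float>(16);
/* ... upload and compare ... */
MEM_freeN(data);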
template<eGPUTextureFormat DeviceFormat, int ComponentLen, int Size = 256>
static void texture_create_upload_read_float()
template<eGPUTextureFormat DeviceFormat,
eGPUDataFormat HostFormat,
typename DataType,
int Size = 16>
static void texture_create_upload_read()
{
size_t data_len = Size * Size * ComponentLen;
float *data = generate_test_data_float(data_len);
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture = GPU_texture_create_2d("texture", Size, Size, 1, DeviceFormat, usage, data);
EXPECT_NE(texture, nullptr);
float *read_data = (float *)GPU_texture_read(texture, GPU_DATA_FLOAT, 0);
for (int i : IndexRange(data_len)) {
EXPECT_EQ(read_data[i], data[i]);
}
MEM_freeN(read_data);
GPU_texture_free(texture);
MEM_freeN(data);
}
static void test_texture_roundtrip_FLOAT_RGBA32F()
{
texture_create_upload_read_float<GPU_RGBA32F, 4>();
}
GPU_TEST(texture_roundtrip_FLOAT_RGBA32F)
#if 0
/* Isn't supported natively on NVidia/Vulkan. */
static void test_texture_roundtrip_FLOAT_RGB32F()
{
texture_create_upload_read_float<GPU_RGB32F, 3>();
}
GPU_TEST(texture_roundtrip_FLOAT_RGB32F)
#endif
static void test_texture_roundtrip_FLOAT_RG32F()
{
texture_create_upload_read_float<GPU_RG32F, 2>();
}
GPU_TEST(texture_roundtrip_FLOAT_RG32F)
static void test_texture_roundtrip_FLOAT_R32F()
{
texture_create_upload_read_float<GPU_R32F, 1>();
}
GPU_TEST(texture_roundtrip_FLOAT_R32F)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing 32UI
* \{ */
static uint32_t *generate_test_data_uint(size_t data_len)
{
uint32_t *data = static_cast<uint32_t *>(MEM_mallocN(data_len * sizeof(uint32_t), __func__));
for (int i : IndexRange(data_len)) {
data[i] = 8 / max_ii(i % 8, 1);
}
return data;
}
template<eGPUTextureFormat DeviceFormat, int ComponentLen, int Size = 256>
static void texture_create_upload_read_uint()
{
static_assert(validate_data_format(DeviceFormat, HostFormat));
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture = GPU_texture_create_2d(
"texture", Size, Size, 1, DeviceFormat, usage, nullptr);
EXPECT_NE(texture, nullptr);
size_t data_len = Size * Size * ComponentLen;
uint32_t *data = generate_test_data_uint(data_len);
GPU_texture_update(texture, GPU_DATA_UINT, data);
size_t data_len = Size * Size * to_component_len(DeviceFormat);
DataType *data = static_cast<DataType *>(generate_test_data<DataType>(data_len));
GPU_texture_update(texture, HostFormat, data);
uint32_t *read_data = (uint32_t *)GPU_texture_read(texture, GPU_DATA_UINT, 0);
DataType *read_data = static_cast<DataType *>(GPU_texture_read(texture, HostFormat, 0));
for (int i : IndexRange(data_len)) {
EXPECT_EQ(read_data[i], data[i]);
}
MEM_freeN(read_data);
MEM_freeN(data);
GPU_texture_free(texture);
MEM_freeN(data);
}
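The three template parameters mirror the API: DeviceFormat selects the texture's storage format, HostFormat describes the CPU-side layout handed to GPU_texture_update/GPU_texture_read, and DataType is the matching C++ element type. Each generated test below is then a single instantiation, for example:
// One generated instantiation: float host data into an RGBA32F texture,
// read back as floats and compared element-wise for exact equality.
texture_create_upload_read<GPU_RGBA32F, GPU_DATA_FLOAT, float>();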
static void test_texture_roundtrip_UINT_RGBA32UI()
{
texture_create_upload_read_uint<GPU_RGBA32UI, 4>();
}
GPU_TEST(texture_roundtrip_UINT_RGBA32UI)
#if 0
/* Isn't supported natively on NVidia/Vulkan. */
static void test_texture_roundtrip_UINT_RGB32UI()
{
texture_create_upload_read_uint<GPU_RGB32UI, 3>();
}
GPU_TEST(texture_roundtrip_UINT_RGB32UI)
#endif
static void test_texture_roundtrip_UINT_RG32UI()
{
texture_create_upload_read_uint<GPU_RG32UI, 2>();
}
GPU_TEST(texture_roundtrip_UINT_RG32UI)
static void test_texture_roundtrip_UINT_R32UI()
{
texture_create_upload_read_uint<GPU_R32UI, 1>();
}
GPU_TEST(texture_roundtrip_UINT_R32UI)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing 32I
/** \name Roundtrip testing GPU_DATA_FLOAT
* \{ */
static int32_t *generate_test_data_int(size_t data_len)
#if 1
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8()
{
int32_t *data = static_cast<int32_t *>(MEM_mallocN(data_len * sizeof(int32_t), __func__));
for (int i : IndexRange(data_len)) {
data[i] = 8 / max_ii(i % 8, 1);
}
return data;
texture_create_upload_read<GPU_RGBA8, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8);
template<eGPUTextureFormat DeviceFormat, int ComponentLen, int Size = 256>
static void texture_create_upload_read_int()
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16F()
{
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture = GPU_texture_create_2d(
"texture", Size, Size, 1, DeviceFormat, usage, nullptr);
EXPECT_NE(texture, nullptr);
size_t data_len = Size * Size * ComponentLen;
int32_t *data = generate_test_data_int(data_len);
GPU_texture_update(texture, GPU_DATA_INT, data);
uint32_t *read_data = (uint32_t *)GPU_texture_read(texture, GPU_DATA_INT, 0);
for (int i : IndexRange(data_len)) {
EXPECT_EQ(read_data[i], data[i]);
}
MEM_freeN(read_data);
GPU_texture_free(texture);
MEM_freeN(data);
texture_create_upload_read<GPU_RGBA16F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16F);
static void test_texture_roundtrip_INT_RGBA32I()
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16()
{
texture_create_upload_read_int<GPU_RGBA32I, 4>();
texture_create_upload_read<GPU_RGBA16, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip_INT_RGBA32I)
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16);
#if 0
/* Isn't supported natively on NVidia/Vulkan. */
static void test_texture_roundtrip_INT_RGB32I()
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA32F()
{
texture_create_upload_read_int<GPU_RGB32I, 3>();
texture_create_upload_read<GPU_RGBA32F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip_INT_RGB32I)
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA32F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG8()
{
texture_create_upload_read<GPU_RG8, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG8);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16F()
{
texture_create_upload_read<GPU_RG16F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16()
{
texture_create_upload_read<GPU_RG16, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG32F()
{
texture_create_upload_read<GPU_RG32F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG32F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R8()
{
texture_create_upload_read<GPU_R8, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R8);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R16F()
{
texture_create_upload_read<GPU_R16F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R16F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R16()
{
texture_create_upload_read<GPU_R16, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R16);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R32F()
{
texture_create_upload_read<GPU_R32F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R32F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB10_A2()
{
texture_create_upload_read<GPU_RGB10_A2, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB10_A2);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB10_A2UI()
{
texture_create_upload_read<GPU_RGB10_A2UI, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB10_A2UI);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R11F_G11F_B10F()
{
texture_create_upload_read<GPU_R11F_G11F_B10F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R11F_G11F_B10F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8()
{
texture_create_upload_read<GPU_SRGB8_A8, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_SNORM()
{
texture_create_upload_read<GPU_RGBA8_SNORM, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16_SNORM()
{
texture_create_upload_read<GPU_RGBA16_SNORM, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB8()
{
texture_create_upload_read<GPU_RGB8, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB8);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB8_SNORM()
{
texture_create_upload_read<GPU_RGB8_SNORM, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB8_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16F()
{
texture_create_upload_read<GPU_RGB16F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16()
{
texture_create_upload_read<GPU_RGB16, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16_SNORM()
{
texture_create_upload_read<GPU_RGB16_SNORM, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB32F()
{
texture_create_upload_read<GPU_RGB32F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB32F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG8_SNORM()
{
texture_create_upload_read<GPU_RG8_SNORM, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG8_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16_SNORM()
{
texture_create_upload_read<GPU_RG16_SNORM, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R8_SNORM()
{
texture_create_upload_read<GPU_R8_SNORM, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R8_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R16_SNORM()
{
texture_create_upload_read<GPU_R16_SNORM, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R16_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT1()
{
texture_create_upload_read<GPU_SRGB8_A8_DXT1, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT1);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT3()
{
texture_create_upload_read<GPU_SRGB8_A8_DXT3, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT3);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT5()
{
texture_create_upload_read<GPU_SRGB8_A8_DXT5, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT5);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT1()
{
texture_create_upload_read<GPU_RGBA8_DXT1, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT1);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT3()
{
texture_create_upload_read<GPU_RGBA8_DXT3, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT3);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT5()
{
texture_create_upload_read<GPU_RGBA8_DXT5, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT5);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8()
{
texture_create_upload_read<GPU_SRGB8, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB9_E5()
{
texture_create_upload_read<GPU_RGB9_E5, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB9_E5);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT32F()
{
texture_create_upload_read<GPU_DEPTH_COMPONENT32F, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT32F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT24()
{
texture_create_upload_read<GPU_DEPTH_COMPONENT24, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT24);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT16()
{
texture_create_upload_read<GPU_DEPTH_COMPONENT16, GPU_DATA_FLOAT, float>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT16);
#endif
/* \} */
static void test_texture_roundtrip_INT_RG32I()
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_HALF_FLOAT
* \{ */
#if 0
static void test_texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RGBA16F()
{
texture_create_upload_read_int<GPU_RG32I, 2>();
texture_create_upload_read<GPU_RGBA16F, GPU_DATA_HALF_FLOAT, half>();
}
GPU_TEST(texture_roundtrip_INT_RG32I)
GPU_TEST(texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RGBA16F);
static void test_texture_roundtrip_INT_R32I()
static void test_texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RG16F()
{
texture_create_upload_read_int<GPU_R32I, 1>();
texture_create_upload_read<GPU_RG16F, GPU_DATA_HALF_FLOAT, half>();
}
GPU_TEST(texture_roundtrip_INT_R32I)
GPU_TEST(texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RG16F);
static void test_texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_R16F()
{
texture_create_upload_read<GPU_R16F, GPU_DATA_HALF_FLOAT, half>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_R16F);
static void test_texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RGB16F()
{
texture_create_upload_read<GPU_RGB16F, GPU_DATA_HALF_FLOAT, half>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RGB16F);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_INT
* \{ */
#if 0
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGBA8I()
{
texture_create_upload_read<GPU_RGBA8I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGBA8I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGBA16I()
{
texture_create_upload_read<GPU_RGBA16I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGBA16I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGBA32I()
{
texture_create_upload_read<GPU_RGBA32I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGBA32I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RG8I()
{
texture_create_upload_read<GPU_RG8I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RG8I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RG16I()
{
texture_create_upload_read<GPU_RG16I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RG16I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RG32I()
{
texture_create_upload_read<GPU_RG32I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RG32I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_R8I()
{
texture_create_upload_read<GPU_R8I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_R8I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_R16I()
{
texture_create_upload_read<GPU_R16I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_R16I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_R32I()
{
texture_create_upload_read<GPU_R32I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_R32I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGB8I()
{
texture_create_upload_read<GPU_RGB8I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGB8I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGB16I()
{
texture_create_upload_read<GPU_RGB16I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGB16I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGB32I()
{
texture_create_upload_read<GPU_RGB32I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGB32I);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_UINT
* \{ */
#if 0
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGBA8UI()
{
texture_create_upload_read<GPU_RGBA8UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGBA8UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGBA16UI()
{
texture_create_upload_read<GPU_RGBA16UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGBA16UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGBA32UI()
{
texture_create_upload_read<GPU_RGBA32UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGBA32UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RG8UI()
{
texture_create_upload_read<GPU_RG8UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RG8UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RG16UI()
{
texture_create_upload_read<GPU_RG16UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RG16UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RG32UI()
{
texture_create_upload_read<GPU_RG32UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RG32UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_R8UI()
{
texture_create_upload_read<GPU_R8UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_R8UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_R16UI()
{
texture_create_upload_read<GPU_R16UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_R16UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_R32UI()
{
texture_create_upload_read<GPU_R32UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_R32UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH32F_STENCIL8()
{
texture_create_upload_read<GPU_DEPTH32F_STENCIL8, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH32F_STENCIL8);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH24_STENCIL8()
{
texture_create_upload_read<GPU_DEPTH24_STENCIL8, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH24_STENCIL8);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGB8UI()
{
texture_create_upload_read<GPU_RGB8UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGB8UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGB16UI()
{
texture_create_upload_read<GPU_RGB16UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGB16UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGB32UI()
{
texture_create_upload_read<GPU_RGB32UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGB32UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT32F()
{
texture_create_upload_read<GPU_DEPTH_COMPONENT32F, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT32F);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT24()
{
texture_create_upload_read<GPU_DEPTH_COMPONENT24, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT24);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT16()
{
texture_create_upload_read<GPU_DEPTH_COMPONENT16, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT16);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_UBYTE
* \{ */
#if 0
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RGBA8UI()
{
texture_create_upload_read<GPU_RGBA8UI, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RGBA8UI);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RGBA8()
{
texture_create_upload_read<GPU_RGBA8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RGBA8);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RG8UI()
{
texture_create_upload_read<GPU_RG8UI, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RG8UI);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RG8()
{
texture_create_upload_read<GPU_RG8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RG8);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_R8UI()
{
texture_create_upload_read<GPU_R8UI, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_R8UI);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_R8()
{
texture_create_upload_read<GPU_R8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_R8);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_SRGB8_A8()
{
texture_create_upload_read<GPU_SRGB8_A8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_SRGB8_A8);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RGB8I()
{
texture_create_upload_read<GPU_RGB8I, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RGB8I);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RGB8()
{
texture_create_upload_read<GPU_RGB8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RGB8);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_SRGB8()
{
texture_create_upload_read<GPU_SRGB8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_SRGB8);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_UINT_24_8
* \{ */
#if 0
static void test_texture_roundtrip__GPU_DATA_UINT_24_8__GPU_DEPTH32F_STENCIL8()
{
texture_create_upload_read<GPU_DEPTH32F_STENCIL8, GPU_DATA_UINT_24_8, void>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT_24_8__GPU_DEPTH32F_STENCIL8);
static void test_texture_roundtrip__GPU_DATA_UINT_24_8__GPU_DEPTH24_STENCIL8()
{
texture_create_upload_read<GPU_DEPTH24_STENCIL8, GPU_DATA_UINT_24_8, void>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT_24_8__GPU_DEPTH24_STENCIL8);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_10_11_11_REV
* \{ */
#if 0
static void test_texture_roundtrip__GPU_DATA_10_11_11_REV__GPU_R11F_G11F_B10F()
{
texture_create_upload_read<GPU_R11F_G11F_B10F, GPU_DATA_10_11_11_REV, void>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_10_11_11_REV__GPU_R11F_G11F_B10F);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_2_10_10_10_REV
* \{ */
#if 0
static void test_texture_roundtrip__GPU_DATA_2_10_10_10_REV__GPU_RGB10_A2()
{
texture_create_upload_read<GPU_RGB10_A2, GPU_DATA_2_10_10_10_REV, void>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_2_10_10_10_REV__GPU_RGB10_A2);
static void test_texture_roundtrip__GPU_DATA_2_10_10_10_REV__GPU_RGB10_A2UI()
{
texture_create_upload_read<GPU_RGB10_A2UI, GPU_DATA_2_10_10_10_REV, void>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_2_10_10_10_REV__GPU_RGB10_A2UI);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Generate test cases.
*
* The next section is kept for convenience to regenerate the test cases.
*
* \{ */
#if 0
static std::string to_prim_type_string(eGPUDataFormat host_format)
{
switch (host_format) {
case GPU_DATA_FLOAT:
return std::string("float");
case GPU_DATA_HALF_FLOAT:
return std::string("half");
case GPU_DATA_INT:
return std::string("int32_t");
case GPU_DATA_UINT:
return std::string("uint32_t");
case GPU_DATA_UBYTE:
return std::string("uint8_t");
case GPU_DATA_UINT_24_8:
case GPU_DATA_10_11_11_REV:
case GPU_DATA_2_10_10_10_REV:
return std::string("void");
}
return std::string("UNKNOWN");
}
static std::string to_string(eGPUDataFormat host_format)
{
switch (host_format) {
case GPU_DATA_FLOAT:
return std::string("GPU_DATA_FLOAT");
case GPU_DATA_HALF_FLOAT:
return std::string("GPU_DATA_HALF_FLOAT");
case GPU_DATA_INT:
return std::string("GPU_DATA_INT");
case GPU_DATA_UINT:
return std::string("GPU_DATA_UINT");
case GPU_DATA_UBYTE:
return std::string("GPU_DATA_UBYTE");
case GPU_DATA_UINT_24_8:
return std::string("GPU_DATA_UINT_24_8");
case GPU_DATA_10_11_11_REV:
return std::string("GPU_DATA_10_11_11_REV");
case GPU_DATA_2_10_10_10_REV:
return std::string("GPU_DATA_2_10_10_10_REV");
}
return std::string("UNKNOWN");
}
static std::string to_string(eGPUTextureFormat texture_format)
{
return std::string("GPU_") + std::string(GPU_texture_format_name(texture_format));
}
TEST(gpu_util, generate_test_cases)
{
Vector<eGPUDataFormat> host_formats;
host_formats.append(GPU_DATA_FLOAT);
host_formats.append(GPU_DATA_HALF_FLOAT);
host_formats.append(GPU_DATA_INT);
host_formats.append(GPU_DATA_UINT);
host_formats.append(GPU_DATA_UBYTE);
host_formats.append(GPU_DATA_UINT_24_8);
host_formats.append(GPU_DATA_10_11_11_REV);
host_formats.append(GPU_DATA_2_10_10_10_REV);
Vector<eGPUTextureFormat> texture_formats;
texture_formats.append(GPU_RGBA8UI);
texture_formats.append(GPU_RGBA8I);
texture_formats.append(GPU_RGBA8);
texture_formats.append(GPU_RGBA16UI);
texture_formats.append(GPU_RGBA16I);
texture_formats.append(GPU_RGBA16F);
texture_formats.append(GPU_RGBA16);
texture_formats.append(GPU_RGBA32UI);
texture_formats.append(GPU_RGBA32I);
texture_formats.append(GPU_RGBA32F);
texture_formats.append(GPU_RG8UI);
texture_formats.append(GPU_RG8I);
texture_formats.append(GPU_RG8);
texture_formats.append(GPU_RG16UI);
texture_formats.append(GPU_RG16I);
texture_formats.append(GPU_RG16F);
texture_formats.append(GPU_RG16);
texture_formats.append(GPU_RG32UI);
texture_formats.append(GPU_RG32I);
texture_formats.append(GPU_RG32F);
texture_formats.append(GPU_R8UI);
texture_formats.append(GPU_R8I);
texture_formats.append(GPU_R8);
texture_formats.append(GPU_R16UI);
texture_formats.append(GPU_R16I);
texture_formats.append(GPU_R16F);
texture_formats.append(GPU_R16);
texture_formats.append(GPU_R32UI);
texture_formats.append(GPU_R32I);
texture_formats.append(GPU_R32F);
texture_formats.append(GPU_RGB10_A2);
texture_formats.append(GPU_RGB10_A2UI);
texture_formats.append(GPU_R11F_G11F_B10F);
texture_formats.append(GPU_DEPTH32F_STENCIL8);
texture_formats.append(GPU_DEPTH24_STENCIL8);
texture_formats.append(GPU_SRGB8_A8);
texture_formats.append(GPU_RGBA8_SNORM);
texture_formats.append(GPU_RGBA16_SNORM);
texture_formats.append(GPU_RGB8UI);
texture_formats.append(GPU_RGB8I);
texture_formats.append(GPU_RGB8);
texture_formats.append(GPU_RGB8_SNORM);
texture_formats.append(GPU_RGB16UI);
texture_formats.append(GPU_RGB16I);
texture_formats.append(GPU_RGB16F);
texture_formats.append(GPU_RGB16);
texture_formats.append(GPU_RGB16_SNORM);
texture_formats.append(GPU_RGB32UI);
texture_formats.append(GPU_RGB32I);
texture_formats.append(GPU_RGB32F);
texture_formats.append(GPU_RG8_SNORM);
texture_formats.append(GPU_RG16_SNORM);
texture_formats.append(GPU_R8_SNORM);
texture_formats.append(GPU_R16_SNORM);
texture_formats.append(GPU_SRGB8_A8_DXT1);
texture_formats.append(GPU_SRGB8_A8_DXT3);
texture_formats.append(GPU_SRGB8_A8_DXT5);
texture_formats.append(GPU_RGBA8_DXT1);
texture_formats.append(GPU_RGBA8_DXT3);
texture_formats.append(GPU_RGBA8_DXT5);
texture_formats.append(GPU_SRGB8);
texture_formats.append(GPU_RGB9_E5);
texture_formats.append(GPU_DEPTH_COMPONENT32F);
texture_formats.append(GPU_DEPTH_COMPONENT24);
texture_formats.append(GPU_DEPTH_COMPONENT16);
for (eGPUDataFormat host_format : host_formats) {
std::cout << "/* -------------------------------------------------------------------- */\n";
std::cout << "/** \\name Roundtrip testing " << to_string(host_format) << "\n";
std::cout << " * \\{ */\n\n";
for (eGPUTextureFormat texture_format : texture_formats) {
if (!validate_data_format(texture_format, host_format)) {
continue;
}
std::cout << "static void test_texture_roundtrip__" << to_string(host_format) << "__"
<< to_string(texture_format) << "()\n";
std::cout << "{\n";
std::cout << " texture_create_upload_read<" << to_string(texture_format) << ", "
<< to_string(host_format) << ", " << to_prim_type_string(host_format)
<< ">();\n";
std::cout << "}\n";
std::cout << "GPU_TEST(texture_roundtrip__" << to_string(host_format) << "__"
<< to_string(texture_format) << ");\n\n";
}
std::cout << "/* \\} */\n\n";
}
}
#endif
/** \} */
} // namespace blender::gpu::tests

View File

@@ -23,10 +23,20 @@ static ConversionType type_of_conversion_float(eGPUTextureFormat device_format)
case GPU_RGB16F:
return ConversionType::FLOAT_TO_HALF;
case GPU_RGBA8:
case GPU_RG8:
case GPU_R8:
return ConversionType::FLOAT_TO_UNORM8;
case GPU_RGBA8_SNORM:
case GPU_RGB8_SNORM:
case GPU_RG8_SNORM:
case GPU_R8_SNORM:
return ConversionType::FLOAT_TO_SNORM8;
case GPU_RGB32F: /* GPU_RGB32F Not supported by vendors. */
case GPU_RGBA8UI:
case GPU_RGBA8I:
case GPU_RGBA8:
case GPU_RGBA16UI:
case GPU_RGBA16I:
case GPU_RGBA16:
@@ -34,7 +44,6 @@ static ConversionType type_of_conversion_float(eGPUTextureFormat device_format)
case GPU_RGBA32I:
case GPU_RG8UI:
case GPU_RG8I:
case GPU_RG8:
case GPU_RG16UI:
case GPU_RG16I:
case GPU_RG16:
@@ -42,7 +51,6 @@ static ConversionType type_of_conversion_float(eGPUTextureFormat device_format)
case GPU_RG32I:
case GPU_R8UI:
case GPU_R8I:
case GPU_R8:
case GPU_R16UI:
case GPU_R16I:
case GPU_R16:
@@ -54,21 +62,17 @@
case GPU_DEPTH32F_STENCIL8:
case GPU_DEPTH24_STENCIL8:
case GPU_SRGB8_A8:
case GPU_RGBA8_SNORM:
case GPU_RGBA16_SNORM:
case GPU_RGB8UI:
case GPU_RGB8I:
case GPU_RGB8:
case GPU_RGB8_SNORM:
case GPU_RGB16UI:
case GPU_RGB16I:
case GPU_RGB16:
case GPU_RGB16_SNORM:
case GPU_RGB32UI:
case GPU_RGB32I:
case GPU_RG8_SNORM:
case GPU_RG16_SNORM:
case GPU_R8_SNORM:
case GPU_R16_SNORM:
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
@@ -93,29 +97,33 @@ static ConversionType type_of_conversion_int(eGPUTextureFormat device_format)
case GPU_R32I:
return ConversionType::PASS_THROUGH;
case GPU_RGBA8UI:
case GPU_RGBA16I:
case GPU_RG16I:
case GPU_R16I:
return ConversionType::I32_TO_I16;
case GPU_RGBA8I:
case GPU_RG8I:
case GPU_R8I:
return ConversionType::I32_TO_I8;
case GPU_RGBA8UI:
case GPU_RGBA8:
case GPU_RGBA16UI:
case GPU_RGBA16I:
case GPU_RGBA16F:
case GPU_RGBA16:
case GPU_RGBA32UI:
case GPU_RGBA32F:
case GPU_RG8UI:
case GPU_RG8I:
case GPU_RG8:
case GPU_RG16UI:
case GPU_RG16I:
case GPU_RG16F:
case GPU_RG16:
case GPU_RG32UI:
case GPU_RG32F:
case GPU_RG16:
case GPU_R8UI:
case GPU_R8I:
case GPU_R8:
case GPU_R16UI:
case GPU_R16I:
case GPU_R16F:
case GPU_R16:
case GPU_R32UI:
@@ -175,6 +183,10 @@ static ConversionType type_of_conversion_uint(eGPUTextureFormat device_format)
return ConversionType::UI32_TO_UI16;
case GPU_RGBA8UI:
case GPU_RG8UI:
case GPU_R8UI:
return ConversionType::UI32_TO_UI8;
case GPU_RGBA8I:
case GPU_RGBA8:
case GPU_RGBA16I:
@@ -182,7 +194,6 @@ static ConversionType type_of_conversion_uint(eGPUTextureFormat device_format)
case GPU_RGBA16:
case GPU_RGBA32I:
case GPU_RGBA32F:
case GPU_RG8UI:
case GPU_RG8I:
case GPU_RG8:
case GPU_RG16I:
@@ -190,7 +201,6 @@ static ConversionType type_of_conversion_uint(eGPUTextureFormat device_format)
case GPU_RG16:
case GPU_RG32I:
case GPU_RG32F:
case GPU_R8UI:
case GPU_R8I:
case GPU_R8:
case GPU_R16I:
@@ -310,6 +320,81 @@ static ConversionType type_of_conversion_half(eGPUTextureFormat device_format)
return ConversionType::UNSUPPORTED;
}
static ConversionType type_of_conversion_ubyte(eGPUTextureFormat device_format)
{
switch (device_format) {
case GPU_RGBA8UI:
case GPU_RG8UI:
case GPU_R8UI:
return ConversionType::PASS_THROUGH;
case GPU_RGBA8I:
case GPU_RGBA8:
case GPU_RGBA16UI:
case GPU_RGBA16I:
case GPU_RGBA16F:
case GPU_RGBA16:
case GPU_RGBA32UI:
case GPU_RGBA32I:
case GPU_RGBA32F:
case GPU_RG8I:
case GPU_RG8:
case GPU_RG16UI:
case GPU_RG16I:
case GPU_RG16F:
case GPU_RG16:
case GPU_RG32UI:
case GPU_RG32I:
case GPU_RG32F:
case GPU_R8I:
case GPU_R8:
case GPU_R16UI:
case GPU_R16I:
case GPU_R16F:
case GPU_R16:
case GPU_R32UI:
case GPU_R32I:
case GPU_R32F:
case GPU_RGB10_A2:
case GPU_RGB10_A2UI:
case GPU_R11F_G11F_B10F:
case GPU_DEPTH32F_STENCIL8:
case GPU_DEPTH24_STENCIL8:
case GPU_SRGB8_A8:
case GPU_RGBA8_SNORM:
case GPU_RGBA16_SNORM:
case GPU_RGB8UI:
case GPU_RGB8I:
case GPU_RGB8:
case GPU_RGB8_SNORM:
case GPU_RGB16UI:
case GPU_RGB16I:
case GPU_RGB16F:
case GPU_RGB16:
case GPU_RGB16_SNORM:
case GPU_RGB32UI:
case GPU_RGB32I:
case GPU_RGB32F:
case GPU_RG8_SNORM:
case GPU_RG16_SNORM:
case GPU_R8_SNORM:
case GPU_R16_SNORM:
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
case GPU_SRGB8:
case GPU_RGB9_E5:
case GPU_DEPTH_COMPONENT32F:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
return ConversionType::UNSUPPORTED;
}
return ConversionType::UNSUPPORTED;
}
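As with the other type_of_conversion_* helpers, the switch spells out every eGPUTextureFormat and omits a default: label, so the compiler can warn when a new format is added but left unclassified. A minimal sketch of that idiom (hypothetical enum, not from the diff):
// With no `default:`, -Wswitch flags any Fmt member missing a case below.
enum class Fmt { A, B };
static int classify(Fmt f)
{
  switch (f) {
    case Fmt::A:
      return 1;
    case Fmt::B:
      return 2;
  }
  return 0; /* Unreachable fallback to satisfy the return-type check. */
}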
ConversionType conversion_type_for_update(eGPUDataFormat host_format,
eGPUTextureFormat device_format)
{
@@ -324,8 +409,9 @@ ConversionType conversion_type_for_update(eGPUDataFormat host_format,
return type_of_conversion_int(device_format);
case GPU_DATA_HALF_FLOAT:
return type_of_conversion_half(device_format);
case GPU_DATA_UBYTE:
return type_of_conversion_ubyte(device_format);
case GPU_DATA_UINT_24_8:
case GPU_DATA_10_11_11_REV:
case GPU_DATA_2_10_10_10_REV:
@@ -337,24 +423,33 @@
static ConversionType invert(ConversionType type)
{
#define CASE_SINGLE(a, b) \
case ConversionType::a##_TO_##b: \
return ConversionType::b##_TO_##a;
#define CASE_PAIR(a, b) \
CASE_SINGLE(a, b) \
CASE_SINGLE(b, a)
switch (type) {
case ConversionType::PASS_THROUGH:
return ConversionType::PASS_THROUGH;
case ConversionType::UI16_TO_UI32:
return ConversionType::UI32_TO_UI16;
case ConversionType::UI32_TO_UI16:
return ConversionType::UI16_TO_UI32;
case ConversionType::FLOAT_TO_HALF:
return ConversionType::HALF_TO_FLOAT;
case ConversionType::HALF_TO_FLOAT:
return ConversionType::FLOAT_TO_HALF;
CASE_PAIR(FLOAT, UNORM8)
CASE_PAIR(FLOAT, SNORM8)
CASE_PAIR(UI32, UI16)
CASE_PAIR(I32, I16)
CASE_PAIR(UI32, UI8)
CASE_PAIR(I32, I8)
CASE_PAIR(FLOAT, HALF)
case ConversionType::UNSUPPORTED:
return ConversionType::UNSUPPORTED;
}
#undef CASE_PAIR
#undef CASE_SINGLE
return ConversionType::UNSUPPORTED;
}
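For reference, each CASE_PAIR expands to the two symmetric cases, so for example CASE_PAIR(FLOAT, UNORM8) is equivalent to:
case ConversionType::FLOAT_TO_UNORM8:
  return ConversionType::UNORM8_TO_FLOAT;
case ConversionType::UNORM8_TO_FLOAT:
  return ConversionType::FLOAT_TO_UNORM8;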
@@ -365,15 +460,169 @@ ConversionType conversion_type_for_read(eGPUDataFormat host_format,
}
/* Copy the contents of src to dst without performing any actual conversion. */
template<typename SourceType, typename DestinationType>
template<typename DestinationType, typename SourceType>
void copy_unchecked(MutableSpan<DestinationType> dst, Span<SourceType> src)
{
BLI_assert(src.size() == dst.size());
for (SourceType index : IndexRange(src.size())) {
for (int64_t index : IndexRange(src.size())) {
dst[index] = src[index];
}
}
template<typename DestinationType, typename SourceType>
void copy_unchecked(void *dst_memory,
const void *src_memory,
eGPUTextureFormat device_format,
size_t sample_len)
{
size_t total_components = to_component_len(device_format) * sample_len;
Span<SourceType> src = Span<SourceType>(static_cast<const SourceType *>(src_memory),
total_components);
MutableSpan<DestinationType> dst = MutableSpan<DestinationType>(
static_cast<DestinationType *>(dst_memory), total_components);
copy_unchecked<DestinationType, SourceType>(dst, src);
}
/* Float <=> unsigned normalized */
static uint8_t clamp_unorm(int32_t unclamped)
{
if (unclamped < 0) {
return 0;
}
if (unclamped > 255) {
return 255;
}
return uint8_t(unclamped);
}
template<typename DestinationType, typename SourceType>
static DestinationType to_unorm(SourceType value)
{
return clamp_unorm(value * 255.0f);
}
template<typename DestinationType, typename SourceType>
static DestinationType from_unorm(SourceType value)
{
return DestinationType(value / 255.0f);
}
template<typename DestinationType, typename SourceType>
void float_to_unorm(MutableSpan<DestinationType> dst, Span<SourceType> src)
{
BLI_assert(src.size() == dst.size());
for (int64_t index : IndexRange(src.size())) {
dst[index] = to_unorm<DestinationType, SourceType>(src[index]);
}
}
template<typename DestinationType, typename SourceType>
void float_to_unorm(void *dst_memory,
const void *src_memory,
eGPUTextureFormat device_format,
size_t sample_len)
{
size_t total_components = to_component_len(device_format) * sample_len;
Span<SourceType> src = Span<SourceType>(static_cast<const SourceType *>(src_memory),
total_components);
MutableSpan<DestinationType> dst = MutableSpan<DestinationType>(
static_cast<DestinationType *>(dst_memory), total_components);
float_to_unorm<DestinationType, SourceType>(dst, src);
}
template<typename DestinationType, typename SourceType>
void unorm_to_float(MutableSpan<DestinationType> dst, Span<SourceType> src)
{
BLI_assert(src.size() == dst.size());
for (int64_t index : IndexRange(src.size())) {
dst[index] = from_unorm<DestinationType, SourceType>(src[index]);
}
}
template<typename DestinationType, typename SourceType>
void unorm_to_float(void *dst_memory,
const void *src_memory,
eGPUTextureFormat device_format,
size_t sample_len)
{
size_t total_components = to_component_len(device_format) * sample_len;
Span<SourceType> src = Span<SourceType>(static_cast<const SourceType *>(src_memory),
total_components);
MutableSpan<DestinationType> dst = MutableSpan<DestinationType>(
static_cast<DestinationType *>(dst_memory), total_components);
unorm_to_float<DestinationType, SourceType>(dst, src);
}
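A worked example of the mapping as written (values invented for illustration): the int32_t parameter truncates rather than rounds, so the roundtrip is lossy at the midpoint.
uint8_t u = to_unorm<uint8_t, float>(0.5f); /* 0.5 * 255 = 127.5, truncated to 127. */
float f = from_unorm<float, uint8_t>(u); /* 127 / 255 ~= 0.498, not exactly 0.5. */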
/* Float <=> signed normalized */
static int8_t clamp_snorm(int32_t unclamped)
{
if (unclamped < -127) {
return -127;
}
if (unclamped > 127) {
return 127;
}
return int8_t(unclamped);
}
template<typename DestinationType, typename SourceType>
static DestinationType to_snorm(SourceType value)
{
return clamp_snorm(value * 128.0f);
}
template<typename DestinationType, typename SourceType>
static DestinationType from_snorm(SourceType value)
{
return DestinationType(value / 128.0f);
}
template<typename DestinationType, typename SourceType>
void float_to_snorm(MutableSpan<DestinationType> dst, Span<SourceType> src)
{
BLI_assert(src.size() == dst.size());
for (int64_t index : IndexRange(src.size())) {
dst[index] = to_snorm<DestinationType, SourceType>(src[index]);
}
}
template<typename DestinationType, typename SourceType>
void float_to_snorm(void *dst_memory,
const void *src_memory,
eGPUTextureFormat device_format,
size_t sample_len)
{
size_t total_components = to_component_len(device_format) * sample_len;
Span<SourceType> src = Span<SourceType>(static_cast<const SourceType *>(src_memory),
total_components);
MutableSpan<DestinationType> dst = MutableSpan<DestinationType>(
static_cast<DestinationType *>(dst_memory), total_components);
float_to_snorm<DestinationType, SourceType>(dst, src);
}
template<typename DestinationType, typename SourceType>
void snorm_to_float(MutableSpan<DestinationType> dst, Span<SourceType> src)
{
BLI_assert(src.size() == dst.size());
for (int64_t index : IndexRange(src.size())) {
dst[index] = from_snorm<DestinationType, SourceType>(src[index]);
}
}
template<typename DestinationType, typename SourceType>
void snorm_to_float(void *dst_memory,
const void *src_memory,
eGPUTextureFormat device_format,
size_t sample_len)
{
size_t total_components = to_component_len(device_format) * sample_len;
Span<SourceType> src = Span<SourceType>(static_cast<const SourceType *>(src_memory),
total_components);
MutableSpan<DestinationType> dst = MutableSpan<DestinationType>(
static_cast<DestinationType *>(dst_memory), total_components);
snorm_to_float<DestinationType, SourceType>(dst, src);
}
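A worked example with the 128 scale used here (values invented for illustration): halves roundtrip exactly, while +1.0 clamps.
int8_t s = to_snorm<int8_t, float>(0.5f); /* 0.5 * 128 = 64. */
float f = from_snorm<float, int8_t>(s); /* 64 / 128 = 0.5, exact. */
int8_t c = to_snorm<int8_t, float>(1.0f); /* 128 clamps to 127, reads back as ~0.992. */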
void convert(ConversionType type,
eGPUTextureFormat device_format,
size_t sample_len,
@@ -388,25 +637,51 @@ void convert(ConversionType type,
memcpy(dst_memory, src_memory, sample_len * to_bytesize(device_format));
return;
case ConversionType::UI16_TO_UI32: {
size_t component_len = to_component_len(device_format) * sample_len;
Span<uint16_t> src = Span<uint16_t>(static_cast<const uint16_t *>(src_memory),
component_len);
MutableSpan<uint32_t> dst = MutableSpan<uint32_t>(static_cast<uint32_t *>(dst_memory),
component_len);
copy_unchecked<uint16_t, uint32_t>(dst, src);
case ConversionType::UI32_TO_UI16:
copy_unchecked<uint16_t, uint32_t>(dst_memory, src_memory, device_format, sample_len);
break;
}
case ConversionType::UI32_TO_UI16: {
size_t component_len = to_component_len(device_format) * sample_len;
Span<uint32_t> src = Span<uint32_t>(static_cast<const uint32_t *>(src_memory),
component_len);
MutableSpan<uint16_t> dst = MutableSpan<uint16_t>(static_cast<uint16_t *>(dst_memory),
component_len);
copy_unchecked<uint32_t, uint16_t>(dst, src);
case ConversionType::UI16_TO_UI32:
copy_unchecked<uint32_t, uint16_t>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::UI32_TO_UI8:
copy_unchecked<uint8_t, uint32_t>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::UI8_TO_UI32:
copy_unchecked<uint32_t, uint8_t>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::I32_TO_I16:
copy_unchecked<int16_t, int32_t>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::I16_TO_I32:
copy_unchecked<int32_t, int16_t>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::I32_TO_I8:
copy_unchecked<int8_t, int32_t>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::I8_TO_I32:
copy_unchecked<int32_t, int8_t>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::FLOAT_TO_UNORM8:
float_to_unorm<uint8_t, float>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::UNORM8_TO_FLOAT:
unorm_to_float<float, uint8_t>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::FLOAT_TO_SNORM8:
float_to_snorm<int8_t, float>(dst_memory, src_memory, device_format, sample_len);
break;
case ConversionType::SNORM8_TO_FLOAT:
snorm_to_float<float, int8_t>(dst_memory, src_memory, device_format, sample_len);
break;
}
case ConversionType::FLOAT_TO_HALF:
case ConversionType::HALF_TO_FLOAT:

View File

@@ -16,17 +16,23 @@ enum class ConversionType {
/** No conversion needed, result can be directly read back to host memory. */
PASS_THROUGH,
UI16_TO_UI32,
UI32_TO_UI16,
FLOAT_TO_UNORM8,
UNORM8_TO_FLOAT,
/*
FLOAT_TO_SNORM8,
SNORM8_TO_FLOAT,
UI32_TO_UI16,
UI16_TO_UI32,
UI32_TO_UI8,
UI8_TO_UI32,
I32_TO_I16,
I16_TO_I32,
I32_TO_I8,
I8_TO_I32,
UI8_TO_I32,
UI8_TO_FLOAT,
UI8_TO_UBYTE,
*/
/** Convert device 16F to floats. */
HALF_TO_FLOAT,

View File

@@ -149,7 +149,7 @@ void *VKTexture::read(int mip, eGPUDataFormat format)
}
void VKTexture::update_sub(
int mip, int /*offset*/[3], int extent[3], eGPUDataFormat format, const void *data)
int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data)
{
if (!is_allocated()) {
allocate();
@@ -173,6 +173,9 @@ void VKTexture::update_sub(
region.imageExtent.width = extent[0];
region.imageExtent.height = extent[1];
region.imageExtent.depth = extent[2];
region.imageOffset.x = offset[0];
region.imageOffset.y = offset[1];
region.imageOffset.z = offset[2];
region.imageSubresource.aspectMask = to_vk_image_aspect_flag_bits(format_);
region.imageSubresource.mipLevel = mip;
region.imageSubresource.layerCount = 1;
@@ -180,8 +183,6 @@
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(*this, staging_buffer, Span<VkBufferImageCopy>(&region, 1));
command_buffer.submit();
/* TODO: add support for offset. */
}
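With imageOffset wired into the VkBufferImageCopy region, sub-region updates now land at the requested location instead of the origin. A hypothetical usage sketch through the public API (values invented for illustration; see GPU_texture.h for the exact signature):
// Hypothetical sketch: overwrite a 2x2 RGBA float block at (8, 8) of mip 0.
float block[2 * 2 * 4] = {0.0f};
GPU_texture_update_sub(texture, GPU_DATA_FLOAT, block, 8, 8, 0, 2, 2, 1);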
void VKTexture::update_sub(int /*offset*/[3],
@@ -244,7 +245,7 @@ bool VKTexture::allocate()
image_info.samples = VK_SAMPLE_COUNT_1_BIT;
VkResult result;
if (G.debug &= G_DEBUG_GPU) {
if (G.debug & G_DEBUG_GPU) {
VkImageFormatProperties image_format = {};
result = vkGetPhysicalDeviceImageFormatProperties(context.physical_device_get(),
image_info.format,