Metal: Resolve runtime issues with texture views #107167

Merged
Jeroen Bakker merged 1 commit from Jason-Fielder/blender:MetalEEVEENext_TextureViewCompatibility into main 2023-05-01 09:14:06 +02:00
9 changed files with 20 additions and 16 deletions

View File

@@ -703,7 +703,7 @@ void GPU_texture_free(GPUTexture *texture);
  * TODO(fclem): Target conversion (ex: Texture 2D as Texture 2D Array) is not implemented yet.
  */
 GPUTexture *GPU_texture_create_view(const char *name,
-                                    const GPUTexture *source_texture,
+                                    GPUTexture *source_texture,
                                     eGPUTextureFormat view_format,
                                     int mip_start,
                                     int mip_len,
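Dropping const here acknowledges that creating a view can mutate the source texture (the Metal backend lazily bakes it; see ensure_baked() further down). A minimal usage sketch, assuming the 3.5-era GPU_texture_create_2d signature and the trailing layer/stencil parameters of GPU_texture_create_view that are truncated in the hunk above; names and sizes are illustrative, not from the patch:

/* Source must be created with GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
 * which the Metal backend now asserts (see the Metal diff below). */
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW;
GPUTexture *source = GPU_texture_create_2d("source_tex", 512, 512, 4, GPU_RGBA16F, usage, nullptr);
/* View over mip level 1 only, first layer, same format. */
GPUTexture *mip_view = GPU_texture_create_view(
    "source_tex_mip1", source, GPU_RGBA16F, 1, 1, 0, 1, false, false);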

View File

@@ -132,7 +132,7 @@ bool Texture::init_buffer(GPUVertBuf *vbo, eGPUTextureFormat format)
   return this->init_internal(vbo);
 }

-bool Texture::init_view(const GPUTexture *src_,
+bool Texture::init_view(GPUTexture *src_,
                         eGPUTextureFormat format,
                         eGPUTextureType type,
                         int mip_start,
@@ -448,7 +448,7 @@ GPUTexture *GPU_texture_create_error(int dimension, bool is_array)
 }

 GPUTexture *GPU_texture_create_view(const char *name,
-                                    const GPUTexture *src,
+                                    GPUTexture *src,
                                     eGPUTextureFormat format,
                                     int mip_start,
                                     int mip_len,

View File

@@ -132,7 +132,7 @@ class Texture {
   bool init_3D(int w, int h, int d, int mip_len, eGPUTextureFormat format);
   bool init_cubemap(int w, int layers, int mip_len, eGPUTextureFormat format);
   bool init_buffer(GPUVertBuf *vbo, eGPUTextureFormat format);
-  bool init_view(const GPUTexture *src,
+  bool init_view(GPUTexture *src,
                  eGPUTextureFormat format,
                  eGPUTextureType type,
                  int mip_start,
@@ -313,7 +313,7 @@ class Texture {
  protected:
   virtual bool init_internal() = 0;
   virtual bool init_internal(GPUVertBuf *vbo) = 0;
-  virtual bool init_internal(const GPUTexture *src, int mip_offset, int layer_offset) = 0;
+  virtual bool init_internal(GPUTexture *src, int mip_offset, int layer_offset) = 0;
 };

 /* Syntactic sugar. */

View File

@@ -181,7 +181,6 @@ class MTLTexture : public Texture {
   bool is_baked_ = false;
   MTLTextureDescriptor *texture_descriptor_ = nullptr;
   id<MTLTexture> texture_ = nil;
-  MTLTextureUsage usage_;

   /* Texture Storage. */
   id<MTLBuffer> texture_buffer_ = nil;
@@ -284,7 +283,7 @@ class MTLTexture : public Texture {
  protected:
   bool init_internal() override;
   bool init_internal(GPUVertBuf *vbo) override;
-  bool init_internal(const GPUTexture *src,
+  bool init_internal(GPUTexture *src,
                      int mip_offset,
                      int layer_offset) override; /* Texture View */

View File

@@ -159,7 +159,7 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
     }
     int range_len = min_ii((mip_texture_max_level_ - mip_texture_base_level_) + 1,
-                           texture_.mipmapLevelCount);
+                           texture_.mipmapLevelCount - mip_texture_base_level_);
     BLI_assert(range_len > 0);
     BLI_assert(mip_texture_base_level_ < texture_.mipmapLevelCount);
     BLI_assert(mip_texture_base_layer_ < num_slices);
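The subtraction matters whenever the view starts above level 0: a Metal texture view can expose at most the mip levels remaining past the base level, not the texture's full level count. A worked example with illustrative values (not from the patch):

/* texture_.mipmapLevelCount = 5, mip_texture_base_level_ = 2,
 * mip_texture_max_level_ = 9 (e.g. left at an unclamped maximum). */
int requested = (9 - 2) + 1;               /* 8 levels requested. */
int old_range = min_ii(requested, 5);      /* 5 -> levels 2..6, past the last level (4). */
int new_range = min_ii(requested, 5 - 2);  /* 3 -> levels 2..4, a valid Metal range. */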
@@ -1152,7 +1152,7 @@ void gpu::MTLTexture::ensure_mipmaps(int miplvl)
 void gpu::MTLTexture::generate_mipmap()
 {
   /* Fetch Active Context. */
-  MTLContext *ctx = reinterpret_cast<MTLContext *>(GPU_context_active_get());
+  MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));
   BLI_assert(ctx);

   if (!ctx->device) {
@@ -1265,7 +1265,7 @@ void gpu::MTLTexture::clear(eGPUDataFormat data_format, const void *data)
   /* Create clear framebuffer. */
   GPUFrameBuffer *prev_fb = GPU_framebuffer_active_get();
-  FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(this->get_blit_framebuffer(0, 0));
+  FrameBuffer *fb = unwrap(this->get_blit_framebuffer(0, 0));
   fb->bind(true);
   fb->clear_attachment(this->attachment_type(0), data_format, data);
   GPU_framebuffer_bind(prev_fb);
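Both hunks above swap a raw reinterpret_cast of the public handle for the GPU module's unwrap() helper followed by a static_cast to the backend type. A rough sketch of the pattern, assuming helper definitions along these lines (the real declarations live in the gpu_*_private.hh headers):

/* Public handles (GPUContext, GPUTexture, ...) are opaque; unwrap() converts
 * them to the private base class in one audited place rather than via ad-hoc
 * casts at every call site. */
static inline Context *unwrap(GPUContext *ctx)
{
  return reinterpret_cast<Context *>(ctx);
}

/* Backend code then downcasts the private base type, which the compiler can
 * at least type-check (unlike a reinterpret_cast straight to MTLContext *). */
MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));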
@@ -1805,7 +1805,7 @@ bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
   return true;
 }

-bool gpu::MTLTexture::init_internal(const GPUTexture *src, int mip_offset, int layer_offset)
+bool gpu::MTLTexture::init_internal(GPUTexture *src, int mip_offset, int layer_offset)
 {
   BLI_assert(src);
@@ -1817,12 +1817,17 @@ bool gpu::MTLTexture::init_internal(const GPUTexture *src, int mip_offset, int layer_offset)
   source_texture_ = src;
   mip_texture_base_level_ = mip_offset;
   mip_texture_base_layer_ = layer_offset;

   texture_view_dirty_flags_ |= TEXTURE_VIEW_MIP_DIRTY;
+  /* Assign usage. */
+  gpu_image_usage_flags_ = GPU_texture_usage(src);
+  BLI_assert_msg(
+      gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
+      "Source texture of TextureView must have GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW usage flag.");

   /* Assign texture as view. */
-  const gpu::MTLTexture *mtltex = static_cast<const gpu::MTLTexture *>(unwrap(src));
+  gpu::MTLTexture *mtltex = static_cast<gpu::MTLTexture *>(unwrap(src));
   mtltex->ensure_baked();
   texture_ = mtltex->texture_;
   BLI_assert(texture_);
   [texture_ retain];

Review

How bad is it to have incorrect usage flags? This might also fail for devs/users not familiar with the Metal backend, who would then be unable to continue their work. I would rather see a CLog error/warning, depending on the actual priority of fixing the issue, or have it guarded by --debug-gpu.

Yeah, it's likely not important for non-Metal platforms right now, but there was a discussion about moving the checks outside of the GPU backend, as this may also help the Vulkan bring-up. However, we could guard it behind --debug-gpu for now, perhaps? The purpose of this was mostly to aid bring-up, ensuring certain features use the required flags up front, though hopefully the --debug-gpu flag can still be used for this.

Review

I think for now it is better to assert all the time. This way it is easier for users/developers to detect any issue so we can fix it. Hiding it behind the --debug-gpu flag has some other cons:

  • might crash when we ask users to provide more information about a different issue.
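For reference, a minimal sketch of the CLog-based alternative raised in the review, guarded by --debug-gpu (hypothetical; the merged patch keeps the unconditional BLI_assert_msg, and the "gpu.metal" log identifier is an assumption):

#include "BKE_global.h"
#include "CLG_log.h"

static CLG_LogRef LOG = {"gpu.metal"}; /* Hypothetical log category. */

/* Inside init_internal(): warn instead of asserting, only with --debug-gpu. */
if ((gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW) == 0 &&
    (G.debug & G_DEBUG_GPU)) {
  CLOG_WARN(&LOG,
            "Source texture of texture view is missing the "
            "GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW usage flag");
}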

View File

@@ -178,7 +178,7 @@ bool GLTexture::init_internal(GPUVertBuf *vbo)
   return true;
 }

-bool GLTexture::init_internal(const GPUTexture *src, int mip_offset, int layer_offset)
+bool GLTexture::init_internal(GPUTexture *src, int mip_offset, int layer_offset)
 {
   BLI_assert(GLContext::texture_storage_support);
View File

@@ -117,7 +117,7 @@ class GLTexture : public Texture {
   /** Return true on success. */
   bool init_internal(GPUVertBuf *vbo) override;
   /** Return true on success. */
-  bool init_internal(const GPUTexture *src, int mip_offset, int layer_offset) override;
+  bool init_internal(GPUTexture *src, int mip_offset, int layer_offset) override;

  private:
   bool proxy_check(int mip);

View File

@@ -155,7 +155,7 @@ bool VKTexture::init_internal(GPUVertBuf * /*vbo*/)
   return false;
 }

-bool VKTexture::init_internal(const GPUTexture * /*src*/, int /*mip_offset*/, int /*layer_offset*/)
+bool VKTexture::init_internal(GPUTexture * /*src*/, int /*mip_offset*/, int /*layer_offset*/)
 {
   return false;
 }

View File

@@ -61,7 +61,7 @@ class VKTexture : public Texture {
  protected:
   bool init_internal() override;
   bool init_internal(GPUVertBuf *vbo) override;
-  bool init_internal(const GPUTexture *src, int mip_offset, int layer_offset) override;
+  bool init_internal(GPUTexture *src, int mip_offset, int layer_offset) override;

  private:
   /** Is this texture already allocated on device. */