This repository has been archived on 2023-10-09. You can view files and clone it. You cannot open issues or pull requests or push a commit.
Files
blender-archive/source/blender/gpu/metal/mtl_uniform_buffer.mm
Jason Fielder 359d98423e Metal: Fix memory leaks.
Fix a number of small memory leaks in the Metal backend. Unreleased blit
shader objects and temporary textures addressed. Static memory manager
modified to defer creation until use. Added reference count tracker to
shared memory manager across contexts, such that cached memory allocations
will be released if all contexts are destroyed and re-initialized.

Authored by Apple: Michael Parkin-White

Ref T96261

Reviewed By: fclem
Differential Revision: https://developer.blender.org/D16415
2022-12-08 23:08:57 +01:00

181 lines
4.3 KiB
C++

/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup gpu
*/
#include "BKE_global.h"
#include "BLI_string.h"
#include "gpu_backend.hh"
#include "gpu_context_private.hh"
#include "mtl_backend.hh"
#include "mtl_context.hh"
#include "mtl_debug.hh"
#include "mtl_uniform_buffer.hh"
namespace blender::gpu {
/* Construct a uniform buffer wrapper. Allocation is deferred: no Metal buffer
 * is created here; the backing allocation happens on the first update() (or on
 * bind(), which flushes deferred data / creates an empty placeholder). */
MTLUniformBuf::MTLUniformBuf(size_t size, const char *name) : UniformBuf(size, name)
{
}
MTLUniformBuf::~MTLUniformBuf()
{
  /* Return the backing allocation (if any) to the memory manager. */
  if (metal_buffer_ != nullptr) {
    metal_buffer_->free();
    metal_buffer_ = nullptr;
  }
  has_data_ = false;

  /* Clear any lingering binding of this UBO in the active context.
   * UBO bindings are reset upon context switch, so deactivated contexts
   * do not need to be checked. */
  MTLContext *active_ctx = MTLContext::get();
  if (active_ctx == nullptr) {
    return;
  }
  for (int slot_id = 0; slot_id < MTL_MAX_UNIFORM_BUFFER_BINDINGS; slot_id++) {
    MTLUniformBufferBinding &binding = active_ctx->pipeline_state.ubo_bindings[slot_id];
    if (binding.bound && binding.ubo == this) {
      binding.bound = false;
      binding.ubo = nullptr;
    }
  }
}
/* Upload `data` into a freshly-allocated Metal buffer.
 * Passing nullptr releases any existing buffer and leaves the UBO empty
 * (MTLContext will substitute a zero-filled null buffer when bound). */
void MTLUniformBuf::update(const void *data)
{
  BLI_assert(this);
  BLI_assert(size_in_bytes_ > 0);

  /* Free existing allocation.
   * The previous UBO resource will be tracked by the memory manager,
   * in case dependent GPU work is still executing. */
  if (metal_buffer_ != nullptr) {
    metal_buffer_->free();
    metal_buffer_ = nullptr;
  }

  /* Allocate MTL buffer. */
  MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));
  BLI_assert(ctx);
  BLI_assert(ctx->device);
  UNUSED_VARS_NDEBUG(ctx);

  if (data != nullptr) {
    metal_buffer_ = MTLContext::get_global_memory_manager()->allocate_with_data(
        size_in_bytes_, true, data);
    /* Validate the allocation BEFORE dereferencing it. (Previously these
     * asserts ran after set_label(), so a null allocation would have crashed
     * before the asserts could fire.) */
    BLI_assert(metal_buffer_ != nullptr);
    BLI_assert(metal_buffer_->get_metal_buffer() != nil);
    metal_buffer_->set_label(@"Uniform Buffer");
    has_data_ = true;
  }
  else {
    /* If data is not yet present, no buffer will be allocated and MTLContext will use an empty
     * null buffer, containing zeroes, if the UBO is bound. */
    metal_buffer_ = nullptr;
    has_data_ = false;
  }
}
/* Fill the UBO with zeroes via a temporary host-side allocation. */
void MTLUniformBuf::clear_to_zero()
{
  /* TODO(fclem): Avoid another allocation and just do the clear on the GPU if possible. */
  void *clear_data = calloc(1, size_in_bytes_);
  /* If calloc fails, update(nullptr) would silently mark the UBO as empty
   * instead of zero-filled -- catch that in debug builds and skip the update
   * rather than dereference/upload a null pointer. */
  BLI_assert(clear_data != nullptr);
  if (clear_data != nullptr) {
    this->update(clear_data);
    free(clear_data);
  }
}
/* Bind this UBO to `slot` on the active context, flushing any deferred
 * host-side data first. */
void MTLUniformBuf::bind(int slot)
{
  /* Negative slots are rejected with a warning instead of an assert. */
  if (slot < 0) {
    MTL_LOG_WARNING("Failed to bind UBO %p. uniform location %d invalid.\n", this, slot);
    return;
  }
  BLI_assert(slot < MTL_MAX_UNIFORM_BUFFER_BINDINGS);

  /* Record the binding on the active context's pipeline state. */
  MTLContext *active_ctx = MTLContext::get();
  BLI_assert(active_ctx);
  MTLUniformBufferBinding &binding = active_ctx->pipeline_state.ubo_bindings[slot];
  binding.ubo = this;
  binding.bound = true;

  bind_slot_ = slot;
  bound_ctx_ = active_ctx;

  /* Flush deferred data (set before any buffer existed) into the GPU buffer. */
  if (data_ != nullptr) {
    this->update(data_);
    MEM_SAFE_FREE(data_);
  }

  /* Guarantee at least an empty dummy buffer exists after a bind. */
  if (metal_buffer_ == nullptr) {
    this->update(nullptr);
  }
}
/* SSBO-style binding of a UBO is not supported by the Metal backend yet. */
void MTLUniformBuf::bind_as_ssbo(int slot)
{
  if (slot >= 0) {
    BLI_assert_msg(0, "Not implemented yet");
    return;
  }
  /* Invalid slot: warn and bail, mirroring bind(). */
  MTL_LOG_WARNING("Failed to bind UBO %p as SSBO. uniform location %d invalid.\n", this, slot);
}
void MTLUniformBuf::unbind()
{
  /* Clear the context-side binding in debug builds to validate missing binds
   * (in release builds only when GPU debugging is enabled). Otherwise the
   * full unbind is deferred to destruction, which clears lingering refs. */
#ifndef NDEBUG
  const bool clear_ctx_binding = true;
#else
  const bool clear_ctx_binding = (G.debug & G_DEBUG_GPU) != 0;
#endif
  if (clear_ctx_binding && bound_ctx_ != nullptr && bind_slot_ > -1) {
    MTLUniformBufferBinding &binding = bound_ctx_->pipeline_state.ubo_bindings[bind_slot_];
    if (binding.bound && binding.ubo == this) {
      binding.bound = false;
      binding.ubo = nullptr;
    }
  }

  /* Reset cached bind state. */
  bind_slot_ = -1;
  bound_ctx_ = nullptr;
}
/* Return the populated backing MTLBuffer, or nil when no data has been
 * uploaded yet. `r_offset` is always written (UBO data starts at offset 0). */
id<MTLBuffer> MTLUniformBuf::get_metal_buffer(int *r_offset)
{
  BLI_assert(this);
  *r_offset = 0;

  if (metal_buffer_ == nullptr || !has_data_) {
    /* Nothing uploaded: caller falls back to the context's null buffer. */
    return nil;
  }
  metal_buffer_->debug_ensure_used();
  return metal_buffer_->get_metal_buffer();
}
/* Return the UBO's size in bytes, as passed to the constructor. */
int MTLUniformBuf::get_size()
{
  BLI_assert(this);
  return size_in_bytes_;
}
} // blender::gpu