Cleanup: spelling in comments
@@ -93,7 +93,7 @@ class GHOST_ContextCGL : public GHOST_Context {
   /**
    * Returns a texture that Metal code can use as a render target. The current
-   * contents of this texture will be composited on top of the framebuffer
+   * contents of this texture will be composited on top of the frame-buffer
    * each time `swapBuffers` is called.
    */
   id<MTLTexture> metalOverlayTexture();
@@ -134,7 +134,7 @@ class GHOST_ContextCGL : public GHOST_Context {
   /** The virtualized default frame-buffer. */
   unsigned int m_defaultFramebuffer;

-  /** The virtualized default framebuffer's texture */
+  /** The virtualized default frame-buffer's texture. */
   /**
    * Texture that you can render into with Metal. The texture will be
    * composited on top of `m_defaultFramebufferMetalTexture` whenever
@@ -57,7 +57,7 @@ GHOST_ContextCGL::GHOST_ContextCGL(bool stereoVisual,
       m_defaultFramebuffer(0),
       m_debug(false)
 {
-  /* Init Metal Swapchain. */
+  /* Initialize Metal Swap-chain. */
   current_swapchain_index = 0;
   for (int i = 0; i < METAL_SWAPCHAIN_SIZE; i++) {
     m_defaultFramebufferMetalTexture[i].texture = nil;
@@ -94,7 +94,7 @@ GHOST_ContextCGL::GHOST_ContextCGL(bool stereoVisual,
     }
   }

-  /* Initialise swapinterval. */
+  /* Initialize swap-interval. */
   mtl_SwapInterval = 60;
 }
@@ -302,7 +302,7 @@ GHOST_TSuccess GHOST_ContextCGL::updateDrawingContext()

 id<MTLTexture> GHOST_ContextCGL::metalOverlayTexture()
 {
-  /* Increment Swapchain - Only needed if context is requesting a new texture */
+  /* Increment Swap-chain - Only needed if context is requesting a new texture */
   current_swapchain_index = (current_swapchain_index + 1) % METAL_SWAPCHAIN_SIZE;

   /* Ensure backing texture is ready for current swapchain index */
@@ -409,7 +409,7 @@ GHOST_TSuccess GHOST_ContextCGL::initializeDrawingContext()
       pixelFormat = [[NSOpenGLPixelFormat alloc] initWithAttributes:&attribs[0]];
       if (pixelFormat == nil) {
         /* If pixel format creation fails when testing increased sampler limit,
-         * attempt intialisation again with feature disabled, otherwise, fail. */
+         * attempt initialization again with feature disabled, otherwise, fail. */
         if (increasedSamplerLimit) {
           increasedSamplerLimit = false;
           continue;
@@ -594,7 +594,7 @@ void GHOST_ContextCGL::metalInit()
   }

   /* Create a render pipeline to composite things rendered with Metal on top
-   * of the framebuffer contents. Uses the same vertex and fragment shader
+   * of the frame-buffer contents. Uses the same vertex and fragment shader
    * as the blit above, but with alpha blending enabled. */
   desc.label = @"Metal Overlay";
   desc.colorAttachments[0].blendingEnabled = YES;
@@ -258,8 +258,8 @@ class GHOST_Window : public GHOST_IWindow {
   GHOST_Context *getContext();

   /**
-   * Gets the OpenGL framebuffer associated with the window's contents.
-   * \return The ID of an OpenGL framebuffer object.
+   * Gets the OpenGL frame-buffer associated with the window's contents.
+   * \return The ID of an OpenGL frame-buffer object.
    */
   virtual unsigned int getDefaultFramebuffer() override;
@@ -139,8 +139,8 @@ void radixsort(std::vector<T> &data, std::vector<T> &data2, KeyGetter getKey)

 static void float_add_atomic(float *val, float add)
 {
-  /* Hacky, but atomic floats are only supported from C++20 onwards.
-   * This works in practise since std::atomic<uint32_t> is really just an uint32_t in memory,
+  /* Hacky, but atomic floats are only supported from C++20 onward.
+   * This works in practice since `std::atomic<uint32_t>` is really just an `uint32_t` in memory,
    * so this cast lets us do a 32-bit CAS operation (which is used to build the atomic float
    * operation) without needing any external libraries or compiler-specific builtins. */
   std::atomic<uint32_t> *atomic_val = reinterpret_cast<std::atomic<uint32_t> *>(val);
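An aside on the hunk above: the comment documents the classic trick of building atomic float addition from a 32-bit compare-and-swap. A minimal self-contained sketch of that CAS loop (a hypothetical illustration, not the body of the function in this commit) could look like:

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    static void float_add_atomic_sketch(float *val, float add)
    {
      /* Treat the float's 32 bits as an atomic integer. */
      std::atomic<uint32_t> *atomic_val = reinterpret_cast<std::atomic<uint32_t> *>(val);
      uint32_t old_bits = atomic_val->load();
      uint32_t new_bits;
      do {
        float old_f;
        std::memcpy(&old_f, &old_bits, sizeof(float)); /* Bit-cast to float. */
        const float new_f = old_f + add;
        std::memcpy(&new_bits, &new_f, sizeof(float)); /* Bit-cast back. */
        /* On failure `old_bits` is reloaded with the current value, so the loop
         * retries with a fresh snapshot until no other thread raced us. */
      } while (!atomic_val->compare_exchange_weak(old_bits, new_bits));
    }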
@@ -458,7 +458,7 @@ int BLI_string_search_query(StringSearch *search, const char *query, void ***r_d
   if (score == found_scores[0] && !query_str.is_empty()) {
     /* Sort items with best score by length. Shorter items are more likely the ones you are
      * looking for. This also ensures that exact matches will be at the top, even if the query is
-     * a substring of another item. */
+     * a sub-string of another item. */
     std::sort(indices.begin(), indices.end(), [&](int a, int b) {
       return search->items[a].length < search->items[b].length;
     });
@@ -138,8 +138,8 @@ bool RNANodeQuery::contains(const char *prop_identifier, const char *rna_path_co
     return false;
   }

-  /* If substr != prop_identifier, it means that the substring is found further in prop_identifier,
-   * and that thus index -1 is a valid memory location. */
+  /* If `substr != prop_identifier`, it means that the sub-string is found further in
+   * `prop_identifier`, and that thus index -1 is a valid memory location. */
   const bool start_ok = substr == prop_identifier || substr[-1] == '.';
   if (!start_ok) {
     return false;
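For readers of the hunk above: the surrounding logic is a token-boundary check on a raw `strstr` match. A hypothetical standalone sketch of the idea (the name and the exact end-of-token rule are illustrative, not taken from this file):

    #include <cstring>

    static bool contains_token(const char *path, const char *token)
    {
      const char *substr = std::strstr(path, token);
      if (substr == nullptr) {
        return false;
      }
      /* Reading substr[-1] is safe: substr != path implies the match begins at
       * index >= 1, so index -1 is a valid memory location. */
      const bool start_ok = (substr == path) || (substr[-1] == '.');
      /* The match must also end at a separator or at the end of the path. */
      const char end_char = substr[std::strlen(token)];
      const bool end_ok = (end_char == '\0') || (end_char == '.');
      return start_ok && end_ok;
    }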
@@ -197,16 +197,16 @@ static void color_mul_hsl_v3(uchar ch[3], float h_factor, float s_factor, float
  * \{ */

 /**
- * - in: roundbox codes for corner types and radius
- * - return: array of `[size][2][x, y]` points, the edges of the roundbox, + UV coords
+ * - in: `roundbox` codes for corner types and radius
+ * - return: array of `[size][2][x, y]` points, the edges of the `roundbox`, + UV coords
  *
- * - draw black box with alpha 0 on exact button boundbox
- * - for every AA step:
+ * - Draw black box with alpha 0 on exact button bounding-box.
+ * - For every AA step:
  *   - draw the inner part for a round filled box, with color blend codes or texture coords
  *   - draw outline in outline color
  *   - draw outer part, bottom half, extruded 1 pixel to bottom, for emboss shadow
  *   - draw extra decorations
- * - draw background color box with alpha 1 on exact button boundbox
+ * - Draw background color box with alpha 1 on exact button bounding-box.
  */

 /* fill this struct with polygon info to draw AA'ed */
@@ -693,7 +693,7 @@ static void round_box__edges(
 {
   float vec[WIDGET_CURVE_RESOLU][2], veci[WIDGET_CURVE_RESOLU][2];
   const float minx = rect->xmin, miny = rect->ymin, maxx = rect->xmax, maxy = rect->ymax;
-  const float minxi = minx + U.pixelsize; /* boundbox inner */
+  const float minxi = minx + U.pixelsize; /* Bounding-box inner. */
   const float maxxi = maxx - U.pixelsize;
   const float minyi = miny + U.pixelsize;
   const float maxyi = maxy - U.pixelsize;
@@ -227,7 +227,7 @@ struct FileList {
    * because those are no more persistent
    * (only generated on demand, and freed as soon as possible).
    * Persistent part (mere list of paths + stat info)
-   * is kept as small as possible, and filebrowser-agnostic.
+   * is kept as small as possible, and file-browser agnostic.
    */
   GHash *selection_state;
@@ -23,7 +23,9 @@

 #include "file_intern.h"

-/* ----------------- FOLDERLIST (previous/next) -------------- */
+/* -------------------------------------------------------------------- */
+/** \name FOLDERLIST (previous/next)
+ * \{ */

 typedef struct FolderList {
   struct FolderList *next, *prev;
@@ -47,7 +49,7 @@ void folderlist_popdir(struct ListBase *folderlist, char *dir)
       BLI_strncpy(dir, prev_dir, FILE_MAXDIR);
     }
   }
-  /* delete the folder next or use setdir directly before PREVIOUS OP */
+  /* Delete the folder next or use set-directory directly before PREVIOUS OP. */
 }

 void folderlist_pushdir(ListBase *folderlist, const char *dir)
@@ -129,7 +131,11 @@ static ListBase folderlist_duplicate(ListBase *folderlist)
   return folderlistn;
 }

-/* ----------------- Folder-History (wraps/owns file list above) -------------- */
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Folder-History (wraps/owns file list above)
+ * \{ */

 static FileFolderHistory *folder_history_find(const SpaceFile *sfile, eFileBrowse_Mode browse_mode)
 {
@@ -189,3 +195,5 @@ ListBase folder_history_list_duplicate(ListBase *listbase)

   return histories;
 }
+
+/** \} */
@@ -245,7 +245,7 @@ class GPUCodegen {
   ListBase ubo_inputs_ = {nullptr, nullptr};
   GPUInput *cryptomatte_input_ = nullptr;

-  /** Cache paramters for complexity heuristic. */
+  /** Cache parameters for complexity heuristic. */
   uint nodes_total_ = 0;
   uint textures_total_ = 0;
   uint uniforms_total_ = 0;
@@ -459,7 +459,7 @@ void GPUCodegen::generate_library()
   GPUCodegenCreateInfo &info = *create_info;

   void *value;
-  /* Iterate over libraries. We need to keep this struct intact incase
+  /* Iterate over libraries. We need to keep this struct intact in case
    * it is required for the optimization an pass. */
   GHashIterator *ihash = BLI_ghashIterator_new((GHash *)graph.used_libraries);
   while (!BLI_ghashIterator_done(ihash)) {
@@ -694,7 +694,7 @@ GPUPass *GPU_generate_pass(GPUMaterial *material,

   /** Cache lookup: Reuse shaders already compiled.
    * NOTE: We only perform cache look-up for non-optimized shader
-   * graphs, as baked constant data amongst other optimizations will generate too many
+   * graphs, as baked constant data among other optimizations will generate too many
    * shader source permutations, with minimal re-usability. */
   pass_hash = gpu_pass_cache_lookup(codegen.hash_get());
@@ -750,7 +750,7 @@ GPUPass *GPU_generate_pass(GPUMaterial *material,
     pass->compiled = false;
     /* Only flag pass optimization hint if this is the first generated pass for a material.
      * Optimized passes cannot be optimized further, even if the heuristic is still not
-     * favourable. */
+     * favorable. */
     pass->should_optimize = (!optimize_graph) && codegen.should_optimize_heuristic();

     codegen.create_info = nullptr;
@@ -45,13 +45,13 @@
 #define MAX_COLOR_BAND 128
 #define MAX_GPU_SKIES 8

-/** Whether the optimized variant of the GPUPass should be created asynchronously.
+/** Whether the optimized variant of the #GPUPass should be created asynchronously.
  * Usage of this depends on whether there are possible threading challenges of doing so.
  * Currently, the overhead of GPU_generate_pass is relatively small in comparison to shader
  * compilation, though this option exists in case any potential scenarios for material graph
  * optimization cause a slow down on the main thread.
  *
- * NOTE: The actual shader program for the optimized pass will alwaysbe compiled asynchronously,
+ * NOTE: The actual shader program for the optimized pass will always be compiled asynchronously,
  * this flag controls whether shader node graph source serialization happens on the compilation
  * worker thread. */
 #define ASYNC_OPTIMIZED_PASS_CREATION 0
@@ -67,8 +67,8 @@ typedef struct GPUSkyBuilder {
 } GPUSkyBuilder;

 struct GPUMaterial {
-  /* Contains GPUShader and source code for deferred compilation.
-   * Can be shared between similar material (i.e: sharing same nodetree topology). */
+  /* Contains #GPUShader and source code for deferred compilation.
+   * Can be shared between similar material (i.e: sharing same node-tree topology). */
   GPUPass *pass;
   /* Optimized GPUPass, situationally compiled after initial pass for optimal realtime performance.
    * This shader variant bakes dynamic uniform data as constant. This variant will not use
@@ -577,7 +577,7 @@ class MTLContext : public Context {
   friend class MTLRenderPassState;

  public:
-  /* Swapchain and latency management. */
+  /* Swap-chain and latency management. */
   static std::atomic<int> max_drawables_in_flight;
   static std::atomic<int64_t> avg_drawable_latency_us;
   static int64_t frame_latency[MTL_FRAME_AVERAGE_COUNT];
@@ -591,7 +591,7 @@ class MTLContext : public Context {
   id<MTLDevice> device = nil;

 #ifndef NDEBUG
-  /* Label for Context debug name assignemnt. */
+  /* Label for Context debug name assignment. */
   NSString *label = nil;
 #endif
@@ -606,7 +606,7 @@ class MTLContext : public Context {
   /* Parent Context. */
   GHOST_ContextCGL *ghost_context_;

-  /* Render Passes and Framebuffers. */
+  /* Render Passes and Frame-buffers. */
   id<MTLTexture> default_fbo_mtltexture_ = nil;
   gpu::MTLTexture *default_fbo_gputexture_ = nullptr;
@@ -637,7 +637,7 @@ class MTLContext : public Context {
   gpu::MTLBuffer *visibility_buffer_ = nullptr;
   bool visibility_is_dirty_ = false;

-  /* Null buffers for empty/unintialized bindings.
+  /* Null buffers for empty/uninitialized bindings.
    * Null attribute buffer follows default attribute format of OpenGL Backend. */
   id<MTLBuffer> null_buffer_;           /* All zero's. */
   id<MTLBuffer> null_attribute_buffer_; /* Value float4(0.0,0.0,0.0,1.0). */
@@ -783,7 +783,7 @@ class MTLContext : public Context {
     return MTLContext::global_memory_manager;
   }

-  /* Swapchain and latency management. */
+  /* Swap-chain and latency management. */
   static void latency_resolve_average(int64_t frame_latency_us)
   {
     int64_t avg = 0;
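As a side note, the `latency_resolve_average` declaration above suggests a rolling average over the last `MTL_FRAME_AVERAGE_COUNT` frame latencies. A hypothetical sketch of such an averaging step (names mirror the declarations above, but the logic is illustrative, not this file's implementation):

    #include <cstdint>

    static const int FRAME_AVERAGE_COUNT = 5; /* Assumed window size. */
    static int64_t frame_latency[FRAME_AVERAGE_COUNT] = {0};

    static int64_t latency_resolve_average_sketch(int64_t frame_latency_us)
    {
      int64_t avg = 0;
      /* Shift the history down and accumulate the retained samples. */
      for (int i = 1; i < FRAME_AVERAGE_COUNT; i++) {
        frame_latency[i - 1] = frame_latency[i];
        avg += frame_latency[i - 1];
      }
      /* Insert the newest sample and finish the average. */
      frame_latency[FRAME_AVERAGE_COUNT - 1] = frame_latency_us;
      avg = (avg + frame_latency_us) / FRAME_AVERAGE_COUNT;
      return avg;
    }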
@@ -37,7 +37,7 @@ namespace blender::gpu {
 /* Global memory manager. */
 MTLBufferPool MTLContext::global_memory_manager;

-/* Swapchain and latency management. */
+/* Swap-chain and latency management. */
 std::atomic<int> MTLContext::max_drawables_in_flight = 0;
 std::atomic<int64_t> MTLContext::avg_drawable_latency_us = 0;
 int64_t MTLContext::frame_latency[MTL_FRAME_AVERAGE_COUNT] = {0};
@@ -92,7 +92,7 @@ void MTLContext::set_ghost_context(GHOST_ContextHandle ghostCtxHandle)
       default_fbo_gputexture_ = new gpu::MTLTexture(
           "MTL_BACKBUFFER", GPU_RGBA16F, GPU_TEXTURE_2D, default_fbo_mtltexture_);

-      /* Update framebuffers with new texture attachments */
+      /* Update frame-buffers with new texture attachments. */
       mtl_front_left->add_color_attachment(default_fbo_gputexture_, 0, 0, 0);
       mtl_back_left->add_color_attachment(default_fbo_gputexture_, 0, 0, 0);
 #ifndef NDEBUG
@@ -147,7 +147,7 @@ MTLContext::MTLContext(void *ghost_window, void *ghost_context)
   /* Init debug. */
   debug::mtl_debug_init();

-  /* Initialise Renderpass and Framebuffer State */
+  /* Initialize Render-pass and Frame-buffer State. */
   this->back_left = nullptr;

   /* Initialize command buffer state. */
@@ -164,7 +164,7 @@ MTLContext::MTLContext(void *ghost_window, void *ghost_context)
   null_buffer_ = nil;
   null_attribute_buffer_ = nil;

-  /* Zero-initialise MTL Textures. */
+  /* Zero-initialize MTL textures. */
   default_fbo_mtltexture_ = nil;
   default_fbo_gputexture_ = nullptr;
@@ -821,7 +821,7 @@ bool MTLContext::ensure_render_pipeline_state(MTLPrimitiveType mtl_prim_type)

   /* Matrix Bindings. */
   /* This is now called upon shader bind. We may need to re-evaluate this though,
-   * as was done here to ensure uniform changes beween draws were tracked.
+   * as was done here to ensure uniform changes between draws were tracked.
    * NOTE(Metal): We may be able to remove this. */
   GPU_matrix_bind(reinterpret_cast<struct GPUShader *>(
       static_cast<Shader *>(this->pipeline_state.active_shader)));
@@ -869,7 +869,7 @@ bool MTLContext::ensure_render_pipeline_state(MTLPrimitiveType mtl_prim_type)
     scissor.width = this->pipeline_state.scissor_width;
     scissor.height = this->pipeline_state.scissor_height;

-    /* Some scissor assignments exceed the bounds of the viewport due to implictly added
+    /* Some scissor assignments exceed the bounds of the viewport due to implicitly added
      * padding to the width/height - Clamp width/height. */
     BLI_assert(scissor.x >= 0 && scissor.x < render_fb->get_width());
     BLI_assert(scissor.y >= 0 && scissor.y < render_fb->get_height());
@@ -899,7 +899,7 @@ bool MTLContext::ensure_render_pipeline_state(MTLPrimitiveType mtl_prim_type)

   /* State: Face winding. */
   if (this->pipeline_state.dirty_flags & MTL_PIPELINE_STATE_FRONT_FACING_FLAG) {
-    /* We nede to invert the face winding in Metal, to account for the inverted-Y coordinate
+    /* We need to invert the face winding in Metal, to account for the inverted-Y coordinate
      * system. */
     MTLWinding winding = (this->pipeline_state.front_face == GPU_CLOCKWISE) ?
                              MTLWindingClockwise :
@@ -909,7 +909,7 @@ bool MTLContext::ensure_render_pipeline_state(MTLPrimitiveType mtl_prim_type)
                              ~MTL_PIPELINE_STATE_FRONT_FACING_FLAG);
   }

-  /* State: cullmode. */
+  /* State: cull-mode. */
   if (this->pipeline_state.dirty_flags & MTL_PIPELINE_STATE_CULLMODE_FLAG) {

     MTLCullMode mode = MTLCullModeNone;
@@ -960,7 +960,7 @@ bool MTLContext::ensure_uniform_buffer_bindings(
   const MTLShaderUniformBlock &push_constant_block = shader_interface->get_push_constant_block();
   if (push_constant_block.size > 0) {

-    /* Fetch uniform buffer base binding index from pipeline_state_instance - Terhe buffer index
+    /* Fetch uniform buffer base binding index from pipeline_state_instance - There buffer index
      * will be offset by the number of bound VBOs. */
     uint32_t block_size = push_constant_block.size;
     uint32_t buffer_index = pipeline_state_instance->base_uniform_buffer_index +
@@ -1267,10 +1267,10 @@ void MTLContext::ensure_texture_bindings(

       /* Generate or Fetch argument buffer sampler configuration.
        * NOTE(Metal): we need to base sampler counts off of the maximal texture
-       * index. This is not the most optimal, but in practise, not a use-case
+       * index. This is not the most optimal, but in practice, not a use-case
        * when argument buffers are required.
        * This is because with explicit texture indices, the binding indices
-       * should match across draws, to allow the high-level to optimise bindpoints. */
+       * should match across draws, to allow the high-level to optimize bind-points. */
       gpu::MTLBuffer *encoder_buffer = nullptr;
       this->samplers_.num_samplers = shader_interface->get_max_texture_index() + 1;
@@ -1624,7 +1624,7 @@ id<MTLSamplerState> MTLContext::get_default_sampler_state()
 /** \} */

 /* -------------------------------------------------------------------- */
-/** \name Swapchain management and Metal presentation.
+/** \name Swap-chain management and Metal presentation.
  * \{ */

 void present(MTLRenderPassDescriptor *blit_descriptor,
@@ -1636,7 +1636,7 @@ void present(MTLRenderPassDescriptor *blit_descriptor,
   MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));
   BLI_assert(ctx);

-  /* Flush any oustanding work. */
+  /* Flush any outstanding work. */
   ctx->flush();

   /* Always pace CPU to maximum of 3 drawables in flight.
@@ -1660,8 +1660,8 @@ void present(MTLRenderPassDescriptor *blit_descriptor,
     PIL_sleep_ms(2);
   }

-  /* Present is submitted in its own CMD Buffer to enusure drawable reference released as early as
-   * possible. This command buffer is separate as it does not utilise the global state
+  /* Present is submitted in its own CMD Buffer to ensure drawable reference released as early as
+   * possible. This command buffer is separate as it does not utilize the global state
    * for rendering as the main context does. */
   id<MTLCommandBuffer> cmdbuf = [ctx->queue commandBuffer];
   MTLCommandBufferManager::num_active_cmd_bufs++;
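The pacing referenced in the two hunks above (sleep until fewer than three drawables are in flight, then present) boils down to a shared counter that presentation-completion handlers decrement. A hypothetical sketch of that pattern, with names invented for illustration:

    #include <atomic>
    #include <chrono>
    #include <thread>

    static std::atomic<int> drawables_in_flight{0};

    static void pace_cpu_before_present()
    {
      /* Block the CPU while the GPU still owns 3 drawables, sleeping briefly
       * instead of busy-waiting. */
      while (drawables_in_flight.load() >= 3) {
        std::this_thread::sleep_for(std::chrono::milliseconds(2));
      }
      /* Claim a slot for the drawable about to be presented; its completion
       * handler would decrement the counter again. */
      drawables_in_flight++;
    }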
@@ -49,7 +49,7 @@ uchar *MTLImmediate::begin()

 void MTLImmediate::end()
 {
-  /* Ensure we're between a imm::begin/imm:end pair. */
+  /* Ensure we're between a `imm::begin` / `imm:end` pair. */
   BLI_assert(has_begun_);
   BLI_assert(prim_type != GPU_PRIM_NONE);
@@ -187,7 +187,7 @@ void MTLImmediate::end()
       /* Some conversions are NOT valid, e.g. Int4 to Float4
        * - In this case, we need to implement a conversion routine inside the shader.
        * - This is handled using the format_conversion_mode flag
-       * - This flag is passed into the PSO as a function specialisation,
+       * - This flag is passed into the PSO as a function specialization,
        *   and will generate an appropriate conversion function when reading the vertex attribute
        *   value into local shader storage.
        *   (If no explicit conversion is needed, the function specialize to a pass-through). */
@@ -359,7 +359,7 @@ void MTLImmediate::end()
       }

       /* Submit draw call with modified vertex count, which reflects vertices per primitive
-       * defined in the USE_SSBO_VERTEX_FETCH pragma. */
+       * defined in the USE_SSBO_VERTEX_FETCH `pragma`. */
       int num_input_primitives = gpu_get_prim_count_from_type(vertex_count, this->prim_type);
       int output_num_verts = num_input_primitives *
                              active_mtl_shader->get_ssbo_vertex_fetch_output_num_verts();
@@ -174,7 +174,7 @@ id<MTLTexture> gpu::MTLTexture::get_metal_handle()
   if (resource_mode_ == MTL_TEXTURE_MODE_VBO) {
     id<MTLBuffer> buf = vert_buffer_->get_metal_buffer();

-    /* Source vertex buffer has been re-generated, require re-initialisation. */
+    /* Source vertex buffer has been re-generated, require re-initialization. */
     if (buf != vert_buffer_mtl_) {
       MTL_LOG_INFO(
           "MTLTexture '%p' using MTL_TEXTURE_MODE_VBO requires re-generation due to updated "
@@ -183,7 +183,7 @@ id<MTLTexture> gpu::MTLTexture::get_metal_handle()
       /* Clear state. */
       this->reset();

-      /* Re-initialise. */
+      /* Re-initialize. */
       this->init_internal(wrap(vert_buffer_));

       /* Update for assertion check below. */
@@ -191,7 +191,7 @@ id<MTLTexture> gpu::MTLTexture::get_metal_handle()
     }

     /* Ensure buffer is valid.
-     * Fetchvert buffer handle directly in-case it changed above. */
+     * Fetch-vert buffer handle directly in-case it changed above. */
     BLI_assert(vert_buffer_mtl_ != nil);
     BLI_assert(vert_buffer_->get_metal_buffer() == vert_buffer_mtl_);
   }
@@ -38,7 +38,7 @@ class MTLVertBuf : public VertBuf {
   /** Whether existing allocation has been submitted for use by the GPU. */
   bool contents_in_flight_ = false;

-  /* Fetch Metal buffer and offset into allocation if ncessary.
+  /* Fetch Metal buffer and offset into allocation if necessary.
    * Access limited to friend classes. */
   id<MTLBuffer> get_metal_buffer()
   {
@@ -58,7 +58,7 @@ void MTLVertBuf::duplicate_data(VertBuf *dst_)
   MTLVertBuf *src = this;
   MTLVertBuf *dst = static_cast<MTLVertBuf *>(dst_);

-  /* Ensure buffer has been initialised. */
+  /* Ensure buffer has been initialized. */
   src->bind();

   if (src->vbo_) {
@@ -206,7 +206,7 @@ void MTLVertBuf::bind()

   if (prev_flag & GPU_VERTBUF_DATA_UPLOADED) {

-    /* Fetch active econtext. */
+    /* Fetch active context. */
    MTLContext *ctx = MTLContext::get();
     BLI_assert(ctx);
@@ -21,9 +21,9 @@
 /** Uncompressed 4x4 color block. */
 struct ColorBlock {
   ColorBlock() = default;
-  /** Init the color block from an array of colors. */
+  /** Initialize the color block from an array of colors. */
   ColorBlock(const uint *linearImage);
-  /** Init the color block with the contents of the given block. */
+  /** Initialize the color block with the contents of the given block. */
   ColorBlock(const ColorBlock &block);
   /** Initialize this color block. */
   ColorBlock(const Image *img, uint x, uint y);
@@ -872,8 +872,8 @@ DirectDrawSurface::DirectDrawSurface(uchar *mem, uint size) : stream(mem, size),
   mem_read(stream, header);

   /* Some ATI2 compressed normal maps do not have their
-   * normal flag set, so force it here (the original nvtt don't do
-   * this, but the decompressor has a -forcenormal flag). */
+   * normal flag set, so force it here (the original `nvtt` don't do
+   * this, but the decompressor has a `-forcenormal` flag). */
   if (header.pf.fourcc == FOURCC_ATI2) {
     header.setNormalFlag(true);
   }
@@ -1442,7 +1442,7 @@ void IMB_exr_close(void *handle)

 /* ********* */

-/* get a substring from the end of the name, separated by '.' */
+/** Get a sub-string from the end of the name, separated by '.'. */
 static int imb_exr_split_token(const char *str, const char *end, const char **token)
 {
   const char delims[] = {'.', '\0'};
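To illustrate the behavior the re-worded comment above describes (scan backward from `end` to the last `'.'` separator and return the trailing token), a hypothetical standalone version might read:

    static int split_last_token(const char *str, const char *end, const char **token)
    {
      const char *t = end;
      /* Walk backward until the start of the string or a '.' separator. */
      while (t > str && t[-1] != '.') {
        t--;
      }
      *token = t;
      return (int)(end - t); /* Length of the trailing token. */
    }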
@@ -1014,7 +1014,7 @@ static struct PyMethodDef pygpu_shader_info__tp_methods[] = {
 /** \} */

 /* -------------------------------------------------------------------- */
-/** \name GPUShaderCreateInfo Init
+/** \name GPUShaderCreateInfo Initialization
  * \{ */

 static PyObject *pygpu_shader_info__tp_new(PyTypeObject *UNUSED(type),
@@ -3304,7 +3304,7 @@ PyObject *Vector_CreatePyObject(const float *vec, const int vec_num, PyTypeObjec
   self->vec = vec_alloc;
   self->vec_num = vec_num;

-  /* init callbacks as NULL */
+  /* Initialize callbacks as NULL. */
   self->cb_user = NULL;
   self->cb_type = self->cb_subtype = 0;
@@ -3339,7 +3339,7 @@ PyObject *Vector_CreatePyObject_wrap(float *vec, const int vec_num, PyTypeObject
   if (self) {
     self->vec_num = vec_num;

-    /* init callbacks as NULL */
+    /* Initialize callbacks as NULL. */
     self->cb_user = NULL;
     self->cb_type = self->cb_subtype = 0;
@@ -486,7 +486,7 @@ static void wm_event_execute_timers(bContext *C)

 void wm_event_do_notifiers(bContext *C)
 {
-  /* Ensure insiude render boundary. */
+  /* Ensure inside render boundary. */
   GPU_render_begin();

   /* Run the timer before assigning `wm` in the unlikely case a timer loads a file, see T80028. */