main sync #3

Merged
Patrick Busch merged 318 commits from blender/blender:main into main 2023-03-17 15:52:21 +01:00
66 changed files with 130 additions and 125 deletions
Showing only changes of commit b3625e6bfd

View File

@@ -81,7 +81,7 @@ class DeviceInfo {
bool has_gpu_queue; /* Device supports GPU queue. */
bool use_metalrt; /* Use MetalRT to accelerate ray queries (Metal only). */
KernelOptimizationLevel kernel_optimization_level; /* Optimization level applied to path tracing
-kernels (Metal only). */
+* kernels (Metal only). */
DenoiserTypeMask denoisers; /* Supported denoiser types. */
int cpu_threads;
vector<DeviceInfo> multi_devices;

View File

@@ -278,7 +278,7 @@ int MetalDeviceQueue::num_concurrent_states(const size_t state_size) const
if (metal_device_->device_vendor == METAL_GPU_APPLE) {
result *= 4;
-/* Increasing the state count doesn't notably benefit M1-family systems. */
+/* Increasing the state count doesn't notably benefit M1-family systems. */
if (MetalInfo::get_apple_gpu_architecture(metal_device_->mtlDevice) != APPLE_M1) {
size_t system_ram = system_physical_ram();
size_t allocated_so_far = [metal_device_->mtlDevice currentAllocatedSize];

View File

@@ -1343,7 +1343,7 @@ void PathTrace::guiding_prepare_structures()
* per update to be limited, for reproducible results and reasonable training size.
*
* Idea: we could stochastically discard samples with a probability of 1/num_samples_per_update
-* we can then update only after the num_samples_per_update iterations are rendered. */
+* we can then update only after the num_samples_per_update iterations are rendered. */
render_scheduler_.set_limit_samples_per_update(4);
}
else {

View File

@@ -94,7 +94,7 @@ class PathTrace {
void set_adaptive_sampling(const AdaptiveSampling &adaptive_sampling);
/* Set the parameters for guiding.
-* Use to setup the guiding structures before each rendering iteration.*/
+* Use to setup the guiding structures before each rendering iteration. */
void set_guiding_params(const GuidingParams &params, const bool reset);
/* Sets output driver for render buffer output. */
@@ -119,7 +119,7 @@ class PathTrace {
*/
void cancel();
-/* Copy an entire render buffer to/from the path trace. */
+/* Copy an entire render buffer to/from the path trace. */
/* Copy happens via CPU side buffer: data will be copied from every device of the path trace, and
* the data will be copied to the device of the given render buffers. */
@@ -294,7 +294,7 @@ class PathTrace {
* rendering iteration. */
unique_ptr<openpgl::cpp::SampleStorage> guiding_sample_data_storage_;
-/* The number of already performed training iterations for the guiding field.*/
+/* The number of already performed training iterations for the guiding field. */
int guiding_update_count = 0;
#endif

View File

@@ -202,7 +202,7 @@ ccl_device float2 direction_to_mirrorball(float3 dir)
}
/* Single face of a equiangular cube map projection as described in
-https://blog.google/products/google-ar-vr/bringing-pixels-front-and-center-vr-video/ */
+* https://blog.google/products/google-ar-vr/bringing-pixels-front-and-center-vr-video/ */
ccl_device float3 equiangular_cubemap_face_to_direction(float u, float v)
{
u = (1.0f - u);
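For context, the equi-angular mapping referenced above warps each face coordinate through a tangent so that samples are spaced uniformly in angle rather than uniformly on the cube face. A minimal standalone sketch of that remapping, assuming a face coordinate in [0, 1] (this is the general transform from the linked article, not the exact Cycles kernel code):

#include <cmath>

/* Sketch: remap an equi-angular face coordinate q in [0, 1] to the linear
 * cube-face coordinate in [-1, 1]. Equal steps in q become equal angular
 * steps across the 90-degree span of the face. */
static float equiangular_to_linear(const float q)
{
  const float angle = (q - 0.5f) * (3.14159265f / 2.0f); /* [-45, +45] degrees. */
  return tanf(angle);
}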

View File

@@ -136,7 +136,7 @@ ccl_device_forceinline float3 microfacet_beckmann_sample_vndf(const float3 wi,
/* Find root in a monotonic interval using newton method, under given precision and maximal
* iterations. Falls back to bisection if newton step produces results outside of the valid
-* interval.*/
+* interval. */
const float precision = 1e-6f;
const int max_iter = 3;
int iter = 0;
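The comment above describes a guarded Newton iteration; a minimal sketch of the scheme in isolation (not the Cycles kernel code), assuming a monotonically increasing f with a root inside [lo, hi]:

#include <cmath>

/* Sketch: Newton steps that fall back to bisection whenever the step would
 * leave the bracketing interval. The bracket [lo, hi] is shrunk every
 * iteration using the sign of f(x), so the fallback midpoint stays valid. */
template<typename F, typename DF>
static float find_root(F f, DF df, float lo, float hi, const float precision, const int max_iter)
{
  float x = 0.5f * (lo + hi);
  for (int iter = 0; iter < max_iter; iter++) {
    const float fx = f(x);
    if (fabsf(fx) < precision) {
      break;
    }
    /* Shrink the bracket (f is assumed increasing). */
    if (fx > 0.0f) {
      hi = x;
    }
    else {
      lo = x;
    }
    const float x_newton = x - fx / df(x);
    /* Accept the Newton step only if it stays inside the valid interval. */
    x = (x_newton > lo && x_newton < hi) ? x_newton : 0.5f * (lo + hi);
  }
  return x;
}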

View File

@@ -53,7 +53,7 @@ ccl_device_forceinline void guiding_record_surface_segment(KernelGlobals kg,
#endif
}
-/* Records the surface scattering event at the current vertex position of the segment.*/
+/* Records the surface scattering event at the current vertex position of the segment. */
ccl_device_forceinline void guiding_record_surface_bounce(KernelGlobals kg,
IntegratorState state,
ccl_private const ShaderData *sd,
@@ -134,7 +134,7 @@ ccl_device_forceinline void guiding_record_bssrdf_segment(KernelGlobals kg,
}
/* Records the transmission of the path at the point of entry while passing
-* the surface boundary.*/
+* the surface boundary. */
ccl_device_forceinline void guiding_record_bssrdf_weight(KernelGlobals kg,
IntegratorState state,
const Spectrum weight,
@@ -161,7 +161,7 @@ ccl_device_forceinline void guiding_record_bssrdf_weight(KernelGlobals kg,
/* Records the direction at the point of entry the path takes when sampling the SSS contribution.
* If not terminated this function is usually followed by a call of
* guiding_record_volume_transmission to record the transmittance between the point of entry and
-* the point of exit.*/
+* the point of exit. */
ccl_device_forceinline void guiding_record_bssrdf_bounce(KernelGlobals kg,
IntegratorState state,
const float pdf,
@@ -216,7 +216,7 @@ ccl_device_forceinline void guiding_record_volume_segment(KernelGlobals kg,
#endif
}
-/* Records the volume scattering event at the current vertex position of the segment.*/
+/* Records the volume scattering event at the current vertex position of the segment. */
ccl_device_forceinline void guiding_record_volume_bounce(KernelGlobals kg,
IntegratorState state,
ccl_private const ShaderData *sd,
@@ -247,7 +247,7 @@ ccl_device_forceinline void guiding_record_volume_bounce(KernelGlobals kg,
}
/* Records the transmission (a.k.a. transmittance weight) between the current path segment
-* and the next one, when the path is inside or passes a volume.*/
+* and the next one, when the path is inside or passes a volume. */
ccl_device_forceinline void guiding_record_volume_transmission(KernelGlobals kg,
IntegratorState state,
const float3 transmittance_weight)
@@ -330,7 +330,7 @@ ccl_device_forceinline void guiding_record_light_surface_segment(
/* Records/Adds a final path segment when the path leaves the scene and
* intersects with a background light (e.g., background color,
* distant light, or env map). The vertex for this segment is placed along
-* the current ray far out the scene.*/
+* the current ray far out the scene. */
ccl_device_forceinline void guiding_record_background(KernelGlobals kg,
IntegratorState state,
const Spectrum L,
@@ -359,7 +359,7 @@ ccl_device_forceinline void guiding_record_background(KernelGlobals kg,
/* Records the scattered contribution of a next event estimation
* (i.e., a direct light estimate scattered at the current path vertex
-* towards the previous vertex).*/
+* towards the previous vertex). */
ccl_device_forceinline void guiding_record_direct_light(KernelGlobals kg,
IntegratorShadowState state)
{
@@ -397,7 +397,7 @@ ccl_device_forceinline void guiding_record_continuation_probability(
/* Path guiding debug render passes. */
/* Write a set of path guiding related debug information (e.g., guiding probability at first
-* bounce) into separate rendering passes.*/
+* bounce) into separate rendering passes. */
ccl_device_forceinline void guiding_write_debug_passes(KernelGlobals kg,
IntegratorState state,
ccl_private const ShaderData *sd,

View File

@@ -1019,7 +1019,7 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
const float step_size = volume_stack_step_size(kg, volume_read_lambda_pass);
# if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 1
-/* The current path throughput which is used later to calculate per-segment throughput.*/
+/* The current path throughput which is used later to calculate per-segment throughput. */
const float3 initial_throughput = INTEGRATOR_STATE(state, path, throughput);
/* The path throughput used to calculate the throughput for direct light. */
float3 unlit_throughput = initial_throughput;
@@ -1063,7 +1063,7 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
if (result.direct_sample_method == VOLUME_SAMPLE_DISTANCE) {
/* If the direct scatter event is generated using VOLUME_SAMPLE_DISTANCE the direct event
* will happen at the same position as the indirect event and the direct light contribution
-* will contribute to the position of the next path segment.*/
+* will contribute to the position of the next path segment. */
float3 transmittance_weight = spectrum_to_rgb(
safe_divide_color(result.indirect_throughput, initial_throughput));
guiding_record_volume_transmission(kg, state, transmittance_weight);
@@ -1076,7 +1076,8 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
/* If the direct scatter event is generated using VOLUME_SAMPLE_EQUIANGULAR the direct
* event will happen at a separate position as the indirect event and the direct light
* contribution will contribute to the position of the current/previous path segment. The
-* unlit_throughput has to be adjusted to include the scattering at the previous segment.*/
+* unlit_throughput has to be adjusted to include the scattering at the previous segment.
+*/
float3 scatterEval = one_float3();
if (state->guiding.path_segment) {
pgl_vec3f scatteringWeight = state->guiding.path_segment->scatteringWeight;

View File

@@ -126,16 +126,16 @@ typedef struct IntegratorStateGPU {
/* Count number of kernels queued for specific shaders. */
ccl_global int *sort_key_counter[DEVICE_KERNEL_INTEGRATOR_NUM];
-/* Index of shadow path which will be used by a next shadow path. */
+/* Index of shadow path which will be used by a next shadow path. */
ccl_global int *next_shadow_path_index;
-/* Index of main path which will be used by a next shadow catcher split. */
+/* Index of main path which will be used by a next shadow catcher split. */
ccl_global int *next_main_path_index;
/* Partition/key offsets used when writing sorted active indices. */
ccl_global int *sort_partition_key_offsets;
-/* Divisor used to partition active indices by locality when sorting by material. */
+/* Divisor used to partition active indices by locality when sorting by material. */
uint sort_partition_divisor;
} IntegratorStateGPU;
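The `sort_partition_divisor` comment is terse; one plausible shape for such a locality-preserving key is sketched below. This is an illustrative assumption about how a partition can be folded into a shader sort key, not the actual Cycles kernel code:

/* Sketch: group states into partitions of `sort_partition_divisor` entries,
 * then sort primarily by partition and secondarily by shader. Equal shaders
 * cluster together without pulling states far apart in memory. */
static int locality_sort_key(const int state_index,
                             const int shader_index,
                             const int num_shaders,
                             const int sort_partition_divisor)
{
  const int partition = state_index / sort_partition_divisor;
  return partition * num_shaders + shader_index;
}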

View File

@@ -38,7 +38,7 @@ ccl_device_inline void surface_shader_prepare_guiding(KernelGlobals kg,
const float surface_guiding_probability = kernel_data.integrator.surface_guiding_probability;
float rand_bsdf_guiding = path_state_rng_1D(kg, rng_state, PRNG_SURFACE_BSDF_GUIDING);
-/* Compute proportion of diffuse BSDF and BSSRDFs .*/
+/* Compute proportion of diffuse BSDF and BSSRDFs. */
float diffuse_sampling_fraction = 0.0f;
float bssrdf_sampling_fraction = 0.0f;
float bsdf_bssrdf_sampling_sum = 0.0f;

View File

@@ -259,7 +259,7 @@ int LightTree::recursive_build(
bool should_split = false;
if (try_splitting) {
/* Find the best place to split the primitives into 2 nodes.
-* If the best split cost is no better than making a leaf node, make a leaf instead.*/
+* If the best split cost is no better than making a leaf node, make a leaf instead. */
float min_cost = min_split_saoh(
centroid_bounds, start, end, bbox, bcone, split_dim, split_bucket, num_left_prims, prims);
should_split = num_prims > max_lights_in_leaf_ || min_cost < energy_total;

View File

@@ -351,7 +351,7 @@ class Scene : public NodeOwner {
/* Get maximum number of closures to be used in kernel. */
int get_max_closure_count();
-/* Get size of a volume stack needed to render this scene. */
+/* Get size of a volume stack needed to render this scene. */
int get_volume_stack_size() const;
template<typename T> void delete_node_impl(T *node)

View File

@@ -507,7 +507,7 @@ void Session::do_delayed_reset()
params = delayed_reset_.session_params;
buffer_params_ = delayed_reset_.buffer_params;
-/* Store parameters used for buffers access outside of scene graph. */
+/* Store parameters used for buffers access outside of scene graph. */
buffer_params_.samples = params.samples;
buffer_params_.exposure = scene->film->get_exposure();
buffer_params_.use_approximate_shadow_catcher =

View File

@@ -878,7 +878,7 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
}
extensions_device.push_back("VK_KHR_dedicated_allocation");
extensions_device.push_back("VK_KHR_get_memory_requirements2");
-/* Enable MoltenVK required instance extensions.*/
+/* Enable MoltenVK required instance extensions. */
#ifdef VK_MVK_MOLTENVK_EXTENSION_NAME
requireExtension(
extensions_available, extensions_enabled, "VK_KHR_get_physical_device_properties2");

View File

@@ -60,7 +60,7 @@ static FT_Library ft_lib = NULL;
static FTC_Manager ftc_manager = NULL;
static FTC_CMapCache ftc_charmap_cache = NULL;
-/* Lock for FreeType library, used around face creation and deletion. */
+/* Lock for FreeType library, used around face creation and deletion. */
static ThreadMutex ft_lib_mutex;
/* May be set to #UI_widgetbase_draw_cache_flush. */
@@ -1566,7 +1566,7 @@ FontBLF *blf_font_new_ex(const char *name,
}
}
-/* Detect "Last resort" fonts. They have everything. Usually except last 5 bits. */
+/* Detect "Last resort" fonts. They have everything. Usually except last 5 bits. */
if (font->unicode_ranges[0] == 0xffffffffU && font->unicode_ranges[1] == 0xffffffffU &&
font->unicode_ranges[2] == 0xffffffffU && font->unicode_ranges[3] >= 0x7FFFFFFU) {
font->flags |= BLF_LAST_RESORT;

View File

@@ -300,7 +300,7 @@ typedef struct FontBLF {
/** Font size. */
float size;
-/** Axes data for Adobe MM, TrueType GX, or OpenType variation fonts. */
+/** Axes data for Adobe MM, TrueType GX, or OpenType variation fonts. */
FT_MM_Var *variations;
/** Character variation; 0=default, -1=min, +1=max. */

View File

@@ -246,7 +246,7 @@ static const char32_t *blf_get_sample_text(FT_Face face)
return def;
}
-/* Detect "Last resort" fonts. They have everything, except the last 5 bits. */
+/* Detect "Last resort" fonts. They have everything, except the last 5 bits. */
if (os2_table->ulUnicodeRange1 == 0xffffffffU && os2_table->ulUnicodeRange2 == 0xffffffffU &&
os2_table->ulUnicodeRange3 == 0xffffffffU && os2_table->ulUnicodeRange4 >= 0x7FFFFFFU) {
return U"\xE000\xFFFF";

View File

@@ -133,21 +133,26 @@ void BKE_curvemapping_table_RGBA(const struct CurveMapping *cumap, float **array
void BKE_curvemapping_get_range_minimums(const struct CurveMapping *curve_mapping,
float minimums[4]);
-/** Get the reciprocal of the difference between the maximum and the minimum x value of each curve
+/**
+* Get the reciprocal of the difference between the maximum and the minimum x value of each curve
* map table. Evaluation parameters can be multiplied by this value to be normalized. If the
-* difference is zero, 1^8 is returned. */
+* difference is zero, 1^8 is returned.
+*/
void BKE_curvemapping_compute_range_dividers(const struct CurveMapping *curve_mapping,
float dividers[4]);
-/** Compute the slopes at the start and end points of each curve map. The slopes are multiplied by
+/**
+* Compute the slopes at the start and end points of each curve map. The slopes are multiplied by
* the range of the curve map to compensate for parameter normalization. If the slope is vertical,
-* 1^8 is returned. */
+* 1^8 is returned.
+*/
void BKE_curvemapping_compute_slopes(const struct CurveMapping *curve_mapping,
float start_slopes[4],
float end_slopes[4]);
-/** Check if the curve map at the index is identity, that is, does nothing. A curve map is said to
-* be identity if:
+/**
+* Check if the curve map at the index is identity, that is, does nothing.
+* A curve map is said to be identity if:
* - The curve mapping uses extrapolation.
* - Its range is 1.
* - The slope at its start point is 1.

View File

@@ -139,11 +139,15 @@ enum {
/** Do not process ID pointers inside embedded IDs. Needed by depsgraph processing e.g. */
IDWALK_IGNORE_EMBEDDED_ID = (1 << 3),
-/** Also process internal ID pointers like `ID.newid` or `ID.orig_id`.
-* WARNING: Dangerous, use with caution. */
+/**
+* Also process internal ID pointers like `ID.newid` or `ID.orig_id`.
+* WARNING: Dangerous, use with caution.
+*/
IDWALK_DO_INTERNAL_RUNTIME_POINTERS = (1 << 9),
-/** Also process the ID.lib pointer. It is an option because this pointer can usually be fully
-ignored. */
+/**
+* Also process the ID.lib pointer. It is an option because this pointer can usually be fully
+* ignored.
+*/
IDWALK_DO_LIBRARY_POINTER = (1 << 10),
};

View File

@@ -410,7 +410,7 @@ void BKE_curve_init(Curve *cu, const short curve_type)
}
cu->bevel_profile = nullptr;
/* Initialize the offset to 1.0, to compensate for it being set to -1.0
-in the property getter. */
+* in the property getter. */
cu->offset = 1.0f;
}

View File

@@ -589,7 +589,7 @@ static bool get_fcurve_end_keyframes(const FCurve *fcu,
}
/* The binary search returns an index where a keyframe would be inserted,
-so it needs to be clamped to ensure it is in range of the array. */
+* so it needs to be clamped to ensure it is in range of the array. */
first_index = clamp_i(first_index, 0, fcu->totvert - 1);
last_index = clamp_i(last_index - 1, 0, fcu->totvert - 1);
}

View File

@@ -1275,9 +1275,8 @@ bGPDframe *BKE_gpencil_layer_frame_get(bGPDlayer *gpl, int cframe, eGP_GetFrame_
gpl->actframe = gpf;
}
else if (addnew == GP_GETFRAME_ADD_COPY) {
-/* The frame_addcopy function copies the active frame of gpl,
-so we need to set the active frame before copying.
-*/
+/* The #BKE_gpencil_frame_addcopy function copies the active frame of gpl,
+* so we need to set the active frame before copying. */
gpl->actframe = gpf;
gpl->actframe = BKE_gpencil_frame_addcopy(gpl, cframe);
}
@@ -1306,9 +1305,8 @@ bGPDframe *BKE_gpencil_layer_frame_get(bGPDlayer *gpl, int cframe, eGP_GetFrame_
gpl->actframe = gpf;
}
else if (addnew == GP_GETFRAME_ADD_COPY) {
-/* The frame_addcopy function copies the active frame of gpl;
-so we need to set the active frame before copying.
-*/
+/* The #BKE_gpencil_frame_addcopy function copies the active frame of gpl;
+* so we need to set the active frame before copying. */
gpl->actframe = gpf;
gpl->actframe = BKE_gpencil_frame_addcopy(gpl, cframe);
}

View File

@@ -63,7 +63,7 @@ static void merge_uvs_for_vertex(const Span<int> loops_for_vert, Span<float2 *>
if (loops_for_vert.size() <= 1) {
return;
}
-/* Manipulate a copy of the loop indices, de-duplicating UVs per layer. */
+/* Manipulate a copy of the loop indices, de-duplicating UVs per layer. */
Vector<int, 32> loops_merge;
loops_merge.reserve(loops_for_vert.size());
for (float2 *mloopuv : mloopuv_layers) {

View File

@@ -4090,7 +4090,7 @@ bool BKE_object_minmax_empty_drawtype(const struct Object *ob, float r_min[3], f
max[0] = radius + (ofs[0] * radius);
max[1] = radius + (ofs[1] * radius);
/* Since the image aspect can shrink the bounds towards the object origin,
-* adjust the min/max to account for that. */
+* adjust the min/max to account for that. */
for (int i = 0; i < 2; i++) {
CLAMP_MAX(min[i], 0.0f);
CLAMP_MIN(max[i], 0.0f);

View File

@@ -2803,8 +2803,7 @@ static void sculpt_attribute_update_refs(Object *ob)
{
SculptSession *ss = ob->sculpt;
-/* run twice, in case sculpt_attr_update had to recreate a layer and
-messed up the bmesh offsets. */
+/* Run twice, in case sculpt_attr_update had to recreate a layer and messed up #BMesh offsets. */
for (int i = 0; i < 2; i++) {
for (int j = 0; j < SCULPT_MAX_ATTRIBUTES; j++) {
SculptAttribute *attr = ss->temp_attributes + j;

View File

@@ -586,7 +586,7 @@ static const AVCodec *get_av1_encoder(
}
else {
/* Is not a square num, set greater side based on longer side, or use a square if both
-sides are equal. */
+* sides are equal. */
int sqrt_p2 = power_of_2_min_i(threads_sqrt);
if (sqrt_p2 < 2) {
/* Ensure a default minimum. */
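The tile-splitting rule described in the comment can be sketched in isolation. This is a simplified illustration under assumptions (the real function also weighs the frame's longer side and encoder limits), and `choose_tile_grid` is a hypothetical helper:

#include <cmath>

/* Sketch: derive an AV1 tile grid from a thread count. A perfect square uses
 * a square grid; otherwise one side is rounded down to a power of two and the
 * other side is derived from it. */
static void choose_tile_grid(const int num_threads, int *r_cols, int *r_rows)
{
  const int threads_sqrt = int(sqrtf(float(num_threads)));
  if (threads_sqrt * threads_sqrt == num_threads) {
    *r_cols = *r_rows = threads_sqrt;
    return;
  }
  int side = 1;
  while (side * 2 <= threads_sqrt) {
    side *= 2; /* Largest power of two not exceeding sqrt(num_threads). */
  }
  if (side < 2) {
    side = 2; /* Ensure a default minimum, as in the code above. */
  }
  *r_cols = side;
  *r_rows = num_threads / side;
  if (*r_rows < 1) {
    *r_rows = 1;
  }
}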

View File

@@ -10,7 +10,7 @@
#ifdef __GNUC__
/* NOTE(@ideasman42): CLANG behaves slightly differently to GCC,
-* these can be enabled but do so carefully as they can introduce build-errors. */
+* these can be enabled but do so carefully as they can introduce build-errors. */
# if !defined(__clang__)
# pragma GCC diagnostic error "-Wsign-compare"
# pragma GCC diagnostic error "-Wconversion"

View File

@@ -274,7 +274,7 @@ static BMOpDefine bmo_reverse_faces_def = {
* Flip Quad Tessellation
*
* Flip the tessellation direction of the selected quads.
-*/
+*/
static BMOpDefine bmo_flip_quad_tessellation_def = {
"flip_quad_tessellation",
/* slot_in */

View File

@@ -30,7 +30,7 @@ namespace blender::compositor {
* - Distance between the center of the image and the pixel to be evaluated.
* - Distance between the center of the image and the outer-edge.
* - Distance between the center of the image and the inner-edge.
*
* With a simple compare it can be detected if the evaluated pixel is between the outer and inner
* edge.
*/

View File

@@ -102,7 +102,7 @@ void GaussianAlphaBlurBaseOperation::update_memory_buffer_partial(MemoryBuffer *
const int coord_min = max_ii(coord - filtersize_, min_input_coord);
const int coord_max = min_ii(coord + filtersize_ + 1, max_input_coord);
-/* *** This is the main part which is different to #GaussianBlurBaseOperation. *** */
+/* *** This is the main part which is different to #GaussianBlurBaseOperation. *** */
/* Gauss. */
float alpha_accum = 0.0f;
float multiplier_accum = 0.0f;
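The accumulation this hunk touches is a plain normalized convolution; a standalone sketch of the same idea (not the compositor code itself), with the Gaussian kernel precomputed into `gauss`:

#include <algorithm>
#include <vector>

/* Sketch: Gaussian-weighted average of alpha over a clamped window, divided
 * by the accumulated weights so clamped edges do not darken. `gauss` holds
 * 2 * filtersize + 1 entries. */
static float gaussian_alpha_blur_1d(const std::vector<float> &alpha,
                                    const std::vector<float> &gauss,
                                    const int coord,
                                    const int filtersize)
{
  const int coord_min = std::max(coord - filtersize, 0);
  const int coord_max = std::min(coord + filtersize + 1, int(alpha.size()));
  float alpha_accum = 0.0f;
  float multiplier_accum = 0.0f;
  for (int c = coord_min; c < coord_max; c++) {
    const float weight = gauss[c - coord + filtersize];
    alpha_accum += weight * alpha[c];
    multiplier_accum += weight;
  }
  return (multiplier_accum > 0.0f) ? alpha_accum / multiplier_accum : 0.0f;
}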

View File

@@ -155,7 +155,7 @@ struct PersistentOperationKey : public OperationKey {
component_name_storage_ = component_node->name;
name_storage_ = operation_node->name;
-/* Assign fields used by the #OperationKey API. */
+/* Assign fields used by the #OperationKey API. */
id = id_node->id_orig;
component_type = component_node->type;
component_name = component_name_storage_.c_str();

View File

@@ -227,7 +227,7 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_effect_reflection_resolve)
.auto_resource_location(true)
.do_static_compilation(true);
-/* Split reflection resolve support for Intel-based MacBooks.*/
+/* Split reflection resolve support for Intel-based MacBooks. */
GPU_SHADER_CREATE_INFO(eevee_legacy_effect_reflection_resolve_probe)
.define("RESOLVE_PROBE")
.additional_info("eevee_legacy_effect_reflection_resolve")

View File

@@ -345,7 +345,7 @@ void animviz_motionpath_compute_range(Object *ob, Scene *scene)
bAnimVizSettings *avs = ob->mode == OB_MODE_POSE ? &ob->pose->avs : &ob->avs;
if (avs->path_range == MOTIONPATH_RANGE_MANUAL) {
-/* Don't touch manually-determined ranges. */
+/* Don't touch manually-determined ranges. */
return;
}

View File

@@ -798,7 +798,7 @@ static void gpencil_create_extensions_radius(tGPDfill *tgpf)
}
/* Don't check endpoint distances unless the bounding boxes of the strokes
-are close enough together that they can plausibly be connected. */
+* are close enough together that they can plausibly be connected. */
if (!extended_bbox_overlap(gps->boundbox_min,
gps->boundbox_max,
gps2->boundbox_min,

View File

@@ -109,7 +109,7 @@ enum {
/* Display the hover region (edge or corner) of the underlying bounding box. */
ED_GIZMO_CAGE2D_STYLE_BOX = 0,
/* Display the bounding box plus dots on four corners while hovering, usually used for
-transforming a 2D shape. */
+* transforming a 2D shape. */
ED_GIZMO_CAGE2D_STYLE_BOX_TRANSFORM,
/* Display the bounding circle while hovering. */
ED_GIZMO_CAGE2D_STYLE_CIRCLE,

View File

@@ -377,7 +377,7 @@ static bool screen_areas_can_align(bScreen *screen, ScrArea *sa1, ScrArea *sa2,
return false;
}
-/* Areas that are _smaller_ than minimum sizes, sharing an edge to be moved. See #100772. */
+/* Areas that are _smaller_ than minimum sizes, sharing an edge to be moved. See #100772. */
if (SCREEN_DIR_IS_VERTICAL(dir)) {
const short xmin = MIN2(sa1->v1->vec.x, sa2->v1->vec.x);
const short xmax = MAX2(sa1->v3->vec.x, sa2->v3->vec.x);

View File

@@ -1177,7 +1177,7 @@ void SCULPT_OT_mesh_filter(wmOperatorType *ot)
ot->ui = sculpt_mesh_ui_exec;
/* Doesn't seem to actually be called?
-Check `sculpt_mesh_filter_modal` to see where it's really called. */
+* Check `sculpt_mesh_filter_modal` to see where it's really called. */
ot->cancel = sculpt_mesh_filter_cancel;
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO | OPTYPE_GRAB_CURSOR_X | OPTYPE_BLOCKING |

View File

@@ -521,7 +521,7 @@ static void write_space_outliner(BlendWriter *writer, const SpaceOutliner *space
/* TODO the mempool could be moved to #SpaceOutliner_Runtime so that #SpaceOutliner could
* hold the #TreeStore directly. */
-/* Address relative to the tree-store, as noted above. */
+/* Address relative to the tree-store, as noted above. */
void *data_addr = (void *)POINTER_OFFSET(ts, sizeof(void *));
/* There should be plenty of memory addresses within the mempool data that we can point into,
* just double-check we don't potentially end up with a memory address that another DNA

View File

@@ -3169,13 +3169,12 @@ static void uv_map_mirror(BMFace *efa,
* than 0.5 units in the *u* coordinate.
* If we find such a face, we try and improve the unwrapping
* by adding (1.0, 0.0) onto some of the face's UVs.
*
* Note that this is only a heuristic. The property we're
* attempting to maintain is that the winding of the face
* in UV space corresponds with the handedness of the face
* in 3D space w.r.t to the unwrapping. Even for triangles,
-* that property is somewhat complicated to evaluate.
-*/
+* that property is somewhat complicated to evaluate. */
float right_u = -1.0e30f;
BMLoop *l;

View File

@@ -94,7 +94,7 @@ inline void execute_lazy_function_eagerly_impl(
fn.execute(params, context);
fn.destruct_storage(context.storage);
-/* Make sure all outputs have been computed. */
+/* Make sure all outputs have been computed. */
BLI_assert(!Span<bool>(set_outputs).contains(false));
}

View File

@@ -125,7 +125,7 @@ namespace detail {
*/
template<typename MaskT, typename... Args, typename... ParamTags, size_t... I, typename ElementFn>
/* Perform additional optimizations on this loop because it is a very hot loop. For example, the
-* math node in geometry nodes is processed here. */
+* math node in geometry nodes is processed here. */
#if (defined(__GNUC__) && !defined(__clang__))
[[gnu::optimize("-funroll-loops")]] [[gnu::optimize("O3")]]
#endif

View File

@@ -21,7 +21,7 @@ enum eUVPackIsland_MarginMethod {
struct UVPackIsland_Params {
/** Islands can be rotated to improve packing. */
bool rotate;
-/** (In UV Editor) only pack islands which have one or more selected UVs.*/
+/** (In UV Editor) only pack islands which have one or more selected UVs. */
bool only_selected_uvs;
/** (In 3D Viewport or UV Editor) only pack islands which have selected faces. */
bool only_selected_faces;

View File

@@ -1115,7 +1115,7 @@ static int poly_find_doubles(const OffsetIndices<int> poly_corners_offsets,
{
/* Fills the `r_buffer` buffer with the intersection of the arrays in `buffer_a` and `buffer_b`.
* `buffer_a` and `buffer_b` have a sequence of sorted, non-repeating indices representing
-* polygons. */
+* polygons. */
const auto intersect = [](const Span<int> buffer_a, const Span<int> buffer_b, int *r_buffer) {
int result_num = 0;
int index_a = 0, index_b = 0;
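The lambda this comment documents is a classic two-pointer intersection of sorted index arrays; a standalone sketch of the same technique (not the Blender function itself):

/* Sketch: intersect two sorted, duplicate-free index arrays in linear time by
 * always advancing the side holding the smaller value. Returns how many
 * indices were written to r_buffer. */
static int intersect_sorted(
    const int *a, const int a_num, const int *b, const int b_num, int *r_buffer)
{
  int result_num = 0;
  int index_a = 0, index_b = 0;
  while (index_a < a_num && index_b < b_num) {
    if (a[index_a] < b[index_b]) {
      index_a++;
    }
    else if (b[index_b] < a[index_a]) {
      index_b++;
    }
    else {
      r_buffer[result_num++] = a[index_a];
      index_a++;
      index_b++;
    }
  }
  return result_num;
}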

View File

@@ -548,7 +548,7 @@ static void sample_interval_bezier(const Span<float3> src_positions,
}
}
else {
-/* General case, compute the insertion point. */
+/* General case, compute the insertion point. */
end_point_insert = knot_insert_bezier(
src_positions, src_handles_l, src_handles_r, end_point);
@@ -1063,7 +1063,7 @@ bke::CurvesGeometry trim_curves(const bke::CurvesGeometry &src_curves,
dst_curves.attributes_for_write().remove("cyclic");
}
else {
-/* Only trimmed curves are no longer cyclic. */
+/* Only trimmed curves are no longer cyclic. */
if (bke::SpanAttributeWriter cyclic = dst_attributes.lookup_for_write_span<bool>("cyclic")) {
cyclic.span.fill_indices(selection, false);
cyclic.finish();

View File

@@ -39,8 +39,8 @@ class UVAABBIsland {
*
* In theory, alpaca_turbo should be the fastest non-trivial packer, hence the "turbo" suffix.
*
-* Technically, the algorithm here is only O(n), In practice, to get reasonable results, the input
-* must be pre-sorted, which costs an additional O(nlogn) time complexity.
+* Technically, the algorithm here is only `O(n)`, In practice, to get reasonable results,
+* the input must be pre-sorted, which costs an additional `O(nlogn)` time complexity.
*/
static void pack_islands_alpaca_turbo(const Span<UVAABBIsland *> islands,
float *r_max_u,
@@ -100,21 +100,20 @@ static float pack_islands_scale_margin(const Span<PackIsland *> islands,
const float scale,
const float margin)
{
-/* #BLI_box_pack_2d produces layouts with high packing efficiency, but has O(n^3)
+/* #BLI_box_pack_2d produces layouts with high packing efficiency, but has `O(n^3)`
* time complexity, causing poor performance if there are lots of islands. See: #102843.
-* #pack_islands_alpaca_turbo is designed to be the fastest packing method, O(nlogn),
+* #pack_islands_alpaca_turbo is designed to be the fastest packing method, `O(nlogn)`,
* but has poor packing efficiency if the AABBs have a spread of sizes and aspect ratios.
* Here, we merge the best properties of both packers into one combined packer.
*
* The free tuning parameter, `alpaca_cutoff` will determine how many islands are packed
* using each method.
*
* The current strategy is:
-* - Sort islands in size order.
-* - Call #BLI_box_pack_2d on the first `alpaca_cutoff` islands.
-* - Call #pack_islands_alpaca_turbo on the remaining islands.
-* - Combine results.
+* - Sort islands in size order.
+* - Call #BLI_box_pack_2d on the first `alpaca_cutoff` islands.
+* - Call #pack_islands_alpaca_turbo on the remaining islands.
+* - Combine results.
*/
/* First, copy information from our input into the AABB structure. */
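The strategy spelled out in that comment reduces to a size-sorted split between the two packers; a structural sketch follows, where `pack_exact` and `pack_alpaca` are illustrative stand-ins for #BLI_box_pack_2d and #pack_islands_alpaca_turbo, not real functions:

#include <algorithm>
#include <cstddef>
#include <vector>

struct IslandSketch {
  float area; /* Stand-in for the island's AABB size metric. */
};

static void pack_exact(IslandSketch * /*first*/, std::size_t /*count*/) {}
static void pack_alpaca(IslandSketch * /*first*/, std::size_t /*count*/) {}

/* Sketch: the largest `alpaca_cutoff` islands go to the high-efficiency
 * O(n^3) packer, the remainder to the O(n log n) packer. */
static void pack_islands_combined(std::vector<IslandSketch> &islands,
                                  const std::size_t alpaca_cutoff)
{
  /* Sort islands in size order, largest first. */
  std::sort(islands.begin(), islands.end(),
            [](const IslandSketch &a, const IslandSketch &b) { return a.area > b.area; });
  const std::size_t split = std::min(alpaca_cutoff, islands.size());
  pack_exact(islands.data(), split);
  pack_alpaca(islands.data() + split, islands.size() - split);
  /* Combining results would offset the second layout past the first one. */
}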

View File

@@ -1006,7 +1006,7 @@ void GPU_material_optimize(GPUMaterial *mat)
* NOTE(Threading): Need to verify if GPU_generate_pass can cause side-effects, especially when
* used with "thunk". So far, this appears to work, and deferring optimized pass creation is more
* optimal, as these do not benefit from caching, due to baked constants. However, this could
-* possibly be cause for concern for certain cases. */
+* possibly be cause for concern for certain cases. */
if (!mat->optimized_pass) {
mat->optimized_pass = GPU_generate_pass(
mat, &mat->graph, mat->optimize_pass_info.callback, mat->optimize_pass_info.thunk, true);

View File

@@ -40,7 +40,7 @@ class MTLBatch : public Batch {
VertexBufferID bufferIds[GPU_BATCH_VBO_MAX_LEN] = {};
/* Cache life index compares a cache entry with the active MTLBatch state.
* This is initially set to the cache life index of MTLBatch. If the batch has been modified,
-* this index is incremented to cheaply invalidate existing cache entries. */
+* this index is incremented to cheaply invalidate existing cache entries. */
uint32_t cache_life_index = 0;
};
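The cache-life scheme in that comment is generation-based invalidation: rather than walking and clearing entries, the owner bumps a counter and stale entries fail the comparison on lookup. A minimal sketch with illustrative types (not the MTLBatch API):

#include <cstdint>

struct CacheEntrySketch {
  uint32_t cache_life_index = 0; /* Generation the entry was created in. */
};

struct BatchSketch {
  uint32_t cache_life_index = 0;

  /* O(1) invalidation of every existing cache entry. */
  void invalidate_caches()
  {
    cache_life_index++;
  }

  bool entry_is_valid(const CacheEntrySketch &entry) const
  {
    return entry.cache_life_index == cache_life_index;
  }
};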

View File

@@ -619,7 +619,7 @@ void MTLBatch::prepare_vertex_descriptor_and_bindings(
* Vertex Descriptors are required to generate a pipeline state, based on the current Batch's
* buffer bindings. These bindings are a unique matching, depending on what input attributes a
* batch has in its buffers, and those which are supported by the shader interface.
*
* We iterate through the buffers and resolve which attributes satisfy the requirements of the
* currently bound shader. We cache this data, for a given Batch<->ShderInterface pairing in a
* VAO cache to avoid the need to recalculate this data. */

View File

@@ -27,7 +27,7 @@ namespace blender::gpu {
* uint32_t instanceCount;
* uint32_t vertexStart;
* uint32_t baseInstance;
-};*/
+* }; */
/* MTLDrawIndexedPrimitivesIndirectArguments --
* https://developer.apple.com/documentation/metal/mtldrawindexedprimitivesindirectarguments?language=objc
@@ -38,7 +38,7 @@ namespace blender::gpu {
* uint32_t indexStart;
* uint32_t baseVertex;
* uint32_t baseInstance;
-};*/
+* }; */
#define MDI_ENABLED (buffer_size_ != 0)
#define MDI_DISABLED (buffer_size_ == 0)

View File

@@ -497,7 +497,7 @@ void gpu::MTLTexture::update_sub(
}
/* Early exit if update size is zero. update_sub sometimes has a zero-sized
-* extent when called from texture painting. */
+* extent when called from texture painting. */
if (totalsize <= 0 || extent[0] <= 0) {
MTL_LOG_WARNING(
"MTLTexture::update_sub called with extent size of zero for one or more dimensions. "
@@ -643,7 +643,7 @@ void gpu::MTLTexture::update_sub(
/* For compute, we should use a stating texture to avoid texture write usage,
* if it has not been specified for the texture. Using shader-write disables
-* lossless texture compression, so this is best to avoid where possible. */
+* lossless texture compression, so this is best to avoid where possible. */
if (!(gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_SHADER_WRITE)) {
use_staging_texture = true;
}

View File

@@ -176,7 +176,7 @@ MTLPixelFormat gpu_texture_format_to_metal(eGPUTextureFormat tex_format)
return MTLPixelFormatDepth32Float;
case GPU_DEPTH_COMPONENT24:
/* This formal is not supported on Metal.
-* Use 32Float depth instead with some conversion steps for download and upload. */
+* Use 32Float depth instead with some conversion steps for download and upload. */
return MTLPixelFormatDepth32Float;
case GPU_DEPTH_COMPONENT16:
return MTLPixelFormatDepth16Unorm;

View File

@@ -23,7 +23,7 @@ void main()
vec2 circle_center = vec2(circle_radius_outer - text_width, 0.5);
-/* radius in icon space (1 is the icon width). */
+/* Radius in icon space (1 is the icon width). */
float radius = length(mask_coord_interp - circle_center);
float mask = smoothstep(circle_radius_inner, circle_radius_outer, radius);

View File

@@ -55,7 +55,7 @@ GPU_SHADER_CREATE_INFO(gpu_compute_ssbo_binding_test)
.compute_source("gpu_compute_dummy_test.glsl")
.do_static_compilation(true);
-/* Push constants*/
+/* Push constants. */
GPU_SHADER_CREATE_INFO(gpu_push_constants_base_test)
.local_group_size(1)
.storage_buf(0, Qualifier::WRITE, "float", "data_out[]")

View File

@@ -15,13 +15,13 @@ namespace blender::gpu {
* Information about alignment/components and memory size for types when using std140 layout.
*/
struct Std140 {
-/** Get the memory size in bytes of a single component using by the given type.*/
+/** Get the memory size in bytes of a single component using by the given type. */
static uint32_t component_mem_size(const shader::Type type);
-/** Get to alignment of the given type in bytes.*/
+/** Get to alignment of the given type in bytes. */
static uint32_t element_alignment(const shader::Type type, bool is_array);
-/** Get the number of components that should be allocated for the given type.*/
+/** Get the number of components that should be allocated for the given type. */
static uint32_t element_components_len(const shader::Type type);
-/** Get the number of components of the given type when used in an array.*/
+/** Get the number of components of the given type when used in an array. */
static uint32_t array_components_len(const shader::Type type);
};
@@ -29,13 +29,13 @@ struct Std140 {
* Information about alignment/components and memory size for types when using std430 layout.
*/
struct Std430 {
-/** Get the memory size in bytes of a single component using by the given type.*/
+/** Get the memory size in bytes of a single component using by the given type. */
static uint32_t component_mem_size(const shader::Type type);
-/** Get to alignment of the given type in bytes.*/
+/** Get to alignment of the given type in bytes. */
static uint32_t element_alignment(const shader::Type type, bool is_array);
-/** Get the number of components that should be allocated for the given type.*/
+/** Get the number of components that should be allocated for the given type. */
static uint32_t element_components_len(const shader::Type type);
-/** Get the number of components of the given type when used in an array.*/
+/** Get the number of components of the given type when used in an array. */
static uint32_t array_components_len(const shader::Type type);
};
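One practical consequence of these layouts shows up with arrays: under std140 every array element is padded out to a 16-byte stride, whereas std430 packs scalars tightly. A standalone sketch of filling a std140 scalar array (not the Blender API):

#include <cstdint>
#include <cstring>

/* Sketch: copy `len` 4-byte scalars into a std140 array, where each element
 * occupies a 16-byte slot; the remaining 12 bytes per slot are padding. */
static void copy_to_std140_array(uint8_t *dst, const float *src, const int len)
{
  for (int i = 0; i < len; i++) {
    std::memcpy(dst + i * 16, src + i, sizeof(float));
  }
}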

View File

@@ -45,12 +45,12 @@ class VKContext;
*/
class VKPushConstants : NonCopyable {
public:
-/** Different methods to store push constants.*/
+/** Different methods to store push constants. */
enum class StorageType {
-/** Push constants aren't in use.*/
+/** Push constants aren't in use. */
NONE,
-/** Store push constants as regular vulkan push constants.*/
+/** Store push constants as regular vulkan push constants. */
PUSH_CONSTANTS,
/**
@@ -67,7 +67,7 @@ class VKPushConstants : NonCopyable {
static constexpr StorageType STORAGE_TYPE_FALLBACK = StorageType::UNIFORM_BUFFER;
struct PushConstant {
-/* Used as lookup based on ShaderInput.*/
+/* Used as lookup based on ShaderInput. */
int32_t location;
/** Offset in the push constant data (in bytes). */
@@ -213,7 +213,7 @@ class VKPushConstants : NonCopyable {
}
/* Store elements in uniform buffer as array. In Std140 arrays have an element stride of 16
-* bytes.*/
+* bytes. */
BLI_assert(sizeof(T) == 4);
const T *src = input_data;
for (const int i : IndexRange(array_size)) {

View File

@@ -909,7 +909,7 @@ static void add_descriptor_set_layout_bindings(
r_bindings.append(create_descriptor_set_layout_binding(location, resource));
}
-/* Add push constants to the descriptor when push constants are stored in an uniform buffer.*/
+/* Add push constants to the descriptor when push constants are stored in an uniform buffer. */
const VKPushConstants::Layout &push_constants_layout = interface.push_constants_layout_get();
if (push_constants_layout.storage_type_get() == VKPushConstants::StorageType::UNIFORM_BUFFER) {
r_bindings.append(create_descriptor_set_layout_binding(push_constants_layout));

View File

@@ -138,8 +138,8 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
descriptor_set_location_update(input, descriptor_set_location++);
}
-/* Post initializing push constants.*/
-/* Determine the binding location of push constants fallback buffer.*/
+/* Post initializing push constants. */
+/* Determine the binding location of push constants fallback buffer. */
int32_t push_constant_descriptor_set_location = -1;
if (push_constants_storage_type == VKPushConstants::StorageType::UNIFORM_BUFFER) {
push_constant_descriptor_set_location = descriptor_set_location++;

View File

@@ -41,7 +41,7 @@ class VKShaderInterface : public ShaderInterface {
const VKDescriptorSet::Location descriptor_set_location(
const shader::ShaderCreateInfo::Resource::BindType &bind_type, int binding) const;
-/** Get the Layout of the shader.*/
+/** Get the Layout of the shader. */
const VKPushConstants::Layout &push_constants_layout_get() const
{
return push_constants_layout_;

View File

@@ -11,9 +11,9 @@ namespace blender::io::usd {
void ensure_usd_plugin_path_registered()
{
-/* if PXR_PYTHON_SUPPORT_ENABLED is defined, we *must* be dynamic and
-the plugins are placed relative to the USD shared library hence no
-hinting is required. */
+/* If #PXR_PYTHON_SUPPORT_ENABLED is defined, we *must* be dynamic and
+* the plugins are placed relative to the USD shared library hence no
+* hinting is required. */
#ifndef PXR_PYTHON_SUPPORT_ENABLED
static bool plugin_path_registered = false;
if (plugin_path_registered) {

View File

@@ -277,7 +277,7 @@ USDPrimReader *USDStageReader::collect_readers(Main *bmain, const pxr::UsdPrim &
if (prim.IsA<pxr::UsdShadeMaterial>()) {
/* Record material path for later processing, if needed,
-* e.g., when importing all materials. */
+* e.g., when importing all materials. */
material_paths_.push_back(prim.GetPath().GetAsString());
/* We don't create readers for materials, so return early. */

View File

@@ -36,9 +36,9 @@ std::string register_usd_plugins_for_tests()
BLI_assert(path_len + 1 < FILE_MAX);
usd_datafiles_dir[path_len] = '/';
usd_datafiles_dir[path_len + 1] = '\0';
-/* if PXR_PYTHON_SUPPORT_ENABLED is defined, we *must* be dynamic and
-the plugins are placed relative to the USD shared library hence no
-hinting is required. */
+/* If #PXR_PYTHON_SUPPORT_ENABLED is defined, we *must* be dynamic and
+* the plugins are placed relative to the USD shared library hence no
+* hinting is required. */
#ifndef PXR_PYTHON_SUPPORT_ENABLED
pxr::PlugRegistry::GetInstance().RegisterPlugins(usd_datafiles_dir);
#endif

View File

@@ -2390,7 +2390,7 @@ void RNA_property_boolean_set_array(PointerRNA *ptr, PropertyRNA *prop, const bo
}
else if (idprop->subtype == IDP_INT) {
/* Support writing to integer and boolean IDProperties, since boolean
-RNA properties used to be stored with integer IDProperties. */
+* RNA properties used to be stored with integer IDProperties. */
int *values_dst = static_cast<int *>(IDP_Array(idprop));
for (uint i = 0; i < idprop->len; i++) {
values_dst[i] = int(values[i]);

View File

@@ -2548,7 +2548,7 @@ static void rna_def_modifier_gpencilbuild(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "Time Alignment", "How should strokes start to appear/disappear");
RNA_def_property_update(prop, 0, "rna_GpencilModifier_update");
-/* Which time mode to use: Current frames, manual percentage, or drawspeed. */
+/* Which time mode to use: Current frames, manual percentage, or drawspeed. */
prop = RNA_def_property(srna, "time_mode", PROP_ENUM, PROP_NONE);
RNA_def_property_enum_sdna(prop, NULL, "time_mode");
RNA_def_property_enum_items(prop, gpencil_build_time_mode_items);

View File

@@ -245,7 +245,7 @@ void RNA_api_mesh(StructRNA *srna)
func = RNA_def_function(srna, "split_faces", "rna_Mesh_split_faces");
RNA_def_function_ui_description(func, "Split faces based on the edge angle");
/* TODO: This parameter has no effect anymore, since the internal code does not need to
-* compute temporary CD_NORMAL loop data. It should be removed for next major release (4.0). */
+* compute temporary CD_NORMAL loop data. It should be removed for next major release (4.0). */
RNA_def_boolean(func, "free_loop_normals", 1, "Free Loop Normals", "Deprecated, has no effect");
func = RNA_def_function(srna, "calc_tangents", "rna_Mesh_calc_tangents");

View File

@@ -1088,7 +1088,7 @@ static void extrude_individual_mesh_faces(Mesh &mesh,
/* For every selected polygon, change it to use the new extruded vertices and the duplicate
* edges, and build the faces that form the sides of the extrusion. Build "original index"
* arrays for the new vertices and edges so they can be accessed later.
*
* Filling some of this data like the new edges or polygons could be easily split into
* separate loops, which may or may not be faster, but would involve more duplication. */
Array<int> new_vert_indices(extrude_corner_size);

View File

@@ -131,7 +131,7 @@ static PyObject *bpy_bmloopuv_select_edge_get(BPy_BMLoopUV *self, void *UNUSED(c
}
static int bpy_bmloopuv_select_edge_set(BPy_BMLoopUV *self, PyObject *value, void *UNUSED(closure))
{
-/* TODO: see comment above on bpy_bmloopuv_pin_uv_set(), the same applies here. */
+/* TODO: see comment above on bpy_bmloopuv_pin_uv_set(), the same applies here. */
BLI_assert(self->edge_select);
if (self->edge_select) {
*self->edge_select = PyC_Long_AsBool(value);