main sync #3

Merged
Patrick Busch merged 318 commits from blender/blender:main into main 2023-03-17 15:52:21 +01:00
66 changed files with 130 additions and 125 deletions
Showing only changes of commit b3625e6bfd

View File

@@ -81,7 +81,7 @@ class DeviceInfo {
   bool has_gpu_queue;  /* Device supports GPU queue. */
   bool use_metalrt;    /* Use MetalRT to accelerate ray queries (Metal only). */
   KernelOptimizationLevel kernel_optimization_level; /* Optimization level applied to path tracing
-                                                        kernels (Metal only). */
+                                                      * kernels (Metal only). */
   DenoiserTypeMask denoisers; /* Supported denoiser types. */
   int cpu_threads;
   vector<DeviceInfo> multi_devices;

View File

@@ -94,7 +94,7 @@ class PathTrace {
   void set_adaptive_sampling(const AdaptiveSampling &adaptive_sampling);

   /* Set the parameters for guiding.
-   * Use to setup the guiding structures before each rendering iteration.*/
+   * Use to setup the guiding structures before each rendering iteration. */
   void set_guiding_params(const GuidingParams &params, const bool reset);

   /* Sets output driver for render buffer output. */
@@ -294,7 +294,7 @@ class PathTrace {
    * rendering iteration. */
   unique_ptr<openpgl::cpp::SampleStorage> guiding_sample_data_storage_;

-  /* The number of already performed training iterations for the guiding field.*/
+  /* The number of already performed training iterations for the guiding field. */
  int guiding_update_count = 0;
 #endif

View File

@@ -202,7 +202,7 @@ ccl_device float2 direction_to_mirrorball(float3 dir)
 }

 /* Single face of a equiangular cube map projection as described in
-   https://blog.google/products/google-ar-vr/bringing-pixels-front-and-center-vr-video/ */
+ * https://blog.google/products/google-ar-vr/bringing-pixels-front-and-center-vr-video/ */
 ccl_device float3 equiangular_cubemap_face_to_direction(float u, float v)
 {
   u = (1.0f - u);
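Editorial note on the hunk above: an equiangular cube map warps each face coordinate with a tangent remapping, so equal steps in (u, v) sweep roughly equal view angles and pixel density stays nearly uniform across the face. A minimal standalone sketch of that warp (illustrative only, not the Cycles kernel code; the +X face and the sign conventions are assumptions):

#include <cmath>

/* Map an equiangular face coordinate in [0, 1] to a direction on the +X face.
 * The tangent warp is the only difference from a standard cube map, which
 * would use the face coordinate linearly. */
static void equiangular_face_to_direction(float u, float v, float dir[3])
{
  const float pi = 3.14159265358979f;
  /* Remap [0, 1] to [-pi/4, +pi/4], then take the tangent (assumed convention). */
  dir[0] = 1.0f;
  dir[1] = std::tan((u - 0.5f) * (pi / 2.0f));
  dir[2] = std::tan((v - 0.5f) * (pi / 2.0f));
}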

View File

@@ -136,7 +136,7 @@ ccl_device_forceinline float3 microfacet_beckmann_sample_vndf(const float3 wi,
   /* Find root in a monotonic interval using newton method, under given precision and maximal
    * iterations. Falls back to bisection if newton step produces results outside of the valid
-   * interval.*/
+   * interval. */
   const float precision = 1e-6f;
   const int max_iter = 3;
   int iter = 0;
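The comment above describes a common safeguarded root-finder: take Newton steps while they stay inside the bracketing interval, and bisect whenever a step escapes. A generic sketch of the pattern (not the Cycles implementation; it assumes f is monotonically increasing on [lo, hi] and that f and df are caller-supplied):

#include <cmath>

/* Safeguarded Newton iteration on a monotonically increasing f over [lo, hi]. */
template<typename F, typename DF>
static float solve_monotonic(F f, DF df, float lo, float hi, float precision, int max_iter)
{
  float x = 0.5f * (lo + hi);
  for (int iter = 0; iter < max_iter; iter++) {
    const float fx = f(x);
    if (std::fabs(fx) < precision) {
      break;
    }
    /* Shrink the bracket around the root using monotonicity. */
    if (fx > 0.0f) {
      hi = x;
    }
    else {
      lo = x;
    }
    const float x_newton = x - fx / df(x);
    /* Accept the Newton step only if it stays inside the bracket;
     * otherwise bisect, which always makes progress. */
    x = (x_newton > lo && x_newton < hi) ? x_newton : 0.5f * (lo + hi);
  }
  return x;
}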

View File

@@ -53,7 +53,7 @@ ccl_device_forceinline void guiding_record_surface_segment(KernelGlobals kg,
 #endif
 }

-/* Records the surface scattering event at the current vertex position of the segment.*/
+/* Records the surface scattering event at the current vertex position of the segment. */
 ccl_device_forceinline void guiding_record_surface_bounce(KernelGlobals kg,
                                                           IntegratorState state,
                                                           ccl_private const ShaderData *sd,
@@ -134,7 +134,7 @@ ccl_device_forceinline void guiding_record_bssrdf_segment(KernelGlobals kg,
 }

 /* Records the transmission of the path at the point of entry while passing
- * the surface boundary.*/
+ * the surface boundary. */
 ccl_device_forceinline void guiding_record_bssrdf_weight(KernelGlobals kg,
                                                          IntegratorState state,
                                                          const Spectrum weight,
@@ -161,7 +161,7 @@ ccl_device_forceinline void guiding_record_bssrdf_weight(KernelGlobals kg,
 /* Records the direction at the point of entry the path takes when sampling the SSS contribution.
  * If not terminated this function is usually followed by a call of
  * guiding_record_volume_transmission to record the transmittance between the point of entry and
- * the point of exit.*/
+ * the point of exit. */
 ccl_device_forceinline void guiding_record_bssrdf_bounce(KernelGlobals kg,
                                                          IntegratorState state,
                                                          const float pdf,
@@ -216,7 +216,7 @@ ccl_device_forceinline void guiding_record_volume_segment(KernelGlobals kg,
 #endif
 }

-/* Records the volume scattering event at the current vertex position of the segment.*/
+/* Records the volume scattering event at the current vertex position of the segment. */
 ccl_device_forceinline void guiding_record_volume_bounce(KernelGlobals kg,
                                                          IntegratorState state,
                                                          ccl_private const ShaderData *sd,
@@ -247,7 +247,7 @@ ccl_device_forceinline void guiding_record_volume_bounce(KernelGlobals kg,
 }

 /* Records the transmission (a.k.a. transmittance weight) between the current path segment
- * and the next one, when the path is inside or passes a volume.*/
+ * and the next one, when the path is inside or passes a volume. */
 ccl_device_forceinline void guiding_record_volume_transmission(KernelGlobals kg,
                                                                IntegratorState state,
                                                                const float3 transmittance_weight)
@@ -330,7 +330,7 @@ ccl_device_forceinline void guiding_record_light_surface_segment(
 /* Records/Adds a final path segment when the path leaves the scene and
  * intersects with a background light (e.g., background color,
  * distant light, or env map). The vertex for this segment is placed along
- * the current ray far out the scene.*/
+ * the current ray far out the scene. */
 ccl_device_forceinline void guiding_record_background(KernelGlobals kg,
                                                       IntegratorState state,
                                                       const Spectrum L,
@@ -359,7 +359,7 @@ ccl_device_forceinline void guiding_record_background(KernelGlobals kg,
 /* Records the scattered contribution of a next event estimation
  * (i.e., a direct light estimate scattered at the current path vertex
- * towards the previous vertex).*/
+ * towards the previous vertex). */
 ccl_device_forceinline void guiding_record_direct_light(KernelGlobals kg,
                                                         IntegratorShadowState state)
 {
@@ -397,7 +397,7 @@ ccl_device_forceinline void guiding_record_continuation_probability(
 /* Path guiding debug render passes. */

 /* Write a set of path guiding related debug information (e.g., guiding probability at first
- * bounce) into separate rendering passes.*/
+ * bounce) into separate rendering passes. */
 ccl_device_forceinline void guiding_write_debug_passes(KernelGlobals kg,
                                                        IntegratorState state,
                                                        ccl_private const ShaderData *sd,

View File

@@ -1019,7 +1019,7 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
   const float step_size = volume_stack_step_size(kg, volume_read_lambda_pass);

 # if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 1
-  /* The current path throughput which is used later to calculate per-segment throughput.*/
+  /* The current path throughput which is used later to calculate per-segment throughput. */
   const float3 initial_throughput = INTEGRATOR_STATE(state, path, throughput);
   /* The path throughput used to calculate the throughput for direct light. */
   float3 unlit_throughput = initial_throughput;
@@ -1063,7 +1063,7 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
     if (result.direct_sample_method == VOLUME_SAMPLE_DISTANCE) {
       /* If the direct scatter event is generated using VOLUME_SAMPLE_DISTANCE the direct event
        * will happen at the same position as the indirect event and the direct light contribution
-       * will contribute to the position of the next path segment.*/
+       * will contribute to the position of the next path segment. */
       float3 transmittance_weight = spectrum_to_rgb(
           safe_divide_color(result.indirect_throughput, initial_throughput));
       guiding_record_volume_transmission(kg, state, transmittance_weight);
@@ -1076,7 +1076,8 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg,
       /* If the direct scatter event is generated using VOLUME_SAMPLE_EQUIANGULAR the direct
        * event will happen at a separate position as the indirect event and the direct light
        * contribution will contribute to the position of the current/previous path segment. The
-       * unlit_throughput has to be adjusted to include the scattering at the previous segment.*/
+       * unlit_throughput has to be adjusted to include the scattering at the previous segment.
+       */
       float3 scatterEval = one_float3();
       if (state->guiding.path_segment) {
         pgl_vec3f scatteringWeight = state->guiding.path_segment->scatteringWeight;

View File

@@ -38,7 +38,7 @@ ccl_device_inline void surface_shader_prepare_guiding(KernelGlobals kg,
   const float surface_guiding_probability = kernel_data.integrator.surface_guiding_probability;
   float rand_bsdf_guiding = path_state_rng_1D(kg, rng_state, PRNG_SURFACE_BSDF_GUIDING);

-  /* Compute proportion of diffuse BSDF and BSSRDFs .*/
+  /* Compute proportion of diffuse BSDF and BSSRDFs. */
   float diffuse_sampling_fraction = 0.0f;
   float bssrdf_sampling_fraction = 0.0f;
   float bsdf_bssrdf_sampling_sum = 0.0f;

View File

@@ -259,7 +259,7 @@ int LightTree::recursive_build(
   bool should_split = false;
   if (try_splitting) {
     /* Find the best place to split the primitives into 2 nodes.
-     * If the best split cost is no better than making a leaf node, make a leaf instead.*/
+     * If the best split cost is no better than making a leaf node, make a leaf instead. */
     float min_cost = min_split_saoh(
         centroid_bounds, start, end, bbox, bcone, split_dim, split_bucket, num_left_prims, prims);
     should_split = num_prims > max_lights_in_leaf_ || min_cost < energy_total;

View File

@@ -878,7 +878,7 @@ GHOST_TSuccess GHOST_ContextVK::initializeDrawingContext()
   }
   extensions_device.push_back("VK_KHR_dedicated_allocation");
   extensions_device.push_back("VK_KHR_get_memory_requirements2");
-  /* Enable MoltenVK required instance extensions.*/
+  /* Enable MoltenVK required instance extensions. */
 #ifdef VK_MVK_MOLTENVK_EXTENSION_NAME
   requireExtension(
       extensions_available, extensions_enabled, "VK_KHR_get_physical_device_properties2");

View File

@@ -133,21 +133,26 @@ void BKE_curvemapping_table_RGBA(const struct CurveMapping *cumap, float **array
 void BKE_curvemapping_get_range_minimums(const struct CurveMapping *curve_mapping,
                                          float minimums[4]);

-/** Get the reciprocal of the difference between the maximum and the minimum x value of each curve
+/**
+ * Get the reciprocal of the difference between the maximum and the minimum x value of each curve
  * map table. Evaluation parameters can be multiplied by this value to be normalized. If the
- * difference is zero, 1^8 is returned. */
+ * difference is zero, 1^8 is returned.
+ */
 void BKE_curvemapping_compute_range_dividers(const struct CurveMapping *curve_mapping,
                                              float dividers[4]);

-/** Compute the slopes at the start and end points of each curve map. The slopes are multiplied by
+/**
+ * Compute the slopes at the start and end points of each curve map. The slopes are multiplied by
  * the range of the curve map to compensate for parameter normalization. If the slope is vertical,
- * 1^8 is returned. */
+ * 1^8 is returned.
+ */
 void BKE_curvemapping_compute_slopes(const struct CurveMapping *curve_mapping,
                                      float start_slopes[4],
                                      float end_slopes[4]);

-/** Check if the curve map at the index is identity, that is, does nothing. A curve map is said to
- * be identity if:
+/**
+ * Check if the curve map at the index is identity, that is, does nothing.
+ * A curve map is said to be identity if:
  * - The curve mapping uses extrapolation.
  * - Its range is 1.
  * - The slope at its start point is 1.
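Returning to BKE_curvemapping_compute_range_dividers above: the divider computation amounts to a guarded reciprocal of each curve map's x range. A conceptual sketch (illustrative only; the zero-range fallback constant is an assumption standing in for the "1^8" mentioned in the header, and this is not the actual BKE implementation):

/* One divider per curve map: 1 / (max_x - min_x), with a large fallback
 * when the range collapses to zero (assumed value, see the header above). */
static void compute_range_dividers(const float min_x[4], const float max_x[4], float dividers[4])
{
  for (int i = 0; i < 4; i++) {
    const float range = max_x[i] - min_x[i];
    dividers[i] = (range != 0.0f) ? 1.0f / range : 1e8f;
  }
}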

View File

@@ -139,11 +139,15 @@ enum {
   /** Do not process ID pointers inside embedded IDs. Needed by depsgraph processing e.g. */
   IDWALK_IGNORE_EMBEDDED_ID = (1 << 3),

-  /** Also process internal ID pointers like `ID.newid` or `ID.orig_id`.
-   * WARNING: Dangerous, use with caution. */
+  /**
+   * Also process internal ID pointers like `ID.newid` or `ID.orig_id`.
+   * WARNING: Dangerous, use with caution.
+   */
   IDWALK_DO_INTERNAL_RUNTIME_POINTERS = (1 << 9),
-  /** Also process the ID.lib pointer. It is an option because this pointer can usually be fully
-     ignored. */
+  /**
+   * Also process the ID.lib pointer. It is an option because this pointer can usually be fully
+   * ignored.
+   */
   IDWALK_DO_LIBRARY_POINTER = (1 << 10),
 };

View File

@@ -410,7 +410,7 @@ void BKE_curve_init(Curve *cu, const short curve_type)
   }

   cu->bevel_profile = nullptr;
   /* Initialize the offset to 1.0, to compensate for it being set to -1.0
-     in the property getter. */
+   * in the property getter. */
   cu->offset = 1.0f;
 }

View File

@@ -589,7 +589,7 @@ static bool get_fcurve_end_keyframes(const FCurve *fcu,
     }

     /* The binary search returns an index where a keyframe would be inserted,
-       so it needs to be clamped to ensure it is in range of the array. */
+     * so it needs to be clamped to ensure it is in range of the array. */
     first_index = clamp_i(first_index, 0, fcu->totvert - 1);
     last_index = clamp_i(last_index - 1, 0, fcu->totvert - 1);
   }

View File

@@ -1275,9 +1275,8 @@ bGPDframe *BKE_gpencil_layer_frame_get(bGPDlayer *gpl, int cframe, eGP_GetFrame_
         gpl->actframe = gpf;
       }
       else if (addnew == GP_GETFRAME_ADD_COPY) {
-        /* The frame_addcopy function copies the active frame of gpl,
-           so we need to set the active frame before copying.
-         */
+        /* The #BKE_gpencil_frame_addcopy function copies the active frame of gpl,
+         * so we need to set the active frame before copying. */
         gpl->actframe = gpf;
         gpl->actframe = BKE_gpencil_frame_addcopy(gpl, cframe);
       }
@@ -1306,9 +1305,8 @@ bGPDframe *BKE_gpencil_layer_frame_get(bGPDlayer *gpl, int cframe, eGP_GetFrame_
         gpl->actframe = gpf;
       }
       else if (addnew == GP_GETFRAME_ADD_COPY) {
-        /* The frame_addcopy function copies the active frame of gpl;
-           so we need to set the active frame before copying.
-         */
+        /* The #BKE_gpencil_frame_addcopy function copies the active frame of gpl;
+         * so we need to set the active frame before copying. */
         gpl->actframe = gpf;
         gpl->actframe = BKE_gpencil_frame_addcopy(gpl, cframe);
       }

View File

@@ -2803,8 +2803,7 @@ static void sculpt_attribute_update_refs(Object *ob)
 {
   SculptSession *ss = ob->sculpt;

-  /* run twice, in case sculpt_attr_update had to recreate a layer and
-     messed up the bmesh offsets. */
+  /* Run twice, in case sculpt_attr_update had to recreate a layer and messed up #BMesh offsets. */
   for (int i = 0; i < 2; i++) {
     for (int j = 0; j < SCULPT_MAX_ATTRIBUTES; j++) {
       SculptAttribute *attr = ss->temp_attributes + j;

View File

@@ -586,7 +586,7 @@ static const AVCodec *get_av1_encoder(
     }
     else {
       /* Is not a square num, set greater side based on longer side, or use a square if both
-         sides are equal. */
+       * sides are equal. */
       int sqrt_p2 = power_of_2_min_i(threads_sqrt);
       if (sqrt_p2 < 2) {
         /* Ensure a default minimum. */

View File

@@ -274,7 +274,7 @@ static BMOpDefine bmo_reverse_faces_def = {
  * Flip Quad Tessellation
  *
  * Flip the tessellation direction of the selected quads.
  */
 static BMOpDefine bmo_flip_quad_tessellation_def = {
     "flip_quad_tessellation",
     /* slot_in */

View File

@@ -30,7 +30,7 @@ namespace blender::compositor {
  * - Distance between the center of the image and the pixel to be evaluated.
  * - Distance between the center of the image and the outer-edge.
  * - Distance between the center of the image and the inner-edge.
  *
  * With a simple compare it can be detected if the evaluated pixel is between the outer and inner
  * edge.
  */
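The three distances listed above reduce the per-pixel mask test to two comparisons. A tiny sketch of that test (names are illustrative, not the compositor's):

/* A pixel lies in the mask band when its distance from the image center
 * falls between the inner-edge and outer-edge distances. */
static bool pixel_between_edges(float dist_pixel, float dist_inner, float dist_outer)
{
  return dist_pixel >= dist_inner && dist_pixel <= dist_outer;
}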

View File

@@ -227,7 +227,7 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_effect_reflection_resolve)
     .auto_resource_location(true)
     .do_static_compilation(true);

-/* Split reflection resolve support for Intel-based MacBooks.*/
+/* Split reflection resolve support for Intel-based MacBooks. */
 GPU_SHADER_CREATE_INFO(eevee_legacy_effect_reflection_resolve_probe)
     .define("RESOLVE_PROBE")
     .additional_info("eevee_legacy_effect_reflection_resolve")

View File

@@ -798,7 +798,7 @@ static void gpencil_create_extensions_radius(tGPDfill *tgpf)
       }

       /* Don't check endpoint distances unless the bounding boxes of the strokes
-         are close enough together that they can plausibly be connected. */
+       * are close enough together that they can plausibly be connected. */
       if (!extended_bbox_overlap(gps->boundbox_min,
                                  gps->boundbox_max,
                                  gps2->boundbox_min,

View File

@@ -109,7 +109,7 @@ enum {
   /* Display the hover region (edge or corner) of the underlying bounding box. */
   ED_GIZMO_CAGE2D_STYLE_BOX = 0,
   /* Display the bounding box plus dots on four corners while hovering, usually used for
-     transforming a 2D shape. */
+   * transforming a 2D shape. */
   ED_GIZMO_CAGE2D_STYLE_BOX_TRANSFORM,
   /* Display the bounding circle while hovering. */
   ED_GIZMO_CAGE2D_STYLE_CIRCLE,

View File

@@ -1177,7 +1177,7 @@ void SCULPT_OT_mesh_filter(wmOperatorType *ot)
   ot->ui = sculpt_mesh_ui_exec;

   /* Doesn't seem to actually be called?
-     Check `sculpt_mesh_filter_modal` to see where it's really called. */
+   * Check `sculpt_mesh_filter_modal` to see where it's really called. */
   ot->cancel = sculpt_mesh_filter_cancel;

   ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO | OPTYPE_GRAB_CURSOR_X | OPTYPE_BLOCKING |

View File

@@ -3169,13 +3169,12 @@ static void uv_map_mirror(BMFace *efa,
    * than 0.5 units in the *u* coordinate.
    * If we find such a face, we try and improve the unwrapping
    * by adding (1.0, 0.0) onto some of the face's UVs.
    *
    * Note that this is only a heuristic. The property we're
    * attempting to maintain is that the winding of the face
    * in UV space corresponds with the handedness of the face
    * in 3D space w.r.t to the unwrapping. Even for triangles,
-   * that property is somewhat complicated to evaluate.
-   */
+   * that property is somewhat complicated to evaluate. */
   float right_u = -1.0e30f;
   BMLoop *l;

View File

@@ -21,7 +21,7 @@ enum eUVPackIsland_MarginMethod {
 struct UVPackIsland_Params {
   /** Islands can be rotated to improve packing. */
   bool rotate;
-  /** (In UV Editor) only pack islands which have one or more selected UVs.*/
+  /** (In UV Editor) only pack islands which have one or more selected UVs. */
   bool only_selected_uvs;
   /** (In 3D Viewport or UV Editor) only pack islands which have selected faces. */
   bool only_selected_faces;

View File

@@ -39,8 +39,8 @@ class UVAABBIsland {
  *
  * In theory, alpaca_turbo should be the fastest non-trivial packer, hence the "turbo" suffix.
  *
- * Technically, the algorithm here is only O(n), In practice, to get reasonable results, the input
- * must be pre-sorted, which costs an additional O(nlogn) time complexity.
+ * Technically, the algorithm here is only `O(n)`, In practice, to get reasonable results,
+ * the input must be pre-sorted, which costs an additional `O(nlogn)` time complexity.
  */
 static void pack_islands_alpaca_turbo(const Span<UVAABBIsland *> islands,
                                       float *r_max_u,
@@ -100,21 +100,20 @@ static float pack_islands_scale_margin(const Span<PackIsland *> islands,
                                        const float scale,
                                        const float margin)
 {
-  /* #BLI_box_pack_2d produces layouts with high packing efficiency, but has O(n^3)
+  /* #BLI_box_pack_2d produces layouts with high packing efficiency, but has `O(n^3)`
    * time complexity, causing poor performance if there are lots of islands. See: #102843.
-   * #pack_islands_alpaca_turbo is designed to be the fastest packing method, O(nlogn),
+   * #pack_islands_alpaca_turbo is designed to be the fastest packing method, `O(nlogn)`,
    * but has poor packing efficiency if the AABBs have a spread of sizes and aspect ratios.
    * Here, we merge the best properties of both packers into one combined packer.
    *
    * The free tuning parameter, `alpaca_cutoff` will determine how many islands are packed
    * using each method.
    *
    * The current strategy is:
    * - Sort islands in size order.
    * - Call #BLI_box_pack_2d on the first `alpaca_cutoff` islands.
    * - Call #pack_islands_alpaca_turbo on the remaining islands.
    * - Combine results.
    */

   /* First, copy information from our input into the AABB structure. */
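The hybrid strategy spelled out in the comment above is easy to express in outline. A self-contained toy sketch (illustrative only: pack_expensive and pack_fast stand in for #BLI_box_pack_2d and #pack_islands_alpaca_turbo, and the alpaca_cutoff value is an assumption):

#include <algorithm>
#include <cstddef>
#include <vector>

struct Island {
  float width = 0.0f;
  float height = 0.0f;
};

/* Stand-ins for the two packers contrasted above. */
static void pack_expensive(std::vector<Island> & /*islands*/) { /* High quality, O(n^3). */ }
static void pack_fast(std::vector<Island> & /*islands*/) { /* Fast, O(n log n). */ }

static void pack_islands_combined(std::vector<Island> &islands)
{
  const std::size_t alpaca_cutoff = 1024; /* Assumed tuning value. */
  /* Sort islands in (descending) size order so the expensive packer handles
   * the islands that matter most for overall packing efficiency. */
  std::sort(islands.begin(), islands.end(), [](const Island &a, const Island &b) {
    return a.width * a.height > b.width * b.height;
  });
  const std::ptrdiff_t split = static_cast<std::ptrdiff_t>(
      std::min(alpaca_cutoff, islands.size()));
  std::vector<Island> head(islands.begin(), islands.begin() + split);
  std::vector<Island> tail(islands.begin() + split, islands.end());
  pack_expensive(head); /* First `alpaca_cutoff` islands. */
  pack_fast(tail);      /* Remaining islands. */
  /* Combining the two layouts (offsetting `tail` past `head`'s bounds) is
   * omitted for brevity. */
}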

View File

@@ -619,7 +619,7 @@ void MTLBatch::prepare_vertex_descriptor_and_bindings(
  * Vertex Descriptors are required to generate a pipeline state, based on the current Batch's
  * buffer bindings. These bindings are a unique matching, depending on what input attributes a
  * batch has in its buffers, and those which are supported by the shader interface.
  *
  * We iterate through the buffers and resolve which attributes satisfy the requirements of the
  * currently bound shader. We cache this data, for a given Batch<->ShderInterface pairing in a
  * VAO cache to avoid the need to recalculate this data. */

View File

@@ -27,7 +27,7 @@ namespace blender::gpu {
  * uint32_t instanceCount;
  * uint32_t vertexStart;
  * uint32_t baseInstance;
-};*/
+ * }; */

 /* MTLDrawIndexedPrimitivesIndirectArguments --
  * https://developer.apple.com/documentation/metal/mtldrawindexedprimitivesindirectarguments?language=objc
@@ -38,7 +38,7 @@ namespace blender::gpu {
  * uint32_t indexStart;
  * uint32_t baseVertex;
  * uint32_t baseInstance;
-};*/
+ * }; */

 #define MDI_ENABLED (buffer_size_ != 0)
 #define MDI_DISABLED (buffer_size_ == 0)

View File

@@ -23,7 +23,7 @@ void main()
   vec2 circle_center = vec2(circle_radius_outer - text_width, 0.5);

-  /* radius in icon space (1 is the icon width). */
+  /* Radius in icon space (1 is the icon width). */
   float radius = length(mask_coord_interp - circle_center);
   float mask = smoothstep(circle_radius_inner, circle_radius_outer, radius);

View File

@@ -55,7 +55,7 @@ GPU_SHADER_CREATE_INFO(gpu_compute_ssbo_binding_test)
     .compute_source("gpu_compute_dummy_test.glsl")
     .do_static_compilation(true);

-/* Push constants*/
+/* Push constants. */
 GPU_SHADER_CREATE_INFO(gpu_push_constants_base_test)
     .local_group_size(1)
     .storage_buf(0, Qualifier::WRITE, "float", "data_out[]")

View File

@@ -15,13 +15,13 @@ namespace blender::gpu {
  * Information about alignment/components and memory size for types when using std140 layout.
  */
 struct Std140 {
-  /** Get the memory size in bytes of a single component using by the given type.*/
+  /** Get the memory size in bytes of a single component using by the given type. */
   static uint32_t component_mem_size(const shader::Type type);
-  /** Get to alignment of the given type in bytes.*/
+  /** Get to alignment of the given type in bytes. */
   static uint32_t element_alignment(const shader::Type type, bool is_array);
-  /** Get the number of components that should be allocated for the given type.*/
+  /** Get the number of components that should be allocated for the given type. */
   static uint32_t element_components_len(const shader::Type type);
-  /** Get the number of components of the given type when used in an array.*/
+  /** Get the number of components of the given type when used in an array. */
   static uint32_t array_components_len(const shader::Type type);
 };
@@ -29,13 +29,13 @@ struct Std140 {
  * Information about alignment/components and memory size for types when using std430 layout.
  */
 struct Std430 {
-  /** Get the memory size in bytes of a single component using by the given type.*/
+  /** Get the memory size in bytes of a single component using by the given type. */
   static uint32_t component_mem_size(const shader::Type type);
-  /** Get to alignment of the given type in bytes.*/
+  /** Get to alignment of the given type in bytes. */
   static uint32_t element_alignment(const shader::Type type, bool is_array);
-  /** Get the number of components that should be allocated for the given type.*/
+  /** Get the number of components that should be allocated for the given type. */
   static uint32_t element_components_len(const shader::Type type);
-  /** Get the number of components of the given type when used in an array.*/
+  /** Get the number of components of the given type when used in an array. */
   static uint32_t array_components_len(const shader::Type type);
 };

View File

@@ -45,12 +45,12 @@ class VKContext;
  */
 class VKPushConstants : NonCopyable {
  public:
-  /** Different methods to store push constants.*/
+  /** Different methods to store push constants. */
   enum class StorageType {
-    /** Push constants aren't in use.*/
+    /** Push constants aren't in use. */
     NONE,
-    /** Store push constants as regular vulkan push constants.*/
+    /** Store push constants as regular vulkan push constants. */
     PUSH_CONSTANTS,
     /**
@@ -67,7 +67,7 @@ class VKPushConstants : NonCopyable {
   static constexpr StorageType STORAGE_TYPE_FALLBACK = StorageType::UNIFORM_BUFFER;

   struct PushConstant {
-    /* Used as lookup based on ShaderInput.*/
+    /* Used as lookup based on ShaderInput. */
     int32_t location;
     /** Offset in the push constant data (in bytes). */
@@ -213,7 +213,7 @@ class VKPushConstants : NonCopyable {
     }

     /* Store elements in uniform buffer as array. In Std140 arrays have an element stride of 16
-     * bytes.*/
+     * bytes. */
     BLI_assert(sizeof(T) == 4);
     const T *src = input_data;
     for (const int i : IndexRange(array_size)) {
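For context on the loop above: std140 gives array elements a 16-byte stride, so packing an array of 4-byte scalars means writing each value at 16-byte intervals. A standalone sketch of that copy (illustrative, not the VKPushConstants code):

#include <cstdint>
#include <cstring>

/* Copy `len` 4-byte elements into a std140-style array, where every element
 * occupies a 16-byte slot: 4 bytes of data followed by 12 bytes of padding. */
static void copy_to_std140_float_array(std::uint8_t *dst, const float *src, int len)
{
  for (int i = 0; i < len; i++) {
    std::memcpy(dst + 16 * i, src + i, sizeof(float));
  }
}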

View File

@@ -909,7 +909,7 @@ static void add_descriptor_set_layout_bindings(
     r_bindings.append(create_descriptor_set_layout_binding(location, resource));
   }

-  /* Add push constants to the descriptor when push constants are stored in an uniform buffer.*/
+  /* Add push constants to the descriptor when push constants are stored in an uniform buffer. */
   const VKPushConstants::Layout &push_constants_layout = interface.push_constants_layout_get();
   if (push_constants_layout.storage_type_get() == VKPushConstants::StorageType::UNIFORM_BUFFER) {
     r_bindings.append(create_descriptor_set_layout_binding(push_constants_layout));

View File

@@ -138,8 +138,8 @@ void VKShaderInterface::init(const shader::ShaderCreateInfo &info)
     descriptor_set_location_update(input, descriptor_set_location++);
   }

-  /* Post initializing push constants.*/
-  /* Determine the binding location of push constants fallback buffer.*/
+  /* Post initializing push constants. */
+  /* Determine the binding location of push constants fallback buffer. */
   int32_t push_constant_descriptor_set_location = -1;
   if (push_constants_storage_type == VKPushConstants::StorageType::UNIFORM_BUFFER) {
     push_constant_descriptor_set_location = descriptor_set_location++;

View File

@@ -41,7 +41,7 @@ class VKShaderInterface : public ShaderInterface {
   const VKDescriptorSet::Location descriptor_set_location(
       const shader::ShaderCreateInfo::Resource::BindType &bind_type, int binding) const;

-  /** Get the Layout of the shader.*/
+  /** Get the Layout of the shader. */
   const VKPushConstants::Layout &push_constants_layout_get() const
   {
     return push_constants_layout_;

View File

@@ -11,9 +11,9 @@ namespace blender::io::usd {
 void ensure_usd_plugin_path_registered()
 {
-  /* if PXR_PYTHON_SUPPORT_ENABLED is defined, we *must* be dynamic and
-     the plugins are placed relative to the USD shared library hence no
-     hinting is required. */
+  /* If #PXR_PYTHON_SUPPORT_ENABLED is defined, we *must* be dynamic and
+   * the plugins are placed relative to the USD shared library hence no
+   * hinting is required. */
 #ifndef PXR_PYTHON_SUPPORT_ENABLED
   static bool plugin_path_registered = false;
   if (plugin_path_registered) {

View File

@@ -36,9 +36,9 @@ std::string register_usd_plugins_for_tests()
   BLI_assert(path_len + 1 < FILE_MAX);
   usd_datafiles_dir[path_len] = '/';
   usd_datafiles_dir[path_len + 1] = '\0';
-  /* if PXR_PYTHON_SUPPORT_ENABLED is defined, we *must* be dynamic and
-     the plugins are placed relative to the USD shared library hence no
-     hinting is required. */
+  /* If #PXR_PYTHON_SUPPORT_ENABLED is defined, we *must* be dynamic and
+   * the plugins are placed relative to the USD shared library hence no
+   * hinting is required. */
 #ifndef PXR_PYTHON_SUPPORT_ENABLED
   pxr::PlugRegistry::GetInstance().RegisterPlugins(usd_datafiles_dir);
 #endif

View File

@@ -2390,7 +2390,7 @@ void RNA_property_boolean_set_array(PointerRNA *ptr, PropertyRNA *prop, const bo
   }
   else if (idprop->subtype == IDP_INT) {
     /* Support writing to integer and boolean IDProperties, since boolean
-       RNA properties used to be stored with integer IDProperties. */
+     * RNA properties used to be stored with integer IDProperties. */
     int *values_dst = static_cast<int *>(IDP_Array(idprop));
     for (uint i = 0; i < idprop->len; i++) {
       values_dst[i] = int(values[i]);

View File

@@ -1088,7 +1088,7 @@ static void extrude_individual_mesh_faces(Mesh &mesh,
   /* For every selected polygon, change it to use the new extruded vertices and the duplicate
    * edges, and build the faces that form the sides of the extrusion. Build "original index"
    * arrays for the new vertices and edges so they can be accessed later.
    *
    * Filling some of this data like the new edges or polygons could be easily split into
    * separate loops, which may or may not be faster, but would involve more duplication. */
   Array<int> new_vert_indices(extrude_corner_size);