Mesh: Reduce custom normal calculation memory usage #107592

Merged
Hans Goudey merged 32 commits from HooglyBoogly/blender:corner-normals-refactor-storage into main 2023-05-10 14:41:16 +02:00
1433 changed files with 13471 additions and 8340 deletions
Showing only changes of commit 494f881529


@ -62,7 +62,7 @@ ContinuationIndentWidth: 4
BreakBeforeBraces: Custom
BraceWrapping: {
AfterClass: 'false',
AfterControlStatement: 'false',
AfterControlStatement: 'MultiLine',
AfterEnum : 'false',
AfterFunction : 'true',
AfterNamespace : 'false',
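
The .clang-format change above accounts for the bulk of the formatting-only hunks in this commit: with BraceWrapping.AfterControlStatement set to MultiLine, clang-format keeps the opening brace on the same line when a control-statement header fits on one line, but moves it onto its own line once the header wraps; the old value 'false' corresponds to Never. A minimal sketch of the resulting style, using made-up names rather than code from the diff:

```cpp
/* Illustration only, not part of the diff; all names here are hypothetical. */
#include <cstdio>

static void brace_wrapping_example(const bool background,
                                   const bool show_render,
                                   const bool show_viewport,
                                   const bool has_motion_blur)
{
  /* The header fits on one line: the opening brace stays on the same line, as before. */
  if (background) {
    std::printf("background render\n");
  }

  /* The header wraps: with AfterControlStatement set to MultiLine, clang-format now puts
   * the opening brace on its own line, which is the pattern repeated in the hunks below. */
  if ((background ? show_render : show_viewport) && has_motion_blur &&
      (show_render || show_viewport || background))
  {
    std::printf("long, wrapped condition\n");
  }
}
```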


@ -156,7 +156,8 @@ int CLG_color_support_get(CLG_LogRef *clg_ref);
{ \
CLG_LogType *_lg_ty = CLOG_ENSURE(clg_ref); \
if (((_lg_ty->flag & CLG_FLAG_USE) && (_lg_ty->level >= verbose_level)) || \
(severity >= CLG_SEVERITY_WARN)) { \
(severity >= CLG_SEVERITY_WARN)) \
{ \
CLG_logf(_lg_ty, severity, __FILE__ ":" STRINGIFY(__LINE__), __func__, __VA_ARGS__); \
} \
} \
@ -166,7 +167,8 @@ int CLG_color_support_get(CLG_LogRef *clg_ref);
{ \
CLG_LogType *_lg_ty = CLOG_ENSURE(clg_ref); \
if (((_lg_ty->flag & CLG_FLAG_USE) && (_lg_ty->level >= verbose_level)) || \
(severity >= CLG_SEVERITY_WARN)) { \
(severity >= CLG_SEVERITY_WARN)) \
{ \
CLG_log_str(_lg_ty, severity, __FILE__ ":" STRINGIFY(__LINE__), __func__, str); \
} \
} \
@ -176,7 +178,8 @@ int CLG_color_support_get(CLG_LogRef *clg_ref);
{ \
CLG_LogType *_lg_ty = CLOG_ENSURE(clg_ref); \
if (((_lg_ty->flag & CLG_FLAG_USE) && (_lg_ty->level >= verbose_level)) || \
(severity >= CLG_SEVERITY_WARN)) { \
(severity >= CLG_SEVERITY_WARN)) \
{ \
const char *_str = str; \
CLG_log_str(_lg_ty, severity, __FILE__ ":" STRINGIFY(__LINE__), __func__, _str); \
MEM_freeN((void *)_str); \


@ -316,7 +316,8 @@ static bool clg_ctx_filter_check(CLogContext *ctx, const char *identifier)
}
else if ((len >= 2) && (STREQLEN(".*", &flt->match[len - 2], 2))) {
if (((identifier_len == len - 2) && STREQLEN(identifier, flt->match, len - 2)) ||
((identifier_len >= len - 1) && STREQLEN(identifier, flt->match, len - 1))) {
((identifier_len >= len - 1) && STREQLEN(identifier, flt->match, len - 1)))
{
return (bool)i;
}
}


@ -511,7 +511,8 @@ static void options_parse(int argc, const char **argv)
exit(EXIT_FAILURE);
}
else if (options.scene_params.shadingsystem == SHADINGSYSTEM_OSL &&
options.session_params.device.type != DEVICE_CPU) {
options.session_params.device.type != DEVICE_CPU)
{
fprintf(stderr, "OSL shading system only works with CPU device\n");
exit(EXIT_FAILURE);
}


@ -59,7 +59,8 @@ void OIIOOutputDriver::write_render_tile(const Tile &tile)
/* Apply gamma correction for (some) non-linear file formats.
* TODO: use OpenColorIO view transform if available. */
if (ColorSpaceManager::detect_known_colorspace(
u_colorspace_auto, "", image_output->format_name(), true) == u_colorspace_srgb) {
u_colorspace_auto, "", image_output->format_name(), true) == u_colorspace_srgb)
{
const float g = 1.0f / 2.2f;
ImageBufAlgo::pow(image_buffer, image_buffer, {g, g, g, 1.0f});
}


@ -314,7 +314,8 @@ void window_main_loop(const char *title,
}
else if (event.type == SDL_WINDOWEVENT) {
if (event.window.event == SDL_WINDOWEVENT_RESIZED ||
event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED) {
event.window.event == SDL_WINDOWEVENT_SIZE_CHANGED)
{
window_reshape(event.window.data1, event.window.data2);
}
}


@ -432,7 +432,8 @@ static void blender_camera_sync(Camera *cam,
/* panorama sensor */
if (bcam->type == CAMERA_PANORAMA && (bcam->panorama_type == PANORAMA_FISHEYE_EQUISOLID ||
bcam->panorama_type == PANORAMA_FISHEYE_LENS_POLYNOMIAL)) {
bcam->panorama_type == PANORAMA_FISHEYE_LENS_POLYNOMIAL))
{
float fit_xratio = (float)bcam->render_width * bcam->pixelaspect.x;
float fit_yratio = (float)bcam->render_height * bcam->pixelaspect.y;
bool horizontal_fit;


@ -55,13 +55,15 @@ static bool ObtainCacheParticleData(
for (BL::Modifier &b_mod : b_ob->modifiers) {
if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
(background ? b_mod.show_render() : b_mod.show_viewport())) {
(background ? b_mod.show_render() : b_mod.show_viewport()))
{
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);
if ((b_part.render_type() == BL::ParticleSettings::render_type_PATH) &&
(b_part.type() == BL::ParticleSettings::type_HAIR)) {
(b_part.type() == BL::ParticleSettings::type_HAIR))
{
int shader = clamp(b_part.material() - 1, 0, hair->get_used_shaders().size() - 1);
int display_step = background ? b_part.render_step() : b_part.display_step();
int totparts = b_psys.particles.length();
@ -150,13 +152,15 @@ static bool ObtainCacheParticleUV(Hair *hair,
for (BL::Modifier &b_mod : b_ob->modifiers) {
if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
(background ? b_mod.show_render() : b_mod.show_viewport())) {
(background ? b_mod.show_render() : b_mod.show_viewport()))
{
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);
if ((b_part.render_type() == BL::ParticleSettings::render_type_PATH) &&
(b_part.type() == BL::ParticleSettings::type_HAIR)) {
(b_part.type() == BL::ParticleSettings::type_HAIR))
{
int totparts = b_psys.particles.length();
int totchild = background ? b_psys.child_particles.length() :
(int)((float)b_psys.child_particles.length() *
@ -212,13 +216,15 @@ static bool ObtainCacheParticleVcol(Hair *hair,
for (BL::Modifier &b_mod : b_ob->modifiers) {
if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
(background ? b_mod.show_render() : b_mod.show_viewport())) {
(background ? b_mod.show_render() : b_mod.show_viewport()))
{
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);
if ((b_part.render_type() == BL::ParticleSettings::render_type_PATH) &&
(b_part.type() == BL::ParticleSettings::type_HAIR)) {
(b_part.type() == BL::ParticleSettings::type_HAIR))
{
int totparts = b_psys.particles.length();
int totchild = background ? b_psys.child_particles.length() :
(int)((float)b_psys.child_particles.length() *
@ -283,7 +289,8 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
for (int sys = 0; sys < CData->psys_firstcurve.size(); sys++) {
for (int curve = CData->psys_firstcurve[sys];
curve < CData->psys_firstcurve[sys] + CData->psys_curvenum[sys];
curve++) {
curve++)
{
num_keys += CData->curve_keynum[curve];
num_curves++;
}
@ -298,12 +305,14 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
for (int sys = 0; sys < CData->psys_firstcurve.size(); sys++) {
for (int curve = CData->psys_firstcurve[sys];
curve < CData->psys_firstcurve[sys] + CData->psys_curvenum[sys];
curve++) {
curve++)
{
size_t num_curve_keys = 0;
for (int curvekey = CData->curve_firstkey[curve];
curvekey < CData->curve_firstkey[curve] + CData->curve_keynum[curve];
curvekey++) {
curvekey++)
{
const float3 ickey_loc = CData->curvekey_co[curvekey];
const float curve_time = CData->curvekey_time[curvekey];
const float curve_length = CData->curve_length[curve];
@ -311,7 +320,8 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
float radius = shaperadius(
CData->psys_shape[sys], CData->psys_rootradius[sys], CData->psys_tipradius[sys], time);
if (CData->psys_closetip[sys] &&
(curvekey == CData->curve_firstkey[curve] + CData->curve_keynum[curve] - 1)) {
(curvekey == CData->curve_firstkey[curve] + CData->curve_keynum[curve] - 1))
{
radius = 0.0f;
}
hair->add_curve_key(ickey_loc, radius);
@ -433,7 +443,8 @@ static void ExportCurveSegmentsMotion(Hair *hair, ParticleCurveData *CData, int
for (int sys = 0; sys < CData->psys_firstcurve.size(); sys++) {
for (int curve = CData->psys_firstcurve[sys];
curve < CData->psys_firstcurve[sys] + CData->psys_curvenum[sys];
curve++) {
curve++)
{
/* Curve lengths may not match! Curves can be clipped. */
int curve_key_end = (num_curves + 1 < (int)hair->get_curve_first_key().size() ?
hair->get_curve_first_key()[num_curves + 1] :
@ -444,7 +455,8 @@ static void ExportCurveSegmentsMotion(Hair *hair, ParticleCurveData *CData, int
if (!is_num_keys_different) {
for (int curvekey = CData->curve_firstkey[curve];
curvekey < CData->curve_firstkey[curve] + CData->curve_keynum[curve];
curvekey++) {
curvekey++)
{
if (i < hair->get_curve_keys().size()) {
mP[i] = CurveSegmentMotionCV(CData, sys, curve, curvekey);
if (!have_motion) {
@ -489,13 +501,15 @@ bool BlenderSync::object_has_particle_hair(BL::Object b_ob)
/* Test if the object has a particle modifier with hair. */
for (BL::Modifier &b_mod : b_ob.modifiers) {
if ((b_mod.type() == b_mod.type_PARTICLE_SYSTEM) &&
(preview ? b_mod.show_viewport() : b_mod.show_render())) {
(preview ? b_mod.show_viewport() : b_mod.show_render()))
{
BL::ParticleSystemModifier psmd((const PointerRNA)b_mod.ptr);
BL::ParticleSystem b_psys((const PointerRNA)psmd.particle_system().ptr);
BL::ParticleSettings b_part((const PointerRNA)b_psys.settings().ptr);
if ((b_part.render_type() == BL::ParticleSettings::render_type_PATH) &&
(b_part.type() == BL::ParticleSettings::type_HAIR)) {
(b_part.type() == BL::ParticleSettings::type_HAIR))
{
return true;
}
}
@ -677,7 +691,8 @@ static void fill_generic_attribute(const int num_curves,
static void attr_create_motion(Hair *hair, BL::Attribute &b_attribute, const float motion_scale)
{
if (!(b_attribute.domain() == BL::Attribute::domain_POINT) &&
(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR)) {
(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR))
{
return;
}
@ -748,7 +763,8 @@ static void attr_create_generic(Scene *scene,
/* Weak, use first float2 attribute as standard UV. */
if (need_uv && !have_uv && b_data_type == BL::Attribute::data_type_FLOAT2 &&
b_domain == BL::Attribute::domain_CURVE) {
b_domain == BL::Attribute::domain_CURVE)
{
attr_create_uv(attributes, num_curves, num_keys, b_attribute, name);
have_uv = true;
continue;
@ -1100,7 +1116,8 @@ void BlenderSync::sync_hair(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, H
for (const SocketType &socket : new_hair.type->inputs) {
/* Those sockets are updated in sync_object, so do not modify them. */
if (socket.name == "use_motion_blur" || socket.name == "motion_steps" ||
socket.name == "used_shaders") {
socket.name == "used_shaders")
{
continue;
}
hair->set_value(socket, new_hair, socket);


@ -115,7 +115,8 @@ DeviceInfo blender_device_info(BL::Preferences &b_preferences,
bool accumulated_use_hardware_raytracing = false;
foreach (
DeviceInfo &info,
(device.multi_devices.size() != 0 ? device.multi_devices : vector<DeviceInfo>({device}))) {
(device.multi_devices.size() != 0 ? device.multi_devices : vector<DeviceInfo>({device})))
{
if (info.type == DEVICE_METAL && !get_boolean(cpreferences, "use_metalrt")) {
info.use_hardware_raytracing = false;
}


@ -312,7 +312,8 @@ class DisplayGPUPixelBuffer {
/* Try to re-use the existing PBO if it has usable size. */
if (gpu_pixel_buffer) {
if (new_width != width || new_height != height ||
GPU_pixel_buffer_size(gpu_pixel_buffer) < required_size) {
GPU_pixel_buffer_size(gpu_pixel_buffer) < required_size)
{
gpu_resources_destroy();
}
}
@ -513,7 +514,8 @@ bool BlenderDisplayDriver::update_begin(const Params &params,
const int buffer_height = params.size.y;
if (!current_tile_buffer_object.gpu_resources_ensure(buffer_width, buffer_height) ||
!current_tile.texture.gpu_resources_ensure(texture_width, texture_height)) {
!current_tile.texture.gpu_resources_ensure(texture_width, texture_height))
{
tiles_->current_tile.gpu_resources_destroy();
gpu_context_disable();
return false;
@ -563,7 +565,8 @@ void BlenderDisplayDriver::update_end()
* renders while Blender is drawing. As a workaround update texture during draw, under assumption
* that there is no graphics interop on macOS and viewport render has a single tile. */
if (!background_ &&
GPU_type_matches_ex(GPU_DEVICE_NVIDIA, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_ANY)) {
GPU_type_matches_ex(GPU_DEVICE_NVIDIA, GPU_OS_MAC, GPU_DRIVER_ANY, GPU_BACKEND_ANY))
{
tiles_->current_tile.need_update_texture_pixels = true;
}
else {
@ -708,7 +711,8 @@ static void draw_tile(const float2 &zoom,
GPU_texture_bind_ex(texture.gpu_texture, GPUSamplerState::default_sampler(), 0);
}
else if (zoomed_width - draw_tile.params.size.x > 0.5f ||
zoomed_height - draw_tile.params.size.y > 0.5f) {
zoomed_height - draw_tile.params.size.y > 0.5f)
{
GPU_texture_bind_ex(texture.gpu_texture, GPUSamplerState::default_sampler(), 0);
}
else {


@ -28,7 +28,8 @@ static Geometry::Type determine_geom_type(BObjectInfo &b_ob_info, bool use_parti
if (b_ob_info.object_data.is_a(&RNA_Volume) ||
(b_ob_info.object_data == b_ob_info.real_object.data() &&
object_fluid_gas_domain_find(b_ob_info.real_object))) {
object_fluid_gas_domain_find(b_ob_info.real_object)))
{
return Geometry::VOLUME;
}
@ -192,7 +193,8 @@ void BlenderSync::sync_geometry_motion(BL::Depsgraph &b_depsgraph,
Geometry *geom = object->get_geometry();
if (geometry_motion_synced.find(geom) != geometry_motion_synced.end() ||
geometry_motion_attribute_synced.find(geom) != geometry_motion_attribute_synced.end()) {
geometry_motion_attribute_synced.find(geom) != geometry_motion_attribute_synced.end())
{
return;
}
@ -218,7 +220,8 @@ void BlenderSync::sync_geometry_motion(BL::Depsgraph &b_depsgraph,
sync_hair_motion(b_depsgraph, b_ob_info, hair, motion_step);
}
else if (b_ob_info.object_data.is_a(&RNA_Volume) ||
object_fluid_gas_domain_find(b_ob_info.real_object)) {
object_fluid_gas_domain_find(b_ob_info.real_object))
{
/* No volume motion blur support yet. */
}
else if (b_ob_info.object_data.is_a(&RNA_PointCloud)) {


@ -169,7 +169,8 @@ void BlenderSync::sync_background_light(BL::SpaceView3D &b_v3d, bool use_portal)
ObjectKey key(b_world, 0, b_world, false);
if (light_map.add_or_update(&light, b_world, b_world, key) || world_recalc ||
b_world.ptr.data != world_map) {
b_world.ptr.data != world_map)
{
light->set_light_type(LIGHT_BACKGROUND);
if (sampling_method == SAMPLING_MANUAL) {
light->set_map_resolution(get_int(cworld, "sample_map_resolution"));


@ -337,7 +337,8 @@ static void fill_generic_attribute(BL::Mesh &b_mesh,
static void attr_create_motion(Mesh *mesh, BL::Attribute &b_attribute, const float motion_scale)
{
if (!(b_attribute.domain() == BL::Attribute::domain_POINT) &&
(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR)) {
(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR))
{
return;
}
@ -384,7 +385,8 @@ static void attr_create_generic(Scene *scene,
}
if (!(mesh->need_attribute(scene, name) ||
(is_render_color && mesh->need_attribute(scene, ATTR_STD_VERTEX_COLOR)))) {
(is_render_color && mesh->need_attribute(scene, ATTR_STD_VERTEX_COLOR))))
{
continue;
}
if (attributes.find(name)) {
@ -741,13 +743,15 @@ static void attr_create_pointiness(Scene *scene, Mesh *mesh, BL::Mesh &b_mesh, b
const float3 &vert_co = mesh->get_verts()[vert_index];
bool found = false;
for (int other_sorted_vert_index = sorted_vert_index + 1; other_sorted_vert_index < num_verts;
++other_sorted_vert_index) {
++other_sorted_vert_index)
{
const int other_vert_index = sorted_vert_indeices[other_sorted_vert_index];
const float3 &other_vert_co = mesh->get_verts()[other_vert_index];
/* We are too far away now, we wouldn't have duplicate. */
if ((other_vert_co.x + other_vert_co.y + other_vert_co.z) -
(vert_co.x + vert_co.y + vert_co.z) >
3 * FLT_EPSILON) {
3 * FLT_EPSILON)
{
break;
}
/* Found duplicate. */
@ -1325,7 +1329,8 @@ void BlenderSync::sync_mesh(BL::Depsgraph b_depsgraph, BObjectInfo &b_ob_info, M
for (const SocketType &socket : new_mesh.type->inputs) {
/* Those sockets are updated in sync_object, so do not modify them. */
if (socket.name == "use_motion_blur" || socket.name == "motion_steps" ||
socket.name == "used_shaders") {
socket.name == "used_shaders")
{
continue;
}
mesh->set_value(socket, new_mesh, socket);


@ -63,7 +63,8 @@ bool BlenderSync::object_is_geometry(BObjectInfo &b_ob_info)
BL::Object::type_enum type = b_ob_info.iter_object.type();
if (type == BL::Object::type_VOLUME || type == BL::Object::type_CURVES ||
type == BL::Object::type_POINTCLOUD) {
type == BL::Object::type_POINTCLOUD)
{
/* Will be exported attached to mesh. */
return true;
}
@ -325,7 +326,8 @@ Object *BlenderSync::sync_object(BL::Depsgraph &b_depsgraph,
* transform comparison should not be needed, but duplis don't work perfect
* in the depsgraph and may not signal changes, so this is a workaround */
if (object->is_modified() || object_updated ||
(object->get_geometry() && object->get_geometry()->is_modified())) {
(object->get_geometry() && object->get_geometry()->is_modified()))
{
object->name = b_ob.name().c_str();
object->set_pass_id(b_ob.pass_index());
const BL::Array<float, 4> object_color = b_ob.color();
@ -408,7 +410,8 @@ bool BlenderSync::sync_object_attributes(BL::DepsgraphObjectInstance &b_instance
BlenderAttributeType type = blender_attribute_name_split_type(name, &real_name);
if (type == BL::ShaderNodeAttribute::attribute_type_OBJECT ||
type == BL::ShaderNodeAttribute::attribute_type_INSTANCER) {
type == BL::ShaderNodeAttribute::attribute_type_INSTANCER)
{
bool use_instancer = (type == BL::ShaderNodeAttribute::attribute_type_INSTANCER);
float4 value = lookup_instance_property(b_instance, real_name, use_instancer);
@ -556,7 +559,8 @@ void BlenderSync::sync_objects(BL::Depsgraph &b_depsgraph,
for (b_depsgraph.object_instances.begin(b_instance_iter);
b_instance_iter != b_depsgraph.object_instances.end() && !cancel;
++b_instance_iter) {
++b_instance_iter)
{
BL::DepsgraphObjectInstance b_instance = *b_instance_iter;
BL::Object b_ob = b_instance.object();
@ -667,7 +671,8 @@ void BlenderSync::sync_motion(BL::RenderSettings &b_render,
float frame_center_delta = 0.0f;
if (scene->need_motion() != Scene::MOTION_PASS &&
scene->camera->get_motion_position() != MOTION_POSITION_CENTER) {
scene->camera->get_motion_position() != MOTION_POSITION_CENTER)
{
float shuttertime = scene->camera->get_shuttertime();
if (scene->camera->get_motion_position() == MOTION_POSITION_END) {
frame_center_delta = -shuttertime * 0.5f;


@ -21,7 +21,8 @@ static void attr_create_motion(PointCloud *pointcloud,
const float motion_scale)
{
if (!(b_attribute.domain() == BL::Attribute::domain_POINT) &&
(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR)) {
(b_attribute.data_type() == BL::Attribute::data_type_FLOAT_VECTOR))
{
return;
}
@ -313,7 +314,8 @@ void BlenderSync::sync_pointcloud(PointCloud *pointcloud, BObjectInfo &b_ob_info
for (const SocketType &socket : new_pointcloud.type->inputs) {
/* Those sockets are updated in sync_object, so do not modify them. */
if (socket.name == "use_motion_blur" || socket.name == "motion_steps" ||
socket.name == "used_shaders") {
socket.name == "used_shaders")
{
continue;
}
pointcloud->set_value(socket, new_pointcloud, socket);


@ -163,7 +163,8 @@ static PyObject *create_func(PyObject * /*self*/, PyObject *args)
&pyregion,
&pyv3d,
&pyrv3d,
&preview_osl)) {
&preview_osl))
{
return NULL;
}
@ -522,7 +523,8 @@ static PyObject *osl_update_node_func(PyObject * /*self*/, PyObject *args)
}
else if (param->type.vecsemantics == TypeDesc::POINT ||
param->type.vecsemantics == TypeDesc::VECTOR ||
param->type.vecsemantics == TypeDesc::NORMAL) {
param->type.vecsemantics == TypeDesc::NORMAL)
{
socket_type = "NodeSocketVector";
data_type = BL::NodeSocket::type_VECTOR;
@ -738,7 +740,8 @@ static PyObject *denoise_func(PyObject * /*self*/, PyObject *args, PyObject *key
&pyscene,
&pyviewlayer,
&pyinput,
&pyoutput)) {
&pyoutput))
{
return NULL;
}


@ -202,7 +202,8 @@ void BlenderSession::reset_session(BL::BlendData &b_data, BL::Depsgraph &b_depsg
b_scene, background, use_developer_ui);
if (scene->params.modified(scene_params) || session->params.modified(session_params) ||
!this->b_render.use_persistent_data()) {
!this->b_render.use_persistent_data())
{
/* if scene or session parameters changed, it's easier to simply re-create
* them rather than trying to distinguish which settings need to be updated
*/
@ -376,8 +377,8 @@ void BlenderSession::render(BL::Depsgraph &b_depsgraph_)
}
int view_index = 0;
for (b_rr.views.begin(b_view_iter); b_view_iter != b_rr.views.end();
++b_view_iter, ++view_index) {
for (b_rr.views.begin(b_view_iter); b_view_iter != b_rr.views.end(); ++b_view_iter, ++view_index)
{
b_rview_name = b_view_iter->name();
buffer_params.layer = b_view_layer.name();
@ -562,7 +563,8 @@ static bool bake_setup_pass(Scene *scene, const string &bake_type_str, const int
/* Light component passes. */
else if (strcmp(bake_type, "DIFFUSE") == 0) {
if ((bake_filter & BL::BakeSettings::pass_filter_DIRECT) &&
bake_filter & BL::BakeSettings::pass_filter_INDIRECT) {
bake_filter & BL::BakeSettings::pass_filter_INDIRECT)
{
type = PASS_DIFFUSE;
use_direct_light = true;
use_indirect_light = true;
@ -583,7 +585,8 @@ static bool bake_setup_pass(Scene *scene, const string &bake_type_str, const int
}
else if (strcmp(bake_type, "GLOSSY") == 0) {
if ((bake_filter & BL::BakeSettings::pass_filter_DIRECT) &&
bake_filter & BL::BakeSettings::pass_filter_INDIRECT) {
bake_filter & BL::BakeSettings::pass_filter_INDIRECT)
{
type = PASS_GLOSSY;
use_direct_light = true;
use_indirect_light = true;
@ -604,7 +607,8 @@ static bool bake_setup_pass(Scene *scene, const string &bake_type_str, const int
}
else if (strcmp(bake_type, "TRANSMISSION") == 0) {
if ((bake_filter & BL::BakeSettings::pass_filter_DIRECT) &&
bake_filter & BL::BakeSettings::pass_filter_INDIRECT) {
bake_filter & BL::BakeSettings::pass_filter_INDIRECT)
{
type = PASS_TRANSMISSION;
use_direct_light = true;
use_indirect_light = true;


@ -1246,7 +1246,8 @@ static void add_nodes(Scene *scene,
}
}
else if (b_node.is_a(&RNA_ShaderNodeGroup) || b_node.is_a(&RNA_NodeCustomGroup) ||
b_node.is_a(&RNA_ShaderNodeCustomGroup)) {
b_node.is_a(&RNA_ShaderNodeCustomGroup))
{
BL::ShaderNodeTree b_group_ntree(PointerRNA_NULL);
if (b_node.is_a(&RNA_ShaderNodeGroup))
@ -1382,7 +1383,8 @@ static void add_nodes(Scene *scene,
/* Ignore invalid links to avoid unwanted cycles created in graph.
* Also ignore links with unavailable sockets. */
if (!(b_link.is_valid() && b_link.from_socket().enabled() && b_link.to_socket().enabled()) ||
b_link.is_muted()) {
b_link.is_muted())
{
continue;
}
/* get blender link data */
@ -1531,7 +1533,8 @@ void BlenderSync::sync_materials(BL::Depsgraph &b_depsgraph, bool update_all)
/* test if we need to sync */
if (shader_map.add_or_update(&shader, b_mat) || update_all ||
scene_attr_needs_recalc(shader, b_depsgraph)) {
scene_attr_needs_recalc(shader, b_depsgraph))
{
ShaderGraph *graph = new ShaderGraph();
shader->name = b_mat.name().c_str();
@ -1614,12 +1617,14 @@ void BlenderSync::sync_world(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d,
if (world_recalc || update_all || b_world.ptr.data != world_map ||
viewport_parameters.shader_modified(new_viewport_parameters) ||
scene_attr_needs_recalc(shader, b_depsgraph)) {
scene_attr_needs_recalc(shader, b_depsgraph))
{
ShaderGraph *graph = new ShaderGraph();
/* create nodes */
if (new_viewport_parameters.use_scene_world && b_world && b_world.use_nodes() &&
b_world.node_tree()) {
b_world.node_tree())
{
BL::ShaderNodeTree b_ntree(b_world.node_tree());
add_nodes(scene, b_engine, b_data, b_depsgraph, b_scene, graph, b_ntree);
@ -1781,7 +1786,8 @@ void BlenderSync::sync_lights(BL::Depsgraph &b_depsgraph, bool update_all)
/* test if we need to sync */
if (shader_map.add_or_update(&shader, b_light) || update_all ||
scene_attr_needs_recalc(shader, b_depsgraph)) {
scene_attr_needs_recalc(shader, b_depsgraph))
{
ShaderGraph *graph = new ShaderGraph();
/* create nodes */


@ -169,7 +169,8 @@ void BlenderSync::sync_recalc(BL::Depsgraph &b_depsgraph, BL::SpaceView3D &b_v3d
}
if (updated_geometry ||
(object_subdivision_type(b_ob, preview, experimental) != Mesh::SUBDIVISION_NONE)) {
(object_subdivision_type(b_ob, preview, experimental) != Mesh::SUBDIVISION_NONE))
{
BL::ID key = BKE_object_is_modified(b_ob) ? b_ob : b_ob.data();
geometry_map.set_recalc(key);
@ -277,7 +278,8 @@ void BlenderSync::sync_data(BL::RenderSettings &b_render,
geometry_synced.clear(); /* use for objects and motion sync */
if (scene->need_motion() == Scene::MOTION_PASS || scene->need_motion() == Scene::MOTION_NONE ||
scene->camera->get_motion_position() == MOTION_POSITION_CENTER) {
scene->camera->get_motion_position() == MOTION_POSITION_CENTER)
{
sync_objects(b_depsgraph, b_v3d);
}
sync_motion(b_render, b_depsgraph, b_v3d, b_override, width, height, python_thread_state);
@ -445,7 +447,8 @@ void BlenderSync::sync_integrator(BL::ViewLayer &b_view_layer, bool background)
/* No denoising support for vertex color baking, vertices packed into image
* buffer have no relation to neighbors. */
if (scene->bake_manager->get_baking() &&
b_scene.render().bake().target() != BL::BakeSettings::target_IMAGE_TEXTURES) {
b_scene.render().bake().target() != BL::BakeSettings::target_IMAGE_TEXTURES)
{
denoise_params.use = false;
}
@ -709,7 +712,8 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
BL::ViewLayer::lightgroups_iterator b_lightgroup_iter;
for (b_view_layer.lightgroups.begin(b_lightgroup_iter);
b_lightgroup_iter != b_view_layer.lightgroups.end();
++b_lightgroup_iter) {
++b_lightgroup_iter)
{
BL::Lightgroup b_lightgroup(*b_lightgroup_iter);
string name = string_printf("Combined_%s", b_lightgroup.name().c_str());
@ -732,7 +736,8 @@ void BlenderSync::sync_render_passes(BL::RenderLayer &b_rlay, BL::ViewLayer &b_v
}
if (pass_type == PASS_MOTION &&
(b_view_layer.use_motion_blur() && b_scene.render().use_motion_blur())) {
(b_view_layer.use_motion_blur() && b_scene.render().use_motion_blur()))
{
continue;
}


@ -588,7 +588,8 @@ static inline BL::FluidDomainSettings object_fluid_gas_domain_find(BL::Object &b
BL::FluidModifier b_mmd(b_mod);
if (b_mmd.fluid_type() == BL::FluidModifier::fluid_type_DOMAIN &&
b_mmd.domain_settings().domain_type() == BL::FluidDomainSettings::domain_type_GAS) {
b_mmd.domain_settings().domain_type() == BL::FluidDomainSettings::domain_type_GAS)
{
return b_mmd.domain_settings();
}
}
@ -637,7 +638,8 @@ static inline Mesh::SubdivisionType object_subdivision_type(BL::Object &b_ob,
bool enabled = preview ? mod.show_viewport() : mod.show_render();
if (enabled && mod.type() == BL::Modifier::type_SUBSURF &&
RNA_boolean_get(&cobj, "use_adaptive_subdivision")) {
RNA_boolean_get(&cobj, "use_adaptive_subdivision"))
{
BL::SubsurfModifier subsurf(mod);
if (subsurf.subdivision_type() == BL::SubsurfModifier::subdivision_type_CATMULL_CLARK) {


@ -35,7 +35,8 @@ class BlenderSmokeLoader : public ImageLoader {
}
if (attribute == ATTR_STD_VOLUME_DENSITY || attribute == ATTR_STD_VOLUME_FLAME ||
attribute == ATTR_STD_VOLUME_HEAT || attribute == ATTR_STD_VOLUME_TEMPERATURE) {
attribute == ATTR_STD_VOLUME_HEAT || attribute == ATTR_STD_VOLUME_TEMPERATURE)
{
metadata.type = IMAGE_DATA_TYPE_FLOAT;
metadata.channels = 1;
}
@ -315,24 +316,29 @@ static void sync_volume_object(BL::BlendData &b_data,
std = ATTR_STD_VOLUME_TEMPERATURE;
}
else if (name == Attribute::standard_name(ATTR_STD_VOLUME_VELOCITY) ||
name == b_volume.velocity_grid()) {
name == b_volume.velocity_grid())
{
std = ATTR_STD_VOLUME_VELOCITY;
}
else if (name == Attribute::standard_name(ATTR_STD_VOLUME_VELOCITY_X) ||
name == b_volume.velocity_x_grid()) {
name == b_volume.velocity_x_grid())
{
std = ATTR_STD_VOLUME_VELOCITY_X;
}
else if (name == Attribute::standard_name(ATTR_STD_VOLUME_VELOCITY_Y) ||
name == b_volume.velocity_y_grid()) {
name == b_volume.velocity_y_grid())
{
std = ATTR_STD_VOLUME_VELOCITY_Y;
}
else if (name == Attribute::standard_name(ATTR_STD_VOLUME_VELOCITY_Z) ||
name == b_volume.velocity_z_grid()) {
name == b_volume.velocity_z_grid())
{
std = ATTR_STD_VOLUME_VELOCITY_Z;
}
if ((std != ATTR_STD_NONE && volume->need_attribute(scene, std)) ||
volume->need_attribute(scene, name)) {
volume->need_attribute(scene, name))
{
Attribute *attr = (std != ATTR_STD_NONE) ?
volume->attributes.add(std) :
volume->attributes.add(name, TypeDesc::TypeFloat, ATTR_ELEMENT_VOXEL);


@ -689,7 +689,8 @@ BVHNode *BVHBuild::build_node(const BVHObjectBinning &range, int level)
if (!(range.size() > 0 && params.top_level && level == 0)) {
/* Make leaf node when threshold reached or SAH tells us. */
if ((params.small_enough_for_leaf(size, level)) ||
(range_within_max_leaf_size(range, references) && leafSAH < splitSAH)) {
(range_within_max_leaf_size(range, references) && leafSAH < splitSAH))
{
return create_leaf_node(range, references);
}
}
@ -708,7 +709,8 @@ BVHNode *BVHBuild::build_node(const BVHObjectBinning &range, int level)
unalignedLeafSAH = params.sah_primitive_cost * unaligned_range.leafSAH;
if (!(range.size() > 0 && params.top_level && level == 0)) {
if (unalignedLeafSAH < unalignedSplitSAH && unalignedSplitSAH < splitSAH &&
range_within_max_leaf_size(range, references)) {
range_within_max_leaf_size(range, references))
{
return create_leaf_node(range, references);
}
}

View File

@ -516,7 +516,8 @@ void BVH2::pack_instances(size_t nodes_size, size_t leaf_nodes_size)
pack.object_node.resize(objects.size());
if (params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0 ||
params.num_motion_point_steps > 0) {
params.num_motion_point_steps > 0)
{
pack.prim_time.resize(prim_index_size);
}


@ -265,7 +265,8 @@ void CPUDevice::build_bvh(BVH *bvh, Progress &progress, bool refit)
#ifdef WITH_EMBREE
if (bvh->params.bvh_layout == BVH_LAYOUT_EMBREE ||
bvh->params.bvh_layout == BVH_LAYOUT_MULTI_OPTIX_EMBREE ||
bvh->params.bvh_layout == BVH_LAYOUT_MULTI_METAL_EMBREE) {
bvh->params.bvh_layout == BVH_LAYOUT_MULTI_METAL_EMBREE)
{
BVHEmbree *const bvh_embree = static_cast<BVHEmbree *>(bvh);
if (refit) {
bvh_embree->refit(progress);


@ -75,10 +75,12 @@ Device *device_cuda_create(const DeviceInfo &info, Stats &stats, Profiler &profi
static CUresult device_cuda_safe_init()
{
# ifdef _WIN32
__try {
__try
{
return cuInit(0);
}
__except (EXCEPTION_EXECUTE_HANDLER) {
__except (EXCEPTION_EXECUTE_HANDLER)
{
/* Ignore crashes inside the CUDA driver and hope we can
* survive even with corrupted CUDA installs. */
fprintf(stderr, "Cycles CUDA: driver crashed, continuing without CUDA.\n");


@ -333,7 +333,8 @@ string CUDADevice::compile_kernel(const string &common_cflags,
return string();
}
else if (!(nvcc_cuda_version == 101 || nvcc_cuda_version == 102 || nvcc_cuda_version == 111 ||
nvcc_cuda_version == 112 || nvcc_cuda_version == 113 || nvcc_cuda_version == 114)) {
nvcc_cuda_version == 112 || nvcc_cuda_version == 113 || nvcc_cuda_version == 114))
{
printf(
"CUDA version %d.%d detected, build may succeed but only "
"CUDA 10.1 to 11.4 are officially supported.\n",
@ -847,7 +848,8 @@ void CUDADevice::tex_alloc(device_texture &mem)
if (mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT &&
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT3 &&
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FPN &&
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16) {
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16)
{
CUDA_RESOURCE_DESC resDesc;
memset(&resDesc, 0, sizeof(resDesc));


@ -694,7 +694,8 @@ GPUDevice::Mem *GPUDevice::generic_alloc(device_memory &mem, size_t pitch_paddin
* since other devices might be using the memory. */
if (!move_texture_to_host && pitch_padding == 0 && mem.host_pointer &&
mem.host_pointer != shared_pointer) {
mem.host_pointer != shared_pointer)
{
memcpy(shared_pointer, mem.host_pointer, size);
/* A Call to device_memory::host_free() should be preceded by


@ -91,10 +91,12 @@ Device *device_hip_create(const DeviceInfo &info, Stats &stats, Profiler &profil
static hipError_t device_hip_safe_init()
{
# ifdef _WIN32
__try {
__try
{
return hipInit(0);
}
__except (EXCEPTION_EXECUTE_HANDLER) {
__except (EXCEPTION_EXECUTE_HANDLER)
{
/* Ignore crashes inside the HIP driver and hope we can
* survive even with corrupted HIP installs. */
fprintf(stderr, "Cycles HIP: driver crashed, continuing without HIP.\n");


@ -812,7 +812,8 @@ void HIPDevice::tex_alloc(device_texture &mem)
if (mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT &&
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FLOAT3 &&
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FPN &&
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16) {
mem.info.data_type != IMAGE_DATA_TYPE_NANOVDB_FP16)
{
/* Bindless textures. */
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));


@ -387,7 +387,8 @@ hiprtGeometryBuildInput HIPRTDevice::prepare_triangle_blas(BVHHIPRT *bvh, Mesh *
geom_input.geomType = Triangle;
if (mesh->has_motion_blur() &&
!(bvh->params.num_motion_triangle_steps == 0 || bvh->params.use_spatial_split)) {
!(bvh->params.num_motion_triangle_steps == 0 || bvh->params.use_spatial_split))
{
const Attribute *attr_mP = mesh->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
const size_t num_triangles = mesh->num_triangles();


@ -1032,8 +1032,7 @@ void MetalDevice::const_copy_to(const char *name, void *host, size_t size)
offsetof(KernelParamsMetal, integrator_state), host, size, pointer_block_size);
}
# define KERNEL_DATA_ARRAY(data_type, tex_name) \
else if (strcmp(name, #tex_name) == 0) \
{ \
else if (strcmp(name, #tex_name) == 0) { \
update_launch_pointers(offsetof(KernelParamsMetal, tex_name), host, size, size); \
}
# include "kernel/data_arrays.h"
@ -1096,9 +1095,8 @@ void MetalDevice::tex_alloc(device_texture &mem)
}
MTLStorageMode storage_mode = MTLStorageModeManaged;
if (@available(macos 10.15, *)) {
if ([mtlDevice hasUnifiedMemory] &&
device_vendor !=
METAL_GPU_INTEL) { /* Intel GPUs don't support MTLStorageModeShared for MTLTextures */
/* Intel GPUs don't support MTLStorageModeShared for MTLTextures. */
if ([mtlDevice hasUnifiedMemory] && device_vendor != METAL_GPU_INTEL) {
storage_mode = MTLStorageModeShared;
}
}


@ -263,7 +263,8 @@ bool ShaderCache::should_load_kernel(DeviceKernel device_kernel,
if (pso_type != PSO_GENERIC) {
/* Only specialize kernels where it can make an impact. */
if (device_kernel < DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST ||
device_kernel > DEVICE_KERNEL_INTEGRATOR_MEGAKERNEL) {
device_kernel > DEVICE_KERNEL_INTEGRATOR_MEGAKERNEL)
{
return false;
}
@ -400,7 +401,8 @@ bool MetalKernelPipeline::should_use_binary_archive() const
if ((device_kernel >= DEVICE_KERNEL_INTEGRATOR_SHADE_BACKGROUND &&
device_kernel <= DEVICE_KERNEL_INTEGRATOR_SHADE_SHADOW) ||
(device_kernel >= DEVICE_KERNEL_SHADER_EVAL_DISPLACE &&
device_kernel <= DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY)) {
device_kernel <= DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY))
{
/* Archive all shade kernels - they take a long time to compile. */
return true;
}
@ -704,7 +706,8 @@ void MetalKernelPipeline::compile()
if (creating_new_archive && ShaderCache::running) {
NSError *error;
if (![archive addComputePipelineFunctionsWithDescriptor:computePipelineStateDescriptor
error:&error]) {
error:&error])
{
NSString *errStr = [error localizedDescription];
metal_printf("Failed to add PSO to archive:\n%s\n",
errStr ? [errStr UTF8String] : "nil");


@ -892,7 +892,8 @@ id<MTLComputeCommandEncoder> MetalDeviceQueue::get_compute_encoder(DeviceKernel
if (mtlComputeEncoder_) {
if (mtlComputeEncoder_.dispatchType == concurrent ? MTLDispatchTypeConcurrent :
MTLDispatchTypeSerial) {
MTLDispatchTypeSerial)
{
/* declare usage of MTLBuffers etc */
prepare_resources(kernel);


@ -35,7 +35,8 @@ int MetalInfo::get_apple_gpu_core_count(id<MTLDevice> device)
io_service_t gpu_service = IOServiceGetMatchingService(
kIOMainPortDefault, IORegistryEntryIDMatching(device.registryID));
if (CFNumberRef numberRef = (CFNumberRef)IORegistryEntryCreateCFProperty(
gpu_service, CFSTR("gpu-core-count"), 0, 0)) {
gpu_service, CFSTR("gpu-core-count"), 0, 0))
{
if (CFGetTypeID(numberRef) == CFNumberGetTypeID()) {
CFNumberGetValue(numberRef, kCFNumberSInt32Type, &core_count);
}
@ -170,7 +171,8 @@ id<MTLBuffer> MetalBufferPool::get_buffer(id<MTLDevice> device,
/* Check if buffer matches size and storage mode and is old enough to reuse */
if (bufferEntry.buffer.length == length && storageMode == bufferEntry.buffer.storageMode &&
cpuCacheMode == bufferEntry.buffer.cpuCacheMode) {
cpuCacheMode == bufferEntry.buffer.cpuCacheMode)
{
buffer = bufferEntry.buffer;
buffer_free_list.erase(entry);
bufferEntry.command_buffer = command_buffer;


@ -72,7 +72,8 @@ class MultiDevice : public Device {
foreach (SubDevice &peer_sub, devices) {
if (peer_sub.peer_island_index < 0 &&
peer_sub.device->info.type == sub.device->info.type &&
peer_sub.device->check_peer_access(sub.device)) {
peer_sub.device->check_peer_access(sub.device))
{
peer_sub.peer_island_index = sub.peer_island_index;
peer_islands[sub.peer_island_index].push_back(&peer_sub);
}
@ -205,7 +206,8 @@ class MultiDevice : public Device {
/* Skip building a bottom level acceleration structure for non-instanced geometry on Embree
* (since they are put into the top level directly, see bvh_embree.cpp) */
if (!params.top_level && params.bvh_layout == BVH_LAYOUT_EMBREE &&
!bvh->geometry[0]->is_instanced()) {
!bvh->geometry[0]->is_instanced())
{
i++;
continue;
}
@ -251,8 +253,8 @@ class MultiDevice : public Device {
SubDevice *owner_sub = &sub;
if (owner_sub->ptr_map.find(key) == owner_sub->ptr_map.end()) {
foreach (SubDevice *island_sub, peer_islands[sub.peer_island_index]) {
if (island_sub != owner_sub &&
island_sub->ptr_map.find(key) != island_sub->ptr_map.end()) {
if (island_sub != owner_sub && island_sub->ptr_map.find(key) != island_sub->ptr_map.end())
{
owner_sub = island_sub;
}
}
@ -268,7 +270,8 @@ class MultiDevice : public Device {
SubDevice *owner_sub = island.front();
foreach (SubDevice *island_sub, island) {
if (key ? (island_sub->ptr_map.find(key) != island_sub->ptr_map.end()) :
(island_sub->device->stats.mem_used < owner_sub->device->stats.mem_used)) {
(island_sub->device->stats.mem_used < owner_sub->device->stats.mem_used))
{
owner_sub = island_sub;
}
}


@ -696,8 +696,7 @@ void OneapiDevice::set_global_memory(SyclQueue *queue_,
/* This macro will change global ptr of KernelGlobals via name matching. */
# define KERNEL_DATA_ARRAY(type, name) \
else if (#name == matched_name) \
{ \
else if (#name == matched_name) { \
globals->__##name = (type *)memory_device_pointer; \
return; \
}
@ -709,8 +708,7 @@ void OneapiDevice::set_global_memory(SyclQueue *queue_,
}
KERNEL_DATA_ARRAY(KernelData, data)
# include "kernel/data_arrays.h"
else
{
else {
std::cerr << "Can't found global/constant memory with name \"" << matched_name << "\"!"
<< std::endl;
assert(false);
@ -823,7 +821,8 @@ std::vector<sycl::device> OneapiDevice::available_devices()
int driver_build_version = parse_driver_build_version(device);
if ((driver_build_version > 100000 &&
driver_build_version < lowest_supported_driver_version_win) ||
driver_build_version < lowest_supported_driver_version_neo) {
driver_build_version < lowest_supported_driver_version_neo)
{
filter_out = true;
}
}
@ -965,7 +964,8 @@ int OneapiDevice::get_max_num_threads_per_multiprocessor()
{
const sycl::device &device = reinterpret_cast<sycl::queue *>(device_queue_)->get_device();
if (device.has(sycl::aspect::ext_intel_gpu_eu_simd_width) &&
device.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu)) {
device.has(sycl::aspect::ext_intel_gpu_hw_threads_per_eu))
{
return device.get_info<sycl::ext::intel::info::device::gpu_eu_simd_width>() *
device.get_info<sycl::ext::intel::info::device::gpu_hw_threads_per_eu>();
}


@ -352,7 +352,23 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
return false;
}
# if OPTIX_ABI_VERSION >= 55
# if OPTIX_ABI_VERSION >= 84
OptixTask task = nullptr;
OptixResult result = optixModuleCreateWithTasks(context,
&module_options,
&pipeline_options,
ptx_data.data(),
ptx_data.size(),
nullptr,
nullptr,
&optix_module,
&task);
if (result == OPTIX_SUCCESS) {
TaskPool pool;
execute_optix_task(pool, task, result);
pool.wait_work();
}
# elif OPTIX_ABI_VERSION >= 55
OptixTask task = nullptr;
OptixResult result = optixModuleCreateFromPTXWithTasks(context,
&module_options,
@ -555,7 +571,11 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
memset(sbt_data.host_pointer, 0, sizeof(SbtRecord) * NUM_PROGRAM_GROUPS);
for (int i = 0; i < NUM_PROGRAM_GROUPS; ++i) {
optix_assert(optixSbtRecordPackHeader(groups[i], &sbt_data[i]));
# if OPTIX_ABI_VERSION >= 84
optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i], nullptr));
# else
optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i]));
# endif
}
sbt_data.copy_to_device(); /* Upload SBT to device. */
@ -577,7 +597,9 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
OptixPipelineLinkOptions link_options = {};
link_options.maxTraceDepth = 1;
# if OPTIX_ABI_VERSION < 84
link_options.debugLevel = module_options.debugLevel;
# endif
if (use_osl) {
/* Re-create OSL pipeline in case kernels are reloaded after it has been created before. */
@ -693,7 +715,8 @@ bool OptiXDevice::load_osl_kernels()
vector<OSLKernel> osl_kernels;
for (ShaderType type = SHADER_TYPE_SURFACE; type <= SHADER_TYPE_BUMP;
type = static_cast<ShaderType>(type + 1)) {
type = static_cast<ShaderType>(type + 1))
{
const vector<OSL::ShaderGroupRef> &groups = (type == SHADER_TYPE_SURFACE ?
osl_globals.surface_state :
type == SHADER_TYPE_VOLUME ?
@ -768,6 +791,16 @@ bool OptiXDevice::load_osl_kernels()
return false;
}
# if OPTIX_ABI_VERSION >= 84
const OptixResult result = optixModuleCreate(context,
&module_options,
&pipeline_options,
ptx_data.data(),
ptx_data.size(),
nullptr,
0,
&osl_modules.back());
# else
const OptixResult result = optixModuleCreateFromPTX(context,
&module_options,
&pipeline_options,
@ -776,6 +809,7 @@ bool OptiXDevice::load_osl_kernels()
nullptr,
0,
&osl_modules.back());
# endif
if (result != OPTIX_SUCCESS) {
set_error(string_printf("Failed to load OptiX OSL services kernel from '%s' (%s)",
ptx_filename.c_str(),
@ -800,7 +834,21 @@ bool OptiXDevice::load_osl_kernels()
continue;
}
# if OPTIX_ABI_VERSION >= 55
# if OPTIX_ABI_VERSION >= 84
OptixTask task = nullptr;
results[i] = optixModuleCreateWithTasks(context,
&module_options,
&pipeline_options,
osl_kernels[i].ptx.data(),
osl_kernels[i].ptx.size(),
nullptr,
nullptr,
&osl_modules[i],
&task);
if (results[i] == OPTIX_SUCCESS) {
execute_optix_task(pool, task, results[i]);
}
# elif OPTIX_ABI_VERSION >= 55
OptixTask task = nullptr;
results[i] = optixModuleCreateFromPTXWithTasks(context,
&module_options,
@ -861,12 +909,20 @@ bool OptiXDevice::load_osl_kernels()
sbt_data.alloc(NUM_PROGRAM_GROUPS + osl_groups.size());
for (int i = 0; i < NUM_PROGRAM_GROUPS; ++i) {
optix_assert(optixSbtRecordPackHeader(groups[i], &sbt_data[i]));
# if OPTIX_ABI_VERSION >= 84
optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i], nullptr));
# else
optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i]));
# endif
}
for (size_t i = 0; i < osl_groups.size(); ++i) {
if (osl_groups[i] != NULL) {
optix_assert(optixSbtRecordPackHeader(osl_groups[i], &sbt_data[NUM_PROGRAM_GROUPS + i]));
# if OPTIX_ABI_VERSION >= 84
optix_assert(optixProgramGroupGetStackSize(osl_groups[i], &osl_stack_size[i], nullptr));
# else
optix_assert(optixProgramGroupGetStackSize(osl_groups[i], &osl_stack_size[i]));
# endif
}
else {
/* Default to "__direct_callable__dummy_services", so that OSL evaluation for empty
@ -878,7 +934,9 @@ bool OptiXDevice::load_osl_kernels()
OptixPipelineLinkOptions link_options = {};
link_options.maxTraceDepth = 0;
# if OPTIX_ABI_VERSION < 84
link_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_NONE;
# endif
{
vector<OptixProgramGroup> pipeline_groups;
@ -958,7 +1016,8 @@ bool OptiXDevice::build_optix_bvh(BVHOptiX *bvh,
if (use_fast_trace_bvh ||
/* The build flags have to match the ones used to query the built-in curve intersection
* program (see optixBuiltinISModuleGet above) */
build_input.type == OPTIX_BUILD_INPUT_TYPE_CURVES) {
build_input.type == OPTIX_BUILD_INPUT_TYPE_CURVES)
{
VLOG_INFO << "Using fast to trace OptiX BVH";
options.buildFlags = OPTIX_BUILD_FLAG_PREFER_FAST_TRACE | OPTIX_BUILD_FLAG_ALLOW_COMPACTION;
}
@ -1465,7 +1524,8 @@ void OptiXDevice::build_bvh(BVH *bvh, Progress &progress, bool refit)
}
if (ob->get_geometry()->geometry_type == Geometry::HAIR &&
static_cast<const Hair *>(ob->get_geometry())->curve_shape == CURVE_THICK) {
static_cast<const Hair *>(ob->get_geometry())->curve_shape == CURVE_THICK)
{
if (pipeline_options.usesMotionBlur && ob->get_geometry()->has_motion_blur()) {
/* Select between motion blur and non-motion blur built-in intersection module. */
instance.sbtOffset = PG_HITD_MOTION - PG_HITD;
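
The OPTIX_ABI_VERSION >= 84 branches added in the hunks above exist because the SDK with ABI version 84 (OptiX 7.7) renames optixModuleCreateFromPTX to optixModuleCreate (and the task-based variant accordingly), adds a third argument to optixProgramGroupGetStackSize (passed as nullptr above), and no longer uses OptixPipelineLinkOptions::debugLevel. A compatibility sketch, written as an assumption based only on the two call forms visible in the hunks above rather than on code from this PR:

```cpp
/* Sketch: collapse the version guard into one helper so callers have a single entry point. */
#include <optix.h>

static OptixResult optix_module_create_compat(OptixDeviceContext context,
                                              const OptixModuleCompileOptions *module_options,
                                              const OptixPipelineCompileOptions *pipeline_options,
                                              const char *ptx_data,
                                              const size_t ptx_size,
                                              OptixModule *module)
{
#if OPTIX_ABI_VERSION >= 84
  /* OptiX 7.7+ entry point, which accepts the PTX input directly. */
  return optixModuleCreate(
      context, module_options, pipeline_options, ptx_data, ptx_size, nullptr, nullptr, module);
#else
  /* Older SDKs: same arguments, older entry point name. */
  return optixModuleCreateFromPTX(
      context, module_options, pipeline_options, ptx_data, ptx_size, nullptr, nullptr, module);
#endif
}
```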


@ -80,7 +80,8 @@ bool OptiXDeviceQueue::enqueue(DeviceKernel kernel,
}
if (kernel == DEVICE_KERNEL_SHADER_EVAL_DISPLACE ||
kernel == DEVICE_KERNEL_SHADER_EVAL_BACKGROUND ||
kernel == DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY) {
kernel == DEVICE_KERNEL_SHADER_EVAL_CURVE_SHADOW_TRANSPARENCY)
{
cuda_device_assert(cuda_device_,
cuMemcpyHtoDAsync(launch_params_ptr + offsetof(KernelParamsOptiX, offset),
args.values[2], // &d_offset


@ -72,7 +72,8 @@ void HdCyclesCurves::PopulatePoints(HdSceneDelegate *sceneDelegate)
VtValue value;
for (const HdExtComputationPrimvarDescriptor &desc :
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex)) {
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex))
{
if (desc.name == HdTokens->points) {
auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate);
const auto valueStoreIt = valueStore.find(desc.name);
@ -179,7 +180,8 @@ void HdCyclesCurves::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
// Skip attributes that are not needed
if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) ||
_geom->need_attribute(scene, name)) {
_geom->need_attribute(scene, name))
{
ApplyPrimvars(_geom->attributes, name, value, interpolation.second, std);
}
}


@ -226,7 +226,8 @@ void HdCyclesDisplayDriver::draw(const Params &params)
const auto renderBuffer = static_cast<HdCyclesRenderBuffer *>(
_renderParam->GetDisplayAovBinding().renderBuffer);
if (!renderBuffer || // Ensure this render buffer matches the texture dimensions
(renderBuffer->GetWidth() != params.size.x || renderBuffer->GetHeight() != params.size.y)) {
(renderBuffer->GetWidth() != params.size.x || renderBuffer->GetHeight() != params.size.y))
{
return;
}


@ -48,7 +48,8 @@ void HdCyclesInstancer::SyncPrimvars()
sceneDelegate->GetRenderIndex().GetChangeTracker().GetInstancerDirtyBits(GetId());
for (const HdPrimvarDescriptor &desc :
sceneDelegate->GetPrimvarDescriptors(GetId(), HdInterpolationInstance)) {
sceneDelegate->GetPrimvarDescriptors(GetId(), HdInterpolationInstance))
{
if (!HdChangeTracker::IsPrimvarDirty(dirtyBits, GetId(), desc.name)) {
continue;
}
@ -119,7 +120,8 @@ VtMatrix4dArray HdCyclesInstancer::ComputeInstanceTransforms(const SdfPath &prot
VtMatrix4dArray resultTransforms;
if (const auto instancer = static_cast<HdCyclesInstancer *>(
GetDelegate()->GetRenderIndex().GetInstancer(GetParentId()))) {
GetDelegate()->GetRenderIndex().GetInstancer(GetParentId())))
{
for (const GfMatrix4d &parentTransform : instancer->ComputeInstanceTransforms(GetId())) {
for (const GfMatrix4d &localTransform : transforms) {
resultTransforms.push_back(parentTransform * localTransform);


@ -168,7 +168,8 @@ void HdCyclesLight::Sync(HdSceneDelegate *sceneDelegate,
}
// Need to update shader graph when transform changes in case transform was baked into it
else if (_light->tfm_is_modified() && (_lightType == HdPrimTypeTokens->domeLight ||
_light->get_shader()->has_surface_spatial_varying)) {
_light->get_shader()->has_surface_spatial_varying))
{
PopulateShaderGraph(sceneDelegate);
}


@ -71,7 +71,8 @@ class UsdToCyclesMapping {
}
// TODO: Is there a better mapping than 'color'?
if (name == CyclesMaterialTokens->r || name == CyclesMaterialTokens->g ||
name == CyclesMaterialTokens->b) {
name == CyclesMaterialTokens->b)
{
return "color";
}
@ -168,7 +169,8 @@ class UsdToCycles {
usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_float2 ||
usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_float3 ||
usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_float4 ||
usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_int) {
usdNodeType == CyclesMaterialTokens->UsdPrimvarReader_int)
{
return &UsdPrimvarReader;
}
@ -492,7 +494,8 @@ void HdCyclesMaterial::PopulateShaderGraph(const HdMaterialNetwork2 &networkMap)
const char *inputName = nullptr;
const char *outputName = nullptr;
if (terminalName == HdMaterialTerminalTokens->surface ||
terminalName == CyclesMaterialTokens->cyclesSurface) {
terminalName == CyclesMaterialTokens->cyclesSurface)
{
inputName = "Surface";
// Find default output name based on the node if none is provided
if (node->type->name == "add_closure" || node->type->name == "mix_closure") {
@ -506,11 +509,13 @@ void HdCyclesMaterial::PopulateShaderGraph(const HdMaterialNetwork2 &networkMap)
}
}
else if (terminalName == HdMaterialTerminalTokens->displacement ||
terminalName == CyclesMaterialTokens->cyclesDisplacement) {
terminalName == CyclesMaterialTokens->cyclesDisplacement)
{
inputName = outputName = "Displacement";
}
else if (terminalName == HdMaterialTerminalTokens->volume ||
terminalName == CyclesMaterialTokens->cyclesVolume) {
terminalName == CyclesMaterialTokens->cyclesVolume)
{
inputName = outputName = "Volume";
}


@ -53,7 +53,8 @@ VtValue ComputeTriangulatedFaceVaryingPrimvar(VtValue value,
HdMeshUtil &meshUtil)
{
if (meshUtil.ComputeTriangulatedFaceVaryingPrimvar(
HdGetValueData(value), value.GetArraySize(), valueType, &value)) {
HdGetValueData(value), value.GetArraySize(), valueType, &value))
{
return value;
}
@ -113,7 +114,8 @@ HdDirtyBits HdCyclesMesh::_PropagateDirtyBits(HdDirtyBits bits) const
}
if (bits & (HdChangeTracker::DirtyTopology | HdChangeTracker::DirtyDisplayStyle |
HdChangeTracker::DirtySubdivTags)) {
HdChangeTracker::DirtySubdivTags))
{
// Do full topology update when display style or subdivision changes
bits |= HdChangeTracker::DirtyTopology | HdChangeTracker::DirtyDisplayStyle |
HdChangeTracker::DirtySubdivTags;
@ -159,7 +161,8 @@ void HdCyclesMesh::PopulatePoints(HdSceneDelegate *sceneDelegate)
VtValue value;
for (const HdExtComputationPrimvarDescriptor &desc :
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex)) {
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex))
{
if (desc.name == HdTokens->points) {
auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate);
const auto valueStoreIt = valueStore.find(desc.name);
@ -208,7 +211,8 @@ void HdCyclesMesh::PopulateNormals(HdSceneDelegate *sceneDelegate)
for (int i = 0; i < HdInterpolationCount && interpolation == HdInterpolationCount; ++i) {
for (const HdExtComputationPrimvarDescriptor &desc :
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(),
static_cast<HdInterpolation>(i))) {
static_cast<HdInterpolation>(i)))
{
if (desc.name == HdTokens->normals) {
auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate);
const auto valueStoreIt = valueStore.find(desc.name);
@ -270,7 +274,8 @@ void HdCyclesMesh::PopulateNormals(HdSceneDelegate *sceneDelegate)
TF_VERIFY(normals.size() == static_cast<size_t>(_topology.GetNumFaceVaryings()));
if (!_util.ComputeTriangulatedFaceVaryingPrimvar(
normals.data(), normals.size(), HdTypeFloatVec3, &value)) {
normals.data(), normals.size(), HdTypeFloatVec3, &value))
{
return;
}
@ -340,7 +345,8 @@ void HdCyclesMesh::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
// Skip attributes that are not needed
if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) ||
_geom->need_attribute(scene, name)) {
_geom->need_attribute(scene, name))
{
const HdType valueType = HdGetValueTupleType(value).type;
if (!subdivision) {


@ -44,7 +44,8 @@ bool HdCyclesOutputDriver::update_render_tile(const Tile &tile)
// Avoid extra copy by mapping render buffer directly when dimensions/format match the tile
if (tile.offset.x == 0 && tile.offset.y == 0 && tile.size.x == renderBuffer->GetWidth() &&
tile.size.y == renderBuffer->GetHeight() &&
(format >= HdFormatFloat32 && format <= HdFormatFloat32Vec4)) {
(format >= HdFormatFloat32 && format <= HdFormatFloat32Vec4))
{
float *const data = static_cast<float *>(renderBuffer->Map());
TF_VERIFY(tile.get_pass_pixels(aovBinding.aovName.GetString(), channels, data));
renderBuffer->Unmap();


@ -74,7 +74,8 @@ void HdCyclesPoints::PopulatePoints(HdSceneDelegate *sceneDelegate)
VtValue value;
for (const HdExtComputationPrimvarDescriptor &desc :
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex)) {
sceneDelegate->GetExtComputationPrimvarDescriptors(GetId(), HdInterpolationVertex))
{
if (desc.name == HdTokens->points) {
auto valueStore = HdExtComputationUtils::GetComputedPrimvarValues({desc}, sceneDelegate);
const auto valueStoreIt = valueStore.find(desc.name);
@ -187,7 +188,8 @@ void HdCyclesPoints::PopulatePrimvars(HdSceneDelegate *sceneDelegate)
// Skip attributes that are not needed
if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) ||
_geom->need_attribute(scene, name)) {
_geom->need_attribute(scene, name))
{
ApplyPrimvars(_geom->attributes, name, value, interpolation.second, std);
}
}


@ -123,7 +123,8 @@ HdCyclesDelegate::HdCyclesDelegate(const HdRenderSettingsMap &settingsMap,
for (const auto &setting : settingsMap) {
// Skip over the settings known to be used for initialization only
if (setting.first == HdCyclesRenderSettingsTokens->device ||
setting.first == HdCyclesRenderSettingsTokens->threads) {
setting.first == HdCyclesRenderSettingsTokens->threads)
{
continue;
}
@ -284,7 +285,8 @@ HdSprim *HdCyclesDelegate::CreateSprim(const TfToken &typeId, const SdfPath &spr
}
if (typeId == HdPrimTypeTokens->diskLight || typeId == HdPrimTypeTokens->distantLight ||
typeId == HdPrimTypeTokens->domeLight || typeId == HdPrimTypeTokens->rectLight ||
typeId == HdPrimTypeTokens->sphereLight) {
typeId == HdPrimTypeTokens->sphereLight)
{
return new HdCyclesLight(sprimId, typeId);
}
if (typeId == HdPrimTypeTokens->extComputation) {
@ -400,7 +402,8 @@ HdAovDescriptor HdCyclesDelegate::GetDefaultAovDescriptor(const TfToken &name) c
return HdAovDescriptor(HdFormatFloat32Vec3, false, VtValue(GfVec3f(0.0f)));
}
if (name == HdAovTokens->primId || name == HdAovTokens->instanceId ||
name == HdAovTokens->elementId) {
name == HdAovTokens->elementId)
{
return HdAovDescriptor(HdFormatInt32, false, VtValue(-1));
}


@ -90,8 +90,8 @@ void HdCyclesRenderPass::_Execute(const HdRenderPassStateSharedPtr &renderPassSt
const HdRenderPassAovBindingVector &aovBindings = renderPassState->GetAovBindings();
if (_renderParam->GetAovBindings() != aovBindings ||
// Need to resync passes when denoising is enabled or disabled to update the pass mode
(settingsVersion != _lastSettingsVersion &&
scene->integrator->use_denoise_is_modified())) {
(settingsVersion != _lastSettingsVersion && scene->integrator->use_denoise_is_modified()))
{
_renderParam->SyncAovBindings(aovBindings);
if (renderDelegate->IsDisplaySupported()) {


@ -44,10 +44,11 @@ void HdCyclesVolume::Populate(HdSceneDelegate *sceneDelegate, HdDirtyBits dirtyB
Scene *const scene = (Scene *)_geom->get_owner();
if (dirtyBits & HdChangeTracker::DirtyVolumeField) {
for (const HdVolumeFieldDescriptor &field :
sceneDelegate->GetVolumeFieldDescriptors(GetId())) {
for (const HdVolumeFieldDescriptor &field : sceneDelegate->GetVolumeFieldDescriptors(GetId()))
{
if (const auto openvdbAsset = static_cast<HdCyclesField *>(
sceneDelegate->GetRenderIndex().GetBprim(_tokens->openvdbAsset, field.fieldId))) {
sceneDelegate->GetRenderIndex().GetBprim(_tokens->openvdbAsset, field.fieldId)))
{
const ustring name(field.fieldName.GetString());
AttributeStandard std = ATTR_STD_NONE;
@ -72,7 +73,8 @@ void HdCyclesVolume::Populate(HdSceneDelegate *sceneDelegate, HdDirtyBits dirtyB
// Skip attributes that are not needed
if ((std != ATTR_STD_NONE && _geom->need_attribute(scene, std)) ||
_geom->need_attribute(scene, name)) {
_geom->need_attribute(scene, name))
{
Attribute *const attr = (std != ATTR_STD_NONE) ?
_geom->attributes.add(std) :
_geom->attributes.add(

View File

@ -109,8 +109,8 @@ static Device *find_best_device(Device *device, DenoiserType type)
}
else {
/* Prefer a device that can use graphics interop for faster display update. */
if (sub_device->should_use_graphics_interop() &&
!best_device->should_use_graphics_interop()) {
if (sub_device->should_use_graphics_interop() && !best_device->should_use_graphics_interop())
{
best_device = sub_device;
}

View File

@ -164,7 +164,8 @@ class OIDNDenoiseContext {
oidn_filter.set("hdr", true);
oidn_filter.set("srgb", false);
if (denoise_params_.prefilter == DENOISER_PREFILTER_NONE ||
denoise_params_.prefilter == DENOISER_PREFILTER_ACCURATE) {
denoise_params_.prefilter == DENOISER_PREFILTER_ACCURATE)
{
oidn_filter.set("cleanAux", true);
}
oidn_filter.commit();
@ -189,7 +190,8 @@ class OIDNDenoiseContext {
void filter_guiding_pass_if_needed(oidn::DeviceRef &oidn_device, OIDNPass &oidn_pass)
{
if (denoise_params_.prefilter != DENOISER_PREFILTER_ACCURATE || !oidn_pass ||
oidn_pass.is_filtered) {
oidn_pass.is_filtered)
{
return;
}

View File

@ -180,7 +180,8 @@ bool PassAccessor::get_render_tile_pixels(const RenderBuffers *render_buffers,
}
else if ((pass_info.divide_type != PASS_NONE || pass_info.direct_type != PASS_NONE ||
pass_info.indirect_type != PASS_NONE) &&
mode != PassMode::DENOISED) {
mode != PassMode::DENOISED)
{
/* RGB lighting passes that need to divide out color and/or sum direct and indirect.
* These can also optionally write alpha like the combined pass. */
get_pass_light_path(render_buffers, buffer_params, destination);

View File

@ -348,7 +348,8 @@ void PathTrace::update_work_buffer_params_if_needed(const RenderWork &render_wor
}
if (render_state_.need_reset_params ||
render_state_.resolution_divider != render_work.resolution_divider) {
render_state_.resolution_divider != render_work.resolution_divider)
{
update_effective_work_buffer_params(render_work);
}
@ -565,7 +566,8 @@ void PathTrace::denoise(const RenderWork &render_work)
if (denoiser_->denoise_buffer(render_state_.effective_big_tile_params,
buffer_to_denoise,
get_num_samples_in_buffer(),
allow_inplace_modification)) {
allow_inplace_modification))
{
render_state_.has_denoised_result = true;
}

View File

@ -117,7 +117,8 @@ void PathTraceDisplay::copy_pixels_to_texture(
const int texture_height = texture_state_.size.y;
if (texture_x == 0 && texture_y == 0 && pixels_width == texture_width &&
pixels_height == texture_height) {
pixels_height == texture_height)
{
const size_t size_in_bytes = sizeof(half4) * texture_width * texture_height;
memcpy(mapped_rgba_pixels, rgba_pixels, size_in_bytes);
}

View File

@ -232,7 +232,8 @@ int PathTraceWorkCPU::adaptive_sampling_converge_filter_count_active(float thres
uint num_row_pixels_active = 0;
for (int x = 0; x < width; ++x) {
if (!kernels_.adaptive_sampling_convergence_check(
kernel_globals, render_buffer, full_x + x, y, threshold, reset, offset, stride)) {
kernel_globals, render_buffer, full_x + x, y, threshold, reset, offset, stride))
{
++num_row_pixels_active;
row_converged = false;
}

View File

@ -22,7 +22,8 @@ static size_t estimate_single_state_size(const uint kernel_features)
{
size_t state_size = 0;
#define KERNEL_STRUCT_BEGIN(name) for (int array_index = 0;; array_index++) {
#define KERNEL_STRUCT_BEGIN(name) \
for (int array_index = 0;; array_index++) {
#define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \
state_size += (kernel_features & (feature)) ? sizeof(type) : 0;
#define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \
@ -96,7 +97,8 @@ void PathTraceWorkGPU::alloc_integrator_soa()
const int requested_volume_stack_size = device_scene_->data.volume_stack_size;
const uint kernel_features = device_scene_->data.kernel_features;
if ((integrator_state_soa_kernel_features_ & kernel_features) == kernel_features &&
integrator_state_soa_volume_stack_size_ >= requested_volume_stack_size) {
integrator_state_soa_volume_stack_size_ >= requested_volume_stack_size)
{
return;
}
integrator_state_soa_kernel_features_ = kernel_features;
@ -121,7 +123,8 @@ void PathTraceWorkGPU::alloc_integrator_soa()
* write the pointers into a struct that resides in constant memory.
*
* TODO: store float3 in separate XYZ arrays. */
#define KERNEL_STRUCT_BEGIN(name) for (int array_index = 0;; array_index++) {
#define KERNEL_STRUCT_BEGIN(name) \
for (int array_index = 0;; array_index++) {
#define KERNEL_STRUCT_MEMBER(parent_struct, type, name, feature) \
if ((kernel_features & (feature)) && (integrator_state_gpu_.parent_struct.name == nullptr)) { \
device_only_memory<type> *array = new device_only_memory<type>(device_, \
@ -132,7 +135,8 @@ void PathTraceWorkGPU::alloc_integrator_soa()
}
#define KERNEL_STRUCT_ARRAY_MEMBER(parent_struct, type, name, feature) \
if ((kernel_features & (feature)) && \
(integrator_state_gpu_.parent_struct[array_index].name == nullptr)) { \
(integrator_state_gpu_.parent_struct[array_index].name == nullptr)) \
{ \
device_only_memory<type> *array = new device_only_memory<type>(device_, \
"integrator_state_" #name); \
array->alloc_to_device(max_num_paths_); \
@ -611,7 +615,8 @@ void PathTraceWorkGPU::compact_main_paths(const int num_active_paths)
const int min_compact_paths = 32;
if (max_active_main_path_index_ == num_active_paths ||
max_active_main_path_index_ < min_compact_paths) {
max_active_main_path_index_ < min_compact_paths)
{
return;
}
@ -647,7 +652,8 @@ void PathTraceWorkGPU::compact_shadow_paths()
const float shadow_compact_ratio = 0.5f;
const int min_compact_paths = 32;
if (integrator_next_shadow_path_index_.data()[0] < num_active_paths * shadow_compact_ratio ||
integrator_next_shadow_path_index_.data()[0] < min_compact_paths) {
integrator_next_shadow_path_index_.data()[0] < min_compact_paths)
{
return;
}

View File

@ -465,7 +465,8 @@ void RenderScheduler::report_work_begin(const RenderWork &render_work)
* because it might be wrongly 0. Check for whether path tracing is actually happening as it is
* expected to happen in the first work. */
if (render_work.resolution_divider == pixel_size_ && render_work.path_trace.num_samples != 0 &&
render_work.path_trace.start_sample == get_start_sample()) {
render_work.path_trace.start_sample == get_start_sample())
{
state_.start_render_time = time_dt();
}
}

View File

@ -159,7 +159,8 @@ ccl_device_inline
tmin,
isect_t,
lcg_state,
max_hits)) {
max_hits))
{
return true;
}
}
@ -196,7 +197,8 @@ ccl_device_inline
tmin,
isect_t,
lcg_state,
max_hits)) {
max_hits))
{
return true;
}
}

View File

@ -140,7 +140,8 @@ ccl_device_noinline bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
visibility,
prim_object,
prim,
prim_addr)) {
prim_addr))
{
/* shadow ray early termination */
if (visibility & PATH_RAY_SHADOW_OPAQUE)
return true;
@ -159,7 +160,8 @@ ccl_device_noinline bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
visibility,
prim_object,
prim,
prim_addr)) {
prim_addr))
{
/* shadow ray early termination */
if (visibility & PATH_RAY_SHADOW_OPAQUE)
return true;

View File

@ -55,7 +55,8 @@ ccl_device_forceinline Spectrum bsdf_ashikhmin_shirley_eval(ccl_private const Sh
float out = 0.0f;
if ((cosNgO < 0.0f) || fmaxf(bsdf->alpha_x, bsdf->alpha_y) <= 1e-4f ||
!(NdotI > 0.0f && NdotO > 0.0f)) {
!(NdotI > 0.0f && NdotO > 0.0f))
{
*pdf = 0.0f;
return zero_spectrum();
}

View File

@ -399,7 +399,8 @@ ccl_device Spectrum bsdf_microfacet_eval(ccl_private const ShaderClosure *sc,
* - Purely refractive closures can't have reflection.
*/
if ((cos_NI <= 0) || (alpha_x * alpha_y <= 1e-7f) || ((cos_NgO < 0.0f) != is_refraction) ||
(is_refraction && !m_refractive) || (!is_refraction && m_refractive && !m_glass)) {
(is_refraction && !m_refractive) || (!is_refraction && m_refractive && !m_glass))
{
*pdf = 0.0f;
return zero_spectrum();
}

View File

@ -38,7 +38,8 @@ ccl_device_forceinline Spectrum MF_FUNCTION_FULL_NAME(mf_eval)(float3 wi,
}
else
#endif
if (wo.z < wi.z) {
if (wo.z < wi.z)
{
swapped = true;
float3 tmp = wo;
wo = wi;

View File

@ -284,7 +284,8 @@ ccl_device_forceinline void kernel_embree_filter_intersection_func_impl(
const Ray *cray = ctx->ray;
if (kernel_embree_is_self_intersection(
kg, hit, cray, reinterpret_cast<intptr_t>(args->geometryUserPtr))) {
kg, hit, cray, reinterpret_cast<intptr_t>(args->geometryUserPtr)))
{
*args->valid = 0;
}
}
@ -577,7 +578,8 @@ ccl_device void kernel_embree_filter_func_backface_cull(const RTCFilterFunctionN
/* Always ignore back-facing intersections. */
if (dot(make_float3(ray->dir_x, ray->dir_y, ray->dir_z),
make_float3(hit->Ng_x, hit->Ng_y, hit->Ng_z)) > 0.0f) {
make_float3(hit->Ng_x, hit->Ng_y, hit->Ng_z)) > 0.0f)
{
*args->valid = 0;
return;
}
@ -587,7 +589,8 @@ ccl_device void kernel_embree_filter_func_backface_cull(const RTCFilterFunctionN
const Ray *cray = ctx->ray;
if (kernel_embree_is_self_intersection(
kg, hit, cray, reinterpret_cast<intptr_t>(args->geometryUserPtr))) {
kg, hit, cray, reinterpret_cast<intptr_t>(args->geometryUserPtr)))
{
*args->valid = 0;
}
}
@ -600,7 +603,8 @@ ccl_device void kernel_embree_filter_occluded_func_backface_cull(
/* Always ignore back-facing intersections. */
if (dot(make_float3(ray->dir_x, ray->dir_y, ray->dir_z),
make_float3(hit->Ng_x, hit->Ng_y, hit->Ng_z)) > 0.0f) {
make_float3(hit->Ng_x, hit->Ng_y, hit->Ng_z)) > 0.0f)
{
*args->valid = 0;
return;
}

View File

@ -72,8 +72,7 @@ void kernel_global_memory_copy(KernelGlobalsCPU *kg, const char *name, void *mem
}
#define KERNEL_DATA_ARRAY(type, tname) \
else if (strcmp(name, #tname) == 0) \
{ \
else if (strcmp(name, #tname) == 0) { \
kg->tname.data = (type *)mem; \
kg->tname.width = size; \
}

View File

@ -193,7 +193,8 @@ ccl_device float4 kernel_tex_image_interp(KernelGlobals kg, int id, float x, flo
/* float4, byte4, ushort4 and half4 */
const int texture_type = info.data_type;
if (texture_type == IMAGE_DATA_TYPE_FLOAT4 || texture_type == IMAGE_DATA_TYPE_BYTE4 ||
texture_type == IMAGE_DATA_TYPE_HALF4 || texture_type == IMAGE_DATA_TYPE_USHORT4) {
texture_type == IMAGE_DATA_TYPE_HALF4 || texture_type == IMAGE_DATA_TYPE_USHORT4)
{
if (info.interpolation == INTERPOLATION_CUBIC || info.interpolation == INTERPOLATION_SMART) {
return kernel_tex_image_interp_bicubic<float4>(info, x, y);
}
@ -256,7 +257,8 @@ ccl_device float4 kernel_tex_image_interp_3d(KernelGlobals kg,
}
#endif
if (texture_type == IMAGE_DATA_TYPE_FLOAT4 || texture_type == IMAGE_DATA_TYPE_BYTE4 ||
texture_type == IMAGE_DATA_TYPE_HALF4 || texture_type == IMAGE_DATA_TYPE_USHORT4) {
texture_type == IMAGE_DATA_TYPE_HALF4 || texture_type == IMAGE_DATA_TYPE_USHORT4)
{
if (interpolation == INTERPOLATION_CUBIC || interpolation == INTERPOLATION_SMART) {
return kernel_tex_image_interp_tricubic<float4>(info, x, y, z);
}

View File

@ -53,7 +53,8 @@ ccl_device_inline void gpu_parallel_sort_bucket_pass(const uint num_states,
const uint partition_end = min(num_states, partition_start + partition_size);
for (int state_index = partition_start + uint(local_id); state_index < partition_end;
state_index += uint(local_size)) {
state_index += uint(local_size))
{
ushort kernel_index = d_queued_kernel[state_index];
if (kernel_index == queued_kernel) {
uint key = d_shader_sort_key[state_index] % max_shaders;
@ -115,7 +116,8 @@ ccl_device_inline void gpu_parallel_sort_write_pass(const uint num_states,
ccl_global int *key_offsets = partition_key_offsets + (uint(grid_id) * max_shaders);
for (int state_index = partition_start + uint(local_id); state_index < partition_end;
state_index += uint(local_size)) {
state_index += uint(local_size))
{
ushort kernel_index = d_queued_kernel[state_index];
if (kernel_index == queued_kernel) {
uint key = d_shader_sort_key[state_index] % max_shaders;

View File

@ -442,7 +442,8 @@ ccl_device_inline bool shadow_intersection_filter(const hiprtRay &ray,
# else
if (num_hits >= max_hits ||
!(intersection_get_shader_flags(NULL, prim, type) & SD_HAS_TRANSPARENT_SHADOW)) {
!(intersection_get_shader_flags(NULL, prim, type) & SD_HAS_TRANSPARENT_SHADOW))
{
return false;
}

View File

@ -330,7 +330,8 @@ ccl_device float4 kernel_tex_image_interp_3d(KernelGlobals, int id, float3 P, in
if (info.data_type == IMAGE_DATA_TYPE_NANOVDB_FLOAT ||
info.data_type == IMAGE_DATA_TYPE_NANOVDB_FLOAT3 ||
info.data_type == IMAGE_DATA_TYPE_NANOVDB_FPN ||
info.data_type == IMAGE_DATA_TYPE_NANOVDB_FP16) {
info.data_type == IMAGE_DATA_TYPE_NANOVDB_FP16)
{
return make_float4(
TEX_IMAGE_MISSING_R, TEX_IMAGE_MISSING_G, TEX_IMAGE_MISSING_B, TEX_IMAGE_MISSING_A);
}

View File

@ -217,7 +217,8 @@ bool oneapi_load_kernels(SyclQueue *queue_,
const std::string &kernel_name = kernel_id.get_name();
if (!oneapi_kernel_is_required_for_features(kernel_name, kernel_features) ||
!oneapi_kernel_is_using_embree(kernel_name)) {
!oneapi_kernel_is_using_embree(kernel_name))
{
continue;
}
@ -259,7 +260,8 @@ bool oneapi_load_kernels(SyclQueue *queue_,
/* In case HWRT is on, compilation of kernels using Embree is already handled in previous
* block. */
if (!oneapi_kernel_is_required_for_features(kernel_name, kernel_features) ||
(use_hardware_raytracing && oneapi_kernel_is_using_embree(kernel_name))) {
(use_hardware_raytracing && oneapi_kernel_is_using_embree(kernel_name)))
{
continue;
}
@ -323,7 +325,8 @@ bool oneapi_enqueue_kernel(KernelContext *kernel_context,
device_kernel == DEVICE_KERNEL_INTEGRATOR_TERMINATED_PATHS_ARRAY ||
device_kernel == DEVICE_KERNEL_INTEGRATOR_TERMINATED_SHADOW_PATHS_ARRAY ||
device_kernel == DEVICE_KERNEL_INTEGRATOR_COMPACT_PATHS_ARRAY ||
device_kernel == DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_PATHS_ARRAY) {
device_kernel == DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_PATHS_ARRAY)
{
int num_states = *((int *)(args[0]));
/* Round up to the next work-group. */
size_t groups_count = (num_states + local_size - 1) / local_size;

View File

@ -195,7 +195,8 @@ extern "C" __global__ void __anyhit__kernel_optix_shadow_all_hit()
/* If no transparent shadows, all light is blocked and we can stop immediately. */
if (num_hits >= max_hits ||
!(intersection_get_shader_flags(NULL, prim, type) & SD_HAS_TRANSPARENT_SHADOW)) {
!(intersection_get_shader_flags(NULL, prim, type) & SD_HAS_TRANSPARENT_SHADOW))
{
optixSetPayload_5(true);
return optixTerminateRay();
}

View File

@ -68,7 +68,8 @@ ccl_device_inline void film_write_data_passes(KernelGlobals kg,
}
if (!(sd->flag & SD_TRANSPARENT) || kernel_data.film.pass_alpha_threshold == 0.0f ||
average(surface_shader_alpha(kg, sd)) >= kernel_data.film.pass_alpha_threshold) {
average(surface_shader_alpha(kg, sd)) >= kernel_data.film.pass_alpha_threshold)
{
if (flag & PASSMASK(NORMAL)) {
const float3 normal = surface_shader_average_normal(kg, sd);
film_write_pass_float3(buffer + kernel_data.film.pass_normal, normal);

View File

@ -366,7 +366,8 @@ ccl_device_inline void film_write_emission_or_background_pass(
const bool is_shadowcatcher = (path_flag & PATH_RAY_SHADOW_CATCHER_HIT) != 0;
if (!is_shadowcatcher && lightgroup != LIGHTGROUP_NONE &&
kernel_data.film.pass_lightgroup != PASS_UNUSED) {
kernel_data.film.pass_lightgroup != PASS_UNUSED)
{
film_write_pass_spectrum(buffer + kernel_data.film.pass_lightgroup + 3 * lightgroup,
contribution);
}

View File

@ -538,8 +538,8 @@ ccl_device_inline void film_apply_pass_pixel_overlays_rgba(
ccl_global const float *ccl_restrict buffer,
ccl_private float *ccl_restrict pixel)
{
if (kfilm_convert->show_active_pixels &&
kfilm_convert->pass_adaptive_aux_buffer != PASS_UNUSED) {
if (kfilm_convert->show_active_pixels && kfilm_convert->pass_adaptive_aux_buffer != PASS_UNUSED)
{
if (buffer[kfilm_convert->pass_adaptive_aux_buffer + 3] == 0.0f) {
const float3 active_rgb = make_float3(1.0f, 0.0f, 0.0f);
const float3 mix_rgb = interp(make_float3(pixel[0], pixel[1], pixel[2]), active_rgb, 0.5f);

View File

@ -77,7 +77,8 @@ find_attribute(KernelGlobals kg, int object, int prim, int type, uint64_t id)
desc.element = (AttributeElement)attr_map.element;
if (prim == PRIM_NONE && desc.element != ATTR_ELEMENT_MESH &&
desc.element != ATTR_ELEMENT_VOXEL && desc.element != ATTR_ELEMENT_OBJECT) {
desc.element != ATTR_ELEMENT_VOXEL && desc.element != ATTR_ELEMENT_OBJECT)
{
return attribute_not_found();
}

View File

@ -318,7 +318,8 @@ ccl_device_forceinline float4 primitive_motion_vector(KernelGlobals kg,
}
else
#endif
if (sd->type & PRIMITIVE_TRIANGLE) {
if (sd->type & PRIMITIVE_TRIANGLE)
{
/* Triangle */
if (subd_triangle_patch(kg, sd->prim) == ~0) {
motion_pre = triangle_attribute_float3(kg, sd, desc, NULL, NULL);

View File

@ -65,7 +65,8 @@ ccl_device_inline void shader_setup_from_ray(KernelGlobals kg,
else
#endif
#ifdef __POINTCLOUD__
if (sd->type & PRIMITIVE_POINT) {
if (sd->type & PRIMITIVE_POINT)
{
/* point */
point_shader_setup(kg, sd, isect, ray);
}

View File

@ -135,7 +135,8 @@ ccl_device_noinline float subd_triangle_attribute_float(KernelGlobals kg,
}
else
#endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE) {
if (desc.element == ATTR_ELEMENT_FACE)
{
if (dx)
*dx = 0.0f;
if (dy)
@ -275,7 +276,8 @@ ccl_device_noinline float2 subd_triangle_attribute_float2(KernelGlobals kg,
}
else
#endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE) {
if (desc.element == ATTR_ELEMENT_FACE)
{
if (dx)
*dx = make_float2(0.0f, 0.0f);
if (dy)
@ -416,7 +418,8 @@ ccl_device_noinline float3 subd_triangle_attribute_float3(KernelGlobals kg,
}
else
#endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE) {
if (desc.element == ATTR_ELEMENT_FACE)
{
if (dx)
*dx = make_float3(0.0f, 0.0f, 0.0f);
if (dy)
@ -562,7 +565,8 @@ ccl_device_noinline float4 subd_triangle_attribute_float4(KernelGlobals kg,
}
else
#endif /* __PATCH_EVAL__ */
if (desc.element == ATTR_ELEMENT_FACE) {
if (desc.element == ATTR_ELEMENT_FACE)
{
if (dx)
*dx = zero_float4();
if (dy)

View File

@ -307,7 +307,8 @@ ccl_device float4 triangle_attribute_float4(KernelGlobals kg,
ccl_private float4 *dy)
{
if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER |
ATTR_ELEMENT_CORNER_BYTE)) {
ATTR_ELEMENT_CORNER_BYTE))
{
float4 f0, f1, f2;
if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {

View File

@ -264,7 +264,8 @@ ccl_device_forceinline void guiding_record_volume_transmission(KernelGlobals kg,
(transmittance_weight[1] < 0.f || !std::isfinite(transmittance_weight[1]) ||
std::isnan(transmittance_weight[1])) ||
(transmittance_weight[2] < 0.f || !std::isfinite(transmittance_weight[2]) ||
std::isnan(transmittance_weight[2]))) {
std::isnan(transmittance_weight[2])))
{
}
else {
openpgl::cpp::SetTransmittanceWeight(state->guiding.path_segment,
@ -459,7 +460,8 @@ ccl_device_forceinline bool guiding_bsdf_init(KernelGlobals kg,
kg->opgl_guiding_field, guiding_point3f(P), rand)) {
# else
if (kg->opgl_surface_sampling_distribution->Init(
kg->opgl_guiding_field, guiding_point3f(P), rand, true)) {
kg->opgl_guiding_field, guiding_point3f(P), rand, true))
{
# endif
kg->opgl_surface_sampling_distribution->ApplyCosineProduct(guiding_point3f(N));
return true;
@ -516,7 +518,8 @@ ccl_device_forceinline bool guiding_phase_init(KernelGlobals kg,
kg->opgl_guiding_field, guiding_point3f(P), rand)) {
# else
if (kg->opgl_volume_sampling_distribution->Init(
kg->opgl_guiding_field, guiding_point3f(P), rand, true)) {
kg->opgl_guiding_field, guiding_point3f(P), rand, true))
{
# endif
kg->opgl_volume_sampling_distribution->ApplySingleLobeHenyeyGreensteinProduct(guiding_vec3f(D),
g);

View File

@ -77,7 +77,8 @@ ccl_device_inline void sort_shadow_intersections(IntegratorShadowState state, ui
swapped = false;
for (int j = 0; j < num_hits - 1; ++j) {
if (INTEGRATOR_STATE_ARRAY(state, shadow_isect, j, t) >
INTEGRATOR_STATE_ARRAY(state, shadow_isect, j + 1, t)) {
INTEGRATOR_STATE_ARRAY(state, shadow_isect, j + 1, t))
{
struct Intersection tmp_j ccl_optional_struct_init;
struct Intersection tmp_j_1 ccl_optional_struct_init;
integrator_state_read_shadow_isect(state, &tmp_j, j);

View File

@ -153,7 +153,8 @@ ccl_device void integrator_volume_stack_init(KernelGlobals kg, IntegratorState s
int step = 0;
while (stack_index < volume_stack_size - 1 && enclosed_index < MAX_VOLUME_STACK_SIZE - 1 &&
step < 2 * volume_stack_size) {
step < 2 * volume_stack_size)
{
Intersection isect;
if (!scene_intersect_volume(kg, &volume_ray, &isect, visibility)) {
break;

View File

@ -87,7 +87,8 @@ ccl_device_forceinline bool integrate_surface_holdout(KernelGlobals kg,
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
if (((sd->flag & SD_HOLDOUT) || (sd->object_flag & SD_OBJECT_HOLDOUT_MASK)) &&
(path_flag & PATH_RAY_TRANSPARENT_BACKGROUND)) {
(path_flag & PATH_RAY_TRANSPARENT_BACKGROUND))
{
const Spectrum holdout_weight = surface_shader_apply_holdout(kg, sd);
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
const float transparent = average(holdout_weight * throughput);
@ -160,7 +161,8 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
sd->flag,
bounce,
path_flag,
&ls)) {
&ls))
{
return;
}
}

View File

@ -441,7 +441,8 @@ ccl_device_forceinline void volume_integrate_step_scattering(
/* Equiangular sampling for direct lighting. */
if (vstate.direct_sample_method == VOLUME_SAMPLE_EQUIANGULAR && !result.direct_scatter) {
if (result.direct_t >= vstate.tmin && result.direct_t <= vstate.tmax &&
vstate.equiangular_pdf > VOLUME_SAMPLE_PDF_CUTOFF) {
vstate.equiangular_pdf > VOLUME_SAMPLE_PDF_CUTOFF)
{
const float new_dt = result.direct_t - vstate.tmin;
const Spectrum new_transmittance = volume_color_transmittance(coeff.sigma_t, new_dt);
@ -720,7 +721,8 @@ ccl_device_forceinline bool integrate_volume_equiangular_sample_light(
ray->tmax - ray->tmin,
bounce,
path_flag,
&ls)) {
&ls))
{
return false;
}
@ -784,7 +786,8 @@ ccl_device_forceinline void integrate_volume_direct_light(
SD_BSDF_HAS_TRANSMISSION,
bounce,
path_flag,
&ls)) {
&ls))
{
return;
}
}

View File

@ -66,7 +66,8 @@ ccl_device_inline void surface_shader_prepare_guiding(KernelGlobals kg,
/* Init guiding (diffuse BSDFs only for now). */
if (!(diffuse_sampling_fraction > 0.0f &&
guiding_bsdf_init(kg, state, sd->P, sd->N, rand_bsdf_guiding))) {
guiding_bsdf_init(kg, state, sd->P, sd->N, rand_bsdf_guiding)))
{
state->guiding.use_surface_guiding = false;
return;
}
@ -106,12 +107,14 @@ ccl_device_inline void surface_shader_prepare_closures(KernelGlobals kg,
(CLOSURE_IS_BSDF_GLOSSY(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_GLOSSY)) ||
(CLOSURE_IS_BSDF_TRANSMISSION(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSMISSION))) {
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSMISSION)))
{
sc->type = CLOSURE_NONE_ID;
sc->sample_weight = 0.0f;
}
else if ((CLOSURE_IS_BSDF_TRANSPARENT(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSPARENT))) {
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSPARENT)))
{
sc->type = CLOSURE_HOLDOUT_ID;
sc->sample_weight = 0.0f;
sd->flag |= SD_HOLDOUT;
@ -127,7 +130,8 @@ ccl_device_inline void surface_shader_prepare_closures(KernelGlobals kg,
* a good heuristic. */
if (INTEGRATOR_STATE(state, path, bounce) + INTEGRATOR_STATE(state, path, transparent_bounce) ==
0 &&
sd->num_closure > 1) {
sd->num_closure > 1)
{
float sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
@ -153,7 +157,8 @@ ccl_device_inline void surface_shader_prepare_closures(KernelGlobals kg,
#ifdef __MNEE__
&& !(INTEGRATOR_STATE(state, path, mnee) & PATH_MNEE_VALID)
#endif
) {
)
{
float blur_pdf = kernel_data.integrator.filter_glossy *
INTEGRATOR_STATE(state, path, min_ray_pdf);

View File

@ -277,7 +277,8 @@ ccl_device_inline bool area_light_sample(const ccl_global KernelLight *klight,
&sample_axis_v,
&sample_len_v,
klight->area.tan_half_spread,
&sample_rectangle)) {
&sample_rectangle))
{
return false;
}
}
@ -424,7 +425,8 @@ ccl_device_inline bool area_light_sample_from_intersection(
&sample_axis_v,
&sample_len_v,
klight->area.tan_half_spread,
&sample_rectangle)) {
&sample_rectangle))
{
return false;
}
}

View File

@ -140,7 +140,8 @@ ccl_device_noinline bool light_sample(KernelGlobals kg,
/* Exclude synthetic meshes from shadow catcher pass. */
if ((path_flag & PATH_RAY_SHADOW_CATCHER_PASS) &&
!(kernel_data_fetch(object_flag, object) & SD_OBJECT_SHADOW_CATCHER)) {
!(kernel_data_fetch(object_flag, object) & SD_OBJECT_SHADOW_CATCHER))
{
return false;
}
@ -192,7 +193,8 @@ ccl_device bool lights_intersect(KernelGlobals kg,
/* This path should have been resolved with mnee, it will
* generate a firefly for small lights since it is improbable. */
if ((INTEGRATOR_STATE(state, path, mnee) & PATH_MNEE_CULL_LIGHT_CONNECTION) &&
klight->use_caustics) {
klight->use_caustics)
{
continue;
}
#endif
@ -227,7 +229,8 @@ ccl_device bool lights_intersect(KernelGlobals kg,
}
if (t < isect->t &&
!(last_prim == lamp && last_object == OBJECT_NONE && last_type == PRIMITIVE_LAMP)) {
!(last_prim == lamp && last_object == OBJECT_NONE && last_type == PRIMITIVE_LAMP))
{
isect->t = t;
isect->u = u;
isect->v = v;

View File

@ -29,7 +29,8 @@ ccl_device float light_tree_cos_bounding_box_angle(const BoundingBox bbox,
const float3 point_to_centroid)
{
if (P.x > bbox.min.x && P.y > bbox.min.y && P.z > bbox.min.z && P.x < bbox.max.x &&
P.y < bbox.max.y && P.z < bbox.max.z) {
P.y < bbox.max.y && P.z < bbox.max.z)
{
/* If P is inside the bbox, `theta_u` covers the whole sphere. */
return -1.0f;
}
@ -189,7 +190,8 @@ ccl_device void light_tree_importance(const float3 N_or_D,
cos_min_outgoing_angle = 1.0f;
}
else if ((bcone.theta_o + bcone.theta_e > M_PI_F) ||
(cos_theta_minus_theta_u > cos(bcone.theta_o + bcone.theta_e))) {
(cos_theta_minus_theta_u > cos(bcone.theta_o + bcone.theta_e)))
{
/* theta' = theta - theta_o - theta_u < theta_e */
kernel_assert(
(fast_acosf(cos_theta) - bcone.theta_o - fast_acosf(cos_theta_u) - bcone.theta_e) < 5e-4f);
@ -218,7 +220,8 @@ ccl_device void light_tree_importance(const float3 N_or_D,
float cos_max_outgoing_angle;
const float cos_theta_plus_theta_u = cos_theta * cos_theta_u - sin_theta * sin_theta_u;
if (bcone.theta_e - bcone.theta_o < 0 || cos_theta < 0 || cos_theta_u < 0 ||
cos_theta_plus_theta_u < cos(bcone.theta_e - bcone.theta_o)) {
cos_theta_plus_theta_u < cos(bcone.theta_e - bcone.theta_o))
{
min_importance = 0.0f;
}
else {
@ -279,8 +282,8 @@ ccl_device bool compute_emitter_centroid_and_dir(KernelGlobals kg,
dir = -dir;
}
const int object_flag = kernel_data_fetch(object_flag, object);
if ((object_flag & SD_OBJECT_TRANSFORM_APPLIED) &&
(object_flag & SD_OBJECT_NEGATIVE_SCALE)) {
if ((object_flag & SD_OBJECT_TRANSFORM_APPLIED) && (object_flag & SD_OBJECT_NEGATIVE_SCALE))
{
dir = -dir;
}
}
@ -393,8 +396,8 @@ ccl_device void light_tree_emitter_importance(KernelGlobals kg,
float2 distance; /* distance.x = max_distance, distance.y = mix_distance */
float3 centroid, point_to_centroid, P_c;
if (!compute_emitter_centroid_and_dir<in_volume_segment>(
kg, kemitter, P, centroid, bcone.axis)) {
if (!compute_emitter_centroid_and_dir<in_volume_segment>(kg, kemitter, P, centroid, bcone.axis))
{
return;
}
@ -706,7 +709,8 @@ ccl_device_noinline bool light_tree_sample(KernelGlobals kg,
float left_prob;
if (!get_left_probability<in_volume_segment>(
kg, local_P, N_or_D, t, has_transmission, left_index, right_index, left_prob)) {
kg, local_P, N_or_D, t, has_transmission, left_index, right_index, left_prob))
{
return false; /* Both child nodes have zero importance. */
}
@ -825,7 +829,8 @@ ccl_device float light_tree_pdf(
float left_prob;
if (!get_left_probability<false>(
kg, P, N, 0, has_transmission, left_index, right_index, left_prob)) {
kg, P, N, 0, has_transmission, left_index, right_index, left_prob))
{
return 0.0f;
}

View File

@ -221,8 +221,8 @@ ccl_device_forceinline bool triangle_light_sample(KernelGlobals kg,
ls->D = z * B + sin_from_cos(z) * safe_normalize(C_ - dot(C_, B) * B);
/* calculate intersection with the planar triangle */
if (!ray_triangle_intersect(
P, ls->D, 0.0f, FLT_MAX, V[0], V[1], V[2], &ls->u, &ls->v, &ls->t)) {
if (!ray_triangle_intersect(P, ls->D, 0.0f, FLT_MAX, V[0], V[1], V[2], &ls->u, &ls->v, &ls->t))
{
ls->pdf = 0.0f;
return false;
}

View File

@ -52,7 +52,8 @@ ccl_device_forceinline bool osl_closure_skip(KernelGlobals kg,
/* caustic options */
if ((scattering & LABEL_GLOSSY) && (path_flag & PATH_RAY_DIFFUSE)) {
if ((!kernel_data.integrator.caustics_reflective && (scattering & LABEL_REFLECT)) ||
(!kernel_data.integrator.caustics_refractive && (scattering & LABEL_TRANSMIT))) {
(!kernel_data.integrator.caustics_refractive && (scattering & LABEL_TRANSMIT)))
{
return true;
}
}
@ -215,7 +216,8 @@ ccl_device void osl_closure_dielectric_bsdf_setup(KernelGlobals kg,
/* GGX */
if (closure->distribution == make_string("ggx", 11253504724482777663ull) ||
closure->distribution == make_string("default", 4430693559278735917ull)) {
closure->distribution == make_string("default", 4430693559278735917ull))
{
if (has_reflection && has_transmission) {
sd->flag |= bsdf_microfacet_ggx_glass_setup(bsdf);
}
@ -274,7 +276,8 @@ ccl_device void osl_closure_conductor_bsdf_setup(KernelGlobals kg,
/* GGX */
if (closure->distribution == make_string("ggx", 11253504724482777663ull) ||
closure->distribution == make_string("default", 4430693559278735917ull)) {
closure->distribution == make_string("default", 4430693559278735917ull))
{
sd->flag |= bsdf_microfacet_ggx_setup(bsdf);
}
/* Beckmann */
@ -324,7 +327,8 @@ ccl_device void osl_closure_generalized_schlick_bsdf_setup(
/* GGX */
if (closure->distribution == make_string("ggx", 11253504724482777663ull) ||
closure->distribution == make_string("default", 4430693559278735917ull)) {
closure->distribution == make_string("default", 4430693559278735917ull))
{
if (has_reflection && has_transmission) {
sd->flag |= bsdf_microfacet_ggx_glass_setup(bsdf);
}

View File

@ -427,7 +427,8 @@ static bool set_attribute_float2(float2 f[3], TypeDesc type, bool derivatives, v
return true;
}
else if (type == TypeDesc::TypePoint || type == TypeDesc::TypeVector ||
type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor) {
type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor)
{
float *fval = (float *)val;
fval[0] = f[0].x;
@ -497,7 +498,8 @@ static bool set_attribute_float3(float3 f[3], TypeDesc type, bool derivatives, v
return true;
}
else if (type == TypeDesc::TypePoint || type == TypeDesc::TypeVector ||
type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor) {
type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor)
{
float *fval = (float *)val;
fval[0] = f[0].x;
@ -573,7 +575,8 @@ static bool set_attribute_float4(float4 f[3], TypeDesc type, bool derivatives, v
return true;
}
else if (type == TypeDesc::TypePoint || type == TypeDesc::TypeVector ||
type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor) {
type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor)
{
fval[0] = f[0].x;
fval[1] = f[0].y;
fval[2] = f[0].z;
@ -637,7 +640,8 @@ static bool set_attribute_float(float f[3], TypeDesc type, bool derivatives, voi
return true;
}
else if (type == TypeDesc::TypePoint || type == TypeDesc::TypeVector ||
type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor) {
type == TypeDesc::TypeNormal || type == TypeDesc::TypeColor)
{
float *fval = (float *)val;
fval[0] = f[0];
fval[1] = f[0];
@ -928,7 +932,8 @@ bool OSLRenderServices::get_object_standard_attribute(const KernelGlobalsCPU *kg
return set_attribute_int(3, type, derivatives, val);
}
else if ((name == u_geom_trianglevertices || name == u_geom_polyvertices) &&
sd->type & PRIMITIVE_TRIANGLE) {
sd->type & PRIMITIVE_TRIANGLE)
{
float3 P[3];
if (sd->type & PRIMITIVE_MOTION) {
@ -1068,7 +1073,8 @@ bool OSLRenderServices::get_background_attribute(const KernelGlobalsCPU *kg,
float3 ndc[3];
if ((globals->raytype & PATH_RAY_CAMERA) && sd->object == OBJECT_NONE &&
kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
kernel_data.cam.type == CAMERA_ORTHOGRAPHIC)
{
ndc[0] = camera_world_to_ndc(kg, sd, sd->ray_P);
if (derivatives) {

View File

@ -810,16 +810,16 @@ ccl_device_inline bool set_attribute_float(ccl_private float fval[3],
const int type_arraylen = type >> 32;
if (type_basetype == 11 /* TypeDesc::FLOAT */) {
if ((type_aggregate == 2 /* TypeDesc::VEC2 */) ||
(type_aggregate == 1 && type_arraylen == 2)) {
if ((type_aggregate == 2 /* TypeDesc::VEC2 */) || (type_aggregate == 1 && type_arraylen == 2))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 2 + 0] = fval[i];
static_cast<ccl_private float *>(val)[i * 2 + 1] = fval[i];
}
return true;
}
if ((type_aggregate == 3 /* TypeDesc::VEC3 */) ||
(type_aggregate == 1 && type_arraylen == 3)) {
if ((type_aggregate == 3 /* TypeDesc::VEC3 */) || (type_aggregate == 1 && type_arraylen == 3))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 3 + 0] = fval[i];
static_cast<ccl_private float *>(val)[i * 3 + 1] = fval[i];
@ -827,8 +827,8 @@ ccl_device_inline bool set_attribute_float(ccl_private float fval[3],
}
return true;
}
if ((type_aggregate == 4 /* TypeDesc::VEC4 */) ||
(type_aggregate == 1 && type_arraylen == 4)) {
if ((type_aggregate == 4 /* TypeDesc::VEC4 */) || (type_aggregate == 1 && type_arraylen == 4))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 4 + 0] = fval[i];
static_cast<ccl_private float *>(val)[i * 4 + 1] = fval[i];
@ -870,16 +870,16 @@ ccl_device_inline bool set_attribute_float2(ccl_private float2 fval[3],
const int type_arraylen = type >> 32;
if (type_basetype == 11 /* TypeDesc::FLOAT */) {
if ((type_aggregate == 2 /* TypeDesc::VEC2 */) ||
(type_aggregate == 1 && type_arraylen == 2)) {
if ((type_aggregate == 2 /* TypeDesc::VEC2 */) || (type_aggregate == 1 && type_arraylen == 2))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 2 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 2 + 1] = fval[i].y;
}
return true;
}
if ((type_aggregate == 3 /* TypeDesc::VEC3 */) ||
(type_aggregate == 1 && type_arraylen == 3)) {
if ((type_aggregate == 3 /* TypeDesc::VEC3 */) || (type_aggregate == 1 && type_arraylen == 3))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 3 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 3 + 1] = fval[i].y;
@ -887,8 +887,8 @@ ccl_device_inline bool set_attribute_float2(ccl_private float2 fval[3],
}
return true;
}
if ((type_aggregate == 4 /* TypeDesc::VEC4 */) ||
(type_aggregate == 1 && type_arraylen == 4)) {
if ((type_aggregate == 4 /* TypeDesc::VEC4 */) || (type_aggregate == 1 && type_arraylen == 4))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 4 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 4 + 1] = fval[i].y;
@ -917,8 +917,8 @@ ccl_device_inline bool set_attribute_float3(ccl_private float3 fval[3],
const int type_arraylen = type >> 32;
if (type_basetype == 11 /* TypeDesc::FLOAT */) {
if ((type_aggregate == 3 /* TypeDesc::VEC3 */) ||
(type_aggregate == 1 && type_arraylen == 3)) {
if ((type_aggregate == 3 /* TypeDesc::VEC3 */) || (type_aggregate == 1 && type_arraylen == 3))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 3 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 3 + 1] = fval[i].y;
@ -926,8 +926,8 @@ ccl_device_inline bool set_attribute_float3(ccl_private float3 fval[3],
}
return true;
}
if ((type_aggregate == 4 /* TypeDesc::VEC4 */) ||
(type_aggregate == 1 && type_arraylen == 4)) {
if ((type_aggregate == 4 /* TypeDesc::VEC4 */) || (type_aggregate == 1 && type_arraylen == 4))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 4 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 4 + 1] = fval[i].y;
@ -969,8 +969,8 @@ ccl_device_inline bool set_attribute_float4(ccl_private float4 fval[3],
const int type_arraylen = type >> 32;
if (type_basetype == 11 /* TypeDesc::FLOAT */) {
if ((type_aggregate == 3 /* TypeDesc::VEC3 */) ||
(type_aggregate == 1 && type_arraylen == 3)) {
if ((type_aggregate == 3 /* TypeDesc::VEC3 */) || (type_aggregate == 1 && type_arraylen == 3))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 3 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 3 + 1] = fval[i].y;
@ -978,8 +978,8 @@ ccl_device_inline bool set_attribute_float4(ccl_private float4 fval[3],
}
return true;
}
if ((type_aggregate == 4 /* TypeDesc::VEC4 */) ||
(type_aggregate == 1 && type_arraylen == 4)) {
if ((type_aggregate == 4 /* TypeDesc::VEC4 */) || (type_aggregate == 1 && type_arraylen == 4))
{
for (int i = 0; i < (derivatives ? 3 : 1); ++i) {
static_cast<ccl_private float *>(val)[i * 4 + 0] = fval[i].x;
static_cast<ccl_private float *>(val)[i * 4 + 1] = fval[i].y;

View File

@ -31,7 +31,8 @@ shader node_normal_map(float Strength = 1.0,
// get _unnormalized_ interpolated normal and tangent
if (getattribute(attr_name, tangent) && getattribute(attr_sign_name, tangent_sign) &&
(!is_smooth || getattribute("geom:normal_map_normal", ninterp))) {
(!is_smooth || getattribute("geom:normal_map_normal", ninterp)))
{
// apply normal map
vector B = tangent_sign * cross(ninterp, tangent);
Normal = normalize(mcolor[0] * tangent + mcolor[1] * B + mcolor[2] * ninterp);

View File

@ -52,8 +52,7 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
return svm_node_closure_bsdf_skip(kg, offset, type);
}
}
else
{
else {
return svm_node_closure_bsdf_skip(kg, offset, type);
}
@ -256,7 +255,8 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
if (kernel_data.integrator.caustics_reflective || (path_flag & PATH_RAY_DIFFUSE) == 0) {
#endif
if (specular_weight > CLOSURE_WEIGHT_CUTOFF &&
(specular > CLOSURE_WEIGHT_CUTOFF || metallic > CLOSURE_WEIGHT_CUTOFF)) {
(specular > CLOSURE_WEIGHT_CUTOFF || metallic > CLOSURE_WEIGHT_CUTOFF))
{
Spectrum spec_weight = weight * specular_weight;
ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
@ -288,12 +288,14 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
fresnel->color = rgb_to_spectrum(base_color);
/* setup bsdf */
if (distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID ||
roughness <= 0.075f) { /* use single-scatter GGX */
/* Use single-scatter GGX. */
if (distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID || roughness <= 0.075f) {
sd->flag |= bsdf_microfacet_ggx_setup(bsdf);
bsdf_microfacet_setup_fresnel_principledv1(bsdf, sd, fresnel);
}
else { /* use multi-scatter GGX */
} /* Use multi-scatter GGX. */
else {
bsdf->fresnel = fresnel;
sd->flag |= bsdf_microfacet_multi_ggx_fresnel_setup(bsdf, sd);
}
@ -306,14 +308,16 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
/* BSDF */
#ifdef __CAUSTICS_TRICKS__
if (kernel_data.integrator.caustics_reflective ||
kernel_data.integrator.caustics_refractive || (path_flag & PATH_RAY_DIFFUSE) == 0) {
kernel_data.integrator.caustics_refractive || (path_flag & PATH_RAY_DIFFUSE) == 0)
{
#endif
if (final_transmission > CLOSURE_WEIGHT_CUTOFF) {
Spectrum glass_weight = weight * final_transmission;
float3 cspec0 = base_color * specular_tint + make_float3(1.0f - specular_tint);
if (roughness <= 5e-2f ||
distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID) { /* use single-scatter GGX */
/* Use single-scatter GGX. */
if (roughness <= 5e-2f || distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID) {
float refl_roughness = roughness;
/* reflection */
@ -376,8 +380,8 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg,
sd->flag |= bsdf_microfacet_ggx_refraction_setup(bsdf);
}
}
}
else { /* use multi-scatter GGX */
} /* Use multi-scatter GGX. */
else {
ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc(
sd, sizeof(MicrofacetBsdf), glass_weight);
ccl_private FresnelPrincipledV1 *fresnel =

View File

@ -73,8 +73,7 @@ ccl_device_noinline void svm_node_set_bump(KernelGlobals kg,
stack_store_float3(stack, node.w, normal_out);
}
else
{
else {
stack_store_float3(stack, node.w, zero_float3());
}
#endif
@ -128,8 +127,7 @@ ccl_device_noinline void svm_node_displacement(KernelGlobals kg,
stack_store_float3(stack, node.z, dP);
}
else
{
else {
stack_store_float3(stack, node.z, zero_float3());
}
}
@ -183,8 +181,7 @@ ccl_device_noinline int svm_node_vector_displacement(
stack_store_float3(stack, displacement_offset, dP);
}
else
{
else {
stack_store_float3(stack, displacement_offset, zero_float3());
(void)data_node;
}

View File

@ -47,7 +47,8 @@ ccl_device_noinline int svm_node_vector_math(KernelGlobals kg,
/* 3 Vector Operators */
if (type == NODE_VECTOR_MATH_WRAP || type == NODE_VECTOR_MATH_FACEFORWARD ||
type == NODE_VECTOR_MATH_MULTIPLY_ADD) {
type == NODE_VECTOR_MATH_MULTIPLY_ADD)
{
uint4 extra_node = read_node(kg, &offset);
c = stack_load_float3(stack, extra_node.x);
}

View File

@ -144,7 +144,8 @@ ccl_device float3 sky_radiance_nishita(KernelGlobals kg,
/* If the ray is inside the sun disc, render it, otherwise render the sky.
* Alternatively, ignore the sun if we're evaluating the background texture. */
if (sun_disc && sun_dir_angle < half_angular &&
!((path_flag & PATH_RAY_IMPORTANCE_BAKE) && kernel_data.background.use_sun_guiding)) {
!((path_flag & PATH_RAY_IMPORTANCE_BAKE) && kernel_data.background.use_sun_guiding))
{
/* get 2 pixels data */
float y;

View File

@ -49,7 +49,8 @@ ccl_device_noinline void svm_node_vector_transform(KernelGlobals kg,
/* From camera */
else if (from == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_CAMERA) {
if (to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_WORLD ||
to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_OBJECT) {
to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_OBJECT)
{
tfm = kernel_data.cam.cameratoworld;
if (is_direction)
in = transform_direction(&tfm, in);
@ -68,7 +69,8 @@ ccl_device_noinline void svm_node_vector_transform(KernelGlobals kg,
else if (from == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_OBJECT) {
if ((to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_WORLD ||
to == NODE_VECTOR_TRANSFORM_CONVERT_SPACE_CAMERA) &&
is_object) {
is_object)
{
if (is_direction)
object_dir_transform(kg, sd, &in);
else

View File

@ -254,7 +254,8 @@ static M44d convert_yup_zup(const M44d &mtx, float scale_mult)
rotation,
translation,
true,
IMATH_INTERNAL_NAMESPACE::Euler<double>::XZY)) {
IMATH_INTERNAL_NAMESPACE::Euler<double>::XZY))
{
return mtx;
}
@ -806,7 +807,8 @@ void AlembicProcedural::generate(Scene *scene, Progress &progress)
/* Check if the shaders were modified. */
if (object->used_shaders_is_modified() && object->get_object() &&
object->get_object()->get_geometry()) {
object->get_object()->get_geometry())
{
Geometry *geometry = object->get_object()->get_geometry();
array<Node *> used_shaders = object->get_used_shaders();
geometry->set_used_shaders(used_shaders);
@ -908,7 +910,8 @@ void AlembicProcedural::generate(Scene *scene, Progress &progress)
/* skip constant objects */
if (object->is_constant() && !object->is_modified() && !object->need_shader_update &&
!scale_is_modified()) {
!scale_is_modified())
{
continue;
}
@ -994,7 +997,8 @@ void AlembicProcedural::load_objects(Progress &progress)
geometry = scene_->create_node<PointCloud>();
}
else if (abc_object->schema_type == AlembicObject::POLY_MESH ||
abc_object->schema_type == AlembicObject::SUBD) {
abc_object->schema_type == AlembicObject::SUBD)
{
geometry = scene_->create_node<Mesh>();
}
else {
@ -1469,7 +1473,8 @@ void AlembicProcedural::build_caches(Progress &progress)
}
else if (object->schema_type == AlembicObject::CURVES) {
if (!object->has_data_loaded() || default_radius_is_modified() ||
object->radius_scale_is_modified()) {
object->radius_scale_is_modified())
{
ICurves curves(object->iobject, Alembic::Abc::kWrapExisting);
ICurvesSchema schema = curves.getSchema();
object->load_data_in_cache(object->get_cached_data(), this, schema, progress);
@ -1477,7 +1482,8 @@ void AlembicProcedural::build_caches(Progress &progress)
}
else if (object->schema_type == AlembicObject::POINTS) {
if (!object->has_data_loaded() || default_radius_is_modified() ||
object->radius_scale_is_modified()) {
object->radius_scale_is_modified())
{
IPoints points(object->iobject, Alembic::Abc::kWrapExisting);
IPointsSchema schema = points.getSchema();
object->load_data_in_cache(object->get_cached_data(), this, schema, progress);

View File

@ -470,7 +470,8 @@ static void add_subd_edge_creases(CachedData &cached_data,
chrono_t time)
{
if (!(data.crease_indices.valid() && data.crease_lengths.valid() &&
data.crease_sharpnesses.valid())) {
data.crease_sharpnesses.valid()))
{
return;
}
@ -519,7 +520,8 @@ static void add_subd_vertex_creases(CachedData &cached_data,
const FloatArraySamplePtr creases_sharpnesses = data.crease_sharpnesses.getValue(iss);
if (!(creases_indices && creases_sharpnesses) ||
creases_indices->size() != creases_sharpnesses->size()) {
creases_indices->size() != creases_sharpnesses->size())
{
return;
}

View File

@ -278,9 +278,11 @@ bool Attribute::same_storage(TypeDesc a, TypeDesc b)
return true;
if (a == TypeDesc::TypeColor || a == TypeDesc::TypePoint || a == TypeDesc::TypeVector ||
a == TypeDesc::TypeNormal) {
a == TypeDesc::TypeNormal)
{
if (b == TypeDesc::TypeColor || b == TypeDesc::TypePoint || b == TypeDesc::TypeVector ||
b == TypeDesc::TypeNormal) {
b == TypeDesc::TypeNormal)
{
return true;
}
}

Some files were not shown because too many files have changed in this diff.
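
Every hunk in this commit applies the same mechanical rule: when a control-statement condition wraps over more than one line, the opening brace is moved onto its own line, while a condition that fits on a single line keeps the brace attached. A minimal, self-contained sketch of the two resulting forms (hypothetical names that only loosely echo the diff, not code taken from the files above):

#include <cstdio>

static void report(bool use_device, int num_samples, int resolution_divider, int pixel_size)
{
  /* Condition fits on one line: the opening brace stays on the same line. */
  if (use_device && num_samples > 0) {
    std::printf("render\n");
  }

  /* Condition wraps over two lines: the opening brace drops to its own line,
   * visually separating the body from the wrapped condition. */
  if ((use_device && num_samples > 0) ||
      resolution_divider != pixel_size)
  {
    std::printf("schedule\n");
  }
}

int main()
{
  report(true, 8, 2, 1);
  return 0;
}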