ClangFormat: apply to source, most of intern

Apply clang format as proposed in T53211.

For details on usage and instructions for migrating branches
without conflicts, see:

https://wiki.blender.org/wiki/Tools/ClangFormat
2019-04-17 06:17:24 +02:00
parent b3dabc200a
commit e12c08e8d1
4481 changed files with 1230080 additions and 1155401 deletions
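The diff below is formatting-only; no statement or logic changes. For illustration, a sketch of the kind of change clang-format makes in this file (taken from the shadow_blocked_opaque() hunk below, indentation approximate): the old hand-aligned, one-argument-per-line wrapping is replaced by packed wrapping up to the column limit.

  /* Old style: arguments wrapped one per line, hand-aligned under the open paren. */
  const bool blocked = scene_intersect(kg,
                                       *ray,
                                       visibility & PATH_RAY_SHADOW_OPAQUE,
                                       isect,
                                       NULL,
                                       0.0f, 0.0f);

  /* New style: clang-format packs arguments up to the column limit. */
  const bool blocked = scene_intersect(
      kg, *ray, visibility & PATH_RAY_SHADOW_OPAQUE, isect, NULL, 0.0f, 0.0f);

Only whitespace and line wrapping differ between the removed and added lines of each hunk.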


@@ -20,7 +20,7 @@ CCL_NAMESPACE_BEGIN
 typedef struct VolumeState {
 # ifdef __SPLIT_KERNEL__
 # else
-PathState ps;
+PathState ps;
 # endif
 } VolumeState;
@@ -28,77 +28,70 @@ typedef struct VolumeState {
 # ifdef __SPLIT_KERNEL__
 ccl_addr_space
 # endif
-ccl_device_inline PathState *shadow_blocked_volume_path_state(
-KernelGlobals *kg,
-VolumeState *volume_state,
-ccl_addr_space PathState *state,
-ShaderData *sd,
-Ray *ray)
+ccl_device_inline PathState *
+shadow_blocked_volume_path_state(KernelGlobals *kg,
+VolumeState *volume_state,
+ccl_addr_space PathState *state,
+ShaderData *sd,
+Ray *ray)
 {
 # ifdef __SPLIT_KERNEL__
-ccl_addr_space PathState *ps =
-&kernel_split_state.state_shadow[ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0)];
+ccl_addr_space PathState *ps =
+&kernel_split_state.state_shadow[ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0)];
 # else
-PathState *ps = &volume_state->ps;
+PathState *ps = &volume_state->ps;
 # endif
-*ps = *state;
-/* We are checking for shadow on the "other" side of the surface, so need
- * to discard volume we are currently at.
- */
-if(dot(sd->Ng, ray->D) < 0.0f) {
-kernel_volume_stack_enter_exit(kg, sd, ps->volume_stack);
-}
-return ps;
+*ps = *state;
+/* We are checking for shadow on the "other" side of the surface, so need
+ * to discard volume we are currently at.
+ */
+if (dot(sd->Ng, ray->D) < 0.0f) {
+kernel_volume_stack_enter_exit(kg, sd, ps->volume_stack);
+}
+return ps;
 }
-#endif /* __VOLUME__ */
+#endif /* __VOLUME__ */
 /* Attenuate throughput accordingly to the given intersection event.
  * Returns true if the throughput is zero and traversal can be aborted.
  */
 ccl_device_forceinline bool shadow_handle_transparent_isect(
-KernelGlobals *kg,
-ShaderData *shadow_sd,
-ccl_addr_space PathState *state,
-# ifdef __VOLUME__
-ccl_addr_space struct PathState *volume_state,
-# endif
-Intersection *isect,
-Ray *ray,
-float3 *throughput)
+KernelGlobals *kg,
+ShaderData *shadow_sd,
+ccl_addr_space PathState *state,
+#ifdef __VOLUME__
+ccl_addr_space struct PathState *volume_state,
+#endif
+Intersection *isect,
+Ray *ray,
+float3 *throughput)
 {
 #ifdef __VOLUME__
-/* Attenuation between last surface and next surface. */
-if(volume_state->volume_stack[0].shader != SHADER_NONE) {
-Ray segment_ray = *ray;
-segment_ray.t = isect->t;
-kernel_volume_shadow(kg,
-shadow_sd,
-volume_state,
-&segment_ray,
-throughput);
-}
+/* Attenuation between last surface and next surface. */
+if (volume_state->volume_stack[0].shader != SHADER_NONE) {
+Ray segment_ray = *ray;
+segment_ray.t = isect->t;
+kernel_volume_shadow(kg, shadow_sd, volume_state, &segment_ray, throughput);
+}
 #endif
-/* Setup shader data at surface. */
-shader_setup_from_ray(kg, shadow_sd, isect, ray);
-/* Attenuation from transparent surface. */
-if(!(shadow_sd->flag & SD_HAS_ONLY_VOLUME)) {
-path_state_modify_bounce(state, true);
-shader_eval_surface(kg,
-shadow_sd,
-state,
-PATH_RAY_SHADOW);
-path_state_modify_bounce(state, false);
-*throughput *= shader_bsdf_transparency(kg, shadow_sd);
-}
-/* Stop if all light is blocked. */
-if(is_zero(*throughput)) {
-return true;
-}
+/* Setup shader data at surface. */
+shader_setup_from_ray(kg, shadow_sd, isect, ray);
+/* Attenuation from transparent surface. */
+if (!(shadow_sd->flag & SD_HAS_ONLY_VOLUME)) {
+path_state_modify_bounce(state, true);
+shader_eval_surface(kg, shadow_sd, state, PATH_RAY_SHADOW);
+path_state_modify_bounce(state, false);
+*throughput *= shader_bsdf_transparency(kg, shadow_sd);
+}
+/* Stop if all light is blocked. */
+if (is_zero(*throughput)) {
+return true;
+}
 #ifdef __VOLUME__
-/* Exit/enter volume. */
-kernel_volume_stack_enter_exit(kg, shadow_sd, volume_state->volume_stack);
+/* Exit/enter volume. */
+kernel_volume_stack_enter_exit(kg, shadow_sd, volume_state->volume_stack);
 #endif
-return false;
+return false;
 }
 /* Special version which only handles opaque shadows. */
@@ -110,19 +103,15 @@ ccl_device bool shadow_blocked_opaque(KernelGlobals *kg,
 Intersection *isect,
 float3 *shadow)
 {
-const bool blocked = scene_intersect(kg,
-*ray,
-visibility & PATH_RAY_SHADOW_OPAQUE,
-isect,
-NULL,
-0.0f, 0.0f);
+const bool blocked = scene_intersect(
+kg, *ray, visibility & PATH_RAY_SHADOW_OPAQUE, isect, NULL, 0.0f, 0.0f);
 #ifdef __VOLUME__
-if(!blocked && state->volume_stack[0].shader != SHADER_NONE) {
-/* Apply attenuation from current volume shader. */
-kernel_volume_shadow(kg, shadow_sd, state, ray, shadow);
-}
+if (!blocked && state->volume_stack[0].shader != SHADER_NONE) {
+/* Apply attenuation from current volume shader. */
+kernel_volume_shadow(kg, shadow_sd, state, ray, shadow);
+}
 #endif
-return blocked;
+return blocked;
 }
 #ifdef __TRANSPARENT_SHADOWS__
@@ -169,94 +158,80 @@ ccl_device bool shadow_blocked_transparent_all_loop(KernelGlobals *kg,
 uint max_hits,
 float3 *shadow)
 {
-/* Intersect to find an opaque surface, or record all transparent
- * surface hits.
- */
-uint num_hits;
-const bool blocked = scene_intersect_shadow_all(kg,
-ray,
-hits,
-visibility,
-max_hits,
-&num_hits);
+/* Intersect to find an opaque surface, or record all transparent
+ * surface hits.
+ */
+uint num_hits;
+const bool blocked = scene_intersect_shadow_all(kg, ray, hits, visibility, max_hits, &num_hits);
 # ifdef __VOLUME__
-VolumeState volume_state;
+VolumeState volume_state;
 # endif
-/* If no opaque surface found but we did find transparent hits,
- * shade them.
- */
-if(!blocked && num_hits > 0) {
-float3 throughput = make_float3(1.0f, 1.0f, 1.0f);
-float3 Pend = ray->P + ray->D*ray->t;
-float last_t = 0.0f;
-int bounce = state->transparent_bounce;
-Intersection *isect = hits;
+/* If no opaque surface found but we did find transparent hits,
+ * shade them.
+ */
+if (!blocked && num_hits > 0) {
+float3 throughput = make_float3(1.0f, 1.0f, 1.0f);
+float3 Pend = ray->P + ray->D * ray->t;
+float last_t = 0.0f;
+int bounce = state->transparent_bounce;
+Intersection *isect = hits;
 # ifdef __VOLUME__
 # ifdef __SPLIT_KERNEL__
-ccl_addr_space
+ccl_addr_space
 # endif
-PathState *ps = shadow_blocked_volume_path_state(kg,
-&volume_state,
-state,
-sd,
-ray);
+PathState *ps = shadow_blocked_volume_path_state(kg, &volume_state, state, sd, ray);
 # endif
-sort_intersections(hits, num_hits);
-for(int hit = 0; hit < num_hits; hit++, isect++) {
-/* Adjust intersection distance for moving ray forward. */
-float new_t = isect->t;
-isect->t -= last_t;
-/* Skip hit if we did not move forward, step by step raytracing
- * would have skipped it as well then.
- */
-if(last_t == new_t) {
-continue;
-}
-last_t = new_t;
-/* Attenuate the throughput. */
-if(shadow_handle_transparent_isect(kg,
-shadow_sd,
-state,
-#ifdef __VOLUME__
-ps,
-#endif
-isect,
-ray,
-&throughput))
-{
-return true;
-}
-/* Move ray forward. */
-ray->P = shadow_sd->P;
-if(ray->t != FLT_MAX) {
-ray->D = normalize_len(Pend - ray->P, &ray->t);
-}
-bounce++;
-}
+sort_intersections(hits, num_hits);
+for (int hit = 0; hit < num_hits; hit++, isect++) {
+/* Adjust intersection distance for moving ray forward. */
+float new_t = isect->t;
+isect->t -= last_t;
+/* Skip hit if we did not move forward, step by step raytracing
+ * would have skipped it as well then.
+ */
+if (last_t == new_t) {
+continue;
+}
+last_t = new_t;
+/* Attenuate the throughput. */
+if (shadow_handle_transparent_isect(kg,
+shadow_sd,
+state,
 # ifdef __VOLUME__
-/* Attenuation for last line segment towards light. */
-if(ps->volume_stack[0].shader != SHADER_NONE) {
-kernel_volume_shadow(kg, shadow_sd, ps, ray, &throughput);
-}
+ps,
 # endif
-*shadow = throughput;
-return is_zero(throughput);
-}
+-*shadow = throughput;
+-return is_zero(throughput);
+-}
+isect,
+ray,
+&throughput)) {
+return true;
+}
+/* Move ray forward. */
+ray->P = shadow_sd->P;
+if (ray->t != FLT_MAX) {
+ray->D = normalize_len(Pend - ray->P, &ray->t);
+}
+bounce++;
+}
 # ifdef __VOLUME__
-if(!blocked && state->volume_stack[0].shader != SHADER_NONE) {
-/* Apply attenuation from current volume shader. */
+/* Attenuation for last line segment towards light. */
+if (ps->volume_stack[0].shader != SHADER_NONE) {
+kernel_volume_shadow(kg, shadow_sd, ps, ray, &throughput);
+}
+# endif
+*shadow = throughput;
+return is_zero(throughput);
+}
+# ifdef __VOLUME__
+if (!blocked && state->volume_stack[0].shader != SHADER_NONE) {
+/* Apply attenuation from current volume shader. */
 # ifdef __SPLIT_KERNEL__
-ccl_addr_space
+ccl_addr_space
 # endif
-PathState *ps = shadow_blocked_volume_path_state(kg,
-&volume_state,
-state,
-sd,
-ray);
-kernel_volume_shadow(kg, shadow_sd, ps, ray, shadow);
-}
+PathState *ps = shadow_blocked_volume_path_state(kg, &volume_state, state, sd, ray);
+kernel_volume_shadow(kg, shadow_sd, ps, ray, shadow);
+}
 # endif
-return blocked;
+return blocked;
 }
 /* Here we do all device specific trickery before invoking actual traversal
@@ -272,43 +247,36 @@ ccl_device bool shadow_blocked_transparent_all(KernelGlobals *kg,
 float3 *shadow)
 {
 # ifdef __SPLIT_KERNEL__
-Intersection hits_[SHADOW_STACK_MAX_HITS];
-Intersection *hits = &hits_[0];
+Intersection hits_[SHADOW_STACK_MAX_HITS];
+Intersection *hits = &hits_[0];
 # elif defined(__KERNEL_CUDA__)
-Intersection *hits = kg->hits_stack;
+Intersection *hits = kg->hits_stack;
 # else
-Intersection hits_stack[SHADOW_STACK_MAX_HITS];
-Intersection *hits = hits_stack;
+Intersection hits_stack[SHADOW_STACK_MAX_HITS];
+Intersection *hits = hits_stack;
 # endif
 # ifndef __KERNEL_GPU__
-/* Prefer to use stack but use dynamic allocation if too deep max hits
- * we need max_hits + 1 storage space due to the logic in
- * scene_intersect_shadow_all which will first store and then check if
- * the limit is exceeded.
- *
- * Ignore this on GPU because of slow/unavailable malloc().
- */
-if(max_hits + 1 > SHADOW_STACK_MAX_HITS) {
-if(kg->transparent_shadow_intersections == NULL) {
-const int transparent_max_bounce = kernel_data.integrator.transparent_max_bounce;
-kg->transparent_shadow_intersections =
-(Intersection*)malloc(sizeof(Intersection)*(transparent_max_bounce + 1));
-}
-hits = kg->transparent_shadow_intersections;
-}
-# endif /* __KERNEL_GPU__ */
-/* Invoke actual traversal. */
-return shadow_blocked_transparent_all_loop(kg,
-sd,
-shadow_sd,
-state,
-visibility,
-ray,
-hits,
-max_hits,
-shadow);
+/* Prefer to use stack but use dynamic allocation if too deep max hits
+ * we need max_hits + 1 storage space due to the logic in
+ * scene_intersect_shadow_all which will first store and then check if
+ * the limit is exceeded.
+ *
+ * Ignore this on GPU because of slow/unavailable malloc().
+ */
+if (max_hits + 1 > SHADOW_STACK_MAX_HITS) {
+if (kg->transparent_shadow_intersections == NULL) {
+const int transparent_max_bounce = kernel_data.integrator.transparent_max_bounce;
+kg->transparent_shadow_intersections = (Intersection *)malloc(sizeof(Intersection) *
+(transparent_max_bounce + 1));
+}
+hits = kg->transparent_shadow_intersections;
+}
+# endif /* __KERNEL_GPU__ */
+/* Invoke actual traversal. */
+return shadow_blocked_transparent_all_loop(
+kg, sd, shadow_sd, state, visibility, ray, hits, max_hits, shadow);
 }
-# endif /* __SHADOW_RECORD_ALL__ */
+# endif /* __SHADOW_RECORD_ALL__ */
 # if defined(__KERNEL_GPU__) || !defined(__SHADOW_RECORD_ALL__)
 /* Shadow function to compute how much light is blocked,
@@ -323,130 +291,100 @@ ccl_device bool shadow_blocked_transparent_all(KernelGlobals *kg,
 /* This function is only implementing device-independent traversal logic
  * which requires some precalculation done.
  */
-ccl_device bool shadow_blocked_transparent_stepped_loop(
-KernelGlobals *kg,
-ShaderData *sd,
-ShaderData *shadow_sd,
-ccl_addr_space PathState *state,
-const uint visibility,
-Ray *ray,
-Intersection *isect,
-const bool blocked,
-const bool is_transparent_isect,
-float3 *shadow)
+ccl_device bool shadow_blocked_transparent_stepped_loop(KernelGlobals *kg,
+ShaderData *sd,
+ShaderData *shadow_sd,
+ccl_addr_space PathState *state,
+const uint visibility,
+Ray *ray,
+Intersection *isect,
+const bool blocked,
+const bool is_transparent_isect,
+float3 *shadow)
 {
 # ifdef __VOLUME__
-VolumeState volume_state;
+VolumeState volume_state;
 # endif
-if(blocked && is_transparent_isect) {
-float3 throughput = make_float3(1.0f, 1.0f, 1.0f);
-float3 Pend = ray->P + ray->D*ray->t;
-int bounce = state->transparent_bounce;
+if (blocked && is_transparent_isect) {
+float3 throughput = make_float3(1.0f, 1.0f, 1.0f);
+float3 Pend = ray->P + ray->D * ray->t;
+int bounce = state->transparent_bounce;
 # ifdef __VOLUME__
 # ifdef __SPLIT_KERNEL__
-ccl_addr_space
+ccl_addr_space
 # endif
-PathState *ps = shadow_blocked_volume_path_state(kg,
-&volume_state,
-state,
-sd,
-ray);
+PathState *ps = shadow_blocked_volume_path_state(kg, &volume_state, state, sd, ray);
 # endif
-for(;;) {
-if(bounce >= kernel_data.integrator.transparent_max_bounce) {
-return true;
-}
-if(!scene_intersect(kg,
-*ray,
-visibility & PATH_RAY_SHADOW_TRANSPARENT,
-isect,
-NULL,
-0.0f, 0.0f))
-{
-break;
-}
-if(!shader_transparent_shadow(kg, isect)) {
-return true;
-}
-/* Attenuate the throughput. */
-if(shadow_handle_transparent_isect(kg,
-shadow_sd,
-state,
-#ifdef __VOLUME__
-ps,
-#endif
-isect,
-ray,
-&throughput))
-{
-return true;
-}
-/* Move ray forward. */
-ray->P = ray_offset(shadow_sd->P, -shadow_sd->Ng);
-if(ray->t != FLT_MAX) {
-ray->D = normalize_len(Pend - ray->P, &ray->t);
-}
-bounce++;
-}
+for (;;) {
+if (bounce >= kernel_data.integrator.transparent_max_bounce) {
+return true;
+}
+if (!scene_intersect(
+kg, *ray, visibility & PATH_RAY_SHADOW_TRANSPARENT, isect, NULL, 0.0f, 0.0f)) {
+break;
+}
+if (!shader_transparent_shadow(kg, isect)) {
+return true;
+}
+/* Attenuate the throughput. */
+if (shadow_handle_transparent_isect(kg,
+shadow_sd,
+state,
 # ifdef __VOLUME__
-/* Attenuation for last line segment towards light. */
-if(ps->volume_stack[0].shader != SHADER_NONE) {
-kernel_volume_shadow(kg, shadow_sd, ps, ray, &throughput);
-}
+ps,
 # endif
-*shadow *= throughput;
-return is_zero(throughput);
-}
+-*shadow *= throughput;
+-return is_zero(throughput);
+-}
+isect,
+ray,
+&throughput)) {
+return true;
+}
+/* Move ray forward. */
+ray->P = ray_offset(shadow_sd->P, -shadow_sd->Ng);
+if (ray->t != FLT_MAX) {
+ray->D = normalize_len(Pend - ray->P, &ray->t);
+}
+bounce++;
+}
 # ifdef __VOLUME__
-if(!blocked && state->volume_stack[0].shader != SHADER_NONE) {
-/* Apply attenuation from current volume shader. */
+/* Attenuation for last line segment towards light. */
+if (ps->volume_stack[0].shader != SHADER_NONE) {
+kernel_volume_shadow(kg, shadow_sd, ps, ray, &throughput);
+}
+# endif
+*shadow *= throughput;
+return is_zero(throughput);
+}
+# ifdef __VOLUME__
+if (!blocked && state->volume_stack[0].shader != SHADER_NONE) {
+/* Apply attenuation from current volume shader. */
 # ifdef __SPLIT_KERNEL__
-ccl_addr_space
+ccl_addr_space
 # endif
-PathState *ps = shadow_blocked_volume_path_state(kg,
-&volume_state,
-state,
-sd,
-ray);
-kernel_volume_shadow(kg, shadow_sd, ps, ray, shadow);
-}
+PathState *ps = shadow_blocked_volume_path_state(kg, &volume_state, state, sd, ray);
+kernel_volume_shadow(kg, shadow_sd, ps, ray, shadow);
+}
 # endif
-return blocked;
+return blocked;
 }
-ccl_device bool shadow_blocked_transparent_stepped(
-KernelGlobals *kg,
-ShaderData *sd,
-ShaderData *shadow_sd,
-ccl_addr_space PathState *state,
-const uint visibility,
-Ray *ray,
-Intersection *isect,
-float3 *shadow)
+ccl_device bool shadow_blocked_transparent_stepped(KernelGlobals *kg,
+ShaderData *sd,
+ShaderData *shadow_sd,
+ccl_addr_space PathState *state,
+const uint visibility,
+Ray *ray,
+Intersection *isect,
+float3 *shadow)
 {
-bool blocked = scene_intersect(kg,
-*ray,
-visibility & PATH_RAY_SHADOW_OPAQUE,
-isect,
-NULL,
-0.0f, 0.0f);
-bool is_transparent_isect = blocked
-? shader_transparent_shadow(kg, isect)
-: false;
-return shadow_blocked_transparent_stepped_loop(kg,
-sd,
-shadow_sd,
-state,
-visibility,
-ray,
-isect,
-blocked,
-is_transparent_isect,
-shadow);
+bool blocked = scene_intersect(
+kg, *ray, visibility & PATH_RAY_SHADOW_OPAQUE, isect, NULL, 0.0f, 0.0f);
+bool is_transparent_isect = blocked ? shader_transparent_shadow(kg, isect) : false;
+return shadow_blocked_transparent_stepped_loop(
+kg, sd, shadow_sd, state, visibility, ray, isect, blocked, is_transparent_isect, shadow);
 }
-# endif /* __KERNEL_GPU__ || !__SHADOW_RECORD_ALL__ */
-#endif /* __TRANSPARENT_SHADOWS__ */
+# endif /* __KERNEL_GPU__ || !__SHADOW_RECORD_ALL__ */
+#endif /* __TRANSPARENT_SHADOWS__ */
 ccl_device_inline bool shadow_blocked(KernelGlobals *kg,
 ShaderData *sd,
@@ -455,100 +393,65 @@ ccl_device_inline bool shadow_blocked(KernelGlobals *kg,
 Ray *ray_input,
 float3 *shadow)
 {
-Ray *ray = ray_input;
-Intersection isect;
-/* Some common early checks. */
-*shadow = make_float3(1.0f, 1.0f, 1.0f);
-if(ray->t == 0.0f) {
-return false;
-}
+Ray *ray = ray_input;
+Intersection isect;
+/* Some common early checks. */
+*shadow = make_float3(1.0f, 1.0f, 1.0f);
+if (ray->t == 0.0f) {
+return false;
+}
 #ifdef __SHADOW_TRICKS__
-const uint visibility = (state->flag & PATH_RAY_SHADOW_CATCHER)
-? PATH_RAY_SHADOW_NON_CATCHER
-: PATH_RAY_SHADOW;
+const uint visibility = (state->flag & PATH_RAY_SHADOW_CATCHER) ? PATH_RAY_SHADOW_NON_CATCHER :
+PATH_RAY_SHADOW;
 #else
-const uint visibility = PATH_RAY_SHADOW;
+const uint visibility = PATH_RAY_SHADOW;
 #endif
-/* Do actual shadow shading. */
-/* First of all, we check if integrator requires transparent shadows.
- * if not, we use simplest and fastest ever way to calculate occlusion.
- */
+/* Do actual shadow shading. */
+/* First of all, we check if integrator requires transparent shadows.
+ * if not, we use simplest and fastest ever way to calculate occlusion.
+ */
 #ifdef __TRANSPARENT_SHADOWS__
-if(!kernel_data.integrator.transparent_shadows)
+if (!kernel_data.integrator.transparent_shadows)
 #endif
-{
-return shadow_blocked_opaque(kg,
-shadow_sd,
-state,
-visibility,
-ray,
-&isect,
-shadow);
-}
+{
+return shadow_blocked_opaque(kg, shadow_sd, state, visibility, ray, &isect, shadow);
+}
 #ifdef __TRANSPARENT_SHADOWS__
 # ifdef __SHADOW_RECORD_ALL__
-/* For the transparent shadows we try to use record-all logic on the
- * devices which supports this.
- */
-const int transparent_max_bounce = kernel_data.integrator.transparent_max_bounce;
-/* Check transparent bounces here, for volume scatter which can do
- * lighting before surface path termination is checked.
- */
-if(state->transparent_bounce >= transparent_max_bounce) {
-return true;
-}
-const uint max_hits = transparent_max_bounce - state->transparent_bounce - 1;
+/* For the transparent shadows we try to use record-all logic on the
+ * devices which supports this.
+ */
+const int transparent_max_bounce = kernel_data.integrator.transparent_max_bounce;
+/* Check transparent bounces here, for volume scatter which can do
+ * lighting before surface path termination is checked.
+ */
+if (state->transparent_bounce >= transparent_max_bounce) {
+return true;
+}
+const uint max_hits = transparent_max_bounce - state->transparent_bounce - 1;
 # ifdef __KERNEL_GPU__
-/* On GPU we do trickey with tracing opaque ray first, this avoids speed
- * regressions in some files.
- *
- * TODO(sergey): Check why using record-all behavior causes slowdown in such
- * cases. Could that be caused by a higher spill pressure?
- */
-const bool blocked = scene_intersect(kg,
-*ray,
-visibility & PATH_RAY_SHADOW_OPAQUE,
-&isect,
-NULL,
-0.0f, 0.0f);
-const bool is_transparent_isect = blocked
-? shader_transparent_shadow(kg, &isect)
-: false;
-if(!blocked || !is_transparent_isect ||
-max_hits + 1 >= SHADOW_STACK_MAX_HITS)
-{
-return shadow_blocked_transparent_stepped_loop(kg,
-sd,
-shadow_sd,
-state,
-visibility,
-ray,
-&isect,
-blocked,
-is_transparent_isect,
-shadow);
-}
-# endif /* __KERNEL_GPU__ */
-return shadow_blocked_transparent_all(kg,
-sd,
-shadow_sd,
-state,
-visibility,
-ray,
-max_hits,
-shadow);
+/* On GPU we do trickey with tracing opaque ray first, this avoids speed
+ * regressions in some files.
+ *
+ * TODO(sergey): Check why using record-all behavior causes slowdown in such
+ * cases. Could that be caused by a higher spill pressure?
+ */
+const bool blocked = scene_intersect(
+kg, *ray, visibility & PATH_RAY_SHADOW_OPAQUE, &isect, NULL, 0.0f, 0.0f);
+const bool is_transparent_isect = blocked ? shader_transparent_shadow(kg, &isect) : false;
+if (!blocked || !is_transparent_isect || max_hits + 1 >= SHADOW_STACK_MAX_HITS) {
+return shadow_blocked_transparent_stepped_loop(
+kg, sd, shadow_sd, state, visibility, ray, &isect, blocked, is_transparent_isect, shadow);
+}
+# endif /* __KERNEL_GPU__ */
+return shadow_blocked_transparent_all(
+kg, sd, shadow_sd, state, visibility, ray, max_hits, shadow);
 # else /* __SHADOW_RECORD_ALL__ */
-/* Fallback to a slowest version which works on all devices. */
-return shadow_blocked_transparent_stepped(kg,
-sd,
-shadow_sd,
-state,
-visibility,
-ray,
-&isect,
-shadow);
-# endif /* __SHADOW_RECORD_ALL__ */
-#endif /* __TRANSPARENT_SHADOWS__ */
+/* Fallback to a slowest version which works on all devices. */
+return shadow_blocked_transparent_stepped(
+kg, sd, shadow_sd, state, visibility, ray, &isect, shadow);
+# endif /* __SHADOW_RECORD_ALL__ */
+#endif /* __TRANSPARENT_SHADOWS__ */
 }
 #undef SHADOW_STACK_MAX_HITS