
Cleanup: split surface/displacement/volume shader eval into separate files

2022-09-02 15:32:46 +02:00
parent b865339833
commit aa174f632e
18 changed files with 1045 additions and 1006 deletions
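
At a glance, the renames this cleanup performs (collected from the hunks below) map the old entry points in integrator/shader_eval.h onto three per-domain headers; shader_cryptomatte_id() is dropped and inlined at its call site in film_write_data_passes:

// surface: kernel/integrator/surface_shader.h
//   shader_eval_surface<...>        -> surface_shader_eval<...>
//   shader_prepare_surface_closures -> surface_shader_prepare_closures
//   shader_bsdf_eval                -> surface_shader_bsdf_eval
//   shader_bsdf_sample_closure      -> surface_shader_bsdf_sample_closure
//   shader_bsdf_bssrdf_pick         -> surface_shader_bsdf_bssrdf_pick
//   shader_bsdf_transparency/alpha  -> surface_shader_transparency/alpha
//   shader_background_eval          -> surface_shader_background
//   shader_emissive_eval            -> surface_shader_emission
//   shader_holdout_apply            -> surface_shader_apply_holdout
// volume: kernel/integrator/volume_shader.h
//   shader_eval_volume<...>         -> volume_shader_eval<...>
//   shader_copy_volume_phases       -> volume_shader_copy_phases
//   shader_volume_phase_eval/sample -> volume_shader_phase_eval/sample
// displacement: kernel/integrator/displacement_shader.h
//   shader_eval_displacement        -> displacement_shader_eval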

View File

@@ -236,6 +236,7 @@ set(SRC_KERNEL_FILM_HEADERS
)
set(SRC_KERNEL_INTEGRATOR_HEADERS
integrator/displacement_shader.h
integrator/init_from_bake.h
integrator/init_from_camera.h
integrator/intersect_closest.h
@@ -247,7 +248,6 @@ set(SRC_KERNEL_INTEGRATOR_HEADERS
integrator/path_state.h
integrator/shade_background.h
integrator/shade_light.h
integrator/shader_eval.h
integrator/shade_shadow.h
integrator/shade_surface.h
integrator/shade_volume.h
@@ -260,6 +260,8 @@ set(SRC_KERNEL_INTEGRATOR_HEADERS
integrator/subsurface_disk.h
integrator/subsurface.h
integrator/subsurface_random_walk.h
integrator/surface_shader.h
integrator/volume_shader.h
integrator/volume_stack.h
)

View File

@@ -4,7 +4,8 @@
#pragma once
#include "kernel/camera/projection.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/integrator/displacement_shader.h"
#include "kernel/integrator/surface_shader.h"
#include "kernel/geom/geom.h"
@@ -25,7 +26,7 @@ ccl_device void kernel_displace_evaluate(KernelGlobals kg,
/* Evaluate displacement shader. */
const float3 P = sd.P;
shader_eval_displacement(kg, INTEGRATOR_STATE_NULL, &sd);
displacement_shader_eval(kg, INTEGRATOR_STATE_NULL, &sd);
float3 D = sd.P - P;
object_inverse_dir_transform(kg, &sd, &D);
@@ -64,10 +65,10 @@ ccl_device void kernel_background_evaluate(KernelGlobals kg,
/* Evaluate shader.
* This is being evaluated for all BSDFs, so path flag does not contain a specific type. */
const uint32_t path_flag = PATH_RAY_EMISSION;
shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_LIGHT &
surface_shader_eval<KERNEL_FEATURE_NODE_MASK_SURFACE_LIGHT &
~(KERNEL_FEATURE_NODE_RAYTRACE | KERNEL_FEATURE_NODE_LIGHT_PATH)>(
kg, INTEGRATOR_STATE_NULL, &sd, NULL, path_flag);
Spectrum color = shader_background_eval(&sd);
Spectrum color = surface_shader_background(&sd);
#ifdef __KERNEL_DEBUG_NAN__
if (!isfinite_safe(color)) {
@@ -99,12 +100,12 @@ ccl_device void kernel_curve_shadow_transparency_evaluate(
shader_setup_from_curve(kg, &sd, in.object, in.prim, __float_as_int(in.v), in.u);
/* Evaluate transparency. */
shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_SHADOW &
surface_shader_eval<KERNEL_FEATURE_NODE_MASK_SURFACE_SHADOW &
~(KERNEL_FEATURE_NODE_RAYTRACE | KERNEL_FEATURE_NODE_LIGHT_PATH)>(
kg, INTEGRATOR_STATE_NULL, &sd, NULL, PATH_RAY_SHADOW);
/* Write output. */
output[offset] = clamp(average(shader_bsdf_transparency(kg, &sd)), 0.0f, 1.0f);
output[offset] = clamp(average(surface_shader_transparency(kg, &sd)), 0.0f, 1.0f);
}
CCL_NAMESPACE_END

View File

@@ -41,7 +41,7 @@ ccl_device_inline void film_write_data_passes(KernelGlobals kg,
if (!(path_flag & PATH_RAY_SINGLE_PASS_DONE)) {
if (!(sd->flag & SD_TRANSPARENT) || kernel_data.film.pass_alpha_threshold == 0.0f ||
average(shader_bsdf_alpha(kg, sd)) >= kernel_data.film.pass_alpha_threshold) {
average(surface_shader_alpha(kg, sd)) >= kernel_data.film.pass_alpha_threshold) {
if (INTEGRATOR_STATE(state, path, sample) == 0) {
if (flag & PASSMASK(DEPTH)) {
const float depth = camera_z_depth(kg, sd->P);
@@ -62,11 +62,11 @@ ccl_device_inline void film_write_data_passes(KernelGlobals kg,
}
if (flag & PASSMASK(NORMAL)) {
const float3 normal = shader_bsdf_average_normal(kg, sd);
const float3 normal = surface_shader_average_normal(kg, sd);
film_write_pass_float3(buffer + kernel_data.film.pass_normal, normal);
}
if (flag & PASSMASK(ROUGHNESS)) {
const float roughness = shader_bsdf_average_roughness(sd);
const float roughness = surface_shader_average_roughness(sd);
film_write_pass_float(buffer + kernel_data.film.pass_roughness, roughness);
}
if (flag & PASSMASK(UV)) {
@@ -86,7 +86,7 @@ ccl_device_inline void film_write_data_passes(KernelGlobals kg,
if (kernel_data.film.cryptomatte_passes) {
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
const float matte_weight = average(throughput) *
(1.0f - average(shader_bsdf_transparency(kg, sd)));
(1.0f - average(surface_shader_transparency(kg, sd)));
if (matte_weight > 0.0f) {
ccl_global float *cryptomatte_buffer = buffer + kernel_data.film.pass_cryptomatte;
if (kernel_data.film.cryptomatte_passes & CRYPT_OBJECT) {
@@ -95,7 +95,7 @@ ccl_device_inline void film_write_data_passes(KernelGlobals kg,
cryptomatte_buffer, kernel_data.film.cryptomatte_depth, id, matte_weight);
}
if (kernel_data.film.cryptomatte_passes & CRYPT_MATERIAL) {
const float id = shader_cryptomatte_id(kg, sd->shader);
const float id = kernel_data_fetch(shaders, (sd->shader & SHADER_MASK)).cryptomatte_id;
cryptomatte_buffer += film_write_cryptomatte_pass(
cryptomatte_buffer, kernel_data.film.cryptomatte_depth, id, matte_weight);
}
@@ -110,17 +110,17 @@ ccl_device_inline void film_write_data_passes(KernelGlobals kg,
if (flag & PASSMASK(DIFFUSE_COLOR)) {
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
film_write_pass_spectrum(buffer + kernel_data.film.pass_diffuse_color,
shader_bsdf_diffuse(kg, sd) * throughput);
surface_shader_diffuse(kg, sd) * throughput);
}
if (flag & PASSMASK(GLOSSY_COLOR)) {
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
film_write_pass_spectrum(buffer + kernel_data.film.pass_glossy_color,
shader_bsdf_glossy(kg, sd) * throughput);
surface_shader_glossy(kg, sd) * throughput);
}
if (flag & PASSMASK(TRANSMISSION_COLOR)) {
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
film_write_pass_spectrum(buffer + kernel_data.film.pass_transmission_color,
shader_bsdf_transmission(kg, sd) * throughput);
surface_shader_transmission(kg, sd) * throughput);
}
if (flag & PASSMASK(MIST)) {
/* Bring depth into 0..1 range. */
@@ -144,7 +144,7 @@ ccl_device_inline void film_write_data_passes(KernelGlobals kg,
/* Modulate by transparency */
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
const Spectrum alpha = shader_bsdf_alpha(kg, sd);
const Spectrum alpha = surface_shader_alpha(kg, sd);
const float mist_output = (1.0f - mist) * average(throughput * alpha);
/* Note that the final value in the render buffer we want is 1 - mist_output,

View File

@@ -0,0 +1,38 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright 2011-2022 Blender Foundation */
/* Functions to evaluate displacement shader. */
#pragma once
#include "kernel/svm/svm.h"
#ifdef __OSL__
# include "kernel/osl/shader.h"
#endif
CCL_NAMESPACE_BEGIN
template<typename ConstIntegratorGenericState>
ccl_device void displacement_shader_eval(KernelGlobals kg,
ConstIntegratorGenericState state,
ccl_private ShaderData *sd)
{
sd->num_closure = 0;
sd->num_closure_left = 0;
/* this will modify sd->P */
#ifdef __SVM__
# ifdef __OSL__
if (kg->osl)
OSLShader::eval_displacement(kg, state, sd);
else
# endif
{
svm_eval_nodes<KERNEL_FEATURE_NODE_MASK_DISPLACEMENT, SHADER_TYPE_DISPLACEMENT>(
kg, state, sd, NULL, 0);
}
#endif
}
CCL_NAMESPACE_END
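
For context, a minimal usage sketch mirroring the kernel_displace_evaluate hunk above: the caller evaluates the displacement shader in place and reads the offset back from sd->P.

/* Sketch of the call pattern from kernel_displace_evaluate; sd is a ShaderData
 * already set up for the surface point being displaced. */
const float3 P = sd.P;
displacement_shader_eval(kg, INTEGRATOR_STATE_NULL, &sd);
float3 D = sd.P - P;                        /* world-space displacement */
object_inverse_dir_transform(kg, &sd, &D);  /* convert back to object space */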

View File

@@ -5,7 +5,6 @@
#include "kernel/bvh/bvh.h"
#include "kernel/geom/geom.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/integrator/volume_stack.h"
CCL_NAMESPACE_BEGIN

View File

@@ -807,7 +807,7 @@ ccl_device_forceinline bool mnee_path_contribution(KernelGlobals kg,
float3 wo = normalize_len(vertices[0].p - sd->P, &wo_len);
/* Initialize throughput and evaluate receiver bsdf * |n.wo|. */
shader_bsdf_eval(kg, sd, wo, false, throughput, ls->shader);
surface_shader_bsdf_eval(kg, sd, wo, false, throughput, ls->shader);
/* Update light sample with new position / direction
* and keep pdf in vertex area measure */
@@ -913,7 +913,7 @@ ccl_device_forceinline bool mnee_path_contribution(KernelGlobals kg,
INTEGRATOR_STATE_WRITE(state, path, bounce) = bounce + 1 + vi;
/* Evaluate shader nodes at solution vi. */
shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_SHADOW>(
surface_shader_eval<KERNEL_FEATURE_NODE_MASK_SURFACE_SHADOW>(
kg, state, sd_mnee, NULL, PATH_RAY_DIFFUSE, true);
/* Set light looking dir. */
@@ -1006,7 +1006,7 @@ ccl_device_forceinline int kernel_path_mnee_sample(KernelGlobals kg,
return 0;
/* Last bool argument is the MNEE flag (for TINY_MAX_CLOSURE cap in kernel_shader.h). */
shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_SHADOW>(
surface_shader_eval<KERNEL_FEATURE_NODE_MASK_SURFACE_SHADOW>(
kg, state, sd_mnee, NULL, PATH_RAY_DIFFUSE, true);
/* Get and sample refraction bsdf */

View File

@@ -5,7 +5,7 @@
#include "kernel/film/light_passes.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/integrator/surface_shader.h"
#include "kernel/light/light.h"
#include "kernel/light/sample.h"
@@ -32,7 +32,7 @@ ccl_device Spectrum integrator_eval_background_shader(KernelGlobals kg,
/* Use fast constant background color if available. */
Spectrum L = zero_spectrum();
if (shader_constant_emission_eval(kg, shader, &L)) {
if (surface_shader_constant_emission(kg, shader, &L)) {
return L;
}
@@ -52,10 +52,10 @@ ccl_device Spectrum integrator_eval_background_shader(KernelGlobals kg,
PROFILING_SHADER(emission_sd->object, emission_sd->shader);
PROFILING_EVENT(PROFILING_SHADE_LIGHT_EVAL);
shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_BACKGROUND>(
surface_shader_eval<KERNEL_FEATURE_NODE_MASK_SURFACE_BACKGROUND>(
kg, state, emission_sd, render_buffer, path_flag | PATH_RAY_EMISSION);
return shader_background_eval(emission_sd);
return surface_shader_background(emission_sd);
}
ccl_device_inline void integrate_background(KernelGlobals kg,

View File

@@ -4,7 +4,7 @@
#pragma once
#include "kernel/film/light_passes.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/integrator/surface_shader.h"
#include "kernel/light/light.h"
#include "kernel/light/sample.h"

View File

@@ -4,7 +4,7 @@
#pragma once
#include "kernel/integrator/shade_volume.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/integrator/surface_shader.h"
#include "kernel/integrator/volume_stack.h"
CCL_NAMESPACE_BEGIN
@@ -40,7 +40,7 @@ ccl_device_inline Spectrum integrate_transparent_surface_shadow(KernelGlobals kg
/* Evaluate shader. */
if (!(shadow_sd->flag & SD_HAS_ONLY_VOLUME)) {
shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_SHADOW>(
surface_shader_eval<KERNEL_FEATURE_NODE_MASK_SURFACE_SHADOW>(
kg, state, shadow_sd, NULL, PATH_RAY_SHADOW);
}
@@ -50,7 +50,7 @@ ccl_device_inline Spectrum integrate_transparent_surface_shadow(KernelGlobals kg
# endif
/* Compute transparency from closures. */
return shader_bsdf_transparency(kg, shadow_sd);
return surface_shader_transparency(kg, shadow_sd);
}
# ifdef __VOLUME__

View File

@@ -10,8 +10,8 @@
#include "kernel/integrator/mnee.h"
#include "kernel/integrator/path_state.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/integrator/subsurface.h"
#include "kernel/integrator/surface_shader.h"
#include "kernel/integrator/volume_stack.h"
#include "kernel/light/light.h"
@@ -88,7 +88,7 @@ ccl_device_forceinline bool integrate_surface_holdout(KernelGlobals kg,
if (((sd->flag & SD_HOLDOUT) || (sd->object_flag & SD_OBJECT_HOLDOUT_MASK)) &&
(path_flag & PATH_RAY_TRANSPARENT_BACKGROUND)) {
const Spectrum holdout_weight = shader_holdout_apply(kg, sd);
const Spectrum holdout_weight = surface_shader_apply_holdout(kg, sd);
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput);
const float transparent = average(holdout_weight * throughput);
film_write_holdout(kg, state, path_flag, transparent, render_buffer);
@@ -109,7 +109,7 @@ ccl_device_forceinline void integrate_surface_emission(KernelGlobals kg,
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
/* Evaluate emissive closure. */
Spectrum L = shader_emissive_eval(sd);
Spectrum L = surface_shader_emission(sd);
float mis_weight = 1.0f;
#ifdef __HAIR__
@@ -171,7 +171,7 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
Ray ray ccl_optional_struct_init;
BsdfEval bsdf_eval ccl_optional_struct_init;
const bool is_transmission = shader_bsdf_is_transmission(sd, ls.D);
const bool is_transmission = surface_shader_is_transmission(sd, ls.D);
#ifdef __MNEE__
int mnee_vertex_count = 0;
@@ -207,7 +207,8 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
}
/* Evaluate BSDF. */
const float bsdf_pdf = shader_bsdf_eval(kg, sd, ls.D, is_transmission, &bsdf_eval, ls.shader);
const float bsdf_pdf = surface_shader_bsdf_eval(
kg, sd, ls.D, is_transmission, &bsdf_eval, ls.shader);
bsdf_eval_mul(&bsdf_eval, light_eval / ls.pdf);
if (ls.shader & SHADER_USE_MIS) {
@@ -341,7 +342,7 @@ ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce(
}
float2 rand_bsdf = path_state_rng_2D(kg, rng_state, PRNG_SURFACE_BSDF);
ccl_private const ShaderClosure *sc = shader_bsdf_bssrdf_pick(sd, &rand_bsdf);
ccl_private const ShaderClosure *sc = surface_shader_bsdf_bssrdf_pick(sd, &rand_bsdf);
#ifdef __SUBSURFACE__
/* BSSRDF closure, we schedule subsurface intersection kernel. */
@@ -356,7 +357,8 @@ ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce(
float3 bsdf_omega_in ccl_optional_struct_init;
int label;
label = shader_bsdf_sample_closure(kg, sd, sc, rand_bsdf, &bsdf_eval, &bsdf_omega_in, &bsdf_pdf);
label = surface_shader_bsdf_sample_closure(
kg, sd, sc, rand_bsdf, &bsdf_eval, &bsdf_omega_in, &bsdf_pdf);
if (bsdf_pdf == 0.0f || bsdf_eval_is_zero(&bsdf_eval)) {
return LABEL_NONE;
@@ -450,7 +452,7 @@ ccl_device_forceinline void integrate_surface_ao(KernelGlobals kg,
const float2 rand_bsdf = path_state_rng_2D(kg, rng_state, PRNG_SURFACE_BSDF);
float3 ao_N;
const Spectrum ao_weight = shader_bsdf_ao(
const Spectrum ao_weight = surface_shader_ao(
kg, sd, kernel_data.integrator.ao_additive_factor, &ao_N);
float3 ao_D;
@@ -494,7 +496,7 @@ ccl_device_forceinline void integrate_surface_ao(KernelGlobals kg,
const uint16_t transparent_bounce = INTEGRATOR_STATE(state, path, transparent_bounce);
uint32_t shadow_flag = INTEGRATOR_STATE(state, path, flag) | PATH_RAY_SHADOW_FOR_AO;
const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput) *
shader_bsdf_alpha(kg, sd);
surface_shader_alpha(kg, sd);
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, render_pixel_index) = INTEGRATOR_STATE(
state, path, render_pixel_index);
@@ -543,7 +545,7 @@ ccl_device bool integrate_surface(KernelGlobals kg,
{
/* Evaluate shader. */
PROFILING_EVENT(PROFILING_SHADE_SURFACE_EVAL);
shader_eval_surface<node_feature_mask>(kg, state, &sd, render_buffer, path_flag);
surface_shader_eval<node_feature_mask>(kg, state, &sd, render_buffer, path_flag);
/* Initialize additional RNG for BSDFs. */
if (sd.flag & SD_BSDF_NEEDS_LCG) {
@@ -565,7 +567,7 @@ ccl_device bool integrate_surface(KernelGlobals kg,
#endif
{
/* Filter closures. */
shader_prepare_surface_closures(kg, state, &sd, path_flag);
surface_shader_prepare_closures(kg, state, &sd, path_flag);
/* Evaluate holdout. */
if (!integrate_surface_holdout(kg, state, &sd, render_buffer)) {
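
Taken together, the shade_surface hunks above amount to the following per-bounce flow with the new naming; a condensed sketch (hypothetical driver, with RNG setup, pass writes and error handling elided):

/* sd is a ccl_private ShaderData *, names as used in the hunks above. */
surface_shader_eval<node_feature_mask>(kg, state, sd, render_buffer, path_flag);
surface_shader_prepare_closures(kg, state, sd, path_flag);
/* Holdout and emission use surface_shader_apply_holdout() / surface_shader_emission(). */
ccl_private const ShaderClosure *sc = surface_shader_bsdf_bssrdf_pick(sd, &rand_bsdf);
const int label = surface_shader_bsdf_sample_closure(
    kg, sd, sc, rand_bsdf, &bsdf_eval, &bsdf_omega_in, &bsdf_pdf);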

View File

@@ -9,7 +9,7 @@
#include "kernel/integrator/intersect_closest.h"
#include "kernel/integrator/path_state.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/integrator/volume_shader.h"
#include "kernel/integrator/volume_stack.h"
#include "kernel/light/light.h"
@@ -65,7 +65,7 @@ ccl_device_inline bool shadow_volume_shader_sample(KernelGlobals kg,
ccl_private Spectrum *ccl_restrict extinction)
{
VOLUME_READ_LAMBDA(integrator_state_read_shadow_volume_stack(state, i))
shader_eval_volume<true>(kg, state, sd, PATH_RAY_SHADOW, volume_read_lambda_pass);
volume_shader_eval<true>(kg, state, sd, PATH_RAY_SHADOW, volume_read_lambda_pass);
if (!(sd->flag & SD_EXTINCTION)) {
return false;
@@ -84,7 +84,7 @@ ccl_device_inline bool volume_shader_sample(KernelGlobals kg,
{
const uint32_t path_flag = INTEGRATOR_STATE(state, path, flag);
VOLUME_READ_LAMBDA(integrator_state_read_volume_stack(state, i))
shader_eval_volume<false>(kg, state, sd, path_flag, volume_read_lambda_pass);
volume_shader_eval<false>(kg, state, sd, path_flag, volume_read_lambda_pass);
if (!(sd->flag & (SD_EXTINCTION | SD_SCATTER | SD_EMISSION))) {
return false;
@@ -443,7 +443,7 @@ ccl_device_forceinline void volume_integrate_step_scattering(
result.direct_scatter = true;
result.direct_throughput *= coeff.sigma_s * new_transmittance / vstate.equiangular_pdf;
shader_copy_volume_phases(&result.direct_phases, sd);
volume_shader_copy_phases(&result.direct_phases, sd);
/* Multiple importance sampling. */
if (vstate.use_mis) {
@@ -479,7 +479,7 @@ ccl_device_forceinline void volume_integrate_step_scattering(
result.indirect_scatter = true;
result.indirect_t = new_t;
result.indirect_throughput *= coeff.sigma_s * new_transmittance / distance_pdf;
shader_copy_volume_phases(&result.indirect_phases, sd);
volume_shader_copy_phases(&result.indirect_phases, sd);
if (vstate.direct_sample_method != VOLUME_SAMPLE_EQUIANGULAR) {
/* If using distance sampling for direct light, just copy parameters
@@ -487,7 +487,7 @@ ccl_device_forceinline void volume_integrate_step_scattering(
result.direct_scatter = true;
result.direct_t = result.indirect_t;
result.direct_throughput = result.indirect_throughput;
shader_copy_volume_phases(&result.direct_phases, sd);
volume_shader_copy_phases(&result.direct_phases, sd);
/* Multiple importance sampling. */
if (vstate.use_mis) {
@@ -761,7 +761,7 @@ ccl_device_forceinline void integrate_volume_direct_light(
/* Evaluate BSDF. */
BsdfEval phase_eval ccl_optional_struct_init;
const float phase_pdf = shader_volume_phase_eval(kg, sd, phases, ls->D, &phase_eval);
const float phase_pdf = volume_shader_phase_eval(kg, sd, phases, ls->D, &phase_eval);
if (ls->shader & SHADER_USE_MIS) {
float mis_weight = light_sample_mis_weight_nee(kg, ls->pdf, phase_pdf);
@@ -868,7 +868,7 @@ ccl_device_forceinline bool integrate_volume_phase_scatter(
BsdfEval phase_eval ccl_optional_struct_init;
float3 phase_omega_in ccl_optional_struct_init;
const int label = shader_volume_phase_sample(
const int label = volume_shader_phase_sample(
kg, sd, phases, rand_phase, &phase_eval, &phase_omega_in, &phase_pdf);
if (phase_pdf == 0.0f || bsdf_eval_is_zero(&phase_eval)) {
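
The volume hunks follow the same pattern; a condensed sketch of how the renamed entry points are used, with arguments as in the hunks above:

volume_shader_eval<true>(kg, state, sd, PATH_RAY_SHADOW, volume_read_lambda_pass);  /* shadow rays */
volume_shader_eval<false>(kg, state, sd, path_flag, volume_read_lambda_pass);       /* camera/indirect */
volume_shader_copy_phases(&result.direct_phases, sd);  /* stash phase closures for later sampling */
const float phase_pdf = volume_shader_phase_eval(kg, sd, phases, ls->D, &phase_eval);
const int label = volume_shader_phase_sample(
    kg, sd, phases, rand_phase, &phase_eval, &phase_omega_in, &phase_pdf);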

View File

@@ -1,947 +0,0 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright 2011-2022 Blender Foundation */
/* Functions to evaluate shaders and use the resulting shader closures. */
#pragma once
#include "kernel/closure/alloc.h"
#include "kernel/closure/bsdf.h"
#include "kernel/closure/bsdf_util.h"
#include "kernel/closure/emissive.h"
#include "kernel/film/light_passes.h"
#include "kernel/svm/svm.h"
#ifdef __OSL__
# include "kernel/osl/shader.h"
#endif
CCL_NAMESPACE_BEGIN
/* Merging */
#if defined(__VOLUME__)
ccl_device_inline void shader_merge_volume_closures(ccl_private ShaderData *sd)
{
/* Merge identical closures to save closure space with stacked volumes. */
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sci = &sd->closure[i];
if (sci->type != CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID) {
continue;
}
for (int j = i + 1; j < sd->num_closure; j++) {
ccl_private ShaderClosure *scj = &sd->closure[j];
if (sci->type != scj->type) {
continue;
}
ccl_private const HenyeyGreensteinVolume *hgi = (ccl_private const HenyeyGreensteinVolume *)
sci;
ccl_private const HenyeyGreensteinVolume *hgj = (ccl_private const HenyeyGreensteinVolume *)
scj;
if (!(hgi->g == hgj->g)) {
continue;
}
sci->weight += scj->weight;
sci->sample_weight += scj->sample_weight;
int size = sd->num_closure - (j + 1);
if (size > 0) {
for (int k = 0; k < size; k++) {
scj[k] = scj[k + 1];
}
}
sd->num_closure--;
kernel_assert(sd->num_closure >= 0);
j--;
}
}
}
ccl_device_inline void shader_copy_volume_phases(ccl_private ShaderVolumePhases *ccl_restrict
phases,
ccl_private const ShaderData *ccl_restrict sd)
{
phases->num_closure = 0;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *from_sc = &sd->closure[i];
ccl_private const HenyeyGreensteinVolume *from_hg =
(ccl_private const HenyeyGreensteinVolume *)from_sc;
if (from_sc->type == CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID) {
ccl_private ShaderVolumeClosure *to_sc = &phases->closure[phases->num_closure];
to_sc->weight = from_sc->weight;
to_sc->sample_weight = from_sc->sample_weight;
to_sc->g = from_hg->g;
phases->num_closure++;
if (phases->num_closure >= MAX_VOLUME_CLOSURE) {
break;
}
}
}
}
#endif /* __VOLUME__ */
ccl_device_inline void shader_prepare_surface_closures(KernelGlobals kg,
ConstIntegratorState state,
ccl_private ShaderData *sd,
const uint32_t path_flag)
{
/* Filter out closures. */
if (kernel_data.integrator.filter_closures) {
if (kernel_data.integrator.filter_closures & FILTER_CLOSURE_EMISSION) {
sd->closure_emission_background = zero_spectrum();
}
if (kernel_data.integrator.filter_closures & FILTER_CLOSURE_DIRECT_LIGHT) {
sd->flag &= ~SD_BSDF_HAS_EVAL;
}
if (path_flag & PATH_RAY_CAMERA) {
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if ((CLOSURE_IS_BSDF_DIFFUSE(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_DIFFUSE)) ||
(CLOSURE_IS_BSDF_GLOSSY(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_GLOSSY)) ||
(CLOSURE_IS_BSDF_TRANSMISSION(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSMISSION))) {
sc->type = CLOSURE_NONE_ID;
sc->sample_weight = 0.0f;
}
else if ((CLOSURE_IS_BSDF_TRANSPARENT(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSPARENT))) {
sc->type = CLOSURE_HOLDOUT_ID;
sc->sample_weight = 0.0f;
sd->flag |= SD_HOLDOUT;
}
}
}
}
/* Defensive sampling.
*
* We can likely also do defensive sampling at deeper bounces, particularly
* for cases like a perfect mirror but possibly also others. This will need
* a good heuristic. */
if (INTEGRATOR_STATE(state, path, bounce) + INTEGRATOR_STATE(state, path, transparent_bounce) ==
0 &&
sd->num_closure > 1) {
float sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
sum += sc->sample_weight;
}
}
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
sc->sample_weight = max(sc->sample_weight, 0.125f * sum);
}
}
}
/* Filter glossy.
*
* Blurring of bsdf after bounces, for rays that have a small likelihood
* of following this particular path (diffuse, rough glossy) */
if (kernel_data.integrator.filter_glossy != FLT_MAX
#ifdef __MNEE__
&& !(INTEGRATOR_STATE(state, path, mnee) & PATH_MNEE_VALID)
#endif
) {
float blur_pdf = kernel_data.integrator.filter_glossy *
INTEGRATOR_STATE(state, path, min_ray_pdf);
if (blur_pdf < 1.0f) {
float blur_roughness = sqrtf(1.0f - blur_pdf) * 0.5f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF(sc->type)) {
bsdf_blur(kg, sc, blur_roughness);
}
}
}
}
}
/* BSDF */
ccl_device_inline bool shader_bsdf_is_transmission(ccl_private const ShaderData *sd,
const float3 omega_in)
{
return dot(sd->N, omega_in) < 0.0f;
}
ccl_device_forceinline bool _shader_bsdf_exclude(ClosureType type, uint light_shader_flags)
{
if (!(light_shader_flags & SHADER_EXCLUDE_ANY)) {
return false;
}
if (light_shader_flags & SHADER_EXCLUDE_DIFFUSE) {
if (CLOSURE_IS_BSDF_DIFFUSE(type)) {
return true;
}
}
if (light_shader_flags & SHADER_EXCLUDE_GLOSSY) {
if (CLOSURE_IS_BSDF_GLOSSY(type)) {
return true;
}
}
if (light_shader_flags & SHADER_EXCLUDE_TRANSMIT) {
if (CLOSURE_IS_BSDF_TRANSMISSION(type)) {
return true;
}
}
return false;
}
ccl_device_inline float _shader_bsdf_multi_eval(KernelGlobals kg,
ccl_private ShaderData *sd,
const float3 omega_in,
const bool is_transmission,
ccl_private const ShaderClosure *skip_sc,
ccl_private BsdfEval *result_eval,
float sum_pdf,
float sum_sample_weight,
const uint light_shader_flags)
{
/* This is the veach one-sample model with balance heuristic,
* some PDF factors drop out when using balance heuristic weighting. */
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (sc == skip_sc) {
continue;
}
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
if (CLOSURE_IS_BSDF(sc->type) && !_shader_bsdf_exclude(sc->type, light_shader_flags)) {
float bsdf_pdf = 0.0f;
Spectrum eval = bsdf_eval(kg, sd, sc, omega_in, is_transmission, &bsdf_pdf);
if (bsdf_pdf != 0.0f) {
bsdf_eval_accum(result_eval, sc->type, eval * sc->weight);
sum_pdf += bsdf_pdf * sc->sample_weight;
}
}
sum_sample_weight += sc->sample_weight;
}
}
return (sum_sample_weight > 0.0f) ? sum_pdf / sum_sample_weight : 0.0f;
}
#ifndef __KERNEL_CUDA__
ccl_device
#else
ccl_device_inline
#endif
float
shader_bsdf_eval(KernelGlobals kg,
ccl_private ShaderData *sd,
const float3 omega_in,
const bool is_transmission,
ccl_private BsdfEval *bsdf_eval,
const uint light_shader_flags)
{
bsdf_eval_init(bsdf_eval, CLOSURE_NONE_ID, zero_spectrum());
return _shader_bsdf_multi_eval(
kg, sd, omega_in, is_transmission, NULL, bsdf_eval, 0.0f, 0.0f, light_shader_flags);
}
/* Randomly sample a BSSRDF or BSDF proportional to ShaderClosure.sample_weight. */
ccl_device_inline ccl_private const ShaderClosure *shader_bsdf_bssrdf_pick(
ccl_private const ShaderData *ccl_restrict sd, ccl_private float2 *rand_bsdf)
{
int sampled = 0;
if (sd->num_closure > 1) {
/* Pick a BSDF or BSSRDF based on sample weights. */
float sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
sum += sc->sample_weight;
}
}
float r = (*rand_bsdf).x * sum;
float partial_sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
float next_sum = partial_sum + sc->sample_weight;
if (r < next_sum) {
sampled = i;
/* Rescale to reuse for direction sample, to better preserve stratification. */
(*rand_bsdf).x = (r - partial_sum) / sc->sample_weight;
break;
}
partial_sum = next_sum;
}
}
}
return &sd->closure[sampled];
}
/* Return weight for picked BSSRDF. */
ccl_device_inline Spectrum
shader_bssrdf_sample_weight(ccl_private const ShaderData *ccl_restrict sd,
ccl_private const ShaderClosure *ccl_restrict bssrdf_sc)
{
Spectrum weight = bssrdf_sc->weight;
if (sd->num_closure > 1) {
float sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
sum += sc->sample_weight;
}
}
weight *= sum / bssrdf_sc->sample_weight;
}
return weight;
}
/* Sample direction for picked BSDF, and return evaluation and pdf for all
* BSDFs combined using MIS. */
ccl_device int shader_bsdf_sample_closure(KernelGlobals kg,
ccl_private ShaderData *sd,
ccl_private const ShaderClosure *sc,
const float2 rand_bsdf,
ccl_private BsdfEval *bsdf_eval,
ccl_private float3 *omega_in,
ccl_private float *pdf)
{
/* BSSRDF should already have been handled elsewhere. */
kernel_assert(CLOSURE_IS_BSDF(sc->type));
int label;
Spectrum eval = zero_spectrum();
*pdf = 0.0f;
label = bsdf_sample(kg, sd, sc, rand_bsdf.x, rand_bsdf.y, &eval, omega_in, pdf);
if (*pdf != 0.0f) {
bsdf_eval_init(bsdf_eval, sc->type, eval * sc->weight);
if (sd->num_closure > 1) {
const bool is_transmission = shader_bsdf_is_transmission(sd, *omega_in);
float sweight = sc->sample_weight;
*pdf = _shader_bsdf_multi_eval(
kg, sd, *omega_in, is_transmission, sc, bsdf_eval, *pdf * sweight, sweight, 0);
}
}
return label;
}
ccl_device float shader_bsdf_average_roughness(ccl_private const ShaderData *sd)
{
float roughness = 0.0f;
float sum_weight = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF(sc->type)) {
/* sqrt once to undo the squaring from multiplying roughness on the
* two axes, and once for the squared roughness convention. */
float weight = fabsf(average(sc->weight));
roughness += weight * sqrtf(safe_sqrtf(bsdf_get_roughness_squared(sc)));
sum_weight += weight;
}
}
return (sum_weight > 0.0f) ? roughness / sum_weight : 0.0f;
}
ccl_device Spectrum shader_bsdf_transparency(KernelGlobals kg, ccl_private const ShaderData *sd)
{
if (sd->flag & SD_HAS_ONLY_VOLUME) {
return one_spectrum();
}
else if (sd->flag & SD_TRANSPARENT) {
return sd->closure_transparent_extinction;
}
else {
return zero_spectrum();
}
}
ccl_device void shader_bsdf_disable_transparency(KernelGlobals kg, ccl_private ShaderData *sd)
{
if (sd->flag & SD_TRANSPARENT) {
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (sc->type == CLOSURE_BSDF_TRANSPARENT_ID) {
sc->sample_weight = 0.0f;
sc->weight = zero_spectrum();
}
}
sd->flag &= ~SD_TRANSPARENT;
}
}
ccl_device Spectrum shader_bsdf_alpha(KernelGlobals kg, ccl_private const ShaderData *sd)
{
Spectrum alpha = one_spectrum() - shader_bsdf_transparency(kg, sd);
alpha = saturate(alpha);
return alpha;
}
ccl_device Spectrum shader_bsdf_diffuse(KernelGlobals kg, ccl_private const ShaderData *sd)
{
Spectrum eval = zero_spectrum();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_DIFFUSE(sc->type) || CLOSURE_IS_BSSRDF(sc->type))
eval += sc->weight;
}
return eval;
}
ccl_device Spectrum shader_bsdf_glossy(KernelGlobals kg, ccl_private const ShaderData *sd)
{
Spectrum eval = zero_spectrum();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_GLOSSY(sc->type))
eval += sc->weight;
}
return eval;
}
ccl_device Spectrum shader_bsdf_transmission(KernelGlobals kg, ccl_private const ShaderData *sd)
{
Spectrum eval = zero_spectrum();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_TRANSMISSION(sc->type))
eval += sc->weight;
}
return eval;
}
ccl_device float3 shader_bsdf_average_normal(KernelGlobals kg, ccl_private const ShaderData *sd)
{
float3 N = zero_float3();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type))
N += sc->N * fabsf(average(sc->weight));
}
return (is_zero(N)) ? sd->N : normalize(N);
}
ccl_device Spectrum shader_bsdf_ao(KernelGlobals kg,
ccl_private const ShaderData *sd,
const float ao_factor,
ccl_private float3 *N_)
{
Spectrum eval = zero_spectrum();
float3 N = zero_float3();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_DIFFUSE(sc->type)) {
ccl_private const DiffuseBsdf *bsdf = (ccl_private const DiffuseBsdf *)sc;
eval += sc->weight * ao_factor;
N += bsdf->N * fabsf(average(sc->weight));
}
}
*N_ = (is_zero(N)) ? sd->N : normalize(N);
return eval;
}
#ifdef __SUBSURFACE__
ccl_device float3 shader_bssrdf_normal(ccl_private const ShaderData *sd)
{
float3 N = zero_float3();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSSRDF(sc->type)) {
ccl_private const Bssrdf *bssrdf = (ccl_private const Bssrdf *)sc;
float avg_weight = fabsf(average(sc->weight));
N += bssrdf->N * avg_weight;
}
}
return (is_zero(N)) ? sd->N : normalize(N);
}
#endif /* __SUBSURFACE__ */
/* Constant emission optimization */
ccl_device bool shader_constant_emission_eval(KernelGlobals kg,
int shader,
ccl_private Spectrum *eval)
{
int shader_index = shader & SHADER_MASK;
int shader_flag = kernel_data_fetch(shaders, shader_index).flags;
if (shader_flag & SD_HAS_CONSTANT_EMISSION) {
const float3 emission_rgb = make_float3(
kernel_data_fetch(shaders, shader_index).constant_emission[0],
kernel_data_fetch(shaders, shader_index).constant_emission[1],
kernel_data_fetch(shaders, shader_index).constant_emission[2]);
*eval = rgb_to_spectrum(emission_rgb);
return true;
}
return false;
}
/* Background */
ccl_device Spectrum shader_background_eval(ccl_private const ShaderData *sd)
{
if (sd->flag & SD_EMISSION) {
return sd->closure_emission_background;
}
else {
return zero_spectrum();
}
}
/* Emission */
ccl_device Spectrum shader_emissive_eval(ccl_private const ShaderData *sd)
{
if (sd->flag & SD_EMISSION) {
return emissive_simple_eval(sd->Ng, sd->I) * sd->closure_emission_background;
}
else {
return zero_spectrum();
}
}
/* Holdout */
ccl_device Spectrum shader_holdout_apply(KernelGlobals kg, ccl_private ShaderData *sd)
{
Spectrum weight = zero_spectrum();
/* For objects marked as holdout, preserve transparency and remove all other
* closures, replacing them with a holdout weight. */
if (sd->object_flag & SD_OBJECT_HOLDOUT_MASK) {
if ((sd->flag & SD_TRANSPARENT) && !(sd->flag & SD_HAS_ONLY_VOLUME)) {
weight = one_spectrum() - sd->closure_transparent_extinction;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (!CLOSURE_IS_BSDF_TRANSPARENT(sc->type)) {
sc->type = NBUILTIN_CLOSURES;
}
}
sd->flag &= ~(SD_CLOSURE_FLAGS - (SD_TRANSPARENT | SD_BSDF));
}
else {
weight = one_spectrum();
}
}
else {
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_HOLDOUT(sc->type)) {
weight += sc->weight;
}
}
}
return weight;
}
/* Surface Evaluation */
template<uint node_feature_mask, typename ConstIntegratorGenericState>
ccl_device void shader_eval_surface(KernelGlobals kg,
ConstIntegratorGenericState state,
ccl_private ShaderData *ccl_restrict sd,
ccl_global float *ccl_restrict buffer,
uint32_t path_flag,
bool use_caustics_storage = false)
{
/* If path is being terminated, we are tracing a shadow ray or evaluating
* emission, then we don't need to store closures. The emission and shadow
* shader data also do not have a closure array to save GPU memory. */
int max_closures;
if (path_flag & (PATH_RAY_TERMINATE | PATH_RAY_SHADOW | PATH_RAY_EMISSION)) {
max_closures = 0;
}
else {
max_closures = use_caustics_storage ? CAUSTICS_MAX_CLOSURE : kernel_data.max_closures;
}
sd->num_closure = 0;
sd->num_closure_left = max_closures;
#ifdef __OSL__
if (kg->osl) {
if (sd->object == OBJECT_NONE && sd->lamp == LAMP_NONE) {
OSLShader::eval_background(kg, state, sd, path_flag);
}
else {
OSLShader::eval_surface(kg, state, sd, path_flag);
}
}
else
#endif
{
#ifdef __SVM__
svm_eval_nodes<node_feature_mask, SHADER_TYPE_SURFACE>(kg, state, sd, buffer, path_flag);
#else
if (sd->object == OBJECT_NONE) {
sd->closure_emission_background = make_spectrum(0.8f);
sd->flag |= SD_EMISSION;
}
else {
ccl_private DiffuseBsdf *bsdf = (ccl_private DiffuseBsdf *)bsdf_alloc(
sd, sizeof(DiffuseBsdf), make_spectrum(0.8f));
if (bsdf != NULL) {
bsdf->N = sd->N;
sd->flag |= bsdf_diffuse_setup(bsdf);
}
}
#endif
}
}
/* Volume */
#ifdef __VOLUME__
ccl_device_inline float _shader_volume_phase_multi_eval(
ccl_private const ShaderData *sd,
ccl_private const ShaderVolumePhases *phases,
const float3 omega_in,
int skip_phase,
ccl_private BsdfEval *result_eval,
float sum_pdf,
float sum_sample_weight)
{
for (int i = 0; i < phases->num_closure; i++) {
if (i == skip_phase)
continue;
ccl_private const ShaderVolumeClosure *svc = &phases->closure[i];
float phase_pdf = 0.0f;
Spectrum eval = volume_phase_eval(sd, svc, omega_in, &phase_pdf);
if (phase_pdf != 0.0f) {
bsdf_eval_accum(result_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval);
sum_pdf += phase_pdf * svc->sample_weight;
}
sum_sample_weight += svc->sample_weight;
}
return (sum_sample_weight > 0.0f) ? sum_pdf / sum_sample_weight : 0.0f;
}
ccl_device float shader_volume_phase_eval(KernelGlobals kg,
ccl_private const ShaderData *sd,
ccl_private const ShaderVolumePhases *phases,
const float3 omega_in,
ccl_private BsdfEval *phase_eval)
{
bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, zero_spectrum());
return _shader_volume_phase_multi_eval(sd, phases, omega_in, -1, phase_eval, 0.0f, 0.0f);
}
ccl_device int shader_volume_phase_sample(KernelGlobals kg,
ccl_private const ShaderData *sd,
ccl_private const ShaderVolumePhases *phases,
float2 rand_phase,
ccl_private BsdfEval *phase_eval,
ccl_private float3 *omega_in,
ccl_private float *pdf)
{
int sampled = 0;
if (phases->num_closure > 1) {
/* pick a phase closure based on sample weights */
float sum = 0.0f;
for (sampled = 0; sampled < phases->num_closure; sampled++) {
ccl_private const ShaderVolumeClosure *svc = &phases->closure[sampled];
sum += svc->sample_weight;
}
float r = rand_phase.x * sum;
float partial_sum = 0.0f;
for (sampled = 0; sampled < phases->num_closure; sampled++) {
ccl_private const ShaderVolumeClosure *svc = &phases->closure[sampled];
float next_sum = partial_sum + svc->sample_weight;
if (r <= next_sum) {
/* Rescale to reuse for BSDF direction sample. */
rand_phase.x = (r - partial_sum) / svc->sample_weight;
break;
}
partial_sum = next_sum;
}
if (sampled == phases->num_closure) {
*pdf = 0.0f;
return LABEL_NONE;
}
}
/* todo: this isn't quite correct, we don't weight anisotropy properly
* depending on color channels, even if this is perhaps not a common case */
ccl_private const ShaderVolumeClosure *svc = &phases->closure[sampled];
int label;
Spectrum eval = zero_spectrum();
*pdf = 0.0f;
label = volume_phase_sample(sd, svc, rand_phase.x, rand_phase.y, &eval, omega_in, pdf);
if (*pdf != 0.0f) {
bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval);
}
return label;
}
ccl_device int shader_phase_sample_closure(KernelGlobals kg,
ccl_private const ShaderData *sd,
ccl_private const ShaderVolumeClosure *sc,
const float2 rand_phase,
ccl_private BsdfEval *phase_eval,
ccl_private float3 *omega_in,
ccl_private float *pdf)
{
int label;
Spectrum eval = zero_spectrum();
*pdf = 0.0f;
label = volume_phase_sample(sd, sc, rand_phase.x, rand_phase.y, &eval, omega_in, pdf);
if (*pdf != 0.0f)
bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval);
return label;
}
/* Volume Evaluation */
template<const bool shadow, typename StackReadOp, typename ConstIntegratorGenericState>
ccl_device_inline void shader_eval_volume(KernelGlobals kg,
ConstIntegratorGenericState state,
ccl_private ShaderData *ccl_restrict sd,
const uint32_t path_flag,
StackReadOp stack_read)
{
/* If path is being terminated, we are tracing a shadow ray or evaluating
* emission, then we don't need to store closures. The emission and shadow
* shader data also do not have a closure array to save GPU memory. */
int max_closures;
if (path_flag & (PATH_RAY_TERMINATE | PATH_RAY_SHADOW | PATH_RAY_EMISSION)) {
max_closures = 0;
}
else {
max_closures = kernel_data.max_closures;
}
/* reset closures once at the start, we will be accumulating the closures
* for all volumes in the stack into a single array of closures */
sd->num_closure = 0;
sd->num_closure_left = max_closures;
sd->flag = 0;
sd->object_flag = 0;
for (int i = 0;; i++) {
const VolumeStack entry = stack_read(i);
if (entry.shader == SHADER_NONE) {
break;
}
/* Setup shader-data from stack. it's mostly setup already in
* shader_setup_from_volume, this switching should be quick. */
sd->object = entry.object;
sd->lamp = LAMP_NONE;
sd->shader = entry.shader;
sd->flag &= ~SD_SHADER_FLAGS;
sd->flag |= kernel_data_fetch(shaders, (sd->shader & SHADER_MASK)).flags;
sd->object_flag &= ~SD_OBJECT_FLAGS;
if (sd->object != OBJECT_NONE) {
sd->object_flag |= kernel_data_fetch(object_flag, sd->object);
# ifdef __OBJECT_MOTION__
/* todo: this is inefficient for motion blur, we should be
* caching matrices instead of recomputing them each step */
shader_setup_object_transforms(kg, sd, sd->time);
if ((sd->object_flag & SD_OBJECT_HAS_VOLUME_MOTION) != 0) {
AttributeDescriptor v_desc = find_attribute(kg, sd, ATTR_STD_VOLUME_VELOCITY);
kernel_assert(v_desc.offset != ATTR_STD_NOT_FOUND);
const float3 P = sd->P;
const float velocity_scale = kernel_data_fetch(objects, sd->object).velocity_scale;
const float time_offset = kernel_data.cam.motion_position == MOTION_POSITION_CENTER ?
0.5f :
0.0f;
const float time = kernel_data.cam.motion_position == MOTION_POSITION_END ?
(1.0f - kernel_data.cam.shuttertime) + sd->time :
sd->time;
/* Use a 1st order semi-lagrangian advection scheme to estimate what volume quantity
* existed, or will exist, at the given time:
*
* `phi(x, T) = phi(x - (T - t) * u(x, T), t)`
*
* where
*
* x : position
* T : super-sampled time (or ray time)
* t : current time of the simulation (in rendering we assume this is center frame with
* relative time = 0)
* phi : the volume quantity
* u : the velocity field
*
* But first we need to determine the velocity field `u(x, T)`, which we can estimate also
* using semi-lagrangian advection.
*
* `u(x, T) = u(x - (T - t) * u(x, T), t)`
*
* This is the typical way to model self-advection in fluid dynamics, however, we do not
* account for other forces affecting the velocity during simulation (pressure, buoyancy,
* etc.): this gives a linear interpolation when fluids are mostly "curvy". For better
* results, a higher order interpolation scheme can be used (at the cost of more lookups),
* or an interpolation of the velocity fields for the previous and next frames could also
* be used to estimate `u(x, T)` (which will cost more memory and lookups).
*
* References:
* "Eulerian Motion Blur", Kim and Ko, 2007
* "Production Volume Rendering", Wreninge et al., 2012
*/
/* Find velocity. */
float3 velocity = primitive_volume_attribute_float3(kg, sd, v_desc);
object_dir_transform(kg, sd, &velocity);
/* Find advected P. */
sd->P = P - (time - time_offset) * velocity_scale * velocity;
/* Find advected velocity. */
velocity = primitive_volume_attribute_float3(kg, sd, v_desc);
object_dir_transform(kg, sd, &velocity);
/* Find advected P. */
sd->P = P - (time - time_offset) * velocity_scale * velocity;
}
# endif
}
/* evaluate shader */
# ifdef __SVM__
# ifdef __OSL__
if (kg->osl) {
OSLShader::eval_volume(kg, state, sd, path_flag);
}
else
# endif
{
svm_eval_nodes<KERNEL_FEATURE_NODE_MASK_VOLUME, SHADER_TYPE_VOLUME>(
kg, state, sd, NULL, path_flag);
}
# endif
/* Merge closures to avoid exceeding number of closures limit. */
if (!shadow) {
if (i > 0) {
shader_merge_volume_closures(sd);
}
}
}
}
#endif /* __VOLUME__ */
/* Displacement Evaluation */
template<typename ConstIntegratorGenericState>
ccl_device void shader_eval_displacement(KernelGlobals kg,
ConstIntegratorGenericState state,
ccl_private ShaderData *sd)
{
sd->num_closure = 0;
sd->num_closure_left = 0;
/* this will modify sd->P */
#ifdef __SVM__
# ifdef __OSL__
if (kg->osl)
OSLShader::eval_displacement(kg, state, sd);
else
# endif
{
svm_eval_nodes<KERNEL_FEATURE_NODE_MASK_DISPLACEMENT, SHADER_TYPE_DISPLACEMENT>(
kg, state, sd, NULL, 0);
}
#endif
}
/* Cryptomatte */
ccl_device float shader_cryptomatte_id(KernelGlobals kg, int shader)
{
return kernel_data_fetch(shaders, (shader & SHADER_MASK)).cryptomatte_id;
}
CCL_NAMESPACE_END

View File

@@ -15,9 +15,9 @@
#include "kernel/integrator/intersect_volume_stack.h"
#include "kernel/integrator/path_state.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/integrator/subsurface_disk.h"
#include "kernel/integrator/subsurface_random_walk.h"
#include "kernel/integrator/surface_shader.h"
CCL_NAMESPACE_BEGIN
@@ -51,7 +51,7 @@ ccl_device int subsurface_bounce(KernelGlobals kg,
PATH_RAY_SUBSURFACE_RANDOM_WALK);
/* Compute weight, optionally including Fresnel from entry point. */
Spectrum weight = shader_bssrdf_sample_weight(sd, sc);
Spectrum weight = surface_shader_bssrdf_sample_weight(sd, sc);
if (bssrdf->roughness != FLT_MAX) {
path_flag |= PATH_RAY_SUBSURFACE_USE_FRESNEL;
}
@@ -89,7 +89,7 @@ ccl_device void subsurface_shader_data_setup(KernelGlobals kg,
/* Get bump mapped normal from shader evaluation at exit point. */
float3 N = sd->N;
if (sd->flag & SD_HAS_BSSRDF_BUMP) {
N = shader_bssrdf_normal(sd);
N = surface_shader_bssrdf_normal(sd);
}
/* Setup diffuse BSDF at the exit point. This replaces shader_eval_surface. */

View File

@@ -0,0 +1,587 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright 2011-2022 Blender Foundation */
/* Functions to evaluate shaders. */
#pragma once
#include "kernel/closure/alloc.h"
#include "kernel/closure/bsdf.h"
#include "kernel/closure/bsdf_util.h"
#include "kernel/closure/emissive.h"
#include "kernel/svm/svm.h"
#ifdef __OSL__
# include "kernel/osl/shader.h"
#endif
CCL_NAMESPACE_BEGIN
ccl_device_inline void surface_shader_prepare_closures(KernelGlobals kg,
ConstIntegratorState state,
ccl_private ShaderData *sd,
const uint32_t path_flag)
{
/* Filter out closures. */
if (kernel_data.integrator.filter_closures) {
if (kernel_data.integrator.filter_closures & FILTER_CLOSURE_EMISSION) {
sd->closure_emission_background = zero_spectrum();
}
if (kernel_data.integrator.filter_closures & FILTER_CLOSURE_DIRECT_LIGHT) {
sd->flag &= ~SD_BSDF_HAS_EVAL;
}
if (path_flag & PATH_RAY_CAMERA) {
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if ((CLOSURE_IS_BSDF_DIFFUSE(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_DIFFUSE)) ||
(CLOSURE_IS_BSDF_GLOSSY(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_GLOSSY)) ||
(CLOSURE_IS_BSDF_TRANSMISSION(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSMISSION))) {
sc->type = CLOSURE_NONE_ID;
sc->sample_weight = 0.0f;
}
else if ((CLOSURE_IS_BSDF_TRANSPARENT(sc->type) &&
(kernel_data.integrator.filter_closures & FILTER_CLOSURE_TRANSPARENT))) {
sc->type = CLOSURE_HOLDOUT_ID;
sc->sample_weight = 0.0f;
sd->flag |= SD_HOLDOUT;
}
}
}
}
/* Defensive sampling.
*
* We can likely also do defensive sampling at deeper bounces, particularly
* for cases like a perfect mirror but possibly also others. This will need
* a good heuristic. */
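/* Illustrative numbers: with two closures of sample_weight 0.9 and 0.1 the sum
 * is 1.0, so the clamp below raises the smaller weight to max(0.1, 0.125 * 1.0) =
 * 0.125, i.e. no BSDF/BSSRDF falls below one eighth of the pre-clamp weight sum
 * on the first bounce. */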
if (INTEGRATOR_STATE(state, path, bounce) + INTEGRATOR_STATE(state, path, transparent_bounce) ==
0 &&
sd->num_closure > 1) {
float sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
sum += sc->sample_weight;
}
}
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
sc->sample_weight = max(sc->sample_weight, 0.125f * sum);
}
}
}
/* Filter glossy.
*
* Blurring of bsdf after bounces, for rays that have a small likelihood
* of following this particular path (diffuse, rough glossy) */
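/* Illustrative numbers for the math below: with filter_glossy = 1 and
 * min_ray_pdf = 0.75, blur_pdf = 0.75 < 1, so glossy closures are blurred with
 * blur_roughness = sqrt(1 - 0.75) * 0.5 = 0.25; likelier paths (larger
 * min_ray_pdf) give blur_pdf >= 1 and are left unblurred. */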
if (kernel_data.integrator.filter_glossy != FLT_MAX
#ifdef __MNEE__
&& !(INTEGRATOR_STATE(state, path, mnee) & PATH_MNEE_VALID)
#endif
) {
float blur_pdf = kernel_data.integrator.filter_glossy *
INTEGRATOR_STATE(state, path, min_ray_pdf);
if (blur_pdf < 1.0f) {
float blur_roughness = sqrtf(1.0f - blur_pdf) * 0.5f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF(sc->type)) {
bsdf_blur(kg, sc, blur_roughness);
}
}
}
}
}
/* BSDF */
ccl_device_inline bool surface_shader_is_transmission(ccl_private const ShaderData *sd,
const float3 omega_in)
{
return dot(sd->N, omega_in) < 0.0f;
}
ccl_device_forceinline bool _surface_shader_exclude(ClosureType type, uint light_shader_flags)
{
if (!(light_shader_flags & SHADER_EXCLUDE_ANY)) {
return false;
}
if (light_shader_flags & SHADER_EXCLUDE_DIFFUSE) {
if (CLOSURE_IS_BSDF_DIFFUSE(type)) {
return true;
}
}
if (light_shader_flags & SHADER_EXCLUDE_GLOSSY) {
if (CLOSURE_IS_BSDF_GLOSSY(type)) {
return true;
}
}
if (light_shader_flags & SHADER_EXCLUDE_TRANSMIT) {
if (CLOSURE_IS_BSDF_TRANSMISSION(type)) {
return true;
}
}
return false;
}
ccl_device_inline float _surface_shader_bsdf_eval_mis(KernelGlobals kg,
ccl_private ShaderData *sd,
const float3 omega_in,
const bool is_transmission,
ccl_private const ShaderClosure *skip_sc,
ccl_private BsdfEval *result_eval,
float sum_pdf,
float sum_sample_weight,
const uint light_shader_flags)
{
/* This is the veach one-sample model with balance heuristic,
* some PDF factors drop out when using balance heuristic weighting. */
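/* Concretely, the loop below returns the weighted average of the per-closure pdfs,
 *   pdf(omega_in) = sum_i sample_weight_i * pdf_i(omega_in) / sum_j sample_weight_j,
 * which is the pdf of first picking a closure proportionally to its sample_weight
 * and then sampling a direction from it. */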
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (sc == skip_sc) {
continue;
}
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
if (CLOSURE_IS_BSDF(sc->type) && !_surface_shader_exclude(sc->type, light_shader_flags)) {
float bsdf_pdf = 0.0f;
Spectrum eval = bsdf_eval(kg, sd, sc, omega_in, is_transmission, &bsdf_pdf);
if (bsdf_pdf != 0.0f) {
bsdf_eval_accum(result_eval, sc->type, eval * sc->weight);
sum_pdf += bsdf_pdf * sc->sample_weight;
}
}
sum_sample_weight += sc->sample_weight;
}
}
return (sum_sample_weight > 0.0f) ? sum_pdf / sum_sample_weight : 0.0f;
}
#ifndef __KERNEL_CUDA__
ccl_device
#else
ccl_device_inline
#endif
float
surface_shader_bsdf_eval(KernelGlobals kg,
ccl_private ShaderData *sd,
const float3 omega_in,
const bool is_transmission,
ccl_private BsdfEval *bsdf_eval,
const uint light_shader_flags)
{
bsdf_eval_init(bsdf_eval, CLOSURE_NONE_ID, zero_spectrum());
return _surface_shader_bsdf_eval_mis(
kg, sd, omega_in, is_transmission, NULL, bsdf_eval, 0.0f, 0.0f, light_shader_flags);
}
/* Randomly sample a BSSRDF or BSDF proportional to ShaderClosure.sample_weight. */
ccl_device_inline ccl_private const ShaderClosure *surface_shader_bsdf_bssrdf_pick(
ccl_private const ShaderData *ccl_restrict sd, ccl_private float2 *rand_bsdf)
{
int sampled = 0;
if (sd->num_closure > 1) {
/* Pick a BSDF or BSSRDF based on sample weights. */
float sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
sum += sc->sample_weight;
}
}
float r = (*rand_bsdf).x * sum;
float partial_sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
float next_sum = partial_sum + sc->sample_weight;
if (r < next_sum) {
sampled = i;
/* Rescale to reuse for direction sample, to better preserve stratification. */
(*rand_bsdf).x = (r - partial_sum) / sc->sample_weight;
break;
}
partial_sum = next_sum;
}
}
}
return &sd->closure[sampled];
}
/* Return weight for picked BSSRDF. */
ccl_device_inline Spectrum
surface_shader_bssrdf_sample_weight(ccl_private const ShaderData *ccl_restrict sd,
ccl_private const ShaderClosure *ccl_restrict bssrdf_sc)
{
Spectrum weight = bssrdf_sc->weight;
if (sd->num_closure > 1) {
float sum = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
sum += sc->sample_weight;
}
}
weight *= sum / bssrdf_sc->sample_weight;
}
return weight;
}
/* Sample direction for picked BSDF, and return evaluation and pdf for all
* BSDFs combined using MIS. */
ccl_device int surface_shader_bsdf_sample_closure(KernelGlobals kg,
ccl_private ShaderData *sd,
ccl_private const ShaderClosure *sc,
const float2 rand_bsdf,
ccl_private BsdfEval *bsdf_eval,
ccl_private float3 *omega_in,
ccl_private float *pdf)
{
/* BSSRDF should already have been handled elsewhere. */
kernel_assert(CLOSURE_IS_BSDF(sc->type));
int label;
Spectrum eval = zero_spectrum();
*pdf = 0.0f;
label = bsdf_sample(kg, sd, sc, rand_bsdf.x, rand_bsdf.y, &eval, omega_in, pdf);
if (*pdf != 0.0f) {
bsdf_eval_init(bsdf_eval, sc->type, eval * sc->weight);
if (sd->num_closure > 1) {
const bool is_transmission = surface_shader_is_transmission(sd, *omega_in);
float sweight = sc->sample_weight;
*pdf = _surface_shader_bsdf_eval_mis(
kg, sd, *omega_in, is_transmission, sc, bsdf_eval, *pdf * sweight, sweight, 0);
}
}
return label;
}
ccl_device float surface_shader_average_roughness(ccl_private const ShaderData *sd)
{
float roughness = 0.0f;
float sum_weight = 0.0f;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF(sc->type)) {
/* sqrt once to undo the squaring from multiplying roughness on the
* two axes, and once for the squared roughness convention. */
float weight = fabsf(average(sc->weight));
roughness += weight * sqrtf(safe_sqrtf(bsdf_get_roughness_squared(sc)));
sum_weight += weight;
}
}
return (sum_weight > 0.0f) ? roughness / sum_weight : 0.0f;
}
ccl_device Spectrum surface_shader_transparency(KernelGlobals kg, ccl_private const ShaderData *sd)
{
if (sd->flag & SD_HAS_ONLY_VOLUME) {
return one_spectrum();
}
else if (sd->flag & SD_TRANSPARENT) {
return sd->closure_transparent_extinction;
}
else {
return zero_spectrum();
}
}
ccl_device void surface_shader_disable_transparency(KernelGlobals kg, ccl_private ShaderData *sd)
{
if (sd->flag & SD_TRANSPARENT) {
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (sc->type == CLOSURE_BSDF_TRANSPARENT_ID) {
sc->sample_weight = 0.0f;
sc->weight = zero_spectrum();
}
}
sd->flag &= ~SD_TRANSPARENT;
}
}
ccl_device Spectrum surface_shader_alpha(KernelGlobals kg, ccl_private const ShaderData *sd)
{
Spectrum alpha = one_spectrum() - surface_shader_transparency(kg, sd);
alpha = saturate(alpha);
return alpha;
}
ccl_device Spectrum surface_shader_diffuse(KernelGlobals kg, ccl_private const ShaderData *sd)
{
Spectrum eval = zero_spectrum();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_DIFFUSE(sc->type) || CLOSURE_IS_BSSRDF(sc->type))
eval += sc->weight;
}
return eval;
}
ccl_device Spectrum surface_shader_glossy(KernelGlobals kg, ccl_private const ShaderData *sd)
{
Spectrum eval = zero_spectrum();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_GLOSSY(sc->type))
eval += sc->weight;
}
return eval;
}
ccl_device Spectrum surface_shader_transmission(KernelGlobals kg, ccl_private const ShaderData *sd)
{
Spectrum eval = zero_spectrum();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_TRANSMISSION(sc->type))
eval += sc->weight;
}
return eval;
}
ccl_device float3 surface_shader_average_normal(KernelGlobals kg, ccl_private const ShaderData *sd)
{
float3 N = zero_float3();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type))
N += sc->N * fabsf(average(sc->weight));
}
return (is_zero(N)) ? sd->N : normalize(N);
}
ccl_device Spectrum surface_shader_ao(KernelGlobals kg,
ccl_private const ShaderData *sd,
const float ao_factor,
ccl_private float3 *N_)
{
Spectrum eval = zero_spectrum();
float3 N = zero_float3();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSDF_DIFFUSE(sc->type)) {
ccl_private const DiffuseBsdf *bsdf = (ccl_private const DiffuseBsdf *)sc;
eval += sc->weight * ao_factor;
N += bsdf->N * fabsf(average(sc->weight));
}
}
*N_ = (is_zero(N)) ? sd->N : normalize(N);
return eval;
}
#ifdef __SUBSURFACE__
ccl_device float3 surface_shader_bssrdf_normal(ccl_private const ShaderData *sd)
{
float3 N = zero_float3();
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_BSSRDF(sc->type)) {
ccl_private const Bssrdf *bssrdf = (ccl_private const Bssrdf *)sc;
float avg_weight = fabsf(average(sc->weight));
N += bssrdf->N * avg_weight;
}
}
return (is_zero(N)) ? sd->N : normalize(N);
}
#endif /* __SUBSURFACE__ */
/* Constant emission optimization */
ccl_device bool surface_shader_constant_emission(KernelGlobals kg,
int shader,
ccl_private Spectrum *eval)
{
int shader_index = shader & SHADER_MASK;
int shader_flag = kernel_data_fetch(shaders, shader_index).flags;
if (shader_flag & SD_HAS_CONSTANT_EMISSION) {
const float3 emission_rgb = make_float3(
kernel_data_fetch(shaders, shader_index).constant_emission[0],
kernel_data_fetch(shaders, shader_index).constant_emission[1],
kernel_data_fetch(shaders, shader_index).constant_emission[2]);
*eval = rgb_to_spectrum(emission_rgb);
return true;
}
return false;
}
/* Background */
ccl_device Spectrum surface_shader_background(ccl_private const ShaderData *sd)
{
if (sd->flag & SD_EMISSION) {
return sd->closure_emission_background;
}
else {
return zero_spectrum();
}
}
/* Emission */
ccl_device Spectrum surface_shader_emission(ccl_private const ShaderData *sd)
{
if (sd->flag & SD_EMISSION) {
return emissive_simple_eval(sd->Ng, sd->I) * sd->closure_emission_background;
}
else {
return zero_spectrum();
}
}
/* Holdout */
ccl_device Spectrum surface_shader_apply_holdout(KernelGlobals kg, ccl_private ShaderData *sd)
{
Spectrum weight = zero_spectrum();
/* For objects marked as holdout, preserve transparency and remove all other
* closures, replacing them with a holdout weight. */
if (sd->object_flag & SD_OBJECT_HOLDOUT_MASK) {
if ((sd->flag & SD_TRANSPARENT) && !(sd->flag & SD_HAS_ONLY_VOLUME)) {
weight = one_spectrum() - sd->closure_transparent_extinction;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sc = &sd->closure[i];
if (!CLOSURE_IS_BSDF_TRANSPARENT(sc->type)) {
sc->type = NBUILTIN_CLOSURES;
}
}
sd->flag &= ~(SD_CLOSURE_FLAGS - (SD_TRANSPARENT | SD_BSDF));
}
else {
weight = one_spectrum();
}
}
else {
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *sc = &sd->closure[i];
if (CLOSURE_IS_HOLDOUT(sc->type)) {
weight += sc->weight;
}
}
}
return weight;
}
/* Surface Evaluation */
template<uint node_feature_mask, typename ConstIntegratorGenericState>
ccl_device void surface_shader_eval(KernelGlobals kg,
ConstIntegratorGenericState state,
ccl_private ShaderData *ccl_restrict sd,
ccl_global float *ccl_restrict buffer,
uint32_t path_flag,
bool use_caustics_storage = false)
{
/* If the path is being terminated, or we are tracing a shadow ray or evaluating
* emission, then we don't need to store closures. The emission and shadow
* shader data also do not have a closure array, to save GPU memory. */
int max_closures;
if (path_flag & (PATH_RAY_TERMINATE | PATH_RAY_SHADOW | PATH_RAY_EMISSION)) {
max_closures = 0;
}
else {
max_closures = use_caustics_storage ? CAUSTICS_MAX_CLOSURE : kernel_data.max_closures;
}
sd->num_closure = 0;
sd->num_closure_left = max_closures;
#ifdef __OSL__
if (kg->osl) {
if (sd->object == OBJECT_NONE && sd->lamp == LAMP_NONE) {
OSLShader::eval_background(kg, state, sd, path_flag);
}
else {
OSLShader::eval_surface(kg, state, sd, path_flag);
}
}
else
#endif
{
#ifdef __SVM__
svm_eval_nodes<node_feature_mask, SHADER_TYPE_SURFACE>(kg, state, sd, buffer, path_flag);
#else
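/* SVM disabled: fall back to a constant 0.8 gray, as emission for the background
* and as a diffuse closure for surfaces. */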
if (sd->object == OBJECT_NONE) {
sd->closure_emission_background = make_spectrum(0.8f);
sd->flag |= SD_EMISSION;
}
else {
ccl_private DiffuseBsdf *bsdf = (ccl_private DiffuseBsdf *)bsdf_alloc(
sd, sizeof(DiffuseBsdf), make_spectrum(0.8f));
if (bsdf != NULL) {
bsdf->N = sd->N;
sd->flag |= bsdf_diffuse_setup(bsdf);
}
}
#endif
}
}
CCL_NAMESPACE_END

View File

@@ -0,0 +1,353 @@
/* SPDX-License-Identifier: Apache-2.0
* Copyright 2011-2022 Blender Foundation */
/* Volume shader evaluation and sampling. */
#pragma once
#include "kernel/closure/alloc.h"
#include "kernel/closure/bsdf.h"
#include "kernel/closure/bsdf_util.h"
#include "kernel/closure/emissive.h"
#include "kernel/svm/svm.h"
#ifdef __OSL__
# include "kernel/osl/shader.h"
#endif
CCL_NAMESPACE_BEGIN
#ifdef __VOLUME__
/* Merging */
ccl_device_inline void volume_shader_merge_closures(ccl_private ShaderData *sd)
{
/* Merge identical closures to save closure space with stacked volumes. */
for (int i = 0; i < sd->num_closure; i++) {
ccl_private ShaderClosure *sci = &sd->closure[i];
if (sci->type != CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID) {
continue;
}
for (int j = i + 1; j < sd->num_closure; j++) {
ccl_private ShaderClosure *scj = &sd->closure[j];
if (sci->type != scj->type) {
continue;
}
ccl_private const HenyeyGreensteinVolume *hgi = (ccl_private const HenyeyGreensteinVolume *)
sci;
ccl_private const HenyeyGreensteinVolume *hgj = (ccl_private const HenyeyGreensteinVolume *)
scj;
if (!(hgi->g == hgj->g)) {
continue;
}
sci->weight += scj->weight;
sci->sample_weight += scj->sample_weight;
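/* Closure j has been merged into closure i; remove it by shifting the remaining
* closures down one slot. */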
int size = sd->num_closure - (j + 1);
if (size > 0) {
for (int k = 0; k < size; k++) {
scj[k] = scj[k + 1];
}
}
sd->num_closure--;
kernel_assert(sd->num_closure >= 0);
j--;
}
}
}
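/* Copy Henyey-Greenstein phase closures from the shader data into a compact
* ShaderVolumePhases array for later phase evaluation and sampling. */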
ccl_device_inline void volume_shader_copy_phases(ccl_private ShaderVolumePhases *ccl_restrict
phases,
ccl_private const ShaderData *ccl_restrict sd)
{
phases->num_closure = 0;
for (int i = 0; i < sd->num_closure; i++) {
ccl_private const ShaderClosure *from_sc = &sd->closure[i];
ccl_private const HenyeyGreensteinVolume *from_hg =
(ccl_private const HenyeyGreensteinVolume *)from_sc;
if (from_sc->type == CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID) {
ccl_private ShaderVolumeClosure *to_sc = &phases->closure[phases->num_closure];
to_sc->weight = from_sc->weight;
to_sc->sample_weight = from_sc->sample_weight;
to_sc->g = from_hg->g;
phases->num_closure++;
if (phases->num_closure >= MAX_VOLUME_CLOSURE) {
break;
}
}
}
}
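/* Accumulate the evaluation of all phase closures (optionally skipping one), and return the
* MIS pdf: each closure's pdf weighted by its sample weight, normalized by the total
* sample weight. */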
ccl_device_inline float _volume_shader_phase_eval_mis(ccl_private const ShaderData *sd,
ccl_private const ShaderVolumePhases *phases,
const float3 omega_in,
int skip_phase,
ccl_private BsdfEval *result_eval,
float sum_pdf,
float sum_sample_weight)
{
for (int i = 0; i < phases->num_closure; i++) {
if (i == skip_phase)
continue;
ccl_private const ShaderVolumeClosure *svc = &phases->closure[i];
float phase_pdf = 0.0f;
Spectrum eval = volume_phase_eval(sd, svc, omega_in, &phase_pdf);
if (phase_pdf != 0.0f) {
bsdf_eval_accum(result_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval);
sum_pdf += phase_pdf * svc->sample_weight;
}
sum_sample_weight += svc->sample_weight;
}
return (sum_sample_weight > 0.0f) ? sum_pdf / sum_sample_weight : 0.0f;
}
ccl_device float volume_shader_phase_eval(KernelGlobals kg,
ccl_private const ShaderData *sd,
ccl_private const ShaderVolumePhases *phases,
const float3 omega_in,
ccl_private BsdfEval *phase_eval)
{
bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, zero_spectrum());
return _volume_shader_phase_eval_mis(sd, phases, omega_in, -1, phase_eval, 0.0f, 0.0f);
}
ccl_device int volume_shader_phase_sample(KernelGlobals kg,
ccl_private const ShaderData *sd,
ccl_private const ShaderVolumePhases *phases,
float2 rand_phase,
ccl_private BsdfEval *phase_eval,
ccl_private float3 *omega_in,
ccl_private float *pdf)
{
int sampled = 0;
if (phases->num_closure > 1) {
/* pick a phase closure based on sample weights */
float sum = 0.0f;
for (sampled = 0; sampled < phases->num_closure; sampled++) {
ccl_private const ShaderVolumeClosure *svc = &phases->closure[sampled];
sum += svc->sample_weight;
}
float r = rand_phase.x * sum;
float partial_sum = 0.0f;
for (sampled = 0; sampled < phases->num_closure; sampled++) {
ccl_private const ShaderVolumeClosure *svc = &phases->closure[sampled];
float next_sum = partial_sum + svc->sample_weight;
if (r <= next_sum) {
/* Rescale to reuse for BSDF direction sample. */
rand_phase.x = (r - partial_sum) / svc->sample_weight;
break;
}
partial_sum = next_sum;
}
if (sampled == phases->num_closure) {
*pdf = 0.0f;
return LABEL_NONE;
}
}
/* todo: this isn't quite correct; we don't weight anisotropy properly
* depending on color channels, even if this is perhaps not a common case. */
ccl_private const ShaderVolumeClosure *svc = &phases->closure[sampled];
int label;
Spectrum eval = zero_spectrum();
*pdf = 0.0f;
label = volume_phase_sample(sd, svc, rand_phase.x, rand_phase.y, &eval, omega_in, pdf);
if (*pdf != 0.0f) {
bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval);
}
return label;
}
ccl_device int volume_shader_phase_sample_closure(KernelGlobals kg,
ccl_private const ShaderData *sd,
ccl_private const ShaderVolumeClosure *sc,
const float2 rand_phase,
ccl_private BsdfEval *phase_eval,
ccl_private float3 *omega_in,
ccl_private float *pdf)
{
int label;
Spectrum eval = zero_spectrum();
*pdf = 0.0f;
label = volume_phase_sample(sd, sc, rand_phase.x, rand_phase.y, &eval, omega_in, pdf);
if (*pdf != 0.0f)
bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval);
return label;
}
/* Motion Blur */
# ifdef __OBJECT_MOTION__
ccl_device_inline void volume_shader_motion_blur(KernelGlobals kg,
ccl_private ShaderData *ccl_restrict sd)
{
if ((sd->object_flag & SD_OBJECT_HAS_VOLUME_MOTION) == 0) {
return;
}
AttributeDescriptor v_desc = find_attribute(kg, sd, ATTR_STD_VOLUME_VELOCITY);
kernel_assert(v_desc.offset != ATTR_STD_NOT_FOUND);
const float3 P = sd->P;
const float velocity_scale = kernel_data_fetch(objects, sd->object).velocity_scale;
const float time_offset = kernel_data.cam.motion_position == MOTION_POSITION_CENTER ? 0.5f :
0.0f;
const float time = kernel_data.cam.motion_position == MOTION_POSITION_END ?
(1.0f - kernel_data.cam.shuttertime) + sd->time :
sd->time;
/* Use a first-order semi-Lagrangian advection scheme to estimate what volume quantity
* existed, or will exist, at the given time:
*
* `phi(x, T) = phi(x - (T - t) * u(x, T), t)`
*
* where
*
* x : position
* T : super-sampled time (or ray time)
* t : current time of the simulation (in rendering we assume this is center frame with
* relative time = 0)
* phi : the volume quantity
* u : the velocity field
*
* But first we need to determine the velocity field `u(x, T)`, which we can also estimate
* using semi-Lagrangian advection.
*
* `u(x, T) = u(x - (T - t) * u(x, T), t)`
*
* This is the typical way to model self-advection in fluid dynamics; however, we do not
* account for other forces affecting the velocity during simulation (pressure, buoyancy,
* etc.), so this gives only a linear interpolation when the fluid is mostly "curvy". For better
* results, a higher order interpolation scheme can be used (at the cost of more lookups),
* or an interpolation of the velocity fields for the previous and next frames could also
* be used to estimate `u(x, T)` (which will cost more memory and lookups).
*
* References:
* "Eulerian Motion Blur", Kim and Ko, 2007
* "Production Volume Rendering", Wreninge et al., 2012
*/
/* Find velocity. */
float3 velocity = primitive_volume_attribute_float3(kg, sd, v_desc);
object_dir_transform(kg, sd, &velocity);
/* Find advected P. */
sd->P = P - (time - time_offset) * velocity_scale * velocity;
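/* Sample the velocity again at this first estimate of the departure point: effectively one
* fixed-point iteration of `u(x, T) = u(x - (T - t) * u(x, T), t)`. */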
/* Find advected velocity. */
velocity = primitive_volume_attribute_float3(kg, sd, v_desc);
object_dir_transform(kg, sd, &velocity);
/* Find advected P. */
sd->P = P - (time - time_offset) * velocity_scale * velocity;
}
# endif
/* Volume Evaluation */
template<const bool shadow, typename StackReadOp, typename ConstIntegratorGenericState>
ccl_device_inline void volume_shader_eval(KernelGlobals kg,
ConstIntegratorGenericState state,
ccl_private ShaderData *ccl_restrict sd,
const uint32_t path_flag,
StackReadOp stack_read)
{
/* If the path is being terminated, or we are tracing a shadow ray or evaluating
* emission, then we don't need to store closures. The emission and shadow
* shader data also do not have a closure array, to save GPU memory. */
int max_closures;
if (path_flag & (PATH_RAY_TERMINATE | PATH_RAY_SHADOW | PATH_RAY_EMISSION)) {
max_closures = 0;
}
else {
max_closures = kernel_data.max_closures;
}
/* Reset closures once at the start; we will be accumulating the closures
* for all volumes in the stack into a single array of closures. */
sd->num_closure = 0;
sd->num_closure_left = max_closures;
sd->flag = 0;
sd->object_flag = 0;
for (int i = 0;; i++) {
const VolumeStack entry = stack_read(i);
if (entry.shader == SHADER_NONE) {
break;
}
/* Set up shader data from the stack. It's mostly set up already in
* shader_setup_from_volume, so this switching should be quick. */
sd->object = entry.object;
sd->lamp = LAMP_NONE;
sd->shader = entry.shader;
sd->flag &= ~SD_SHADER_FLAGS;
sd->flag |= kernel_data_fetch(shaders, (sd->shader & SHADER_MASK)).flags;
sd->object_flag &= ~SD_OBJECT_FLAGS;
if (sd->object != OBJECT_NONE) {
sd->object_flag |= kernel_data_fetch(object_flag, sd->object);
# ifdef __OBJECT_MOTION__
/* todo: this is inefficient for motion blur; we should be
* caching matrices instead of recomputing them each step. */
shader_setup_object_transforms(kg, sd, sd->time);
volume_shader_motion_blur(kg, sd);
# endif
}
/* evaluate shader */
# ifdef __SVM__
# ifdef __OSL__
if (kg->osl) {
OSLShader::eval_volume(kg, state, sd, path_flag);
}
else
# endif
{
svm_eval_nodes<KERNEL_FEATURE_NODE_MASK_VOLUME, SHADER_TYPE_VOLUME>(
kg, state, sd, NULL, path_flag);
}
# endif
/* Merge closures to avoid exceeding number of closures limit. */
if (!shadow) {
if (i > 0) {
volume_shader_merge_closures(sd);
}
}
}
}
#endif /* __VOLUME__ */
CCL_NAMESPACE_END

View File

@@ -4,7 +4,7 @@
#pragma once
#include "kernel/integrator/path_state.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/integrator/surface_shader.h"
#include "kernel/light/light.h"
@@ -24,13 +24,13 @@ light_sample_shader_eval(KernelGlobals kg,
/* setup shading at emitter */
Spectrum eval = zero_spectrum();
-if (shader_constant_emission_eval(kg, ls->shader, &eval)) {
+if (surface_shader_constant_emission(kg, ls->shader, &eval)) {
if ((ls->prim != PRIM_NONE) && dot(ls->Ng, ls->D) > 0.0f) {
ls->Ng = -ls->Ng;
}
}
else {
-/* Setup shader data and call shader_eval_surface once, better
+/* Setup shader data and call surface_shader_eval once, better
* for GPU coherence and compile times. */
PROFILING_INIT_FOR_SHADER(kg, PROFILING_SHADE_LIGHT_SETUP);
if (ls->type == LIGHT_BACKGROUND) {
@@ -60,15 +60,15 @@ light_sample_shader_eval(KernelGlobals kg,
/* No proper path flag, we're evaluating this for all closures. that's
* weak but we'd have to do multiple evaluations otherwise. */
-shader_eval_surface<KERNEL_FEATURE_NODE_MASK_SURFACE_LIGHT>(
+surface_shader_eval<KERNEL_FEATURE_NODE_MASK_SURFACE_LIGHT>(
kg, state, emission_sd, NULL, PATH_RAY_EMISSION);
/* Evaluate closures. */
if (ls->type == LIGHT_BACKGROUND) {
-eval = shader_background_eval(emission_sd);
+eval = surface_shader_background(emission_sd);
}
else {
-eval = shader_emissive_eval(emission_sd);
+eval = surface_shader_emission(emission_sd);
}
}

View File

@@ -27,7 +27,6 @@
#include "util/log.h"
#include "util/string.h"
// clang-format off
#include "kernel/device/cpu/compat.h"
#include "kernel/device/cpu/globals.h"
#include "kernel/device/cpu/image.h"
@@ -45,10 +44,10 @@
#include "kernel/camera/projection.h"
#include "kernel/integrator/path_state.h"
#include "kernel/integrator/shader_eval.h"
#include "kernel/svm/svm.h"
#include "kernel/util/color.h"
// clang-format on
CCL_NAMESPACE_BEGIN

View File

@@ -3,6 +3,11 @@
#pragma once
#include "kernel/closure/alloc.h"
#include "kernel/closure/bsdf.h"
#include "kernel/closure/bsdf_util.h"
#include "kernel/closure/emissive.h"
#include "kernel/util/color.h"
CCL_NAMESPACE_BEGIN