EEVEE Next: Ambient Occlusion #108398

Merged
Miguel Pozo merged 29 commits from pragma37/blender:pull-eevee-next-ao into main 2023-06-30 19:37:37 +02:00
29 changed files with 894 additions and 40 deletions

View File

@ -145,6 +145,26 @@ class RENDER_PT_eevee_ambient_occlusion(RenderButtonsPanel, Panel):
col.prop(props, "use_gtao_bounce")
class RENDER_PT_eevee_next_ambient_occlusion(RenderButtonsPanel, Panel):
bl_label = "Ambient Occlusion"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_EEVEE_NEXT'}
@classmethod
def poll(cls, context):
return (context.engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
scene = context.scene
pragma37 marked this conversation as resolved

Do not make it optional. Always enable it if render pass is needed. It might become a core part of the raytracing module.

Do not make it optional. Always enable it if render pass is needed. It might become a core part of the raytracing module.
Review

Always enable it if render pass is needed

It already works that way. See AmbientOcclusion::init.

Do you want to always enable it for the main shading too?
I think some users wouldn’t like that.

> Always enable it if render pass is needed It already works that way. See `AmbientOcclusion::init`. Do you want to always enable it for the main shading too? I think some users wouldn’t like that.

Do you want to always enable it for the main shading too?

Kind of. If it has to be an option it would be elsewhere.

> Do you want to always enable it for the main shading too? Kind of. If it has to be an option it would be elsewhere.
props = scene.eevee
col = layout.column()
col.prop(props, "gtao_distance")
col.prop(props, "gtao_quality")
class RENDER_PT_eevee_motion_blur(RenderButtonsPanel, Panel):
bl_label = "Motion Blur"
bl_options = {'DEFAULT_CLOSED'}
@ -935,6 +955,7 @@ classes = (
RENDER_PT_eevee_sampling,
RENDER_PT_eevee_next_sampling,
RENDER_PT_eevee_ambient_occlusion,
RENDER_PT_eevee_next_ambient_occlusion,
RENDER_PT_eevee_bloom,
RENDER_PT_eevee_depth_of_field,
RENDER_PT_eevee_next_depth_of_field,

View File

@ -140,6 +140,7 @@ set(SRC
engines/eevee/eevee_subsurface.c
engines/eevee/eevee_temporal_sampling.c
engines/eevee/eevee_volumes.c
engines/eevee_next/eevee_ambient_occlusion.cc
engines/eevee_next/eevee_camera.cc
engines/eevee_next/eevee_cryptomatte.cc
engines/eevee_next/eevee_depth_of_field.cc
@ -282,6 +283,7 @@ set(SRC
engines/eevee/eevee_lut.h
engines/eevee/eevee_private.h
engines/eevee/engine_eevee_shared_defines.h
engines/eevee_next/eevee_ambient_occlusion.hh
engines/eevee_next/eevee_camera.hh
engines/eevee_next/eevee_cryptomatte.hh
engines/eevee_next/eevee_depth_of_field.hh
@ -459,6 +461,8 @@ set(GLSL_SRC
engines/eevee/shaders/infos/engine_eevee_legacy_shared.h
engines/eevee/engine_eevee_shared_defines.h
engines/eevee_next/shaders/eevee_ambient_occlusion_lib.glsl
engines/eevee_next/shaders/eevee_ambient_occlusion_pass_comp.glsl
engines/eevee_next/shaders/eevee_attributes_lib.glsl
engines/eevee_next/shaders/eevee_camera_lib.glsl
engines/eevee_next/shaders/eevee_colorspace_lib.glsl
@ -514,6 +518,7 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_motion_blur_gather_comp.glsl
engines/eevee_next/shaders/eevee_motion_blur_lib.glsl
engines/eevee_next/shaders/eevee_nodetree_lib.glsl
engines/eevee_next/shaders/eevee_ray_types_lib.glsl
engines/eevee_next/shaders/eevee_reflection_probe_eval_lib.glsl
engines/eevee_next/shaders/eevee_reflection_probe_lib.glsl
engines/eevee_next/shaders/eevee_sampling_lib.glsl

View File

@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup eevee
*
* Ground Truth Ambient Occlusion
*
* Based on Practical Realtime Strategies for Accurate Indirect Occlusion
* http://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf
* http://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pptx
*
* Algorithm Overview:
*
* We separate the computation into 2 steps.
*
* - First we scan the neighborhood pixels to find the maximum horizon angle.
* We save this angle in a RG8 array texture.
*
* - Then we use this angle to compute occlusion with the shading normal at
* the shading stage. This let us do correct shadowing for each diffuse / specular
* lobe present in the shader using the correct normal.
*/
#pragma once
#include "eevee_ambient_occlusion.hh"
#include "eevee_instance.hh"
#include "GPU_capabilities.h"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name AmbientOcclusion
* \{ */
void AmbientOcclusion::init()
{
render_pass_enabled_ = inst_.film.enabled_passes_get() & EEVEE_RENDER_PASS_AO;
data_.distance = inst_.scene->eevee.gtao_distance;
data_.quality = inst_.scene->eevee.gtao_quality;
/* Size is multiplied by 2 because it is applied in NDC [-1..1] range. */
data_.pixel_size = float2(2.0f) / float2(inst_.film.render_extent_get());
data_.push_update();
}
/* Build the compute pass that renders the standalone AO render pass.
 * Skipped entirely when the AO render pass is not requested (see init()). */
void AmbientOcclusion::sync()
{
  if (!render_pass_enabled_) {
    return;
  }
  render_pass_ps_.init();
  render_pass_ps_.shader_set(inst_.shaders.static_shader_get(AMBIENT_OCCLUSION_PASS));
  render_pass_ps_.bind_texture(RBUFS_UTILITY_TEX_SLOT, &inst_.pipelines.utility_tx);
  inst_.sampling.bind_resources(&render_pass_ps_);
  inst_.hiz_buffer.bind_resources(&render_pass_ps_);
  bind_resources(&render_pass_ps_);
  /* Texture views into the render-pass arrays, set in render_pass(). */
  render_pass_ps_.bind_image("in_normal_img", &rp_normal_tx_);
  render_pass_ps_.bind_image("out_ao_img", &rp_ao_tx_);
  /* Barrier bits must be combined with bitwise OR: `&` of two distinct flag bits
   * evaluates to 0 and would request no barrier at all. */
  render_pass_ps_.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS | GPU_BARRIER_TEXTURE_FETCH);
  render_pass_ps_.dispatch(
      math::divide_ceil(inst_.film.render_extent_get(), int2(AMBIENT_OCCLUSION_PASS_TILE_SIZE)));
}
/* Submit the AO compute pass for this view, writing the result into the
 * ambient-occlusion layer of the value render buffers. */
void AmbientOcclusion::render_pass(View &view)
{
  if (!render_pass_enabled_) {
    return;
  }
  /* The AO shader samples the depth Hi-Z pyramid (hiz_tx); keep it up to date first. */
  inst_.hiz_buffer.update();

  RenderBuffers &rb = inst_.render_buffers;

  /* Create per-layer texture views so single layers can be bound as images
   * (must exist before submit dereferences rp_normal_tx_ / rp_ao_tx_). */
  rb.rp_color_tx.ensure_layer_views();
  rp_normal_tx_ = rb.rp_color_tx.layer_view(rb.data.normal_id);
  rb.rp_value_tx.ensure_layer_views();
  rp_ao_tx_ = rb.rp_value_tx.layer_view(rb.data.ambient_occlusion_id);

  inst_.manager->submit(render_pass_ps_, view);
}
} // namespace blender::eevee

View File

@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup eevee
*
* Ground Truth Ambient Occlusion
*
* Based on Practical Realtime Strategies for Accurate Indirect Occlusion
* http://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf
* http://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pptx
*
*/
#pragma once
#include "eevee_shader_shared.hh"
namespace blender::eevee {
class Instance;
/* -------------------------------------------------------------------- */
/** \name AmbientOcclusion
* \{ */
/* Ground Truth Ambient Occlusion module. Owns the settings UBO and the optional
 * standalone AO render pass (see eevee_ambient_occlusion.cc). */
class AmbientOcclusion {
 private:
  /* Back-reference to the owning instance (not owned). */
  class Instance &inst_;

  /* True when the film requests the AO render pass (set in init()). */
  bool render_pass_enabled_;

  /* GPU uniform buffer holding user settings (distance, quality, pixel size). */
  AODataBuf data_;

  PassSimple render_pass_ps_ = {"AO Render Pass"};
  /* Used as pointers for texture views in the AO Render Pass. */
  GPUTexture *rp_normal_tx_ = nullptr;
  GPUTexture *rp_ao_tx_ = nullptr;

 public:
  AmbientOcclusion(Instance &inst) : inst_(inst){};
  ~AmbientOcclusion(){};

  void init();
  void sync();
  /* NOTE(review): `render()` is declared but no definition is visible in this patch —
   * confirm it exists or remove the declaration. */
  void render(View &view);
  void render_pass(View &view);

  /* Binds only this module's own UBO (other modules bind their own resources). */
  template<typename T> void bind_resources(draw::detail::PassBase<T> *pass)
  {
    pass->bind_ubo(AO_BUF_SLOT, &data_);
pragma37 marked this conversation as resolved Outdated

Do not bind other resources here. Will result in double binds and it is against the design of this function.

Do not bind other resources here. Will result in double binds and it is against the design of this function.
  }
};
/** \} */
} // namespace blender::eevee

View File

@ -90,6 +90,9 @@
#define DOF_GATHER_GROUP_SIZE DOF_TILES_SIZE
#define DOF_RESOLVE_GROUP_SIZE (DOF_TILES_SIZE * 2)
/* Ambient Occlusion. */
#define AMBIENT_OCCLUSION_PASS_TILE_SIZE 16
/* IrradianceBake. */
#define SURFEL_GROUP_SIZE 256
#define SURFEL_LIST_GROUP_SIZE 256
@ -126,6 +129,7 @@
/* Only during surface shading (forward and deferred eval). */
#define HIZ_BUF_SLOT 3
#define IRRADIANCE_GRID_BUF_SLOT 4
#define AO_BUF_SLOT 5
/* Only during pre-pass. */
#define VELOCITY_CAMERA_PREV_BUF 3
#define VELOCITY_CAMERA_CURR_BUF 4

View File

@ -105,13 +105,12 @@ class Film {
int cryptomatte_layer_max_get() const;
int cryptomatte_layer_len_get() const;
/** WARNING: Film and RenderBuffers use different storage types for AO and Shadow. */
static ePassStorageType pass_storage_type(eViewLayerEEVEEPassType pass_type)
{
switch (pass_type) {
case EEVEE_RENDER_PASS_Z:
case EEVEE_RENDER_PASS_MIST:
case EEVEE_RENDER_PASS_SHADOW:
case EEVEE_RENDER_PASS_AO:
return PASS_STORAGE_VALUE;
case EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT:
case EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET:
@ -124,19 +123,8 @@ class Film {
static bool pass_is_float3(eViewLayerEEVEEPassType pass_type)
{
switch (pass_type) {
case EEVEE_RENDER_PASS_NORMAL:
case EEVEE_RENDER_PASS_DIFFUSE_LIGHT:
case EEVEE_RENDER_PASS_DIFFUSE_COLOR:
case EEVEE_RENDER_PASS_SPECULAR_LIGHT:
case EEVEE_RENDER_PASS_SPECULAR_COLOR:
case EEVEE_RENDER_PASS_VOLUME_LIGHT:
case EEVEE_RENDER_PASS_EMIT:
case EEVEE_RENDER_PASS_ENVIRONMENT:
return true;
default:
return false;
}
return pass_storage_type(pass_type) == PASS_STORAGE_COLOR &&
pass_type != EEVEE_RENDER_PASS_COMBINED;
}
/* Returns layer offset in the accumulation texture. -1 if the pass is not enabled. */

View File

@ -75,11 +75,10 @@ class HiZBuffer {
DRW_shgroup_uniform_block_ref(grp, "hiz_buf", &data_);
}
/* TODO(fclem): Hardcoded bind slots. */
template<typename T> void bind_resources(draw::detail::PassBase<T> *pass)
{
pass->bind_texture("hiz_tx", &hiz_tx_);
pass->bind_ubo("hiz_buf", &data_);
pass->bind_texture(HIZ_TEX_SLOT, &hiz_tx_);
pass->bind_ubo(HIZ_BUF_SLOT, &data_);
}
};

View File

@ -64,6 +64,7 @@ void Instance::init(const int2 &output_res,
sampling.init(scene);
camera.init();
film.init(output_res, output_rect);
ambient_occlusion.init();
velocity.init();
depth_of_field.init();
shadows.init();
@ -150,6 +151,7 @@ void Instance::begin_sync()
world.sync();
film.sync();
render_buffers.sync();
ambient_occlusion.sync();
irradiance_cache.sync();
}
@ -448,9 +450,8 @@ void Instance::update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view
CHECK_PASS_EEVEE(VOLUME_LIGHT, SOCK_RGBA, 3, "RGB");
CHECK_PASS_LEGACY(EMIT, SOCK_RGBA, 3, "RGB");
CHECK_PASS_LEGACY(ENVIRONMENT, SOCK_RGBA, 3, "RGB");
/* TODO: CHECK_PASS_LEGACY(SHADOW, SOCK_RGBA, 3, "RGB");
* CHECK_PASS_LEGACY(AO, SOCK_RGBA, 3, "RGB");
* When available they should be converted from Value textures to RGB. */
CHECK_PASS_LEGACY(SHADOW, SOCK_RGBA, 3, "RGB");
CHECK_PASS_LEGACY(AO, SOCK_RGBA, 3, "RGB");
LISTBASE_FOREACH (ViewLayerAOV *, aov, &view_layer->aovs) {
if ((aov->flag & AOV_CONFLICT) != 0) {

View File

@ -15,6 +15,7 @@
#include "DNA_lightprobe_types.h"
#include "DRW_render.h"
#include "eevee_ambient_occlusion.hh"
#include "eevee_camera.hh"
#include "eevee_cryptomatte.hh"
#include "eevee_depth_of_field.hh"
@ -55,6 +56,7 @@ class Instance {
PipelineModule pipelines;
ShadowModule shadows;
LightModule lights;
AmbientOcclusion ambient_occlusion;
ReflectionProbeModule reflection_probes;
VelocityModule velocity;
MotionBlurModule motion_blur;
@ -108,6 +110,7 @@ class Instance {
pipelines(*this),
shadows(*this),
lights(*this),
ambient_occlusion(*this),
reflection_probes(*this),
velocity(*this),
motion_blur(*this),

View File

@ -200,6 +200,8 @@ void ForwardPipeline::sync()
inst_.lights.bind_resources(&opaque_ps_);
inst_.shadows.bind_resources(&opaque_ps_);
inst_.sampling.bind_resources(&opaque_ps_);
inst_.hiz_buffer.bind_resources(&opaque_ps_);
inst_.ambient_occlusion.bind_resources(&opaque_ps_);
inst_.cryptomatte.bind_resources(&opaque_ps_);
}
@ -227,6 +229,8 @@ void ForwardPipeline::sync()
inst_.lights.bind_resources(&sub);
inst_.shadows.bind_resources(&sub);
inst_.sampling.bind_resources(&sub);
inst_.hiz_buffer.bind_resources(&sub);
inst_.ambient_occlusion.bind_resources(&sub);
}
}
@ -380,6 +384,8 @@ void DeferredLayer::begin_sync()
gbuffer_ps_.bind_ubo(RBUFS_BUF_SLOT, &inst_.render_buffers.data);
inst_.sampling.bind_resources(&gbuffer_ps_);
inst_.hiz_buffer.bind_resources(&gbuffer_ps_);
inst_.ambient_occlusion.bind_resources(&gbuffer_ps_);
inst_.cryptomatte.bind_resources(&gbuffer_ps_);
}
@ -421,6 +427,7 @@ void DeferredLayer::end_sync()
inst_.shadows.bind_resources(&eval_light_ps_);
inst_.sampling.bind_resources(&eval_light_ps_);
inst_.hiz_buffer.bind_resources(&eval_light_ps_);
inst_.ambient_occlusion.bind_resources(&eval_light_ps_);
inst_.reflection_probes.bind_resources(&eval_light_ps_);
inst_.irradiance_cache.bind_resources(&eval_light_ps_);

View File

@ -31,15 +31,15 @@ void RenderBuffers::sync()
data.color_len = 0;
data.value_len = 0;
auto pass_index_get = [&](eViewLayerEEVEEPassType pass_type) {
if (enabled_passes & pass_type) {
return inst_.film.pass_storage_type(pass_type) == PASS_STORAGE_COLOR ? data.color_len++ :
data.value_len++;
auto pass_index_get = [&](eViewLayerEEVEEPassType pass_type, int dependent_passes = 0) {
if (enabled_passes & (pass_type | dependent_passes)) {
return pass_storage_type(pass_type) == PASS_STORAGE_COLOR ? data.color_len++ :
data.value_len++;
}
return -1;
};
data.normal_id = pass_index_get(EEVEE_RENDER_PASS_NORMAL);
data.normal_id = pass_index_get(EEVEE_RENDER_PASS_NORMAL, EEVEE_RENDER_PASS_AO);
data.diffuse_light_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_LIGHT);
data.diffuse_color_id = pass_index_get(EEVEE_RENDER_PASS_DIFFUSE_COLOR);
data.specular_light_id = pass_index_get(EEVEE_RENDER_PASS_SPECULAR_LIGHT);

View File

@ -40,6 +40,24 @@ class RenderBuffers {
public:
RenderBuffers(Instance &inst) : inst_(inst){};
/** WARNING: RenderBuffers and Film use different storage types for AO and Shadow. */
/* Returns how `pass_type` is stored inside the intermediate render buffers:
 * a single float per pixel, a cryptomatte sample, or (default) a color. */
static ePassStorageType pass_storage_type(eViewLayerEEVEEPassType pass_type)
{
  switch (pass_type) {
    /* Single-channel float passes. */
    case EEVEE_RENDER_PASS_Z:
    case EEVEE_RENDER_PASS_MIST:
    case EEVEE_RENDER_PASS_SHADOW:
    case EEVEE_RENDER_PASS_AO:
      return PASS_STORAGE_VALUE;
    case EEVEE_RENDER_PASS_CRYPTOMATTE_OBJECT:
    case EEVEE_RENDER_PASS_CRYPTOMATTE_ASSET:
    case EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL:
      return PASS_STORAGE_CRYPTOMATTE;
    /* Everything else accumulates as color. */
    default:
      return PASS_STORAGE_COLOR;
  }
}
void sync();
/* Acquires (also ensures) the render buffer before rendering to them. */

View File

@ -123,6 +123,9 @@ void Sampling::step()
/* TODO de-correlate. */
data_.dimensions[SAMPLING_LIGHTPROBE] = r[0];
data_.dimensions[SAMPLING_TRANSPARENCY] = r[1];
/* TODO de-correlate. */
data_.dimensions[SAMPLING_AO_U] = r[0];
data_.dimensions[SAMPLING_AO_V] = r[1];
}
{
/* Using leaped Halton sequence so we can reuse the same primes as the lens. */

View File

@ -80,6 +80,8 @@ ShaderModule::~ShaderModule()
const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_type)
{
switch (shader_type) {
case AMBIENT_OCCLUSION_PASS:
return "eevee_ambient_occlusion_pass";
case FILM_FRAG:
return "eevee_film_frag";
case FILM_COMP:
@ -259,6 +261,13 @@ void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOu
info.additional_info("eevee_render_pass_out");
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_AO) &&
ELEM(pipeline_type, MAT_PIPE_FORWARD, MAT_PIPE_DEFERRED) &&
ELEM(geometry_type, MAT_GEOM_MESH, MAT_GEOM_CURVES))
{
info.define("MAT_AMBIENT_OCCLUSION");
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
info.define("MAT_TRANSPARENT");
/* Transparent material do not have any velocity specific pipeline. */

View File

@ -26,7 +26,9 @@ namespace blender::eevee {
/* Keep alphabetical order and clean prefix. */
enum eShaderType {
FILM_FRAG = 0,
AMBIENT_OCCLUSION_PASS = 0,
FILM_FRAG,
FILM_COMP,
FILM_CRYPTOMATTE_POST,

View File

@ -98,14 +98,16 @@ enum eSamplingDimension : uint32_t {
SAMPLING_RAYTRACE_U = 15u,
SAMPLING_RAYTRACE_V = 16u,
SAMPLING_RAYTRACE_W = 17u,
SAMPLING_RAYTRACE_X = 18u
SAMPLING_RAYTRACE_X = 18u,
SAMPLING_AO_U = 19u,
SAMPLING_AO_V = 20u,
};
/**
* IMPORTANT: Make sure the array can contain all sampling dimensions.
* Also note that it needs to be multiple of 4.
*/
#define SAMPLING_DIMENSION_COUNT 20
#define SAMPLING_DIMENSION_COUNT 24
/* NOTE(@fclem): Needs to be used in #StorageBuffer because of arrays of scalar. */
struct SamplingData {
@ -979,6 +981,19 @@ enum eClosureBits : uint32_t {
/** \} */
/* -------------------------------------------------------------------- */
/** \name Ambient Occlusion
* \{ */
/* GPU uniform data for the ambient occlusion module (bound at AO_BUF_SLOT). */
struct AOData {
  /* Maximum horizon-search distance, used as the tracing radius. */
  float distance;
  /* Controls the Hi-Z mip level selection during the horizon search
   * (higher quality means finer mips / more precise sampling). */
  float quality;
  /* One pixel expressed in NDC units: 2.0 / render extent per axis. */
  float2 pixel_size;
};
BLI_STATIC_ASSERT_ALIGN(AOData, 16)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Subsurface
* \{ */
@ -1080,6 +1095,7 @@ using VelocityGeometryBuf = draw::StorageArrayBuffer<float4, 16, true>;
using VelocityIndexBuf = draw::StorageArrayBuffer<VelocityIndex, 16>;
using VelocityObjectBuf = draw::StorageArrayBuffer<float4x4, 16>;
using CryptomatteObjectBuf = draw::StorageArrayBuffer<float2, 16>;
using AODataBuf = draw::UniformBuffer<AOData>;
} // namespace blender::eevee
#endif

View File

@ -123,6 +123,7 @@ void ShadingView::render()
/* TODO(fclem): Move it after the first prepass (and hiz update) once pipeline is stabilized. */
inst_.lights.set_view(render_view_new_, extent_);
/* TODO(Miguel Pozo): Deferred and forward prepass should happen before the GBuffer pass. */
inst_.pipelines.deferred.render(render_view_new_, prepass_fb_, combined_fb_, extent_);
// inst_.lookdev.render_overlay(view_fb_);
@ -134,8 +135,9 @@ void ShadingView::render()
inst_.shadows.debug_draw(render_view_new_, combined_fb_);
inst_.irradiance_cache.viewport_draw(render_view_new_, combined_fb_);
GPUTexture *combined_final_tx = render_postfx(rbufs.combined_tx);
inst_.ambient_occlusion.render_pass(render_view_new_);
GPUTexture *combined_final_tx = render_postfx(rbufs.combined_tx);
inst_.film.accumulate(sub_view_, combined_final_tx);
rbufs.release();

View File

@ -0,0 +1,397 @@
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_geom_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_sampling_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_ray_types_lib.glsl)
/* TODO(Miguel Pozo): Move this function somewhere else. */
/* Return a fitted cone angle cosine for an input roughness `r` in [0..1]. */
float ambient_occlusion_cone_cosine(float r)
{
  /* Alternative fits below are expressed with phong gloss, where
   * roughness = sqrt(2 / (gloss + 2)), i.e. gloss = -2 + 2 / (r * r).
   * NOTE: the live `gloss` computation was removed — it was only referenced by the
   * commented-out variants and divides by zero when r == 0. */
  /* Drobot 2014 in GPUPro5 */
  // return cos(2.0 * sqrt(2.0 / (gloss + 2)));
  /* Uludag 2014 in GPUPro5 */
  // return pow(0.244, 1 / (gloss + 1));
  /* Jimenez 2016 in Practical Realtime Strategies for Accurate Indirect Occlusion. */
  return exp2(-3.32193 * r * r);
}
/* Based on Practical Realtime Strategies for Accurate Indirect Occlusion
 * http://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pdf
 * http://blog.selfshadow.com/publications/s2016-shading-course/activision/s2016_pbs_activision_occlusion.pptx
 */

/* Compile-time feature switches. */
#define AO_BENT_NORMALS true
#define AO_MULTI_BOUNCE true

/* Result of the horizon search for one pixel. */
struct OcclusionData {
  /* 4 horizon angles, one in each direction around the view vector to form a cross pattern. */
  vec4 horizons;
  /* Custom large scale occlusion. */
  float custom_occlusion;
};
/* Constructor-like helper for OcclusionData. */
OcclusionData ambient_occlusion_data(vec4 horizons, float custom_occlusion)
{
  OcclusionData data;
  data.horizons = horizons;
  data.custom_occlusion = custom_occlusion;
  return data;
}

/* No Occlusion Data: fully open horizons (+/-PI) and no custom occlusion. */
OcclusionData ambient_occlusion_disabled_data()
{
  return ambient_occlusion_data(vec4(M_PI, -M_PI, M_PI, -M_PI), 1.0);
}

/* Remap the signed horizon angles to [0..1] for storage
 * (presumably in the RG8 array texture mentioned in the file header — confirm). */
vec4 ambient_occlusion_pack_data(OcclusionData data)
{
  return vec4(1.0 - data.horizons * vec4(1, -1, 1, -1) * M_1_PI);
}

/* Inverse of ambient_occlusion_pack_data().
 * NOTE: custom_occlusion is not stored, so it unpacks as 0.0. */
OcclusionData ambient_occlusion_unpack_data(vec4 v)
{
  return ambient_occlusion_data((1.0 - v) * vec4(1, -1, 1, -1) * M_PI, 0.0);
}
/* Per-pixel blue-noise jitter, decorrelated across samples with the AO sampling dimensions. */
vec2 ambient_occlusion_get_noise(ivec2 texel)
{
  vec2 noise = utility_tx_fetch(utility_tx, texel, UTIL_BLUE_NOISE_LAYER).xy;
  return fract(noise + sampling_rng_2D_get(SAMPLING_AO_U));
}

/* Jittered tracing direction for the first slice (second slice is this rotated 90 degrees). */
vec2 ambient_occlusion_get_dir(float jitter)
{
  /* Only a quarter of a turn because we integrate using 2 slices.
   * We use this instead of using utiltex circle noise to improve cache hits
   * since all tracing direction will be in the same quadrant. */
  jitter *= M_PI_2;
  return vec2(cos(jitter), sin(jitter));
}
/* Return horizon angle cosine. */
/* March the screen-space ray `ssray`, sampling the Hi-Z depth pyramid, and track the
 * maximum horizon angle seen from view-space position `vP` along view vector `vI`.
 * `inverted` != 0 searches the inverted (below-surface) horizon instead.
 * NOTE(review): the doubled `ambient_ambient_` prefix looks unintentional but is used
 * consistently at the call sites — confirm before renaming. */
float ambient_ambient_occlusion_search_horizon(vec3 vI,
                                               vec3 vP,
                                               float noise,
                                               ScreenSpaceRay ssray,
                                               sampler2D depth_tx,
                                               const float inverted,
                                               float radius,
                                               const float sample_count)
{
  /* Init at cos(M_PI). */
  float h = (inverted != 0.0) ? 1.0 : -1.0;

  ssray.max_time -= 1.0;
  if (ssray.max_time <= 2.0) {
    /* Produces self shadowing under this threshold. */
    return fast_acos(h);
  }

  float prev_time, time = 0.0;
  for (float iter = 0.0; time < ssray.max_time && iter < sample_count; iter++) {
    prev_time = time;
    /* Gives us good precision at center and ensure we cross at least one pixel per iteration. */
    time = 1.0 + iter + sqr((iter + noise) / sample_count) * ssray.max_time;
    float stride = time - prev_time;
    /* Pick a coarser Hi-Z mip for longer strides; `quality` sharpens the selection. */
    float lod = (log2(stride) - noise) / (1.0 + ao_buf.quality);

    vec2 uv = ssray.origin.xy + ssray.direction.xy * time;
    float depth = textureLod(depth_tx, uv * hiz_buf.uv_scale, floor(lod)).r;

    if (depth == 1.0 && inverted == 0.0) {
      /* Skip background. Avoids making shadow on the geometry near the far plane. */
      continue;
    }

    /* Bias depth a bit to avoid self shadowing issues. */
    const float bias = 2.0 * 2.4e-7;
    depth += (inverted != 0.0) ? -bias : bias;

    vec3 s = get_view_space_from_depth(uv, depth);
    vec3 omega_s = s - vP;
    float len = length(omega_s);
    /* Sample's horizon angle cosine. */
    float s_h = dot(vI, omega_s / len);
    /* Blend weight to fade artifacts. */
    float dist_ratio = abs(len) / radius;
    /* Sphere falloff. */
    float dist_fac = sqr(saturate(dist_ratio));
    /* Unbiased, gives too much hard cut behind objects */
    // float dist_fac = step(0.999, dist_ratio);

    if (inverted != 0.0) {
      h = min(h, s_h);
    }
    else {
      /* Fade the contribution of samples near the search radius. */
      h = mix(max(h, s_h), h, dist_fac);
    }
  }
  return fast_acos(h);
}
/* Search the 4 horizon angles (2 slices x 2 directions) around view-space position `vP`
 * and return them packed in an OcclusionData.
 * `radius` is the world-space search distance, `dir_sample_count` the samples per direction. */
OcclusionData ambient_occlusion_search(vec3 vP,
                                       sampler2D depth_tx,
                                       ivec2 texel,
                                       float radius,
                                       const float inverted,
                                       const float dir_sample_count)
{
  vec2 noise = ambient_occlusion_get_noise(texel);
  vec2 dir = ambient_occlusion_get_dir(noise.x);
  /* Incoming view vector: towards the camera (or +Z for orthographic projections). */
  vec3 vI = ((ProjectionMatrix[3][3] == 0.0) ? normalize(-vP) : vec3(0.0, 0.0, 1.0));

  /* NOTE: removed unused locals `uv`, `avg_dir` and `avg_apperture`
   * (leftovers, never read in this function). */
  OcclusionData data = (inverted != 0.0) ? ambient_occlusion_data(vec4(0, 0, 0, 0), 1.0) :
                                           ambient_occlusion_disabled_data();

  for (int i = 0; i < 2; i++) {
    Ray ray;
    ray.origin = vP;
    ray.direction = vec3(dir * radius, 0.0);

    ScreenSpaceRay ssray;

    /* Positive direction of the slice. */
    ssray = raytrace_screenspace_ray_create(ray, ao_buf.pixel_size);
    data.horizons[0 + i * 2] = ambient_ambient_occlusion_search_horizon(
        vI, vP, noise.y, ssray, depth_tx, inverted, radius, dir_sample_count);

    /* Negative direction of the slice. */
    ray.direction = -ray.direction;
    ssray = raytrace_screenspace_ray_create(ray, ao_buf.pixel_size);
    data.horizons[1 + i * 2] = -ambient_ambient_occlusion_search_horizon(
        vI, vP, noise.y, ssray, depth_tx, inverted, radius, dir_sample_count);

    /* Rotate 90 degrees. */
    dir = vec2(-dir.y, dir.x);
  }
  return data;
}
/* Clamp both horizon angles to the hemisphere around the given normal direction,
 * `angle_N` being the signed angle between that normal and the view vector. */
vec2 ambient_occlusion_clamp_horizons_to_hemisphere(vec2 horizons,
                                                    float angle_N,
                                                    const float inverted)
{
  /* Add a little bias to fight self shadowing. */
  const float max_angle = M_PI_2 - 0.05;

  if (inverted != 0.0) {
    horizons.x = max(horizons.x, angle_N + max_angle);
    horizons.y = min(horizons.y, angle_N - max_angle);
  }
  else {
    horizons.x = min(horizons.x, angle_N + max_angle);
    horizons.y = max(horizons.y, angle_N - max_angle);
  }
  return horizons;
}
/* Integrate visibility over the hemisphere from the 4 searched horizon angles.
 * Outputs:
 * - `visibility`: occlusion factor in [0..1] (before any multibounce correction).
 * - `visibility_error`: correction factor for the 2-slice underestimation
 *   (used by the specular variant; 1.0 means no error).
 * - `bent_normal`: average unoccluded direction (only if AO_BENT_NORMALS). */
void ambient_occlusion_eval(OcclusionData data,
                            ivec2 texel,
                            vec3 V,
                            vec3 N,
                            vec3 Ng,
                            const float inverted,
                            out float visibility,
                            out float visibility_error,
                            out vec3 bent_normal)
{
  /* No error by default. */
  visibility_error = 1.0;

  /* Horizons still at their "disabled" init values means nothing was traced. */
  bool early_out = (inverted != 0.0) ? (max_v4(abs(data.horizons)) == 0.0) :
                                       (min_v4(abs(data.horizons)) == M_PI);
  if (early_out) {
    visibility = saturate(dot(N, Ng) * 0.5 + 0.5);
    visibility = min(visibility, data.custom_occlusion);

    if (AO_BENT_NORMALS) {
      bent_normal = safe_normalize(N + Ng);
    }
    else {
      bent_normal = N;
    }
    return;
  }

  /* Must match the jitter used during the horizon search. */
  vec2 noise = ambient_occlusion_get_noise(texel);
  vec2 dir = ambient_occlusion_get_dir(noise.x);

  visibility_error = 0.0;
  visibility = 0.0;
  bent_normal = N * 0.001;

  for (int i = 0; i < 2; i++) {
    vec3 T = transform_direction(ViewMatrixInverse, vec3(dir, 0.0));
    /* Setup integration domain around V. */
    vec3 B = normalize(cross(V, T));
    T = normalize(cross(B, V));

    float proj_N_len;
    vec3 proj_N = normalize_len(N - B * dot(N, B), proj_N_len);
    vec3 proj_Ng = normalize(Ng - B * dot(Ng, B));

    vec2 h = (i == 0) ? data.horizons.xy : data.horizons.zw;

    float N_sin = dot(proj_N, T);
    float Ng_sin = dot(proj_Ng, T);
    float N_cos = saturate(dot(proj_N, V));
    float Ng_cos = saturate(dot(proj_Ng, V));
    /* Gamma, angle between normalized projected normal and view vector. */
    float angle_Ng = sign(Ng_sin) * fast_acos(Ng_cos);
    float angle_N = sign(N_sin) * fast_acos(N_cos);
    /* Clamp horizons to hemisphere around shading normal. */
    h = ambient_occlusion_clamp_horizons_to_hemisphere(h, angle_N, inverted);

    float bent_angle = (h.x + h.y) * 0.5;
    /* NOTE: here we multiply z by 0.5 as it shows less difference with the geometric normal.
     * Also modulate by projected normal length to reduce issues with slanted surfaces.
     * All of this is ad-hoc and not really grounded. */
    bent_normal += proj_N_len * (T * sin(bent_angle) + V * 0.5 * cos(bent_angle));

    /* Clamp to geometric normal only for integral to keep smooth bent normal. */
    /* This is done to match Cycles ground truth but adds some computation. */
    h = ambient_occlusion_clamp_horizons_to_hemisphere(h, angle_Ng, inverted);

    /* Inner integral (Eq. 7). */
    float a = dot(-cos(2.0 * h - angle_N) + N_cos + 2.0 * h * N_sin, vec2(0.25));
    /* Correct normal not on plane (Eq. 8). */
    visibility += proj_N_len * a;
    /* Using a very low number of slices (2) leads to over-darkening of surfaces orthogonal to
     * the view. This is particularly annoying for sharp reflections occlusion. So we compute how
     * much the error is and correct the visibility later. */
    visibility_error += proj_N_len;

    /* Rotate 90 degrees. */
    dir = vec2(-dir.y, dir.x);
  }
  /* We integrated 2 directions. */
  visibility *= 0.5;
  visibility_error *= 0.5;

  visibility = min(visibility, data.custom_occlusion);

  if (AO_BENT_NORMALS) {
    /* NOTE: using pow(visibility, 6.0) produces NaN (see #87369). */
    float tmp = saturate(pow6(visibility));
    bent_normal = normalize(mix(bent_normal, N, tmp));
  }
  else {
    bent_normal = N;
  }
}
/* Multibounce approximation based on surface albedo.
 * Page 78 in the .pdf version. */
float ambient_occlusion_multibounce(float visibility, vec3 albedo)
{
  if (!AO_MULTI_BOUNCE) {
    return visibility;
  }

  /* Median luminance. Because Colored multibounce looks bad. */
  float lum = dot(albedo, vec3(0.3333));

  /* Cubic polynomial fit from the paper, driven by luminance. */
  float a = 2.0404 * lum - 0.3324;
  float b = -4.7951 * lum + 0.6417;
  float c = 2.7552 * lum + 0.6903;

  float x = visibility;
  /* max() guarantees the result never darkens below the single-bounce visibility. */
  return max(x, ((x * a + b) * x + c) * x);
}
/* Diffuse occlusion, without bent normal output or multibounce correction. */
float ambient_occlusion_diffuse(OcclusionData data, ivec2 texel, vec3 V, vec3 N, vec3 Ng)
{
  vec3 unused;
  float unused_error;
  float visibility;
  ambient_occlusion_eval(data, texel, V, N, Ng, 0.0, visibility, unused_error, unused);
  return saturate(visibility);
}

/* Diffuse occlusion with albedo-based multibounce correction and bent normal output. */
float ambient_occlusion_diffuse(
    OcclusionData data, ivec2 texel, vec3 V, vec3 N, vec3 Ng, vec3 albedo, out vec3 bent_normal)
{
  float visibility;
  float unused_error;
  ambient_occlusion_eval(data, texel, V, N, Ng, 0.0, visibility, unused_error, bent_normal);
  visibility = ambient_occlusion_multibounce(visibility, albedo);
  return saturate(visibility);
}
/**
 * Approximate the area of intersection of two spherical caps
 * radius1 : First cap radius (arc length in radians)
 * radius2 : Second cap radius (in radians)
 * dist : Distance between caps (radians between centers of caps)
 * NOTE: Result is divided by pi to save one multiply.
 */
float ambient_occlusion_spherical_cap_intersection(float radius1, float radius2, float dist)
{
  /* From "Ambient Aperture Lighting" by Chris Oat
   * Slide 15. */
  float max_radius = max(radius1, radius2);
  float min_radius = min(radius1, radius2);
  float sum_radius = radius1 + radius2;
  float area;

  if (dist <= max_radius - min_radius) {
    /* One cap is completely inside the other. */
    area = 1.0 - cos(min_radius);
  }
  else if (dist >= sum_radius) {
    /* No intersection exists. */
    /* Use a float literal: implicit int-to-float is not portable to all GLSL dialects. */
    area = 0.0;
  }
  else {
    float diff = max_radius - min_radius;
    area = smoothstep(0.0, 1.0, 1.0 - saturate((dist - diff) / (sum_radius - diff)));
    area *= 1.0 - cos(min_radius);
  }
  return area;
}
/* Specular occlusion: intersect the visibility cone with the specular reflection cone.
 * Also bends `specular_dir` towards the unoccluded direction for rough surfaces. */
float ambient_occlusion_specular(
    OcclusionData data, ivec2 texel, vec3 V, vec3 N, float roughness, inout vec3 specular_dir)
{
  vec3 visibility_dir;
  float visibility_error;
  float visibility;
  /* Use N as geometric normal too: the specular lobe is centered on the shading normal. */
  ambient_occlusion_eval(data, texel, V, N, N, 0.0, visibility, visibility_error, visibility_dir);

  /* Correct visibility error for very sharp surfaces. */
  visibility *= mix(safe_rcp(visibility_error), 1.0, roughness);

  specular_dir = normalize(mix(specular_dir, visibility_dir, roughness * (1.0 - visibility)));

  /* Visibility to cone angle (eq. 18). */
  float vis_angle = fast_acos(sqrt(1 - visibility));
  /* Roughness to cone angle (eq. 26). */
  /* A 0.001 min_angle can generate NaNs on Intel GPUs. See D12508. */
  const float min_angle = 0.00990998744964599609375;
  float spec_angle = max(min_angle, fast_acos(ambient_occlusion_cone_cosine(roughness)));
  /* Angle between cone axes. */
  float cone_cone_dist = fast_acos(saturate(dot(visibility_dir, specular_dir)));
  float cone_nor_dist = fast_acos(saturate(dot(N, specular_dir)));

  float isect_solid_angle = ambient_occlusion_spherical_cap_intersection(
      vis_angle, spec_angle, cone_cone_dist);
  float specular_solid_angle = ambient_occlusion_spherical_cap_intersection(
      M_PI_2, spec_angle, cone_nor_dist);
  float specular_occlusion = isect_solid_angle / specular_solid_angle;
  /* Mix because it is unstable in unoccluded areas. */
  float tmp = saturate(pow8(visibility));
  visibility = mix(specular_occlusion, 1.0, tmp);
  return saturate(visibility);
}

View File

@ -0,0 +1,92 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_geom_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_ambient_occlusion_lib.glsl)
/* Similar to https://atyuwen.github.io/posts/normal-reconstruction/.
 * This samples the depth buffer 4 time for each direction to get the most correct
 * implicit normal reconstruction out of the depth buffer. */
vec3 view_position_derivative_from_depth(
    sampler2D depth_tx, ivec2 extent, vec2 uv, ivec2 offset, vec3 vP, float depth_center)
{
  /* Four depth taps along the axis: -2, -1, +1, +2 texels from the center. */
  vec4 H;
  H.x = texelFetch(depth_tx, ivec2(uv * extent) - offset * 2, 0).r;
  H.y = texelFetch(depth_tx, ivec2(uv * extent) - offset, 0).r;
  H.z = texelFetch(depth_tx, ivec2(uv * extent) + offset, 0).r;
  H.w = texelFetch(depth_tx, ivec2(uv * extent) + offset * 2, 0).r;

  /* UVs matching each tap: uv1 <-> H.x, uv2 <-> H.y, uv3 <-> H.z, uv4 <-> H.w. */
  vec2 uv_offset = vec2(offset) / extent;
  vec2 uv1 = uv - uv_offset * 2.0;
  vec2 uv2 = uv - uv_offset;
  vec2 uv3 = uv + uv_offset;
  /* NOTE(review): `uv4` is never used below; the precision branch pairs H.w with uv3,
   * which looks like it should be uv4 — confirm against the reference implementation. */
  vec2 uv4 = uv + uv_offset * 2.0;

  /* Fix issue with depth precision. Take even larger diff. */
  vec4 diff = abs(vec4(depth_center, H.yzw) - H.x);
  if (max_v4(diff) < 2.4e-7 && all(lessThan(diff.xyz, diff.www))) {
    return 0.25 * (get_view_space_from_depth(uv3, H.w) - get_view_space_from_depth(uv1, H.x));
  }
  /* Simplified (H.xw + 2.0 * (H.yz - H.xw)) - depth_center */
  vec2 deltas = abs((2.0 * H.yz - H.xw) - depth_center);
  if (deltas.x < deltas.y) {
    /* Backward side is flatter: use backward difference. */
    return vP - get_view_space_from_depth(uv2, H.y);
  }
  else {
    /* Forward side is flatter: use forward difference. */
    return get_view_space_from_depth(uv3, H.z) - vP;
  }
}
/* TODO(Miguel Pozo): This should be in common_view_lib,
 * but moving it there results in dependency hell. */
/* Reconstruct view-space position `vP` and implicit geometric normal `vNg` from the
 * depth buffer. Returns false for background pixels (depth == 1.0); outputs are still
 * written in that case. */
bool reconstruct_view_position_and_normal_from_depth(
    sampler2D depth_tx, ivec2 extent, vec2 uv, out vec3 vP, out vec3 vNg)
{
  float depth_center = texelFetch(depth_tx, ivec2(uv * extent), 0).r;

  vP = get_view_space_from_depth(uv, depth_center);

  vec3 dPdx = view_position_derivative_from_depth(
      depth_tx, extent, uv, ivec2(1, 0), vP, depth_center);
  vec3 dPdy = view_position_derivative_from_depth(
      depth_tx, extent, uv, ivec2(0, 1), vP, depth_center);

  vNg = safe_normalize(cross(dPdx, dPdy));

  /* Background case. */
  return depth_center != 1.0;
}
/* Stand-alone ambient occlusion compute pass.
 * One invocation per pixel: reconstructs geometry from the HiZ depth buffer,
 * runs the AO horizon search, and stores the resulting visibility. */
void main()
{
  ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
  ivec2 extent = imageSize(in_normal_img);
  /* Dispatch is rounded up to the tile size: discard out-of-range threads. */
  if (any(greaterThanEqual(texel, extent))) {
    return;
  }
  /* Sample at the pixel center. */
  vec2 uv = (vec2(texel) + vec2(0.5)) / vec2(extent);

  vec3 vP, vNg;
  if (!reconstruct_view_position_and_normal_from_depth(hiz_tx, extent, uv, vP, vNg)) {
    /* Do not trace for background */
    imageStore(out_ao_img, texel, vec4(0.0));
    return;
  }

  /* World-space position, view vector and geometric normal. */
  vec3 P = transform_point(ViewMatrixInverse, vP);
  vec3 V = cameraVec(P);
  vec3 Ng = transform_direction(ViewMatrixInverse, vNg);
  /* Shading normal read from the normal image written by an earlier pass. */
  vec3 N = imageLoad(in_normal_img, texel).xyz;

  /* Horizon search up to the user-set distance; inverted = 0, 8 samples. */
  OcclusionData data = ambient_occlusion_search(vP, hiz_tx, texel, ao_buf.distance, 0.0, 8.0);

  float visibility;
  float visibility_error_out;
  vec3 bent_normal_out;
  /* Only `visibility` is consumed; error and bent-normal outputs are discarded. */
  ambient_occlusion_eval(
      data, texel, V, N, Ng, 0.0, visibility, visibility_error_out, bent_normal_out);
  /* Clamp to [0..1]. NOTE(review): previous comment said "scale by user factor"
   * but no factor is applied here — presumably done elsewhere; confirm. */
  visibility = saturate(visibility);

  imageStore(out_ao_img, texel, vec4(visibility));
}

View File

@ -69,10 +69,42 @@ void main()
reflection_light,
shadow);
if (!is_last_eval_pass) {
/* Output diffuse light along with object ID for sub-surface screen space processing. */
if (is_last_eval_pass) {
/* Apply color and output lighting to render-passes. */
vec4 color_0_packed = texelFetch(gbuffer_color_tx, ivec3(texel, 0), 0);
vec4 color_1_packed = texelFetch(gbuffer_color_tx, ivec3(texel, 1), 0);
reflection_data.color = gbuffer_color_unpack(color_0_packed);
diffuse_data.color = gbuffer_color_unpack(color_1_packed);
if (is_refraction) {
diffuse_data.color = vec3(0.0);
}
/* Light passes. */
if (rp_buf.diffuse_light_id >= 0) {
imageStore(rp_color_img, ivec3(texel, rp_buf.diffuse_light_id), vec4(diffuse_light, 1.0));
}
if (rp_buf.specular_light_id >= 0) {
imageStore(
rp_color_img, ivec3(texel, rp_buf.specular_light_id), vec4(reflection_light, 1.0));
}
fclem marked this conversation as resolved Outdated

Remove the comment.

Remove the comment.

Why specifically on this one?
It's in the other shaders that write to the renderpasses too.
I think it makes sense to mention why AO is omitted.

Why specifically on this one? It's in the other shaders that write to the renderpasses too. I think it makes sense to mention why AO is omitted.
if (rp_buf.shadow_id >= 0) {
imageStore(rp_value_img, ivec3(texel, rp_buf.shadow_id), vec4(shadow));
}
/** NOTE: AO is done on its own pass. */
diffuse_light *= diffuse_data.color;
reflection_light *= reflection_data.color;
/* Add radiance to combined pass. */
out_radiance = vec4(diffuse_light + reflection_light, 0.0);
out_transmittance = vec4(1.0);
}
else {
/* Store lighting for next deferred pass. */
vec4 diffuse_radiance;
diffuse_radiance.xyz = diffuse_light;
/* Output object ID for sub-surface screen space processing. */
diffuse_radiance.w = gbuffer_object_id_f16_pack(diffuse_data.sss_id);
imageStore(out_diffuse_light_img, texel, diffuse_radiance);
imageStore(out_specular_light_img, texel, vec4(reflection_light, 0.0));

View File

@ -717,8 +717,8 @@ void film_process_data(ivec2 texel_film, out vec4 out_color, out float out_depth
film_store_color(dst, film_buf.diffuse_color_id, diffuse_color_accum, out_color);
film_store_color(dst, film_buf.specular_color_id, specular_color_accum, out_color);
film_store_color(dst, film_buf.environment_id, environment_accum, out_color);
film_store_value(dst, film_buf.shadow_id, shadow_accum, out_color);
film_store_value(dst, film_buf.ambient_occlusion_id, ao_accum, out_color);
film_store_color(dst, film_buf.shadow_id, vec4(vec3(shadow_accum), 1.0), out_color);
film_store_color(dst, film_buf.ambient_occlusion_id, vec4(vec3(ao_accum), 1.0), out_color);
film_store_value(dst, film_buf.mist_id, mist_accum, out_color);
}

View File

@ -199,12 +199,31 @@ Closure closure_mix(Closure cl1, Closure cl2, float fac)
}
/* Ambient Occlusion shader-node evaluation.
 * Returns visibility in [0..1]; 1.0 (fully visible) outside the supported
 * shader stage/material combinations.
 * NOTE(review): `normal` and `distance` are part of the node interface but are
 * not read here — evaluation uses `g_data` and `max_distance` instead; confirm
 * this is intentional. */
float ambient_occlusion_eval(vec3 normal,
                             float distance,
                             float max_distance,
                             const float inverted,
                             const float sample_count)
{
  /* Avoid multi-line preprocessor conditionals.
   * Some drivers don't handle them correctly. */
  // clang-format off
#if defined(GPU_FRAGMENT_SHADER) && defined(MAT_AMBIENT_OCCLUSION) && !defined(MAT_DEPTH) && !defined(MAT_SHADOW)
  // clang-format on
  vec3 vP = transform_point(ViewMatrix, g_data.P);
  ivec2 texel = ivec2(gl_FragCoord.xy);
  OcclusionData data = ambient_occlusion_search(
      vP, hiz_tx, texel, max_distance, inverted, sample_count);

  vec3 V = cameraVec(g_data.P);
  vec3 N = g_data.N;
  vec3 Ng = g_data.Ng;

  /* Error and bent-normal outputs of the overload are not needed here. */
  float unused_error, visibility;
  vec3 unused;
  ambient_occlusion_eval(data, texel, V, N, Ng, inverted, visibility, unused_error, unused);
  return visibility;
#else
  /* Unsupported stage or pass type (depth/shadow/world): fully visible. */
  return 1.0;
#endif
}
#ifndef GPU_METAL

View File

@ -0,0 +1,63 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
/**
* Screen-Space Raytracing functions.
*/
/* Ray segment used as input to the screen-space tracing setup. */
struct Ray {
  vec3 origin;
  /* Ray direction premultiplied by its maximum length. */
  vec3 direction;
};
/* Screenspace ray ([0..1] "uv" range) where direction is normalized to be as
 * small as one full-resolution pixel. The ray is also clipped to all frustum
 * sides. */
struct ScreenSpaceRay {
  /* Start point; xy end up in [0..1] texture coordinates after finalize. */
  vec4 origin;
  /* Per-step delta; finalize scales it so its dominant screen axis spans
   * roughly one pixel. */
  vec4 direction;
  /* Trace end expressed as a step count along `direction`, clipped to the
   * segment end and the frustum. */
  float max_time;
};
/* Turn an NDC-space start/end pair into a steppable screen-space ray:
 * biases depth, normalizes the step to one pixel, computes the clipped
 * maximum step count, and remaps to [0..1] texture coordinates. */
void raytrace_screenspace_ray_finalize(inout ScreenSpaceRay ray, vec2 pixel_size)
{
  /* Constant bias (due to depth buffer precision). Helps with self intersection. */
  /* Magic numbers for 24bits of precision.
   * From http://terathon.com/gdc07_lengyel.pdf (slide 26) */
  const float bias = -2.4e-7 * 2.0;
  ray.origin.zw += bias;
  ray.direction.zw += bias;

  /* Up to this point `direction` holds the end point; turn it into a delta. */
  ray.direction -= ray.origin;

  /* Degenerate lines are stretched to cover at least one pixel so the
   * zero-extent case never needs special handling later on. */
  if (len_squared(ray.direction.xy) < 0.00001) {
    ray.direction.xy = vec2(0.0, 0.00001);
  }

  float ray_len_sqr = len_squared(ray.direction.xyz);

  /* Rescale `direction` so its dominant screen axis advances one pixel. */
  bool y_dominant = abs(ray.direction.x / pixel_size.x) < abs(ray.direction.y / pixel_size.y);
  if (y_dominant) {
    ray.direction /= abs(ray.direction.y);
    ray.direction *= pixel_size.y;
  }
  else {
    ray.direction /= abs(ray.direction.x);
    ray.direction *= pixel_size.x;
  }

  /* Clip to segment's end. */
  ray.max_time = sqrt(ray_len_sqr * safe_rcp(len_squared(ray.direction.xyz)));
  /* Clipping to frustum sides. */
  ray.max_time = min(ray.max_time,
                     line_unit_box_intersect_dist_safe(ray.origin.xyz, ray.direction.xyz));
  /* Convert from NDC [-1..1] to texture coordinates [0..1]. */
  ray.origin = ray.origin * 0.5 + 0.5;
  ray.direction *= 0.5;
}
/* Project a ray's end points into NDC and finalize it for screen-space
 * marching. NOTE(review): `.w` of origin/direction stays uninitialized while
 * finalize biases `.zw` — confirm this is harmless as in legacy EEVEE. */
ScreenSpaceRay raytrace_screenspace_ray_create(Ray ray, vec2 pixel_size)
{
  vec3 ndc_start = project_point(ProjectionMatrix, ray.origin);
  vec3 ndc_end = project_point(ProjectionMatrix, ray.origin + ray.direction);

  ScreenSpaceRay ssray;
  ssray.origin.xyz = ndc_start;
  ssray.direction.xyz = ndc_end;
  raytrace_screenspace_ray_finalize(ssray, pixel_size);
  return ssray;
}

View File

@ -10,6 +10,7 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_ambient_occlusion_lib.glsl)
fclem marked this conversation as resolved Outdated

Why is this needed?

Why is this needed?

Because the AO lib is required by the AO node in the nodetree lib.
But I can’t include the AO lib directly from the nodetree lib since then it ends up also in the depth, shadow and world passes.
AFAIK there’s no way to make a conditional BLENDER_REQUIRE, so the other option would be to put the whole AO lib behind a define (which would be kind of annoying in most code editors).

Because the AO lib is required by the AO node in the nodetree lib. But I can’t include the AO lib directly from the nodetree lib since then it ends up also in the depth, shadow and world passes. AFAIK there’s no way to make a conditional BLENDER_REQUIRE, so the other option would be to put the whole AO lib behind a define (which would be kind of annoying in most code editors).
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_sampling_lib.glsl)

View File

@ -8,6 +8,7 @@
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_ambient_occlusion_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_light_eval_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_sampling_lib.glsl)
@ -119,7 +120,7 @@ void main()
output_renderpass_color(rp_buf.specular_light_id, vec4(specular_light, 1.0));
output_renderpass_color(rp_buf.emission_id, vec4(g_emission, 1.0));
output_renderpass_value(rp_buf.shadow_id, shadow);
/* TODO: AO. */
/** NOTE: AO is done on its own pass. */
#endif
out_radiance.rgb *= 1.0 - g_holdout;

View File

@ -49,6 +49,6 @@ void main()
output_renderpass_color(rp_buf.specular_color_id, clear_color);
output_renderpass_color(rp_buf.emission_id, clear_color);
output_renderpass_value(rp_buf.shadow_id, 1.0);
output_renderpass_value(rp_buf.ambient_occlusion_id, 0.0);
/** NOTE: AO is done on its own pass. */
imageStore(rp_cryptomatte_img, texel, vec4(0.0));
}

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "eevee_defines.hh"
#include "gpu_shader_create_info.hh"
/* Shared resources for every shader that evaluates ambient occlusion:
 * view data, HiZ buffer, sampling data, utility texture and the AO UBO. */
GPU_SHADER_CREATE_INFO(eevee_ambient_occlusion_data)
    .additional_info("draw_view",
                     "eevee_shared",
                     "eevee_hiz_data",
                     "eevee_sampling_data",
                     "eevee_utility_texture")
    .uniform_buf(AO_BUF_SLOT, "AOData", "ao_buf");

/* Stand-alone compute pass producing the AO render pass: reads per-pixel
 * shading normals, writes visibility. */
GPU_SHADER_CREATE_INFO(eevee_ambient_occlusion_pass)
    .additional_info("eevee_ambient_occlusion_data")
    .compute_source("eevee_ambient_occlusion_pass_comp.glsl")
    .local_group_size(AMBIENT_OCCLUSION_PASS_TILE_SIZE, AMBIENT_OCCLUSION_PASS_TILE_SIZE)
    .image(0, GPU_RGBA16F, Qualifier::READ, ImageType::FLOAT_2D, "in_normal_img")
    /* NOTE(review): format is RG16F but the shader stores a vec4; only one
     * channel seems consumed — confirm the second channel is intentional. */
    .image(1, GPU_RG16F, Qualifier::WRITE, ImageType::FLOAT_2D, "out_ao_img")
    .do_static_compilation(true);

View File

@ -109,7 +109,8 @@ GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
"eevee_sampling_data",
/* Added at runtime because of test shaders not having `node_tree`. */
// "eevee_render_pass_out",
"eevee_cryptomatte_out");
"eevee_cryptomatte_out",
"eevee_ambient_occlusion_data");
GPU_SHADER_CREATE_INFO(eevee_surf_forward)
.vertex_out(eevee_surf_iface)
@ -123,7 +124,8 @@ GPU_SHADER_CREATE_INFO(eevee_surf_forward)
"eevee_camera",
"eevee_utility_texture",
"eevee_sampling_data",
"eevee_shadow_data"
"eevee_shadow_data",
"eevee_ambient_occlusion_data"
/* Optionally added depending on the material. */
// "eevee_render_pass_out",
// "eevee_cryptomatte_out",
@ -140,6 +142,7 @@ GPU_SHADER_CREATE_INFO(eevee_surf_capture)
.additional_info("eevee_camera", "eevee_utility_texture");
GPU_SHADER_CREATE_INFO(eevee_surf_depth)
.define("MAT_DEPTH")
.vertex_out(eevee_surf_iface)
.fragment_source("eevee_surf_depth_frag.glsl")
.additional_info("eevee_sampling_data", "eevee_camera", "eevee_utility_texture");

View File

@ -643,6 +643,7 @@ list(APPEND INC ${CMAKE_CURRENT_BINARY_DIR})
set(SRC_SHADER_CREATE_INFOS
../draw/engines/basic/shaders/infos/basic_depth_info.hh
../draw/engines/eevee_next/shaders/infos/eevee_ambient_occlusion_info.hh
../draw/engines/eevee_next/shaders/infos/eevee_deferred_info.hh
../draw/engines/eevee_next/shaders/infos/eevee_depth_of_field_info.hh
../draw/engines/eevee_next/shaders/infos/eevee_film_info.hh