EEVEE-Next: Port to new Draw Manager
@@ -68,3 +68,37 @@
#define DOF_FILTER_GROUP_SIZE 8
#define DOF_GATHER_GROUP_SIZE DOF_TILES_SIZE
#define DOF_RESOLVE_GROUP_SIZE (DOF_TILES_SIZE * 2)

/* Resource bindings. */

/* Texture. */
#define RBUFS_UTILITY_TEX_SLOT 14

/* Images. */
#define RBUFS_NORMAL_SLOT 0
#define RBUFS_LIGHT_SLOT 1
#define RBUFS_DIFF_COLOR_SLOT 2
#define RBUFS_SPEC_COLOR_SLOT 3
#define RBUFS_EMISSION_SLOT 4
#define RBUFS_AOV_COLOR_SLOT 5
#define RBUFS_AOV_VALUE_SLOT 6

/* Uniform Bufs. */
/* Only during prepass. */
#define VELOCITY_CAMERA_PREV_BUF 3
#define VELOCITY_CAMERA_CURR_BUF 4
#define VELOCITY_CAMERA_NEXT_BUF 5

/* Storage Bufs. */
#define LIGHT_CULL_BUF_SLOT 0
#define LIGHT_BUF_SLOT 1
#define LIGHT_ZBIN_BUF_SLOT 2
#define LIGHT_TILE_BUF_SLOT 3
#define RBUFS_AOV_BUF_SLOT 5
#define SAMPLING_BUF_SLOT 6
/* Only during prepass. */
#define VELOCITY_OBJ_PREV_BUF_SLOT 0
#define VELOCITY_OBJ_NEXT_BUF_SLOT 1
#define VELOCITY_GEO_PREV_BUF_SLOT 2
#define VELOCITY_GEO_NEXT_BUF_SLOT 3
#define VELOCITY_INDIRECTION_BUF_SLOT 4
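These defines centralize the hard-coded resource bind points shared between the C++ pass setup and the shaders. As a rough, hypothetical illustration of how such slot constants are typically consumed (the create-info name and resource names below are placeholders, not part of this commit):

/* Hypothetical sketch only: referencing the slot defines above from a shader create-info. */
GPU_SHADER_CREATE_INFO(eevee_example_deferred)
    .image(RBUFS_NORMAL_SLOT, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "rp_normal_img")
    .sampler(RBUFS_UTILITY_TEX_SLOT, ImageType::FLOAT_2D_ARRAY, "utility_tx")
    .storage_buf(LIGHT_BUF_SLOT, Qualifier::READ, "LightData", "light_buf[]");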
@@ -237,35 +237,34 @@ void DepthOfField::bokeh_lut_pass_sync()
const bool has_anisotropy = data_.bokeh_anisotropic_scale != float2(1.0f);
if (!has_anisotropy && (data_.bokeh_blades == 0.0)) {
/* No need for LUTs in these cases. */
bokeh_lut_ps_ = nullptr;
use_bokeh_lut_ = false;
return;
}
use_bokeh_lut_ = true;

/* Precompute bokeh texture. */
bokeh_lut_ps_ = DRW_pass_create("Dof.bokeh_lut_ps_", DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(DOF_BOKEH_LUT);
DRWShadingGroup *grp = DRW_shgroup_create(sh, bokeh_lut_ps_);
DRW_shgroup_uniform_block(grp, "dof_buf", data_);
DRW_shgroup_uniform_image_ref(grp, "out_gather_lut_img", &bokeh_gather_lut_tx_);
DRW_shgroup_uniform_image_ref(grp, "out_scatter_lut_img", &bokeh_scatter_lut_tx_);
DRW_shgroup_uniform_image_ref(grp, "out_resolve_lut_img", &bokeh_resolve_lut_tx_);
DRW_shgroup_call_compute(grp, 1, 1, 1);
bokeh_lut_ps_.init();
bokeh_lut_ps_.shader_set(inst_.shaders.static_shader_get(DOF_BOKEH_LUT));
bokeh_lut_ps_.bind_ubo("dof_buf", data_);
bokeh_lut_ps_.bind_image("out_gather_lut_img", &bokeh_gather_lut_tx_);
bokeh_lut_ps_.bind_image("out_scatter_lut_img", &bokeh_scatter_lut_tx_);
bokeh_lut_ps_.bind_image("out_resolve_lut_img", &bokeh_resolve_lut_tx_);
bokeh_lut_ps_.dispatch(int3(1, 1, 1));
}
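Read with only the added lines, the ported function comes out as below (a sketch assembled from the new-API lines in this hunk, not additional code from the commit):

void DepthOfField::bokeh_lut_pass_sync()
{
  const bool has_anisotropy = data_.bokeh_anisotropic_scale != float2(1.0f);
  if (!has_anisotropy && (data_.bokeh_blades == 0.0)) {
    /* No need for LUTs in these cases. */
    use_bokeh_lut_ = false;
    return;
  }
  use_bokeh_lut_ = true;

  /* Precompute bokeh texture. */
  bokeh_lut_ps_.init();
  bokeh_lut_ps_.shader_set(inst_.shaders.static_shader_get(DOF_BOKEH_LUT));
  bokeh_lut_ps_.bind_ubo("dof_buf", data_);
  bokeh_lut_ps_.bind_image("out_gather_lut_img", &bokeh_gather_lut_tx_);
  bokeh_lut_ps_.bind_image("out_scatter_lut_img", &bokeh_scatter_lut_tx_);
  bokeh_lut_ps_.bind_image("out_resolve_lut_img", &bokeh_resolve_lut_tx_);
  bokeh_lut_ps_.dispatch(int3(1, 1, 1));
}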

void DepthOfField::setup_pass_sync()
{
RenderBuffers &render_buffers = inst_.render_buffers;

setup_ps_ = DRW_pass_create("Dof.setup_ps_", DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(DOF_SETUP);
DRWShadingGroup *grp = DRW_shgroup_create(sh, setup_ps_);
DRW_shgroup_uniform_texture_ref_ex(grp, "color_tx", &input_color_tx_, no_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "depth_tx", &render_buffers.depth_tx, no_filter);
DRW_shgroup_uniform_block(grp, "dof_buf", data_);
DRW_shgroup_uniform_image_ref(grp, "out_color_img", &setup_color_tx_);
DRW_shgroup_uniform_image_ref(grp, "out_coc_img", &setup_coc_tx_);
DRW_shgroup_call_compute_ref(grp, dispatch_setup_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
setup_ps_.init();
setup_ps_.shader_set(inst_.shaders.static_shader_get(DOF_SETUP));
setup_ps_.bind_texture("color_tx", &input_color_tx_, no_filter);
setup_ps_.bind_texture("depth_tx", &render_buffers.depth_tx, no_filter);
setup_ps_.bind_ubo("dof_buf", data_);
setup_ps_.bind_image("out_color_img", &setup_color_tx_);
setup_ps_.bind_image("out_coc_img", &setup_coc_tx_);
setup_ps_.dispatch(&dispatch_setup_size_);
setup_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH);
}
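The remaining DoF pass-sync functions follow the same mechanical translation. Summarizing the correspondence visible in these hunks:

/* Old draw-manager call                      -> new PassSimple recording call (as used in this commit):
 *   DRW_pass_create(name, state)             -> pass.init() (+ pass.state_set(state) when drawing)
 *   DRW_shgroup_create(sh, pass)             -> pass.shader_set(sh)
 *   DRW_shgroup_uniform_block[_ref]()        -> pass.bind_ubo()
 *   DRW_shgroup_storage_block[_ref]()        -> pass.bind_ssbo()
 *   DRW_shgroup_uniform_texture*()           -> pass.bind_texture()
 *   DRW_shgroup_uniform_image*()             -> pass.bind_image()
 *   DRW_shgroup_uniform_bool/int/*_copy()    -> pass.push_constant()
 *   DRW_shgroup_call_compute[_ref]()         -> pass.dispatch()
 *   DRW_shgroup_call_procedural_*()          -> pass.draw_procedural*()
 *   DRW_shgroup_barrier()                    -> pass.barrier()
 *   DRW_draw_pass(pass)                      -> manager.submit(pass, view)
 */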

void DepthOfField::stabilize_pass_sync()
@@ -273,214 +272,203 @@ void DepthOfField::stabilize_pass_sync()
RenderBuffers &render_buffers = inst_.render_buffers;
VelocityModule &velocity = inst_.velocity;

stabilize_ps_ = DRW_pass_create("Dof.stabilize_ps_", DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(DOF_STABILIZE);
DRWShadingGroup *grp = DRW_shgroup_create(sh, stabilize_ps_);
DRW_shgroup_uniform_block_ref(grp, "camera_prev", &(*velocity.camera_steps[STEP_PREVIOUS]));
DRW_shgroup_uniform_block_ref(grp, "camera_curr", &(*velocity.camera_steps[STEP_CURRENT]));
stabilize_ps_.init();
stabilize_ps_.shader_set(inst_.shaders.static_shader_get(DOF_STABILIZE));
stabilize_ps_.bind_ubo("camera_prev", &(*velocity.camera_steps[STEP_PREVIOUS]));
stabilize_ps_.bind_ubo("camera_curr", &(*velocity.camera_steps[STEP_CURRENT]));
/* This is only for temporal stability. The next step is not needed. */
DRW_shgroup_uniform_block_ref(grp, "camera_next", &(*velocity.camera_steps[STEP_PREVIOUS]));
DRW_shgroup_uniform_texture_ref_ex(grp, "coc_tx", &setup_coc_tx_, no_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "color_tx", &setup_color_tx_, no_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "velocity_tx", &render_buffers.vector_tx, no_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "in_history_tx", &stabilize_input_, with_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "depth_tx", &render_buffers.depth_tx, no_filter);
DRW_shgroup_uniform_bool(grp, "use_history", &stabilize_valid_history_, 1);
DRW_shgroup_uniform_block(grp, "dof_buf", data_);
DRW_shgroup_uniform_image(grp, "out_coc_img", reduced_coc_tx_.mip_view(0));
DRW_shgroup_uniform_image(grp, "out_color_img", reduced_color_tx_.mip_view(0));
DRW_shgroup_uniform_image_ref(grp, "out_history_img", &stabilize_output_tx_);
DRW_shgroup_call_compute_ref(grp, dispatch_stabilize_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_IMAGE_ACCESS);
stabilize_ps_.bind_ubo("camera_next", &(*velocity.camera_steps[STEP_PREVIOUS]));
stabilize_ps_.bind_texture("coc_tx", &setup_coc_tx_, no_filter);
stabilize_ps_.bind_texture("color_tx", &setup_color_tx_, no_filter);
stabilize_ps_.bind_texture("velocity_tx", &render_buffers.vector_tx, no_filter);
stabilize_ps_.bind_texture("in_history_tx", &stabilize_input_, with_filter);
stabilize_ps_.bind_texture("depth_tx", &render_buffers.depth_tx, no_filter);
stabilize_ps_.bind_ubo("dof_buf", data_);
stabilize_ps_.push_constant("use_history", &stabilize_valid_history_, 1);
stabilize_ps_.bind_image("out_coc_img", reduced_coc_tx_.mip_view(0));
stabilize_ps_.bind_image("out_color_img", reduced_color_tx_.mip_view(0));
stabilize_ps_.bind_image("out_history_img", &stabilize_output_tx_);
stabilize_ps_.dispatch(&dispatch_stabilize_size_);
stabilize_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_IMAGE_ACCESS);
}

void DepthOfField::downsample_pass_sync()
{
downsample_ps_ = DRW_pass_create("Dof.downsample_ps_", DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(DOF_DOWNSAMPLE);
DRWShadingGroup *grp = DRW_shgroup_create(sh, downsample_ps_);
DRW_shgroup_uniform_texture_ex(grp, "color_tx", reduced_color_tx_.mip_view(0), no_filter);
DRW_shgroup_uniform_texture_ex(grp, "coc_tx", reduced_coc_tx_.mip_view(0), no_filter);
DRW_shgroup_uniform_image_ref(grp, "out_color_img", &downsample_tx_);
DRW_shgroup_call_compute_ref(grp, dispatch_downsample_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
downsample_ps_.init();
downsample_ps_.shader_set(inst_.shaders.static_shader_get(DOF_DOWNSAMPLE));
downsample_ps_.bind_texture("color_tx", reduced_color_tx_.mip_view(0), no_filter);
downsample_ps_.bind_texture("coc_tx", reduced_coc_tx_.mip_view(0), no_filter);
downsample_ps_.bind_image("out_color_img", &downsample_tx_);
downsample_ps_.dispatch(&dispatch_downsample_size_);
downsample_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH);
}

void DepthOfField::reduce_pass_sync()
{
reduce_ps_ = DRW_pass_create("Dof.reduce_ps_", DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(DOF_REDUCE);
DRWShadingGroup *grp = DRW_shgroup_create(sh, reduce_ps_);
DRW_shgroup_uniform_block(grp, "dof_buf", data_);
DRW_shgroup_uniform_texture_ref_ex(grp, "downsample_tx", &downsample_tx_, no_filter);
DRW_shgroup_storage_block(grp, "scatter_fg_list_buf", scatter_fg_list_buf_);
DRW_shgroup_storage_block(grp, "scatter_bg_list_buf", scatter_bg_list_buf_);
DRW_shgroup_storage_block(grp, "scatter_fg_indirect_buf", scatter_fg_indirect_buf_);
DRW_shgroup_storage_block(grp, "scatter_bg_indirect_buf", scatter_bg_indirect_buf_);
DRW_shgroup_uniform_image(grp, "inout_color_lod0_img", reduced_color_tx_.mip_view(0));
DRW_shgroup_uniform_image(grp, "out_color_lod1_img", reduced_color_tx_.mip_view(1));
DRW_shgroup_uniform_image(grp, "out_color_lod2_img", reduced_color_tx_.mip_view(2));
DRW_shgroup_uniform_image(grp, "out_color_lod3_img", reduced_color_tx_.mip_view(3));
DRW_shgroup_uniform_image(grp, "in_coc_lod0_img", reduced_coc_tx_.mip_view(0));
DRW_shgroup_uniform_image(grp, "out_coc_lod1_img", reduced_coc_tx_.mip_view(1));
DRW_shgroup_uniform_image(grp, "out_coc_lod2_img", reduced_coc_tx_.mip_view(2));
DRW_shgroup_uniform_image(grp, "out_coc_lod3_img", reduced_coc_tx_.mip_view(3));
DRW_shgroup_call_compute_ref(grp, dispatch_reduce_size_);
reduce_ps_.init();
reduce_ps_.shader_set(inst_.shaders.static_shader_get(DOF_REDUCE));
reduce_ps_.bind_ubo("dof_buf", data_);
reduce_ps_.bind_texture("downsample_tx", &downsample_tx_, no_filter);
reduce_ps_.bind_ssbo("scatter_fg_list_buf", scatter_fg_list_buf_);
reduce_ps_.bind_ssbo("scatter_bg_list_buf", scatter_bg_list_buf_);
reduce_ps_.bind_ssbo("scatter_fg_indirect_buf", scatter_fg_indirect_buf_);
reduce_ps_.bind_ssbo("scatter_bg_indirect_buf", scatter_bg_indirect_buf_);
reduce_ps_.bind_image("inout_color_lod0_img", reduced_color_tx_.mip_view(0));
reduce_ps_.bind_image("out_color_lod1_img", reduced_color_tx_.mip_view(1));
reduce_ps_.bind_image("out_color_lod2_img", reduced_color_tx_.mip_view(2));
reduce_ps_.bind_image("out_color_lod3_img", reduced_color_tx_.mip_view(3));
reduce_ps_.bind_image("in_coc_lod0_img", reduced_coc_tx_.mip_view(0));
reduce_ps_.bind_image("out_coc_lod1_img", reduced_coc_tx_.mip_view(1));
reduce_ps_.bind_image("out_coc_lod2_img", reduced_coc_tx_.mip_view(2));
reduce_ps_.bind_image("out_coc_lod3_img", reduced_coc_tx_.mip_view(3));
reduce_ps_.dispatch(&dispatch_reduce_size_);
/* NOTE: Command buffer barrier is done automatically by the GPU backend. */
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_STORAGE);
reduce_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_STORAGE);
}

void DepthOfField::tiles_flatten_pass_sync()
{
tiles_flatten_ps_ = DRW_pass_create("Dof.tiles_flatten_ps_", DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(DOF_TILES_FLATTEN);
DRWShadingGroup *grp = DRW_shgroup_create(sh, tiles_flatten_ps_);
tiles_flatten_ps_.init();
tiles_flatten_ps_.shader_set(inst_.shaders.static_shader_get(DOF_TILES_FLATTEN));
/* NOTE(fclem): We should use the reduced_coc_tx_ as it is stable, but we need the slight focus
* flag from the setup pass. A better way would be to do the brute-force in focus gather without
* this. */
DRW_shgroup_uniform_texture_ref_ex(grp, "coc_tx", &setup_coc_tx_, no_filter);
DRW_shgroup_uniform_image_ref(grp, "out_tiles_fg_img", &tiles_fg_tx_.current());
DRW_shgroup_uniform_image_ref(grp, "out_tiles_bg_img", &tiles_bg_tx_.current());
DRW_shgroup_call_compute_ref(grp, dispatch_tiles_flatten_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS);
tiles_flatten_ps_.bind_texture("coc_tx", &setup_coc_tx_, no_filter);
tiles_flatten_ps_.bind_image("out_tiles_fg_img", &tiles_fg_tx_.current());
tiles_flatten_ps_.bind_image("out_tiles_bg_img", &tiles_bg_tx_.current());
tiles_flatten_ps_.dispatch(&dispatch_tiles_flatten_size_);
tiles_flatten_ps_.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS);
}

void DepthOfField::tiles_dilate_pass_sync()
{
tiles_dilate_minmax_ps_ = DRW_pass_create("Dof.tiles_dilate_minmax_ps_", DRW_STATE_NO_DRAW);
tiles_dilate_minabs_ps_ = DRW_pass_create("Dof.tiles_dilate_minabs_ps_", DRW_STATE_NO_DRAW);
for (int pass = 0; pass < 2; pass++) {
DRWPass *drw_pass = (pass == 0) ? tiles_dilate_minmax_ps_ : tiles_dilate_minabs_ps_;
GPUShader *sh = inst_.shaders.static_shader_get((pass == 0) ? DOF_TILES_DILATE_MINMAX :
DOF_TILES_DILATE_MINABS);
DRWShadingGroup *grp = DRW_shgroup_create(sh, drw_pass);
DRW_shgroup_uniform_image_ref(grp, "in_tiles_fg_img", &tiles_fg_tx_.previous());
DRW_shgroup_uniform_image_ref(grp, "in_tiles_bg_img", &tiles_bg_tx_.previous());
DRW_shgroup_uniform_image_ref(grp, "out_tiles_fg_img", &tiles_fg_tx_.current());
DRW_shgroup_uniform_image_ref(grp, "out_tiles_bg_img", &tiles_bg_tx_.current());
DRW_shgroup_uniform_int(grp, "ring_count", &tiles_dilate_ring_count_, 1);
DRW_shgroup_uniform_int(grp, "ring_width_multiplier", &tiles_dilate_ring_width_mul_, 1);
DRW_shgroup_call_compute_ref(grp, dispatch_tiles_dilate_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS);
PassSimple &drw_pass = (pass == 0) ? tiles_dilate_minmax_ps_ : tiles_dilate_minabs_ps_;
eShaderType sh_type = (pass == 0) ? DOF_TILES_DILATE_MINMAX : DOF_TILES_DILATE_MINABS;
drw_pass.init();
drw_pass.shader_set(inst_.shaders.static_shader_get(sh_type));
drw_pass.bind_image("in_tiles_fg_img", &tiles_fg_tx_.previous());
drw_pass.bind_image("in_tiles_bg_img", &tiles_bg_tx_.previous());
drw_pass.bind_image("out_tiles_fg_img", &tiles_fg_tx_.current());
drw_pass.bind_image("out_tiles_bg_img", &tiles_bg_tx_.current());
drw_pass.push_constant("ring_count", &tiles_dilate_ring_count_, 1);
drw_pass.push_constant("ring_width_multiplier", &tiles_dilate_ring_width_mul_, 1);
drw_pass.dispatch(&dispatch_tiles_dilate_size_);
drw_pass.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS);
}
}

void DepthOfField::gather_pass_sync()
{
gather_fg_ps_ = DRW_pass_create("Dof.gather_fg_ps_", DRW_STATE_NO_DRAW);
gather_bg_ps_ = DRW_pass_create("Dof.gather_bg_ps_", DRW_STATE_NO_DRAW);
for (int pass = 0; pass < 2; pass++) {
PassSimple &drw_pass = (pass == 0) ? gather_fg_ps_ : gather_bg_ps_;
SwapChain<TextureFromPool, 2> &color_chain = (pass == 0) ? color_fg_tx_ : color_bg_tx_;
SwapChain<TextureFromPool, 2> &weight_chain = (pass == 0) ? weight_fg_tx_ : weight_bg_tx_;
bool use_lut = bokeh_lut_ps_ != nullptr;
eShaderType sh_type = (pass == 0) ?
(use_lut ? DOF_GATHER_FOREGROUND_LUT : DOF_GATHER_FOREGROUND) :
(use_lut ? DOF_GATHER_BACKGROUND_LUT : DOF_GATHER_BACKGROUND);
GPUShader *sh = inst_.shaders.static_shader_get(sh_type);
DRWShadingGroup *grp = DRW_shgroup_create(sh, (pass == 0) ? gather_fg_ps_ : gather_bg_ps_);
inst_.sampling.bind_resources(grp);
DRW_shgroup_uniform_block(grp, "dof_buf", data_);
DRW_shgroup_uniform_texture_ex(grp, "color_bilinear_tx", reduced_color_tx_, gather_bilinear);
DRW_shgroup_uniform_texture_ex(grp, "color_tx", reduced_color_tx_, gather_nearest);
DRW_shgroup_uniform_texture_ex(grp, "coc_tx", reduced_coc_tx_, gather_nearest);
DRW_shgroup_uniform_image_ref(grp, "in_tiles_fg_img", &tiles_fg_tx_.current());
DRW_shgroup_uniform_image_ref(grp, "in_tiles_bg_img", &tiles_bg_tx_.current());
DRW_shgroup_uniform_image_ref(grp, "out_color_img", &color_chain.current());
DRW_shgroup_uniform_image_ref(grp, "out_weight_img", &weight_chain.current());
DRW_shgroup_uniform_image_ref(grp, "out_occlusion_img", &occlusion_tx_);
DRW_shgroup_uniform_texture_ref(grp, "bokeh_lut_tx", &bokeh_gather_lut_tx_);
DRW_shgroup_call_compute_ref(grp, dispatch_gather_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
(use_bokeh_lut_ ? DOF_GATHER_FOREGROUND_LUT :
DOF_GATHER_FOREGROUND) :
(use_bokeh_lut_ ? DOF_GATHER_BACKGROUND_LUT : DOF_GATHER_BACKGROUND);
drw_pass.init();
inst_.sampling.bind_resources(&drw_pass);
drw_pass.shader_set(inst_.shaders.static_shader_get(sh_type));
drw_pass.bind_ubo("dof_buf", data_);
drw_pass.bind_texture("color_bilinear_tx", reduced_color_tx_, gather_bilinear);
drw_pass.bind_texture("color_tx", reduced_color_tx_, gather_nearest);
drw_pass.bind_texture("coc_tx", reduced_coc_tx_, gather_nearest);
drw_pass.bind_image("in_tiles_fg_img", &tiles_fg_tx_.current());
drw_pass.bind_image("in_tiles_bg_img", &tiles_bg_tx_.current());
drw_pass.bind_image("out_color_img", &color_chain.current());
drw_pass.bind_image("out_weight_img", &weight_chain.current());
drw_pass.bind_image("out_occlusion_img", &occlusion_tx_);
drw_pass.bind_texture("bokeh_lut_tx", &bokeh_gather_lut_tx_);
drw_pass.dispatch(&dispatch_gather_size_);
drw_pass.barrier(GPU_BARRIER_TEXTURE_FETCH);
}
}

void DepthOfField::filter_pass_sync()
{
filter_fg_ps_ = DRW_pass_create("Dof.filter_fg_ps_", DRW_STATE_NO_DRAW);
filter_bg_ps_ = DRW_pass_create("Dof.filter_bg_ps_", DRW_STATE_NO_DRAW);
for (int pass = 0; pass < 2; pass++) {
PassSimple &drw_pass = (pass == 0) ? filter_fg_ps_ : filter_bg_ps_;
SwapChain<TextureFromPool, 2> &color_chain = (pass == 0) ? color_fg_tx_ : color_bg_tx_;
SwapChain<TextureFromPool, 2> &weight_chain = (pass == 0) ? weight_fg_tx_ : weight_bg_tx_;
GPUShader *sh = inst_.shaders.static_shader_get(DOF_FILTER);
DRWShadingGroup *grp = DRW_shgroup_create(sh, (pass == 0) ? filter_fg_ps_ : filter_bg_ps_);
DRW_shgroup_uniform_texture_ref(grp, "color_tx", &color_chain.previous());
DRW_shgroup_uniform_texture_ref(grp, "weight_tx", &weight_chain.previous());
DRW_shgroup_uniform_image_ref(grp, "out_color_img", &color_chain.current());
DRW_shgroup_uniform_image_ref(grp, "out_weight_img", &weight_chain.current());
DRW_shgroup_call_compute_ref(grp, dispatch_filter_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
drw_pass.init();
drw_pass.shader_set(inst_.shaders.static_shader_get(DOF_FILTER));
drw_pass.bind_texture("color_tx", &color_chain.previous());
drw_pass.bind_texture("weight_tx", &weight_chain.previous());
drw_pass.bind_image("out_color_img", &color_chain.current());
drw_pass.bind_image("out_weight_img", &weight_chain.current());
drw_pass.dispatch(&dispatch_filter_size_);
drw_pass.barrier(GPU_BARRIER_TEXTURE_FETCH);
}
}

void DepthOfField::scatter_pass_sync()
{
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_ADD_FULL;
scatter_fg_ps_ = DRW_pass_create("Dof.scatter_fg_ps_", state);
scatter_bg_ps_ = DRW_pass_create("Dof.scatter_bg_ps_", state);
for (int pass = 0; pass < 2; pass++) {
GPUStorageBuf *scatter_buf = (pass == 0) ? scatter_fg_indirect_buf_ : scatter_bg_indirect_buf_;
GPUStorageBuf *rect_list_buf = (pass == 0) ? scatter_fg_list_buf_ : scatter_bg_list_buf_;

GPUShader *sh = inst_.shaders.static_shader_get(DOF_SCATTER);
DRWShadingGroup *grp = DRW_shgroup_create(sh, (pass == 0) ? scatter_fg_ps_ : scatter_bg_ps_);
DRW_shgroup_uniform_bool_copy(grp, "use_bokeh_lut", bokeh_lut_ps_ != nullptr);
DRW_shgroup_storage_block(grp, "scatter_list_buf", rect_list_buf);
DRW_shgroup_uniform_texture_ref(grp, "bokeh_lut_tx", &bokeh_scatter_lut_tx_);
DRW_shgroup_uniform_texture_ref(grp, "occlusion_tx", &occlusion_tx_);
DRW_shgroup_call_procedural_indirect(grp, GPU_PRIM_TRI_STRIP, nullptr, scatter_buf);
PassSimple &drw_pass = (pass == 0) ? scatter_fg_ps_ : scatter_bg_ps_;
drw_pass.init();
drw_pass.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_ADD_FULL);
drw_pass.shader_set(inst_.shaders.static_shader_get(DOF_SCATTER));
drw_pass.push_constant("use_bokeh_lut", use_bokeh_lut_);
drw_pass.bind_texture("bokeh_lut_tx", &bokeh_scatter_lut_tx_);
drw_pass.bind_texture("occlusion_tx", &occlusion_tx_);
if (pass == 0) {
drw_pass.bind_ssbo("scatter_list_buf", scatter_fg_list_buf_);
drw_pass.draw_procedural_indirect(GPU_PRIM_TRI_STRIP, scatter_fg_indirect_buf_);
/* Avoid background gather pass writing to the occlusion_tx mid pass. */
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS);
drw_pass.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS);
}
else {
drw_pass.bind_ssbo("scatter_list_buf", scatter_bg_list_buf_);
drw_pass.draw_procedural_indirect(GPU_PRIM_TRI_STRIP, scatter_bg_indirect_buf_);
}
}
}
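Because the raster state now lives on the pass itself rather than on DRW_pass_create(), the scatter passes are the one place in this file that uses state_set(). Collecting only the added lines, the foreground branch reduces to the following sketch (the background branch differs only in the list and indirect buffers, and has no barrier):

/* Foreground scatter, assembled from the added lines above. */
PassSimple &drw_pass = scatter_fg_ps_;
drw_pass.init();
drw_pass.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_ADD_FULL);
drw_pass.shader_set(inst_.shaders.static_shader_get(DOF_SCATTER));
drw_pass.push_constant("use_bokeh_lut", use_bokeh_lut_);
drw_pass.bind_texture("bokeh_lut_tx", &bokeh_scatter_lut_tx_);
drw_pass.bind_texture("occlusion_tx", &occlusion_tx_);
drw_pass.bind_ssbo("scatter_list_buf", scatter_fg_list_buf_);
drw_pass.draw_procedural_indirect(GPU_PRIM_TRI_STRIP, scatter_fg_indirect_buf_);
/* Avoid background gather pass writing to the occlusion_tx mid pass. */
drw_pass.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS);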

void DepthOfField::hole_fill_pass_sync()
{
hole_fill_ps_ = DRW_pass_create("Dof.hole_fill_ps_", DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(DOF_GATHER_HOLE_FILL);
DRWShadingGroup *grp = DRW_shgroup_create(sh, hole_fill_ps_);
inst_.sampling.bind_resources(grp);
DRW_shgroup_uniform_block(grp, "dof_buf", data_);
DRW_shgroup_uniform_texture_ex(grp, "color_bilinear_tx", reduced_color_tx_, gather_bilinear);
DRW_shgroup_uniform_texture_ex(grp, "color_tx", reduced_color_tx_, gather_nearest);
DRW_shgroup_uniform_texture_ex(grp, "coc_tx", reduced_coc_tx_, gather_nearest);
DRW_shgroup_uniform_image_ref(grp, "in_tiles_fg_img", &tiles_fg_tx_.current());
DRW_shgroup_uniform_image_ref(grp, "in_tiles_bg_img", &tiles_bg_tx_.current());
DRW_shgroup_uniform_image_ref(grp, "out_color_img", &hole_fill_color_tx_);
DRW_shgroup_uniform_image_ref(grp, "out_weight_img", &hole_fill_weight_tx_);
DRW_shgroup_call_compute_ref(grp, dispatch_gather_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
hole_fill_ps_.init();
inst_.sampling.bind_resources(&hole_fill_ps_);
hole_fill_ps_.shader_set(inst_.shaders.static_shader_get(DOF_GATHER_HOLE_FILL));
hole_fill_ps_.bind_ubo("dof_buf", data_);
hole_fill_ps_.bind_texture("color_bilinear_tx", reduced_color_tx_, gather_bilinear);
hole_fill_ps_.bind_texture("color_tx", reduced_color_tx_, gather_nearest);
hole_fill_ps_.bind_texture("coc_tx", reduced_coc_tx_, gather_nearest);
hole_fill_ps_.bind_image("in_tiles_fg_img", &tiles_fg_tx_.current());
hole_fill_ps_.bind_image("in_tiles_bg_img", &tiles_bg_tx_.current());
hole_fill_ps_.bind_image("out_color_img", &hole_fill_color_tx_);
hole_fill_ps_.bind_image("out_weight_img", &hole_fill_weight_tx_);
hole_fill_ps_.dispatch(&dispatch_gather_size_);
hole_fill_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH);
}

void DepthOfField::resolve_pass_sync()
{
eGPUSamplerState with_filter = GPU_SAMPLER_FILTER;
RenderBuffers &render_buffers = inst_.render_buffers;
eShaderType sh_type = use_bokeh_lut_ ? DOF_RESOLVE_LUT : DOF_RESOLVE;

resolve_ps_ = DRW_pass_create("Dof.resolve_ps_", DRW_STATE_NO_DRAW);
bool use_lut = bokeh_lut_ps_ != nullptr;
eShaderType sh_type = use_lut ? DOF_RESOLVE_LUT : DOF_RESOLVE;
GPUShader *sh = inst_.shaders.static_shader_get(sh_type);
DRWShadingGroup *grp = DRW_shgroup_create(sh, resolve_ps_);
inst_.sampling.bind_resources(grp);
DRW_shgroup_uniform_block(grp, "dof_buf", data_);
DRW_shgroup_uniform_texture_ref_ex(grp, "depth_tx", &render_buffers.depth_tx, no_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "color_tx", &input_color_tx_, no_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "stable_color_tx", &resolve_stable_color_tx_, no_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "color_bg_tx", &color_bg_tx_.current(), with_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "color_fg_tx", &color_fg_tx_.current(), with_filter);
DRW_shgroup_uniform_image_ref(grp, "in_tiles_fg_img", &tiles_fg_tx_.current());
DRW_shgroup_uniform_image_ref(grp, "in_tiles_bg_img", &tiles_bg_tx_.current());
DRW_shgroup_uniform_texture_ref(grp, "weight_bg_tx", &weight_bg_tx_.current());
DRW_shgroup_uniform_texture_ref(grp, "weight_fg_tx", &weight_fg_tx_.current());
DRW_shgroup_uniform_texture_ref(grp, "color_hole_fill_tx", &hole_fill_color_tx_);
DRW_shgroup_uniform_texture_ref(grp, "weight_hole_fill_tx", &hole_fill_weight_tx_);
DRW_shgroup_uniform_texture_ref(grp, "bokeh_lut_tx", &bokeh_resolve_lut_tx_);
DRW_shgroup_uniform_image_ref(grp, "out_color_img", &output_color_tx_);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
DRW_shgroup_call_compute_ref(grp, dispatch_resolve_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
resolve_ps_.init();
inst_.sampling.bind_resources(&resolve_ps_);
resolve_ps_.shader_set(inst_.shaders.static_shader_get(sh_type));
resolve_ps_.bind_ubo("dof_buf", data_);
resolve_ps_.bind_texture("depth_tx", &render_buffers.depth_tx, no_filter);
resolve_ps_.bind_texture("color_tx", &input_color_tx_, no_filter);
resolve_ps_.bind_texture("stable_color_tx", &resolve_stable_color_tx_, no_filter);
resolve_ps_.bind_texture("color_bg_tx", &color_bg_tx_.current(), with_filter);
resolve_ps_.bind_texture("color_fg_tx", &color_fg_tx_.current(), with_filter);
resolve_ps_.bind_image("in_tiles_fg_img", &tiles_fg_tx_.current());
resolve_ps_.bind_image("in_tiles_bg_img", &tiles_bg_tx_.current());
resolve_ps_.bind_texture("weight_bg_tx", &weight_bg_tx_.current());
resolve_ps_.bind_texture("weight_fg_tx", &weight_fg_tx_.current());
resolve_ps_.bind_texture("color_hole_fill_tx", &hole_fill_color_tx_);
resolve_ps_.bind_texture("weight_hole_fill_tx", &hole_fill_weight_tx_);
resolve_ps_.bind_texture("bokeh_lut_tx", &bokeh_resolve_lut_tx_);
resolve_ps_.bind_image("out_color_img", &output_color_tx_);
resolve_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH);
resolve_ps_.dispatch(&dispatch_resolve_size_);
resolve_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH);
}

/** \} */
@@ -509,7 +497,8 @@ void DepthOfField::update_sample_table()
data_.filter_center_weight = film_filter_weight(radius, math::length_squared(subpixel_offset));
}

void DepthOfField::render(GPUTexture **input_tx,
void DepthOfField::render(View &view,
GPUTexture **input_tx,
GPUTexture **output_tx,
DepthOfFieldBuffer &dof_buffer)
{
@@ -580,6 +569,8 @@ void DepthOfField::render(GPUTexture **input_tx,

DRW_stats_group_start("Depth of Field");

Manager &drw = *inst_.manager;

{
DRW_stats_group_start("Setup");
{
@@ -587,13 +578,15 @@ void DepthOfField::render(GPUTexture **input_tx,
bokeh_scatter_lut_tx_.acquire(int2(DOF_BOKEH_LUT_SIZE), GPU_R16F);
bokeh_resolve_lut_tx_.acquire(int2(DOF_MAX_SLIGHT_FOCUS_RADIUS * 2 + 1), GPU_R16F);

DRW_draw_pass(bokeh_lut_ps_);
if (use_bokeh_lut_) {
drw.submit(bokeh_lut_ps_, view);
}
}
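From this point on, every DRW_draw_pass() call in render() becomes an explicit submission through the draw manager using the caller-provided view; the bokeh LUT submission is additionally guarded by use_bokeh_lut_ now that the pass object itself is no longer nullable. The pattern repeated below is simply:

/* Passes recorded once in the *_pass_sync() functions are replayed here, per view. */
Manager &drw = *inst_.manager;
drw.submit(setup_ps_, view);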
{
setup_color_tx_.acquire(half_res, GPU_RGBA16F);
setup_coc_tx_.acquire(half_res, GPU_R16F);

DRW_draw_pass(setup_ps_);
drw.submit(setup_ps_, view);
}
{
stabilize_output_tx_.acquire(half_res, GPU_RGBA16F);
@@ -607,7 +600,7 @@ void DepthOfField::render(GPUTexture **input_tx,

stabilize_input_ = dof_buffer.stabilize_history_tx_;
/* Outputs to reduced_*_tx_ mip 0. */
DRW_draw_pass(stabilize_ps_);
drw.submit(stabilize_ps_, view);

/* WATCH(fclem): Swap Texture an TextureFromPool internal GPUTexture in order to reuse
* the one that we just consumed. */
@@ -626,7 +619,7 @@ void DepthOfField::render(GPUTexture **input_tx,
tiles_fg_tx_.current().acquire(tile_res, GPU_R11F_G11F_B10F);
tiles_bg_tx_.current().acquire(tile_res, GPU_R11F_G11F_B10F);

DRW_draw_pass(tiles_flatten_ps_);
drw.submit(tiles_flatten_ps_, view);

/* Used by tile_flatten and stabilize_ps pass. */
setup_coc_tx_.release();
@@ -655,7 +648,7 @@ void DepthOfField::render(GPUTexture **input_tx,
tiles_fg_tx_.swap();
tiles_bg_tx_.swap();

DRW_draw_pass((pass == 0) ? tiles_dilate_minmax_ps_ : tiles_dilate_minabs_ps_);
drw.submit((pass == 0) ? tiles_dilate_minmax_ps_ : tiles_dilate_minabs_ps_, view);
}
}

@@ -667,12 +660,12 @@ void DepthOfField::render(GPUTexture **input_tx,

downsample_tx_.acquire(quarter_res, GPU_RGBA16F);

DRW_draw_pass(downsample_ps_);
drw.submit(downsample_ps_, view);

scatter_fg_indirect_buf_.clear_to_zero();
scatter_bg_indirect_buf_.clear_to_zero();

DRW_draw_pass(reduce_ps_);
drw.submit(reduce_ps_, view);

/* Used by reduce pass. */
downsample_tx_.release();
@@ -686,15 +679,15 @@ void DepthOfField::render(GPUTexture **input_tx,
SwapChain<TextureFromPool, 2> &color_tx = is_background ? color_bg_tx_ : color_fg_tx_;
SwapChain<TextureFromPool, 2> &weight_tx = is_background ? weight_bg_tx_ : weight_fg_tx_;
Framebuffer &scatter_fb = is_background ? scatter_bg_fb_ : scatter_fg_fb_;
DRWPass *gather_ps = is_background ? gather_bg_ps_ : gather_fg_ps_;
DRWPass *filter_ps = is_background ? filter_bg_ps_ : filter_fg_ps_;
DRWPass *scatter_ps = is_background ? scatter_bg_ps_ : scatter_fg_ps_;
PassSimple &gather_ps = is_background ? gather_bg_ps_ : gather_fg_ps_;
PassSimple &filter_ps = is_background ? filter_bg_ps_ : filter_fg_ps_;
PassSimple &scatter_ps = is_background ? scatter_bg_ps_ : scatter_fg_ps_;

color_tx.current().acquire(half_res, GPU_RGBA16F);
weight_tx.current().acquire(half_res, GPU_R16F);
occlusion_tx_.acquire(half_res, GPU_RG16F);

DRW_draw_pass(gather_ps);
drw.submit(gather_ps, view);

{
/* Filtering pass. */
@@ -704,7 +697,7 @@ void DepthOfField::render(GPUTexture **input_tx,
color_tx.current().acquire(half_res, GPU_RGBA16F);
weight_tx.current().acquire(half_res, GPU_R16F);

DRW_draw_pass(filter_ps);
drw.submit(filter_ps, view);

color_tx.previous().release();
weight_tx.previous().release();
@@ -715,7 +708,7 @@ void DepthOfField::render(GPUTexture **input_tx,
scatter_fb.ensure(GPU_ATTACHMENT_NONE, GPU_ATTACHMENT_TEXTURE(color_tx.current()));

GPU_framebuffer_bind(scatter_fb);
DRW_draw_pass(scatter_ps);
drw.submit(scatter_ps, view);

/* Used by scatter pass. */
occlusion_tx_.release();
@@ -731,7 +724,7 @@ void DepthOfField::render(GPUTexture **input_tx,
hole_fill_color_tx_.acquire(half_res, GPU_RGBA16F);
hole_fill_weight_tx_.acquire(half_res, GPU_R16F);

DRW_draw_pass(hole_fill_ps_);
drw.submit(hole_fill_ps_, view);

/* NOTE: We do not filter the hole-fill pass as effect is likely to not be noticeable. */

@@ -742,7 +735,7 @@ void DepthOfField::render(GPUTexture **input_tx,

resolve_stable_color_tx_ = dof_buffer.stabilize_history_tx_;

DRW_draw_pass(resolve_ps_);
drw.submit(resolve_ps_, view);

color_bg_tx_.current().release();
color_fg_tx_.current().release();
@@ -56,13 +56,13 @@ class DepthOfField {
TextureFromPool bokeh_gather_lut_tx_ = {"dof_bokeh_gather_lut"};
TextureFromPool bokeh_resolve_lut_tx_ = {"dof_bokeh_resolve_lut"};
TextureFromPool bokeh_scatter_lut_tx_ = {"dof_bokeh_scatter_lut"};
DRWPass *bokeh_lut_ps_ = nullptr;
PassSimple bokeh_lut_ps_ = {"BokehLut"};

/** Outputs half-resolution color and Circle Of Confusion. */
TextureFromPool setup_coc_tx_ = {"dof_setup_coc"};
TextureFromPool setup_color_tx_ = {"dof_setup_color"};
int3 dispatch_setup_size_ = int3(-1);
DRWPass *setup_ps_ = nullptr;
PassSimple setup_ps_ = {"Setup"};

/** Allocated because we need mip chain. Which isn't supported by TextureFromPool. */
Texture reduced_coc_tx_ = {"dof_reduced_coc"};
@@ -73,12 +73,12 @@ class DepthOfField {
GPUTexture *stabilize_input_ = nullptr;
bool1 stabilize_valid_history_ = false;
int3 dispatch_stabilize_size_ = int3(-1);
DRWPass *stabilize_ps_ = nullptr;
PassSimple stabilize_ps_ = {"Stabilize"};

/** 1/4th res color buffer used to speedup the local contrast test in the first reduce pass. */
TextureFromPool downsample_tx_ = {"dof_downsample"};
int3 dispatch_downsample_size_ = int3(-1);
DRWPass *downsample_ps_ = nullptr;
PassSimple downsample_ps_ = {"Downsample"};

/** Create mip-mapped color & COC textures for gather passes as well as scatter rect list. */
DepthOfFieldScatterListBuf scatter_fg_list_buf_;
@@ -86,20 +86,20 @@ class DepthOfField {
DrawIndirectBuf scatter_fg_indirect_buf_;
DrawIndirectBuf scatter_bg_indirect_buf_;
int3 dispatch_reduce_size_ = int3(-1);
DRWPass *reduce_ps_ = nullptr;
PassSimple reduce_ps_ = {"Reduce"};

/** Outputs min & max COC in each 8x8 half res pixel tiles (so 1/16th of full resolution). */
SwapChain<TextureFromPool, 2> tiles_fg_tx_;
SwapChain<TextureFromPool, 2> tiles_bg_tx_;
int3 dispatch_tiles_flatten_size_ = int3(-1);
DRWPass *tiles_flatten_ps_ = nullptr;
PassSimple tiles_flatten_ps_ = {"TilesFlatten"};

/** Dilates the min & max CoCs to cover maximum COC values. */
int tiles_dilate_ring_count_ = -1;
int tiles_dilate_ring_width_mul_ = -1;
int3 dispatch_tiles_dilate_size_ = int3(-1);
DRWPass *tiles_dilate_minmax_ps_ = nullptr;
DRWPass *tiles_dilate_minabs_ps_ = nullptr;
PassSimple tiles_dilate_minmax_ps_ = {"TilesDilateMinmax"};
PassSimple tiles_dilate_minabs_ps_ = {"TilesDilateMinabs"};

/** Gather convolution for low intensity pixels and low contrast areas. */
SwapChain<TextureFromPool, 2> color_bg_tx_;
@@ -108,29 +108,29 @@ class DepthOfField {
SwapChain<TextureFromPool, 2> weight_fg_tx_;
TextureFromPool occlusion_tx_ = {"dof_occlusion"};
int3 dispatch_gather_size_ = int3(-1);
DRWPass *gather_fg_ps_ = nullptr;
DRWPass *gather_bg_ps_ = nullptr;
PassSimple gather_fg_ps_ = {"GatherFg"};
PassSimple gather_bg_ps_ = {"GatherBg"};

/** Hole-fill convolution: Gather pass meant to fill areas of foreground dis-occlusion. */
TextureFromPool hole_fill_color_tx_ = {"dof_color_hole_fill"};
TextureFromPool hole_fill_weight_tx_ = {"dof_weight_hole_fill"};
DRWPass *hole_fill_ps_ = nullptr;
PassSimple hole_fill_ps_ = {"HoleFill"};

/** Small Filter pass to reduce noise out of gather passes. */
int3 dispatch_filter_size_ = int3(-1);
DRWPass *filter_fg_ps_ = nullptr;
DRWPass *filter_bg_ps_ = nullptr;
PassSimple filter_fg_ps_ = {"FilterFg"};
PassSimple filter_bg_ps_ = {"FilterBg"};

/** Scatter convolution: A quad is emitted for every 4 bright enough half pixels. */
Framebuffer scatter_fg_fb_ = {"dof_scatter_fg"};
Framebuffer scatter_bg_fb_ = {"dof_scatter_bg"};
DRWPass *scatter_fg_ps_ = nullptr;
DRWPass *scatter_bg_ps_ = nullptr;
PassSimple scatter_fg_ps_ = {"ScatterFg"};
PassSimple scatter_bg_ps_ = {"ScatterBg"};

/** Recombine the results and also perform a slight out of focus gather. */
GPUTexture *resolve_stable_color_tx_ = nullptr;
int3 dispatch_resolve_size_ = int3(-1);
DRWPass *resolve_ps_ = nullptr;
PassSimple resolve_ps_ = {"Resolve"};

DepthOfFieldDataBuf data_;

@@ -139,6 +139,8 @@ class DepthOfField {
float fx_max_coc_;
/** Use jittered depth of field where we randomize camera location. */
bool do_jitter_;
/** Enable bokeh lookup texture. */
bool use_bokeh_lut_;

/** Circle of Confusion radius for FX DoF passes. Is in view X direction in [0..1] range. */
float fx_radius_;
@@ -166,7 +168,10 @@ class DepthOfField {
* Will swap input and output texture if rendering happens. The actual output of this function
* is in input_tx.
*/
void render(GPUTexture **input_tx, GPUTexture **output_tx, DepthOfFieldBuffer &dof_buffer);
void render(View &view,
GPUTexture **input_tx,
GPUTexture **output_tx,
DepthOfFieldBuffer &dof_buffer);

bool postfx_enabled() const
{
@@ -377,48 +377,44 @@ void Film::sync()
* Still bind previous step to avoid undefined behavior. */
eVelocityStep step_next = inst_.is_viewport() ? STEP_PREVIOUS : STEP_NEXT;

DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS;
accumulate_ps_ = DRW_pass_create("Film.Accumulate", state);
GPUShader *sh = inst_.shaders.static_shader_get(shader);
DRWShadingGroup *grp = DRW_shgroup_create(sh, accumulate_ps_);
DRW_shgroup_uniform_block_ref(grp, "film_buf", &data_);
DRW_shgroup_uniform_block_ref(grp, "camera_prev", &(*velocity.camera_steps[STEP_PREVIOUS]));
DRW_shgroup_uniform_block_ref(grp, "camera_curr", &(*velocity.camera_steps[STEP_CURRENT]));
DRW_shgroup_uniform_block_ref(grp, "camera_next", &(*velocity.camera_steps[step_next]));
DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &rbuffers.depth_tx);
DRW_shgroup_uniform_texture_ref(grp, "combined_tx", &combined_final_tx_);
DRW_shgroup_uniform_texture_ref(grp, "normal_tx", &rbuffers.normal_tx);
DRW_shgroup_uniform_texture_ref(grp, "vector_tx", &rbuffers.vector_tx);
DRW_shgroup_uniform_texture_ref(grp, "light_tx", &rbuffers.light_tx);
DRW_shgroup_uniform_texture_ref(grp, "diffuse_color_tx", &rbuffers.diffuse_color_tx);
DRW_shgroup_uniform_texture_ref(grp, "specular_color_tx", &rbuffers.specular_color_tx);
DRW_shgroup_uniform_texture_ref(grp, "volume_light_tx", &rbuffers.volume_light_tx);
DRW_shgroup_uniform_texture_ref(grp, "emission_tx", &rbuffers.emission_tx);
DRW_shgroup_uniform_texture_ref(grp, "environment_tx", &rbuffers.environment_tx);
DRW_shgroup_uniform_texture_ref(grp, "shadow_tx", &rbuffers.shadow_tx);
DRW_shgroup_uniform_texture_ref(grp, "ambient_occlusion_tx", &rbuffers.ambient_occlusion_tx);
DRW_shgroup_uniform_texture_ref(grp, "aov_color_tx", &rbuffers.aov_color_tx);
DRW_shgroup_uniform_texture_ref(grp, "aov_value_tx", &rbuffers.aov_value_tx);
accumulate_ps_.init();
accumulate_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS);
accumulate_ps_.shader_set(inst_.shaders.static_shader_get(shader));
accumulate_ps_.bind_ubo("film_buf", &data_);
accumulate_ps_.bind_ubo("camera_prev", &(*velocity.camera_steps[STEP_PREVIOUS]));
accumulate_ps_.bind_ubo("camera_curr", &(*velocity.camera_steps[STEP_CURRENT]));
accumulate_ps_.bind_ubo("camera_next", &(*velocity.camera_steps[step_next]));
accumulate_ps_.bind_texture("depth_tx", &rbuffers.depth_tx);
accumulate_ps_.bind_texture("combined_tx", &combined_final_tx_);
accumulate_ps_.bind_texture("normal_tx", &rbuffers.normal_tx);
accumulate_ps_.bind_texture("vector_tx", &rbuffers.vector_tx);
accumulate_ps_.bind_texture("light_tx", &rbuffers.light_tx);
accumulate_ps_.bind_texture("diffuse_color_tx", &rbuffers.diffuse_color_tx);
accumulate_ps_.bind_texture("specular_color_tx", &rbuffers.specular_color_tx);
accumulate_ps_.bind_texture("volume_light_tx", &rbuffers.volume_light_tx);
accumulate_ps_.bind_texture("emission_tx", &rbuffers.emission_tx);
accumulate_ps_.bind_texture("environment_tx", &rbuffers.environment_tx);
accumulate_ps_.bind_texture("shadow_tx", &rbuffers.shadow_tx);
accumulate_ps_.bind_texture("ambient_occlusion_tx", &rbuffers.ambient_occlusion_tx);
accumulate_ps_.bind_texture("aov_color_tx", &rbuffers.aov_color_tx);
accumulate_ps_.bind_texture("aov_value_tx", &rbuffers.aov_value_tx);
/* NOTE(@fclem): 16 is the max number of sampled texture in many implementations.
* If we need more, we need to pack more of the similar passes in the same textures as arrays or
* use image binding instead. */
DRW_shgroup_uniform_image_ref(grp, "in_weight_img", &weight_tx_.current());
DRW_shgroup_uniform_image_ref(grp, "out_weight_img", &weight_tx_.next());
DRW_shgroup_uniform_texture_ref_ex(grp, "in_combined_tx", &combined_tx_.current(), filter);
DRW_shgroup_uniform_image_ref(grp, "out_combined_img", &combined_tx_.next());
DRW_shgroup_uniform_image_ref(grp, "depth_img", &depth_tx_);
DRW_shgroup_uniform_image_ref(grp, "color_accum_img", &color_accum_tx_);
DRW_shgroup_uniform_image_ref(grp, "value_accum_img", &value_accum_tx_);
accumulate_ps_.bind_image("in_weight_img", &weight_tx_.current());
accumulate_ps_.bind_image("out_weight_img", &weight_tx_.next());
accumulate_ps_.bind_texture("in_combined_tx", &combined_tx_.current(), filter);
accumulate_ps_.bind_image("out_combined_img", &combined_tx_.next());
accumulate_ps_.bind_image("depth_img", &depth_tx_);
accumulate_ps_.bind_image("color_accum_img", &color_accum_tx_);
accumulate_ps_.bind_image("value_accum_img", &value_accum_tx_);
/* Sync with rendering passes. */
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
/* Sync with rendering passes. */
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS);
accumulate_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_IMAGE_ACCESS);
if (use_compute) {
int2 dispatch_size = math::divide_ceil(data_.extent, int2(FILM_GROUP_SIZE));
DRW_shgroup_call_compute(grp, UNPACK2(dispatch_size), 1);
accumulate_ps_.dispatch(int3(math::divide_ceil(data_.extent, int2(FILM_GROUP_SIZE)), 1));
}
else {
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
accumulate_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
}
}
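One detail worth noting in this hunk: the old code computed an int2 group count and expanded it with UNPACK2(), while the new dispatch() takes an int3 directly, so the single call recorded above is equivalent to this two-step form (FILM_GROUP_SIZE kept symbolic):

/* Equivalent two-step form of the dispatch recorded above. */
int2 dispatch_size = math::divide_ceil(data_.extent, int2(FILM_GROUP_SIZE));
accumulate_ps_.dispatch(int3(dispatch_size, 1));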

@@ -565,8 +561,9 @@ void Film::accumulate(const DRWView *view, GPUTexture *combined_final_tx)
data_.display_only = false;
data_.push_update();

DRW_view_set_active(view);
DRW_draw_pass(accumulate_ps_);
draw::View drw_view("MainView", view);

DRW_manager_get()->submit(accumulate_ps_, drw_view);

combined_tx_.swap();
weight_tx_.swap();
@@ -593,8 +590,9 @@ void Film::display()
data_.display_only = true;
data_.push_update();

DRW_view_set_active(nullptr);
DRW_draw_pass(accumulate_ps_);
draw::View drw_view("MainView", DRW_view_default_get());

DRW_manager_get()->submit(accumulate_ps_, drw_view);

inst_.render_buffers.release();

@@ -55,7 +55,7 @@ class Film {
/** User setting to disable reprojection. Useful for debugging or have a more precise render. */
bool force_disable_reprojection_ = false;

DRWPass *accumulate_ps_ = nullptr;
PassSimple accumulate_ps_ = {"Film.Accumulate"};

FilmDataBuf data_;
@@ -32,36 +32,31 @@ void HiZBuffer::sync()
data_.push_update();

{
hiz_update_ps_ = DRW_pass_create("HizUpdate", DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(HIZ_UPDATE);
DRWShadingGroup *grp = DRW_shgroup_create(sh, hiz_update_ps_);
DRW_shgroup_storage_block(grp, "finished_tile_counter", atomic_tile_counter_);
DRW_shgroup_uniform_texture_ref_ex(grp, "depth_tx", &render_buffers.depth_tx, with_filter);
DRW_shgroup_uniform_image(grp, "out_mip_0", hiz_tx_.mip_view(0));
DRW_shgroup_uniform_image(grp, "out_mip_1", hiz_tx_.mip_view(1));
DRW_shgroup_uniform_image(grp, "out_mip_2", hiz_tx_.mip_view(2));
DRW_shgroup_uniform_image(grp, "out_mip_3", hiz_tx_.mip_view(3));
DRW_shgroup_uniform_image(grp, "out_mip_4", hiz_tx_.mip_view(4));
DRW_shgroup_uniform_image(grp, "out_mip_5", hiz_tx_.mip_view(5));
DRW_shgroup_uniform_image(grp, "out_mip_6", hiz_tx_.mip_view(6));
DRW_shgroup_uniform_image(grp, "out_mip_7", hiz_tx_.mip_view(7));
hiz_update_ps_.init();
hiz_update_ps_.shader_set(inst_.shaders.static_shader_get(HIZ_UPDATE));
hiz_update_ps_.bind_ssbo("finished_tile_counter", atomic_tile_counter_);
hiz_update_ps_.bind_texture("depth_tx", &render_buffers.depth_tx, with_filter);
hiz_update_ps_.bind_image("out_mip_0", hiz_tx_.mip_view(0));
hiz_update_ps_.bind_image("out_mip_1", hiz_tx_.mip_view(1));
hiz_update_ps_.bind_image("out_mip_2", hiz_tx_.mip_view(2));
hiz_update_ps_.bind_image("out_mip_3", hiz_tx_.mip_view(3));
hiz_update_ps_.bind_image("out_mip_4", hiz_tx_.mip_view(4));
hiz_update_ps_.bind_image("out_mip_5", hiz_tx_.mip_view(5));
hiz_update_ps_.bind_image("out_mip_6", hiz_tx_.mip_view(6));
hiz_update_ps_.bind_image("out_mip_7", hiz_tx_.mip_view(7));
/* TODO(@fclem): There might be occasions where we might not want to
* copy mip 0 for performance reasons if there is no need for it. */
DRW_shgroup_uniform_bool_copy(grp, "update_mip_0", true);
DRW_shgroup_call_compute(grp, UNPACK2(dispatch_size), 1);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
hiz_update_ps_.push_constant("update_mip_0", true);
hiz_update_ps_.dispatch(int3(dispatch_size, 1));
hiz_update_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH);
}

if (inst_.debug_mode == eDebugMode::DEBUG_HIZ_VALIDATION) {
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM;
debug_draw_ps_ = DRW_pass_create("HizUpdate.Debug", state);
GPUShader *sh = inst_.shaders.static_shader_get(HIZ_DEBUG);
DRWShadingGroup *grp = DRW_shgroup_create(sh, debug_draw_ps_);
this->bind_resources(grp);
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
}
else {
debug_draw_ps_ = nullptr;
debug_draw_ps_.init();
debug_draw_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM);
debug_draw_ps_.shader_set(inst_.shaders.static_shader_get(HIZ_DEBUG));
this->bind_resources(&debug_draw_ps_);
debug_draw_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
}
}

@@ -79,22 +74,24 @@ void HiZBuffer::update()
GPU_framebuffer_restore();
}

DRW_draw_pass(hiz_update_ps_);
inst_.manager->submit(hiz_update_ps_);

if (G.debug & G_DEBUG_GPU) {
GPU_framebuffer_bind(fb);
}
}

void HiZBuffer::debug_draw(GPUFrameBuffer *view_fb)
void HiZBuffer::debug_draw(View &view, GPUFrameBuffer *view_fb)
{
if (debug_draw_ps_ == nullptr) {
return;
if (inst_.debug_mode == eDebugMode::DEBUG_HIZ_VALIDATION) {
inst_.info =
"Debug Mode: HiZ Validation\n"
" - Red: pixel in front of HiZ tile value.\n"
" - Blue: No error.";
inst_.hiz_buffer.update();
GPU_framebuffer_bind(view_fb);
inst_.manager->submit(debug_draw_ps_, view);
}
inst_.info = "Debug Mode: HiZ Validation";
inst_.hiz_buffer.update();
GPU_framebuffer_bind(view_fb);
DRW_draw_pass(debug_draw_ps_);
}

/** \} */

@@ -36,9 +36,9 @@ class HiZBuffer {
*/
draw::StorageBuffer<uint4, true> atomic_tile_counter_ = {"atomic_tile_counter"};
/** Single pass recursive downsample. */
DRWPass *hiz_update_ps_ = nullptr;
PassSimple hiz_update_ps_ = {"HizUpdate"};
/** Debug pass. */
DRWPass *debug_draw_ps_ = nullptr;
PassSimple debug_draw_ps_ = {"HizUpdate.Debug"};
/** Dirty flag to check if the update is necessary. */
bool is_dirty_ = true;

@@ -67,13 +67,20 @@ class HiZBuffer {
*/
void update();

void debug_draw(GPUFrameBuffer *view_fb);
void debug_draw(View &view, GPUFrameBuffer *view_fb);

void bind_resources(DRWShadingGroup *grp)
{
DRW_shgroup_uniform_texture_ref(grp, "hiz_tx", &hiz_tx_);
DRW_shgroup_uniform_block_ref(grp, "hiz_buf", &data_);
}

/* TODO(fclem): Hardcoded bind slots. */
template<typename T> void bind_resources(draw::detail::PassBase<T> *pass)
{
pass->bind_texture("hiz_tx", &hiz_tx_);
pass->bind_ubo("hiz_buf", &data_);
}
};
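The templated overload added here is what lets other modules pull the HiZ bindings into a new-API pass; the light-culling debug pass later in this commit uses it exactly like this:

/* From LightModule::debug_pass_sync() further down. */
debug_draw_ps_.shader_set(inst_.shaders.static_shader_get(LIGHT_CULLING_DEBUG));
inst_.hiz_buffer.bind_resources(&debug_draw_ps_); /* Binds "hiz_tx" and "hiz_buf". */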

/** \} */

@@ -52,6 +52,7 @@ void Instance::init(const int2 &output_res,
drw_view = drw_view_;
v3d = v3d_;
rv3d = rv3d_;
manager = DRW_manager_get();

if (assign_if_different(debug_mode, (eDebugMode)G.debug_value)) {
sampling.reset();
@@ -126,12 +127,16 @@ void Instance::object_sync(Object *ob)
return;
}

/* TODO cleanup. */
ObjectRef ob_ref = DRW_object_ref_get(ob);
ResourceHandle res_handle = manager->resource_handle(ob_ref);

ObjectHandle &ob_handle = sync.sync_object(ob);

if (partsys_is_visible && ob != DRW_context_state_get()->object_edit) {
LISTBASE_FOREACH (ModifierData *, md, &ob->modifiers) {
if (md->type == eModifierType_ParticleSystem) {
sync.sync_curves(ob, ob_handle, md);
sync.sync_curves(ob, ob_handle, res_handle, md);
}
}
}
@@ -142,15 +147,15 @@ void Instance::object_sync(Object *ob)
lights.sync_light(ob, ob_handle);
break;
case OB_MESH:
sync.sync_mesh(ob, ob_handle);
sync.sync_mesh(ob, ob_handle, res_handle, ob_ref);
break;
case OB_VOLUME:
break;
case OB_CURVES:
sync.sync_curves(ob, ob_handle);
sync.sync_curves(ob, ob_handle, res_handle);
break;
case OB_GPENCIL:
sync.sync_gpencil(ob, ob_handle);
sync.sync_gpencil(ob, ob_handle, res_handle);
break;
default:
break;

@@ -59,6 +59,7 @@ class Instance {

/** Input data. */
Depsgraph *depsgraph;
Manager *manager;
/** Evaluated IDs. */
Scene *scene;
ViewLayer *view_layer;
@@ -399,76 +399,70 @@ void LightModule::culling_pass_sync()
uint culling_tile_dispatch_size = divide_ceil_u(total_word_count_, CULLING_TILE_GROUP_SIZE);

/* NOTE: We reference the buffers that may be resized or updated later. */

culling_ps_.init();
{
DRW_PASS_CREATE(culling_select_ps_, DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(LIGHT_CULLING_SELECT);
DRWShadingGroup *grp = DRW_shgroup_create(sh, culling_select_ps_);
DRW_shgroup_storage_block_ref(grp, "light_cull_buf", &culling_data_buf_);
DRW_shgroup_storage_block(grp, "in_light_buf", light_buf_);
DRW_shgroup_storage_block(grp, "out_light_buf", culling_light_buf_);
DRW_shgroup_storage_block(grp, "out_zdist_buf", culling_zdist_buf_);
DRW_shgroup_storage_block(grp, "out_key_buf", culling_key_buf_);
DRW_shgroup_call_compute(grp, culling_select_dispatch_size, 1, 1);
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_STORAGE);
auto &sub = culling_ps_.sub("Select");
sub.shader_set(inst_.shaders.static_shader_get(LIGHT_CULLING_SELECT));
sub.bind_ssbo("light_cull_buf", &culling_data_buf_);
sub.bind_ssbo("in_light_buf", light_buf_);
sub.bind_ssbo("out_light_buf", culling_light_buf_);
sub.bind_ssbo("out_zdist_buf", culling_zdist_buf_);
sub.bind_ssbo("out_key_buf", culling_key_buf_);
sub.dispatch(int3(culling_select_dispatch_size, 1, 1));
sub.barrier(GPU_BARRIER_SHADER_STORAGE);
}
{
DRW_PASS_CREATE(culling_sort_ps_, DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(LIGHT_CULLING_SORT);
DRWShadingGroup *grp = DRW_shgroup_create(sh, culling_sort_ps_);
DRW_shgroup_storage_block_ref(grp, "light_cull_buf", &culling_data_buf_);
DRW_shgroup_storage_block(grp, "in_light_buf", light_buf_);
DRW_shgroup_storage_block(grp, "out_light_buf", culling_light_buf_);
DRW_shgroup_storage_block(grp, "in_zdist_buf", culling_zdist_buf_);
DRW_shgroup_storage_block(grp, "in_key_buf", culling_key_buf_);
DRW_shgroup_call_compute(grp, culling_sort_dispatch_size, 1, 1);
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_STORAGE);
auto &sub = culling_ps_.sub("Sort");
sub.shader_set(inst_.shaders.static_shader_get(LIGHT_CULLING_SORT));
sub.bind_ssbo("light_cull_buf", &culling_data_buf_);
sub.bind_ssbo("in_light_buf", light_buf_);
sub.bind_ssbo("out_light_buf", culling_light_buf_);
sub.bind_ssbo("in_zdist_buf", culling_zdist_buf_);
sub.bind_ssbo("in_key_buf", culling_key_buf_);
sub.dispatch(int3(culling_sort_dispatch_size, 1, 1));
sub.barrier(GPU_BARRIER_SHADER_STORAGE);
}
{
DRW_PASS_CREATE(culling_zbin_ps_, DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(LIGHT_CULLING_ZBIN);
DRWShadingGroup *grp = DRW_shgroup_create(sh, culling_zbin_ps_);
DRW_shgroup_storage_block_ref(grp, "light_cull_buf", &culling_data_buf_);
DRW_shgroup_storage_block(grp, "light_buf", culling_light_buf_);
DRW_shgroup_storage_block(grp, "out_zbin_buf", culling_zbin_buf_);
DRW_shgroup_call_compute(grp, 1, 1, 1);
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_STORAGE);
auto &sub = culling_ps_.sub("Zbin");
sub.shader_set(inst_.shaders.static_shader_get(LIGHT_CULLING_ZBIN));
sub.bind_ssbo("light_cull_buf", &culling_data_buf_);
sub.bind_ssbo("light_buf", culling_light_buf_);
sub.bind_ssbo("out_zbin_buf", culling_zbin_buf_);
sub.dispatch(int3(1, 1, 1));
sub.barrier(GPU_BARRIER_SHADER_STORAGE);
}
{
DRW_PASS_CREATE(culling_tile_ps_, DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(LIGHT_CULLING_TILE);
DRWShadingGroup *grp = DRW_shgroup_create(sh, culling_tile_ps_);
DRW_shgroup_storage_block_ref(grp, "light_cull_buf", &culling_data_buf_);
DRW_shgroup_storage_block(grp, "light_buf", culling_light_buf_);
DRW_shgroup_storage_block(grp, "out_light_tile_buf", culling_tile_buf_);
DRW_shgroup_call_compute(grp, culling_tile_dispatch_size, 1, 1);
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_STORAGE);
auto &sub = culling_ps_.sub("Tiles");
sub.shader_set(inst_.shaders.static_shader_get(LIGHT_CULLING_TILE));
sub.bind_ssbo("light_cull_buf", &culling_data_buf_);
sub.bind_ssbo("light_buf", culling_light_buf_);
sub.bind_ssbo("out_light_tile_buf", culling_tile_buf_);
sub.dispatch(int3(culling_tile_dispatch_size, 1, 1));
sub.barrier(GPU_BARRIER_SHADER_STORAGE);
}
}
|
||||
|
||||
void LightModule::debug_pass_sync()
|
||||
{
|
||||
if (inst_.debug_mode != eDebugMode::DEBUG_LIGHT_CULLING) {
|
||||
debug_draw_ps_ = nullptr;
|
||||
return;
|
||||
if (inst_.debug_mode == eDebugMode::DEBUG_LIGHT_CULLING) {
|
||||
debug_draw_ps_.init();
|
||||
debug_draw_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM);
|
||||
debug_draw_ps_.shader_set(inst_.shaders.static_shader_get(LIGHT_CULLING_DEBUG));
|
||||
inst_.hiz_buffer.bind_resources(&debug_draw_ps_);
|
||||
debug_draw_ps_.bind_ssbo("light_buf", &culling_light_buf_);
|
||||
debug_draw_ps_.bind_ssbo("light_cull_buf", &culling_data_buf_);
|
||||
debug_draw_ps_.bind_ssbo("light_zbin_buf", &culling_zbin_buf_);
|
||||
debug_draw_ps_.bind_ssbo("light_tile_buf", &culling_tile_buf_);
|
||||
debug_draw_ps_.bind_texture("depth_tx", &inst_.render_buffers.depth_tx);
|
||||
debug_draw_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
|
||||
}
|
||||
|
||||
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM;
|
||||
debug_draw_ps_ = DRW_pass_create("LightCulling.Debug", state);
|
||||
GPUShader *sh = inst_.shaders.static_shader_get(LIGHT_CULLING_DEBUG);
|
||||
DRWShadingGroup *grp = DRW_shgroup_create(sh, debug_draw_ps_);
|
||||
inst_.hiz_buffer.bind_resources(grp);
|
||||
DRW_shgroup_storage_block_ref(grp, "light_buf", &culling_light_buf_);
|
||||
DRW_shgroup_storage_block_ref(grp, "light_cull_buf", &culling_data_buf_);
|
||||
DRW_shgroup_storage_block_ref(grp, "light_zbin_buf", &culling_zbin_buf_);
|
||||
DRW_shgroup_storage_block_ref(grp, "light_tile_buf", &culling_tile_buf_);
|
||||
DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &inst_.render_buffers.depth_tx);
|
||||
DRW_shgroup_call_procedural_triangles(grp, nullptr, 1);
|
||||
}
|
||||
|
||||
void LightModule::set_view(const DRWView *view, const int2 extent)
|
||||
void LightModule::set_view(View &view, const int2 extent)
|
||||
{
|
||||
float far_z = DRW_view_far_distance_get(view);
|
||||
float near_z = DRW_view_near_distance_get(view);
|
||||
float far_z = view.far_clip();
|
||||
float near_z = view.near_clip();
|
||||
|
||||
culling_data_buf_.zbin_scale = -CULLING_ZBIN_COUNT / fabsf(far_z - near_z);
|
||||
culling_data_buf_.zbin_bias = -near_z * culling_data_buf_.zbin_scale;
|
||||
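A minimal sketch of how a scale/bias pair like this is consumed on the GPU side, assuming a depth value `view_z` that follows the same sign convention as `near_z`/`far_z` above (illustrative only, not the culling shader shipped with this commit):

/* Minimal sketch: an affine scale/bias maps the [near_z..far_z] range onto
 * CULLING_ZBIN_COUNT bins; the clamp guards values outside the clip range.
 * (std::clamp from <algorithm>.) */
static int zbin_from_depth(float view_z, float zbin_scale, float zbin_bias)
{
  int zbin = int(view_z * zbin_scale + zbin_bias);
  return std::clamp(zbin, 0, CULLING_ZBIN_COUNT - 1);
}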
@@ -476,26 +470,17 @@ void LightModule::set_view(const DRWView *view, const int2 extent)
culling_data_buf_.visible_count = 0;
culling_data_buf_.push_update();

DRW_stats_group_start("Light Culling");

DRW_view_set_active(view);
DRW_draw_pass(culling_select_ps_);
DRW_draw_pass(culling_sort_ps_);
DRW_draw_pass(culling_zbin_ps_);
DRW_draw_pass(culling_tile_ps_);

DRW_stats_group_end();
inst_.manager->submit(culling_ps_, view);
}

void LightModule::debug_draw(GPUFrameBuffer *view_fb)
void LightModule::debug_draw(View &view, GPUFrameBuffer *view_fb)
{
if (debug_draw_ps_ == nullptr) {
return;
if (inst_.debug_mode == eDebugMode::DEBUG_LIGHT_CULLING) {
inst_.info = "Debug Mode: Light Culling Validation";
inst_.hiz_buffer.update();
GPU_framebuffer_bind(view_fb);
inst_.manager->submit(debug_draw_ps_, view);
}
inst_.info = "Debug Mode: Light Culling Validation";
inst_.hiz_buffer.update();
GPU_framebuffer_bind(view_fb);
DRW_draw_pass(debug_draw_ps_);
}

/** \} */

@@ -116,16 +116,12 @@ class LightModule {
/** Bitmap of lights touching each tile. */
LightCullingTileBuf culling_tile_buf_ = {"LightCull_tile"};
/** Culling compute passes. */
DRWPass *culling_select_ps_ = nullptr;
DRWPass *culling_sort_ps_ = nullptr;
DRWPass *culling_zbin_ps_ = nullptr;
DRWPass *culling_tile_ps_ = nullptr;
PassSimple culling_ps_ = {"LightCulling"};
/** Total number of words the tile buffer needs to contain for the render resolution. */
uint total_word_count_ = 0;

/** Debug Culling visualization. */
DRWPass *debug_draw_ps_ = nullptr;
/* GPUTexture *input_depth_tx_ = nullptr; */
PassSimple debug_draw_ps_ = {"LightCulling.Debug"};

public:
LightModule(Instance &inst) : inst_(inst){};

@@ -138,9 +134,9 @@ class LightModule {
/**
* Update acceleration structure for the given view.
*/
void set_view(const DRWView *view, const int2 extent);
void set_view(View &view, const int2 extent);

void debug_draw(GPUFrameBuffer *view_fb);
void debug_draw(View &view, GPUFrameBuffer *view_fb);

void bind_resources(DRWShadingGroup *grp)
{

@@ -154,6 +150,15 @@ class LightModule {
#endif
}

template<typename T> void bind_resources(draw::detail::PassBase<T> *pass)
{
/* Storage Buf. */
pass->bind_ssbo(LIGHT_CULL_BUF_SLOT, &culling_data_buf_);
pass->bind_ssbo(LIGHT_BUF_SLOT, &culling_light_buf_);
pass->bind_ssbo(LIGHT_ZBIN_BUF_SLOT, &culling_zbin_buf_);
pass->bind_ssbo(LIGHT_TILE_BUF_SLOT, &culling_tile_buf_);
}
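The templated overload above replaces the per-shgroup `DRW_shgroup_storage_block_ref()` calls with the fixed `LIGHT_*_BUF_SLOT` bindings from the defines at the top of this commit. A usage sketch (pass and shader names are placeholders, the pattern matches the forward opaque pass later in this commit):

/* Usage sketch only; `shading_ps_` and MY_SHADER are placeholders. */
PassMain shading_ps_ = {"Example.Shading"};
shading_ps_.init();
shading_ps_.shader_set(inst_.shaders.static_shader_get(MY_SHADER));
/* One call binds the culling data, light, Z-bin and tile buffers at their fixed slots. */
inst_.lights.bind_resources(&shading_ps_);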
private:
void culling_pass_sync();
void debug_pass_sync();

@@ -145,9 +145,6 @@ MaterialModule::MaterialModule(Instance &inst) : inst_(inst)

MaterialModule::~MaterialModule()
{
for (Material *mat : material_map_.values()) {
delete mat;
}
BKE_id_free(nullptr, glossy_mat);
BKE_id_free(nullptr, diffuse_mat);
BKE_id_free(nullptr, error_mat_);

@@ -157,13 +154,12 @@ void MaterialModule::begin_sync()
{
queued_shaders_count = 0;

for (Material *mat : material_map_.values()) {
mat->init = false;
}
material_map_.clear();
shader_map_.clear();
}

MaterialPass MaterialModule::material_pass_get(::Material *blender_mat,
MaterialPass MaterialModule::material_pass_get(Object *ob,
::Material *blender_mat,
eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type)
{

@@ -203,35 +199,34 @@ MaterialPass MaterialModule::material_pass_get(::Material *blender_mat,
pipeline_type = MAT_PIPE_FORWARD;
}

if ((pipeline_type == MAT_PIPE_FORWARD) &&
if (ELEM(pipeline_type,
MAT_PIPE_FORWARD,
MAT_PIPE_FORWARD_PREPASS,
MAT_PIPE_FORWARD_PREPASS_VELOCITY) &&
GPU_material_flag_get(matpass.gpumat, GPU_MATFLAG_TRANSPARENT)) {
/* Transparent needs to use one shgroup per object to support reordering. */
matpass.shgrp = inst_.pipelines.material_add(blender_mat, matpass.gpumat, pipeline_type);
/* Transparent pass is generated later. */
matpass.sub_pass = nullptr;
}
else {
ShaderKey shader_key(matpass.gpumat, geometry_type, pipeline_type);

auto add_cb = [&]() -> DRWShadingGroup * {
/* First time encountering this shader. Create a shading group. */
return inst_.pipelines.material_add(blender_mat, matpass.gpumat, pipeline_type);
};
DRWShadingGroup *grp = shader_map_.lookup_or_add_cb(shader_key, add_cb);
PassMain::Sub *shader_sub = shader_map_.lookup_or_add_cb(shader_key, [&]() {
/* First time encountering this shader. Create a sub that will contain materials using it. */
return inst_.pipelines.material_add(ob, blender_mat, matpass.gpumat, pipeline_type);
});

if (grp != nullptr) {
/* Shading group for this shader already exists. Create a sub one for this material. */
/* IMPORTANT: We always create a subgroup so that all subgroups are inserted after the
* first "empty" shgroup. This avoids messing the order of subgroups when there is more
* nested subgroup (i.e: hair drawing). */
/* TODO(@fclem): Remove material resource binding from the first group creation. */
matpass.shgrp = DRW_shgroup_create_sub(grp);
DRW_shgroup_add_material_resources(matpass.shgrp, matpass.gpumat);
if (shader_sub != nullptr) {
/* Create a sub for this material as `shader_sub` is for sharing shader between materials. */
matpass.sub_pass = &shader_sub->sub(GPU_material_get_name(matpass.gpumat));
matpass.sub_pass->material_set(*inst_.manager, matpass.gpumat);
}
}

return matpass;
}
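Both `shader_map_` and `material_map_` rely on `Map::lookup_or_add_cb()`, which invokes the creation callback only the first time a key is seen; a minimal sketch of that behaviour, assuming `blender::Map` from `BLI_map.hh`:

/* Minimal sketch: the callback runs once per key, later lookups reuse the stored
 * value. This is what de-duplicates shader subs and materials above. */
blender::Map<int, int> map;
int calls = 0;
map.lookup_or_add_cb(7, [&]() { calls++; return 10; });
map.lookup_or_add_cb(7, [&]() { calls++; return 20; }); /* Callback not invoked again. */
BLI_assert(calls == 1);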
Material &MaterialModule::material_sync(::Material *blender_mat,
Material &MaterialModule::material_sync(Object *ob,
::Material *blender_mat,
eMaterialGeometry geometry_type,
bool has_motion)
{

@@ -249,27 +244,32 @@ Material &MaterialModule::material_sync(::Material *blender_mat,

MaterialKey material_key(blender_mat, geometry_type, surface_pipe);

/* TODO: allocate in blocks to avoid memory fragmentation. */
auto add_cb = [&]() { return new Material(); };
Material &mat = *material_map_.lookup_or_add_cb(material_key, add_cb);

/* Forward pipeline needs to use one shgroup per object. */
if (mat.init == false || (surface_pipe == MAT_PIPE_FORWARD)) {
mat.init = true;
Material &mat = material_map_.lookup_or_add_cb(material_key, [&]() {
Material mat;
/* Order is important for transparent. */
mat.prepass = material_pass_get(blender_mat, prepass_pipe, geometry_type);
mat.shading = material_pass_get(blender_mat, surface_pipe, geometry_type);
mat.prepass = material_pass_get(ob, blender_mat, prepass_pipe, geometry_type);
mat.shading = material_pass_get(ob, blender_mat, surface_pipe, geometry_type);
if (blender_mat->blend_shadow == MA_BS_NONE) {
mat.shadow = MaterialPass();
}
else {
mat.shadow = material_pass_get(blender_mat, MAT_PIPE_SHADOW, geometry_type);
mat.shadow = material_pass_get(ob, blender_mat, MAT_PIPE_SHADOW, geometry_type);
}

mat.is_alpha_blend_transparent = (blender_mat->blend_method == MA_BM_BLEND) &&
GPU_material_flag_get(mat.prepass.gpumat,
GPU_material_flag_get(mat.shading.gpumat,
GPU_MATFLAG_TRANSPARENT);
return mat;
});

if (mat.is_alpha_blend_transparent) {
/* Transparent needs to use one sub pass per object to support reordering.
* NOTE: Pre-pass needs to be created first in order to be sorted first. */
mat.prepass.sub_pass = inst_.pipelines.forward.prepass_transparent_add(
ob, blender_mat, mat.shading.gpumat);
mat.shading.sub_pass = inst_.pipelines.forward.material_transparent_add(
ob, blender_mat, mat.shading.gpumat);
}

return mat;
}

@@ -297,7 +297,7 @@ MaterialArray &MaterialModule::material_array_get(Object *ob, bool has_motion)

for (auto i : IndexRange(materials_len)) {
::Material *blender_mat = material_from_slot(ob, i);
Material &mat = material_sync(blender_mat, to_material_geometry(ob), has_motion);
Material &mat = material_sync(ob, blender_mat, to_material_geometry(ob), has_motion);
material_array_.materials.append(&mat);
material_array_.gpu_materials.append(mat.shading.gpumat);
}

@@ -310,7 +310,7 @@ Material &MaterialModule::material_get(Object *ob,
eMaterialGeometry geometry_type)
{
::Material *blender_mat = material_from_slot(ob, mat_nr);
Material &mat = material_sync(blender_mat, geometry_type, has_motion);
Material &mat = material_sync(ob, blender_mat, geometry_type, has_motion);
return mat;
}

@@ -203,12 +203,11 @@ class DefaultSurfaceNodeTree {
* \{ */

struct MaterialPass {
GPUMaterial *gpumat = nullptr;
DRWShadingGroup *shgrp = nullptr;
GPUMaterial *gpumat;
PassMain::Sub *sub_pass;
};

struct Material {
bool init = false;
bool is_alpha_blend_transparent;
MaterialPass shadow, shading, prepass;
};

@@ -228,8 +227,8 @@ class MaterialModule {
private:
Instance &inst_;

Map<MaterialKey, Material *> material_map_;
Map<ShaderKey, DRWShadingGroup *> shader_map_;
Map<MaterialKey, Material> material_map_;
Map<ShaderKey, PassMain::Sub *> shader_map_;

MaterialArray material_array_;

@@ -254,13 +253,15 @@ class MaterialModule {
Material &material_get(Object *ob, bool has_motion, int mat_nr, eMaterialGeometry geometry_type);

private:
Material &material_sync(::Material *blender_mat,
Material &material_sync(Object *ob,
::Material *blender_mat,
eMaterialGeometry geometry_type,
bool has_motion);

/** Return correct material or empty default material if slot is empty. */
::Material *material_from_slot(Object *ob, int slot);
MaterialPass material_pass_get(::Material *blender_mat,
MaterialPass material_pass_get(Object *ob,
::Material *blender_mat,
eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type);
};
@@ -135,53 +135,49 @@ void MotionBlurModule::sync()
eGPUSamplerState no_filter = GPU_SAMPLER_DEFAULT;
RenderBuffers &render_buffers = inst_.render_buffers;

motion_blur_ps_.init();
inst_.velocity.bind_resources(&motion_blur_ps_);
inst_.sampling.bind_resources(&motion_blur_ps_);
{
/* Create max velocity tiles. */
DRW_PASS_CREATE(tiles_flatten_ps_, DRW_STATE_NO_DRAW);
PassSimple::Sub &sub = motion_blur_ps_.sub("TilesFlatten");
eShaderType shader = (inst_.is_viewport()) ? MOTION_BLUR_TILE_FLATTEN_VIEWPORT :
MOTION_BLUR_TILE_FLATTEN_RENDER;
GPUShader *sh = inst_.shaders.static_shader_get(shader);
DRWShadingGroup *grp = DRW_shgroup_create(sh, tiles_flatten_ps_);
inst_.velocity.bind_resources(grp);
DRW_shgroup_uniform_block(grp, "motion_blur_buf", data_);
DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &render_buffers.depth_tx);
DRW_shgroup_uniform_image_ref(grp, "velocity_img", &render_buffers.vector_tx);
DRW_shgroup_uniform_image_ref(grp, "out_tiles_img", &tiles_tx_);

DRW_shgroup_call_compute_ref(grp, dispatch_flatten_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS | GPU_BARRIER_TEXTURE_FETCH);
sub.shader_set(inst_.shaders.static_shader_get(shader));
sub.bind_ubo("motion_blur_buf", data_);
sub.bind_texture("depth_tx", &render_buffers.depth_tx);
sub.bind_image("velocity_img", &render_buffers.vector_tx);
sub.bind_image("out_tiles_img", &tiles_tx_);
sub.dispatch(&dispatch_flatten_size_);
sub.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS | GPU_BARRIER_TEXTURE_FETCH);
}
{
/* Expand max velocity tiles by spreading them in their neighborhood. */
DRW_PASS_CREATE(tiles_dilate_ps_, DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(MOTION_BLUR_TILE_DILATE);
DRWShadingGroup *grp = DRW_shgroup_create(sh, tiles_dilate_ps_);
DRW_shgroup_storage_block(grp, "tile_indirection_buf", tile_indirection_buf_);
DRW_shgroup_uniform_image_ref(grp, "in_tiles_img", &tiles_tx_);

DRW_shgroup_call_compute_ref(grp, dispatch_dilate_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_STORAGE);
PassSimple::Sub &sub = motion_blur_ps_.sub("TilesDilate");
sub.shader_set(inst_.shaders.static_shader_get(MOTION_BLUR_TILE_DILATE));
sub.bind_ssbo("tile_indirection_buf", tile_indirection_buf_);
sub.bind_image("in_tiles_img", &tiles_tx_);
sub.dispatch(&dispatch_dilate_size_);
sub.barrier(GPU_BARRIER_SHADER_STORAGE);
}
{
/* Do the motion blur gather algorithm. */
DRW_PASS_CREATE(gather_ps_, DRW_STATE_NO_DRAW);
GPUShader *sh = inst_.shaders.static_shader_get(MOTION_BLUR_GATHER);
DRWShadingGroup *grp = DRW_shgroup_create(sh, gather_ps_);
inst_.sampling.bind_resources(grp);
DRW_shgroup_uniform_block(grp, "motion_blur_buf", data_);
DRW_shgroup_storage_block(grp, "tile_indirection_buf", tile_indirection_buf_);
DRW_shgroup_uniform_texture_ref_ex(grp, "depth_tx", &render_buffers.depth_tx, no_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "velocity_tx", &render_buffers.vector_tx, no_filter);
DRW_shgroup_uniform_texture_ref_ex(grp, "in_color_tx", &input_color_tx_, no_filter);
DRW_shgroup_uniform_image_ref(grp, "in_tiles_img", &tiles_tx_);
DRW_shgroup_uniform_image_ref(grp, "out_color_img", &output_color_tx_);
PassSimple::Sub &sub = motion_blur_ps_.sub("ConvolveGather");
sub.shader_set(inst_.shaders.static_shader_get(MOTION_BLUR_GATHER));
sub.bind_ubo("motion_blur_buf", data_);
sub.bind_ssbo("tile_indirection_buf", tile_indirection_buf_);
sub.bind_texture("depth_tx", &render_buffers.depth_tx, no_filter);
sub.bind_texture("velocity_tx", &render_buffers.vector_tx, no_filter);
sub.bind_texture("in_color_tx", &input_color_tx_, no_filter);
sub.bind_image("in_tiles_img", &tiles_tx_);
sub.bind_image("out_color_img", &output_color_tx_);

DRW_shgroup_call_compute_ref(grp, dispatch_gather_size_);
DRW_shgroup_barrier(grp, GPU_BARRIER_TEXTURE_FETCH);
sub.dispatch(&dispatch_gather_size_);
sub.barrier(GPU_BARRIER_TEXTURE_FETCH);
}
}

void MotionBlurModule::render(GPUTexture **input_tx, GPUTexture **output_tx)
void MotionBlurModule::render(View &view, GPUTexture **input_tx, GPUTexture **output_tx)
{
if (!motion_blur_fx_enabled_) {
return;

@@ -239,9 +235,7 @@ void MotionBlurModule::render(GPUTexture **input_tx, GPUTexture **output_tx)

GPU_storagebuf_clear_to_zero(tile_indirection_buf_);

DRW_draw_pass(tiles_flatten_ps_);
DRW_draw_pass(tiles_dilate_ps_);
DRW_draw_pass(gather_ps_);
inst_.manager->submit(motion_blur_ps_, view);

tiles_tx_.release();

@@ -95,9 +95,7 @@ class MotionBlurModule {
GPUTexture *input_color_tx_ = nullptr;
GPUTexture *output_color_tx_ = nullptr;

DRWPass *tiles_flatten_ps_ = nullptr;
DRWPass *tiles_dilate_ps_ = nullptr;
DRWPass *gather_ps_ = nullptr;
PassSimple motion_blur_ps_ = {"MotionBlur"};

MotionBlurTileIndirectionBuf tile_indirection_buf_;
MotionBlurDataBuf data_;

@@ -121,7 +119,7 @@ class MotionBlurModule {
return motion_blur_fx_enabled_;
}

void render(GPUTexture **input_tx, GPUTexture **output_tx);
void render(View &view, GPUTexture **input_tx, GPUTexture **output_tx);

private:
float shutter_time_to_scene_time(float time);

@@ -24,36 +24,35 @@ namespace blender::eevee {
void WorldPipeline::sync(GPUMaterial *gpumat)
{
Manager &manager = *inst_.manager;
RenderBuffers &rbufs = inst_.render_buffers;

DRWState state = DRW_STATE_WRITE_COLOR;
world_ps_ = DRW_pass_create("World", state);
ResourceHandle handle = manager.resource_handle(float4x4::identity().ptr());

/* Push a matrix at the same location as the camera. */
float4x4 camera_mat = float4x4::identity();
// copy_v3_v3(camera_mat[3], inst_.camera.data_get().viewinv[3]);

DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, world_ps_);
DRW_shgroup_uniform_texture(grp, "utility_tx", inst_.pipelines.utility_tx);
DRW_shgroup_call_obmat(grp, DRW_cache_fullscreen_quad_get(), camera_mat.ptr());
DRW_shgroup_uniform_float_copy(grp, "world_opacity_fade", inst_.film.background_opacity_get());
world_ps_.init();
world_ps_.state_set(DRW_STATE_WRITE_COLOR);
world_ps_.material_set(manager, gpumat);
world_ps_.push_constant("world_opacity_fade", inst_.film.background_opacity_get());
world_ps_.bind_texture("utility_tx", inst_.pipelines.utility_tx);
/* AOVs. */
DRW_shgroup_uniform_image_ref(grp, "aov_color_img", &rbufs.aov_color_tx);
DRW_shgroup_uniform_image_ref(grp, "aov_value_img", &rbufs.aov_value_tx);
DRW_shgroup_storage_block_ref(grp, "aov_buf", &inst_.film.aovs_info);
world_ps_.bind_image("aov_color_img", &rbufs.aov_color_tx);
world_ps_.bind_image("aov_value_img", &rbufs.aov_value_tx);
world_ps_.bind_ssbo("aov_buf", &inst_.film.aovs_info);
/* RenderPasses. Cleared by background (even if bad practice). */
DRW_shgroup_uniform_image_ref(grp, "rp_normal_img", &rbufs.normal_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_light_img", &rbufs.light_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_diffuse_color_img", &rbufs.diffuse_color_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_specular_color_img", &rbufs.specular_color_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_emission_img", &rbufs.emission_tx);
world_ps_.bind_image("rp_normal_img", &rbufs.normal_tx);
world_ps_.bind_image("rp_light_img", &rbufs.light_tx);
world_ps_.bind_image("rp_diffuse_color_img", &rbufs.diffuse_color_tx);
world_ps_.bind_image("rp_specular_color_img", &rbufs.specular_color_tx);
world_ps_.bind_image("rp_emission_img", &rbufs.emission_tx);

world_ps_.draw(DRW_cache_fullscreen_quad_get(), handle);
/* To allow opaque pass rendering over it. */
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_IMAGE_ACCESS);
world_ps_.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS);
}

void WorldPipeline::render()
void WorldPipeline::render(View &view)
{
DRW_draw_pass(world_ps_);
inst_.manager->submit(world_ps_, view);
}

/** \} */

@@ -66,194 +65,150 @@ void WorldPipeline::render()

void ForwardPipeline::sync()
{
camera_forward_ = inst_.camera.forward();

DRWState state_depth_only = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS;
DRWState state_depth_color = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS |
DRW_STATE_WRITE_COLOR;
{
DRWState state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS;
prepass_ps_ = DRW_pass_create("Forward.Opaque.Prepass", state);
prepass_velocity_ps_ = DRW_pass_create("Forward.Opaque.Prepass.Velocity",
state | DRW_STATE_WRITE_COLOR);
prepass_ps_.init();

state |= DRW_STATE_CULL_BACK;
prepass_culled_ps_ = DRW_pass_create("Forward.Opaque.Prepass.Culled", state);
prepass_culled_velocity_ps_ = DRW_pass_create("Forward.Opaque.Prepass.Velocity",
state | DRW_STATE_WRITE_COLOR);
{
/* Common resources. */

DRW_pass_link(prepass_ps_, prepass_velocity_ps_);
DRW_pass_link(prepass_velocity_ps_, prepass_culled_ps_);
DRW_pass_link(prepass_culled_ps_, prepass_culled_velocity_ps_);
/* Textures. */
prepass_ps_.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);

inst_.velocity.bind_resources(&prepass_ps_);
inst_.sampling.bind_resources(&prepass_ps_);
}

prepass_double_sided_static_ps_ = &prepass_ps_.sub("DoubleSided.Static");
prepass_double_sided_static_ps_->state_set(state_depth_only);

prepass_single_sided_static_ps_ = &prepass_ps_.sub("SingleSided.Static");
prepass_single_sided_static_ps_->state_set(state_depth_only | DRW_STATE_CULL_BACK);

prepass_double_sided_moving_ps_ = &prepass_ps_.sub("DoubleSided.Moving");
prepass_double_sided_moving_ps_->state_set(state_depth_color);

prepass_single_sided_moving_ps_ = &prepass_ps_.sub("SingleSided.Moving");
prepass_single_sided_moving_ps_->state_set(state_depth_color | DRW_STATE_CULL_BACK);
}
{
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL;
opaque_ps_ = DRW_pass_create("Forward.Opaque", state);
opaque_ps_.init();

state |= DRW_STATE_CULL_BACK;
opaque_culled_ps_ = DRW_pass_create("Forward.Opaque.Culled", state);
{
/* Common resources. */

DRW_pass_link(opaque_ps_, opaque_culled_ps_);
/* RenderPasses. */
opaque_ps_.bind_image(RBUFS_NORMAL_SLOT, &inst_.render_buffers.normal_tx);
opaque_ps_.bind_image(RBUFS_LIGHT_SLOT, &inst_.render_buffers.light_tx);
opaque_ps_.bind_image(RBUFS_DIFF_COLOR_SLOT, &inst_.render_buffers.diffuse_color_tx);
opaque_ps_.bind_image(RBUFS_SPEC_COLOR_SLOT, &inst_.render_buffers.specular_color_tx);
opaque_ps_.bind_image(RBUFS_EMISSION_SLOT, &inst_.render_buffers.emission_tx);
/* AOVs. */
opaque_ps_.bind_image(RBUFS_AOV_COLOR_SLOT, &inst_.render_buffers.aov_color_tx);
opaque_ps_.bind_image(RBUFS_AOV_VALUE_SLOT, &inst_.render_buffers.aov_value_tx);
/* Storage Buf. */
opaque_ps_.bind_ssbo(RBUFS_AOV_BUF_SLOT, &inst_.film.aovs_info);
/* Textures. */
opaque_ps_.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);

inst_.lights.bind_resources(&opaque_ps_);
inst_.sampling.bind_resources(&opaque_ps_);
}

opaque_single_sided_ps_ = &opaque_ps_.sub("SingleSided");
opaque_single_sided_ps_->state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL |
DRW_STATE_CULL_BACK);

opaque_double_sided_ps_ = &opaque_ps_.sub("DoubleSided");
opaque_double_sided_ps_->state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);
}
{
DRWState state = DRW_STATE_DEPTH_LESS_EQUAL;
transparent_ps_ = DRW_pass_create("Forward.Transparent", state);
transparent_ps_.init();
/* Workaround limitation of PassSortable. Use dummy pass that will be sorted first in all
* circumstances. */
PassMain::Sub &sub = transparent_ps_.sub("ResourceBind", -FLT_MAX);

/* Common resources. */

/* Textures. */
sub.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);

inst_.lights.bind_resources(&sub);
inst_.sampling.bind_resources(&sub);
}
}
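The `-FLT_MAX` key is what makes the "ResourceBind" workaround hold: `PassSortable` orders its subs by the float key given to `sub()`, so the smallest possible key is guaranteed to be emitted before any material sub. A condensed sketch (names are placeholders):

/* Sketch only: sub-pass order follows the sort key, so the resource-binding sub
 * with key -FLT_MAX always precedes the per-material subs added during sync. */
PassSortable sorted_ps = {"Example.Sorted"};
sorted_ps.init();
PassMain::Sub &bind_sub = sorted_ps.sub("ResourceBind", -FLT_MAX);
PassMain::Sub &mat_sub = sorted_ps.sub("SomeMaterial", 4.2f);
UNUSED_VARS(bind_sub, mat_sub);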
DRWShadingGroup *ForwardPipeline::material_opaque_add(::Material *blender_mat, GPUMaterial *gpumat)
PassMain::Sub *ForwardPipeline::prepass_opaque_add(::Material *blender_mat,
GPUMaterial *gpumat,
bool has_motion)
{
RenderBuffers &rbufs = inst_.render_buffers;
DRWPass *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ? opaque_culled_ps_ : opaque_ps_;
LightModule &lights = inst_.lights;
Sampling &sampling = inst_.sampling;
// LightProbeModule &lightprobes = inst_.lightprobes;
// RaytracingModule &raytracing = inst_.raytracing;
// eGPUSamplerState no_interp = GPU_SAMPLER_DEFAULT;
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, pass);
lights.bind_resources(grp);
sampling.bind_resources(grp);
// DRW_shgroup_uniform_block(grp, "sampling_buf", inst_.sampling.ubo_get());
// DRW_shgroup_uniform_block(grp, "grids_buf", lightprobes.grid_ubo_get());
// DRW_shgroup_uniform_block(grp, "cubes_buf", lightprobes.cube_ubo_get());
// DRW_shgroup_uniform_block(grp, "probes_buf", lightprobes.info_ubo_get());
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_grid_tx", lightprobes.grid_tx_ref_get());
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_cube_tx", lightprobes.cube_tx_ref_get());
DRW_shgroup_uniform_texture(grp, "utility_tx", inst_.pipelines.utility_tx);
/* AOVs. */
DRW_shgroup_uniform_image_ref(grp, "aov_color_img", &rbufs.aov_color_tx);
DRW_shgroup_uniform_image_ref(grp, "aov_value_img", &rbufs.aov_value_tx);
DRW_shgroup_storage_block_ref(grp, "aov_buf", &inst_.film.aovs_info);
/* RenderPasses. */
DRW_shgroup_uniform_image_ref(grp, "rp_normal_img", &rbufs.normal_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_light_img", &rbufs.light_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_diffuse_color_img", &rbufs.diffuse_color_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_specular_color_img", &rbufs.specular_color_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_emission_img", &rbufs.emission_tx);

/* TODO(fclem): Make this only needed if material uses it ... somehow. */
// if (true) {
// DRW_shgroup_uniform_texture_ref(
// grp, "sss_transmittance_tx", inst_.subsurface.transmittance_ref_get());
// }
// if (raytracing.enabled()) {
// DRW_shgroup_uniform_block(grp, "rt_diffuse_buf", raytracing.diffuse_data);
// DRW_shgroup_uniform_block(grp, "rt_reflection_buf", raytracing.reflection_data);
// DRW_shgroup_uniform_block(grp, "rt_refraction_buf", raytracing.refraction_data);
// DRW_shgroup_uniform_texture_ref_ex(grp, "radiance_tx", &input_screen_radiance_tx_,
// no_interp);
// }
// if (raytracing.enabled()) {
// DRW_shgroup_uniform_block(grp, "hiz_buf", inst_.hiz.ubo_get());
// DRW_shgroup_uniform_texture_ref(grp, "hiz_tx", inst_.hiz_front.texture_ref_get());
// }
return grp;
PassMain::Sub *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ?
(has_motion ? prepass_single_sided_moving_ps_ :
prepass_single_sided_static_ps_) :
(has_motion ? prepass_double_sided_moving_ps_ :
prepass_double_sided_static_ps_);
return &pass->sub(GPU_material_get_name(gpumat));
}

DRWShadingGroup *ForwardPipeline::prepass_opaque_add(::Material *blender_mat,
GPUMaterial *gpumat,
bool has_motion)
PassMain::Sub *ForwardPipeline::material_opaque_add(::Material *blender_mat, GPUMaterial *gpumat)
{
DRWPass *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ?
(has_motion ? prepass_culled_velocity_ps_ : prepass_culled_ps_) :
(has_motion ? prepass_velocity_ps_ : prepass_ps_);
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, pass);
if (has_motion) {
inst_.velocity.bind_resources(grp);
}
return grp;
PassMain::Sub *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ? opaque_single_sided_ps_ :
opaque_double_sided_ps_;
return &pass->sub(GPU_material_get_name(gpumat));
}
DRWShadingGroup *ForwardPipeline::material_transparent_add(::Material *blender_mat,
GPUMaterial *gpumat)
{
RenderBuffers &rbufs = inst_.render_buffers;
LightModule &lights = inst_.lights;
Sampling &sampling = inst_.sampling;
// LightProbeModule &lightprobes = inst_.lightprobes;
// RaytracingModule &raytracing = inst_.raytracing;
// eGPUSamplerState no_interp = GPU_SAMPLER_DEFAULT;
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, transparent_ps_);
lights.bind_resources(grp);
sampling.bind_resources(grp);
// DRW_shgroup_uniform_block(grp, "sampling_buf", inst_.sampling.ubo_get());
// DRW_shgroup_uniform_block(grp, "grids_buf", lightprobes.grid_ubo_get());
// DRW_shgroup_uniform_block(grp, "cubes_buf", lightprobes.cube_ubo_get());
// DRW_shgroup_uniform_block(grp, "probes_buf", lightprobes.info_ubo_get());
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_grid_tx", lightprobes.grid_tx_ref_get());
// DRW_shgroup_uniform_texture_ref(grp, "lightprobe_cube_tx", lightprobes.cube_tx_ref_get());
DRW_shgroup_uniform_texture(grp, "utility_tx", inst_.pipelines.utility_tx);
/* TODO(fclem): Make this only needed if material uses it ... somehow. */
// if (true) {
// DRW_shgroup_uniform_texture_ref(
// grp, "sss_transmittance_tx", inst_.subsurface.transmittance_ref_get());
// }
// if (raytracing.enabled()) {
// DRW_shgroup_uniform_block(grp, "rt_diffuse_buf", raytracing.diffuse_data);
// DRW_shgroup_uniform_block(grp, "rt_reflection_buf", raytracing.reflection_data);
// DRW_shgroup_uniform_block(grp, "rt_refraction_buf", raytracing.refraction_data);
// DRW_shgroup_uniform_texture_ref_ex(
// grp, "rt_radiance_tx", &input_screen_radiance_tx_, no_interp);
// }
// if (raytracing.enabled()) {
// DRW_shgroup_uniform_block(grp, "hiz_buf", inst_.hiz.ubo_get());
// DRW_shgroup_uniform_texture_ref(grp, "hiz_tx", inst_.hiz_front.texture_ref_get());
// }
{
/* TODO(fclem): This is not needed. This is only to please the OpenGL debug Layer.
* If we are to introduce transparency render-passes support, it would be through a separate
* pass. */
/* AOVs. */
DRW_shgroup_uniform_image_ref(grp, "aov_color_img", &rbufs.aov_color_tx);
DRW_shgroup_uniform_image_ref(grp, "aov_value_img", &rbufs.aov_value_tx);
DRW_shgroup_storage_block_ref(grp, "aov_buf", &inst_.film.aovs_info);
/* RenderPasses. */
DRW_shgroup_uniform_image_ref(grp, "rp_normal_img", &rbufs.normal_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_light_img", &rbufs.light_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_diffuse_color_img", &rbufs.diffuse_color_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_specular_color_img", &rbufs.specular_color_tx);
DRW_shgroup_uniform_image_ref(grp, "rp_emission_img", &rbufs.emission_tx);
}

DRWState state_disable = DRW_STATE_WRITE_DEPTH;
DRWState state_enable = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM;
if (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) {
state_enable |= DRW_STATE_CULL_BACK;
}
DRW_shgroup_state_disable(grp, state_disable);
DRW_shgroup_state_enable(grp, state_enable);
return grp;
}

DRWShadingGroup *ForwardPipeline::prepass_transparent_add(::Material *blender_mat,
GPUMaterial *gpumat)
PassMain::Sub *ForwardPipeline::prepass_transparent_add(const Object *ob,
::Material *blender_mat,
GPUMaterial *gpumat)
{
if ((blender_mat->blend_flag & MA_BL_HIDE_BACKFACE) == 0) {
return nullptr;
}

DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, transparent_ps_);

DRWState state_disable = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM;
DRWState state_enable = DRW_STATE_WRITE_DEPTH;
if (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) {
state_enable |= DRW_STATE_CULL_BACK;
DRWState state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
if ((blender_mat->blend_flag & MA_BL_CULL_BACKFACE)) {
state |= DRW_STATE_CULL_BACK;
}
DRW_shgroup_state_disable(grp, state_disable);
DRW_shgroup_state_enable(grp, state_enable);
return grp;
float sorting_value = math::dot(float3(ob->obmat[3]), camera_forward_);
PassMain::Sub *pass = &transparent_ps_.sub(GPU_material_get_name(gpumat), sorting_value);
pass->state_set(state);
pass->material_set(*inst_.manager, gpumat);
return pass;
}

void ForwardPipeline::render(const DRWView *view,
PassMain::Sub *ForwardPipeline::material_transparent_add(const Object *ob,
::Material *blender_mat,
GPUMaterial *gpumat)
{
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM | DRW_STATE_DEPTH_LESS_EQUAL;
if ((blender_mat->blend_flag & MA_BL_CULL_BACKFACE)) {
state |= DRW_STATE_CULL_BACK;
}
float sorting_value = math::dot(float3(ob->obmat[3]), camera_forward_);
PassMain::Sub *pass = &transparent_ps_.sub(GPU_material_get_name(gpumat), sorting_value);
pass->state_set(state);
pass->material_set(*inst_.manager, gpumat);
return pass;
}
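The sort key used by both transparent `sub()` calls is the projection of the object origin onto the camera forward axis; a small sketch of the same expression, factored into a hypothetical helper for clarity:

/* Sketch of the sort key above: the camera position is not subtracted, but that only
 * shifts every key by the same constant, so the relative order along the view axis,
 * and therefore the draw order of the sorted subs, is unchanged. */
static float transparent_sort_key(const Object *ob, const float3 &camera_forward)
{
  return math::dot(float3(ob->obmat[3]), camera_forward);
}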
void ForwardPipeline::render(View &view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
GPUTexture *UNUSED(combined_tx))
{
UNUSED_VARS(view);

DRW_stats_group_start("ForwardOpaque");
DRW_stats_group_start("Forward.Opaque");

GPU_framebuffer_bind(prepass_fb);
DRW_draw_pass(prepass_ps_);
inst_.manager->submit(prepass_ps_, view);

if (!DRW_pass_is_empty(prepass_ps_)) {
inst_.hiz_buffer.set_dirty();
}
// if (!DRW_pass_is_empty(prepass_ps_)) {
inst_.hiz_buffer.set_dirty();
// }

// if (inst_.raytracing.enabled()) {
// rt_buffer.radiance_copy(combined_tx);

@@ -263,17 +218,11 @@ void ForwardPipeline::render(const DRWView *view,
// inst_.shadows.set_view(view, depth_tx);

GPU_framebuffer_bind(combined_fb);
DRW_draw_pass(opaque_ps_);
inst_.manager->submit(opaque_ps_, view);

DRW_stats_group_end();

DRW_stats_group_start("ForwardTransparent");
/* TODO(fclem) This is suboptimal. We could sort during sync. */
/* FIXME(fclem) This won't work for panoramic, where we need
* to sort by distance to camera, not by z. */
DRW_pass_sort_shgroup_z(transparent_ps_);
DRW_draw_pass(transparent_ps_);
DRW_stats_group_end();
inst_.manager->submit(transparent_ps_, view);
// if (inst_.raytracing.enabled()) {
// gbuffer.ray_radiance_tx.release();

@@ -13,6 +13,7 @@
#pragma once

#include "DRW_render.h"
#include "draw_shader_shared.h"

/* TODO(fclem): Move it to GPU/DRAW. */
#include "../eevee/eevee_lut.h"

@@ -31,13 +32,13 @@ class WorldPipeline {
private:
Instance &inst_;

DRWPass *world_ps_ = nullptr;
PassSimple world_ps_ = {"World.Background"};

public:
WorldPipeline(Instance &inst) : inst_(inst){};

void sync(GPUMaterial *gpumat);
void render();
void render(View &view);
};

/** \} */

@@ -52,13 +53,18 @@ class ForwardPipeline {
private:
Instance &inst_;

DRWPass *prepass_ps_ = nullptr;
DRWPass *prepass_velocity_ps_ = nullptr;
DRWPass *prepass_culled_ps_ = nullptr;
DRWPass *prepass_culled_velocity_ps_ = nullptr;
DRWPass *opaque_ps_ = nullptr;
DRWPass *opaque_culled_ps_ = nullptr;
DRWPass *transparent_ps_ = nullptr;
PassMain prepass_ps_ = {"Prepass"};
PassMain::Sub *prepass_single_sided_static_ps_ = nullptr;
PassMain::Sub *prepass_single_sided_moving_ps_ = nullptr;
PassMain::Sub *prepass_double_sided_static_ps_ = nullptr;
PassMain::Sub *prepass_double_sided_moving_ps_ = nullptr;

PassMain opaque_ps_ = {"Shading"};
PassMain::Sub *opaque_single_sided_ps_ = nullptr;
PassMain::Sub *opaque_double_sided_ps_ = nullptr;

PassSortable transparent_ps_ = {"Forward.Transparent"};
float3 camera_forward_;

// GPUTexture *input_screen_radiance_tx_ = nullptr;

@@ -67,28 +73,17 @@ class ForwardPipeline {

void sync();

DRWShadingGroup *material_add(::Material *blender_mat, GPUMaterial *gpumat)
{
return (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) ?
material_transparent_add(blender_mat, gpumat) :
material_opaque_add(blender_mat, gpumat);
}
PassMain::Sub *prepass_opaque_add(::Material *blender_mat, GPUMaterial *gpumat, bool has_motion);
PassMain::Sub *material_opaque_add(::Material *blender_mat, GPUMaterial *gpumat);

DRWShadingGroup *prepass_add(::Material *blender_mat, GPUMaterial *gpumat, bool has_motion)
{
return (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) ?
prepass_transparent_add(blender_mat, gpumat) :
prepass_opaque_add(blender_mat, gpumat, has_motion);
}
PassMain::Sub *prepass_transparent_add(const Object *ob,
::Material *blender_mat,
GPUMaterial *gpumat);
PassMain::Sub *material_transparent_add(const Object *ob,
::Material *blender_mat,
GPUMaterial *gpumat);

DRWShadingGroup *material_opaque_add(::Material *blender_mat, GPUMaterial *gpumat);
DRWShadingGroup *prepass_opaque_add(::Material *blender_mat,
GPUMaterial *gpumat,
bool has_motion);
DRWShadingGroup *material_transparent_add(::Material *blender_mat, GPUMaterial *gpumat);
DRWShadingGroup *prepass_transparent_add(::Material *blender_mat, GPUMaterial *gpumat);

void render(const DRWView *view,
void render(View &view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
GPUTexture *combined_tx);

@@ -192,26 +187,36 @@ class PipelineModule {
// velocity.sync();
}

DRWShadingGroup *material_add(::Material *blender_mat,
GPUMaterial *gpumat,
eMaterialPipeline pipeline_type)
PassMain::Sub *material_add(Object *ob,
::Material *blender_mat,
GPUMaterial *gpumat,
eMaterialPipeline pipeline_type)
{
switch (pipeline_type) {
case MAT_PIPE_DEFERRED_PREPASS:
// return deferred.prepass_add(blender_mat, gpumat, false);
break;
case MAT_PIPE_FORWARD_PREPASS:
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
return forward.prepass_transparent_add(ob, blender_mat, gpumat);
}
return forward.prepass_opaque_add(blender_mat, gpumat, false);

case MAT_PIPE_DEFERRED_PREPASS_VELOCITY:
// return deferred.prepass_add(blender_mat, gpumat, true);
break;
case MAT_PIPE_FORWARD_PREPASS:
return forward.prepass_add(blender_mat, gpumat, false);
case MAT_PIPE_FORWARD_PREPASS_VELOCITY:
return forward.prepass_add(blender_mat, gpumat, true);
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
return forward.prepass_transparent_add(ob, blender_mat, gpumat);
}
return forward.prepass_opaque_add(blender_mat, gpumat, true);

case MAT_PIPE_DEFERRED:
// return deferred.material_add(blender_mat, gpumat);
break;
case MAT_PIPE_FORWARD:
return forward.material_add(blender_mat, gpumat);
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
return forward.material_transparent_add(ob, blender_mat, gpumat);
}
return forward.material_opaque_add(blender_mat, gpumat);

case MAT_PIPE_VOLUME:
/* TODO(fclem) volume pass. */
return nullptr;

@@ -87,6 +87,12 @@ class Sampling {
DRW_shgroup_storage_block_ref(grp, "sampling_buf", &data_);
}

template<typename T> void bind_resources(draw::detail::PassBase<T> *pass)
{
/* Storage Buf. */
pass->bind_ssbo(SAMPLING_BUF_SLOT, &data_);
}
/* Returns a pseudo random number in [0..1] range. Each dimension is de-correlated. */
float rng_get(eSamplingDimension dimension) const
{

@@ -9,6 +9,8 @@
* and static shader usage.
*/

#include "GPU_capabilities.h"

#include "gpu_shader_create_info.hh"

#include "eevee_shader.hh"

@@ -180,11 +182,41 @@ void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOu
GPUCodegenOutput &codegen = *codegen_;
ShaderCreateInfo &info = *reinterpret_cast<ShaderCreateInfo *>(codegen.create_info);

info.auto_resource_location(true);
/* WORKAROUND: Replace by new ob info. */
int64_t ob_info_index = info.additional_infos_.first_index_of_try("draw_object_infos");
if (ob_info_index != -1) {
info.additional_infos_[ob_info_index] = "draw_object_infos_new";
}

/* WORKAROUND: Add new ob attr buffer. */
if (GPU_material_uniform_attributes(gpumat) != nullptr) {
info.additional_info("draw_object_attribute_new");
}

/* WORKAROUND: Avoid utility texture merge error. TODO: find a cleaner fix. */
for (auto &resource : info.batch_resources_) {
if (resource.bind_type == ShaderCreateInfo::Resource::BindType::SAMPLER) {
if (resource.slot == RBUFS_UTILITY_TEX_SLOT) {
resource.slot = GPU_max_textures_frag() - 1;
}
}
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
info.define("MAT_TRANSPARENT");
/* Transparent materials do not have any velocity-specific pipeline. */
if (pipeline_type == MAT_PIPE_FORWARD_PREPASS_VELOCITY) {
pipeline_type = MAT_PIPE_FORWARD_PREPASS;
}
}

if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT) == false &&
pipeline_type == MAT_PIPE_FORWARD) {
/* Opaque forward does support AOVs and render passes. */
info.additional_info("eevee_aov_out");
info.additional_info("eevee_render_pass_out");
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_BARYCENTRIC)) {
switch (geometry_type) {
case MAT_GEOM_MESH:

@@ -12,16 +12,16 @@
# include "BLI_memory_utils.hh"
# include "DRW_gpu_wrapper.hh"

# include "draw_manager.hh"
# include "draw_pass.hh"

# include "eevee_defines.hh"

# include "GPU_shader_shared.h"

namespace blender::eevee {

using draw::Framebuffer;
using draw::SwapChain;
using draw::Texture;
using draw::TextureFromPool;
using namespace draw;

constexpr eGPUSamplerState no_filter = GPU_SAMPLER_DEFAULT;
constexpr eGPUSamplerState with_filter = GPU_SAMPLER_FILTER;

@@ -74,25 +74,12 @@ WorldHandle &SyncModule::sync_world(::World *world)
/** \name Common
* \{ */

static inline void shgroup_geometry_call(DRWShadingGroup *grp,
Object *ob,
GPUBatch *geom,
int v_first = -1,
int v_count = -1,
bool use_instancing = false)
static inline void geometry_call(PassMain::Sub *sub_pass,
GPUBatch *geom,
ResourceHandle resource_handle)
{
if (grp == nullptr) {
return;
}

if (v_first == -1) {
DRW_shgroup_call(grp, geom, ob);
}
else if (use_instancing) {
DRW_shgroup_call_instance_range(grp, ob, geom, v_first, v_count);
}
else {
DRW_shgroup_call_range(grp, ob, geom, v_first, v_count);
if (sub_pass != nullptr) {
sub_pass->draw(geom, resource_handle);
}
}

@@ -102,9 +89,13 @@ static inline void shgroup_geometry_call(DRWShadingGroup *grp,
/** \name Mesh
* \{ */

void SyncModule::sync_mesh(Object *ob, ObjectHandle &ob_handle)
void SyncModule::sync_mesh(Object *ob,
ObjectHandle &ob_handle,
ResourceHandle res_handle,
const ObjectRef &ob_ref)
{
bool has_motion = inst_.velocity.step_object_sync(ob, ob_handle.object_key, ob_handle.recalc);
bool has_motion = inst_.velocity.step_object_sync(
ob, ob_handle.object_key, res_handle, ob_handle.recalc);

MaterialArray &material_array = inst_.materials.material_array_get(ob, has_motion);

@@ -123,14 +114,16 @@ void SyncModule::sync_mesh(Object *ob, ObjectHandle &ob_handle)
continue;
}
Material *material = material_array.materials[i];
shgroup_geometry_call(material->shading.shgrp, ob, geom);
shgroup_geometry_call(material->prepass.shgrp, ob, geom);
shgroup_geometry_call(material->shadow.shgrp, ob, geom);
geometry_call(material->shading.sub_pass, geom, res_handle);
geometry_call(material->prepass.sub_pass, geom, res_handle);
geometry_call(material->shadow.sub_pass, geom, res_handle);

is_shadow_caster = is_shadow_caster || material->shadow.shgrp != nullptr;
is_shadow_caster = is_shadow_caster || material->shadow.sub_pass != nullptr;
is_alpha_blend = is_alpha_blend || material->is_alpha_blend_transparent;
}

inst_.manager->extract_object_attributes(res_handle, ob_ref, material_array.gpu_materials);

// shadows.sync_object(ob, ob_handle, is_shadow_caster, is_alpha_blend);
}

@@ -155,11 +148,13 @@ struct gpIterData {
int vcount = 0;
bool instancing = false;

gpIterData(Instance &inst_, Object *ob_, ObjectHandle &ob_handle)
gpIterData(Instance &inst_, Object *ob_, ObjectHandle &ob_handle, ResourceHandle resource_handle)
: inst(inst_),
ob(ob_),
material_array(inst_.materials.material_array_get(
ob_, inst_.velocity.step_object_sync(ob, ob_handle.object_key, ob_handle.recalc)))
ob_,
inst_.velocity.step_object_sync(
ob, ob_handle.object_key, resource_handle, ob_handle.recalc)))
{
cfra = DEG_get_ctime(inst.depsgraph);
};

@@ -167,26 +162,28 @@ struct gpIterData {
static void gpencil_drawcall_flush(gpIterData &iter)
{
#if 0 /* Incompatible with new draw manager. */
if (iter.geom != nullptr) {
shgroup_geometry_call(iter.material->shading.shgrp,
geometry_call(iter.material->shading.sub_pass,
iter.ob,
iter.geom,
iter.vfirst,
iter.vcount,
iter.instancing);
shgroup_geometry_call(iter.material->prepass.shgrp,
geometry_call(iter.material->prepass.sub_pass,
iter.ob,
iter.geom,
iter.vfirst,
iter.vcount,
iter.instancing);
shgroup_geometry_call(iter.material->shadow.shgrp,
geometry_call(iter.material->shadow.sub_pass,
iter.ob,
iter.geom,
iter.vfirst,
iter.vcount,
iter.instancing);
}
#endif
iter.geom = nullptr;
iter.vfirst = -1;
iter.vcount = 0;

@@ -250,21 +247,22 @@ static void gpencil_stroke_sync(bGPDlayer *UNUSED(gpl),
}
}

void SyncModule::sync_gpencil(Object *ob, ObjectHandle &ob_handle)
void SyncModule::sync_gpencil(Object *ob, ObjectHandle &ob_handle, ResourceHandle res_handle)
{
/* TODO(fclem): Waiting for a user option to use the render engine instead of gpencil engine. */
if (true) {
inst_.gpencil_engine_enabled = true;
return;
}
UNUSED_VARS(res_handle);

gpIterData iter(inst_, ob, ob_handle);
gpIterData iter(inst_, ob, ob_handle, res_handle);

BKE_gpencil_visible_stroke_iter((bGPdata *)ob->data, nullptr, gpencil_stroke_sync, &iter);

gpencil_drawcall_flush(iter);

// bool is_caster = true; /* TODO material.shadow.shgrp. */
// bool is_caster = true; /* TODO material.shadow.sub_pass. */
// bool is_alpha_blend = true; /* TODO material.is_alpha_blend. */
// shadows.sync_object(ob, ob_handle, is_caster, is_alpha_blend);
}

@@ -280,19 +278,24 @@ static void shgroup_curves_call(MaterialPass &matpass,
ParticleSystem *part_sys = nullptr,
ModifierData *modifier_data = nullptr)
{
if (matpass.shgrp == nullptr) {
UNUSED_VARS(ob, modifier_data);
if (matpass.sub_pass == nullptr) {
return;
}
if (part_sys != nullptr) {
DRW_shgroup_hair_create_sub(ob, part_sys, modifier_data, matpass.shgrp, matpass.gpumat);
// DRW_shgroup_hair_create_sub(ob, part_sys, modifier_data, matpass.sub_pass, matpass.gpumat);
}
else {
DRW_shgroup_curves_create_sub(ob, matpass.shgrp, matpass.gpumat);
// DRW_shgroup_curves_create_sub(ob, matpass.sub_pass, matpass.gpumat);
}
}

void SyncModule::sync_curves(Object *ob, ObjectHandle &ob_handle, ModifierData *modifier_data)
void SyncModule::sync_curves(Object *ob,
ObjectHandle &ob_handle,
ResourceHandle res_handle,
ModifierData *modifier_data)
{
UNUSED_VARS(res_handle);
int mat_nr = CURVES_MATERIAL_NR;

ParticleSystem *part_sys = nullptr;

@@ -320,7 +323,7 @@ void SyncModule::sync_curves(Object *ob, ObjectHandle &ob_handle, ModifierData *
/* TODO(fclem) Hair velocity. */
// shading_passes.velocity.gpencil_add(ob, ob_handle);

// bool is_caster = material.shadow.shgrp != nullptr;
// bool is_caster = material.shadow.sub_pass != nullptr;
// bool is_alpha_blend = material.is_alpha_blend_transparent;
// shadows.sync_object(ob, ob_handle, is_caster, is_alpha_blend);
}

@@ -150,9 +150,15 @@ class SyncModule {
ObjectHandle &sync_object(Object *ob);
WorldHandle &sync_world(::World *world);

void sync_mesh(Object *ob, ObjectHandle &ob_handle);
void sync_gpencil(Object *ob, ObjectHandle &ob_handle);
void sync_curves(Object *ob, ObjectHandle &ob_handle, ModifierData *modifier_data = nullptr);
void sync_mesh(Object *ob,
ObjectHandle &ob_handle,
ResourceHandle res_handle,
const ObjectRef &ob_ref);
void sync_gpencil(Object *ob, ObjectHandle &ob_handle, ResourceHandle res_handle);
void sync_curves(Object *ob,
ObjectHandle &ob_handle,
ResourceHandle res_handle,
ModifierData *modifier_data = nullptr);
};

/** \} */
@@ -43,6 +43,10 @@ void VelocityModule::init()
step_ = STEP_CURRENT;
/* Let the main sync loop handle the current step. */
}

/* For viewport, only previous motion is supported.
* Still bind previous step to avoid undefined behavior. */
next_step_ = inst_.is_viewport() ? STEP_PREVIOUS : STEP_NEXT;
}

static void step_object_sync_render(void *velocity,

@@ -51,7 +55,9 @@ static void step_object_sync_render(void *velocity,
Depsgraph *UNUSED(depsgraph))
{
ObjectKey object_key(ob);
reinterpret_cast<VelocityModule *>(velocity)->step_object_sync(ob, object_key);
/* NOTE: Dummy resource handle since this will not be used for drawing. */
ResourceHandle resource_handle(0);
reinterpret_cast<VelocityModule *>(velocity)->step_object_sync(ob, object_key, resource_handle);
}

void VelocityModule::step_sync(eVelocityStep step, float time)

@@ -78,6 +84,7 @@ void VelocityModule::step_camera_sync()

bool VelocityModule::step_object_sync(Object *ob,
ObjectKey &object_key,
ResourceHandle resource_handle,
int /*IDRecalcFlag*/ recalc)
{
bool has_motion = object_has_velocity(ob) || (recalc & ID_RECALC_TRANSFORM);

@@ -89,8 +96,6 @@ bool VelocityModule::step_object_sync(Object *ob,
return false;
}

uint32_t resource_id = DRW_object_resource_id_get(ob);

/* Object motion. */
/* FIXME(fclem) As we are using original objects pointers, there is a chance the previous
* object key matches a totally different object if the scene was changed by user or python

@@ -99,7 +104,7 @@ bool VelocityModule::step_object_sync(Object *ob,
* We live with that until we have a correct way of identifying new objects. */
VelocityObjectData &vel = velocity_map.lookup_or_add_default(object_key);
vel.obj.ofs[step_] = object_steps_usage[step_]++;
vel.obj.resource_id = resource_id;
vel.obj.resource_id = resource_handle.resource_index();
vel.id = (ID *)ob->data;
object_steps[step_]->get_or_resize(vel.obj.ofs[step_]) = ob->obmat;
if (step_ == STEP_CURRENT) {

@@ -257,7 +262,7 @@ void VelocityModule::end_sync()
uint32_t max_resource_id_ = 0u;

for (Map<ObjectKey, VelocityObjectData>::Item item : velocity_map.items()) {
if (item.value.obj.resource_id == (uint)-1) {
if (item.value.obj.resource_id == (uint32_t)-1) {
deleted_obj.append(item.key);
}
else {

@@ -277,7 +282,7 @@ void VelocityModule::end_sync()
velocity_map.remove(key);
}

indirection_buf.resize(power_of_2_max_u(max_resource_id_ + 1));
indirection_buf.resize(ceil_to_multiple_u(max_resource_id_, 128));
/* Avoid uploading more data to the GPU as well as an extra level of
* indirection on the GPU by copying back offsets to the VelocityIndex. */

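The resize above switches the indirection buffer from next-power-of-two growth to rounding up to a multiple of 128. A self-contained sketch of the two rounding behaviors (illustrative stand-ins, not the actual BLI math functions):

#include <cstdint>

/* Illustrative stand-ins for the rounding semantics only. */
static uint32_t next_power_of_two_sketch(uint32_t x)
{
  uint32_t v = 1;
  while (v < x) {
    v *= 2;
  }
  return v;
}

static uint32_t ceil_to_multiple_sketch(uint32_t x, uint32_t m)
{
  return ((x + m - 1) / m) * m;
}

/* For example, with max_resource_id_ == 300:
 * next_power_of_two_sketch(301) == 512, while
 * ceil_to_multiple_sketch(300, 128) == 384,
 * so the new scheme grows in fixed 128-wide steps instead of doubling. */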
@@ -67,7 +67,10 @@ class VelocityModule {
private:
Instance &inst_;

/** Step being synced. */
eVelocityStep step_ = STEP_CURRENT;
/** Step referenced as next step. */
eVelocityStep next_step_ = STEP_NEXT;

public:
VelocityModule(Instance &inst) : inst_(inst)
@@ -102,7 +105,10 @@ class VelocityModule {
void step_sync(eVelocityStep step, float time);

/* Gather motion data. Returns true if the object **can** have motion. */
bool step_object_sync(Object *ob, ObjectKey &object_key, int recalc = 0);
bool step_object_sync(Object *ob,
ObjectKey &object_key,
ResourceHandle resource_handle,
int recalc = 0);

/* Moves next frame data to previous frame data. Nullify next frame data. */
void step_swap();
@@ -112,6 +118,20 @@ class VelocityModule {

void bind_resources(DRWShadingGroup *grp);

template<typename T> void bind_resources(draw::detail::Pass<T> *pass)
{
/* Storage Buf. */
pass->bind_ssbo(VELOCITY_OBJ_PREV_BUF_SLOT, &(*object_steps[STEP_PREVIOUS]));
pass->bind_ssbo(VELOCITY_OBJ_NEXT_BUF_SLOT, &(*object_steps[next_step_]));
pass->bind_ssbo(VELOCITY_GEO_PREV_BUF_SLOT, &(*geometry_steps[STEP_PREVIOUS]));
pass->bind_ssbo(VELOCITY_GEO_NEXT_BUF_SLOT, &(*geometry_steps[next_step_]));
pass->bind_ssbo(VELOCITY_INDIRECTION_BUF_SLOT, &indirection_buf);
/* Uniform Buf. */
pass->bind_ubo(VELOCITY_CAMERA_PREV_BUF, &(*camera_steps[STEP_PREVIOUS]));
pass->bind_ubo(VELOCITY_CAMERA_CURR_BUF, &(*camera_steps[STEP_CURRENT]));
pass->bind_ubo(VELOCITY_CAMERA_NEXT_BUF, &(*camera_steps[next_step_]));
}

bool camera_has_motion() const;
bool camera_changed_projection() const;

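For context, a hypothetical call site for the templated bind_resources() above, assuming any pass type compatible with draw::detail::Pass<T>; the module and member names here are illustrative:

void MotionBlurSketch::sync()
{
  gather_ps_.init();
  /* One call binds every velocity SSBO/UBO on the slots defined in eevee_defines.hh. */
  inst_.velocity.bind_resources(&gather_ps_);
}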
@@ -118,10 +118,10 @@ void ShadingView::render()
GPU_framebuffer_bind(combined_fb_);
GPU_framebuffer_clear_color_depth(combined_fb_, clear_color, 1.0f);

inst_.pipelines.world.render();
inst_.pipelines.world.render(render_view_new_);

/* TODO(fclem): Move it after the first prepass (and hiz update) once pipeline is stabilized. */
inst_.lights.set_view(render_view_, extent_);
inst_.lights.set_view(render_view_new_, extent_);

// inst_.pipelines.deferred.render(
// render_view_, rt_buffer_opaque_, rt_buffer_refract_, depth_tx_, combined_tx_);
@@ -130,10 +130,10 @@ void ShadingView::render()

// inst_.lookdev.render_overlay(view_fb_);

inst_.pipelines.forward.render(render_view_, prepass_fb_, combined_fb_, rbufs.combined_tx);
inst_.pipelines.forward.render(render_view_new_, prepass_fb_, combined_fb_, rbufs.combined_tx);

inst_.lights.debug_draw(combined_fb_);
inst_.hiz_buffer.debug_draw(combined_fb_);
inst_.lights.debug_draw(render_view_new_, combined_fb_);
inst_.hiz_buffer.debug_draw(render_view_new_, combined_fb_);

GPUTexture *combined_final_tx = render_postfx(rbufs.combined_tx);

@@ -157,8 +157,8 @@ GPUTexture *ShadingView::render_postfx(GPUTexture *input_tx)
GPUTexture *output_tx = postfx_tx_;

/* Swapping is done internally. Actual output is set to the next input. */
inst_.depth_of_field.render(&input_tx, &output_tx, dof_buffer_);
inst_.motion_blur.render(&input_tx, &output_tx);
inst_.depth_of_field.render(render_view_new_, &input_tx, &output_tx, dof_buffer_);
inst_.motion_blur.render(render_view_new_, &input_tx, &output_tx);

return input_tx;
}
@@ -186,6 +186,8 @@ void ShadingView::update_view()
* out of the blurring radius. To fix this, use custom enlarged culling matrix. */
inst_.depth_of_field.jitter_apply(winmat, viewmat);
DRW_view_update_sub(render_view_, viewmat.ptr(), winmat.ptr());

render_view_new_.sync(viewmat, winmat);
}

/** \} */

@@ -57,6 +57,7 @@ class ShadingView {
DRWView *sub_view_ = nullptr;
/** Same as sub_view_ but has Depth Of Field jitter applied. */
DRWView *render_view_ = nullptr;
View render_view_new_;

/** Render size of the view. Can change between scene sample eval. */
int2 extent_ = {-1, -1};
@@ -65,7 +66,7 @@ class ShadingView {

public:
ShadingView(Instance &inst, const char *name, const float (*face_matrix)[4])
: inst_(inst), name_(name), face_matrix_(face_matrix){};
: inst_(inst), name_(name), face_matrix_(face_matrix), render_view_new_(name){};

~ShadingView(){};

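The DRWView member is kept next to the new draw::View during the transition. A minimal sketch of how the new view is driven, based only on the calls shown above (the variable name is illustrative):

View view_sketch("ViewSketch");    /* Named view, as in the constructor above. */
view_sketch.sync(viewmat, winmat); /* Re-synced whenever the matrices change. */
inst_.lights.set_view(view_sketch, extent_);
inst_.pipelines.forward.render(view_sketch, prepass_fb_, combined_fb_, rbufs.combined_tx);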
@@ -133,9 +133,9 @@ void main()
/* Issue a sprite for each field if any CoC matches. */
if (any(lessThan(do_scatter4 * sign(coc4), vec4(0.0)))) {
/* Same value for all threads. Not an issue if we don't sync access to it. */
scatter_fg_indirect_buf.v_count = 4u;
scatter_fg_indirect_buf.vertex_len = 4u;
/* Issue 1 strip instance per sprite. */
uint rect_id = atomicAdd(scatter_fg_indirect_buf.i_count, 1u);
uint rect_id = atomicAdd(scatter_fg_indirect_buf.instance_len, 1u);
if (rect_id < dof_buf.scatter_max_rect) {

vec4 coc4_fg = max(vec4(0.0), -coc4);
@@ -166,9 +166,9 @@ void main()
}
if (any(greaterThan(do_scatter4 * sign(coc4), vec4(0.0)))) {
/* Same value for all threads. Not an issue if we don't sync access to it. */
scatter_bg_indirect_buf.v_count = 4u;
scatter_bg_indirect_buf.vertex_len = 4u;
/* Issue 1 strip instance per sprite. */
uint rect_id = atomicAdd(scatter_bg_indirect_buf.i_count, 1u);
uint rect_id = atomicAdd(scatter_bg_indirect_buf.instance_len, 1u);
if (rect_id < dof_buf.scatter_max_rect) {
vec4 coc4_bg = max(vec4(0.0), coc4);
vec4 bg_weights = dof_layer_weight(coc4_bg) * dof_sample_weight(coc4_bg) * do_scatter4;

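The renamed fields match the indirect-draw arguments that this compute shader fills in for the scatter pass. A hypothetical C++ mirror of that layout, for illustration only (the actual draw manager struct may differ):

#include <cstdint>

struct DrawCommandSketch {
  uint32_t vertex_len;   /* Written as 4: one triangle-strip quad per sprite. */
  uint32_t instance_len; /* Atomically incremented: one instance per emitted sprite. */
  uint32_t vertex_first;
  uint32_t base_instance;
};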
@@ -411,7 +411,7 @@ vec4 attr_load_color_post(vec4 attr)
/** \} */

/* -------------------------------------------------------------------- */
/** \name Volume Attribute post
/** \name Uniform Attributes
*
* TODO(@fclem): These implementation details should concern the DRWManager and not be a fix on
* the engine side. But as of now, the engines are responsible for loading the attributes.
@@ -420,15 +420,20 @@ vec4 attr_load_color_post(vec4 attr)

vec4 attr_load_uniform(vec4 attr, const uint attr_hash)
{
#if defined(OBINFO_LIB) && defined(OBATTR_LIB)
for (int i = ObjectAttributeStart; i < ObjectAttributeLen; i++) {
if (drw_attrs[i].hash_code == attr_hash) {
return vec4(
drw_attrs[i].data_x, drw_attrs[i].data_y, drw_attrs[i].data_z, drw_attrs[i].data_w);
#if defined(OBATTR_LIB)
uint index = floatBitsToUint(ObjectAttributeStart);
for (uint i = 0; i < floatBitsToUint(ObjectAttributeLen); i++, index++) {
if (drw_attrs[index].hash_code == attr_hash) {
return vec4(drw_attrs[index].data_x,
drw_attrs[index].data_y,
drw_attrs[index].data_z,
drw_attrs[index].data_w);
}
}
#endif
return vec4(0.0);
#else
return attr;
#endif
}

/** \} */

@@ -6,6 +6,7 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_sampling_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
@@ -73,8 +74,7 @@ void main()

nodetree_surface();

// float noise_offset = sampling_rng_1D_get(SAMPLING_TRANSPARENCY);
float noise_offset = 0.5;
float noise_offset = sampling_rng_1D_get(SAMPLING_TRANSPARENCY);
float random_threshold = hashed_alpha_threshold(1.0, noise_offset, g_data.P);

float transparency = avg(g_transmittance);

@@ -97,6 +97,7 @@ void main()
out_normal += g_refraction_data.N * g_refraction_data.weight;
out_normal = safe_normalize(out_normal);

#ifdef MAT_RENDER_PASS_SUPPORT
ivec2 out_texel = ivec2(gl_FragCoord.xy);
imageStore(rp_normal_img, out_texel, vec4(out_normal, 1.0));
imageStore(
@@ -106,6 +107,7 @@ void main()
imageStore(rp_diffuse_color_img, out_texel, vec4(g_diffuse_data.color, 1.0));
imageStore(rp_specular_color_img, out_texel, vec4(specular_color, 1.0));
imageStore(rp_emission_img, out_texel, vec4(g_emission, 1.0));
#endif

out_radiance.rgb *= 1.0 - g_holdout;

@@ -11,7 +11,7 @@ GPU_SHADER_CREATE_INFO(eevee_depth_of_field_bokeh_lut)
.do_static_compilation(true)
.local_group_size(DOF_BOKEH_LUT_SIZE, DOF_BOKEH_LUT_SIZE)
.additional_info("eevee_shared", "draw_view")
.uniform_buf(1, "DepthOfFieldData", "dof_buf")
.uniform_buf(6, "DepthOfFieldData", "dof_buf")
.image(0, GPU_RG16F, Qualifier::WRITE, ImageType::FLOAT_2D, "out_gather_lut_img")
.image(1, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "out_scatter_lut_img")
.image(2, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "out_resolve_lut_img")
@@ -21,7 +21,7 @@ GPU_SHADER_CREATE_INFO(eevee_depth_of_field_setup)
.do_static_compilation(true)
.local_group_size(DOF_DEFAULT_GROUP_SIZE, DOF_DEFAULT_GROUP_SIZE)
.additional_info("eevee_shared", "draw_view")
.uniform_buf(1, "DepthOfFieldData", "dof_buf")
.uniform_buf(6, "DepthOfFieldData", "dof_buf")
.sampler(0, ImageType::FLOAT_2D, "color_tx")
.sampler(1, ImageType::DEPTH_2D, "depth_tx")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "out_color_img")
@@ -32,7 +32,7 @@ GPU_SHADER_CREATE_INFO(eevee_depth_of_field_stabilize)
.do_static_compilation(true)
.local_group_size(DOF_STABILIZE_GROUP_SIZE, DOF_STABILIZE_GROUP_SIZE)
.additional_info("eevee_shared", "draw_view", "eevee_velocity_camera")
.uniform_buf(4, "DepthOfFieldData", "dof_buf")
.uniform_buf(6, "DepthOfFieldData", "dof_buf")
.sampler(0, ImageType::FLOAT_2D, "coc_tx")
.sampler(1, ImageType::FLOAT_2D, "color_tx")
.sampler(2, ImageType::FLOAT_2D, "velocity_tx")
@@ -57,7 +57,7 @@ GPU_SHADER_CREATE_INFO(eevee_depth_of_field_reduce)
.do_static_compilation(true)
.local_group_size(DOF_REDUCE_GROUP_SIZE, DOF_REDUCE_GROUP_SIZE)
.additional_info("eevee_shared", "draw_view")
.uniform_buf(1, "DepthOfFieldData", "dof_buf")
.uniform_buf(6, "DepthOfFieldData", "dof_buf")
.sampler(0, ImageType::FLOAT_2D, "downsample_tx")
.storage_buf(0, Qualifier::WRITE, "ScatterRect", "scatter_fg_list_buf[]")
.storage_buf(1, Qualifier::WRITE, "ScatterRect", "scatter_bg_list_buf[]")
@@ -154,7 +154,7 @@ GPU_SHADER_CREATE_INFO(eevee_depth_of_field_gather_common)
"draw_view",
"eevee_depth_of_field_tiles_common",
"eevee_sampling_data")
.uniform_buf(2, "DepthOfFieldData", "dof_buf")
.uniform_buf(6, "DepthOfFieldData", "dof_buf")
.local_group_size(DOF_GATHER_GROUP_SIZE, DOF_GATHER_GROUP_SIZE)
.sampler(0, ImageType::FLOAT_2D, "color_tx")
.sampler(1, ImageType::FLOAT_2D, "color_bilinear_tx")
@@ -229,7 +229,7 @@ GPU_SHADER_CREATE_INFO(eevee_depth_of_field_resolve)
"draw_view",
"eevee_depth_of_field_tiles_common",
"eevee_sampling_data")
.uniform_buf(2, "DepthOfFieldData", "dof_buf")
.uniform_buf(6, "DepthOfFieldData", "dof_buf")
.sampler(0, ImageType::DEPTH_2D, "depth_tx")
.sampler(1, ImageType::FLOAT_2D, "color_tx")
.sampler(2, ImageType::FLOAT_2D, "color_bg_tx")

@@ -4,7 +4,7 @@
#include "gpu_shader_create_info.hh"

GPU_SHADER_CREATE_INFO(eevee_film)
.uniform_buf(4, "FilmData", "film_buf")
.uniform_buf(6, "FilmData", "film_buf")
.sampler(0, ImageType::DEPTH_2D, "depth_tx")
.sampler(1, ImageType::FLOAT_2D, "combined_tx")
.sampler(2, ImageType::FLOAT_2D, "normal_tx")

@@ -8,10 +8,10 @@
* \{ */

GPU_SHADER_CREATE_INFO(eevee_light_data)
.storage_buf(0, Qualifier::READ, "LightCullingData", "light_cull_buf")
.storage_buf(1, Qualifier::READ, "LightData", "light_buf[]")
.storage_buf(2, Qualifier::READ, "uint", "light_zbin_buf[]")
.storage_buf(3, Qualifier::READ, "uint", "light_tile_buf[]");
.storage_buf(LIGHT_CULL_BUF_SLOT, Qualifier::READ, "LightCullingData", "light_cull_buf")
.storage_buf(LIGHT_BUF_SLOT, Qualifier::READ, "LightData", "light_buf[]")
.storage_buf(LIGHT_ZBIN_BUF_SLOT, Qualifier::READ, "uint", "light_zbin_buf[]")
.storage_buf(LIGHT_TILE_BUF_SLOT, Qualifier::READ, "uint", "light_tile_buf[]");

/** \} */

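With the slots now shared through eevee_defines.hh, the C++ binding side can use the same macros as this create info. A hypothetical bind helper mirroring the velocity module's template, imagined as a member of the light culling module (the buffer member names are illustrative):

template<typename T> void bind_light_resources_sketch(draw::detail::Pass<T> *pass)
{
  pass->bind_ssbo(LIGHT_CULL_BUF_SLOT, &light_cull_buf_);
  pass->bind_ssbo(LIGHT_BUF_SLOT, &light_buf_);
  pass->bind_ssbo(LIGHT_ZBIN_BUF_SLOT, &light_zbin_buf_);
  pass->bind_ssbo(LIGHT_TILE_BUF_SLOT, &light_tile_buf_);
}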
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "eevee_defines.hh"
#include "gpu_shader_create_info.hh"

/* -------------------------------------------------------------------- */
@@ -14,9 +15,10 @@ GPU_SHADER_CREATE_INFO(eevee_shared)
GPU_SHADER_CREATE_INFO(eevee_sampling_data)
.define("EEVEE_SAMPLING_DATA")
.additional_info("eevee_shared")
.storage_buf(14, Qualifier::READ, "SamplingData", "sampling_buf");
.storage_buf(6, Qualifier::READ, "SamplingData", "sampling_buf");

GPU_SHADER_CREATE_INFO(eevee_utility_texture).sampler(8, ImageType::FLOAT_2D_ARRAY, "utility_tx");
GPU_SHADER_CREATE_INFO(eevee_utility_texture)
.sampler(RBUFS_UTILITY_TEX_SLOT, ImageType::FLOAT_2D_ARRAY, "utility_tx");

/** \} */

@@ -30,7 +32,7 @@ GPU_SHADER_CREATE_INFO(eevee_geom_mesh)
.vertex_in(0, Type::VEC3, "pos")
.vertex_in(1, Type::VEC3, "nor")
.vertex_source("eevee_geom_mesh_vert.glsl")
.additional_info("draw_mesh", "draw_resource_id_varying", "draw_resource_handle");
.additional_info("draw_modelmat_new", "draw_resource_id_varying", "draw_view");

GPU_SHADER_CREATE_INFO(eevee_geom_gpencil)
.additional_info("eevee_shared")
@@ -52,7 +54,7 @@ GPU_SHADER_CREATE_INFO(eevee_geom_world)
.define("MAT_GEOM_WORLD")
.builtins(BuiltinBits::VERTEX_ID)
.vertex_source("eevee_geom_world_vert.glsl")
.additional_info("draw_modelmat", "draw_resource_id_varying", "draw_resource_handle");
.additional_info("draw_modelmat_new", "draw_resource_id_varying", "draw_view");

/** \} */

@@ -78,9 +80,17 @@ GPU_SHADER_INTERFACE_INFO(eevee_surf_iface, "interp")

GPU_SHADER_CREATE_INFO(eevee_aov_out)
.define("MAT_AOV_SUPPORT")
.image_array_out(5, Qualifier::WRITE, GPU_RGBA16F, "aov_color_img")
.image_array_out(6, Qualifier::WRITE, GPU_R16F, "aov_value_img")
.storage_buf(7, Qualifier::READ, "AOVsInfoData", "aov_buf");
.image_array_out(RBUFS_AOV_COLOR_SLOT, Qualifier::WRITE, GPU_RGBA16F, "aov_color_img")
.image_array_out(RBUFS_AOV_VALUE_SLOT, Qualifier::WRITE, GPU_R16F, "aov_value_img")
.storage_buf(RBUFS_AOV_BUF_SLOT, Qualifier::READ, "AOVsInfoData", "aov_buf");

GPU_SHADER_CREATE_INFO(eevee_render_pass_out)
.define("MAT_RENDER_PASS_SUPPORT")
.image_out(RBUFS_NORMAL_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_normal_img")
.image_array_out(RBUFS_LIGHT_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_light_img")
.image_out(RBUFS_DIFF_COLOR_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
.image_out(RBUFS_SPEC_COLOR_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_color_img")
.image_out(RBUFS_EMISSION_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_emission_img");

GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
.vertex_out(eevee_surf_iface)
@@ -104,7 +114,6 @@ GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
;

GPU_SHADER_CREATE_INFO(eevee_surf_forward)
.auto_resource_location(true)
.vertex_out(eevee_surf_iface)
/* Early fragment test is needed for render passes support for forward surfaces. */
/* NOTE: This removes the possibility of using gl_FragDepth. */
@@ -112,41 +121,27 @@ GPU_SHADER_CREATE_INFO(eevee_surf_forward)
.fragment_out(0, Type::VEC4, "out_radiance", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "out_transmittance", DualBlend::SRC_1)
.fragment_source("eevee_surf_forward_frag.glsl")
.image_out(0, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_normal_img")
.image_array_out(1, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_light_img")
.image_out(2, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
.image_out(3, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_color_img")
.image_out(4, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_emission_img")
.additional_info("eevee_aov_out",
"eevee_light_data",
"eevee_utility_texture",
"eevee_sampling_data"
// "eevee_lightprobe_data",
.additional_info("eevee_light_data", "eevee_utility_texture", "eevee_sampling_data"
// "eevee_lightprobe_data",
// "eevee_shadow_data"
/* Optionally added depending on the material. */
// "eevee_raytrace_data",
// "eevee_transmittance_data",
// "eevee_shadow_data"
// "eevee_aov_out",
// "eevee_render_pass_out",
);

GPU_SHADER_CREATE_INFO(eevee_surf_depth)
.vertex_out(eevee_surf_iface)
.fragment_source("eevee_surf_depth_frag.glsl")
// .additional_info("eevee_sampling_data", "eevee_utility_texture")
;
.additional_info("eevee_sampling_data", "eevee_utility_texture");

GPU_SHADER_CREATE_INFO(eevee_surf_world)
.vertex_out(eevee_surf_iface)
.image_out(0, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_normal_img")
.image_array_out(1, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_light_img")
.image_out(2, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
.image_out(3, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_color_img")
.image_out(4, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_emission_img")
.push_constant(Type::FLOAT, "world_opacity_fade")
.fragment_out(0, Type::VEC4, "out_background")
.fragment_source("eevee_surf_world_frag.glsl")
.additional_info("eevee_aov_out"
//"eevee_utility_texture"
);
.additional_info("eevee_aov_out", "eevee_render_pass_out", "eevee_utility_texture");

#undef image_out
#undef image_array_out
@@ -188,10 +183,7 @@ GPU_SHADER_CREATE_INFO(eevee_volume_deferred)
GPU_SHADER_CREATE_INFO(eevee_material_stub).define("EEVEE_MATERIAL_STUBS");

# define EEVEE_MAT_FINAL_VARIATION(name, ...) \
GPU_SHADER_CREATE_INFO(name) \
.additional_info(__VA_ARGS__) \
.auto_resource_location(true) \
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(name).additional_info(__VA_ARGS__).do_static_compilation(true);

# define EEVEE_MAT_GEOM_VARIATIONS(prefix, ...) \
EEVEE_MAT_FINAL_VARIATION(prefix##_world, "eevee_geom_world", __VA_ARGS__) \

@@ -6,7 +6,7 @@
GPU_SHADER_CREATE_INFO(eevee_motion_blur_tiles_flatten)
.local_group_size(MOTION_BLUR_GROUP_SIZE, MOTION_BLUR_GROUP_SIZE)
.additional_info("eevee_shared", "draw_view", "eevee_velocity_camera")
.uniform_buf(4, "MotionBlurData", "motion_blur_buf")
.uniform_buf(6, "MotionBlurData", "motion_blur_buf")
.sampler(0, ImageType::DEPTH_2D, "depth_tx")
.image(1, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "out_tiles_img")
.compute_source("eevee_motion_blur_flatten_comp.glsl");
@@ -35,7 +35,7 @@ GPU_SHADER_CREATE_INFO(eevee_motion_blur_gather)
.do_static_compilation(true)
.local_group_size(MOTION_BLUR_GROUP_SIZE, MOTION_BLUR_GROUP_SIZE)
.additional_info("eevee_shared", "draw_view", "eevee_sampling_data")
.uniform_buf(4, "MotionBlurData", "motion_blur_buf")
.uniform_buf(6, "MotionBlurData", "motion_blur_buf")
.sampler(0, ImageType::DEPTH_2D, "depth_tx")
.sampler(1, ImageType::FLOAT_2D, "velocity_tx")
.sampler(2, ImageType::FLOAT_2D, "in_color_tx")

@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include "eevee_defines.hh"
#include "gpu_shader_create_info.hh"

/* -------------------------------------------------------------------- */
@@ -17,19 +18,20 @@ GPU_SHADER_INTERFACE_INFO(eevee_velocity_surface_iface, "motion")

GPU_SHADER_CREATE_INFO(eevee_velocity_camera)
.define("VELOCITY_CAMERA")
.uniform_buf(1, "CameraData", "camera_prev")
.uniform_buf(2, "CameraData", "camera_curr")
.uniform_buf(3, "CameraData", "camera_next");
.uniform_buf(VELOCITY_CAMERA_PREV_BUF, "CameraData", "camera_prev")
.uniform_buf(VELOCITY_CAMERA_CURR_BUF, "CameraData", "camera_curr")
.uniform_buf(VELOCITY_CAMERA_NEXT_BUF, "CameraData", "camera_next");

GPU_SHADER_CREATE_INFO(eevee_velocity_geom)
.define("MAT_VELOCITY")
.auto_resource_location(true)
.storage_buf(4, Qualifier::READ, "mat4", "velocity_obj_prev_buf[]", Frequency::PASS)
.storage_buf(5, Qualifier::READ, "mat4", "velocity_obj_next_buf[]", Frequency::PASS)
.storage_buf(6, Qualifier::READ, "vec4", "velocity_geo_prev_buf[]", Frequency::PASS)
.storage_buf(7, Qualifier::READ, "vec4", "velocity_geo_next_buf[]", Frequency::PASS)
.storage_buf(
7, Qualifier::READ, "VelocityIndex", "velocity_indirection_buf[]", Frequency::PASS)
.storage_buf(VELOCITY_OBJ_PREV_BUF_SLOT, Qualifier::READ, "mat4", "velocity_obj_prev_buf[]")
.storage_buf(VELOCITY_OBJ_NEXT_BUF_SLOT, Qualifier::READ, "mat4", "velocity_obj_next_buf[]")
.storage_buf(VELOCITY_GEO_PREV_BUF_SLOT, Qualifier::READ, "vec4", "velocity_geo_prev_buf[]")
.storage_buf(VELOCITY_GEO_NEXT_BUF_SLOT, Qualifier::READ, "vec4", "velocity_geo_next_buf[]")
.storage_buf(VELOCITY_INDIRECTION_BUF_SLOT,
Qualifier::READ,
"VelocityIndex",
"velocity_indirection_buf[]")
.vertex_out(eevee_velocity_surface_iface)
.fragment_out(0, Type::VEC4, "out_velocity")
.additional_info("eevee_velocity_camera");