UI: Drag Select Edge and Face Keymap Configuration #109309

Open
Lukas Sneyd wants to merge 25 commits from lcas/blender:drag-select-edge-face-keymap-options into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
181 changed files with 6670 additions and 2438 deletions
Showing only changes of commit e12216c208 - Show all commits

View File

@ -1734,12 +1734,12 @@ class CyclesPreferences(bpy.types.AddonPreferences):
elif device_type == 'ONEAPI':
import sys
if sys.platform.startswith("win"):
driver_version = "101.4032"
driver_version = "101.4314"
col.label(text="Requires Intel GPU with Xe-HPG architecture", icon='BLANK1')
col.label(text=iface_("and Windows driver version %s or newer") % driver_version,
icon='BLANK1', translate=False)
elif sys.platform.startswith("linux"):
driver_version = "1.3.24931"
driver_version = "1.3.25812"
col.label(text="Requires Intel GPU with Xe-HPG architecture and", icon='BLANK1')
col.label(text=iface_(" - intel-level-zero-gpu version %s or newer") % driver_version,
icon='BLANK1', translate=False)

View File

@ -759,10 +759,10 @@ bool OneapiDevice::enqueue_kernel(KernelContext *kernel_context,
/* Compute-runtime (ie. NEO) version is what gets returned by sycl/L0 on Windows
* since Windows driver 101.3268. */
/* The same min compute-runtime version is currently required across Windows and Linux.
* For Windows driver 101.4032, compute-runtime version is 24931. */
static const int lowest_supported_driver_version_win = 1014032;
static const int lowest_supported_driver_version_neo = 24931;
/* The same min compute-runtime version is currently used across Windows and Linux.
* For Windows driver 101.4314, compute-runtime version is 25977. */
static const int lowest_supported_driver_version_win = 1014314;
static const int lowest_supported_driver_version_neo = 25812;
int OneapiDevice::parse_driver_build_version(const sycl::device &device)
{

View File

@ -10,9 +10,9 @@ CCL_NAMESPACE_BEGIN
ccl_device float spot_light_attenuation(const ccl_global KernelSpotLight *spot, float3 ray)
{
const float3 scaled_ray = safe_normalize(
make_float3(dot(ray, spot->axis_u), dot(ray, spot->axis_v), dot(ray, spot->dir)) /
spot->len);
const float3 scaled_ray = safe_normalize(make_float3(dot(ray, spot->scaled_axis_u),
dot(ray, spot->scaled_axis_v),
dot(ray, spot->dir * spot->inv_len_z)));
return smoothstepf((scaled_ray.z - spot->cos_half_spot_angle) * spot->spot_smooth);
}

View File

@ -24,14 +24,14 @@
\
for (int i = 0; i <= ceil(params.detail); ++i) { \
VoronoiOutput octave; \
if (params.feature == "f1") { \
octave = voronoi_f1(params, coord * scale); \
if (params.feature == "f2") { \
octave = voronoi_f2(params, coord * scale); \
} \
else if (params.feature == "smooth_f1") { \
else if (params.feature == "smooth_f1" && params.smoothness != 0.0) { \
octave = voronoi_smooth_f1(params, coord * scale); \
} \
else { \
octave = voronoi_f2(params, coord * scale); \
octave = voronoi_f1(params, coord * scale); \
} \
\
if (zero_input) { \

View File

@ -878,11 +878,12 @@ ccl_device VoronoiOutput fractal_voronoi_x_fx(ccl_private const VoronoiParams &p
params.lacunarity == 0.0f;
for (int i = 0; i <= ceilf(params.detail); ++i) {
VoronoiOutput octave = (params.feature == NODE_VORONOI_F1) ?
voronoi_f1(params, coord * scale) :
(params.feature == NODE_VORONOI_SMOOTH_F1) ?
VoronoiOutput octave = (params.feature == NODE_VORONOI_F2) ?
voronoi_f2(params, coord * scale) :
(params.feature == NODE_VORONOI_SMOOTH_F1 &&
params.smoothness != 0.0f) ?
voronoi_smooth_f1(params, coord * scale) :
voronoi_f2(params, coord * scale);
voronoi_f1(params, coord * scale);
if (zero_input) {
max_amplitude = 1.0f;

View File

@ -1350,14 +1350,15 @@ typedef struct KernelCurveSegment {
static_assert_align(KernelCurveSegment, 8);
typedef struct KernelSpotLight {
packed_float3 axis_u;
packed_float3 scaled_axis_u;
float radius;
packed_float3 axis_v;
packed_float3 scaled_axis_v;
float invarea;
packed_float3 dir;
float cos_half_spot_angle;
packed_float3 len;
float inv_len_z;
float spot_smooth;
float pad[2];
} KernelSpotLight;
/* PointLight is SpotLight with only radius and invarea being used. */

View File

@ -1309,13 +1309,12 @@ void LightManager::device_update_lights(Device *device, DeviceScene *dscene, Sce
else if (light->light_type == LIGHT_SPOT) {
shader_id &= ~SHADER_AREA_LIGHT;
float3 len;
float3 axis_u = normalize_len(light->axisu, &len.x);
float3 axis_v = normalize_len(light->axisv, &len.y);
float3 dir = normalize_len(light->dir, &len.z);
if (len.z == 0.0f) {
dir = zero_float3();
}
/* Scale axes to accommodate non-uniform scaling. */
float3 scaled_axis_u = light->axisu / len_squared(light->axisu);
float3 scaled_axis_v = light->axisv / len_squared(light->axisv);
float len_z;
/* Keep direction normalized. */
float3 dir = safe_normalize_len(light->dir, &len_z);
float radius = light->size;
float invarea = (light->normalize && radius > 0.0f) ? 1.0f / (M_PI_F * radius * radius) :
@ -1327,13 +1326,13 @@ void LightManager::device_update_lights(Device *device, DeviceScene *dscene, Sce
shader_id |= SHADER_USE_MIS;
klights[light_index].co = co;
klights[light_index].spot.axis_u = axis_u;
klights[light_index].spot.scaled_axis_u = scaled_axis_u;
klights[light_index].spot.radius = radius;
klights[light_index].spot.axis_v = axis_v;
klights[light_index].spot.scaled_axis_v = scaled_axis_v;
klights[light_index].spot.invarea = invarea;
klights[light_index].spot.dir = dir;
klights[light_index].spot.cos_half_spot_angle = cos_half_spot_angle;
klights[light_index].spot.len = len;
klights[light_index].spot.inv_len_z = 1.0f / len_z;
klights[light_index].spot.spot_smooth = spot_smooth;
}

View File

@ -31,6 +31,7 @@ class DATA_PT_context_grease_pencil(DataButtonsPanel, Panel):
elif grease_pencil:
layout.template_ID(space, "pin_id")
class DATA_PT_grease_pencil_layers(DataButtonsPanel, Panel):
bl_label = "Layers"

View File

@ -19,7 +19,7 @@ class DataButtonsPanel:
class DATA_PT_context_lightprobe(DataButtonsPanel, Panel):
bl_label = ""
bl_options = {'HIDE_HEADER'}
COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_RENDER'}
COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_RENDER', 'BLENDER_EEVEE_NEXT'}
def draw(self, context):
layout = self.layout
@ -83,6 +83,42 @@ class DATA_PT_lightprobe(DataButtonsPanel, Panel):
sub.prop(probe, "clip_end", text="End")
class DATA_PT_lightprobe_eevee_next(DataButtonsPanel, Panel):
bl_label = "Probe"
COMPAT_ENGINES = {'BLENDER_EEVEE_NEXT'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
probe = context.lightprobe
if probe.type == 'GRID':
col = layout.column()
sub = col.column(align=True)
sub.prop(probe, "grid_resolution_x", text="Resolution X")
sub.prop(probe, "grid_resolution_y", text="Y")
sub.prop(probe, "grid_resolution_z", text="Z")
col.separator()
col.operator("object.lightprobe_cache_bake").subset = "ACTIVE"
col.operator("object.lightprobe_cache_free").subset = "ACTIVE"
col.separator()
col.prop(probe, "grid_bake_samples")
col.prop(probe, "surfel_density")
elif probe.type == 'PLANAR':
# Currently unsupported
pass
else:
# Currently unsupported
pass
class DATA_PT_lightprobe_visibility(DataButtonsPanel, Panel):
bl_label = "Visibility"
bl_parent_id = "DATA_PT_lightprobe"
@ -169,6 +205,7 @@ class DATA_PT_lightprobe_display(DataButtonsPanel, Panel):
classes = (
DATA_PT_context_lightprobe,
DATA_PT_lightprobe,
DATA_PT_lightprobe_eevee_next,
DATA_PT_lightprobe_visibility,
DATA_PT_lightprobe_parallax,
DATA_PT_lightprobe_display,

View File

@ -573,6 +573,28 @@ class RENDER_PT_eevee_indirect_lighting(RenderButtonsPanel, Panel):
col.prop(props, "gi_filter_quality")
class RENDER_PT_eevee_next_indirect_lighting(RenderButtonsPanel, Panel):
bl_label = "Indirect Lighting"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_EEVEE_NEXT'}
@classmethod
def poll(cls, context):
return (context.engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
scene = context.scene
props = scene.eevee
col = layout.column()
col.operator("object.lightprobe_cache_bake", text="Bake Light Caches", icon='RENDER_STILL').subset = "ALL"
col.operator("object.lightprobe_cache_free", text="Delete Light Caches").subset = "ALL"
class RENDER_PT_eevee_indirect_lighting_display(RenderButtonsPanel, Panel):
bl_label = "Display"
bl_parent_id = "RENDER_PT_eevee_indirect_lighting"
@ -599,6 +621,28 @@ class RENDER_PT_eevee_indirect_lighting_display(RenderButtonsPanel, Panel):
row.prop(props, "gi_show_irradiance", text="", toggle=True)
class RENDER_PT_eevee_next_indirect_lighting_display(RenderButtonsPanel, Panel):
bl_label = "Display"
bl_parent_id = "RENDER_PT_eevee_next_indirect_lighting"
COMPAT_ENGINES = {'BLENDER_EEVEE_NEXT'}
@classmethod
def poll(cls, context):
return (context.engine in cls.COMPAT_ENGINES)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
scene = context.scene
props = scene.eevee
row = layout.row(align=True)
row.prop(props, "gi_irradiance_display_size", text="Irradiance Size")
row.prop(props, "gi_show_irradiance", text="", toggle=True)
class RENDER_PT_eevee_film(RenderButtonsPanel, Panel):
bl_label = "Film"
bl_options = {'DEFAULT_CLOSED'}
@ -908,6 +952,8 @@ classes = (
RENDER_PT_eevee_next_shadows,
RENDER_PT_eevee_indirect_lighting,
RENDER_PT_eevee_indirect_lighting_display,
RENDER_PT_eevee_next_indirect_lighting,
RENDER_PT_eevee_next_indirect_lighting_display,
RENDER_PT_eevee_film,
RENDER_PT_eevee_next_film,

View File

@ -1157,7 +1157,6 @@ classes = (
NODE_PT_node_color_presets,
NODE_PT_active_node_generic,
NODE_PT_active_node_color,
NODE_PT_active_node_properties,
NODE_PT_texture_mapping,
NODE_PT_active_tool,
NODE_PT_backdrop,
@ -1171,6 +1170,7 @@ classes = (
NODE_PT_panels,
NODE_UL_simulation_zone_items,
NODE_PT_simulation_zone_items,
NODE_PT_active_node_properties,
node_panel(EEVEE_MATERIAL_PT_settings),
node_panel(MATERIAL_PT_viewport),

View File

@ -1991,6 +1991,7 @@ class VIEW3D_MT_select_edit_grease_pencil(Menu):
layout.separator()
layout.operator("grease_pencil.select_linked", text="Linked")
layout.operator("grease_pencil.select_alternate", text="Alternated")
layout.operator("grease_pencil.select_random", text="Random")
layout.separator()

View File

@ -28,11 +28,12 @@ struct ReportList;
* \{ */
typedef enum eBPathForeachFlag {
/** Flags controlling the behavior of the generic BPath API. */
/** Ensures the `absolute_base_path` member of #BPathForeachPathData is initialized properly with
/* Flags controlling the behavior of the generic BPath API. */
/**
* Ensures the `absolute_base_path` member of #BPathForeachPathData is initialized properly with
* the path of the current .blend file. This can be used by the callbacks to convert relative
* paths to absolute ones. */
* paths to absolute ones.
*/
BKE_BPATH_FOREACH_PATH_ABSOLUTE = (1 << 0),
/** Skip paths of linked IDs. */
BKE_BPATH_FOREACH_PATH_SKIP_LINKED = (1 << 1),
@ -68,13 +69,19 @@ struct BPathForeachPathData;
/**
* Callback used to iterate over an ID's file paths.
* \param path_dst: Optionally write to the path (for callbacks that manipulate the path).
* \note When #BKE_BPATH_FOREACH_PATH_ABSOLUTE us used, `path_src` will be absolute and `path_dst`
* can be used to access the original path.
* \param path_dst_maxncpy: The buffer size of `path_dst` including the null byte.
* \warning Actions such as #BLI_path_abs & #BLI_path_rel must not be called directly
* on `path_dst` as they assume #FILE_MAX size which may not be the case.
*
* \note The `path` parameters should be considered to have a maximum string length of `FILE_MAX`.
*
* \return `true` if the path has been changed, and in that case, result should be written into
* `r_path_dst`. */
* \return `true` if the path has been changed, and in that case,
* result must be written to `path_dst`.
*/
typedef bool (*BPathForeachPathFunctionCallback)(struct BPathForeachPathData *bpath_data,
char *r_path_dst,
char *path_dst,
size_t path_dst_maxncpy,
const char *path_src);
/** Storage for common data needed across the BPath 'foreach_path' code. */
@ -122,7 +129,9 @@ void BKE_bpath_foreach_path_main(BPathForeachPathData *bpath_data);
*
* \return true if \a path was modified, false otherwise.
*/
bool BKE_bpath_foreach_path_fixed_process(struct BPathForeachPathData *bpath_data, char *path);
bool BKE_bpath_foreach_path_fixed_process(struct BPathForeachPathData *bpath_data,
char *path,
size_t path_maxncpy);
/**
* Run the callback on a (directory + file) path, replacing the content of the two strings as
@ -135,7 +144,9 @@ bool BKE_bpath_foreach_path_fixed_process(struct BPathForeachPathData *bpath_dat
*/
bool BKE_bpath_foreach_path_dirfile_fixed_process(struct BPathForeachPathData *bpath_data,
char *path_dir,
char *path_file);
size_t path_dir_maxncpy,
char *path_file,
size_t path_file_maxncpy);
/**
* Run the callback on a path, replacing the content of the string as needed.

View File

@ -167,9 +167,10 @@ static bool blendfile_or_libraries_versions_atleast(Main *bmain,
static bool foreach_path_clean_cb(BPathForeachPathData * /*bpath_data*/,
char *path_dst,
size_t path_dst_maxncpy,
const char *path_src)
{
strcpy(path_dst, path_src);
BLI_strncpy(path_dst, path_src, path_dst_maxncpy);
BLI_path_slash_native(path_dst);
return !STREQ(path_dst, path_src);
}

View File

@ -97,7 +97,9 @@ void BKE_bpath_foreach_path_id(BPathForeachPathData *bpath_data, ID *id)
if (id->library_weak_reference != NULL && (flag & BKE_BPATH_TRAVERSE_SKIP_WEAK_REFERENCES) == 0)
{
BKE_bpath_foreach_path_fixed_process(bpath_data, id->library_weak_reference->library_filepath);
BKE_bpath_foreach_path_fixed_process(bpath_data,
id->library_weak_reference->library_filepath,
sizeof(id->library_weak_reference->library_filepath));
}
bNodeTree *embedded_node_tree = ntreeFromID(id);
@ -128,7 +130,9 @@ void BKE_bpath_foreach_path_main(BPathForeachPathData *bpath_data)
FOREACH_MAIN_ID_END;
}
bool BKE_bpath_foreach_path_fixed_process(BPathForeachPathData *bpath_data, char *path)
bool BKE_bpath_foreach_path_fixed_process(BPathForeachPathData *bpath_data,
char *path,
size_t path_maxncpy)
{
const char *absolute_base_path = bpath_data->absolute_base_path;
@ -148,8 +152,8 @@ bool BKE_bpath_foreach_path_fixed_process(BPathForeachPathData *bpath_data, char
/* so functions can check old value */
STRNCPY(path_dst, path);
if (bpath_data->callback_function(bpath_data, path_dst, path_src)) {
BLI_strncpy(path, path_dst, FILE_MAX);
if (bpath_data->callback_function(bpath_data, path_dst, sizeof(path_dst), path_src)) {
BLI_strncpy(path, path_dst, path_maxncpy);
bpath_data->is_path_modified = true;
return true;
}
@ -159,7 +163,9 @@ bool BKE_bpath_foreach_path_fixed_process(BPathForeachPathData *bpath_data, char
bool BKE_bpath_foreach_path_dirfile_fixed_process(BPathForeachPathData *bpath_data,
char *path_dir,
char *path_file)
size_t path_dir_maxncpy,
char *path_file,
size_t path_file_maxncpy)
{
const char *absolute_base_path = bpath_data->absolute_base_path;
@ -175,8 +181,9 @@ bool BKE_bpath_foreach_path_dirfile_fixed_process(BPathForeachPathData *bpath_da
BLI_path_abs(path_src, absolute_base_path);
}
if (bpath_data->callback_function(bpath_data, path_dst, (const char *)path_src)) {
BLI_path_split_dir_file(path_dst, path_dir, FILE_MAXDIR, path_file, FILE_MAXFILE);
if (bpath_data->callback_function(
bpath_data, path_dst, sizeof(path_dst), (const char *)path_src)) {
BLI_path_split_dir_file(path_dst, path_dir, path_dir_maxncpy, path_file, path_file_maxncpy);
bpath_data->is_path_modified = true;
return true;
}
@ -201,7 +208,7 @@ bool BKE_bpath_foreach_path_allocated_process(BPathForeachPathData *bpath_data,
path_src = *path;
}
if (bpath_data->callback_function(bpath_data, path_dst, path_src)) {
if (bpath_data->callback_function(bpath_data, path_dst, sizeof(path_dst), path_src)) {
MEM_freeN(*path);
(*path) = BLI_strdup(path_dst);
bpath_data->is_path_modified = true;
@ -219,6 +226,8 @@ bool BKE_bpath_foreach_path_allocated_process(BPathForeachPathData *bpath_data,
static bool check_missing_files_foreach_path_cb(BPathForeachPathData *bpath_data,
char *UNUSED(path_dst),
size_t UNUSED(path_dst_maxncpy),
const char *path_src)
{
ReportList *reports = (ReportList *)bpath_data->user_data;
@ -337,6 +346,7 @@ typedef struct BPathFind_Data {
static bool missing_files_find_foreach_path_cb(BPathForeachPathData *bpath_data,
char *path_dst,
size_t path_dst_maxncpy,
const char *path_src)
{
BPathFind_Data *data = (BPathFind_Data *)bpath_data->user_data;
@ -371,15 +381,11 @@ static bool missing_files_find_foreach_path_cb(BPathForeachPathData *bpath_data,
return false;
}
bool was_relative = BLI_path_is_rel(path_dst);
BLI_strncpy(path_dst, filepath_new, FILE_MAX);
/* Keep the path relative if the previous one was relative. */
if (was_relative) {
BLI_path_rel(path_dst, data->basedir);
if (BLI_path_is_rel(path_dst)) {
BLI_path_rel(filepath_new, data->basedir);
}
BLI_strncpy(path_dst, filepath_new, path_dst_maxncpy);
return true;
}
@ -425,6 +431,7 @@ typedef struct BPathRebase_Data {
static bool relative_rebase_foreach_path_cb(BPathForeachPathData *bpath_data,
char *path_dst,
size_t path_dst_maxncpy,
const char *path_src)
{
BPathRebase_Data *data = (BPathRebase_Data *)bpath_data->user_data;
@ -449,7 +456,7 @@ static bool relative_rebase_foreach_path_cb(BPathForeachPathData *bpath_data,
/* This may fail, if so it's fine to leave absolute since the path is still valid. */
BLI_path_rel(filepath, data->basedir_dst);
BLI_strncpy(path_dst, filepath, FILE_MAX);
BLI_strncpy(path_dst, filepath, path_dst_maxncpy);
data->count_changed++;
return true;
}
@ -500,6 +507,7 @@ typedef struct BPathRemap_Data {
static bool relative_convert_foreach_path_cb(BPathForeachPathData *bpath_data,
char *path_dst,
size_t path_dst_maxncpy,
const char *path_src)
{
BPathRemap_Data *data = (BPathRemap_Data *)bpath_data->user_data;
@ -510,12 +518,11 @@ static bool relative_convert_foreach_path_cb(BPathForeachPathData *bpath_data,
return false; /* Already relative. */
}
BLI_strncpy(path_dst, path_src, FILE_MAX);
BLI_path_rel(path_dst, data->basedir);
if (BLI_path_is_rel(path_dst)) {
data->count_changed++;
}
else {
char path_test[FILE_MAX];
STRNCPY(path_test, path_src);
BLI_path_rel(path_test, data->basedir);
if (!BLI_path_is_rel(path_test)) {
const char *type_name = BKE_idtype_get_info_from_id(bpath_data->owner_id)->name;
const char *id_name = bpath_data->owner_id->name + 2;
BKE_reportf(data->reports,
@ -525,12 +532,17 @@ static bool relative_convert_foreach_path_cb(BPathForeachPathData *bpath_data,
type_name,
id_name);
data->count_failed++;
return false;
}
BLI_strncpy(path_dst, path_test, path_dst_maxncpy);
data->count_changed++;
return true;
}
static bool absolute_convert_foreach_path_cb(BPathForeachPathData *bpath_data,
char *path_dst,
size_t path_dst_maxncpy,
const char *path_src)
{
BPathRemap_Data *data = (BPathRemap_Data *)bpath_data->user_data;
@ -541,12 +553,10 @@ static bool absolute_convert_foreach_path_cb(BPathForeachPathData *bpath_data,
return false; /* Already absolute. */
}
BLI_strncpy(path_dst, path_src, FILE_MAX);
BLI_path_abs(path_dst, data->basedir);
if (BLI_path_is_rel(path_dst) == false) {
data->count_changed++;
}
else {
char path_test[FILE_MAX];
STRNCPY(path_test, path_src);
BLI_path_abs(path_test, data->basedir);
if (BLI_path_is_rel(path_test)) {
const char *type_name = BKE_idtype_get_info_from_id(bpath_data->owner_id)->name;
const char *id_name = bpath_data->owner_id->name + 2;
BKE_reportf(data->reports,
@ -556,7 +566,11 @@ static bool absolute_convert_foreach_path_cb(BPathForeachPathData *bpath_data,
type_name,
id_name);
data->count_failed++;
return false;
}
BLI_strncpy(path_dst, path_test, path_dst_maxncpy);
data->count_changed++;
return true;
}
@ -611,6 +625,7 @@ struct PathStore {
static bool bpath_list_append(BPathForeachPathData *bpath_data,
char *UNUSED(path_dst),
size_t UNUSED(path_dst_maxncpy),
const char *path_src)
{
ListBase *path_list = bpath_data->user_data;
@ -627,6 +642,7 @@ static bool bpath_list_append(BPathForeachPathData *bpath_data,
static bool bpath_list_restore(BPathForeachPathData *bpath_data,
char *path_dst,
size_t path_dst_maxncpy,
const char *path_src)
{
ListBase *path_list = bpath_data->user_data;
@ -640,7 +656,7 @@ static bool bpath_list_restore(BPathForeachPathData *bpath_data,
bool is_path_changed = false;
if (!STREQ(path_src, filepath)) {
BLI_strncpy(path_dst, filepath, FILE_MAX);
BLI_strncpy(path_dst, filepath, path_dst_maxncpy);
is_path_changed = true;
}

View File

@ -208,7 +208,8 @@ static void brush_foreach_path(ID *id, BPathForeachPathData *bpath_data)
{
Brush *brush = (Brush *)id;
if (brush->icon_filepath[0] != '\0') {
BKE_bpath_foreach_path_fixed_process(bpath_data, brush->icon_filepath);
BKE_bpath_foreach_path_fixed_process(
bpath_data, brush->icon_filepath, sizeof(brush->icon_filepath));
}
}

View File

@ -87,7 +87,8 @@ static void cache_file_free_data(ID *id)
static void cache_file_foreach_path(ID *id, BPathForeachPathData *bpath_data)
{
CacheFile *cache_file = (CacheFile *)id;
BKE_bpath_foreach_path_fixed_process(bpath_data, cache_file->filepath);
BKE_bpath_foreach_path_fixed_process(
bpath_data, cache_file->filepath, sizeof(cache_file->filepath));
}
static void cache_file_blend_write(BlendWriter *writer, ID *id, const void *id_address)

View File

@ -300,7 +300,7 @@ static void image_foreach_path(ID *id, BPathForeachPathData *bpath_data)
temp_path, udim_pattern, tile_format, ((ImageTile *)ima->tiles.first)->tile_number);
MEM_SAFE_FREE(udim_pattern);
result = BKE_bpath_foreach_path_fixed_process(bpath_data, temp_path);
result = BKE_bpath_foreach_path_fixed_process(bpath_data, temp_path, sizeof(temp_path));
if (result) {
/* Put the filepath back together using the new directory and the original file name. */
char new_dir[FILE_MAXDIR];
@ -309,7 +309,8 @@ static void image_foreach_path(ID *id, BPathForeachPathData *bpath_data)
}
}
else {
result = BKE_bpath_foreach_path_fixed_process(bpath_data, ima->filepath);
result = BKE_bpath_foreach_path_fixed_process(
bpath_data, ima->filepath, sizeof(ima->filepath));
}
if (result) {
@ -1164,7 +1165,7 @@ static ImBuf *add_ibuf_for_tile(Image *ima, ImageTile *tile)
if (ibuf != nullptr) {
rect = ibuf->byte_buffer.data;
IMB_colormanagement_assign_rect_colorspace(ibuf, ima->colorspace_settings.name);
IMB_colormanagement_assign_byte_colorspace(ibuf, ima->colorspace_settings.name);
}
copy_v4_v4(fill_color, tile->gen_color);
@ -1263,8 +1264,8 @@ static void image_colorspace_from_imbuf(Image *image, const ImBuf *ibuf)
const char *colorspace_name = nullptr;
if (ibuf->float_buffer.data) {
if (ibuf->float_colorspace) {
colorspace_name = IMB_colormanagement_colorspace_get_name(ibuf->float_colorspace);
if (ibuf->float_buffer.colorspace) {
colorspace_name = IMB_colormanagement_colorspace_get_name(ibuf->float_buffer.colorspace);
}
else {
colorspace_name = IMB_colormanagement_role_colorspace_name_get(COLOR_ROLE_DEFAULT_FLOAT);
@ -1272,8 +1273,8 @@ static void image_colorspace_from_imbuf(Image *image, const ImBuf *ibuf)
}
if (ibuf->byte_buffer.data && !colorspace_name) {
if (ibuf->rect_colorspace) {
colorspace_name = IMB_colormanagement_colorspace_get_name(ibuf->rect_colorspace);
if (ibuf->byte_buffer.colorspace) {
colorspace_name = IMB_colormanagement_colorspace_get_name(ibuf->byte_buffer.colorspace);
}
else {
colorspace_name = IMB_colormanagement_role_colorspace_name_get(COLOR_ROLE_DEFAULT_BYTE);
@ -4441,7 +4442,7 @@ static ImBuf *image_get_render_result(Image *ima, ImageUser *iuser, void **r_loc
*/
if (ibuf->byte_buffer.data != byte_buffer->data) {
const char *colorspace = IMB_colormanagement_role_colorspace_name_get(COLOR_ROLE_DEFAULT_BYTE);
IMB_colormanagement_assign_rect_colorspace(ibuf, colorspace);
IMB_colormanagement_assign_byte_colorspace(ibuf, colorspace);
}
/* invalidate color managed buffers if render result changed */

View File

@ -769,11 +769,11 @@ static void gpu_texture_update_from_ibuf(
}
else {
/* Byte image is in original colorspace from the file, and may need conversion. */
if (IMB_colormanagement_space_is_data(ibuf->rect_colorspace)) {
if (IMB_colormanagement_space_is_data(ibuf->byte_buffer.colorspace)) {
/* Non-color data, just store buffer as is. */
}
else if (IMB_colormanagement_space_is_srgb(ibuf->rect_colorspace) ||
IMB_colormanagement_space_is_scene_linear(ibuf->rect_colorspace))
else if (IMB_colormanagement_space_is_srgb(ibuf->byte_buffer.colorspace) ||
IMB_colormanagement_space_is_scene_linear(ibuf->byte_buffer.colorspace))
{
/* sRGB or scene linear, store as byte texture that the GPU can decode directly. */
rect = (uchar *)MEM_mallocN(sizeof(uchar[4]) * w * h, __func__);

View File

@ -119,7 +119,8 @@ IDTypeInfo IDType_ID_LINK_PLACEHOLDER = {
* absolute, in which case it is not altered.
*/
static bool lib_id_library_local_paths_callback(BPathForeachPathData *bpath_data,
char *r_path_dst,
char *path_dst,
size_t path_dst_maxncpy,
const char *path_src)
{
const char **data = bpath_data->user_data;
@ -142,7 +143,7 @@ static bool lib_id_library_local_paths_callback(BPathForeachPathData *bpath_data
* because it won't work for paths that start with "//../" */
BLI_path_normalize(filepath);
BLI_path_rel(filepath, base_new);
BLI_strncpy(r_path_dst, filepath, FILE_MAX);
BLI_strncpy(path_dst, filepath, path_dst_maxncpy);
return true;
}

View File

@ -68,7 +68,7 @@ static void library_foreach_path(ID *id, BPathForeachPathData *bpath_data)
return;
}
if (BKE_bpath_foreach_path_fixed_process(bpath_data, lib->filepath)) {
if (BKE_bpath_foreach_path_fixed_process(bpath_data, lib->filepath, sizeof(lib->filepath))) {
BKE_library_filepath_set(bpath_data->bmain, lib, lib->filepath);
}
}

View File

@ -221,7 +221,8 @@ static void mesh_foreach_path(ID *id, BPathForeachPathData *bpath_data)
{
Mesh *me = (Mesh *)id;
if (me->ldata.external) {
BKE_bpath_foreach_path_fixed_process(bpath_data, me->ldata.external->filepath);
BKE_bpath_foreach_path_fixed_process(
bpath_data, me->ldata.external->filepath, sizeof(me->ldata.external->filepath));
}
}

View File

@ -145,7 +145,8 @@ static void movie_clip_foreach_cache(ID *id,
static void movie_clip_foreach_path(ID *id, BPathForeachPathData *bpath_data)
{
MovieClip *movie_clip = (MovieClip *)id;
BKE_bpath_foreach_path_fixed_process(bpath_data, movie_clip->filepath);
BKE_bpath_foreach_path_fixed_process(
bpath_data, movie_clip->filepath, sizeof(movie_clip->filepath));
}
static void write_movieTracks(BlendWriter *writer, ListBase *tracks)

View File

@ -436,11 +436,11 @@ static void node_foreach_path(ID *id, BPathForeachPathData *bpath_data)
for (bNode *node : ntree->all_nodes()) {
if (node->type == SH_NODE_SCRIPT) {
NodeShaderScript *nss = static_cast<NodeShaderScript *>(node->storage);
BKE_bpath_foreach_path_fixed_process(bpath_data, nss->filepath);
BKE_bpath_foreach_path_fixed_process(bpath_data, nss->filepath, sizeof(nss->filepath));
}
else if (node->type == SH_NODE_TEX_IES) {
NodeShaderTexIES *ies = static_cast<NodeShaderTexIES *>(node->storage);
BKE_bpath_foreach_path_fixed_process(bpath_data, ies->filepath);
BKE_bpath_foreach_path_fixed_process(bpath_data, ies->filepath, sizeof(ies->filepath));
}
}
break;

View File

@ -200,8 +200,9 @@ static void find_logical_origins_for_socket_recursive(
sockets_in_current_chain.pop_last();
}
static void update_logical_origins(const bNodeTree &ntree)
static void update_logically_linked_sockets(const bNodeTree &ntree)
{
/* Compute logically linked sockets to inputs. */
bNodeTreeRuntime &tree_runtime = *ntree.runtime;
Span<bNode *> nodes = tree_runtime.nodes_by_id;
threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
@ -220,6 +221,26 @@ static void update_logical_origins(const bNodeTree &ntree)
}
}
});
/* Clear logically linked sockets to outputs. */
threading::parallel_for(nodes.index_range(), 128, [&](const IndexRange range) {
for (const int i : range) {
bNode &node = *nodes[i];
for (bNodeSocket *socket : node.runtime->outputs) {
socket->runtime->logically_linked_sockets.clear();
}
}
});
/* Compute logically linked sockets to outputs using the previously computed logically linked
* sockets to inputs. */
for (const bNode *node : nodes) {
for (bNodeSocket *input_socket : node->runtime->inputs) {
for (bNodeSocket *output_socket : input_socket->runtime->logically_linked_sockets) {
output_socket->runtime->logically_linked_sockets.append(input_socket);
}
}
}
}
static void update_nodes_by_type(const bNodeTree &ntree)
@ -499,7 +520,7 @@ static void ensure_topology_cache(const bNodeTree &ntree)
update_nodes_by_type(ntree);
threading::parallel_invoke(
tree_runtime.nodes_by_id.size() > 32,
[&]() { update_logical_origins(ntree); },
[&]() { update_logically_linked_sockets(ntree); },
[&]() { update_sockets_by_identifier(ntree); },
[&]() {
update_toposort(ntree,

View File

@ -489,7 +489,7 @@ static void object_foreach_path_pointcache(ListBase *ptcache_list,
for (PointCache *cache = (PointCache *)ptcache_list->first; cache != nullptr;
cache = cache->next) {
if (cache->flag & PTCACHE_DISK_CACHE) {
BKE_bpath_foreach_path_fixed_process(bpath_data, cache->path);
BKE_bpath_foreach_path_fixed_process(bpath_data, cache->path, sizeof(cache->path));
}
}
}
@ -504,14 +504,16 @@ static void object_foreach_path(ID *id, BPathForeachPathData *bpath_data)
case eModifierType_Fluidsim: {
FluidsimModifierData *fluidmd = reinterpret_cast<FluidsimModifierData *>(md);
if (fluidmd->fss) {
BKE_bpath_foreach_path_fixed_process(bpath_data, fluidmd->fss->surfdataPath);
BKE_bpath_foreach_path_fixed_process(
bpath_data, fluidmd->fss->surfdataPath, sizeof(fluidmd->fss->surfdataPath));
}
break;
}
case eModifierType_Fluid: {
FluidModifierData *fmd = reinterpret_cast<FluidModifierData *>(md);
if (fmd->type & MOD_FLUID_TYPE_DOMAIN && fmd->domain) {
BKE_bpath_foreach_path_fixed_process(bpath_data, fmd->domain->cache_directory);
BKE_bpath_foreach_path_fixed_process(
bpath_data, fmd->domain->cache_directory, sizeof(fmd->domain->cache_directory));
}
break;
}
@ -522,12 +524,12 @@ static void object_foreach_path(ID *id, BPathForeachPathData *bpath_data)
}
case eModifierType_Ocean: {
OceanModifierData *omd = reinterpret_cast<OceanModifierData *>(md);
BKE_bpath_foreach_path_fixed_process(bpath_data, omd->cachepath);
BKE_bpath_foreach_path_fixed_process(bpath_data, omd->cachepath, sizeof(omd->cachepath));
break;
}
case eModifierType_MeshCache: {
MeshCacheModifierData *mcmd = reinterpret_cast<MeshCacheModifierData *>(md);
BKE_bpath_foreach_path_fixed_process(bpath_data, mcmd->filepath);
BKE_bpath_foreach_path_fixed_process(bpath_data, mcmd->filepath, sizeof(mcmd->filepath));
break;
}
default:

View File

@ -945,7 +945,11 @@ static bool seq_foreach_path_callback(Sequence *seq, void *user_data)
BPathForeachPathData *bpath_data = (BPathForeachPathData *)user_data;
if (ELEM(seq->type, SEQ_TYPE_MOVIE, SEQ_TYPE_SOUND_RAM) && se) {
BKE_bpath_foreach_path_dirfile_fixed_process(bpath_data, seq->strip->dirpath, se->filename);
BKE_bpath_foreach_path_dirfile_fixed_process(bpath_data,
seq->strip->dirpath,
sizeof(seq->strip->dirpath),
se->filename,
sizeof(se->filename));
}
else if ((seq->type == SEQ_TYPE_IMAGE) && se) {
/* NOTE: An option not to loop over all strips could be useful? */
@ -958,13 +962,17 @@ static bool seq_foreach_path_callback(Sequence *seq, void *user_data)
}
for (i = 0; i < len; i++, se++) {
BKE_bpath_foreach_path_dirfile_fixed_process(
bpath_data, seq->strip->dirpath, se->filename);
BKE_bpath_foreach_path_dirfile_fixed_process(bpath_data,
seq->strip->dirpath,
sizeof(seq->strip->dirpath),
se->filename,
sizeof(se->filename));
}
}
else {
/* simple case */
BKE_bpath_foreach_path_fixed_process(bpath_data, seq->strip->dirpath);
BKE_bpath_foreach_path_fixed_process(
bpath_data, seq->strip->dirpath, sizeof(seq->strip->dirpath));
}
}
return true;
@ -1495,6 +1503,7 @@ static void scene_blend_read_data(BlendDataReader *reader, ID *id)
EEVEE_lightcache_blend_read_data(reader, sce->eevee.light_cache_data);
}
}
EEVEE_lightcache_info_update(&sce->eevee);
BKE_screen_view3d_shading_blend_read_data(reader, &sce->display.shading);
@ -1804,6 +1813,7 @@ constexpr IDTypeInfo get_type_info()
IDTypeInfo IDType_ID_SCE = get_type_info();
const char *RE_engine_id_BLENDER_EEVEE = "BLENDER_EEVEE";
const char *RE_engine_id_BLENDER_EEVEE_NEXT = "BLENDER_EEVEE_NEXT";
const char *RE_engine_id_BLENDER_WORKBENCH = "BLENDER_WORKBENCH";
const char *RE_engine_id_BLENDER_WORKBENCH_NEXT = "BLENDER_WORKBENCH_NEXT";
const char *RE_engine_id_CYCLES = "CYCLES";
@ -2992,7 +3002,8 @@ bool BKE_scene_use_spherical_stereo(Scene *scene)
bool BKE_scene_uses_blender_eevee(const Scene *scene)
{
return STREQ(scene->r.engine, RE_engine_id_BLENDER_EEVEE);
return STREQ(scene->r.engine, RE_engine_id_BLENDER_EEVEE) ||
STREQ(scene->r.engine, RE_engine_id_BLENDER_EEVEE_NEXT);
}
bool BKE_scene_uses_blender_workbench(const Scene *scene)

View File

@ -127,7 +127,7 @@ static void sound_foreach_path(ID *id, BPathForeachPathData *bpath_data)
}
/* FIXME: This does not check for empty path... */
BKE_bpath_foreach_path_fixed_process(bpath_data, sound->filepath);
BKE_bpath_foreach_path_fixed_process(bpath_data, sound->filepath, sizeof(sound->filepath));
}
static void sound_blend_write(BlendWriter *writer, ID *id, const void *id_address)

View File

@ -2872,8 +2872,8 @@ ImBuf *BKE_tracking_get_plane_imbuf(const ImBuf *frame_ibuf,
&warped_position_y);
}
plane_ibuf->rect_colorspace = frame_ibuf->rect_colorspace;
plane_ibuf->float_colorspace = frame_ibuf->float_colorspace;
plane_ibuf->byte_buffer.colorspace = frame_ibuf->byte_buffer.colorspace;
plane_ibuf->float_buffer.colorspace = frame_ibuf->float_buffer.colorspace;
return plane_ibuf;
}

View File

@ -121,7 +121,7 @@ static void vfont_foreach_path(ID *id, BPathForeachPathData *bpath_data)
return;
}
BKE_bpath_foreach_path_fixed_process(bpath_data, vfont->filepath);
BKE_bpath_foreach_path_fixed_process(bpath_data, vfont->filepath, sizeof(vfont->filepath));
}
static void vfont_blend_write(BlendWriter *writer, ID *id, const void *id_address)

View File

@ -36,7 +36,9 @@
#include "DNA_packedFile_types.h"
#include "DNA_vfont_types.h"
static void freetype_outline_to_curves(FT_Outline ftoutline, ListBase *nurbsbase, const float scale)
static void freetype_outline_to_curves(FT_Outline ftoutline,
ListBase *nurbsbase,
const float scale)
{
const float eps = 0.0001f;
const float eps_sq = eps * eps;
@ -69,8 +71,7 @@ static void freetype_outline_to_curves(FT_Outline ftoutline, ListBase *nurbsbase
{
const int l_next = (k < n - 1) ? (l + 1) : l_first;
if (ftoutline.tags[l] == FT_Curve_Tag_Conic &&
ftoutline.tags[l_next] == FT_Curve_Tag_Conic)
{
ftoutline.tags[l_next] == FT_Curve_Tag_Conic) {
onpoints[j]++;
}
}
@ -104,8 +105,7 @@ static void freetype_outline_to_curves(FT_Outline ftoutline, ListBase *nurbsbase
{
const int l_next = (k < n - 1) ? (l + 1) : l_first;
if (ftoutline.tags[l] == FT_Curve_Tag_Conic &&
ftoutline.tags[l_next] == FT_Curve_Tag_Conic)
{
ftoutline.tags[l_next] == FT_Curve_Tag_Conic) {
dx = (ftoutline.points[l].x + ftoutline.points[l_next].x) * scale / 2.0f;
dy = (ftoutline.points[l].y + ftoutline.points[l_next].y) * scale / 2.0f;
@ -205,7 +205,7 @@ static void freetype_outline_to_curves(FT_Outline ftoutline, ListBase *nurbsbase
MEM_freeN(onpoints);
}
static VChar *freetypechar_to_vchar(FT_Face face, FT_ULong charcode, const VFontData* vfd)
static VChar *freetypechar_to_vchar(FT_Face face, FT_ULong charcode, const VFontData *vfd)
{
FT_UInt glyph_index = FT_Get_Char_Index(face, charcode);
if (FT_Load_Glyph(face, glyph_index, FT_LOAD_NO_SCALE | FT_LOAD_NO_BITMAP) != FT_Err_Ok) {

View File

@ -585,7 +585,7 @@ static void volume_foreach_path(ID *id, BPathForeachPathData *bpath_data)
return;
}
BKE_bpath_foreach_path_fixed_process(bpath_data, volume->filepath);
BKE_bpath_foreach_path_fixed_process(bpath_data, volume->filepath, sizeof(volume->filepath));
}
static void volume_blend_write(BlendWriter *writer, ID *id, const void *id_address)

View File

@ -18,61 +18,6 @@
namespace blender::bounds {
/**
* Find the smallest and largest values element-wise in the span.
*/
template<typename T> [[nodiscard]] inline std::optional<Bounds<T>> min_max(Span<T> values)
{
if (values.is_empty()) {
return std::nullopt;
}
const Bounds<T> init{values.first(), values.first()};
return threading::parallel_reduce(
values.index_range(),
1024,
init,
[&](IndexRange range, const Bounds<T> &init) {
Bounds<T> result = init;
for (const int i : range) {
math::min_max(values[i], result.min, result.max);
}
return result;
},
[](const Bounds<T> &a, const Bounds<T> &b) {
return Bounds<T>{math::min(a.min, b.min), math::max(a.max, b.max)};
});
}
/**
* Find the smallest and largest values element-wise in the span, adding the radius to each element
* first. The template type T is expected to have an addition operator implemented with RadiusT.
*/
template<typename T, typename RadiusT>
[[nodiscard]] inline std::optional<Bounds<T>> min_max_with_radii(Span<T> values,
Span<RadiusT> radii)
{
BLI_assert(values.size() == radii.size());
if (values.is_empty()) {
return std::nullopt;
}
const Bounds<T> init{values.first(), values.first()};
return threading::parallel_reduce(
values.index_range(),
1024,
init,
[&](IndexRange range, const Bounds<T> &init) {
Bounds<T> result = init;
for (const int i : range) {
result.min = math::min(values[i] - radii[i], result.min);
result.max = math::max(values[i] + radii[i], result.max);
}
return result;
},
[](const Bounds<T> &a, const Bounds<T> &b) {
return Bounds<T>{math::min(a.min, b.min), math::max(a.max, b.max)};
});
}
template<typename T> [[nodiscard]] inline Bounds<T> merge(const Bounds<T> &a, const Bounds<T> &b)
{
return {math::min(a.min, b.min), math::max(a.max, b.max)};
@ -94,4 +39,55 @@ template<typename T>
return std::nullopt;
}
/**
 * Compute the element-wise minimum and maximum over all values in the span.
 * Returns no value when the span is empty.
 */
template<typename T> [[nodiscard]] inline std::optional<Bounds<T>> min_max(const Span<T> values)
{
  if (values.is_empty()) {
    return std::nullopt;
  }
  /* Seed the reduction with the first element so every comparison is against real data. */
  const Bounds<T> identity{values.first(), values.first()};
  return threading::parallel_reduce(
      values.index_range(),
      1024,
      identity,
      [&](const IndexRange range, const Bounds<T> &current) {
        Bounds<T> bounds = current;
        for (const int i : range) {
          math::min_max(values[i], bounds.min, bounds.max);
        }
        return bounds;
      },
      [](const Bounds<T> &a, const Bounds<T> &b) { return merge(a, b); });
}
/**
 * Compute the element-wise minimum and maximum of `values[i] - radii[i]` and
 * `values[i] + radii[i]` over the whole span. The template type T is expected to have
 * addition and subtraction operators implemented with RadiusT. Returns no value when
 * the span is empty.
 */
template<typename T, typename RadiusT>
[[nodiscard]] inline std::optional<Bounds<T>> min_max_with_radii(const Span<T> values,
                                                                 const Span<RadiusT> radii)
{
  BLI_assert(values.size() == radii.size());
  if (values.is_empty()) {
    return std::nullopt;
  }
  /* Seed with the first value alone; its radius is accounted for on the first loop iteration. */
  const Bounds<T> identity{values.first(), values.first()};
  return threading::parallel_reduce(
      values.index_range(),
      1024,
      identity,
      [&](const IndexRange range, const Bounds<T> &current) {
        Bounds<T> bounds = current;
        for (const int i : range) {
          bounds.min = math::min(values[i] - radii[i], bounds.min);
          bounds.max = math::max(values[i] + radii[i], bounds.max);
        }
        return bounds;
      },
      [](const Bounds<T> &a, const Bounds<T> &b) { return merge(a, b); });
}
} // namespace blender::bounds

View File

@ -2262,11 +2262,12 @@ VoronoiOutput fractal_voronoi_x_fx(const VoronoiParams &params,
params.lacunarity == 0.0f;
for (int i = 0; i <= ceilf(params.detail); ++i) {
VoronoiOutput octave = (params.feature == NOISE_SHD_VORONOI_F1) ?
voronoi_f1(params, coord * scale) :
(params.feature == NOISE_SHD_VORONOI_SMOOTH_F1) ?
VoronoiOutput octave = (params.feature == NOISE_SHD_VORONOI_F2) ?
voronoi_f2(params, coord * scale) :
(params.feature == NOISE_SHD_VORONOI_SMOOTH_F1 &&
params.smoothness != 0.0f) ?
voronoi_smooth_f1(params, coord * scale, calc_color) :
voronoi_f2(params, coord * scale);
voronoi_f1(params, coord * scale);
if (zero_input) {
max_amplitude = 1.0f;

View File

@ -10,6 +10,7 @@
#include "CLG_log.h"
#include "DNA_lightprobe_types.h"
#include "DNA_modifier_types.h"
#include "DNA_movieclip_types.h"
@ -200,7 +201,7 @@ static void versioning_remove_microfacet_sharp_distribution(bNodeTree *ntree)
}
}
void blo_do_versions_400(FileData * /*fd*/, Library * /*lib*/, Main *bmain)
void blo_do_versions_400(FileData *fd, Library * /*lib*/, Main *bmain)
{
if (!MAIN_VERSION_ATLEAST(bmain, 400, 1)) {
LISTBASE_FOREACH (Mesh *, mesh, &bmain->meshes) {
@ -273,5 +274,12 @@ void blo_do_versions_400(FileData * /*fd*/, Library * /*lib*/, Main *bmain)
/* Convert anisotropic BSDF node to glossy BSDF. */
/* Keep this block, even when empty. */
if (!DNA_struct_elem_find(fd->filesdna, "LightProbe", "int", "grid_bake_sample_count")) {
LISTBASE_FOREACH (LightProbe *, lightprobe, &bmain->lightprobes) {
lightprobe->grid_bake_samples = 2048;
lightprobe->surfel_density = 1.0f;
}
}
}
}

View File

@ -361,7 +361,7 @@ void MemoryBuffer::copy_from(const ImBuf *src,
to_y,
to_channel_offset);
if (ensure_linear_space) {
colorspace_to_scene_linear(this, area, src->rect_colorspace);
colorspace_to_scene_linear(this, area, src->byte_buffer.colorspace);
}
}
else {

View File

@ -80,11 +80,7 @@ void COM_execute(Render *render,
node_tree->execution_mode == NTREE_EXECUTION_MODE_REALTIME)
{
/* Realtime GPU compositor. */
/* TODO: add persistence and depsgraph updates for better performance. */
blender::render::RealtimeCompositor compositor(
*render, *scene, *render_data, *node_tree, rendering, view_name);
compositor.execute();
RE_compositor_execute(*render, *scene, *render_data, *node_tree, rendering, view_name);
}
else {
/* Tiled and Full Frame compositors. */

View File

@ -136,7 +136,8 @@ static void sample_image_at_location(
}
rgba_uchar_to_float(color, byte_color);
if (make_linear_rgb) {
IMB_colormanagement_colorspace_to_scene_linear_v4(color, false, ibuf->rect_colorspace);
IMB_colormanagement_colorspace_to_scene_linear_v4(
color, false, ibuf->byte_buffer.colorspace);
}
}
}

View File

@ -84,7 +84,9 @@ class Context {
/* Get the texture where the given render pass is stored. This should be called by the Render
* Layer node to populate its outputs. */
virtual GPUTexture *get_input_texture(int view_layer, const char *pass_name) = 0;
virtual GPUTexture *get_input_texture(const Scene *scene,
int view_layer,
const char *pass_name) = 0;
/* Get the name of the view currently being rendered. */
virtual StringRef get_view_name() = 0;

View File

@ -45,10 +45,7 @@ class NodeOperation : public Operation {
void compute_results_reference_counts(const Schedule &schedule);
protected:
/* Compute a preview for the operation and set to the bNodePreview of the node. This is only done
* for nodes which enables previews, are not hidden, and are part of the active node context. The
* preview is computed as a lower resolution version of the output of the get_preview_result
* method. */
/* Compute a node preview using the result returned from the get_preview_result method. */
void compute_preview() override;
/* Returns a reference to the derived node that this operation represents. */
@ -67,10 +64,6 @@ class NodeOperation : public Operation {
* guaranteed not to be returned, since the node will always either have a linked output or an
* allocated input. */
Result *get_preview_result();
/* Resize the given input result to the given preview size and set it to the preview buffer after
* applying the necessary color management processor. */
void write_preview_from_result(bNodePreview &preview, Result &input_result);
};
} // namespace blender::realtime_compositor

View File

@ -112,6 +112,9 @@ class ShaderOperation : public Operation {
* the attribute that was created for it. This is used to share the same attribute with all
* inputs that are linked to the same output socket. */
Map<DOutputSocket, GPUNodeLink *> output_to_material_attribute_map_;
/* A vector set that stores all output sockets that are used as previews for nodes inside the
* shader operation. */
VectorSet<DOutputSocket> preview_outputs_;
public:
/* Construct and compile a GPU material from the given shader compile unit by calling
@ -125,6 +128,13 @@ class ShaderOperation : public Operation {
* shader. */
void execute() override;
/* Compute a node preview for all nodes in the shader operations if the node requires a preview.
*
* Previews are computed from results that are populated for outputs that are used to compute
* previews even if they are internally linked, and those outputs are stored and tracked in the
* preview_outputs_ vector set, see the populate_results_for_node method for more information. */
void compute_preview() override;
/* Get the identifier of the operation output corresponding to the given output socket. This is
* called by the compiler to identify the operation output that provides the result for an input
* by providing the output socket that the input is linked to. See
@ -138,9 +148,14 @@ class ShaderOperation : public Operation {
/* Compute and set the initial reference counts of all the results of the operation. The
* reference counts of the results are the number of operations that use those results, which is
* computed as the number of inputs whose node is part of the schedule and is linked to the
* output corresponding to each of the results of the operation. The node execution schedule is
* given as an input. */
* computed as the number of inputs linked to the output corresponding to each of the results of
* the operation, but only the linked inputs whose node is part of the schedule but not part of
* the shader operation, since inputs that are part of the shader operations are internal links.
*
* Additionally, results that are used as node previews gets an extra reference count because
* they are referenced and released by the compute_preview method.
*
* The node execution schedule is given as an input. */
void compute_results_reference_counts(const Schedule &schedule);
private:
@ -209,7 +224,8 @@ class ShaderOperation : public Operation {
GPUMaterial *material);
/* Populate the output results of the shader operation for output sockets of the given node that
* are linked to nodes outside of the shader operation. */
* are linked to nodes outside of the shader operation or are used to compute a preview for the
* node. */
void populate_results_for_node(DNode node, GPUMaterial *material);
/* Given the output socket of a node that is part of the shader operation which is linked to an

View File

@ -70,4 +70,11 @@ void compute_dispatch_threads_at_least(GPUShader *shader,
int2 threads_range,
int2 local_size = int2(16));
/* Returns true if a node preview needs to be computed for the given node. */
bool is_node_preview_needed(const DNode &node);
/* Computes a lower resolution version of the given result and sets it as a preview for the given
* node after applying the appropriate color management specified in the given context. */
void compute_preview_from_result(Context &context, const DNode &node, Result &input_result);
} // namespace blender::realtime_compositor

View File

@ -5,21 +5,16 @@
#include <memory>
#include "BLI_assert.h"
#include "BLI_index_range.hh"
#include "BLI_map.hh"
#include "BLI_math_base.h"
#include "BLI_math_base.hh"
#include "BLI_math_color.h"
#include "BLI_math_vector_types.hh"
#include "BLI_string_ref.hh"
#include "BLI_task.hh"
#include "BLI_vector.hh"
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "IMB_colormanagement.h"
#include "DNA_node_types.h"
#include "NOD_derived_node_tree.hh"
@ -53,53 +48,11 @@ NodeOperation::NodeOperation(Context &context, DNode node) : Operation(context),
}
}
/* Given the size of a result, compute a lower resolution size for a preview. The greater dimension
* will be assigned an arbitrarily chosen size of 128, while the other dimension will get the size
* that maintains the same aspect ratio. */
static int2 compute_preview_size(int2 size)
{
const int greater_dimension_size = 128;
if (size.x > size.y) {
return int2(greater_dimension_size, int(greater_dimension_size * (float(size.y) / size.x)));
}
else {
return int2(int(greater_dimension_size * (float(size.x) / size.y)), greater_dimension_size);
}
}
void NodeOperation::compute_preview()
{
if (!(node()->flag & NODE_PREVIEW)) {
return;
if (is_node_preview_needed(node())) {
compute_preview_from_result(context(), node(), *get_preview_result());
}
if (node()->flag & NODE_HIDDEN) {
return;
}
/* Only compute previews for nodes in the active context. */
if (node().context()->instance_key().value !=
node().context()->derived_tree().active_context().instance_key().value)
{
return;
}
/* Initialize node tree previews if not already initialized. */
bNodeTree *root_tree = const_cast<bNodeTree *>(
&node().context()->derived_tree().root_context().btree());
if (!root_tree->previews) {
root_tree->previews = BKE_node_instance_hash_new("node previews");
}
Result *preview_result = get_preview_result();
const int2 preview_size = compute_preview_size(preview_result->domain().size);
node()->runtime->preview_xsize = preview_size.x;
node()->runtime->preview_ysize = preview_size.y;
bNodePreview *preview = bke::node_preview_verify(
root_tree->previews, node().instance_key(), preview_size.x, preview_size.y, true);
write_preview_from_result(*preview, *preview_result);
}
Result *NodeOperation::get_preview_result()
@ -124,55 +77,6 @@ Result *NodeOperation::get_preview_result()
return nullptr;
}
void NodeOperation::write_preview_from_result(bNodePreview &preview, Result &input_result)
{
GPUShader *shader = shader_manager().get("compositor_compute_preview");
GPU_shader_bind(shader);
if (input_result.type() == ResultType::Float) {
GPU_texture_swizzle_set(input_result.texture(), "rrr1");
}
input_result.bind_as_texture(shader, "input_tx");
const int2 preview_size = int2(preview.xsize, preview.ysize);
Result preview_result = Result::Temporary(ResultType::Color, texture_pool());
preview_result.allocate_texture(Domain(preview_size));
preview_result.bind_as_image(shader, "preview_img");
compute_dispatch_threads_at_least(shader, preview_size);
input_result.unbind_as_texture();
preview_result.unbind_as_image();
GPU_shader_unbind();
GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH);
float *preview_pixels = static_cast<float *>(
GPU_texture_read(preview_result.texture(), GPU_DATA_FLOAT, 0));
preview_result.release();
ColormanageProcessor *color_processor = IMB_colormanagement_display_processor_new(
&context().get_scene().view_settings, &context().get_scene().display_settings);
threading::parallel_for(IndexRange(preview_size.y), 1, [&](const IndexRange sub_y_range) {
for (const int64_t y : sub_y_range) {
for (const int64_t x : IndexRange(preview_size.x)) {
const int index = (y * preview_size.x + x) * 4;
IMB_colormanagement_processor_apply_v4(color_processor, preview_pixels + index);
rgba_float_to_uchar(preview.rect + index, preview_pixels + index);
}
}
});
/* Restore original swizzle mask set above. */
if (input_result.type() == ResultType::Float) {
GPU_texture_swizzle_set(input_result.texture(), "rgba");
}
IMB_colormanagement_processor_free(color_processor);
MEM_freeN(preview_pixels);
}
void NodeOperation::compute_results_reference_counts(const Schedule &schedule)
{
for (const bNodeSocket *output : this->node()->output_sockets()) {

View File

@ -71,6 +71,15 @@ void ShaderOperation::execute()
GPU_shader_unbind();
}
void ShaderOperation::compute_preview()
{
  /* Compute a preview for every output that was recorded as a preview output, then release its
   * result. The release balances the extra reference count that
   * compute_results_reference_counts added for results contained in preview_outputs_. */
  for (const DOutputSocket &output : preview_outputs_) {
    Result &result = get_result(get_output_identifier_from_output_socket(output));
    compute_preview_from_result(context(), output.node(), result);
    result.release();
  }
}
StringRef ShaderOperation::get_output_identifier_from_output_socket(DOutputSocket output_socket)
{
return output_sockets_to_output_identifiers_map_.lookup(output_socket);
@ -84,7 +93,7 @@ Map<std::string, DOutputSocket> &ShaderOperation::get_inputs_to_linked_outputs_m
void ShaderOperation::compute_results_reference_counts(const Schedule &schedule)
{
for (const auto item : output_sockets_to_output_identifiers_map_.items()) {
const int reference_count = number_of_inputs_linked_to_output_conditioned(
int reference_count = number_of_inputs_linked_to_output_conditioned(
item.key, [&](DInputSocket input) {
/* We only consider inputs that are not part of the shader operations, because inputs
* that are part of the shader operations are internal and do not deal with the result
@ -92,6 +101,10 @@ void ShaderOperation::compute_results_reference_counts(const Schedule &schedule)
return schedule.contains(input.node()) && !compile_unit_.contains(input.node());
});
if (preview_outputs_.contains(item.key)) {
reference_count++;
}
get_result(item.value).set_initial_reference_count(reference_count);
}
}
@ -248,17 +261,41 @@ void ShaderOperation::declare_operation_input(DInputSocket input_socket,
inputs_to_linked_outputs_map_.add_new(input_identifier, output_socket);
}
/* Return the output socket whose result should be used to compute the node's preview: the
 * first logically linked output. A null socket is returned when the node needs no preview or
 * has no logically linked output. */
static DOutputSocket find_preview_output_socket(const DNode &node)
{
  if (is_node_preview_needed(node)) {
    for (const bNodeSocket *output : node->output_sockets()) {
      if (!output->is_logically_linked()) {
        continue;
      }
      return DOutputSocket(node.context(), output);
    }
  }
  return DOutputSocket();
}
void ShaderOperation::populate_results_for_node(DNode node, GPUMaterial *material)
{
const DOutputSocket preview_output = find_preview_output_socket(node);
for (const bNodeSocket *output : node->output_sockets()) {
const DOutputSocket doutput{node.context(), output};
/* If any of the nodes linked to the output are not part of the shader operation, then an
* output result needs to be populated for it. */
const bool need_to_populate_result = is_output_linked_to_node_conditioned(
const bool is_operation_output = is_output_linked_to_node_conditioned(
doutput, [&](DNode node) { return !compile_unit_.contains(node); });
if (need_to_populate_result) {
/* If the output is used as the node preview, then an output result needs to be populated for
* it, and we additionally keep track of that output to later compute the previews from. */
const bool is_preview_output = doutput == preview_output;
if (is_preview_output) {
preview_outputs_.add(doutput);
}
if (is_operation_output || is_preview_output) {
populate_operation_result(doutput, material);
}
}

View File

@ -4,10 +4,15 @@
#include "BLI_assert.h"
#include "BLI_function_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_math_color.h"
#include "BLI_math_vector.hh"
#include "BLI_math_vector_types.hh"
#include "BLI_task.hh"
#include "BLI_utildefines.h"
#include "IMB_colormanagement.h"
#include "DNA_node_types.h"
#include "NOD_derived_node_tree.hh"
@ -133,4 +138,100 @@ void compute_dispatch_threads_at_least(GPUShader *shader, int2 threads_range, in
GPU_compute_dispatch(shader, groups_to_dispatch.x, groups_to_dispatch.y, 1);
}
bool is_node_preview_needed(const DNode &node)
{
  /* Skip nodes that do not request a preview or are collapsed. */
  if (!(node->flag & NODE_PREVIEW) || (node->flag & NODE_HIDDEN)) {
    return false;
  }

  /* Only compute previews for nodes in the active context. */
  return node.context()->instance_key().value ==
         node.context()->derived_tree().active_context().instance_key().value;
}
/* Given the size of a result, compute a lower resolution size for a preview. The greater
 * dimension is assigned an arbitrarily chosen size of 128, while the other dimension gets the
 * size that maintains the same aspect ratio. */
static int2 compute_preview_size(int2 size)
{
  const int max_dimension_size = 128;
  if (size.x > size.y) {
    const int scaled_y = int(max_dimension_size * (float(size.y) / size.x));
    return int2(max_dimension_size, scaled_y);
  }
  const int scaled_x = int(max_dimension_size * (float(size.x) / size.y));
  return int2(scaled_x, max_dimension_size);
}
/* Compute a lower resolution version of the given result, apply the scene's display color
 * management, and store it as the byte preview image of the given node. */
void compute_preview_from_result(Context &context, const DNode &node, Result &input_result)
{
  /* Initialize node tree previews if not already initialized. */
  bNodeTree *root_tree = const_cast<bNodeTree *>(
      &node.context()->derived_tree().root_context().btree());
  if (!root_tree->previews) {
    root_tree->previews = BKE_node_instance_hash_new("node previews");
  }

  /* Downscale to the preview size and record it on the node runtime so the UI can draw it. */
  const int2 preview_size = compute_preview_size(input_result.domain().size);
  node->runtime->preview_xsize = preview_size.x;
  node->runtime->preview_ysize = preview_size.y;

  bNodePreview *preview = bke::node_preview_verify(
      root_tree->previews, node.instance_key(), preview_size.x, preview_size.y, true);

  GPUShader *shader = context.shader_manager().get("compositor_compute_preview");
  GPU_shader_bind(shader);

  /* Swizzle float results so they sample as grayscale with full alpha in the shader. */
  if (input_result.type() == ResultType::Float) {
    GPU_texture_swizzle_set(input_result.texture(), "rrr1");
  }

  input_result.bind_as_texture(shader, "input_tx");

  Result preview_result = Result::Temporary(ResultType::Color, context.texture_pool());
  preview_result.allocate_texture(Domain(preview_size));
  preview_result.bind_as_image(shader, "preview_img");

  compute_dispatch_threads_at_least(shader, preview_size);

  input_result.unbind_as_texture();
  preview_result.unbind_as_image();
  GPU_shader_unbind();

  /* Make sure the compute dispatch above finished writing before reading back the texture. */
  GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH);
  float *preview_pixels = static_cast<float *>(
      GPU_texture_read(preview_result.texture(), GPU_DATA_FLOAT, 0));
  preview_result.release();

  ColormanageProcessor *color_processor = IMB_colormanagement_display_processor_new(
      &context.get_scene().view_settings, &context.get_scene().display_settings);

  /* Apply the display transform per pixel and convert the result to bytes in the preview
   * image, parallelized over rows. */
  threading::parallel_for(IndexRange(preview_size.y), 1, [&](const IndexRange sub_y_range) {
    for (const int64_t y : sub_y_range) {
      for (const int64_t x : IndexRange(preview_size.x)) {
        const int index = (y * preview_size.x + x) * 4;
        IMB_colormanagement_processor_apply_v4(color_processor, preview_pixels + index);
        rgba_float_to_uchar(preview->rect + index, preview_pixels + index);
      }
    }
  });

  /* Restore original swizzle mask set above. */
  if (input_result.type() == ResultType::Float) {
    GPU_texture_swizzle_set(input_result.texture(), "rgba");
  }

  IMB_colormanagement_processor_free(color_processor);
  MEM_freeN(preview_pixels);
}
} // namespace blender::realtime_compositor

View File

@ -149,6 +149,8 @@ set(SRC
engines/eevee_next/eevee_instance.cc
engines/eevee_next/eevee_irradiance_cache.cc
engines/eevee_next/eevee_light.cc
engines/eevee_next/eevee_lightprobe.cc
engines/eevee_next/eevee_lightcache.cc
engines/eevee_next/eevee_material.cc
engines/eevee_next/eevee_motion_blur.cc
engines/eevee_next/eevee_pipeline.cc
@ -288,6 +290,7 @@ set(SRC
engines/eevee_next/eevee_instance.hh
engines/eevee_next/eevee_irradiance_cache.hh
engines/eevee_next/eevee_light.hh
engines/eevee_next/eevee_lightcache.hh
engines/eevee_next/eevee_material.hh
engines/eevee_next/eevee_motion_blur.hh
engines/eevee_next/eevee_pipeline.hh
@ -455,7 +458,6 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_camera_lib.glsl
engines/eevee_next/shaders/eevee_colorspace_lib.glsl
engines/eevee_next/shaders/eevee_cryptomatte_lib.glsl
engines/eevee_next/shaders/eevee_transparency_lib.glsl
engines/eevee_next/shaders/eevee_debug_surfels_vert.glsl
engines/eevee_next/shaders/eevee_debug_surfels_frag.glsl
engines/eevee_next/shaders/eevee_deferred_light_frag.glsl
@ -474,6 +476,8 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_depth_of_field_stabilize_comp.glsl
engines/eevee_next/shaders/eevee_depth_of_field_tiles_dilate_comp.glsl
engines/eevee_next/shaders/eevee_depth_of_field_tiles_flatten_comp.glsl
engines/eevee_next/shaders/eevee_display_probe_grid_frag.glsl
engines/eevee_next/shaders/eevee_display_probe_grid_vert.glsl
engines/eevee_next/shaders/eevee_film_comp.glsl
engines/eevee_next/shaders/eevee_film_cryptomatte_post_comp.glsl
engines/eevee_next/shaders/eevee_film_frag.glsl
@ -493,6 +497,11 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_light_eval_lib.glsl
engines/eevee_next/shaders/eevee_light_iter_lib.glsl
engines/eevee_next/shaders/eevee_light_lib.glsl
engines/eevee_next/shaders/eevee_lightprobe_eval_lib.glsl
engines/eevee_next/shaders/eevee_lightprobe_irradiance_bounds_comp.glsl
engines/eevee_next/shaders/eevee_lightprobe_irradiance_ray_comp.glsl
engines/eevee_next/shaders/eevee_lightprobe_irradiance_load_comp.glsl
engines/eevee_next/shaders/eevee_lightprobe_lib.glsl
engines/eevee_next/shaders/eevee_ltc_lib.glsl
engines/eevee_next/shaders/eevee_motion_blur_dilate_comp.glsl
engines/eevee_next/shaders/eevee_motion_blur_flatten_comp.glsl
@ -513,6 +522,7 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_shadow_tag_usage_comp.glsl
engines/eevee_next/shaders/eevee_shadow_tag_usage_frag.glsl
engines/eevee_next/shaders/eevee_shadow_tag_usage_lib.glsl
engines/eevee_next/shaders/eevee_shadow_tag_usage_surfels_comp.glsl
engines/eevee_next/shaders/eevee_shadow_tag_usage_vert.glsl
engines/eevee_next/shaders/eevee_shadow_test.glsl
engines/eevee_next/shaders/eevee_shadow_tilemap_bounds_comp.glsl
@ -521,12 +531,19 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_shadow_tilemap_lib.glsl
engines/eevee_next/shaders/eevee_spherical_harmonics_lib.glsl
engines/eevee_next/shaders/eevee_subsurface_eval_frag.glsl
engines/eevee_next/shaders/eevee_surf_capture_frag.glsl
engines/eevee_next/shaders/eevee_surf_deferred_frag.glsl
engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
engines/eevee_next/shaders/eevee_surf_lib.glsl
engines/eevee_next/shaders/eevee_surf_shadow_frag.glsl
engines/eevee_next/shaders/eevee_surf_world_frag.glsl
engines/eevee_next/shaders/eevee_surfel_light_comp.glsl
engines/eevee_next/shaders/eevee_surfel_list_build_comp.glsl
engines/eevee_next/shaders/eevee_surfel_list_lib.glsl
engines/eevee_next/shaders/eevee_surfel_list_sort_comp.glsl
engines/eevee_next/shaders/eevee_surfel_ray_comp.glsl
engines/eevee_next/shaders/eevee_transparency_lib.glsl
engines/eevee_next/shaders/eevee_velocity_lib.glsl
engines/eevee_next/eevee_defines.hh
@ -616,6 +633,7 @@ set(GLSL_SRC
engines/gpencil/shaders/gpencil_frag.glsl
engines/gpencil/shaders/gpencil_vert.glsl
engines/gpencil/shaders/grease_pencil_vert.glsl
engines/gpencil/shaders/gpencil_antialiasing_frag.glsl
engines/gpencil/shaders/gpencil_antialiasing_vert.glsl
engines/gpencil/shaders/gpencil_common_lib.glsl

View File

@ -163,9 +163,12 @@ class Context : public realtime_compositor::Context {
return DRW_viewport_texture_list_get()->color;
}
GPUTexture *get_input_texture(int view_layer, const char *pass_name) override
GPUTexture *get_input_texture(const Scene *scene, int view_layer, const char *pass_name) override
{
if (view_layer == 0 && STREQ(pass_name, RE_PASSNAME_COMBINED)) {
if ((DEG_get_original_id(const_cast<ID *>(&scene->id)) ==
DEG_get_original_id(&DRW_context_state_get()->scene->id)) &&
view_layer == 0 && STREQ(pass_name, RE_PASSNAME_COMBINED))
{
return get_output_texture();
}
else {

View File

@ -78,7 +78,27 @@ void Camera::sync()
CameraData &data = data_;
if (inst_.drw_view) {
if (inst_.is_baking()) {
/* Any view so that shadows and light culling works during irradiance bake. */
draw::View &view = inst_.irradiance_cache.bake.view_z_;
data.viewmat = view.viewmat();
data.viewinv = view.viewinv();
data.winmat = view.winmat();
data.wininv = view.wininv();
data.persmat = data.winmat * data.viewmat;
data.persinv = math::invert(data.persmat);
data.uv_scale = float2(1.0f);
data.uv_bias = float2(0.0f);
data.type = CAMERA_ORTHO;
/* NOTE: Follow camera parameters where distances are positive in front of the camera. */
data.clip_near = -view.far_clip();
data.clip_far = -view.near_clip();
data.fisheye_fov = data.fisheye_lens = -1.0f;
data.equirect_bias = float2(0.0f);
data.equirect_scale = float2(0.0f);
}
else if (inst_.drw_view) {
DRW_view_viewmat_get(inst_.drw_view, data.viewmat.ptr(), false);
DRW_view_viewmat_get(inst_.drw_view, data.viewinv.ptr(), true);
DRW_view_winmat_get(inst_.drw_view, data.winmat.ptr(), false);

View File

@ -71,6 +71,10 @@
#define MOTION_BLUR_GROUP_SIZE 32
#define MOTION_BLUR_DILATE_GROUP_SIZE 512
/* Irradiance Cache. */
/** Maximum number of entities inside the cache. */
#define IRRADIANCE_GRID_MAX 64
/* Depth Of Field. */
#define DOF_TILES_SIZE 8
#define DOF_TILES_FLATTEN_GROUP_SIZE DOF_TILES_SIZE
@ -86,6 +90,13 @@
#define DOF_GATHER_GROUP_SIZE DOF_TILES_SIZE
#define DOF_RESOLVE_GROUP_SIZE (DOF_TILES_SIZE * 2)
/* IrradianceBake. */
#define SURFEL_GROUP_SIZE 256
#define SURFEL_LIST_GROUP_SIZE 256
#define IRRADIANCE_GRID_GROUP_SIZE 4 /* In each dimension, so 4x4x4 workgroup size. */
#define IRRADIANCE_GRID_BRICK_SIZE 4 /* In each dimension, so 4x4x4 brick size. */
#define IRRADIANCE_BOUNDS_GROUP_SIZE 64
/* Resource bindings. */
/* Textures. */
@ -96,6 +107,7 @@
#define SHADOW_TILEMAPS_TEX_SLOT 4
#define SHADOW_ATLAS_TEX_SLOT 5
#define SSS_TRANSMITTANCE_TEX_SLOT 6
#define IRRADIANCE_ATLAS_TEX_SLOT 7
/* Only during shadow rendering. */
#define SHADOW_RENDER_MAP_SLOT 4
@ -107,6 +119,8 @@
#define GBUF_COLOR_SLOT 4
/* Uniform Buffers. */
#define IRRADIANCE_GRID_BUF_SLOT 3
#define HIZ_BUF_SLOT 5
/* Only during pre-pass. */
#define VELOCITY_CAMERA_PREV_BUF 3
#define VELOCITY_CAMERA_CURR_BUF 4
@ -120,9 +134,14 @@
#define LIGHT_BUF_SLOT 1
#define LIGHT_ZBIN_BUF_SLOT 2
#define LIGHT_TILE_BUF_SLOT 3
#define IRRADIANCE_BRICK_BUF_SLOT 4
/* Only during surface capture. */
#define SURFEL_BUF_SLOT 4
/* Only during surface capture. */
#define CAPTURE_BUF_SLOT 5
/* Only during shadow rendering. */
#define SHADOW_PAGE_INFO_SLOT 4
#define SAMPLING_BUF_SLOT 5
#define SAMPLING_BUF_SLOT 6
#define CRYPTOMATTE_BUF_SLOT 7
/* Only during pre-pass. */

View File

@ -85,8 +85,7 @@ static void eevee_engine_init(void *vedata)
}
}
ved->instance->init(
size, &rect, nullptr, depsgraph, nullptr, camera, nullptr, default_view, v3d, rv3d);
ved->instance->init(size, &rect, nullptr, depsgraph, camera, nullptr, default_view, v3d, rv3d);
}
static void eevee_draw_scene(void *vedata)
@ -161,7 +160,7 @@ static void eevee_render_to_image(void *vedata,
rcti rect;
RE_GetViewPlane(render, &view_rect, &rect);
instance->init(size, &rect, engine, depsgraph, nullptr, camera_original_ob, layer);
instance->init(size, &rect, engine, depsgraph, camera_original_ob, layer);
instance->render_frame(layer, viewname);
EEVEE_Data *ved = static_cast<EEVEE_Data *>(vedata);

View File

@ -19,6 +19,7 @@
#include "DNA_modifier_types.h"
#include "RE_pipeline.h"
#include "eevee_engine.h"
#include "eevee_instance.hh"
namespace blender::eevee {
@ -37,14 +38,12 @@ void Instance::init(const int2 &output_res,
const rcti *output_rect,
RenderEngine *render_,
Depsgraph *depsgraph_,
const LightProbe *light_probe_,
Object *camera_object_,
const RenderLayer *render_layer_,
const DRWView *drw_view_,
const View3D *v3d_,
const RegionView3D *rv3d_)
{
UNUSED_VARS(light_probe_);
render = render_;
depsgraph = depsgraph_;
camera_orig_object = camera_object_;
@ -73,6 +72,35 @@ void Instance::init(const int2 &output_res,
irradiance_cache.init();
}
/**
 * Minimal initialization path used when this instance drives an irradiance light bake
 * instead of a viewport or final render.
 *
 * Unlike the regular `init()`, there is no render engine, no camera object, no view and no
 * render layer: all of those members are explicitly nulled so the rest of the code can use
 * them as "not available" markers. `is_light_bake` is the flag `is_baking()` reports.
 *
 * \param depsgraph: Evaluated dependency graph to bake from.
 * \param manager: Draw manager the bake passes will be submitted through.
 */
void Instance::init_light_bake(Depsgraph *depsgraph, draw::Manager *manager)
{
  this->depsgraph = depsgraph;
  this->manager = manager;
  camera_orig_object = nullptr;
  render = nullptr;
  render_layer = nullptr;
  drw_view = nullptr;
  v3d = nullptr;
  rv3d = nullptr;

  is_light_bake = true;
  debug_mode = (eDebugMode)G.debug_value;
  info = "";

  /* Resolve evaluated scene / view-layer pointers before the modules read them. */
  update_eval_members();

  sampling.init(scene);
  camera.init();
  /* Film isn't used but init to avoid side effects in other module. */
  rcti empty_rect{0, 0, 0, 0};
  film.init(int2(1), &empty_rect);
  velocity.init();
  depth_of_field.init();
  shadows.init();
  main_view.init();
  irradiance_cache.init();
}
void Instance::set_time(float time)
{
BLI_assert(render);
@ -107,6 +135,7 @@ void Instance::begin_sync()
shadows.begin_sync();
pipelines.begin_sync();
cryptomatte.begin_sync();
light_probes.begin_sync();
gpencil_engine_enabled = false;
@ -139,7 +168,8 @@ void Instance::scene_sync()
void Instance::object_sync(Object *ob)
{
const bool is_renderable_type = ELEM(ob->type, OB_CURVES, OB_GPENCIL_LEGACY, OB_MESH, OB_LAMP);
const bool is_renderable_type = ELEM(
ob->type, OB_CURVES, OB_GPENCIL_LEGACY, OB_MESH, OB_LAMP, OB_LIGHTPROBE);
const int ob_visibility = DRW_object_visibility_in_active_context(ob);
const bool partsys_is_visible = (ob_visibility & OB_VISIBLE_PARTICLES) != 0 &&
(ob->type == OB_MESH);
@ -180,6 +210,9 @@ void Instance::object_sync(Object *ob)
case OB_GPENCIL_LEGACY:
sync.sync_gpencil(ob, ob_handle, res_handle);
break;
case OB_LIGHTPROBE:
light_probes.sync_probe(ob, ob_handle);
break;
default:
break;
}
@ -209,6 +242,7 @@ void Instance::end_sync()
film.end_sync();
cryptomatte.end_sync();
pipelines.end_sync();
light_probes.end_sync();
}
void Instance::render_sync()
@ -449,6 +483,79 @@ void Instance::update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view
EEVEE_RENDER_PASS_CRYPTOMATTE_MATERIAL);
}
/**
 * Run the irradiance-grid bake for \a probe to completion (or until \a stop requests abort).
 *
 * The bake alternates between GPU work and caller callbacks:
 * - \a context_enable / \a context_disable bracket every span of GPU work so the caller can
 *   take/release the GPU context (the bake typically runs from a job thread).
 * - \a stop is polled between sample batches; returning true aborts the bake early.
 * - \a result_update receives an intermediate (unpacked) or final (packed) cache frame plus a
 *   progress fraction in [0, 1] after every batch.
 */
void Instance::light_bake_irradiance(
    Object &probe,
    FunctionRef<void()> context_enable,
    FunctionRef<void()> context_disable,
    FunctionRef<bool()> stop,
    FunctionRef<void(LightProbeGridCacheFrame *, float progress)> result_update)
{
  BLI_assert(is_baking());

  /* Wraps a callback with GPU context acquisition AND a full DRW custom pipeline
   * (needed for scene sync / surfel rasterization). */
  auto custom_pipeline_wrapper = [&](FunctionRef<void()> callback) {
    context_enable();
    DRW_custom_pipeline_begin(&draw_engine_eevee_next_type, depsgraph);
    callback();
    DRW_custom_pipeline_end();
    context_disable();
  };

  /* Wraps a callback with GPU context acquisition only (compute-only phases). */
  auto context_wrapper = [&](FunctionRef<void()> callback) {
    context_enable();
    callback();
    context_disable();
  };

  irradiance_cache.bake.init(probe);

  custom_pipeline_wrapper([&]() {
    /* TODO: lightprobe visibility group option. */
    manager->begin_sync();
    render_sync();
    manager->end_sync();

    /* Rasterize the scene into surfels, then evaluate direct lighting on them once. */
    irradiance_cache.bake.surfels_create(probe);
    irradiance_cache.bake.surfels_lights_eval();
  });

  sampling.init(probe);
  while (!sampling.finished()) {
    context_wrapper([&]() {
      /* Batch ray cast by pack of 16. Avoids too much overhead of the update function & context
       * switch. */
      /* TODO(fclem): Could make the number of iteration depend on the computation time. */
      for (int i = 0; i < 16 && !sampling.finished(); i++) {
        sampling.step();

        irradiance_cache.bake.raylists_build();
        irradiance_cache.bake.propagate_light();
        irradiance_cache.bake.irradiance_capture();
      }

      if (sampling.finished()) {
        /* TODO(fclem): Dilation, filter etc... */
        // irradiance_cache.bake.irradiance_finalize();
      }

      LightProbeGridCacheFrame *cache_frame;
      if (sampling.finished()) {
        cache_frame = irradiance_cache.bake.read_result_packed();
      }
      else {
        /* TODO(fclem): Only do this read-back if needed. But it might be tricky to know when. */
        cache_frame = irradiance_cache.bake.read_result_unpacked();
      }

      float progress = sampling.sample_index() / float(sampling.sample_count());
      result_update(cache_frame, progress);
    });

    if (stop()) {
      return;
    }
  }
}
/** \} */
} // namespace blender::eevee

View File

@ -23,6 +23,7 @@
#include "eevee_hizbuffer.hh"
#include "eevee_irradiance_cache.hh"
#include "eevee_light.hh"
#include "eevee_lightprobe.hh"
#include "eevee_material.hh"
#include "eevee_motion_blur.hh"
#include "eevee_pipeline.hh"
@ -65,6 +66,7 @@ class Instance {
RenderBuffers render_buffers;
MainView main_view;
World world;
LightProbeModule light_probes;
IrradianceCache irradiance_cache;
/** Input data. */
@ -73,6 +75,7 @@ class Instance {
/** Evaluated IDs. */
Scene *scene;
ViewLayer *view_layer;
/** Camera object if rendering through a camera. nullptr otherwise. */
Object *camera_eval_object;
Object *camera_orig_object;
/** Only available when rendering for final render. */
@ -85,6 +88,8 @@ class Instance {
/** True if the grease pencil engine might be running. */
bool gpencil_engine_enabled;
/** True if the instance is created for light baking. */
bool is_light_bake = false;
/** Info string displayed at the top of the render / viewport. */
std::string info = "";
@ -111,14 +116,16 @@ class Instance {
render_buffers(*this),
main_view(*this),
world(*this),
light_probes(*this),
irradiance_cache(*this){};
~Instance(){};
/* Render & Viewport. */
/* TODO(fclem): Split for clarity. */
void init(const int2 &output_res,
const rcti *output_rect,
RenderEngine *render,
Depsgraph *depsgraph,
const LightProbe *light_probe_ = nullptr,
Object *camera_object = nullptr,
const RenderLayer *render_layer = nullptr,
const DRWView *drw_view = nullptr,
@ -129,17 +136,36 @@ class Instance {
void object_sync(Object *ob);
void end_sync();
/* Render. */
void render_sync();
void render_frame(RenderLayer *render_layer, const char *view_name);
void store_metadata(RenderResult *render_result);
/* Viewport. */
void draw_viewport(DefaultFramebufferList *dfbl);
/* Light bake. */
void init_light_bake(Depsgraph *depsgraph, draw::Manager *manager);
void light_bake_irradiance(
Object &probe,
FunctionRef<void()> context_enable,
FunctionRef<void()> context_disable,
FunctionRef<bool()> stop,
FunctionRef<void(LightProbeGridCacheFrame *, float progress)> result_update);
static void update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer);
bool is_viewport() const
{
return render == nullptr;
return render == nullptr && !is_baking();
}
bool is_baking() const
{
return is_light_bake;
}
bool overlays_enabled() const

View File

@ -2,66 +2,849 @@
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_rand.hh"
#include "DNA_lightprobe_types.h"
#include "BKE_lightprobe.h"
#include "GPU_capabilities.h"
#include "GPU_debug.h"
#include "BLI_math_rotation.hh"
#include "eevee_instance.hh"
#include "eevee_irradiance_cache.hh"
namespace blender::eevee {
/**
 * Fill `debug_surfels` with 256 randomly placed surfels (for debug drawing) and upload them.
 *
 * The RNG is seeded with 0 so the generated set is intended to be deterministic.
 */
void IrradianceCache::generate_random_surfels()
{
  const int surfels_len = 256;
  debug_surfels.resize(surfels_len);

  RandomNumberGenerator rng;
  rng.seed(0);

  for (DebugSurfel &surfel : debug_surfels) {
    float3 random = rng.get_unit_float3();
    surfel.position = random * 3.0f;
    surfel.normal = random;
    /* Draw the three color channels sequentially: C++ leaves the evaluation order of function
     * arguments unspecified, so calling `rng.get_float()` three times inside the `float4`
     * constructor would make the channel assignment compiler-dependent and defeat the fixed
     * seed above. */
    float red = rng.get_float();
    float green = rng.get_float();
    float blue = rng.get_float();
    surfel.color = float4(red, green, blue, 1.0f);
  }

  debug_surfels.push_update();
}
/* -------------------------------------------------------------------- */
/** \name Interface
* \{ */
void IrradianceCache::init()
{
if (debug_surfels_sh_ == nullptr) {
debug_surfels_sh_ = inst_.shaders.static_shader_get(DEBUG_SURFELS);
display_grids_enabled_ = DRW_state_draw_support() &&
(inst_.scene->eevee.flag & SCE_EEVEE_SHOW_IRRADIANCE);
/* TODO option. */
int atlas_byte_size = 1024 * 1024 * 16;
/* This might become an option in the future. */
bool use_l2_band = false;
int sh_coef_len = use_l2_band ? 9 : 4;
int texel_byte_size = 8; /* Assumes GPU_RGBA16F. */
int3 atlas_extent(IRRADIANCE_GRID_BRICK_SIZE);
atlas_extent.z *= sh_coef_len;
int atlas_col_count = 256;
atlas_extent.x *= atlas_col_count;
/* Determine the row count depending on the scene settings. */
int row_byte_size = atlas_extent.x * atlas_extent.y * atlas_extent.z * texel_byte_size;
int atlas_row_count = divide_ceil_u(atlas_byte_size, row_byte_size);
atlas_extent.y *= atlas_row_count;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_WRITE | GPU_TEXTURE_USAGE_SHADER_READ |
GPU_TEXTURE_USAGE_ATTACHMENT;
do_full_update_ = irradiance_atlas_tx_.ensure_3d(GPU_RGBA16F, atlas_extent, usage);
if (do_full_update_) {
/* Delete all references to existing bricks. */
for (IrradianceGrid &grid : inst_.light_probes.grid_map_.values()) {
grid.bricks.clear();
}
brick_pool_.clear();
/* Fill with all the available bricks. */
for (auto i : IndexRange(atlas_row_count * atlas_col_count)) {
if (i == 0) {
/* Reserve one brick for the world. */
world_brick_index_ = 0;
}
else {
IrradianceBrick brick;
brick.atlas_coord = uint2(i % atlas_col_count, i / atlas_col_count) *
IRRADIANCE_GRID_BRICK_SIZE;
brick_pool_.append(irradiance_brick_pack(brick));
}
}
if (irradiance_atlas_tx_.is_valid()) {
/* Clear the pool to avoid any interpolation to undefined values. */
irradiance_atlas_tx_.clear(float4(0.0f));
}
}
/* TODO: Remove this. */
generate_random_surfels();
if (irradiance_atlas_tx_.is_valid() == false) {
inst_.info = "Irradiance Atlas texture could not be created";
}
}
/** Per-redraw sync: rebuilds the debug draw pass. */
void IrradianceCache::sync()
{
  debug_pass_sync();
}
void IrradianceCache::debug_pass_sync()
{
if (inst_.debug_mode == eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS) {
debug_surfels_ps_.init();
debug_surfels_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
DRW_STATE_DEPTH_LESS_EQUAL);
debug_surfels_ps_.shader_set(debug_surfels_sh_);
debug_surfels_ps_.bind_ssbo("surfels_buf", debug_surfels);
debug_surfels_ps_.push_constant("surfel_radius", 0.25f);
debug_surfels_ps_.draw_procedural(GPU_PRIM_TRI_STRIP, debug_surfels.size(), 4);
if (inst_.is_baking()) {
bake.sync();
}
}
void IrradianceCache::debug_draw(View &view, GPUFrameBuffer *view_fb)
Vector<IrradianceBrickPacked> IrradianceCache::bricks_alloc(int brick_len)
{
if (inst_.debug_mode == eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS) {
inst_.info = "Debug Mode: Irradiance Cache Surfels";
GPU_framebuffer_bind(view_fb);
if (brick_pool_.size() < brick_len) {
/* Fail allocation. Not enough brick in the atlas. */
return {};
}
Vector<IrradianceBrickPacked> allocated(brick_len);
/* Copy bricks to return vector. */
allocated.as_mutable_span().copy_from(brick_pool_.as_span().take_back(brick_len));
/* Remove bricks from the pool. */
brick_pool_.resize(brick_pool_.size() - brick_len);
return allocated;
}
/**
 * Return \a bricks to the shared atlas pool so they can be re-allocated by other grids.
 * The input vector is emptied, dropping the caller's references to the freed bricks.
 */
void IrradianceCache::bricks_free(Vector<IrradianceBrickPacked> &bricks)
{
  brick_pool_.extend(bricks.as_span());
  bricks.clear();
}
/**
 * Prepare the irradiance atlas for shading with the current set of light-probe grids:
 * allocate atlas bricks for newly visible grids, fill the grid/brick info buffers (sorted so
 * smaller grids take priority), and upload the cached irradiance data of new grids into the
 * atlas. The view parameter is currently unused (no frustum culling yet, see TODO below).
 */
void IrradianceCache::set_view(View & /*view*/)
{
  /* Grids whose cached data must be uploaded to the atlas this call. */
  Vector<IrradianceGrid *> grid_updates;
  /* Grids resident in the atlas that get an entry in the infos UBO. */
  Vector<IrradianceGrid *> grid_loaded;

  /* First allocate the needed bricks and populate the brick buffer. */
  bricks_infos_buf_.clear();
  for (IrradianceGrid &grid : inst_.light_probes.grid_map_.values()) {
    LightProbeGridCacheFrame *cache = grid.cache ? grid.cache->grid_static_cache : nullptr;
    if (cache == nullptr) {
      continue;
    }

    if (cache->baking.L0 == nullptr && cache->irradiance.L0 == nullptr) {
      /* No data. */
      continue;
    }

    int3 grid_size = int3(cache->size);
    if (grid_size.x <= 0 || grid_size.y <= 0 || grid_size.z <= 0) {
      inst_.info = "Error: Malformed irradiance grid data";
      continue;
    }

    /* TODO frustum cull and only load visible grids. */
    /* Note that we reserve 1 slot for the world irradiance. */
    if (grid_loaded.size() >= IRRADIANCE_GRID_MAX - 1) {
      inst_.info = "Error: Too many grid visible";
      continue;
    }

    if (grid.bricks.is_empty()) {
      /* Bricks share a one-sample border with their neighbor, hence the -1. */
      int3 grid_size_in_bricks = math::divide_ceil(grid_size,
                                                   int3(IRRADIANCE_GRID_BRICK_SIZE - 1));
      int brick_len = grid_size_in_bricks.x * grid_size_in_bricks.y * grid_size_in_bricks.z;
      grid.bricks = bricks_alloc(brick_len);

      if (grid.bricks.is_empty()) {
        inst_.info = "Error: Irradiance grid allocation failed";
        continue;
      }
      grid_updates.append(&grid);
    }

    grid.brick_offset = bricks_infos_buf_.size();
    bricks_infos_buf_.extend(grid.bricks);

    /* NOTE(review): a second, identical `grid_size` validity check was removed here; it was
     * unreachable because the same condition already `continue`s at the top of this loop. */

    /* Object-space [0..size] sample coordinates to world space. */
    float4x4 grid_to_world = grid.object_to_world * math::from_location<float4x4>(float3(-1.0f)) *
                             math::from_scale<float4x4>(float3(2.0f / float3(grid_size))) *
                             math::from_location<float4x4>(float3(0.0f));

    grid.world_to_grid_transposed = float3x4(math::transpose(math::invert(grid_to_world)));
    grid.grid_size = grid_size;
    grid_loaded.append(&grid);
  }

  /* Then create brick & grid infos UBOs content. */
  {
    /* Sorting of grids. Smallest first so they have shading priority. */
    std::sort(grid_loaded.begin(),
              grid_loaded.end(),
              [](const IrradianceGrid *a, const IrradianceGrid *b) {
                float volume_a = math::determinant(float3x3(a->world_to_grid_transposed));
                float volume_b = math::determinant(float3x3(b->world_to_grid_transposed));
                if (volume_a != volume_b) {
                  /* Smallest first (the world-to-grid determinant grows as the grid shrinks). */
                  return volume_a > volume_b;
                }
                /* Volumes are identical. Any arbitrary criteria can be used to sort them.
                 * Use position to avoid unstable result caused by depsgraph non deterministic
                 * eval order. This could also become a priority parameter.
                 * NOTE: the compare must be a strict weak ordering for `std::sort` (OR-ing the
                 * three `<` results is not one and is undefined behavior), so compare component
                 * by component. */
                const auto &pos_a = a->world_to_grid_transposed[0];
                const auto &pos_b = b->world_to_grid_transposed[0];
                if (pos_a[0] != pos_b[0]) {
                  return pos_a[0] < pos_b[0];
                }
                if (pos_a[1] != pos_b[1]) {
                  return pos_a[1] < pos_b[1];
                }
                return pos_a[2] < pos_b[2];
              });

    /* Insert grids in UBO in sorted order. */
    int grids_len = 0;
    for (IrradianceGrid *grid : grid_loaded) {
      grid->grid_index = grids_len;
      grids_infos_buf_[grids_len++] = *grid;
    }

    /* Insert world grid last. */
    IrradianceGridData grid;
    grid.world_to_grid_transposed = float3x4::identity();
    grid.grid_size = int3(1);
    grid.brick_offset = bricks_infos_buf_.size();
    grids_infos_buf_[grids_len++] = grid;
    bricks_infos_buf_.append(world_brick_index_);

    if (grids_len < IRRADIANCE_GRID_MAX) {
      /* Tag last grid as invalid to stop the iteration. */
      grids_infos_buf_[grids_len].grid_size = int3(-1);
    }

    bricks_infos_buf_.push_update();
    grids_infos_buf_.push_update();
  }

  /* Upload data for each grid that need to be inserted in the atlas. */
  for (IrradianceGrid *grid : grid_updates) {
    LightProbeGridCacheFrame *cache = grid->cache->grid_static_cache;

    /* Staging textures are recreated for each light grid to avoid increasing VRAM usage. */
    draw::Texture irradiance_a_tx = {"irradiance_a_tx"};
    draw::Texture irradiance_b_tx = {"irradiance_b_tx"};
    draw::Texture irradiance_c_tx = {"irradiance_c_tx"};
    draw::Texture irradiance_d_tx = {"irradiance_d_tx"};

    eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ;
    int3 grid_size = int3(cache->size);
    if (cache->baking.L0) {
      /* In-progress bake data (4 components per band). */
      irradiance_a_tx.ensure_3d(GPU_RGBA16F, grid_size, usage, (float *)cache->baking.L0);
      irradiance_b_tx.ensure_3d(GPU_RGBA16F, grid_size, usage, (float *)cache->baking.L1_a);
      irradiance_c_tx.ensure_3d(GPU_RGBA16F, grid_size, usage, (float *)cache->baking.L1_b);
      irradiance_d_tx.ensure_3d(GPU_RGBA16F, grid_size, usage, (float *)cache->baking.L1_c);
    }
    else if (cache->irradiance.L0) {
      /* Finalized bake data (3 components per band). */
      irradiance_a_tx.ensure_3d(GPU_RGB16F, grid_size, usage, (float *)cache->irradiance.L0);
      irradiance_b_tx.ensure_3d(GPU_RGB16F, grid_size, usage, (float *)cache->irradiance.L1_a);
      irradiance_c_tx.ensure_3d(GPU_RGB16F, grid_size, usage, (float *)cache->irradiance.L1_b);
      irradiance_d_tx.ensure_3d(GPU_RGB16F, grid_size, usage, (float *)cache->irradiance.L1_c);
    }
    else {
      continue;
    }

    if (irradiance_a_tx.is_valid() == false) {
      inst_.info = "Error: Could not allocate irradiance staging texture";
      /* Avoid undefined behavior with uninitialized values. Still load a clear texture. */
      float4 zero(0.0f);
      irradiance_a_tx.ensure_3d(GPU_RGB16F, int3(1), usage, zero);
      irradiance_b_tx.ensure_3d(GPU_RGB16F, int3(1), usage, zero);
      irradiance_c_tx.ensure_3d(GPU_RGB16F, int3(1), usage, zero);
      irradiance_d_tx.ensure_3d(GPU_RGB16F, int3(1), usage, zero);
    }

    grid_upload_ps_.init();
    grid_upload_ps_.shader_set(inst_.shaders.static_shader_get(LIGHTPROBE_IRRADIANCE_LOAD));
    grid_upload_ps_.push_constant("grid_index", grid->grid_index);
    grid_upload_ps_.bind_ubo("grids_infos_buf", &grids_infos_buf_);
    grid_upload_ps_.bind_ssbo("bricks_infos_buf", &bricks_infos_buf_);
    grid_upload_ps_.bind_texture("irradiance_a_tx", &irradiance_a_tx);
    grid_upload_ps_.bind_texture("irradiance_b_tx", &irradiance_b_tx);
    grid_upload_ps_.bind_texture("irradiance_c_tx", &irradiance_c_tx);
    grid_upload_ps_.bind_texture("irradiance_d_tx", &irradiance_d_tx);
    grid_upload_ps_.bind_image("irradiance_atlas_img", &irradiance_atlas_tx_);
    /* Note that we take into account the padding border of each brick. */
    int3 grid_size_in_bricks = math::divide_ceil(grid_size, int3(IRRADIANCE_GRID_BRICK_SIZE - 1));
    grid_upload_ps_.dispatch(grid_size_in_bricks);
    inst_.manager->submit(grid_upload_ps_);

    irradiance_a_tx.free();
    irradiance_b_tx.free();
    irradiance_c_tx.free();
    irradiance_d_tx.free();
  }

  do_full_update_ = false;
}
/**
 * Draw the irradiance-cache viewport overlays (debug surfels and irradiance sample grids)
 * into \a view_fb. Skipped entirely while a light bake is running.
 */
void IrradianceCache::viewport_draw(View &view, GPUFrameBuffer *view_fb)
{
  if (inst_.is_baking()) {
    /* Overlays are meaningless while the bake owns the instance. */
    return;
  }
  debug_pass_draw(view, view_fb);
  display_pass_draw(view, view_fb);
}
/**
 * Draw the cached surfels of every baked grid for the active surfel debug mode.
 * Returns early (drawing nothing) when no surfel debug mode is enabled; otherwise sets the
 * viewport info string to name the active mode.
 */
void IrradianceCache::debug_pass_draw(View &view, GPUFrameBuffer *view_fb)
{
  switch (inst_.debug_mode) {
    case eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS_NORMAL:
      inst_.info = "Debug Mode: Surfels Normal";
      break;
    case eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS_IRRADIANCE:
      inst_.info = "Debug Mode: Surfels Irradiance";
      break;
    default:
      /* Nothing to display. */
      return;
  }

  for (const IrradianceGrid &grid : inst_.light_probes.grid_map_.values()) {
    if (grid.cache == nullptr) {
      continue;
    }
    LightProbeGridCacheFrame *cache = grid.cache->grid_static_cache;
    if (cache->surfels == nullptr || cache->surfels_len == 0) {
      continue;
    }

    debug_surfels_ps_.init();
    debug_surfels_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
                                DRW_STATE_DEPTH_LESS_EQUAL);
    /* Fix: attach the framebuffer to the pass that is actually submitted below
     * (`debug_surfels_ps_`); it was previously set on `display_grids_ps_`, which is not
     * submitted here, leaving this pass without a framebuffer. */
    debug_surfels_ps_.framebuffer_set(&view_fb);
    debug_surfels_ps_.shader_set(inst_.shaders.static_shader_get(DEBUG_SURFELS));
    debug_surfels_ps_.push_constant("surfel_radius", 1.5f / 4.0f);
    debug_surfels_ps_.push_constant("debug_mode", static_cast<int>(inst_.debug_mode));

    debug_surfels_buf_.resize(cache->surfels_len);
    /* TODO(fclem): Cleanup: Could have a function in draw::StorageArrayBuffer that takes an input
     * data. */
    Span<Surfel> grid_surfels(static_cast<Surfel *>(cache->surfels), cache->surfels_len);
    MutableSpan<Surfel>(debug_surfels_buf_.data(), cache->surfels_len).copy_from(grid_surfels);
    debug_surfels_buf_.push_update();

    debug_surfels_ps_.bind_ssbo("surfels_buf", debug_surfels_buf_);
    /* One 4-vertex triangle-strip (quad) instance per surfel. */
    debug_surfels_ps_.draw_procedural(GPU_PRIM_TRI_STRIP, cache->surfels_len, 4);

    inst_.manager->submit(debug_surfels_ps_, view);
  }
}
/**
 * Draw the irradiance-sample spheres of every grid that has cached data, when the
 * "show irradiance" overlay is enabled (`display_grids_enabled_`, see `init()`).
 *
 * Each grid's spherical-harmonics bands are staged into temporary 3D textures (freed again
 * right after the submit) so VRAM usage does not accumulate across grids.
 */
void IrradianceCache::display_pass_draw(View &view, GPUFrameBuffer *view_fb)
{
  if (!display_grids_enabled_) {
    return;
  }

  for (const IrradianceGrid &grid : inst_.light_probes.grid_map_.values()) {
    if (grid.cache == nullptr) {
      continue;
    }
    LightProbeGridCacheFrame *cache = grid.cache->grid_static_cache;
    if (cache == nullptr) {
      continue;
    }

    /* Display texture. Updated for each individual light grid to avoid increasing VRAM usage. */
    draw::Texture irradiance_a_tx = {"irradiance_a_tx"};
    draw::Texture irradiance_b_tx = {"irradiance_b_tx"};
    draw::Texture irradiance_c_tx = {"irradiance_c_tx"};
    draw::Texture irradiance_d_tx = {"irradiance_d_tx"};

    eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ;
    int3 grid_size = int3(cache->size);
    if (cache->baking.L0) {
      /* In-progress bake data (4 components per band). */
      irradiance_a_tx.ensure_3d(GPU_RGBA16F, grid_size, usage, (float *)cache->baking.L0);
      irradiance_b_tx.ensure_3d(GPU_RGBA16F, grid_size, usage, (float *)cache->baking.L1_a);
      irradiance_c_tx.ensure_3d(GPU_RGBA16F, grid_size, usage, (float *)cache->baking.L1_b);
      irradiance_d_tx.ensure_3d(GPU_RGBA16F, grid_size, usage, (float *)cache->baking.L1_c);
    }
    else if (cache->irradiance.L0) {
      /* Finalized bake data (3 components per band). */
      irradiance_a_tx.ensure_3d(GPU_RGB16F, grid_size, usage, (float *)cache->irradiance.L0);
      irradiance_b_tx.ensure_3d(GPU_RGB16F, grid_size, usage, (float *)cache->irradiance.L1_a);
      irradiance_c_tx.ensure_3d(GPU_RGB16F, grid_size, usage, (float *)cache->irradiance.L1_b);
      irradiance_d_tx.ensure_3d(GPU_RGB16F, grid_size, usage, (float *)cache->irradiance.L1_c);
    }
    else {
      /* No data to display for this grid. */
      continue;
    }

    display_grids_ps_.init();
    display_grids_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
                                DRW_STATE_DEPTH_LESS_EQUAL | DRW_STATE_CULL_BACK);
    display_grids_ps_.framebuffer_set(&view_fb);
    display_grids_ps_.shader_set(inst_.shaders.static_shader_get(DISPLAY_PROBE_GRID));

    display_grids_ps_.push_constant("sphere_radius", inst_.scene->eevee.gi_irradiance_draw_size);
    display_grids_ps_.push_constant("grid_resolution", grid_size);
    display_grids_ps_.push_constant("grid_to_world", grid.object_to_world);
    display_grids_ps_.push_constant("world_to_grid", grid.world_to_object);

    display_grids_ps_.bind_texture("irradiance_a_tx", &irradiance_a_tx);
    display_grids_ps_.bind_texture("irradiance_b_tx", &irradiance_b_tx);
    display_grids_ps_.bind_texture("irradiance_c_tx", &irradiance_c_tx);
    display_grids_ps_.bind_texture("irradiance_d_tx", &irradiance_d_tx);

    /* Two triangles per sample sphere, generated procedurally in the shader. */
    int sample_count = int(BKE_lightprobe_grid_cache_frame_sample_count(cache));
    int triangle_count = sample_count * 2;
    display_grids_ps_.draw_procedural(GPU_PRIM_TRIS, 1, triangle_count * 3);

    inst_.manager->submit(display_grids_ps_, view);

    irradiance_a_tx.free();
    irradiance_b_tx.free();
    irradiance_c_tx.free();
    irradiance_d_tx.free();
  }
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Baking
* \{ */
/**
 * Read the bake parameters from the light-probe object being baked.
 * Currently only caches the surfel density used to size the rasterization grids.
 */
void IrradianceBake::init(const Object &probe_object)
{
  const ::LightProbe *lightprobe = static_cast<::LightProbe *>(probe_object.data);
  surfel_density_ = lightprobe->surfel_density;
}
/**
 * Build the compute passes used by the bake loop:
 * - `surfel_light_eval_ps_`: direct light evaluation on the surfels.
 * - `surfel_ray_build_ps_`: build + sort per-ray-direction surfel lists.
 * - `surfel_light_propagate_ps_`: one bounce of light transport between surfels.
 * - `irradiance_capture_ps_`: accumulate surfel radiance into the irradiance grid images.
 *
 * Passes only record state/bindings here; they are submitted repeatedly by the bake steps
 * (`surfels_lights_eval`, `raylists_build`, `propagate_light`, `irradiance_capture`).
 */
void IrradianceBake::sync()
{
  {
    PassSimple &pass = surfel_light_eval_ps_;
    pass.init();
    /* Apply lights contribution to scene surfel representation. */
    pass.shader_set(inst_.shaders.static_shader_get(SURFEL_LIGHT));
    pass.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
    pass.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
    pass.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);
    inst_.lights.bind_resources(&pass);
    inst_.shadows.bind_resources(&pass);
    /* Sync with the surfel creation stage. */
    pass.barrier(GPU_BARRIER_SHADER_STORAGE);
    pass.barrier(GPU_BARRIER_SHADER_IMAGE_ACCESS);
    pass.barrier(GPU_BARRIER_TEXTURE_FETCH);
    pass.dispatch(&dispatch_per_surfel_);
  }
  {
    PassSimple &pass = surfel_ray_build_ps_;
    pass.init();
    {
      /* Scatter surfels into per-list linked lists (one thread per surfel). */
      PassSimple::Sub &sub = pass.sub("ListBuild");
      sub.shader_set(inst_.shaders.static_shader_get(SURFEL_LIST_BUILD));
      sub.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
      sub.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
      sub.bind_ssbo("list_start_buf", &list_start_buf_);
      sub.bind_ssbo("list_info_buf", &list_info_buf_);
      sub.barrier(GPU_BARRIER_SHADER_STORAGE);
      sub.dispatch(&dispatch_per_surfel_);
    }
    {
      /* Sort each list along the ray direction (one thread per list). */
      PassSimple::Sub &sub = pass.sub("ListSort");
      sub.shader_set(inst_.shaders.static_shader_get(SURFEL_LIST_SORT));
      sub.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
      sub.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
      sub.bind_ssbo("list_start_buf", &list_start_buf_);
      sub.bind_ssbo("list_info_buf", &list_info_buf_);
      sub.barrier(GPU_BARRIER_SHADER_STORAGE);
      sub.dispatch(&dispatch_per_list_);
    }
  }
  {
    PassSimple &pass = surfel_light_propagate_ps_;
    pass.init();
    {
      /* Exchange radiance between neighboring surfels in each list.
       * `radiance_src_`/`radiance_dst_` ping-pong between the two surfel radiance slots. */
      PassSimple::Sub &sub = pass.sub("RayEval");
      sub.shader_set(inst_.shaders.static_shader_get(SURFEL_RAY));
      sub.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
      sub.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
      sub.push_constant("radiance_src", &radiance_src_);
      sub.push_constant("radiance_dst", &radiance_dst_);
      sub.barrier(GPU_BARRIER_SHADER_STORAGE);
      sub.dispatch(&dispatch_per_surfel_);
    }
  }
  {
    /* Gather surfel radiance into the per-band irradiance images (one thread per grid sample). */
    PassSimple &pass = irradiance_capture_ps_;
    pass.init();
    pass.shader_set(inst_.shaders.static_shader_get(LIGHTPROBE_IRRADIANCE_RAY));
    pass.bind_ssbo(SURFEL_BUF_SLOT, &surfels_buf_);
    pass.bind_ssbo(CAPTURE_BUF_SLOT, &capture_info_buf_);
    pass.bind_ssbo("list_start_buf", &list_start_buf_);
    pass.bind_ssbo("list_info_buf", &list_info_buf_);
    pass.push_constant("radiance_src", &radiance_src_);
    pass.bind_image("irradiance_L0_img", &irradiance_L0_tx_);
    pass.bind_image("irradiance_L1_a_img", &irradiance_L1_a_tx_);
    pass.bind_image("irradiance_L1_b_img", &irradiance_L1_b_tx_);
    pass.bind_image("irradiance_L1_c_img", &irradiance_L1_c_tx_);
    pass.barrier(GPU_BARRIER_SHADER_STORAGE | GPU_BARRIER_SHADER_IMAGE_ACCESS);
    pass.dispatch(&dispatch_per_grid_sample_);
  }
}
/**
 * Set up the three orthographic rasterization views (one per world axis) covering the scene
 * bounds, used to generate surfels by rasterizing the scene from each axis.
 *
 * \param scene_min, scene_max: World-space scene bounding box (already padded by the caller).
 */
void IrradianceBake::surfel_raster_views_sync(const float3 &scene_min, const float3 &scene_max)
{
  using namespace blender::math;

  /* Raster resolution follows the requested surfel density, clamped to a sane texture size. */
  grid_pixel_extent_ = max(int3(1), int3(surfel_density_ * (scene_max - scene_min)));
  grid_pixel_extent_ = min(grid_pixel_extent_, int3(16384));

  /* We could use multi-view rendering here to avoid multiple submissions but it is unlikely to
   * make any difference. The bottleneck is still the light propagation loop. */
  auto sync_view = [&](View &view, CartesianBasis basis) {
    /* Express the scene bounds in the axis-aligned basis of this view. */
    float3 extent_min = transform_point(invert(basis), scene_min);
    float3 extent_max = transform_point(invert(basis), scene_max);
    float4x4 winmat = projection::orthographic(
        extent_min.x, extent_max.x, extent_min.y, extent_max.y, -extent_min.z, -extent_max.z);
    float4x4 viewinv = from_rotation<float4x4>(to_quaternion<float>(basis));
    /* No culling: every surface must emit surfels. */
    view.visibility_test(false);
    view.sync(invert(viewinv), winmat);
  };

  sync_view(view_x_, basis_x_);
  sync_view(view_y_, basis_y_);
  sync_view(view_z_, basis_z_);
}
void IrradianceBake::surfels_create(const Object &probe_object)
{
/**
* We rasterize the scene along the 3 axes. Each generated fragment will write a surface element
* so raster grid density need to match the desired surfel density. We do a first pass to know
* how much surfel to allocate then render again to create the surfels.
*/
using namespace blender::math;
const ::LightProbe *lightprobe = static_cast<::LightProbe *>(probe_object.data);
int3 grid_resolution = int3(&lightprobe->grid_resolution_x);
float4x4 grid_local_to_world = invert(float4x4(probe_object.world_to_object));
dispatch_per_grid_sample_ = math::divide_ceil(grid_resolution, int3(IRRADIANCE_GRID_GROUP_SIZE));
capture_info_buf_.irradiance_grid_size = grid_resolution;
capture_info_buf_.irradiance_grid_local_to_world = grid_local_to_world;
capture_info_buf_.irradiance_grid_world_to_local_rotation = float4x4(
(invert(normalize(float3x3(grid_local_to_world)))));
eGPUTextureUsage texture_usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE |
GPU_TEXTURE_USAGE_HOST_READ | GPU_TEXTURE_USAGE_ATTACHMENT;
/* 32bit float is needed here otherwise we loose too much energy from rounding error during the
* accumulation when the sample count is above 500. */
irradiance_L0_tx_.ensure_3d(GPU_RGBA32F, grid_resolution, texture_usage);
irradiance_L1_a_tx_.ensure_3d(GPU_RGBA32F, grid_resolution, texture_usage);
irradiance_L1_b_tx_.ensure_3d(GPU_RGBA32F, grid_resolution, texture_usage);
irradiance_L1_c_tx_.ensure_3d(GPU_RGBA32F, grid_resolution, texture_usage);
irradiance_L0_tx_.clear(float4(0.0f));
irradiance_L1_a_tx_.clear(float4(0.0f));
irradiance_L1_b_tx_.clear(float4(0.0f));
irradiance_L1_c_tx_.clear(float4(0.0f));
DRW_stats_group_start("IrradianceBake.SceneBounds");
{
draw::Manager &manager = *inst_.manager;
PassSimple &pass = irradiance_bounds_ps_;
pass.init();
pass.shader_set(inst_.shaders.static_shader_get(LIGHTPROBE_IRRADIANCE_BOUNDS));
pass.bind_ssbo("capture_info_buf", &capture_info_buf_);
pass.bind_ssbo("bounds_buf", &manager.bounds_buf.current());
pass.push_constant("resource_len", int(manager.resource_handle_count()));
pass.dispatch(
int3(divide_ceil_u(manager.resource_handle_count(), IRRADIANCE_BOUNDS_GROUP_SIZE), 1, 1));
}
/* Raster the scene to query the number of surfel needed. */
capture_info_buf_.do_surfel_count = false;
capture_info_buf_.do_surfel_output = false;
int neg_flt_max = int(0xFF7FFFFFu ^ 0x7FFFFFFFu); /* floatBitsToOrderedInt(-FLT_MAX) */
int pos_flt_max = 0x7F7FFFFF; /* floatBitsToOrderedInt(FLT_MAX) */
capture_info_buf_.scene_bound_x_min = pos_flt_max;
capture_info_buf_.scene_bound_y_min = pos_flt_max;
capture_info_buf_.scene_bound_z_min = pos_flt_max;
capture_info_buf_.scene_bound_x_max = neg_flt_max;
capture_info_buf_.scene_bound_y_max = neg_flt_max;
capture_info_buf_.scene_bound_z_max = neg_flt_max;
capture_info_buf_.push_update();
inst_.manager->submit(irradiance_bounds_ps_);
GPU_memory_barrier(GPU_BARRIER_BUFFER_UPDATE);
capture_info_buf_.read();
auto ordered_int_bits_to_float = [](int32_t int_value) -> float {
int32_t float_bits = (int_value < 0) ? (int_value ^ 0x7FFFFFFF) : int_value;
return *reinterpret_cast<float *>(&float_bits);
};
float3 scene_min = float3(ordered_int_bits_to_float(capture_info_buf_.scene_bound_x_min),
ordered_int_bits_to_float(capture_info_buf_.scene_bound_y_min),
ordered_int_bits_to_float(capture_info_buf_.scene_bound_z_min));
float3 scene_max = float3(ordered_int_bits_to_float(capture_info_buf_.scene_bound_x_max),
ordered_int_bits_to_float(capture_info_buf_.scene_bound_y_max),
ordered_int_bits_to_float(capture_info_buf_.scene_bound_z_max));
/* To avoid loosing any surface to the clipping planes, add some padding. */
float epsilon = 1.0f / surfel_density_;
scene_min -= epsilon;
scene_max += epsilon;
surfel_raster_views_sync(scene_min, scene_max);
scene_bound_sphere_ = float4(midpoint(scene_max, scene_min),
distance(scene_max, scene_min) / 2.0f);
DRW_stats_group_end();
/* WORKAROUND: Sync camera with correct bounds for light culling. */
inst_.camera.sync();
DRW_stats_group_start("IrradianceBake.SurfelsCount");
/* Raster the scene to query the number of surfel needed. */
capture_info_buf_.do_surfel_count = true;
capture_info_buf_.do_surfel_output = false;
capture_info_buf_.surfel_len = 0u;
capture_info_buf_.push_update();
empty_raster_fb_.ensure(transform_point(invert(basis_x_), grid_pixel_extent_).xy());
inst_.pipelines.capture.render(view_x_);
empty_raster_fb_.ensure(transform_point(invert(basis_y_), grid_pixel_extent_).xy());
inst_.pipelines.capture.render(view_y_);
empty_raster_fb_.ensure(transform_point(invert(basis_z_), grid_pixel_extent_).xy());
inst_.pipelines.capture.render(view_z_);
DRW_stats_group_end();
/* Allocate surfel pool. */
GPU_memory_barrier(GPU_BARRIER_BUFFER_UPDATE);
capture_info_buf_.read();
if (capture_info_buf_.surfel_len == 0) {
/* No surfels to allocate. */
return;
}
/* TODO(fclem): Check for GL limit and abort if the surfel cache doesn't fit the GPU memory. */
surfels_buf_.resize(capture_info_buf_.surfel_len);
surfels_buf_.clear_to_zero();
dispatch_per_surfel_.x = divide_ceil_u(surfels_buf_.size(), SURFEL_GROUP_SIZE);
DRW_stats_group_start("IrradianceBake.SurfelsCreate");
/* Raster the scene to generate the surfels. */
capture_info_buf_.do_surfel_count = true;
capture_info_buf_.do_surfel_output = true;
capture_info_buf_.surfel_len = 0u;
capture_info_buf_.push_update();
empty_raster_fb_.ensure(transform_point(invert(basis_x_), grid_pixel_extent_).xy());
inst_.pipelines.capture.render(view_x_);
empty_raster_fb_.ensure(transform_point(invert(basis_y_), grid_pixel_extent_).xy());
inst_.pipelines.capture.render(view_y_);
empty_raster_fb_.ensure(transform_point(invert(basis_z_), grid_pixel_extent_).xy());
inst_.pipelines.capture.render(view_z_);
/* Sync with any other following pass using the surfel buffer. */
GPU_memory_barrier(GPU_BARRIER_SHADER_STORAGE);
/* Read back so that following push_update will contain correct surfel count. */
capture_info_buf_.read();
DRW_stats_group_end();
}
/* Evaluate direct light contribution for every surfel (see `surfel_light_eval_ps_`). */
void IrradianceBake::surfels_lights_eval()
{
/* Use the last setup view. This should work since the view is orthographic. */
/* TODO(fclem): Remove this. It is only present to avoid crash inside `shadows.set_view` */
inst_.render_buffers.acquire(int2(1));
/* Update light and shadow data for the baking view before evaluating surfel lighting. */
inst_.lights.set_view(view_z_, grid_pixel_extent_.xy());
inst_.shadows.set_view(view_z_);
inst_.render_buffers.release();
inst_.manager->submit(surfel_light_eval_ps_, view_z_);
}
void IrradianceBake::raylists_build()
{
using namespace blender::math;
float2 rand_uv = inst_.sampling.rng_2d_get(eSamplingDimension::SAMPLING_LENS_U);
const float3 ray_direction = inst_.sampling.sample_sphere(rand_uv);
const float3 up = ray_direction;
const float3 forward = cross(up, normalize(orthogonal(up)));
const float4x4 viewinv = from_orthonormal_axes<float4x4>(float3(0.0f), forward, up);
const float4x4 viewmat = invert(viewinv);
/* Compute projection bounds. */
float2 min, max;
min = max = transform_point(viewmat, scene_bound_sphere_.xyz()).xy();
min -= scene_bound_sphere_.w;
max += scene_bound_sphere_.w;
/* This avoid light leaking by making sure that for one surface there will always be at least 1
* surfel capture inside a ray list. Since the surface with the maximum distance (after
* projection) between adjacent surfels is a slope that goes through 3 corners of a cube,
* the distance the grid needs to cover is the diagonal of a cube face.
*
* The lower the number the more surfels it clumps together in the same surfel-list.
* Biasing the grid_density like that will create many invalid link between coplanar surfels.
* These are dealt with during the list sorting pass.
*
* This has a side effect of inflating shadows and emissive surfaces.
*
* We add an extra epsilon just in case. We really need this step to be leak free. */
const float max_distance_between_neighbor_surfels_inv = M_SQRT1_2 - 1e-4;
/* Surfel list per unit distance. */
const float ray_grid_density = surfel_density_ * max_distance_between_neighbor_surfels_inv;
/* Surfel list size in unit distance. */
const float pixel_size = 1.0f / ray_grid_density;
list_info_buf_.ray_grid_size = math::max(int2(1), int2(ray_grid_density * (max - min)));
/* Add a 2 pixels margin to have empty lists for irradiance grid samples to fall into (as they
* are not considered by the scene bounds). The first pixel margin is because we are jittering
* the grid position. */
list_info_buf_.ray_grid_size += int2(4);
min -= pixel_size * 2.0f;
max += pixel_size * 2.0f;
/* Randomize grid center to avoid uneven inflating of corners in some directions. */
const float2 aa_rand = inst_.sampling.rng_2d_get(eSamplingDimension::SAMPLING_FILTER_U);
/* Offset in surfel list "pixel". */
const float2 aa_offset = (aa_rand - 0.5f) * 0.499f;
min += pixel_size * aa_offset;
list_info_buf_.list_max = list_info_buf_.ray_grid_size.x * list_info_buf_.ray_grid_size.y;
list_info_buf_.push_update();
/* NOTE: Z values do not really matter since we are not doing any rasterization. */
const float4x4 winmat = projection::orthographic<float>(min.x, max.x, min.y, max.y, 0, 1);
ray_view_.sync(viewmat, winmat);
dispatch_per_list_.x = divide_ceil_u(list_info_buf_.list_max, SURFEL_LIST_GROUP_SIZE);
list_start_buf_.resize(ceil_to_multiple_u(list_info_buf_.list_max, 4));
GPU_storagebuf_clear(list_start_buf_, -1);
inst_.manager->submit(surfel_ray_build_ps_, ray_view_);
}
/* Propagate light from surfel to surfel along the current sample direction. */
void IrradianceBake::propagate_light()
{
/* NOTE: Subtract 1 because after `sampling.step()`. */
capture_info_buf_.sample_index = inst_.sampling.sample_index() - 1;
capture_info_buf_.sample_count = inst_.sampling.sample_count();
capture_info_buf_.push_update();
inst_.manager->submit(surfel_light_propagate_ps_, ray_view_);
/* Swap the radiance double-buffer indices so the next bounce reads what this one wrote. */
std::swap(radiance_src_, radiance_dst_);
}
/* Store surfel lighting into the irradiance grid samples (see `irradiance_capture_ps_`). */
void IrradianceBake::irradiance_capture()
{
inst_.manager->submit(irradiance_capture_ps_, ray_view_);
}
/**
 * Read surfel data back to CPU into \a cache_frame.
 * Only done when a surfel debug visualization is active, as the data is otherwise unused on CPU.
 */
void IrradianceBake::read_surfels(LightProbeGridCacheFrame *cache_frame)
{
  const bool need_surfels_on_cpu = ELEM(inst_.debug_mode,
                                        eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS_NORMAL,
                                        eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS_IRRADIANCE);
  if (!need_surfels_on_cpu) {
    return;
  }

  /* Make sure GPU writes are visible before the read-back. */
  GPU_memory_barrier(GPU_BARRIER_BUFFER_UPDATE);
  capture_info_buf_.read();
  surfels_buf_.read();

  cache_frame->surfels_len = capture_info_buf_.surfel_len;
  cache_frame->surfels = MEM_malloc_arrayN(cache_frame->surfels_len, sizeof(Surfel), __func__);

  MutableSpan<Surfel> dst(static_cast<Surfel *>(cache_frame->surfels), cache_frame->surfels_len);
  Span<Surfel> src(surfels_buf_.data(), cache_frame->surfels_len);
  dst.copy_from(src);
}
/**
 * Read the baked grid back to CPU in its unpacked (float4 baking texture) layout.
 * Ownership of the returned #LightProbeGridCacheFrame is passed to the caller.
 */
LightProbeGridCacheFrame *IrradianceBake::read_result_unpacked()
{
  LightProbeGridCacheFrame *frame = BKE_lightprobe_grid_cache_frame_create();

  read_surfels(frame);

  frame->size[0] = irradiance_L0_tx_.width();
  frame->size[1] = irradiance_L0_tx_.height();
  frame->size[2] = irradiance_L0_tx_.depth();

  /* Ensure all texture writes are finished before reading back. */
  GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE);

  frame->baking.L0 = (float(*)[4])irradiance_L0_tx_.read<float4>(GPU_DATA_FLOAT);
  frame->baking.L1_a = (float(*)[4])irradiance_L1_a_tx_.read<float4>(GPU_DATA_FLOAT);
  frame->baking.L1_b = (float(*)[4])irradiance_L1_b_tx_.read<float4>(GPU_DATA_FLOAT);
  frame->baking.L1_c = (float(*)[4])irradiance_L1_c_tx_.read<float4>(GPU_DATA_FLOAT);

  /* TODO(fclem): Connectivity. */
  // frame->connectivity.bitmask = connectivity_tx_.read<uint8_t>(GPU_DATA_FLOAT);

  return frame;
}
/**
 * Read the baked grid back to CPU and pack it into a #LightProbeGridCacheFrame,
 * converting the float4 baking textures into the final float3 irradiance layout.
 * Ownership of the returned frame is passed to the caller.
 */
LightProbeGridCacheFrame *IrradianceBake::read_result_packed()
{
  LightProbeGridCacheFrame *cache_frame = BKE_lightprobe_grid_cache_frame_create();

  read_surfels(cache_frame);

  cache_frame->size[0] = irradiance_L0_tx_.width();
  cache_frame->size[1] = irradiance_L0_tx_.height();
  cache_frame->size[2] = irradiance_L0_tx_.depth();

  /* Ensure all texture writes are finished before reading back. */
  GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE);

  cache_frame->baking.L0 = (float(*)[4])irradiance_L0_tx_.read<float4>(GPU_DATA_FLOAT);
  cache_frame->baking.L1_a = (float(*)[4])irradiance_L1_a_tx_.read<float4>(GPU_DATA_FLOAT);
  cache_frame->baking.L1_b = (float(*)[4])irradiance_L1_b_tx_.read<float4>(GPU_DATA_FLOAT);
  cache_frame->baking.L1_c = (float(*)[4])irradiance_L1_c_tx_.read<float4>(GPU_DATA_FLOAT);

  /* Widen before multiplying to avoid `int` overflow on very large grids. */
  const int64_t sample_count = int64_t(irradiance_L0_tx_.width()) * irradiance_L0_tx_.height() *
                               irradiance_L0_tx_.depth();
  /* Use the overflow-checked array allocator, consistent with `read_surfels`. */
  cache_frame->irradiance.L0 = (float(*)[3])MEM_malloc_arrayN(
      sample_count, sizeof(*cache_frame->irradiance.L0), __func__);
  cache_frame->irradiance.L1_a = (float(*)[3])MEM_malloc_arrayN(
      sample_count, sizeof(*cache_frame->irradiance.L1_a), __func__);
  cache_frame->irradiance.L1_b = (float(*)[3])MEM_malloc_arrayN(
      sample_count, sizeof(*cache_frame->irradiance.L1_b), __func__);
  cache_frame->irradiance.L1_c = (float(*)[3])MEM_malloc_arrayN(
      sample_count, sizeof(*cache_frame->irradiance.L1_c), __func__);

  /* Copy the RGB part of each float4 coefficient into the packed float3 arrays. */
  for (auto i : IndexRange(sample_count)) {
    copy_v3_v3(cache_frame->irradiance.L0[i], cache_frame->baking.L0[i]);
    copy_v3_v3(cache_frame->irradiance.L1_a[i], cache_frame->baking.L1_a[i]);
    copy_v3_v3(cache_frame->irradiance.L1_b[i], cache_frame->baking.L1_b[i]);
    copy_v3_v3(cache_frame->irradiance.L1_c[i], cache_frame->baking.L1_c[i]);
  }

  /* The float4 staging buffers are no longer needed once packed. */
  MEM_SAFE_FREE(cache_frame->baking.L0);
  MEM_SAFE_FREE(cache_frame->baking.L1_a);
  MEM_SAFE_FREE(cache_frame->baking.L1_b);
  MEM_SAFE_FREE(cache_frame->baking.L1_c);

  // cache_frame->visibility.L0 = irradiance_only_L0_tx_.read<uint8_t>(GPU_DATA_UBYTE);
  // cache_frame->visibility.L1_a = irradiance_only_L1_a_tx_.read<uint8_t>(GPU_DATA_UBYTE);
  // cache_frame->visibility.L1_b = irradiance_only_L1_b_tx_.read<uint8_t>(GPU_DATA_UBYTE);
  // cache_frame->visibility.L1_c = irradiance_only_L1_c_tx_.read<uint8_t>(GPU_DATA_UBYTE);

  /* TODO(fclem): Connectivity. */
  // cache_frame->connectivity.bitmask = connectivity_tx_.read<uint8_t>(GPU_DATA_FLOAT);

  return cache_frame;
}
/** \} */
} // namespace blender::eevee

View File

@ -2,34 +2,185 @@
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup eevee
*/
#pragma once
#include "DNA_lightprobe_types.h"
#include "BLI_math_quaternion_types.hh"
#include "eevee_lightprobe.hh"
#include "eevee_shader_shared.hh"
namespace blender::eevee {
class Instance;
class CapturePipeline;
class ShadowModule;
class Camera;
/**
* Baking related pass and data. Not used at runtime.
*/
class IrradianceBake {
friend CapturePipeline;
friend ShadowModule;
friend Camera;
class IrradianceCache {
private:
Instance &inst_;
DebugSurfelBuf debug_surfels;
PassSimple debug_surfels_ps_ = {"IrradianceCache.Debug"};
GPUShader *debug_surfels_sh_ = nullptr;
/** Light cache being baked. */
LightCache *light_cache_ = nullptr;
/** Surface elements that represent the scene. */
SurfelBuf surfels_buf_;
/** Capture state. */
CaptureInfoBuf capture_info_buf_;
/** Framebuffer. */
Framebuffer empty_raster_fb_ = {"empty_raster_fb_"};
/** Evaluate light object contribution and store result to surfel. */
PassSimple surfel_light_eval_ps_ = {"LightEval"};
/** Create linked list of surfel to emulated raycast. */
PassSimple surfel_ray_build_ps_ = {"RayBuild"};
/** Propagate light from surfel to surfel. */
PassSimple surfel_light_propagate_ps_ = {"LightPropagate"};
/** Capture surfel lighting to irradiance samples. */
PassSimple irradiance_capture_ps_ = {"IrradianceCapture"};
/** Compute scene bounding box. */
PassSimple irradiance_bounds_ps_ = {"IrradianceBounds"};
/** Index of source and destination radiance in radiance double-buffer. */
int radiance_src_ = 0, radiance_dst_ = 1;
/* TODO: Remove this. */
void generate_random_surfels();
/**
* Basis orientation for each baking projection.
* Note that this is the view orientation. The projection matrix will take the negative Z axis
* as forward and Y as up. */
math::CartesianBasis basis_x_ = {
math::AxisSigned::Y_POS, math::AxisSigned::Z_POS, math::AxisSigned::X_POS};
math::CartesianBasis basis_y_ = {
math::AxisSigned::X_POS, math::AxisSigned::Z_POS, math::AxisSigned::Y_NEG};
math::CartesianBasis basis_z_ = {
math::AxisSigned::X_POS, math::AxisSigned::Y_POS, math::AxisSigned::Z_POS};
/** Views for each baking projection. */
View view_x_ = {"BakingViewX"};
View view_y_ = {"BakingViewY"};
View view_z_ = {"BakingViewZ"};
/** Pixel resolution in each of the projection axes. Match the target surfel density. */
int3 grid_pixel_extent_ = int3(0);
/** Information for surfel list building. */
SurfelListInfoBuf list_info_buf_ = {"list_info_buf_"};
/** List array containing list start surfel index. Cleared to -1. */
StorageArrayBuffer<int, 16, true> list_start_buf_ = {"list_start_buf_"};
/* Dispatch size for per surfel workload. */
int3 dispatch_per_surfel_ = int3(1);
/* Dispatch size for per surfel list workload. */
int3 dispatch_per_list_ = int3(1);
/* Dispatch size for per grid sample workload. */
int3 dispatch_per_grid_sample_ = int3(1);
/** View used to flatten the surfels into surfel lists representing rays. */
View ray_view_ = {"RayProjectionView"};
/** Irradiance textures for baking. Only represents one grid in there. */
Texture irradiance_L0_tx_ = {"irradiance_L0_tx_"};
Texture irradiance_L1_a_tx_ = {"irradiance_L1_a_tx_"};
Texture irradiance_L1_b_tx_ = {"irradiance_L1_b_tx_"};
Texture irradiance_L1_c_tx_ = {"irradiance_L1_c_tx_"};
/* Bounding sphere of the scene being baked. In world space. */
float4 scene_bound_sphere_;
/* Surfel per unit distance. */
float surfel_density_ = 1.0f;
public:
IrradianceCache(Instance &inst) : inst_(inst){};
IrradianceBake(Instance &inst) : inst_(inst){};
void init(const Object &probe_object);
void sync();
/** Create the views used to rasterize the scene into surfel representation. */
void surfel_raster_views_sync(const float3 &scene_min, const float3 &scene_max);
/** Create a surfel representation of the scene from the probe using the capture pipeline. */
void surfels_create(const Object &probe_object);
/** Evaluate direct lighting (and also clear the surfels radiance). */
void surfels_lights_eval();
/** Create a surfel lists to emulate ray-casts for the current sample random direction. */
void raylists_build();
/** Propagate light from surfel to surfel in a random direction over the sphere. */
void propagate_light();
/** Store surfel irradiance inside the irradiance grid samples. */
void irradiance_capture();
/** Read grid unpacked irradiance back to CPU and returns as a #LightProbeGridCacheFrame. */
LightProbeGridCacheFrame *read_result_unpacked();
/** Read grid packed irradiance back to CPU and returns as a #LightProbeGridCacheFrame. */
LightProbeGridCacheFrame *read_result_packed();
private:
/** Read surfel data back to CPU into \a cache_frame . */
void read_surfels(LightProbeGridCacheFrame *cache_frame);
};
/**
* Runtime container of diffuse indirect lighting.
* Also have debug and baking components.
*/
class IrradianceCache {
public:
/** Baking component. Only used while baking light probe grids, not at viewport runtime. */
IrradianceBake bake;
private:
Instance &inst_;
/** Atlas 3D texture containing all loaded grid data. */
Texture irradiance_atlas_tx_ = {"irradiance_atlas_tx_"};
/** Reserved atlas brick for world irradiance. */
int world_brick_index_ = 0;
/** Data structure used to index irradiance cache pages inside the atlas. */
IrradianceGridDataBuf grids_infos_buf_ = {"grids_infos_buf_"};
IrradianceBrickBuf bricks_infos_buf_ = {"bricks_infos_buf_"};
/** Pool of atlas regions to allocate to different grids. */
Vector<IrradianceBrickPacked> brick_pool_;
/** Stream data into the irradiance atlas texture. */
PassSimple grid_upload_ps_ = {"IrradianceCache.Upload"};
/** If true, will trigger the reupload of all grid data instead of just streaming new ones. */
bool do_full_update_ = true;
/** Display surfel debug data. */
PassSimple debug_surfels_ps_ = {"IrradianceCache.Debug"};
/** Debug surfel elements copied from the light cache. */
draw::StorageArrayBuffer<Surfel> debug_surfels_buf_;
/** Display grid cache data. */
bool display_grids_enabled_ = false;
PassSimple display_grids_ps_ = {"IrradianceCache.Display Grids"};
public:
IrradianceCache(Instance &inst) : bake(inst), inst_(inst){};
~IrradianceCache(){};
void init();
void sync();
void set_view(View &view);
void viewport_draw(View &view, GPUFrameBuffer *view_fb);
void debug_pass_sync();
void debug_draw(View &view, GPUFrameBuffer *view_fb);
/** Allocate / release atlas bricks from the shared brick pool. */
Vector<IrradianceBrickPacked> bricks_alloc(int brick_len);
void bricks_free(Vector<IrradianceBrickPacked> &bricks);
/** Bind the atlas and grid-info resources a pass needs to sample cached irradiance. */
template<typename T> void bind_resources(draw::detail::PassBase<T> *pass)
{
pass->bind_ubo(IRRADIANCE_GRID_BUF_SLOT, &grids_infos_buf_);
pass->bind_ssbo(IRRADIANCE_BRICK_BUF_SLOT, &bricks_infos_buf_);
pass->bind_texture(IRRADIANCE_ATLAS_TEX_SLOT, &irradiance_atlas_tx_);
}
private:
void debug_pass_draw(View &view, GPUFrameBuffer *view_fb);
void display_pass_draw(View &view, GPUFrameBuffer *view_fb);
};
} // namespace blender::eevee

View File

@ -0,0 +1,346 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup eevee
*
* Contains everything about light baking.
*/
#include "DRW_render.h"
#include "BKE_global.h"
#include "BKE_lightprobe.h"
#include "DNA_lightprobe_types.h"
#include "BLI_threads.h"
#include "DEG_depsgraph_build.h"
#include "DEG_depsgraph_query.h"
#include "PIL_time.h"
#include "GPU_capabilities.h"
#include "GPU_context.h"
#include "WM_api.h"
#include "WM_types.h"
#include "wm_window.h"
#include "eevee_engine.h"
#include "eevee_instance.hh"
#include "eevee_lightcache.hh"
/* -------------------------------------------------------------------- */
/** \name Light Probe Baking
* \{ */
namespace blender::eevee {
/**
 * Light baking job state. Owns the baking depsgraph, the (optional) worker GPU context and the
 * baking EEVEE instance. Created on the main thread; `run()` executes on the worker thread.
 */
class LightBake {
private:
Depsgraph *depsgraph_;
/** Scene frame to evaluate the depsgraph at. */
int frame_;
/** Milliseconds. Delay the start of the baking to not slowdown interactions (TODO: remove). */
int delay_ms_;
/**
* If running in parallel (in a separate thread), use this context.
* Created on main thread but first bound in worker thread.
*/
void *gl_context_ = nullptr;
/** Context associated to `gl_context_`. Created in the worker thread. */
GPUContext *gpu_context_ = nullptr;
/** Baking instance. Created and freed in the worker thread. */
Instance *instance_ = nullptr;
/** Manager used for command submission. Created and freed in the worker thread. */
draw::Manager *manager_ = nullptr;
/** Lightprobe original objects to bake. */
Vector<Object *> original_probes_;
/** Frame to copy to original objects during update. This is needed to avoid race conditions. */
Vector<LightProbeGridCacheFrame *> bake_result_;
/* Guards `bake_result_`, which is written by the worker and consumed in `update()`. */
std::mutex result_mutex_;
public:
LightBake(Main *bmain,
ViewLayer *view_layer,
Scene *scene,
Span<Object *> probes,
bool run_as_job,
int frame,
int delay_ms = 0)
: depsgraph_(DEG_graph_new(bmain, scene, view_layer, DAG_EVAL_RENDER)),
frame_(frame),
delay_ms_(delay_ms),
original_probes_(probes)
{
BLI_assert(BLI_thread_is_main());
bake_result_.resize(probes.size());
bake_result_.fill(nullptr);
if (run_as_job && !GPU_use_main_context_workaround()) {
/* This needs to happen in main thread. */
gl_context_ = WM_system_gpu_context_create();
wm_window_reset_drawable();
}
}
~LightBake()
{
BLI_assert(BLI_thread_is_main());
DEG_graph_free(depsgraph_);
}
/**
* Called from main thread.
* Copy result to original scene data.
* Note that since this is in the main thread, the viewport cannot be using the light cache.
* So there is no race condition here.
*/
void update()
{
BLI_assert(BLI_thread_is_main());
for (auto i : bake_result_.index_range()) {
if (bake_result_[i] == nullptr) {
continue;
}
Object *orig_ob = original_probes_[i];
{
std::scoped_lock lock(result_mutex_);
LightProbeObjectCache *cache = orig_ob->lightprobe_cache;
/* Delete any existing cache. */
if (cache->grid_static_cache != nullptr) {
BKE_lightprobe_grid_cache_frame_free(cache->grid_static_cache);
}
/* Pass ownership to original object. */
cache->grid_static_cache = bake_result_[i];
bake_result_[i] = nullptr;
}
/* Propagate the cache to evaluated object. */
DEG_id_tag_update(&orig_ob->id, ID_RECALC_COPY_ON_WRITE);
}
}
/**
* Called from worker thread.
* Evaluates the depsgraph, creates the baking instance and bakes each probe sequentially.
* `stop`, `do_update` and `progress` are the usual wmJob communication pointers (all optional).
*/
void run(bool *stop = nullptr, bool *do_update = nullptr, float *progress = nullptr)
{
DEG_graph_relations_update(depsgraph_);
DEG_evaluate_on_framechange(depsgraph_, frame_);
if (delay_ms_ > 0) {
PIL_sleep_ms(delay_ms_);
}
/* Create GPU side resources with a context bound, then unbind so the bake callbacks can
* rebind it around each GPU workload. */
context_enable();
manager_ = new draw::Manager();
instance_ = new eevee::Instance();
instance_->init_light_bake(depsgraph_, manager_);
context_disable();
for (auto i : original_probes_.index_range()) {
Object *eval_ob = DEG_get_evaluated_object(depsgraph_, original_probes_[i]);
instance_->light_bake_irradiance(
*eval_ob,
[this]() { context_enable(); },
[this]() { context_disable(); },
[&]() { return (G.is_break == true) || ((stop != nullptr) ? *stop : false); },
[&](LightProbeGridCacheFrame *cache_frame, float grid_progress) {
{
std::scoped_lock lock(result_mutex_);
/* Delete any existing cache that wasn't transferred to the original object. */
if (bake_result_[i] != nullptr) {
BKE_lightprobe_grid_cache_frame_free(bake_result_[i]);
}
bake_result_[i] = cache_frame;
}
if (do_update) {
*do_update = true;
}
if (progress) {
*progress = (i + grid_progress) / original_probes_.size();
}
});
if ((G.is_break == true) || ((stop != nullptr && *stop == true))) {
break;
}
}
delete_resources();
}
private:
/* Bind the appropriate GPU context for this thread (main, workaround, or worker GL context). */
void context_enable(bool render_begin = true)
{
if (GPU_use_main_context_workaround() && !BLI_thread_is_main()) {
/* Reuse main draw context. */
GPU_context_main_lock();
DRW_gpu_context_enable();
}
else if (gl_context_ == nullptr) {
/* Main thread case. */
DRW_gpu_context_enable();
}
else {
/* Worker thread case. */
DRW_system_gpu_render_context_enable(gl_context_);
if (gpu_context_ == nullptr) {
/* Create GPUContext in worker thread as it needs the correct gl context bound (which can
* only be bound in worker thread because of some GL driver requirements). */
gpu_context_ = GPU_context_create(nullptr, gl_context_);
}
DRW_blender_gpu_render_context_enable(gpu_context_);
}
if (render_begin) {
GPU_render_begin();
}
}
/* Counterpart of `context_enable()`. Unbinds in the matching order for each case. */
void context_disable()
{
if (GPU_use_main_context_workaround() && !BLI_thread_is_main()) {
/* Reuse main draw context. */
DRW_gpu_context_disable();
GPU_render_end();
GPU_context_main_unlock();
}
else if (gl_context_ == nullptr) {
/* Main thread case. */
DRW_gpu_context_disable();
GPU_render_end();
}
else {
/* Worker thread case. */
DRW_blender_gpu_render_context_disable(gpu_context_);
GPU_render_end();
DRW_system_gpu_render_context_disable(gl_context_);
}
}
/**
* Delete the engine instance and the optional contexts.
* This needs to run on the worker thread because the OpenGL context can only be ever bound to a
* single thread (because of some driver implementation), and the resources (textures,
* buffers,...) need to be freed with the right context bound.
*/
void delete_resources()
{
/* Bind context without GPU_render_begin(). */
context_enable(false);
/* Free GPU data (Textures, Framebuffers, etc...). */
delete instance_;
delete manager_;
/* Delete / unbind the GL & GPU context. Assumes it is currently bound. */
if (GPU_use_main_context_workaround() && !BLI_thread_is_main()) {
/* Reuse main draw context. */
DRW_gpu_context_disable();
GPU_context_main_unlock();
}
else if (gl_context_ == nullptr) {
/* Main thread case. */
DRW_gpu_context_disable();
}
else {
/* Worker thread case. */
if (gpu_context_ != nullptr) {
GPU_context_discard(gpu_context_);
}
DRW_system_gpu_render_context_disable(gl_context_);
WM_system_gpu_context_dispose(gl_context_);
}
}
};
} // namespace blender::eevee
using namespace blender::eevee;
/* -------------------------------------------------------------------- */
/** \name Light Bake Job
* \{ */
/* Create (but do not start) the light baking wmJob. Returns null if a render is in progress. */
wmJob *EEVEE_NEXT_lightbake_job_create(wmWindowManager *wm,
wmWindow *win,
Main *bmain,
ViewLayer *view_layer,
Scene *scene,
blender::Vector<Object *> original_probes,
int delay_ms,
int frame)
{
/* Do not bake if there is a render going on. */
if (WM_jobs_test(wm, scene, WM_JOB_TYPE_RENDER)) {
return nullptr;
}
/* Stop existing baking job. */
WM_jobs_stop(wm, nullptr, (void *)EEVEE_NEXT_lightbake_job);
wmJob *wm_job = WM_jobs_get(wm,
win,
scene,
"Bake Lighting",
WM_JOB_EXCL_RENDER | WM_JOB_PRIORITY | WM_JOB_PROGRESS,
WM_JOB_TYPE_LIGHT_BAKE);
/* Ownership of `bake` is transferred to the job system via the free callback below. */
LightBake *bake = new LightBake(
bmain, view_layer, scene, std::move(original_probes), true, frame, delay_ms);
WM_jobs_customdata_set(wm_job, bake, EEVEE_NEXT_lightbake_job_data_free);
WM_jobs_timer(wm_job, 0.4, NC_SCENE | NA_EDITED, 0);
/* `EEVEE_NEXT_lightbake_update` serves as both the update and the end callback. */
WM_jobs_callbacks(wm_job,
EEVEE_NEXT_lightbake_job,
nullptr,
EEVEE_NEXT_lightbake_update,
EEVEE_NEXT_lightbake_update);
G.is_break = false;
return wm_job;
}
/**
 * Allocate the job data (an opaque `LightBake *`) for a synchronous (non-job) bake.
 * The depsgraph is allocated here but only evaluated when `EEVEE_NEXT_lightbake_job()` runs.
 */
void *EEVEE_NEXT_lightbake_job_data_alloc(Main *bmain,
                                          ViewLayer *view_layer,
                                          Scene *scene,
                                          blender::Vector<Object *> original_probes,
                                          int frame)
{
  /* `run_as_job = false`: no worker GL context is created. */
  LightBake *job_data = new LightBake(
      bmain, view_layer, scene, std::move(original_probes), false, frame);
  /* TODO(fclem): Can remove this cast once we remove the previous EEVEE light cache. */
  return reinterpret_cast<void *>(job_data);
}
/* Free the job data. `job_data` is an opaque `LightBake *`. */
void EEVEE_NEXT_lightbake_job_data_free(void *job_data)
{
delete static_cast<LightBake *>(job_data);
}
/* Job update/end callback: copy finished bake results back to the original objects. */
void EEVEE_NEXT_lightbake_update(void *job_data)
{
static_cast<LightBake *>(job_data)->update();
}
/* Job worker entry point: run the full bake (all samples) on the worker thread. */
void EEVEE_NEXT_lightbake_job(void *job_data, bool *stop, bool *do_update, float *progress)
{
static_cast<LightBake *>(job_data)->run(stop, do_update, progress);
}
/** \} */

View File

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup eevee
*/
#pragma once
#include "BLI_vector.hh"
struct wmWindowManager;
struct wmWindow;
struct Main;
struct ViewLayer;
struct Scene;
struct Object;
struct wmJob;
/** Opaque type hiding eevee::LightBake. */
struct EEVEE_NEXT_LightBake;
/* -------------------------------------------------------------------- */
/** \name Light Bake Job
* \{ */
/**
* Create the job description.
* This is called for async (modal) bake operator.
* The actual work will be done by `EEVEE_NEXT_lightbake_job()`.
* IMPORTANT: Must run on the main thread because of potential GPUContext creation.
*/
wmJob *EEVEE_NEXT_lightbake_job_create(wmWindowManager *wm,
wmWindow *win,
Main *bmain,
ViewLayer *view_layer,
Scene *scene,
blender::Vector<Object *> original_probes,
int delay_ms,
int frame);
/**
* Allocate dependency graph and job description (EEVEE_NEXT_LightBake).
* Dependency graph evaluation does *not* happen here. It is delayed until
* `EEVEE_NEXT_lightbake_job` runs.
* IMPORTANT: Must run on the main thread because of potential GPUContext creation.
* Return `EEVEE_NEXT_LightBake *` but cast to `void *` because of compatibility with existing
* EEVEE function.
*/
void *EEVEE_NEXT_lightbake_job_data_alloc(Main *bmain,
ViewLayer *view_layer,
Scene *scene,
blender::Vector<Object *> original_probes,
int frame);
/**
* Free the job data.
* NOTE: Does not free the GPUContext. This is the responsibility of `EEVEE_NEXT_lightbake_job()`
*/
void EEVEE_NEXT_lightbake_job_data_free(void *job_data /* EEVEE_NEXT_LightBake */);
/**
* Callback for updating original scene light cache with bake result.
* Run by the job system for each update step and the finish step.
* This is called manually by `EEVEE_NEXT_lightbake_job()` if not run from a job.
*/
void EEVEE_NEXT_lightbake_update(void *job_data /* EEVEE_NEXT_LightBake */);
/**
* Do the full light baking for all samples.
* Will call `EEVEE_NEXT_lightbake_update()` on finish.
*/
void EEVEE_NEXT_lightbake_job(void *job_data /* EEVEE_NEXT_LightBake */,
bool *stop,
bool *do_update,
float *progress);
/** \} */

View File

@ -0,0 +1,138 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup eevee
*
* Module that handles light probe update tagging.
* Lighting data is contained in their respective module `IrradianceCache` and `ReflectionProbes`.
*/
#include "DNA_lightprobe_types.h"
#include "WM_api.h"
#include "eevee_instance.hh"
#include "eevee_lightprobe.hh"
#include "draw_debug.hh"
namespace blender::eevee {
void LightProbeModule::begin_sync()
{
/* Auto-bake only applies to the viewport and only when the scene flag enables it. */
auto_bake_enabled_ = inst_.is_viewport() &&
(inst_.scene->eevee.flag & SCE_EEVEE_GI_AUTOBAKE) != 0;
}
/** Sync one irradiance grid probe object, tagging it for update when needed. */
void LightProbeModule::sync_grid(const Object *ob, ObjectHandle &handle)
{
  IrradianceGrid &grid = grid_map_.lookup_or_add_default(handle.object_key);
  /* Tag as seen this sync cycle so it does not get pruned in `end_sync`. */
  grid.used = true;

  const bool needs_update = (handle.recalc != 0) || !grid.initialized;
  if (!needs_update) {
    return;
  }

  grid.initialized = true;
  grid.updated = true;
  grid.object_to_world = float4x4(ob->object_to_world);
  /* Normalized rotation only, no translation (used for rotating spherical harmonics). */
  grid.world_to_object = float4x4(
      math::normalize(math::transpose(float3x3(grid.object_to_world))));
  grid.cache = ob->lightprobe_cache;
  /* Force reupload. */
  inst_.irradiance_cache.bricks_free(grid.bricks);
}
/** Sync one reflection cube-map probe, tagging the module for update when needed. */
void LightProbeModule::sync_cube(ObjectHandle &handle)
{
  ReflectionCube &cube = cube_map_.lookup_or_add_default(handle.object_key);
  /* Tag as seen this sync cycle so it does not get pruned in `end_sync`. */
  cube.used = true;
  if (!cube.initialized || (handle.recalc != 0)) {
    cube.initialized = true;
    cube_update_ = true;
  }
}
/** Dispatch probe sync to the handler matching the light-probe type. */
void LightProbeModule::sync_probe(const Object *ob, ObjectHandle &handle)
{
  const ::LightProbe *lightprobe = static_cast<const ::LightProbe *>(ob->data);
  switch (lightprobe->type) {
    case LIGHTPROBE_TYPE_CUBE:
      sync_cube(handle);
      break;
    case LIGHTPROBE_TYPE_PLANAR:
      /* TODO(fclem): Remove support? Add support? */
      break;
    case LIGHTPROBE_TYPE_GRID:
      sync_grid(ob, handle);
      break;
    default:
      /* Unknown probe type. */
      BLI_assert_unreachable();
      break;
  }
}
/* Prune probes deleted since last sync and gather the per-type update flags. */
void LightProbeModule::end_sync()
{
{
/* Check for deleted or updated grid. */
grid_update_ = false;
auto it_end = grid_map_.items().end();
/* NOTE(review): removing while iterating relies on `Map::remove(iterator)` keeping the
 * iterator valid for the following `++it` — confirm against BLI_map.hh semantics. */
for (auto it = grid_map_.items().begin(); it != it_end; ++it) {
IrradianceGrid &grid = (*it).value;
if (grid.updated) {
/* Consume the update tag set during sync. */
grid.updated = false;
grid_update_ = true;
}
if (!grid.used) {
/* Not seen this sync: the object was deleted. Release its atlas bricks and forget it. */
inst_.irradiance_cache.bricks_free(grid.bricks);
grid_map_.remove(it);
grid_update_ = true;
continue;
}
/* Untag for next sync. */
grid.used = false;
}
}
{
/* Check for deleted or updated cube. */
cube_update_ = false;
auto it_end = cube_map_.items().end();
for (auto it = cube_map_.items().begin(); it != it_end; ++it) {
ReflectionCube &cube = (*it).value;
if (cube.updated) {
/* Consume the update tag set during sync. */
cube.updated = false;
cube_update_ = true;
}
if (!cube.used) {
/* Not seen this sync: the object was deleted. */
cube_map_.remove(it);
cube_update_ = true;
continue;
}
/* Untag for next sync. */
cube.used = false;
}
}
#if 0 /* TODO make this work with new per object light cache. */
/* If light-cache auto-update is enable we tag the relevant part
* of the cache to update and fire up a baking job. */
if (auto_bake_enabled_ && (grid_update_ || cube_update_)) {
Scene *original_scene = DEG_get_input_scene(inst_.depsgraph);
LightCache *light_cache = original_scene->eevee.light_cache_data;
if (light_cache != nullptr) {
if (grid_update_) {
light_cache->flag |= LIGHTCACHE_UPDATE_GRID;
}
/* TODO(fclem): Reflection Cubemap should capture albedo + normal and be
* relit at runtime. So no dependency like in the old system. */
if (cube_update_) {
light_cache->flag |= LIGHTCACHE_UPDATE_CUBE;
}
/* Tag the lightcache to auto update. */
light_cache->flag |= LIGHTCACHE_UPDATE_AUTO;
/* Use a notifier to trigger the operator after drawing. */
/* TODO(fclem): Avoid usage of global DRW. */
WM_event_add_notifier(DRW_context_state_get()->evil_C, NC_LIGHTPROBE, original_scene);
}
}
#endif
}
} // namespace blender::eevee

View File

@ -0,0 +1,78 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup eevee
*
* Module that handles light probe update tagging.
* Lighting data is contained in their respective module `IrradianceCache` and `ReflectionProbes`.
*/
#pragma once
#include "BLI_map.hh"
#include "eevee_sync.hh"
namespace blender::eevee {
class Instance;
class IrradianceCache;
/* Common per-probe bookkeeping tags, shared by grid and cube probes. */
struct LightProbe {
/* Tagged during sync when the object is seen; cleared (or pruned) in `end_sync`. */
bool used = false;
/* True once the probe has been synced at least once. */
bool initialized = false;
/* Tagged on first sync or when the object handle reports a recalc; consumed in `end_sync`. */
bool updated = false;
};
/** Per-object state of one irradiance grid probe, tracked across syncs. */
struct IrradianceGrid : public LightProbe, IrradianceGridData {
  /** Copy of the transform matrix. */
  float4x4 object_to_world;
  /** Precomputed inverse transform with normalized axes. No position. Used for rotating SH. */
  float4x4 world_to_object;
  /**
   * Reference to the light-cache data.
   * Do not try to dereference it before LightProbeModule::end_sync() as the grid could
   * already have been freed (along with its cache). It is only safe to dereference after the
   * pruning have been done.
   */
  const struct LightProbeObjectCache *cache = nullptr;
  /** List of associated atlas bricks that are used by this grid. */
  Vector<IrradianceBrickPacked> bricks;
  /** Index of the grid inside the grid UBO. Initialized to -1 (invalid) until assigned. */
  int grid_index = -1;
};
/** Reflection cube-map probe. Only carries the common tracking state for now. */
struct ReflectionCube : public LightProbe {
};
/**
 * Tracks light probe objects to detect creation, deletion and updates.
 * Lighting data itself is contained in the `IrradianceCache` and reflection probe modules.
 */
class LightProbeModule {
  friend class IrradianceCache;

 private:
  Instance &inst_;

  /** Light Probe map to detect deletion and store associated data. */
  Map<ObjectKey, IrradianceGrid> grid_map_;
  Map<ObjectKey, ReflectionCube> cube_map_;
  /** True if a grid update was detected. It will trigger a bake if auto bake is enabled. */
  bool grid_update_;
  /** True if a cube-map update was detected. It will trigger a bake if auto bake is enabled. */
  bool cube_update_;
  /** True if the auto bake feature is enabled & available in this context. */
  bool auto_bake_enabled_;

 public:
  LightProbeModule(Instance &inst) : inst_(inst){};
  ~LightProbeModule(){};

  void begin_sync();

  void sync_cube(ObjectHandle &handle);
  void sync_grid(const Object *ob, ObjectHandle &handle);
  /* Dispatches to `sync_cube` / `sync_grid` based on probe type — confirm in implementation. */
  void sync_probe(const Object *ob, ObjectHandle &handle);

  void end_sync();
};
} // namespace blender::eevee

View File

@ -164,9 +164,11 @@ MaterialPass MaterialModule::material_pass_get(Object *ob,
blender_mat->nodetree :
default_surface_ntree_.nodetree_get(blender_mat);
bool use_deferred_compilation = inst_.is_viewport();
MaterialPass matpass = MaterialPass();
matpass.gpumat = inst_.shaders.material_shader_get(
blender_mat, ntree, pipeline_type, geometry_type, true);
blender_mat, ntree, pipeline_type, geometry_type, use_deferred_compilation);
switch (GPU_material_status(matpass.gpumat)) {
case GPU_MAT_SUCCESS:
@ -240,9 +242,20 @@ Material &MaterialModule::material_sync(Object *ob,
Material &mat = material_map_.lookup_or_add_cb(material_key, [&]() {
Material mat;
/* Order is important for transparent. */
mat.prepass = material_pass_get(ob, blender_mat, prepass_pipe, geometry_type);
mat.shading = material_pass_get(ob, blender_mat, surface_pipe, geometry_type);
if (inst_.is_baking()) {
mat.prepass = MaterialPass();
/* TODO(fclem): Still need the shading pass for correct attribute extraction. Would be better
* to avoid this shader compilation in another context. */
mat.shading = material_pass_get(ob, blender_mat, surface_pipe, geometry_type);
mat.capture = material_pass_get(ob, blender_mat, MAT_PIPE_CAPTURE, geometry_type);
}
else {
/* Order is important for transparent. */
mat.prepass = material_pass_get(ob, blender_mat, prepass_pipe, geometry_type);
mat.shading = material_pass_get(ob, blender_mat, surface_pipe, geometry_type);
mat.capture = MaterialPass();
}
if (blender_mat->blend_shadow == MA_BS_NONE) {
mat.shadow = MaterialPass();
}
@ -252,6 +265,12 @@ Material &MaterialModule::material_sync(Object *ob,
mat.is_alpha_blend_transparent = (blender_mat->blend_method == MA_BM_BLEND) &&
GPU_material_flag_get(mat.shading.gpumat,
GPU_MATFLAG_TRANSPARENT);
if (inst_.is_baking()) {
/* WORKAROUND(fclem): This is to request the shadow for the surfels. This will well
* over-request the number of shadow tiles. A better way would be to request from the surfels
* directly. */
mat.is_alpha_blend_transparent = true;
}
return mat;
});

View File

@ -34,6 +34,7 @@ enum eMaterialPipeline {
MAT_PIPE_FORWARD_PREPASS_VELOCITY,
MAT_PIPE_VOLUME,
MAT_PIPE_SHADOW,
MAT_PIPE_CAPTURE,
};
enum eMaterialGeometry {
@ -48,16 +49,16 @@ static inline void material_type_from_shader_uuid(uint64_t shader_uuid,
eMaterialPipeline &pipeline_type,
eMaterialGeometry &geometry_type)
{
const uint64_t geometry_mask = ((1u << 3u) - 1u);
const uint64_t pipeline_mask = ((1u << 3u) - 1u);
const uint64_t geometry_mask = ((1u << 4u) - 1u);
const uint64_t pipeline_mask = ((1u << 4u) - 1u);
geometry_type = static_cast<eMaterialGeometry>(shader_uuid & geometry_mask);
pipeline_type = static_cast<eMaterialPipeline>((shader_uuid >> 3u) & pipeline_mask);
pipeline_type = static_cast<eMaterialPipeline>((shader_uuid >> 4u) & pipeline_mask);
}
static inline uint64_t shader_uuid_from_material_type(eMaterialPipeline pipeline_type,
eMaterialGeometry geometry_type)
{
return geometry_type | (pipeline_type << 3);
return geometry_type | (pipeline_type << 4);
}
ENUM_OPERATORS(eClosureBits, CLOSURE_AMBIENT_OCCLUSION)
@ -213,7 +214,7 @@ struct MaterialPass {
struct Material {
bool is_alpha_blend_transparent;
MaterialPass shadow, shading, prepass;
MaterialPass shadow, shading, prepass, capture;
};
struct MaterialArray {

View File

@ -254,6 +254,7 @@ void ForwardPipeline::render(View &view,
// }
inst_.shadows.set_view(view);
inst_.irradiance_cache.set_view(view);
GPU_framebuffer_bind(combined_fb);
inst_.manager->submit(opaque_ps_, view);
@ -370,6 +371,7 @@ void DeferredLayer::end_sync()
inst_.shadows.bind_resources(&eval_light_ps_);
inst_.sampling.bind_resources(&eval_light_ps_);
inst_.hiz_buffer.bind_resources(&eval_light_ps_);
inst_.irradiance_cache.bind_resources(&eval_light_ps_);
eval_light_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_IMAGE_ACCESS);
eval_light_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
@ -412,6 +414,7 @@ void DeferredLayer::render(View &view,
inst_.hiz_buffer.set_dirty();
inst_.shadows.set_view(view);
inst_.irradiance_cache.set_view(view);
inst_.gbuffer.acquire(extent, closure_bits_);
@ -494,4 +497,37 @@ void DeferredPipeline::render(View &view,
/** \} */
/* -------------------------------------------------------------------- */
/** \name Capture Pipeline
*
* \{ */
/* Setup the surfel capture pass. The scene is rasterized with no color/depth attachment;
 * surface samples are written to the surfel SSBO instead. */
void CapturePipeline::sync()
{
  surface_ps_.init();
  /* Surfel output is done using a SSBO, so no need for a fragment shader output color or depth. */
  /* WORKAROUND: Avoid rasterizer discard, but the shaders actually use no fragment output. */
  surface_ps_.state_set(DRW_STATE_WRITE_STENCIL);
  surface_ps_.framebuffer_set(&inst_.irradiance_cache.bake.empty_raster_fb_);

  surface_ps_.bind_ssbo(SURFEL_BUF_SLOT, &inst_.irradiance_cache.bake.surfels_buf_);
  surface_ps_.bind_ssbo(CAPTURE_BUF_SLOT, &inst_.irradiance_cache.bake.capture_info_buf_);

  surface_ps_.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);
  /* TODO(fclem): Remove. There should be no view dependent behavior during capture. */
  surface_ps_.bind_ubo(CAMERA_BUF_SLOT, inst_.camera.ubo_get());
}
/**
 * Create (or retrieve) the sub-pass of the capture pass dedicated to the given material.
 * The sub-pass is named after the material's shader for debugging.
 */
PassMain::Sub *CapturePipeline::surface_material_add(GPUMaterial *gpumat)
{
  PassMain::Sub &material_sub = surface_ps_.sub(GPU_material_get_name(gpumat));
  return &material_sub;
}
/* Submit the capture pass, rasterizing the scene from the given view to emit surfels. */
void CapturePipeline::render(View &view)
{
  inst_.manager->submit(surface_ps_, view);
}
/** \} */
} // namespace blender::eevee

View File

@ -182,6 +182,28 @@ class DeferredPipeline {
/** \} */
/* -------------------------------------------------------------------- */
/** \name Capture Pipeline
*
* \{ */
class CapturePipeline {
private:
Instance &inst_;
PassMain surface_ps_ = {"Capture.Surface"};
public:
CapturePipeline(Instance &inst) : inst_(inst){};
PassMain::Sub *surface_material_add(GPUMaterial *gpumat);
void sync();
void render(View &view);
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Utility texture
*
@ -269,17 +291,20 @@ class PipelineModule {
DeferredPipeline deferred;
ForwardPipeline forward;
ShadowPipeline shadow;
CapturePipeline capture;
UtilityTexture utility_tx;
public:
PipelineModule(Instance &inst) : world(inst), deferred(inst), forward(inst), shadow(inst){};
PipelineModule(Instance &inst)
: world(inst), deferred(inst), forward(inst), shadow(inst), capture(inst){};
void begin_sync()
{
deferred.begin_sync();
forward.sync();
shadow.sync();
capture.sync();
}
void end_sync()
@ -322,6 +347,8 @@ class PipelineModule {
return nullptr;
case MAT_PIPE_SHADOW:
return shadow.surface_material_add(gpumat);
case MAT_PIPE_CAPTURE:
return capture.surface_material_add(gpumat);
}
return nullptr;
}

View File

@ -10,6 +10,9 @@
#include "BLI_rand.h"
#include "BLI_math_base.hh"
#include "BLI_math_base_safe.h"
#include "eevee_instance.hh"
#include "eevee_sampling.hh"
@ -53,6 +56,15 @@ void Sampling::init(const Scene *scene)
sample_count_ *= motion_blur_steps_;
}
/* Initialize sampling for an irradiance probe baking job.
 * The target sample count comes from the probe's `grid_bake_samples` setting,
 * clamped to at least one sample. Only valid while baking. */
void Sampling::init(const Object &probe_object)
{
  BLI_assert(inst_.is_baking());
  const ::LightProbe *lightprobe = static_cast<::LightProbe *>(probe_object.data);
  sample_count_ = max_ii(1, lightprobe->grid_bake_samples);
  sample_ = 0;
}
void Sampling::end_sync()
{
if (reset_) {
@ -174,6 +186,22 @@ float2 Sampling::sample_disk(const float2 &rand)
return sqrtf(rand.x) * float2(cosf(omega), sinf(omega));
}
/**
 * Uniform hemisphere distribution.
 * \a rand is 2 random floats in the [0..1] range.
 * Returns a point on the Z-positive unit hemisphere centered on the origin.
 */
float3 Sampling::sample_hemisphere(const float2 &rand)
{
  /* A uniform Z in [0..1] yields a uniform distribution over the hemisphere area. */
  const float z = rand.x;
  const float r = safe_sqrtf(1.0f - square_f(z));
  const float phi = rand.y * 2.0f * M_PI;
  return float3(r * cosf(phi), r * sinf(phi), z);
}
/**
 * Uniform sphere distribution.
 * \a rand is 2 random floats in the [0..1] range.
 * Returns a point on the unit sphere centered on the origin.
 */
float3 Sampling::sample_sphere(const float2 &rand)
{
  /* Remap rand.x to a uniform Z in [-1..1] for a uniform distribution over the sphere area. */
  const float z = rand.x * 2.0f - 1.0f;
  const float r = safe_sqrtf(1.0f - square_f(z));
  const float phi = rand.y * 2.0f * M_PI;
  return float3(r * cosf(phi), r * sinf(phi), z);
}
float2 Sampling::sample_spiral(const float2 &rand)
{
/* Fibonacci spiral. */

View File

@ -66,6 +66,7 @@ class Sampling {
~Sampling(){};
void init(const Scene *scene);
void init(const Object &probe_object);
void end_sync();
void step();
@ -123,11 +124,18 @@ class Sampling {
return interactive_mode_;
}
/* Target sample count. */
uint64_t sample_count() const
{
return sample_count_;
}
/* 0 based current sample. Might not increase sequentially in viewport. */
uint64_t sample_index() const
{
return sample_;
}
/* Return true if we are starting a new motion blur step. We need to run sync again since
* depsgraph was updated by MotionBlur::step(). */
bool do_render_sync() const
@ -152,6 +160,20 @@ class Sampling {
*/
static float2 sample_disk(const float2 &rand);
/**
* Uniform hemisphere distribution.
* \a rand is 2 random float in the [0..1] range.
* Returns point on a Z positive hemisphere of radius 1 and centered on the origin.
*/
static float3 sample_hemisphere(const float2 &rand);
/**
* Uniform sphere distribution.
* \a rand is 2 random float in the [0..1] range.
* Returns point on the sphere of radius 1 and centered on the origin.
*/
static float3 sample_sphere(const float2 &rand);
/**
* Uniform disc distribution using Fibonacci spiral sampling.
* \a rand is 2 random float in the [0..1] range.

View File

@ -102,6 +102,8 @@ const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_
return "eevee_motion_blur_tiles_flatten_viewport";
case DEBUG_SURFELS:
return "eevee_debug_surfels";
case DISPLAY_PROBE_GRID:
return "eevee_display_probe_grid";
case DOF_BOKEH_LUT:
return "eevee_depth_of_field_bokeh_lut";
case DOF_DOWNSAMPLE:
@ -146,6 +148,12 @@ const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_
return "eevee_light_culling_tile";
case LIGHT_CULLING_ZBIN:
return "eevee_light_culling_zbin";
case LIGHTPROBE_IRRADIANCE_BOUNDS:
return "eevee_lightprobe_irradiance_bounds";
case LIGHTPROBE_IRRADIANCE_RAY:
return "eevee_lightprobe_irradiance_ray";
case LIGHTPROBE_IRRADIANCE_LOAD:
return "eevee_lightprobe_irradiance_load";
case SHADOW_CLIPMAP_CLEAR:
return "eevee_shadow_clipmap_clear";
case SHADOW_DEBUG:
@ -170,10 +178,20 @@ const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_
return "eevee_shadow_tag_update";
case SHADOW_TILEMAP_TAG_USAGE_OPAQUE:
return "eevee_shadow_tag_usage_opaque";
case SHADOW_TILEMAP_TAG_USAGE_SURFELS:
return "eevee_shadow_tag_usage_surfels";
case SHADOW_TILEMAP_TAG_USAGE_TRANSPARENT:
return "eevee_shadow_tag_usage_transparent";
case SUBSURFACE_EVAL:
return "eevee_subsurface_eval";
case SURFEL_LIGHT:
return "eevee_surfel_light";
case SURFEL_LIST_BUILD:
return "eevee_surfel_list_build";
case SURFEL_LIST_SORT:
return "eevee_surfel_list_sort";
case SURFEL_RAY:
return "eevee_surfel_ray";
/* To avoid compiler warning about missing case. */
case MAX_SHADER_TYPE:
return "";
@ -411,7 +429,6 @@ void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOu
info.additional_info("eevee_geom_curves");
break;
case MAT_GEOM_MESH:
default:
info.additional_info("eevee_geom_mesh");
break;
}
@ -436,6 +453,9 @@ void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOu
case MAT_PIPE_SHADOW:
info.additional_info("eevee_surf_shadow");
break;
case MAT_PIPE_CAPTURE:
info.additional_info("eevee_surf_capture");
break;
case MAT_PIPE_DEFERRED:
info.additional_info("eevee_surf_deferred");
break;
@ -443,7 +463,7 @@ void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOu
info.additional_info("eevee_surf_forward");
break;
default:
BLI_assert(0);
BLI_assert_unreachable();
break;
}
break;

View File

@ -34,6 +34,8 @@ enum eShaderType {
DEBUG_SURFELS,
DISPLAY_PROBE_GRID,
DOF_BOKEH_LUT,
DOF_DOWNSAMPLE,
DOF_FILTER,
@ -61,6 +63,10 @@ enum eShaderType {
LIGHT_CULLING_TILE,
LIGHT_CULLING_ZBIN,
LIGHTPROBE_IRRADIANCE_BOUNDS,
LIGHTPROBE_IRRADIANCE_RAY,
LIGHTPROBE_IRRADIANCE_LOAD,
MOTION_BLUR_GATHER,
MOTION_BLUR_TILE_DILATE,
MOTION_BLUR_TILE_FLATTEN_RENDER,
@ -78,10 +84,16 @@ enum eShaderType {
SHADOW_TILEMAP_INIT,
SHADOW_TILEMAP_TAG_UPDATE,
SHADOW_TILEMAP_TAG_USAGE_OPAQUE,
SHADOW_TILEMAP_TAG_USAGE_SURFELS,
SHADOW_TILEMAP_TAG_USAGE_TRANSPARENT,
SUBSURFACE_EVAL,
SURFEL_LIGHT,
SURFEL_LIST_BUILD,
SURFEL_LIST_SORT,
SURFEL_RAY,
MAX_SHADER_TYPE,
};

View File

@ -53,7 +53,8 @@ enum eDebugMode : uint32_t {
/**
* Display IrradianceCache surfels.
*/
DEBUG_IRRADIANCE_CACHE_SURFELS = 3u,
DEBUG_IRRADIANCE_CACHE_SURFELS_NORMAL = 3u,
DEBUG_IRRADIANCE_CACHE_SURFELS_IRRADIANCE = 4u,
/**
* Show tiles depending on their status.
*/
@ -838,17 +839,109 @@ static inline ShadowTileDataPacked shadow_tile_pack(ShadowTileData tile)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Debug
/** \name Irradiance Cache
* \{ */
struct DebugSurfel {
packed_float3 position;
int _pad0;
packed_float3 normal;
int _pad1;
float4 color;
struct SurfelRadiance {
float4 front;
float4 back;
};
BLI_STATIC_ASSERT_ALIGN(DebugSurfel, 16)
BLI_STATIC_ASSERT_ALIGN(SurfelRadiance, 16)
/** A surface element (surfel) used for light transport during irradiance baking.
 * Layout is shared between C++ and GLSL; keep 16-byte alignment. */
struct Surfel {
  /** World position of the surfel. */
  packed_float3 position;
  /** Previous surfel index in the ray link-list. Only valid after sorting. */
  int prev;
  /** World orientation of the surface. */
  packed_float3 normal;
  /** Next surfel index in the ray link-list. */
  int next;
  /** Surface albedo to apply to incoming radiance (front face). */
  packed_float3 albedo_front;
  /** Distance along the ray direction for sorting. */
  float ray_distance;
  /** Surface albedo to apply to incoming radiance (back face). */
  packed_float3 albedo_back;
  int _pad3;
  /** Surface radiance: Emission + Direct Lighting. */
  SurfelRadiance radiance_direct;
  /** Surface radiance: Indirect Lighting. Double buffered to avoid race conditions. */
  SurfelRadiance radiance_indirect[2];
};
BLI_STATIC_ASSERT_ALIGN(Surfel, 16)
/** Parameters and results of an irradiance capture / bake job, shared with shaders. */
struct CaptureInfoData {
  /** Number of samples per axis of the irradiance grid being baked. */
  packed_int3 irradiance_grid_size;
  /** True if the surface shader needs to write the surfel data. */
  bool1 do_surfel_output;
  /** True if the surface shader needs to increment the surfel_len. */
  bool1 do_surfel_count;
  /** Number of surfels inside the surfel buffer or the needed len. */
  uint surfel_len;
  /** Total number of rays used for light transport. */
  float sample_count;
  /** 0 based sample index. */
  float sample_index;
  /** Transform of the lightprobe object. */
  float4x4 irradiance_grid_local_to_world;
  /** Transform vectors from world space to local space. Does not have location component. */
  /** TODO(fclem): This could be a float3x4 or a float3x3 if padded correctly. */
  float4x4 irradiance_grid_world_to_local_rotation;
  /** Scene bounds. Stored as min & max and as int for atomic operations.
   * Floats are stored as ordered integers (see `floatBitsToOrderedInt` in the bounds shader). */
  int scene_bound_x_min;
  int scene_bound_y_min;
  int scene_bound_z_min;
  int scene_bound_x_max;
  int scene_bound_y_max;
  int scene_bound_z_max;
  int _pad0;
  int _pad1;
  int _pad2;
};
BLI_STATIC_ASSERT_ALIGN(CaptureInfoData, 16)
/** Parameters of the surfel list building/sorting passes, shared with shaders. */
struct SurfelListInfoData {
  /** Size of the grid used to project the surfels into linked lists. */
  int2 ray_grid_size;
  /** Maximum number of lists. Is equal to `ray_grid_size.x * ray_grid_size.y`. */
  int list_max;

  int _pad0;
};
BLI_STATIC_ASSERT_ALIGN(SurfelListInfoData, 16)
/** Per-grid data needed to locate and sample an irradiance grid, shared with shaders. */
struct IrradianceGridData {
  /** World to non-normalized local grid space [0..size-1]. Stored transposed for compactness. */
  float3x4 world_to_grid_transposed;
  /** Number of bricks for this grid. A value of -1 in `.x` tags the grid as invalid. */
  packed_int3 grid_size;
  /** Index in brick descriptor list of the first brick of this grid. */
  int brick_offset;
};
BLI_STATIC_ASSERT_ALIGN(IrradianceGridData, 16)
/** One brick of irradiance data inside the atlas texture. */
struct IrradianceBrick {
  /* Offset in pixel to the start of the data inside the atlas texture. */
  uint2 atlas_coord;
};
/** \note Stored packed as a uint. */
#define IrradianceBrickPacked uint
/** Pack the brick atlas coordinate into a single uint:
 * `atlas_coord.x` in bits [0..15], `atlas_coord.y` in bits [16..31]. */
static inline IrradianceBrickPacked irradiance_brick_pack(IrradianceBrick brick)
{
  uint2 data = (uint2(brick.atlas_coord) & 0xFFFFu) << uint2(0u, 16u);
  IrradianceBrickPacked brick_packed = data.x | data.y;
  return brick_packed;
}
/** Inverse of `irradiance_brick_pack`: extract the two 16-bit atlas coordinates. */
static inline IrradianceBrick irradiance_brick_unpack(IrradianceBrickPacked brick_packed)
{
  IrradianceBrick brick;
  brick.atlas_coord = (uint2(brick_packed) >> uint2(0u, 16u)) & uint2(0xFFFFu);
  return brick;
}
/** \} */
@ -959,8 +1052,9 @@ using DepthOfFieldDataBuf = draw::UniformBuffer<DepthOfFieldData>;
using DepthOfFieldScatterListBuf = draw::StorageArrayBuffer<ScatterRect, 16, true>;
using DrawIndirectBuf = draw::StorageBuffer<DrawCommand, true>;
using FilmDataBuf = draw::UniformBuffer<FilmData>;
using DebugSurfelBuf = draw::StorageArrayBuffer<DebugSurfel, 64>;
using HiZDataBuf = draw::UniformBuffer<HiZData>;
using IrradianceGridDataBuf = draw::UniformArrayBuffer<IrradianceGridData, IRRADIANCE_GRID_MAX>;
using IrradianceBrickBuf = draw::StorageVectorBuffer<IrradianceBrickPacked, 16>;
using LightCullingDataBuf = draw::StorageBuffer<LightCullingData>;
using LightCullingKeyBuf = draw::StorageArrayBuffer<uint, LIGHT_CHUNK, true>;
using LightCullingTileBuf = draw::StorageArrayBuffer<uint, LIGHT_CHUNK, true>;
@ -978,6 +1072,10 @@ using ShadowTileMapDataBuf = draw::StorageVectorBuffer<ShadowTileMapData, SHADOW
using ShadowTileMapClipBuf = draw::StorageArrayBuffer<ShadowTileMapClip, SHADOW_MAX_TILEMAP, true>;
using ShadowTileDataBuf = draw::StorageArrayBuffer<ShadowTileDataPacked, SHADOW_MAX_TILE, true>;
using SubsurfaceDataBuf = draw::UniformBuffer<SubsurfaceData>;
using SurfelBuf = draw::StorageArrayBuffer<Surfel, 64>;
using SurfelRadianceBuf = draw::StorageArrayBuffer<SurfelRadiance, 64>;
using CaptureInfoBuf = draw::StorageBuffer<CaptureInfoData>;
using SurfelListInfoBuf = draw::StorageBuffer<SurfelListInfoData>;
using VelocityGeometryBuf = draw::StorageArrayBuffer<float4, 16, true>;
using VelocityIndexBuf = draw::StorageArrayBuffer<VelocityIndex, 16>;
using VelocityObjectBuf = draw::StorageArrayBuffer<float4x4, 16>;

View File

@ -708,6 +708,32 @@ void ShadowModule::begin_sync()
PassMain &pass = tilemap_usage_ps_;
pass.init();
if (inst_.is_baking()) {
SurfelBuf &surfels_buf = inst_.irradiance_cache.bake.surfels_buf_;
CaptureInfoBuf &capture_info_buf = inst_.irradiance_cache.bake.capture_info_buf_;
float surfel_coverage_area = inst_.irradiance_cache.bake.surfel_density_;
/* Directional shadows. */
float texel_size = ShadowDirectional::tile_size_get(0) / float(SHADOW_PAGE_RES);
int directional_level = std::max(0, int(std::ceil(log2(surfel_coverage_area / texel_size))));
/* Punctual shadows. */
float projection_ratio = tilemap_pixel_radius() / (surfel_coverage_area / 2.0);
PassMain::Sub &sub = pass.sub("Surfels");
sub.shader_set(inst_.shaders.static_shader_get(SHADOW_TILEMAP_TAG_USAGE_SURFELS));
sub.bind_ssbo("tilemaps_buf", &tilemap_pool.tilemaps_data);
sub.bind_ssbo("tiles_buf", &tilemap_pool.tiles_data);
sub.bind_ssbo("surfel_buf", &surfels_buf);
sub.bind_ssbo("capture_info_buf", &capture_info_buf);
sub.push_constant("directional_level", directional_level);
sub.push_constant("tilemap_projection_ratio", projection_ratio);
inst_.lights.bind_resources(&sub);
sub.dispatch(&inst_.irradiance_cache.bake.dispatch_per_surfel_);
/* Skip opaque and transparent tagging for light baking. */
return;
}
{
/** Use depth buffer to tag needed shadow pages for opaque geometry. */
PassMain::Sub &sub = pass.sub("Opaque");
@ -768,7 +794,7 @@ void ShadowModule::sync_object(const ObjectHandle &handle,
curr_casters_.append(resource_handle.raw);
}
if (is_alpha_blend) {
if (is_alpha_blend && !inst_.is_baking()) {
tilemap_usage_transparent_ps_->draw(box_batch_, resource_handle);
}
}

View File

@ -131,6 +131,7 @@ void SyncModule::sync_mesh(Object *ob,
geometry_call(material.shading.sub_pass, geom, res_handle);
geometry_call(material.prepass.sub_pass, geom, res_handle);
geometry_call(material.shadow.sub_pass, geom, res_handle);
geometry_call(material.capture.sub_pass, geom, res_handle);
is_shadow_caster = is_shadow_caster || material.shadow.sub_pass != nullptr;
is_alpha_blend = is_alpha_blend || material.is_alpha_blend_transparent;

View File

@ -134,8 +134,7 @@ void ShadingView::render()
inst_.lights.debug_draw(render_view_new_, combined_fb_);
inst_.hiz_buffer.debug_draw(render_view_new_, combined_fb_);
inst_.shadows.debug_draw(render_view_new_, combined_fb_);
inst_.irradiance_cache.debug_draw(render_view_new_, combined_fb_);
inst_.irradiance_cache.viewport_draw(render_view_new_, combined_fb_);
GPUTexture *combined_final_tx = render_postfx(rbufs.combined_tx);

View File

@ -1,21 +1,26 @@
void main()
{
DebugSurfel surfel = surfels_buf[surfel_index];
out_color = surfel.color;
Surfel surfel = surfels_buf[surfel_index];
vec3 radiance = vec3(0.0);
radiance += gl_FrontFacing ? surfel.radiance_direct.front.rgb : surfel.radiance_direct.back.rgb;
radiance += gl_FrontFacing ? surfel.radiance_indirect[1].front.rgb :
surfel.radiance_indirect[1].back.rgb;
switch (eDebugMode(debug_mode)) {
default:
case DEBUG_IRRADIANCE_CACHE_SURFELS_NORMAL:
out_color = vec4(pow(surfel.normal * 0.5 + 0.5, vec3(2.2)), 0.0);
break;
case DEBUG_IRRADIANCE_CACHE_SURFELS_IRRADIANCE:
out_color = vec4(radiance, 0.0);
break;
}
/* Display surfels as circles. */
if (distance(P, surfel.position) > surfel_radius) {
discard;
return;
}
/* Display backfacing surfels with a transparent checkerboard grid. */
if (!gl_FrontFacing) {
ivec2 grid_uv = ivec2(gl_FragCoord.xy) / 5;
if ((grid_uv.x + grid_uv.y) % 2 == 0) {
discard;
return;
}
}
}

View File

@ -1,10 +1,21 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_geom_lib.glsl)
#pragma BLENDER_REQUIRE(common_debug_draw_lib.glsl)
void main()
{
surfel_index = gl_InstanceID;
DebugSurfel surfel = surfels_buf[surfel_index];
Surfel surfel = surfels_buf[surfel_index];
#if 0 /* Debug surfel lists. TODO allow in release build with a dedicated shader. */
if (gl_VertexID == 0 && surfel.next > -1) {
Surfel surfel_next = surfels_buf[surfel.next];
vec4 line_color = (surfel.prev == -1) ? vec4(1.0, 1.0, 0.0, 1.0) :
(surfel_next.next == -1) ? vec4(0.0, 1.0, 1.0, 1.0) :
vec4(0.0, 1.0, 0.0, 1.0);
drw_debug_line(surfel_next.position, surfel.position, line_color);
}
#endif
vec3 lP;
@ -35,4 +46,5 @@ void main()
P = (model_matrix * vec4(lP, 1)).xyz;
gl_Position = point_world_to_ndc(P);
gl_Position.z -= 2.5e-5;
}

View File

@ -9,6 +9,7 @@
#pragma BLENDER_REQUIRE(eevee_gbuffer_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_light_eval_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_lightprobe_eval_lib.glsl)
void main()
{
@ -53,6 +54,8 @@ void main()
vec3 reflection_light = vec3(0.0);
float shadow = 1.0;
lightprobe_eval(diffuse_data, reflection_data, P, Ng, V, diffuse_light, reflection_light);
light_eval(diffuse_data,
reflection_data,
P,

View File

@ -0,0 +1,28 @@
#pragma BLENDER_REQUIRE(eevee_spherical_harmonics_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
/* Shade one probe sphere of the irradiance grid display: reconstruct the sphere normal from
 * the quad-local coordinate `lP` and evaluate the cell's L1 SH irradiance along it. */
void main()
{
  float dist_sqr = dot(lP, lP);

  /* Discard outside the circle. */
  if (dist_sqr > 1.0) {
    discard;
    return;
  }

  /* Fetch the four L1 SH coefficients stored for this grid cell. */
  SphericalHarmonicL1 sh;
  sh.L0.M0 = texelFetch(irradiance_a_tx, cell, 0);
  sh.L1.Mn1 = texelFetch(irradiance_b_tx, cell, 0);
  sh.L1.M0 = texelFetch(irradiance_c_tx, cell, 0);
  sh.L1.Mp1 = texelFetch(irradiance_d_tx, cell, 0);

  /* View-space normal of the sphere at this fragment. */
  vec3 vN = vec3(lP, sqrt(max(0.0, 1.0 - dist_sqr)));
  vec3 N = normal_view_to_world(vN);
  /* SH coefficients are stored in grid-local space, so rotate the normal into it. */
  vec3 lN = transform_direction(world_to_grid, N);

  vec3 irradiance = spherical_harmonics_evaluate_lambert_non_linear(lN, sh);

  out_color = vec4(irradiance, 0.0);
}

View File

@ -0,0 +1,34 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_lightprobe_lib.glsl)
/* Emit one camera-facing quad (2 triangles, 6 vertices) per irradiance grid cell. */
void main()
{
  /* Constant array moved inside function scope.
   * Minimises local register allocation in MSL. */
  const vec2 pos[6] = vec2[6](vec2(-1.0, -1.0),
                              vec2(1.0, -1.0),
                              vec2(-1.0, 1.0),
                              vec2(1.0, -1.0),
                              vec2(1.0, 1.0),
                              vec2(-1.0, 1.0));

  lP = pos[gl_VertexID % 6];
  int cell_index = gl_VertexID / 6;

  /* Unflatten the linear cell index into 3D grid coordinates (Z fastest-varying). */
  ivec3 grid_res = grid_resolution;
  cell = ivec3(cell_index / (grid_res.z * grid_res.y),
               (cell_index / grid_res.z) % grid_res.y,
               cell_index % grid_res.z);

  vec3 ws_cell_pos = lightprobe_irradiance_grid_sample_position(grid_to_world, grid_res, cell);

  /* Offset the quad corners in view space so the sphere billboard always faces the camera. */
  vec3 vs_offset = vec3(lP, 0.0) * sphere_radius;
  vec3 vP = (ViewMatrix * vec4(ws_cell_pos, 1.0)).xyz + vs_offset;
  gl_Position = ProjectionMatrix * vec4(vP, 1.0);
  /* Small bias to let the icon draw without zfighting. */
  gl_Position.z += 0.0001;
}

View File

@ -22,7 +22,7 @@ void light_eval_ex(ClosureDiffuse diffuse,
vec3 P,
vec3 Ng,
vec3 V,
float vP_z,
float vP_z, /* TODO(fclem): Remove, is unused. */
float thickness,
vec4 ltc_mat,
uint l_idx,
@ -115,7 +115,11 @@ void light_eval(ClosureDiffuse diffuse,
}
LIGHT_FOREACH_END
#ifdef GPU_FRAGMENT_SHADER
vec2 px = gl_FragCoord.xy;
#else
vec2 px = vec2(0.0);
#endif
LIGHT_FOREACH_BEGIN_LOCAL (light_cull_buf, light_zbin_buf, light_tile_buf, px, vP_z, l_idx) {
light_eval_ex(diffuse,
reflection,

View File

@ -0,0 +1,79 @@
/**
* The resources expected to be defined are:
* - grids_infos_buf
* - bricks_infos_buf
* - irradiance_atlas_tx
*/
#pragma BLENDER_REQUIRE(eevee_lightprobe_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_spherical_harmonics_lib.glsl)
/**
 * Return sample coordinates of the first SH coef in unnormalized texture space.
 */
vec3 lightprobe_irradiance_grid_atlas_coord(IrradianceGridData grid_data, vec3 lP)
{
  /* Find the brick containing the sample. Bricks span `IRRADIANCE_GRID_BRICK_SIZE - 1`
   * samples so adjacent bricks share their edge sample. */
  ivec3 brick_coord = ivec3((lP - 0.5) / float(IRRADIANCE_GRID_BRICK_SIZE - 1));
  /* Avoid sampling adjacent bricks. */
  brick_coord = max(brick_coord, ivec3(0));
  /* Clamp to the first sample position to avoid sampling below the grid. */
  lP = max(lP, vec3(0.5));

  /* Local position inside the brick (still in grid sample spacing unit). */
  vec3 brick_lP = lP - vec3(brick_coord) * float(IRRADIANCE_GRID_BRICK_SIZE - 1);

  int brick_index = lightprobe_irradiance_grid_brick_index_get(grid_data, brick_coord);

  IrradianceBrick brick = irradiance_brick_unpack(bricks_infos_buf[brick_index]);
  vec3 output_coord = vec3(vec2(brick.atlas_coord), 0.0) + brick_lP;

  return output_coord;
}
/* Sample a 3D texture with unnormalized (texel-space) coordinates,
 * keeping hardware filtering (unlike `texelFetch`). */
vec4 textureUnormalizedCoord(sampler3D tx, vec3 co)
{
  vec3 tx_size = vec3(textureSize(tx, 0));
  return texture(tx, co / tx_size);
}
/**
 * Sample the irradiance atlas at world position P.
 * Searches the grid list for the first grid containing P and falls back to the last
 * valid grid if none contains it.
 */
SphericalHarmonicL1 lightprobe_irradiance_sample(sampler3D atlas_tx, vec3 P)
{
  vec3 lP;
  int grid_index;
  for (grid_index = 0; grid_index < IRRADIANCE_GRID_MAX; grid_index++) {
    /* Last grid is tagged as invalid to stop the iteration. */
    if (grids_infos_buf[grid_index].grid_size.x == -1) {
      /* Sample the last grid instead. */
      /* NOTE(review): assumes at least one valid grid exists, otherwise this indexes -1. */
      grid_index -= 1;
      break;
    }

    /* If sample fall inside the grid, step out of the loop. */
    if (lightprobe_irradiance_grid_local_coord(grids_infos_buf[grid_index], P, lP)) {
      break;
    }
  }

  vec3 atlas_coord = lightprobe_irradiance_grid_atlas_coord(grids_infos_buf[grid_index], lP);

  /* The 4 L1 SH coefficient layers are stacked along Z in the atlas,
   * `IRRADIANCE_GRID_BRICK_SIZE` texels apart. */
  SphericalHarmonicL1 sh;
  sh.L0.M0 = textureUnormalizedCoord(atlas_tx, atlas_coord);
  atlas_coord.z += float(IRRADIANCE_GRID_BRICK_SIZE);
  sh.L1.Mn1 = textureUnormalizedCoord(atlas_tx, atlas_coord);
  atlas_coord.z += float(IRRADIANCE_GRID_BRICK_SIZE);
  sh.L1.M0 = textureUnormalizedCoord(atlas_tx, atlas_coord);
  atlas_coord.z += float(IRRADIANCE_GRID_BRICK_SIZE);
  sh.L1.Mp1 = textureUnormalizedCoord(atlas_tx, atlas_coord);
  return sh;
}
/**
 * Accumulate light probe lighting for the given closures.
 * Currently only adds diffuse irradiance from the irradiance atlas; `reflection`, `Ng` and `V`
 * are unused and `out_specular` is left untouched in this version.
 */
void lightprobe_eval(ClosureDiffuse diffuse,
                     ClosureReflection reflection,
                     vec3 P,
                     vec3 Ng,
                     vec3 V,
                     inout vec3 out_diffuse,
                     inout vec3 out_specular)
{
  SphericalHarmonicL1 irradiance = lightprobe_irradiance_sample(irradiance_atlas_tx, P);

  out_diffuse += spherical_harmonics_evaluate_lambert_non_linear(diffuse.N, irradiance);
}

View File

@ -0,0 +1,48 @@
/**
* Surface Capture: Output surface parameters to diverse storage.
*
* The resources expected to be defined are:
* - capture_info_buf
*/
#pragma BLENDER_REQUIRE(gpu_shader_utildefines_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_intersect_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_shadow_tilemap_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_light_iter_lib.glsl)
/* Accumulate the world-space bounds of every object into `capture_info_buf` with integer
 * atomics. One thread per resource; floats are compared as ordered integers
 * (`floatBitsToOrderedInt`) so that atomicMin/Max give correct float ordering. */
void main()
{
  uint index = gl_GlobalInvocationID.x;
  if (index >= resource_len) {
    return;
  }

  ObjectBounds bounds = bounds_buf[index];

  /* Bounds are not correct as culling is disabled for these. */
  if (bounds._inner_sphere_radius <= 0.0) {
    return;
  }

  IsectBox box = isect_data_setup(bounds.bounding_corners[0].xyz,
                                  bounds.bounding_corners[1].xyz,
                                  bounds.bounding_corners[2].xyz,
                                  bounds.bounding_corners[3].xyz);

  /* Axis-aligned bounds of the 8 oriented box corners. */
  vec3 local_min = vec3(FLT_MAX);
  vec3 local_max = vec3(-FLT_MAX);
  for (int i = 0; i < 8; i++) {
    local_min = min(local_min, box.corners[i].xyz);
    local_max = max(local_max, box.corners[i].xyz);
  }

  atomicMin(capture_info_buf.scene_bound_x_min, floatBitsToOrderedInt(local_min.x));
  atomicMax(capture_info_buf.scene_bound_x_max, floatBitsToOrderedInt(local_max.x));

  atomicMin(capture_info_buf.scene_bound_y_min, floatBitsToOrderedInt(local_min.y));
  atomicMax(capture_info_buf.scene_bound_y_max, floatBitsToOrderedInt(local_max.y));

  atomicMin(capture_info_buf.scene_bound_z_min, floatBitsToOrderedInt(local_min.z));
  atomicMax(capture_info_buf.scene_bound_z_max, floatBitsToOrderedInt(local_max.z));
}

View File

@ -0,0 +1,51 @@
/**
* Load an input lightgrid cache texture into the atlas.
*
* Each thread group will load a brick worth of data and add the needed padding texels.
*/
#pragma BLENDER_REQUIRE(gpu_shader_utildefines_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_base_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_vector_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_matrix_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_spherical_harmonics_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_lightprobe_lib.glsl)
/* Write one SH coefficient texel into the atlas. `layer` selects which coefficient slice to
 * write; slices are stacked along Z, `IRRADIANCE_GRID_BRICK_SIZE` texels apart. */
void atlas_store(vec4 sh_coefficient, ivec2 atlas_coord, int layer)
{
  imageStore(irradiance_atlas_img,
             ivec3(atlas_coord, layer * IRRADIANCE_GRID_BRICK_SIZE) + ivec3(gl_LocalInvocationID),
             sh_coefficient);
}
/* Load one brick worth of cached irradiance into the atlas.
 * One work-group per brick, one invocation per output texel (padding included). */
void main()
{
  int brick_index = lightprobe_irradiance_grid_brick_index_get(grids_infos_buf[grid_index],
                                                               ivec3(gl_WorkGroupID));

  /* Brick coordinate in the source grid. */
  ivec3 brick_coord = ivec3(gl_WorkGroupID);
  /* Add padding border to allow bilinear filtering. */
  ivec3 texel_coord = brick_coord * (IRRADIANCE_GRID_BRICK_SIZE - 1) + ivec3(gl_LocalInvocationID);
  /* Clamp to the source texture so border threads repeat the edge texel. */
  ivec3 input_coord = min(texel_coord, textureSize(irradiance_a_tx, 0) - 1);

  /* Brick coordinate in the destination atlas. */
  IrradianceBrick brick = irradiance_brick_unpack(bricks_infos_buf[brick_index]);
  ivec2 output_coord = ivec2(brick.atlas_coord);

  SphericalHarmonicL1 sh;
  sh.L0.M0 = texelFetch(irradiance_a_tx, input_coord, 0);
  sh.L1.Mn1 = texelFetch(irradiance_b_tx, input_coord, 0);
  sh.L1.M0 = texelFetch(irradiance_c_tx, input_coord, 0);
  sh.L1.Mp1 = texelFetch(irradiance_d_tx, input_coord, 0);

  /* Rotate Spherical Harmonic into world space. */
  mat3 world_to_grid_transposed = mat3(grids_infos_buf[grid_index].world_to_grid_transposed);
  mat3 rotation = normalize(world_to_grid_transposed);
  spherical_harmonics_L1_rotate(rotation, sh.L1);

  atlas_store(sh.L0.M0, output_coord, 0);
  atlas_store(sh.L1.Mn1, output_coord, 1);
  atlas_store(sh.L1.M0, output_coord, 2);
  atlas_store(sh.L1.Mp1, output_coord, 3);
}

View File

@ -0,0 +1,115 @@
/**
* For every irradiance probe sample, compute the incomming radiance from both side.
* This is the same as the surfel ray but we do not actually transport the light, we only capture
* the irradiance as spherical harmonic coefficients.
*
* Dispatched as 1 thread per irradiance probe sample.
*/
#pragma BLENDER_REQUIRE(gpu_shader_utildefines_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_base_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_spherical_harmonics_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surfel_list_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_lightprobe_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
/* Accumulate one radiance sample arriving from world-space direction `L` into
 * the spherical harmonic `sh`, expressed in the grid's local space. */
void irradiance_capture(vec3 L, vec3 irradiance, inout SphericalHarmonicL1 sh)
{
  vec3 local_L = transform_direction(capture_info_buf.irradiance_grid_world_to_local_rotation, L);
  /* Spherical harmonics need to be weighted by sphere area. */
  vec4 weighted_sample = vec4(irradiance * (4.0 * M_PI), 1.0);
  spherical_harmonics_encode_signal_sample(local_L, weighted_sample, sh);
}
/* Gather the radiance leaving `surfel` toward the probe sample at `P` and
 * accumulate it into `sh`, reading the surfel side that faces the sample. */
void irradiance_capture(Surfel surfel, vec3 P, inout SphericalHarmonicL1 sh)
{
  vec3 L = safe_normalize(surfel.position - P);
  bool facing = dot(-L, surfel.normal) > 0.0;

  SurfelRadiance indirect = surfel.radiance_indirect[radiance_src];

  vec3 radiance;
  if (facing) {
    radiance = surfel.radiance_direct.front.rgb;
    /* NOTE: The indirect radiance is already normalized and this is wanted, because we are not
     * integrating the same signal and we would have the SH lagging behind the surfel integration
     * otherwise. */
    radiance += indirect.front.rgb;
  }
  else {
    radiance = surfel.radiance_direct.back.rgb;
    radiance += indirect.back.rgb;
  }
  irradiance_capture(L, radiance, sh);
}
/**
 * One thread per irradiance probe sample: find the two surfels bracketing the
 * sample along its screen-space ray and accumulate their radiance (or world
 * radiance at the list ends) into the sample's SH as a running average.
 */
void main()
{
  ivec3 grid_coord = ivec3(gl_GlobalInvocationID);
  /* Guard against dispatch rounding past the grid size. */
  if (any(greaterThanEqual(grid_coord, capture_info_buf.irradiance_grid_size))) {
    return;
  }
  vec3 P = lightprobe_irradiance_grid_sample_position(
      capture_info_buf.irradiance_grid_local_to_world,
      capture_info_buf.irradiance_grid_size,
      grid_coord);
  /* Project to get ray linked list. */
  float irradiance_sample_ray_distance;
  int list_index = surfel_list_index_get(P, irradiance_sample_ray_distance);
  /* Walk the ray to get which surfels the irradiance sample is between. */
  int surfel_prev = -1;
  int surfel_next = list_start_buf[list_index];
  for (; surfel_next > -1; surfel_next = surfel_buf[surfel_next].next) {
    /* Reminder: List is sorted with highest value first. */
    if (surfel_buf[surfel_next].ray_distance < irradiance_sample_ray_distance) {
      break;
    }
    surfel_prev = surfel_next;
  }
  vec3 sky_L = cameraVec(P);
  /* Load the SH accumulated so far for this sample. */
  SphericalHarmonicL1 sh;
  sh.L0.M0 = imageLoad(irradiance_L0_img, grid_coord);
  sh.L1.Mn1 = imageLoad(irradiance_L1_a_img, grid_coord);
  sh.L1.M0 = imageLoad(irradiance_L1_b_img, grid_coord);
  sh.L1.Mp1 = imageLoad(irradiance_L1_c_img, grid_coord);
  /* Un-normalize for accumulation.
   * Two samples (one per ray direction) were captured per past iteration. */
  float weight_captured = capture_info_buf.sample_index * 2.0;
  sh.L0.M0 *= weight_captured;
  sh.L1.Mn1 *= weight_captured;
  sh.L1.M0 *= weight_captured;
  sh.L1.Mp1 *= weight_captured;
  /* Capture radiance from the surfel in front, or from the world if none. */
  if (surfel_next > -1) {
    Surfel surfel = surfel_buf[surfel_next];
    irradiance_capture(surfel, P, sh);
  }
  else {
    /* TODO(fclem): Sky radiance. */
    irradiance_capture(sky_L, vec3(0.0), sh);
  }
  /* Same for the surfel behind. */
  if (surfel_prev > -1) {
    Surfel surfel = surfel_buf[surfel_prev];
    irradiance_capture(surfel, P, sh);
  }
  else {
    /* TODO(fclem): Sky radiance. */
    irradiance_capture(-sky_L, vec3(0.0), sh);
  }
  /* Normalize for storage. We accumulated 2 samples. */
  weight_captured += 2.0;
  sh.L0.M0 /= weight_captured;
  sh.L1.Mn1 /= weight_captured;
  sh.L1.M0 /= weight_captured;
  sh.L1.Mp1 /= weight_captured;
  imageStore(irradiance_L0_img, grid_coord, sh.L0.M0);
  imageStore(irradiance_L1_a_img, grid_coord, sh.L1.Mn1);
  imageStore(irradiance_L1_b_img, grid_coord, sh.L1.M0);
  imageStore(irradiance_L1_c_img, grid_coord, sh.L1.Mp1);
}

View File

@ -0,0 +1,37 @@
#pragma BLENDER_REQUIRE(gpu_shader_math_vector_lib.glsl)
/* World-space position of the probe sample at `cell_coord` in a grid of
 * `grid_res` cells. Samples sit at cell centers. */
vec3 lightprobe_irradiance_grid_sample_position(mat4 grid_local_to_world,
                                                ivec3 grid_res,
                                                ivec3 cell_coord)
{
  /* Cell center in [0..1], remapped to the grid-local [-1..1] cube. */
  vec3 local_pos = ((vec3(cell_coord) + vec3(0.5)) / vec3(grid_res)) * 2.0 - 1.0;
  return (grid_local_to_world * vec4(local_pos, 1.0)).xyz;
}
/**
 * Return true if sample position is valid.
 * \a r_lP is the local position in grid units [0..grid_size).
 */
bool lightprobe_irradiance_grid_local_coord(IrradianceGridData grid_data, vec3 P, out vec3 r_lP)
{
  /* Position in cell units.
   * NOTE: The vector-matrix multiplication is swapped on purpose to cancel the matrix transpose. */
  vec3 unclamped = (vec4(P, 1.0) * grid_data.world_to_grid_transposed).xyz;
  r_lP = clamp(unclamped, vec3(0.0), vec3(grid_data.grid_size) - 1e-5);
  /* The sample is valid only if clamping did not move the position. */
  return all(equal(unclamped, r_lP));
}
/* Flat index of the brick containing `brick_coord` inside the global brick buffer. */
int lightprobe_irradiance_grid_brick_index_get(IrradianceGridData grid_data, ivec3 brick_coord)
{
  /* Adjacent bricks share one border texel, hence the (size - 1) granularity. */
  int3 bricks_per_axis = divide_ceil(grid_data.grid_size, int3(IRRADIANCE_GRID_BRICK_SIZE - 1));
  /* Row-major linearization, offset by where this grid's bricks start. */
  return grid_data.brick_offset + brick_coord.x + brick_coord.y * bricks_per_axis.x +
         brick_coord.z * bricks_per_axis.x * bricks_per_axis.y;
}

View File

@ -0,0 +1,71 @@
/* Directive for resetting the line numbering so the failing tests lines can be printed.
* This conflict with the shader compiler error logging scheme.
* Comment out for correct compilation error line. */
#line 5
#pragma BLENDER_REQUIRE(gpu_shader_utildefines_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_matrix_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_vector_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_shadow_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_test_lib.glsl)
#define TEST(a, b) if (true)
/**
 * GPU unit test: check that `shadow_slope_bias_get` produces the expected bias
 * for planes at various slopes, and that the bias doubles with each LOD level.
 */
void main()
{
  /* NOTE(review): the TEST label says `IrradianceBrickIndex` but the body exercises
   * `shadow_slope_bias_get` — looks like a copy-pasted label; the macro ignores its
   * arguments so behavior is unaffected. Rename for clarity. */
  TEST(eevee_lightprobe, IrradianceBrickIndex)
  {
    float near = 0.5, far = 1.0;
    mat4 pers_mat = projection_perspective(-near, near, -near, near, near, far);
    mat4 normal_mat = invert(transpose(pers_mat));

    LightData light;
    /* Clip distances are bit-cast into the integer fields. */
    light.clip_near = floatBitsToInt(near);
    light.clip_far = floatBitsToInt(far);
    light.influence_radius_max = far;
    light.type = LIGHT_SPOT;
    /* Only the two matrix terms used by the bias computation are packed. */
    light.normal_mat_packed.x = normal_mat[3][2];
    light.normal_mat_packed.y = normal_mat[3][3];

    vec2 atlas_size = vec2(SHADOW_TILEMAP_RES);
    {
      /* Simulate a "2D" plane crossing the frustum diagonally. */
      vec3 lP0 = vec3(-1.0, 0.0, -1.0);
      vec3 lP1 = vec3(0.5, 0.0, -0.5);
      vec3 lTg = normalize(lP1 - lP0);
      /* Normal is the tangent rotated 90 degrees in the XZ plane. */
      vec3 lNg = vec3(-lTg.z, 0.0, lTg.x);

      float expect = 1.0 / (SHADOW_TILEMAP_RES * SHADOW_PAGE_RES);
      /* Bias is expected to double with each LOD level. */
      EXPECT_NEAR(shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 0), expect, 1e-4);
      EXPECT_NEAR(
          shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 1), expect * 2.0, 1e-4);
      EXPECT_NEAR(
          shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 2), expect * 4.0, 1e-4);
    }
    {
      /* Simulate a "2D" plane crossing the near plane at the center diagonally. */
      vec3 lP0 = vec3(-1.0, 0.0, -1.0);
      vec3 lP1 = vec3(0.0, 0.0, -0.5);
      vec3 lTg = normalize(lP1 - lP0);
      vec3 lNg = vec3(-lTg.z, 0.0, lTg.x);

      float expect = 2.0 / (SHADOW_TILEMAP_RES * SHADOW_PAGE_RES);
      EXPECT_NEAR(shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 0), expect, 1e-4);
      EXPECT_NEAR(
          shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 1), expect * 2.0, 1e-4);
      EXPECT_NEAR(
          shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 2), expect * 4.0, 1e-4);
    }
    {
      /* Simulate a "2D" plane parallel to near clip plane: no slope, no bias. */
      vec3 lP0 = vec3(-1.0, 0.0, -0.75);
      vec3 lP1 = vec3(0.0, 0.0, -0.75);
      vec3 lTg = normalize(lP1 - lP0);
      vec3 lNg = vec3(-lTg.z, 0.0, lTg.x);

      EXPECT_NEAR(shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 0), 0.0, 1e-4);
      EXPECT_NEAR(shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 1), 0.0, 1e-4);
      EXPECT_NEAR(shadow_slope_bias_get(atlas_size, light, lNg, lP0, vec2(0.0), 2), 0.0, 1e-4);
    }
  }
}

View File

@ -24,6 +24,22 @@ void shadow_tag_usage_tile(LightData light, ivec2 tile_co, int lod, int tilemap_
atomicOr(tiles_buf[tile_index], uint(SHADOW_IS_USED));
}
/* Tag the shadow tile containing `P` as used, on one specific clipmap `level`
 * of a directional light. The level is clamped to the light's valid range. */
void shadow_tag_usage_tilemap_directional_at_level(uint l_idx, vec3 P, int level)
{
  LightData light = light_buf[l_idx];

  if (light.tilemap_index == LIGHT_NO_SHADOW) {
    return;
  }

  vec3 lP = shadow_world_to_local(light, P);

  int clamped_level = clamp(level, light.clipmap_lod_min, light.clipmap_lod_max);
  ShadowCoordinates coord = shadow_directional_coordinates_at_level(light, lP, clamped_level);
  shadow_tag_usage_tile(light, coord.tile_coord, 0, coord.tilemap_index);
}
void shadow_tag_usage_tilemap_directional(uint l_idx, vec3 P, vec3 V, float radius)
{
LightData light = light_buf[l_idx];
@ -34,7 +50,7 @@ void shadow_tag_usage_tilemap_directional(uint l_idx, vec3 P, vec3 V, float radi
vec3 lP = shadow_world_to_local(light, P);
if (radius == 0) {
if (radius == 0.0) {
ShadowCoordinates coord = shadow_directional_coordinates(light, lP);
shadow_tag_usage_tile(light, coord.tile_coord, 0, coord.tilemap_index);
}
@ -46,9 +62,9 @@ void shadow_tag_usage_tilemap_directional(uint l_idx, vec3 P, vec3 V, float radi
for (int level = min_level; level <= max_level; level++) {
ShadowCoordinates coord_min = shadow_directional_coordinates_at_level(
light, lP - vec3(radius, radius, 0), level);
light, lP - vec3(radius, radius, 0.0), level);
ShadowCoordinates coord_max = shadow_directional_coordinates_at_level(
light, lP + vec3(radius, radius, 0), level);
light, lP + vec3(radius, radius, 0.0), level);
for (int x = coord_min.tile_coord.x; x <= coord_max.tile_coord.x; x++) {
for (int y = coord_min.tile_coord.y; y <= coord_max.tile_coord.y; y++) {
@ -59,7 +75,7 @@ void shadow_tag_usage_tilemap_directional(uint l_idx, vec3 P, vec3 V, float radi
}
}
void shadow_tag_usage_tilemap_punctual(uint l_idx, vec3 P, vec3 V, float dist_to_cam, float radius)
void shadow_tag_usage_tilemap_punctual(uint l_idx, vec3 P, float dist_to_cam, float radius)
{
LightData light = light_buf[l_idx];
@ -159,7 +175,7 @@ void shadow_tag_usage(vec3 vP, vec3 P, vec3 V, float radius, float dist_to_cam,
LIGHT_FOREACH_END
LIGHT_FOREACH_BEGIN_LOCAL (light_cull_buf, light_zbin_buf, light_tile_buf, pixel, vP.z, l_idx) {
shadow_tag_usage_tilemap_punctual(l_idx, P, V, dist_to_cam, radius);
shadow_tag_usage_tilemap_punctual(l_idx, P, dist_to_cam, radius);
}
LIGHT_FOREACH_END
}
@ -170,3 +186,21 @@ void shadow_tag_usage(vec3 vP, vec3 P, vec2 pixel)
shadow_tag_usage(vP, P, vec3(0), 0, dist_to_cam, pixel);
}
/**
 * Tag shadow-map usage for all lights affecting `surfel`.
 * Directional lights are tagged at the given clipmap `directional_lvl`; local
 * lights are tagged without culling since surfels are not tied to a view.
 */
void shadow_tag_usage_surfel(Surfel surfel, int directional_lvl)
{
  vec3 P = surfel.position;

  LIGHT_FOREACH_BEGIN_DIRECTIONAL (light_cull_buf, l_idx) {
    shadow_tag_usage_tilemap_directional_at_level(l_idx, P, directional_lvl);
  }
  LIGHT_FOREACH_END

  LIGHT_FOREACH_BEGIN_LOCAL_NO_CULL(light_cull_buf, l_idx)
  {
    /* Set distance to camera to 1 to avoid changing footprint_ratio. */
    float dist_to_cam = 1.0;
    /* Use a float literal for the `radius` parameter, consistent with the other
     * call sites (avoids implicit int -> float conversion). */
    shadow_tag_usage_tilemap_punctual(l_idx, P, dist_to_cam, 0.0);
  }
  LIGHT_FOREACH_END
}

View File

@ -0,0 +1,21 @@
/**
* Virtual shadowmapping: Usage tagging
*
* Shadow pages are only allocated if they are visible.
* This pass iterates the surfels buffer and tag all tiles that are needed for light shadowing as
* needed.
*/
#pragma BLENDER_REQUIRE(eevee_shadow_tag_usage_lib.glsl)
/* One thread per surfel: tag every shadow tile the surfel needs. */
void main()
{
  int surfel_index = int(gl_GlobalInvocationID.x);
  /* Guard against the dispatch being rounded up past the surfel count. */
  if (surfel_index >= capture_info_buf.surfel_len) {
    return;
  }
  shadow_tag_usage_surfel(surfel_buf[surfel_index], directional_level);
}

View File

@ -1,4 +1,7 @@
#pragma BLENDER_REQUIRE(gpu_shader_math_base_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_vector_lib.glsl)
/* -------------------------------------------------------------------- */
/** \name Spherical Harmonics Functions
*
@ -64,21 +67,21 @@ float spherical_harmonics_L2_Mp2(vec3 v)
* \{ */
struct SphericalHarmonicBandL0 {
vec3 M0;
vec4 M0;
};
struct SphericalHarmonicBandL1 {
vec3 Mn1;
vec3 M0;
vec3 Mp1;
vec4 Mn1;
vec4 M0;
vec4 Mp1;
};
struct SphericalHarmonicBandL2 {
vec3 Mn2;
vec3 Mn1;
vec3 M0;
vec3 Mp1;
vec3 Mp2;
vec4 Mn2;
vec4 Mn1;
vec4 M0;
vec4 Mp1;
vec4 Mp2;
};
struct SphericalHarmonicL0 {
@ -102,17 +105,18 @@ struct SphericalHarmonicL2 {
/** \name Encode
*
* Decompose an input signal into spherical harmonic coefficients.
* Note that `amplitude` need to be scaled by solid angle.
* \{ */
void spherical_harmonics_L0_encode_signal_sample(vec3 direction,
vec3 amplitude,
vec4 amplitude,
inout SphericalHarmonicBandL0 r_L0)
{
r_L0.M0 += spherical_harmonics_L0_M0(direction) * amplitude;
}
void spherical_harmonics_L1_encode_signal_sample(vec3 direction,
vec3 amplitude,
vec4 amplitude,
inout SphericalHarmonicBandL1 r_L1)
{
r_L1.Mn1 += spherical_harmonics_L1_Mn1(direction) * amplitude;
@ -121,7 +125,7 @@ void spherical_harmonics_L1_encode_signal_sample(vec3 direction,
}
void spherical_harmonics_L2_encode_signal_sample(vec3 direction,
vec3 amplitude,
vec4 amplitude,
inout SphericalHarmonicBandL2 r_L2)
{
r_L2.Mn2 += spherical_harmonics_L2_Mn2(direction) * amplitude;
@ -132,14 +136,14 @@ void spherical_harmonics_L2_encode_signal_sample(vec3 direction,
}
void spherical_harmonics_encode_signal_sample(vec3 direction,
vec3 amplitude,
vec4 amplitude,
inout SphericalHarmonicL0 sh)
{
spherical_harmonics_L0_encode_signal_sample(direction, amplitude, sh.L0);
}
void spherical_harmonics_encode_signal_sample(vec3 direction,
vec3 amplitude,
vec4 amplitude,
inout SphericalHarmonicL1 sh)
{
spherical_harmonics_L0_encode_signal_sample(direction, amplitude, sh.L0);
@ -147,7 +151,7 @@ void spherical_harmonics_encode_signal_sample(vec3 direction,
}
void spherical_harmonics_encode_signal_sample(vec3 direction,
vec3 amplitude,
vec4 amplitude,
inout SphericalHarmonicL2 sh)
{
spherical_harmonics_L0_encode_signal_sample(direction, amplitude, sh.L0);
@ -163,19 +167,19 @@ void spherical_harmonics_encode_signal_sample(vec3 direction,
* Evaluate an encoded signal in a given unit vector direction.
* \{ */
vec3 spherical_harmonics_L0_evaluate(vec3 direction, SphericalHarmonicBandL0 L0)
vec4 spherical_harmonics_L0_evaluate(vec3 direction, SphericalHarmonicBandL0 L0)
{
return spherical_harmonics_L0_M0(direction) * L0.M0;
}
vec3 spherical_harmonics_L1_evaluate(vec3 direction, SphericalHarmonicBandL1 L1)
vec4 spherical_harmonics_L1_evaluate(vec3 direction, SphericalHarmonicBandL1 L1)
{
return spherical_harmonics_L1_Mn1(direction) * L1.Mn1 +
spherical_harmonics_L1_M0(direction) * L1.M0 +
spherical_harmonics_L1_Mp1(direction) * L1.Mp1;
}
vec3 spherical_harmonics_L2_evaluate(vec3 direction, SphericalHarmonicBandL2 L2)
vec4 spherical_harmonics_L2_evaluate(vec3 direction, SphericalHarmonicBandL2 L2)
{
return spherical_harmonics_L2_Mn2(direction) * L2.Mn2 +
spherical_harmonics_L2_Mn1(direction) * L2.Mn1 +
@ -186,6 +190,40 @@ vec3 spherical_harmonics_L2_evaluate(vec3 direction, SphericalHarmonicBandL2 L2)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Rotation
* \{ */
void spherical_harmonics_L0_rotate(mat3x3 rotation, inout SphericalHarmonicBandL0 L0)
{
  /* L0 band being a constant function (i.e. there is no directionality) there is nothing to
   * rotate. This is a no-op. */
}
/* Rotate the directional L1 band coefficients by `rotation`. */
void spherical_harmonics_L1_rotate(mat3x3 rotation, inout SphericalHarmonicBandL1 L1)
{
  /* Convert L1 coefficients to per channel column: column j holds the 3
   * directional coefficients of color channel j.
   * Note the component shuffle to match blender coordinate system. */
  mat4x3 per_channel = transpose(mat3x4(L1.Mp1, L1.Mn1, -L1.M0));
  /* Rotate each channel.
   * NOTE(review): only columns 0..2 (RGB) are rotated; the 4th channel (`.w`)
   * passes through unrotated — presumably intentional, confirm. */
  per_channel[0] = rotation * per_channel[0];
  per_channel[1] = rotation * per_channel[1];
  per_channel[2] = rotation * per_channel[2];
  /* Convert back to L1 coefficients to per channel column.
   * Note the component shuffle to match blender coordinate system. */
  mat3x4 per_coef = transpose(per_channel);
  L1.Mn1 = per_coef[1];
  L1.M0 = -per_coef[2];
  L1.Mp1 = per_coef[0];
}
void spherical_harmonics_L2_rotate(mat3x3 rotation, inout SphericalHarmonicBandL2 L2)
{
  /* TODO: L2 band rotation is not implemented yet; this is currently a no-op. */
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Evaluation
* \{ */
@ -198,18 +236,95 @@ vec3 spherical_harmonics_L2_evaluate(vec3 direction, SphericalHarmonicBandL2 L2)
*/
vec3 spherical_harmonics_evaluate_lambert(vec3 N, SphericalHarmonicL0 sh)
{
return spherical_harmonics_L0_evaluate(N, sh.L0);
vec3 radiance = spherical_harmonics_L0_evaluate(N, sh.L0).rgb;
return radiance;
}
vec3 spherical_harmonics_evaluate_lambert(vec3 N, SphericalHarmonicL1 sh)
{
return spherical_harmonics_L0_evaluate(N, sh.L0) +
spherical_harmonics_L1_evaluate(N, sh.L1) * (2.0 / 3.0);
vec3 radiance = spherical_harmonics_L0_evaluate(N, sh.L0).rgb +
spherical_harmonics_L1_evaluate(N, sh.L1).rgb * (2.0 / 3.0);
return radiance;
}
vec3 spherical_harmonics_evaluate_lambert(vec3 N, SphericalHarmonicL2 sh)
{
return spherical_harmonics_L0_evaluate(N, sh.L0) +
spherical_harmonics_L1_evaluate(N, sh.L1) * (2.0 / 3.0) +
spherical_harmonics_L2_evaluate(N, sh.L2) * (1.0 / 4.0);
vec3 radiance = spherical_harmonics_L0_evaluate(N, sh.L0).rgb +
spherical_harmonics_L1_evaluate(N, sh.L1).rgb * (2.0 / 3.0) +
spherical_harmonics_L2_evaluate(N, sh.L2).rgb * (1.0 / 4.0);
return radiance;
}
/**
 * Use non-linear reconstruction method to avoid negative lobe artifacts.
 * Reconstructs a single scalar channel from its L0 (`R0`) and L1 (`R1`) terms.
 * See this reference for more explanation:
 * https://grahamhazel.com/blog/2017/12/22/converting-sh-radiance-to-irradiance/
 */
float spherical_harmonics_evaluate_non_linear(vec3 N, float R0, vec3 R1)
{
  /* No idea why this is needed. */
  R1 /= 2.0;

  float R1_len;
  vec3 R1_dir = safe_normalize_and_get_length(R1, R1_len);
  float rcp_R0 = safe_rcp(R0);

  /* `q` in [0..1]: alignment of N with the dominant light direction. */
  float q = (1.0 + dot(R1_dir, N)) / 2.0;
  float p = 1.0 + 2.0 * R1_len * rcp_R0;
  float a = (1.0 - R1_len * rcp_R0) * safe_rcp(1.0 + R1_len * rcp_R0);

  return R0 * (a + (1.0 - a) * (p + 1.0) * pow(q, p));
}
/* Non-linear lambertian reconstruction of an L1 SH, done per color channel. */
vec3 spherical_harmonics_evaluate_lambert_non_linear(vec3 N, SphericalHarmonicL1 sh)
{
  /* Shuffling based on spherical_harmonics_L1_* functions. */
  vec3 R1_r = vec3(-sh.L1.Mp1.r, -sh.L1.Mn1.r, sh.L1.M0.r);
  vec3 R1_g = vec3(-sh.L1.Mp1.g, -sh.L1.Mn1.g, sh.L1.M0.g);
  vec3 R1_b = vec3(-sh.L1.Mp1.b, -sh.L1.Mn1.b, sh.L1.M0.b);

  /* Each channel is reconstructed independently. */
  vec3 radiance = vec3(spherical_harmonics_evaluate_non_linear(N, sh.L0.M0.r, R1_r),
                       spherical_harmonics_evaluate_non_linear(N, sh.L0.M0.g, R1_g),
                       spherical_harmonics_evaluate_non_linear(N, sh.L0.M0.b, R1_b));
  /* Return lambertian radiance. So divide by PI. */
  return radiance / M_PI;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Load/Store
*
* This section define the compression scheme of spherical harmonic data.
* \{ */
/**
 * Unpack a SphericalHarmonicL1 from its 4-texel storage layout:
 * RGB of the first 3 coefficients in the xyz of the first 3 inputs, the 4th
 * coefficient's RGB spread over their w components, and all the w terms
 * packed together in the last input.
 */
SphericalHarmonicL1 spherical_harmonics_unpack(vec4 L0_L1_a,
                                               vec4 L0_L1_b,
                                               vec4 L0_L1_c,
                                               vec4 L0_L1_vis)
{
  SphericalHarmonicL1 sh;
  sh.L0.M0 = vec4(L0_L1_a.xyz, L0_L1_vis.x);
  sh.L1.Mn1 = vec4(L0_L1_b.xyz, L0_L1_vis.y);
  sh.L1.M0 = vec4(L0_L1_c.xyz, L0_L1_vis.z);
  sh.L1.Mp1 = vec4(L0_L1_a.w, L0_L1_b.w, L0_L1_c.w, L0_L1_vis.w);
  return sh;
}
/* Inverse of spherical_harmonics_unpack: pack an L1 SH into 4 storage texels. */
void spherical_harmonics_pack(SphericalHarmonicL1 sh,
                              out vec4 L0_L1_a,
                              out vec4 L0_L1_b,
                              out vec4 L0_L1_c,
                              out vec4 L0_L1_vis)
{
  L0_L1_a = vec4(sh.L0.M0.xyz, sh.L1.Mp1.x);
  L0_L1_b = vec4(sh.L1.Mn1.xyz, sh.L1.Mp1.y);
  L0_L1_c = vec4(sh.L1.M0.xyz, sh.L1.Mp1.z);
  L0_L1_vis = vec4(sh.L0.M0.w, sh.L1.Mn1.w, sh.L1.M0.w, sh.L1.Mp1.w);
}
/** \} */

View File

@ -0,0 +1,56 @@
/**
* Surface Capture: Output surface parameters to diverse storage.
*
* This is a separate shader to allow custom closure behavior and avoid putting more complexity
* into other surface shaders.
*/
#pragma BLENDER_REQUIRE(gpu_shader_math_vector_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_gbuffer_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_sampling_lib.glsl)
/* Stub required by the node-tree evaluation; surfel capture does not convert
 * closures to color. */
vec4 closure_to_rgba(Closure cl)
{
  return vec4(0.0);
}
/**
 * Evaluate the surface node-tree and emit (or count) one surfel for this
 * fragment. Runs in two passes: a counting pass, then an output pass writing
 * into a buffer sized from that count.
 */
void main()
{
  init_globals();

  /* TODO(fclem): Remove random sampling for capture and accumulate color. */
  g_closure_rand = 0.5;

  nodetree_surface();

  g_diffuse_data.color *= g_diffuse_data.weight;
  g_reflection_data.color *= g_reflection_data.weight;
  /* NOTE(review): refraction is weighted here but not folded into `albedo`
   * below — presumably intentional for this diffuse-only capture; confirm. */
  g_refraction_data.color *= g_refraction_data.weight;

  vec3 albedo = g_diffuse_data.color + g_reflection_data.color;

  /* ----- Surfel output ----- */

  if (capture_info_buf.do_surfel_count) {
    /* Generate a surfel only once. This check allow cases where no axis is dominant. */
    bool is_surface_view_aligned = dominant_axis(g_data.Ng) == dominant_axis(cameraForward);
    if (is_surface_view_aligned) {
      /* Counting pass increments; output pass reuses the index to write. */
      uint surfel_id = atomicAdd(capture_info_buf.surfel_len, 1u);
      if (capture_info_buf.do_surfel_output) {
        surfel_buf[surfel_id].position = g_data.P;
        /* Flip the geometric normal toward the viewer. */
        surfel_buf[surfel_id].normal = gl_FrontFacing ? g_data.Ng : -g_data.Ng;
        surfel_buf[surfel_id].albedo_front = albedo;
        surfel_buf[surfel_id].radiance_direct.front.rgb = g_emission;
        /* TODO(fclem): 2nd surface evaluation. */
        surfel_buf[surfel_id].albedo_back = albedo;
        surfel_buf[surfel_id].radiance_direct.back.rgb = g_emission;
      }
    }
  }
}

View File

@ -0,0 +1,89 @@
/**
* Apply lights contribution to scene surfel representation.
*/
#pragma BLENDER_REQUIRE(eevee_light_eval_lib.glsl)
/**
 * Evaluate all scene lights (directional, then all local lights without
 * culling) at a surfel position, accumulating diffuse lighting into
 * `out_diffuse`. Specular is computed into dummies and discarded.
 */
void light_eval_surfel(
    ClosureDiffuse diffuse, vec3 P, vec3 Ng, float thickness, inout vec3 out_diffuse)
{
  /* Dummy closure. Not used. */
  ClosureReflection reflection;
  reflection.N = vec3(1.0, 0.0, 0.0);
  reflection.roughness = 0.0;
  vec3 out_specular = vec3(0.0);
  /* Dummy ltc mat parameters. Not used since we have no reflections. */
  vec4 ltc_mat_dummy = utility_tx_sample(utility_tx, vec2(0.0), UTIL_LTC_MAT_LAYER);

  /* Surfels have no view vector; the geometric normal stands in for V. */
  vec3 V = Ng;
  float vP_z = 0.0;
  float out_shadow_unused;

  LIGHT_FOREACH_BEGIN_DIRECTIONAL (light_cull_buf, l_idx) {
    light_eval_ex(diffuse,
                  reflection,
                  true,
                  P,
                  Ng,
                  V,
                  vP_z,
                  thickness,
                  ltc_mat_dummy,
                  l_idx,
                  out_diffuse,
                  out_specular,
                  out_shadow_unused);
  }
  LIGHT_FOREACH_END

  /* Surfels are not tied to a view, so local lights cannot be culled. */
  LIGHT_FOREACH_BEGIN_LOCAL_NO_CULL(light_cull_buf, l_idx)
  {
    light_eval_ex(diffuse,
                  reflection,
                  false,
                  P,
                  Ng,
                  V,
                  vP_z,
                  thickness,
                  ltc_mat_dummy,
                  l_idx,
                  out_diffuse,
                  out_specular,
                  out_shadow_unused);
  }
  LIGHT_FOREACH_END
}
/**
 * One thread per surfel: evaluate direct lighting on both faces of the surfel
 * and accumulate it into the surfel's direct radiance.
 */
void main()
{
  int index = int(gl_GlobalInvocationID.x);
  if (index >= capture_info_buf.surfel_len) {
    return;
  }

  Surfel surfel = surfel_buf[index];

  ClosureDiffuse diffuse_data;
  diffuse_data.N = surfel.normal;
  /* TODO: These could be saved inside the surfel. */
  diffuse_data.sss_radius = vec3(0.0);
  diffuse_data.sss_id = 0u;
  float thickness = 0.0;

  /* Front face. */
  vec3 diffuse_light = vec3(0.0);
  light_eval_surfel(diffuse_data, surfel.position, surfel.normal, thickness, diffuse_light);
  surfel_buf[index].radiance_direct.front.rgb += diffuse_light * surfel.albedo_front;

  /* Back face: same evaluation with the normal flipped.
   * NOTE: the previously declared `reflection_light` locals were never read and
   * have been removed. */
  diffuse_data.N = -surfel.normal;
  diffuse_light = vec3(0.0);
  light_eval_surfel(diffuse_data, surfel.position, -surfel.normal, thickness, diffuse_light);
  surfel_buf[index].radiance_direct.back.rgb += diffuse_light * surfel.albedo_back;
}

View File

@ -0,0 +1,32 @@
/**
* Takes scene surfel representation and build list of surfels aligning in a given direction.
*
* The lists head are allocated to fit the surfel granularity.
*
* Due to alignment the link and list head are split into several int arrays to avoid too much
* memory waste.
*
* Dispatch 1 thread per surfel.
*/
#pragma BLENDER_REQUIRE(gpu_shader_utildefines_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_base_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surfel_list_lib.glsl)
/**
 * One thread per surfel: push the surfel onto the linked list of the
 * screen-space ray cell covering its position, via an atomic head swap.
 */
void main()
{
  int surfel_index = int(gl_GlobalInvocationID.x);
  if (surfel_index >= capture_info_buf.surfel_len) {
    return;
  }
  /* Bucket the surfel into its screen-space ray list and get its sort key. */
  float ray_distance;
  int list_index = surfel_list_index_get(surfel_buf[surfel_index].position, ray_distance);
  /* Do separate assignment to avoid reference to buffer in arguments which is tricky to cross
   * compile. */
  surfel_buf[surfel_index].ray_distance = ray_distance;
  /* NOTE: We only need to init the `list_start_buf` to -1 for the whole list to be valid since
   * every surfel will load its `next` value from the list head. */
  surfel_buf[surfel_index].next = atomicExchange(list_start_buf[list_index], surfel_index);
}

View File

@ -0,0 +1,19 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
/**
 * Return the corresponding list index in the `list_start_buf` for a given world position.
 * It will clamp any coordinate outside valid bounds to the nearest list.
 * Also return the surfel sorting value as `r_ray_distance`.
 */
int surfel_list_index_get(vec3 P, out float r_ray_distance)
{
  vec4 ndc_P = point_world_to_ndc(P);
  r_ray_distance = -ndc_P.z;
  /* NDC [-1..1] to screen [0..1], then to integer grid coordinates. */
  vec2 screen_P = ndc_P.xy * 0.5 + 0.5;
  ivec2 grid_co = clamp(ivec2(screen_P * vec2(list_info_buf.ray_grid_size)),
                        ivec2(0),
                        list_info_buf.ray_grid_size - 1);
  return grid_co.y * list_info_buf.ray_grid_size.x + grid_co.x;
}

View File

@ -0,0 +1,162 @@
/**
* Sort a buffer of surfel list by distance along a direction.
* The resulting surfel lists are then the equivalent of a series of ray cast in the same
* direction. The fact that the surfels are sorted gives proper occlusion.
*
* Sort by increasing `ray_distance`. Start of list is smallest value.
*
* Dispatched as 1 thread per list.
*/
#pragma BLENDER_REQUIRE(gpu_shader_utildefines_lib.glsl)
/**
 * A doubly-linked list implementation.
 * IMPORTANT: It is not general purpose as it only cover the cases needed by this shader.
 * Links (`next`/`prev`) live inside `surfel_buf`; `first`/`last` are surfel
 * indices, with -1 meaning "none".
 */
struct List {
  int first, last;
};
/* Return the split list after link_index.
 * NOTE(review): assumes `link_index` has a successor (writes its `prev`);
 * the only call site splits a list of at least two elements — confirm before
 * reusing elsewhere. */
List list_split_after(inout List original, int link_index)
{
  int next_link = surfel_buf[link_index].next;
  int last_link = original.last;

  original.last = link_index;

  List split;
  split.first = next_link;
  split.last = last_link;

  /* Sever the two halves. */
  surfel_buf[link_index].next = -1;
  surfel_buf[next_link].prev = -1;
  return split;
}
/* Append `link_index` at the end of `list`.
 * NOTE(review): assumes the list is non-empty (`list.last` is a valid index) —
 * holds for the call sites in this shader. */
void list_add_tail(inout List list, int link_index)
{
  surfel_buf[link_index].next = -1;
  surfel_buf[link_index].prev = list.last;
  surfel_buf[list.last].next = link_index;
  list.last = link_index;
}
/* Insert `new_link` immediately before `next_link`, updating `list.first`
 * when inserting at the head. */
void list_insert_link_before(inout List list, int next_link, int new_link)
{
  if (list.first == next_link) {
    /* At beginning of list. */
    list.first = new_link;
  }
  int prev_link = surfel_buf[next_link].prev;

  surfel_buf[new_link].next = next_link;
  surfel_buf[new_link].prev = prev_link;

  surfel_buf[next_link].prev = new_link;

  if (prev_link != -1) {
    surfel_buf[prev_link].next = new_link;
  }
}
/**
 * Return true if link from `surfel[a]` to `surfel[b]` is valid.
 * WARNING: this function is not commutative : `f(a, b) != f(b, a)`
 */
bool is_valid_surfel_link(int a, int b)
{
  vec3 to_b = normalize(surfel_buf[b].position - surfel_buf[a].position);
  float cos_angle = dot(surfel_buf[a].normal, to_b);
  /* Reject links that lie (almost) inside the plane of surfel `a`. */
  return abs(cos_angle) >= 1.0e-3;
}
/**
 * One thread per list: build the `prev` links, insertion-sort the list by
 * decreasing `ray_distance`, then detach coplanar neighbors.
 */
void main()
{
  int list_index = int(gl_GlobalInvocationID.x);
  if (list_index >= list_info_buf.list_max) {
    return;
  }

  int list_start = list_start_buf[list_index];

  if (list_start == -1) {
    /* Empty list. */
    return;
  }

  /* Create Surfel.prev pointers. */
  int prev_id = -1;
  for (int i = list_start; i > -1; i = surfel_buf[i].next) {
    surfel_buf[i].prev = prev_id;
    prev_id = i;
  }

  List sorted_list;
  sorted_list.first = list_start;
  sorted_list.last = prev_id;

  if (sorted_list.first == sorted_list.last) {
    /* Only one item. Nothing to sort. */
    return;
  }

  /* Using insertion sort as it is easier to implement. */

  List unsorted_list = list_split_after(sorted_list, sorted_list.first);

  /* Mutable foreach. */
  for (int i = unsorted_list.first, next; i > -1; i = next) {
    next = surfel_buf[i].next;

    bool insert = false;
    for (int j = sorted_list.first; j > -1; j = surfel_buf[j].next) {
      /* Insert before the first element with a smaller key
       * (list is sorted with highest value first). */
      if (surfel_buf[j].ray_distance < surfel_buf[i].ray_distance) {
        list_insert_link_before(sorted_list, j, i);
        insert = true;
        break;
      }
    }
    if (insert == false) {
      list_add_tail(sorted_list, i);
    }
  }

  /* Update list start for irradiance sample capture. */
  list_start_buf[list_index] = sorted_list.first;

  /* Now that we have a sorted list, try to avoid connection from coplanar surfels.
   * For that we disconnect them and link them to the first non-coplanar surfel.
   * Note that this changes the list to a tree, which doesn't affect the rest of the algorithm.
   *
   * This is a really important step since it allows to clump more surfels into one ray list and
   * avoid light leaking through surfaces. If we don't disconnect coplanar surfels, we lose many
   * good rays by evaluating null radiance transfer between the coplanar surfels for rays that
   * are not directly perpendicular to the surface. */

  /* Mutable foreach. */
  for (int i = sorted_list.first, next; i > -1; i = next) {
    next = surfel_buf[i].next;

    int valid_next = surfel_buf[i].next;
    int valid_prev = surfel_buf[i].prev;

    /* Search the list for the first valid next and previous surfel. */
    while (valid_next > -1) {
      if (is_valid_surfel_link(i, valid_next)) {
        break;
      }
      valid_next = surfel_buf[valid_next].next;
    }
    while (valid_prev > -1) {
      if (is_valid_surfel_link(i, valid_prev)) {
        break;
      }
      valid_prev = surfel_buf[valid_prev].prev;
    }

    surfel_buf[i].next = valid_next;
    surfel_buf[i].prev = valid_prev;
  }
}

View File

@ -0,0 +1,100 @@
/**
* For every surfel, compute the incomming radiance from both side.
* For that, walk the ray surfel linked-list and gather the light from the neighbor surfels.
* This shader is dispatched for a random ray in a uniform hemisphere as we evaluate the
* radiance in both directions.
*
* Dispatched as 1 thread per surfel.
*/
#pragma BLENDER_REQUIRE(gpu_shader_utildefines_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_math_base_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
/**
 * Accumulate `in_radiance` arriving from direction `L` into the surfel's
 * indirect radiance, on whichever surfel side the light hits.
 * The stored radiance is kept normalized (running average in `.w`).
 */
void radiance_transfer(inout Surfel surfel, vec3 in_radiance, vec3 L)
{
  float NL = dot(surfel.normal, L);
  /* Lambertian BSDF. Albedo applied later depending on which side of the surfel was hit. */
  float bsdf = M_1_PI;
  /* From "Global Illumination using Parallel Global Ray-Bundles"
   * Eq. 3: Outgoing light */
  vec3 out_radiance = (M_TAU / capture_info_buf.sample_count) * bsdf * in_radiance * abs(NL);

  SurfelRadiance surfel_radiance_indirect = surfel.radiance_indirect[radiance_dst];

  bool front_facing = (NL > 0.0);
  if (front_facing) {
    /* Store radiance normalized for spherical harmonic accumulation and for visualization.
     * Un-normalize, accumulate, then re-normalize with the updated weight. */
    surfel_radiance_indirect.front.rgb *= surfel_radiance_indirect.front.w;
    surfel_radiance_indirect.front += vec4(out_radiance * surfel.albedo_front,
                                           1.0 / capture_info_buf.sample_count);
    surfel_radiance_indirect.front.rgb /= surfel_radiance_indirect.front.w;
  }
  else {
    /* Store radiance normalized for spherical harmonic accumulation and for visualization. */
    surfel_radiance_indirect.back.rgb *= surfel_radiance_indirect.back.w;
    surfel_radiance_indirect.back += vec4(out_radiance * surfel.albedo_back,
                                          1.0 / capture_info_buf.sample_count);
    surfel_radiance_indirect.back.rgb /= surfel_radiance_indirect.back.w;
  }
  surfel.radiance_indirect[radiance_dst] = surfel_radiance_indirect;
}
/* Transfer the radiance leaving `sender` toward `receiver` along their link. */
void radiance_transfer_surfel(inout Surfel receiver, Surfel sender)
{
  vec3 L = safe_normalize(sender.position - receiver.position);
  bool front_facing = dot(-L, sender.normal) > 0.0;

  SurfelRadiance sender_indirect = sender.radiance_indirect[radiance_src];

  vec3 radiance;
  if (front_facing) {
    /* Indirect radiance is stored normalized; scale it back by its weight. */
    radiance = sender.radiance_direct.front.rgb +
               sender_indirect.front.rgb * sender_indirect.front.w;
  }
  else {
    radiance = sender.radiance_direct.back.rgb +
               sender_indirect.back.rgb * sender_indirect.back.w;
  }

  radiance_transfer(receiver, radiance, L);
}
/* Transfer incoming world radiance arriving from direction `sky_L`. */
void radiance_transfer_world(inout Surfel receiver, vec3 sky_L)
{
  /* TODO(fclem): Sky radiance. */
  radiance_transfer(receiver, vec3(0.0), -sky_L);
}
/**
 * One thread per surfel: gather incoming radiance from the adjacent surfels in
 * the sorted ray list (or the world at list ends), for both ray directions.
 */
void main()
{
  int surfel_index = int(gl_GlobalInvocationID.x);
  if (surfel_index >= int(capture_info_buf.surfel_len)) {
    return;
  }

  Surfel surfel = surfel_buf[surfel_index];

  /* Ray direction for this pass is the view vector at the surfel. */
  vec3 sky_L = cameraVec(surfel.position);

  if (surfel.next > -1) {
    Surfel surfel_next = surfel_buf[surfel.next];
    radiance_transfer_surfel(surfel, surfel_next);
  }
  else {
    radiance_transfer_world(surfel, sky_L);
  }

  if (surfel.prev > -1) {
    Surfel surfel_prev = surfel_buf[surfel.prev];
    radiance_transfer_surfel(surfel, surfel_prev);
  }
  else {
    radiance_transfer_world(surfel, -sky_L);
  }

  /* Write the accumulated radiance back. */
  surfel_buf[surfel_index] = surfel;
}

View File

@ -37,6 +37,7 @@ GPU_SHADER_CREATE_INFO(eevee_deferred_light)
.additional_info("eevee_shared",
"eevee_utility_texture",
"eevee_light_data",
"eevee_lightprobe_data",
"eevee_shadow_data",
"eevee_deferred_base",
"eevee_transmittance_data",

View File

@ -7,7 +7,7 @@
GPU_SHADER_CREATE_INFO(eevee_hiz_data)
.sampler(HIZ_TEX_SLOT, ImageType::FLOAT_2D, "hiz_tx")
.uniform_buf(5, "HiZData", "hiz_buf");
.uniform_buf(HIZ_BUF_SLOT, "HiZData", "hiz_buf");
GPU_SHADER_CREATE_INFO(eevee_hiz_update)
.do_static_compilation(true)

View File

@ -5,16 +5,144 @@
#include "eevee_defines.hh"
#include "gpu_shader_create_info.hh"
GPU_SHADER_INTERFACE_INFO(eeve_debug_surfel_iface, "")
/* -------------------------------------------------------------------- */
/** \name Display
* \{ */
GPU_SHADER_INTERFACE_INFO(eevee_debug_surfel_iface, "")
.smooth(Type::VEC3, "P")
.flat(Type::INT, "surfel_index");
GPU_SHADER_CREATE_INFO(eevee_debug_surfels)
.additional_info("eevee_shared", "draw_view")
.vertex_source("eevee_debug_surfels_vert.glsl")
.vertex_out(eeve_debug_surfel_iface)
.vertex_out(eevee_debug_surfel_iface)
.fragment_source("eevee_debug_surfels_frag.glsl")
.fragment_out(0, Type::VEC4, "out_color")
.storage_buf(0, Qualifier::READ, "DebugSurfel", "surfels_buf[]")
.storage_buf(0, Qualifier::READ, "Surfel", "surfels_buf[]")
.push_constant(Type::FLOAT, "surfel_radius")
.push_constant(Type::INT, "debug_mode")
.do_static_compilation(true);
GPU_SHADER_INTERFACE_INFO(eevee_display_probe_grid_iface, "")
.smooth(Type::VEC2, "lP")
.flat(Type::IVEC3, "cell");
GPU_SHADER_CREATE_INFO(eevee_display_probe_grid)
.additional_info("eevee_shared", "draw_view")
.vertex_source("eevee_display_probe_grid_vert.glsl")
.vertex_out(eevee_display_probe_grid_iface)
.fragment_source("eevee_display_probe_grid_frag.glsl")
.fragment_out(0, Type::VEC4, "out_color")
.push_constant(Type::FLOAT, "sphere_radius")
.push_constant(Type::IVEC3, "grid_resolution")
.push_constant(Type::MAT4, "grid_to_world")
.push_constant(Type::MAT4, "world_to_grid")
.sampler(0, ImageType::FLOAT_3D, "irradiance_a_tx")
.sampler(1, ImageType::FLOAT_3D, "irradiance_b_tx")
.sampler(2, ImageType::FLOAT_3D, "irradiance_c_tx")
.sampler(3, ImageType::FLOAT_3D, "irradiance_d_tx")
.do_static_compilation(true);
/** \} */
/* -------------------------------------------------------------------- */
/** \name Baking
* \{ */
GPU_SHADER_CREATE_INFO(eevee_surfel_common)
.storage_buf(SURFEL_BUF_SLOT, Qualifier::READ_WRITE, "Surfel", "surfel_buf[]")
.storage_buf(CAPTURE_BUF_SLOT, Qualifier::READ, "CaptureInfoData", "capture_info_buf");
GPU_SHADER_CREATE_INFO(eevee_surfel_light)
.local_group_size(SURFEL_GROUP_SIZE)
.additional_info("eevee_shared",
"draw_view",
"eevee_utility_texture",
"eevee_surfel_common",
"eevee_light_data",
"eevee_shadow_data")
.compute_source("eevee_surfel_light_comp.glsl")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(eevee_surfel_list_build)
.local_group_size(SURFEL_GROUP_SIZE)
.additional_info("eevee_shared", "eevee_surfel_common", "draw_view")
.storage_buf(0, Qualifier::READ_WRITE, "int", "list_start_buf[]")
.storage_buf(6, Qualifier::READ_WRITE, "SurfelListInfoData", "list_info_buf")
.compute_source("eevee_surfel_list_build_comp.glsl")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(eevee_surfel_list_sort)
.local_group_size(SURFEL_LIST_GROUP_SIZE)
.additional_info("eevee_shared", "eevee_surfel_common", "draw_view")
.storage_buf(0, Qualifier::READ_WRITE, "int", "list_start_buf[]")
.storage_buf(6, Qualifier::READ, "SurfelListInfoData", "list_info_buf")
.compute_source("eevee_surfel_list_sort_comp.glsl")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(eevee_surfel_ray)
.local_group_size(SURFEL_GROUP_SIZE)
.additional_info("eevee_shared", "eevee_surfel_common", "draw_view")
.push_constant(Type::INT, "radiance_src")
.push_constant(Type::INT, "radiance_dst")
.compute_source("eevee_surfel_ray_comp.glsl")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(eevee_lightprobe_irradiance_bounds)
.do_static_compilation(true)
.local_group_size(IRRADIANCE_BOUNDS_GROUP_SIZE)
.storage_buf(0, Qualifier::READ_WRITE, "CaptureInfoData", "capture_info_buf")
.storage_buf(1, Qualifier::READ, "ObjectBounds", "bounds_buf[]")
.push_constant(Type::INT, "resource_len")
.typedef_source("draw_shader_shared.h")
.additional_info("eevee_shared")
.compute_source("eevee_lightprobe_irradiance_bounds_comp.glsl");
GPU_SHADER_CREATE_INFO(eevee_lightprobe_irradiance_ray)
.local_group_size(IRRADIANCE_GRID_GROUP_SIZE,
IRRADIANCE_GRID_GROUP_SIZE,
IRRADIANCE_GRID_GROUP_SIZE)
.additional_info("eevee_shared", "eevee_surfel_common", "draw_view")
.push_constant(Type::INT, "radiance_src")
.storage_buf(0, Qualifier::READ, "int", "list_start_buf[]")
.storage_buf(6, Qualifier::READ, "SurfelListInfoData", "list_info_buf")
.image(0, GPU_RGBA32F, Qualifier::READ_WRITE, ImageType::FLOAT_3D, "irradiance_L0_img")
.image(1, GPU_RGBA32F, Qualifier::READ_WRITE, ImageType::FLOAT_3D, "irradiance_L1_a_img")
.image(2, GPU_RGBA32F, Qualifier::READ_WRITE, ImageType::FLOAT_3D, "irradiance_L1_b_img")
.image(3, GPU_RGBA32F, Qualifier::READ_WRITE, ImageType::FLOAT_3D, "irradiance_L1_c_img")
.compute_source("eevee_lightprobe_irradiance_ray_comp.glsl")
.do_static_compilation(true);
/** \} */
/* -------------------------------------------------------------------- */
/** \name Runtime
* \{ */
GPU_SHADER_CREATE_INFO(eevee_lightprobe_irradiance_load)
.local_group_size(IRRADIANCE_GRID_BRICK_SIZE,
IRRADIANCE_GRID_BRICK_SIZE,
IRRADIANCE_GRID_BRICK_SIZE)
.additional_info("eevee_shared")
.push_constant(Type::INT, "grid_index")
.uniform_buf(0, "IrradianceGridData", "grids_infos_buf[IRRADIANCE_GRID_MAX]")
.storage_buf(0, Qualifier::READ, "uint", "bricks_infos_buf[]")
.sampler(0, ImageType::FLOAT_3D, "irradiance_a_tx")
.sampler(1, ImageType::FLOAT_3D, "irradiance_b_tx")
.sampler(2, ImageType::FLOAT_3D, "irradiance_c_tx")
.sampler(3, ImageType::FLOAT_3D, "irradiance_d_tx")
.image(0, GPU_RGBA16F, Qualifier::READ_WRITE, ImageType::FLOAT_3D, "irradiance_atlas_img")
.compute_source("eevee_lightprobe_irradiance_load_comp.glsl")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(eevee_lightprobe_data)
.uniform_buf(IRRADIANCE_GRID_BUF_SLOT,
"IrradianceGridData",
"grids_infos_buf[IRRADIANCE_GRID_MAX]")
/* NOTE: Use uint instead of IrradianceBrickPacked because Metal needs to know the exact
 * type. */
.storage_buf(IRRADIANCE_BRICK_BUF_SLOT, Qualifier::READ, "uint", "bricks_infos_buf[]")
.sampler(IRRADIANCE_ATLAS_TEX_SLOT, ImageType::FLOAT_3D, "irradiance_atlas_tx");
/** \} */

View File

@ -131,6 +131,14 @@ GPU_SHADER_CREATE_INFO(eevee_surf_forward)
// "eevee_transmittance_data",
);
GPU_SHADER_CREATE_INFO(eevee_surf_capture)
.vertex_out(eevee_surf_iface)
.define("MAT_CAPTURE")
.storage_buf(SURFEL_BUF_SLOT, Qualifier::WRITE, "Surfel", "surfel_buf[]")
.storage_buf(CAPTURE_BUF_SLOT, Qualifier::READ_WRITE, "CaptureInfoData", "capture_info_buf")
.fragment_source("eevee_surf_capture_frag.glsl")
.additional_info("eevee_camera", "eevee_utility_texture");
GPU_SHADER_CREATE_INFO(eevee_surf_depth)
.vertex_out(eevee_surf_iface)
.fragment_source("eevee_surf_depth_frag.glsl")
@ -216,6 +224,7 @@ GPU_SHADER_CREATE_INFO(eevee_material_stub).define("EEVEE_MATERIAL_STUBS");
EEVEE_MAT_GEOM_VARIATIONS(name##_depth, "eevee_surf_depth", __VA_ARGS__) \
EEVEE_MAT_GEOM_VARIATIONS(name##_deferred, "eevee_surf_deferred", __VA_ARGS__) \
EEVEE_MAT_GEOM_VARIATIONS(name##_forward, "eevee_surf_forward", __VA_ARGS__) \
EEVEE_MAT_GEOM_VARIATIONS(name##_capture, "eevee_surf_capture", __VA_ARGS__) \
EEVEE_MAT_GEOM_VARIATIONS(name##_shadow, "eevee_surf_shadow", __VA_ARGS__)
EEVEE_MAT_PIPE_VARIATIONS(eevee_surface, "eevee_material_stub")

View File

@ -67,6 +67,21 @@ GPU_SHADER_CREATE_INFO(eevee_shadow_tag_usage_opaque)
.additional_info("eevee_shared", "draw_view", "draw_view_culling", "eevee_light_data")
.compute_source("eevee_shadow_tag_usage_comp.glsl");
GPU_SHADER_CREATE_INFO(eevee_shadow_tag_usage_surfels)
.do_static_compilation(true)
.local_group_size(SURFEL_GROUP_SIZE)
.storage_buf(6, Qualifier::READ_WRITE, "ShadowTileMapData", "tilemaps_buf[]")
/* ShadowTileDataPacked is uint. But MSL translation needs the real type. */
.storage_buf(7, Qualifier::READ_WRITE, "uint", "tiles_buf[]")
.push_constant(Type::INT, "directional_level")
.push_constant(Type::FLOAT, "tilemap_projection_ratio")
.additional_info("eevee_shared",
"draw_view",
"draw_view_culling",
"eevee_light_data",
"eevee_surfel_common")
.compute_source("eevee_shadow_tag_usage_surfels_comp.glsl");
GPU_SHADER_INTERFACE_INFO(eevee_shadow_tag_transparent_iface, "interp")
.smooth(Type::VEC3, "P")
.smooth(Type::VEC3, "vP")

View File

@ -27,10 +27,6 @@ void gpencil_color_output(vec4 stroke_col, vec4 vert_col, float vert_strength, f
void main()
{
#ifdef GPENCIL_NEXT
PASS_RESOURCE_ID
#endif
float vert_strength;
vec4 vert_color;
vec3 vert_N;

View File

@ -0,0 +1,136 @@
#pragma BLENDER_REQUIRE(common_gpencil_lib.glsl)
/* Compute the color interpolators consumed by the fragment shader.
 *
 * The fragment shader evaluates: out = tex * gp_interp.color_mul + tex.a * gp_interp.color_add.
 * `color_mul` keeps a fraction of the texture color, `color_add` injects the mixed
 * vertex/material color. Alpha is only ever modulated (never added) so the texture
 * can still act as a stencil. */
void gpencil_color_output(vec4 stroke_col, vec4 vert_col, float vert_strength, float mix_tex)
{
  /* Blend the stroke color with the per-vertex color and the layer tint/opacity. */
  vec4 col = stroke_col;
  col.rgb = mix(col.rgb, vert_col.rgb, vert_col.a * gpVertexColorOpacity);
  col.rgb = mix(col.rgb, gpLayerTint.rgb, gpLayerTint.a);
  col.a *= vert_strength * gpLayerOpacity;

  /* Start from a 100% mix (texture fully replaced by the mixed color),
   * then lerp towards the texture by the material texture-mix factor.
   * Alpha modulation (col.a) is kept in both terms. */
  gp_interp.color_mul = vec4(vec3(col.a * mix_tex), col.a);
  gp_interp.color_add = vec4(col.rgb * col.a * (1.0 - mix_tex), 0.0);
}
/* Grease Pencil geometry vertex shader entry point.
 * Expands stroke/fill data fetched from `gp_pos_tx` / `gp_col_tx` into screen-space
 * geometry and fills the `gp_interp` interpolators (color, uv, depth, flags).
 * NOTE(review): `ma1` packs per-point indices — .x is the material index (used to
 * index `gp_materials`), .y the index of the stroke's first point, .z the point
 * index (both used for depth ordering below) — confirm against the CPU packing. */
void main()
{
  PASS_RESOURCE_ID

  /* Outputs filled by gpencil_vertex(). */
  float vert_strength;
  vec4 vert_color;
  vec3 vert_N;

  ivec4 ma1 = floatBitsToInt(texelFetch(gp_pos_tx, gpencil_stroke_point_id() * 3 + 1));
  gpMaterial gp_mat = gp_materials[ma1.x + gpMaterialOffset];
  gpMaterialFlag gp_flag = floatBitsToUint(gp_mat._flag);

  gl_Position = gpencil_vertex(vec4(viewportSize, 1.0 / viewportSize),
                               gp_flag,
                               gp_mat._alignment_rot,
                               gp_interp.pos,
                               vert_N,
                               vert_color,
                               vert_strength,
                               gp_interp.uv,
                               gp_interp.sspos,
                               gp_interp.aspect,
                               gp_interp.thickness,
                               gp_interp.hardness);

  if (gpencil_is_stroke_vertex()) {
    /* Stroke vertex: color from stroke material, depth ordered per point or per stroke. */
    if (!flag_test(gp_flag, GP_STROKE_ALIGNMENT)) {
      gp_interp.uv.x *= gp_mat._stroke_u_scale;
    }
    /* Special case: We don't use vertex color if material Holdout. */
    if (flag_test(gp_flag, GP_STROKE_HOLDOUT)) {
      vert_color = vec4(0.0);
    }

    gpencil_color_output(
        gp_mat.stroke_color, vert_color, vert_strength, gp_mat._stroke_texture_mix);

    /* Keep only the stroke-related flags for the fragment shader. */
    gp_interp.mat_flag = gp_flag & ~GP_FILL_FLAGS;

    if (gpStrokeOrder3d) {
      /* Use the fragment depth (see fragment shader). */
      gp_interp.depth = -1.0;
    }
    else if (flag_test(gp_flag, GP_STROKE_OVERLAP)) {
      /* Use the index of the point as depth.
       * This means the stroke can overlap itself. */
      float point_index = float(ma1.z);
      gp_interp.depth = (point_index + gpStrokeIndexOffset + 2.0) * 0.0000002;
    }
    else {
      /* Use the index of first point of the stroke as depth.
       * We render using a greater depth test this means the stroke
       * cannot overlap itself.
       * We offset by one so that the fill can be overlapped by its stroke.
       * The offset is ok since we pad the strokes data because of adjacency infos. */
      float stroke_index = float(ma1.y);
      gp_interp.depth = (stroke_index + gpStrokeIndexOffset + 2.0) * 0.0000002;
    }
  }
  else {
    /* Fill vertex: color from fill material, packed fill color/opacity decoded below. */
    int stroke_point_id = gpencil_stroke_point_id();
    vec4 uv1 = texelFetch(gp_pos_tx, stroke_point_id * 3 + 2);
    vec4 fcol1 = texelFetch(gp_col_tx, stroke_point_id * 2 + 1);

    vec4 fill_col = gp_mat.fill_color;

    /* Special case: We don't modulate alpha in gradient mode. */
    if (flag_test(gp_flag, GP_FILL_GRADIENT_USE)) {
      fill_col.a = 1.0;
    }

    /* Decode fill opacity: fcol1.a packs opacity (fractional part of a /10 step)
     * and the vertex-color alpha (stored premultiplied by 10, scaled by 1/10000). */
    vec4 fcol_decode = vec4(fcol1.rgb, floor(fcol1.a / 10.0));
    float fill_opacity = fcol1.a - (fcol_decode.a * 10);
    fcol_decode.a /= 10000.0;

    /* Special case: We don't use vertex color if material Holdout. */
    if (flag_test(gp_flag, GP_FILL_HOLDOUT)) {
      fcol_decode = vec4(0.0);
    }

    /* Apply opacity. */
    fill_col.a *= fill_opacity;
    /* If factor is > 1 force opacity. */
    if (fill_opacity > 1.0) {
      fill_col.a += fill_opacity - 1.0;
    }
    fill_col.a = clamp(fill_col.a, 0.0, 1.0);

    gpencil_color_output(fill_col, fcol_decode, 1.0, gp_mat._fill_texture_mix);

    /* Keep only the fill-related flags, and pack the material id in the high bits. */
    gp_interp.mat_flag = gp_flag & GP_FILL_FLAGS;
    gp_interp.mat_flag |= uint(ma1.x + gpMaterialOffset) << GPENCIl_MATID_SHIFT;

    gp_interp.uv = mat2(gp_mat.fill_uv_rot_scale.xy, gp_mat.fill_uv_rot_scale.zw) * uv1.xy +
                   gp_mat._fill_uv_offset;

    if (gpStrokeOrder3d) {
      /* Use the fragment depth (see fragment shader). */
      gp_interp.depth = -1.0;
    }
    else {
      /* Use the index of first point of the stroke as depth. */
      float stroke_index = float(ma1.y);
      gp_interp.depth = (stroke_index + gpStrokeIndexOffset + 1.0) * 0.0000002;
    }
  }
}

View File

@ -78,7 +78,7 @@ GPU_SHADER_CREATE_INFO(gpencil_geometry_next)
.fragment_out(0, Type::VEC4, "fragColor")
.fragment_out(1, Type::VEC4, "revealColor")
.vertex_out(gpencil_geometry_iface)
.vertex_source("gpencil_vert.glsl")
.vertex_source("grease_pencil_vert.glsl")
.fragment_source("gpencil_frag.glsl")
.additional_info("draw_gpencil_new")
.depth_write(DepthWrite::ANY);

View File

@ -177,8 +177,11 @@ class UniformCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
#endif
public:
UniformCommon()
UniformCommon(const char *name = nullptr)
{
if (name) {
name_ = name;
}
ubo_ = GPU_uniformbuf_create_ex(sizeof(T) * len, nullptr, name_);
}
@ -277,7 +280,7 @@ template<
/* bool device_only = false */>
class UniformArrayBuffer : public detail::UniformCommon<T, len, false> {
public:
UniformArrayBuffer()
UniformArrayBuffer(const char *name = nullptr) : detail::UniformCommon<T, len, false>(name)
{
/* TODO(@fclem): We should map memory instead. */
this->data_ = (T *)MEM_mallocN_aligned(len * sizeof(T), 16, this->name_);
@ -296,7 +299,7 @@ template<
/* bool device_only = false */>
class UniformBuffer : public T, public detail::UniformCommon<T, 1, false> {
public:
UniformBuffer()
UniformBuffer(const char *name = nullptr) : detail::UniformCommon<T, 1, false>(name)
{
/* TODO(@fclem): How could we map this? */
this->data_ = static_cast<T *>(this);
@ -368,6 +371,11 @@ class StorageArrayBuffer : public detail::StorageCommon<T, len, device_only> {
return this->len_;
}
MutableSpan<T> as_span() const
{
return {this->data_, this->len_};
}
static void swap(StorageArrayBuffer &a, StorageArrayBuffer &b)
{
SWAP(T *, a.data_, b.data_);
@ -423,6 +431,14 @@ class StorageVectorBuffer : public StorageArrayBuffer<T, len, false> {
new (ptr) T(std::forward<ForwardT>(value)...);
}
void extend(const Span<T> &values)
{
/* TODO(fclem): Optimize to a single memcpy. */
for (auto v : values) {
this->append(v);
}
}
int64_t size() const
{
return item_len_;

Some files were not shown because too many files have changed in this diff Show More