ImBuf: optimize IMB_transform #115653

Merged
Aras Pranckevicius merged 15 commits from aras_p/blender:imb_transform_opt into main 2023-12-14 15:10:41 +01:00
221 changed files with 3977 additions and 3385 deletions
Showing only changes of commit ce9860df3a

View File

@ -1133,9 +1133,6 @@ struct GWL_Display {
#ifdef WITH_GHOST_WAYLAND_LIBDECOR
GWL_LibDecor_System *libdecor = nullptr;
bool libdecor_required = false;
#endif
#ifdef USE_XDG_INIT_WINDOW_SIZE_HACK
bool xdg_decor_ignore_initial_window_size = false;
#endif
GWL_XDG_Decor_System *xdg_decor = nullptr;
@ -6031,18 +6028,11 @@ static void global_handle_add(void *data,
else {
/* Not found. */
#ifdef USE_GNOME_NEEDS_LIBDECOR_HACK
/* `gtk_shell1` at time of writing. */
if (STRPREFIX(interface, "gtk_shell")) {
if (STRPREFIX(interface, "gtk_shell")) { /* `gtk_shell1` at time of writing. */
/* Only require `libdecor` when built with X11 support,
* otherwise there is nothing to fall back on. */
display->libdecor_required = true;
}
#endif
#ifdef USE_XDG_INIT_WINDOW_SIZE_HACK
/* `org_kde_plasma_shell` at time of writing. */
if (STRPREFIX(interface, "org_kde_plasma_shell")) {
display->xdg_decor_ignore_initial_window_size = true;
}
#endif
}
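Note on the prefix test: Wayland interface names carry a version suffix (`gtk_shell1`, `gtk_shell2`, ...), which is why the check uses STRPREFIX rather than an exact string match. A minimal sketch of the same check (illustrative helper; Blender's STRPREFIX is a strncmp-based prefix macro):

#include <string.h>

static bool interface_is_gtk_shell(const char *interface)
{
  /* Matches `gtk_shell1`, `gtk_shell2`, ... regardless of the version suffix. */
  return strncmp(interface, "gtk_shell", strlen("gtk_shell")) == 0;
}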
@ -7674,13 +7664,6 @@ zxdg_decoration_manager_v1 *GHOST_SystemWayland::xdg_decor_manager_get()
return display_->xdg_decor->manager;
}
#ifdef USE_XDG_INIT_WINDOW_SIZE_HACK
bool GHOST_SystemWayland::xdg_decor_needs_window_size_hack() const
{
return display_->xdg_decor_ignore_initial_window_size;
}
#endif
/* End `xdg_decor`. */
const std::vector<GWL_Output *> &GHOST_SystemWayland::outputs_get() const
@ -7688,20 +7671,6 @@ const std::vector<GWL_Output *> &GHOST_SystemWayland::outputs_get() const
return display_->outputs;
}
const GWL_Output *GHOST_SystemWayland::outputs_get_max_native_size() const
{
uint64_t area_best = 0;
const GWL_Output *output_best = nullptr;
for (const GWL_Output *output : display_->outputs) {
const uint64_t area_test = (uint64_t)output->size_native[0] * (uint64_t)output->size_native[1];
if (output_best == nullptr || area_best < area_test) {
output_best = output;
area_best = area_test;
}
}
return output_best;
}
wl_shm *GHOST_SystemWayland::wl_shm_get() const
{
return display_->wl.shm;

View File

@ -230,14 +230,9 @@ class GHOST_SystemWayland : public GHOST_System {
#endif
struct xdg_wm_base *xdg_decor_shell_get();
struct zxdg_decoration_manager_v1 *xdg_decor_manager_get();
#ifdef USE_XDG_INIT_WINDOW_SIZE_HACK
bool xdg_decor_needs_window_size_hack() const;
#endif
/* End `xdg_decor`. */
const std::vector<GWL_Output *> &outputs_get() const;
/** Return the output with the largest pixel-area. */
const GWL_Output *outputs_get_max_native_size() const;
struct wl_shm *wl_shm_get() const;

View File

@ -103,9 +103,6 @@ struct WGL_XDG_Decor_Window {
/** The window has been configured (see #xdg_surface_ack_configure). */
bool initial_configure_seen = false;
#ifdef USE_XDG_INIT_WINDOW_SIZE_HACK
bool initial_configure_seen_with_size = false;
#endif
};
static void gwl_xdg_decor_window_destroy(WGL_XDG_Decor_Window *decor)
@ -337,7 +334,10 @@ static void gwl_window_resize_for_backend(GWL_Window *win, const int32_t size[2]
{
#ifdef WITH_OPENGL_BACKEND
if (win->ghost_context_type == GHOST_kDrawingContextTypeOpenGL) {
wl_egl_window_resize(win->backend.egl_window, UNPACK2(size), 0, 0);
/* Null on window initialization. */
if (win->backend.egl_window) {
wl_egl_window_resize(win->backend.egl_window, UNPACK2(size), 0, 0);
}
}
#endif
#ifdef WITH_VULKAN_BACKEND
@ -991,8 +991,11 @@ static int outputs_uniform_scale_or_default(const std::vector<GWL_Output *> &out
static CLG_LogRef LOG_WL_XDG_TOPLEVEL = {"ghost.wl.handle.xdg_toplevel"};
#define LOG (&LOG_WL_XDG_TOPLEVEL)
static void xdg_toplevel_handle_configure(
void *data, xdg_toplevel * /*xdg_toplevel*/, int32_t width, int32_t height, wl_array *states)
static void xdg_toplevel_handle_configure(void *data,
xdg_toplevel * /*xdg_toplevel*/,
const int32_t width,
const int32_t height,
wl_array *states)
{
/* TODO: log `states`, not urgent. */
CLOG_INFO(LOG, 2, "configure (size=[%d, %d])", width, height);
@ -1003,6 +1006,17 @@ static void xdg_toplevel_handle_configure(
std::lock_guard lock_frame_guard{win->frame_pending_mutex};
#endif
const int32_t size[2] = {width, height};
for (int i = 0; i < 2; i++) {
if (size[i] == 0) {
/* Values may be zero, in this case the client should choose. */
continue;
}
win->frame_pending.size[i] = win->frame.fractional_scale ?
gwl_window_fractional_to_viewport_round(win->frame, size[i]) :
(size[i] * win->frame.buffer_scale);
}
win->frame_pending.is_maximised = false;
win->frame_pending.is_fullscreen = false;
win->frame_pending.is_active = false;
@ -1023,46 +1037,6 @@ static void xdg_toplevel_handle_configure(
break;
}
}
#ifdef USE_XDG_INIT_WINDOW_SIZE_HACK
if (width || height) {
WGL_XDG_Decor_Window &decor = *win->xdg_decor;
if (decor.initial_configure_seen_with_size == false) {
if (win->ghost_system->xdg_decor_needs_window_size_hack() &&
(decor.mode == ZXDG_TOPLEVEL_DECORATION_V1_MODE_SERVER_SIDE) &&
(win->frame_pending.is_maximised == false &&
win->frame_pending.is_fullscreen == false) &&
/* Account for the initial size being smaller. */
((width <= win->frame.size[0]) && (height <= win->frame.size[1])))
{
/* Fail safe, check the window is *not* larger than all available outputs
* as this could cause files saved on other people's systems to create
* unreasonably large windows. */
const GWL_Output *output_big = win->ghost_system->outputs_get_max_native_size();
if (output_big &&
((output_big->size_native[0] < width) || (output_big->size_native[1] < height))) {
/* Pass, the window exceeds the size of the largest output, ignore initial size. */
}
else {
width = win->frame.size[0];
height = win->frame.size[1];
}
}
decor.initial_configure_seen_with_size = true;
}
}
#endif /* USE_XDG_INIT_WINDOW_SIZE_HACK */
const int32_t size[2] = {width, height};
for (int i = 0; i < 2; i++) {
if (size[i] == 0) {
/* Values may be zero, in this case the client should choose. */
continue;
}
win->frame_pending.size[i] = win->frame.fractional_scale ?
gwl_window_fractional_to_viewport_round(win->frame, size[i]) :
(size[i] * win->frame.buffer_scale);
}
}
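For context on the fractional branch above: wp-fractional-scale-v1 reports the preferred scale as a numerator over a fixed denominator of 120 (so 150% arrives as 180). A rounding sketch under that assumption (illustrative helper, not the actual `gwl_window_fractional_to_viewport_round`):

#include <cstdint>

static int32_t fractional_to_viewport_round(const int32_t value, const int32_t scale_120)
{
  /* Scale a logical size into viewport pixels, rounding to nearest. */
  return (value * scale_120 + 60) / 120;
}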
static void xdg_toplevel_handle_close(void *data, xdg_toplevel * /*xdg_toplevel*/)
@ -1536,20 +1510,6 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
wl_surface_add_listener(window_->wl.surface, &wl_surface_listener, window_);
#ifdef WITH_OPENGL_BACKEND
if (type == GHOST_kDrawingContextTypeOpenGL) {
window_->backend.egl_window = wl_egl_window_create(
window_->wl.surface, int(window_->frame.size[0]), int(window_->frame.size[1]));
}
#endif
#ifdef WITH_VULKAN_BACKEND
if (type == GHOST_kDrawingContextTypeVulkan) {
window_->backend.vulkan_window_info = new GHOST_ContextVK_WindowInfo;
window_->backend.vulkan_window_info->size[0] = window_->frame.size[0];
window_->backend.vulkan_window_info->size[1] = window_->frame.size[1];
}
#endif
wp_fractional_scale_manager_v1 *fractional_scale_manager =
system->wp_fractional_scale_manager_get();
if (fractional_scale_manager) {
@ -1612,9 +1572,6 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
wl_surface_set_user_data(window_->wl.surface, this);
/* Call top-level callbacks. */
wl_surface_commit(window_->wl.surface);
/* NOTE: the method used for XDG & LIBDECOR initialization (using `initial_configure_seen`)
* follows the method used in SDL 3.16. */
@ -1628,6 +1585,9 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
decor.scale_fractional_from_output = scale_fractional_from_output;
}
/* Commit needed so the top-level callbacks run (and `toplevel` can be accessed). */
wl_surface_commit(window_->wl.surface);
/* Additional round-trip is needed to ensure `xdg_toplevel` is set. */
wl_display_roundtrip(system_->wl_display_get());
@ -1658,12 +1618,7 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
else
#endif /* WITH_GHOST_WAYLAND_LIBDECOR */
{
/* Call top-level callbacks. */
WGL_XDG_Decor_Window &decor = *window_->xdg_decor;
while (!decor.initial_configure_seen) {
wl_display_flush(system->wl_display_get());
wl_display_dispatch(system->wl_display_get());
}
if (system_->xdg_decor_manager_get()) {
decor.toplevel_decor = zxdg_decoration_manager_v1_get_toplevel_decoration(
@ -1675,8 +1630,36 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
}
gwl_window_state_set(window_, state);
/* Commit needed so the configure callback runs. */
wl_surface_commit(window_->wl.surface);
while (!decor.initial_configure_seen) {
wl_display_flush(system->wl_display_get());
wl_display_dispatch(system->wl_display_get());
}
}
/* Postpone binding the buffer until after its decor has been configured:
* - Ensure the window is sized properly (with XDG window decorations), see: #113059.
* - Avoids flickering on startup.
*/
#ifdef WITH_OPENGL_BACKEND
if (type == GHOST_kDrawingContextTypeOpenGL) {
window_->backend.egl_window = wl_egl_window_create(
window_->wl.surface, int(window_->frame.size[0]), int(window_->frame.size[1]));
}
#endif
#ifdef WITH_VULKAN_BACKEND
if (type == GHOST_kDrawingContextTypeVulkan) {
window_->backend.vulkan_window_info = new GHOST_ContextVK_WindowInfo;
window_->backend.vulkan_window_info->size[0] = window_->frame.size[0];
window_->backend.vulkan_window_info->size[1] = window_->frame.size[1];
}
#endif
/* Commit after setting the buffer. */
wl_surface_commit(window_->wl.surface);
/* Drawing context. */
if (setDrawingContextType(type) == GHOST_kFailure) {
GHOST_PRINT("Failed to create drawing context" << std::endl);
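Taken together, the constructor now follows a strict order: create the surface, commit, wait for the initial configure, then attach a buffer. A condensed sketch of that sequence, assuming a `configured` flag set by the configure handler (names are illustrative, not the GHOST API):

#include <wayland-client.h>
#include <wayland-egl.h>

static wl_egl_window *egl_window_create_after_configure(
    wl_display *display, wl_surface *surface, const bool *configured, int width, int height)
{
  /* Commit so the compositor sends the initial configure event. */
  wl_surface_commit(surface);
  /* Pump events until the configure handler has run and the size is final. */
  while (!*configured) {
    wl_display_flush(display);
    wl_display_dispatch(display);
  }
  /* Only now attach a buffer: binding earlier can latch the pre-decoration
   * size (see #113059) and causes flickering on startup. */
  return wl_egl_window_create(surface, width, height);
}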

View File

@ -54,14 +54,6 @@
*/
#define USE_EVENT_BACKGROUND_THREAD
/**
* Hack for KDE where the initial window size includes window decorations (title-bar, borders etc),
* making the usable region smaller than requested. As the size of decorations is unknown:
* account for this by ignoring the initial size and using the size requested from GHOST instead
* (with some exceptions & sanity checks for overly large windows), see: #113059.
*/
#define USE_XDG_INIT_WINDOW_SIZE_HACK
class GHOST_SystemWayland;
struct GWL_Output;

View File

@ -72,14 +72,17 @@ int uaccess(const char *filename, int mode)
return r;
}
int urename(const char *oldname, const char *newname)
int urename(const char *oldname, const char *newname, const bool do_replace)
{
int r = -1;
UTF16_ENCODE(oldname);
UTF16_ENCODE(newname);
if (oldname_16 && newname_16) {
r = _wrename(oldname_16, newname_16);
/* Closer to UNIX `rename` behavior, as it at least allows replacing an existing file.
 * `MoveFileExW` return-value logic is inverted however (non-zero on success, 0 on failure).
 * Note that the operation will still fail if the existing 'newname' file is opened anywhere. */
r = (MoveFileExW(oldname_16, newname_16, do_replace ? MOVEFILE_REPLACE_EXISTING : 0) == 0);
}
UTF16_UN_ENCODE(newname);
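A self-contained sketch of the mapping above (illustrative helper): `MoveFileExW` reports success as non-zero, so comparing against zero restores the UNIX `rename` convention of returning 0 on success.

#include <windows.h>

static int rename_like_unix(const wchar_t *oldname_16,
                            const wchar_t *newname_16,
                            const bool do_replace)
{
  /* Non-zero from MoveFileExW means success; invert to rename()-style 0. */
  return MoveFileExW(oldname_16, newname_16, do_replace ? MOVEFILE_REPLACE_EXISTING : 0) == 0;
}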

View File

@ -18,7 +18,7 @@
FILE *ufopen(const char *filename, const char *mode);
int uopen(const char *filename, int oflag, int pmode);
int uaccess(const char *filename, int mode);
int urename(const char *oldname, const char *newname);
int urename(const char *oldname, const char *newname, const bool do_replace);
char *u_alloc_getenv(const char *varname);
void u_free_getenv(char *val);

View File

@ -4575,9 +4575,13 @@ def km_grease_pencil_paint(_params):
{"properties": [("mode", 'INVERT')]}),
("grease_pencil.brush_stroke", {"type": 'LEFTMOUSE', "value": 'PRESS', "shift": True},
{"properties": [("mode", 'SMOOTH')]}),
# Active material
op_menu("VIEW3D_MT_greasepencil_material_active", {"type": 'U', "value": 'PRESS'}),
# Active layer
op_menu("GREASE_PENCIL_MT_layer_active", {"type": 'Y', "value": 'PRESS'}),
# Show/hide
*_template_items_hide_reveal_actions("grease_pencil.layer_hide", "grease_pencil.layer_reveal"),
])
return keymap
@ -4610,6 +4614,9 @@ def km_grease_pencil_edit(params):
# Keyframe Menu
op_menu("VIEW3D_MT_edit_greasepencil_animation", {"type": 'I', "value": 'PRESS'}),
# Show/hide
*_template_items_hide_reveal_actions("grease_pencil.layer_hide", "grease_pencil.layer_reveal"),
# Transform Actions.
*_template_items_transform_actions(params, use_bend=True, use_mirror=True, use_tosphere=True, use_shear=True),
("transform.transform", {"type": 'S', "value": 'PRESS', "alt": True},
@ -4625,6 +4632,8 @@ def km_grease_pencil_edit(params):
("grease_pencil.cyclical_set", {"type": 'F', "value": 'PRESS'}, {"properties": [("type", "CLOSE")]}),
("grease_pencil.cyclical_set", {"type": 'C', "value": 'PRESS',
"alt": True}, {"properties": [("type", "TOGGLE")]}),
("grease_pencil.duplicate_move", {"type": 'D', "value": 'PRESS', "shift": True}, None),
# Active layer
op_menu("GREASE_PENCIL_MT_layer_active", {"type": 'Y', "value": 'PRESS'}),
@ -5251,6 +5260,8 @@ def km_weight_paint(params):
{"properties": [("data_path", 'weight_paint_object.data.use_paint_mask')]}),
("wm.context_toggle", {"type": 'TWO', "value": 'PRESS'},
{"properties": [("data_path", 'weight_paint_object.data.use_paint_mask_vertex')]}),
("wm.context_toggle", {"type": 'THREE', "value": 'PRESS'},
{"properties": [("data_path", 'weight_paint_object.data.use_paint_bone_selection')]}),
("wm.context_toggle", {"type": 'S', "value": 'PRESS', "shift": True},
{"properties": [("data_path", 'tool_settings.weight_paint.brush.use_smooth_stroke')]}),
op_menu_pie("VIEW3D_MT_wpaint_vgroup_lock_pie", {"type": 'K', "value": 'PRESS'}),

View File

@ -18,6 +18,7 @@ class SelectPattern(Operator):
bl_idname = "object.select_pattern"
bl_label = "Select Pattern"
bl_options = {'REGISTER', 'UNDO'}
bl_property = "pattern"
pattern: StringProperty(
name="Pattern",

View File

@ -1654,13 +1654,17 @@ class _defs_texture_paint:
class _defs_weight_paint:
@staticmethod
def poll_select_mask(context):
def poll_select_tools(context):
if context is None:
return True
return VIEW3D_PT_tools_active._tools_select
ob = context.active_object
return (ob and ob.type == 'MESH' and
(ob.data.use_paint_mask or
ob.data.use_paint_mask_vertex))
if (ob and ob.type == 'MESH' and
(ob.data.use_paint_mask or
ob.data.use_paint_mask_vertex)):
return VIEW3D_PT_tools_active._tools_select
elif context.pose_object:
return (_defs_view3d_select.select,)
return ()
@staticmethod
def generate_from_brushes(context):
@ -3115,11 +3119,7 @@ class VIEW3D_PT_tools_active(ToolSelectPanelHelper, Panel):
else ()
),
None,
lambda context: (
VIEW3D_PT_tools_active._tools_select
if _defs_weight_paint.poll_select_mask(context)
else ()
),
_defs_weight_paint.poll_select_tools,
*_tools_annotate,
],
'PAINT_GREASE_PENCIL': [

View File

@ -2163,13 +2163,17 @@ class VIEW3D_MT_select_edit_grease_pencil(Menu):
class VIEW3D_MT_paint_grease_pencil(Menu):
bl_label = "Paint"
bl_label = "Draw"
def draw(self, _context):
layout = self.layout
layout.menu("GREASE_PENCIL_MT_layer_active", text="Active Layer")
layout.separator()
layout.menu("VIEW3D_MT_edit_greasepencil_showhide")
class VIEW3D_MT_paint_gpencil(Menu):
bl_label = "Paint"
@ -5802,6 +5806,19 @@ class VIEW3D_MT_edit_gpencil_showhide(Menu):
layout.operator("gpencil.hide", text="Hide Active Layer").unselected = False
layout.operator("gpencil.hide", text="Hide Inactive Layers").unselected = True
class VIEW3D_MT_edit_greasepencil_showhide(Menu):
bl_label = "Show/Hide"
def draw(self, _context):
layout = self.layout
layout.operator("grease_pencil.layer_reveal", text="Show All Layers")
layout.separator()
layout.operator("grease_pencil.layer_hide", text="Hide Active Layer").unselected = False
layout.operator("grease_pencil.layer_hide", text="Hide Inactive Layers").unselected = True
class VIEW3D_MT_edit_greasepencil(Menu):
bl_label = "Grease Pencil"
@ -5817,6 +5834,14 @@ class VIEW3D_MT_edit_greasepencil(Menu):
layout.separator()
layout.operator("grease_pencil.duplicate_move")
layout.separator()
layout.menu("VIEW3D_MT_edit_greasepencil_showhide")
layout.separator()
layout.menu("VIEW3D_MT_edit_greasepencil_delete")
@ -8056,6 +8081,32 @@ class VIEW3D_MT_gpencil_edit_context_menu(Menu):
col.operator("gpencil.reproject", text="Reproject")
class VIEW3D_MT_greasepencil_material_active(Menu):
bl_label = "Active Material"
@classmethod
def poll(cls, context):
ob = context.active_object
if ob is None or len(ob.material_slots) == 0:
return False
return True
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
ob = context.active_object
for slot in ob.material_slots:
mat = slot.material
if not mat:
continue
mat.id_data.preview_ensure()
if mat and mat.id_data and mat.id_data.preview:
icon = mat.id_data.preview.icon_id
layout.operator("grease_pencil.set_material", text=mat.name, icon_value=icon).slot = mat.name
class VIEW3D_MT_greasepencil_edit_context_menu(Menu):
bl_label = ""
@ -8790,6 +8841,7 @@ classes = (
VIEW3D_MT_edit_mesh_merge,
VIEW3D_MT_edit_mesh_split,
VIEW3D_MT_edit_mesh_showhide,
VIEW3D_MT_greasepencil_material_active,
VIEW3D_MT_paint_grease_pencil,
VIEW3D_MT_paint_gpencil,
VIEW3D_MT_draw_gpencil,
@ -8799,6 +8851,7 @@ classes = (
VIEW3D_MT_edit_gpencil_point,
VIEW3D_MT_edit_gpencil_delete,
VIEW3D_MT_edit_gpencil_showhide,
VIEW3D_MT_edit_greasepencil_showhide,
VIEW3D_MT_weight_gpencil,
VIEW3D_MT_gpencil_animation,
VIEW3D_MT_gpencil_simplify,

View File

@ -140,7 +140,7 @@ class AssetLibrary {
bool remove_asset(AssetRepresentation &asset);
/**
* Remap ID pointers for local ID assets, see #BKE_lib_remap.h. When an ID pointer would be
* Remap ID pointers for local ID assets, see #BKE_lib_remap.hh. When an ID pointer would be
* mapped to null (typically when an ID gets removed), the asset is removed, because we don't
* support such empty/null assets.
*/

View File

@ -11,7 +11,7 @@
#include "DNA_ID.h"
#include "DNA_asset_types.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "asset_storage.hh"

View File

@ -193,6 +193,10 @@ static void blf_batch_draw_init()
g_batch.offset_loc = GPU_vertformat_attr_add(&format, "offset", GPU_COMP_I32, 1, GPU_FETCH_INT);
g_batch.glyph_size_loc = GPU_vertformat_attr_add(
&format, "glyph_size", GPU_COMP_I32, 2, GPU_FETCH_INT);
g_batch.glyph_comp_len_loc = GPU_vertformat_attr_add(
&format, "comp_len", GPU_COMP_I32, 1, GPU_FETCH_INT);
g_batch.glyph_mode_loc = GPU_vertformat_attr_add(
&format, "mode", GPU_COMP_I32, 1, GPU_FETCH_INT);
g_batch.verts = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_STREAM);
GPU_vertbuf_data_alloc(g_batch.verts, BLF_BATCH_DRAW_LEN_MAX);
@ -201,6 +205,9 @@ static void blf_batch_draw_init()
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.col_loc, &g_batch.col_step);
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.offset_loc, &g_batch.offset_step);
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.glyph_size_loc, &g_batch.glyph_size_step);
GPU_vertbuf_attr_get_raw_data(
g_batch.verts, g_batch.glyph_comp_len_loc, &g_batch.glyph_comp_len_step);
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.glyph_mode_loc, &g_batch.glyph_mode_step);
g_batch.glyph_len = 0;
/* A dummy VBO containing 4 points, attributes are not used. */
@ -347,6 +354,9 @@ void blf_batch_draw()
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.col_loc, &g_batch.col_step);
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.offset_loc, &g_batch.offset_step);
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.glyph_size_loc, &g_batch.glyph_size_step);
GPU_vertbuf_attr_get_raw_data(
g_batch.verts, g_batch.glyph_comp_len_loc, &g_batch.glyph_comp_len_step);
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.glyph_mode_loc, &g_batch.glyph_mode_step);
g_batch.glyph_len = 0;
}

View File

@ -243,11 +243,6 @@ static GlyphBLF *blf_glyph_cache_add_glyph(FontBLF *font,
g->c = charcode;
g->idx = glyph_index;
g->advance_x = (ft_pix)glyph->advance.x;
g->pos[0] = glyph->bitmap_left;
g->pos[1] = glyph->bitmap_top;
g->dims[0] = int(glyph->bitmap.width);
g->dims[1] = int(glyph->bitmap.rows);
g->pitch = glyph->bitmap.pitch;
g->subpixel = subpixel;
FT_BBox bbox;
@ -261,24 +256,99 @@ static GlyphBLF *blf_glyph_cache_add_glyph(FontBLF *font,
g->lsb_delta = (ft_pix)glyph->lsb_delta;
g->rsb_delta = (ft_pix)glyph->rsb_delta;
const int buffer_size = int(glyph->bitmap.width * glyph->bitmap.rows);
if (buffer_size != 0) {
if (font->flags & BLF_MONOCHROME) {
/* Font buffer uses only 0 or 1 values, Blender expects full 0..255 range. */
if (font->flags & BLF_MONOCHROME) {
g->render_mode = FT_RENDER_MODE_MONO;
}
else if (font->flags & BLF_HINTING_SLIGHT) {
g->render_mode = FT_RENDER_MODE_LIGHT;
}
else {
g->render_mode = FT_RENDER_MODE_NORMAL;
}
if (glyph->format == FT_GLYPH_FORMAT_BITMAP) {
/* This has been rendered and we have a bitmap. */
g->pos[0] = glyph->bitmap_left;
g->pos[1] = glyph->bitmap_top;
g->dims[0] = int(glyph->bitmap.width);
g->dims[1] = int(glyph->bitmap.rows);
g->pitch = glyph->bitmap.pitch;
g->depth = 1;
switch (glyph->bitmap.pixel_mode) {
case FT_PIXEL_MODE_LCD:
g->depth = 3;
g->dims[0] /= 3;
break;
case FT_PIXEL_MODE_LCD_V:
g->depth = 3;
g->dims[1] /= 3;
g->pitch *= 3;
break;
case FT_PIXEL_MODE_BGRA:
g->depth = 4;
break;
}
const int buffer_size = g->dims[0] * g->dims[1] * g->depth;
g->bitmap = static_cast<uchar *>(MEM_mallocN(size_t(buffer_size), "glyph bitmap"));
if (ELEM(glyph->bitmap.pixel_mode,
FT_PIXEL_MODE_GRAY,
FT_PIXEL_MODE_GRAY2,
FT_PIXEL_MODE_GRAY4)) {
/* Scale 1, 2, 4-bit gray to 8-bit. */
const char scale = char(255 / (glyph->bitmap.num_grays - 1));
for (int i = 0; i < buffer_size; i++) {
glyph->bitmap.buffer[i] = glyph->bitmap.buffer[i] ? 255 : 0;
#ifdef BLF_GAMMA_CORRECT_GLYPHS
/* Convert coverage amounts to perceptually-improved lightness values. */
g->bitmap[i] = blf_glyph_gamma(glyph->bitmap.buffer[i] * scale);
#else
g->bitmap[i] = glyph->bitmap.buffer[i] * scale;
#endif /* BLF_GAMMA_CORRECT_GLYPHS */
}
}
else if (glyph->bitmap.pixel_mode == FT_PIXEL_MODE_LCD) {
/* RGB (BGR) in successive columns. */
for (size_t y = 0; y < size_t(g->dims[1]); y++) {
for (size_t x = 0; x < size_t(g->dims[0]); x++) {
size_t offs_in = (y * size_t(glyph->bitmap.pitch)) + (x * size_t(g->depth));
size_t offs_out = (y * size_t(g->dims[0]) * size_t(g->depth)) + (x * size_t(g->depth));
g->bitmap[offs_out + 0] = glyph->bitmap.buffer[offs_in + 2];
g->bitmap[offs_out + 1] = glyph->bitmap.buffer[offs_in + 1];
g->bitmap[offs_out + 2] = glyph->bitmap.buffer[offs_in + 0];
}
}
}
else if (glyph->bitmap.pixel_mode == FT_PIXEL_MODE_LCD_V) {
/* RGB (BGR) in successive ROWS. */
for (size_t y = 0; y < size_t(g->dims[1]); y++) {
for (size_t x = 0; x < size_t(g->dims[0]); x++) {
size_t offs_in = (y * size_t(glyph->bitmap.pitch) * size_t(g->depth)) + x;
size_t offs_out = (y * size_t(g->dims[0]) * size_t(g->depth)) + (x * size_t(g->depth));
g->bitmap[offs_out + 2] = glyph->bitmap.buffer[offs_in];
g->bitmap[offs_out + 1] = glyph->bitmap.buffer[offs_in + size_t(glyph->bitmap.pitch)];
g->bitmap[offs_out + 0] = glyph->bitmap.buffer[offs_in + size_t(glyph->bitmap.pitch) +
size_t(glyph->bitmap.pitch)];
}
}
}
else if (glyph->bitmap.pixel_mode == FT_PIXEL_MODE_BGRA) {
/* Convert from BGRA to RGBA. */
for (size_t y = 0; y < size_t(g->dims[1]); y++) {
for (size_t x = 0; x < size_t(g->dims[0]); x++) {
size_t offs_in = (y * size_t(g->pitch)) + (x * size_t(g->depth));
size_t offs_out = (y * size_t(g->dims[0]) * size_t(g->depth)) + (x * size_t(g->depth));
g->bitmap[offs_out + 0] = glyph->bitmap.buffer[offs_in + 2];
g->bitmap[offs_out + 1] = glyph->bitmap.buffer[offs_in + 1];
g->bitmap[offs_out + 2] = glyph->bitmap.buffer[offs_in + 0];
g->bitmap[offs_out + 3] = glyph->bitmap.buffer[offs_in + 3];
}
}
}
else {
#ifdef BLF_GAMMA_CORRECT_GLYPHS
/* Convert coverage amounts to perceptually-improved lightness values. */
for (int i = 0; i < buffer_size; i++) {
glyph->bitmap.buffer[i] = blf_glyph_gamma(glyph->bitmap.buffer[i]);
}
#endif /* BLF_GAMMA_CORRECT_GLYPHS */
memcpy(g->bitmap, glyph->bitmap.buffer, (size_t)buffer_size);
}
g->bitmap = static_cast<uchar *>(MEM_mallocN(size_t(buffer_size), "glyph bitmap"));
memcpy(g->bitmap, glyph->bitmap.buffer, size_t(buffer_size));
}
BLI_addhead(&(gc->bucket[blf_hash(g->c << 6 | subpixel)]), g);
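Worked example of the gray expansion above: FT_PIXEL_MODE_GRAY2 has `num_grays == 4`, so `scale == 255 / 3 == 85` and a coverage of 3 expands to 255; FT_PIXEL_MODE_GRAY4 has `num_grays == 16`, so `scale == 17` and 15 expands to 255.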
@ -747,6 +817,10 @@ static FT_GlyphSlot blf_glyph_load(FontBLF *font, FT_UInt glyph_index, bool outl
}
}
if (!outline_only && FT_HAS_COLOR(font->face)) {
load_flags |= FT_LOAD_COLOR;
}
if (FT_Load_Glyph(font->face, glyph_index, load_flags) == FT_Err_Ok) {
return font->face->glyph;
}
@ -764,8 +838,17 @@ static FT_GlyphSlot blf_glyph_load(FontBLF *font, FT_UInt glyph_index, bool outl
*/
static bool blf_glyph_render_bitmap(FontBLF *font, FT_GlyphSlot glyph)
{
const int render_mode = (font->flags & BLF_MONOCHROME) ? FT_RENDER_MODE_MONO :
FT_RENDER_MODE_NORMAL;
int render_mode;
if (font->flags & BLF_MONOCHROME) {
render_mode = FT_RENDER_MODE_MONO;
}
else if (font->flags & BLF_HINTING_SLIGHT) {
render_mode = FT_RENDER_MODE_LIGHT;
}
else {
render_mode = FT_RENDER_MODE_NORMAL;
}
/* Render the glyph curves to a bitmap. */
FT_Error err = FT_Render_Glyph(glyph, FT_Render_Mode(render_mode));
@ -773,11 +856,10 @@ static bool blf_glyph_render_bitmap(FontBLF *font, FT_GlyphSlot glyph)
return false;
}
FT_Bitmap tempbitmap;
if (font->flags & BLF_MONOCHROME) {
/* Convert result from 1 bit per pixel to 8 bit per pixel */
/* Accumulate errors for later, fine if not interested beyond "ok vs any error" */
if (ELEM(glyph->bitmap.pixel_mode, FT_PIXEL_MODE_MONO, FT_PIXEL_MODE_GRAY2, FT_PIXEL_MODE_GRAY4))
{
/* Convert to 8 bit per pixel */
FT_Bitmap tempbitmap;
FT_Bitmap_New(&tempbitmap);
/* Does Blender use Pitch 1 always? It works so far */
@ -786,11 +868,7 @@ static bool blf_glyph_render_bitmap(FontBLF *font, FT_GlyphSlot glyph)
err += FT_Bitmap_Done(font->ft_lib, &tempbitmap);
}
if (err || glyph->format != FT_GLYPH_FORMAT_BITMAP) {
return false;
}
return true;
return (err == FT_Err_Ok);
}
/** \} */
@ -1308,9 +1386,9 @@ static void blf_glyph_calc_rect_shadow(
/** \name Glyph Drawing
* \{ */
static void blf_texture_draw(const uchar color[4],
static void blf_texture_draw(GlyphBLF *g,
const uchar color[4],
const int glyph_size[2],
const int offset,
const int x1,
const int y1,
const int x2,
@ -1325,7 +1403,9 @@ static void blf_texture_draw(const uchar color[4],
float(y2 + g_batch.ofs[1]));
copy_v4_v4_uchar(static_cast<uchar *>(GPU_vertbuf_raw_step(&g_batch.col_step)), color);
copy_v2_v2_int(static_cast<int *>(GPU_vertbuf_raw_step(&g_batch.glyph_size_step)), glyph_size);
*((int *)GPU_vertbuf_raw_step(&g_batch.offset_step)) = offset;
*((int *)GPU_vertbuf_raw_step(&g_batch.offset_step)) = g->offset;
*((int *)GPU_vertbuf_raw_step(&g_batch.glyph_comp_len_step)) = g->depth;
*((int *)GPU_vertbuf_raw_step(&g_batch.glyph_mode_step)) = g->render_mode;
g_batch.glyph_len++;
/* Flush cache if it's full. */
@ -1334,36 +1414,26 @@ static void blf_texture_draw(const uchar color[4],
}
}
static void blf_texture5_draw(const uchar color_in[4],
const int glyph_size[2],
const int offset,
const int x1,
const int y1,
const int x2,
const int y2)
static void blf_texture5_draw(
GlyphBLF *g, const uchar color_in[4], const int x1, const int y1, const int x2, const int y2)
{
int glyph_size_flag[2];
/* flag the x and y component signs for 5x5 blurring */
glyph_size_flag[0] = -glyph_size[0];
glyph_size_flag[1] = -glyph_size[1];
glyph_size_flag[0] = -g->dims[0];
glyph_size_flag[1] = -g->dims[1];
blf_texture_draw(color_in, glyph_size_flag, offset, x1, y1, x2, y2);
blf_texture_draw(g, color_in, glyph_size_flag, x1, y1, x2, y2);
}
static void blf_texture3_draw(const uchar color_in[4],
const int glyph_size[2],
const int offset,
const int x1,
const int y1,
const int x2,
const int y2)
static void blf_texture3_draw(
GlyphBLF *g, const uchar color_in[4], const int x1, const int y1, const int x2, const int y2)
{
int glyph_size_flag[2];
/* flag the x component sign for 3x3 blurring */
glyph_size_flag[0] = -glyph_size[0];
glyph_size_flag[1] = glyph_size[1];
glyph_size_flag[0] = -g->dims[0];
glyph_size_flag[1] = g->dims[1];
blf_texture_draw(color_in, glyph_size_flag, offset, x1, y1, x2, y2);
blf_texture_draw(g, color_in, glyph_size_flag, x1, y1, x2, y2);
}
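The sign flips above reuse the existing `glyph_size` attribute as a flag channel: a negative width signals 3x3 blur, negative width and height signal 5x5, and the text shader presumably takes absolute values to recover the real glyph dimensions.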
void blf_glyph_draw(FontBLF *font, GlyphCacheBLF *gc, GlyphBLF *g, const int x, const int y)
@ -1379,7 +1449,7 @@ void blf_glyph_draw(FontBLF *font, GlyphCacheBLF *gc, GlyphBLF *g, const int x,
g->offset = gc->bitmap_len;
int buff_size = g->dims[0] * g->dims[1];
int buff_size = g->dims[0] * g->dims[1] * g->depth;
int bitmap_len = gc->bitmap_len + buff_size;
if (bitmap_len > gc->bitmap_len_alloc) {
@ -1436,31 +1506,21 @@ void blf_glyph_draw(FontBLF *font, GlyphCacheBLF *gc, GlyphBLF *g, const int x,
blf_glyph_calc_rect_shadow(&rect_ofs, g, x, y, font);
if (font->shadow == 0) {
blf_texture_draw(font->shadow_color,
blf_texture_draw(g,
font->shadow_color,
g->dims,
g->offset,
rect_ofs.xmin,
rect_ofs.ymin,
rect_ofs.xmax,
rect_ofs.ymax);
}
else if (font->shadow <= 4) {
blf_texture3_draw(font->shadow_color,
g->dims,
g->offset,
rect_ofs.xmin,
rect_ofs.ymin,
rect_ofs.xmax,
rect_ofs.ymax);
blf_texture3_draw(
g, font->shadow_color, rect_ofs.xmin, rect_ofs.ymin, rect_ofs.xmax, rect_ofs.ymax);
}
else {
blf_texture5_draw(font->shadow_color,
g->dims,
g->offset,
rect_ofs.xmin,
rect_ofs.ymin,
rect_ofs.xmax,
rect_ofs.ymax);
blf_texture5_draw(
g, font->shadow_color, rect_ofs.xmin, rect_ofs.ymin, rect_ofs.xmax, rect_ofs.ymax);
}
}
@ -1470,19 +1530,16 @@ void blf_glyph_draw(FontBLF *font, GlyphCacheBLF *gc, GlyphBLF *g, const int x,
#if BLF_BLUR_ENABLE
switch (font->blur) {
case 3:
blf_texture3_draw(
font->color, g->dims, g->offset, rect.xmin, rect.ymin, rect.xmax, rect.ymax);
blf_texture3_draw(g, font->color, rect.xmin, rect.ymin, rect.xmax, rect.ymax);
break;
case 5:
blf_texture5_draw(
font->color, g->dims, g->offset, rect.xmin, rect.ymin, rect.xmax, rect.ymax);
blf_texture5_draw(g, font->color, rect.xmin, rect.ymin, rect.xmax, rect.ymax);
break;
default:
blf_texture_draw(
font->color, g->dims, g->offset, rect.xmin, rect.ymin, rect.xmax, rect.ymax);
blf_texture_draw(g, font->color, rect.xmin, rect.ymin, rect.xmax, rect.ymax);
}
#else
blf_texture_draw(font->color, g->dims, g->offset, rect.xmin, rect.ymin, rect.xmax, rect.ymax);
blf_texture_draw(g, font->color, g->dims, rect.xmin, rect.ymin, rect.xmax, rect.ymax);
#endif
}

View File

@ -88,8 +88,9 @@ typedef struct BatchBLF {
struct FontBLF *font;
struct GPUBatch *batch;
struct GPUVertBuf *verts;
struct GPUVertBufRaw pos_step, col_step, offset_step, glyph_size_step;
unsigned int pos_loc, col_loc, offset_loc, glyph_size_loc;
struct GPUVertBufRaw pos_step, col_step, offset_step, glyph_size_step, glyph_comp_len_step,
glyph_mode_step;
unsigned int pos_loc, col_loc, offset_loc, glyph_size_loc, glyph_comp_len_loc, glyph_mode_loc;
unsigned int glyph_len;
/** Copy of `font->pos`. */
int ofs[2];
@ -174,6 +175,10 @@ typedef struct GlyphBLF {
/** Glyph width and height. */
int dims[2];
int pitch;
int depth;
/** Render mode (FT_Render_Mode). */
int render_mode;
/**
* X and Y bearing of the glyph.

View File

@ -89,9 +89,9 @@ typedef struct DupliObject {
* Look up the RGBA value of a uniform shader attribute.
* \return true if the attribute was found; if not, r_value is also set to zero.
*/
bool BKE_object_dupli_find_rgba_attribute(struct Object *ob,
struct DupliObject *dupli,
struct Object *dupli_parent,
bool BKE_object_dupli_find_rgba_attribute(const struct Object *ob,
const struct DupliObject *dupli,
const struct Object *dupli_parent,
const char *name,
float r_value[4]);
@ -99,8 +99,8 @@ bool BKE_object_dupli_find_rgba_attribute(struct Object *ob,
* Look up the RGBA value of a view layer/scene/world shader attribute.
* \return true if the attribute was found; if not, r_value is also set to zero.
*/
bool BKE_view_layer_find_rgba_attribute(struct Scene *scene,
struct ViewLayer *layer,
bool BKE_view_layer_find_rgba_attribute(const struct Scene *scene,
const struct ViewLayer *layer,
const char *name,
float r_value[4]);

View File

@ -87,8 +87,10 @@ class GeometryComponent : public ImplicitSharingMixin {
virtual std::optional<AttributeAccessor> attributes() const;
virtual std::optional<MutableAttributeAccessor> attributes_for_write();
/* The returned component should be of the same type as the type this is called on. */
virtual GeometryComponent *copy() const = 0;
/**
* Copies the component. The returned component only has a single user and is therefore mutable.
*/
virtual GeometryComponentPtr copy() const = 0;
/** Remove referenced data from the geometry component. */
virtual void clear() = 0;
@ -437,7 +439,7 @@ class MeshComponent : public GeometryComponent {
public:
MeshComponent();
~MeshComponent();
GeometryComponent *copy() const override;
GeometryComponentPtr copy() const override;
void clear() override;
bool has_mesh() const;
@ -491,7 +493,7 @@ class PointCloudComponent : public GeometryComponent {
public:
PointCloudComponent();
~PointCloudComponent();
GeometryComponent *copy() const override;
GeometryComponentPtr copy() const override;
void clear() override;
bool has_pointcloud() const;
@ -551,7 +553,7 @@ class CurveComponent : public GeometryComponent {
public:
CurveComponent();
~CurveComponent();
GeometryComponent *copy() const override;
GeometryComponentPtr copy() const override;
void clear() override;
bool has_curves() const;
@ -592,7 +594,7 @@ class InstancesComponent : public GeometryComponent {
public:
InstancesComponent();
~InstancesComponent();
GeometryComponent *copy() const override;
GeometryComponentPtr copy() const override;
void clear() override;
@ -626,7 +628,7 @@ class VolumeComponent : public GeometryComponent {
public:
VolumeComponent();
~VolumeComponent();
GeometryComponent *copy() const override;
GeometryComponentPtr copy() const override;
void clear() override;
bool has_volume() const;
@ -681,7 +683,7 @@ class GeometryComponentEditData final : public GeometryComponent {
GeometryComponentEditData();
GeometryComponent *copy() const final;
GeometryComponentPtr copy() const final;
bool owns_direct_data() const final;
void ensure_owns_direct_data() final;
@ -711,7 +713,7 @@ class GreasePencilComponent : public GeometryComponent {
public:
GreasePencilComponent();
~GreasePencilComponent();
GeometryComponent *copy() const override;
GeometryComponentPtr copy() const override;
void clear() override;
bool has_grease_pencil() const;
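The switch to `GeometryComponentPtr` makes the copy-on-write contract explicit: a copy starts with a single user, so it is safe to mutate. A minimal sketch of the pattern this enables (simplified from `GeometrySet::get_component_for_write` further down; assumes the `ImplicitSharingMixin` user-count API):

/* Illustrative: ensure exclusive ownership before mutating a component. */
static GeometryComponent &ensure_mutable(GeometryComponentPtr &component_ptr)
{
  if (!component_ptr->is_mutable()) {
    /* Shared with another GeometrySet: replace with a single-user copy. */
    component_ptr = component_ptr->copy();
  }
  return const_cast<GeometryComponent &>(*component_ptr);
}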

View File

@ -481,7 +481,7 @@ struct ID *BKE_id_copy_for_use_in_bmain(struct Main *bmain, const struct ID *id)
* \param bmain: May be NULL, in which case there is no guarantee that internal remapping of ID
* pointers to themselves will be complete (regarding depsgraph and/or runtime data updates).
* \param do_self_remap: Whether to remap internal pointers to itself or not.
* \param self_remap_flags: Flags controlling self remapping, see BKE_lib_remap.h.
* \param self_remap_flags: Flags controlling self remapping, see BKE_lib_remap.hh.
*/
void BKE_lib_id_swap(struct Main *bmain,
struct ID *id_a,

View File

@ -21,15 +21,12 @@
*/
#include "BLI_compiler_attrs.h"
#include "BLI_span.hh"
#include "BLI_utildefines.h"
#ifdef __cplusplus
extern "C" {
#endif
struct ID;
struct IDRemapper;
struct LinkNode;
struct Main;
/* BKE_libblock_free, delete are declared in BKE_lib_id.h for convenience. */
@ -120,13 +117,9 @@ typedef enum eIDRemapType {
*
* \note Is preferred over BKE_libblock_remap_locked due to performance.
*/
void BKE_libblock_remap_multiple_locked(struct Main *bmain,
struct IDRemapper *mappings,
const int remap_flags);
void BKE_libblock_remap_multiple_locked(Main *bmain, IDRemapper *mappings, const int remap_flags);
void BKE_libblock_remap_multiple(struct Main *bmain,
struct IDRemapper *mappings,
const int remap_flags);
void BKE_libblock_remap_multiple(Main *bmain, IDRemapper *mappings, const int remap_flags);
/**
* Bare raw remapping of IDs, with no other processing than actually updating the ID pointers.
@ -137,9 +130,7 @@ void BKE_libblock_remap_multiple(struct Main *bmain,
* case e.g. in read-file process.
*
* WARNING: This call will likely leave the given BMain in invalid state in many aspects. */
void BKE_libblock_remap_multiple_raw(struct Main *bmain,
struct IDRemapper *mappings,
const int remap_flags);
void BKE_libblock_remap_multiple_raw(Main *bmain, IDRemapper *mappings, const int remap_flags);
/**
* Replace all references in given Main to \a old_id by \a new_id
* (if \a new_id is NULL, it unlinks \a old_id).
@ -147,9 +138,9 @@ void BKE_libblock_remap_multiple_raw(struct Main *bmain,
* \note Requiring new_id to be non-null, this *may* not be the case ultimately,
* but makes things simpler for now.
*/
void BKE_libblock_remap_locked(struct Main *bmain, void *old_idv, void *new_idv, int remap_flags)
void BKE_libblock_remap_locked(Main *bmain, void *old_idv, void *new_idv, int remap_flags)
ATTR_NONNULL(1, 2);
void BKE_libblock_remap(struct Main *bmain, void *old_idv, void *new_idv, int remap_flags)
void BKE_libblock_remap(Main *bmain, void *old_idv, void *new_idv, int remap_flags)
ATTR_NONNULL(1, 2);
/**
@ -159,10 +150,8 @@ void BKE_libblock_remap(struct Main *bmain, void *old_idv, void *new_idv, int re
* \param do_flag_never_null: If true, all IDs using \a idv in a 'non-NULL' way are flagged by
* #LIB_TAG_DOIT flag (quite obviously, 'non-NULL' usages can never be unlinked by this function).
*/
void BKE_libblock_unlink(struct Main *bmain,
void *idv,
bool do_flag_never_null,
bool do_skip_indirect) ATTR_NONNULL();
void BKE_libblock_unlink(Main *bmain, void *idv, bool do_flag_never_null, bool do_skip_indirect)
ATTR_NONNULL();
/**
* Similar to libblock_remap, but only affects IDs used by given \a idv ID.
@ -173,16 +162,16 @@ void BKE_libblock_unlink(struct Main *bmain,
* \param bmain: May be NULL, in which case there won't be depsgraph updates nor post-processing on
* some ID types (like collections or objects) to ensure their runtime data is valid.
*/
void BKE_libblock_relink_ex(
struct Main *bmain, void *idv, void *old_idv, void *new_idv, int remap_flags) ATTR_NONNULL(2);
void BKE_libblock_relink_ex(Main *bmain, void *idv, void *old_idv, void *new_idv, int remap_flags)
ATTR_NONNULL(2);
/**
* Same as #BKE_libblock_relink_ex, but applies all rules defined in \a id_remapper to \a ids (or
* does cleanup if `ID_REMAP_TYPE_CLEANUP` is specified as \a remap_type).
*/
void BKE_libblock_relink_multiple(struct Main *bmain,
struct LinkNode *ids,
void BKE_libblock_relink_multiple(Main *bmain,
const blender::Span<ID *> ids,
eIDRemapType remap_type,
struct IDRemapper *id_remapper,
IDRemapper *id_remapper,
int remap_flags);
/**
@ -194,18 +183,16 @@ void BKE_libblock_relink_multiple(struct Main *bmain,
* Very specific usage, not sure we'll keep it in the long run,
* currently only used in Object/Collection duplication code.
*/
void BKE_libblock_relink_to_newid(struct Main *bmain, struct ID *id, int remap_flag)
ATTR_NONNULL();
void BKE_libblock_relink_to_newid(Main *bmain, ID *id, int remap_flag) ATTR_NONNULL();
typedef void (*BKE_library_free_notifier_reference_cb)(const void *);
typedef void (*BKE_library_remap_editor_id_reference_cb)(const struct IDRemapper *mappings);
typedef void (*BKE_library_remap_editor_id_reference_cb)(const IDRemapper *mappings);
void BKE_library_callback_free_notifier_reference_set(BKE_library_free_notifier_reference_cb func);
void BKE_library_callback_remap_editor_id_reference_set(
BKE_library_remap_editor_id_reference_cb func);
/* IDRemapper */
struct IDRemapper;
typedef enum IDRemapperApplyResult {
/** No remapping rules available for the source. */
ID_REMAP_RESULT_SOURCE_UNAVAILABLE,
@ -249,33 +236,31 @@ typedef enum IDRemapperApplyOptions {
} IDRemapperApplyOptions;
ENUM_OPERATORS(IDRemapperApplyOptions, ID_REMAP_APPLY_UNMAP_WHEN_REMAPPING_TO_SELF)
typedef void (*IDRemapperIterFunction)(struct ID *old_id, struct ID *new_id, void *user_data);
typedef void (*IDRemapperIterFunction)(ID *old_id, ID *new_id, void *user_data);
/**
* Create a new ID Remapper.
*
* An ID remapper stores multiple remapping rules.
*/
struct IDRemapper *BKE_id_remapper_create(void);
IDRemapper *BKE_id_remapper_create(void);
void BKE_id_remapper_clear(struct IDRemapper *id_remapper);
bool BKE_id_remapper_is_empty(const struct IDRemapper *id_remapper);
void BKE_id_remapper_clear(IDRemapper *id_remapper);
bool BKE_id_remapper_is_empty(const IDRemapper *id_remapper);
/** Free the given ID Remapper. */
void BKE_id_remapper_free(struct IDRemapper *id_remapper);
void BKE_id_remapper_free(IDRemapper *id_remapper);
/** Add a new remapping. Does not replace an existing mapping for `old_id`, if any. */
void BKE_id_remapper_add(struct IDRemapper *id_remapper, struct ID *old_id, struct ID *new_id);
void BKE_id_remapper_add(IDRemapper *id_remapper, ID *old_id, ID *new_id);
/** Add a new remapping, replacing a potential already existing mapping of `old_id`. */
void BKE_id_remapper_add_overwrite(struct IDRemapper *id_remapper,
struct ID *old_id,
struct ID *new_id);
void BKE_id_remapper_add_overwrite(IDRemapper *id_remapper, ID *old_id, ID *new_id);
/**
* Apply a remapping.
*
* Update the id pointer stored in the given r_id_ptr if a remapping rule exists.
*/
IDRemapperApplyResult BKE_id_remapper_apply(const struct IDRemapper *id_remapper,
struct ID **r_id_ptr,
IDRemapperApplyResult BKE_id_remapper_apply(const IDRemapper *id_remapper,
ID **r_id_ptr,
IDRemapperApplyOptions options);
/**
* Apply a remapping.
@ -286,28 +271,24 @@ IDRemapperApplyResult BKE_id_remapper_apply(const struct IDRemapper *id_remapper
* \param id_self: required for ID_REMAP_APPLY_UNMAP_WHEN_REMAPPING_TO_SELF.
* When remapping to id_self it will then be remapped to NULL.
*/
IDRemapperApplyResult BKE_id_remapper_apply_ex(const struct IDRemapper *id_remapper,
struct ID **r_id_ptr,
IDRemapperApplyResult BKE_id_remapper_apply_ex(const IDRemapper *id_remapper,
ID **r_id_ptr,
IDRemapperApplyOptions options,
struct ID *id_self);
bool BKE_id_remapper_has_mapping_for(const struct IDRemapper *id_remapper, uint64_t type_filter);
ID *id_self);
bool BKE_id_remapper_has_mapping_for(const IDRemapper *id_remapper, uint64_t type_filter);
/**
* Determine the mapping result, without applying the mapping.
*/
IDRemapperApplyResult BKE_id_remapper_get_mapping_result(const struct IDRemapper *id_remapper,
struct ID *id,
IDRemapperApplyResult BKE_id_remapper_get_mapping_result(const IDRemapper *id_remapper,
ID *id,
IDRemapperApplyOptions options,
const struct ID *id_self);
void BKE_id_remapper_iter(const struct IDRemapper *id_remapper,
const ID *id_self);
void BKE_id_remapper_iter(const IDRemapper *id_remapper,
IDRemapperIterFunction func,
void *user_data);
/** Returns a readable string for the given result. Can be used for debugging purposes. */
const char *BKE_id_remapper_result_string(const IDRemapperApplyResult result);
/** Prints out the rules inside the given id_remapper. Can be used for debugging purposes. */
void BKE_id_remapper_print(const struct IDRemapper *id_remapper);
#ifdef __cplusplus
}
#endif
void BKE_id_remapper_print(const IDRemapper *id_remapper);

View File

@ -366,8 +366,8 @@ MLoopNorSpace *BKE_lnor_space_create(MLoopNorSpaceArray *lnors_spacearr);
*/
void BKE_lnor_space_define(MLoopNorSpace *lnor_space,
const float lnor[3],
float vec_ref[3],
float vec_other[3],
const float vec_ref[3],
const float vec_other[3],
blender::Span<blender::float3> edge_vectors);
#endif

View File

@ -14,7 +14,6 @@
#include "BLI_utildefines.h"
struct PBVHNode;
struct PBVHBatches;
struct BMesh;
enum PBVHType {

View File

@ -41,14 +41,16 @@ struct MLoopTri;
struct Mesh;
struct PBVH;
struct PBVHNode;
struct PBVHBatches;
struct PBVH_GPU_Args;
struct SculptSession;
struct SubdivCCGFace;
struct SubdivCCG;
struct TaskParallelSettings;
struct Image;
struct ImageUser;
namespace blender::draw::pbvh {
struct PBVHBatches;
struct PBVH_GPU_Args;
} // namespace blender::draw::pbvh
/*
* These structs represent logical verts/edges/faces.
@ -92,16 +94,6 @@ struct PBVHPixelsNode {
void *node_data = nullptr;
};
class PBVHAttrReq {
public:
PBVHAttrReq() = default;
PBVHAttrReq(const eAttrDomain domain, const eCustomDataType type) : domain(domain), type(type) {}
std::string name;
eAttrDomain domain;
eCustomDataType type;
};
struct PBVHFrustumPlanes {
float (*planes)[4];
int num_planes;
@ -215,14 +207,7 @@ void BKE_pbvh_update_mesh_pointers(PBVH *pbvh, Mesh *mesh);
/**
* Do a full rebuild with on Grids data structure.
*/
void BKE_pbvh_build_grids(PBVH *pbvh,
blender::Span<CCGElem *> grids,
CCGKey *key,
blender::Span<int> grid_to_face_map,
blender::Span<DMFlagMat> flagmats,
blender::Span<BLI_bitmap *> grid_hidden,
Mesh *me,
SubdivCCG *subdiv_ccg);
void BKE_pbvh_build_grids(PBVH *pbvh, CCGKey *key, Mesh *me, SubdivCCG *subdiv_ccg);
/**
* Build a PBVH from a BMesh.
*/
@ -260,6 +245,8 @@ bool BKE_pbvh_node_raycast(PBVH *pbvh,
PBVHNode *node,
float (*origco)[3],
bool use_origco,
blender::Span<int> corner_verts,
const bool *hide_poly,
const float ray_start[3],
const float ray_normal[3],
IsectRayPrecalc *isect_precalc,
@ -298,6 +285,8 @@ bool BKE_pbvh_node_find_nearest_to_ray(PBVH *pbvh,
PBVHNode *node,
float (*origco)[3],
bool use_origco,
blender::Span<int> corner_verts,
const bool *hide_poly,
const float ray_start[3],
const float ray_normal[3],
float *depth,
@ -305,7 +294,7 @@ bool BKE_pbvh_node_find_nearest_to_ray(PBVH *pbvh,
/* Drawing */
void BKE_pbvh_set_frustum_planes(PBVH *pbvh, PBVHFrustumPlanes *planes);
void BKE_pbvh_get_frustum_planes(PBVH *pbvh, PBVHFrustumPlanes *planes);
void BKE_pbvh_get_frustum_planes(const PBVH *pbvh, PBVHFrustumPlanes *planes);
void BKE_pbvh_draw_cb(const Mesh &mesh,
PBVH *pbvh,
@ -313,12 +302,9 @@ void BKE_pbvh_draw_cb(const Mesh &mesh,
PBVHFrustumPlanes *update_frustum,
PBVHFrustumPlanes *draw_frustum,
void (*draw_fn)(void *user_data,
PBVHBatches *batches,
const PBVH_GPU_Args &args),
void *user_data,
bool full_render,
PBVHAttrReq *attrs,
int attrs_num);
blender::draw::pbvh::PBVHBatches *batches,
const blender::draw::pbvh::PBVH_GPU_Args &args),
void *user_data);
/* PBVH Access */
@ -329,11 +315,6 @@ bool BKE_pbvh_has_faces(const PBVH *pbvh);
*/
void BKE_pbvh_bounding_box(const PBVH *pbvh, float min[3], float max[3]);
/**
* Multi-res hidden data, only valid for type == PBVH_GRIDS.
*/
blender::Span<const BLI_bitmap *> BKE_pbvh_get_grid_visibility(const PBVH *pbvh);
void BKE_pbvh_sync_visibility_from_verts(PBVH *pbvh, Mesh *me);
/**
@ -350,7 +331,6 @@ int BKE_pbvh_count_grid_quads(blender::Span<const BLI_bitmap *> grid_hidden,
*/
const CCGKey *BKE_pbvh_get_grid_key(const PBVH *pbvh);
blender::Span<CCGElem *> BKE_pbvh_get_grids(const PBVH *pbvh);
int BKE_pbvh_get_grid_num_verts(const PBVH *pbvh);
int BKE_pbvh_get_grid_num_faces(const PBVH *pbvh);
@ -411,10 +391,7 @@ void BKE_pbvh_node_num_verts(const PBVH *pbvh,
int BKE_pbvh_node_num_unique_verts(const PBVH &pbvh, const PBVHNode &node);
blender::Span<int> BKE_pbvh_node_get_vert_indices(const PBVHNode *node);
blender::Span<int> BKE_pbvh_node_get_unique_vert_indices(const PBVHNode *node);
void BKE_pbvh_node_get_loops(PBVH *pbvh,
PBVHNode *node,
const int **r_loop_indices,
const int **r_corner_verts);
void BKE_pbvh_node_get_loops(PBVHNode *node, const int **r_loop_indices);
blender::Vector<int> BKE_pbvh_node_calc_face_indices(const PBVH &pbvh, const PBVHNode &node);
/* Get number of faces in the mesh; for PBVH_GRIDS the
@ -460,22 +437,9 @@ void BKE_pbvh_redraw_BB(PBVH *pbvh, float bb_min[3], float bb_max[3]);
blender::IndexMask BKE_pbvh_get_grid_updates(const PBVH *pbvh,
blender::Span<const PBVHNode *> nodes,
blender::IndexMaskMemory &memory);
void BKE_pbvh_grids_update(PBVH *pbvh,
blender::Span<CCGElem *> grids,
blender::Span<int> grid_to_face_map,
blender::Span<DMFlagMat> flagmats,
blender::Span<BLI_bitmap *> grid_hidden,
CCGKey *key);
void BKE_pbvh_grids_update(PBVH *pbvh, CCGKey *key);
void BKE_pbvh_subdiv_cgg_set(PBVH *pbvh, SubdivCCG *subdiv_ccg);
/**
* If an operation causes the hide status stored in the mesh to change, this must be called
* to update the references to those attributes, since they are only added when necessary.
*/
void BKE_pbvh_update_hide_attributes_from_mesh(PBVH *pbvh);
/* Vertex Deformer. */
void BKE_pbvh_vert_coords_apply(PBVH *pbvh, blender::Span<blender::float3> vert_positions);
bool BKE_pbvh_is_deformed(PBVH *pbvh);
@ -642,10 +606,6 @@ void BKE_pbvh_parallel_range_settings(TaskParallelSettings *settings,
blender::MutableSpan<blender::float3> BKE_pbvh_get_vert_positions(const PBVH *pbvh);
const float (*BKE_pbvh_get_vert_normals(const PBVH *pbvh))[3];
const bool *BKE_pbvh_get_vert_hide(const PBVH *pbvh);
bool *BKE_pbvh_get_vert_hide_for_write(PBVH *pbvh);
const bool *BKE_pbvh_get_poly_hide(const PBVH *pbvh);
PBVHColorBufferNode *BKE_pbvh_node_color_buffer_get(PBVHNode *node);
void BKE_pbvh_node_color_buffer_free(PBVH *pbvh);

View File

@ -416,7 +416,7 @@ set(SRC
BKE_lib_id.h
BKE_lib_override.hh
BKE_lib_query.h
BKE_lib_remap.h
BKE_lib_remap.hh
BKE_library.h
BKE_light.h
BKE_light_linking.h
@ -522,7 +522,7 @@ set(SRC
intern/CCGSubSurf_intern.h
intern/attribute_access_intern.hh
intern/data_transfer_intern.h
intern/lib_intern.h
intern/lib_intern.hh
intern/multires_inline.hh
intern/multires_reshape.hh
intern/multires_unsubdivide.hh

View File

@ -12,6 +12,7 @@
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <limits>
#include "MEM_guardedalloc.h"
@ -2919,8 +2920,8 @@ std::optional<blender::Bounds<blender::float3>> BKE_armature_min_max(const bPose
if (BLI_listbase_is_empty(&pose->chanbase)) {
return std::nullopt;
}
blender::float3 min(-FLT_MAX);
blender::float3 max(FLT_MAX);
blender::float3 min(std::numeric_limits<float>::max());
blender::float3 max(std::numeric_limits<float>::lowest());
/* For now, we assume BKE_pose_where_is has already been called
* (hence we have valid data in pchan). */
LISTBASE_FOREACH (bPoseChannel *, pchan, &pose->chanbase) {
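The old initializers were inverted: a minimum seeded with -FLT_MAX and a maximum seeded with FLT_MAX yield bounds no point can ever tighten. A standalone sketch of the corrected accumulation (illustrative helper, not Blender API):

#include <algorithm>
#include <limits>

static void minmax_accumulate(const float *values, const int num, float &r_min, float &r_max)
{
  r_min = std::numeric_limits<float>::max();    /* Any value compares smaller. */
  r_max = std::numeric_limits<float>::lowest(); /* Any value compares larger. */
  for (int i = 0; i < num; i++) {
    r_min = std::min(r_min, values[i]);
    r_max = std::max(r_max, values[i]);
  }
}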

View File

@ -46,7 +46,7 @@
#include "BKE_lib_id.h"
#include "BKE_lib_override.hh"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_main_idmap.hh"
#include "BKE_main_namemap.hh"

View File

@ -41,7 +41,7 @@
#include "BKE_lib_id.h"
#include "BKE_lib_override.hh"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_main_namemap.hh"
#include "BKE_material.h"

View File

@ -29,7 +29,7 @@
#include "BKE_idtype.h"
#include "BKE_lib_id.h"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_material.h"
#include "BKE_paint.hh"

View File

@ -27,7 +27,7 @@
#include "BKE_layer.h"
#include "BKE_lib_id.h"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_object.hh"
#include "BKE_preview_image.hh"

View File

@ -35,7 +35,7 @@
#include "BKE_idtype.h"
#include "BKE_lib_id.h"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_modifier.hh"
#include "BKE_object.hh"

View File

@ -32,14 +32,14 @@ CurveComponent::~CurveComponent()
this->clear();
}
GeometryComponent *CurveComponent::copy() const
GeometryComponentPtr CurveComponent::copy() const
{
CurveComponent *new_component = new CurveComponent();
if (curves_ != nullptr) {
new_component->curves_ = BKE_curves_copy_for_eval(curves_);
new_component->ownership_ = GeometryOwnershipType::Owned;
}
return new_component;
return GeometryComponentPtr(new_component);
}
void CurveComponent::clear()

View File

@ -10,7 +10,7 @@ namespace blender::bke {
GeometryComponentEditData::GeometryComponentEditData() : GeometryComponent(Type::Edit) {}
GeometryComponent *GeometryComponentEditData::copy() const
GeometryComponentPtr GeometryComponentEditData::copy() const
{
GeometryComponentEditData *new_component = new GeometryComponentEditData();
if (curves_edit_hints_) {
@ -20,7 +20,7 @@ GeometryComponent *GeometryComponentEditData::copy() const
new_component->grease_pencil_edit_hints_ = std::make_unique<GreasePencilEditHints>(
*grease_pencil_edit_hints_);
}
return new_component;
return GeometryComponentPtr(new_component);
}
bool GeometryComponentEditData::owns_direct_data() const

View File

@ -23,14 +23,14 @@ GreasePencilComponent::~GreasePencilComponent()
this->clear();
}
GeometryComponent *GreasePencilComponent::copy() const
GeometryComponentPtr GreasePencilComponent::copy() const
{
GreasePencilComponent *new_component = new GreasePencilComponent();
if (grease_pencil_ != nullptr) {
new_component->grease_pencil_ = BKE_grease_pencil_copy_for_eval(grease_pencil_);
new_component->ownership_ = GeometryOwnershipType::Owned;
}
return new_component;
return GeometryComponentPtr(new_component);
}
void GreasePencilComponent::clear()

View File

@ -37,14 +37,14 @@ InstancesComponent::~InstancesComponent()
this->clear();
}
GeometryComponent *InstancesComponent::copy() const
GeometryComponentPtr InstancesComponent::copy() const
{
InstancesComponent *new_component = new InstancesComponent();
if (instances_ != nullptr) {
new_component->instances_ = new Instances(*instances_);
new_component->ownership_ = GeometryOwnershipType::Owned;
}
return new_component;
return GeometryComponentPtr(new_component);
}
void InstancesComponent::clear()

View File

@ -34,14 +34,14 @@ MeshComponent::~MeshComponent()
this->clear();
}
GeometryComponent *MeshComponent::copy() const
GeometryComponentPtr MeshComponent::copy() const
{
MeshComponent *new_component = new MeshComponent();
if (mesh_ != nullptr) {
new_component->mesh_ = BKE_mesh_copy_for_eval(mesh_);
new_component->ownership_ = GeometryOwnershipType::Owned;
}
return new_component;
return GeometryComponentPtr(new_component);
}
void MeshComponent::clear()

View File

@ -23,14 +23,14 @@ PointCloudComponent::~PointCloudComponent()
this->clear();
}
GeometryComponent *PointCloudComponent::copy() const
GeometryComponentPtr PointCloudComponent::copy() const
{
PointCloudComponent *new_component = new PointCloudComponent();
if (pointcloud_ != nullptr) {
new_component->pointcloud_ = BKE_pointcloud_copy_for_eval(pointcloud_);
new_component->ownership_ = GeometryOwnershipType::Owned;
}
return new_component;
return GeometryComponentPtr(new_component);
}
void PointCloudComponent::clear()

View File

@ -21,14 +21,14 @@ VolumeComponent::~VolumeComponent()
this->clear();
}
GeometryComponent *VolumeComponent::copy() const
GeometryComponentPtr VolumeComponent::copy() const
{
VolumeComponent *new_component = new VolumeComponent();
if (volume_ != nullptr) {
new_component->volume_ = BKE_volume_copy_for_eval(volume_);
new_component->ownership_ = GeometryOwnershipType::Owned;
}
return new_component;
return GeometryComponentPtr(new_component);
}
void VolumeComponent::clear()

View File

@ -120,17 +120,17 @@ GeometryComponent &GeometrySet::get_component_for_write(GeometryComponent::Type
if (!component_ptr) {
/* If the component did not exist before, create a new one. */
component_ptr = GeometryComponent::create(component_type);
return *component_ptr;
}
if (component_ptr->is_mutable()) {
else if (component_ptr->is_mutable()) {
/* If the referenced component is already mutable, return it directly. */
component_ptr->tag_ensured_mutable();
return *component_ptr;
}
/* If the referenced component is shared, make a copy. The copy is not shared and is
* therefore mutable. */
component_ptr = GeometryComponentPtr(component_ptr->copy());
return *component_ptr;
else {
/* If the referenced component is shared, make a copy. The copy is not shared and is
* therefore mutable. */
component_ptr = component_ptr->copy();
}
return const_cast<GeometryComponent &>(*component_ptr);
}
GeometryComponent *GeometrySet::get_component_ptr(GeometryComponent::Type type)

View File

@ -31,6 +31,7 @@
#include "BLI_utildefines.h"
#include "BLI_alloca.h"
#include "BLI_array.hh"
#include "BLI_blenlib.h"
#include "BLI_ghash.h"
#include "BLI_linklist.h"
@ -52,7 +53,7 @@
#include "BKE_lib_id.h"
#include "BKE_lib_override.hh"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_main_namemap.hh"
#include "BKE_node.h"
@ -68,7 +69,7 @@
#include "atomic_ops.h"
#include "lib_intern.h"
#include "lib_intern.hh"
//#define DEBUG_TIME
@ -859,15 +860,10 @@ static void id_swap(Main *bmain,
/* Finalize remapping of internal references to self broken by swapping, if requested. */
if (do_self_remap) {
LinkNode ids{};
ids.next = nullptr;
ids.link = id_a;
BKE_libblock_relink_multiple(
bmain, &ids, ID_REMAP_TYPE_REMAP, remapper_id_a, self_remap_flags);
ids.link = id_b;
bmain, {id_a}, ID_REMAP_TYPE_REMAP, remapper_id_a, self_remap_flags);
BKE_libblock_relink_multiple(
bmain, &ids, ID_REMAP_TYPE_REMAP, remapper_id_b, self_remap_flags);
bmain, {id_b}, ID_REMAP_TYPE_REMAP, remapper_id_b, self_remap_flags);
}
if (input_remapper_id_a == nullptr && remapper_id_a != nullptr) {

View File

@ -20,6 +20,7 @@
#include "BLI_linklist.h"
#include "BLI_listbase.h"
#include "BLI_vector.hh"
#include "BKE_anim_data.h"
#include "BKE_asset.hh"
@ -29,11 +30,11 @@
#include "BKE_layer.h"
#include "BKE_lib_id.h"
#include "BKE_lib_override.hh"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_main_namemap.hh"
#include "lib_intern.h"
#include "lib_intern.hh"
#include "DEG_depsgraph.hh"
@ -302,10 +303,10 @@ static size_t id_delete(Main *bmain,
}
/* Since we removed IDs from Main, their own usages of other IDs need to be removed 'manually'. */
LinkNode *cleanup_ids = nullptr;
blender::Vector<ID *> cleanup_ids;
for (ID *id = static_cast<ID *>(tagged_deleted_ids.first); id;
id = static_cast<ID *>(id->next)) {
BLI_linklist_prepend(&cleanup_ids, id);
cleanup_ids.append(id);
}
BKE_libblock_relink_multiple(bmain,
cleanup_ids,
@ -313,9 +314,8 @@ static size_t id_delete(Main *bmain,
id_remapper,
ID_REMAP_FORCE_INTERNAL_RUNTIME_POINTERS |
ID_REMAP_SKIP_USER_CLEAR);
cleanup_ids.clear();
BKE_id_remapper_free(id_remapper);
BLI_linklist_free(cleanup_ids, nullptr);
BKE_layer_collection_resync_allow();
BKE_main_collection_sync_remap(bmain);

View File

@ -6,7 +6,7 @@
#include "BKE_idtype.h"
#include "BKE_lib_id.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "MEM_guardedalloc.h"
@ -136,8 +136,6 @@ static const blender::bke::id::remapper::IDRemapper *unwrap(const IDRemapper *re
static_cast<const void *>(remapper));
}
extern "C" {
IDRemapper *BKE_id_remapper_create()
{
blender::bke::id::remapper::IDRemapper *remapper =
@ -251,4 +249,3 @@ void BKE_id_remapper_print(const IDRemapper *id_remapper)
{
BKE_id_remapper_iter(id_remapper, id_remapper_print_item_cb, nullptr);
}
}

View File

@ -4,7 +4,7 @@
#include "testing/testing.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BLI_string.h"

View File

@ -8,11 +8,7 @@
#pragma once
#include "BKE_lib_remap.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "BKE_lib_remap.hh"
extern BKE_library_free_notifier_reference_cb free_notifier_reference_cb;
@ -21,11 +17,4 @@ extern BKE_library_remap_editor_id_reference_cb remap_editor_id_reference_cb;
struct ID;
struct Main;
void lib_id_copy_ensure_local(struct Main *bmain,
const struct ID *old_id,
struct ID *new_id,
const int flags);
#ifdef __cplusplus
}
#endif
void lib_id_copy_ensure_local(Main *bmain, const ID *old_id, ID *new_id, const int flags);

View File

@ -37,7 +37,7 @@
#include "BKE_lib_id.h"
#include "BKE_lib_override.hh"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_main_namemap.hh"
#include "BKE_node.hh"
@ -54,6 +54,7 @@
#include "BLI_string.h"
#include "BLI_task.h"
#include "BLI_utildefines.h"
#include "BLI_vector.hh"
#include "PIL_time.h"
@ -64,7 +65,7 @@
#include "atomic_ops.h"
#include "lib_intern.h"
#include "lib_intern.hh"
#define OVERRIDE_AUTO_CHECK_DELAY 0.2 /* 200ms between auto-override checks. */
//#define DEBUG_OVERRIDE_TIMEIT
@ -643,7 +644,7 @@ bool BKE_lib_override_library_create_from_tag(Main *bmain,
}
BLI_assert(id_hierarchy_root != nullptr);
LinkNode *relinked_ids = nullptr;
blender::Vector<ID *> relinked_ids;
IDRemapper *id_remapper = BKE_id_remapper_create();
/* Still checking the whole Main, that way we can tag other local IDs as needing to be
* remapped to use newly created overriding IDs, if needed. */
@ -677,7 +678,7 @@ bool BKE_lib_override_library_create_from_tag(Main *bmain,
(!ID_IS_OVERRIDE_LIBRARY_REAL(owner_id) ||
owner_id->override_library->hierarchy_root == id_hierarchy_root))
{
BLI_linklist_prepend(&relinked_ids, other_id);
relinked_ids.append(other_id);
}
if (ID_IS_OVERRIDE_LIBRARY_REAL(other_id) &&
@ -718,7 +719,7 @@ bool BKE_lib_override_library_create_from_tag(Main *bmain,
ID_REMAP_SKIP_OVERRIDE_LIBRARY | ID_REMAP_FORCE_USER_REFCOUNT);
BKE_id_remapper_free(id_remapper);
BLI_linklist_free(relinked_ids, nullptr);
relinked_ids.clear();
}
else {
/* We need to cleanup potentially already created data. */
@ -1837,7 +1838,7 @@ static void lib_override_library_remap(Main *bmain,
{
ID *id;
IDRemapper *remapper = BKE_id_remapper_create();
LinkNode *nomain_ids = nullptr;
blender::Vector<ID *> nomain_ids;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
if (id->tag & LIB_TAG_DOIT && id->newid != nullptr && id->lib == id_root_reference->lib) {
@ -1861,7 +1862,7 @@ static void lib_override_library_remap(Main *bmain,
continue;
}
BLI_linklist_prepend(&nomain_ids, id_override_old_iter);
nomain_ids.append(id_override_old_iter);
}
/* Remap all IDs to use the new override. */
@ -1872,7 +1873,6 @@ static void lib_override_library_remap(Main *bmain,
remapper,
ID_REMAP_FORCE_USER_REFCOUNT | ID_REMAP_FORCE_NEVER_NULL_USAGE);
BKE_id_remapper_free(remapper);
BLI_linklist_free(nomain_ids, nullptr);
}
/**
@ -2280,7 +2280,7 @@ static bool lib_override_library_resync(Main *bmain,
BKE_main_collection_sync(bmain);
LinkNode *id_override_old_list = nullptr;
blender::Vector<ID *> id_override_old_vector;
/* We need to apply override rules in a separate loop, after all ID pointers have been properly
* remapped, and all new local override IDs have gotten their proper original names, otherwise
@ -2398,7 +2398,7 @@ static bool lib_override_library_resync(Main *bmain,
}
}
BLI_linklist_prepend(&id_override_old_list, id_override_old);
id_override_old_vector.append(id_override_old);
}
FOREACH_MAIN_ID_END;
@ -2407,17 +2407,15 @@ static bool lib_override_library_resync(Main *bmain,
* This is necessary in case said old ID is not in Main anymore. */
IDRemapper *id_remapper = BKE_id_remapper_create();
BKE_libblock_relink_multiple(bmain,
id_override_old_list,
id_override_old_vector,
ID_REMAP_TYPE_CLEANUP,
id_remapper,
ID_REMAP_FORCE_USER_REFCOUNT | ID_REMAP_FORCE_NEVER_NULL_USAGE);
for (LinkNode *ln_iter = id_override_old_list; ln_iter != nullptr; ln_iter = ln_iter->next) {
ID *id_override_old = static_cast<ID *>(ln_iter->link);
for (ID *id_override_old : id_override_old_vector) {
id_override_old->tag |= LIB_TAG_NO_USER_REFCOUNT;
}
id_override_old_vector.clear();
BKE_id_remapper_free(id_remapper);
BLI_linklist_free(id_override_old_list, nullptr);
id_override_old_list = nullptr;
/* Delete old override IDs.
* Note that we have to use tagged group deletion here, since ID deletion also uses
@ -2442,7 +2440,7 @@ static bool lib_override_library_resync(Main *bmain,
}
else {
/* Defer tagging. */
BLI_linklist_prepend(&id_override_old_list, id_override_old);
id_override_old_vector.append(id_override_old);
}
}
}
@ -2507,11 +2505,10 @@ static bool lib_override_library_resync(Main *bmain,
FOREACH_MAIN_ID_END;
/* Finalize tagging old liboverrides for deletion. */
for (LinkNode *ln_iter = id_override_old_list; ln_iter != nullptr; ln_iter = ln_iter->next) {
ID *id_override_old = static_cast<ID *>(ln_iter->link);
for (ID *id_override_old : id_override_old_vector) {
id_override_old->tag |= LIB_TAG_DOIT;
}
BLI_linklist_free(id_override_old_list, nullptr);
id_override_old_vector.clear();
/* Cleanup, many pointers in this GHash are already invalid now. */
BLI_ghash_free(linkedref_to_old_override, nullptr, nullptr);

View File

@ -10,6 +10,7 @@
#include "CLG_log.h"
#include "BLI_array.hh"
#include "BLI_linklist.h"
#include "BLI_utildefines.h"
@ -22,7 +23,7 @@
#include "BKE_layer.h"
#include "BKE_lib_id.h"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_material.h"
#include "BKE_mball.h"
@ -35,7 +36,7 @@
#include "DEG_depsgraph.hh"
#include "DEG_depsgraph_build.hh"
#include "lib_intern.h" /* own include */
#include "lib_intern.hh" /* own include */
static CLG_LogRef LOG = {"bke.lib_remap"};
@ -733,23 +734,21 @@ void BKE_libblock_unlink(Main *bmain,
struct LibBlockRelinkMultipleUserData {
Main *bmain;
LinkNode *ids;
blender::Span<ID *> ids;
};
static void libblock_relink_foreach_idpair_cb(ID *old_id, ID *new_id, void *user_data)
{
LibBlockRelinkMultipleUserData *data = static_cast<LibBlockRelinkMultipleUserData *>(user_data);
Main *bmain = data->bmain;
LinkNode *ids = data->ids;
const blender::Span<ID *> ids = data->ids;
BLI_assert(old_id != nullptr);
BLI_assert((new_id == nullptr) || GS(old_id->name) == GS(new_id->name));
BLI_assert(old_id != new_id);
bool is_object_update_processed = false;
for (LinkNode *ln_iter = ids; ln_iter != nullptr; ln_iter = ln_iter->next) {
ID *id_iter = static_cast<ID *>(ln_iter->link);
for (ID *id_iter : ids) {
/* Some after-process updates.
* This is a bit ugly, but cannot see a way to avoid it.
* Maybe we should do a per-ID callback for this instead?
@ -793,15 +792,14 @@ static void libblock_relink_foreach_idpair_cb(ID *old_id, ID *new_id, void *user
}
void BKE_libblock_relink_multiple(Main *bmain,
LinkNode *ids,
const blender::Span<ID *> ids,
const eIDRemapType remap_type,
IDRemapper *id_remapper,
const int remap_flags)
{
BLI_assert(remap_type == ID_REMAP_TYPE_REMAP || BKE_id_remapper_is_empty(id_remapper));
for (LinkNode *ln_iter = ids; ln_iter != nullptr; ln_iter = ln_iter->next) {
ID *id_iter = static_cast<ID *>(ln_iter->link);
for (ID *id_iter : ids) {
libblock_remap_data(bmain, id_iter, remap_type, id_remapper, remap_flags);
}
@ -811,18 +809,14 @@ void BKE_libblock_relink_multiple(Main *bmain,
switch (remap_type) {
case ID_REMAP_TYPE_REMAP: {
LibBlockRelinkMultipleUserData user_data = {nullptr};
user_data.bmain = bmain;
user_data.ids = ids;
LibBlockRelinkMultipleUserData user_data = {bmain, ids};
BKE_id_remapper_iter(id_remapper, libblock_relink_foreach_idpair_cb, &user_data);
break;
}
case ID_REMAP_TYPE_CLEANUP: {
bool is_object_update_processed = false;
for (LinkNode *ln_iter = ids; ln_iter != nullptr; ln_iter = ln_iter->next) {
ID *id_iter = static_cast<ID *>(ln_iter->link);
for (ID *id_iter : ids) {
switch (GS(id_iter->name)) {
case ID_SCE:
case ID_GR: {
@ -868,9 +862,7 @@ void BKE_libblock_relink_ex(
ID *id = static_cast<ID *>(idv);
ID *old_id = static_cast<ID *>(old_idv);
ID *new_id = static_cast<ID *>(new_idv);
LinkNode ids{};
ids.next = nullptr;
ids.link = idv;
blender::Array<ID *> ids = {id};
/* No need to lock here, we are only affecting given ID, not bmain database. */
IDRemapper *id_remapper = BKE_id_remapper_create();
@ -888,13 +880,13 @@ void BKE_libblock_relink_ex(
remap_type = ID_REMAP_TYPE_CLEANUP;
}
BKE_libblock_relink_multiple(bmain, &ids, remap_type, id_remapper, remap_flags);
BKE_libblock_relink_multiple(bmain, ids, remap_type, id_remapper, remap_flags);
BKE_id_remapper_free(id_remapper);
}
struct RelinkToNewIDData {
LinkNode *ids;
blender::Vector<ID *> ids;
IDRemapper *id_remapper;
};
@ -936,7 +928,7 @@ static void libblock_relink_to_newid_prepare_data(Main *bmain,
}
id->tag &= ~LIB_TAG_NEW;
BLI_linklist_prepend(&relink_data->ids, id);
relink_data->ids.append(id);
BKE_library_foreach_ID_link(bmain, id, id_relink_to_newid_looper, relink_data, 0);
}
@ -949,7 +941,6 @@ void BKE_libblock_relink_to_newid(Main *bmain, ID *id, const int remap_flag)
BLI_assert(bmain->relations == nullptr);
RelinkToNewIDData relink_data{};
relink_data.ids = nullptr;
relink_data.id_remapper = BKE_id_remapper_create();
libblock_relink_to_newid_prepare_data(bmain, id, &relink_data);
@ -960,5 +951,4 @@ void BKE_libblock_relink_to_newid(Main *bmain, ID *id, const int remap_flag)
bmain, relink_data.ids, ID_REMAP_TYPE_REMAP, relink_data.id_remapper, remap_flag_final);
BKE_id_remapper_free(relink_data.id_remapper);
BLI_linklist_free(relink_data.ids, nullptr);
}
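Editor's note on the recurring pattern in this commit: call sites stop building `LinkNode` chains and instead pass a `blender::Span<ID *>`, which a `Vector` or a braced list converts to implicitly. A self-contained sketch of why that conversion removes the boilerplate, with a toy `Span` standing in for `blender::Span` (not the real implementation):

#include <cstddef>
#include <cstdio>
#include <initializer_list>
#include <vector>

/* Toy non-owning view, standing in for blender::Span<T>. */
template<typename T> struct Span {
  const T *data = nullptr;
  std::size_t size = 0;
  Span(const std::vector<T> &vec) : data(vec.data()), size(vec.size()) {}
  Span(std::initializer_list<T> list) : data(list.begin()), size(list.size()) {}
  const T *begin() const { return data; }
  const T *end() const { return data + size; }
};

static void relink_multiple(Span<int> ids)
{
  for (const int id : ids) {
    std::printf("relink %d\n", id);
  }
}

int main()
{
  std::vector<int> ids = {1, 2, 3};
  relink_multiple(ids);  /* A vector converts implicitly to the view. */
  relink_multiple({42}); /* A single item needs no one-element linked list. */
}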

View File

@ -19,7 +19,7 @@
#include "BKE_global.h"
#include "BKE_idtype.h"
#include "BKE_lib_id.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_mesh.hh"
#include "BKE_node.hh"

View File

@ -0,0 +1,256 @@
/* SPDX-FileCopyrightText: 2020 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "testing/testing.h"
#include "MEM_guardedalloc.h"
#include "CLG_log.h"
#include "BLI_listbase.h"
#include "BLI_string.h"
#include "BKE_collection.h"
#include "BKE_idtype.h"
#include "BKE_lib_id.h"
#include "BKE_library.h"
#include "BKE_main.h"
#include "BKE_main_namemap.hh"
#include "DNA_ID.h"
#include "DNA_collection_types.h"
#include "DNA_object_types.h"
namespace blender::bke::tests {
class BMainTest : public testing::Test {
public:
static void SetUpTestSuite()
{
CLG_init();
BKE_idtype_init();
}
static void TearDownTestSuite()
{
CLG_exit();
}
};
class BMainMergeTest : public BMainTest {
public:
void SetUp() override
{
bmain_src = BKE_main_new();
bmain_dst = BKE_main_new();
}
void TearDown() override
{
if (bmain_src) {
BKE_main_free(bmain_src);
}
if (bmain_dst) {
BKE_main_free(bmain_dst);
}
}
Main *bmain_src;
Main *bmain_dst;
};
TEST_F(BMainMergeTest, basics)
{
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_dst->libraries));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_dst->collections));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_dst->objects));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_src->libraries));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_src->collections));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_src->objects));
BKE_id_new(bmain_dst, ID_GR, "Coll_dst");
Collection *coll = static_cast<Collection *>(BKE_id_new(bmain_src, ID_GR, "Coll_src"));
Object *ob = static_cast<Object *>(BKE_id_new(bmain_src, ID_OB, "Ob_src"));
BKE_collection_object_add(bmain_src, coll, ob);
EXPECT_EQ(1, BLI_listbase_count(&bmain_dst->collections));
EXPECT_EQ(0, BLI_listbase_count(&bmain_dst->objects));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->objects));
BKE_main_merge(bmain_dst, &bmain_src, nullptr);
EXPECT_EQ(2, BLI_listbase_count(&bmain_dst->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_dst->objects));
EXPECT_EQ(nullptr, bmain_src);
bmain_src = BKE_main_new();
Collection *coll_2 = static_cast<Collection *>(BKE_id_new(bmain_src, ID_GR, "Coll_src_2"));
Object *ob_2 = static_cast<Object *>(BKE_id_new(bmain_src, ID_OB, "Ob_src"));
BKE_collection_object_add(bmain_src, coll_2, ob_2);
EXPECT_EQ(2, BLI_listbase_count(&bmain_dst->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_dst->objects));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->objects));
BKE_main_merge(bmain_dst, &bmain_src, nullptr);
/* The second `Ob_src` object in `bmain_src` cannot be merged in `bmain_dst`, since its name
* would collide with the first object. */
EXPECT_EQ(3, BLI_listbase_count(&bmain_dst->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_dst->objects));
EXPECT_EQ(nullptr, bmain_src);
/* `Coll_src_2` should have been remapped to using `Ob_src` in `bmain_dst`, instead of `Ob_src`
* in `bmain_src`. */
EXPECT_EQ(1, BLI_listbase_count(&coll_2->gobject));
EXPECT_EQ(ob, static_cast<CollectionObject *>(coll_2->gobject.first)->ob);
}
TEST_F(BMainMergeTest, linked_data)
{
constexpr const char *DST_PATH = "/tmp/dst/dst.blend";
constexpr const char *SRC_PATH = "/tmp/src/src.blend";
constexpr const char *LIB_PATH = "/tmp/lib/lib.blend";
constexpr const char *LIB_PATH_RELATIVE = "//lib/lib.blend";
constexpr const char *LIB_PATH_RELATIVE_ABS_SRC = "/tmp/src/lib/lib.blend";
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_dst->libraries));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_dst->collections));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_dst->objects));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_src->libraries));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_src->collections));
EXPECT_TRUE(BLI_listbase_is_empty(&bmain_src->objects));
BLI_strncpy(bmain_dst->filepath, DST_PATH, sizeof(bmain_dst->filepath));
BLI_strncpy(bmain_src->filepath, SRC_PATH, sizeof(bmain_src->filepath));
BKE_id_new(bmain_dst, ID_GR, "Coll_dst");
Collection *coll_1 = static_cast<Collection *>(BKE_id_new(bmain_src, ID_GR, "Coll_src"));
Object *ob_1 = static_cast<Object *>(BKE_id_new(bmain_src, ID_OB, "Ob_src"));
BKE_collection_object_add(bmain_src, coll_1, ob_1);
Library *lib_src_1 = static_cast<Library *>(BKE_id_new(bmain_src, ID_LI, LIB_PATH));
BKE_library_filepath_set(bmain_src, lib_src_1, LIB_PATH);
ob_1->id.lib = lib_src_1;
EXPECT_EQ(1, BLI_listbase_count(&bmain_dst->collections));
EXPECT_EQ(0, BLI_listbase_count(&bmain_dst->objects));
EXPECT_EQ(0, BLI_listbase_count(&bmain_dst->libraries));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->objects));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->libraries));
BKE_main_merge(bmain_dst, &bmain_src, nullptr);
EXPECT_EQ(2, BLI_listbase_count(&bmain_dst->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_dst->objects));
EXPECT_EQ(1, BLI_listbase_count(&bmain_dst->libraries));
EXPECT_EQ(ob_1, bmain_dst->objects.first);
EXPECT_EQ(lib_src_1, bmain_dst->libraries.first);
EXPECT_EQ(ob_1->id.lib, lib_src_1);
EXPECT_EQ(nullptr, bmain_src);
/* Try another merge with the same library path - the second library should be skipped, and the
* destination Main should still have only one library ID. */
bmain_src = BKE_main_new();
BLI_strncpy(bmain_src->filepath, SRC_PATH, sizeof(bmain_src->filepath));
Collection *coll_2 = static_cast<Collection *>(BKE_id_new(bmain_src, ID_GR, "Coll_src_2"));
Object *ob_2 = static_cast<Object *>(BKE_id_new(bmain_src, ID_OB, "Ob_src_2"));
BKE_collection_object_add(bmain_src, coll_2, ob_2);
Library *lib_src_2 = static_cast<Library *>(BKE_id_new(bmain_src, ID_LI, LIB_PATH));
BKE_library_filepath_set(bmain_src, lib_src_2, LIB_PATH);
std::cout << lib_src_1->filepath_abs << "\n";
std::cout << lib_src_2->filepath_abs << "\n";
ob_2->id.lib = lib_src_2;
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->objects));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->libraries));
BKE_main_merge(bmain_dst, &bmain_src, nullptr);
EXPECT_EQ(3, BLI_listbase_count(&bmain_dst->collections));
EXPECT_EQ(2, BLI_listbase_count(&bmain_dst->objects));
EXPECT_EQ(1, BLI_listbase_count(&bmain_dst->libraries));
EXPECT_EQ(ob_1, bmain_dst->objects.first);
EXPECT_EQ(ob_2, bmain_dst->objects.last);
EXPECT_EQ(lib_src_1, bmain_dst->libraries.first);
EXPECT_EQ(ob_1->id.lib, lib_src_1);
EXPECT_EQ(ob_2->id.lib, lib_src_1);
EXPECT_EQ(nullptr, bmain_src);
/* Use a relative library path. Since this is a different library, even though the object re-uses
* the same name, it should still be moved into `bmain_dst`. The library filepath should also be
* updated and become relative to the path of `bmain_dst` too. */
bmain_src = BKE_main_new();
BLI_strncpy(bmain_src->filepath, SRC_PATH, sizeof(bmain_src->filepath));
Collection *coll_3 = static_cast<Collection *>(BKE_id_new(bmain_src, ID_GR, "Coll_src_3"));
Object *ob_3 = static_cast<Object *>(BKE_id_new(bmain_src, ID_OB, "Ob_src"));
BKE_collection_object_add(bmain_src, coll_3, ob_3);
Library *lib_src_3 = static_cast<Library *>(BKE_id_new(bmain_src, ID_LI, LIB_PATH_RELATIVE));
BKE_library_filepath_set(bmain_src, lib_src_3, LIB_PATH_RELATIVE);
ob_3->id.lib = lib_src_3;
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->objects));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->libraries));
EXPECT_TRUE(STREQ(lib_src_3->filepath, LIB_PATH_RELATIVE));
EXPECT_TRUE(STREQ(lib_src_3->filepath_abs, LIB_PATH_RELATIVE_ABS_SRC));
BKE_main_merge(bmain_dst, &bmain_src, nullptr);
EXPECT_EQ(4, BLI_listbase_count(&bmain_dst->collections));
EXPECT_EQ(3, BLI_listbase_count(&bmain_dst->objects));
EXPECT_EQ(2, BLI_listbase_count(&bmain_dst->libraries));
EXPECT_EQ(ob_1, bmain_dst->objects.first);
EXPECT_EQ(ob_3, bmain_dst->objects.last);
EXPECT_EQ(lib_src_3, bmain_dst->libraries.first);
EXPECT_EQ(lib_src_1, bmain_dst->libraries.last);
EXPECT_EQ(ob_1->id.lib, lib_src_1);
EXPECT_EQ(ob_2->id.lib, lib_src_1);
EXPECT_EQ(ob_3->id.lib, lib_src_3);
EXPECT_FALSE(STREQ(lib_src_3->filepath, LIB_PATH_RELATIVE));
EXPECT_TRUE(STREQ(lib_src_3->filepath_abs, LIB_PATH_RELATIVE_ABS_SRC));
EXPECT_EQ(nullptr, bmain_src);
/* Try another merge, with the library path set to the path of the destination bmain. That source
* library should also be skipped, and the 'linked' object in the source bmain should become a
* local object in the destination bmain. */
bmain_src = BKE_main_new();
BLI_strncpy(bmain_src->filepath, SRC_PATH, sizeof(bmain_src->filepath));
Collection *coll_4 = static_cast<Collection *>(BKE_id_new(bmain_src, ID_GR, "Coll_src_4"));
Object *ob_4 = static_cast<Object *>(BKE_id_new(bmain_src, ID_OB, "Ob_src_4"));
BKE_collection_object_add(bmain_src, coll_4, ob_4);
Library *lib_src_4 = static_cast<Library *>(BKE_id_new(bmain_src, ID_LI, DST_PATH));
BKE_library_filepath_set(bmain_src, lib_src_4, DST_PATH);
ob_4->id.lib = lib_src_4;
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->collections));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->objects));
EXPECT_EQ(1, BLI_listbase_count(&bmain_src->libraries));
BKE_main_merge(bmain_dst, &bmain_src, nullptr);
EXPECT_EQ(5, BLI_listbase_count(&bmain_dst->collections));
EXPECT_EQ(4, BLI_listbase_count(&bmain_dst->objects));
EXPECT_EQ(2, BLI_listbase_count(&bmain_dst->libraries));
EXPECT_EQ(ob_1, bmain_dst->objects.first);
/* `ob_4` is now local in `bmain_dst`, so should come before linked ones. */
EXPECT_EQ(ob_4, ob_1->id.prev);
EXPECT_EQ(lib_src_3, bmain_dst->libraries.first);
EXPECT_EQ(lib_src_1, bmain_dst->libraries.last);
EXPECT_EQ(ob_1->id.lib, lib_src_1);
EXPECT_EQ(ob_2->id.lib, lib_src_1);
EXPECT_EQ(ob_3->id.lib, lib_src_3);
EXPECT_EQ(ob_4->id.lib, nullptr);
EXPECT_EQ(nullptr, bmain_src);
}
} // namespace blender::bke::tests

View File

@ -396,7 +396,7 @@ namespace blender::bke::mesh {
static CornerNormalSpace lnor_space_define(const float3 &lnor,
const float3 &vec_ref,
float3 vec_other,
const float3 &vec_other,
const Span<float3> edge_vectors)
{
CornerNormalSpace lnor_space{};
@ -440,13 +440,14 @@ static CornerNormalSpace lnor_space_define(const float3 &lnor,
lnor_space.vec_ortho = math::normalize(math::cross(lnor, lnor_space.vec_ref));
/* Project vec_other on lnor's ortho plane. */
vec_other = math::normalize(vec_other - lnor * dtp_other);
const float3 vec_other_proj = math::normalize(vec_other - lnor * dtp_other);
/* Beta is angle between ref_vec and other_vec, around lnor. */
const float dtp = math::dot(lnor_space.vec_ref, vec_other);
const float dtp = math::dot(lnor_space.vec_ref, vec_other_proj);
if (LIKELY(dtp < LNOR_SPACE_TRIGO_THRESHOLD)) {
const float beta = math::safe_acos_approx(dtp);
lnor_space.ref_beta = (math::dot(lnor_space.vec_ortho, vec_other) < 0.0f) ? pi2 - beta : beta;
lnor_space.ref_beta = (math::dot(lnor_space.vec_ortho, vec_other_proj) < 0.0f) ? pi2 - beta :
beta;
}
else {
lnor_space.ref_beta = pi2;
@ -459,8 +460,8 @@ static CornerNormalSpace lnor_space_define(const float3 &lnor,
void BKE_lnor_space_define(MLoopNorSpace *lnor_space,
const float lnor[3],
float vec_ref[3],
float vec_other[3],
const float vec_ref[3],
const float vec_other[3],
const blender::Span<blender::float3> edge_vectors)
{
using namespace blender::bke::mesh;
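Editor's note on the math preserved by this change: `vec_other_proj` is the projection of `vec_other` onto the plane orthogonal to the (unit) corner normal `lnor`, i.e. the component along `lnor` is removed before the angle `beta` is measured. A self-contained sketch with a minimal hypothetical vector type (Blender's real code uses `blender::math` on `float3`):

#include <cassert>
#include <cmath>

struct V3 { float x, y, z; }; /* Minimal stand-in for float3. */
static V3 operator-(V3 a, V3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static V3 operator*(V3 a, float s) { return {a.x * s, a.y * s, a.z * s}; }
static float dot(V3 a, V3 b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
static V3 normalize(V3 a) { return a * (1.0f / std::sqrt(dot(a, a))); }

int main()
{
  const V3 lnor = {0.0f, 0.0f, 1.0f}; /* Unit corner normal. */
  const V3 vec_other = {1.0f, 0.0f, 1.0f};
  /* Same construction as `vec_other_proj` above: subtract the component
   * along `lnor`, then re-normalize within the orthogonal plane. */
  const V3 proj = normalize(vec_other - lnor * dot(lnor, vec_other));
  assert(std::fabs(dot(proj, lnor)) < 1e-6f); /* Lies in the ortho plane. */
}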

View File

@ -102,7 +102,7 @@
#include "BKE_layer.h"
#include "BKE_lib_id.h"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_light.h"
#include "BKE_lightprobe.h"
#include "BKE_linestyle.h"

View File

@ -1940,18 +1940,21 @@ static bool find_rna_property_rgba(PointerRNA *id_ptr, const char *name, float r
return false;
}
static bool find_rna_property_rgba(ID *id, const char *name, float r_data[4])
static bool find_rna_property_rgba(const ID *id, const char *name, float r_data[4])
{
PointerRNA ptr = RNA_id_pointer_create(id);
PointerRNA ptr = RNA_id_pointer_create(const_cast<ID *>(id));
return find_rna_property_rgba(&ptr, name, r_data);
}
bool BKE_object_dupli_find_rgba_attribute(
Object *ob, DupliObject *dupli, Object *dupli_parent, const char *name, float r_value[4])
bool BKE_object_dupli_find_rgba_attribute(const Object *ob,
const DupliObject *dupli,
const Object *dupli_parent,
const char *name,
float r_value[4])
{
/* Check the dupli particle system. */
if (dupli && dupli->particle_system) {
ParticleSettings *settings = dupli->particle_system->part;
const ParticleSettings *settings = dupli->particle_system->part;
if (find_rna_property_rgba(&settings->id, name, r_value)) {
return true;
@ -1975,7 +1978,7 @@ bool BKE_object_dupli_find_rgba_attribute(
}
/* Check the main object data (e.g. mesh). */
if (ob->data && find_rna_property_rgba((ID *)ob->data, name, r_value)) {
if (ob->data && find_rna_property_rgba((const ID *)ob->data, name, r_value)) {
return true;
}
}
@ -1984,13 +1987,14 @@ bool BKE_object_dupli_find_rgba_attribute(
return false;
}
bool BKE_view_layer_find_rgba_attribute(Scene *scene,
ViewLayer *layer,
bool BKE_view_layer_find_rgba_attribute(const Scene *scene,
const ViewLayer *layer,
const char *name,
float r_value[4])
{
if (layer) {
PointerRNA layer_ptr = RNA_pointer_create(&scene->id, &RNA_ViewLayer, layer);
PointerRNA layer_ptr = RNA_pointer_create(
&const_cast<ID &>(scene->id), &RNA_ViewLayer, const_cast<ViewLayer *>(layer));
if (find_rna_property_rgba(&layer_ptr, name, r_value)) {
return true;

View File

@ -1750,7 +1750,6 @@ static void sculpt_update_object(
UNUSED_VARS_NDEBUG(pbvh);
BKE_pbvh_subdiv_cgg_set(ss->pbvh, ss->subdiv_ccg);
BKE_pbvh_update_hide_attributes_from_mesh(ss->pbvh);
sculpt_attribute_update_refs(ob);
sculpt_update_persistent_base(ob);
@ -2215,14 +2214,7 @@ static PBVH *build_pbvh_from_ccg(Object *ob, SubdivCCG *subdiv_ccg)
Mesh *base_mesh = BKE_mesh_from_object(ob);
BKE_sculpt_sync_face_visibility_to_grids(base_mesh, subdiv_ccg);
BKE_pbvh_build_grids(pbvh,
subdiv_ccg->grids,
&key,
subdiv_ccg->grid_to_face_map,
subdiv_ccg->grid_flag_mats,
subdiv_ccg->grid_hidden,
base_mesh,
subdiv_ccg);
BKE_pbvh_build_grids(pbvh, &key, base_mesh, subdiv_ccg);
return pbvh;
}
@ -2304,13 +2296,7 @@ void BKE_sculpt_bvh_update_from_ccg(PBVH *pbvh, SubdivCCG *subdiv_ccg)
{
CCGKey key;
BKE_subdiv_ccg_key_top_level(key, *subdiv_ccg);
BKE_pbvh_grids_update(pbvh,
subdiv_ccg->grids,
subdiv_ccg->grid_to_face_map,
subdiv_ccg->grid_flag_mats,
subdiv_ccg->grid_hidden,
&key);
BKE_pbvh_grids_update(pbvh, &key);
}
bool BKE_sculptsession_use_pbvh_draw(const Object *ob, const RegionView3D *rv3d)

File diff suppressed because it is too large

View File

@ -342,7 +342,7 @@ static void pbvh_bmesh_node_split(PBVH *pbvh, const Span<BBC> bbc_array, int nod
n->layer_disp = nullptr;
if (n->draw_batches) {
DRW_pbvh_node_free(n->draw_batches);
blender::draw::pbvh::node_free(n->draw_batches);
}
n->flag &= ~PBVH_Leaf;

View File

@ -16,6 +16,10 @@
* \ingroup bke
*/
namespace blender::draw::pbvh {
struct PBVHBatches;
}
struct PBVHGPUFormat;
struct MLoopTri;
struct BMVert;
@ -35,7 +39,7 @@ struct BBC {
* union'd structs */
struct PBVHNode {
/* Opaque handle for drawing code */
PBVHBatches *draw_batches = nullptr;
blender::draw::pbvh::PBVHBatches *draw_batches = nullptr;
/* Voxel bounds */
BB vb = {};
@ -164,24 +168,16 @@ struct PBVH {
blender::Span<blender::float3> vert_normals;
blender::Span<blender::float3> face_normals;
blender::OffsetIndices<int> faces;
bool *hide_vert;
bool *hide_poly;
/** Only valid for polygon meshes. */
blender::OffsetIndices<int> faces;
blender::Span<int> corner_verts;
/* Owned by the #PBVH, because after deformations they have to be recomputed. */
blender::Array<MLoopTri> looptri;
blender::Span<int> looptri_faces;
CustomData *vert_data;
CustomData *loop_data;
CustomData *face_data;
/* Grid Data */
CCGKey gridkey;
blender::Span<CCGElem *> grids;
blender::Span<int> grid_to_face_map;
blender::Span<DMFlagMat> grid_flag_mats;
blender::Span<BLI_bitmap *> grid_hidden;
SubdivCCG *subdiv_ccg;
/* Used during BVH build and later to mark that a vertex needs to update
* (its normal must be recalculated). */
@ -204,7 +200,6 @@ struct PBVH {
int num_planes;
BMLog *bm_log;
SubdivCCG *subdiv_ccg;
blender::GroupedSpan<int> pmap;
@ -291,4 +286,4 @@ void pbvh_bmesh_normals_update(blender::Span<PBVHNode *> nodes);
void pbvh_node_pixels_free(PBVHNode *node);
void pbvh_pixels_free(PBVH *pbvh);
void pbvh_free_draw_buffers(PBVH *pbvh, PBVHNode *node);
void pbvh_free_draw_buffers(PBVH &pbvh, PBVHNode *node);

View File

@ -667,7 +667,8 @@ static bool update_pixels(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image
const AttributeAccessor attributes = mesh->attributes();
const VArraySpan uv_map = *attributes.lookup<float2>(active_uv_name, ATTR_DOMAIN_CORNER);
uv_islands::MeshData mesh_data(pbvh->looptri, pbvh->corner_verts, uv_map, pbvh->vert_positions);
uv_islands::MeshData mesh_data(
pbvh->looptri, mesh->corner_verts(), uv_map, pbvh->vert_positions);
uv_islands::UVIslands islands(mesh_data);
uv_islands::UVIslandsMask uv_masks;

View File

@ -31,7 +31,7 @@
#include "BKE_idtype.h"
#include "BKE_lib_id.h"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_mesh_wrapper.hh"
#include "BKE_modifier.hh"

View File

@ -74,7 +74,7 @@
#include "BKE_layer.h"
#include "BKE_lib_id.h"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_linestyle.h"
#include "BKE_main.h"
#include "BKE_mask.h"

View File

@ -3,7 +3,7 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_viewer_path.hh"
#include "BLI_index_range.hh"

View File

@ -36,7 +36,7 @@
#include "BKE_idtype.h"
#include "BKE_lib_id.h"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h"
#include "BKE_modifier.hh"
#include "BKE_object.hh"

View File

@ -52,18 +52,36 @@ int BLI_copy(const char *path_src, const char *path_dst) ATTR_NONNULL();
int BLI_path_move(const char *path_src, const char *path_dst) ATTR_NONNULL();
/**
* Rename a file or directory.
* Rename a file or directory, unless `to` already exists.
*
* \note This matches Windows `rename` logic, _not_ the Unix one. It does not allow replacing an
* existing target. Use #BLI_rename_overwrite instead if an existing file should be replaced.
*
* \param from: The path to rename from (return failure if it does not exist).
* \param to: The destination path.
* \return zero on success (matching 'rename' behavior).
*/
int BLI_rename(const char *from, const char *to);
int BLI_rename(const char *from, const char *to) ATTR_NONNULL();
/**
* Rename a file or directory.
* Rename a file or directory, replacing the target `to` path if it exists.
*
* \warning It's up to the caller to ensure `from` & `to` don't point to the same file
* as this will result in `to` being deleted to make room for `from`
* (which will then also be deleted).
* \note This matches Unix `rename` logic. It does allow replacing an existing target. Use
* #BLI_rename instead if an existing file should never be replaced. However, if `to` is an
* existing, non-empty directory, the operation will fail.
*
* \note Behavior on Windows and Unix still differs when the target `to` exists and is opened by
* some process in the system:
* - On Unix, it will typically succeed
* (see https://man7.org/linux/man-pages/man2/rename.2.html for details).
* - On Windows, it will always fail
* (see https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-movefileexw for
* details).
*
* \warning Due to implementation limitations on Windows, when the paths point to directories it
* is up to the caller to ensure that `from` and `to` are not the same directory. Since `to` is
* deleted to make room for `from`, passing the same directory would result in `from` being
* deleted as well.
*
* See #BLI_path_move to move directories.
*
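Editor's note: to make the contrast between the two functions concrete, a small hedged usage sketch (paths are illustrative; both functions return zero on success, per the docs above):

#include "BLI_fileops.h"

static void rename_example()
{
  /* No-replace semantics: fails if "/tmp/b.blend" already exists. */
  if (BLI_rename("/tmp/a.blend", "/tmp/b.blend") != 0) {
    /* Handle the failure, e.g. pick another name or report an error. */
  }
  /* Overwrite semantics: replaces an existing "/tmp/b.blend", but still
   * fails if it is a non-empty directory. */
  if (BLI_rename_overwrite("/tmp/a.blend", "/tmp/b.blend") != 0) {
    /* Handle the failure. */
  }
}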

View File

@ -20,12 +20,12 @@ namespace blender {
*/
template<typename T> class ImplicitSharingPtr {
private:
T *data_ = nullptr;
const T *data_ = nullptr;
public:
ImplicitSharingPtr() = default;
explicit ImplicitSharingPtr(T *data) : data_(data) {}
explicit ImplicitSharingPtr(const T *data) : data_(data) {}
/* Implicit conversion from nullptr. */
ImplicitSharingPtr(std::nullptr_t) : data_(nullptr) {}
@ -69,24 +69,12 @@ template<typename T> class ImplicitSharingPtr {
return *this;
}
T *operator->()
{
BLI_assert(data_ != nullptr);
return data_;
}
const T *operator->() const
{
BLI_assert(data_ != nullptr);
return data_;
}
T &operator*()
{
BLI_assert(data_ != nullptr);
return *data_;
}
const T &operator*() const
{
BLI_assert(data_ != nullptr);
@ -98,17 +86,12 @@ template<typename T> class ImplicitSharingPtr {
return data_ != nullptr;
}
T *get()
{
return data_;
}
const T *get() const
{
return data_;
}
T *release()
const T *release()
{
T *data = data_;
data_ = nullptr;
@ -134,14 +117,14 @@ template<typename T> class ImplicitSharingPtr {
BLI_STRUCT_EQUALITY_OPERATORS_1(ImplicitSharingPtr, data_)
private:
static void add_user(T *data)
static void add_user(const T *data)
{
if (data != nullptr) {
data->add_user();
}
}
static void remove_user_and_delete_if_last(T *data)
static void remove_user_and_delete_if_last(const T *data)
{
if (data != nullptr) {
data->remove_user_and_delete_if_last();
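Editor's note: with `data_` now `const`, the pointer only hands out read access, so the two methods it calls must be usable on const objects. A minimal hypothetical payload compatible with the interface shown above (Blender has its own sharing mix-in; this is only an illustration, and assumes heap allocation):

#include <atomic>

struct SharedData {
  /* `mutable` so the count can change through a const object. */
  mutable std::atomic<int> users_{1};

  void add_user() const
  {
    users_.fetch_add(1, std::memory_order_relaxed);
  }
  void remove_user_and_delete_if_last() const
  {
    if (users_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      delete this; /* Legal on a const object; requires heap allocation. */
    }
  }
};

Mutation then has to go through an explicit copy-on-write step, as the `SharedDataContainer` snippet later in this diff shows.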

View File

@ -580,7 +580,7 @@ bool isect_ray_plane_v3(const float ray_origin[3],
/**
* Check if a point is behind all planes.
*/
bool isect_point_planes_v3(float (*planes)[4], int totplane, const float p[3]);
bool isect_point_planes_v3(const float (*planes)[4], int totplane, const float p[3]);
/**
* Check if a point is in front all planes.
* Same as isect_point_planes_v3 but with planes facing the opposite direction.

View File

@ -208,73 +208,6 @@ void BLI_task_parallel_range(int start,
TaskParallelRangeFunc func,
const TaskParallelSettings *settings);
/**
* This data is shared between all tasks, its access needs thread lock or similar protection.
*/
typedef struct TaskParallelIteratorStateShared {
/* Maximum amount of items to acquire at once. */
int chunk_size;
/* Next item to be acquired. */
void *next_item;
/* Index of the next item to be acquired. */
int next_index;
/* Indicates that end of iteration has been reached. */
bool is_finished;
/* Helper lock to protect access to this data in iterator getter callback,
* can be ignored (if the callback implements its own protection system, using atomics e.g.).
* Will be NULL when iterator is actually processed in a single thread. */
SpinLock *spin_lock;
} TaskParallelIteratorStateShared;
typedef void (*TaskParallelIteratorIterFunc)(void *__restrict userdata,
const TaskParallelTLS *__restrict tls,
void **r_next_item,
int *r_next_index,
bool *r_do_abort);
typedef void (*TaskParallelIteratorFunc)(void *__restrict userdata,
void *item,
int index,
const TaskParallelTLS *__restrict tls);
/**
* This function allows to parallelize for loops using a generic iterator.
*
* \param userdata: Common userdata passed to all instances of \a func.
* \param iter_func: Callback function used to generate chunks of items.
* \param init_item: The initial item, if necessary (may be NULL if unused).
* \param init_index: The initial index.
* \param items_num: The total amount of items to iterate over
* (if unknown, set it to a negative number).
* \param func: Callback function.
* \param settings: See public API doc of TaskParallelSettings for description of all settings.
*
* \note Static scheduling is only available when \a items_num is >= 0.
*/
void BLI_task_parallel_iterator(void *userdata,
TaskParallelIteratorIterFunc iter_func,
void *init_item,
int init_index,
int items_num,
TaskParallelIteratorFunc func,
const TaskParallelSettings *settings);
/**
* This function allows to parallelize for loops over ListBase items.
*
* \param listbase: The double linked list to loop over.
* \param userdata: Common userdata passed to all instances of \a func.
* \param func: Callback function.
* \param settings: See public API doc of ParallelRangeSettings for description of all settings.
*
* \note There is no static scheduling here,
* since it would need another full loop over items to count them.
*/
void BLI_task_parallel_listbase(struct ListBase *listbase,
void *userdata,
TaskParallelIteratorFunc func,
const TaskParallelSettings *settings);
typedef struct MempoolIterData MempoolIterData;
typedef void (*TaskParallelMempoolFunc)(void *userdata,

View File

@ -435,15 +435,6 @@ bool BLI_file_ensure_parent_dir_exists(const char *filepath)
}
int BLI_rename(const char *from, const char *to)
{
#ifdef WIN32
return urename(from, to);
#else
return rename(from, to);
#endif
}
int BLI_rename_overwrite(const char *from, const char *to)
{
if (!BLI_exists(from)) {
return 1;
@ -462,13 +453,60 @@ int BLI_rename_overwrite(const char *from, const char *to)
* In this particular case we would not want to follow symbolic-links as well.
* Since this functionality isn't required at the moment, leave this as-is.
* Noting it as a potential improvement. */
/* NOTE: To avoid the concurrency 'time of check/time of use' (TOC/TOU) issue, this code attempts
* to use available solutions for an 'atomic' (file-system wise) rename operation, instead of
* first checking for an existing `to` target path, and then doing the rename operation if it
* did not exist at the time of the check.
*
* Windows (through `MoveFileExW`) by default does not allow replacing an existing path. It is
* however not clear whether its API is exposed to the TOC/TOU issue or not.
*
* On Linux or OSX, to keep operations atomic, special non-standardized variants of `rename` must
* be used, depending on the OS. Note that the operation may also fail when the file system does
* not support it, although in practice this should not be a problem on modern systems.
* - https://man7.org/linux/man-pages/man2/rename.2.html
* - https://www.unix.com/man-page/mojave/2/renameatx_np/
*
* BSD systems do not have any such thing currently, and are therefore exposed to the TOC/TOU
* issue. */
#ifdef WIN32
return urename(from, to, false);
#elif defined(__APPLE__)
return renamex_np(from, to, RENAME_EXCL);
#elif defined(__GLIBC_PREREQ) && __GLIBC_PREREQ(2, 28)
/* Most common Linux cases. */
return renameat2(AT_FDCWD, from, AT_FDCWD, to, RENAME_NOREPLACE);
#else
/* At least all BSD's currently. */
if (BLI_exists(to)) {
if (BLI_delete(to, false, false)) {
return 1;
}
return rename(from, to);
#endif
}
int BLI_rename_overwrite(const char *from, const char *to)
{
if (!BLI_exists(from)) {
return 1;
}
#ifdef WIN32
/* `urename` from `utfconv` intern utils uses `MoveFileExW`, which allows to replace an existing
* file, but not an existing directory, even if empty. This will only delete empty directories.
*/
if (BLI_is_dir(to)) {
if (BLI_delete(to, true, false)) {
return 1;
}
}
return BLI_rename(from, to);
return urename(from, to, true);
#else
return rename(from, to);
#endif
}
#ifdef WIN32
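
Editor's note: for readers unfamiliar with the Linux branch above, a standalone hedged sketch of `renameat2` with `RENAME_NOREPLACE` (requires glibc >= 2.28, file-system support, and `_GNU_SOURCE` defined - g++ defines it by default; file names are illustrative):

#include <cerrno>
#include <cstdio> /* renameat2(), RENAME_NOREPLACE (glibc >= 2.28). */
#include <fcntl.h> /* AT_FDCWD. */

int main()
{
  /* Fails atomically if "b.txt" exists: there is no window between an
   * existence check and the rename, hence no TOC/TOU race. */
  if (renameat2(AT_FDCWD, "a.txt", AT_FDCWD, "b.txt", RENAME_NOREPLACE) != 0) {
    return (errno == EEXIST) ? 2 : 1; /* EEXIST: destination already there. */
  }
  return 0;
}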

View File

@ -2054,7 +2054,7 @@ bool isect_ray_line_v3(const float ray_origin[3],
return true;
}
bool isect_point_planes_v3(float (*planes)[4], int totplane, const float p[3])
bool isect_point_planes_v3(const float (*planes)[4], int totplane, const float p[3])
{
int i;

View File

@ -37,298 +37,6 @@
/** \} */
/* -------------------------------------------------------------------- */
/** \name Generic Iteration
* \{ */
BLI_INLINE void task_parallel_calc_chunk_size(const TaskParallelSettings *settings,
const int items_num,
int tasks_num,
int *r_chunk_size)
{
int chunk_size = 0;
if (!settings->use_threading) {
/* Some users of this helper will still need a valid chunk size in case processing is not
* threaded. We can use a bigger one than in default threaded case then. */
chunk_size = 1024;
tasks_num = 1;
}
else if (settings->min_iter_per_thread > 0) {
/* Already set by user, no need to do anything here. */
chunk_size = settings->min_iter_per_thread;
}
else {
/* Multiplier used in heuristics below to define "optimal" chunk size.
* The idea here is to increase the chunk size to compensate for a rather measurable threading
* overhead caused by fetching tasks. With too many CPU threads we are starting
* to spend too much time in those overheads.
* First values are: 1 if tasks_num < 16;
* else 2 if tasks_num < 32;
* else 3 if tasks_num < 48;
* else 4 if tasks_num < 64;
* etc.
* NOTE: If we wanted to keep the 'power of two' multiplier, we'd need something like:
* 1 << max_ii(0, (int)(sizeof(int) * 8) - 1 - bitscan_reverse_i(tasks_num) - 3)
*/
const int tasks_num_factor = max_ii(1, tasks_num >> 3);
/* We could make that 'base' 32 number configurable in TaskParallelSettings too, or maybe just
* always use that heuristic using TaskParallelSettings.min_iter_per_thread as basis? */
chunk_size = 32 * tasks_num_factor;
/* Basic heuristic to avoid threading on low amount of items.
* We could make that limit configurable in settings too. */
if (items_num > 0 && items_num < max_ii(256, chunk_size * 2)) {
chunk_size = items_num;
}
}
BLI_assert(chunk_size > 0);
*r_chunk_size = chunk_size;
}
typedef struct TaskParallelIteratorState {
void *userdata;
TaskParallelIteratorIterFunc iter_func;
TaskParallelIteratorFunc func;
/* *** Data used to 'acquire' chunks of items from the iterator. *** */
/* Common data also passed to the generator callback. */
TaskParallelIteratorStateShared iter_shared;
/* Total number of items. If unknown, set it to a negative number. */
int items_num;
} TaskParallelIteratorState;
static void parallel_iterator_func_do(TaskParallelIteratorState *__restrict state,
void *userdata_chunk)
{
TaskParallelTLS tls = {
.userdata_chunk = userdata_chunk,
};
void **current_chunk_items;
int *current_chunk_indices;
int current_chunk_size;
const size_t items_size = sizeof(*current_chunk_items) * (size_t)state->iter_shared.chunk_size;
const size_t indices_size = sizeof(*current_chunk_indices) *
(size_t)state->iter_shared.chunk_size;
current_chunk_items = MALLOCA(items_size);
current_chunk_indices = MALLOCA(indices_size);
current_chunk_size = 0;
for (bool do_abort = false; !do_abort;) {
if (state->iter_shared.spin_lock != NULL) {
BLI_spin_lock(state->iter_shared.spin_lock);
}
/* Get current status. */
int index = state->iter_shared.next_index;
void *item = state->iter_shared.next_item;
int i;
/* 'Acquire' a chunk of items from the iterator function. */
for (i = 0; i < state->iter_shared.chunk_size && !state->iter_shared.is_finished; i++) {
current_chunk_indices[i] = index;
current_chunk_items[i] = item;
state->iter_func(state->userdata, &tls, &item, &index, &state->iter_shared.is_finished);
}
/* Update current status. */
state->iter_shared.next_index = index;
state->iter_shared.next_item = item;
current_chunk_size = i;
do_abort = state->iter_shared.is_finished;
if (state->iter_shared.spin_lock != NULL) {
BLI_spin_unlock(state->iter_shared.spin_lock);
}
for (i = 0; i < current_chunk_size; ++i) {
state->func(state->userdata, current_chunk_items[i], current_chunk_indices[i], &tls);
}
}
MALLOCA_FREE(current_chunk_items, items_size);
MALLOCA_FREE(current_chunk_indices, indices_size);
}
static void parallel_iterator_func(TaskPool *__restrict pool, void *userdata_chunk)
{
TaskParallelIteratorState *__restrict state = BLI_task_pool_user_data(pool);
parallel_iterator_func_do(state, userdata_chunk);
}
static void task_parallel_iterator_no_threads(const TaskParallelSettings *settings,
TaskParallelIteratorState *state)
{
/* Prepare user's TLS data. */
void *userdata_chunk = settings->userdata_chunk;
if (userdata_chunk) {
if (settings->func_init != NULL) {
settings->func_init(state->userdata, userdata_chunk);
}
}
/* Also marking it as non-threaded for the iterator callback. */
state->iter_shared.spin_lock = NULL;
parallel_iterator_func_do(state, userdata_chunk);
if (userdata_chunk) {
if (settings->func_free != NULL) {
/* `func_free` should only free data that was created during execution of `func`. */
settings->func_free(state->userdata, userdata_chunk);
}
}
}
static void task_parallel_iterator_do(const TaskParallelSettings *settings,
TaskParallelIteratorState *state)
{
const int threads_num = BLI_task_scheduler_num_threads();
task_parallel_calc_chunk_size(
settings, state->items_num, threads_num, &state->iter_shared.chunk_size);
if (!settings->use_threading) {
task_parallel_iterator_no_threads(settings, state);
return;
}
const int chunk_size = state->iter_shared.chunk_size;
const int items_num = state->items_num;
const size_t tasks_num = items_num >= 0 ?
(size_t)min_ii(threads_num, state->items_num / chunk_size) :
(size_t)threads_num;
BLI_assert(tasks_num > 0);
if (tasks_num == 1) {
task_parallel_iterator_no_threads(settings, state);
return;
}
SpinLock spin_lock;
BLI_spin_init(&spin_lock);
state->iter_shared.spin_lock = &spin_lock;
void *userdata_chunk = settings->userdata_chunk;
const size_t userdata_chunk_size = settings->userdata_chunk_size;
void *userdata_chunk_local = NULL;
void *userdata_chunk_array = NULL;
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
TaskPool *task_pool = BLI_task_pool_create(state, TASK_PRIORITY_HIGH);
if (use_userdata_chunk) {
userdata_chunk_array = MALLOCA(userdata_chunk_size * tasks_num);
}
for (size_t i = 0; i < tasks_num; i++) {
if (use_userdata_chunk) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
if (settings->func_init != NULL) {
settings->func_init(state->userdata, userdata_chunk_local);
}
}
/* Use this pool's pre-allocated tasks. */
BLI_task_pool_push(task_pool, parallel_iterator_func, userdata_chunk_local, false, NULL);
}
BLI_task_pool_work_and_wait(task_pool);
BLI_task_pool_free(task_pool);
if (use_userdata_chunk) {
if (settings->func_reduce != NULL || settings->func_free != NULL) {
for (size_t i = 0; i < tasks_num; i++) {
userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
if (settings->func_reduce != NULL) {
settings->func_reduce(state->userdata, userdata_chunk, userdata_chunk_local);
}
if (settings->func_free != NULL) {
settings->func_free(state->userdata, userdata_chunk_local);
}
}
}
MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * tasks_num);
}
BLI_spin_end(&spin_lock);
state->iter_shared.spin_lock = NULL;
}
void BLI_task_parallel_iterator(void *userdata,
TaskParallelIteratorIterFunc iter_func,
void *init_item,
const int init_index,
const int items_num,
TaskParallelIteratorFunc func,
const TaskParallelSettings *settings)
{
TaskParallelIteratorState state = {0};
state.items_num = items_num;
state.iter_shared.next_index = init_index;
state.iter_shared.next_item = init_item;
state.iter_shared.is_finished = false;
state.userdata = userdata;
state.iter_func = iter_func;
state.func = func;
task_parallel_iterator_do(settings, &state);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name ListBase Iteration
* \{ */
static void task_parallel_listbase_get(void *__restrict UNUSED(userdata),
const TaskParallelTLS *__restrict UNUSED(tls),
void **r_next_item,
int *r_next_index,
bool *r_do_abort)
{
/* Get current status. */
Link *link = *r_next_item;
if (link->next == NULL) {
*r_do_abort = true;
}
*r_next_item = link->next;
(*r_next_index)++;
}
void BLI_task_parallel_listbase(ListBase *listbase,
void *userdata,
TaskParallelIteratorFunc func,
const TaskParallelSettings *settings)
{
if (BLI_listbase_is_empty(listbase)) {
return;
}
TaskParallelIteratorState state = {0};
state.items_num = BLI_listbase_count(listbase);
state.iter_shared.next_index = 0;
state.iter_shared.next_item = listbase->first;
state.iter_shared.is_finished = false;
state.userdata = userdata;
state.iter_func = task_parallel_listbase_get;
state.func = func;
task_parallel_iterator_do(settings, &state);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name MemPool Iteration
* \{ */

View File

@ -67,15 +67,9 @@ TEST_F(FileOpsTest, rename)
BLI_file_touch(test_filepath_src.c_str());
ASSERT_TRUE(BLI_exists(test_filepath_src.c_str()));
/* `test_filepath_dst` does exist now, so regular rename should succeed on Unix, but fail on
* Windows. */
#ifdef WIN32
/* `test_filepath_dst` does exist now, so regular rename should fail. */
ASSERT_NE(0, BLI_rename(test_filepath_src.c_str(), test_filepath_dst.c_str()));
ASSERT_TRUE(BLI_exists(test_filepath_src.c_str()));
#else
ASSERT_EQ(0, BLI_rename(test_filepath_src.c_str(), test_filepath_dst.c_str()));
ASSERT_FALSE(BLI_exists(test_filepath_src.c_str()));
#endif
ASSERT_TRUE(BLI_exists(test_filepath_dst.c_str()));
BLI_file_touch(test_filepath_src.c_str());
@ -94,18 +88,6 @@ TEST_F(FileOpsTest, rename)
*
* This is expected to succeed on Unix, but fail on Windows. */
int fd_dst = BLI_open(test_filepath_dst.c_str(), O_BINARY | O_RDONLY, 0);
#ifdef WIN32
ASSERT_NE(0, BLI_rename(test_filepath_src.c_str(), test_filepath_dst.c_str()));
ASSERT_TRUE(BLI_exists(test_filepath_src.c_str()));
#else
ASSERT_EQ(0, BLI_rename(test_filepath_src.c_str(), test_filepath_dst.c_str()));
ASSERT_FALSE(BLI_exists(test_filepath_src.c_str()));
#endif
ASSERT_TRUE(BLI_exists(test_filepath_dst.c_str()));
BLI_file_touch(test_filepath_src.c_str());
ASSERT_TRUE(BLI_exists(test_filepath_src.c_str()));
#ifdef WIN32
ASSERT_NE(0, BLI_rename_overwrite(test_filepath_src.c_str(), test_filepath_dst.c_str()));
ASSERT_TRUE(BLI_exists(test_filepath_src.c_str()));
@ -138,20 +120,17 @@ TEST_F(FileOpsTest, rename)
BLI_dir_create_recursive(test_dirpath_src.c_str());
ASSERT_TRUE(BLI_exists(test_dirpath_src.c_str()));
/* `test_dirpath_dst` now exists, so regular rename should succeed on Unix, but fail on Windows.
*/
#ifdef WIN32
/* `test_dirpath_dst` now exists, so regular rename should fail. */
ASSERT_NE(0, BLI_rename(test_dirpath_src.c_str(), test_dirpath_dst.c_str()));
ASSERT_TRUE(BLI_exists(test_dirpath_src.c_str()));
#else
ASSERT_EQ(0, BLI_rename(test_dirpath_src.c_str(), test_dirpath_dst.c_str()));
ASSERT_FALSE(BLI_exists(test_dirpath_src.c_str()));
#endif
ASSERT_TRUE(BLI_exists(test_dirpath_dst.c_str()));
#ifndef WIN32
/* `test_dirpath_dst` now exists, but is empty, so overwrite rename should succeed. */
ASSERT_EQ(0, BLI_rename_overwrite(test_dirpath_src.c_str(), test_dirpath_dst.c_str()));
ASSERT_FALSE(BLI_exists(test_dirpath_src.c_str()));
ASSERT_TRUE(BLI_exists(test_dirpath_dst.c_str()));
BLI_dir_create_recursive(test_dirpath_src.c_str());
#endif
ASSERT_TRUE(BLI_exists(test_dirpath_src.c_str()));
const std::string test_dir_filepath_src = test_dirpath_src + SEP_STR + file_name_src;
@ -167,13 +146,12 @@ TEST_F(FileOpsTest, rename)
ASSERT_FALSE(BLI_exists(test_dir_filepath_src.c_str()));
ASSERT_TRUE(BLI_exists(test_dir_filepath_dst.c_str()));
/* `test_dirpath_dst` exists and is not empty, so regular rename should fail on all platforms. */
/* `test_dirpath_dst` exists and is not empty, so regular rename should fail. */
ASSERT_NE(0, BLI_rename(test_dirpath_src.c_str(), test_dirpath_dst.c_str()));
ASSERT_TRUE(BLI_exists(test_dirpath_src.c_str()));
ASSERT_TRUE(BLI_exists(test_dirpath_dst.c_str()));
/* `test_dirpath_dst` exists and is not empty, so even overwrite rename should fail on all
* platforms. */
/* `test_dirpath_dst` exists and is not empty, so even overwrite rename should fail. */
ASSERT_NE(0, BLI_rename_overwrite(test_dirpath_src.c_str(), test_dirpath_dst.c_str()));
ASSERT_TRUE(BLI_exists(test_dirpath_src.c_str()));
ASSERT_TRUE(BLI_exists(test_dirpath_dst.c_str()));

View File

@ -47,10 +47,11 @@ class SharedDataContainer {
}
if (data_->is_mutable()) {
data_->tag_ensured_mutable();
return data_.get();
}
data_ = data_->copy();
return data_.get();
else {
data_ = data_->copy();
}
return const_cast<ImplicitlySharedData *>(data_.get());
}
};
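Editor's note on the `const_cast` here and in `GeometrySet::get_component_for_write()` earlier in this diff: since `ImplicitSharingPtr` now only exposes `const` access, the cast is the single explicit point where mutability is granted, and it is only reached after the code has either verified sole ownership via `is_mutable()` or made a private copy, so shared data is never mutated through it.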

View File

@ -234,55 +234,6 @@ TEST(task, MempoolIterTLS)
BLI_threadapi_exit();
}
/* *** Parallel iterations over double-linked list items. *** */
static void task_listbase_iter_func(void *userdata,
void *item,
int index,
const TaskParallelTLS *__restrict /*tls*/)
{
LinkData *data = (LinkData *)item;
int *count = (int *)userdata;
data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
}
TEST(task, ListBaseIter)
{
ListBase list = {nullptr, nullptr};
LinkData *items_buffer = (LinkData *)MEM_calloc_arrayN(
ITEMS_NUM, sizeof(*items_buffer), __func__);
BLI_threadapi_init();
int i;
int items_num = 0;
for (i = 0; i < ITEMS_NUM; i++) {
BLI_addtail(&list, &items_buffer[i]);
items_num++;
}
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
BLI_task_parallel_listbase(&list, &items_num, task_listbase_iter_func, &settings);
/* Those checks should ensure us all items of the listbase were processed once, and only once -
* as expected. */
EXPECT_EQ(items_num, 0);
LinkData *item;
for (i = 0, item = (LinkData *)list.first; i < ITEMS_NUM && item != nullptr;
i++, item = item->next)
{
EXPECT_EQ(POINTER_AS_INT(item->data), i);
}
EXPECT_EQ(ITEMS_NUM, i);
MEM_freeN(items_buffer);
BLI_threadapi_exit();
}
TEST(task, ParallelInvoke)
{
std::atomic<int> counter = 0;

View File

@ -1,213 +0,0 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: Apache-2.0 */
#include "BLI_ressource_strings.h"
#include "testing/testing.h"
#include "atomic_ops.h"
#define GHASH_INTERNAL_API
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_listbase.h"
#include "BLI_mempool.h"
#include "BLI_task.h"
#include "PIL_time.h"
#define NUM_RUN_AVERAGED 100
static uint gen_pseudo_random_number(uint num)
{
/* NOTE: this is taken from BLI_ghashutil_uinthash(), don't want to depend on external code that
* might change here... */
num += ~(num << 16);
num ^= (num >> 5);
num += (num << 3);
num ^= (num >> 13);
num += ~(num << 9);
num ^= (num >> 17);
/* Make final number in [65 - 16385] range. */
return ((num & 255) << 6) + 1;
}
/* *** Parallel iterations over double-linked list items. *** */
static void task_listbase_light_iter_func(void * /*userdata*/,
void *item,
int index,
const TaskParallelTLS *__restrict /*tls*/)
{
LinkData *data = (LinkData *)item;
data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
}
static void task_listbase_light_membarrier_iter_func(void *userdata,
void *item,
int index,
const TaskParallelTLS *__restrict /*tls*/)
{
LinkData *data = (LinkData *)item;
int *count = (int *)userdata;
data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
}
static void task_listbase_heavy_iter_func(void * /*userdata*/,
void *item,
int index,
const TaskParallelTLS *__restrict /*tls*/)
{
LinkData *data = (LinkData *)item;
/* 'Random' number of iterations. */
const uint num = gen_pseudo_random_number(uint(index));
for (uint i = 0; i < num; i++) {
data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + ((i % 2) ? -index : index));
}
}
static void task_listbase_heavy_membarrier_iter_func(void *userdata,
void *item,
int index,
const TaskParallelTLS *__restrict /*tls*/)
{
LinkData *data = (LinkData *)item;
int *count = (int *)userdata;
/* 'Random' number of iterations. */
const uint num = gen_pseudo_random_number(uint(index));
for (uint i = 0; i < num; i++) {
data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + ((i % 2) ? -index : index));
}
atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
}
static void task_listbase_test_do(ListBase *list,
const int items_num,
int *items_tmp_num,
const char *id,
TaskParallelIteratorFunc func,
const bool use_threads,
const bool check_items_tmp_num)
{
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = use_threads;
double averaged_timing = 0.0;
for (int i = 0; i < NUM_RUN_AVERAGED; i++) {
const double init_time = PIL_check_seconds_timer();
BLI_task_parallel_listbase(list, items_tmp_num, func, &settings);
averaged_timing += PIL_check_seconds_timer() - init_time;
/* Those checks should ensure us all items of the listbase were processed once, and only once -
* as expected. */
if (check_items_tmp_num) {
EXPECT_EQ(*items_tmp_num, 0);
}
LinkData *item;
int j;
for (j = 0, item = (LinkData *)list->first; j < items_num && item != nullptr;
j++, item = item->next)
{
EXPECT_EQ(POINTER_AS_INT(item->data), j);
item->data = POINTER_FROM_INT(0);
}
EXPECT_EQ(items_num, j);
*items_tmp_num = items_num;
}
printf("\t%s: done in %fs on average over %d runs\n",
id,
averaged_timing / NUM_RUN_AVERAGED,
NUM_RUN_AVERAGED);
}
static void task_listbase_test(const char *id, const int count, const bool use_threads)
{
printf("\n========== STARTING %s ==========\n", id);
ListBase list = {nullptr, nullptr};
LinkData *items_buffer = (LinkData *)MEM_calloc_arrayN(count, sizeof(*items_buffer), __func__);
BLI_threadapi_init();
int items_num = 0;
for (int i = 0; i < count; i++) {
BLI_addtail(&list, &items_buffer[i]);
items_num++;
}
int items_tmp_num = items_num;
task_listbase_test_do(&list,
items_num,
&items_tmp_num,
"Light iter",
task_listbase_light_iter_func,
use_threads,
false);
task_listbase_test_do(&list,
items_num,
&items_tmp_num,
"Light iter with mem barrier",
task_listbase_light_membarrier_iter_func,
use_threads,
true);
task_listbase_test_do(&list,
items_num,
&items_tmp_num,
"Heavy iter",
task_listbase_heavy_iter_func,
use_threads,
false);
task_listbase_test_do(&list,
items_num,
&items_tmp_num,
"Heavy iter with mem barrier",
task_listbase_heavy_membarrier_iter_func,
use_threads,
true);
MEM_freeN(items_buffer);
BLI_threadapi_exit();
printf("========== ENDED %s ==========\n\n", id);
}
TEST(task, ListBaseIterNoThread10k)
{
task_listbase_test("ListBase parallel iteration - Single thread - 10000 items", 10000, false);
}
TEST(task, ListBaseIter10k)
{
task_listbase_test("ListBase parallel iteration - Threaded - 10000 items", 10000, true);
}
TEST(task, ListBaseIterNoThread100k)
{
task_listbase_test("ListBase parallel iteration - Single thread - 100000 items", 100000, false);
}
TEST(task, ListBaseIter100k)
{
task_listbase_test("ListBase parallel iteration - Threaded - 100000 items", 100000, true);
}
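For readers skimming the benchmark, the API surface it exercises is small; below is a minimal sketch of the usage pattern, using only calls that appear in the tests above (the function names are illustrative):
static void my_iter_func(void * /*userdata*/,
                         void *item,
                         int index,
                         const TaskParallelTLS *__restrict /*tls*/)
{
  /* Invoked once per list item, potentially from multiple threads. */
  LinkData *data = (LinkData *)item;
  data->data = POINTER_FROM_INT(index);
}
static void process_list(ListBase *list)
{
  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  settings.use_threading = true; /* As in the threaded test variants. */
  BLI_task_parallel_listbase(list, nullptr, my_iter_func, &settings);
}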

View File

@ -19,4 +19,3 @@ set(LIB
)
blender_add_performancetest_executable(BLI_ghash_performance "BLI_ghash_performance_test.cc" "${INC}" "${INC_SYS}" "${LIB}")
blender_add_performancetest_executable(BLI_task_performance "BLI_task_performance_test.cc" "${INC}" "${INC_SYS}" "${LIB}")

View File

@ -26,7 +26,7 @@
#include "BKE_key.h"
#include "BKE_lib_id.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_library.h"
#include "BKE_main.h"
#include "BKE_report.h"

View File

@ -73,7 +73,7 @@
#include "BKE_lib_id.h"
#include "BKE_lib_override.hh"
#include "BKE_lib_query.h"
#include "BKE_lib_remap.h"
#include "BKE_lib_remap.hh"
#include "BKE_main.h" /* for Main */
#include "BKE_main_idmap.hh"
#include "BKE_main_namemap.hh"

View File

@ -1664,9 +1664,6 @@ void DepsgraphNodeBuilder::build_object_data_geometry(Object *object)
NodeType::BATCH_CACHE,
OperationCode::GEOMETRY_SELECT_UPDATE,
[object_cow](::Depsgraph *depsgraph) { BKE_object_select_update(depsgraph, object_cow); });
/* Shading (No-Op).
* Needed to allow the Material shading updates to reach the Object. */
add_operation_node((ID *)object->data, NodeType::SHADING, OperationCode::SHADING);
}
void DepsgraphNodeBuilder::build_object_data_geometry_datablock(ID *obdata)
@ -1795,6 +1792,9 @@ void DepsgraphNodeBuilder::build_object_data_geometry_datablock(ID *obdata)
[obdata_cow](::Depsgraph *depsgraph) {
BKE_object_data_select_update(depsgraph, obdata_cow);
});
/* Shading (No-Op).
* Needed to allow the Material shading updates to reach the Object. */
add_operation_node(obdata, NodeType::SHADING, OperationCode::SHADING);
}
void DepsgraphNodeBuilder::build_armature(bArmature *armature)

View File

@ -250,7 +250,7 @@ set(SRC
intern/draw_manager_testing.h
intern/draw_manager_text.h
intern/draw_pass.hh
intern/draw_pbvh.h
intern/draw_pbvh.hh
intern/draw_resource.hh
intern/draw_sculpt.hh
intern/draw_shader.h
@ -468,6 +468,7 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_colorspace_lib.glsl
engines/eevee_next/shaders/eevee_cryptomatte_lib.glsl
engines/eevee_next/shaders/eevee_cubemap_lib.glsl
engines/eevee_next/shaders/eevee_debug_gbuffer_frag.glsl
engines/eevee_next/shaders/eevee_debug_surfels_vert.glsl
engines/eevee_next/shaders/eevee_debug_surfels_frag.glsl
engines/eevee_next/shaders/eevee_debug_irradiance_grid_vert.glsl

View File

@ -217,8 +217,8 @@ void DRW_draw_cursor_2d_ex(const struct ARegion *region, const float cursor[2]);
void DRW_cdlayer_attr_aliases_add(struct GPUVertFormat *format,
const char *base_name,
const struct CustomData *data,
const struct CustomDataLayer *cl,
int data_type,
const char *layer_name,
bool is_active_render,
bool is_active_layer);
#ifdef __cplusplus

View File

@ -15,15 +15,13 @@
#include "BLI_offset_indices.hh"
#include "BLI_set.hh"
#include "BLI_span.hh"
#include "BLI_struct_equality_utils.hh"
#include "BKE_attribute.hh"
#include "BKE_ccg.h"
class PBVHAttrReq;
struct GPUBatch;
struct PBVHNode;
struct PBVHBatches;
struct PBVHGPUFormat;
struct GSet;
struct DMFlagMat;
struct Mesh;
struct MLoopTri;
@ -32,61 +30,83 @@ struct SubdivCCG;
struct BMesh;
struct BMFace;
namespace blender::draw::pbvh {
class GenericRequest {
public:
std::string name;
eCustomDataType type;
eAttrDomain domain;
GenericRequest(const StringRef name, const eCustomDataType type, const eAttrDomain domain)
: name(name), type(type), domain(domain)
{
}
BLI_STRUCT_EQUALITY_OPERATORS_3(GenericRequest, type, domain, name);
};
enum class CustomRequest : int8_t {
Position,
Normal,
Mask,
FaceSet,
};
using AttributeRequest = std::variant<CustomRequest, GenericRequest>;
struct PBVHBatches;
struct PBVH_GPU_Args {
int pbvh_type;
BMesh *bm;
const Mesh *me;
blender::MutableSpan<blender::float3> vert_positions;
blender::OffsetIndices<int> faces;
blender::Span<int> corner_verts;
blender::Span<int> corner_edges;
MutableSpan<float3> vert_positions;
Span<int> corner_verts;
Span<int> corner_edges;
const CustomData *vert_data;
const CustomData *loop_data;
const CustomData *face_data;
blender::Span<blender::float3> vert_normals;
blender::Span<blender::float3> face_normals;
Span<float3> vert_normals;
Span<float3> face_normals;
const char *active_color;
const char *render_color;
int face_sets_color_seed, face_sets_color_default;
int face_sets_color_seed;
int face_sets_color_default;
SubdivCCG *subdiv_ccg;
blender::Span<DMFlagMat> grid_flag_mats;
blender::Span<int> grid_indices;
Span<DMFlagMat> grid_flag_mats;
Span<int> grid_indices;
CCGKey ccg_key;
blender::Span<CCGElem *> grids;
blender::Span<const BLI_bitmap *> grid_hidden;
Span<CCGElem *> grids;
Span<const BLI_bitmap *> grid_hidden;
blender::Span<int> prim_indices;
Span<int> prim_indices;
const bool *hide_poly;
blender::Span<MLoopTri> mlooptri;
blender::Span<int> looptri_faces;
PBVHNode *node;
Span<MLoopTri> mlooptri;
Span<int> looptri_faces;
/* BMesh. */
const blender::Set<BMFace *, 0> *bm_faces;
const Set<BMFace *, 0> *bm_faces;
int cd_mask_layer;
};
void DRW_pbvh_node_update(PBVHBatches *batches, const PBVH_GPU_Args &args);
void DRW_pbvh_update_pre(PBVHBatches *batches, const PBVH_GPU_Args &args);
void node_update(PBVHBatches *batches, const PBVH_GPU_Args &args);
void update_pre(PBVHBatches *batches, const PBVH_GPU_Args &args);
void DRW_pbvh_node_gpu_flush(PBVHBatches *batches);
PBVHBatches *DRW_pbvh_node_create(const PBVH_GPU_Args &args);
void DRW_pbvh_node_free(PBVHBatches *batches);
GPUBatch *DRW_pbvh_tris_get(PBVHBatches *batches,
PBVHAttrReq *attrs,
int attrs_num,
const PBVH_GPU_Args &args,
int *r_prim_count,
bool do_coarse_grids);
GPUBatch *DRW_pbvh_lines_get(PBVHBatches *batches,
PBVHAttrReq *attrs,
int attrs_num,
const PBVH_GPU_Args &args,
int *r_prim_count,
bool do_coarse_grids);
void node_gpu_flush(PBVHBatches *batches);
PBVHBatches *node_create(const PBVH_GPU_Args &args);
void node_free(PBVHBatches *batches);
GPUBatch *tris_get(PBVHBatches *batches,
Span<AttributeRequest> attrs,
const PBVH_GPU_Args &args,
bool do_coarse_grids);
GPUBatch *lines_get(PBVHBatches *batches,
Span<AttributeRequest> attrs,
const PBVH_GPU_Args &args,
bool do_coarse_grids);
} // namespace blender::draw::pbvh
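As an aside, building a request list for the new `tris_get()` signature could look like the sketch below; the `Vector` container and the concrete attribute name/type/domain values are illustrative assumptions, not part of this diff:
blender::Vector<AttributeRequest> requests;
requests.append(CustomRequest::Position);
requests.append(CustomRequest::Normal);
/* Generic attributes are requested by name, type and domain (illustrative values). */
requests.append(GenericRequest("UVMap", CD_PROP_FLOAT2, ATTR_DOMAIN_CORNER));
GPUBatch *batch = tris_get(batches, requests, args, /*do_coarse_grids=*/false);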

View File

@ -21,65 +21,134 @@ class Instance;
/**
* Full-screen textures containing geometric and surface data.
* Used by deferred shading passes. Only one gbuffer is allocated per view
* Used by deferred shading passes. Only one g-buffer is allocated per view
* and is reused for each deferred layer. This is why there can only be temporary
* textures inside it.
*
* Everything is stored inside two array textures, one for each format. This fits within the
* limit on the number of images we can bind in a single shader.
*
* First layer is always for reflection. All parameters to shoot a reflection ray are inside
* this layer.
*
* - Layer 1 : Reflection
* - R : Normal packed X
* - G : Normal packed Y
* - B : Roughness
* - A : Unused (Could be used for anisotropic roughness)
*
* The second layer is either for diffuse or transmission. Materials mixing both are not
* physically based and are uncommon, so to save bandwidth and texture memory we only store one.
* We use random sampling to mix between both. All parameters to shoot a refraction
* ray are inside this layer.
*
* - Layer 2 : Refraction
* - R : Normal packed X
* - G : Normal packed Y
* - B : Roughness (isotropic)
* - A : IOR
*
* - Layer 2 : Diffuse / Sub-Surface Scattering
* - R : Normal packed X
* - G : Normal packed Y
* - B : Thickness
* - A : Unused (Could be used for diffuse roughness)
*
* Layer 3 is only allocated if Sub-Surface Scattering is needed. All parameters for
* screen-space scattering are inside this layer.
*
* - Layer 3 : Sub-Surface Scattering
* - R : Scattering radius R
* - G : Scattering radius G
* - B : Scattering radius B
* - A : Object ID
* The content of the g-buffer is polymorphic. A 16-bit header (4 bits per layer) specifies the
* layout of the data. The first layer is always written, while the others are written only when
* needed, using imageStore operations to reduce bandwidth.
* Except for some special configurations, the g-buffer holds up to 3 closures.
*
* For each output closure, we also output the color to apply after the lighting computation.
* The color is stored with a 2-bit exponent that allows input colors with components higher than 1.
* Color degradation is expected to happen in this case.
*
* Here are special configurations:
*
* - Opaque Dielectric:
* - 1 Diffuse lobe and 1 Reflection lobe without anisotropy.
* - Share a single normal.
* - Reflection is not colored.
* - Layout:
* - Color 1 : Diffuse color
* - Closure 1 R : Normal packed X
* - Closure 1 G : Normal packed Y
* - Closure 1 B : Roughness (isotropic)
* - Closure 1 A : Reflection intensity
*
* - Simple Car-paint: (TODO)
* - 2 Reflection lobes without anisotropy.
* - Share a single normal.
* - Coat layer is not colored.
* - Layout:
* - Color 1 : Bottom layer color
* - Closure 1 R : Normal packed X
* - Closure 1 G : Normal packed Y
* - Closure 1 B : Roughness (isotropic)
* - Closure 1 A : Coat layer intensity
*
* - Simple Glass: (TODO)
* - 1 Refraction lobe and 1 Reflection lobe without anisotropy.
* - Share a single normal.
* - Reflection intensity is derived from IOR.
* - Layout:
* - Color 1 : Refraction color
* - Closure 1 R : Normal packed X
* - Closure 1 G : Normal packed Y
* - Closure 1 B : Roughness (isotropic)
* - Closure 1 A : IOR
*
* Here are Closure configurations:
*
* - Reflection (Isotropic):
* - Layout:
* - Color : Reflection color
* - Closure 1 R : Normal packed X
* - Closure 1 G : Normal packed Y
* - Closure 1 B : Roughness
* - Closure 1 A : Unused
*
* - Reflection (Anisotropic): (TODO)
* - Layout:
* - Color : Reflection color
* - Closure 1 R : Normal packed X
* - Closure 1 G : Normal packed Y
* - Closure 1 B : Tangent packed X
* - Closure 1 A : Tangent packed Y
* - Closure 2 R : Roughness X
* - Closure 2 G : Roughness Y
* - Closure 2 B : Unused
* - Closure 2 A : Unused
*
* - Refraction (Isotropic):
* - Layout:
* - Color : Refraction color
* - Closure 1 R : Normal packed X
* - Closure 1 G : Normal packed Y
* - Closure 1 B : Roughness
* - Closure 1 A : IOR
*
* - Diffuse:
* - Layout:
* - Color : Diffuse color
* - Closure 1 R : Normal packed X
* - Closure 1 G : Normal packed Y
* - Closure 1 B : Unused
* - Closure 1 A : Unused (Could be used for diffuse roughness)
*
* - Sub-Surface Scattering:
* - Layout:
* - Color : Diffuse color
* - Closure 1 R : Normal packed X
* - Closure 1 G : Normal packed Y
* - Closure 1 B : Thickness
* - Closure 1 A : Unused (Could be used for diffuse roughness)
* - Closure 2 R : Scattering radius R
* - Closure 2 G : Scattering radius G
* - Closure 2 B : Scattering radius B
* - Closure 2 A : Object ID
*
*/
struct GBuffer {
/* TODO(fclem): Use texture from pool once they support texture array and layer views. */
Texture header_tx = {"GbufferHeader"};
Texture closure_tx = {"GbufferClosure"};
Texture color_tx = {"GbufferColor"};
Texture header_tx = {"GBufferHeader"};
Texture closure_tx = {"GBufferClosure"};
Texture color_tx = {"GBufferColor"};
/* References to the GBuffer layer range [1..max]. */
GPUTexture *closure_img_tx = nullptr;
GPUTexture *color_img_tx = nullptr;
void acquire(int2 extent, eClosureBits closure_bits_)
void acquire(int2 extent, int closure_layer_count, int color_layer_count)
{
const bool use_sss = (closure_bits_ & CLOSURE_SSS) != 0;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE;
header_tx.ensure_2d(GPU_R8UI, extent, usage);
closure_tx.ensure_2d_array(GPU_RGBA16, extent, use_sss ? 3 : 2, usage);
color_tx.ensure_2d_array(GPU_RGB10_A2, extent, 2, usage);
/* Always allocate at least 2 layers so that the image views are always valid. */
closure_layer_count = max_ii(2, closure_layer_count);
color_layer_count = max_ii(2, color_layer_count);
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE |
GPU_TEXTURE_USAGE_ATTACHMENT;
header_tx.ensure_2d(GPU_R16UI, extent, usage);
closure_tx.ensure_2d_array(GPU_RGBA16, extent, closure_layer_count, usage);
color_tx.ensure_2d_array(GPU_RGB10_A2, extent, color_layer_count, usage);
/* Ensure layer view for frame-buffer attachment. */
closure_tx.ensure_layer_views();
color_tx.ensure_layer_views();
/* Ensure layer view for image store. */
closure_img_tx = closure_tx.layer_range_view(1, closure_layer_count - 1);
color_img_tx = color_tx.layer_range_view(1, color_layer_count - 1);
}
void release()
@ -88,6 +157,9 @@ struct GBuffer {
// header_tx.release();
// closure_tx.release();
// color_tx.release();
closure_img_tx = nullptr;
color_img_tx = nullptr;
}
template<typename PassType> void bind_resources(PassType &pass)
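For context, the expected per-view call pattern, condensed from ShadingView::render() later in this diff:
GBuffer &gbuf = inst_.gbuffer;
gbuf.acquire(extent_,
             inst_.pipelines.deferred.closure_layer_count(),
             inst_.pipelines.deferred.color_layer_count());
/* ... attach header_tx and the layer views to the g-buffer framebuffer, submit passes ... */
inst_.gbuffer.release();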

View File

@ -119,7 +119,7 @@ class Instance {
sync(*this),
materials(*this),
subsurface(*this, global_ubo_.subsurface),
pipelines(*this),
pipelines(*this, global_ubo_.pipeline),
shadows(*this, global_ubo_.shadow),
lights(*this),
ambient_occlusion(*this, global_ubo_.ao),

View File

@ -93,11 +93,11 @@ MaterialModule::MaterialModule(Instance &inst) : inst_(inst)
nodeSetActive(ntree, output);
}
{
glossy_mat = (::Material *)BKE_id_new_nomain(ID_MA, "EEVEE default metal");
metallic_mat = (::Material *)BKE_id_new_nomain(ID_MA, "EEVEE default metal");
bNodeTree *ntree = bke::ntreeAddTreeEmbedded(
nullptr, &glossy_mat->id, "Shader Nodetree", ntreeType_Shader->idname);
glossy_mat->use_nodes = true;
glossy_mat->surface_render_method = MA_SURFACE_METHOD_FORWARD;
nullptr, &metallic_mat->id, "Shader Nodetree", ntreeType_Shader->idname);
metallic_mat->use_nodes = true;
metallic_mat->surface_render_method = MA_SURFACE_METHOD_FORWARD;
bNode *bsdf = nodeAddStaticNode(nullptr, ntree, SH_NODE_BSDF_GLOSSY);
bNodeSocket *base_color = nodeFindSocket(bsdf, SOCK_IN, "Color");
@ -140,7 +140,7 @@ MaterialModule::MaterialModule(Instance &inst) : inst_(inst)
MaterialModule::~MaterialModule()
{
BKE_id_free(nullptr, glossy_mat);
BKE_id_free(nullptr, metallic_mat);
BKE_id_free(nullptr, diffuse_mat);
BKE_id_free(nullptr, error_mat_);
}

View File

@ -305,7 +305,7 @@ struct MaterialArray {
class MaterialModule {
public:
::Material *diffuse_mat;
::Material *glossy_mat;
::Material *metallic_mat;
int64_t queued_shaders_count = 0;
int64_t queued_optimize_shaders_count = 0;

View File

@ -116,6 +116,7 @@ void WorldVolumePipeline::sync(GPUMaterial *gpumat)
world_ps_.init();
world_ps_.state_set(DRW_STATE_WRITE_COLOR);
world_ps_.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);
inst_.bind_uniform_data(&world_ps_);
inst_.volume.bind_properties_buffers(world_ps_);
inst_.sampling.bind_resources(world_ps_);
@ -417,6 +418,10 @@ void DeferredLayer::begin_sync()
/* Textures. */
prepass_ps_.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);
inst_.pipelines.data.alpha_hash_scale = 0.1f;
if (inst_.is_viewport() && inst_.velocity.camera_has_motion()) {
inst_.pipelines.data.alpha_hash_scale = 1.0f;
}
inst_.bind_uniform_data(&prepass_ps_);
inst_.velocity.bind_resources(prepass_ps_);
inst_.sampling.bind_resources(prepass_ps_);
@ -440,16 +445,13 @@ void DeferredLayer::begin_sync()
}
{
gbuffer_ps_.init();
gbuffer_ps_.clear_stencil(0x00u);
gbuffer_ps_.state_stencil(0xFFu, 0xFFu, 0xFFu);
{
/* Common resources. */
/* G-buffer. */
gbuffer_ps_.bind_image(GBUF_CLOSURE_SLOT, &inst_.gbuffer.closure_tx);
gbuffer_ps_.bind_image(GBUF_COLOR_SLOT, &inst_.gbuffer.color_tx);
gbuffer_ps_.bind_image(GBUF_HEADER_SLOT, &inst_.gbuffer.header_tx);
gbuffer_ps_.bind_image(GBUF_CLOSURE_SLOT, &inst_.gbuffer.closure_img_tx);
gbuffer_ps_.bind_image(GBUF_COLOR_SLOT, &inst_.gbuffer.color_img_tx);
/* RenderPasses & AOVs. */
gbuffer_ps_.bind_image(RBUFS_COLOR_SLOT, &inst_.render_buffers.rp_color_tx);
gbuffer_ps_.bind_image(RBUFS_VALUE_SLOT, &inst_.render_buffers.rp_value_tx);
@ -465,8 +467,7 @@ void DeferredLayer::begin_sync()
inst_.cryptomatte.bind_resources(gbuffer_ps_);
}
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL |
DRW_STATE_STENCIL_ALWAYS;
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL;
gbuffer_double_sided_ps_ = &gbuffer_ps_.sub("DoubleSided");
gbuffer_double_sided_ps_->state_set(state);
@ -485,9 +486,9 @@ void DeferredLayer::end_sync()
{
PassSimple &pass = eval_light_ps_;
pass.init();
/* Use stencil test to reject pixel not written by this layer. */
pass.state_set(DRW_STATE_WRITE_STENCIL | DRW_STATE_STENCIL_NEQUAL);
pass.state_stencil(0x00u, 0x00u, evaluated_closures);
/* Use depth test to reject background pixels. */
/* WORKAROUND: Avoid rasterizer discard, but the shaders actually use no fragment output. */
pass.state_set(DRW_STATE_WRITE_STENCIL | DRW_STATE_DEPTH_GREATER);
pass.shader_set(inst_.shaders.static_shader_get(DEFERRED_LIGHT));
pass.bind_image("direct_diffuse_img", &direct_diffuse_tx_);
pass.bind_image("direct_reflect_img", &direct_reflect_tx_);
@ -507,9 +508,8 @@ void DeferredLayer::end_sync()
{
PassSimple &pass = combine_ps_;
pass.init();
/* Use stencil test to reject pixel not written by this layer. */
pass.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL | DRW_STATE_BLEND_ADD_FULL);
pass.state_stencil(0x00u, 0x00u, evaluated_closures);
/* Use depth test to reject background pixels. */
pass.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_GREATER | DRW_STATE_BLEND_ADD_FULL);
pass.shader_set(inst_.shaders.static_shader_get(DEFERRED_COMBINE));
pass.bind_image("direct_diffuse_img", &direct_diffuse_tx_);
pass.bind_image("direct_reflect_img", &direct_reflect_tx_);
@ -548,15 +548,14 @@ PassMain::Sub *DeferredLayer::material_add(::Material *blender_mat, GPUMaterial
PassMain::Sub *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ?
gbuffer_single_sided_ps_ :
gbuffer_double_sided_ps_;
pass = &pass->sub(GPU_material_get_name(gpumat));
pass->state_stencil(closure_bits, 0xFFu, 0xFFu);
return pass;
return &pass->sub(GPU_material_get_name(gpumat));
}
void DeferredLayer::render(View &main_view,
View &render_view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
Framebuffer &gbuffer_fb,
int2 extent,
RayTraceBuffer &rt_buffer,
bool is_first_pass)
@ -597,8 +596,6 @@ void DeferredLayer::render(View &main_view,
GPU_framebuffer_bind(prepass_fb);
inst_.manager->submit(prepass_ps_, render_view);
inst_.gbuffer.acquire(extent, closure_bits_);
if (closure_bits_ & CLOSURE_AMBIENT_OCCLUSION) {
/* If the shader needs Ambient Occlusion, we need to update the HiZ here. */
if (do_screen_space_refraction) {
@ -612,10 +609,23 @@ void DeferredLayer::render(View &main_view,
}
}
/* TODO(fclem): Clear in pass when Gbuffer will render with framebuffer. */
inst_.gbuffer.header_tx.clear(uint4(0));
if (/* FIXME(fclem): Metal doesn't clear the whole framebuffer correctly. */
GPU_backend_get_type() == GPU_BACKEND_METAL ||
/* FIXME(fclem): Vulkan doesn't implement load / store config yet. */
GPU_backend_get_type() == GPU_BACKEND_VULKAN)
{
inst_.gbuffer.header_tx.clear(int4(0));
}
GPU_framebuffer_bind_ex(gbuffer_fb,
{
{GPU_LOADACTION_LOAD, GPU_STOREACTION_STORE}, /* Depth */
{GPU_LOADACTION_LOAD, GPU_STOREACTION_STORE}, /* Combined */
{GPU_LOADACTION_CLEAR, GPU_STOREACTION_STORE, {0}}, /* GBuf Header */
{GPU_LOADACTION_DONT_CARE, GPU_STOREACTION_STORE}, /* GBuf Closure */
{GPU_LOADACTION_DONT_CARE, GPU_STOREACTION_STORE}, /* GBuf Color */
});
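Note the load/store choices here: only the g-buffer header attachment is cleared (to GBUF_NONE) so untouched pixels remain identifiable, while the closure and color layers use GPU_LOADACTION_DONT_CARE because they are only read where the header records a written closure.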
GPU_framebuffer_bind(combined_fb);
inst_.manager->submit(gbuffer_ps_, render_view);
inst_.hiz_buffer.set_dirty();
@ -637,13 +647,13 @@ void DeferredLayer::render(View &main_view,
inst_.shadows.set_view(render_view);
{
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE |
GPU_TEXTURE_USAGE_ATTACHMENT;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE;
direct_diffuse_tx_.acquire(extent, GPU_RGBA16F, usage);
direct_reflect_tx_.acquire(extent, GPU_RGBA16F, usage);
direct_refract_tx_.acquire(extent, GPU_RGBA16F, usage);
}
GPU_framebuffer_bind(combined_fb);
inst_.manager->submit(eval_light_ps_, render_view);
RayTraceResult diffuse_result = inst_.raytracing.trace(rt_buffer,
@ -684,7 +694,7 @@ void DeferredLayer::render(View &main_view,
radiance_feedback_persmat_ = render_view.persmat();
}
inst_.gbuffer.release();
inst_.pipelines.deferred.debug_draw(render_view, combined_fb);
}
/** \} */
@ -705,6 +715,53 @@ void DeferredPipeline::end_sync()
{
opaque_layer_.end_sync();
refraction_layer_.end_sync();
debug_pass_sync();
}
void DeferredPipeline::debug_pass_sync()
{
Instance &inst = opaque_layer_.inst_;
if (!ELEM(inst.debug_mode,
eDebugMode::DEBUG_GBUFFER_EVALUATION,
eDebugMode::DEBUG_GBUFFER_STORAGE))
{
return;
}
PassSimple &pass = debug_draw_ps_;
pass.init();
pass.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM);
pass.shader_set(inst.shaders.static_shader_get(DEBUG_GBUFFER));
pass.push_constant("debug_mode", int(inst.debug_mode));
inst.gbuffer.bind_resources(pass);
pass.draw_procedural(GPU_PRIM_TRIS, 1, 3);
}
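The final `draw_procedural(GPU_PRIM_TRIS, 1, 3)` draws a single three-vertex triangle covering the whole viewport; the `draw_fullscreen` create-info referenced at the end of this diff supplies the vertex positions.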
void DeferredPipeline::debug_draw(draw::View &view, GPUFrameBuffer *combined_fb)
{
Instance &inst = opaque_layer_.inst_;
if (!ELEM(inst.debug_mode,
eDebugMode::DEBUG_GBUFFER_EVALUATION,
eDebugMode::DEBUG_GBUFFER_STORAGE))
{
return;
}
switch (inst.debug_mode) {
case eDebugMode::DEBUG_GBUFFER_EVALUATION:
inst.info = "Debug Mode: Deferred Lighting Cost";
break;
case eDebugMode::DEBUG_GBUFFER_STORAGE:
inst.info = "Debug Mode: Gbuffer Storage Cost";
break;
default:
/* Nothing to display. */
return;
}
GPU_framebuffer_bind(combined_fb);
inst.manager->submit(debug_draw_ps_, view);
}
PassMain::Sub *DeferredPipeline::prepass_add(::Material *blender_mat,
@ -733,18 +790,31 @@ void DeferredPipeline::render(View &main_view,
View &render_view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
Framebuffer &gbuffer_fb,
int2 extent,
RayTraceBuffer &rt_buffer_opaque_layer,
RayTraceBuffer &rt_buffer_refract_layer)
{
DRW_stats_group_start("Deferred.Opaque");
opaque_layer_.render(
main_view, render_view, prepass_fb, combined_fb, extent, rt_buffer_opaque_layer, true);
opaque_layer_.render(main_view,
render_view,
prepass_fb,
combined_fb,
gbuffer_fb,
extent,
rt_buffer_opaque_layer,
true);
DRW_stats_group_end();
DRW_stats_group_start("Deferred.Refract");
refraction_layer_.render(
main_view, render_view, prepass_fb, combined_fb, extent, rt_buffer_refract_layer, false);
refraction_layer_.render(main_view,
render_view,
prepass_fb,
combined_fb,
gbuffer_fb,
extent,
rt_buffer_refract_layer,
false);
DRW_stats_group_end();
}
@ -999,9 +1069,6 @@ void DeferredProbeLayer::begin_sync()
}
{
gbuffer_ps_.init();
gbuffer_ps_.clear_stencil(0x00u);
gbuffer_ps_.state_stencil(0xFFu, 0xFFu, 0xFFu);
{
/* Common resources. */
@ -1024,8 +1091,7 @@ void DeferredProbeLayer::begin_sync()
inst_.cryptomatte.bind_resources(gbuffer_ps_);
}
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL |
DRW_STATE_STENCIL_ALWAYS;
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL;
gbuffer_double_sided_ps_ = &gbuffer_ps_.sub("DoubleSided");
gbuffer_double_sided_ps_->state_set(state);
@ -1040,9 +1106,8 @@ void DeferredProbeLayer::end_sync()
if (closure_bits_ & (CLOSURE_DIFFUSE | CLOSURE_REFLECTION)) {
PassSimple &pass = eval_light_ps_;
pass.init();
/* Use stencil test to reject pixel not written by this layer. */
pass.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL);
pass.state_stencil(0x00u, 0x00u, (CLOSURE_DIFFUSE | CLOSURE_REFLECTION));
/* Use depth test to reject background pixels. */
pass.state_set(DRW_STATE_DEPTH_GREATER | DRW_STATE_WRITE_COLOR);
pass.shader_set(inst_.shaders.static_shader_get(DEFERRED_CAPTURE_EVAL));
pass.bind_image(RBUFS_COLOR_SLOT, &inst_.render_buffers.rp_color_tx);
pass.bind_image(RBUFS_VALUE_SLOT, &inst_.render_buffers.rp_value_tx);
@ -1076,14 +1141,13 @@ PassMain::Sub *DeferredProbeLayer::material_add(::Material *blender_mat, GPUMate
PassMain::Sub *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ?
gbuffer_single_sided_ps_ :
gbuffer_double_sided_ps_;
pass = &pass->sub(GPU_material_get_name(gpumat));
pass->state_stencil(closure_bits, 0xFFu, 0xFFu);
return pass;
return &pass->sub(GPU_material_get_name(gpumat));
}
void DeferredProbeLayer::render(View &view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
Framebuffer &gbuffer_fb,
int2 extent)
{
GPU_framebuffer_bind(prepass_fb);
@ -1095,14 +1159,11 @@ void DeferredProbeLayer::render(View &view,
inst_.shadows.set_view(view);
inst_.irradiance_cache.set_view(view);
inst_.gbuffer.acquire(extent, closure_bits_);
GPU_framebuffer_bind(combined_fb);
GPU_framebuffer_bind(gbuffer_fb);
inst_.manager->submit(gbuffer_ps_, view);
GPU_framebuffer_bind(combined_fb);
inst_.manager->submit(eval_light_ps_, view);
inst_.gbuffer.release();
}
/** \} */
@ -1136,10 +1197,11 @@ PassMain::Sub *DeferredProbePipeline::material_add(::Material *blender_mat, GPUM
void DeferredProbePipeline::render(View &view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
Framebuffer &gbuffer_fb,
int2 extent)
{
GPU_debug_group_begin("Probe.Render");
opaque_layer_.render(view, prepass_fb, combined_fb, extent);
opaque_layer_.render(view, prepass_fb, combined_fb, gbuffer_fb, extent);
GPU_debug_group_end();
}
@ -1236,30 +1298,30 @@ PassMain::Sub *PlanarProbePipeline::material_add(::Material *blender_mat, GPUMat
return &pass->sub(GPU_material_get_name(gpumat));
}
void PlanarProbePipeline::render(View &view, Framebuffer &combined_fb, int layer_id, int2 extent)
void PlanarProbePipeline::render(
View &view, Framebuffer &gbuffer_fb, Framebuffer &combined_fb, int layer_id, int2 extent)
{
GPU_debug_group_begin("Planar.Capture");
inst_.hiz_buffer.set_source(&inst_.planar_probes.depth_tx_, layer_id);
inst_.hiz_buffer.set_dirty();
GPU_framebuffer_bind(combined_fb);
GPU_framebuffer_clear_depth(combined_fb, 1.0f);
GPU_framebuffer_bind(gbuffer_fb);
GPU_framebuffer_clear_depth(gbuffer_fb, 1.0f);
inst_.manager->submit(prepass_ps_, view);
inst_.lights.set_view(view, extent);
inst_.shadows.set_view(view);
inst_.irradiance_cache.set_view(view);
inst_.gbuffer.acquire(extent, closure_bits_);
inst_.hiz_buffer.update();
GPU_framebuffer_bind(combined_fb);
GPU_framebuffer_clear_color(combined_fb, float4(0.0f, 0.0f, 0.0f, 1.0f));
inst_.manager->submit(gbuffer_ps_, view);
inst_.manager->submit(eval_light_ps_, view);
inst_.gbuffer.release();
GPU_framebuffer_bind(gbuffer_fb);
GPU_framebuffer_clear_color(gbuffer_fb, float4(0.0f, 0.0f, 0.0f, 1.0f));
inst_.manager->submit(gbuffer_ps_, view);
GPU_framebuffer_bind(combined_fb);
inst_.manager->submit(eval_light_ps_, view);
GPU_debug_group_end();
}

View File

@ -180,9 +180,27 @@ struct DeferredLayerBase {
/* Closures bits from the materials in this pass. */
eClosureBits closure_bits_ = CLOSURE_NONE;
/* Return the number of g-buffer closure layers needed. */
int closure_layer_count() const
{
return count_bits_i(closure_bits_ &
(CLOSURE_REFRACTION | CLOSURE_REFLECTION | CLOSURE_DIFFUSE | CLOSURE_SSS));
}
/* Return the number of g-buffer color layers needed. */
int color_layer_count() const
{
return count_bits_i(closure_bits_ &
(CLOSURE_REFRACTION | CLOSURE_REFLECTION | CLOSURE_DIFFUSE));
}
};
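For example, a layer whose materials use diffuse, reflection and sub-surface scattering sets three of the counted bits, so `closure_layer_count()` returns 3 while `color_layer_count()` returns 2: SSS stores scattering radii in a closure layer but has no color layer of its own (see the "No color for SSS" note in the fragment shader further down).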
class DeferredLayer : private DeferredLayerBase {
class DeferredPipeline;
class DeferredLayer : DeferredLayerBase {
friend DeferredPipeline;
private:
Instance &inst_;
@ -225,6 +243,7 @@ class DeferredLayer : private DeferredLayerBase {
void render(View &main_view,
View &render_view,
Framebuffer &prepass_fb,
Framebuffer &gbuffer_fb,
Framebuffer &combined_fb,
int2 extent,
RayTraceBuffer &rt_buffer,
@ -239,6 +258,8 @@ class DeferredPipeline {
DeferredLayer refraction_layer_;
DeferredLayer volumetric_layer_;
PassSimple debug_draw_ps_ = {"debug_gbuffer"};
public:
DeferredPipeline(Instance &inst)
: opaque_layer_(inst), refraction_layer_(inst), volumetric_layer_(inst){};
@ -253,9 +274,27 @@ class DeferredPipeline {
View &render_view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
Framebuffer &gbuffer_fb,
int2 extent,
RayTraceBuffer &rt_buffer_opaque_layer,
RayTraceBuffer &rt_buffer_refract_layer);
/* Return the maximum number of g-buffer closure layers needed. */
int closure_layer_count() const
{
return max_ii(opaque_layer_.closure_layer_count(), refraction_layer_.closure_layer_count());
}
/* Return the maximum number of g-buffer color layers needed. */
int color_layer_count() const
{
return max_ii(opaque_layer_.color_layer_count(), refraction_layer_.color_layer_count());
}
void debug_draw(draw::View &view, GPUFrameBuffer *combined_fb);
private:
void debug_pass_sync();
};
/** \} */
@ -410,7 +449,12 @@ class VolumePipeline {
/* -------------------------------------------------------------------- */
/** \name Deferred Probe Capture.
* \{ */
class DeferredProbePipeline;
class DeferredProbeLayer : DeferredLayerBase {
friend DeferredProbePipeline;
private:
Instance &inst_;
@ -425,7 +469,11 @@ class DeferredProbeLayer : DeferredLayerBase {
PassMain::Sub *prepass_add(::Material *blender_mat, GPUMaterial *gpumat);
PassMain::Sub *material_add(::Material *blender_mat, GPUMaterial *gpumat);
void render(View &view, Framebuffer &prepass_fb, Framebuffer &combined_fb, int2 extent);
void render(View &view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
Framebuffer &gbuffer_fb,
int2 extent);
};
class DeferredProbePipeline {
@ -441,7 +489,23 @@ class DeferredProbePipeline {
PassMain::Sub *prepass_add(::Material *material, GPUMaterial *gpumat);
PassMain::Sub *material_add(::Material *material, GPUMaterial *gpumat);
void render(View &view, Framebuffer &prepass_fb, Framebuffer &combined_fb, int2 extent);
void render(View &view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
Framebuffer &gbuffer_fb,
int2 extent);
/* Return the maximum number of g-buffer closure layers needed. */
int closure_layer_count() const
{
return opaque_layer_.closure_layer_count();
}
/* Return the maximum number of g-buffer color layers needed. */
int color_layer_count() const
{
return opaque_layer_.color_layer_count();
}
};
/** \} */
@ -456,9 +520,6 @@ class PlanarProbePipeline : DeferredLayerBase {
PassSimple eval_light_ps_ = {"EvalLights"};
/* Closures bits from the materials in this pass. */
eClosureBits closure_bits_ = CLOSURE_NONE;
public:
PlanarProbePipeline(Instance &inst) : inst_(inst){};
@ -468,7 +529,8 @@ class PlanarProbePipeline : DeferredLayerBase {
PassMain::Sub *prepass_add(::Material *material, GPUMaterial *gpumat);
PassMain::Sub *material_add(::Material *material, GPUMaterial *gpumat);
void render(View &view, Framebuffer &combined_fb, int layer_id, int2 extent);
void render(
View &view, Framebuffer &gbuffer, Framebuffer &combined_fb, int layer_id, int2 extent);
};
/** \} */
@ -593,9 +655,10 @@ class PipelineModule {
CapturePipeline capture;
UtilityTexture utility_tx;
PipelineInfoData &data;
public:
PipelineModule(Instance &inst)
PipelineModule(Instance &inst, PipelineInfoData &data)
: background(inst),
world(inst),
world_volume(inst),
@ -605,7 +668,8 @@ class PipelineModule {
forward(inst),
shadow(inst),
volume(inst),
capture(inst){};
capture(inst),
data(data){};
void begin_sync()
{

View File

@ -136,10 +136,22 @@ void PlanarProbeModule::set_view(const draw::View &main_view, int2 main_view_ext
world_clip_buf_.plane = probe.reflection_clip_plane_get();
world_clip_buf_.push_update();
GBuffer &gbuf = instance_.gbuffer;
gbuf.acquire(extent,
instance_.pipelines.deferred.closure_layer_count(),
instance_.pipelines.deferred.color_layer_count());
res.combined_fb.ensure(GPU_ATTACHMENT_TEXTURE_LAYER(depth_tx_, resource_index),
GPU_ATTACHMENT_TEXTURE_LAYER(radiance_tx_, resource_index));
instance_.pipelines.planar.render(res.view, res.combined_fb, resource_index, extent);
res.gbuffer_fb.ensure(GPU_ATTACHMENT_TEXTURE_LAYER(depth_tx_, resource_index),
GPU_ATTACHMENT_TEXTURE_LAYER(radiance_tx_, resource_index),
GPU_ATTACHMENT_TEXTURE(gbuf.header_tx),
GPU_ATTACHMENT_TEXTURE_LAYER(gbuf.color_tx.layer_view(0), 0),
GPU_ATTACHMENT_TEXTURE_LAYER(gbuf.closure_tx.layer_view(0), 0));
instance_.pipelines.planar.render(
res.view, res.combined_fb, res.gbuffer_fb, resource_index, extent);
if (do_display_draw_ && probe.viewport_display) {
display_data_buf_.get_or_resize(display_index++) = {probe.plane_to_world, resource_index};

View File

@ -69,6 +69,7 @@ struct PlanarProbe : ProbePlanarData {
struct PlanarProbeResources : NonCopyable {
Framebuffer combined_fb = {"planar.combined_fb"};
Framebuffer gbuffer_fb = {"planar.gbuffer_fb"};
draw::View view = {"planar.view"};
};

View File

@ -128,6 +128,8 @@ const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_
return "eevee_debug_surfels";
case DEBUG_IRRADIANCE_GRID:
return "eevee_debug_irradiance_grid";
case DEBUG_GBUFFER:
return "eevee_debug_gbuffer";
case DISPLAY_PROBE_GRID:
return "eevee_display_probe_grid";
case DISPLAY_PROBE_REFLECTION:

View File

@ -37,6 +37,7 @@ enum eShaderType {
DEFERRED_CAPTURE_EVAL,
DEFERRED_PLANAR_EVAL,
DEBUG_GBUFFER,
DEBUG_SURFELS,
DEBUG_IRRADIANCE_GRID,

View File

@ -78,6 +78,14 @@ enum eDebugMode : uint32_t {
* Show random color for each tile. Verify distribution and LOD transitions.
*/
DEBUG_SHADOW_TILEMAP_RANDOM_COLOR = 13u,
/**
* Show storage cost of each pixel in the gbuffer.
*/
DEBUG_GBUFFER_STORAGE = 14u,
/**
* Show evaluation cost of each pixel.
*/
DEBUG_GBUFFER_EVALUATION = 15u,
};
/** \} */
@ -1170,12 +1178,10 @@ BLI_STATIC_ASSERT_ALIGN(HiZData, 16)
enum eClosureBits : uint32_t {
CLOSURE_NONE = 0u,
/** NOTE: These are used as stencil bits. So we are limited to 8bits. */
CLOSURE_DIFFUSE = (1u << 0u),
CLOSURE_SSS = (1u << 1u),
CLOSURE_REFLECTION = (1u << 2u),
CLOSURE_REFRACTION = (1u << 3u),
/* Non-stencil bits. */
CLOSURE_TRANSPARENCY = (1u << 8u),
CLOSURE_EMISSION = (1u << 9u),
CLOSURE_HOLDOUT = (1u << 10u),
@ -1183,6 +1189,24 @@ enum eClosureBits : uint32_t {
CLOSURE_AMBIENT_OCCLUSION = (1u << 12u),
};
enum GBufferMode : uint32_t {
/** None mode for pixels not rendered. */
GBUF_NONE = 0u,
GBUF_REFLECTION = 1u,
GBUF_REFRACTION = 2u,
GBUF_DIFFUSE = 3u,
GBUF_SSS = 4u,
/** Special configurations. Packs multiple closures into 1 layer.
* NOTE: Currently shares its value with GBUF_SSS; the reader relies on SSS never being packed
* into the first layer to disambiguate. */
GBUF_OPAQUE_DIELECTRIC = 4u,
/** Set for surfaces without lit closures. This stores only the normal to the surface. */
GBUF_UNLIT = 15u,
/** IMPORTANT: Needs to be less than 16 for correct packing in g-buffer header. */
};
struct RayTraceData {
/** ViewProjection matrix used to render the previous frame. */
float4x4 history_persmat;
@ -1394,7 +1418,19 @@ struct ProbePlanarDisplayData {
BLI_STATIC_ASSERT_ALIGN(ProbePlanarDisplayData, 16)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Pipeline Data
* \{ */
struct PipelineInfoData {
float alpha_hash_scale;
float _pad0;
float _pad1;
float _pad2;
};
BLI_STATIC_ASSERT_ALIGN(PipelineInfoData, 16)
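The trailing pad floats round the struct up to a 16-byte multiple so it can be embedded in `UniformData` below; `BLI_STATIC_ASSERT_ALIGN` verifies this at compile time.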
/** \} */
/* -------------------------------------------------------------------- */
/** \name Uniform Data
* \{ */
@ -1410,6 +1446,7 @@ struct UniformData {
ShadowSceneData shadow;
SubsurfaceData subsurface;
VolumesInfoData volumes;
PipelineInfoData pipeline;
};
BLI_STATIC_ASSERT_ALIGN(UniformData, 16)

View File

@ -78,19 +78,32 @@ void ShadingView::render()
return;
}
update_view();
DRW_stats_group_start(name_);
/* Needs to be before anything else because it query its own gbuffer. */
inst_.planar_probes.set_view(render_view_, extent_);
/* Query temp textures and create frame-buffers. */
RenderBuffers &rbufs = inst_.render_buffers;
rbufs.acquire(extent_);
combined_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
GPU_ATTACHMENT_TEXTURE(rbufs.combined_tx));
prepass_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
GPU_ATTACHMENT_TEXTURE(rbufs.vector_tx));
update_view();
GBuffer &gbuf = inst_.gbuffer;
gbuf.acquire(extent_,
inst_.pipelines.deferred.closure_layer_count(),
inst_.pipelines.deferred.color_layer_count());
DRW_stats_group_start(name_);
inst_.planar_probes.set_view(render_view_, extent_);
gbuffer_fb_.ensure(GPU_ATTACHMENT_TEXTURE(rbufs.depth_tx),
GPU_ATTACHMENT_TEXTURE(rbufs.combined_tx),
GPU_ATTACHMENT_TEXTURE(gbuf.header_tx),
GPU_ATTACHMENT_TEXTURE_LAYER(gbuf.color_tx.layer_view(0), 0),
GPU_ATTACHMENT_TEXTURE_LAYER(gbuf.closure_tx.layer_view(0), 0));
/* If camera has any motion, compute motion vector in the film pass. Otherwise, we avoid float
* precision issues by setting the motion of all static geometry to 0. */
@ -119,10 +132,13 @@ void ShadingView::render()
render_view_,
prepass_fb_,
combined_fb_,
gbuffer_fb_,
extent_,
rt_buffer_opaque_,
rt_buffer_refract_);
inst_.gbuffer.release();
inst_.volume.draw_compute(render_view_);
// inst_.lookdev.render_overlay(view_fb_);
@ -231,10 +247,10 @@ void CaptureView::render_world()
update_info->clipping_distances.y);
view.sync(view_m4, win_m4);
capture_fb_.ensure(
combined_fb_.ensure(
GPU_ATTACHMENT_NONE,
GPU_ATTACHMENT_TEXTURE_CUBEFACE(inst_.reflection_probes.cubemap_tx_, face));
GPU_framebuffer_bind(capture_fb_);
GPU_framebuffer_bind(combined_fb_);
inst_.pipelines.world.render(view);
}
@ -267,6 +283,10 @@ void CaptureView::render_probes()
prepass_fb.ensure(GPU_ATTACHMENT_TEXTURE(inst_.render_buffers.depth_tx),
GPU_ATTACHMENT_TEXTURE(inst_.render_buffers.vector_tx));
inst_.gbuffer.acquire(extent,
inst_.pipelines.probe.closure_layer_count(),
inst_.pipelines.probe.color_layer_count());
for (int face : IndexRange(6)) {
float4x4 view_m4 = cubeface_mat(face);
view_m4 = math::translate(view_m4, -update_info->probe_pos);
@ -278,16 +298,24 @@ void CaptureView::render_probes()
update_info->clipping_distances.y);
view.sync(view_m4, win_m4);
capture_fb_.ensure(
combined_fb_.ensure(
GPU_ATTACHMENT_TEXTURE(inst_.render_buffers.depth_tx),
GPU_ATTACHMENT_TEXTURE_CUBEFACE(inst_.reflection_probes.cubemap_tx_, face));
GPU_framebuffer_bind(capture_fb_);
GPU_framebuffer_clear_color_depth(capture_fb_, float4(0.0f, 0.0f, 0.0f, 1.0f), 1.0f);
inst_.pipelines.probe.render(view, prepass_fb, capture_fb_, extent);
gbuffer_fb_.ensure(
GPU_ATTACHMENT_TEXTURE(inst_.render_buffers.depth_tx),
GPU_ATTACHMENT_TEXTURE_CUBEFACE(inst_.reflection_probes.cubemap_tx_, face),
GPU_ATTACHMENT_TEXTURE(inst_.gbuffer.header_tx),
GPU_ATTACHMENT_TEXTURE_LAYER(inst_.gbuffer.color_tx.layer_view(0), 0),
GPU_ATTACHMENT_TEXTURE_LAYER(inst_.gbuffer.closure_tx.layer_view(0), 0));
GPU_framebuffer_bind(combined_fb_);
GPU_framebuffer_clear_color_depth(combined_fb_, float4(0.0f, 0.0f, 0.0f, 1.0f), 1.0f);
inst_.pipelines.probe.render(view, prepass_fb, combined_fb_, gbuffer_fb_, extent);
}
inst_.render_buffers.release();
inst_.gbuffer.release();
GPU_debug_group_end();
inst_.reflection_probes.remap_to_octahedral_projection(update_info->atlas_coord);
}

View File

@ -46,8 +46,9 @@ class ShadingView {
RayTraceBuffer rt_buffer_refract_;
DepthOfFieldBuffer dof_buffer_;
Framebuffer prepass_fb_;
Framebuffer combined_fb_;
Framebuffer prepass_fb_ = {"prepass_fb_"};
Framebuffer combined_fb_ = {"combined_fb_"};
Framebuffer gbuffer_fb_ = {"gbuffer_fb_"};
Framebuffer transparent_fb_ = {"transparent"};
TextureFromPool postfx_tx_;
@ -153,7 +154,8 @@ class MainView {
class CaptureView {
private:
Instance &inst_;
Framebuffer capture_fb_ = {"World.Capture"};
Framebuffer combined_fb_ = {"Capture.Combined"};
Framebuffer gbuffer_fb_ = {"Capture.Gbuffer"};
public:
CaptureView(Instance &inst) : inst_(inst) {}
@ -163,22 +165,4 @@ class CaptureView {
/** \} */
/* -------------------------------------------------------------------- */
/** \name Capture Planar View
*
* View for capturing planar probes outside a ShadingView.
* \{ */
class CapturePlanarView {
private:
Instance &inst_;
Framebuffer capture_fb_ = {"Planar.Capture"};
public:
CapturePlanarView(Instance &inst) : inst_(inst) {}
void render_probes();
};
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,46 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
/**
* Debug pass: visualize the g-buffer's per-pixel storage and evaluation cost as a gradient.
*/
#pragma BLENDER_REQUIRE(draw_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_gbuffer_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_renderpass_lib.glsl)
#pragma BLENDER_REQUIRE(gpu_shader_debug_gradients_lib.glsl)
void main()
{
ivec2 texel = ivec2(gl_FragCoord.xy);
GBufferData gbuf = gbuffer_read(gbuf_header_tx, gbuf_closure_tx, gbuf_color_tx, texel);
if (gbuf.header == 0u) {
discard;
return;
}
float shade = saturate(drw_normal_world_to_view(gbuf.surface_N).z);
uvec4 closure_types = (uvec4(gbuf.header) >> uvec4(0u, 4u, 8u, 12u)) & 15u;
float storage_cost = reduce_add(vec4(not(equal(closure_types, uvec4(0u)))));
float eval_cost = reduce_add(vec4(equal(closure_types, uvec4(GBUF_REFLECTION)))) * 1.0 +
reduce_add(vec4(equal(closure_types, uvec4(GBUF_REFRACTION)))) * 1.0 +
reduce_add(vec4(equal(closure_types, uvec4(GBUF_DIFFUSE)))) * 1.0 +
reduce_add(vec4(equal(closure_types, uvec4(GBUF_SSS)))) * 1.0;
switch (eDebugMode(debug_mode)) {
default:
case DEBUG_GBUFFER_STORAGE:
out_color_add = shade * vec4(green_to_red_gradient(storage_cost / 4.0), 0.0);
break;
case DEBUG_GBUFFER_EVALUATION:
out_color_add = shade * vec4(green_to_red_gradient(eval_cost / 4.0), 0.0);
break;
}
out_color_mul = vec4(0.0);
}
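Since the pass is drawn with `DRW_STATE_BLEND_CUSTOM` (dual-source blending), the framebuffer result is roughly `out_color_add + destination * out_color_mul`; writing `out_color_mul = vec4(0.0)` therefore replaces the shaded pixel with the debug gradient instead of blending over it.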

View File

@ -19,6 +19,10 @@ void main()
GBufferData gbuf = gbuffer_read(gbuf_header_tx, gbuf_closure_tx, gbuf_color_tx, texel);
if (!gbuf.has_diffuse && !gbuf.has_reflection && !gbuf.has_refraction) {
return;
}
ClosureLightStack stack;
stack.cl[0].N = gbuf.has_diffuse ? gbuf.diffuse.N : gbuf.reflection.N;
stack.cl[0].ltc_mat = LTC_LAMBERT_MAT;

View File

@ -9,6 +9,7 @@
#pragma BLENDER_REQUIRE(eevee_gbuffer_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_renderpass_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_colorspace_lib.glsl)
void main()
{
@ -44,4 +45,10 @@ void main()
out_combined.xyz += diffuse_light * gbuf.diffuse.color;
out_combined.xyz += reflect_light * gbuf.reflection.color;
out_combined.xyz += refract_light * gbuf.refraction.color;
if (any(isnan(out_combined))) {
out_combined = vec4(1.0, 0.0, 1.0, 0.0);
}
out_combined = colorspace_safe_color(out_combined);
}

View File

@ -20,6 +20,10 @@ void main()
float depth = texelFetch(hiz_tx, texel, 0).r;
GBufferData gbuf = gbuffer_read(gbuf_header_tx, gbuf_closure_tx, gbuf_color_tx, texel);
if (!gbuf.has_reflection && !gbuf.has_diffuse /* TODO(fclem) && !gbuf.has_refraction */) {
return;
}
vec3 P = drw_point_screen_to_world(vec3(uvcoordsvar.xy, depth));
/* Assume reflection closure normal is always somewhat representative of the geometric normal.
* Ng is only used for shadow biases and subsurface check in this case. */
@ -91,8 +95,12 @@ void main()
vec3 shadows = radiance_shadowed * safe_rcp(radiance_unshadowed);
output_renderpass_value(uniform_buf.render_pass.shadow_id, average(shadows));
imageStore(direct_diffuse_img, texel, vec4(radiance_diffuse, 1.0));
imageStore(direct_reflect_img, texel, vec4(radiance_specular, 1.0));
if (gbuf.has_diffuse) {
imageStore(direct_diffuse_img, texel, vec4(radiance_diffuse, 1.0));
}
if (gbuf.has_reflection) {
imageStore(direct_reflect_img, texel, vec4(radiance_specular, 1.0));
}
/* TODO(fclem): Support LTC for refraction. */
// imageStore(direct_refract_img, texel, vec4(cl_refr.light_shadowed, 1.0));
}

View File

@ -113,14 +113,160 @@ bool gbuffer_is_refraction(vec4 gbuffer)
return gbuffer.w < 1.0;
}
uint gbuffer_header_pack(GBufferMode mode, uint layer)
{
return (mode << (4u * layer));
}
GBufferMode gbuffer_header_unpack(uint data, uint layer)
{
return GBufferMode((data >> (4u * layer)) & 15u);
}
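A quick worked example of this nibble packing (values from the GBufferMode enum; purely illustrative):
uint header = gbuffer_header_pack(GBUF_REFRACTION, 0u) | /* 0x002 */
              gbuffer_header_pack(GBUF_REFLECTION, 1u) | /* 0x010 */
              gbuffer_header_pack(GBUF_DIFFUSE, 2u);     /* 0x300 */
/* header == 0x312u; gbuffer_header_unpack(header, 1u) == GBUF_REFLECTION.
 * Each mode must stay below 16u to fit its 4-bit slot. */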
/* Return true if any layer of the gbuffer matches the given closure. */
bool gbuffer_has_closure(uint header, eClosureBits closure)
{
int layer = 0;
/* Check special configurations first. */
if (gbuffer_header_unpack(header, layer) == GBUF_OPAQUE_DIELECTRIC) {
if (closure == eClosureBits(CLOSURE_DIFFUSE)) {
return true;
}
if (closure == eClosureBits(CLOSURE_REFLECTION)) {
return true;
}
return false;
}
/* Since closure order in the gbuffer is static, we check them in order. */
bool has_refraction = (gbuffer_header_unpack(header, layer) == GBUF_REFRACTION);
layer += int(has_refraction);
if (closure == eClosureBits(CLOSURE_REFRACTION)) {
return has_refraction;
}
bool has_reflection = (gbuffer_header_unpack(header, layer) == GBUF_REFLECTION);
layer += int(has_reflection);
if (closure == eClosureBits(CLOSURE_REFLECTION)) {
return has_reflection;
}
bool has_diffuse = (gbuffer_header_unpack(header, layer) == GBUF_DIFFUSE);
layer += int(has_diffuse);
if (closure == eClosureBits(CLOSURE_DIFFUSE)) {
return has_diffuse;
}
return false;
}
struct GBufferDataPacked {
uint header;
/* TODO(fclem): Resize arrays based on used closures. */
vec4 closure[4];
vec4 color[3];
};
GBufferDataPacked gbuffer_pack(ClosureDiffuse diffuse,
ClosureReflection reflection,
ClosureRefraction refraction,
vec3 default_N,
float thickness)
{
GBufferDataPacked gbuf;
gbuf.header = 0u;
bool has_refraction = refraction.weight > 1e-5;
bool has_reflection = reflection.weight > 1e-5;
bool has_diffuse = diffuse.weight > 1e-5;
bool has_sss = diffuse.sss_id > 0;
int layer = 0;
/* Check special configurations first. */
/* Opaque Dielectric. */
if (!has_refraction && !has_sss && has_reflection && has_diffuse) {
/* TODO(fclem): Compute this only if needed (guarded under ifdefs). */
bool has_shared_normal = all(equal(diffuse.N, reflection.N));
bool has_colorless_reflection = all(equal(reflection.color.rgb, reflection.color.gbr));
if (has_shared_normal && has_colorless_reflection) {
gbuf.color[layer] = gbuffer_color_pack(diffuse.color);
gbuf.closure[layer].xy = gbuffer_normal_pack(diffuse.N);
gbuf.closure[layer].z = reflection.roughness;
/* Supports weight > 1.0. Same precision as 10bit. */
gbuf.closure[layer].w = reflection.color.r * (1.0 / 16.0);
gbuf.header = gbuffer_header_pack(GBUF_OPAQUE_DIELECTRIC, layer);
return gbuf;
}
}
if (has_refraction) {
gbuf.color[layer] = gbuffer_color_pack(refraction.color);
gbuf.closure[layer].xy = gbuffer_normal_pack(refraction.N);
gbuf.closure[layer].z = refraction.roughness;
gbuf.closure[layer].w = gbuffer_ior_pack(refraction.ior);
gbuf.header |= gbuffer_header_pack(GBUF_REFRACTION, layer);
layer += 1;
}
if (has_reflection) {
gbuf.color[layer] = gbuffer_color_pack(reflection.color);
gbuf.closure[layer].xy = gbuffer_normal_pack(reflection.N);
gbuf.closure[layer].z = reflection.roughness;
gbuf.closure[layer].w = 0.0; /* Unused. */
gbuf.header |= gbuffer_header_pack(GBUF_REFLECTION, layer);
layer += 1;
}
if (has_diffuse) {
gbuf.color[layer] = gbuffer_color_pack(diffuse.color);
gbuf.closure[layer].xy = gbuffer_normal_pack(diffuse.N);
gbuf.closure[layer].z = 0.0; /* Unused. */
gbuf.closure[layer].w = gbuffer_thickness_pack(thickness);
gbuf.header |= gbuffer_header_pack(GBUF_DIFFUSE, layer);
layer += 1;
}
if (has_sss) {
gbuf.closure[layer].xyz = gbuffer_sss_radii_pack(diffuse.sss_radius);
gbuf.closure[layer].w = gbuffer_object_id_unorm16_pack(diffuse.sss_id);
gbuf.header |= gbuffer_header_pack(GBUF_SSS, layer);
layer += 1;
}
if (layer == 0) {
/* If no lit BSDF is output, still write the surface normal in the first layer.
* This is needed by some algorithms. */
gbuf.color[layer] = vec4(0.0);
gbuf.closure[layer].xy = gbuffer_normal_pack(default_N);
gbuf.closure[layer].z = 0.0; /* Unused. */
gbuf.closure[layer].w = 0.0; /* Unused. */
gbuf.header |= gbuffer_header_pack(GBUF_UNLIT, layer);
}
return gbuf;
}
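Note the intensity encoding used by the opaque-dielectric path above: `reflection.color.r * (1.0 / 16.0)` lets reflection intensities up to 16 survive in a normalized channel, matched by the `* 16.0` decode in gbuffer_read() below.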
struct GBufferData {
/* Only valid (or null) if `has_diffuse`, `has_reflection` or `has_refraction` is true. */
ClosureDiffuse diffuse;
ClosureReflection reflection;
ClosureRefraction refraction;
/* First world normal stored in the gbuffer. Only valid if `has_any_surface` is true. */
vec3 surface_N;
float thickness;
bool has_diffuse;
bool has_reflection;
bool has_refraction;
bool has_any_surface;
uint header;
};
GBufferData gbuffer_read(usampler2D header_tx,
@ -130,66 +276,122 @@ GBufferData gbuffer_read(usampler2D header_tx,
{
GBufferData gbuf;
uint header = texelFetch(header_tx, texel, 0).r;
gbuf.header = texelFetch(header_tx, texel, 0).r;
gbuf.has_any_surface = (gbuf.header != 0u);
if (!gbuf.has_any_surface) {
gbuf.has_diffuse = false;
gbuf.has_reflection = false;
gbuf.has_refraction = false;
return gbuf;
}
gbuf.thickness = 0.0;
gbuf.has_diffuse = flag_test(header, CLOSURE_DIFFUSE);
gbuf.has_reflection = flag_test(header, CLOSURE_REFLECTION);
gbuf.has_refraction = flag_test(header, CLOSURE_REFRACTION);
if (gbuf.has_reflection) {
int layer = 0;
/* First closure is always written. */
gbuf.surface_N = gbuffer_normal_unpack(texelFetch(closure_tx, ivec3(texel, 0), 0).xy);
int layer = 0;
/* Check special configurations first. */
if (gbuffer_header_unpack(gbuf.header, layer) == GBUF_OPAQUE_DIELECTRIC) {
vec4 closure_packed = texelFetch(closure_tx, ivec3(texel, layer), 0);
gbuf.reflection.N = gbuffer_normal_unpack(closure_packed.xy);
gbuf.reflection.roughness = closure_packed.z;
vec4 color_packed = texelFetch(color_tx, ivec3(texel, layer), 0);
gbuf.reflection.color = gbuffer_color_unpack(color_packed);
}
else {
gbuf.reflection.N = vec3(0.0, 0.0, 1.0);
gbuf.reflection.roughness = 0.0;
gbuf.reflection.color = vec3(0.0);
}
if (gbuf.has_diffuse) {
int layer = 1;
vec4 closure_packed = texelFetch(closure_tx, ivec3(texel, layer), 0);
gbuf.diffuse.N = gbuffer_normal_unpack(closure_packed.xy);
gbuf.diffuse.sss_id = 0u;
gbuf.thickness = gbuffer_thickness_unpack(closure_packed.z);
vec4 color_packed = texelFetch(color_tx, ivec3(texel, layer), 0);
gbuf.diffuse.color = gbuffer_color_unpack(color_packed);
gbuf.has_diffuse = true;
if (flag_test(header, CLOSURE_SSS)) {
int layer = 2;
vec4 closure_packed = texelFetch(closure_tx, ivec3(texel, layer), 0);
gbuf.diffuse.sss_radius = gbuffer_sss_radii_unpack(closure_packed.xyz);
gbuf.diffuse.sss_id = gbuffer_object_id_unorm16_unpack(closure_packed.w);
}
}
else {
gbuf.diffuse.N = vec3(0.0, 0.0, 1.0);
gbuf.reflection.color = vec3(closure_packed.w * 16.0);
gbuf.reflection.N = gbuf.diffuse.N = gbuffer_normal_unpack(closure_packed.xy);
gbuf.reflection.roughness = closure_packed.z;
gbuf.has_reflection = true;
/* Default values. */
gbuf.refraction.color = vec3(0.0);
gbuf.refraction.N = vec3(0.0, 0.0, 1.0);
gbuf.refraction.roughness = 0.0;
gbuf.refraction.ior = 1.1;
gbuf.has_refraction = false;
/* Default values. */
gbuf.diffuse.sss_radius = vec3(0.0, 0.0, 0.0);
gbuf.diffuse.sss_id = 0u;
gbuf.diffuse.color = vec3(0.0);
return gbuf;
}
/* Since closure order in the gbuffer is static, we check them in order. */
gbuf.has_refraction = (gbuffer_header_unpack(gbuf.header, layer) == GBUF_REFRACTION);
if (gbuf.has_refraction) {
int layer = 1;
vec4 closure_packed = texelFetch(closure_tx, ivec3(texel, layer), 0);
vec4 color_packed = texelFetch(color_tx, ivec3(texel, layer), 0);
gbuf.refraction.color = gbuffer_color_unpack(color_packed);
gbuf.refraction.N = gbuffer_normal_unpack(closure_packed.xy);
gbuf.refraction.roughness = closure_packed.z;
gbuf.refraction.ior = gbuffer_ior_unpack(closure_packed.w);
vec4 color_packed = texelFetch(color_tx, ivec3(texel, layer), 0);
gbuf.refraction.color = gbuffer_color_unpack(color_packed);
layer += 1;
}
else {
gbuf.refraction.N = vec3(0.0, 0.0, 1.0);
gbuf.refraction.ior = 1.1;
gbuf.refraction.roughness = 0.0;
/* Default values. */
gbuf.refraction.color = vec3(0.0);
gbuf.refraction.N = vec3(0.0, 0.0, 1.0);
gbuf.refraction.roughness = 0.0;
gbuf.refraction.ior = 1.1;
}
gbuf.has_reflection = (gbuffer_header_unpack(gbuf.header, layer) == GBUF_REFLECTION);
if (gbuf.has_reflection) {
vec4 closure_packed = texelFetch(closure_tx, ivec3(texel, layer), 0);
vec4 color_packed = texelFetch(color_tx, ivec3(texel, layer), 0);
gbuf.reflection.color = gbuffer_color_unpack(color_packed);
gbuf.reflection.N = gbuffer_normal_unpack(closure_packed.xy);
gbuf.reflection.roughness = closure_packed.z;
layer += 1;
}
else {
/* Default values. */
gbuf.reflection.color = vec3(0.0);
gbuf.reflection.N = vec3(0.0, 0.0, 1.0);
gbuf.reflection.roughness = 0.0;
}
gbuf.has_diffuse = (gbuffer_header_unpack(gbuf.header, layer) == GBUF_DIFFUSE);
if (gbuf.has_diffuse) {
vec4 closure_packed = texelFetch(closure_tx, ivec3(texel, layer), 0);
vec4 color_packed = texelFetch(color_tx, ivec3(texel, layer), 0);
gbuf.diffuse.color = gbuffer_color_unpack(color_packed);
gbuf.diffuse.N = gbuffer_normal_unpack(closure_packed.xy);
gbuf.thickness = gbuffer_thickness_unpack(closure_packed.w);
layer += 1;
}
else {
/* Default values. */
gbuf.diffuse.color = vec3(0.0);
gbuf.diffuse.N = vec3(0.0, 0.0, 1.0);
gbuf.thickness = 0.0;
}
bool has_sss = (gbuffer_header_unpack(gbuf.header, layer) == GBUF_SSS);
if (has_sss) {
vec4 closure_packed = texelFetch(closure_tx, ivec3(texel, layer), 0);
gbuf.diffuse.sss_radius = gbuffer_sss_radii_unpack(closure_packed.xyz);
gbuf.diffuse.sss_id = gbuffer_object_id_unorm16_unpack(closure_packed.w);
layer += 1;
}
else {
/* Default values. */
gbuf.diffuse.sss_radius = vec3(0.0, 0.0, 0.0);
gbuf.diffuse.sss_id = 0u;
}
return gbuf;

View File

@ -62,10 +62,10 @@ vec3 load_normal(ivec2 texel)
if (gbuf.has_diffuse) {
N = gbuf.diffuse.N;
}
if (gbuf.has_reflection) {
else if (gbuf.has_reflection) {
N = gbuf.reflection.N;
}
if (gbuf.has_refraction) {
else if (gbuf.has_refraction) {
N = gbuf.refraction.N;
}
return N;
@ -95,20 +95,26 @@ void main()
GBufferData gbuf = gbuffer_read(gbuf_header_tx, gbuf_closure_tx, gbuf_color_tx, texel_fullres);
uint closure_bits = texelFetch(gbuf_header_tx, texel_fullres, 0).r;
if (!flag_test(closure_bits, uniform_buf.raytrace.closure_active)) {
return;
}
vec3 center_N = gbuf.diffuse.N;
float roughness = 1.0;
if (uniform_buf.raytrace.closure_active == eClosureBits(CLOSURE_REFLECTION)) {
roughness = gbuf.reflection.roughness;
center_N = gbuf.reflection.N;
if (!gbuf.has_reflection) {
return;
}
}
if (uniform_buf.raytrace.closure_active == eClosureBits(CLOSURE_REFRACTION)) {
else if (uniform_buf.raytrace.closure_active == eClosureBits(CLOSURE_REFRACTION)) {
roughness = 1.0; /* TODO(fclem): Apparent roughness. */
center_N = gbuf.refraction.N;
if (!gbuf.has_refraction) {
return;
}
}
else /* if (uniform_buf.raytrace.closure_active == eClosureBits(CLOSURE_DIFFUSE)) */ {
if (!gbuf.has_diffuse) {
return;
}
}
float mix_fac = saturate(roughness * uniform_buf.raytrace.roughness_mask_scale -

View File

@ -76,8 +76,8 @@ void main()
}
bool valid_texel = in_texture_range(texel_fullres, gbuf_header_tx);
uint closure_bits = (!valid_texel) ? 0u : texelFetch(gbuf_header_tx, texel_fullres, 0).r;
if (!flag_test(closure_bits, CLOSURE_ACTIVE)) {
uint header = (!valid_texel) ? 0u : texelFetch(gbuf_header_tx, texel_fullres, 0).r;
if (!gbuffer_has_closure(header, eClosureBits(CLOSURE_ACTIVE))) {
imageStore(out_radiance_img, texel_fullres, vec4(FLT_11_11_10_MAX, 0.0));
imageStore(out_variance_img, texel_fullres, vec4(0.0));
imageStore(out_hit_depth_img, texel_fullres, vec4(0.0));

View File

@ -50,16 +50,16 @@ void main()
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
bool valid_texel = in_texture_range(texel, gbuf_header_tx);
uint closure_bits = (!valid_texel) ? 0u : texelFetch(gbuf_header_tx, texel, 0).r;
uint header = (!valid_texel) ? 0u : texelFetch(gbuf_header_tx, texel, 0).r;
if (flag_test(closure_bits, uniform_buf.raytrace.closure_active)) {
if (gbuffer_has_closure(header, uniform_buf.raytrace.closure_active)) {
GBufferData gbuf = gbuffer_read(gbuf_header_tx, gbuf_closure_tx, gbuf_color_tx, texel);
float roughness = 1.0;
if (uniform_buf.raytrace.closure_active == eClosureBits(CLOSURE_REFLECTION)) {
roughness = gbuf.reflection.roughness;
}
if (uniform_buf.raytrace.closure_active == eClosureBits(CLOSURE_REFRACTION)) {
else if (uniform_buf.raytrace.closure_active == eClosureBits(CLOSURE_REFRACTION)) {
roughness = 0.0; /* TODO(fclem): Apparent roughness. For now, always raytrace. */
}

View File

@ -82,62 +82,28 @@ void main()
/* ----- GBuffer output ----- */
uint header = 0u;
GBufferDataPacked gbuf = gbuffer_pack(
g_diffuse_data, g_reflection_data, g_refraction_data, out_normal, thickness);
if (g_reflection_data.weight > 0.0) {
/* Reflection. */
vec4 closure;
closure.xy = gbuffer_normal_pack(g_reflection_data.N);
closure.z = g_reflection_data.roughness;
closure.w = 0.0;
imageStore(out_gbuf_closure_img, ivec3(out_texel, 0), closure);
/* Output header and first closure using framebuffer attachment. */
out_gbuf_header = gbuf.header;
out_gbuf_color = gbuf.color[0];
out_gbuf_closure = gbuf.closure[0];
vec4 color = gbuffer_color_pack(g_reflection_data.color);
imageStore(out_gbuf_color_img, ivec3(out_texel, 0), color);
header |= CLOSURE_REFLECTION;
/* Output remaining closures using image store. */
/* NOTE: The image view starts at layer 1, so each destination layer is `closure_index - 1`. */
if (gbuffer_header_unpack(gbuf.header, 1) != GBUF_NONE) {
imageStore(out_gbuf_color_img, ivec3(out_texel, 1 - 1), gbuf.color[1]);
imageStore(out_gbuf_closure_img, ivec3(out_texel, 1 - 1), gbuf.closure[1]);
}
float combined_weight = g_refraction_data.weight + g_diffuse_data.weight;
if (combined_weight > 0.0) {
/* TODO(fclem) other RNG. */
float refract_rand = fract(g_closure_rand * 6.1803398875);
bool output_refraction = (refract_rand * combined_weight) < g_refraction_data.weight;
if (output_refraction) {
/* Refraction. */
vec4 closure;
closure.xy = gbuffer_normal_pack(g_refraction_data.N);
closure.z = g_refraction_data.roughness;
closure.w = gbuffer_ior_pack(g_refraction_data.ior);
imageStore(out_gbuf_closure_img, ivec3(out_texel, 1), closure);
vec4 color = gbuffer_color_pack(g_refraction_data.color);
imageStore(out_gbuf_color_img, ivec3(out_texel, 1), color);
header |= CLOSURE_REFRACTION;
}
else {
/* Diffuse. */
vec4 closure;
closure.xy = gbuffer_normal_pack(g_diffuse_data.N);
closure.z = gbuffer_thickness_pack(thickness);
closure.w = 0.0; /* Unused. */
imageStore(out_gbuf_closure_img, ivec3(out_texel, 1), closure);
vec4 color = gbuffer_color_pack(g_diffuse_data.color);
imageStore(out_gbuf_color_img, ivec3(out_texel, 1), color);
header |= CLOSURE_DIFFUSE;
}
if (g_diffuse_data.sss_id > 0) {
/* SubSurface Scattering. */
vec4 closure;
closure.xyz = gbuffer_sss_radii_pack(g_diffuse_data.sss_radius);
closure.w = gbuffer_object_id_unorm16_pack(uint(resource_id));
imageStore(out_gbuf_closure_img, ivec3(out_texel, 2), closure);
header |= CLOSURE_SSS;
}
if (gbuffer_header_unpack(gbuf.header, 2) != GBUF_NONE) {
imageStore(out_gbuf_color_img, ivec3(out_texel, 2 - 1), gbuf.color[2]);
imageStore(out_gbuf_closure_img, ivec3(out_texel, 2 - 1), gbuf.closure[2]);
}
if (gbuffer_header_unpack(gbuf.header, 3) != GBUF_NONE) {
/* No color for SSS. */
imageStore(out_gbuf_closure_img, ivec3(out_texel, 3 - 1), gbuf.closure[3]);
}
imageStore(out_gbuf_header_img, out_texel, uvec4(header));
/* ----- Radiance output ----- */

View File

@ -38,7 +38,8 @@ void main()
float threshold = 0.0;
# else
float noise_offset = sampling_rng_1D_get(SAMPLING_TRANSPARENCY);
float threshold = transparency_hashed_alpha_threshold(1.0, noise_offset, g_data.P);
float threshold = transparency_hashed_alpha_threshold(
uniform_buf.pipeline.alpha_hash_scale, noise_offset, g_data.P);
# endif
float transparency = average(g_transmittance);

View File

@ -100,3 +100,17 @@ GPU_SHADER_CREATE_INFO(eevee_deferred_planar_eval)
#undef image_out
#undef image_in
/* -------------------------------------------------------------------- */
/** \name Debug
* \{ */
GPU_SHADER_CREATE_INFO(eevee_debug_gbuffer)
.do_static_compilation(true)
.fragment_out(0, Type::VEC4, "out_color_add", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "out_color_mul", DualBlend::SRC_1)
.push_constant(Type::INT, "debug_mode")
.fragment_source("eevee_debug_gbuffer_frag.glsl")
.additional_info("draw_view", "draw_fullscreen", "eevee_shared", "eevee_gbuffer_data");
/** \} */

Some files were not shown because too many files have changed in this diff.