WIP: Rewrite asset browser as separate editor #104978

Closed
Julian Eisel wants to merge 68 commits from asset-browser-grid-view into main

134 changed files with 2323 additions and 914 deletions
Showing only changes of commit c8c8783088

View File

@ -765,6 +765,7 @@ endif()
set_and_warn_dependency(WITH_PYTHON WITH_CYCLES OFF)
set_and_warn_dependency(WITH_PYTHON WITH_DRACO OFF)
set_and_warn_dependency(WITH_PYTHON WITH_MOD_FLUID OFF)
if(WITH_DRACO AND NOT WITH_PYTHON_INSTALL)
message(STATUS "WITH_DRACO requires WITH_PYTHON_INSTALL to be ON, disabling WITH_DRACO for now")
@ -937,7 +938,10 @@ set(PLATFORM_CFLAGS)
set(C_WARNINGS)
set(CXX_WARNINGS)
# for gcc -Wno-blah-blah
# NOTE: These flags are intended for situations where it's impractical to
# suppress warnings by modifying the code or for code which is maintained externally.
# For GCC this typically means adding `-Wno-*` arguments to negate warnings
# that are useful in the general case.
set(C_REMOVE_STRICT_FLAGS)
set(CXX_REMOVE_STRICT_FLAGS)
@ -1617,6 +1621,18 @@ if(CMAKE_COMPILER_IS_GNUCC)
ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_IMPLICIT_FALLTHROUGH -Wimplicit-fallthrough=5)
endif()
#----------------------
# Suppress Strict Flags
#
# Exclude the following warnings from this list:
# - `-Wno-address`:
# This can give useful hints that point to bugs/misleading logic.
# - `-Wno-strict-prototypes`:
# No need to support older C-style prototypes.
#
# If code in `./extern/` needs to suppress these flags, that can be done on a case-by-case basis.
# flags to undo strict flags
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_DEPRECATED_DECLARATIONS -Wno-deprecated-declarations)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
@ -1672,6 +1688,9 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
# ADD_CHECK_C_COMPILER_FLAG(C_WARNINGS C_WARN_UNUSED_MACROS -Wunused-macros)
# ADD_CHECK_CXX_COMPILER_FLAG(CXX_WARNINGS CXX_WARN_UNUSED_MACROS -Wunused-macros)
#----------------------
# Suppress Strict Flags
# flags to undo strict flags
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_PARAMETER -Wno-unused-parameter)
ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)

View File

@ -4,6 +4,12 @@ OpenGL Wrapper (bgl)
.. module:: bgl
.. warning::
This module is deprecated and will be removed in a future release,
when OpenGL is replaced by Metal and Vulkan.
Use the graphics API independent :mod:`gpu` module instead.
This module wraps OpenGL constants and functions, making them available from
within Blender Python.
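
For reference, a minimal sketch of the suggested migration, assuming the `gpu` and `gpu_extras` modules of current Blender releases (built-in shader names may differ between versions); it is meant to run inside a draw-handler callback:

    import gpu
    from gpu_extras.batch import batch_for_shader

    # Old, deprecated style: bgl.glEnable(bgl.GL_BLEND)
    gpu.state.blend_set('ALPHA')

    shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')
    batch = batch_for_shader(shader, 'LINES', {"pos": [(0, 0), (100, 100)]})
    shader.bind()
    shader.uniform_float("color", (1.0, 1.0, 1.0, 1.0))
    batch.draw(shader)

    gpu.state.blend_set('NONE')  # Restore the default blend state.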

View File

@ -450,7 +450,11 @@ if(WITH_COREAUDIO)
if(WITH_STRICT_DEPENDENCIES)
message(FATAL_ERROR "CoreAudio not found!")
else()
set(WITH_COREAUDIO FALSE CACHE BOOL "Build With CoreAudio" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_COREAUDIO FALSE CACHE BOOL "Build With CoreAudio" FORCE)
else()
set(WITH_COREAUDIO FALSE)
endif()
message(WARNING "CoreAudio not found, plugin will not be built.")
endif()
endif()
@ -487,7 +491,11 @@ if(WITH_FFMPEG)
list(APPEND DLLS ${FFMPEG_DLLS})
endif()
else()
set(WITH_FFMPEG FALSE CACHE BOOL "Build With FFMPEG" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_FFMPEG FALSE CACHE BOOL "Build With FFMPEG" FORCE)
else()
set(WITH_FFMPEG FALSE)
endif()
message(WARNING "FFMPEG not found, plugin will not be built.")
endif()
endif()
@ -536,7 +544,11 @@ if(WITH_FFTW)
list(APPEND DLLS ${FFTW_DLLS})
endif()
else()
set(WITH_FFTW FALSE CACHE BOOL "Build With FFTW" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_FFTW FALSE CACHE BOOL "Build With FFTW" FORCE)
else()
set(WITH_FFTW FALSE)
endif()
message(WARNING "FFTW not found, convolution functionality will not be built.")
endif()
endif()
@ -579,7 +591,11 @@ if(WITH_JACK)
list(APPEND DLLS ${JACK_DLLS})
endif()
else()
set(WITH_JACK FALSE CACHE BOOL "Build With JACK" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_JACK FALSE CACHE BOOL "Build With JACK" FORCE)
else()
set(WITH_JACK FALSE)
endif()
message(WARNING "JACK not found, plugin will not be built.")
endif()
endif()
@ -615,7 +631,11 @@ if(WITH_LIBSNDFILE)
list(APPEND DLLS ${LIBSNDFILE_DLLS})
endif()
else()
set(WITH_LIBSNDFILE FALSE CACHE BOOL "Build With LibSndFile" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_LIBSNDFILE FALSE CACHE BOOL "Build With LibSndFile" FORCE)
else()
set(WITH_LIBSNDFILE FALSE)
endif()
message(WARNING "LibSndFile not found, plugin will not be built.")
endif()
endif()
@ -649,7 +669,11 @@ if(WITH_OPENAL)
list(APPEND DLLS ${OPENAL_DLLS})
endif()
else()
set(WITH_OPENAL FALSE CACHE BOOL "Build With OpenAL" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_OPENAL FALSE CACHE BOOL "Build With OpenAL" FORCE)
else()
set(WITH_OPENAL FALSE)
endif()
message(WARNING "OpenAL not found, plugin will not be built.")
endif()
endif()
@ -685,7 +709,11 @@ if(WITH_PULSEAUDIO)
list(APPEND STATIC_PLUGINS PulseAudioDevice)
endif()
else()
set(WITH_PULSEAUDIO FALSE CACHE BOOL "Build With PulseAudio" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_PULSEAUDIO FALSE CACHE BOOL "Build With PulseAudio" FORCE)
else()
set(WITH_PULSEAUDIO FALSE)
endif()
message(WARNING "PulseAudio not found, plugin will not be built.")
endif()
endif()
@ -716,8 +744,12 @@ if(WITH_PYTHON)
list(APPEND DLLS ${PYTHON_DLLS})
endif()
else()
set(WITH_PYTHON FALSE CACHE BOOL "Build With Python Library" FORCE)
message(WARNING "Python libraries not found, language binding will not be built.")
if(AUDASPACE_STANDALONE)
set(WITH_PYTHON FALSE CACHE BOOL "Build With Python Library" FORCE)
else()
set(WITH_PYTHON FALSE)
endif()
message(WARNING "Python & NumPy libraries not found, language binding will not be built.")
endif()
endif()
@ -759,7 +791,11 @@ if(WITH_SDL)
list(APPEND DLLS ${SDL_DLLS})
endif()
else()
set(WITH_SDL FALSE CACHE BOOL "Build With SDL" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_SDL FALSE CACHE BOOL "Build With SDL" FORCE)
else()
set(WITH_SDL FALSE)
endif()
message(WARNING "SDL not found, plugin will not be built.")
endif()
endif()
@ -1116,7 +1152,11 @@ if(WITH_DOCS)
add_custom_target(audaspace_doc ALL ${DOXYGEN_EXECUTABLE} Doxyfile COMMENT "Building C++ HTML documentation with Doxygen.")
else()
set(WITH_DOCS FALSE CACHE BOOL "Build C++ HTML Documentation with Doxygen" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_DOCS FALSE CACHE BOOL "Build C++ HTML Documentation with Doxygen" FORCE)
else()
set(WITH_DOCS FALSE)
endif()
message(WARNING "Doxygen (and/or dot) not found, documentation will not be built.")
endif()
endif()
@ -1129,7 +1169,11 @@ if(WITH_BINDING_DOCS)
add_custom_target(bindings_doc ALL COMMAND ${PYTHON_EXECUTABLE} setup.py --build-docs ${SPHINX_EXECUTABLE} -q -b html -c "${CMAKE_CURRENT_BINARY_DIR}" -d "${CMAKE_CURRENT_BINARY_DIR}/_doctrees" "${CMAKE_CURRENT_SOURCE_DIR}/bindings/doc" "${CMAKE_CURRENT_BINARY_DIR}/doc/bindings" DEPENDS pythonmodule COMMENT "Building C/Python HTML documentation with Sphinx.")
else()
set(WITH_BINDING_DOCS FALSE CACHE BOOL "Build C/Python HTML Documentation with Sphinx" FORCE)
if(AUDASPACE_STANDALONE)
set(WITH_BINDING_DOCS FALSE CACHE BOOL "Build C/Python HTML Documentation with Sphinx" FORCE)
else()
set(WITH_BINDING_DOCS FALSE)
endif()
message(WARNING "Sphinx not found, binding documentation will not be built.")
endif()
endif()

View File

@ -7,6 +7,11 @@ if(CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
"-Wno-strict-prototypes"
)
endif()
if(CMAKE_COMPILER_IS_GNUCC AND (NOT "${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "12.1"))
add_c_flag(
"-Wno-address"
)
endif()
# MSVC's inliner is not having a happy time with glewIsSupported
# causing this to be one of the most expensive things to build

View File

@ -35,11 +35,15 @@ if(NOT WITH_SYSTEM_AUDASPACE)
else()
list(APPEND LIB
${AUDASPACE_C_LIBRARIES}
${AUDASPACE_PY_LIBRARIES}
)
if(WITH_PYTHON AND WITH_PYTHON_NUMPY)
list(APPEND LIB
${AUDASPACE_PY_LIBRARIES}
)
endif()
endif()
if(WITH_PYTHON)
if(WITH_PYTHON AND WITH_PYTHON_NUMPY)
list(APPEND INC_SYS
${PYTHON_INCLUDE_DIRS}
)

View File

@ -145,8 +145,8 @@ if(CYCLES_STANDALONE_REPOSITORY)
-DOIIO_STATIC_DEFINE
)
set(OPENIMAGEIO_INCLUDE_DIR ${OPENIMAGEIO_ROOT_DIR}/include)
set(OPENIMAGEIO_INCLUDE_DIRS ${OPENIMAGEIO_INCLUDE_DIR} ${OPENIMAGEIO_INCLUDE_DIR}/OpenImageIO)
set(OPENIMAGEIO_INCLUDE_DIR ${OPENIMAGEIO_ROOT_DIR}/include)
set(OPENIMAGEIO_INCLUDE_DIRS ${OPENIMAGEIO_INCLUDE_DIR} ${OPENIMAGEIO_INCLUDE_DIR}/OpenImageIO)
# Special exceptions for libraries which needs explicit debug version
set(OPENIMAGEIO_LIBRARIES
optimized ${OPENIMAGEIO_ROOT_DIR}/lib/OpenImageIO.lib

View File

@ -19,8 +19,10 @@ struct NodeEnum {
}
void insert(const char *x, int y)
{
left[ustring(x)] = y;
right[y] = ustring(x);
ustring ustr_x(x);
left[ustr_x] = y;
right[y] = ustr_x;
}
bool exists(ustring x) const

Binary file not shown.

View File

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-or-later
"""
Utilities relating to text mode console interactions.
"""

View File

@ -256,7 +256,7 @@ PYGETTEXT_KEYWORDS = (() +
# bUnitDef unit names.
# NOTE: regex is a bit more complex than it would need to be. Since the actual
# identifier (`B_UNIT_DEF_`) is at the end, if it's simpler/too general it
# becomes extremely slow to process some (unrelated) source files.
# becomes extremely slow to process some (unrelated) source files.
((r"\{(?:(?:\s*\"[^\"',]+\"\s*,)|(?:\s*NULL\s*,)){4}\s*" +
_msg_re + r"\s*,(?:(?:\s*\"[^\"',]+\"\s*,)|(?:\s*NULL\s*,))(?:[^,]+,){2}"
+ "\s*B_UNIT_DEF_[_A-Z]+\s*\}"),) +

View File

@ -213,7 +213,7 @@ execute.hooks = []
def autocomplete(context):
from console import intellisense
from bl_console_utils.autocomplete import intellisense
sc = context.space_data

View File

@ -1066,29 +1066,9 @@ def km_node_editor(params):
{"items": items},
)
def node_select_ops(select_mouse):
return [
("node.select", {"type": select_mouse, "value": 'PRESS'},
{"properties": [("extend", False), ("deselect_all", True)]}),
("node.select", {"type": select_mouse, "value": 'PRESS', "ctrl": True},
{"properties": [("extend", False)]}),
("node.select", {"type": select_mouse, "value": 'PRESS', "alt": True},
{"properties": [("extend", False)]}),
("node.select", {"type": select_mouse, "value": 'PRESS', "ctrl": True, "alt": True},
{"properties": [("extend", False)]}),
("node.select", {"type": select_mouse, "value": 'PRESS', "shift": True},
{"properties": [("extend", True)]}),
("node.select", {"type": select_mouse, "value": 'PRESS', "shift": True, "ctrl": True},
{"properties": [("extend", True)]}),
("node.select", {"type": select_mouse, "value": 'PRESS', "shift": True, "alt": True},
{"properties": [("extend", True)]}),
("node.select", {"type": select_mouse, "value": 'PRESS', "shift": True, "ctrl": True, "alt": True},
{"properties": [("extend", True)]}),
]
# Allow node selection with both for RMB select
items.extend(node_select_ops('LEFTMOUSE'))
items.extend(_template_node_select(type='LEFTMOUSE', value='PRESS', select_passthrough=True))
items.extend([
("node.select_box", {"type": 'LEFTMOUSE', "value": 'CLICK_DRAG'},
@ -4053,6 +4033,36 @@ def km_3d_view_tool_edit_gpencil_select(params):
)
# NOTE: duplicated from `blender_default.py`.
def _template_node_select(*, type, value, select_passthrough):
items = [
("node.select", {"type": type, "value": value},
{"properties": [("deselect_all", True), ("select_passthrough", True)]}),
("node.select", {"type": type, "value": value, "ctrl": True}, None),
("node.select", {"type": type, "value": value, "alt": True}, None),
("node.select", {"type": type, "value": value, "ctrl": True, "alt": True}, None),
("node.select", {"type": type, "value": value, "shift": True},
{"properties": [("toggle", True)]}),
("node.select", {"type": type, "value": value, "shift": True, "ctrl": True},
{"properties": [("toggle", True)]}),
("node.select", {"type": type, "value": value, "shift": True, "alt": True},
{"properties": [("toggle", True)]}),
("node.select", {"type": type, "value": value, "shift": True, "ctrl": True, "alt": True},
{"properties": [("toggle", True)]}),
]
if select_passthrough and (value == 'PRESS'):
# Add an additional click item to de-select all other items,
# needed so pass-through is able to de-select other items.
items.append((
"node.select",
{"type": type, "value": 'CLICK'},
{"properties": [("deselect_all", True)]},
))
return items
def km_3d_view_tool_interactive_add(params):
return (
"3D View Tool: Object, Add Primitive",

View File

@ -1642,6 +1642,27 @@ class SEQUENCER_PT_source(SequencerButtonsPanel, Panel):
split.operator("sound.pack", icon='UGLYPACKAGE', text="")
layout.prop(sound, "use_memory_cache")
col = layout.box()
col = col.column(align=True)
split = col.split(factor=0.5, align=False)
split.alignment = 'RIGHT'
split.label(text="Samplerate")
split.alignment = 'LEFT'
if sound.samplerate <= 0:
split.label(text="Unknown")
else:
split.label(text="%d Hz." % sound.samplerate, translate=False)
split = col.split(factor=0.5, align=False)
split.alignment = 'RIGHT'
split.label(text="Channels")
split.alignment = 'LEFT'
# FIXME(@campbellbarton): this is ugly, we may want to support a way of showing a label from an enum.
channel_enum_items = sound.bl_rna.properties["channels"].enum_items
split.label(text=channel_enum_items[channel_enum_items.find(sound.channels)].name)
del channel_enum_items
else:
if strip_type == 'IMAGE':
col = layout.column()

View File

@ -89,6 +89,42 @@ set(SRC_DNA_INC
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_xr_types.h
)
set(SRC_DNA_DEFAULTS_INC
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_armature_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_asset_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_brush_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_cachefile_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_camera_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_collection_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_curves_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_curve_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_fluid_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_gpencil_modifier_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_image_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_lattice_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_lightprobe_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_light_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_linestyle_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_material_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_mesh_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_meta_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_modifier_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_movieclip_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_object_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_particle_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_pointcloud_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_scene_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_simulation_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_space_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_speaker_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_texture_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_vec_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_view3d_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_volume_defaults.h
${CMAKE_CURRENT_SOURCE_DIR}/makesdna/DNA_world_defaults.h
)
add_subdirectory(datatoc)
add_subdirectory(editors)
add_subdirectory(windowmanager)

View File

@ -20,6 +20,7 @@ struct Depsgraph;
struct Main;
struct Sequence;
struct bSound;
struct SoundInfo;
typedef struct SoundWaveform {
int length;
@ -78,6 +79,7 @@ typedef enum eSoundChannels {
typedef struct SoundInfo {
struct {
eSoundChannels channels;
int samplerate;
} specs;
float length;
} SoundInfo;

View File

@ -62,7 +62,7 @@ void BKE_subdiv_eval_limit_point_and_derivatives(struct Subdiv *subdiv,
void BKE_subdiv_eval_limit_point_and_normal(
struct Subdiv *subdiv, int ptex_face_index, float u, float v, float r_P[3], float r_N[3]);
/* Evaluate smoothly interpolated vertex data (such as orco). */
/* Evaluate smoothly interpolated vertex data (such as ORCO). */
void BKE_subdiv_eval_vertex_data(struct Subdiv *subdiv,
const int ptex_face_index,
const float u,

View File

@ -1172,14 +1172,21 @@ static bool scene_collections_object_remove(
{
bool removed = false;
/* If given object is removed from all collections in given scene, then it can also be safely
* removed from rigidbody world for given scene. */
if (collection_skip == NULL) {
BKE_scene_remove_rigidbody_object(bmain, scene, ob, free_us);
}
FOREACH_SCENE_COLLECTION_BEGIN (scene, collection) {
if (collection != collection_skip) {
removed |= collection_object_remove(bmain, collection, ob, free_us);
if (ID_IS_LINKED(collection) || ID_IS_OVERRIDE_LIBRARY(collection)) {
continue;
}
if (collection == collection_skip) {
continue;
}
removed |= collection_object_remove(bmain, collection, ob, free_us);
}
FOREACH_SCENE_COLLECTION_END;

View File

@ -2599,18 +2599,24 @@ void CustomData_set_layer_render_index(CustomData *data, int type, int n)
void CustomData_set_layer_clone_index(CustomData *data, int type, int n)
{
const int layer_index = data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_clone = n - i;
data->layers[i].active_clone = n - layer_index;
}
}
}
void CustomData_set_layer_stencil_index(CustomData *data, int type, int n)
{
const int layer_index = data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_mask = n - i;
data->layers[i].active_mask = n - layer_index;
}
}
}

View File

@ -72,6 +72,21 @@ static void lib_override_library_property_clear(IDOverrideLibraryProperty *op);
static void lib_override_library_property_operation_clear(
IDOverrideLibraryPropertyOperation *opop);
/** Helper to preserve Pose mode on override objects.
* A bit annoying to have this special case, but not much to be done here currently, since the
* matching RNA property is read-only. */
BLI_INLINE void lib_override_object_posemode_transfer(ID *id_dst, ID *id_src)
{
if (GS(id_src->name) == ID_OB && GS(id_dst->name) == ID_OB) {
Object *ob_src = (Object *)id_src;
Object *ob_dst = (Object *)id_dst;
if (ob_src->type == OB_ARMATURE && (ob_src->mode & OB_MODE_POSE) != 0) {
ob_dst->restore_mode = ob_dst->mode;
ob_dst->mode |= OB_MODE_POSE;
}
}
}
/** Get override data for a given ID. Needed because of our beloved shape keys snowflake. */
BLI_INLINE IDOverrideLibrary *lib_override_get(Main *bmain, ID *id, ID **r_owner_id)
{
@ -1703,6 +1718,8 @@ static bool lib_override_library_resync(Main *bmain,
id_override_old->tag |= LIB_TAG_NO_MAIN;
id_override_new->tag &= ~LIB_TAG_NO_MAIN;
lib_override_object_posemode_transfer(id_override_new, id_override_old);
if (ID_IS_OVERRIDE_LIBRARY_REAL(id_override_new)) {
BLI_assert(ID_IS_OVERRIDE_LIBRARY_REAL(id_override_old));
@ -2332,13 +2349,15 @@ static int lib_override_sort_libraries_func(LibraryIDLinkCallbackData *cb_data)
if (id != NULL && ID_IS_LINKED(id) && id->lib != id_owner->lib) {
const int owner_library_indirect_level = ID_IS_LINKED(id_owner) ? id_owner->lib->temp_index :
0;
if (owner_library_indirect_level > 10000) {
CLOG_ERROR(
&LOG,
"Levels of indirect usages of libraries is way too high, skipping further building "
"loops (Involves at least '%s' and '%s')",
id_owner->lib->filepath,
id->lib->filepath);
if (owner_library_indirect_level > 200) {
CLOG_ERROR(&LOG,
"Levels of indirect usages of libraries is way too high, there are most likely "
"dependency loops, skipping further building loops (involves at least '%s' from "
"'%s' and '%s' from '%s')",
id_owner->name,
id_owner->lib->filepath,
id->name,
id->lib->filepath);
return IDWALK_RET_NOP;
}
@ -3423,6 +3442,8 @@ void BKE_lib_override_library_update(Main *bmain, ID *local)
local->override_library,
RNA_OVERRIDE_APPLY_FLAG_NOP);
lib_override_object_posemode_transfer(tmp_id, local);
/* This also transfers all pointers (memory) owned by local to tmp_id, and vice-versa.
* So when we free tmp_id, we actually free old, outdated data from local. */
lib_override_id_swap(bmain, local, tmp_id);

View File

@ -890,8 +890,15 @@ static void object_for_curve_to_mesh_free(Object *temp_object)
curve.editnurb = nullptr;
}
BKE_id_free(nullptr, temp_object->data);
/* Only free the final object data if it is *not* stored in the #data_eval field. This is still
* necessary because #temp_object's data could be replaced by a #Curve data-block that isn't also
* assigned to #data_eval. */
const bool object_data_stored_in_data_eval = final_object_data == temp_object->runtime.data_eval;
BKE_id_free(nullptr, temp_object);
if (!object_data_stored_in_data_eval) {
BKE_id_free(nullptr, final_object_data);
}
}
/**
@ -937,24 +944,9 @@ static void curve_to_mesh_eval_ensure(Object &object)
BKE_object_runtime_free_data(&taper_object);
}
/* Necessary because #BKE_object_get_evaluated_mesh doesn't look in the geometry set yet. */
static const Mesh *get_evaluated_mesh_from_object(const Object *object)
{
const Mesh *mesh = BKE_object_get_evaluated_mesh(object);
if (mesh) {
return mesh;
}
GeometrySet *geometry_set_eval = object->runtime.geometry_set_eval;
if (geometry_set_eval) {
return geometry_set_eval->get_mesh_for_read();
}
return nullptr;
}
static const Curves *get_evaluated_curves_from_object(const Object *object)
{
GeometrySet *geometry_set_eval = object->runtime.geometry_set_eval;
if (geometry_set_eval) {
if (GeometrySet *geometry_set_eval = object->runtime.geometry_set_eval) {
return geometry_set_eval->get_curves_for_read();
}
return nullptr;
@ -962,12 +954,10 @@ static const Curves *get_evaluated_curves_from_object(const Object *object)
static Mesh *mesh_new_from_evaluated_curve_type_object(const Object *evaluated_object)
{
const Mesh *mesh = get_evaluated_mesh_from_object(evaluated_object);
if (mesh) {
if (const Mesh *mesh = BKE_object_get_evaluated_mesh(evaluated_object)) {
return BKE_mesh_copy_for_eval(mesh, false);
}
const Curves *curves = get_evaluated_curves_from_object(evaluated_object);
if (curves) {
if (const Curves *curves = get_evaluated_curves_from_object(evaluated_object)) {
return blender::bke::curve_to_wire_mesh(blender::bke::CurvesGeometry::wrap(curves->geometry));
}
return nullptr;

View File

@ -244,8 +244,8 @@ static int map_insert_vert(
key = POINTER_FROM_INT(vertex);
if (!BLI_ghash_ensure_p(map, key, &value_p)) {
int value_i;
if (BLI_BITMAP_TEST(pbvh->vert_bitmap, vertex) == 0) {
BLI_BITMAP_ENABLE(pbvh->vert_bitmap, vertex);
if (!pbvh->vert_bitmap[vertex]) {
pbvh->vert_bitmap[vertex] = true;
value_i = *uniq_verts;
(*uniq_verts)++;
}
@ -562,7 +562,7 @@ void BKE_pbvh_build_mesh(PBVH *pbvh,
pbvh->verts = verts;
BKE_mesh_vertex_normals_ensure(mesh);
pbvh->vert_normals = BKE_mesh_vertex_normals_for_write(mesh);
pbvh->vert_bitmap = BLI_BITMAP_NEW(totvert, "bvh->vert_bitmap");
pbvh->vert_bitmap = MEM_calloc_arrayN(totvert, sizeof(bool), "bvh->vert_bitmap");
pbvh->totvert = totvert;
pbvh->leaf_limit = LEAF_LIMIT;
pbvh->vdata = vdata;
@ -600,7 +600,7 @@ void BKE_pbvh_build_mesh(PBVH *pbvh,
MEM_freeN(prim_bbc);
/* Clear the bitmap so it can be used as an update tag later on. */
BLI_bitmap_set_all(pbvh->vert_bitmap, false, totvert);
memset(pbvh->vert_bitmap, 0, sizeof(bool) * totvert);
BKE_pbvh_update_active_vcol(pbvh, mesh);
}
@ -1021,7 +1021,7 @@ static void pbvh_update_normals_clear_task_cb(void *__restrict userdata,
const int totvert = node->uniq_verts;
for (int i = 0; i < totvert; i++) {
const int v = verts[i];
if (BLI_BITMAP_TEST(pbvh->vert_bitmap, v)) {
if (pbvh->vert_bitmap[v]) {
zero_v3(vnors[v]);
}
}
@ -1064,7 +1064,7 @@ static void pbvh_update_normals_accum_task_cb(void *__restrict userdata,
for (int j = sides; j--;) {
const int v = vtri[j];
if (BLI_BITMAP_TEST(pbvh->vert_bitmap, v)) {
if (pbvh->vert_bitmap[v]) {
/* NOTE: This avoids `lock, add_v3_v3, unlock`
* and is five to ten times quicker than a spin-lock.
* Not exact equivalent though, since atomicity is only ensured for one component
@ -1096,9 +1096,9 @@ static void pbvh_update_normals_store_task_cb(void *__restrict userdata,
/* No atomics necessary because we are iterating over uniq_verts only,
* so we know only this thread will handle this vertex. */
if (BLI_BITMAP_TEST(pbvh->vert_bitmap, v)) {
if (pbvh->vert_bitmap[v]) {
normalize_v3(vnors[v]);
BLI_BITMAP_DISABLE(pbvh->vert_bitmap, v);
pbvh->vert_bitmap[v] = false;
}
}
@ -1879,7 +1879,7 @@ bool BKE_pbvh_node_fully_unmasked_get(PBVHNode *node)
void BKE_pbvh_vert_mark_update(PBVH *pbvh, int index)
{
BLI_assert(pbvh->type == PBVH_FACES);
BLI_BITMAP_ENABLE(pbvh->vert_bitmap, index);
pbvh->vert_bitmap[index] = true;
}
void BKE_pbvh_node_get_loops(PBVH *pbvh,
@ -2044,7 +2044,7 @@ bool BKE_pbvh_node_vert_update_check_any(PBVH *pbvh, PBVHNode *node)
for (int i = 0; i < totvert; i++) {
const int v = verts[i];
if (BLI_BITMAP_TEST(pbvh->vert_bitmap, v)) {
if (pbvh->vert_bitmap[v]) {
return true;
}
}

View File

@ -170,7 +170,7 @@ struct PBVH {
/* Used during BVH build and later to mark that a vertex needs to update
* (its normal must be recalculated). */
BLI_bitmap *vert_bitmap;
bool *vert_bitmap;
#ifdef PERFCNTRS
int perf_modified;

View File

@ -264,6 +264,14 @@ bSound *BKE_sound_new_file(Main *bmain, const char *filepath)
BLI_strncpy(sound->filepath, filepath, FILE_MAX);
/* sound->type = SOUND_TYPE_FILE; */ /* XXX unused currently */
/* Extract sound specs for bSound */
SoundInfo info;
bool success = BKE_sound_info_get(bmain, sound, &info);
if (success) {
sound->samplerate = info.specs.samplerate;
sound->audio_channels = info.specs.channels;
}
sound->spinlock = MEM_mallocN(sizeof(SpinLock), "sound_spinlock");
BLI_spin_init(sound->spinlock);
@ -1202,6 +1210,7 @@ static bool sound_info_from_playback_handle(void *playback_handle, SoundInfo *so
AUD_SoundInfo info = AUD_getInfo(playback_handle);
sound_info->specs.channels = (eSoundChannels)info.specs.channels;
sound_info->length = info.length;
sound_info->specs.samplerate = info.specs.rate;
return true;
}

View File

@ -147,6 +147,16 @@ struct float4x4 {
return m * float3(v);
}
friend bool operator==(const float4x4 &a, const float4x4 &b)
{
return equals_m4m4(a.ptr(), b.ptr());
}
friend bool operator!=(const float4x4 &a, const float4x4 &b)
{
return !(a == b);
}
float3 translation() const
{
return float3(values[3]);
@ -246,6 +256,25 @@ struct float4x4 {
}
return h;
}
friend std::ostream &operator<<(std::ostream &stream, const float4x4 &mat)
{
char fchar[16];
stream << "(\n";
for (int i = 0; i < 4; i++) {
stream << "(";
for (int j = 0; j < 4; j++) {
snprintf(fchar, sizeof(fchar), "%11.6f", mat[j][i]);
stream << fchar;
if (j != 3) {
stream << ", ";
}
}
stream << ")\n";
}
stream << ")\n";
return stream;
}
};
} // namespace blender

View File

@ -204,14 +204,18 @@ MINLINE int integer_digits_i(int i);
/* These don't really fit anywhere but were being copied about a lot. */
MINLINE int is_power_of_2_i(int n);
MINLINE int power_of_2_max_i(int n);
MINLINE int power_of_2_min_i(int n);
MINLINE unsigned int power_of_2_max_u(unsigned int x);
MINLINE unsigned int power_of_2_min_u(unsigned int x);
MINLINE unsigned int log2_floor_u(unsigned int x);
MINLINE unsigned int log2_ceil_u(unsigned int x);
/**
* Returns next (or previous) power of 2 or the input number if it is already a power of 2.
*/
MINLINE int power_of_2_max_i(int n);
MINLINE int power_of_2_min_i(int n);
MINLINE unsigned int power_of_2_max_u(unsigned int x);
MINLINE unsigned int power_of_2_min_u(unsigned int x);
/**
* Integer division that rounds 0.5 up, particularly useful for color blending
* with integers, to avoid gradual darkening when rounding down.
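
As an illustration of the rounding behaviour described above (the helper name below is hypothetical, not the actual declaration from this header; positive inputs assumed):

    def divide_round(a, b):
        # Round a / b to the nearest integer, with 0.5 rounding up,
        # instead of truncating toward zero like plain integer division.
        return (2 * a + b) // (2 * b)

    # Blending 50% of 255 gives 128 rather than drifting down to 127.
    assert divide_round(255, 2) == 128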

View File

@ -360,8 +360,8 @@ static void seq_update_meta_disp_range(Editing *ed)
}
/* Update meta strip endpoints. */
SEQ_transform_set_left_handle_frame(ms->parseq, ms->disp_range[0]);
SEQ_transform_set_right_handle_frame(ms->parseq, ms->disp_range[1]);
SEQ_time_left_handle_frame_set(ms->parseq, ms->disp_range[0]);
SEQ_time_right_handle_frame_set(ms->parseq, ms->disp_range[1]);
SEQ_transform_fix_single_image_seq_offsets(ms->parseq);
/* Recalculate effects using meta strip. */

View File

@ -1217,6 +1217,15 @@ static bool version_fix_seq_meta_range(Sequence *seq, void *user_data)
return true;
}
static bool version_merge_still_offsets(Sequence *seq, void *UNUSED(user_data))
{
seq->startofs -= seq->startstill;
seq->endofs -= seq->endstill;
seq->startstill = 0;
seq->endstill = 0;
return true;
}
/* Those `version_liboverride_rnacollections_*` functions mimic the old, pre-3.0 code to find
* anchor and source items in the given list of modifiers, constraints etc., using only the
* `subitem_local` data of the override property operation.
@ -3045,5 +3054,13 @@ void blo_do_versions_300(FileData *fd, Library *UNUSED(lib), Main *bmain)
}
}
}
/* Merge still offsets into start/end offsets. */
LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
Editing *ed = SEQ_editing_get(scene);
if (ed != NULL) {
SEQ_for_each_callback(&ed->seqbase, version_merge_still_offsets, NULL);
}
}
}
}

View File

@ -141,6 +141,7 @@ set(SRC
engines/eevee_next/eevee_shader.cc
engines/eevee_next/eevee_sync.cc
engines/eevee_next/eevee_view.cc
engines/eevee_next/eevee_velocity.cc
engines/eevee_next/eevee_world.cc
engines/workbench/workbench_data.c
engines/workbench/workbench_effect_antialiasing.c
@ -364,6 +365,11 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_surf_forward_frag.glsl
engines/eevee_next/shaders/eevee_surf_lib.glsl
engines/eevee_next/shaders/eevee_surf_world_frag.glsl
engines/eevee_next/shaders/eevee_velocity_lib.glsl
engines/eevee_next/shaders/eevee_velocity_resolve_comp.glsl
engines/eevee_next/eevee_defines.hh
engines/eevee_next/eevee_shader_shared.hh
engines/workbench/shaders/workbench_cavity_lib.glsl
engines/workbench/shaders/workbench_common_lib.glsl

View File

@ -140,4 +140,4 @@ void DRW_select_buffer_context_create(struct Base **bases, uint bases_len, short
#ifdef __cplusplus
}
#endif
#endif

View File

@ -203,4 +203,4 @@ vec3 coordinate_incoming(vec3 P)
#else
return cameraVec(P);
#endif
}
}

View File

@ -76,4 +76,4 @@ vec3 coordinate_reflect(vec3 P, vec3 N)
vec3 coordinate_incoming(vec3 P)
{
return vec3(0.0);
}
}

View File

@ -69,7 +69,7 @@ void Camera::init()
data.type = DRW_view_is_persp_get(inst_.drw_view) ? CAMERA_PERSP : CAMERA_ORTHO;
}
else {
/* Lightprobe baking. */
/* Light-probe baking. */
data.type = CAMERA_PERSP;
}
}
@ -91,7 +91,7 @@ void Camera::sync()
DRW_view_camtexco_get(inst_.drw_view, data.uv_scale);
}
else if (inst_.render) {
/* TODO(fclem) Overscan */
/* TODO(@fclem): Over-scan. */
// RE_GetCameraWindowWithOverscan(inst_.render->re, g_data->overscan, data.winmat);
RE_GetCameraWindow(inst_.render->re, camera_eval, data.winmat.ptr());
RE_GetCameraModelMatrix(inst_.render->re, camera_eval, data.viewinv.ptr());

View File

@ -5,6 +5,7 @@
#include "BKE_global.h"
#include "BLI_rect.h"
#include "GPU_capabilities.h"
#include "GPU_framebuffer.h"
#include "ED_view3d.h"
@ -24,10 +25,17 @@ struct EEVEE_Data {
DRWViewportEmptyList *psl;
DRWViewportEmptyList *stl;
eevee::Instance *instance;
char info[GPU_INFO_SIZE];
};
static void eevee_engine_init(void *vedata)
{
/* TODO(fclem): Remove once it is minimum required. */
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
EEVEE_Data *ved = reinterpret_cast<EEVEE_Data *>(vedata);
if (ved->instance == nullptr) {
ved->instance = new eevee::Instance();
@ -81,31 +89,50 @@ static void eevee_engine_init(void *vedata)
static void eevee_draw_scene(void *vedata)
{
EEVEE_Data *ved = reinterpret_cast<EEVEE_Data *>(vedata);
if (!GPU_shader_storage_buffer_objects_support()) {
STRNCPY(ved->info, "Error: No shader storage buffer support");
return;
}
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
reinterpret_cast<EEVEE_Data *>(vedata)->instance->draw_viewport(dfbl);
ved->instance->draw_viewport(dfbl);
STRNCPY(ved->info, ved->instance->info.c_str());
}
static void eevee_cache_init(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<EEVEE_Data *>(vedata)->instance->begin_sync();
}
static void eevee_cache_populate(void *vedata, Object *object)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<EEVEE_Data *>(vedata)->instance->object_sync(object);
}
static void eevee_cache_finish(void *vedata)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
reinterpret_cast<EEVEE_Data *>(vedata)->instance->end_sync();
}
static void eevee_engine_free()
{
eevee::ShaderModule::module_free();
}
static void eevee_instance_free(void *instance)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
delete reinterpret_cast<eevee::Instance *>(instance);
}
@ -114,11 +141,17 @@ static void eevee_render_to_image(void *UNUSED(vedata),
struct RenderLayer *layer,
const struct rcti *UNUSED(rect))
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
UNUSED_VARS(engine, layer);
}
static void eevee_render_update_passes(RenderEngine *engine, Scene *scene, ViewLayer *view_layer)
{
if (!GPU_shader_storage_buffer_objects_support()) {
return;
}
UNUSED_VARS(engine, scene, view_layer);
}

View File

@ -8,6 +8,8 @@
* An instance contains all structures needed to do a complete render.
*/
#include <sstream>
#include "BKE_global.h"
#include "BKE_object.h"
#include "BLI_rect.h"
@ -50,11 +52,20 @@ void Instance::init(const int2 &output_res,
v3d = v3d_;
rv3d = rv3d_;
info = "";
update_eval_members();
main_view.init(output_res);
}
void Instance::set_time(float time)
{
BLI_assert(render);
DRW_render_set_time(render, depsgraph, floorf(time), fractf(time));
update_eval_members();
}
void Instance::update_eval_members()
{
scene = DEG_get_evaluated_scene(depsgraph);
@ -77,6 +88,7 @@ void Instance::update_eval_members()
void Instance::begin_sync()
{
materials.begin_sync();
velocity.begin_sync();
pipelines.sync();
main_view.sync();
@ -136,6 +148,7 @@ void Instance::object_sync(Object *ob)
void Instance::end_sync()
{
velocity.end_sync();
}
void Instance::render_sync()
@ -172,6 +185,13 @@ void Instance::draw_viewport(DefaultFramebufferList *dfbl)
{
UNUSED_VARS(dfbl);
render_sample();
velocity.step_swap();
if (materials.queued_shaders_count > 0) {
std::stringstream ss;
ss << "Compiling Shaders " << materials.queued_shaders_count;
info = ss.str();
}
}
/** \} */

View File

@ -15,6 +15,7 @@
#include "DNA_lightprobe_types.h"
#include "DRW_render.h"
#include "eevee_camera.hh"
#include "eevee_material.hh"
#include "eevee_pipeline.hh"
#include "eevee_shader.hh"
@ -29,11 +30,15 @@ namespace blender::eevee {
* \brief A running instance of the engine.
*/
class Instance {
friend VelocityModule;
public:
ShaderModule &shaders;
SyncModule sync;
MaterialModule materials;
PipelineModule pipelines;
VelocityModule velocity;
Camera camera;
MainView main_view;
World world;
@ -53,7 +58,7 @@ class Instance {
const RegionView3D *rv3d;
/* Info string displayed at the top of the render / viewport. */
char info[64];
std::string info = "";
public:
Instance()
@ -61,6 +66,8 @@ class Instance {
sync(*this),
materials(*this),
pipelines(*this),
velocity(*this),
camera(*this),
main_view(*this),
world(*this){};
~Instance(){};
@ -85,12 +92,37 @@ class Instance {
void draw_viewport(DefaultFramebufferList *dfbl);
bool is_viewport(void)
{
return !DRW_state_is_scene_render();
}
bool use_scene_lights(void) const
{
return (!v3d) ||
((v3d->shading.type == OB_MATERIAL) &&
(v3d->shading.flag & V3D_SHADING_SCENE_LIGHTS)) ||
((v3d->shading.type == OB_RENDER) &&
(v3d->shading.flag & V3D_SHADING_SCENE_LIGHTS_RENDER));
}
/* Light the scene using the selected HDRI in the viewport shading pop-over. */
bool use_studio_light(void) const
{
return (v3d) && (((v3d->shading.type == OB_MATERIAL) &&
((v3d->shading.flag & V3D_SHADING_SCENE_WORLD) == 0)) ||
((v3d->shading.type == OB_RENDER) &&
((v3d->shading.flag & V3D_SHADING_SCENE_WORLD_RENDER) == 0)));
}
private:
void render_sample();
void mesh_sync(Object *ob, ObjectHandle &ob_handle);
void update_eval_members();
void set_time(float time);
};
} // namespace blender::eevee

View File

@ -51,7 +51,6 @@ DefaultSurfaceNodeTree::~DefaultSurfaceNodeTree()
MEM_SAFE_FREE(ntree_);
}
/* Configure a default nodetree with the given material. */
bNodeTree *DefaultSurfaceNodeTree::nodetree_get(::Material *ma)
{
/* WARNING: This function is not threadsafe. Which is not a problem for the moment. */
@ -75,11 +74,11 @@ MaterialModule::MaterialModule(Instance &inst) : inst_(inst)
{
bNodeTree *ntree = ntreeAddTree(nullptr, "Shader Nodetree", ntreeType_Shader->idname);
diffuse_mat_ = (::Material *)BKE_id_new_nomain(ID_MA, "EEVEE default diffuse");
diffuse_mat_->nodetree = ntree;
diffuse_mat_->use_nodes = true;
diffuse_mat = (::Material *)BKE_id_new_nomain(ID_MA, "EEVEE default diffuse");
diffuse_mat->nodetree = ntree;
diffuse_mat->use_nodes = true;
/* To use the forward pipeline. */
diffuse_mat_->blend_method = MA_BM_BLEND;
diffuse_mat->blend_method = MA_BM_BLEND;
bNode *bsdf = nodeAddStaticNode(nullptr, ntree, SH_NODE_BSDF_DIFFUSE);
bNodeSocket *base_color = nodeFindSocket(bsdf, SOCK_IN, "Color");
@ -98,11 +97,11 @@ MaterialModule::MaterialModule(Instance &inst) : inst_(inst)
{
bNodeTree *ntree = ntreeAddTree(nullptr, "Shader Nodetree", ntreeType_Shader->idname);
glossy_mat_ = (::Material *)BKE_id_new_nomain(ID_MA, "EEVEE default metal");
glossy_mat_->nodetree = ntree;
glossy_mat_->use_nodes = true;
glossy_mat = (::Material *)BKE_id_new_nomain(ID_MA, "EEVEE default metal");
glossy_mat->nodetree = ntree;
glossy_mat->use_nodes = true;
/* To use the forward pipeline. */
glossy_mat_->blend_method = MA_BM_BLEND;
glossy_mat->blend_method = MA_BM_BLEND;
bNode *bsdf = nodeAddStaticNode(nullptr, ntree, SH_NODE_BSDF_GLOSSY);
bNodeSocket *base_color = nodeFindSocket(bsdf, SOCK_IN, "Color");
@ -149,14 +148,14 @@ MaterialModule::~MaterialModule()
for (Material *mat : material_map_.values()) {
delete mat;
}
BKE_id_free(nullptr, glossy_mat_);
BKE_id_free(nullptr, diffuse_mat_);
BKE_id_free(nullptr, glossy_mat);
BKE_id_free(nullptr, diffuse_mat);
BKE_id_free(nullptr, error_mat_);
}
void MaterialModule::begin_sync()
{
queued_shaders_count_ = 0;
queued_shaders_count = 0;
for (Material *mat : material_map_.values()) {
mat->init = false;
@ -180,7 +179,7 @@ MaterialPass MaterialModule::material_pass_get(::Material *blender_mat,
case GPU_MAT_SUCCESS:
break;
case GPU_MAT_QUEUED:
queued_shaders_count_++;
queued_shaders_count++;
blender_mat = (geometry_type == MAT_GEOM_VOLUME) ? BKE_material_default_volume() :
BKE_material_default_surface();
matpass.gpumat = inst_.shaders.material_shader_get(
@ -223,7 +222,7 @@ MaterialPass MaterialModule::material_pass_get(::Material *blender_mat,
/* IMPORTANT: We always create a subgroup so that all subgroups are inserted after the
* first "empty" shgroup. This avoids messing the order of subgroups when there is more
* nested subgroup (i.e: hair drawing). */
/* TODO(fclem) Remove material resource binding from the first group creation. */
/* TODO(@fclem): Remove material resource binding from the first group creation. */
matpass.shgrp = DRW_shgroup_create_sub(grp);
DRW_shgroup_add_material_resources(matpass.shgrp, matpass.gpumat);
}
@ -232,21 +231,25 @@ MaterialPass MaterialModule::material_pass_get(::Material *blender_mat,
return matpass;
}
Material &MaterialModule::material_sync(::Material *blender_mat, eMaterialGeometry geometry_type)
Material &MaterialModule::material_sync(::Material *blender_mat,
eMaterialGeometry geometry_type,
bool has_motion)
{
eMaterialPipeline surface_pipe = (blender_mat->blend_method == MA_BM_BLEND) ? MAT_PIPE_FORWARD :
MAT_PIPE_DEFERRED;
eMaterialPipeline prepass_pipe = (blender_mat->blend_method == MA_BM_BLEND) ?
MAT_PIPE_FORWARD_PREPASS :
MAT_PIPE_DEFERRED_PREPASS;
(has_motion ? MAT_PIPE_FORWARD_PREPASS_VELOCITY :
MAT_PIPE_FORWARD_PREPASS) :
(has_motion ? MAT_PIPE_DEFERRED_PREPASS_VELOCITY :
MAT_PIPE_DEFERRED_PREPASS);
/* Test */
/* TEST until we have deferred pipeline up and running. */
surface_pipe = MAT_PIPE_FORWARD;
prepass_pipe = MAT_PIPE_FORWARD_PREPASS;
prepass_pipe = has_motion ? MAT_PIPE_FORWARD_PREPASS_VELOCITY : MAT_PIPE_FORWARD_PREPASS;
MaterialKey material_key(blender_mat, geometry_type, surface_pipe);
/* TODO allocate in blocks to avoid memory fragmentation. */
/* TODO: allocate in blocks to avoid memory fragmentation. */
auto add_cb = [&]() { return new Material(); };
Material &mat = *material_map_.lookup_or_add_cb(material_key, add_cb);
@ -270,7 +273,6 @@ Material &MaterialModule::material_sync(::Material *blender_mat, eMaterialGeomet
return mat;
}
/* Return correct material or empty default material if slot is empty. */
::Material *MaterialModule::material_from_slot(Object *ob, int slot)
{
if (ob->base_flag & BASE_HOLDOUT) {
@ -286,9 +288,7 @@ Material &MaterialModule::material_sync(::Material *blender_mat, eMaterialGeomet
return ma;
}
/* Returned Material references are valid until the next call to this function or
* material_get(). */
MaterialArray &MaterialModule::material_array_get(Object *ob)
MaterialArray &MaterialModule::material_array_get(Object *ob, bool has_motion)
{
material_array_.materials.clear();
material_array_.gpu_materials.clear();
@ -297,22 +297,23 @@ MaterialArray &MaterialModule::material_array_get(Object *ob)
for (auto i : IndexRange(materials_len)) {
::Material *blender_mat = material_from_slot(ob, i);
Material &mat = material_sync(blender_mat, to_material_geometry(ob));
Material &mat = material_sync(blender_mat, to_material_geometry(ob), has_motion);
material_array_.materials.append(&mat);
material_array_.gpu_materials.append(mat.shading.gpumat);
}
return material_array_;
}
/* Returned Material references are valid until the next call to this function or
* material_array_get(). */
Material &MaterialModule::material_get(Object *ob, int mat_nr, eMaterialGeometry geometry_type)
Material &MaterialModule::material_get(Object *ob,
bool has_motion,
int mat_nr,
eMaterialGeometry geometry_type)
{
::Material *blender_mat = material_from_slot(ob, mat_nr);
Material &mat = material_sync(blender_mat, geometry_type);
Material &mat = material_sync(blender_mat, geometry_type, has_motion);
return mat;
}
/** \} */
} // namespace blender::eevee
} // namespace blender::eevee

View File

@ -27,19 +27,21 @@ class Instance;
enum eMaterialPipeline {
MAT_PIPE_DEFERRED = 0,
MAT_PIPE_FORWARD = 1,
MAT_PIPE_DEFERRED_PREPASS = 2,
MAT_PIPE_FORWARD_PREPASS = 3,
MAT_PIPE_VOLUME = 4,
MAT_PIPE_SHADOW = 5,
MAT_PIPE_FORWARD,
MAT_PIPE_DEFERRED_PREPASS,
MAT_PIPE_DEFERRED_PREPASS_VELOCITY,
MAT_PIPE_FORWARD_PREPASS,
MAT_PIPE_FORWARD_PREPASS_VELOCITY,
MAT_PIPE_VOLUME,
MAT_PIPE_SHADOW,
};
enum eMaterialGeometry {
MAT_GEOM_MESH = 0,
MAT_GEOM_CURVES = 1,
MAT_GEOM_GPENCIL = 2,
MAT_GEOM_VOLUME = 3,
MAT_GEOM_WORLD = 4,
MAT_GEOM_CURVES,
MAT_GEOM_GPENCIL,
MAT_GEOM_VOLUME,
MAT_GEOM_WORLD,
};
static inline void material_type_from_shader_uuid(uint64_t shader_uuid,
@ -189,6 +191,7 @@ class DefaultSurfaceNodeTree {
DefaultSurfaceNodeTree();
~DefaultSurfaceNodeTree();
/** Configure a default node-tree with the given material. */
bNodeTree *nodetree_get(::Material *ma);
};
@ -217,8 +220,10 @@ struct MaterialArray {
class MaterialModule {
public:
::Material *diffuse_mat_;
::Material *glossy_mat_;
::Material *diffuse_mat;
::Material *glossy_mat;
int64_t queued_shaders_count = 0;
private:
Instance &inst_;
@ -232,20 +237,28 @@ class MaterialModule {
::Material *error_mat_;
int64_t queued_shaders_count_ = 0;
public:
MaterialModule(Instance &inst);
~MaterialModule();
void begin_sync();
MaterialArray &material_array_get(Object *ob);
Material &material_get(Object *ob, int mat_nr, eMaterialGeometry geometry_type);
/**
* Returned Material references are valid until the next call to this function or material_get().
*/
MaterialArray &material_array_get(Object *ob, bool has_motion);
/**
* Returned Material references are valid until the next call to this function or
* material_array_get().
*/
Material &material_get(Object *ob, bool has_motion, int mat_nr, eMaterialGeometry geometry_type);
private:
Material &material_sync(::Material *blender_mat, eMaterialGeometry geometry_type);
Material &material_sync(::Material *blender_mat,
eMaterialGeometry geometry_type,
bool has_motion);
/** Return correct material or empty default material if slot is empty. */
::Material *material_from_slot(Object *ob, int slot);
MaterialPass material_pass_get(::Material *blender_mat,
eMaterialPipeline pipeline_type,

View File

@ -54,11 +54,17 @@ void ForwardPipeline::sync()
{
DRWState state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS;
prepass_ps_ = DRW_pass_create("Forward.Opaque.Prepass", state);
prepass_velocity_ps_ = DRW_pass_create("Forward.Opaque.Prepass.Velocity",
state | DRW_STATE_WRITE_COLOR);
state |= DRW_STATE_CULL_BACK;
prepass_culled_ps_ = DRW_pass_create("Forward.Opaque.Prepass.Culled", state);
prepass_culled_velocity_ps_ = DRW_pass_create("Forward.Opaque.Prepass.Velocity",
state | DRW_STATE_WRITE_COLOR);
DRW_pass_link(prepass_ps_, prepass_culled_ps_);
DRW_pass_link(prepass_ps_, prepass_velocity_ps_);
DRW_pass_link(prepass_velocity_ps_, prepass_culled_ps_);
DRW_pass_link(prepass_culled_ps_, prepass_culled_velocity_ps_);
}
{
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL;
@ -110,11 +116,17 @@ DRWShadingGroup *ForwardPipeline::material_opaque_add(::Material *blender_mat, G
return grp;
}
DRWShadingGroup *ForwardPipeline::prepass_opaque_add(::Material *blender_mat, GPUMaterial *gpumat)
DRWShadingGroup *ForwardPipeline::prepass_opaque_add(::Material *blender_mat,
GPUMaterial *gpumat,
bool has_motion)
{
DRWPass *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ? prepass_culled_ps_ :
prepass_ps_;
DRWPass *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ?
(has_motion ? prepass_culled_velocity_ps_ : prepass_culled_ps_) :
(has_motion ? prepass_velocity_ps_ : prepass_ps_);
DRWShadingGroup *grp = DRW_shgroup_material_create(gpumat, pass);
if (has_motion) {
inst_.velocity.bind_resources(grp);
}
return grp;
}
@ -181,15 +193,19 @@ DRWShadingGroup *ForwardPipeline::prepass_transparent_add(::Material *blender_ma
}
void ForwardPipeline::render(const DRWView *view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
GPUTexture *depth_tx,
GPUTexture *UNUSED(combined_tx))
{
UNUSED_VARS(view, depth_tx);
UNUSED_VARS(view, depth_tx, prepass_fb, combined_fb);
// HiZBuffer &hiz = inst_.hiz_front;
DRW_stats_group_start("ForwardOpaque");
GPU_framebuffer_bind(prepass_fb);
DRW_draw_pass(prepass_ps_);
// hiz.set_dirty();
// if (inst_.raytracing.enabled()) {
@ -199,6 +215,7 @@ void ForwardPipeline::render(const DRWView *view,
// inst_.shadows.set_view(view, depth_tx);
GPU_framebuffer_bind(combined_fb);
DRW_draw_pass(opaque_ps_);
DRW_stats_group_end();
@ -218,4 +235,4 @@ void ForwardPipeline::render(const DRWView *view,
/** \} */
} // namespace blender::eevee
} // namespace blender::eevee

View File

@ -53,7 +53,9 @@ class ForwardPipeline {
Instance &inst_;
DRWPass *prepass_ps_ = nullptr;
DRWPass *prepass_velocity_ps_ = nullptr;
DRWPass *prepass_culled_ps_ = nullptr;
DRWPass *prepass_culled_velocity_ps_ = nullptr;
DRWPass *opaque_ps_ = nullptr;
DRWPass *opaque_culled_ps_ = nullptr;
DRWPass *transparent_ps_ = nullptr;
@ -72,19 +74,25 @@ class ForwardPipeline {
material_opaque_add(blender_mat, gpumat);
}
DRWShadingGroup *prepass_add(::Material *blender_mat, GPUMaterial *gpumat)
DRWShadingGroup *prepass_add(::Material *blender_mat, GPUMaterial *gpumat, bool has_motion)
{
return (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) ?
prepass_transparent_add(blender_mat, gpumat) :
prepass_opaque_add(blender_mat, gpumat);
prepass_opaque_add(blender_mat, gpumat, has_motion);
}
DRWShadingGroup *material_opaque_add(::Material *blender_mat, GPUMaterial *gpumat);
DRWShadingGroup *prepass_opaque_add(::Material *blender_mat, GPUMaterial *gpumat);
DRWShadingGroup *prepass_opaque_add(::Material *blender_mat,
GPUMaterial *gpumat,
bool has_motion);
DRWShadingGroup *material_transparent_add(::Material *blender_mat, GPUMaterial *gpumat);
DRWShadingGroup *prepass_transparent_add(::Material *blender_mat, GPUMaterial *gpumat);
void render(const DRWView *view, GPUTexture *depth_tx, GPUTexture *combined_tx);
void render(const DRWView *view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
GPUTexture *depth_tx,
GPUTexture *combined_tx);
};
/** \} */
@ -191,10 +199,15 @@ class PipelineModule {
{
switch (pipeline_type) {
case MAT_PIPE_DEFERRED_PREPASS:
// return deferred.prepass_add(blender_mat, gpumat);
// return deferred.prepass_add(blender_mat, gpumat, false);
break;
case MAT_PIPE_DEFERRED_PREPASS_VELOCITY:
// return deferred.prepass_add(blender_mat, gpumat, true);
break;
case MAT_PIPE_FORWARD_PREPASS:
return forward.prepass_add(blender_mat, gpumat);
return forward.prepass_add(blender_mat, gpumat, false);
case MAT_PIPE_FORWARD_PREPASS_VELOCITY:
return forward.prepass_add(blender_mat, gpumat, true);
case MAT_PIPE_DEFERRED:
// return deferred.material_add(blender_mat, gpumat);
break;
@ -213,4 +226,4 @@ class PipelineModule {
/** \} */
} // namespace blender::eevee
} // namespace blender::eevee

View File

@ -78,6 +78,8 @@ ShaderModule::~ShaderModule()
const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_type)
{
switch (shader_type) {
case VELOCITY_RESOLVE:
return "eevee_velocity_resolve";
/* To avoid compiler warning about missing case. */
case MAX_SHADER_TYPE:
return "";
@ -289,6 +291,10 @@ void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOu
break;
default:
switch (pipeline_type) {
case MAT_PIPE_FORWARD_PREPASS_VELOCITY:
case MAT_PIPE_DEFERRED_PREPASS_VELOCITY:
info.additional_info("eevee_surf_depth", "eevee_velocity_geom");
break;
case MAT_PIPE_FORWARD_PREPASS:
case MAT_PIPE_DEFERRED_PREPASS:
case MAT_PIPE_SHADOW:

View File

@ -26,7 +26,9 @@ namespace blender::eevee {
/* Keep alphabetical order and clean prefix. */
enum eShaderType {
MAX_SHADER_TYPE = 0,
VELOCITY_RESOLVE = 0,
MAX_SHADER_TYPE,
};
/**

View File

@ -53,7 +53,7 @@ struct CameraData {
float4x4 viewinv;
float4x4 winmat;
float4x4 wininv;
/** Camera UV scale and bias. Also known as viewcamtexcofac. */
/** Camera UV scale and bias. Also known as `viewcamtexcofac`. */
float2 uv_scale;
float2 uv_bias;
/** Panorama parameters. */
@ -73,6 +73,54 @@ BLI_STATIC_ASSERT_ALIGN(CameraData, 16)
/** \} */
/* -------------------------------------------------------------------- */
/** \name VelocityModule
* \{ */
#define VELOCITY_INVALID 512.0
enum eVelocityStep : uint32_t {
STEP_PREVIOUS = 0,
STEP_NEXT = 1,
STEP_CURRENT = 2,
};
struct VelocityObjectIndex {
/** Offset inside #VelocityObjectBuf for each timestep. Indexed using eVelocityStep. */
int3 ofs;
/** Temporary index to copy this to the #VelocityIndexBuf. */
uint resource_id;
#ifdef __cplusplus
VelocityObjectIndex() : ofs(-1, -1, -1), resource_id(-1){};
#endif
};
BLI_STATIC_ASSERT_ALIGN(VelocityObjectIndex, 16)
struct VelocityGeometryIndex {
/** Offset inside #VelocityGeometryBuf for each timestep. Indexed using eVelocityStep. */
int3 ofs;
/** If true, compute deformation motion blur. */
bool1 do_deform;
/** Length of data inside #VelocityGeometryBuf for each timestep. Indexed using eVelocityStep. */
int3 len;
int _pad0;
#ifdef __cplusplus
VelocityGeometryIndex() : ofs(-1, -1, -1), do_deform(false), len(-1, -1, -1), _pad0(1){};
#endif
};
BLI_STATIC_ASSERT_ALIGN(VelocityGeometryIndex, 16)
struct VelocityIndex {
VelocityObjectIndex obj;
VelocityGeometryIndex geo;
};
BLI_STATIC_ASSERT_ALIGN(VelocityIndex, 16)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Ray-Tracing
* \{ */
@ -131,6 +179,9 @@ float4 utility_tx_sample(sampler2DArray util_tx, float2 uv, float layer)
#ifdef __cplusplus
using CameraDataBuf = draw::UniformBuffer<CameraData>;
using VelocityIndexBuf = draw::StorageArrayBuffer<VelocityIndex, 16>;
using VelocityObjectBuf = draw::StorageArrayBuffer<float4x4, 16>;
using VelocityGeometryBuf = draw::StorageArrayBuffer<float4, 16, true>;
} // namespace blender::eevee
#endif

View File

@ -104,7 +104,9 @@ static inline void shgroup_geometry_call(DRWShadingGroup *grp,
void SyncModule::sync_mesh(Object *ob, ObjectHandle &ob_handle)
{
MaterialArray &material_array = inst_.materials.material_array_get(ob);
bool has_motion = inst_.velocity.step_object_sync(ob, ob_handle.object_key, ob_handle.recalc);
MaterialArray &material_array = inst_.materials.material_array_get(ob, has_motion);
GPUBatch **mat_geom = DRW_cache_object_surface_material_get(
ob, material_array.gpu_materials.data(), material_array.gpu_materials.size());
@ -129,9 +131,6 @@ void SyncModule::sync_mesh(Object *ob, ObjectHandle &ob_handle)
is_alpha_blend = is_alpha_blend || material->is_alpha_blend_transparent;
}
UNUSED_VARS(ob_handle);
// shading_passes.velocity.mesh_add(ob, ob_handle);
// shadows.sync_object(ob, ob_handle, is_shadow_caster, is_alpha_blend);
}
@ -156,8 +155,11 @@ struct gpIterData {
int vcount = 0;
bool instancing = false;
gpIterData(Instance &inst_, Object *ob_)
: inst(inst_), ob(ob_), material_array(inst_.materials.material_array_get(ob_))
gpIterData(Instance &inst_, Object *ob_, ObjectHandle &ob_handle)
: inst(inst_),
ob(ob_),
material_array(inst_.materials.material_array_get(
ob_, inst_.velocity.step_object_sync(ob, ob_handle.object_key, ob_handle.recalc)))
{
cfra = DEG_get_ctime(inst.depsgraph);
};
@ -253,16 +255,12 @@ void SyncModule::sync_gpencil(Object *ob, ObjectHandle &ob_handle)
/* TODO(fclem): Waiting for a user option to use the render engine instead of gpencil engine. */
return;
gpIterData iter(inst_, ob);
gpIterData iter(inst_, ob, ob_handle);
BKE_gpencil_visible_stroke_iter((bGPdata *)ob->data, nullptr, gpencil_stroke_sync, &iter);
gpencil_drawcall_flush(iter);
UNUSED_VARS(ob_handle);
/* TODO(fclem) Gpencil velocity. */
// shading_passes.velocity.gpencil_add(ob, ob_handle);
// bool is_caster = true; /* TODO material.shadow.shgrp. */
// bool is_alpha_blend = true; /* TODO material.is_alpha_blend. */
// shadows.sync_object(ob, ob_handle, is_caster, is_alpha_blend);
@ -304,12 +302,13 @@ void SyncModule::sync_curves(Object *ob, ObjectHandle &ob_handle, ModifierData *
mat_nr = part_settings->omat;
}
Material &material = inst_.materials.material_get(ob, mat_nr - 1, MAT_GEOM_CURVES);
bool has_motion = inst_.velocity.step_object_sync(ob, ob_handle.object_key, ob_handle.recalc);
Material &material = inst_.materials.material_get(ob, has_motion, mat_nr - 1, MAT_GEOM_CURVES);
shgroup_curves_call(material.shading, ob, part_sys, modifier_data);
shgroup_curves_call(material.prepass, ob, part_sys, modifier_data);
shgroup_curves_call(material.shadow, ob, part_sys, modifier_data);
UNUSED_VARS(ob_handle);
/* TODO(fclem) Hair velocity. */
// shading_passes.velocity.gpencil_add(ob, ob_handle);

View File

@ -28,6 +28,7 @@ class Instance;
/** \name ObjectKey
*
* Unique key to identify each object in the hash-map.
* Note that we get a unique key for each object component.
* \{ */
struct ObjectKey {

View File

@ -0,0 +1,420 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* The velocity pass outputs motion vectors to use for either
* temporal re-projection or motion blur.
*
* It is the module that tracks objects between frame updates.
*
* #VelocityModule contains all motion steps data and logic.
* #VelocityPass contains the resolve pass for static geometry.
* #VelocityView is a per-view instance that contains the velocity buffer.
*/
#include "BKE_duplilist.h"
#include "BKE_object.h"
#include "BLI_map.hh"
#include "DEG_depsgraph_query.h"
#include "DNA_rigidbody_types.h"
#include "eevee_instance.hh"
// #include "eevee_renderpasses.hh"
#include "eevee_shader.hh"
#include "eevee_shader_shared.hh"
#include "eevee_velocity.hh"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name VelocityModule
*
* \{ */
void VelocityModule::init()
{
#if 0 /* TODO renderpasses */
if (inst_.render && (inst_.render_passes.vector != nullptr)) {
/* No motion blur and the vector pass was requested. Do the step sync here. */
const Scene *scene = inst_.scene;
float initial_time = scene->r.cfra + scene->r.subframe;
step_sync(STEP_PREVIOUS, initial_time - 1.0f);
step_sync(STEP_NEXT, initial_time + 1.0f);
inst_.set_time(initial_time);
}
#endif
}
static void step_object_sync_render(void *velocity,
Object *ob,
RenderEngine *UNUSED(engine),
Depsgraph *UNUSED(depsgraph))
{
ObjectKey object_key(ob);
reinterpret_cast<VelocityModule *>(velocity)->step_object_sync(ob, object_key);
}
void VelocityModule::step_sync(eVelocityStep step, float time)
{
inst_.set_time(time);
step_ = step;
object_steps_usage[step_] = 0;
step_camera_sync();
DRW_render_object_iter(this, inst_.render, inst_.depsgraph, step_object_sync_render);
}
void VelocityModule::step_camera_sync()
{
inst_.camera.sync();
*camera_steps[step_] = inst_.camera.data_get();
}
bool VelocityModule::step_object_sync(Object *ob,
ObjectKey &object_key,
int /*IDRecalcFlag*/ recalc)
{
bool has_motion = object_has_velocity(ob) || (recalc & ID_RECALC_TRANSFORM);
/* NOTE: Fragile. This will only work with 1 frame of lag since we can't record every geometry
* just in case there might be an update the next frame. */
bool has_deform = object_is_deform(ob) || (recalc & ID_RECALC_GEOMETRY);
if (!has_motion && !has_deform) {
return false;
}
uint32_t resource_id = DRW_object_resource_id_get(ob);
/* Object motion. */
/* FIXME(fclem) As we are using original objects pointers, there is a chance the previous
* object key matches a totally different object if the scene was changed by user or python
* callback. In this case, we cannot correctly match objects between updates.
* What this means is that there will be incorrect motion vectors for these objects.
* We live with that until we have a correct way of identifying new objects. */
VelocityObjectData &vel = velocity_map.lookup_or_add_default(object_key);
vel.obj.ofs[step_] = object_steps_usage[step_]++;
vel.obj.resource_id = resource_id;
vel.id = (ID *)ob->data;
object_steps[step_]->get_or_resize(vel.obj.ofs[step_]) = ob->obmat;
if (step_ == STEP_CURRENT) {
/* Replace invalid steps. Can happen if object was hidden in one of those steps. */
if (vel.obj.ofs[STEP_PREVIOUS] == -1) {
vel.obj.ofs[STEP_PREVIOUS] = object_steps_usage[STEP_PREVIOUS]++;
object_steps[STEP_PREVIOUS]->get_or_resize(vel.obj.ofs[STEP_PREVIOUS]) = ob->obmat;
}
if (vel.obj.ofs[STEP_NEXT] == -1) {
vel.obj.ofs[STEP_NEXT] = object_steps_usage[STEP_NEXT]++;
object_steps[STEP_NEXT]->get_or_resize(vel.obj.ofs[STEP_NEXT]) = ob->obmat;
}
}
/* Geometry motion. */
if (has_deform) {
auto add_cb = [&]() {
VelocityGeometryData data;
switch (ob->type) {
case OB_CURVES:
data.pos_buf = DRW_curves_pos_buffer_get(ob);
break;
default:
data.pos_buf = DRW_cache_object_pos_vertbuf_get(ob);
break;
}
return data;
};
const VelocityGeometryData &data = geometry_map.lookup_or_add_cb(vel.id, add_cb);
if (data.pos_buf == nullptr) {
has_deform = false;
}
}
/* Avoid drawing objects that have no motion but were tagged as such. */
if (step_ == STEP_CURRENT && has_motion == true && has_deform == false) {
float4x4 &obmat_curr = (*object_steps[STEP_CURRENT])[vel.obj.ofs[STEP_CURRENT]];
float4x4 &obmat_prev = (*object_steps[STEP_PREVIOUS])[vel.obj.ofs[STEP_PREVIOUS]];
float4x4 &obmat_next = (*object_steps[STEP_NEXT])[vel.obj.ofs[STEP_NEXT]];
if (inst_.is_viewport()) {
has_motion = (obmat_curr != obmat_prev);
}
else {
has_motion = (obmat_curr != obmat_prev || obmat_curr != obmat_next);
}
}
#if 0
if (!has_motion && !has_deform) {
std::cout << "Detected no motion on " << ob->id.name << std::endl;
}
if (has_deform) {
std::cout << "Geometry Motion on " << ob->id.name << std::endl;
}
if (has_motion) {
std::cout << "Object Motion on " << ob->id.name << std::endl;
}
#endif
if (!has_motion && !has_deform) {
return false;
}
/* TODO(@fclem): Reset sampling here? Should ultimately be covered by depsgraph update tags. */
// inst_.sampling.reset();
return true;
}
/**
* Moves next frame data to previous frame data. Nullify next frame data.
* IMPORTANT: This runs AFTER drawing in the viewport (so after `begin_sync()`) but BEFORE drawing
* in render mode (so before `begin_sync()`). In viewport the data will be used the next frame.
*/
void VelocityModule::step_swap()
{
{
/* Now that vertex buffers are guaranteed to be updated, proceed with
* offset computation and copy into the geometry step buffer. */
uint dst_ofs = 0;
for (VelocityGeometryData &geom : geometry_map.values()) {
uint src_len = GPU_vertbuf_get_vertex_len(geom.pos_buf);
geom.len = src_len;
geom.ofs = dst_ofs;
dst_ofs += src_len;
}
/* TODO(@fclem): Fail gracefully (disable motion blur + warning print) if
`tot_len * sizeof(float4)` is greater than max SSBO size. */
geometry_steps[step_]->resize(max_ii(16, dst_ofs));
for (VelocityGeometryData &geom : geometry_map.values()) {
GPU_storagebuf_copy_sub_from_vertbuf(*geometry_steps[step_],
geom.pos_buf,
geom.ofs * sizeof(float4),
0,
geom.len * sizeof(float4));
}
/* Copy back the #VelocityGeometryIndex into #VelocityObjectData which are
* indexed using persistent keys (unlike geometries which are indexed by volatile ID). */
for (VelocityObjectData &vel : velocity_map.values()) {
const VelocityGeometryData &geom = geometry_map.lookup_default(vel.id,
VelocityGeometryData());
vel.geo.len[step_] = geom.len;
vel.geo.ofs[step_] = geom.ofs;
/* Avoid reuse. */
vel.id = nullptr;
}
geometry_map.clear();
}
auto swap_steps = [&](eVelocityStep step_a, eVelocityStep step_b) {
SWAP(VelocityObjectBuf *, object_steps[step_a], object_steps[step_b]);
SWAP(VelocityGeometryBuf *, geometry_steps[step_a], geometry_steps[step_b]);
SWAP(CameraDataBuf *, camera_steps[step_a], camera_steps[step_b]);
for (VelocityObjectData &vel : velocity_map.values()) {
vel.obj.ofs[step_a] = vel.obj.ofs[step_b];
vel.obj.ofs[step_b] = (uint)-1;
vel.geo.ofs[step_a] = vel.geo.ofs[step_b];
vel.geo.len[step_a] = vel.geo.len[step_b];
vel.geo.ofs[step_b] = (uint)-1;
vel.geo.len[step_b] = (uint)-1;
}
};
if (inst_.is_viewport()) {
/* For viewport we only use the last rendered redraw as previous frame.
* We swap current with previous step at the end of a redraw.
* We do not support motion blur as it is rendered to avoid conflicting motions
* for temporal reprojection. */
swap_steps(eVelocityStep::STEP_PREVIOUS, eVelocityStep::STEP_CURRENT);
}
else {
/* Render case: The STEP_CURRENT is left untouched. */
swap_steps(eVelocityStep::STEP_PREVIOUS, eVelocityStep::STEP_NEXT);
}
}
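To make the swap semantics above easier to follow, here is a minimal standalone C++ sketch of the same bookkeeping, not the EEVEE API (all names are illustrative): one buffer per time step, the viewport recycling the current step as the new previous step, and the render path rotating previous/next around an untouched current step.

#include <array>
#include <cstdio>
#include <utility>
#include <vector>

enum Step { STEP_PREVIOUS = 0, STEP_CURRENT = 1, STEP_NEXT = 2 };

/* Illustrative stand-in for a per-step object matrix buffer. */
using StepBuffer = std::vector<float>;

struct Steps {
  std::array<StepBuffer, 3> object_steps;

  /* Mirrors the `swap_steps` lambda: exchange two step buffers. */
  void swap_steps(Step a, Step b)
  {
    std::swap(object_steps[a], object_steps[b]);
  }

  void step_swap(bool is_viewport)
  {
    if (is_viewport) {
      /* Viewport: the last drawn state becomes the previous step of the next redraw. */
      swap_steps(STEP_PREVIOUS, STEP_CURRENT);
    }
    else {
      /* Render: STEP_CURRENT stays untouched, the next step becomes the new previous one. */
      swap_steps(STEP_PREVIOUS, STEP_NEXT);
    }
  }
};

int main()
{
  Steps steps;
  steps.object_steps[STEP_NEXT] = {1.0f, 2.0f};
  steps.step_swap(/*is_viewport=*/false);
  std::printf("previous step now holds %zu entries\n", steps.object_steps[STEP_PREVIOUS].size());
  return 0;
}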
void VelocityModule::begin_sync()
{
if (inst_.is_viewport()) {
/* Viewport always evaluates the current step. */
step_ = STEP_CURRENT;
}
step_camera_sync();
object_steps_usage[step_] = 0;
}
/* This is the end of the current frame sync. Not the step_sync. */
void VelocityModule::end_sync()
{
Vector<ObjectKey, 0> deleted_obj;
uint32_t max_resource_id_ = 0u;
for (Map<ObjectKey, VelocityObjectData>::Item item : velocity_map.items()) {
if (item.value.obj.resource_id == (uint)-1) {
deleted_obj.append(item.key);
}
else {
max_resource_id_ = max_uu(max_resource_id_, item.value.obj.resource_id);
}
}
if (deleted_obj.size() > 0) {
// inst_.sampling.reset();
}
for (auto key : deleted_obj) {
velocity_map.remove(key);
}
indirection_buf.resize(power_of_2_max_u(max_resource_id_ + 1));
/* Avoid uploading more data to the GPU as well as an extra level of
 * indirection on the GPU by copying the offsets back to the VelocityIndex. */
for (VelocityObjectData &vel : velocity_map.values()) {
/* Disable deform if vertex count mismatch. */
if (inst_.is_viewport()) {
/* Current geometry step will be copied at the end of the frame.
* Thus vel.geo.len[STEP_CURRENT] is not yet valid and the current length is manually
* retrieved. */
GPUVertBuf *pos_buf = geometry_map.lookup_default(vel.id, VelocityGeometryData()).pos_buf;
vel.geo.do_deform = pos_buf != nullptr &&
(vel.geo.len[STEP_PREVIOUS] == GPU_vertbuf_get_vertex_len(pos_buf));
}
else {
vel.geo.do_deform = (vel.geo.len[STEP_PREVIOUS] == vel.geo.len[STEP_CURRENT]) &&
(vel.geo.len[STEP_NEXT] == vel.geo.len[STEP_CURRENT]);
}
indirection_buf[vel.obj.resource_id] = vel;
/* Reset for next sync. */
vel.obj.resource_id = (uint)-1;
}
object_steps[STEP_PREVIOUS]->push_update();
object_steps[STEP_NEXT]->push_update();
camera_steps[STEP_PREVIOUS]->push_update();
camera_steps[STEP_CURRENT]->push_update();
camera_steps[STEP_NEXT]->push_update();
indirection_buf.push_update();
{
resolve_ps_ = DRW_pass_create("Velocity.Resolve", (DRWState)0);
GPUShader *sh = inst_.shaders.static_shader_get(VELOCITY_RESOLVE);
DRWShadingGroup *grp = DRW_shgroup_create(sh, resolve_ps_);
DRW_shgroup_uniform_texture_ref(grp, "depth_tx", &input_depth_tx_);
DRW_shgroup_uniform_image_ref(grp, "velocity_view_img", &velocity_view_tx_);
DRW_shgroup_uniform_image_ref(grp, "velocity_camera_img", &velocity_camera_tx_);
DRW_shgroup_uniform_block(grp, "camera_prev", *camera_steps[STEP_PREVIOUS]);
DRW_shgroup_uniform_block(grp, "camera_curr", *camera_steps[STEP_CURRENT]);
DRW_shgroup_uniform_block(grp, "camera_next", *camera_steps[STEP_NEXT]);
DRW_shgroup_call_compute_ref(grp, resolve_dispatch_size_);
}
}
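For context, the indirection buffer filled above is just a flat array indexed by the draw-manager resource ID, sized to the next power of two of the highest ID seen this frame, into which the per-object entries of the persistent map are copied. A standalone sketch of that flattening step, with illustrative types standing in for ObjectKey and VelocityIndex:

#include <algorithm>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

/* Illustrative payload; the real entry is the GPU-shared VelocityIndex. */
struct Entry {
  uint32_t resource_id = uint32_t(-1); /* Assigned while syncing the frame, -1 if unused. */
};

static uint32_t power_of_2_max(uint32_t x)
{
  uint32_t p = 1;
  while (p < x) {
    p <<= 1;
  }
  return p;
}

/* Flatten the persistent key -> entry map into an array the shader can index by resource ID. */
static std::vector<Entry> build_indirection(const std::unordered_map<std::string, Entry> &map)
{
  uint32_t max_id = 0;
  for (const auto &item : map) {
    if (item.second.resource_id != uint32_t(-1)) {
      max_id = std::max(max_id, item.second.resource_id);
    }
  }
  std::vector<Entry> indirection(power_of_2_max(max_id + 1));
  for (const auto &item : map) {
    if (item.second.resource_id != uint32_t(-1)) {
      indirection[item.second.resource_id] = item.second;
    }
  }
  return indirection;
}

int main()
{
  std::unordered_map<std::string, Entry> map;
  map["Cube"].resource_id = 0;
  map["Lamp"].resource_id = 5;
  return int(build_indirection(map).size()) - 8; /* 5 + 1 rounds up to 8 slots. */
}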
bool VelocityModule::object_has_velocity(const Object *ob)
{
#if 0
RigidBodyOb *rbo = ob->rigidbody_object;
/* Active rigidbody objects only, as only those are affected by sim. */
const bool has_rigidbody = (rbo && (rbo->type == RBO_TYPE_ACTIVE));
/* For now we assume dupli objects are moving. */
const bool is_dupli = (ob->base_flag & BASE_FROM_DUPLI) != 0;
const bool object_moves = is_dupli || has_rigidbody || BKE_object_moves_in_time(ob, true);
#else
UNUSED_VARS(ob);
/* BKE_object_moves_in_time does not work in some cases.
 * Better to detect non-moving objects after evaluation. */
const bool object_moves = true;
#endif
return object_moves;
}
bool VelocityModule::object_is_deform(const Object *ob)
{
RigidBodyOb *rbo = ob->rigidbody_object;
/* Active rigidbody objects only, as only those are affected by sim. */
const bool has_rigidbody = (rbo && (rbo->type == RBO_TYPE_ACTIVE));
const bool is_deform = BKE_object_is_deform_modified(inst_.scene, (Object *)ob) ||
(has_rigidbody && (rbo->flag & RBO_FLAG_USE_DEFORM) != 0);
return is_deform;
}
void VelocityModule::bind_resources(DRWShadingGroup *grp)
{
/* For viewport, only previous motion is supported.
* Still bind previous step to avoid undefined behavior. */
eVelocityStep next = inst_.is_viewport() ? STEP_PREVIOUS : STEP_NEXT;
DRW_shgroup_storage_block_ref(grp, "velocity_obj_prev_buf", &(*object_steps[STEP_PREVIOUS]));
DRW_shgroup_storage_block_ref(grp, "velocity_obj_next_buf", &(*object_steps[next]));
DRW_shgroup_storage_block_ref(grp, "velocity_geo_prev_buf", &(*geometry_steps[STEP_PREVIOUS]));
DRW_shgroup_storage_block_ref(grp, "velocity_geo_next_buf", &(*geometry_steps[next]));
DRW_shgroup_uniform_block_ref(grp, "camera_prev", &(*camera_steps[STEP_PREVIOUS]));
DRW_shgroup_uniform_block_ref(grp, "camera_curr", &(*camera_steps[STEP_CURRENT]));
DRW_shgroup_uniform_block_ref(grp, "camera_next", &(*camera_steps[next]));
DRW_shgroup_storage_block_ref(grp, "velocity_indirection_buf", &indirection_buf);
}
/* Resolve pass for static geometry and to camera space projection. */
void VelocityModule::resolve_camera_motion(GPUTexture *depth_tx,
GPUTexture *velocity_view_tx,
GPUTexture *velocity_camera_tx)
{
input_depth_tx_ = depth_tx;
velocity_view_tx_ = velocity_view_tx;
velocity_camera_tx_ = velocity_camera_tx;
resolve_dispatch_size_.x = divide_ceil_u(GPU_texture_width(depth_tx), 8);
resolve_dispatch_size_.y = divide_ceil_u(GPU_texture_height(depth_tx), 8);
DRW_draw_pass(resolve_ps_);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Velocity View
* \{ */
void VelocityView::sync()
{
/* TODO: Remove. */
velocity_view_tx_.sync();
velocity_camera_tx_.sync();
}
void VelocityView::acquire(int2 extent)
{
/* WORKAROUND: View name should be unique and static.
* With this, we can reuse the same texture across views. */
DrawEngineType *owner = (DrawEngineType *)view_name_.c_str();
/* Only RG16F when doing only reprojection or motion blur. */
eGPUTextureFormat format = inst_.is_viewport() ? GPU_RG16F : GPU_RGBA16F;
velocity_view_tx_.acquire(extent, format, owner);
if (false /* TODO(fclem): Panoramic camera. */) {
velocity_camera_tx_.acquire(extent, format, owner);
}
else {
velocity_camera_tx_.acquire(int2(1), format, owner);
}
}
void VelocityView::resolve(GPUTexture *depth_tx)
{
inst_.velocity.resolve_camera_motion(depth_tx, velocity_view_tx_, velocity_camera_tx_);
}
void VelocityView::release()
{
velocity_view_tx_.release();
velocity_camera_tx_.release();
}
/** \} */
} // namespace blender::eevee

View File

@ -0,0 +1,178 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2021 Blender Foundation.
*/
/** \file
* \ingroup eevee
*
* The velocity pass outputs motion vectors to use for either
* temporal re-projection or motion blur.
*
 * It is the module that tracks object data between frame updates.
*/
#pragma once
#include "BLI_map.hh"
#include "eevee_shader_shared.hh"
#include "eevee_sync.hh"
namespace blender::eevee {
/* -------------------------------------------------------------------- */
/** \name VelocityModule
*
* \{ */
/** Container for scene velocity data. */
class VelocityModule {
friend class VelocityView;
public:
struct VelocityObjectData : public VelocityIndex {
/** ID to retrieve the corresponding #VelocityGeometryData after copy. */
ID *id;
};
struct VelocityGeometryData {
/** VertBuf not yet ready to be copied to the #VelocityGeometryBuf. */
GPUVertBuf *pos_buf = nullptr;
/* Offset in the #VelocityGeometryBuf to the start of the data. In vertices. */
int ofs;
/* Length of the vertex buffer. In vertices. */
int len;
};
/**
* The map contains indirection indices to the obmat and geometry in each step buffer.
 * Note that each object component gets its own resource id so one component corresponds to one
* geometry offset.
*/
Map<ObjectKey, VelocityObjectData> velocity_map;
/** Geometry to be copied to VelocityGeometryBuf. Indexed by evaluated ID *. Empty after the copy in #step_swap(). */
Map<ID *, VelocityGeometryData> geometry_map;
/** Contains all object matrices for each time step. */
std::array<VelocityObjectBuf *, 3> object_steps;
/** Contains all Geometry steps from deforming objects for each time step. */
std::array<VelocityGeometryBuf *, 3> geometry_steps;
/** Number of occupied slots in each `object_steps`. */
int3 object_steps_usage = int3(0);
/** Buffer of all #VelocityIndex used in this frame. Indexed by draw manager resource id. */
VelocityIndexBuf indirection_buf;
/**
 * Copies of camera data. One for each time step.
*/
std::array<CameraDataBuf *, 3> camera_steps;
private:
Instance &inst_;
eVelocityStep step_ = STEP_CURRENT;
DRWPass *resolve_ps_ = nullptr;
/** Reference only. Not owned. */
GPUTexture *input_depth_tx_;
GPUTexture *velocity_view_tx_;
GPUTexture *velocity_camera_tx_;
int3 resolve_dispatch_size_ = int3(1, 1, 1);
public:
VelocityModule(Instance &inst) : inst_(inst)
{
for (VelocityObjectBuf *&step_buf : object_steps) {
step_buf = new VelocityObjectBuf();
}
for (VelocityGeometryBuf *&step_buf : geometry_steps) {
step_buf = new VelocityGeometryBuf();
}
for (CameraDataBuf *&step_buf : camera_steps) {
step_buf = new CameraDataBuf();
}
};
~VelocityModule()
{
for (VelocityObjectBuf *step_buf : object_steps) {
delete step_buf;
}
for (VelocityGeometryBuf *step_buf : geometry_steps) {
delete step_buf;
}
for (CameraDataBuf *step_buf : camera_steps) {
delete step_buf;
}
}
void init();
void step_camera_sync();
void step_sync(eVelocityStep step, float time);
/* Gather motion data. Returns true if the object **can** have motion. */
bool step_object_sync(Object *ob, ObjectKey &ob_key, int recalc = 0);
/* Moves next frame data to previous frame data. Nullify next frame data. */
void step_swap();
void begin_sync();
void end_sync();
void bind_resources(DRWShadingGroup *grp);
private:
bool object_has_velocity(const Object *ob);
bool object_is_deform(const Object *ob);
void resolve_camera_motion(GPUTexture *depth_tx,
GPUTexture *velocity_view_tx,
GPUTexture *velocity_camera_tx);
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Velocity
*
* \{ */
/**
* Per view module.
*/
class VelocityView {
private:
Instance &inst_;
StringRefNull view_name_;
TextureFromPool velocity_camera_tx_ = {"velocity_camera_tx_"};
TextureFromPool velocity_view_tx_ = {"velocity_view_tx_"};
public:
VelocityView(Instance &inst, const char *name) : inst_(inst), view_name_(name){};
~VelocityView(){};
void sync();
void acquire(int2 extent);
void release();
void resolve(GPUTexture *depth_tx);
/**
* Getters
**/
GPUTexture *view_vectors_get() const
{
return velocity_view_tx_;
}
GPUTexture *camera_vectors_get() const
{
return (velocity_camera_tx_.is_valid()) ? velocity_camera_tx_ : velocity_view_tx_;
}
};
/** \} */
} // namespace blender::eevee

View File

@ -86,7 +86,7 @@ void ShadingView::sync(int2 render_extent_)
// dof_.sync(winmat_p, extent_);
// mb_.sync(extent_);
// velocity_.sync(extent_);
velocity_.sync();
// rt_buffer_opaque_.sync(extent_);
// rt_buffer_refract_.sync(extent_);
// inst_.hiz_back.view_sync(extent_);
@ -108,22 +108,30 @@ void ShadingView::render()
* With this, we can reuse the same texture across views. */
DrawEngineType *owner = (DrawEngineType *)name_;
DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
depth_tx_.ensure_2d(GPU_DEPTH24_STENCIL8, extent_);
combined_tx_.acquire(extent_, GPU_RGBA16F, owner);
view_fb_.ensure(GPU_ATTACHMENT_TEXTURE(depth_tx_), GPU_ATTACHMENT_TEXTURE(combined_tx_));
velocity_.acquire(extent_);
// combined_fb_.ensure(GPU_ATTACHMENT_TEXTURE(depth_tx_), GPU_ATTACHMENT_TEXTURE(combined_tx_));
// prepass_fb_.ensure(GPU_ATTACHMENT_TEXTURE(depth_tx_),
// GPU_ATTACHMENT_TEXTURE(velocity_.view_vectors_get()));
combined_fb_.ensure(GPU_ATTACHMENT_TEXTURE(dtxl->depth), GPU_ATTACHMENT_TEXTURE(dtxl->color));
prepass_fb_.ensure(GPU_ATTACHMENT_TEXTURE(dtxl->depth),
GPU_ATTACHMENT_TEXTURE(velocity_.view_vectors_get()));
update_view();
DRW_stats_group_start(name_);
// DRW_view_set_active(render_view_);
float4 clear_velocity(VELOCITY_INVALID);
GPU_framebuffer_bind(prepass_fb_);
GPU_framebuffer_clear_color(prepass_fb_, clear_velocity);
/* Alpha stores transmittance. So start at 1. */
float4 clear_color = {0.0f, 0.0f, 0.0f, 1.0f};
// GPU_framebuffer_bind(view_fb_);
// GPU_framebuffer_clear_color_depth(view_fb_, clear_color, 1.0f);
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
GPU_framebuffer_bind(dfbl->default_fb);
GPU_framebuffer_clear_color_depth(dfbl->default_fb, clear_color, 1.0f);
GPU_framebuffer_bind(combined_fb_);
GPU_framebuffer_clear_color_depth(combined_fb_, clear_color, 1.0f);
inst_.pipelines.world.render();
@ -134,12 +142,13 @@ void ShadingView::render()
// inst_.lookdev.render_overlay(view_fb_);
inst_.pipelines.forward.render(render_view_, depth_tx_, combined_tx_);
inst_.pipelines.forward.render(render_view_, prepass_fb_, combined_fb_, depth_tx_, combined_tx_);
// inst_.lights.debug_draw(view_fb_);
// inst_.shadows.debug_draw(view_fb_);
// velocity_.render(depth_tx_);
// velocity_.resolve(depth_tx_);
velocity_.resolve(dtxl->depth);
// if (inst_.render_passes.vector) {
// inst_.render_passes.vector->accumulate(velocity_.camera_vectors_get(), sub_view_);
@ -159,6 +168,7 @@ void ShadingView::render()
combined_tx_.release();
postfx_tx_.release();
velocity_.release();
}
GPUTexture *ShadingView::render_post(GPUTexture *input_tx)

View File

@ -21,6 +21,7 @@
#include "eevee_camera.hh"
#include "eevee_pipeline.hh"
#include "eevee_shader.hh"
#include "eevee_velocity.hh"
namespace blender::eevee {
@ -43,13 +44,14 @@ class ShadingView {
/** Post-fx modules. */
// DepthOfField dof_;
// MotionBlur mb_;
// Velocity velocity_;
VelocityView velocity_;
/** Raytracing persistent buffers. Only opaque and refraction can have surface tracing. */
// RaytraceBuffer rt_buffer_opaque_;
// RaytraceBuffer rt_buffer_refract_;
Framebuffer view_fb_;
Framebuffer prepass_fb_;
Framebuffer combined_fb_;
Texture depth_tx_;
TextureFromPool combined_tx_;
TextureFromPool postfx_tx_;
@ -69,7 +71,7 @@ class ShadingView {
public:
ShadingView(Instance &inst, const char *name, const float (*face_matrix)[4])
: inst_(inst), name_(name), face_matrix_(face_matrix){};
: inst_(inst), name_(name), face_matrix_(face_matrix), velocity_(inst, name){};
~ShadingView(){};

View File

@ -5,6 +5,7 @@
#pragma BLENDER_REQUIRE(eevee_attributes_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
void main()
{
@ -27,6 +28,19 @@ void main()
interp.N = cross(T, interp.curves_binormal);
interp.curves_strand_id = hair_get_strand_id();
interp.barycentric_coords = hair_get_barycentric();
#ifdef MAT_VELOCITY
/* Due to the screen space nature of the vertex positioning, we compute only the motion of the curve
* strand, not its cylinder. Otherwise we would add the rotation velocity. */
int vert_idx = hair_get_base_id();
vec3 prv, nxt, pos = texelFetch(hairPointBuffer, vert_idx).point_position;
velocity_local_pos_get(pos, vert_idx, prv, nxt);
/* FIXME(fclem): Evaluating before displacement avoids displacement being treated as motion, but
 * ignores motion from animated displacement. Supporting animated displacement motion vectors
 * would require evaluating the nodetree multiple times with different nodetree UBOs evaluated at
 * different times, but also with different attributes (maybe we could assume static attributes at
 * least). */
velocity_vertex(prv, pos, nxt, motion.prev, motion.next);
#endif
init_globals();
attrib_load();

View File

@ -3,6 +3,7 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_attributes_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
void main()
{
@ -38,6 +39,16 @@ void main()
aspect,
thickness,
hardness);
#ifdef MAT_VELOCITY
/* GPencil does not support deformation motion blur. */
vec3 lP_curr = transform_point(ModelMatrixInverse, interp.P);
/* FIXME(fclem): Evaluating before displacement avoids displacement being treated as motion, but
 * ignores motion from animated displacement. Supporting animated displacement motion vectors
 * would require evaluating the nodetree multiple times with different nodetree UBOs evaluated at
 * different times, but also with different attributes (maybe we could assume static attributes at
 * least). */
velocity_vertex(lP_curr, lP_curr, lP_curr, motion.prev, motion.next);
#endif
init_globals();
attrib_load();

View File

@ -3,6 +3,7 @@
#pragma BLENDER_REQUIRE(eevee_attributes_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
void main()
{
@ -10,6 +11,16 @@ void main()
interp.P = point_object_to_world(pos);
interp.N = normal_object_to_world(nor);
#ifdef MAT_VELOCITY
vec3 prv, nxt;
velocity_local_pos_get(pos, gl_VertexID, prv, nxt);
/* FIXME(fclem): Evaluating before displacement avoids displacement being treated as motion, but
 * ignores motion from animated displacement. Supporting animated displacement motion vectors
 * would require evaluating the nodetree multiple times with different nodetree UBOs evaluated at
 * different times, but also with different attributes (maybe we could assume static attributes at
 * least). */
velocity_vertex(prv, pos, nxt, motion.prev, motion.next);
#endif
init_globals();
attrib_load();

View File

@ -8,6 +8,7 @@
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
/* From the paper "Hashed Alpha Testing" by Chris Wyman and Morgan McGuire. */
float hash(vec2 a)
@ -69,4 +70,16 @@ void main()
discard;
}
#endif
#ifdef MAT_VELOCITY
vec4 out_velocity_camera; /* TODO(fclem): Panoramic cameras. */
velocity_camera(interp.P + motion.prev,
interp.P,
interp.P - motion.next,
out_velocity_camera,
out_velocity_view);
/* For testing in viewport. */
out_velocity_view.zw = vec2(0.0);
#endif
}

View File

@ -0,0 +1,101 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_camera_lib.glsl)
#ifdef VELOCITY_CAMERA
/**
 * Given a triple of positions, compute the previous and next motion vectors.
 * Returns UV space motion vectors in pairs (motion_prev.xy, motion_next.xy).
*/
vec4 velocity_view(vec3 P_prev, vec3 P, vec3 P_next)
{
vec2 prev_uv, curr_uv, next_uv;
prev_uv = transform_point(ProjectionMatrix, transform_point(camera_prev.viewmat, P_prev)).xy;
curr_uv = transform_point(ViewProjectionMatrix, P).xy;
next_uv = transform_point(ProjectionMatrix, transform_point(camera_next.viewmat, P_next)).xy;
vec4 motion;
motion.xy = prev_uv - curr_uv;
motion.zw = curr_uv - next_uv;
/* Convert NDC velocity to UV velocity */
motion *= 0.5;
return motion;
}
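The final scale by 0.5 is the NDC to UV conversion: since uv = 0.5 * ndc + 0.5, any delta between NDC positions is halved in UV space. Written out (a restatement of the code above, with p_prev, p_curr, p_next the projected xy NDC positions):

$$ \mathrm{motion.xy} = \tfrac{1}{2}\,(p_{\mathrm{prev}} - p_{\mathrm{curr}}), \qquad \mathrm{motion.zw} = \tfrac{1}{2}\,(p_{\mathrm{curr}} - p_{\mathrm{next}}) $$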
/**
 * Given a triple of positions, compute the previous and next motion vectors.
 * Returns UV space motion vectors in pairs (motion_prev.xy, motion_next.xy).
* \a velocity_camera is the motion in film UV space after camera projection.
* \a velocity_view is the motion in ShadingView UV space. It is different
* from velocity_camera for multi-view rendering.
*/
void velocity_camera(vec3 P_prev, vec3 P, vec3 P_next, out vec4 vel_camera, out vec4 vel_view)
{
vec2 prev_uv, curr_uv, next_uv;
prev_uv = camera_uv_from_world(camera_prev, P_prev);
curr_uv = camera_uv_from_world(camera_curr, P);
next_uv = camera_uv_from_world(camera_next, P_next);
vel_camera.xy = prev_uv - curr_uv;
vel_camera.zw = curr_uv - next_uv;
if (is_panoramic(camera_curr.type)) {
/* This path is only used if using panoramic projections. Since the views always have
* the same 45° aperture angle, we can safely reuse the projection matrix. */
prev_uv = transform_point(ProjectionMatrix, transform_point(camera_prev.viewmat, P_prev)).xy;
curr_uv = transform_point(ViewProjectionMatrix, P).xy;
next_uv = transform_point(ProjectionMatrix, transform_point(camera_next.viewmat, P_next)).xy;
vel_view.xy = prev_uv - curr_uv;
vel_view.zw = curr_uv - next_uv;
/* Convert NDC velocity to UV velocity */
vel_view *= 0.5;
}
else {
vel_view = vel_camera;
}
}
#endif
#ifdef MAT_VELOCITY
/**
 * Fetch the local positions of this vertex at the previous and next time steps.
 * Falls back to the current position when no deformation data is available for a step.
*/
void velocity_local_pos_get(vec3 lP, int vert_id, out vec3 lP_prev, out vec3 lP_next)
{
VelocityIndex vel = velocity_indirection_buf[resource_id];
lP_next = lP_prev = lP;
if (vel.geo.do_deform) {
if (vel.geo.ofs[STEP_PREVIOUS] != -1) {
lP_prev = velocity_geo_prev_buf[vel.geo.ofs[STEP_PREVIOUS] + vert_id].xyz;
}
if (vel.geo.ofs[STEP_NEXT] != -1) {
lP_next = velocity_geo_next_buf[vel.geo.ofs[STEP_NEXT] + vert_id].xyz;
}
}
}
/**
 * Given a triple of local positions, compute the previous and next motion vectors.
 * Returns a pair of world space motion deltas.
*/
void velocity_vertex(
vec3 lP_prev, vec3 lP, vec3 lP_next, out vec3 motion_prev, out vec3 motion_next)
{
VelocityIndex vel = velocity_indirection_buf[resource_id];
mat4 obmat_prev = velocity_obj_prev_buf[vel.obj.ofs[STEP_PREVIOUS]];
mat4 obmat_next = velocity_obj_next_buf[vel.obj.ofs[STEP_NEXT]];
vec3 P_prev = transform_point(obmat_prev, lP_prev);
vec3 P_next = transform_point(obmat_next, lP_next);
vec3 P = transform_point(ModelMatrix, lP);
motion_prev = P_prev - P;
motion_next = P_next - P;
}
#endif
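For reference, velocity_vertex() above amounts to transforming each fetched local position by the object matrix of its own time step and differencing against the current world position. With M_prev, M, M_next the per-step object matrices and lP the local positions:

$$ \Delta_{\mathrm{prev}} = M_{\mathrm{prev}}\,lP_{\mathrm{prev}} - M\,lP, \qquad \Delta_{\mathrm{next}} = M_{\mathrm{next}}\,lP_{\mathrm{next}} - M\,lP $$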

View File

@ -0,0 +1,58 @@
/**
 * Fullscreen pass that computes motion vectors for static geometry.
* Animated geometry has already written correct motion vectors.
*/
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_velocity_lib.glsl)
#define is_valid_output(img_) (imageSize(img_).x > 1)
void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
vec4 motion = imageLoad(velocity_view_img, texel);
bool pixel_has_valid_motion = (motion.x != VELOCITY_INVALID);
float depth = texelFetch(depth_tx, texel, 0).r;
bool is_background = (depth == 1.0f);
vec2 uv = vec2(texel) * drw_view.viewport_size_inverse;
vec3 P_next, P_prev, P_curr;
if (pixel_has_valid_motion) {
/* Animated geometry. View motion already computed during prepass. Convert only to camera. */
// P_prev = get_world_space_from_depth(uv + motion.xy, 0.5);
// P_curr = get_world_space_from_depth(uv, 0.5);
// P_next = get_world_space_from_depth(uv + motion.zw, 0.5);
return;
}
else if (is_background) {
/* NOTE: Use viewCameraVec to avoid imprecision if camera is far from origin. */
vec3 vV = viewCameraVec(get_view_space_from_depth(uv, 1.0));
vec3 V = transform_direction(ViewMatrixInverse, vV);
/* Background has no motion under camera translation. Translate view vector with the camera. */
/* WATCH(fclem): Might create precision issues. */
P_next = camera_next.viewinv[3].xyz + V;
P_curr = camera_curr.viewinv[3].xyz + V;
P_prev = camera_prev.viewinv[3].xyz + V;
}
else {
/* Static geometry. No translation in world space. */
P_curr = get_world_space_from_depth(uv, depth);
P_prev = P_curr;
P_next = P_curr;
}
vec4 vel_camera, vel_view;
velocity_camera(P_prev, P_curr, P_next, vel_camera, vel_view);
if (in_texture_range(texel, depth_tx)) {
imageStore(velocity_view_img, texel, vel_view);
if (is_valid_output(velocity_camera_img)) {
imageStore(velocity_camera_img, texel, vel_camera);
}
}
}
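This kernel runs in 8x8 workgroups (see local_group_size(8, 8) in the create-info further down and resolve_dispatch_size_ in VelocityModule::resolve_camera_motion), so the dispatch size is the texture extent divided by 8, rounded up. A standalone sketch of that computation (illustrative code, not the draw-manager API):

#include <cstdio>

/* Ceil-divide, the same rounding performed by divide_ceil_u() in the module. */
static unsigned divide_ceil(unsigned value, unsigned divisor)
{
  return (value + divisor - 1) / divisor;
}

int main()
{
  const unsigned width = 1920, height = 1080, group_size = 8;
  /* Each workgroup covers an 8x8 pixel tile; partially covered tiles still need a full group. */
  std::printf("dispatch = %u x %u x 1 groups\n",
              divide_ceil(width, group_size),
              divide_ceil(height, group_size));
  return 0;
}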

View File

@ -22,6 +22,7 @@ GPU_SHADER_CREATE_INFO(eevee_sampling_data)
* \{ */
GPU_SHADER_CREATE_INFO(eevee_geom_mesh)
.additional_info("eevee_shared")
.define("MAT_GEOM_MESH")
.vertex_in(0, Type::VEC3, "pos")
.vertex_in(1, Type::VEC3, "nor")
@ -29,16 +30,19 @@ GPU_SHADER_CREATE_INFO(eevee_geom_mesh)
.additional_info("draw_mesh", "draw_resource_id_varying", "draw_resource_handle");
GPU_SHADER_CREATE_INFO(eevee_geom_gpencil)
.additional_info("eevee_shared")
.define("MAT_GEOM_GPENCIL")
.vertex_source("eevee_geom_gpencil_vert.glsl")
.additional_info("draw_gpencil", "draw_resource_id_varying", "draw_resource_handle");
GPU_SHADER_CREATE_INFO(eevee_geom_curves)
.additional_info("eevee_shared")
.define("MAT_GEOM_CURVES")
.vertex_source("eevee_geom_curves_vert.glsl")
.additional_info("draw_hair", "draw_resource_id_varying", "draw_resource_handle");
GPU_SHADER_CREATE_INFO(eevee_geom_world)
.additional_info("eevee_shared")
.define("MAT_GEOM_WORLD")
.builtins(BuiltinBits::VERTEX_ID)
.vertex_source("eevee_geom_world_vert.glsl")

View File

@ -0,0 +1,55 @@
#include "gpu_shader_create_info.hh"
/* -------------------------------------------------------------------- */
/** \name Surface Velocity
*
* Combined with the depth prepass shader.
* Outputs the view motion vectors for animated objects.
* \{ */
/* Pass world space deltas to the fragment shader.
* This is to make sure that the resulting motion vectors are valid even with displacement. */
GPU_SHADER_INTERFACE_INFO(eevee_velocity_surface_iface, "motion")
.smooth(Type::VEC3, "prev")
.smooth(Type::VEC3, "next");
GPU_SHADER_CREATE_INFO(eevee_velocity_camera)
.define("VELOCITY_CAMERA")
.uniform_buf(1, "CameraData", "camera_prev")
.uniform_buf(2, "CameraData", "camera_curr")
.uniform_buf(3, "CameraData", "camera_next");
GPU_SHADER_CREATE_INFO(eevee_velocity_geom)
.define("MAT_VELOCITY")
.auto_resource_location(true)
.storage_buf(4, Qualifier::READ, "mat4", "velocity_obj_prev_buf[]", Frequency::PASS)
.storage_buf(5, Qualifier::READ, "mat4", "velocity_obj_next_buf[]", Frequency::PASS)
.storage_buf(6, Qualifier::READ, "vec4", "velocity_geo_prev_buf[]", Frequency::PASS)
.storage_buf(7, Qualifier::READ, "vec4", "velocity_geo_next_buf[]", Frequency::PASS)
.storage_buf(
7, Qualifier::READ, "VelocityIndex", "velocity_indirection_buf[]", Frequency::PASS)
.vertex_out(eevee_velocity_surface_iface)
.fragment_out(0, Type::VEC4, "out_velocity_view")
.additional_info("eevee_velocity_camera");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Velocity Resolve
*
* Computes velocity for static objects.
* Also converts motion to camera space (as opposed to view space) if needed.
* \{ */
GPU_SHADER_CREATE_INFO(eevee_velocity_resolve)
.do_static_compilation(true)
.local_group_size(8, 8)
.sampler(0, ImageType::DEPTH_2D, "depth_tx")
.image(0, GPU_RG16F, Qualifier::READ_WRITE, ImageType::FLOAT_2D, "velocity_view_img")
.image(1, GPU_RG16F, Qualifier::WRITE, ImageType::FLOAT_2D, "velocity_camera_img")
.additional_info("eevee_shared")
.compute_source("eevee_velocity_resolve_comp.glsl")
.additional_info("draw_view", "eevee_velocity_camera");
/** \} */

View File

@ -31,9 +31,6 @@
* discarding all data inside it.
* Data can be accessed using the [] operator.
*
* `draw::StorageFlexibleBuffer<T>`
* Same as StorageArrayBuffer but will auto resize on access when using the [] operator.
*
* `draw::StorageBuffer<T>`
* A storage buffer object class inheriting from T.
* Data can be accessed just like a normal T object.
@ -105,7 +102,7 @@ class DataBuffer {
{
BLI_STATIC_ASSERT(!device_only, "");
BLI_assert(index >= 0);
BLI_assert(index < len);
BLI_assert(index < len_);
return data_[index];
}
@ -113,7 +110,7 @@ class DataBuffer {
{
BLI_STATIC_ASSERT(!device_only, "");
BLI_assert(index >= 0);
BLI_assert(index < len);
BLI_assert(index < len_);
return data_[index];
}
@ -142,7 +139,7 @@ class DataBuffer {
const T *end() const
{
BLI_STATIC_ASSERT(!device_only, "");
return data_ + len;
return data_ + len_;
}
T *begin()
@ -153,13 +150,13 @@ class DataBuffer {
T *end()
{
BLI_STATIC_ASSERT(!device_only, "");
return data_ + len;
return data_ + len_;
}
operator Span<T>() const
{
BLI_STATIC_ASSERT(!device_only, "");
return Span<T>(data_, len);
return Span<T>(data_, len_);
}
};
@ -220,7 +217,9 @@ class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
if (name) {
name_ = name;
}
init(len);
this->len_ = len;
constexpr GPUUsageType usage = device_only ? GPU_USAGE_DEVICE_ONLY : GPU_USAGE_DYNAMIC;
ssbo_ = GPU_storagebuf_create_ex(sizeof(T) * this->len_, nullptr, usage, this->name_);
}
~StorageCommon()
@ -228,15 +227,6 @@ class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
GPU_storagebuf_free(ssbo_);
}
void resize(int64_t new_size)
{
BLI_assert(new_size > 0);
if (new_size != this->len_) {
GPU_storagebuf_free(ssbo_);
this->init(new_size);
}
}
void push_update(void)
{
BLI_assert(device_only == false);
@ -252,14 +242,6 @@ class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
{
return &ssbo_;
}
private:
void init(int64_t new_size)
{
this->len_ = new_size;
GPUUsageType usage = device_only ? GPU_USAGE_DEVICE_ONLY : GPU_USAGE_DYNAMIC;
ssbo_ = GPU_storagebuf_create_ex(sizeof(T) * this->len_, nullptr, usage, this->name_);
}
};
} // namespace detail
@ -336,38 +318,34 @@ class StorageArrayBuffer : public detail::StorageCommon<T, len, device_only> {
{
MEM_freeN(this->data_);
}
};
template<
/** Type of the values stored in this uniform buffer. */
typename T,
/** True if created on device and no memory host memory is allocated. */
bool device_only = false>
class StorageFlexibleBuffer : public detail::StorageCommon<T, 1, device_only> {
public:
StorageFlexibleBuffer(const char *name = nullptr)
: detail::StorageCommon<T, 1, device_only>(name)
void resize(int64_t new_size)
{
/* TODO(@fclem): We should map memory instead. */
this->data_ = (T *)MEM_mallocN_aligned(sizeof(T), 16, this->name_);
}
~StorageFlexibleBuffer()
{
MEM_freeN(this->data_);
BLI_assert(new_size > 0);
if (new_size != this->len_) {
/* Manual realloc since MEM_reallocN_aligned does not exist. */
T *new_data_ = (T *)MEM_mallocN_aligned(new_size * sizeof(T), 16, this->name_);
memcpy(new_data_, this->data_, min_uu(this->len_, new_size) * sizeof(T));
MEM_freeN(this->data_);
this->data_ = new_data_;
GPU_storagebuf_free(this->ssbo_);
this->len_ = new_size;
constexpr GPUUsageType usage = device_only ? GPU_USAGE_DEVICE_ONLY : GPU_USAGE_DYNAMIC;
this->ssbo_ = GPU_storagebuf_create_ex(sizeof(T) * this->len_, nullptr, usage, this->name_);
}
}
/* Resize on access. */
T &operator[](int64_t index)
T &get_or_resize(int64_t index)
{
BLI_STATIC_ASSERT(!device_only, "");
BLI_assert(index >= 0);
if (index >= this->len_) {
this->resize(this->len_ * 2);
size_t size = power_of_2_max_u(index + 1);
this->resize(size);
}
return this->data_[index];
}
/* TODO(fclem): Implement shrinking. */
};
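With the flexible-buffer class folded into StorageArrayBuffer, get_or_resize() grows the allocation to the next power of two past the requested index, so repeated appends only reallocate O(log n) times while indices below the current length are served untouched. A standalone CPU-only sketch of the same growth policy (illustrative, no SSBO involved):

#include <cstddef>
#include <vector>

/* Minimal model of the grow-on-access policy used by get_or_resize(). */
template<typename T> class GrowableArray {
  std::vector<T> data_;

  static size_t power_of_2_max(size_t x)
  {
    size_t p = 1;
    while (p < x) {
      p <<= 1;
    }
    return p;
  }

 public:
  T &get_or_resize(size_t index)
  {
    if (index >= data_.size()) {
      /* Grow to the next power of two past the requested index, keeping existing data. */
      data_.resize(power_of_2_max(index + 1));
    }
    return data_[index];
  }

  size_t size() const
  {
    return data_.size();
  }
};

int main()
{
  GrowableArray<float> buf;
  buf.get_or_resize(9) = 1.0f; /* Grows the array to 16 entries. */
  return int(buf.size()) - 16; /* Exits with 0 when the growth policy matches. */
}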
template<

View File

@ -623,12 +623,12 @@ void DRW_shgroup_vertex_buffer_ex(DRWShadingGroup *shgroup,
void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup,
const char *name,
struct GPUVertBuf **vertex_buffer DRW_DEBUG_FILE_LINE_ARGS);
void DRW_shgroup_buffer_texture_ex(DRWShadingGroup *shgroup,
const char *name,
struct GPUVertBuf *vertex_buffer);
void DRW_shgroup_buffer_texture_ref_ex(DRWShadingGroup *shgroup,
const char *name,
struct GPUVertBuf **vertex_buffer);
void DRW_shgroup_buffer_texture(DRWShadingGroup *shgroup,
const char *name,
struct GPUVertBuf *vertex_buffer);
void DRW_shgroup_buffer_texture_ref(DRWShadingGroup *shgroup,
const char *name,
struct GPUVertBuf **vertex_buffer);
#ifdef DRW_UNUSED_RESOURCE_TRACKING
# define DRW_shgroup_vertex_buffer(shgroup, name, vert) \

View File

@ -547,9 +547,9 @@ void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup,
shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF, vertex_buffer, 0, 0, 1);
}
void DRW_shgroup_buffer_texture_ex(DRWShadingGroup *shgroup,
const char *name,
GPUVertBuf *vertex_buffer)
void DRW_shgroup_buffer_texture(DRWShadingGroup *shgroup,
const char *name,
GPUVertBuf *vertex_buffer)
{
int location = GPU_shader_get_ssbo(shgroup->shader, name);
if (location == -1) {
@ -559,9 +559,9 @@ void DRW_shgroup_buffer_texture_ex(DRWShadingGroup *shgroup,
shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE, vertex_buffer, 0, 0, 1);
}
void DRW_shgroup_buffer_texture_ref_ex(DRWShadingGroup *shgroup,
const char *name,
GPUVertBuf **vertex_buffer)
void DRW_shgroup_buffer_texture_ref(DRWShadingGroup *shgroup,
const char *name,
GPUVertBuf **vertex_buffer)
{
int location = GPU_shader_get_ssbo(shgroup->shader, name);
if (location == -1) {

View File

@ -38,7 +38,6 @@
#include "SEQ_iterator.h"
#include "SEQ_sequencer.h"
#include "SEQ_time.h"
#include "SEQ_transform.h"
#include "anim_intern.h"
@ -112,9 +111,9 @@ static int seq_frame_apply_snap(bContext *C, Scene *scene, const int timeline_fr
Sequence *seq;
SEQ_ITERATOR_FOREACH (seq, strips) {
seq_frame_snap_update_best(
SEQ_transform_get_left_handle_frame(seq), timeline_frame, &best_frame, &best_distance);
SEQ_time_left_handle_frame_get(seq), timeline_frame, &best_frame, &best_distance);
seq_frame_snap_update_best(
SEQ_transform_get_right_handle_frame(seq), timeline_frame, &best_frame, &best_distance);
SEQ_time_right_handle_frame_get(seq), timeline_frame, &best_frame, &best_distance);
}
SEQ_collection_free(strips);

View File

@ -28,4 +28,3 @@ set(LIB
blender_add_lib(bf_editor_curves "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")
add_dependencies(bf_editor_curves bf_rna)

View File

@ -784,6 +784,7 @@ set_property(GLOBAL PROPERTY ICON_GEOM_NAMES
ops.generic.select_box
ops.generic.select_circle
ops.generic.select_lasso
ops.generic.select_paint
ops.gpencil.draw
ops.gpencil.draw.eraser
ops.gpencil.draw.line

View File

@ -3077,6 +3077,11 @@ bGPDstroke *ED_gpencil_stroke_nearest_to_ends(bContext *C,
continue;
}
/* Check that stroke is not closed. Closed strokes must not be included in the merge. */
if (gps_target->flag & GP_STROKE_CYCLIC) {
continue;
}
/* Check if one of the ends is inside target stroke bounding box. */
if ((!ED_gpencil_stroke_check_collision(gsc, gps_target, pt2d_start, radius, diff_mat)) &&
(!ED_gpencil_stroke_check_collision(gsc, gps_target, pt2d_end, radius, diff_mat))) {

View File

@ -1083,6 +1083,16 @@ bool ED_view3d_quat_to_axis_view(const float viewquat[4],
float epsilon,
char *r_view,
char *r_view_axis_rotation);
/**
* A version of #ED_view3d_quat_to_axis_view that updates `viewquat`
* if it's within `epsilon` to an axis-view.
*
* \note Include the special case function since most callers need to perform these operations.
*/
bool ED_view3d_quat_to_axis_view_and_reset_quat(float viewquat[4],
float epsilon,
char *r_view,
char *r_view_axis_rotation);
char ED_view3d_lock_view_from_index(int index);
char ED_view3d_axis_view_opposite(char view);

View File

@ -129,6 +129,12 @@ static uiBut *ui_popup_menu_memory__internal(uiBlock *block, uiBut *but)
/* get */
LISTBASE_FOREACH (uiBut *, but_iter, &block->buttons) {
/* Prevent labels (typically headings) from being returned in case the text
 * happens to match one of the menu items.
* Skip separators too as checking them is redundant. */
if (ELEM(but_iter->type, UI_BTYPE_LABEL, UI_BTYPE_SEPR, UI_BTYPE_SEPR_LINE)) {
continue;
}
if (mem[hash_mod] ==
ui_popup_string_hash(but_iter->str, but_iter->flag & UI_BUT_HAS_SEP_CHAR)) {
return but_iter;

View File

@ -321,4 +321,4 @@ void MESH_OT_customdata_custom_splitnormals_clear(struct wmOperatorType *ot);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1374,8 +1374,8 @@ bool ED_mesh_pick_vert(
if (use_zbuf) {
if (dist_px > 0) {
/* sample rect to increase chances of selecting, so that when clicking
* on an face in the backbuf, we can still select a vert */
/* Sample rectangle to increase chances of selecting, so that when clicking
 * on a face in the back-buffer, we can still select a vert. */
*r_index = DRW_select_buffer_find_nearest_to_point(
vc.depsgraph, vc.region, vc.v3d, mval, 1, me->totvert + 1, &dist_px);
}

View File

@ -526,7 +526,7 @@ void OBJECT_OT_collection_link(wmOperatorType *ot)
ot->prop = prop;
}
static int collection_remove_exec(bContext *C, wmOperator *UNUSED(op))
static int collection_remove_exec(bContext *C, wmOperator *op)
{
Main *bmain = CTX_data_main(C);
Object *ob = ED_object_context(C);
@ -535,6 +535,12 @@ static int collection_remove_exec(bContext *C, wmOperator *UNUSED(op))
if (!ob || !collection) {
return OPERATOR_CANCELLED;
}
if (ID_IS_LINKED(collection) || ID_IS_OVERRIDE_LIBRARY(collection)) {
BKE_report(op->reports,
RPT_ERROR,
"Cannot remove an object from a linked or library override collection");
return OPERATOR_CANCELLED;
}
BKE_collection_object_remove(bmain, collection, ob, false);
@ -561,7 +567,7 @@ void OBJECT_OT_collection_remove(wmOperatorType *ot)
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
}
static int collection_unlink_exec(bContext *C, wmOperator *UNUSED(op))
static int collection_unlink_exec(bContext *C, wmOperator *op)
{
Main *bmain = CTX_data_main(C);
Collection *collection = CTX_data_pointer_get_type(C, "collection", &RNA_Collection).data;
@ -569,6 +575,14 @@ static int collection_unlink_exec(bContext *C, wmOperator *UNUSED(op))
if (!collection) {
return OPERATOR_CANCELLED;
}
if (ID_IS_OVERRIDE_LIBRARY(collection) &&
collection->id.override_library->hierarchy_root != &collection->id) {
BKE_report(op->reports,
RPT_ERROR,
"Cannot unlink a library override collection which is not the root of its override "
"hierarchy");
return OPERATOR_CANCELLED;
}
BKE_id_delete(bmain, collection);

View File

@ -1835,6 +1835,11 @@ static int move_to_collection_exec(bContext *C, wmOperator *op)
return OPERATOR_CANCELLED;
}
if (ID_IS_OVERRIDE_LIBRARY(collection)) {
BKE_report(op->reports, RPT_ERROR, "Cannot add objects to a library override collection");
return OPERATOR_CANCELLED;
}
ListBase objects = selected_objects_get(C);
if (is_new) {

View File

@ -381,11 +381,11 @@ struct CombOperationExecutor {
threading::parallel_for(changed_curves.index_range(), 256, [&](const IndexRange range) {
for (const int curve_i : changed_curves.as_span().slice(range)) {
const IndexRange points = curves_->points_for_curve(curve_i);
for (const int segment_i : IndexRange(points.size() - 1)) {
const float3 &p1_cu = positions_cu[points[segment_i]];
float3 &p2_cu = positions_cu[points[segment_i] + 1];
for (const int segment_i : points.drop_back(1)) {
const float3 &p1_cu = positions_cu[segment_i];
float3 &p2_cu = positions_cu[segment_i + 1];
const float3 direction = math::normalize(p2_cu - p1_cu);
const float expected_length_cu = expected_lengths_cu[points[segment_i]];
const float expected_length_cu = expected_lengths_cu[segment_i];
p2_cu = p1_cu + direction * expected_length_cu;
}
}

View File

@ -162,9 +162,9 @@ struct DeleteOperationExecutor {
threading::parallel_for(curves_->curves_range(), 512, [&](IndexRange curve_range) {
for (const int curve_i : curve_range) {
const IndexRange points = curves_->points_for_curve(curve_i);
for (const int segment_i : IndexRange(points.size() - 1)) {
const float3 pos1_cu = brush_transform_inv * positions_cu[points[segment_i]];
const float3 pos2_cu = brush_transform_inv * positions_cu[points[segment_i + 1]];
for (const int segment_i : points.drop_back(1)) {
const float3 pos1_cu = brush_transform_inv * positions_cu[segment_i];
const float3 pos2_cu = brush_transform_inv * positions_cu[segment_i + 1];
float2 pos1_re, pos2_re;
ED_view3d_project_float_v2_m4(region_, pos1_cu, pos1_re, projection.values);
@ -220,9 +220,9 @@ struct DeleteOperationExecutor {
threading::parallel_for(curves_->curves_range(), 512, [&](IndexRange curve_range) {
for (const int curve_i : curve_range) {
const IndexRange points = curves_->points_for_curve(curve_i);
for (const int segment_i : IndexRange(points.size() - 1)) {
const float3 pos1_cu = positions_cu[points[segment_i]];
const float3 pos2_cu = positions_cu[points[segment_i] + 1];
for (const int segment_i : points.drop_back(1)) {
const float3 &pos1_cu = positions_cu[segment_i];
const float3 &pos2_cu = positions_cu[segment_i + 1];
float3 closest_segment_cu, closest_brush_cu;
isect_seg_seg_v3(pos1_cu,

View File

@ -150,25 +150,22 @@ static void do_draw_face_sets_brush_task_cb_ex(void *__restrict userdata,
}
}
}
else if (BKE_pbvh_type(ss->pbvh) == PBVH_GRIDS) {
{
if (!sculpt_brush_test_sq_fn(&test, vd.co)) {
continue;
}
const float fade = bstrength * SCULPT_brush_strength_factor(ss,
brush,
vd.co,
sqrtf(test.dist),
vd.no,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
thread_id);
if (!sculpt_brush_test_sq_fn(&test, vd.co)) {
continue;
}
const float fade = bstrength * SCULPT_brush_strength_factor(ss,
brush,
vd.co,
sqrtf(test.dist),
vd.no,
vd.fno,
vd.mask ? *vd.mask : 0.0f,
vd.index,
thread_id);
if (fade > 0.05f) {
SCULPT_vertex_face_set_set(ss, vd.index, ss->cache->paint_face_set);
}
if (fade > 0.05f) {
SCULPT_vertex_face_set_set(ss, vd.index, ss->cache->paint_face_set);
}
}
}

View File

@ -1652,11 +1652,11 @@ void SCULPT_do_paint_brush(struct PaintModeSettings *paint_mode_settings,
int totnode) ATTR_NONNULL();
/**
* @brief Get the image canvas for painting on the given object.
* \brief Get the image canvas for painting on the given object.
*
* @return #true if an image is found. The #r_image and #r_image_user fields are filled with the
* \return #true if an image is found. The #r_image and #r_image_user fields are filled with the
* image and image user. Returns false when the image isn't found. In the later case the r_image
* and r_image_user are set to nullptr/NULL.
* and r_image_user are set to NULL.
*/
bool SCULPT_paint_image_canvas_get(struct PaintModeSettings *paint_mode_settings,
struct Object *ob,

View File

@ -460,8 +460,9 @@ bool SCULPT_paint_image_canvas_get(PaintModeSettings *paint_mode_settings,
Image **r_image,
ImageUser **r_image_user)
{
BLI_assert(r_image);
BLI_assert(r_image_user);
*r_image = nullptr;
*r_image_user = nullptr;
ImageData image_data;
if (!ImageData::init_active_image(ob, &image_data, paint_mode_settings)) {
return false;

View File

@ -793,8 +793,8 @@ static void sequencer_add_movie_clamp_sound_strip_length(Scene *scene,
return;
}
SEQ_transform_set_right_handle_frame(seq_sound, SEQ_transform_get_right_handle_frame(seq_movie));
SEQ_transform_set_left_handle_frame(seq_sound, SEQ_transform_get_left_handle_frame(seq_movie));
SEQ_time_right_handle_frame_set(seq_sound, SEQ_time_right_handle_frame_get(seq_movie));
SEQ_time_left_handle_frame_set(seq_sound, SEQ_time_left_handle_frame_get(seq_movie));
SEQ_time_update_sequence(scene, seqbase, seq_sound);
}
@ -1300,7 +1300,7 @@ static int sequencer_add_image_strip_exec(bContext *C, wmOperator *op)
/* Adjust length. */
if (load_data.image.len == 1) {
SEQ_transform_set_right_handle_frame(seq, load_data.image.end_frame);
SEQ_time_right_handle_frame_set(seq, load_data.image.end_frame);
SEQ_time_update_sequence(scene, SEQ_active_seqbase_get(ed), seq);
}

View File

@ -1098,26 +1098,23 @@ static void draw_seq_background(Scene *scene,
/* Draw the main strip body. */
if (is_single_image) {
immRectf(pos,
SEQ_transform_get_left_handle_frame(seq),
y1,
SEQ_transform_get_right_handle_frame(seq),
y2);
immRectf(
pos, SEQ_time_left_handle_frame_get(seq), y1, SEQ_time_right_handle_frame_get(seq), y2);
}
else {
immRectf(pos, x1, y1, x2, y2);
}
/* Draw background for hold still regions. */
if (!is_single_image && (seq->startstill || seq->endstill)) {
if (!is_single_image && SEQ_time_has_still_frames(seq)) {
UI_GetColorPtrShade3ubv(col, col, -35);
immUniformColor4ubv(col);
if (seq->startstill) {
if (SEQ_time_has_left_still_frames(seq)) {
const float content_start = min_ff(seq->enddisp, seq->start);
immRectf(pos, seq->startdisp, y1, content_start, y2);
}
if (seq->endstill) {
if (SEQ_time_has_right_still_frames(seq)) {
const float content_end = max_ff(seq->startdisp, seq->start + seq->len);
immRectf(pos, content_end, y1, seq->enddisp, y2);
}
@ -1336,9 +1333,9 @@ static void draw_seq_strip(const bContext *C,
SEQ_TIMELINE_SHOW_STRIP_COLOR_TAG);
/* Draw strip body. */
x1 = (seq->startstill) ? seq->start : seq->startdisp;
x1 = SEQ_time_has_left_still_frames(seq) ? seq->start : seq->startdisp;
y1 = seq->machine + SEQ_STRIP_OFSBOTTOM;
x2 = (seq->endstill) ? (seq->start + seq->len) : seq->enddisp;
x2 = SEQ_time_has_right_still_frames(seq) ? (seq->start + seq->len) : seq->enddisp;
y2 = seq->machine + SEQ_STRIP_OFSTOP;
/* Limit body to strip bounds. Meta strip can end up with content outside of strip range. */

View File

@ -77,7 +77,6 @@
typedef struct TransSeq {
int start, machine;
int startstill, endstill;
int startdisp, enddisp;
int startofs, endofs;
int anim_startofs, anim_endofs;
@ -358,15 +357,14 @@ static int sequencer_snap_exec(bContext *C, wmOperator *op)
if (seq->flag & SELECT && !SEQ_transform_is_locked(channels, seq) &&
SEQ_transform_sequence_can_be_translated(seq)) {
if ((seq->flag & (SEQ_LEFTSEL + SEQ_RIGHTSEL)) == 0) {
SEQ_transform_translate_sequence(
scene, seq, (snap_frame - seq->startofs + seq->startstill) - seq->start);
SEQ_transform_translate_sequence(scene, seq, (snap_frame - seq->startofs) - seq->start);
}
else {
if (seq->flag & SEQ_LEFTSEL) {
SEQ_transform_set_left_handle_frame(seq, snap_frame);
SEQ_time_left_handle_frame_set(seq, snap_frame);
}
else { /* SEQ_RIGHTSEL */
SEQ_transform_set_right_handle_frame(seq, snap_frame);
SEQ_time_right_handle_frame_set(seq, snap_frame);
}
SEQ_transform_handle_xlimits(seq, seq->flag & SEQ_LEFTSEL, seq->flag & SEQ_RIGHTSEL);
SEQ_transform_fix_single_image_seq_offsets(seq);
@ -479,8 +477,6 @@ static void transseq_backup(TransSeq *ts, Sequence *seq)
{
ts->start = seq->start;
ts->machine = seq->machine;
ts->startstill = seq->startstill;
ts->endstill = seq->endstill;
ts->startdisp = seq->startdisp;
ts->enddisp = seq->enddisp;
ts->startofs = seq->startofs;
@ -494,8 +490,6 @@ static void transseq_restore(TransSeq *ts, Sequence *seq)
{
seq->start = ts->start;
seq->machine = ts->machine;
seq->startstill = ts->startstill;
seq->endstill = ts->endstill;
seq->startdisp = ts->startdisp;
seq->enddisp = ts->enddisp;
seq->startofs = ts->startofs;
@ -596,11 +590,8 @@ static int sequencer_slip_invoke(bContext *C, wmOperator *op, const wmEvent *eve
return OPERATOR_RUNNING_MODAL;
}
static bool sequencer_slip_recursively(Scene *scene, SlipData *data, int offset)
static void sequencer_slip_recursively(Scene *scene, SlipData *data, int offset)
{
/* Only data types supported for now. */
bool changed = false;
/* Iterate in reverse so meta-strips are iterated after their children. */
for (int i = data->num_seq - 1; i >= 0; i--) {
Sequence *seq = data->seq_array[i];
@ -614,33 +605,13 @@ static bool sequencer_slip_recursively(Scene *scene, SlipData *data, int offset)
endframe = seq->start + seq->len;
/* Compute the sequence offsets. */
if (endframe > seq->enddisp) {
seq->endstill = 0;
seq->endofs = endframe - seq->enddisp;
changed = true;
}
else {
seq->endstill = seq->enddisp - endframe;
seq->endofs = 0;
changed = true;
}
if (seq->start > seq->startdisp) {
seq->startstill = seq->start - seq->startdisp;
seq->startofs = 0;
changed = true;
}
else {
seq->startstill = 0;
seq->startofs = seq->startdisp - seq->start;
changed = true;
}
seq->endofs = endframe - seq->enddisp;
seq->startofs = seq->startdisp - seq->start;
}
else {
/* No transform data (likely effect strip). Only move start and end. */
seq->startdisp = data->ts[i].startdisp + offset;
seq->enddisp = data->ts[i].enddisp + offset;
changed = true;
}
/* Effects are only added if they are in a meta-strip.
@ -652,13 +623,11 @@ static bool sequencer_slip_recursively(Scene *scene, SlipData *data, int offset)
SEQ_time_update_sequence(scene, seqbase, seq);
}
}
if (changed) {
for (int i = data->num_seq - 1; i >= 0; i--) {
Sequence *seq = data->seq_array[i];
SEQ_relations_invalidate_cache_preprocessed(scene, seq);
}
for (int i = data->num_seq - 1; i >= 0; i--) {
Sequence *seq = data->seq_array[i];
SEQ_relations_invalidate_cache_preprocessed(scene, seq);
}
return changed;
}
/* Make sure that each strip contains at least 1 frame of content. */
@ -688,7 +657,6 @@ static int sequencer_slip_exec(bContext *C, wmOperator *op)
Scene *scene = CTX_data_scene(C);
Editing *ed = SEQ_editing_get(scene);
int offset = RNA_int_get(op->ptr, "offset");
bool success = false;
/* Recursively count the trimmed elements. */
int num_seq = slip_count_sequences_recursive(ed->seqbasep, true);
@ -710,19 +678,16 @@ static int sequencer_slip_exec(bContext *C, wmOperator *op)
}
sequencer_slip_apply_limits(data, &offset);
success = sequencer_slip_recursively(scene, data, offset);
sequencer_slip_recursively(scene, data, offset);
MEM_freeN(data->seq_array);
MEM_freeN(data->trim);
MEM_freeN(data->ts);
MEM_freeN(data);
if (success) {
WM_event_add_notifier(C, NC_SCENE | ND_SEQUENCER, scene);
DEG_id_tag_update(&scene->id, ID_RECALC_SEQUENCER_STRIPS);
return OPERATOR_FINISHED;
}
return OPERATOR_CANCELLED;
WM_event_add_notifier(C, NC_SCENE | ND_SEQUENCER, scene);
DEG_id_tag_update(&scene->id, ID_RECALC_SEQUENCER_STRIPS);
return OPERATOR_FINISHED;
}
static void sequencer_slip_update_header(Scene *scene, ScrArea *area, SlipData *data, int offset)
@ -763,9 +728,8 @@ static int sequencer_slip_modal(bContext *C, wmOperator *op, const wmEvent *even
RNA_int_set(op->ptr, "offset", offset);
if (sequencer_slip_recursively(scene, data, offset)) {
WM_event_add_notifier(C, NC_SCENE | ND_SEQUENCER, scene);
}
sequencer_slip_recursively(scene, data, offset);
WM_event_add_notifier(C, NC_SCENE | ND_SEQUENCER, scene);
return OPERATOR_RUNNING_MODAL;
}
@ -796,9 +760,8 @@ static int sequencer_slip_modal(bContext *C, wmOperator *op, const wmEvent *even
RNA_int_set(op->ptr, "offset", offset);
if (sequencer_slip_recursively(scene, data, offset)) {
WM_event_add_notifier(C, NC_SCENE | ND_SEQUENCER, scene);
}
sequencer_slip_recursively(scene, data, offset);
WM_event_add_notifier(C, NC_SCENE | ND_SEQUENCER, scene);
}
break;
}
@ -876,9 +839,8 @@ static int sequencer_slip_modal(bContext *C, wmOperator *op, const wmEvent *even
RNA_int_set(op->ptr, "offset", offset);
if (sequencer_slip_recursively(scene, data, offset)) {
WM_event_add_notifier(C, NC_SCENE | ND_SEQUENCER, scene);
}
sequencer_slip_recursively(scene, data, offset);
WM_event_add_notifier(C, NC_SCENE | ND_SEQUENCER, scene);
}
return OPERATOR_RUNNING_MODAL;
@ -1822,7 +1784,7 @@ static int sequencer_offset_clear_exec(bContext *C, wmOperator *UNUSED(op))
/* For effects, try to find a replacement input. */
for (seq = ed->seqbasep->first; seq; seq = seq->next) {
if ((seq->type & SEQ_TYPE_EFFECT) == 0 && (seq->flag & SELECT)) {
seq->startofs = seq->endofs = seq->startstill = seq->endstill = 0;
seq->startofs = seq->endofs = 0;
}
}
@ -1896,8 +1858,8 @@ static int sequencer_separate_images_exec(bContext *C, wmOperator *op)
/* TODO: remove f-curve and assign to split image strips.
* The old animation system would remove the user of `seq->ipo`. */
start_ofs = timeline_frame = SEQ_transform_get_left_handle_frame(seq);
frame_end = SEQ_transform_get_right_handle_frame(seq);
start_ofs = timeline_frame = SEQ_time_left_handle_frame_get(seq);
frame_end = SEQ_time_right_handle_frame_get(seq);
while (timeline_frame < frame_end) {
/* New seq. */
@ -1908,7 +1870,7 @@ static int sequencer_separate_images_exec(bContext *C, wmOperator *op)
seq_new->start = start_ofs;
seq_new->type = SEQ_TYPE_IMAGE;
seq_new->len = 1;
seq_new->endstill = step - 1;
seq_new->endofs = 1 - step;
/* New strip. */
strip_new = seq_new->strip;
@ -3151,8 +3113,8 @@ static int seq_cmp_time_startdisp_channel(const void *a, const void *b)
Sequence *seq_a = (Sequence *)a;
Sequence *seq_b = (Sequence *)b;
int seq_a_start = SEQ_transform_get_left_handle_frame(seq_a);
int seq_b_start = SEQ_transform_get_left_handle_frame(seq_b);
int seq_a_start = SEQ_time_left_handle_frame_get(seq_a);
int seq_b_start = SEQ_time_left_handle_frame_get(seq_b);
/* If strips have the same start frame favor the one with a higher channel. */
if (seq_a_start == seq_b_start) {

View File

@ -23,6 +23,7 @@
#include "SEQ_relations.h"
#include "SEQ_render.h"
#include "SEQ_sequencer.h"
#include "SEQ_time.h"
#include "WM_api.h"
#include "WM_types.h"
@ -443,7 +444,8 @@ void draw_seq_strip_thumbnail(View2D *v2d,
float thumb_y_end = y1 + thumb_height;
float cut_off = 0;
float upper_thumb_bound = (seq->endstill) ? (seq->start + seq->len) : seq->enddisp;
float upper_thumb_bound = SEQ_time_has_right_still_frames(seq) ? (seq->start + seq->len) :
seq->enddisp;
if (seq->type == SEQ_TYPE_IMAGE) {
upper_thumb_bound = seq->enddisp;
}

View File

@ -48,11 +48,7 @@ static void view_roll_angle(ARegion *region,
normalize_qt(quat);
if (use_axis_view && RV3D_VIEW_IS_AXIS(rv3d->view) && (fabsf(angle) == (float)M_PI_2)) {
if (ED_view3d_quat_to_axis_view(quat, 0.01f, &rv3d->view, &rv3d->view_axis_roll)) {
if (rv3d->view != RV3D_VIEW_USER) {
ED_view3d_quat_from_axis_view(rv3d->view, rv3d->view_axis_roll, quat_mul);
}
}
ED_view3d_quat_to_axis_view_and_reset_quat(quat, 0.01f, &rv3d->view, &rv3d->view_axis_roll);
}
else {
rv3d->view = RV3D_VIEW_USER;

View File

@ -162,10 +162,8 @@ static void viewrotate_apply_snap(ViewOpsData *vod)
if (found) {
/* lock 'quat_best' to an axis view if we can */
ED_view3d_quat_to_axis_view(quat_best, 0.01f, &rv3d->view, &rv3d->view_axis_roll);
if (rv3d->view != RV3D_VIEW_USER) {
ED_view3d_quat_from_axis_view(rv3d->view, rv3d->view_axis_roll, quat_best);
}
ED_view3d_quat_to_axis_view_and_reset_quat(
quat_best, 0.01f, &rv3d->view, &rv3d->view_axis_roll);
}
else {
copy_qt_qt(quat_best, viewquat_align);

View File

@ -1317,22 +1317,59 @@ bool ED_view3d_quat_to_axis_view(const float quat[4],
*r_view = RV3D_VIEW_USER;
*r_view_axis_roll = RV3D_VIEW_AXIS_ROLL_0;
/* quat values are all unit length */
for (int view = RV3D_VIEW_FRONT; view <= RV3D_VIEW_BOTTOM; view++) {
for (int view_axis_roll = RV3D_VIEW_AXIS_ROLL_0; view_axis_roll <= RV3D_VIEW_AXIS_ROLL_270;
view_axis_roll++) {
if (fabsf(angle_signed_qtqt(
quat, view3d_quat_axis[view - RV3D_VIEW_FRONT][view_axis_roll])) < epsilon) {
*r_view = view;
*r_view_axis_roll = view_axis_roll;
return true;
/* Quaternion values are all unit length. */
if (epsilon < M_PI_4) {
/* Under 45 degrees, just pick the closest value. */
for (int view = RV3D_VIEW_FRONT; view <= RV3D_VIEW_BOTTOM; view++) {
for (int view_axis_roll = RV3D_VIEW_AXIS_ROLL_0; view_axis_roll <= RV3D_VIEW_AXIS_ROLL_270;
view_axis_roll++) {
if (fabsf(angle_signed_qtqt(
quat, view3d_quat_axis[view - RV3D_VIEW_FRONT][view_axis_roll])) < epsilon) {
*r_view = view;
*r_view_axis_roll = view_axis_roll;
return true;
}
}
}
}
else {
/* Epsilon over 45 degrees, check all & use the closest. */
float delta_best = FLT_MAX;
for (int view = RV3D_VIEW_FRONT; view <= RV3D_VIEW_BOTTOM; view++) {
for (int view_axis_roll = RV3D_VIEW_AXIS_ROLL_0; view_axis_roll <= RV3D_VIEW_AXIS_ROLL_270;
view_axis_roll++) {
const float delta_test = fabsf(
angle_signed_qtqt(quat, view3d_quat_axis[view - RV3D_VIEW_FRONT][view_axis_roll]));
if (delta_best > delta_test) {
delta_best = delta_test;
*r_view = view;
*r_view_axis_roll = view_axis_roll;
}
}
}
if (*r_view != RV3D_VIEW_USER) {
return true;
}
}
return false;
}
bool ED_view3d_quat_to_axis_view_and_reset_quat(float quat[4],
const float epsilon,
char *r_view,
char *r_view_axis_roll)
{
const bool is_axis_view = ED_view3d_quat_to_axis_view(quat, epsilon, r_view, r_view_axis_roll);
if (is_axis_view) {
/* Reset `quat` to its view axis, so axis-aligned views are always *exactly* aligned. */
BLI_assert(*r_view != RV3D_VIEW_USER);
ED_view3d_quat_from_axis_view(*r_view, *r_view_axis_roll, quat);
}
return is_axis_view;
}
char ED_view3d_lock_view_from_index(int index)
{
switch (index) {

View File

@ -1172,12 +1172,20 @@ int transformEvent(TransInfo *t, const wmEvent *event)
MOD_CONSTRAINT_SELECT_PLANE;
if (t->con.mode & CON_APPLY) {
stopConstraint(t);
}
initSelectConstraint(t);
initSelectConstraint(t);
/* Use #TREDRAW_SOFT so that #selectConstraint is only called on the next event.
* This allows us to "deselect" the constraint. */
t->redraw = TREDRAW_SOFT;
/* In this case we might just want to remove the constraint,
* so set #TREDRAW_SOFT to only select the constraint on the next mouse move event.
 * This way, confirming without another mouse move effectively cancels the constraint. */
t->redraw = TREDRAW_SOFT;
}
else {
initSelectConstraint(t);
/* When first called, set #TREDRAW_HARD to select constraint immediately in
* #selectConstraint. */
BLI_assert(t->redraw == TREDRAW_HARD);
}
}
}
handled = true;

View File

@ -91,8 +91,8 @@ static void SeqTransInfo(TransInfo *t, Sequence *seq, int *r_count, int *r_flag)
/* *** Extend Transform *** */
int cfra = CFRA;
int left = SEQ_transform_get_left_handle_frame(seq);
int right = SEQ_transform_get_right_handle_frame(seq);
int left = SEQ_time_left_handle_frame_get(seq);
int right = SEQ_time_right_handle_frame_get(seq);
if (((seq->flag & SELECT) == 0 || SEQ_transform_is_locked(channels, seq))) {
*r_count = 0;
@ -173,16 +173,16 @@ static TransData *SeqToTransData(
/* Use seq_tx_get_final_left() and an offset here
* so transform has the left hand location of the strip.
* tdsq->start_offset is used when flushing the tx data back */
start_left = SEQ_transform_get_left_handle_frame(seq);
start_left = SEQ_time_left_handle_frame_get(seq);
td2d->loc[0] = start_left;
tdsq->start_offset = start_left - seq->start; /* use to apply the original location */
break;
case SEQ_LEFTSEL:
start_left = SEQ_transform_get_left_handle_frame(seq);
start_left = SEQ_time_left_handle_frame_get(seq);
td2d->loc[0] = start_left;
break;
case SEQ_RIGHTSEL:
td2d->loc[0] = SEQ_transform_get_right_handle_frame(seq);
td2d->loc[0] = SEQ_time_right_handle_frame_get(seq);
break;
}
@ -489,11 +489,11 @@ static void seq_transform_handle_overwrite_trim(Scene *scene,
continue;
}
if (overlap == STRIP_OVERLAP_LEFT_SIDE) {
SEQ_transform_set_left_handle_frame(seq, transformed->enddisp);
SEQ_time_left_handle_frame_set(seq, transformed->enddisp);
}
else {
BLI_assert(overlap == STRIP_OVERLAP_RIGHT_SIDE);
SEQ_transform_set_right_handle_frame(seq, transformed->startdisp);
SEQ_time_right_handle_frame_set(seq, transformed->startdisp);
}
SEQ_time_update_sequence(scene, seqbasep, seq);
@ -915,7 +915,7 @@ static void flushTransSeq(TransInfo *t)
}
case SEQ_LEFTSEL: { /* No vertical transform. */
int old_startdisp = seq->startdisp;
SEQ_transform_set_left_handle_frame(seq, new_frame);
SEQ_time_left_handle_frame_set(seq, new_frame);
SEQ_transform_handle_xlimits(seq, tdsq->flag & SEQ_LEFTSEL, tdsq->flag & SEQ_RIGHTSEL);
SEQ_transform_fix_single_image_seq_offsets(seq);
SEQ_time_update_sequence(t->scene, seqbasep, seq);
@ -926,7 +926,7 @@ static void flushTransSeq(TransInfo *t)
}
case SEQ_RIGHTSEL: { /* No vertical transform. */
int old_enddisp = seq->enddisp;
SEQ_transform_set_right_handle_frame(seq, new_frame);
SEQ_time_right_handle_frame_set(seq, new_frame);
SEQ_transform_handle_xlimits(seq, tdsq->flag & SEQ_LEFTSEL, tdsq->flag & SEQ_RIGHTSEL);
SEQ_transform_fix_single_image_seq_offsets(seq);
SEQ_time_update_sequence(t->scene, seqbasep, seq);

View File

@ -82,7 +82,7 @@ struct WeldPoly {
int loop_start;
int loop_end;
/* Final Polygon Size. */
int len;
int loop_len;
/* Group of loops that will be affected. */
struct WeldGroup loops;
};
@ -104,9 +104,6 @@ struct WeldMesh {
/* References all polygons and loops that will be affected. */
Vector<WeldLoop> wloop;
Vector<WeldPoly> wpoly;
MutableSpan<WeldPoly> wpoly_new;
int wloop_len;
int wpoly_len;
int wpoly_new_len;
/* From the actual index of the element in the mesh, it indicates what is the index of the Weld
@ -147,14 +144,14 @@ struct WeldLoopOfPolyIter {
* \{ */
#ifdef USE_WELD_DEBUG
static bool weld_iter_loop_of_poly_begin(WeldLoopOfPolyIter *iter,
static bool weld_iter_loop_of_poly_begin(WeldLoopOfPolyIter &iter,
const WeldPoly &wp,
Span<WeldLoop> wloop,
Span<MLoop> mloop,
Span<int> loop_map,
int *group_buffer);
static bool weld_iter_loop_of_poly_next(WeldLoopOfPolyIter *iter);
static bool weld_iter_loop_of_poly_next(WeldLoopOfPolyIter &iter);
static void weld_assert_edge_kill_len(Span<WeldEdge> wedge, const int supposed_kill_len)
{
@ -170,12 +167,8 @@ static void weld_assert_edge_kill_len(Span<WeldEdge> wedge, const int supposed_k
BLI_assert(kills == supposed_kill_len);
}
static void weld_assert_poly_and_loop_kill_len(Span<WeldPoly> wpoly,
Span<WeldPoly> wpoly_new,
Span<WeldLoop> wloop,
static void weld_assert_poly_and_loop_kill_len(WeldMesh *weld_mesh,
Span<MLoop> mloop,
Span<int> loop_map,
Span<int> poly_map,
Span<MPoly> mpoly,
const int supposed_poly_kill_len,
const int supposed_loop_kill_len)
@ -184,11 +177,12 @@ static void weld_assert_poly_and_loop_kill_len(Span<WeldPoly> wpoly,
int loop_kills = mloop.size();
const MPoly *mp = &mpoly[0];
for (int i = 0; i < mpoly.size(); i++, mp++) {
int poly_ctx = poly_map[i];
int poly_ctx = weld_mesh->poly_map[i];
if (poly_ctx != OUT_OF_CONTEXT) {
const WeldPoly *wp = &wpoly[poly_ctx];
const WeldPoly *wp = &weld_mesh->wpoly[poly_ctx];
WeldLoopOfPolyIter iter;
if (!weld_iter_loop_of_poly_begin(&iter, *wp, wloop, mloop, loop_map, nullptr)) {
if (!weld_iter_loop_of_poly_begin(
iter, *wp, weld_mesh->wloop, mloop, weld_mesh->loop_map, nullptr)) {
poly_kills++;
continue;
}
@ -197,13 +191,13 @@ static void weld_assert_poly_and_loop_kill_len(Span<WeldPoly> wpoly,
poly_kills++;
continue;
}
int remain = wp->len;
int remain = wp->loop_len;
int l = wp->loop_start;
while (remain) {
int l_next = l + 1;
int loop_ctx = loop_map[l];
int loop_ctx = weld_mesh->loop_map[l];
if (loop_ctx != OUT_OF_CONTEXT) {
const WeldLoop *wl = &wloop[loop_ctx];
const WeldLoop *wl = &weld_mesh->wloop[loop_ctx];
if (wl->loop_skip_to != OUT_OF_CONTEXT) {
l_next = wl->loop_skip_to;
}
@ -225,19 +219,19 @@ static void weld_assert_poly_and_loop_kill_len(Span<WeldPoly> wpoly,
}
}
const WeldPoly *wp = wpoly_new.data();
for (int i = wpoly_new.size(); i--; wp++) {
if (wp->poly_dst != OUT_OF_CONTEXT) {
for (const int i : weld_mesh->wpoly.index_range().take_back(weld_mesh->wpoly_new_len)) {
const WeldPoly &wp = weld_mesh->wpoly[i];
if (wp.poly_dst != OUT_OF_CONTEXT) {
poly_kills++;
continue;
}
int remain = wp->len;
int l = wp->loop_start;
int remain = wp.loop_len;
int l = wp.loop_start;
while (remain) {
int l_next = l + 1;
int loop_ctx = loop_map[l];
int loop_ctx = weld_mesh->loop_map[l];
if (loop_ctx != OUT_OF_CONTEXT) {
const WeldLoop *wl = &wloop[loop_ctx];
const WeldLoop *wl = &weld_mesh->wloop[loop_ctx];
if (wl->loop_skip_to != OUT_OF_CONTEXT) {
l_next = wl->loop_skip_to;
}
@ -263,10 +257,10 @@ static void weld_assert_poly_no_vert_repetition(const WeldPoly &wp,
Span<MLoop> mloop,
Span<int> loop_map)
{
const int len = wp.len;
Array<int, 64> verts(len);
const int loop_len = wp.loop_len;
Array<int, 64> verts(loop_len);
WeldLoopOfPolyIter iter;
if (!weld_iter_loop_of_poly_begin(&iter, wp, wloop, mloop, loop_map, nullptr)) {
if (!weld_iter_loop_of_poly_begin(iter, wp, wloop, mloop, loop_map, nullptr)) {
return;
}
else {
@ -275,9 +269,9 @@ static void weld_assert_poly_no_vert_repetition(const WeldPoly &wp,
verts[i++] = iter.v;
}
}
for (int i = 0; i < len; i++) {
for (int i = 0; i < loop_len; i++) {
int va = verts[i];
for (int j = i + 1; j < len; j++) {
for (int j = i + 1; j < loop_len; j++) {
int vb = verts[j];
BLI_assert(va != vb);
}
@ -290,7 +284,7 @@ static void weld_assert_poly_len(const WeldPoly *wp, const Span<WeldLoop> wloop)
return;
}
int len = wp->len;
int loop_len = wp->loop_len;
const WeldLoop *wl = &wloop[wp->loops.ofs];
BLI_assert(wp->loop_start <= wl->loop_orig);
@ -304,10 +298,10 @@ static void weld_assert_poly_len(const WeldPoly *wp, const Span<WeldLoop> wloop)
min_len++;
}
}
BLI_assert(len >= min_len);
BLI_assert(loop_len >= min_len);
int max_len = wp->loop_end - wp->loop_start + 1;
BLI_assert(len <= max_len);
BLI_assert(loop_len <= max_len);
}
#endif /* USE_WELD_DEBUG */
@ -786,7 +780,7 @@ static void weld_poly_loop_ctx_alloc(Span<MPoly> mpoly,
wp.loops.ofs = prev_wloop_len;
wp.loop_start = loopstart;
wp.loop_end = loopstart + totloop - 1;
wp.len = totloop;
wp.loop_len = totloop;
wpoly.append(wp);
poly_map[i] = wpoly_len++;
@ -802,15 +796,10 @@ static void weld_poly_loop_ctx_alloc(Span<MPoly> mpoly,
}
}
if (mpoly.size() < (wpoly_len + maybe_new_poly)) {
wpoly.resize(wpoly_len + maybe_new_poly);
}
wpoly.reserve(wpoly.size() + maybe_new_poly);
r_weld_mesh->wloop = std::move(wloop);
r_weld_mesh->wpoly = std::move(wpoly);
r_weld_mesh->wpoly_new = r_weld_mesh->wpoly.as_mutable_span().drop_front(wpoly_len);
r_weld_mesh->wloop_len = wloop_len;
r_weld_mesh->wpoly_len = wpoly_len;
r_weld_mesh->wpoly_new_len = 0;
r_weld_mesh->loop_map = std::move(loop_map);
r_weld_mesh->poly_map = std::move(poly_map);
@ -827,8 +816,8 @@ static void weld_poly_split_recursive(Span<int> vert_dest_map,
int *r_poly_kill,
int *r_loop_kill)
{
int poly_len = r_wp->len;
if (poly_len < 3 || ctx_verts_len < 1) {
int poly_loop_len = r_wp->loop_len;
if (poly_loop_len < 3 || ctx_verts_len < 1) {
return;
}
@ -870,7 +859,7 @@ static void weld_poly_split_recursive(Span<int> vert_dest_map,
}
if (vert_a == vert_b) {
const int dist_a = wlb->loop_orig - wla->loop_orig - killed_ab;
const int dist_b = poly_len - dist_a;
const int dist_b = poly_loop_len - dist_a;
BLI_assert(dist_a != 0 && dist_b != 0);
if (dist_a == 1 || dist_b == 1) {
@ -886,7 +875,7 @@ static void weld_poly_split_recursive(Span<int> vert_dest_map,
wla->flag = ELEM_COLLAPSED;
wl_tmp->flag = ELEM_COLLAPSED;
loop_kill += 2;
poly_len -= 2;
poly_loop_len -= 2;
}
if (dist_b == 2) {
if (wl_tmp != nullptr) {
@ -901,20 +890,22 @@ static void weld_poly_split_recursive(Span<int> vert_dest_map,
wl_tmp->flag = ELEM_COLLAPSED;
}
loop_kill += 2;
poly_len -= 2;
poly_loop_len -= 2;
}
if (wl_tmp == nullptr) {
const int new_loops_len = lb - la;
const int new_loops_ofs = ctx_loops_ofs + la;
WeldPoly *new_wp = &r_weld_mesh->wpoly_new[r_weld_mesh->wpoly_new_len++];
r_weld_mesh->wpoly.increase_size_by_unchecked(1);
WeldPoly *new_wp = &r_weld_mesh->wpoly.last();
new_wp->poly_dst = OUT_OF_CONTEXT;
new_wp->poly_orig = r_wp->poly_orig;
new_wp->loops.len = new_loops_len;
new_wp->loops.ofs = new_loops_ofs;
new_wp->loop_start = wla->loop_orig;
new_wp->loop_end = wlb_prev->loop_orig;
new_wp->len = dist_a;
new_wp->loop_len = dist_a;
r_weld_mesh->wpoly_new_len++;
weld_poly_split_recursive(vert_dest_map,
#ifdef USE_WELD_DEBUG
mloop,
@ -924,8 +915,8 @@ static void weld_poly_split_recursive(Span<int> vert_dest_map,
r_weld_mesh,
r_poly_kill,
r_loop_kill);
BLI_assert(dist_b == poly_len - dist_a);
poly_len = dist_b;
BLI_assert(dist_b == poly_loop_len - dist_a);
poly_loop_len = dist_b;
if (wla_prev->loop_orig > wla->loop_orig) {
/* New start. */
r_wp->loop_start = wlb->loop_orig;
@ -950,7 +941,7 @@ static void weld_poly_split_recursive(Span<int> vert_dest_map,
wla_prev = wla;
}
}
r_wp->len = poly_len;
r_wp->loop_len = poly_loop_len;
*r_loop_kill += loop_kill;
#ifdef USE_WELD_DEBUG
@ -968,10 +959,8 @@ static void weld_poly_loop_ctx_setup(Span<MLoop> mloop,
MutableSpan<WeldGroup> r_vlinks,
WeldMesh *r_weld_mesh)
{
MutableSpan<WeldPoly> wpoly = r_weld_mesh->wpoly;
WeldPoly *wpoly = r_weld_mesh->wpoly.data();
MutableSpan<WeldLoop> wloop = r_weld_mesh->wloop;
int wpoly_len = r_weld_mesh->wpoly_len;
int wpoly_new_len = 0;
int poly_kill_len = 0;
int loop_kill_len = 0;
@ -979,28 +968,31 @@ static void weld_poly_loop_ctx_setup(Span<MLoop> mloop,
if (remain_edge_ctx_len) {
/* Setup Poly/Loop. Note that `wpoly_len` may be different than `wpoly.size()` here. */
for (const int i : IndexRange(wpoly_len)) {
/* Setup Poly/Loop. */
/* `wpoly.size()` may change during the loop,
* so make it clear that we are only working with the original wpolys. */
IndexRange wpoly_original_range = r_weld_mesh->wpoly.index_range();
for (const int i : wpoly_original_range) {
WeldPoly &wp = wpoly[i];
const int ctx_loops_len = wp.loops.len;
const int ctx_loops_ofs = wp.loops.ofs;
int poly_len = wp.len;
int poly_loop_len = wp.loop_len;
int ctx_verts_len = 0;
WeldLoop *wl = &wloop[ctx_loops_ofs];
for (int l = ctx_loops_len; l--; wl++) {
const int edge_dest = wl->edge;
if (edge_dest == ELEM_COLLAPSED) {
wl->flag = ELEM_COLLAPSED;
if (poly_len == 3) {
if (poly_loop_len == 3) {
wp.flag = ELEM_COLLAPSED;
poly_kill_len++;
loop_kill_len += 3;
poly_len = 0;
poly_loop_len = 0;
break;
}
loop_kill_len++;
poly_len--;
poly_loop_len--;
}
else {
const int vert_dst = wl->vert;
@ -1010,10 +1002,10 @@ static void weld_poly_loop_ctx_setup(Span<MLoop> mloop,
}
}
if (poly_len) {
wp.len = poly_len;
if (poly_loop_len) {
wp.loop_len = poly_loop_len;
#ifdef USE_WELD_DEBUG
weld_assert_poly_len(wp, wloop);
weld_assert_poly_len(&wp, wloop);
#endif
weld_poly_split_recursive(vert_dest_map,
@ -1025,32 +1017,19 @@ static void weld_poly_loop_ctx_setup(Span<MLoop> mloop,
r_weld_mesh,
&poly_kill_len,
&loop_kill_len);
wpoly_new_len = r_weld_mesh->wpoly_new_len;
}
}
#ifdef USE_WELD_DEBUG
weld_assert_poly_and_loop_kill_len(wpoly,
r_weld_mesh->wpoly_new,
wloop,
mloop,
loop_map,
r_weld_mesh->poly_map,
mpoly,
poly_kill_len,
loop_kill_len);
weld_assert_poly_and_loop_kill_len(r_weld_mesh, mloop, mpoly, poly_kill_len, loop_kill_len);
#endif
/* Setup Polygon Overlap. */
const int wpoly_and_new_len = wpoly_len + wpoly_new_len;
r_vlinks.fill({0, 0});
MutableSpan<WeldGroup> v_links = r_vlinks;
for (const int i : IndexRange(wpoly_and_new_len)) {
const WeldPoly &wp = wpoly[i];
for (const WeldPoly &wp : r_weld_mesh->wpoly) {
WeldLoopOfPolyIter iter;
if (weld_iter_loop_of_poly_begin(iter, wp, wloop, mloop, loop_map, nullptr)) {
while (weld_iter_loop_of_poly_next(iter)) {
@ -1068,7 +1047,7 @@ static void weld_poly_loop_ctx_setup(Span<MLoop> mloop,
if (link_len) {
Array<int> link_poly_buffer(link_len);
for (const int i : IndexRange(wpoly_and_new_len)) {
for (const int i : IndexRange(r_weld_mesh->wpoly.size())) {
const WeldPoly &wp = wpoly[i];
WeldLoopOfPolyIter iter;
if (weld_iter_loop_of_poly_begin(iter, wp, wloop, mloop, loop_map, nullptr)) {
@ -1086,7 +1065,7 @@ static void weld_poly_loop_ctx_setup(Span<MLoop> mloop,
int polys_len_a, polys_len_b, *polys_ctx_a, *polys_ctx_b, p_ctx_a, p_ctx_b;
polys_len_b = p_ctx_b = 0; /* silence warnings */
for (const int i : IndexRange(wpoly_and_new_len)) {
for (const int i : IndexRange(r_weld_mesh->wpoly.size())) {
const WeldPoly &wp = wpoly[i];
if (wp.poly_dst != OUT_OF_CONTEXT) {
/* No need to retest poly.
@ -1103,7 +1082,7 @@ static void weld_poly_loop_ctx_setup(Span<MLoop> mloop,
BLI_assert(link_poly_buffer[link_a->ofs] == i);
continue;
}
int wp_len = wp.len;
int wp_loop_len = wp.loop_len;
polys_ctx_a = &link_poly_buffer[link_a->ofs];
for (; polys_len_a--; polys_ctx_a++) {
p_ctx_a = *polys_ctx_a;
@ -1112,7 +1091,7 @@ static void weld_poly_loop_ctx_setup(Span<MLoop> mloop,
}
WeldPoly *wp_tmp = &wpoly[p_ctx_a];
if (wp_tmp->len != wp_len) {
if (wp_tmp->loop_len != wp_loop_len) {
continue;
}
@ -1151,31 +1130,23 @@ static void weld_poly_loop_ctx_setup(Span<MLoop> mloop,
BLI_assert(wp_tmp->poly_dst == OUT_OF_CONTEXT);
BLI_assert(wp_tmp != &wp);
wp_tmp->poly_dst = wp.poly_orig;
loop_kill_len += wp_tmp->len;
loop_kill_len += wp_tmp->loop_len;
poly_kill_len++;
}
}
}
}
else {
poly_kill_len = r_weld_mesh->wpoly_len;
loop_kill_len = r_weld_mesh->wloop_len;
poly_kill_len = r_weld_mesh->wpoly.size();
loop_kill_len = r_weld_mesh->wloop.size();
for (WeldPoly &wp : wpoly) {
for (WeldPoly &wp : r_weld_mesh->wpoly) {
wp.flag = ELEM_COLLAPSED;
}
}
#ifdef USE_WELD_DEBUG
weld_assert_poly_and_loop_kill_len(wpoly,
r_weld_mesh->wpoly_new,
wloop,
mloop,
loop_map,
r_weld_mesh->poly_map,
mpoly,
poly_kill_len,
loop_kill_len);
weld_assert_poly_and_loop_kill_len(r_weld_mesh, mloop, mpoly, poly_kill_len, loop_kill_len);
#endif
r_weld_mesh->poly_kill_len = poly_kill_len;
@ -1545,8 +1516,9 @@ static Mesh *create_merged_mesh(const Mesh &mesh,
r_i++;
}
for (const int i : IndexRange(weld_mesh.wpoly_new_len)) {
const WeldPoly &wp = weld_mesh.wpoly_new[i];
/* New Polygons. */
for (const int i : weld_mesh.wpoly.index_range().take_back(weld_mesh.wpoly_new_len)) {
const WeldPoly &wp = weld_mesh.wpoly[i];
const int loop_start = loop_cur;
WeldLoopOfPolyIter iter;
if (!weld_iter_loop_of_poly_begin(

View File

@ -362,8 +362,10 @@ typedef struct LineartRenderTaskInfo {
int thread_id;
/* #pending_edges here only stores a refernce to a portion in LineartRenderbuffer::pending_edges,
* assigned by the occlusion scheduler. */
/**
* #pending_edges here only stores a reference to a portion in
* LineartRenderbuffer::pending_edges, assigned by the occlusion scheduler.
*/
struct LineartPendingEdges pending_edges;
} LineartRenderTaskInfo;

View File

@ -446,6 +446,7 @@ list(APPEND INC ${CMAKE_CURRENT_BINARY_DIR})
set(SRC_SHADER_CREATE_INFOS
../draw/engines/basic/shaders/infos/basic_depth_info.hh
../draw/engines/eevee_next/shaders/infos/eevee_material_info.hh
../draw/engines/eevee_next/shaders/infos/eevee_velocity_info.hh
../draw/engines/gpencil/shaders/infos/gpencil_info.hh
../draw/engines/gpencil/shaders/infos/gpencil_vfx_info.hh
../draw/engines/overlay/shaders/infos/antialiasing_info.hh

View File

@ -15,4 +15,4 @@ typedef enum eGPUFrontFace {
#ifdef __cplusplus
}
#endif
#endif

View File

@ -47,6 +47,18 @@ void GPU_storagebuf_clear(GPUStorageBuf *ssbo,
void *data);
void GPU_storagebuf_clear_to_zero(GPUStorageBuf *ssbo);
/**
* \brief Copy a part of a vertex buffer to a storage buffer.
*
* \param ssbo: destination storage buffer
* \param src: source vertex buffer
* \param dst_offset: where to start copying to (in bytes).
* \param src_offset: where to start copying from (in bytes).
* \param copy_size: byte size of the segment to copy.
*/
void GPU_storagebuf_copy_sub_from_vertbuf(
GPUStorageBuf *ssbo, GPUVertBuf *src, uint dst_offset, uint src_offset, uint copy_size);
#ifdef __cplusplus
}
#endif
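For reference, a minimal usage sketch of the helper declared above. Only the `GPU_storagebuf_copy_sub_from_vertbuf()` call itself comes from this patch; the surrounding setup (vertex format, allocation sizes, the `GPU_storagebuf_create_ex()` arguments) is an illustrative assumption:
/* Hypothetical usage sketch, not part of this patch: copy four float3
 * positions from the start of a vertex buffer into a storage buffer. */
static GPUStorageBuf *example_ssbo_from_positions(void)
{
  GPUVertFormat format = {0};
  GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
  GPU_vertbuf_data_alloc(vbo, 4);
  /* ... fill and upload the vertex data here ... */
  const uint size = 4 * sizeof(float[3]);
  GPUStorageBuf *ssbo = GPU_storagebuf_create_ex(size, NULL, GPU_USAGE_STATIC, "example_ssbo");
  /* Offsets and size are in bytes, as documented above. */
  GPU_storagebuf_copy_sub_from_vertbuf(ssbo, vbo, 0, 0, size);
  GPU_vertbuf_discard(vbo);
  return ssbo;
}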

View File

@ -670,14 +670,20 @@ Vector<const char *> gpu_shader_dependency_get_resolved_source(
const StringRefNull shader_source_name)
{
Vector<const char *> result;
GPUSource *source = g_sources->lookup(shader_source_name);
source->build(result);
GPUSource *src = g_sources->lookup_default(shader_source_name, nullptr);
if (src == nullptr) {
std::cout << "Error source not found : " << shader_source_name << std::endl;
}
src->build(result);
return result;
}
StringRefNull gpu_shader_dependency_get_source(const StringRefNull shader_source_name)
{
GPUSource *src = g_sources->lookup(shader_source_name);
GPUSource *src = g_sources->lookup_default(shader_source_name, nullptr);
if (src == nullptr) {
std::cout << "Error source not found : " << shader_source_name << std::endl;
}
return src->source;
}

View File

@ -19,6 +19,7 @@
#include "GPU_storage_buffer.h"
#include "gpu_storage_buffer_private.hh"
#include "gpu_vertex_buffer_private.hh"
/* -------------------------------------------------------------------- */
/** \name Creation & Deletion
@ -103,4 +104,10 @@ void GPU_storagebuf_clear_to_zero(GPUStorageBuf *ssbo)
GPU_storagebuf_clear(ssbo, GPU_R32UI, GPU_DATA_UINT, &data);
}
void GPU_storagebuf_copy_sub_from_vertbuf(
GPUStorageBuf *ssbo, GPUVertBuf *src, uint dst_offset, uint src_offset, uint copy_size)
{
unwrap(ssbo)->copy_sub(unwrap(src), dst_offset, src_offset, copy_size);
}
/** \} */

View File

@ -43,6 +43,7 @@ class StorageBuf {
virtual void clear(eGPUTextureFormat internal_format,
eGPUDataFormat data_format,
void *data) = 0;
virtual void copy_sub(VertBuf *src, uint dst_offset, uint src_offset, uint copy_size) = 0;
};
/* Syntactic sugar. */
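Each backend is expected to implement this new pure virtual. As a purely illustrative sketch (not from this patch), an OpenGL-style backend could service the copy with `glCopyBufferSubData()`; `GLVertBuf`, `vbo_id_` and `ssbo_id_` are assumed class/member names here:
/* Illustrative sketch only; a real backend must also make sure the source
 * vertex buffer data has been uploaded to the device before copying. */
void GLStorageBuf::copy_sub(VertBuf *src, uint dst_offset, uint src_offset, uint copy_size)
{
  GLVertBuf *src_gl = static_cast<GLVertBuf *>(src);
  glBindBuffer(GL_COPY_READ_BUFFER, src_gl->vbo_id_);
  glBindBuffer(GL_COPY_WRITE_BUFFER, ssbo_id_);
  glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, src_offset, dst_offset, copy_size);
  glBindBuffer(GL_COPY_READ_BUFFER, 0);
  glBindBuffer(GL_COPY_WRITE_BUFFER, 0);
}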

View File

@ -7,4 +7,4 @@ in vec2 texCoord_interp;
void main()
{
gl_FragDepth = textureLod(source_data, texCoord_interp, mip).r;
}
}

View File

@ -10,4 +10,4 @@ void main()
uint stencil = (val >> 24) & 0xFFu;
uint depth = (val)&0xFFFFFFu;
gl_FragDepth = float(depth) / float(0xFFFFFFu);
}
}

View File

@ -9,4 +9,4 @@ void main()
uint val = textureLod(source_data, texCoord_interp, mip).r;
uint depth = (val) & (0xFFFFFFFFu);
gl_FragDepth = float(depth) / float(0xFFFFFFFFu);
}
}

View File

@ -30,4 +30,4 @@ void main()
texCoord_interp = tex.zy;
}
gl_Position = vec4(rect.xy, 0.0f, 1.0f);
}
}

Some files were not shown because too many files have changed in this diff.