Fix: Added prerequisite checks for using VK_Layer_Validation #105922

Merged
Jeroen Bakker merged 16 commits from :vk_debug_break_down into main 2023-03-28 10:45:56 +02:00
170 changed files with 3375 additions and 1729 deletions
Showing only changes of commit 0427fb7227 - Show all commits

View File

@ -617,10 +617,12 @@ endif()
option(WITH_OPENGL "When off limits visibility of the opengl headers to just bf_gpu and gawain (temporary option for development purposes)" ON)
option(WITH_GPU_BUILDTIME_SHADER_BUILDER "Shader builder is a developer option enabling linting on GLSL during compilation" OFF)
option(WITH_RENDERDOC "Use Renderdoc API to capture frames" OFF)
mark_as_advanced(
WITH_OPENGL
WITH_GPU_BUILDTIME_SHADER_BUILDER
WITH_RENDERDOC
)
# Vulkan

View File

@ -78,12 +78,7 @@ include(cmake/tbb.cmake)
include(cmake/python.cmake)
include(cmake/llvm.cmake)
include(cmake/osl.cmake)
option(USE_PIP_NUMPY "Install NumPy using pip wheel instead of building from source" OFF)
if(APPLE AND ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64"))
set(USE_PIP_NUMPY ON)
else()
include(cmake/numpy.cmake)
endif()
include(cmake/numpy.cmake)
include(cmake/python_site_packages.cmake)
include(cmake/package_python.cmake)
include(cmake/openimageio.cmake)

View File

@ -38,15 +38,6 @@ ExternalProject_Add(external_python_site_packages
--no-binary :all:
)
if(USE_PIP_NUMPY)
# Use only wheel (and not build from source) to stop NumPy from linking against buggy
# Accelerate framework backend on macOS. Official wheels are built with OpenBLAS.
ExternalProject_Add_Step(external_python_site_packages after_install
COMMAND ${PYTHON_BINARY} -m pip install --no-cache-dir numpy==${NUMPY_VERSION} --only-binary :all:
DEPENDEES install
)
endif()
add_dependencies(
external_python_site_packages
external_python

View File

@ -165,9 +165,9 @@ set(OPENMP_URI https://github.com/llvm/llvm-project/releases/download/llvmorg-${
set(OPENMP_HASH_TYPE MD5)
set(OPENMP_FILE openmp-${OPENMP_VERSION}.src.tar.xz)
set(OPENIMAGEIO_VERSION v2.4.6.0)
set(OPENIMAGEIO_VERSION v2.4.9.0)
set(OPENIMAGEIO_URI https://github.com/OpenImageIO/oiio/archive/refs/tags/${OPENIMAGEIO_VERSION}.tar.gz)
set(OPENIMAGEIO_HASH c7acc1b9a8fda04ef48f7de1feda4dae)
set(OPENIMAGEIO_HASH 7da92a7d6029921a8599a977ff1efa2a)
set(OPENIMAGEIO_HASH_TYPE MD5)
set(OPENIMAGEIO_FILE OpenImageIO-${OPENIMAGEIO_VERSION}.tar.gz)

View File

@ -517,7 +517,7 @@ OPENEXR_FORCE_REBUILD=false
OPENEXR_SKIP=false
_with_built_openexr=false
OIIO_VERSION="2.4.6.0"
OIIO_VERSION="2.4.9.0"
OIIO_VERSION_SHORT="2.4"
OIIO_VERSION_MIN="2.2.0"
OIIO_VERSION_MEX="2.5.0"

View File

@ -80,6 +80,7 @@ set(_CLANG_FIND_COMPONENTS
clangAST
clangLex
clangBasic
clangSupport
)
set(_CLANG_LIBRARIES)
@ -94,7 +95,9 @@ foreach(COMPONENT ${_CLANG_FIND_COMPONENTS})
PATH_SUFFIXES
lib64 lib
)
list(APPEND _CLANG_LIBRARIES "${CLANG_${UPPERCOMPONENT}_LIBRARY}")
if(CLANG_${UPPERCOMPONENT}_LIBRARY)
list(APPEND _CLANG_LIBRARIES "${CLANG_${UPPERCOMPONENT}_LIBRARY}")
endif()
endforeach()

View File

@ -178,8 +178,8 @@ if(NOT MSVC_CLANG)
endif()
if(WITH_WINDOWS_SCCACHE AND CMAKE_VS_MSBUILD_COMMAND)
message(WARNING "Disabling sccache, sccache is not supported with msbuild")
set(WITH_WINDOWS_SCCACHE OFF)
message(WARNING "Disabling sccache, sccache is not supported with msbuild")
set(WITH_WINDOWS_SCCACHE OFF)
endif()
# Debug Symbol format

5
extern/renderdoc/README.blender vendored Normal file
View File

@ -0,0 +1,5 @@
Project: Renderdoc APP
URL: https://github.com/baldurk/renderdoc/
License: MIT
Upstream version: d47e79ae079783935b8857d6a1730440eafb0b38
Local modifications: None

723
extern/renderdoc/include/renderdoc_app.h vendored Normal file
View File

@ -0,0 +1,723 @@
/******************************************************************************
* The MIT License (MIT)
*
* Copyright (c) 2019-2023 Baldur Karlsson
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
#pragma once
//////////////////////////////////////////////////////////////////////////////////////////////////
//
// Documentation for the API is available at https://renderdoc.org/docs/in_application_api.html
//
#if !defined(RENDERDOC_NO_STDINT)
#include <stdint.h>
#endif
#if defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER)
#define RENDERDOC_CC __cdecl
#elif defined(__linux__)
#define RENDERDOC_CC
#elif defined(__APPLE__)
#define RENDERDOC_CC
#else
#error "Unknown platform"
#endif
#ifdef __cplusplus
extern "C" {
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////
// Constants not used directly in below API
// This is a GUID/magic value used for when applications pass a path where shader debug
// information can be found to match up with a stripped shader.
// the define can be used like so: const GUID RENDERDOC_ShaderDebugMagicValue =
// RENDERDOC_ShaderDebugMagicValue_value
#define RENDERDOC_ShaderDebugMagicValue_struct \
{ \
0xeab25520, 0x6670, 0x4865, 0x84, 0x29, 0x6c, 0x8, 0x51, 0x54, 0x00, 0xff \
}
// as an alternative when you want a byte array (assuming x86 endianness):
#define RENDERDOC_ShaderDebugMagicValue_bytearray \
{ \
0x20, 0x55, 0xb2, 0xea, 0x70, 0x66, 0x65, 0x48, 0x84, 0x29, 0x6c, 0x8, 0x51, 0x54, 0x00, 0xff \
}
// truncated version when only a uint64_t is available (e.g. Vulkan tags):
#define RENDERDOC_ShaderDebugMagicValue_truncated 0x48656670eab25520ULL
//////////////////////////////////////////////////////////////////////////////////////////////////
// RenderDoc capture options
//
typedef enum RENDERDOC_CaptureOption {
// Allow the application to enable vsync
//
// Default - enabled
//
// 1 - The application can enable or disable vsync at will
// 0 - vsync is force disabled
eRENDERDOC_Option_AllowVSync = 0,
// Allow the application to enable fullscreen
//
// Default - enabled
//
// 1 - The application can enable or disable fullscreen at will
// 0 - fullscreen is force disabled
eRENDERDOC_Option_AllowFullscreen = 1,
// Record API debugging events and messages
//
// Default - disabled
//
// 1 - Enable built-in API debugging features and records the results into
// the capture, which is matched up with events on replay
// 0 - no API debugging is forcibly enabled
eRENDERDOC_Option_APIValidation = 2,
eRENDERDOC_Option_DebugDeviceMode = 2, // deprecated name of this enum
// Capture CPU callstacks for API events
//
// Default - disabled
//
// 1 - Enables capturing of callstacks
// 0 - no callstacks are captured
eRENDERDOC_Option_CaptureCallstacks = 3,
// When capturing CPU callstacks, only capture them from actions.
// This option does nothing without the above option being enabled
//
// Default - disabled
//
// 1 - Only captures callstacks for actions.
// Ignored if CaptureCallstacks is disabled
// 0 - Callstacks, if enabled, are captured for every event.
eRENDERDOC_Option_CaptureCallstacksOnlyDraws = 4,
eRENDERDOC_Option_CaptureCallstacksOnlyActions = 4,
// Specify a delay in seconds to wait for a debugger to attach, after
// creating or injecting into a process, before continuing to allow it to run.
//
// 0 indicates no delay, and the process will run immediately after injection
//
// Default - 0 seconds
//
eRENDERDOC_Option_DelayForDebugger = 5,
// Verify buffer access. This includes checking the memory returned by a Map() call to
// detect any out-of-bounds modification, as well as initialising buffers with undefined contents
// to a marker value to catch use of uninitialised memory.
//
// NOTE: This option is only valid for OpenGL and D3D11. Explicit APIs such as D3D12 and Vulkan do
// not do the same kind of interception & checking and undefined contents are really undefined.
//
// Default - disabled
//
// 1 - Verify buffer access
// 0 - No verification is performed, and overwriting bounds may cause crashes or corruption in
// RenderDoc.
eRENDERDOC_Option_VerifyBufferAccess = 6,
// The old name for eRENDERDOC_Option_VerifyBufferAccess was eRENDERDOC_Option_VerifyMapWrites.
// This option now controls the filling of uninitialised buffers with 0xdddddddd which was
// previously always enabled
eRENDERDOC_Option_VerifyMapWrites = eRENDERDOC_Option_VerifyBufferAccess,
// Hooks any system API calls that create child processes, and injects
// RenderDoc into them recursively with the same options.
//
// Default - disabled
//
// 1 - Hooks into spawned child processes
// 0 - Child processes are not hooked by RenderDoc
eRENDERDOC_Option_HookIntoChildren = 7,
// By default RenderDoc only includes resources in the final capture necessary
// for that frame, this allows you to override that behaviour.
//
// Default - disabled
//
// 1 - all live resources at the time of capture are included in the capture
// and available for inspection
// 0 - only the resources referenced by the captured frame are included
eRENDERDOC_Option_RefAllResources = 8,
// **NOTE**: As of RenderDoc v1.1 this option has been deprecated. Setting or
// getting it will be ignored, to allow compatibility with older versions.
// In v1.1 the option acts as if it's always enabled.
//
// By default RenderDoc skips saving initial states for resources where the
// previous contents don't appear to be used, assuming that writes before
// reads indicate previous contents aren't used.
//
// Default - disabled
//
// 1 - initial contents at the start of each captured frame are saved, even if
// they are later overwritten or cleared before being used.
// 0 - unless a read is detected, initial contents will not be saved and will
// appear as black or empty data.
eRENDERDOC_Option_SaveAllInitials = 9,
// In APIs that allow for the recording of command lists to be replayed later,
// RenderDoc may choose to not capture command lists before a frame capture is
// triggered, to reduce overheads. This means any command lists recorded once
// and replayed many times will not be available and may cause a failure to
// capture.
//
// NOTE: This is only true for APIs where multithreading is difficult or
// discouraged. Newer APIs like Vulkan and D3D12 will ignore this option
// and always capture all command lists since the API is heavily oriented
// around it and the overheads have been reduced by API design.
//
// 1 - All command lists are captured from the start of the application
// 0 - Command lists are only captured if their recording begins during
// the period when a frame capture is in progress.
eRENDERDOC_Option_CaptureAllCmdLists = 10,
// Mute API debugging output when the API validation mode option is enabled
//
// Default - enabled
//
// 1 - Mute any API debug messages from being displayed or passed through
// 0 - API debugging is displayed as normal
eRENDERDOC_Option_DebugOutputMute = 11,
// Option to allow vendor extensions to be used even when they may be
// incompatible with RenderDoc and cause corrupted replays or crashes.
//
// Default - inactive
//
// No values are documented, this option should only be used when absolutely
// necessary as directed by a RenderDoc developer.
eRENDERDOC_Option_AllowUnsupportedVendorExtensions = 12,
} RENDERDOC_CaptureOption;
// Sets an option that controls how RenderDoc behaves on capture.
//
// Returns 1 if the option and value are valid
// Returns 0 if either is invalid and the option is unchanged
typedef int(RENDERDOC_CC *pRENDERDOC_SetCaptureOptionU32)(RENDERDOC_CaptureOption opt, uint32_t val);
typedef int(RENDERDOC_CC *pRENDERDOC_SetCaptureOptionF32)(RENDERDOC_CaptureOption opt, float val);
// Gets the current value of an option as a uint32_t
//
// If the option is invalid, 0xffffffff is returned
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetCaptureOptionU32)(RENDERDOC_CaptureOption opt);
// Gets the current value of an option as a float
//
// If the option is invalid, -FLT_MAX is returned
typedef float(RENDERDOC_CC *pRENDERDOC_GetCaptureOptionF32)(RENDERDOC_CaptureOption opt);
typedef enum RENDERDOC_InputButton {
// '0' - '9' matches ASCII values
eRENDERDOC_Key_0 = 0x30,
eRENDERDOC_Key_1 = 0x31,
eRENDERDOC_Key_2 = 0x32,
eRENDERDOC_Key_3 = 0x33,
eRENDERDOC_Key_4 = 0x34,
eRENDERDOC_Key_5 = 0x35,
eRENDERDOC_Key_6 = 0x36,
eRENDERDOC_Key_7 = 0x37,
eRENDERDOC_Key_8 = 0x38,
eRENDERDOC_Key_9 = 0x39,
// 'A' - 'Z' matches ASCII values
eRENDERDOC_Key_A = 0x41,
eRENDERDOC_Key_B = 0x42,
eRENDERDOC_Key_C = 0x43,
eRENDERDOC_Key_D = 0x44,
eRENDERDOC_Key_E = 0x45,
eRENDERDOC_Key_F = 0x46,
eRENDERDOC_Key_G = 0x47,
eRENDERDOC_Key_H = 0x48,
eRENDERDOC_Key_I = 0x49,
eRENDERDOC_Key_J = 0x4A,
eRENDERDOC_Key_K = 0x4B,
eRENDERDOC_Key_L = 0x4C,
eRENDERDOC_Key_M = 0x4D,
eRENDERDOC_Key_N = 0x4E,
eRENDERDOC_Key_O = 0x4F,
eRENDERDOC_Key_P = 0x50,
eRENDERDOC_Key_Q = 0x51,
eRENDERDOC_Key_R = 0x52,
eRENDERDOC_Key_S = 0x53,
eRENDERDOC_Key_T = 0x54,
eRENDERDOC_Key_U = 0x55,
eRENDERDOC_Key_V = 0x56,
eRENDERDOC_Key_W = 0x57,
eRENDERDOC_Key_X = 0x58,
eRENDERDOC_Key_Y = 0x59,
eRENDERDOC_Key_Z = 0x5A,
// leave the rest of the ASCII range free
// in case we want to use it later
eRENDERDOC_Key_NonPrintable = 0x100,
eRENDERDOC_Key_Divide,
eRENDERDOC_Key_Multiply,
eRENDERDOC_Key_Subtract,
eRENDERDOC_Key_Plus,
eRENDERDOC_Key_F1,
eRENDERDOC_Key_F2,
eRENDERDOC_Key_F3,
eRENDERDOC_Key_F4,
eRENDERDOC_Key_F5,
eRENDERDOC_Key_F6,
eRENDERDOC_Key_F7,
eRENDERDOC_Key_F8,
eRENDERDOC_Key_F9,
eRENDERDOC_Key_F10,
eRENDERDOC_Key_F11,
eRENDERDOC_Key_F12,
eRENDERDOC_Key_Home,
eRENDERDOC_Key_End,
eRENDERDOC_Key_Insert,
eRENDERDOC_Key_Delete,
eRENDERDOC_Key_PageUp,
eRENDERDOC_Key_PageDn,
eRENDERDOC_Key_Backspace,
eRENDERDOC_Key_Tab,
eRENDERDOC_Key_PrtScrn,
eRENDERDOC_Key_Pause,
eRENDERDOC_Key_Max,
} RENDERDOC_InputButton;
// Sets which key or keys can be used to toggle focus between multiple windows
//
// If keys is NULL or num is 0, toggle keys will be disabled
typedef void(RENDERDOC_CC *pRENDERDOC_SetFocusToggleKeys)(RENDERDOC_InputButton *keys, int num);
// Sets which key or keys can be used to capture the next frame
//
// If keys is NULL or num is 0, captures keys will be disabled
typedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureKeys)(RENDERDOC_InputButton *keys, int num);
typedef enum RENDERDOC_OverlayBits {
// This single bit controls whether the overlay is enabled or disabled globally
eRENDERDOC_Overlay_Enabled = 0x1,
// Show the average framerate over several seconds as well as min/max
eRENDERDOC_Overlay_FrameRate = 0x2,
// Show the current frame number
eRENDERDOC_Overlay_FrameNumber = 0x4,
// Show a list of recent captures, and how many captures have been made
eRENDERDOC_Overlay_CaptureList = 0x8,
// Default values for the overlay mask
eRENDERDOC_Overlay_Default = (eRENDERDOC_Overlay_Enabled | eRENDERDOC_Overlay_FrameRate |
eRENDERDOC_Overlay_FrameNumber | eRENDERDOC_Overlay_CaptureList),
// Enable all bits
eRENDERDOC_Overlay_All = ~0U,
// Disable all bits
eRENDERDOC_Overlay_None = 0,
} RENDERDOC_OverlayBits;
// returns the overlay bits that have been set
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetOverlayBits)();
// sets the overlay bits with an and & or mask
typedef void(RENDERDOC_CC *pRENDERDOC_MaskOverlayBits)(uint32_t And, uint32_t Or);
// this function will attempt to remove RenderDoc's hooks in the application.
//
// Note: that this can only work correctly if done immediately after
// the module is loaded, before any API work happens. RenderDoc will remove its
// injected hooks and shut down. Behaviour is undefined if this is called
// after any API functions have been called, and there is still no guarantee of
// success.
typedef void(RENDERDOC_CC *pRENDERDOC_RemoveHooks)();
// DEPRECATED: compatibility for code compiled against pre-1.4.1 headers.
typedef pRENDERDOC_RemoveHooks pRENDERDOC_Shutdown;
// This function will unload RenderDoc's crash handler.
//
// If you use your own crash handler and don't want RenderDoc's handler to
// intercede, you can call this function to unload it and any unhandled
// exceptions will pass to the next handler.
typedef void(RENDERDOC_CC *pRENDERDOC_UnloadCrashHandler)();
// Sets the capture file path template
//
// pathtemplate is a UTF-8 string that gives a template for how captures will be named
// and where they will be saved.
//
// Any extension is stripped off the path, and captures are saved in the directory
// specified, and named with the filename and the frame number appended. If the
// directory does not exist it will be created, including any parent directories.
//
// If pathtemplate is NULL, the template will remain unchanged
//
// Example:
//
// SetCaptureFilePathTemplate("my_captures/example");
//
// Capture #1 -> my_captures/example_frame123.rdc
// Capture #2 -> my_captures/example_frame456.rdc
typedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureFilePathTemplate)(const char *pathtemplate);
// returns the current capture path template, see SetCaptureFileTemplate above, as a UTF-8 string
typedef const char *(RENDERDOC_CC *pRENDERDOC_GetCaptureFilePathTemplate)();
// DEPRECATED: compatibility for code compiled against pre-1.1.2 headers.
typedef pRENDERDOC_SetCaptureFilePathTemplate pRENDERDOC_SetLogFilePathTemplate;
typedef pRENDERDOC_GetCaptureFilePathTemplate pRENDERDOC_GetLogFilePathTemplate;
// returns the number of captures that have been made
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetNumCaptures)();
// This function returns the details of a capture, by index. New captures are added
// to the end of the list.
//
// filename will be filled with the absolute path to the capture file, as a UTF-8 string
// pathlength will be written with the length in bytes of the filename string
// timestamp will be written with the time of the capture, in seconds since the Unix epoch
//
// Any of the parameters can be NULL and they'll be skipped.
//
// The function will return 1 if the capture index is valid, or 0 if the index is invalid
// If the index is invalid, the values will be unchanged
//
// Note: when captures are deleted in the UI they will remain in this list, so the
// capture path may not exist anymore.
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_GetCapture)(uint32_t idx, char *filename,
uint32_t *pathlength, uint64_t *timestamp);
// Sets the comments associated with a capture file. These comments are displayed in the
// UI program when opening.
//
// filePath should be a path to the capture file to add comments to. If set to NULL or ""
// the most recent capture file created made will be used instead.
// comments should be a NULL-terminated UTF-8 string to add as comments.
//
// Any existing comments will be overwritten.
typedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureFileComments)(const char *filePath,
const char *comments);
// returns 1 if the RenderDoc UI is connected to this application, 0 otherwise
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_IsTargetControlConnected)();
// DEPRECATED: compatibility for code compiled against pre-1.1.1 headers.
// This was renamed to IsTargetControlConnected in API 1.1.1, the old typedef is kept here for
// backwards compatibility with old code, it is castable either way since it's ABI compatible
// as the same function pointer type.
typedef pRENDERDOC_IsTargetControlConnected pRENDERDOC_IsRemoteAccessConnected;
// This function will launch the Replay UI associated with the RenderDoc library injected
// into the running application.
//
// if connectTargetControl is 1, the Replay UI will be launched with a command line parameter
// to connect to this application
// cmdline is the rest of the command line, as a UTF-8 string. E.g. a captures to open
// if cmdline is NULL, the command line will be empty.
//
// returns the PID of the replay UI if successful, 0 if not successful.
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_LaunchReplayUI)(uint32_t connectTargetControl,
const char *cmdline);
// RenderDoc can return a higher version than requested if it's backwards compatible,
// this function returns the actual version returned. If a parameter is NULL, it will be
// ignored and the others will be filled out.
typedef void(RENDERDOC_CC *pRENDERDOC_GetAPIVersion)(int *major, int *minor, int *patch);
// Requests that the replay UI show itself (if hidden or not the current top window). This can be
// used in conjunction with IsTargetControlConnected and LaunchReplayUI to intelligently handle
// showing the UI after making a capture.
//
// This will return 1 if the request was successfully passed on, though it's not guaranteed that
// the UI will be on top in all cases depending on OS rules. It will return 0 if there is no current
// target control connection to make such a request, or if there was another error
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_ShowReplayUI)();
//////////////////////////////////////////////////////////////////////////
// Capturing functions
//
// A device pointer is a pointer to the API's root handle.
//
// This would be an ID3D11Device, HGLRC/GLXContext, ID3D12Device, etc
typedef void *RENDERDOC_DevicePointer;
// A window handle is the OS's native window handle
//
// This would be an HWND, GLXDrawable, etc
typedef void *RENDERDOC_WindowHandle;
// A helper macro for Vulkan, where the device handle cannot be used directly.
//
// Passing the VkInstance to this macro will return the RENDERDOC_DevicePointer to use.
//
// Specifically, the value needed is the dispatch table pointer, which sits as the first
// pointer-sized object in the memory pointed to by the VkInstance. Thus we cast to a void** and
// indirect once.
#define RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(inst) (*((void **)(inst)))
// This sets the RenderDoc in-app overlay in the API/window pair as 'active' and it will
// respond to keypresses. Neither parameter can be NULL
typedef void(RENDERDOC_CC *pRENDERDOC_SetActiveWindow)(RENDERDOC_DevicePointer device,
RENDERDOC_WindowHandle wndHandle);
// capture the next frame on whichever window and API is currently considered active
typedef void(RENDERDOC_CC *pRENDERDOC_TriggerCapture)();
// capture the next N frames on whichever window and API is currently considered active
typedef void(RENDERDOC_CC *pRENDERDOC_TriggerMultiFrameCapture)(uint32_t numFrames);
// When choosing either a device pointer or a window handle to capture, you can pass NULL.
// Passing NULL specifies a 'wildcard' match against anything. This allows you to specify
// any API rendering to a specific window, or a specific API instance rendering to any window,
// or in the simplest case of one window and one API, you can just pass NULL for both.
//
// In either case, if there are two or more possible matching (device,window) pairs it
// is undefined which one will be captured.
//
// Note: for headless rendering you can pass NULL for the window handle and either specify
// a device pointer or leave it NULL as above.
// Immediately starts capturing API calls on the specified device pointer and window handle.
//
// If there is no matching thing to capture (e.g. no supported API has been initialised),
// this will do nothing.
//
// The results are undefined (including crashes) if two captures are started overlapping,
// even on separate devices and/or windows.
typedef void(RENDERDOC_CC *pRENDERDOC_StartFrameCapture)(RENDERDOC_DevicePointer device,
RENDERDOC_WindowHandle wndHandle);
// Returns whether or not a frame capture is currently ongoing anywhere.
//
// This will return 1 if a capture is ongoing, and 0 if there is no capture running
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_IsFrameCapturing)();
// Ends capturing immediately.
//
// This will return 1 if the capture succeeded, and 0 if there was an error capturing.
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_EndFrameCapture)(RENDERDOC_DevicePointer device,
RENDERDOC_WindowHandle wndHandle);
// Ends capturing immediately and discard any data stored without saving to disk.
//
// This will return 1 if the capture was discarded, and 0 if there was an error or no capture
// was in progress
typedef uint32_t(RENDERDOC_CC *pRENDERDOC_DiscardFrameCapture)(RENDERDOC_DevicePointer device,
RENDERDOC_WindowHandle wndHandle);
// Only valid to be called between a call to StartFrameCapture and EndFrameCapture. Gives a custom
// title to the capture produced which will be displayed in the UI.
//
// If multiple captures are ongoing, this title will be applied to the first capture to end after
// this call. The second capture to end will have no title, unless this function is called again.
//
// Calling this function has no effect if no capture is currently running
typedef void(RENDERDOC_CC *pRENDERDOC_SetCaptureTitle)(const char *title);
//////////////////////////////////////////////////////////////////////////////////////////////////
// RenderDoc API versions
//
// RenderDoc uses semantic versioning (http://semver.org/).
//
// MAJOR version is incremented when incompatible API changes happen.
// MINOR version is incremented when functionality is added in a backwards-compatible manner.
// PATCH version is incremented when backwards-compatible bug fixes happen.
//
// Note that this means the API returned can be higher than the one you might have requested.
// e.g. if you are running against a newer RenderDoc that supports 1.0.1, it will be returned
// instead of 1.0.0. You can check this with the GetAPIVersion entry point
typedef enum RENDERDOC_Version {
eRENDERDOC_API_Version_1_0_0 = 10000, // RENDERDOC_API_1_0_0 = 1 00 00
eRENDERDOC_API_Version_1_0_1 = 10001, // RENDERDOC_API_1_0_1 = 1 00 01
eRENDERDOC_API_Version_1_0_2 = 10002, // RENDERDOC_API_1_0_2 = 1 00 02
eRENDERDOC_API_Version_1_1_0 = 10100, // RENDERDOC_API_1_1_0 = 1 01 00
eRENDERDOC_API_Version_1_1_1 = 10101, // RENDERDOC_API_1_1_1 = 1 01 01
eRENDERDOC_API_Version_1_1_2 = 10102, // RENDERDOC_API_1_1_2 = 1 01 02
eRENDERDOC_API_Version_1_2_0 = 10200, // RENDERDOC_API_1_2_0 = 1 02 00
eRENDERDOC_API_Version_1_3_0 = 10300, // RENDERDOC_API_1_3_0 = 1 03 00
eRENDERDOC_API_Version_1_4_0 = 10400, // RENDERDOC_API_1_4_0 = 1 04 00
eRENDERDOC_API_Version_1_4_1 = 10401, // RENDERDOC_API_1_4_1 = 1 04 01
eRENDERDOC_API_Version_1_4_2 = 10402, // RENDERDOC_API_1_4_2 = 1 04 02
eRENDERDOC_API_Version_1_5_0 = 10500, // RENDERDOC_API_1_5_0 = 1 05 00
eRENDERDOC_API_Version_1_6_0 = 10600, // RENDERDOC_API_1_6_0 = 1 06 00
} RENDERDOC_Version;
// API version changelog:
//
// 1.0.0 - initial release
// 1.0.1 - Bugfix: IsFrameCapturing() was returning false for captures that were triggered
// by keypress or TriggerCapture, instead of Start/EndFrameCapture.
// 1.0.2 - Refactor: Renamed eRENDERDOC_Option_DebugDeviceMode to eRENDERDOC_Option_APIValidation
// 1.1.0 - Add feature: TriggerMultiFrameCapture(). Backwards compatible with 1.0.x since the new
// function pointer is added to the end of the struct, the original layout is identical
// 1.1.1 - Refactor: Renamed remote access to target control (to better disambiguate from remote
// replay/remote server concept in replay UI)
// 1.1.2 - Refactor: Renamed "log file" in function names to just capture, to clarify that these
// are captures and not debug logging files. This is the first API version in the v1.0
// branch.
// 1.2.0 - Added feature: SetCaptureFileComments() to add comments to a capture file that will be
// displayed in the UI program on load.
// 1.3.0 - Added feature: New capture option eRENDERDOC_Option_AllowUnsupportedVendorExtensions
// which allows users to opt-in to allowing unsupported vendor extensions to function.
// Should be used at the user's own risk.
// Refactor: Renamed eRENDERDOC_Option_VerifyMapWrites to
// eRENDERDOC_Option_VerifyBufferAccess, which now also controls initialisation to
// 0xdddddddd of uninitialised buffer contents.
// 1.4.0 - Added feature: DiscardFrameCapture() to discard a frame capture in progress and stop
// capturing without saving anything to disk.
// 1.4.1 - Refactor: Renamed Shutdown to RemoveHooks to better clarify what is happening
// 1.4.2 - Refactor: Renamed 'draws' to 'actions' in callstack capture option.
// 1.5.0 - Added feature: ShowReplayUI() to request that the replay UI show itself if connected
// 1.6.0 - Added feature: SetCaptureTitle() which can be used to set a title for a
// capture made with StartFrameCapture() or EndFrameCapture()
typedef struct RENDERDOC_API_1_6_0
{
pRENDERDOC_GetAPIVersion GetAPIVersion;
pRENDERDOC_SetCaptureOptionU32 SetCaptureOptionU32;
pRENDERDOC_SetCaptureOptionF32 SetCaptureOptionF32;
pRENDERDOC_GetCaptureOptionU32 GetCaptureOptionU32;
pRENDERDOC_GetCaptureOptionF32 GetCaptureOptionF32;
pRENDERDOC_SetFocusToggleKeys SetFocusToggleKeys;
pRENDERDOC_SetCaptureKeys SetCaptureKeys;
pRENDERDOC_GetOverlayBits GetOverlayBits;
pRENDERDOC_MaskOverlayBits MaskOverlayBits;
// Shutdown was renamed to RemoveHooks in 1.4.1.
// These unions allow old code to continue compiling without changes
union
{
pRENDERDOC_Shutdown Shutdown;
pRENDERDOC_RemoveHooks RemoveHooks;
};
pRENDERDOC_UnloadCrashHandler UnloadCrashHandler;
// Get/SetLogFilePathTemplate was renamed to Get/SetCaptureFilePathTemplate in 1.1.2.
// These unions allow old code to continue compiling without changes
union
{
// deprecated name
pRENDERDOC_SetLogFilePathTemplate SetLogFilePathTemplate;
// current name
pRENDERDOC_SetCaptureFilePathTemplate SetCaptureFilePathTemplate;
};
union
{
// deprecated name
pRENDERDOC_GetLogFilePathTemplate GetLogFilePathTemplate;
// current name
pRENDERDOC_GetCaptureFilePathTemplate GetCaptureFilePathTemplate;
};
pRENDERDOC_GetNumCaptures GetNumCaptures;
pRENDERDOC_GetCapture GetCapture;
pRENDERDOC_TriggerCapture TriggerCapture;
// IsRemoteAccessConnected was renamed to IsTargetControlConnected in 1.1.1.
// This union allows old code to continue compiling without changes
union
{
// deprecated name
pRENDERDOC_IsRemoteAccessConnected IsRemoteAccessConnected;
// current name
pRENDERDOC_IsTargetControlConnected IsTargetControlConnected;
};
pRENDERDOC_LaunchReplayUI LaunchReplayUI;
pRENDERDOC_SetActiveWindow SetActiveWindow;
pRENDERDOC_StartFrameCapture StartFrameCapture;
pRENDERDOC_IsFrameCapturing IsFrameCapturing;
pRENDERDOC_EndFrameCapture EndFrameCapture;
// new function in 1.1.0
pRENDERDOC_TriggerMultiFrameCapture TriggerMultiFrameCapture;
// new function in 1.2.0
pRENDERDOC_SetCaptureFileComments SetCaptureFileComments;
// new function in 1.4.0
pRENDERDOC_DiscardFrameCapture DiscardFrameCapture;
// new function in 1.5.0
pRENDERDOC_ShowReplayUI ShowReplayUI;
// new function in 1.6.0
pRENDERDOC_SetCaptureTitle SetCaptureTitle;
} RENDERDOC_API_1_6_0;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_0_0;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_0_1;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_0_2;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_1_0;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_1_1;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_1_2;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_2_0;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_3_0;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_4_0;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_4_1;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_4_2;
typedef RENDERDOC_API_1_6_0 RENDERDOC_API_1_5_0;
//////////////////////////////////////////////////////////////////////////////////////////////////
// RenderDoc API entry point
//
// This entry point can be obtained via GetProcAddress/dlsym if RenderDoc is available.
//
// The name is the same as the typedef - "RENDERDOC_GetAPI"
//
// This function is not thread safe, and should not be called on multiple threads at once.
// Ideally, call this once as early as possible in your application's startup, before doing
// any API work, since some configuration functionality etc has to be done also before
// initialising any APIs.
//
// Parameters:
// version is a single value from the RENDERDOC_Version above.
//
// outAPIPointers will be filled out with a pointer to the corresponding struct of function
// pointers.
//
// Returns:
// 1 - if the outAPIPointers has been filled with a pointer to the API struct requested
// 0 - if the requested version is not supported or the arguments are invalid.
//
typedef int(RENDERDOC_CC *pRENDERDOC_GetAPI)(RENDERDOC_Version version, void **outAPIPointers);
#ifdef __cplusplus
} // extern "C"
#endif

View File

@ -7,7 +7,7 @@ set(INC
)
set(INC_SYS
${X11_X11_INCLUDE_PATH}
)
set(SRC

View File

@ -67,6 +67,10 @@ if(UNIX AND NOT APPLE)
add_subdirectory(libc_compat)
endif()
if (WITH_RENDERDOC)
add_subdirectory(renderdoc_dynload)
endif()
if(UNIX AND NOT APPLE)
# Important this comes after "ghost" as it uses includes defined by GHOST's CMake.
if(WITH_GHOST_WAYLAND AND WITH_GHOST_WAYLAND_DYNLOAD)

View File

@ -204,7 +204,7 @@ ccl_device bool integrator_init_from_bake(KernelGlobals kg,
ray.time = 0.5f;
ray.dP = differential_zero_compact();
ray.dD = differential_zero_compact();
integrator_state_write_ray(kg, state, &ray);
integrator_state_write_ray(state, &ray);
/* Setup next kernel to execute. */
integrator_path_init(kg, state, DEVICE_KERNEL_INTEGRATOR_SHADE_BACKGROUND);
@ -299,7 +299,7 @@ ccl_device bool integrator_init_from_bake(KernelGlobals kg,
ray.dD = differential_zero_compact();
/* Write ray. */
integrator_state_write_ray(kg, state, &ray);
integrator_state_write_ray(state, &ray);
/* Setup and write intersection. */
Intersection isect ccl_optional_struct_init;
@ -309,7 +309,7 @@ ccl_device bool integrator_init_from_bake(KernelGlobals kg,
isect.v = v;
isect.t = 1.0f;
isect.type = PRIMITIVE_TRIANGLE;
integrator_state_write_isect(kg, state, &isect);
integrator_state_write_isect(state, &isect);
/* Setup next kernel to execute. */
const bool use_caustics = kernel_data.integrator.use_caustics &&

View File

@ -85,7 +85,7 @@ ccl_device bool integrator_init_from_camera(KernelGlobals kg,
}
/* Write camera ray to state. */
integrator_state_write_ray(kg, state, &ray);
integrator_state_write_ray(state, &ray);
}
/* Initialize path state for path integration. */

View File

@ -150,7 +150,7 @@ ccl_device_forceinline void integrator_intersect_next_kernel_after_shadow_catche
/* Continue with shading shadow catcher surface. Same as integrator_split_shadow_catcher, but
* using NEXT instead of INIT. */
Intersection isect ccl_optional_struct_init;
integrator_state_read_isect(kg, state, &isect);
integrator_state_read_isect(state, &isect);
const int shader = intersection_get_shader(kg, &isect);
const int flags = kernel_data_fetch(shaders, shader).flags;
@ -326,7 +326,7 @@ ccl_device void integrator_intersect_closest(KernelGlobals kg,
/* Read ray from integrator state into local memory. */
Ray ray ccl_optional_struct_init;
integrator_state_read_ray(kg, state, &ray);
integrator_state_read_ray(state, &ray);
kernel_assert(ray.tmax != 0.0f);
const uint visibility = path_state_ray_visibility(state);
@ -397,7 +397,7 @@ ccl_device void integrator_intersect_closest(KernelGlobals kg,
}
/* Write intersection result into global integrator state memory. */
integrator_state_write_isect(kg, state, &isect);
integrator_state_write_isect(state, &isect);
/* Setup up next kernel to be executed. */
integrator_intersect_next_kernel<DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST>(

View File

@ -142,7 +142,7 @@ ccl_device void integrator_intersect_shadow(KernelGlobals kg, IntegratorShadowSt
/* Read ray from integrator state into local memory. */
Ray ray ccl_optional_struct_init;
integrator_state_read_shadow_ray(kg, state, &ray);
integrator_state_read_shadow_ray(state, &ray);
ray.self.object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 0, object);
ray.self.prim = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 0, prim);
ray.self.light_object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 1, object);

View File

@ -73,7 +73,7 @@ ccl_device void integrator_volume_stack_init(KernelGlobals kg, IntegratorState s
ccl_private ShaderData *stack_sd = AS_SHADER_DATA(&stack_sd_storage);
Ray volume_ray ccl_optional_struct_init;
integrator_state_read_ray(kg, state, &volume_ray);
integrator_state_read_ray(state, &volume_ray);
/* Trace ray in random direction. Any direction works, Z up is a guess to get the
* fewest hits. */

View File

@ -16,7 +16,7 @@ ccl_device_inline void integrate_light(KernelGlobals kg,
{
/* Setup light sample. */
Intersection isect ccl_optional_struct_init;
integrator_state_read_isect(kg, state, &isect);
integrator_state_read_isect(state, &isect);
guiding_record_light_surface_segment(kg, state, &isect);

View File

@ -35,7 +35,7 @@ ccl_device_inline Spectrum integrate_transparent_surface_shadow(KernelGlobals kg
integrator_state_read_shadow_isect(state, &isect, hit);
Ray ray ccl_optional_struct_init;
integrator_state_read_shadow_ray(kg, state, &ray);
integrator_state_read_shadow_ray(state, &ray);
shader_setup_from_ray(kg, shadow_sd, &ray, &isect);
@ -70,7 +70,7 @@ ccl_device_inline void integrate_transparent_volume_shadow(KernelGlobals kg,
/* Setup shader data. */
Ray ray ccl_optional_struct_init;
integrator_state_read_shadow_ray(kg, state, &ray);
integrator_state_read_shadow_ray(state, &ray);
ray.self.object = OBJECT_NONE;
ray.self.prim = PRIM_NONE;
ray.self.light_object = OBJECT_NONE;

View File

@ -24,10 +24,10 @@ ccl_device_forceinline void integrate_surface_shader_setup(KernelGlobals kg,
ccl_private ShaderData *sd)
{
Intersection isect ccl_optional_struct_init;
integrator_state_read_isect(kg, state, &isect);
integrator_state_read_isect(state, &isect);
Ray ray ccl_optional_struct_init;
integrator_state_read_ray(kg, state, &ray);
integrator_state_read_ray(state, &ray);
shader_setup_from_ray(kg, sd, &ray, &isect);
}
@ -253,7 +253,7 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
}
/* Write shadow ray and associated state to global memory. */
integrator_state_write_shadow_ray(kg, shadow_state, &ray);
integrator_state_write_shadow_ray(shadow_state, &ray);
// Save memory by storing the light and object indices in the shadow_isect
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
@ -548,7 +548,7 @@ ccl_device_forceinline void integrate_surface_ao(KernelGlobals kg,
integrator_state_copy_volume_stack_to_shadow(kg, shadow_state, state);
/* Write shadow ray and associated state to global memory. */
integrator_state_write_shadow_ray(kg, shadow_state, &ray);
integrator_state_write_shadow_ray(shadow_state, &ray);
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, object) = ray.self.light_object;

View File

@ -827,7 +827,7 @@ ccl_device_forceinline void integrate_volume_direct_light(
kg, state, DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW, false);
/* Write shadow ray and associated state to global memory. */
integrator_state_write_shadow_ray(kg, shadow_state, &ray);
integrator_state_write_shadow_ray(shadow_state, &ray);
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, object) = ray.self.light_object;
@ -1172,10 +1172,10 @@ ccl_device void integrator_shade_volume(KernelGlobals kg,
#ifdef __VOLUME__
/* Setup shader data. */
Ray ray ccl_optional_struct_init;
integrator_state_read_ray(kg, state, &ray);
integrator_state_read_ray(state, &ray);
Intersection isect ccl_optional_struct_init;
integrator_state_read_isect(kg, state, &isect);
integrator_state_read_isect(state, &isect);
/* Set ray length to current segment. */
ray.tmax = (isect.prim != PRIM_NONE) ? isect.t : FLT_MAX;

View File

@ -11,8 +11,7 @@ CCL_NAMESPACE_BEGIN
/* Ray */
ccl_device_forceinline void integrator_state_write_ray(KernelGlobals kg,
IntegratorState state,
ccl_device_forceinline void integrator_state_write_ray(IntegratorState state,
ccl_private const Ray *ccl_restrict ray)
{
INTEGRATOR_STATE_WRITE(state, ray, P) = ray->P;
@ -24,8 +23,7 @@ ccl_device_forceinline void integrator_state_write_ray(KernelGlobals kg,
INTEGRATOR_STATE_WRITE(state, ray, dD) = ray->dD;
}
ccl_device_forceinline void integrator_state_read_ray(KernelGlobals kg,
ConstIntegratorState state,
ccl_device_forceinline void integrator_state_read_ray(ConstIntegratorState state,
ccl_private Ray *ccl_restrict ray)
{
ray->P = INTEGRATOR_STATE(state, ray, P);
@ -40,7 +38,7 @@ ccl_device_forceinline void integrator_state_read_ray(KernelGlobals kg,
/* Shadow Ray */
ccl_device_forceinline void integrator_state_write_shadow_ray(
KernelGlobals kg, IntegratorShadowState state, ccl_private const Ray *ccl_restrict ray)
IntegratorShadowState state, ccl_private const Ray *ccl_restrict ray)
{
INTEGRATOR_STATE_WRITE(state, shadow_ray, P) = ray->P;
INTEGRATOR_STATE_WRITE(state, shadow_ray, D) = ray->D;
@ -50,8 +48,7 @@ ccl_device_forceinline void integrator_state_write_shadow_ray(
INTEGRATOR_STATE_WRITE(state, shadow_ray, dP) = ray->dP;
}
ccl_device_forceinline void integrator_state_read_shadow_ray(KernelGlobals kg,
ConstIntegratorShadowState state,
ccl_device_forceinline void integrator_state_read_shadow_ray(ConstIntegratorShadowState state,
ccl_private Ray *ccl_restrict ray)
{
ray->P = INTEGRATOR_STATE(state, shadow_ray, P);
@ -66,7 +63,7 @@ ccl_device_forceinline void integrator_state_read_shadow_ray(KernelGlobals kg,
/* Intersection */
ccl_device_forceinline void integrator_state_write_isect(
KernelGlobals kg, IntegratorState state, ccl_private const Intersection *ccl_restrict isect)
IntegratorState state, ccl_private const Intersection *ccl_restrict isect)
{
INTEGRATOR_STATE_WRITE(state, isect, t) = isect->t;
INTEGRATOR_STATE_WRITE(state, isect, u) = isect->u;
@ -77,7 +74,7 @@ ccl_device_forceinline void integrator_state_write_isect(
}
ccl_device_forceinline void integrator_state_read_isect(
KernelGlobals kg, ConstIntegratorState state, ccl_private Intersection *ccl_restrict isect)
ConstIntegratorState state, ccl_private Intersection *ccl_restrict isect)
{
isect->prim = INTEGRATOR_STATE(state, isect, prim);
isect->object = INTEGRATOR_STATE(state, isect, object);

View File

@ -162,8 +162,8 @@ ccl_device_inline bool subsurface_scatter(KernelGlobals kg, IntegratorState stat
ray.P += ray.D * ray.tmax * 2.0f;
ray.D = -ray.D;
integrator_state_write_isect(kg, state, &ss_isect.hits[0]);
integrator_state_write_ray(kg, state, &ray);
integrator_state_write_isect(state, &ss_isect.hits[0]);
integrator_state_write_ray(state, &ray);
/* Advance random number offset for bounce. */
INTEGRATOR_STATE_WRITE(state, path, rng_offset) += PRNG_BOUNCE_NUM;

View File

@ -161,7 +161,11 @@ ccl_device_inline void osl_eval_nodes(KernelGlobals kg,
/* shadeindex = */ 0);
# endif
# if __cplusplus < 201703L
if (type == SHADER_TYPE_DISPLACEMENT) {
# else
if constexpr (type == SHADER_TYPE_DISPLACEMENT) {
# endif
sd->P = globals.P;
}
else if (globals.Ci) {

View File

@ -1646,8 +1646,8 @@ enum KernelFeatureFlag : uint32_t {
/* Must be constexpr on the CPU to avoid compile errors because the state types
* are different depending on the main, shadow or null path. For GPU we don't have
* C++17 everywhere so can't use it. */
#ifdef __KERNEL_GPU__
* C++17 everywhere so need to check it. */
#if __cplusplus < 201703L
# define IF_KERNEL_FEATURE(feature) if ((node_feature_mask & (KERNEL_FEATURE_##feature)) != 0U)
# define IF_KERNEL_NODES_FEATURE(feature) \
if ((node_feature_mask & (KERNEL_FEATURE_NODE_##feature)) != 0U)

View File

@ -442,6 +442,13 @@ void ColorSpaceManager::free_memory()
#endif
}
void ColorSpaceManager::init_fallback_config()
{
#ifdef WITH_OCIO
OCIO::SetCurrentConfig(OCIO::Config::CreateRaw());
#endif
}
/* Template instantiations so we don't have to inline functions. */
template void ColorSpaceManager::to_scene_linear(ustring, uchar *, size_t, bool, bool);
template void ColorSpaceManager::to_scene_linear(ustring, ushort *, size_t, bool, bool);

View File

@ -43,6 +43,12 @@ class ColorSpaceManager {
/* Clear memory when the application exits. Invalidates all processors. */
static void free_memory();
/* Create a fallback color space configuration.
*
* This may be useful to allow regression test to create a configuration which is considered
* valid without knowing the actual configuration used by the final application. */
static void init_fallback_config();
private:
static void is_builtin_colorspace(ustring colorspace, bool &is_no_op, bool &is_srgb);
};

View File

@ -57,7 +57,7 @@ OrientationBounds merge(const OrientationBounds &cone_a, const OrientationBounds
/* Rotate new axis to be between a and b. */
float theta_r = theta_o - a->theta_o;
float3 new_axis = rotate_around_axis(a->axis, cross(a->axis, b->axis), theta_r);
float3 new_axis = rotate_around_axis(a->axis, safe_normalize(cross(a->axis, b->axis)), theta_r);
new_axis = normalize(new_axis);
return OrientationBounds({new_axis, theta_o, theta_e});

View File

@ -160,7 +160,7 @@ struct LightTreeNode {
int num_prims = -1; /* The number of primitives a leaf node stores. A negative
number indicates it is an inner node. */
int first_prim_index; /* Leaf nodes contain an index to first primitive. */
unique_ptr<LightTreeNode> children[2]; /* Inner node has two chlidren. */
unique_ptr<LightTreeNode> children[2]; /* Inner node has two children. */
LightTreeNode() = default;

View File

@ -6,6 +6,7 @@
#include "device/device.h"
#include "scene/colorspace.h"
#include "scene/scene.h"
#include "scene/shader_graph.h"
#include "scene/shader_nodes.h"
@ -165,15 +166,29 @@ class RenderGraph : public testing::Test {
virtual void SetUp()
{
util_logging_start();
util_logging_verbosity_set(5);
/* The test is running outside of the typical application configuration when the OCIO is
* initialized prior to Cycles. Explicitly create the raw configuration to avoid the warning
* printed by the OCIO when accessing non-figured environment.
* Functionally it is the same as not doing this explicit call: the OCIO will warn and then do
* the same raw configuration. */
ColorSpaceManager::init_fallback_config();
device_cpu = Device::create(device_info, stats, profiler);
scene = new Scene(scene_params, device_cpu);
/* Initialize logging after the creation of the essential resources. This way the logging
* mock sink does not warn about uninteresting messages which happens prior to the setup of
* the actual mock sinks. */
util_logging_start();
util_logging_verbosity_set(5);
}
virtual void TearDown()
{
/* Effectively disable logging, so that the next test suit starts in an environment which is
* not logging by default. */
util_logging_verbosity_set(0);
delete scene;
delete device_cpu;
}

View File

@ -557,7 +557,7 @@ if(WITH_XR_OPENXR)
# Header only library.
../../extern/tinygltf/tiny_gltf.h
)
list(APPEND INC
list(APPEND INC_SYS
../../extern/json/include
../../extern/tinygltf
)

View File

@ -2664,13 +2664,7 @@ static void pointer_handle_enter(void *data,
/* Resetting scroll events is likely unnecessary,
* do this to avoid any possible problems as it's harmless. */
seat->pointer_scroll.smooth_xy[0] = 0;
seat->pointer_scroll.smooth_xy[1] = 0;
seat->pointer_scroll.discrete_xy[0] = 0;
seat->pointer_scroll.discrete_xy[1] = 0;
seat->pointer_scroll.inverted_xy[0] = false;
seat->pointer_scroll.inverted_xy[1] = false;
seat->pointer_scroll.axis_source = WL_POINTER_AXIS_SOURCE_WHEEL;
seat->pointer_scroll = GWL_SeatStatePointerScroll{};
seat->pointer.wl_surface_window = wl_surface;
@ -4275,8 +4269,26 @@ static void gwl_seat_capability_pointer_enable(GWL_Seat *seat)
seat->cursor.visible = true;
seat->cursor.wl_buffer = nullptr;
if (!get_cursor_settings(seat->cursor.theme_name, seat->cursor.theme_size)) {
seat->cursor.theme_name = std::string();
/* Use environment variables, falling back to defaults.
* These environment variables are used by enough WAYLAND applications
* that it makes sense to check them (see `Xcursor` man page). */
const char *env;
env = getenv("XCURSOR_THEME");
seat->cursor.theme_name = std::string(env ? env : "");
env = getenv("XCURSOR_SIZE");
seat->cursor.theme_size = default_cursor_size;
if (env && (*env != '\0')) {
char *env_end = nullptr;
/* While clamping is not needed on the WAYLAND side,
* GHOST's internal logic may get confused by negative values, so ensure it's at least 1. */
const long value = strtol(env, &env_end, 10);
if ((*env_end == '\0') && (value > 0)) {
seat->cursor.theme_size = int(value);
}
}
}
wl_pointer_add_listener(seat->wl_pointer, &pointer_listener, seat);

View File

@ -224,28 +224,30 @@ string(APPEND CMAKE_CXX_FLAGS " ${PLATFORM_CFLAGS}")
# Gears (C)
add_executable(gears_c
${CMAKE_SOURCE_DIR}/gears/GHOST_C-Test.c)
${CMAKE_SOURCE_DIR}/gears/GHOST_C-Test.c
)
target_link_libraries(gears_c
ghost_lib
string_lib
${OPENGL_gl_LIBRARY}
${CMAKE_DL_LIBS}
${PLATFORM_LINKLIBS}
)
ghost_lib
string_lib
${OPENGL_gl_LIBRARY}
${CMAKE_DL_LIBS}
${PLATFORM_LINKLIBS}
)
# Gears (C++)
add_executable(gears_cpp
${CMAKE_SOURCE_DIR}/gears/GHOST_Test.cpp)
${CMAKE_SOURCE_DIR}/gears/GHOST_Test.cpp
)
target_link_libraries(gears_cpp
ghost_lib
string_lib
${OPENGL_gl_LIBRARY}
${CMAKE_DL_LIBS}
${PLATFORM_LINKLIBS}
)
ghost_lib
string_lib
${OPENGL_gl_LIBRARY}
${CMAKE_DL_LIBS}
${PLATFORM_LINKLIBS}
)
# MultiTest (C)

View File

@ -450,10 +450,10 @@ void *MEM_guarded_mallocN(size_t len, const char *str)
#endif
return (++memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
mem_in_use);
return NULL;
}
@ -463,11 +463,11 @@ void *MEM_guarded_malloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Malloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)mem_in_use);
mem_in_use);
abort();
return NULL;
}
@ -523,10 +523,10 @@ void *MEM_guarded_mallocN_aligned(size_t len, size_t alignment, const char *str)
#endif
return (++memh);
}
print_error("aligned_malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("aligned_malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
mem_in_use);
return NULL;
}
@ -547,10 +547,10 @@ void *MEM_guarded_callocN(size_t len, const char *str)
#endif
return (++memh);
}
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
mem_in_use);
return NULL;
}
@ -560,11 +560,11 @@ void *MEM_guarded_calloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Calloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)mem_in_use);
mem_in_use);
abort();
return NULL;
}

View File

@ -213,10 +213,10 @@ void *MEM_lockfree_callocN(size_t len, const char *str)
return PTR_FROM_MEMHEAD(memh);
}
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)memory_usage_current());
memory_usage_current());
return NULL;
}
@ -226,11 +226,11 @@ void *MEM_lockfree_calloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Calloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(unsigned int)memory_usage_current());
memory_usage_current());
abort();
return NULL;
}
@ -256,10 +256,10 @@ void *MEM_lockfree_mallocN(size_t len, const char *str)
return PTR_FROM_MEMHEAD(memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)memory_usage_current());
memory_usage_current());
return NULL;
}
@ -269,11 +269,11 @@ void *MEM_lockfree_malloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Malloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)memory_usage_current());
memory_usage_current());
abort();
return NULL;
}
@ -325,10 +325,10 @@ void *MEM_lockfree_mallocN_aligned(size_t len, size_t alignment, const char *str
return PTR_FROM_MEMHEAD(memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)memory_usage_current());
memory_usage_current());
return NULL;
}

View File

@ -0,0 +1,17 @@
# SPDX-License-Identifier: GPL-2.0-or-later

set(INC
  include
  ../../extern/renderdoc/include
)

set(INC_SYS
)

set(SRC
  intern/renderdoc_api.cc

  include/renderdoc_api.hh
)

# Fix: `LIB` was expanded in blender_add_lib() below without ever being
# defined in this file, so it silently picked up any value leaked from an
# enclosing scope. Declare it (empty) explicitly, matching the convention
# of the other intern/ CMakeLists files.
set(LIB
)

blender_add_lib(bf_intern_renderdoc_dynload "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")

View File

@ -0,0 +1,45 @@
#pragma once
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
#include "renderdoc_app.h"
namespace renderdoc::api {
/**
 * Helper to drive RenderDoc frame captures from inside the application.
 *
 * The RenderDoc API is resolved lazily on the first capture request
 * (see #check_loaded / #load); no capture functions are available until
 * that lookup succeeds.
 */
class Renderdoc {
 private:
  /** Loading state of the in-application RenderDoc API. */
  enum class State {
    /**
     * Initial state of the API indicating that the API hasn't checked if it can find renderdoc.
     */
    UNINITIALIZED,

    /**
     * API has looked for renderdoc, but couldn't find it. This indicates that renderdoc isn't
     * available on the platform, or wasn't registered correctly.
     */
    NOT_FOUND,

    /**
     * API has loaded the symbols of renderdoc.
     */
    LOADED,
  };

  /* Current loading state; starts out unchecked. */
  State state_ = State::UNINITIALIZED;
  /* RenderDoc function table filled in by RENDERDOC_GetAPI when loading
   * succeeds; remains null otherwise. */
  RENDERDOC_API_1_6_0 *renderdoc_api_ = nullptr;

 public:
  /**
   * Start a frame capture for the given device/window pair.
   *
   * \return true when the RenderDoc API is available and the capture was
   *         requested, false when RenderDoc could not be loaded.
   */
  bool start_frame_capture(RENDERDOC_DevicePointer device_handle,
                           RENDERDOC_WindowHandle window_handle);

  /**
   * End a frame capture previously started with #start_frame_capture.
   * No-op when the RenderDoc API is not available.
   */
  void end_frame_capture(RENDERDOC_DevicePointer device_handle,
                         RENDERDOC_WindowHandle window_handle);

 private:
  /**
   * Check if renderdoc has been loaded.
   *
   * When not loaded it tries to load the API, but only tries to do it once.
   */
  bool check_loaded();

  /* Platform specific lookup of an already injected RenderDoc library;
   * fills #renderdoc_api_ on success. */
  void load();
};
} // namespace renderdoc::api

View File

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
#include "renderdoc_api.hh"
#ifdef _WIN32
# define WIN32_LEAN_AND_MEAN
# include <Windows.h>
#else
# include <dlfcn.h>
#endif
#include <iostream>
namespace renderdoc::api {
bool Renderdoc::start_frame_capture(RENDERDOC_DevicePointer device_handle,
                                    RENDERDOC_WindowHandle window_handle)
{
  /* Only request a capture when the RenderDoc API could be resolved;
   * report availability back to the caller. */
  const bool api_available = check_loaded();
  if (api_available) {
    renderdoc_api_->StartFrameCapture(device_handle, window_handle);
  }
  return api_available;
}
void Renderdoc::end_frame_capture(RENDERDOC_DevicePointer device_handle,
                                  RENDERDOC_WindowHandle window_handle)
{
  /* Nothing to end when the RenderDoc API is not available. */
  if (check_loaded()) {
    renderdoc_api_->EndFrameCapture(device_handle, window_handle);
  }
}
bool Renderdoc::check_loaded()
{
  /* Lazily resolve the RenderDoc API on first use and cache the outcome.
   *
   * Fix: previously the UNINITIALIZED case called load() but never updated
   * state_, so the (failed) library lookup was repeated on every call —
   * contradicting the documented "only tries to do it once" contract. */
  switch (state_) {
    case State::UNINITIALIZED:
      load();
      /* Record the result so subsequent calls return immediately. */
      state_ = (renderdoc_api_ != nullptr) ? State::LOADED : State::NOT_FOUND;
      return state_ == State::LOADED;
    case State::NOT_FOUND:
      return false;
    case State::LOADED:
      return true;
  }
  return false;
}
void Renderdoc::load()
{
  /* Look up the RenderDoc module that the RenderDoc UI injects into the
   * process; this never loads the library from disk itself. */
#ifdef _WIN32
  if (HMODULE mod = GetModuleHandleA("renderdoc.dll")) {
    pRENDERDOC_GetAPI RENDERDOC_GetAPI = (pRENDERDOC_GetAPI)GetProcAddress(mod,
                                                                           "RENDERDOC_GetAPI");
    /* NOTE(review): return values of GetProcAddress/RENDERDOC_GetAPI are not
     * checked; renderdoc_api_ simply stays null on failure — confirm this is
     * intended. */
    RENDERDOC_GetAPI(eRENDERDOC_API_Version_1_1_2, (void **)&renderdoc_api_);
  }
#else
  /* RTLD_NOLOAD: only succeeds when librenderdoc.so is already mapped into
   * this process, i.e. when running under the RenderDoc launcher. */
  if (void *mod = dlopen("librenderdoc.so", RTLD_NOW | RTLD_NOLOAD)) {
    pRENDERDOC_GetAPI RENDERDOC_GetAPI = (pRENDERDOC_GetAPI)dlsym(mod, "RENDERDOC_GetAPI");
    RENDERDOC_GetAPI(eRENDERDOC_API_Version_1_1_2, (void **)&renderdoc_api_);
  }
#endif

  if (renderdoc_api_ != nullptr) {
    int major;
    int minor;
    int patch;
    /* Log the resolved API version to aid debugging capture issues. */
    renderdoc_api_->GetAPIVersion(&major, &minor, &patch);
    std::cout << "Found renderdoc API [" << major << "." << minor << "." << patch << "]";
  }
  else {
    std::cerr << "Unable to load renderdoc API.\n";
  }
}
} // namespace renderdoc::api

View File

@ -40,6 +40,25 @@
</screenshot>
</screenshots>
<releases>
<release version="3.5" date="2023-03-29">
<description>
<p>New features:</p>
<ul>
<li>Real-Time compositor</li>
<li>Vector displacement sculpting</li>
<li>Built-in hair node groups</li>
<li>Cycles many light sampling</li>
<li>Metal Viewport for macOS</li>
</ul>
<p>Enhancements:</p>
<ul>
<li>Support for importing and exporting compressed .USDZ files</li>
<li>New Ease operator in the graph editor</li>
<li>New Geometry Nodes, like Image Info and Blur Attribute</li>
<li>Font previews now differentiate better between Korean, Japanese, Simplified and Traditional Chinese</li>
</ul>
</description>
</release>
<release version="3.4" date="2022-12-07">
<description>
<p>New features:</p>

View File

@ -21,10 +21,7 @@ __all__ = (
"ImagePreviewCollection",
)
import _bpy
_utils_previews = _bpy._utils_previews
del _bpy
from _bpy import _utils_previews
_uuid_open = set()

View File

@ -379,7 +379,7 @@ class NODE_MT_geometry_node_GEO_MESH_OPERATIONS(Menu):
bl_idname = "NODE_MT_geometry_node_GEO_MESH_OPERATIONS"
bl_label = "Operations"
def draw(self, _context):
def draw(self, context):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeDualMesh")
node_add_menu.add_node_type(layout, "GeometryNodeEdgePathsToCurves")
@ -389,7 +389,7 @@ class NODE_MT_geometry_node_GEO_MESH_OPERATIONS(Menu):
node_add_menu.add_node_type(layout, "GeometryNodeMeshBoolean")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToCurve")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToPoints")
if _context.preferences.experimental.use_new_volume_nodes:
if context.preferences.experimental.use_new_volume_nodes:
node_add_menu.add_node_type(layout, "GeometryNodeMeshToSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToVolume")
node_add_menu.add_node_type(layout, "GeometryNodeScaleElements")
@ -448,14 +448,14 @@ class NODE_MT_category_GEO_POINT(Menu):
bl_idname = "NODE_MT_category_GEO_POINT"
bl_label = "Point"
def draw(self, _context):
def draw(self, context):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeDistributePointsInVolume")
node_add_menu.add_node_type(layout, "GeometryNodeDistributePointsOnFaces")
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodePoints")
node_add_menu.add_node_type(layout, "GeometryNodePointsToVertices")
if _context.preferences.experimental.use_new_volume_nodes:
if context.preferences.experimental.use_new_volume_nodes:
node_add_menu.add_node_type(layout, "GeometryNodePointsToSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodePointsToVolume")
layout.separator()
@ -593,11 +593,11 @@ class NODE_MT_category_GEO_VOLUME(Menu):
bl_idname = "NODE_MT_category_GEO_VOLUME"
bl_label = "Volume"
def draw(self, _context):
def draw(self, context):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeVolumeCube")
node_add_menu.add_node_type(layout, "GeometryNodeVolumeToMesh")
if _context.preferences.experimental.use_new_volume_nodes:
if context.preferences.experimental.use_new_volume_nodes:
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodeMeanFilterSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeOffsetSDFVolume")

View File

@ -294,6 +294,7 @@ class USERPREF_PT_interface_statusbar(InterfacePanel, CenterAlignMixIn, Panel):
col = layout.column(heading="Show")
col.prop(view, "show_statusbar_stats", text="Scene Statistics")
col.prop(view, "show_statusbar_scene_duration", text="Scene Duration")
col.prop(view, "show_statusbar_memory", text="System Memory")
col.prop(view, "show_statusbar_vram", text="Video Memory")
col.prop(view, "show_statusbar_version", text="Blender Version")

View File

@ -8,34 +8,34 @@
*
* Basic design of the DerivedMesh system:
*
* DerivedMesh is a common set of interfaces for mesh systems.
* #DerivedMesh is a common set of interfaces for mesh systems.
*
* There are three main mesh data structures in Blender:
* #Mesh, #CDDerivedMesh and #BMesh.
*
* These, and a few others, all implement DerivedMesh interfaces,
* These, and a few others, all implement #DerivedMesh interfaces,
* which contains unified drawing interfaces, a few utility interfaces,
* and a bunch of read-only interfaces intended mostly for conversion from
* one format to another.
*
* All Mesh structures in blender make use of CustomData, which is used to store
* per-element attributes and interpolate them (e.g. uvs, vcols, vgroups, etc).
* All Mesh structures in blender make use of #CustomData, which is used to store
* per-element attributes and interpolate them (e.g. UVs, vertex-colors, vertex-groups, etc).
*
* Mesh is the "serialized" structure, used for storing object-mode mesh data
* and also for saving stuff to disk. Its interfaces are also what DerivedMesh
* and also for saving stuff to disk. Its interfaces are also what #DerivedMesh
* uses to communicate with.
*
* CDDM is a little mesh library, that uses Mesh data structures in the backend.
* #CDDM is a little mesh library, that uses Mesh data structures in the backend.
* It's mostly used for modifiers, and has the advantages of not taking much
* resources.
*
* BMesh is a full-on BREP, used for edit-mode, some modifiers, etc. It's much
* more capable (if memory-intensive) then CDDM.
* #BMesh is a full-on BREP, used for edit-mode, some modifiers, etc.
* It's much more capable (if memory-intensive) then CDDM.
*
* DerivedMesh is somewhat hackish. Many places assumes that a DerivedMesh is
* #DerivedMesh is somewhat hackish. Many places assumes that a #DerivedMesh is
* a CDDM (most of the time by simply copying it and converting it to one).
* CDDM is the original structure for modifiers, but has since been superseded
* by BMesh, at least for the foreseeable future.
* by #BMesh, at least for the foreseeable future.
*/
/*

View File

@ -217,7 +217,11 @@ bool BKE_collection_object_cyclic_check(struct Main *bmain,
struct ListBase BKE_collection_object_cache_get(struct Collection *collection);
ListBase BKE_collection_object_cache_instanced_get(struct Collection *collection);
/** Free the object cache of given `collection` and all of its ancestors (recursively). */
void BKE_collection_object_cache_free(struct Collection *collection);
/** Free the object cache of all collections in given `bmain`, including master collections of
* scenes. */
void BKE_main_collections_object_cache_free(const struct Main *bmain);
struct Base *BKE_collection_or_layer_objects(const struct Scene *scene,
struct ViewLayer *view_layer,

View File

@ -104,26 +104,6 @@ void BKE_mesh_ensure_default_orig_index_customdata(struct Mesh *mesh);
*/
void BKE_mesh_ensure_default_orig_index_customdata_no_check(struct Mesh *mesh);
/**
* Find the index of the loop in 'poly' which references vertex,
* returns -1 if not found
*/
int poly_find_loop_from_vert(const struct MPoly *poly, const int *poly_verts, int vert);
/**
* Fill \a r_adj with the loop indices in \a poly adjacent to the
* vertex. Returns the index of the loop matching vertex, or -1 if the
* vertex is not in \a poly
*/
int poly_get_adj_loops_from_vert(const struct MPoly *poly,
const int *corner_verts,
int vert,
int r_adj[2]);
/**
* Return the index of the edge vert that is not equal to \a v. If
* neither edge vertex is equal to \a v, returns -1.
*/
int BKE_mesh_edge_other_vert(const struct MEdge *e, int v);
/**
* Sets each output array element to the edge index if it is a real edge, or -1.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0-or-later. */
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
@ -142,7 +142,71 @@ void edges_sharp_from_angle_set(Span<MPoly> polys,
const float split_angle,
MutableSpan<bool> sharp_edges);
} // namespace blender::bke::mesh
/** \} */
/* -------------------------------------------------------------------- */
/** \name Topology Queries
* \{ */
/**
 * Find the index of the previous corner in the polygon, looping to the end if necessary.
 * The indices are into the entire corners array, not just the polygon's corners.
 */
inline int poly_corner_prev(const MPoly &poly, const int corner)
{
  /* Branchless wrap-around: subtracting 1 underflows only at `loopstart`,
   * where the comparison adds `totloop` back to land on the last corner. */
  return corner - 1 + (corner == poly.loopstart) * poly.totloop;
}
/**
 * Find the index of the next corner in the polygon, looping to the start if necessary.
 * The indices are into the entire corners array, not just the polygon's corners.
 */
inline int poly_corner_next(const MPoly &poly, const int corner)
{
  if (corner == poly.loopstart + poly.totloop - 1) {
    /* The last corner wraps back to the polygon's first corner. */
    return poly.loopstart;
  }
  return corner + 1;
}
/**
* Find the index of the corner in the polygon that uses the given vertex.
* The index is into the entire corners array, not just the polygon's corners.
*/
inline int poly_find_corner_from_vert(const MPoly &poly,
const Span<int> corner_verts,
const int vert)
{
return poly.loopstart + corner_verts.slice(poly.loopstart, poly.totloop).first_index(vert);
}
/**
 * Return the vertex indices on either side of the given vertex, ordered based on the winding
 * direction of the polygon. The vertex must be in the polygon.
 *
 * NOTE(review): "adjecent" in the name is a typo for "adjacent"; kept because callers
 * reference this exact identifier.
 */
inline int2 poly_find_adjecent_verts(const MPoly &poly,
                                     const Span<int> corner_verts,
                                     const int vert)
{
  const int corner = poly_find_corner_from_vert(poly, corner_verts, vert);
  const int corner_prev = poly_corner_prev(poly, corner);
  const int corner_next = poly_corner_next(poly, corner);
  return {corner_verts[corner_prev], corner_verts[corner_next]};
}
/**
 * Return the index of the edge's vertex that is not \a vert.
 * If neither of the edge's vertices is \a vert, return -1.
 */
inline int edge_other_vert(const MEdge &edge, const int vert)
{
  if (vert == edge.v1) {
    return edge.v2;
  }
  return (vert == edge.v2) ? edge.v1 : -1;
}
/** \} */
@ -150,6 +214,8 @@ void edges_sharp_from_angle_set(Span<MPoly> polys,
/** \name Inline Mesh Data Access
* \{ */
} // namespace blender::bke::mesh
inline blender::Span<blender::float3> Mesh::vert_positions() const
{
return {reinterpret_cast<const blender::float3 *>(BKE_mesh_vert_positions(this)), this->totvert};

View File

@ -363,18 +363,5 @@ Array<Vector<int, 2>> build_edge_to_poly_map(Span<MPoly> polys,
int edges_num);
Vector<Vector<int>> build_edge_to_loop_map_resizable(Span<int> corner_edges, int edges_num);
/* Index of the previous loop in `poly`, wrapping to the polygon's last loop when
 * `loop_i` is its first loop (branchless wrap-around). */
inline int poly_loop_prev(const MPoly &poly, int loop_i)
{
  return loop_i - 1 + (loop_i == poly.loopstart) * poly.totloop;
}
/* Index of the next loop in `poly`, wrapping back to the polygon's first loop when
 * `loop_i` is its last loop. */
inline int poly_loop_next(const MPoly &poly, int loop_i)
{
  if (loop_i == poly.loopstart + poly.totloop - 1) {
    return poly.loopstart;
  }
  return loop_i + 1;
}
} // namespace blender::bke::mesh_topology
#endif

View File

@ -51,6 +51,7 @@ struct PaletteColor;
struct Scene;
struct StrokeCache;
struct Sculpt;
struct SculptSession;
struct SubdivCCG;
struct Tex;
struct ToolSettings;
@ -563,6 +564,8 @@ typedef struct SculptAttributePointers {
SculptAttribute *dyntopo_node_id_face;
} SculptAttributePointers;
#ifdef __cplusplus
typedef struct SculptSession {
/* Mesh data (not copied) can come either directly from a Mesh, or from a MultiresDM */
struct { /* Special handling for multires meshes */
@ -576,8 +579,8 @@ typedef struct SculptSession {
/* These are always assigned to base mesh data when using PBVH_FACES and PBVH_GRIDS. */
float (*vert_positions)[3];
const struct MPoly *polys;
const int *corner_verts;
blender::Span<MPoly> polys;
blender::Span<int> corner_verts;
/* These contain the vertex and poly counts of the final mesh. */
int totvert, totpoly;
@ -758,12 +761,14 @@ typedef struct SculptSession {
bool islands_valid; /* Is attrs.topology_island_key valid? */
} SculptSession;
#endif
void BKE_sculptsession_free(struct Object *ob);
void BKE_sculptsession_free_deformMats(struct SculptSession *ss);
void BKE_sculptsession_free_vwpaint_data(struct SculptSession *ss);
void BKE_sculptsession_bm_to_me(struct Object *ob, bool reorder);
void BKE_sculptsession_bm_to_me_for_render(struct Object *object);
int BKE_sculptsession_vertex_count(const SculptSession *ss);
int BKE_sculptsession_vertex_count(const struct SculptSession *ss);
/* Ensure an attribute layer exists. */
SculptAttribute *BKE_sculpt_attribute_ensure(struct Object *ob,
@ -911,6 +916,11 @@ bool BKE_object_attributes_active_color_fill(struct Object *ob,
const float fill_color[4],
bool only_selected);
/** C accessor for #Object::sculpt::pbvh. */
struct PBVH *BKE_object_sculpt_pbvh_get(struct Object *object);
bool BKE_object_sculpt_use_dyntopo(const struct Object *object);
void BKE_object_sculpt_dyntopo_smooth_shading_set(struct Object *object, bool value);
/* paint_canvas.cc */
/**

View File

@ -37,6 +37,7 @@ struct PBVH;
struct PBVHBatches;
struct PBVHNode;
struct PBVH_GPU_Args;
struct SculptSession;
struct SubdivCCG;
struct TaskParallelSettings;
struct Image;

View File

@ -310,6 +310,7 @@ typedef enum SubdivCCGAdjacencyType {
SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int *corner_verts,
int corners_num,
const struct MPoly *mpoly,
int *r_v1,
int *r_v2);

View File

@ -54,6 +54,9 @@ const char *no_procedural_access_message =
bool allow_procedural_attribute_access(StringRef attribute_name)
{
if (attribute_name.startswith(".corner")) {
return false;
}
if (attribute_name.startswith(".select")) {
return false;
}

View File

@ -82,6 +82,8 @@ static CollectionParent *collection_find_parent(Collection *child, Collection *c
static bool collection_find_child_recursive(const Collection *parent,
const Collection *collection);
static void collection_object_cache_free(Collection *collection);
static void collection_gobject_hash_ensure(Collection *collection);
static void collection_gobject_hash_update_object(Collection *collection,
Object *ob_old,
@ -160,7 +162,7 @@ static void collection_free_data(ID *id)
BLI_freelistN(&collection->children);
BLI_freelistN(&collection->runtime.parents);
BKE_collection_object_cache_free(collection);
collection_object_cache_free(collection);
}
static void collection_foreach_id(ID *id, LibraryForeachIDData *data)
@ -887,15 +889,27 @@ static void collection_object_cache_free(Collection *collection)
collection->flag &= ~(COLLECTION_HAS_OBJECT_CACHE | COLLECTION_HAS_OBJECT_CACHE_INSTANCED);
BLI_freelistN(&collection->runtime.object_cache);
BLI_freelistN(&collection->runtime.object_cache_instanced);
}
void BKE_collection_object_cache_free(Collection *collection)
{
collection_object_cache_free(collection);
LISTBASE_FOREACH (CollectionParent *, parent, &collection->runtime.parents) {
collection_object_cache_free(parent->collection);
}
}
void BKE_collection_object_cache_free(Collection *collection)
void BKE_main_collections_object_cache_free(const Main *bmain)
{
collection_object_cache_free(collection);
for (Scene *scene = bmain->scenes.first; scene != NULL; scene = scene->id.next) {
collection_object_cache_free(scene->master_collection);
}
for (Collection *collection = bmain->collections.first; collection != NULL;
collection = collection->id.next) {
collection_object_cache_free(collection);
}
}
Base *BKE_collection_or_layer_objects(const Scene *scene,

View File

@ -2583,8 +2583,12 @@ const char *CustomData_get_render_layer_name(const CustomData *data, const int t
void CustomData_set_layer_active(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active = n;
}
}
@ -2592,8 +2596,12 @@ void CustomData_set_layer_active(CustomData *data, const int type, const int n)
void CustomData_set_layer_render(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active_rnd = n;
}
}
@ -2601,8 +2609,12 @@ void CustomData_set_layer_render(CustomData *data, const int type, const int n)
void CustomData_set_layer_clone(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active_clone = n;
}
}
@ -2610,8 +2622,12 @@ void CustomData_set_layer_clone(CustomData *data, const int type, const int n)
void CustomData_set_layer_stencil(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active_mask = n;
}
}
@ -2619,48 +2635,64 @@ void CustomData_set_layer_stencil(CustomData *data, const int type, const int n)
void CustomData_set_layer_active_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active = layer_index;
}
}
}
void CustomData_set_layer_render_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_rnd = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active_rnd = layer_index;
}
}
}
void CustomData_set_layer_clone_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_clone = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active_clone = layer_index;
}
}
}
void CustomData_set_layer_stencil_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_mask = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active_mask = layer_index;
}
}
}

View File

@ -606,7 +606,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
/* For every corner, mix the values from the adjacent edges on the face. */
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const int loop_index_prev = mesh::poly_corner_prev(poly, loop_index);
const int edge = corner_edges[loop_index];
const int edge_prev = corner_edges[loop_index_prev];
mixer.mix_in(loop_index, old_values[edge]);
@ -633,7 +633,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
for (const int poly_index : range) {
const MPoly &poly = polys[poly_index];
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const int loop_index_prev = mesh::poly_corner_prev(poly, loop_index);
const int edge = corner_edges[loop_index];
const int edge_prev = corner_edges[loop_index_prev];
if (old_values[edge] && old_values[edge_prev]) {

View File

@ -342,7 +342,7 @@ void BKE_image_ensure_gpu_texture(Image *image, ImageUser *image_user)
return;
}
/* Note that the image can cache both sterio views, so we only invalidate the cache if the view
/* Note that the image can cache both stereo views, so we only invalidate the cache if the view
* index is more than 2. */
if (image->gpu_pass != image_user->pass || image->gpu_layer != image_user->layer ||
(image->gpu_view != image_user->multi_index && image_user->multi_index >= 2)) {

View File

@ -1431,6 +1431,8 @@ void BKE_main_collection_sync_remap(const Main *bmain)
/* On remapping of object or collection pointers free caches. */
/* TODO: try to make this faster */
BKE_main_collections_object_cache_free(bmain);
for (Scene *scene = static_cast<Scene *>(bmain->scenes.first); scene;
scene = static_cast<Scene *>(scene->id.next)) {
LISTBASE_FOREACH (ViewLayer *, view_layer, &scene->view_layers) {
@ -1447,14 +1449,12 @@ void BKE_main_collection_sync_remap(const Main *bmain)
view_layer_bases_hash_create(view_layer, true);
}
BKE_collection_object_cache_free(scene->master_collection);
DEG_id_tag_update_ex((Main *)bmain, &scene->master_collection->id, ID_RECALC_COPY_ON_WRITE);
DEG_id_tag_update_ex((Main *)bmain, &scene->id, ID_RECALC_COPY_ON_WRITE);
}
for (Collection *collection = static_cast<Collection *>(bmain->collections.first); collection;
collection = static_cast<Collection *>(collection->id.next)) {
BKE_collection_object_cache_free(collection);
DEG_id_tag_update_ex((Main *)bmain, &collection->id, ID_RECALC_COPY_ON_WRITE);
}
@ -2514,9 +2514,8 @@ ViewLayerAOV *BKE_view_layer_add_aov(ViewLayer *view_layer)
void BKE_view_layer_remove_aov(ViewLayer *view_layer, ViewLayerAOV *aov)
{
if (aov == nullptr || BLI_findindex(&view_layer->aovs, aov) == -1) {
return;
}
BLI_assert(BLI_findindex(&view_layer->aovs, aov) != -1);
BLI_assert(aov != nullptr);
if (view_layer->active_aov == aov) {
if (aov->next) {
viewlayer_aov_active_set(view_layer, aov->next);

View File

@ -1505,45 +1505,6 @@ void BKE_mesh_auto_smooth_flag_set(Mesh *me,
}
}
/**
 * Find the loop in `poly` that references `vert`.
 * \return The loop index relative to the start of the polygon, or -1 when
 * `vert` is not used by the polygon.
 */
int poly_find_loop_from_vert(const MPoly *poly, const int *poly_corner_verts, int vert)
{
  const int count = poly->totloop;
  int j = 0;
  while (j < count) {
    if (vert == poly_corner_verts[j]) {
      return j;
    }
    j++;
  }
  return -1;
}
/**
 * Fill `r_adj` with the corner vertices adjacent to `vert` in `poly`
 * (previous then next, following the polygon's winding).
 * \return The loop index of `vert` within the polygon, or -1 when `vert` is not
 * in the polygon — in which case `r_adj` is left untouched.
 */
int poly_get_adj_loops_from_vert(const MPoly *poly,
                                 const int *corner_verts,
                                 int vert,
                                 int r_adj[2])
{
  const int corner = poly_find_loop_from_vert(poly, &corner_verts[poly->loopstart], vert);
  if (corner == -1) {
    /* Vertex not used by this polygon. */
    return -1;
  }
  r_adj[0] = corner_verts[ME_POLY_LOOP_PREV(poly, corner)];
  r_adj[1] = corner_verts[ME_POLY_LOOP_NEXT(poly, corner)];
  return corner;
}
/**
 * Return the edge vertex that is not equal to \a v,
 * or -1 when neither of the edge's vertices is \a v.
 */
int BKE_mesh_edge_other_vert(const MEdge *edge, int v)
{
  if (v == edge->v1) {
    return edge->v2;
  }
  return (v == edge->v2) ? edge->v1 : -1;
}
void BKE_mesh_looptri_get_real_edges(const MEdge *edges,
const int *corner_verts,
const int *corner_edges,

View File

@ -237,11 +237,12 @@ class MeshFairingContext : public FairingContext {
float r_adj_next[3],
float r_adj_prev[3]) override
{
using namespace blender;
const int vert = corner_verts_[loop];
const MPoly &poly = polys[loop_to_poly_map_[loop]];
const int corner = poly_find_loop_from_vert(&poly, &corner_verts_[poly.loopstart], vert);
copy_v3_v3(r_adj_next, co_[corner_verts_[ME_POLY_LOOP_NEXT(&poly, corner)]]);
copy_v3_v3(r_adj_prev, co_[corner_verts_[ME_POLY_LOOP_PREV(&poly, corner)]]);
const int2 adjecent_verts = bke::mesh::poly_find_adjecent_verts(poly, corner_verts_, vert);
copy_v3_v3(r_adj_next, co_[adjecent_verts[0]]);
copy_v3_v3(r_adj_prev, co_[adjecent_verts[1]]);
}
int other_vertex_index_from_loop(const int loop, const uint v) override

View File

@ -1284,7 +1284,7 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
const MPoly &poly = polys[poly_index];
for (const int ml_curr_index : IndexRange(poly.loopstart, poly.totloop)) {
const int ml_prev_index = mesh_topology::poly_loop_prev(poly, ml_curr_index);
const int ml_prev_index = mesh::poly_corner_prev(poly, ml_curr_index);
#if 0
printf("Checking loop %d / edge %u / vert %u (sharp edge: %d, skiploop: %d)",

View File

@ -740,13 +740,13 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
nearest.index = -1;
for (i = 0; i < numedges_dst; i++) {
const MEdge *e_dst = &edges_dst[i];
const MEdge &e_dst = edges_dst[i];
float best_totdist = FLT_MAX;
int best_eidx_src = -1;
int j = 2;
while (j--) {
const uint vidx_dst = j ? e_dst->v1 : e_dst->v2;
const uint vidx_dst = j ? e_dst.v1 : e_dst.v2;
/* Compute closest verts only once! */
if (v_dst_to_src_map[vidx_dst].hit_dist == -1.0f) {
@ -772,7 +772,7 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
/* Now, check all source edges of closest sources vertices,
* and select the one giving the smallest total verts-to-verts distance. */
for (j = 2; j--;) {
const uint vidx_dst = j ? e_dst->v1 : e_dst->v2;
const uint vidx_dst = j ? e_dst.v1 : e_dst.v2;
const float first_dist = v_dst_to_src_map[vidx_dst].hit_dist;
const int vidx_src = v_dst_to_src_map[vidx_dst].index;
int *eidx_src, k;
@ -785,10 +785,11 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
k = vert_to_edge_src_map[vidx_src].count;
for (; k--; eidx_src++) {
const MEdge *edge_src = &edges_src[*eidx_src];
const float *other_co_src = vcos_src[BKE_mesh_edge_other_vert(edge_src, vidx_src)];
const MEdge &edge_src = edges_src[*eidx_src];
const float *other_co_src =
vcos_src[blender::bke::mesh::edge_other_vert(edge_src, vidx_src)];
const float *other_co_dst =
vert_positions_dst[BKE_mesh_edge_other_vert(e_dst, int(vidx_dst))];
vert_positions_dst[blender::bke::mesh::edge_other_vert(e_dst, int(vidx_dst))];
const float totdist = first_dist + len_v3v3(other_co_src, other_co_dst);
if (totdist < best_totdist) {
@ -801,8 +802,8 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
if (best_eidx_src >= 0) {
const float *co1_src = vcos_src[edges_src[best_eidx_src].v1];
const float *co2_src = vcos_src[edges_src[best_eidx_src].v2];
const float *co1_dst = vert_positions_dst[e_dst->v1];
const float *co2_dst = vert_positions_dst[e_dst->v2];
const float *co1_dst = vert_positions_dst[e_dst.v1];
const float *co2_dst = vert_positions_dst[e_dst.v2];
float co_src[3], co_dst[3];
/* TODO: would need an isect_seg_seg_v3(), actually! */

View File

@ -929,16 +929,57 @@ static bool mesh_validate_customdata(CustomData *data,
PRINT_MSG("%s: Checking %d CD layers...\n", __func__, data->totlayer);
/* Set dummy values so the layer-type is always initialized on first access. */
int layer_num = -1;
int layer_num_type = -1;
while (i < data->totlayer) {
CustomDataLayer *layer = &data->layers[i];
bool ok = true;
/* Count layers when the type changes. */
if (layer_num_type != layer->type) {
layer_num = CustomData_number_of_layers(data, layer->type);
layer_num_type = layer->type;
}
/* Validate active index, for a time this could be set to a negative value, see: #105860. */
int *active_index_array[] = {
&layer->active,
&layer->active_rnd,
&layer->active_clone,
&layer->active_mask,
};
for (int *active_index : Span(active_index_array, ARRAY_SIZE(active_index_array))) {
if (*active_index < 0) {
PRINT_ERR("\tCustomDataLayer type %d has a negative active index (%d)\n",
layer->type,
*active_index);
if (do_fixes) {
*active_index = 0;
has_fixes = true;
}
}
else {
if (*active_index >= layer_num) {
PRINT_ERR("\tCustomDataLayer type %d has an out of bounds active index (%d >= %d)\n",
layer->type,
*active_index,
layer_num);
if (do_fixes) {
BLI_assert(layer_num > 0);
*active_index = layer_num - 1;
has_fixes = true;
}
}
}
}
if (CustomData_layertype_is_singleton(layer->type)) {
const int layer_tot = CustomData_number_of_layers(data, layer->type);
if (layer_tot > 1) {
if (layer_num > 1) {
PRINT_ERR("\tCustomDataLayer type %d is a singleton, found %d in Mesh structure\n",
layer->type,
layer_tot);
layer_num);
ok = false;
}
}

View File

@ -4357,7 +4357,7 @@ void BKE_object_handle_update(Depsgraph *depsgraph, Scene *scene, Object *ob)
void BKE_object_sculpt_data_create(Object *ob)
{
BLI_assert((ob->sculpt == nullptr) && (ob->mode & OB_MODE_ALL_SCULPT));
ob->sculpt = MEM_cnew<SculptSession>(__func__);
ob->sculpt = MEM_new<SculptSession>(__func__);
ob->sculpt->mode_type = (eObjectMode)ob->mode;
}

View File

@ -1696,16 +1696,16 @@ static void sculpt_update_object(
/* These are assigned to the base mesh in Multires. This is needed because Face Sets operators
* and tools use the Face Sets data from the base mesh when Multires is active. */
ss->vert_positions = BKE_mesh_vert_positions_for_write(me);
ss->polys = me->polys().data();
ss->corner_verts = me->corner_verts().data();
ss->polys = me->polys();
ss->corner_verts = me->corner_verts();
}
else {
ss->totvert = me->totvert;
ss->totpoly = me->totpoly;
ss->totfaces = me->totpoly;
ss->vert_positions = BKE_mesh_vert_positions_for_write(me);
ss->polys = me->polys().data();
ss->corner_verts = me->corner_verts().data();
ss->polys = me->polys();
ss->corner_verts = me->corner_verts();
ss->multires.active = false;
ss->multires.modifier = nullptr;
ss->multires.level = 0;
@ -1999,12 +1999,11 @@ int BKE_sculpt_mask_layers_ensure(Depsgraph *depsgraph,
int level = max_ii(1, mmd->sculptlvl);
int gridsize = BKE_ccg_gridsize(level);
int gridarea = gridsize * gridsize;
int i, j;
gmask = static_cast<GridPaintMask *>(
CustomData_add_layer(&me->ldata, CD_GRID_PAINT_MASK, CD_SET_DEFAULT, me->totloop));
for (i = 0; i < me->totloop; i++) {
for (int i = 0; i < me->totloop; i++) {
GridPaintMask *gpm = &gmask[i];
gpm->level = level;
@ -2012,29 +2011,28 @@ int BKE_sculpt_mask_layers_ensure(Depsgraph *depsgraph,
MEM_callocN(sizeof(float) * gridarea, "GridPaintMask.data"));
}
/* if vertices already have mask, copy into multires data */
/* If vertices already have mask, copy into multires data. */
if (paint_mask) {
for (i = 0; i < me->totpoly; i++) {
for (const int i : polys.index_range()) {
const MPoly &poly = polys[i];
float avg = 0;
/* mask center */
for (j = 0; j < poly.totloop; j++) {
const int vert = corner_verts[poly.loopstart + j];
/* Mask center. */
float avg = 0.0f;
for (const int vert : corner_verts.slice(poly.loopstart, poly.totloop)) {
avg += paint_mask[vert];
}
avg /= float(poly.totloop);
/* fill in multires mask corner */
for (j = 0; j < poly.totloop; j++) {
GridPaintMask *gpm = &gmask[poly.loopstart + j];
const int vert = corner_verts[poly.loopstart + j];
const int prev = ME_POLY_LOOP_PREV(&poly, j);
const int next = ME_POLY_LOOP_NEXT(&poly, j);
/* Fill in multires mask corner. */
for (const int corner : blender::IndexRange(poly.loopstart, poly.totloop)) {
GridPaintMask *gpm = &gmask[corner];
const int vert = corner_verts[corner];
const int prev = corner_verts[blender::bke::mesh::poly_corner_prev(poly, vert)];
const int next = corner_verts[blender::bke::mesh::poly_corner_next(poly, vert)];
gpm->data[0] = avg;
gpm->data[1] = (paint_mask[vert] + paint_mask[corner_verts[next]]) * 0.5f;
gpm->data[2] = (paint_mask[vert] + paint_mask[corner_verts[prev]]) * 0.5f;
gpm->data[1] = (paint_mask[vert] + paint_mask[next]) * 0.5f;
gpm->data[2] = (paint_mask[vert] + paint_mask[prev]) * 0.5f;
gpm->data[3] = paint_mask[vert];
}
}
@ -2285,6 +2283,24 @@ PBVH *BKE_sculpt_object_pbvh_ensure(Depsgraph *depsgraph, Object *ob)
return pbvh;
}
/** C accessor: return the object's sculpt PBVH, or null when it has no sculpt session. */
PBVH *BKE_object_sculpt_pbvh_get(Object *object)
{
  SculptSession *sculpt = object->sculpt;
  return sculpt ? sculpt->pbvh : nullptr;
}
bool BKE_object_sculpt_use_dyntopo(const Object *object)
{
return object->sculpt && object->sculpt->bm;
}
/**
 * Set the smooth-shading flag on the sculpt session's dynamic-topology data.
 * \note Unlike #BKE_object_sculpt_pbvh_get, this does not null-check `object->sculpt`;
 * presumably callers guarantee the object is in sculpt mode — TODO confirm with callers.
 */
void BKE_object_sculpt_dyntopo_smooth_shading_set(Object *object, const bool value)
{
  object->sculpt->bm_smooth_shading = value;
}
void BKE_sculpt_bvh_update_from_ccg(PBVH *pbvh, SubdivCCG *subdiv_ccg)
{
CCGKey key;

View File

@ -113,6 +113,8 @@
#include "IMB_colormanagement.h"
#include "IMB_imbuf.h"
#include "DRW_engine.h"
#include "bmesh.h"
CurveMapping *BKE_sculpt_default_cavity_curve()
@ -380,10 +382,11 @@ static void scene_free_markers(Scene *scene, bool do_id_user)
static void scene_free_data(ID *id)
{
Scene *scene = (Scene *)id;
const bool do_id_user = false;
DRW_drawdata_free(id);
SEQ_editing_free(scene, do_id_user);
BKE_keyingsets_free(&scene->keyingsets);

View File

@ -3598,9 +3598,9 @@ void sbObjectStep(struct Depsgraph *depsgraph,
/* pass */
}
else if (/*ob->id.lib || */
/* "library linking & pointcaches" has to be solved properly at some point */
/* "library linking & point-caches" has to be solved properly at some point. */
(cache->flag & PTCACHE_BAKED)) {
/* if baked and nothing in cache, do nothing */
/* If baked and nothing in cache, do nothing. */
if (can_write_cache) {
BKE_ptcache_invalidate(cache);
}

View File

@ -1986,7 +1986,7 @@ const int *BKE_subdiv_ccg_start_face_grid_index_get(const SubdivCCG *subdiv_ccg)
static void adjacet_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int *corner_verts,
const blender::Span<int> corner_verts,
const MPoly *polys,
int *r_v1,
int *r_v2)
@ -1996,13 +1996,13 @@ static void adjacet_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_cc
const MPoly &poly = polys[poly_index];
*r_v1 = corner_verts[coord->grid_index];
const int corner = poly_find_loop_from_vert(&poly, &corner_verts[poly.loopstart], *r_v1);
const int corner = blender::bke::mesh::poly_find_corner_from_vert(poly, corner_verts, *r_v1);
if (coord->x == grid_size_1) {
const int next = ME_POLY_LOOP_NEXT(&poly, corner);
const int next = blender::bke::mesh::poly_corner_next(poly, corner);
*r_v2 = corner_verts[next];
}
if (coord->y == grid_size_1) {
const int prev = ME_POLY_LOOP_PREV(&poly, corner);
const int prev = blender::bke::mesh::poly_corner_prev(poly, corner);
*r_v2 = corner_verts[prev];
}
}
@ -2010,6 +2010,7 @@ static void adjacet_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_cc
SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int *corner_verts,
const int corners_num,
const MPoly *polys,
int *r_v1,
int *r_v2)
@ -2027,7 +2028,8 @@ SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const Subdi
return SUBDIV_CCG_ADJACENT_VERTEX;
}
/* Grid corner adjacent to the middle of a coarse mesh edge. */
adjacet_vertices_index_from_adjacent_edge(subdiv_ccg, coord, corner_verts, polys, r_v1, r_v2);
adjacet_vertices_index_from_adjacent_edge(
subdiv_ccg, coord, {corner_verts, corners_num}, polys, r_v1, r_v2);
return SUBDIV_CCG_ADJACENT_EDGE;
}
@ -2035,7 +2037,7 @@ SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const Subdi
if (!is_inner_edge_grid_coordinate(subdiv_ccg, coord)) {
/* Grid boundary adjacent to a coarse mesh edge. */
adjacet_vertices_index_from_adjacent_edge(
subdiv_ccg, coord, corner_verts, polys, r_v1, r_v2);
subdiv_ccg, coord, {corner_verts, corners_num}, polys, r_v1, r_v2);
return SUBDIV_CCG_ADJACENT_EDGE;
}
}

View File

@ -312,7 +312,7 @@ static int ss_sync_from_uv(CCGSubSurf *ss,
int nverts = poly.totloop;
int j, j_next;
CCGFace *origf = ccgSubSurf_getFace(origss, POINTER_FROM_INT(i));
/* uint *fv = &poly.v1; */
// uint *fv = &poly.v1;
fverts.reinitialize(nverts);

View File

@ -19,12 +19,13 @@
# undef NOMINMAX
# endif
# endif
#else
# include <atomic>
# include <mutex>
# include "BLI_map.hh"
#endif
#include <atomic>
#include <mutex>
#include "BLI_map.hh"
#include "BLI_utility_mixins.hh"
namespace blender::threading {

View File

@ -24,6 +24,7 @@ template<typename T> class OffsetIndices {
Span<T> offsets_;
public:
OffsetIndices() = default;
OffsetIndices(const Span<T> offsets) : offsets_(offsets)
{
BLI_assert(std::is_sorted(offsets_.begin(), offsets_.end()));

View File

@ -64,6 +64,22 @@ template<typename T> class SharedCache {
BLI_assert(cache_->mutex.is_cached());
return cache_->data;
}
/**
 * Return true if the cache currently does not exist or has been invalidated,
 * i.e. the value would have to be recomputed before use.
 */
bool is_dirty() const
{
  return cache_->mutex.is_dirty();
}
/**
 * Return true if the cache exists and is valid (a computed value is available).
 */
bool is_cached() const
{
  return cache_->mutex.is_cached();
}
};
} // namespace blender

View File

@ -13,11 +13,12 @@ set(INC
../../../intern/atomic
../../../intern/eigen
../../../intern/guardedalloc
../../../extern/wcwidth
../../../extern/json/include
)
set(INC_SYS
../../../extern/wcwidth
../../../extern/json/include
${EIGEN3_INCLUDE_DIRS}
${ZLIB_INCLUDE_DIRS}
${ZSTD_INCLUDE_DIRS}

View File

@ -5,10 +5,11 @@
* \brief Array storage to minimize duplication.
*
* This is done by splitting arrays into chunks and using copy-on-write (COW),
* to de-duplicate chunks,
* from the users perspective this is an implementation detail.
 * to de-duplicate chunks, from the user's perspective this is an implementation detail.
*
* Overview
* ========
*
* Data Structure
* --------------
*
@ -16,51 +17,52 @@
*
 * \note The only 2 structures here which are referenced externally are the following:
*
* - BArrayStore: The whole array store.
* - BArrayState: Represents a single state (array) of data.
* - #BArrayStore: The whole array store.
* - #BArrayState: Represents a single state (array) of data.
 * These can be added using a reference state,
* while this could be considered the previous or parent state.
* no relationship is kept,
* so the caller is free to add any state from the same BArrayStore as a reference.
* so the caller is free to add any state from the same #BArrayStore as a reference.
*
* <pre>
* <+> BArrayStore: root data-structure,
* <+> #BArrayStore: root data-structure,
* | can store many 'states', which share memory.
* |
* | This can store many arrays, however they must share the same 'stride'.
* | Arrays of different types will need to use a new BArrayStore.
* | Arrays of different types will need to use a new #BArrayStore.
* |
* +- <+> states (Collection of BArrayState's):
* +- <+> states (Collection of #BArrayState's):
* | | Each represents an array added by the user of this API.
* | | and references a chunk_list (each state is a chunk_list user).
* | | Note that the list order has no significance.
* | |
* | +- <+> chunk_list (BChunkList):
* | +- <+> chunk_list (#BChunkList):
* | | The chunks that make up this state.
* | | Each state is a chunk_list user,
* | | avoids duplicating lists when there is no change between states.
* | |
* | +- chunk_refs (List of BChunkRef): Each chunk_ref links to a BChunk.
* | +- chunk_refs (List of #BChunkRef): Each chunk_ref links to a #BChunk.
* | Each reference is a chunk user,
* | avoids duplicating smaller chunks of memory found in multiple states.
* |
* +- info (BArrayInfo):
* +- info (#BArrayInfo):
* | Sizes and offsets for this array-store.
* | Also caches some variables for reuse.
* |
* +- <+> memory (BArrayMemory):
* | Memory pools for storing BArrayStore data.
* +- <+> memory (#BArrayMemory):
* | Memory pools for storing #BArrayStore data.
* |
* +- chunk_list (Pool of BChunkList):
* | All chunk_lists, (reference counted, used by BArrayState).
* +- chunk_list (Pool of #BChunkList):
* | All chunk_lists, (reference counted, used by #BArrayState).
* |
* +- chunk_ref (Pool of BChunkRef):
* | All chunk_refs (link between BChunkList & BChunk).
* +- chunk_ref (Pool of #BChunkRef):
* | All chunk_refs (link between #BChunkList & #BChunk).
* |
* +- chunks (Pool of BChunk):
* All chunks, (reference counted, used by BChunkList).
* +- chunks (Pool of #BChunk):
* All chunks, (reference counted, used by #BChunkList).
* These have their headers hashed for reuse so we can quickly check for duplicates.
* </pre>
*
* De-Duplication
* --------------
*
@ -71,7 +73,7 @@
* For identical arrays this is all that's needed.
*
* De-duplication is performed on any remaining chunks, by hashing the first few bytes of the chunk
* (see: BCHUNK_HASH_TABLE_ACCUMULATE_STEPS).
* (see: #BCHUNK_HASH_TABLE_ACCUMULATE_STEPS).
*
* \note This is cached for reuse since the referenced data never changes.
*
@ -93,9 +95,9 @@
#include "BLI_strict_flags.h"
#include "BLI_array_store.h" /* own include */
#include "BLI_array_store.h" /* Own include. */
/* only for BLI_array_store_is_valid */
/* Only for #BLI_array_store_is_valid. */
#include "BLI_ghash.h"
/* -------------------------------------------------------------------- */
@ -169,7 +171,7 @@
#endif
/**
* Calculate the key once and reuse it
* Calculate the key once and reuse it.
*/
#define USE_HASH_TABLE_KEY_CACHE
#ifdef USE_HASH_TABLE_KEY_CACHE
@ -177,6 +179,16 @@
# define HASH_TABLE_KEY_FALLBACK ((hash_key)-2)
#endif
/**
* Ensure duplicate entries aren't added to temporary hash table
* needed for arrays where many values match (an array of booleans all true/false for e.g.).
*
* Without this, a huge number of duplicates are added a single bucket, making hash lookups slow.
* While de-duplication adds some cost, it's only performed with other chunks in the same bucket
* so cases when all chunks are unique will quickly detect and exit the `memcmp` in most cases.
*/
#define USE_HASH_TABLE_DEDUPLICATE
/**
* How much larger the table is then the total number of chunks.
*/
@ -209,7 +221,7 @@
# define BCHUNK_SIZE_MAX_MUL 2
#endif /* USE_MERGE_CHUNKS */
/** Slow (keep disabled), but handy for debugging */
/** Slow (keep disabled), but handy for debugging. */
// #define USE_VALIDATE_LIST_SIZE
// #define USE_VALIDATE_LIST_DATA_PARTIAL
@ -228,9 +240,9 @@ typedef struct BArrayInfo {
size_t chunk_stride;
// uint chunk_count; /* UNUSED (other values are derived from this) */
/* pre-calculated */
/* Pre-calculated. */
size_t chunk_byte_size;
/* min/max limits (inclusive) */
/* Min/max limits (inclusive) */
size_t chunk_byte_size_min;
size_t chunk_byte_size_max;
/**
@ -245,19 +257,19 @@ typedef struct BArrayInfo {
} BArrayInfo;
typedef struct BArrayMemory {
BLI_mempool *chunk_list; /* BChunkList */
BLI_mempool *chunk_ref; /* BChunkRef */
BLI_mempool *chunk; /* BChunk */
BLI_mempool *chunk_list; /* #BChunkList. */
BLI_mempool *chunk_ref; /* #BChunkRef. */
BLI_mempool *chunk; /* #BChunk. */
} BArrayMemory;
/**
* Main storage for all states
* Main storage for all states.
*/
struct BArrayStore {
/* static */
/* Static. */
BArrayInfo info;
/* memory storage */
/** Memory storage. */
BArrayMemory memory;
/**
@ -277,14 +289,14 @@ struct BArrayStore {
* it makes it easier to trace invalid usage, so leave as-is for now.
*/
struct BArrayState {
/** linked list in #BArrayStore.states */
/** linked list in #BArrayStore.states. */
struct BArrayState *next, *prev;
/** Shared chunk list, this reference must hold a #BChunkList::users. */
struct BChunkList *chunk_list;
};
typedef struct BChunkList {
/** List of #BChunkRef's */
/** List of #BChunkRef's. */
ListBase chunk_refs;
/** Result of `BLI_listbase_count(chunks)`, store for reuse. */
uint chunk_refs_len;
@ -367,13 +379,23 @@ static void bchunk_decref(BArrayMemory *bs_mem, BChunk *chunk)
}
}
BLI_INLINE bool bchunk_data_compare_unchecked(const BChunk *chunk,
const uchar *data_base,
const size_t data_base_len,
const size_t offset)
{
BLI_assert(offset + (size_t)chunk->data_len <= data_base_len);
UNUSED_VARS_NDEBUG(data_base_len);
return (memcmp(&data_base[offset], chunk->data, chunk->data_len) == 0);
}
static bool bchunk_data_compare(const BChunk *chunk,
const uchar *data_base,
const size_t data_base_len,
const size_t offset)
{
if (offset + (size_t)chunk->data_len <= data_base_len) {
return (memcmp(&data_base[offset], chunk->data, chunk->data_len) == 0);
return bchunk_data_compare_unchecked(chunk, data_base, data_base_len, offset);
}
return false;
}
@ -446,15 +468,15 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
{
BChunkRef *cref = chunk_list->chunk_refs.last;
if (cref && cref->prev) {
/* both are decref'd after use (end of this block) */
/* Both are decref'd after use (end of this block) */
BChunk *chunk_curr = cref->link;
BChunk *chunk_prev = cref->prev->link;
if (MIN2(chunk_prev->data_len, chunk_curr->data_len) < info->chunk_byte_size_min) {
const size_t data_merge_len = chunk_prev->data_len + chunk_curr->data_len;
/* we could pass, but no need */
/* We could pass, but no need. */
if (data_merge_len <= info->chunk_byte_size_max) {
/* we have enough space to merge */
/* We have enough space to merge. */
/* Remove last from the linked-list. */
BLI_assert(chunk_list->chunk_refs.last != chunk_list->chunk_refs.first);
@ -478,10 +500,10 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
*
* if we do, the code below works (test by setting 'BCHUNK_SIZE_MAX_MUL = 1.2') */
/* keep chunk on the left hand side a regular size */
/* Keep chunk on the left hand side a regular size. */
const size_t split = info->chunk_byte_size;
/* merge and split */
/* Merge and split. */
const size_t data_prev_len = split;
const size_t data_curr_len = data_merge_len - split;
uchar *data_prev = MEM_mallocN(data_prev_len, __func__);
@ -490,10 +512,10 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
if (data_prev_len <= chunk_prev->data_len) {
const size_t data_curr_shrink_len = chunk_prev->data_len - data_prev_len;
/* setup 'data_prev' */
/* Setup 'data_prev'. */
memcpy(data_prev, chunk_prev->data, data_prev_len);
/* setup 'data_curr' */
/* Setup 'data_curr'. */
memcpy(data_curr, &chunk_prev->data[data_prev_len], data_curr_shrink_len);
memcpy(&data_curr[data_curr_shrink_len], chunk_curr->data, chunk_curr->data_len);
}
@ -503,11 +525,11 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
const size_t data_prev_grow_len = data_prev_len - chunk_prev->data_len;
/* setup 'data_prev' */
/* Setup 'data_prev'. */
memcpy(data_prev, chunk_prev->data, chunk_prev->data_len);
memcpy(&data_prev[chunk_prev->data_len], chunk_curr->data, data_prev_grow_len);
/* setup 'data_curr' */
/* Setup 'data_curr'. */
memcpy(data_curr, &chunk_curr->data[data_prev_grow_len], data_curr_len);
}
@ -518,7 +540,7 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
cref->link->users += 1;
}
/* free zero users */
/* Free zero users. */
bchunk_decref(bs_mem, chunk_curr);
bchunk_decref(bs_mem, chunk_prev);
}
@ -543,8 +565,7 @@ static void bchunk_list_calc_trim_len(const BArrayInfo *info,
size_t data_trim_len = data_len;
#ifdef USE_MERGE_CHUNKS
/* avoid creating too-small chunks
* more efficient than merging after */
/* Avoid creating too-small chunks more efficient than merging after. */
if (data_len > info->chunk_byte_size) {
data_last_chunk_len = (data_trim_len % info->chunk_byte_size);
data_trim_len = data_trim_len - data_last_chunk_len;
@ -606,7 +627,7 @@ static void bchunk_list_append_data(const BArrayInfo *info,
if (MIN2(chunk_prev->data_len, data_len) < info->chunk_byte_size_min) {
const size_t data_merge_len = chunk_prev->data_len + data_len;
/* realloc for single user */
/* Re-allocate for single user. */
if (cref->link->users == 1) {
uchar *data_merge = MEM_reallocN((void *)cref->link->data, data_merge_len);
memcpy(&data_merge[chunk_prev->data_len], data, data_len);
@ -631,7 +652,7 @@ static void bchunk_list_append_data(const BArrayInfo *info,
BChunk *chunk = bchunk_new_copydata(bs_mem, data, data_len);
bchunk_list_append_only(bs_mem, chunk_list, chunk);
/* don't run this, instead preemptively avoid creating a chunk only to merge it (above). */
/* Don't run this, instead preemptively avoid creating a chunk only to merge it (above). */
#if 0
# ifdef USE_MERGE_CHUNKS
bchunk_list_ensure_min_size_last(info, bs_mem, chunk_list);
@ -678,8 +699,7 @@ static void bchunk_list_append_data_n(const BArrayInfo *info,
}
}
else {
/* if we didn't write any chunks previously,
* we may need to merge with the last. */
/* If we didn't write any chunks previously, we may need to merge with the last. */
if (data_last_chunk_len) {
bchunk_list_append_data(info, bs_mem, chunk_list, data, data_last_chunk_len);
// i_prev = data_len; /* UNUSED */
@ -740,7 +760,7 @@ static void bchunk_list_fill_from_array(const BArrayInfo *info,
}
#endif
/* works but better avoid redundant re-alloc */
/* Works but better avoid redundant re-allocation. */
#if 0
# ifdef USE_MERGE_CHUNKS
bchunk_list_ensure_min_size_last(info, bs_mem, chunk_list);
@ -754,7 +774,7 @@ static void bchunk_list_fill_from_array(const BArrayInfo *info,
/** \} */
/*
* Internal Table Lookup Functions
* Internal Table Lookup Functions.
*/
/* -------------------------------------------------------------------- */
@ -770,7 +790,7 @@ BLI_INLINE hash_key hash_data_single(const uchar p)
return ((HASH_INIT << 5) + HASH_INIT) + (hash_key)(*((signed char *)&p));
}
/* hash bytes, from BLI_ghashutil_strhash_n */
/* Hash bytes, from #BLI_ghashutil_strhash_n. */
static hash_key hash_data(const uchar *key, size_t n)
{
const signed char *p;
@ -797,14 +817,14 @@ static void hash_array_from_data(const BArrayInfo *info,
}
}
else {
/* fast-path for bytes */
/* Fast-path for bytes. */
for (size_t i = 0; i < data_slice_len; i++) {
hash_array[i] = hash_data_single(data_slice[i]);
}
}
}
/*
/**
* Similar to hash_array_from_data,
* but able to step into the next chunk if we run-out of data.
*/
@ -829,7 +849,7 @@ static void hash_array_from_cref(const BArrayInfo *info,
} while ((i < hash_array_len) && (cref != NULL));
/* If this isn't equal, the caller didn't properly check
* that there was enough data left in all chunks */
* that there was enough data left in all chunks. */
BLI_assert(i == hash_array_len);
}
@ -866,11 +886,11 @@ static void hash_accum_single(hash_key *hash_array, const size_t hash_array_len,
{
BLI_assert(iter_steps <= hash_array_len);
if (UNLIKELY(!(iter_steps <= hash_array_len))) {
/* while this shouldn't happen, avoid crashing */
/* While this shouldn't happen, avoid crashing. */
iter_steps = hash_array_len;
}
/* We can increase this value each step to avoid accumulating quite as much
* while getting the same results as hash_accum */
* while getting the same results as hash_accum. */
size_t iter_steps_sub = iter_steps;
while (iter_steps != 0) {
@ -886,11 +906,11 @@ static void hash_accum_single(hash_key *hash_array, const size_t hash_array_len,
static hash_key key_from_chunk_ref(const BArrayInfo *info,
const BChunkRef *cref,
/* avoid reallocating each time */
/* Avoid reallocating each time. */
hash_key *hash_store,
const size_t hash_store_len)
{
/* in C, will fill in a reusable array */
/* In C, will fill in a reusable array. */
BChunk *chunk = cref->link;
BLI_assert((info->accum_read_ahead_bytes * info->chunk_stride) != 0);
@ -901,14 +921,14 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info,
key = chunk->key;
if (key != HASH_TABLE_KEY_UNSET) {
/* Using key cache!
* avoids calculating every time */
* avoids calculating every time. */
}
else {
hash_array_from_cref(info, cref, info->accum_read_ahead_bytes, hash_store);
hash_accum_single(hash_store, hash_store_len, info->accum_steps);
key = hash_store[0];
/* cache the key */
/* Cache the key. */
if (UNLIKELY(key == HASH_TABLE_KEY_UNSET)) {
key = HASH_TABLE_KEY_FALLBACK;
}
@ -921,7 +941,7 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info,
# endif
return key;
}
/* corner case - we're too small, calculate the key each time. */
/* Corner case - we're too small, calculate the key each time. */
hash_array_from_cref(info, cref, info->accum_read_ahead_bytes, hash_store);
hash_accum_single(hash_store, hash_store_len, info->accum_steps);
@ -944,30 +964,33 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
const size_t offset,
const hash_key *table_hash_array)
{
size_t size_left = data_len - offset;
hash_key key = table_hash_array[((offset - i_table_start) / info->chunk_stride)];
size_t key_index = (size_t)(key % (hash_key)table_len);
for (const BTableRef *tref = table[key_index]; tref; tref = tref->next) {
const BChunkRef *cref = tref->cref;
const hash_key key = table_hash_array[((offset - i_table_start) / info->chunk_stride)];
const uint key_index = (uint)(key % (hash_key)table_len);
const BTableRef *tref = table[key_index];
if (tref != NULL) {
const size_t size_left = data_len - offset;
do {
const BChunkRef *cref = tref->cref;
# ifdef USE_HASH_TABLE_KEY_CACHE
if (cref->link->key == key)
if (cref->link->key == key)
# endif
{
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare(chunk_test, data, data_len, offset)) {
/* we could remove the chunk from the table, to avoid multiple hits */
return cref;
{
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare_unchecked(chunk_test, data, data_len, offset)) {
/* We could remove the chunk from the table, to avoid multiple hits. */
return cref;
}
}
}
}
} while ((tref = tref->next));
}
return NULL;
}
#else /* USE_HASH_TABLE_ACCUMULATE */
/* NON USE_HASH_TABLE_ACCUMULATE code (simply hash each chunk) */
/* NON USE_HASH_TABLE_ACCUMULATE code (simply hash each chunk). */
static hash_key key_from_chunk_ref(const BArrayInfo *info, const BChunkRef *cref)
{
@ -979,10 +1002,10 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info, const BChunkRef *cref
key = chunk->key;
if (key != HASH_TABLE_KEY_UNSET) {
/* Using key cache!
* avoids calculating every time */
* avoids calculating every time. */
}
else {
/* cache the key */
/* Cache the key. */
key = hash_data(chunk->data, data_hash_len);
if (key == HASH_TABLE_KEY_UNSET) {
key = HASH_TABLE_KEY_FALLBACK;
@ -1007,9 +1030,9 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
{
const size_t data_hash_len = BCHUNK_HASH_LEN * info->chunk_stride; /* TODO: cache. */
size_t size_left = data_len - offset;
hash_key key = hash_data(&data[offset], MIN2(data_hash_len, size_left));
size_t key_index = (size_t)(key % (hash_key)table_len);
const size_t size_left = data_len - offset;
const hash_key key = hash_data(&data[offset], MIN2(data_hash_len, size_left));
const uint key_index = (uint)(key % (hash_key)table_len);
for (BTableRef *tref = table[key_index]; tref; tref = tref->next) {
const BChunkRef *cref = tref->cref;
# ifdef USE_HASH_TABLE_KEY_CACHE
@ -1018,8 +1041,8 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
{
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare(chunk_test, data, data_len, offset)) {
/* we could remove the chunk from the table, to avoid multiple hits */
if (bchunk_data_compare_unchecked(chunk_test, data, data_len, offset)) {
/* We could remove the chunk from the table, to avoid multiple hits. */
return cref;
}
}
@ -1095,7 +1118,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#endif /* USE_FASTPATH_CHUNKS_FIRST */
/* Copy until we have a mismatch */
/* Copy until we have a mismatch. */
BChunkList *chunk_list = bchunk_list_new(bs_mem, data_len_original);
if (cref_match_first != NULL) {
size_t chunk_size_step = 0;
@ -1111,7 +1134,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
}
cref = cref->next;
}
/* happens when bytes are removed from the end of the array */
/* Happens when bytes are removed from the end of the array. */
if (chunk_size_step == data_len_original) {
return chunk_list;
}
@ -1125,17 +1148,16 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
/* ------------------------------------------------------------------------
* Fast-Path for end chunks
*
* Check for trailing chunks
* Check for trailing chunks.
*/
/* In this case use 'chunk_list_reference_last' to define the last index
* index_match_last = -1 */
* `index_match_last = -1`. */
/* warning, from now on don't use len(data)
* since we want to ignore chunks already matched */
/* Warning, from now on don't use len(data) since we want to ignore chunks already matched. */
size_t data_len = data_len_original;
#define data_len_original invalid_usage
#ifdef data_len_original /* quiet warning */
#ifdef data_len_original /* Quiet warning. */
#endif
const BChunkRef *chunk_list_reference_last = NULL;
@ -1175,7 +1197,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#ifdef USE_ALIGN_CHUNKS_TEST
if (chunk_list->total_expanded_size == chunk_list_reference->total_expanded_size) {
/* if we're already a quarter aligned */
/* If we're already a quarter aligned. */
if (data_len - i_prev <= chunk_list->total_expanded_size / 4) {
use_aligned = true;
}
@ -1189,7 +1211,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
* ----------------------- */
if (use_aligned) {
/* Copy matching chunks, creates using the same 'layout' as the reference */
/* Copy matching chunks, creates using the same 'layout' as the reference. */
const BChunkRef *cref = cref_match_first ? cref_match_first->next :
chunk_list_reference->chunk_refs.first;
while (i_prev != data_len) {
@ -1218,12 +1240,12 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
(chunk_list_reference->chunk_refs.first != NULL)) {
/* --------------------------------------------------------------------
* Non-Aligned Chunk De-Duplication */
* Non-Aligned Chunk De-Duplication. */
/* only create a table if we have at least one chunk to search
/* Only create a table if we have at least one chunk to search
* otherwise just make a new one.
*
* Support re-arranged chunks */
* Support re-arranged chunks. */
#ifdef USE_HASH_TABLE_ACCUMULATE
size_t i_table_start = i_prev;
@ -1234,7 +1256,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
hash_accum(table_hash_array, table_hash_array_len, info->accum_steps);
#else
/* dummy vars */
/* Dummy vars. */
uint i_table_start = 0;
hash_key *table_hash_array = NULL;
#endif
@ -1249,8 +1271,8 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
const size_t table_len = chunk_list_reference_remaining_len * BCHUNK_HASH_TABLE_MUL;
BTableRef **table = MEM_callocN(table_len * sizeof(*table), __func__);
/* table_make - inline
* include one matching chunk, to allow for repeating values */
/* Table_make - inline
* include one matching chunk, to allow for repeating values. */
{
#ifdef USE_HASH_TABLE_ACCUMULATE
const size_t hash_store_len = info->accum_read_ahead_len;
@ -1292,13 +1314,41 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
hash_store_len
#endif
);
size_t key_index = (size_t)(key % (hash_key)table_len);
const uint key_index = (uint)(key % (hash_key)table_len);
BTableRef *tref_prev = table[key_index];
BLI_assert(table_ref_stack_n < chunk_list_reference_remaining_len);
BTableRef *tref = &table_ref_stack[table_ref_stack_n++];
tref->cref = cref;
tref->next = tref_prev;
table[key_index] = tref;
#ifdef USE_HASH_TABLE_DEDUPLICATE
bool is_duplicate = false;
if (tref_prev) {
const BChunk *chunk_a = cref->link;
const BTableRef *tref = tref_prev;
do {
const BChunk *chunk_b = tref->cref->link;
# ifdef USE_HASH_TABLE_KEY_CACHE
if (key == chunk_b->key)
# endif
{
/* Not an error, it just isn't expected, in the case chunks are shared
* matching chunks should also be skipped to avoid a redundant `memcmp` call. */
BLI_assert(chunk_a != chunk_b);
if (chunk_a->data_len == chunk_b->data_len) {
if (memcmp(chunk_a->data, chunk_b->data, chunk_a->data_len) == 0) {
is_duplicate = true;
break;
}
}
}
} while ((tref = tref->next));
}
if (!is_duplicate)
#endif /* USE_HASH_TABLE_DEDUPLICATE */
{
BTableRef *tref = &table_ref_stack[table_ref_stack_n++];
tref->cref = cref;
tref->next = tref_prev;
table[key_index] = tref;
}
chunk_list_reference_bytes_remaining -= cref->link->data_len;
cref = cref->next;
@ -1310,7 +1360,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
MEM_freeN(hash_store);
#endif
}
/* done making the table */
/* Done making the table. */
BLI_assert(i_prev <= data_len);
for (size_t i = i_prev; i < data_len;) {
@ -1325,7 +1375,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
i_prev = i;
}
/* now add the reference chunk */
/* Now add the reference chunk. */
{
BChunk *chunk_found = cref_found->link;
i += chunk_found->data_len;
@ -1336,7 +1386,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
ASSERT_CHUNKLIST_SIZE(chunk_list, i_prev);
ASSERT_CHUNKLIST_DATA(chunk_list, data);
/* its likely that the next chunk in the list will be a match, so check it! */
/* Its likely that the next chunk in the list will be a match, so check it! */
while (!ELEM(cref_found->next, NULL, chunk_list_reference_last)) {
cref_found = cref_found->next;
BChunk *chunk_found = cref_found->link;
@ -1346,7 +1396,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
* repeating memory where it would be useful to re-use chunks. */
i += chunk_found->data_len;
bchunk_list_append(info, bs_mem, chunk_list, chunk_found);
/* chunk_found may be freed! */
/* Chunk_found may be freed! */
i_prev = i;
BLI_assert(i_prev <= data_len);
ASSERT_CHUNKLIST_SIZE(chunk_list, i_prev);
@ -1389,14 +1439,13 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#ifdef USE_FASTPATH_CHUNKS_LAST
if (chunk_list_reference_last != NULL) {
/* write chunk_list_reference_last since it hasn't been written yet */
/* Write chunk_list_reference_last since it hasn't been written yet. */
const BChunkRef *cref = chunk_list_reference_last;
while (cref != NULL) {
BChunk *chunk = cref->link;
// BLI_assert(bchunk_data_compare(chunk, data, data_len, i_prev));
i_prev += chunk->data_len;
/* use simple since we assume the references chunks
* have already been sized correctly. */
/* Use simple since we assume the references chunks have already been sized correctly. */
bchunk_list_append_only(bs_mem, chunk_list, chunk);
ASSERT_CHUNKLIST_DATA(chunk_list, data);
cref = cref->next;
@ -1408,7 +1457,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
BLI_assert(i_prev == data_len_original);
/* check we're the correct size and that we didn't accidentally modify the reference */
/* Check we're the correct size and that we didn't accidentally modify the reference. */
ASSERT_CHUNKLIST_SIZE(chunk_list, data_len_original);
ASSERT_CHUNKLIST_SIZE(chunk_list_reference, chunk_list_reference->total_expanded_size);
@ -1416,7 +1465,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
return chunk_list;
}
/* end private API */
/* End private API. */
/** \} */
@ -1470,7 +1519,7 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
bs->memory.chunk_list = BLI_mempool_create(sizeof(BChunkList), 0, 512, BLI_MEMPOOL_NOP);
bs->memory.chunk_ref = BLI_mempool_create(sizeof(BChunkRef), 0, 512, BLI_MEMPOOL_NOP);
/* allow iteration to simplify freeing, otherwise its not needed
/* Allow iteration to simplify freeing, otherwise its not needed
* (we could loop over all states as an alternative). */
bs->memory.chunk = BLI_mempool_create(sizeof(BChunk), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
@ -1481,7 +1530,7 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
static void array_store_free_data(BArrayStore *bs)
{
/* free chunk data */
/* Free chunk data. */
{
BLI_mempool_iter iter;
BChunk *chunk;
@ -1492,7 +1541,7 @@ static void array_store_free_data(BArrayStore *bs)
}
}
/* free states */
/* Free states. */
for (BArrayState *state = bs->states.first, *state_next; state; state = state_next) {
state_next = state->next;
MEM_freeN(state);
@ -1560,7 +1609,7 @@ BArrayState *BLI_array_store_state_add(BArrayStore *bs,
const size_t data_len,
const BArrayState *state_reference)
{
/* ensure we're aligned to the stride */
/* Ensure we're aligned to the stride. */
BLI_assert((data_len % bs->info.chunk_stride) == 0);
#ifdef USE_PARANOID_CHECKS
@ -1575,7 +1624,7 @@ BArrayState *BLI_array_store_state_add(BArrayStore *bs,
&bs->memory,
(const uchar *)data,
data_len,
/* re-use reference chunks */
/* Re-use reference chunks. */
state_reference->chunk_list);
}
else {
@ -1652,7 +1701,7 @@ void *BLI_array_store_state_data_get_alloc(BArrayState *state, size_t *r_data_le
/** \name Debugging API (for testing).
* \{ */
/* only for test validation */
/* Only for test validation. */
static size_t bchunk_list_size(const BChunkList *chunk_list)
{
size_t total_expanded_size = 0;
@ -1680,7 +1729,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
}
#ifdef USE_MERGE_CHUNKS
/* ensure we merge all chunks that could be merged */
/* Ensure we merge all chunks that could be merged. */
if (chunk_list->total_expanded_size > bs->info.chunk_byte_size_min) {
LISTBASE_FOREACH (BChunkRef *, cref, &chunk_list->chunk_refs) {
if (cref->link->data_len < bs->info.chunk_byte_size_min) {
@ -1719,7 +1768,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
} \
((void)0)
/* count chunk_list's */
/* Count chunk_list's. */
GHash *chunk_list_map = BLI_ghash_ptr_new(__func__);
GHash *chunk_map = BLI_ghash_ptr_new(__func__);
@ -1740,7 +1789,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
goto user_finally;
}
/* count chunk's */
/* Count chunk's. */
GHASH_ITER (gh_iter, chunk_list_map) {
const struct BChunkList *chunk_list = BLI_ghashIterator_getKey(&gh_iter);
LISTBASE_FOREACH (const BChunkRef *, cref, &chunk_list->chunk_refs) {

View File

@ -774,7 +774,7 @@ void blo_do_versions_250(FileData *fd, Library *lib, Main *bmain)
part->clength = 1.0f;
}
/* set old pointcaches to have disk cache flag */
/* Set old point-caches to have disk cache flag. */
for (ob = bmain->objects.first; ob; ob = ob->id.next) {
#if 0

View File

@ -1164,13 +1164,19 @@ void DepsgraphRelationBuilder::build_object_pointcache(Object *object)
OperationKey transform_key(
&object->id, NodeType::TRANSFORM, OperationCode::TRANSFORM_SIMULATION_INIT);
add_relation(point_cache_key, transform_key, "Point Cache -> Rigid Body");
/* Manual changes to effectors need to invalidate simulation. */
OperationKey rigidbody_rebuild_key(
&scene_->id, NodeType::TRANSFORM, OperationCode::RIGIDBODY_REBUILD);
add_relation(rigidbody_rebuild_key,
point_cache_key,
"Rigid Body Rebuild -> Point Cache Reset",
RELATION_FLAG_FLUSH_USER_EDIT_ONLY);
/* Manual changes to effectors need to invalidate simulation.
*
* Don't add this relation for the render pipeline dependency graph as it does not contain
* rigid body simulation. Good thing is that there are no user edits in such dependency
* graph, so the relation is not really needed in it. */
if (!graph_->is_render_pipeline_depsgraph) {
OperationKey rigidbody_rebuild_key(
&scene_->id, NodeType::TRANSFORM, OperationCode::RIGIDBODY_REBUILD);
add_relation(rigidbody_rebuild_key,
point_cache_key,
"Rigid Body Rebuild -> Point Cache Reset",
RELATION_FLAG_FLUSH_USER_EDIT_ONLY);
}
}
else {
flag = FLAG_GEOMETRY;

View File

@ -222,10 +222,10 @@ static void basic_cache_populate(void *vedata, Object *ob)
}
}
if (G.debug_value == 889 && ob->sculpt && ob->sculpt->pbvh) {
if (G.debug_value == 889 && ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
int debug_node_nr = 0;
DRW_debug_modelmat(ob->object_to_world);
BKE_pbvh_draw_debug_cb(ob->sculpt->pbvh, DRW_sculpt_debug_cb, &debug_node_nr);
BKE_pbvh_draw_debug_cb(BKE_object_sculpt_pbvh_get(ob), DRW_sculpt_debug_cb, &debug_node_nr);
}
}
}

View File

@ -814,8 +814,8 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,
bool use_sculpt_pbvh = BKE_sculptsession_use_pbvh_draw(ob, draw_ctx->rv3d) &&
!DRW_state_is_image_render();
if (ob->sculpt && ob->sculpt->pbvh) {
BKE_pbvh_is_drawing_set(ob->sculpt->pbvh, use_sculpt_pbvh);
if (ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
BKE_pbvh_is_drawing_set(BKE_object_sculpt_pbvh_get(ob), use_sculpt_pbvh);
}
/* First get materials for this mesh. */
@ -887,10 +887,11 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,
}
}
if (G.debug_value == 889 && ob->sculpt && ob->sculpt->pbvh) {
if (G.debug_value == 889 && ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
int debug_node_nr = 0;
DRW_debug_modelmat(ob->object_to_world);
BKE_pbvh_draw_debug_cb(ob->sculpt->pbvh, DRW_sculpt_debug_cb, &debug_node_nr);
BKE_pbvh_draw_debug_cb(
BKE_object_sculpt_pbvh_get(ob), DRW_sculpt_debug_cb, &debug_node_nr);
}
}

View File

@ -31,7 +31,10 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_material_empty_base_volume)
/**** MATERIAL VERTEX SHADER PERMUTATIONS ****/
/** -- Volumetric -- **/
/* -------------------------------------------------------------------- */
/** \name Volumetric
* \{ */
GPU_SHADER_CREATE_INFO(eevee_legacy_material_volumetric_vert)
.additional_info("eevee_legacy_material_empty_base_volume")
.vertex_out(legacy_volume_vert_geom_iface)
@ -45,7 +48,12 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_material_volumetric_vert_no_geom)
.additional_info("draw_resource_id_varying");
#endif
/** -- World Shader -- **/
/** \} */
/* -------------------------------------------------------------------- */
/** \name World Shader
* \{ */
GPU_SHADER_CREATE_INFO(eevee_legacy_material_world_vert)
.additional_info("eevee_legacy_material_empty_base")
.additional_info("eevee_legacy_common_utiltex_lib")
@ -54,7 +62,12 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_material_world_vert)
.additional_info("draw_resource_id_varying")
.vertex_in(0, Type::VEC2, "pos");
/** -- Surface Shader -- **/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Surface Shader
* \{ */
GPU_SHADER_CREATE_INFO(eevee_legacy_material_surface_vert_common)
.additional_info("eevee_legacy_material_empty_base")
.additional_info("draw_resource_id_varying")
@ -81,7 +94,13 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_mateiral_surface_vert_pointcloud)
.auto_resource_location(true);
/**** MATERIAL GEOMETRY SHADER PERMUTATIONS ****/
/** -- Volumetric -- **/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Volumetric
* \{ */
GPU_SHADER_CREATE_INFO(eevee_legacy_material_volumetric_geom)
.additional_info("eevee_legacy_common_lib")
.additional_info("draw_view")
@ -89,9 +108,14 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_material_volumetric_geom)
.geometry_layout(PrimitiveIn::TRIANGLES, PrimitiveOut::TRIANGLE_STRIP, 3)
.additional_info("draw_resource_id_varying");
/** \} */
/**** MATERIAL FRAGMENT SHADER PERMUTATIONS ****/
/** -- Volumetric Shader -- **/
/* -------------------------------------------------------------------- */
/** \name Volumetric Shader
* \{ */
GPU_SHADER_CREATE_INFO(eevee_legacy_material_volumetric_frag)
.additional_info("eevee_legacy_common_lib")
.additional_info("draw_view")
@ -102,7 +126,11 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_material_volumetric_frag)
.fragment_out(2, Type::VEC4, "volumeEmissive")
.fragment_out(3, Type::VEC4, "volumePhase");
/** -- Prepass Shader -- **/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Pre-pass Shader
* \{ */
/* Common info for all `prepass_frag` variants. */
GPU_SHADER_CREATE_INFO(eevee_legacy_material_prepass_frag_common)
@ -148,7 +176,11 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_material_prepass_frag_alpha_hash_pointcloud)
.additional_info("eevee_legacy_material_prepass_frag_alpha_hash_common")
.additional_info("draw_pointcloud");
/** -- Surface Shader -- **/
/** \} */
/* -------------------------------------------------------------------- */
/** \name Surface Shader
* \{ */
GPU_SHADER_CREATE_INFO(eevee_legacy_material_surface_frag_common)
.additional_info("eevee_legacy_common_lib")
@ -173,6 +205,8 @@ GPU_SHADER_CREATE_INFO(eevee_legacy_material_surface_frag_alpha_blend)
.fragment_out(0, Type::VEC4, "outRadiance", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "outTransmittance", DualBlend::SRC_1);
/** \} */
/* hair_refine_shader_transform_feedback_create */
GPU_SHADER_INTERFACE_INFO(legacy_hair_refine_shader_transform_feedback_iface, "")

View File

@ -3,6 +3,11 @@
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_uniforms_lib.glsl)
/* Fix for #104266 wherein AMD GPUs running Metal erroneously discard a successful hit. */
#if defined(GPU_METAL) && defined(GPU_ATI)
# define METAL_AMD_RAYTRACE_WORKAROUND 1
#endif
/**
* Screen-Space Raytracing functions.
*/
@ -129,6 +134,9 @@ bool raytrace(Ray ray,
/* Cross at least one pixel. */
float t = 1.001, time = 1.001;
bool hit = false;
#ifdef METAL_AMD_RAYTRACE_WORKAROUND
bool hit_failsafe = true;
#endif
const float max_steps = 255.0;
for (float iter = 1.0; !hit && (time < ssray.max_time) && (iter < max_steps); iter++) {
float stride = 1.0 + iter * params.trace_quality;
@ -148,17 +156,36 @@ bool raytrace(Ray ray,
hit = (delta < 0.0);
/* ... and above it with the added thickness. */
hit = hit && (delta > ss_p.z - ss_p.w || abs(delta) < abs(ssray.direction.z * stride * 2.0));
#ifdef METAL_AMD_RAYTRACE_WORKAROUND
/* For workaround, perform discard backface and background check only within
* the iteration where the first successful ray intersection is registered.
* We flag failures to discard ray hits later. */
bool hit_valid = !(discard_backface && prev_delta < 0.0) && (depth_sample != 1.0);
if (hit && !hit_valid) {
hit_failsafe = false;
}
#endif
}
#ifndef METAL_AMD_RAYTRACE_WORKAROUND
/* Discard back-face hits. */
hit = hit && !(discard_backface && prev_delta < 0.0);
/* Reject hit if background. */
hit = hit && (depth_sample != 1.0);
#endif
/* Refine hit using intersection between the sampled heightfield and the ray.
* This simplifies nicely to this single line. */
time = mix(prev_time, time, saturate(prev_delta / (prev_delta - delta)));
hit_position = ssray.origin.xyz + ssray.direction.xyz * time;
#ifdef METAL_AMD_RAYTRACE_WORKAROUND
/* Check failed ray flag to discard bad hits. */
if (!hit_failsafe) {
return false;
}
#endif
return hit;
}

View File

@ -110,7 +110,7 @@
#define GBUF_COLOR_SLOT RBUFS_DIFF_COLOR_SLOT
/* Uniform Buffers. */
/* Only during prepass. */
/* Only during pre-pass. */
#define VELOCITY_CAMERA_PREV_BUF 3
#define VELOCITY_CAMERA_CURR_BUF 4
#define VELOCITY_CAMERA_NEXT_BUF 5

View File

@ -161,8 +161,6 @@ class DeferredLayer {
class DeferredPipeline {
private:
Instance &inst_;
/* Gbuffer filling passes. We could have an arbitrary number of them but for now we just have
* a hardcoded number of them. */
DeferredLayer opaque_layer_;
@ -171,7 +169,7 @@ class DeferredPipeline {
public:
DeferredPipeline(Instance &inst)
: inst_(inst), opaque_layer_(inst), refraction_layer_(inst), volumetric_layer_(inst){};
: opaque_layer_(inst), refraction_layer_(inst), volumetric_layer_(inst){};
void begin_sync();
void end_sync();

View File

@ -1,3 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "eevee_defines.hh"
#include "gpu_shader_create_info.hh"

View File

@ -6,7 +6,7 @@
/* -------------------------------------------------------------------- */
/** \name Surface Velocity
*
* Combined with the depth prepass shader.
* Combined with the depth pre-pass shader.
* Outputs the view motion vectors for animated objects.
* \{ */

View File

@ -1165,7 +1165,7 @@ OVERLAY_InstanceFormats *OVERLAY_shader_instance_formats_get(void)
{
{"boneStart", DRW_ATTR_FLOAT, 3},
{"boneEnd", DRW_ATTR_FLOAT, 3},
{"wireColor", DRW_ATTR_FLOAT, 4}, /* TODO: uchar color. */
{"wireColor", DRW_ATTR_FLOAT, 4}, /* TODO: `uchar` color. */
{"boneColor", DRW_ATTR_FLOAT, 4},
{"headColor", DRW_ATTR_FLOAT, 4},
{"tailColor", DRW_ATTR_FLOAT, 4},

View File

@ -277,8 +277,8 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
* of vertex color arrays from being sent to the GPU (e.g.
* when switching from eevee to workbench).
*/
if (ob->sculpt && ob->sculpt->pbvh) {
BKE_pbvh_is_drawing_set(ob->sculpt->pbvh, is_sculpt_pbvh);
if (ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
BKE_pbvh_is_drawing_set(BKE_object_sculpt_pbvh_get(ob), is_sculpt_pbvh);
}
bool has_color = false;
@ -334,7 +334,7 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
}
if (is_sculpt_pbvh && color_type == V3D_SHADING_TEXTURE_COLOR &&
BKE_pbvh_type(ob->sculpt->pbvh) != PBVH_FACES) {
BKE_pbvh_type(BKE_object_sculpt_pbvh_get(ob)) != PBVH_FACES) {
/* Force use of material color for sculpt. */
color_type = V3D_SHADING_MATERIAL_COLOR;
}

View File

@ -4805,7 +4805,7 @@ static void achannel_setting_slider_cb(bContext *C, void *id_poin, void *fcu_poi
/* try to resolve the path stored in the F-Curve */
if (RNA_path_resolve_property(&id_ptr, fcu->rna_path, &ptr, &prop)) {
/* set the special 'replace' flag if on a keyframe */
if (fcurve_frame_has_keyframe(fcu, cfra, 0)) {
if (fcurve_frame_has_keyframe(fcu, cfra)) {
flag |= INSERTKEY_REPLACE;
}
@ -4867,7 +4867,7 @@ static void achannel_setting_slider_shapekey_cb(bContext *C, void *key_poin, voi
FCurve *fcu = ED_action_fcurve_ensure(bmain, act, NULL, &ptr, rna_path, 0);
/* set the special 'replace' flag if on a keyframe */
if (fcurve_frame_has_keyframe(fcu, remapped_frame, 0)) {
if (fcurve_frame_has_keyframe(fcu, remapped_frame)) {
flag |= INSERTKEY_REPLACE;
}
@ -4927,7 +4927,7 @@ static void achannel_setting_slider_nla_curve_cb(bContext *C,
if (fcu && prop) {
/* set the special 'replace' flag if on a keyframe */
if (fcurve_frame_has_keyframe(fcu, cfra, 0)) {
if (fcurve_frame_has_keyframe(fcu, cfra)) {
flag |= INSERTKEY_REPLACE;
}

View File

@ -55,6 +55,106 @@
#include "WM_api.h"
#include "WM_types.h"
/* -------------------------------------------------------------------- */
/** \name Channel helper functions
* \{ */
static bool get_normalized_fcurve_bounds(FCurve *fcu,
bAnimContext *ac,
const bAnimListElem *ale,
const bool include_handles,
const float range[2],
rctf *r_bounds)
{
const bool fcu_selection_only = false;
const bool found_bounds = BKE_fcurve_calc_bounds(
fcu, fcu_selection_only, include_handles, range, r_bounds);
if (!found_bounds) {
return false;
}
const short mapping_flag = ANIM_get_normalization_flags(ac);
float offset;
const float unit_fac = ANIM_unit_mapping_get_factor(
ac->scene, ale->id, fcu, mapping_flag, &offset);
r_bounds->ymin = (r_bounds->ymin + offset) * unit_fac;
r_bounds->ymax = (r_bounds->ymax + offset) * unit_fac;
const float min_height = 0.01f;
const float height = BLI_rctf_size_y(r_bounds);
if (height < min_height) {
r_bounds->ymin -= (min_height - height) / 2;
r_bounds->ymax += (min_height - height) / 2;
}
return true;
}
static bool get_gpencil_bounds(bGPDlayer *gpl, const float range[2], rctf *r_bounds)
{
bool found_start = false;
int start_frame = 0;
int end_frame = 1;
LISTBASE_FOREACH (bGPDframe *, gpf, &gpl->frames) {
if (gpf->framenum < range[0]) {
continue;
}
if (gpf->framenum > range[1]) {
break;
}
if (!found_start) {
start_frame = gpf->framenum;
found_start = true;
}
end_frame = gpf->framenum;
}
r_bounds->xmin = start_frame;
r_bounds->xmax = end_frame;
r_bounds->ymin = 0;
r_bounds->ymax = 1;
return found_start;
}
static bool get_channel_bounds(bAnimContext *ac,
bAnimListElem *ale,
const float range[2],
const bool include_handles,
rctf *r_bounds)
{
bool found_bounds = false;
switch (ale->datatype) {
case ALE_GPFRAME: {
bGPDlayer *gpl = (bGPDlayer *)ale->data;
found_bounds = get_gpencil_bounds(gpl, range, r_bounds);
break;
}
case ALE_FCURVE: {
FCurve *fcu = (FCurve *)ale->key_data;
found_bounds = get_normalized_fcurve_bounds(fcu, ac, ale, include_handles, range, r_bounds);
break;
}
}
return found_bounds;
}
/* Pad the given rctf with regions that could block the view.
* For example Markers and Time Scrubbing. */
static void add_region_padding(bContext *C, bAnimContext *ac, rctf *bounds)
{
BLI_rctf_scale(bounds, 1.1f);
const float pad_top = UI_TIME_SCRUB_MARGIN_Y;
const float pad_bottom = BLI_listbase_is_empty(ED_context_get_markers(C)) ?
V2D_SCROLL_HANDLE_HEIGHT :
UI_MARKER_MARGIN_Y;
BLI_rctf_pad_y(bounds, ac->region->winy, pad_bottom, pad_top);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public Channel Selection API
* \{ */
@ -3643,87 +3743,6 @@ static void ANIM_OT_channel_select_keys(wmOperatorType *ot)
/** \name View Channel Operator
* \{ */
static bool get_normalized_fcurve_bounds(FCurve *fcu,
bAnimContext *ac,
const bAnimListElem *ale,
const bool include_handles,
const float range[2],
rctf *r_bounds)
{
const bool fcu_selection_only = false;
const bool found_bounds = BKE_fcurve_calc_bounds(
fcu, fcu_selection_only, include_handles, range, r_bounds);
if (!found_bounds) {
return false;
}
const short mapping_flag = ANIM_get_normalization_flags(ac);
float offset;
const float unit_fac = ANIM_unit_mapping_get_factor(
ac->scene, ale->id, fcu, mapping_flag, &offset);
r_bounds->ymin = (r_bounds->ymin + offset) * unit_fac;
r_bounds->ymax = (r_bounds->ymax + offset) * unit_fac;
const float min_height = 0.01f;
const float height = BLI_rctf_size_y(r_bounds);
if (height < min_height) {
r_bounds->ymin -= (min_height - height) / 2;
r_bounds->ymax += (min_height - height) / 2;
}
return true;
}
static bool get_gpencil_bounds(bGPDlayer *gpl, const float range[2], rctf *r_bounds)
{
bool found_start = false;
int start_frame = 0;
int end_frame = 1;
LISTBASE_FOREACH (bGPDframe *, gpf, &gpl->frames) {
if (gpf->framenum < range[0]) {
continue;
}
if (gpf->framenum > range[1]) {
break;
}
if (!found_start) {
start_frame = gpf->framenum;
found_start = true;
}
end_frame = gpf->framenum;
}
r_bounds->xmin = start_frame;
r_bounds->xmax = end_frame;
r_bounds->ymin = 0;
r_bounds->ymax = 1;
return found_start;
}
static bool get_channel_bounds(bAnimContext *ac,
bAnimListElem *ale,
const float range[2],
const bool include_handles,
rctf *r_bounds)
{
bool found_bounds = false;
switch (ale->datatype) {
case ALE_GPFRAME: {
bGPDlayer *gpl = (bGPDlayer *)ale->data;
found_bounds = get_gpencil_bounds(gpl, range, r_bounds);
break;
}
case ALE_FCURVE: {
FCurve *fcu = (FCurve *)ale->key_data;
found_bounds = get_normalized_fcurve_bounds(fcu, ac, ale, include_handles, range, r_bounds);
break;
}
}
return found_bounds;
}
static void get_view_range(Scene *scene, const bool use_preview_range, float r_range[2])
{
if (use_preview_range && scene->r.flag & SCER_PRV_RANGE) {
@ -3736,19 +3755,6 @@ static void get_view_range(Scene *scene, const bool use_preview_range, float r_r
}
}
/* Pad the given rctf with regions that could block the view.
* For example Markers and Time Scrubbing. */
static void add_region_padding(bContext *C, bAnimContext *ac, rctf *bounds)
{
BLI_rctf_scale(bounds, 1.1f);
const float pad_top = UI_TIME_SCRUB_MARGIN_Y;
const float pad_bottom = BLI_listbase_is_empty(ED_context_get_markers(C)) ?
V2D_SCROLL_HANDLE_HEIGHT :
UI_MARKER_MARGIN_Y;
BLI_rctf_pad_y(bounds, ac->region->winy, pad_bottom, pad_top);
}
static int graphkeys_view_selected_channels_exec(bContext *C, wmOperator *op)
{
bAnimContext ac;

View File

@ -2852,7 +2852,7 @@ bool autokeyframe_cfra_can_key(const Scene *scene, ID *id)
* For whole block, only key if there's a keyframe on that frame already
* This is a valid assumption when we're blocking + tweaking
*/
return id_frame_has_keyframe(id, cfra, ANIMFILTER_KEYS_LOCAL);
return id_frame_has_keyframe(id, cfra);
}
/* Normal Mode (or treat as being normal mode):
@ -2871,15 +2871,14 @@ bool autokeyframe_cfra_can_key(const Scene *scene, ID *id)
/* --------------- API/Per-Datablock Handling ------------------- */
bool fcurve_frame_has_keyframe(const FCurve *fcu, float frame, short filter)
bool fcurve_frame_has_keyframe(const FCurve *fcu, float frame)
{
/* quick sanity check */
if (ELEM(NULL, fcu, fcu->bezt)) {
return false;
}
/* We either include all regardless of muting, or only non-muted. */
if ((filter & ANIMFILTER_KEYS_MUTED) || (fcu->flag & FCURVE_MUTED) == 0) {
if ((fcu->flag & FCURVE_MUTED) == 0) {
bool replace;
int i = BKE_fcurve_bezt_binarysearch_index(fcu->bezt, frame, fcu->totvert, &replace);
@ -2926,7 +2925,7 @@ bool fcurve_is_changed(PointerRNA ptr,
* Since we're only concerned whether a keyframe exists,
* we can simply loop until a match is found.
*/
static bool action_frame_has_keyframe(bAction *act, float frame, short filter)
static bool action_frame_has_keyframe(bAction *act, float frame)
{
FCurve *fcu;
@ -2935,8 +2934,7 @@ static bool action_frame_has_keyframe(bAction *act, float frame, short filter)
return false;
}
/* if only check non-muted, check if muted */
if ((filter & ANIMFILTER_KEYS_MUTED) || (act->flag & ACT_MUTED)) {
if (act->flag & ACT_MUTED) {
return false;
}
@ -2946,7 +2944,7 @@ static bool action_frame_has_keyframe(bAction *act, float frame, short filter)
for (fcu = act->curves.first; fcu; fcu = fcu->next) {
/* only check if there are keyframes (currently only of type BezTriple) */
if (fcu->bezt && fcu->totvert) {
if (fcurve_frame_has_keyframe(fcu, frame, filter)) {
if (fcurve_frame_has_keyframe(fcu, frame)) {
return true;
}
}
@ -2957,7 +2955,7 @@ static bool action_frame_has_keyframe(bAction *act, float frame, short filter)
}
/* Checks whether an Object has a keyframe for a given frame */
static bool object_frame_has_keyframe(Object *ob, float frame, short filter)
static bool object_frame_has_keyframe(Object *ob, float frame)
{
/* error checking */
if (ob == NULL) {
@ -2972,59 +2970,18 @@ static bool object_frame_has_keyframe(Object *ob, float frame, short filter)
*/
float ob_frame = BKE_nla_tweakedit_remap(ob->adt, frame, NLATIME_CONVERT_UNMAP);
if (action_frame_has_keyframe(ob->adt->action, ob_frame, filter)) {
if (action_frame_has_keyframe(ob->adt->action, ob_frame)) {
return true;
}
}
/* Try shape-key keyframes (if available, and allowed by filter). */
if (!(filter & ANIMFILTER_KEYS_LOCAL) && !(filter & ANIMFILTER_KEYS_NOSKEY)) {
Key *key = BKE_key_from_object(ob);
/* Shape-keys can have keyframes ('Relative Shape Keys')
* or depend on time (old 'Absolute Shape Keys'). */
/* 1. test for relative (with keyframes) */
if (id_frame_has_keyframe((ID *)key, frame, filter)) {
return true;
}
/* 2. test for time */
/* TODO: yet to be implemented (this feature may evolve before then anyway). */
}
/* try materials */
if (!(filter & ANIMFILTER_KEYS_LOCAL) && !(filter & ANIMFILTER_KEYS_NOMAT)) {
/* if only active, then we can skip a lot of looping */
if (filter & ANIMFILTER_KEYS_ACTIVE) {
Material *ma = BKE_object_material_get(ob, (ob->actcol + 1));
/* we only retrieve the active material... */
if (id_frame_has_keyframe((ID *)ma, frame, filter)) {
return true;
}
}
else {
int a;
/* loop over materials */
for (a = 0; a < ob->totcol; a++) {
Material *ma = BKE_object_material_get(ob, a + 1);
if (id_frame_has_keyframe((ID *)ma, frame, filter)) {
return true;
}
}
}
}
/* nothing found */
return false;
}
/* --------------- API ------------------- */
bool id_frame_has_keyframe(ID *id, float frame, short filter)
bool id_frame_has_keyframe(ID *id, float frame)
{
/* sanity checks */
if (id == NULL) {
@ -3034,7 +2991,7 @@ bool id_frame_has_keyframe(ID *id, float frame, short filter)
/* perform special checks for 'macro' types */
switch (GS(id->name)) {
case ID_OB: /* object */
return object_frame_has_keyframe((Object *)id, frame, filter);
return object_frame_has_keyframe((Object *)id, frame);
#if 0
/* XXX TODO... for now, just use 'normal' behavior */
case ID_SCE: /* scene */
@ -3046,7 +3003,7 @@ bool id_frame_has_keyframe(ID *id, float frame, short filter)
/* only check keyframes in active action */
if (adt) {
return action_frame_has_keyframe(adt->action, frame, filter);
return action_frame_has_keyframe(adt->action, frame);
}
break;
}

View File

@ -213,7 +213,7 @@ void ED_gpencil_strokes_copybuf_free(void);
/* drawgpencil.c */
/**
* Draw grease-pencil sketches to specified 2d-view that uses ibuf corrections.
* Draw grease-pencil sketches to specified 2d-view that uses `ibuf` corrections.
*/
void ED_annotation_draw_2dimage(const struct bContext *C);
/**

View File

@ -148,6 +148,7 @@ typedef enum eKeyframeIterFlags {
* iterator callbacks then. */
KEYFRAME_ITER_HANDLES_DEFAULT_INVISIBLE = (1 << 3),
} eKeyframeIterFlags;
ENUM_OPERATORS(eKeyframeIterFlags, KEYFRAME_ITER_HANDLES_DEFAULT_INVISIBLE)
/** \} */

View File

@ -608,7 +608,7 @@ bool autokeyframe_cfra_can_key(const struct Scene *scene, struct ID *id);
* Checks if some F-Curve has a keyframe for a given frame.
* \note Used for the buttons to check for keyframes.
*/
bool fcurve_frame_has_keyframe(const struct FCurve *fcu, float frame, short filter);
bool fcurve_frame_has_keyframe(const struct FCurve *fcu, float frame);
/**
* \brief Lesser Keyframe Checking API call.
@ -629,23 +629,7 @@ bool fcurve_is_changed(struct PointerRNA ptr,
* in case some detail of the implementation changes...
* \param frame: The value of this is quite often result of #BKE_scene_ctime_get()
*/
bool id_frame_has_keyframe(struct ID *id, float frame, short filter);
/**
* Filter flags for #id_frame_has_keyframe.
*
* \warning do not alter order of these, as also stored in files (for `v3d->keyflags`).
*/
typedef enum eAnimFilterFlags {
/* general */
ANIMFILTER_KEYS_LOCAL = (1 << 0), /* only include locally available anim data */
ANIMFILTER_KEYS_MUTED = (1 << 1), /* include muted elements */
ANIMFILTER_KEYS_ACTIVE = (1 << 2), /* only include active-subelements */
/* object specific */
ANIMFILTER_KEYS_NOMAT = (1 << 9), /* don't include material keyframes */
ANIMFILTER_KEYS_NOSKEY = (1 << 10), /* don't include shape keys (for geometry) */
} eAnimFilterFlags;
bool id_frame_has_keyframe(struct ID *id, float frame);
/* Utility functions for auto key-frame. */

View File

@ -12,34 +12,44 @@
* - Custom context menus
* - Notifier listening
* - Drag controllers (dragging view items)
* - Drop controllers (dropping onto/into view items)
* - Drop targets (dropping onto/into view items)
*/
#pragma once
#include <array>
#include <memory>
#include <optional>
#include "DNA_defs.h"
#include "DNA_vec_types.h"
#include "BLI_span.hh"
#include "BLI_string_ref.hh"
#include "UI_interface.hh"
struct bContext;
struct uiBlock;
struct uiLayout;
struct uiViewItemHandle;
struct ViewLink;
struct wmDrag;
struct wmNotifier;
namespace blender::ui {
class AbstractViewItem;
class AbstractViewItemDropController;
class AbstractViewItemDropTarget;
class AbstractViewItemDragController;
/** The view drop target can share logic with the view item drop target for now, so just an alias.
*/
using AbstractViewDropTarget = AbstractViewItemDropTarget;
class AbstractView {
friend class AbstractViewItem;
friend struct ::ViewLink;
bool is_reconstructed_ = false;
/**
@ -51,9 +61,21 @@ class AbstractView {
*/
std::unique_ptr<std::array<char, MAX_NAME>> rename_buffer_;
/* See #get_bounds(). */
std::optional<rcti> bounds_;
public:
virtual ~AbstractView() = default;
/**
* If a view wants to support dropping data into it, it has to return a drop target here.
* That is an object implementing #AbstractViewDropTarget.
*
* \note This drop target may be requested for each event. The view doesn't keep the drop target
* around currently. So it cannot contain persistent state.
*/
virtual std::unique_ptr<AbstractViewDropTarget> create_drop_target() const;
/** Listen to a notifier, returning true if a redraw is needed. */
virtual bool listen(const wmNotifier &) const;
@ -70,6 +92,11 @@ class AbstractView {
void end_renaming();
Span<char> get_rename_buffer() const;
MutableSpan<char> get_rename_buffer();
/**
* Get the rectangle containing all the view items that are in the layout, in button space.
* Updated as part of #UI_block_end(), before that it's unset.
*/
std::optional<rcti> get_bounds() const;
protected:
AbstractView() = default;
@ -133,13 +160,13 @@ class AbstractViewItem {
*/
virtual std::unique_ptr<AbstractViewItemDragController> create_drag_controller() const;
/**
* If an item wants to support dropping data into it, it has to return a drop controller here.
* That is an object implementing #AbstractViewItemDropController.
* If an item wants to support dropping data into it, it has to return a drop target here.
* That is an object implementing #AbstractViewItemDropTarget.
*
* \note This drop controller may be requested for each event. The view doesn't keep a drop
* controller around currently. So it can not contain persistent state.
* \note This drop target may be requested for each event. The view doesn't keep a drop target
* around currently. So it can not contain persistent state.
*/
virtual std::unique_ptr<AbstractViewItemDropController> create_drop_controller() const;
virtual std::unique_ptr<AbstractViewItemDropTarget> create_drop_target() const;
/** Get the view this item is registered for using #AbstractView::register_item(). */
AbstractView &get_view() const;
@ -200,7 +227,7 @@ template<typename ToType> ToType *AbstractViewItem::from_item_handle(uiViewItemH
* \{ */
/**
* Class to enable dragging a view item. An item can return a drop controller for itself by
* Class to enable dragging a view item. An item can return a drag controller for itself by
* implementing #AbstractViewItem::create_drag_controller().
*/
class AbstractViewItemDragController {
@ -222,38 +249,15 @@ class AbstractViewItemDragController {
/**
* Class to define the behavior when dropping something onto/into a view item, plus the behavior
* when dragging over this item. An item can return a drop controller for itself via a custom
* implementation of #AbstractViewItem::create_drop_controller().
* when dragging over this item. An item can return a drop target for itself via a custom
* implementation of #AbstractViewItem::create_drop_target().
*/
class AbstractViewItemDropController {
class AbstractViewItemDropTarget : public DropTargetInterface {
protected:
AbstractView &view_;
public:
AbstractViewItemDropController(AbstractView &view);
virtual ~AbstractViewItemDropController() = default;
/**
* Check if the data dragged with \a drag can be dropped on the item this controller is for.
* \param r_disabled_hint: Return a static string to display to the user, explaining why dropping
* isn't possible on this item. Shouldn't be done too aggressively, e.g.
* don't set this if the drag-type can't be dropped here; only if it can
* but there's another reason it can't be dropped.
* Can assume this is a non-null pointer.
*/
virtual bool can_drop(const wmDrag &drag, const char **r_disabled_hint) const = 0;
/**
* Custom text to display when dragging over a view item. Should explain what happens when
* dropping the data onto this item. Will only be used if #AbstractViewItem::can_drop()
* returns true, so the implementing override doesn't have to check that again.
* The returned value must be a translated string.
*/
virtual std::string drop_tooltip(const wmDrag &drag) const = 0;
/**
* Execute the logic to apply a drop of the data dragged with \a drag onto/into the item this
* controller is for.
*/
virtual bool on_drop(struct bContext *C, const wmDrag &drag) = 0;
AbstractViewItemDropTarget(AbstractView &view);
/** Request the view the item is registered for as type #ViewType. Throws a `std::bad_cast`
* exception if the view is not of the requested type. */
@ -267,7 +271,7 @@ template<class ViewType> ViewType &AbstractViewItemDragController::get_view() co
return dynamic_cast<ViewType &>(view_);
}
template<class ViewType> ViewType &AbstractViewItemDropController::get_view() const
template<class ViewType> ViewType &AbstractViewItemDropTarget::get_view() const
{
static_assert(std::is_base_of<AbstractView, ViewType>::value,
"Type must derive from and implement the ui::AbstractView interface");

View File

@ -155,8 +155,6 @@ class AbstractGridView : public AbstractView {
* \{ */
class GridViewBuilder {
uiBlock &block_;
public:
GridViewBuilder(uiBlock &block);

View File

@ -3274,18 +3274,12 @@ void UI_view_item_context_menu_build(struct bContext *C,
* \return True if dragging started successfully, otherwise false.
*/
bool UI_view_item_drag_start(struct bContext *C, const uiViewItemHandle *item_);
bool UI_view_item_can_drop(const uiViewItemHandle *item_,
const struct wmDrag *drag,
const char **r_disabled_hint);
char *UI_view_item_drop_tooltip(const uiViewItemHandle *item, const struct wmDrag *drag);
/**
* Let a view item handle a drop event.
* \return True if the drop was handled by the view item.
*/
bool UI_view_item_drop_handle(struct bContext *C,
const uiViewItemHandle *item_,
const struct ListBase *drags);
/**
* \param xy: Coordinate to find a view item at, in window space.
* \param pad: Extra padding added to the bounding box of the view.
*/
uiViewHandle *UI_region_view_find_at(const struct ARegion *region, const int xy[2], int pad);
/**
* \param xy: Coordinate to find a view item at, in window space.
*/

View File

@ -18,11 +18,17 @@ namespace blender::nodes::geo_eval_log {
struct GeometryAttributeInfo;
}
struct ARegion;
struct bContext;
struct PointerRNA;
struct StructRNA;
struct uiBlock;
struct uiLayout;
struct uiList;
struct uiSearchItems;
struct uiViewHandle;
struct uiViewItemHandle;
struct wmDrag;
namespace blender::ui {
@ -54,6 +60,67 @@ void attribute_search_add_items(StringRefNull str,
uiSearchItems *items,
bool is_first);
/**
* This provides a common interface for UI elements that want to support dragging & dropping
* entities into/onto them. With it, the element can determine if the dragged entity can be dropped
* onto itself, provide feedback while dragging and run custom code for the dropping.
*
* Note that this is just an interface. A #wmDropBox is needed to request instances of it from a UI
* element and call its functions. For example the drop box using "UI_OT_view_drop" implements
* dropping for views and view items via this interface. To support other kinds of UI elements,
* similar drop boxes would be necessary.
*/
class DropTargetInterface {
public:
DropTargetInterface() = default;
virtual ~DropTargetInterface() = default;
/**
* Check if the data dragged with \a drag can be dropped on the element this drop target is for.
* \param r_disabled_hint: Return a static string to display to the user, explaining why dropping
* isn't possible on this UI element. Shouldn't be done too aggressively,
* e.g. don't set this if the drag-type can't be dropped here; only if it
* can but there's another reason it can't be dropped. Can assume this is
* a non-null pointer.
*/
virtual bool can_drop(const wmDrag &drag, const char **r_disabled_hint) const = 0;
/**
* Custom text to display when dragging over the element using this drop target. Should
* explain what happens when dropping the data onto this UI element. Will only be used if
* #DropTargetInterface::can_drop() returns true, so the implementing override doesn't have
* to check that again. The returned value must be a translated string.
*/
virtual std::string drop_tooltip(const wmDrag &drag) const = 0;
/**
* Execute the logic to apply a drop of the data dragged with \a drag onto/into the UI element
* this drop target is for.
*/
virtual bool on_drop(bContext *C, const wmDrag &drag) const = 0;
};
/**
* Let a drop target handle a drop event.
* \return True if the dropping was successful.
*/
bool drop_target_apply_drop(bContext &C,
const DropTargetInterface &drop_target,
const ListBase &drags);
/**
* Call #DropTargetInterface::drop_tooltip() and return the result as newly allocated C string
* (unless the result is empty, returns null then). Needs freeing with MEM_freeN().
*/
char *drop_target_tooltip(const DropTargetInterface &drop_target, const wmDrag &drag);
std::unique_ptr<DropTargetInterface> view_drop_target(const uiViewHandle *view_handle);
std::unique_ptr<DropTargetInterface> view_item_drop_target(const uiViewItemHandle *item_handle);
/**
* Try to find a view item with a drop target under the mouse cursor, or if not found, a view
* with a drop target.
* \param xy: Coordinate to find a drop target at, in window space.
*/
std::unique_ptr<DropTargetInterface> region_views_find_drop_target_at(const ARegion *region,
const int xy[2]);
} // namespace blender::ui
enum eUIListFilterResult {

View File

@ -46,6 +46,7 @@ set(SRC
interface_context_path.cc
interface_drag.cc
interface_draw.cc
interface_drop.cc
interface_dropboxes.cc
interface_handlers.cc
interface_icons.cc

View File

@ -2014,6 +2014,8 @@ void UI_block_end_ex(const bContext *C, uiBlock *block, const int xy[2], int r_x
break;
}
ui_block_views_bounds_calc(block);
if (block->rect.xmin == 0.0f && block->rect.xmax == 0.0f) {
UI_block_bounds_set_normal(block, 0);
}

View File

@ -94,7 +94,7 @@ void ui_but_anim_flag(uiBut *but, const AnimationEvalContext *anim_eval_context)
cfra = BKE_nla_tweakedit_remap(adt, cfra, NLATIME_CONVERT_UNMAP);
}
if (fcurve_frame_has_keyframe(fcu, cfra, 0)) {
if (fcurve_frame_has_keyframe(fcu, cfra)) {
but->flag |= UI_BUT_ANIMATED_KEY;
}

View File

@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup edinterface
*/
#include "UI_interface.hh"
namespace blender::ui {
bool drop_target_apply_drop(bContext &C,
const DropTargetInterface &drop_target,
const ListBase &drags)
{
const char *disabled_hint_dummy = nullptr;
LISTBASE_FOREACH (const wmDrag *, drag, &drags) {
if (drop_target.can_drop(*drag, &disabled_hint_dummy)) {
return drop_target.on_drop(&C, *drag);
}
}
return false;
}
char *drop_target_tooltip(const DropTargetInterface &drop_target, const wmDrag &drag)
{
const std::string tooltip = drop_target.drop_tooltip(drag);
return tooltip.empty() ? nullptr : BLI_strdup(tooltip.c_str());
}
} // namespace blender::ui

View File

@ -20,6 +20,9 @@
#include "WM_api.h"
#include "UI_interface.h"
#include "UI_interface.hh"
using namespace blender::ui;
/* -------------------------------------------------------------------- */
/** \name View Drag/Drop Callbacks
@ -28,28 +31,27 @@
static bool ui_view_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
const ARegion *region = CTX_wm_region(C);
const uiViewItemHandle *hovered_item = UI_region_views_find_item_at(region, event->xy);
if (!hovered_item) {
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region,
event->xy);
if (!drop_target) {
return false;
}
if (drag->drop_state.free_disabled_info) {
MEM_SAFE_FREE(drag->drop_state.disabled_info);
}
drag->drop_state.free_disabled_info = false;
return UI_view_item_can_drop(hovered_item, drag, &drag->drop_state.disabled_info);
return drop_target->can_drop(*drag, &drag->drop_state.disabled_info);
}
static char *ui_view_drop_tooltip(bContext *C, wmDrag *drag, const int xy[2], wmDropBox * /*drop*/)
{
const ARegion *region = CTX_wm_region(C);
const uiViewItemHandle *hovered_item = UI_region_views_find_item_at(region, xy);
if (!hovered_item) {
return nullptr;
}
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region, xy);
return UI_view_item_drop_tooltip(hovered_item, drag);
return drop_target_tooltip(*drop_target, *drag);
}
/** \} */

View File

@ -1452,6 +1452,7 @@ void ui_interface_tag_script_reload_queries();
/* interface_view.cc */
void ui_block_free_views(uiBlock *block);
void ui_block_views_bounds_calc(const uiBlock *block);
void ui_block_views_listen(const uiBlock *block, const wmRegionListenerParams *listener_params);
uiViewHandle *ui_block_view_find_matching_in_old_block(const uiBlock *new_block,
const uiViewHandle *new_view);

View File

@ -47,6 +47,7 @@
#include "RNA_types.h"
#include "UI_interface.h"
#include "UI_interface.hh"
#include "interface_intern.hh"
@ -65,6 +66,8 @@
#include "ED_screen.h"
#include "ED_text.h"
using namespace blender::ui;
/* -------------------------------------------------------------------- */
/** \name Immediate redraw helper
*
@ -2351,7 +2354,7 @@ static void UI_OT_list_start_filter(wmOperatorType *ot)
/** \} */
/* -------------------------------------------------------------------- */
/** \name UI Tree-View Drop Operator
/** \name UI View Drop Operator
* \{ */
static bool ui_view_drop_poll(bContext *C)
@ -2361,9 +2364,7 @@ static bool ui_view_drop_poll(bContext *C)
if (region == nullptr) {
return false;
}
const uiViewItemHandle *hovered_item = UI_region_views_find_item_at(region, win->eventstate->xy);
return hovered_item != nullptr;
return region_views_find_drop_target_at(region, win->eventstate->xy) != nullptr;
}
static int ui_view_drop_invoke(bContext *C, wmOperator * /*op*/, const wmEvent *event)
@ -2373,10 +2374,11 @@ static int ui_view_drop_invoke(bContext *C, wmOperator * /*op*/, const wmEvent *
}
const ARegion *region = CTX_wm_region(C);
uiViewItemHandle *hovered_item = UI_region_views_find_item_at(region, event->xy);
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region,
event->xy);
if (!UI_view_item_drop_handle(
C, hovered_item, static_cast<const ListBase *>(event->customdata))) {
if (!drop_target_apply_drop(
*C, *drop_target, *static_cast<const ListBase *>(event->customdata))) {
return OPERATOR_CANCELLED | OPERATOR_PASS_THROUGH;
}
@ -2385,9 +2387,9 @@ static int ui_view_drop_invoke(bContext *C, wmOperator * /*op*/, const wmEvent *
static void UI_OT_view_drop(wmOperatorType *ot)
{
ot->name = "View drop";
ot->name = "View Drop";
ot->idname = "UI_OT_view_drop";
ot->description = "Drag and drop items onto a data-set item";
ot->description = "Drag and drop onto a data-set or item within the data-set";
ot->invoke = ui_view_drop_invoke;
ot->poll = ui_view_drop_poll;

Some files were not shown because too many files have changed in this diff Show More