diff --git a/.gitea/default_merge_message/REBASE_TEMPLATE.md b/.gitea/default_merge_message/REBASE_TEMPLATE.md new file mode 100644 index 00000000000..87a09370e27 --- /dev/null +++ b/.gitea/default_merge_message/REBASE_TEMPLATE.md @@ -0,0 +1,5 @@ +${CommitTitle} + +${CommitBody} + +Pull Request #${PullRequestIndex} diff --git a/.gitea/default_merge_message/SQUASH_TEMPLATE.md b/.gitea/default_merge_message/SQUASH_TEMPLATE.md new file mode 100644 index 00000000000..36123d4d8ce --- /dev/null +++ b/.gitea/default_merge_message/SQUASH_TEMPLATE.md @@ -0,0 +1,3 @@ +${PullRequestTitle} + +Pull Request #${PullRequestIndex} diff --git a/.gitea/issue_template.yaml b/.gitea/issue_template.yaml deleted file mode 100644 index 08b98a0112e..00000000000 --- a/.gitea/issue_template.yaml +++ /dev/null @@ -1,45 +0,0 @@ -name: Bug Report -about: File a bug report -labels: - - bug -ref: master -body: - - type: markdown - attributes: - value: | - ### First time bug reporting? - Read [these tips](https://wiki.blender.org/wiki/Process/Bug_Reports) and watch this **[How to Report a Bug](https://www.youtube.com/watch?v=JTD0OJq_rF4)** video to make a complete, valid bug report. Remember to write your bug report in **English**. - - ### What not to report here - For feature requests, feedback, questions or issues building Blender, see [communication channels](https://wiki.blender.org/wiki/Communication/Contact#User_Feedback_and_Requests). - - ### Please verify - * Always test with the latest official release from [blender.org](https://www.blender.org/) and daily build from [builder.blender.org](https://builder.blender.org/). - * Please use `Help > Report a Bug` in Blender to automatically fill system information and exact Blender version. - * Test [previous Blender versions](https://download.blender.org/release/) to find the latest version that was working as expected. - * Find steps to redo the bug consistently without any non-official add-ons, and include a **small and simple .blend file** to demonstrate the bug. - * If there are multiple bugs, make multiple bug reports. - * Sometimes, driver or software upgrades cause problems. On Windows, try a clean install of the graphics drivers. - - ### Help the developers - Bug fixing is important, the developers will handle a report swiftly. For that reason, we need your help to carefully provide instructions that others can follow quickly. You do your half of the work, then we do our half! - - If a report is tagged with Needs Information from User and it has no reply after a week, we will assume the issue is gone and close the report. - - - type: textarea - attributes: - label: "Description" - value: | - **System Information** - Operating system: - Graphics card: - - **Blender Version** - Broken: (example: 2.80, edbf15d3c044, master, 2018-11-28, as found on the splash screen) - Worked: (newest version of Blender that worked as expected) - - **Short description of error** - - **Exact steps for others to reproduce the error** - Based on the default startup or an attached .blend file (as simple as possible). - diff --git a/.gitea/issue_template/bug.yaml b/.gitea/issue_template/bug.yaml new file mode 100644 index 00000000000..4e3c550dae9 --- /dev/null +++ b/.gitea/issue_template/bug.yaml @@ -0,0 +1,44 @@ +name: Bug Report +about: File a bug report +labels: + - "type::Report" + - "status::Needs Triage" + - "priority::Normal" +body: + - type: markdown + attributes: + value: | + ### Instructions + First time reporting? See [tips](https://wiki.blender.org/wiki/Process/Bug_Reports). 
+ + * Use **Help > Report a Bug** in Blender to fill system information and exact Blender version. + * Test [daily builds](https://builder.blender.org/) to verify if the issue is already fixed. + * Test [previous versions](https://download.blender.org/release/) to find an older working version. + * For feature requests, feedback, questions or build issues, see [communication channels](https://wiki.blender.org/wiki/Communication/Contact#User_Feedback_and_Requests). + * If there are multiple bugs, make multiple bug reports. + + - type: textarea + id: body + attributes: + label: "Description" + hide_label: true + value: | + **System Information** + Operating system: + Graphics card: + + **Blender Version** + Broken: (example: 2.80, edbf15d3c044, master, 2018-11-28, as found on the splash screen) + Worked: (newest version of Blender that worked as expected) + + **Short description of error** + + **Exact steps for others to reproduce the error** + Based on the default startup or an attached .blend file (as simple as possible). + + - type: markdown + attributes: + value: | + ### Help the developers + + Bug fixing is important, the developers will handle reports swiftly. For that reason, carefully provide exact steps and a **small and simple .blend file** to reproduce the problem. You do your half of the work, then we do our half! diff --git a/.gitea/issue_template/config.yaml b/.gitea/issue_template/config.yaml new file mode 100644 index 00000000000..3ba13e0cec6 --- /dev/null +++ b/.gitea/issue_template/config.yaml @@ -0,0 +1 @@ +blank_issues_enabled: false diff --git a/.gitea/issue_template/design.yaml b/.gitea/issue_template/design.yaml new file mode 100644 index 00000000000..a1dcd8b0eda --- /dev/null +++ b/.gitea/issue_template/design.yaml @@ -0,0 +1,10 @@ +name: Design +about: Create a design task (for developers only) +labels: + - "type::Design" +body: + - type: textarea + id: body + attributes: + label: "Description" + hide_label: true diff --git a/.gitea/issue_template/todo.yaml b/.gitea/issue_template/todo.yaml new file mode 100644 index 00000000000..58e848c3e18 --- /dev/null +++ b/.gitea/issue_template/todo.yaml @@ -0,0 +1,10 @@ +name: To Do +about: Create a to do task (for developers only) +labels: + - "type::To Do" +body: + - type: textarea + id: body + attributes: + label: "Description" + hide_label: true diff --git a/.gitea/pull_request_template.md b/.gitea/pull_request_template.md deleted file mode 100644 index a6614b8ef00..00000000000 --- a/.gitea/pull_request_template.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -name: Pull Request -about: Submit a pull request ---- diff --git a/.gitea/pull_request_template.yaml b/.gitea/pull_request_template.yaml new file mode 100644 index 00000000000..b3f45baf96b --- /dev/null +++ b/.gitea/pull_request_template.yaml @@ -0,0 +1,17 @@ +name: Pull Request +about: Contribute code to Blender +body: + - type: markdown + attributes: + value: | + ### Instructions + + Guides to [contributing code](https://wiki.blender.org/index.php/Dev:Doc/Process/Contributing_Code) and effective [code review](https://wiki.blender.org/index.php/Dev:Doc/Tools/Code_Review). + + By submitting code here, you agree that the code is (compatible with) GNU GPL v2 or later. 
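As an aside on the Gitea templates added above and continued below: they are plain YAML forms, so a quick structural sanity check is easy to script. This is a minimal sketch, not part of the patch; it assumes PyYAML is installed and is run from the repository root, and the file list simply mirrors the templates introduced here.

```python
# Check that each new Gitea template has the top-level keys shown in this patch.
import yaml  # PyYAML, an assumed external dependency

TEMPLATES = [
    ".gitea/issue_template/bug.yaml",
    ".gitea/issue_template/design.yaml",
    ".gitea/issue_template/todo.yaml",
    ".gitea/pull_request_template.yaml",
]

for path in TEMPLATES:
    with open(path, "r", encoding="utf-8") as fh:
        data = yaml.safe_load(fh)
    missing = {"name", "about", "body"} - set(data)
    print(path, "OK" if not missing else "missing: " + ", ".join(sorted(missing)))
```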
+ + - type: textarea + id: body + attributes: + label: "Description" + hide_label: true diff --git a/CMakeLists.txt b/CMakeLists.txt index fe4b245b381..02fed0ec7bf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -167,14 +167,26 @@ get_blender_version() option(WITH_BLENDER "Build blender (disable to build only the blender player)" ON) mark_as_advanced(WITH_BLENDER) -if(APPLE) - # In future, can be used with `quicklookthumbnailing/qlthumbnailreply` to create file - # thumbnails for say Finder. Turn it off for now. - option(WITH_BLENDER_THUMBNAILER "Build \"blender-thumbnailer\" thumbnail extraction utility" OFF) -elseif(WIN32) - option(WITH_BLENDER_THUMBNAILER "Build \"BlendThumb.dll\" helper for Windows explorer integration" ON) +if(WIN32) + option(WITH_BLENDER_THUMBNAILER "\ +Build \"BlendThumb.dll\" helper for Windows explorer integration to support extracting \ +thumbnails from `.blend` files." + ON + ) else() - option(WITH_BLENDER_THUMBNAILER "Build \"blender-thumbnailer\" thumbnail extraction utility" ON) + set(_option_default ON) + if(APPLE) + # In future, can be used with `quicklookthumbnailing/qlthumbnailreply` + # to create file thumbnails for say Finder. + # Turn it off for now, even though it can build on APPLE, it's not likely to be useful. + set(_option_default OFF) + endif() + option(WITH_BLENDER_THUMBNAILER "\ +Build stand-alone \"blender-thumbnailer\" command-line thumbnail extraction utility, \ +intended for use by file-managers to extract PNG images from `.blend` files." + ${_option_default} + ) + unset(_option_default) endif() option(WITH_INTERNATIONAL "Enable I18N (International fonts and text)" ON) @@ -214,14 +226,19 @@ option(WITH_BULLET "Enable Bullet (Physics Engine)" ON) option(WITH_SYSTEM_BULLET "Use the systems bullet library (currently unsupported due to missing features in upstream!)" ) mark_as_advanced(WITH_SYSTEM_BULLET) option(WITH_OPENCOLORIO "Enable OpenColorIO color management" ON) + +set(_option_default ON) if(APPLE) # There's no OpenXR runtime in sight for macOS, neither is code well # tested there -> disable it by default. - option(WITH_XR_OPENXR "Enable VR features through the OpenXR specification" OFF) - mark_as_advanced(WITH_XR_OPENXR) -else() - option(WITH_XR_OPENXR "Enable VR features through the OpenXR specification" ON) + set(_option_default OFF) endif() +option(WITH_XR_OPENXR "Enable VR features through the OpenXR specification" ${_option_default}) +if(APPLE) + mark_as_advanced(WITH_XR_OPENXR) +endif() +unset(_option_default) + option(WITH_GMP "Enable features depending on GMP (Exact Boolean)" ON) # Compositor @@ -353,12 +370,13 @@ else() set(WITH_COREAUDIO OFF) endif() if(NOT WIN32) + set(_option_default ON) if(APPLE) - option(WITH_JACK "Enable JACK Support (http://www.jackaudio.org)" OFF) - else() - option(WITH_JACK "Enable JACK Support (http://www.jackaudio.org)" ON) + set(_option_default OFF) endif() - option(WITH_JACK_DYNLOAD "Enable runtime dynamic JACK libraries loading" OFF) + option(WITH_JACK "Enable JACK Support (http://www.jackaudio.org)" ${_option_default}) + unset(_option_default) + option(WITH_JACK_DYNLOAD "Enable runtime dynamic JACK libraries loading" OFF) else() set(WITH_JACK OFF) endif() @@ -399,6 +417,26 @@ mark_as_advanced(WITH_SYSTEM_GLOG) # Freestyle option(WITH_FREESTYLE "Enable Freestyle (advanced edges rendering)" ON) +# Libraries. +if(UNIX AND NOT APPLE) + # Optionally build without pre-compiled libraries. 
+ # NOTE: this could be supported on all platforms however in practice UNIX is the only platform + # that has good support for detecting installed libraries. + option(WITH_LIBS_PRECOMPILED "\ +Detect and link against pre-compiled libraries (typically found under \"../lib/\"). \ +Disabling this option will use the system libraries although cached paths \ +that point to pre-compiled libraries will be left as-is." + ON + ) + mark_as_advanced(WITH_LIBS_PRECOMPILED) + + option(WITH_STATIC_LIBS "Try to link with static libraries, as much as possible, to make blender more portable across distributions" OFF) + if(WITH_STATIC_LIBS) + option(WITH_BOOST_ICU "Boost uses ICU library (required for linking with static Boost built with libicu)." OFF) + mark_as_advanced(WITH_BOOST_ICU) + endif() +endif() + # Misc if(WIN32 OR APPLE) option(WITH_INPUT_IME "Enable Input Method Editor (IME) for complex Asian character input" ON) @@ -406,11 +444,6 @@ endif() option(WITH_INPUT_NDOF "Enable NDOF input devices (SpaceNavigator and friends)" ON) if(UNIX AND NOT APPLE) option(WITH_INSTALL_PORTABLE "Install redistributable runtime, otherwise install into CMAKE_INSTALL_PREFIX" ON) - option(WITH_STATIC_LIBS "Try to link with static libraries, as much as possible, to make blender more portable across distributions" OFF) - if(WITH_STATIC_LIBS) - option(WITH_BOOST_ICU "Boost uses ICU library (required for linking with static Boost built with libicu)." OFF) - mark_as_advanced(WITH_BOOST_ICU) - endif() endif() option(WITH_PYTHON_INSTALL "Copy system python into the blender install folder" ON) @@ -491,7 +524,7 @@ endif() if(NOT APPLE) option(WITH_CYCLES_DEVICE_HIP "Enable Cycles AMD HIP support" ON) option(WITH_CYCLES_HIP_BINARIES "Build Cycles AMD HIP binaries" OFF) - set(CYCLES_HIP_BINARIES_ARCH gfx900 gfx906 gfx90c gfx902 gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032 gfx1034 gfx1035 gfx1100 gfx1101 gfx1102 CACHE STRING "AMD HIP architectures to build binaries for") + set(CYCLES_HIP_BINARIES_ARCH gfx1010 gfx1011 gfx1012 gfx1030 gfx1031 gfx1032 gfx1034 gfx1035 gfx1100 gfx1101 gfx1102 CACHE STRING "AMD HIP architectures to build binaries for") mark_as_advanced(WITH_CYCLES_DEVICE_HIP) mark_as_advanced(CYCLES_HIP_BINARIES_ARCH) endif() @@ -1002,6 +1035,8 @@ set(PLATFORM_LINKLIBS "") # - CMAKE_EXE_LINKER_FLAGS_DEBUG set(PLATFORM_LINKFLAGS "") set(PLATFORM_LINKFLAGS_DEBUG "") +set(PLATFORM_LINKFLAGS_RELEASE "") +set(PLATFORM_LINKFLAGS_EXECUTABLE "") if(NOT CMAKE_BUILD_TYPE MATCHES "Release") if(WITH_COMPILER_ASAN) @@ -1215,13 +1250,6 @@ if(WITH_OPENGL) add_definitions(-DWITH_OPENGL) endif() -#----------------------------------------------------------------------------- -# Configure Vulkan. 
- -if(WITH_VULKAN_BACKEND) - list(APPEND BLENDER_GL_LIBRARIES ${VULKAN_LIBRARIES}) -endif() - # ----------------------------------------------------------------------------- # Configure Metal @@ -1271,12 +1299,14 @@ endif() # ----------------------------------------------------------------------------- # Configure Bullet -if(WITH_BULLET AND WITH_SYSTEM_BULLET) - find_package(Bullet) - set_and_warn_library_found("Bullet" BULLET_FOUND WITH_BULLET) -else() - set(BULLET_INCLUDE_DIRS "${CMAKE_SOURCE_DIR}/extern/bullet2/src") - # set(BULLET_LIBRARIES "") +if(WITH_BULLET) + if(WITH_SYSTEM_BULLET) + find_package(Bullet) + set_and_warn_library_found("Bullet" BULLET_FOUND WITH_BULLET) + else() + set(BULLET_INCLUDE_DIRS "${CMAKE_SOURCE_DIR}/extern/bullet2/src") + set(BULLET_LIBRARIES "extern_bullet") + endif() endif() @@ -1436,6 +1466,9 @@ if(CMAKE_COMPILER_IS_GNUCC) add_check_c_compiler_flag(C_WARNINGS C_WARN_TYPE_LIMITS -Wtype-limits) add_check_c_compiler_flag(C_WARNINGS C_WARN_FORMAT_SIGN -Wformat-signedness) add_check_c_compiler_flag(C_WARNINGS C_WARN_RESTRICT -Wrestrict) + # Useful but too many false positives and inconvenient to suppress each occurrence. + add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_STRINGOP_OVERREAD -Wno-stringop-overread) + add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_STRINGOP_OVERFLOW -Wno-stringop-overflow) # C-only. add_check_c_compiler_flag(C_WARNINGS C_WARN_NO_NULL -Wnonnull) @@ -1475,6 +1508,9 @@ if(CMAKE_COMPILER_IS_GNUCC) add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_RESTRICT -Wrestrict) add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_SUGGEST_OVERRIDE -Wno-suggest-override) add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_UNINITIALIZED -Wuninitialized) + # Useful but too many false positives and inconvenient to suppress each occurrence. + add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_STRINGOP_OVERREAD -Wno-stringop-overread) + add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_NO_STRINGOP_OVERFLOW -Wno-stringop-overflow) # causes too many warnings if(NOT APPLE) diff --git a/GNUmakefile b/GNUmakefile index ba9ee978817..a6b041597c3 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -71,6 +71,13 @@ Static Source Code Checking * check_mypy: Checks all Python scripts using mypy, see: source/tools/check_source/check_mypy_config.py scripts which are included. +Documentation Checking + + * check_wiki_file_structure: + Check the WIKI documentation for the source-tree's file structure + matches Blender's source-code. + See: https://wiki.blender.org/wiki/Source/File_Structure + Spell Checkers This runs the spell checker from the developer tools repositor. @@ -481,6 +488,10 @@ check_smatch: .FORCE check_mypy: .FORCE @$(PYTHON) "$(BLENDER_DIR)/source/tools/check_source/check_mypy.py" +check_wiki_file_structure: .FORCE + @PYTHONIOENCODING=utf_8 $(PYTHON) \ + "$(BLENDER_DIR)/source/tools/check_wiki/check_wiki_file_structure.py" + check_spelling_py: .FORCE @cd "$(BUILD_DIR)" ; \ PYTHONIOENCODING=utf_8 $(PYTHON) \ diff --git a/README.md b/README.md new file mode 100644 index 00000000000..c1916806d76 --- /dev/null +++ b/README.md @@ -0,0 +1,38 @@ + + +Blender +======= + +Blender is the free and open source 3D creation suite. +It supports the entirety of the 3D pipeline-modeling, rigging, animation, simulation, rendering, compositing, +motion tracking and video editing. 
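On the `check_wiki_file_structure` target added to the GNUmakefile above: the recipe simply runs one script from the source tree with UTF-8 I/O forced. The same check can be invoked directly from Python, shown here only as an illustration of what the make rule does (run from a Blender source checkout):

```python
# Equivalent of: make check_wiki_file_structure
import os
import subprocess
import sys

env = dict(os.environ, PYTHONIOENCODING="utf_8")
subprocess.run(
    [sys.executable, "source/tools/check_wiki/check_wiki_file_structure.py"],
    check=True,
    env=env,
)
```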
+ +![Blender screenshot](https://code.blender.org/wp-content/uploads/2018/12/springrg.jpg "Blender screenshot") + +Project Pages +------------- + +- [Main Website](http://www.blender.org) +- [Reference Manual](https://docs.blender.org/manual/en/latest/index.html) +- [User Community](https://www.blender.org/community/) + +Development +----------- + +- [Build Instructions](https://wiki.blender.org/wiki/Building_Blender) +- [Code Review & Bug Tracker](https://developer.blender.org) +- [Developer Forum](https://devtalk.blender.org) +- [Developer Documentation](https://wiki.blender.org) + + +License +------- + +Blender as a whole is licensed under the GNU General Public License, Version 3. +Individual files may have a different, but compatible license. + +See [blender.org/about/license](https://www.blender.org/about/license) for details. diff --git a/build_files/build_environment/cmake/dpcpp.cmake b/build_files/build_environment/cmake/dpcpp.cmake index ef6fc9c40c9..ae30d842af4 100644 --- a/build_files/build_environment/cmake/dpcpp.cmake +++ b/build_files/build_environment/cmake/dpcpp.cmake @@ -2,7 +2,7 @@ # LLVM does not switch over to cpp17 until llvm 16 and building ealier versions with # MSVC is leading to some crashes in ISPC. Switch back to their default on all platforms -# for now. +# for now. string(REPLACE "-DCMAKE_CXX_STANDARD=17" " " DPCPP_CMAKE_FLAGS "${DEFAULT_CMAKE_FLAGS}") if(WIN32) diff --git a/build_files/build_environment/cmake/llvm.cmake b/build_files/build_environment/cmake/llvm.cmake index 26e1f1f58b4..8994d330bbe 100644 --- a/build_files/build_environment/cmake/llvm.cmake +++ b/build_files/build_environment/cmake/llvm.cmake @@ -42,7 +42,7 @@ endif() # LLVM does not switch over to cpp17 until llvm 16 and building ealier versions with # MSVC is leading to some crashes in ISPC. Switch back to their default on all platforms -# for now. +# for now. string(REPLACE "-DCMAKE_CXX_STANDARD=17" " " LLVM_CMAKE_FLAGS "${DEFAULT_CMAKE_FLAGS}") # short project name due to long filename issues on windows diff --git a/build_files/build_environment/cmake/ssl.cmake b/build_files/build_environment/cmake/ssl.cmake index 6241e2d44e6..31792e788df 100644 --- a/build_files/build_environment/cmake/ssl.cmake +++ b/build_files/build_environment/cmake/ssl.cmake @@ -10,9 +10,9 @@ if(WIN32) DOWNLOAD_DIR ${DOWNLOAD_DIR} URL_HASH ${SSL_HASH_TYPE}=${SSL_HASH} PREFIX ${BUILD_DIR}/ssl - CONFIGURE_COMMAND echo "." - BUILD_COMMAND echo "." - INSTALL_COMMAND echo "." + CONFIGURE_COMMAND echo "." + BUILD_COMMAND echo "." + INSTALL_COMMAND echo "." 
INSTALL_DIR ${LIBDIR}/ssl ) else() @@ -46,4 +46,4 @@ else() INSTALL_COMMAND ${CONFIGURE_ENV} && cd ${BUILD_DIR}/ssl/src/external_ssl/ && make install INSTALL_DIR ${LIBDIR}/ssl ) -endif() \ No newline at end of file +endif() diff --git a/build_files/build_environment/cmake/usd.cmake b/build_files/build_environment/cmake/usd.cmake index 98c7931808f..04000951166 100644 --- a/build_files/build_environment/cmake/usd.cmake +++ b/build_files/build_environment/cmake/usd.cmake @@ -29,7 +29,7 @@ elseif(UNIX) set(USD_PLATFORM_FLAGS -DPYTHON_INCLUDE_DIR=${LIBDIR}/python/include/python${PYTHON_SHORT_VERSION}/ -DPYTHON_LIBRARY=${LIBDIR}/tbb/lib/${LIBPREFIX}${TBB_LIBRARY}${SHAREDLIBEXT} - ) + ) if(APPLE) set(USD_SHARED_LINKER_FLAGS "-Xlinker -undefined -Xlinker dynamic_lookup") diff --git a/build_files/build_environment/cmake/xml2.cmake b/build_files/build_environment/cmake/xml2.cmake index 3d31ec131bb..e403ad41a5e 100644 --- a/build_files/build_environment/cmake/xml2.cmake +++ b/build_files/build_environment/cmake/xml2.cmake @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-or-later if(WIN32) - set(XML2_EXTRA_ARGS + set(XML2_EXTRA_ARGS -DLIBXML2_WITH_ZLIB=OFF -DLIBXML2_WITH_LZMA=OFF -DLIBXML2_WITH_PYTHON=OFF diff --git a/build_files/cmake/Modules/FindMoltenVK.cmake b/build_files/cmake/Modules/FindMoltenVK.cmake index 07584e51ae5..eea7d25819a 100644 --- a/build_files/cmake/Modules/FindMoltenVK.cmake +++ b/build_files/cmake/Modules/FindMoltenVK.cmake @@ -19,9 +19,13 @@ ENDIF() SET(_moltenvk_SEARCH_DIRS ${MOLTENVK_ROOT_DIR} - ${LIBDIR}/vulkan/MoltenVK ) +# FIXME: These finder modules typically don't use LIBDIR, +# this should be set by `./build_files/cmake/platform/` instead. +IF(DEFINED LIBDIR) + SET(_moltenvk_SEARCH_DIRS ${_moltenvk_SEARCH_DIRS} ${LIBDIR}/moltenvk) +ENDIF() FIND_PATH(MOLTENVK_INCLUDE_DIR NAMES diff --git a/build_files/cmake/Modules/FindOptiX.cmake b/build_files/cmake/Modules/FindOptiX.cmake index bb671ed6495..f6838f0583f 100644 --- a/build_files/cmake/Modules/FindOptiX.cmake +++ b/build_files/cmake/Modules/FindOptiX.cmake @@ -17,9 +17,13 @@ ENDIF() SET(_optix_SEARCH_DIRS ${OPTIX_ROOT_DIR} - "$ENV{PROGRAMDATA}/NVIDIA Corporation/OptiX SDK 7.3.0" ) +# TODO: Which environment uses this? +if(DEFINED ENV{PROGRAMDATA}) + list(APPEND _optix_SEARCH_DIRS "$ENV{PROGRAMDATA}/NVIDIA Corporation/OptiX SDK 7.3.0") +endif() + FIND_PATH(OPTIX_INCLUDE_DIR NAMES optix.h diff --git a/build_files/cmake/Modules/FindPythonLibsUnix.cmake b/build_files/cmake/Modules/FindPythonLibsUnix.cmake index b222ed85a4f..5d40a4f1277 100644 --- a/build_files/cmake/Modules/FindPythonLibsUnix.cmake +++ b/build_files/cmake/Modules/FindPythonLibsUnix.cmake @@ -67,6 +67,8 @@ ENDIF() STRING(REPLACE "." "" PYTHON_VERSION_NO_DOTS ${PYTHON_VERSION}) +SET(_PYTHON_ABI_FLAGS "") + SET(_python_SEARCH_DIRS ${PYTHON_ROOT_DIR} "$ENV{HOME}/py${PYTHON_VERSION_NO_DOTS}" diff --git a/build_files/cmake/Modules/FindShaderC.cmake b/build_files/cmake/Modules/FindShaderC.cmake new file mode 100644 index 00000000000..e38ca137775 --- /dev/null +++ b/build_files/cmake/Modules/FindShaderC.cmake @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2023 Blender Foundation. + +# - Find ShaderC libraries +# Find the ShaderC includes and libraries +# This module defines +# SHADERC_INCLUDE_DIRS, where to find MoltenVK headers, Set when +# SHADERC_INCLUDE_DIR is found. +# SHADERC_LIBRARIES, libraries to link against to use ShaderC. +# SHADERC_ROOT_DIR, The base directory to search for ShaderC. +# This can also be an environment variable. 
+# SHADERC_FOUND, If false, do not try to use ShaderC. +# + +# If SHADERC_ROOT_DIR was defined in the environment, use it. +IF(NOT SHADERC_ROOT_DIR AND NOT $ENV{SHADERC_ROOT_DIR} STREQUAL "") + SET(SHADERC_ROOT_DIR $ENV{SHADERC_ROOT_DIR}) +ENDIF() + +SET(_shaderc_SEARCH_DIRS + ${SHADERC_ROOT_DIR} +) + +# FIXME: These finder modules typically don't use LIBDIR, +# this should be set by `./build_files/cmake/platform/` instead. +IF(DEFINED LIBDIR) + SET(_shaderc_SEARCH_DIRS ${_shaderc_SEARCH_DIRS} ${LIBDIR}/shaderc) +ENDIF() + +FIND_PATH(SHADERC_INCLUDE_DIR + NAMES + shaderc/shaderc.h + HINTS + ${_shaderc_SEARCH_DIRS} + PATH_SUFFIXES + include +) + +FIND_LIBRARY(SHADERC_LIBRARY + NAMES + shaderc_combined + HINTS + ${_shaderc_SEARCH_DIRS} + PATH_SUFFIXES + lib +) + +# handle the QUIETLY and REQUIRED arguments and set SHADERC_FOUND to TRUE if +# all listed variables are TRUE +INCLUDE(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(ShaderC DEFAULT_MSG SHADERC_LIBRARY SHADERC_INCLUDE_DIR) + +IF(SHADERC_FOUND) + SET(SHADERC_LIBRARIES ${SHADERC_LIBRARY}) + SET(SHADERC_INCLUDE_DIRS ${SHADERC_INCLUDE_DIR}) +ENDIF() + +MARK_AS_ADVANCED( + SHADERC_INCLUDE_DIR + SHADERC_LIBRARY +) + +UNSET(_shaderc_SEARCH_DIRS) diff --git a/build_files/cmake/Modules/FindVulkan.cmake b/build_files/cmake/Modules/FindVulkan.cmake new file mode 100644 index 00000000000..37da42543db --- /dev/null +++ b/build_files/cmake/Modules/FindVulkan.cmake @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2023 Blender Foundation. + +# - Find Vulkan libraries +# Find the Vulkan includes and libraries +# This module defines +# VULKAN_INCLUDE_DIRS, where to find Vulkan headers, Set when +# VULKAN_INCLUDE_DIR is found. +# VULKAN_LIBRARIES, libraries to link against to use Vulkan. +# VULKAN_ROOT_DIR, The base directory to search for Vulkan. +# This can also be an environment variable. +# VULKAN_FOUND, If false, do not try to use Vulkan. +# + +# If VULKAN_ROOT_DIR was defined in the environment, use it. +IF(NOT VULKAN_ROOT_DIR AND NOT $ENV{VULKAN_ROOT_DIR} STREQUAL "") + SET(VULKAN_ROOT_DIR $ENV{VULKAN_ROOT_DIR}) +ENDIF() + +SET(_vulkan_SEARCH_DIRS + ${VULKAN_ROOT_DIR} +) + +# FIXME: These finder modules typically don't use LIBDIR, +# this should be set by `./build_files/cmake/platform/` instead. 
+IF(DEFINED LIBDIR) + SET(_vulkan_SEARCH_DIRS ${_vulkan_SEARCH_DIRS} ${LIBDIR}/vulkan) +ENDIF() + +FIND_PATH(VULKAN_INCLUDE_DIR + NAMES + vulkan/vulkan.h + HINTS + ${_vulkan_SEARCH_DIRS} + PATH_SUFFIXES + include +) + +FIND_LIBRARY(VULKAN_LIBRARY + NAMES + vulkan + HINTS + ${_vulkan_SEARCH_DIRS} + PATH_SUFFIXES + lib +) + +# handle the QUIETLY and REQUIRED arguments and set VULKAN_FOUND to TRUE if +# all listed variables are TRUE +INCLUDE(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(Vulkan DEFAULT_MSG VULKAN_LIBRARY VULKAN_INCLUDE_DIR) + +IF(VULKAN_FOUND) + SET(VULKAN_LIBRARIES ${VULKAN_LIBRARY}) + SET(VULKAN_INCLUDE_DIRS ${VULKAN_INCLUDE_DIR}) +ENDIF() + +MARK_AS_ADVANCED( + VULKAN_INCLUDE_DIR + VULKAN_LIBRARY +) + +UNSET(_vulkan_SEARCH_DIRS) diff --git a/build_files/cmake/cmake_print_build_options.py b/build_files/cmake/cmake_print_build_options.py index 3dc2951680a..7a30bb5da2b 100644 --- a/build_files/cmake/cmake_print_build_options.py +++ b/build_files/cmake/cmake_print_build_options.py @@ -6,18 +6,80 @@ import re import sys +from typing import Optional + cmakelists_file = sys.argv[-1] -def main(): +def count_backslashes_before_pos(file_data: str, pos: int) -> int: + slash_count = 0 + pos -= 1 + while pos >= 0: + if file_data[pos] != '\\': + break + pos -= 1 + slash_count += 1 + return slash_count + + +def extract_cmake_string_at_pos(file_data: str, pos_beg: int) -> Optional[str]: + assert file_data[pos_beg - 1] == '"' + + pos = pos_beg + # Dummy assignment. + pos_end = pos_beg + while True: + pos_next = file_data.find('"', pos) + if pos_next == -1: + raise Exception("Un-terminated string (parse error?)") + + count_slashes = count_backslashes_before_pos(file_data, pos_next) + if (count_slashes % 2) == 0: + pos_end = pos_next + # Found the closing quote. + break + + # The quote was back-slash escaped, step over it. + pos = pos_next + 1 + file_data[pos_next] + + assert file_data[pos_end] == '"' + + if pos_beg == pos_end: + return None + + # See: https://cmake.org/cmake/help/latest/manual/cmake-language.7.html#escape-sequences + text = file_data[pos_beg: pos_end].replace( + # Handle back-slash literals. + "\\\\", "\\", + ).replace( + # Handle tabs. + "\\t", "\t", + ).replace( + # Handle escaped quotes. + "\\\"", "\"", + ).replace( + # Handle tabs. + "\\;", ";", + ).replace( + # Handle trailing newlines. + "\\\n", "", + ) + + return text + + +def main() -> None: options = [] - for l in open(cmakelists_file, 'r').readlines(): - if not l.lstrip().startswith('#'): - l_option = re.sub(r'.*\boption\s*\(\s*(WITH_[a-zA-Z0-9_]+)\s+\"(.*)\"\s*.*', r'\g<1> - \g<2>', l) - if l_option != l: - l_option = l_option.strip() - if l_option.startswith('WITH_'): - options.append(l_option) + with open(cmakelists_file, 'r', encoding="utf-8") as fh: + file_data = fh.read() + for m in re.finditer(r"^\s*option\s*\(\s*(WITH_[a-zA-Z0-9_]+)\s+(\")", file_data, re.MULTILINE): + option_name = m.group(1) + option_descr = extract_cmake_string_at_pos(file_data, m.span(2)[1]) + if option_descr is None: + # Possibly a parsing error, at least show something. 
+ option_descr = "(UNDOCUMENTED)" + options.append("{:s}: {:s}".format(option_name, option_descr)) print('\n'.join(options)) diff --git a/build_files/cmake/macros.cmake b/build_files/cmake/macros.cmake index 9965f892395..dc7c101f91a 100644 --- a/build_files/cmake/macros.cmake +++ b/build_files/cmake/macros.cmake @@ -550,7 +550,9 @@ function(setup_platform_linker_libs endif() if(WIN32 AND NOT UNIX) - target_link_libraries(${target} ${PTHREADS_LIBRARIES}) + if(DEFINED PTHREADS_LIBRARIES) + target_link_libraries(${target} ${PTHREADS_LIBRARIES}) + endif() endif() # target_link_libraries(${target} ${PLATFORM_LINKLIBS} ${CMAKE_DL_LIBS}) @@ -1115,7 +1117,7 @@ function(find_python_package # endif() # Not set, so initialize. else() - string(REPLACE "." ";" _PY_VER_SPLIT "${PYTHON_VERSION}") + string(REPLACE "." ";" _PY_VER_SPLIT "${PYTHON_VERSION}") list(GET _PY_VER_SPLIT 0 _PY_VER_MAJOR) # re-cache @@ -1262,7 +1264,7 @@ endmacro() # Utility to gather and install precompiled shared libraries. macro(add_bundled_libraries library_dir) - if(EXISTS ${LIBDIR}) + if(DEFINED LIBDIR) set(_library_dir ${LIBDIR}/${library_dir}) if(WIN32) file(GLOB _all_library_versions ${_library_dir}/*\.dll) @@ -1275,7 +1277,7 @@ macro(add_bundled_libraries library_dir) list(APPEND PLATFORM_BUNDLED_LIBRARY_DIRS ${_library_dir}) unset(_all_library_versions) unset(_library_dir) - endif() + endif() endmacro() macro(windows_install_shared_manifest) diff --git a/build_files/cmake/platform/platform_apple.cmake b/build_files/cmake/platform/platform_apple.cmake index e9e07229b2d..0b7ae0532d8 100644 --- a/build_files/cmake/platform/platform_apple.cmake +++ b/build_files/cmake/platform/platform_apple.cmake @@ -97,20 +97,8 @@ add_bundled_libraries(materialx/lib) if(WITH_VULKAN_BACKEND) find_package(MoltenVK REQUIRED) - - if(EXISTS ${LIBDIR}/vulkan) - set(VULKAN_FOUND On) - set(VULKAN_ROOT_DIR ${LIBDIR}/vulkan/macOS) - set(VULKAN_INCLUDE_DIR ${VULKAN_ROOT_DIR}/include) - set(VULKAN_LIBRARY ${VULKAN_ROOT_DIR}/lib/libvulkan.1.dylib) - set(SHADERC_LIBRARY ${VULKAN_ROOT_DIR}/lib/libshaderc_combined.a) - - set(VULKAN_INCLUDE_DIRS ${VULKAN_INCLUDE_DIR} ${MOLTENVK_INCLUDE_DIRS}) - set(VULKAN_LIBRARIES ${VULKAN_LIBRARY} ${SHADERC_LIBRARY} ${MOLTENVK_LIBRARIES}) - else() - message(WARNING "Vulkan SDK was not found, disabling WITH_VULKAN_BACKEND") - set(WITH_VULKAN_BACKEND OFF) - endif() + find_package(ShaderC REQUIRED) + find_package(Vulkan REQUIRED) endif() if(WITH_OPENSUBDIV) diff --git a/build_files/cmake/platform/platform_old_libs_update.cmake b/build_files/cmake/platform/platform_old_libs_update.cmake index ab27dd89385..d71b5d45818 100644 --- a/build_files/cmake/platform/platform_old_libs_update.cmake +++ b/build_files/cmake/platform/platform_old_libs_update.cmake @@ -1,7 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-or-later # Copyright 2022 Blender Foundation. All rights reserved. -# Auto update existing CMake caches for new libraries +# Auto update existing CMake caches for new libraries. + +# Assert that `LIBDIR` is defined. +if(NOT (DEFINED LIBDIR)) + message(FATAL_ERROR "Logical error, expected 'LIBDIR' to be defined!") +endif() # Clear cached variables whose name matches `pattern`. 
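Returning to the `cmake_print_build_options.py` rework above: the old line-by-line regex could not cope with the multi-line, escaped option descriptions introduced in this patch (see `WITH_BLENDER_THUMBNAILER` and `WITH_LIBS_PRECOMPILED`), so the script now anchors on the opening quote and hands the rest to `extract_cmake_string_at_pos()`. The following self-contained sketch shows only that anchoring step; the CMake snippet is invented for illustration, and the closing-quote scan is the helper defined in the script above.

```python
# Locate option() descriptions the way the reworked parser does.
import re

cmake_snippet = r'''
option(WITH_EXAMPLE "\
A multi-line description with an escaped quote \" inside."
  ON
)
option(WITH_OTHER "Single line description." OFF)
'''

pattern = re.compile(r"^\s*option\s*\(\s*(WITH_[a-zA-Z0-9_]+)\s+(\")", re.MULTILINE)
for m in pattern.finditer(cmake_snippet):
    # m.span(2)[1] is the offset just past the opening quote, which is where
    # extract_cmake_string_at_pos() starts scanning for the closing quote.
    print(m.group(1), "description starts at offset", m.span(2)[1])
```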
function(unset_cache_variables pattern) diff --git a/build_files/cmake/platform/platform_unix.cmake b/build_files/cmake/platform/platform_unix.cmake index 787d0f87002..d6aa7d63a6f 100644 --- a/build_files/cmake/platform/platform_unix.cmake +++ b/build_files/cmake/platform/platform_unix.cmake @@ -4,38 +4,52 @@ # Libraries configuration for any *nix system including Linux and Unix (excluding APPLE). # Detect precompiled library directory -if(NOT DEFINED LIBDIR) - # Path to a locally compiled libraries. - set(LIBDIR_NAME ${CMAKE_SYSTEM_NAME}_${CMAKE_SYSTEM_PROCESSOR}) - string(TOLOWER ${LIBDIR_NAME} LIBDIR_NAME) - set(LIBDIR_NATIVE_ABI ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_NAME}) - # Path to precompiled libraries with known glibc 2.28 ABI. - set(LIBDIR_GLIBC228_ABI ${CMAKE_SOURCE_DIR}/../lib/linux_x86_64_glibc_228) +if(NOT WITH_LIBS_PRECOMPILED) + unset(LIBDIR) +else() + if(NOT DEFINED LIBDIR) + # Path to a locally compiled libraries. + set(LIBDIR_NAME ${CMAKE_SYSTEM_NAME}_${CMAKE_SYSTEM_PROCESSOR}) + string(TOLOWER ${LIBDIR_NAME} LIBDIR_NAME) + set(LIBDIR_NATIVE_ABI ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_NAME}) - # Choose the best suitable libraries. - if(EXISTS ${LIBDIR_NATIVE_ABI}) - set(LIBDIR ${LIBDIR_NATIVE_ABI}) - set(WITH_LIBC_MALLOC_HOOK_WORKAROUND True) - elseif(EXISTS ${LIBDIR_GLIBC228_ABI}) - set(LIBDIR ${LIBDIR_GLIBC228_ABI}) - if(WITH_MEM_JEMALLOC) - # jemalloc provides malloc hooks. - set(WITH_LIBC_MALLOC_HOOK_WORKAROUND False) - else() + # Path to precompiled libraries with known glibc 2.28 ABI. + set(LIBDIR_GLIBC228_ABI ${CMAKE_SOURCE_DIR}/../lib/linux_x86_64_glibc_228) + + # Choose the best suitable libraries. + if(EXISTS ${LIBDIR_NATIVE_ABI}) + set(LIBDIR ${LIBDIR_NATIVE_ABI}) set(WITH_LIBC_MALLOC_HOOK_WORKAROUND True) + elseif(EXISTS ${LIBDIR_GLIBC228_ABI}) + set(LIBDIR ${LIBDIR_GLIBC228_ABI}) + if(WITH_MEM_JEMALLOC) + # jemalloc provides malloc hooks. + set(WITH_LIBC_MALLOC_HOOK_WORKAROUND False) + else() + set(WITH_LIBC_MALLOC_HOOK_WORKAROUND True) + endif() endif() + + # Avoid namespace pollustion. + unset(LIBDIR_NATIVE_ABI) + unset(LIBDIR_GLIBC228_ABI) endif() - # Avoid namespace pollustion. - unset(LIBDIR_NATIVE_ABI) - unset(LIBDIR_GLIBC228_ABI) + if(NOT (EXISTS ${LIBDIR})) + message(STATUS + "Unable to find LIBDIR: ${LIBDIR}, system libraries may be used " + "(disable WITH_LIBS_PRECOMPILED to suppress this message)." + ) + unset(LIBDIR) + endif() endif() + # Support restoring this value once pre-compiled libraries have been handled. set(WITH_STATIC_LIBS_INIT ${WITH_STATIC_LIBS}) -if(EXISTS ${LIBDIR}) +if(DEFINED LIBDIR) message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}") file(GLOB LIB_SUBDIRS ${LIBDIR}/*) @@ -85,7 +99,7 @@ endmacro() # These are libraries that may be precompiled. For this we disable searching in # the system directories so that we don't accidentally use them instead. -if(EXISTS ${LIBDIR}) +if(DEFINED LIBDIR) without_system_libs_begin() endif() @@ -97,6 +111,7 @@ find_package_wrapper(Epoxy REQUIRED) if(WITH_VULKAN_BACKEND) find_package_wrapper(Vulkan REQUIRED) + find_package_wrapper(ShaderC REQUIRED) endif() function(check_freetype_for_brotli) @@ -114,7 +129,7 @@ endfunction() if(NOT WITH_SYSTEM_FREETYPE) # FreeType compiled with Brotli compression for woff2. find_package_wrapper(Freetype REQUIRED) - if(EXISTS ${LIBDIR}) + if(DEFINED LIBDIR) find_package_wrapper(Brotli REQUIRED) # NOTE: This is done on WIN32 & APPLE but fails on some Linux systems. 
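The restructured block above in `platform_unix.cmake` is the heart of the new `WITH_LIBS_PRECOMPILED` option: prefer a native-ABI directory under `../lib/`, fall back to the `linux_x86_64_glibc_228` libraries, and otherwise leave `LIBDIR` undefined so the system libraries are used. Restated in Python purely for orientation (the directory names follow the conventions above, the function name is invented):

```python
# Which pre-compiled library directory would the CMake logic above choose?
import os
import platform

def choose_libdir(source_dir):
    native = os.path.join(
        source_dir, "..", "lib",
        "{}_{}".format(platform.system(), platform.machine()).lower())
    glibc228 = os.path.join(source_dir, "..", "lib", "linux_x86_64_glibc_228")
    for candidate in (native, glibc228):
        if os.path.isdir(candidate):
            return candidate
    return None  # LIBDIR stays undefined: build against system libraries


print(choose_libdir("."))
```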
@@ -141,7 +156,7 @@ if(WITH_PYTHON) if(WITH_PYTHON_MODULE AND NOT WITH_INSTALL_PORTABLE) # Installing into `site-packages`, warn when installing into `./../lib/` # which script authors almost certainly don't want. - if(EXISTS ${LIBDIR}) + if(DEFINED LIBDIR) path_is_prefix(LIBDIR PYTHON_SITE_PACKAGES _is_prefix) if(_is_prefix) message(WARNING " @@ -217,7 +232,7 @@ if(WITH_CODEC_SNDFILE) endif() if(WITH_CODEC_FFMPEG) - if(EXISTS ${LIBDIR}) + if(DEFINED LIBDIR) set(FFMPEG_ROOT_DIR ${LIBDIR}/ffmpeg) # Override FFMPEG components to also include static library dependencies # included with precompiled libraries, and to ensure correct link order. @@ -232,7 +247,7 @@ if(WITH_CODEC_FFMPEG) vpx x264 xvidcore) - if(EXISTS ${LIBDIR}/ffmpeg/lib/libaom.a) + if((DEFINED LIBDIR) AND (EXISTS ${LIBDIR}/ffmpeg/lib/libaom.a)) list(APPEND FFMPEG_FIND_COMPONENTS aom) endif() elseif(FFMPEG) @@ -430,10 +445,13 @@ if(WITH_OPENIMAGEIO) ${PNG_LIBRARIES} ${JPEG_LIBRARIES} ${ZLIB_LIBRARIES} - ${BOOST_LIBRARIES} ) + set(OPENIMAGEIO_DEFINITIONS "") + if(WITH_BOOST) + list(APPEND OPENIMAGEIO_LIBRARIES "${BOOST_LIBRARIES}") + endif() if(WITH_IMAGE_TIFF) list(APPEND OPENIMAGEIO_LIBRARIES "${TIFF_LIBRARY}") endif() @@ -451,7 +469,7 @@ add_bundled_libraries(openimageio/lib) if(WITH_OPENCOLORIO) find_package_wrapper(OpenColorIO 2.0.0) - set(OPENCOLORIO_DEFINITIONS) + set(OPENCOLORIO_DEFINITIONS "") set_and_warn_library_found("OpenColorIO" OPENCOLORIO_FOUND WITH_OPENCOLORIO) endif() add_bundled_libraries(opencolorio/lib) @@ -466,7 +484,7 @@ if(WITH_OPENIMAGEDENOISE) endif() if(WITH_LLVM) - if(EXISTS ${LIBDIR}) + if(DEFINED LIBDIR) set(LLVM_STATIC ON) endif() @@ -480,7 +498,7 @@ if(WITH_LLVM) endif() # Symbol conflicts with same UTF library used by OpenCollada - if(EXISTS ${LIBDIR}) + if(DEFINED LIBDIR) if(WITH_OPENCOLLADA AND (${LLVM_VERSION} VERSION_LESS "4.0.0")) list(REMOVE_ITEM OPENCOLLADA_LIBRARIES ${OPENCOLLADA_UTF_LIBRARY}) endif() @@ -536,7 +554,7 @@ if(WITH_CYCLES AND WITH_CYCLES_PATH_GUIDING) endif() endif() -if(EXISTS ${LIBDIR}) +if(DEFINED LIBDIR) without_system_libs_end() endif() @@ -551,9 +569,14 @@ else() endif() find_package(Threads REQUIRED) -list(APPEND PLATFORM_LINKLIBS ${CMAKE_THREAD_LIBS_INIT}) -# used by other platforms -set(PTHREADS_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) +# `FindThreads` documentation notes that this may be empty +# with the system libraries provide threading functionality. +if(CMAKE_THREAD_LIBS_INIT) + list(APPEND PLATFORM_LINKLIBS ${CMAKE_THREAD_LIBS_INIT}) + # used by other platforms + set(PTHREADS_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) +endif() + if(CMAKE_DL_LIBS) list(APPEND PLATFORM_LINKLIBS ${CMAKE_DL_LIBS}) @@ -575,7 +598,7 @@ add_definitions(-D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE # # Keep last, so indirectly linked libraries don't override our own pre-compiled libs. -if(EXISTS ${LIBDIR}) +if(DEFINED LIBDIR) # Clear the prefix path as it causes the `LIBDIR` to override system locations. unset(CMAKE_PREFIX_PATH) @@ -631,7 +654,7 @@ if(WITH_GHOST_WAYLAND) # When dynamically linked WAYLAND is used and `${LIBDIR}/wayland` is present, # there is no need to search for the libraries as they are not needed for building. # Only the headers are needed which can reference the known paths. 
- if(EXISTS "${LIBDIR}/wayland" AND WITH_GHOST_WAYLAND_DYNLOAD) + if((DEFINED LIBDIR) AND (EXISTS "${LIBDIR}/wayland" AND WITH_GHOST_WAYLAND_DYNLOAD)) set(_use_system_wayland OFF) else() set(_use_system_wayland ON) @@ -695,7 +718,7 @@ if(WITH_GHOST_WAYLAND) add_definitions(-DWITH_GHOST_WAYLAND_LIBDECOR) endif() - if(EXISTS "${LIBDIR}/wayland/bin/wayland-scanner") + if((DEFINED LIBDIR) AND (EXISTS "${LIBDIR}/wayland/bin/wayland-scanner")) set(WAYLAND_SCANNER "${LIBDIR}/wayland/bin/wayland-scanner") else() pkg_get_variable(WAYLAND_SCANNER wayland-scanner wayland_scanner) diff --git a/build_files/config/pipeline_config.yaml b/build_files/config/pipeline_config.yaml index 392bd842586..29c0b131b3d 100644 --- a/build_files/config/pipeline_config.yaml +++ b/build_files/config/pipeline_config.yaml @@ -43,6 +43,10 @@ update-code: branch: trunk commit_id: HEAD path: lib/benchmarks + assets: + branch: trunk + commit_id: HEAD + path: lib/assets # # Buildbot only configs @@ -59,7 +63,7 @@ buildbot: optix: version: '7.3.0' ocloc: - version: '101.3430' + version: '101.4032' cmake: default: version: any diff --git a/build_files/utils/make_bpy_wheel.py b/build_files/utils/make_bpy_wheel.py index 9a33c9c1b68..79c0f91b425 100755 --- a/build_files/utils/make_bpy_wheel.py +++ b/build_files/utils/make_bpy_wheel.py @@ -24,7 +24,7 @@ import os import re import platform import string -import setuptools # type: ignore +import setuptools import sys from typing import ( @@ -208,7 +208,7 @@ def main() -> None: return paths # Ensure this wheel is marked platform specific. - class BinaryDistribution(setuptools.dist.Distribution): # type: ignore + class BinaryDistribution(setuptools.dist.Distribution): def has_ext_modules(self) -> bool: return True diff --git a/build_files/utils/make_test.py b/build_files/utils/make_test.py index 8e711d5e33a..e489bc85fc5 100755 --- a/build_files/utils/make_test.py +++ b/build_files/utils/make_test.py @@ -13,10 +13,10 @@ import sys import make_utils from make_utils import call -# Parse arguments +# Parse arguments. 
-def parse_arguments(): +def parse_arguments() -> argparse.Namespace: parser = argparse.ArgumentParser() parser.add_argument("--ctest-command", default="ctest") parser.add_argument("--cmake-command", default="cmake") diff --git a/build_files/utils/make_update.py b/build_files/utils/make_update.py index fbadeecd597..2288a9972b8 100755 --- a/build_files/utils/make_update.py +++ b/build_files/utils/make_update.py @@ -104,17 +104,30 @@ def svn_update(args: argparse.Namespace, release_version: Optional[str]) -> None svn_url_tests = svn_url + lib_tests call(svn_non_interactive + ["checkout", svn_url_tests, lib_tests_dirpath]) - # Update precompiled libraries and tests + lib_assets = "assets" + lib_assets_dirpath = os.path.join(lib_dirpath, lib_assets) + + if not os.path.exists(lib_assets_dirpath): + print_stage("Checking out Assets") + + if make_utils.command_missing(args.svn_command): + sys.stderr.write("svn not found, can't checkout assets\n") + sys.exit(1) + + svn_url_assets = svn_url + lib_assets + call(svn_non_interactive + ["checkout", svn_url_assets, lib_assets_dirpath]) + + # Update precompiled libraries, assets and tests if not os.path.isdir(lib_dirpath): print("Library path: %r, not found, skipping" % lib_dirpath) else: paths_local_and_remote = [] if os.path.exists(os.path.join(lib_dirpath, ".svn")): - print_stage("Updating Precompiled Libraries and Tests (one repository)") + print_stage("Updating Precompiled Libraries, Assets and Tests (one repository)") paths_local_and_remote.append((lib_dirpath, svn_url)) else: - print_stage("Updating Precompiled Libraries and Tests (multiple repositories)") + print_stage("Updating Precompiled Libraries, Assets and Tests (multiple repositories)") # Separate paths checked out. for dirname in os.listdir(lib_dirpath): if dirname.startswith("."): diff --git a/doc/python_api/sphinx_doc_gen.py b/doc/python_api/sphinx_doc_gen.py index b070a54407c..74bee60d662 100644 --- a/doc/python_api/sphinx_doc_gen.py +++ b/doc/python_api/sphinx_doc_gen.py @@ -2098,6 +2098,8 @@ def write_rst_types_index(basepath): fw(title_string("Types (bpy.types)", "=")) fw(".. module:: bpy.types\n\n") fw(".. toctree::\n") + # Only show top-level entries (avoids unreasonably large pages). + fw(" :maxdepth: 1\n") fw(" :glob:\n\n") fw(" bpy.types.*\n\n") @@ -2124,6 +2126,8 @@ def write_rst_ops_index(basepath): write_example_ref("", fw, "bpy.ops") fw(".. toctree::\n") fw(" :caption: Submodules\n") + # Only show top-level entries (avoids unreasonably large pages). 
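The `make_test.py` change above is part of a wider pattern in this patch (see also `cmake_print_build_options.py` and the removed `# type: ignore` comments in `make_bpy_wheel.py`): the build scripts gain return-type annotations so that `make check_mypy` can verify them. A minimal, self-contained version of the annotated pattern, for illustration only:

```python
# Annotated argument parsing, as used by the updated build scripts.
import argparse

def parse_arguments() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--ctest-command", default="ctest")
    parser.add_argument("--cmake-command", default="cmake")
    return parser.parse_args()

def main() -> None:
    args = parse_arguments()
    print(args.ctest_command, args.cmake_command)

if __name__ == "__main__":
    main()
```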
+ fw(" :maxdepth: 1\n") fw(" :glob:\n\n") fw(" bpy.ops.*\n\n") file.close() diff --git a/extern/audaspace/CMakeLists.txt b/extern/audaspace/CMakeLists.txt index ca1849321e2..1ae2fbfba10 100644 --- a/extern/audaspace/CMakeLists.txt +++ b/extern/audaspace/CMakeLists.txt @@ -513,17 +513,19 @@ if(WITH_FFTW) src/fx/Convolver.cpp src/fx/ConvolverReader.cpp src/fx/ConvolverSound.cpp + src/fx/Equalizer.cpp src/fx/FFTConvolver.cpp src/fx/HRTF.cpp src/fx/ImpulseResponse.cpp src/util/FFTPlan.cpp ) set(FFTW_HDR - include/fx/BinauralSound.h + include/fx/BinauralSound.h include/fx/BinauralReader.h include/fx/Convolver.h include/fx/ConvolverReader.h include/fx/ConvolverSound.h + include/fx/Equalizer.h include/fx/FFTConvolver.h include/fx/HRTF.h include/fx/HRTFLoader.h diff --git a/extern/audaspace/bindings/C/AUD_Sound.cpp b/extern/audaspace/bindings/C/AUD_Sound.cpp index 8a3c9d1bbc9..dbedd0045b5 100644 --- a/extern/audaspace/bindings/C/AUD_Sound.cpp +++ b/extern/audaspace/bindings/C/AUD_Sound.cpp @@ -54,6 +54,7 @@ #ifdef WITH_CONVOLUTION #include "fx/BinauralSound.h" #include "fx/ConvolverSound.h" +#include "fx/Equalizer.h" #endif #include @@ -768,4 +769,14 @@ AUD_API AUD_Sound* AUD_Sound_Binaural(AUD_Sound* sound, AUD_HRTF* hrtfs, AUD_Sou } } +AUD_API AUD_Sound* AUD_Sound_equalize(AUD_Sound* sound, float *definition, int size, float maxFreqEq, int sizeConversion) +{ + assert(sound); + + std::shared_ptr buf = std::shared_ptr(new Buffer(sizeof(float)*size)); + std::memcpy(buf->getBuffer(), definition, sizeof(float)*size); + AUD_Sound *equalizer=new AUD_Sound(new Equalizer(*sound, buf, size, maxFreqEq, sizeConversion)); + return equalizer; +} + #endif diff --git a/extern/audaspace/bindings/C/AUD_Sound.h b/extern/audaspace/bindings/C/AUD_Sound.h index fc73a31e15c..dd4fad85122 100644 --- a/extern/audaspace/bindings/C/AUD_Sound.h +++ b/extern/audaspace/bindings/C/AUD_Sound.h @@ -397,6 +397,16 @@ extern AUD_API AUD_Sound* AUD_Sound_mutable(AUD_Sound* sound); #ifdef WITH_CONVOLUTION extern AUD_API AUD_Sound* AUD_Sound_Convolver(AUD_Sound* sound, AUD_ImpulseResponse* filter, AUD_ThreadPool* threadPool); extern AUD_API AUD_Sound* AUD_Sound_Binaural(AUD_Sound* sound, AUD_HRTF* hrtfs, AUD_Source* source, AUD_ThreadPool* threadPool); + + /** + * Creates an Equalizer for the sound + * \param sound The handle of the sound + * \param definition buffer of size*sizeof(float) with the array of equalization values + * \param maxFreqEq Maximum frequency refered by the array + * \param sizeConversion Size of the transformation. Must be 2^number (for example 1024, 2048,...) + * \return A handle to the Equalizer refered to that sound + */ + extern AUD_API AUD_Sound* AUD_Sound_equalize(AUD_Sound* sound, float *definition, int size, float maxFreqEq, int sizeConversion); #endif #ifdef __cplusplus diff --git a/extern/audaspace/bindings/C/AUD_Special.h b/extern/audaspace/bindings/C/AUD_Special.h index 1d181d33f87..f9a239acd61 100644 --- a/extern/audaspace/bindings/C/AUD_Special.h +++ b/extern/audaspace/bindings/C/AUD_Special.h @@ -53,6 +53,7 @@ extern AUD_API AUD_Handle* AUD_pauseAfter(AUD_Handle* handle, double seconds); * \param buffer The buffer to write to. Must have a size of 3*4*length. * \param length How many samples to read from the sound. * \param samples_per_second How many samples to read per second of the sound. + * \param interrupt Must point to a short that equals 0. If it is set to a non-zero value, the method will be interrupted and return 0. * \return How many samples really have been read. Always <= length. 
*/ extern AUD_API int AUD_readSound(AUD_Sound* sound, float* buffer, int length, int samples_per_second, short* interrupt); diff --git a/extern/audaspace/bindings/python/setup.py.in b/extern/audaspace/bindings/python/setup.py.in index 0e6666e06a0..e0fe4a0af42 100644 --- a/extern/audaspace/bindings/python/setup.py.in +++ b/extern/audaspace/bindings/python/setup.py.in @@ -5,12 +5,12 @@ import os import codecs import numpy -from distutils.core import setup, Extension +from setuptools import setup, Extension if len(sys.argv) > 2 and sys.argv[1] == '--build-docs': import subprocess - from distutils.core import Distribution - from distutils.command.build import build + from setuptools import Distribution + from setuptools.command.build import build dist = Distribution() cmd = build(dist) diff --git a/extern/audaspace/include/fx/Equalizer.h b/extern/audaspace/include/fx/Equalizer.h new file mode 100644 index 00000000000..87e24fa4b3f --- /dev/null +++ b/extern/audaspace/include/fx/Equalizer.h @@ -0,0 +1,106 @@ +/******************************************************************************* + * Copyright 2022 Marcos Perez Gonzalez + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#pragma once + +/** + * @file Equalizer.h + * @ingroup fx + * The Equalizer class. + */ + +#include +#include + +#include "ISound.h" +#include "ImpulseResponse.h" + +AUD_NAMESPACE_BEGIN + +class Buffer; +class ImpulseResponse; +/** + * This class represents a sound that can be modified depending on a given impulse response. + */ +class AUD_API Equalizer : public ISound +{ +private: + /** + * A pointer to the imput sound. + */ + std::shared_ptr m_sound; + + /** + * Local definition of Equalizer + */ + std::shared_ptr m_bufEQ; + + /** + * A pointer to the impulse response. + */ + std::shared_ptr m_impulseResponse; + + /** + * delete copy constructor and operator= + */ + Equalizer(const Equalizer&) = delete; + Equalizer& operator=(const Equalizer&) = delete; + + /** + * Create ImpulseResponse from the definition in the Buffer, + * using at the end a minimum phase change + */ + std::shared_ptr createImpulseResponse(); + + /** + * Create an Impulse Response with minimum phase distortion using Homomorphic + * The input is an Impulse Response + */ + std::shared_ptr minimumPhaseFilterHomomorphic(std::shared_ptr original, int lOriginal, int lWork); + + /** + * Create an Impulse Response with minimum phase distortion using Hilbert + * The input is an Impulse Response + */ + std::shared_ptr minimumPhaseFilterHilbert(std::shared_ptr original, int lOriginal, int lWork); + +public: + /** + * Creates a new Equalizer. + * \param sound The sound that will be equalized + */ + Equalizer(std::shared_ptr sound, std::shared_ptr bufEQ, int externalSizeEq, float maxFreqEq, int sizeConversion); + + virtual ~Equalizer(); + virtual std::shared_ptr createReader(); + + /* + * Length of the external equalizer definition. 
It must be the number of "float" positions of the Buffer + */ + int external_size_eq; + + /* + * Length of the internal equalizer definition + */ + int filter_length; + + /* + * Maximum frequency used in the equalizer definition + */ + float maxFreqEq; +}; + +AUD_NAMESPACE_END diff --git a/extern/audaspace/src/fx/BinauralReader.cpp b/extern/audaspace/src/fx/BinauralReader.cpp index 2792adada8a..12b8866c470 100644 --- a/extern/audaspace/src/fx/BinauralReader.cpp +++ b/extern/audaspace/src/fx/BinauralReader.cpp @@ -27,7 +27,7 @@ AUD_NAMESPACE_BEGIN BinauralReader::BinauralReader(std::shared_ptr reader, std::shared_ptr hrtfs, std::shared_ptr source, std::shared_ptr threadPool, std::shared_ptr plan) : - m_reader(reader), m_hrtfs(hrtfs), m_source(source), m_N(plan->getSize()), m_threadPool(threadPool), m_position(0), m_eosReader(false), m_eosTail(false), m_transition(false), m_transPos(CROSSFADE_SAMPLES*NUM_OUTCHANNELS) + m_position(0), m_reader(reader), m_hrtfs(hrtfs), m_source(source), m_N(plan->getSize()), m_transition(false), m_transPos(CROSSFADE_SAMPLES*NUM_OUTCHANNELS), m_eosReader(false), m_eosTail(false), m_threadPool(threadPool) { if(m_hrtfs->isEmpty()) AUD_THROW(StateException, "The provided HRTF object is empty"); diff --git a/extern/audaspace/src/fx/Convolver.cpp b/extern/audaspace/src/fx/Convolver.cpp index 24b205e9282..ab0036d9807 100644 --- a/extern/audaspace/src/fx/Convolver.cpp +++ b/extern/audaspace/src/fx/Convolver.cpp @@ -23,7 +23,7 @@ AUD_NAMESPACE_BEGIN Convolver::Convolver(std::shared_ptr>>>> ir, int irLength, std::shared_ptr threadPool, std::shared_ptr plan) : - m_N(plan->getSize()), m_M(plan->getSize()/2), m_L(plan->getSize()/2), m_irBuffers(ir), m_irLength(irLength), m_threadPool(threadPool), m_numThreads(std::min(threadPool->getNumOfThreads(), static_cast(m_irBuffers->size() - 1))), m_tailCounter(0), m_eos(false) + m_N(plan->getSize()), m_M(plan->getSize()/2), m_L(plan->getSize()/2), m_irBuffers(ir), m_numThreads(std::min(threadPool->getNumOfThreads(), static_cast(m_irBuffers->size() - 1))), m_threadPool(threadPool), m_irLength(irLength), m_tailCounter(0), m_eos(false) { m_resetFlag = false; diff --git a/extern/audaspace/src/fx/ConvolverReader.cpp b/extern/audaspace/src/fx/ConvolverReader.cpp index d5d9050f9a1..e79ac393368 100644 --- a/extern/audaspace/src/fx/ConvolverReader.cpp +++ b/extern/audaspace/src/fx/ConvolverReader.cpp @@ -24,7 +24,7 @@ AUD_NAMESPACE_BEGIN ConvolverReader::ConvolverReader(std::shared_ptr reader, std::shared_ptr ir, std::shared_ptr threadPool, std::shared_ptr plan) : - m_reader(reader), m_ir(ir), m_N(plan->getSize()), m_eosReader(false), m_eosTail(false), m_inChannels(reader->getSpecs().channels), m_irChannels(ir->getSpecs().channels), m_threadPool(threadPool), m_position(0) + m_position(0), m_reader(reader), m_ir(ir), m_N(plan->getSize()), m_eosReader(false), m_eosTail(false), m_inChannels(reader->getSpecs().channels), m_irChannels(ir->getSpecs().channels), m_threadPool(threadPool) { m_nChannelThreads = std::min((int)threadPool->getNumOfThreads(), m_inChannels); m_futures.resize(m_nChannelThreads); diff --git a/extern/audaspace/src/fx/Equalizer.cpp b/extern/audaspace/src/fx/Equalizer.cpp new file mode 100644 index 00000000000..3cafcc17625 --- /dev/null +++ b/extern/audaspace/src/fx/Equalizer.cpp @@ -0,0 +1,367 @@ +/******************************************************************************* + * Copyright 2022 Marcos Perez Gonzalez + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except 
in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + ******************************************************************************/ + +#include "fx/Equalizer.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "Exception.h" + +#include "fx/ConvolverReader.h" +#include "fx/ImpulseResponse.h" +#include "util/Buffer.h" +#include "util/FFTPlan.h" +#include "util/ThreadPool.h" + +AUD_NAMESPACE_BEGIN + +Equalizer::Equalizer(std::shared_ptr sound, std::shared_ptr bufEQ, int externalSizeEq, float maxFreqEq, int sizeConversion) : m_sound(sound), m_bufEQ(bufEQ) +{ + this->maxFreqEq = maxFreqEq; + this->external_size_eq = externalSizeEq; + + filter_length = sizeConversion; +} + +Equalizer::~Equalizer() +{ +} + +std::shared_ptr Equalizer::createReader() +{ + std::shared_ptr fp = std::shared_ptr(new FFTPlan(filter_length)); + // 2 threads to start with + return std::shared_ptr(new ConvolverReader(m_sound->createReader(), createImpulseResponse(), std::shared_ptr(new ThreadPool(2)), fp)); +} + +float calculateValueArray(float* data, float minX, float maxX, int length, float posX) +{ + if(posX < minX) + return 1.0; + if(posX > maxX) + return data[length - 1]; + float interval = (maxX - minX) / (float) length; + int idx = (int) ((posX - minX) / interval); + return data[idx]; +} + +void complex_prod(float a, float b, float c, float d, float* r, float* imag) +{ + float prod1 = a * c; + float prod2 = b * d; + float prod3 = (a + b) * (c + d); + + // Real Part + *r = prod1 - prod2; + + // Imaginary Part + *imag = prod3 - (prod1 + prod2); +} + +/** + * The creation of the ImpuseResponse which will be convoluted with the sound + * + * The implementation is based on scikit-signal + */ +std::shared_ptr Equalizer::createImpulseResponse() +{ + std::shared_ptr fp = std::shared_ptr(new FFTPlan(filter_length)); + fftwf_complex* buffer = (fftwf_complex*) fp->getBuffer(); + std::memset(buffer, 0, filter_length * sizeof(fftwf_complex)); + std::shared_ptr soundReader = m_sound.get()->createReader(); + Specs specsSound = soundReader.get()->getSpecs(); + + int sampleRate = specsSound.rate; + + for(unsigned i = 0; i < filter_length / 2; i++) + { + double freq = (((float) i) / (float) filter_length) * (float) sampleRate; + + double dbGain = calculateValueArray(m_bufEQ->getBuffer(), 0.0, maxFreqEq, external_size_eq, freq); + + // gain = 10^(decibels / 20.0) + // 0 db = 1 + // 20 db = 10 + // 40 db = 100 + float gain = (float) pow(10.0, dbGain / 20.0); + + if(i == filter_length / 2 - 1) + { + gain = 0; + } + // IMPORTANT!!!! It is needed for the minimum phase step. + // Without this, the amplitude would be square rooted + // + gain *= gain; + + // Calculation of exponential with std.. or "by hand" + /* + std::complex preShift= std::complex(0.0, -(filter_length - 1) + / 2. * M_PI * freq / ( sampleRate/2)); std::complex shift = + std::exp(preShift); + + std::complex cGain = gain * shift; + */ + + float imaginary_shift = -(filter_length - 1) / 2. 
* M_PI * freq / (sampleRate / 2); + float cGain_real = gain * cos(imaginary_shift); + float cGain_imag = gain * sin(imaginary_shift); + + int i2 = filter_length - i - 1; + + buffer[i][0] = cGain_real; // Real + buffer[i][1] = cGain_imag; // Imag + + if(i > 0 && i2 < filter_length) + { + buffer[i2][0] = cGain_real; // Real + buffer[i2][1] = cGain_imag; // Imag + } + } + + // In place. From Complex to sample_t + fp->IFFT(buffer); + + // Window Hamming + sample_t* pt_sample_t = (sample_t*) buffer; + float half_filter = ((float) filter_length) / 2.0; + for(int i = 0; i < filter_length; i++) + { + // Centered in filter_length/2 + float window = 0.54 - 0.46 * cos((2 * M_PI * (float) i) / (float) (filter_length - 1)); + pt_sample_t[i] *= window; + } + + std::shared_ptr b2 = std::shared_ptr(new Buffer(filter_length * sizeof(sample_t))); + + sample_t* buffer_real = (sample_t*) buffer; + sample_t* buffer2 = b2->getBuffer(); + float normaliziter = (float) filter_length; + for(int i = 0; i < filter_length; i++) + { + buffer2[i] = (buffer_real[i] / normaliziter); + } + + fp->freeBuffer(buffer); + + // + // Here b2 is the buffer with a "valid" FIR (remember the squared amplitude + // + std::shared_ptr ir_minimum = minimumPhaseFilterHomomorphic(b2, filter_length, -1); + + Specs specsIR; + specsIR.rate = sampleRate; + specsIR.channels = CHANNELS_MONO; + + return std::shared_ptr(new ImpulseResponse(std::shared_ptr(new StreamBuffer(ir_minimum, specsIR)), fp)); +} + +std::shared_ptr Equalizer::minimumPhaseFilterHomomorphic(std::shared_ptr original, int lOriginal, int lWork) +{ + void* b_orig = original->getBuffer(); + + if(lWork < lOriginal || lWork < 0) + { + lWork = (int) pow(2, ceil(log2((float) (2 * (lOriginal - 1) / 0.01)))); + } + + std::shared_ptr fp = std::shared_ptr(new FFTPlan(lWork, 0.1)); + fftwf_complex* buffer = (fftwf_complex*) fp->getBuffer(); + sample_t* b_work = (sample_t*) buffer; + // Padding with 0 + std::memset(b_work, 0, lWork * sizeof(sample_t)); + std::memcpy(b_work, b_orig, lOriginal * sizeof(sample_t)); + + fp->FFT(b_work); + + for(int i = 0; i < lWork / 2; i++) + { + buffer[i][0] = fabs(sqrt(buffer[i][0] * buffer[i][0] + buffer[i][1] * buffer[i][1])); + buffer[i][1] = 0.0; + int conjugate = lWork - i - 1; + buffer[conjugate][0] = buffer[i][0]; + buffer[conjugate][1] = 0.0; + } + + double threshold = pow(10.0, -7); + float logThreshold = (float) log(threshold); + // take 0.25*log(|H|**2) = 0.5*log(|H|) + for(int i = 0; i < lWork; i++) + { + if(buffer[i][0] < threshold) + { + buffer[i][0] = 0.5 * logThreshold; + } + else + { + buffer[i][0] = 0.5 * log(buffer[i][0]); + } + } + + fp->IFFT(buffer); + + // homomorphic filter + int stop = (lOriginal + 1) / 2; + b_work[0] = b_work[0] / (float) lWork; + for(int i = 1; i < stop; i++) + { + b_work[i] = b_work[i] / (float) lWork * 2.0; + } + for(int i = stop; i < lWork; i++) + { + b_work[i] = 0; + } + + fp->FFT(buffer); + // EXP + // e^x = e^ (a+bi)= e^a * e^bi = e^a * (cos b + i sin b) + for(int i = 0; i < lWork / 2; i++) + { + float new_real; + float new_imag; + new_real = exp(buffer[i][0]) * cos(buffer[i][1]); + new_imag = exp(buffer[i][0]) * sin(buffer[i][1]); + + buffer[i][0] = new_real; + buffer[i][1] = new_imag; + int conjugate = lWork - i - 1; + buffer[conjugate][0] = new_real; + buffer[conjugate][1] = new_imag; + } + + // IFFT + fp->IFFT(buffer); + + // Create new clean Buffer with only the result and normalization + int lOut = (lOriginal / 2) + lOriginal % 2; + std::shared_ptr bOut = std::shared_ptr(new Buffer(sizeof(float) * 
diff --git a/extern/audaspace/src/fx/FFTConvolver.cpp b/extern/audaspace/src/fx/FFTConvolver.cpp
index 868a1ebbaf3..cf32b2c4f9a 100644
--- a/extern/audaspace/src/fx/FFTConvolver.cpp
+++ b/extern/audaspace/src/fx/FFTConvolver.cpp
@@ -22,7 +22,7 @@ AUD_NAMESPACE_BEGIN
 FFTConvolver::FFTConvolver(std::shared_ptr<std::vector<std::complex<sample_t>>> ir, std::shared_ptr<FFTPlan> plan) :
-	m_plan(plan), m_N(plan->getSize()), m_M(plan->getSize()/2), m_L(plan->getSize()/2), m_tailPos(0), m_irBuffer(ir)
+	m_plan(plan), m_N(plan->getSize()), m_M(plan->getSize()/2), m_L(plan->getSize()/2), m_irBuffer(ir), m_tailPos(0)
 {
 	m_tail = (float*)calloc(m_M - 1, sizeof(float));
 	m_realBufLen = ((m_N / 2) + 1) * 2;
diff --git a/extern/audaspace/src/fx/HRTFLoaderUnix.cpp b/extern/audaspace/src/fx/HRTFLoaderUnix.cpp
index 12a23913912..00092b1c46b 100644 --- 
a/extern/audaspace/src/fx/HRTFLoaderUnix.cpp +++ b/extern/audaspace/src/fx/HRTFLoaderUnix.cpp @@ -75,7 +75,7 @@ void HRTFLoader::loadHRTFs(std::shared_ptr hrtfs, char ear, const std::str if(ear == 'L') azim = 360 - azim; } - catch(std::exception& e) + catch(...) { AUD_THROW(FileException, "The HRTF name doesn't follow the naming scheme: " + filename); } @@ -86,4 +86,4 @@ void HRTFLoader::loadHRTFs(std::shared_ptr hrtfs, char ear, const std::str return; } -AUD_NAMESPACE_END \ No newline at end of file +AUD_NAMESPACE_END diff --git a/extern/audaspace/src/fx/HRTFLoaderWindows.cpp b/extern/audaspace/src/fx/HRTFLoaderWindows.cpp index 148f1fa015d..303b409cb6c 100644 --- a/extern/audaspace/src/fx/HRTFLoaderWindows.cpp +++ b/extern/audaspace/src/fx/HRTFLoaderWindows.cpp @@ -78,7 +78,7 @@ void HRTFLoader::loadHRTFs(std::shared_ptr hrtfs, char ear, const std::str if(ear == 'L') azim = 360 - azim; } - catch(std::exception& e) + catch(...) { AUD_THROW(FileException, "The HRTF name doesn't follow the naming scheme: " + filename); } @@ -90,4 +90,4 @@ void HRTFLoader::loadHRTFs(std::shared_ptr hrtfs, char ear, const std::str return; } -AUD_NAMESPACE_END \ No newline at end of file +AUD_NAMESPACE_END diff --git a/extern/mantaflow/CMakeLists.txt b/extern/mantaflow/CMakeLists.txt index 06767e9af1e..2c503071184 100644 --- a/extern/mantaflow/CMakeLists.txt +++ b/extern/mantaflow/CMakeLists.txt @@ -13,10 +13,12 @@ endif() # Exporting functions from the blender binary gives linker warnings on Apple arm64 systems. # Silence them here. -if(APPLE AND ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")) - if(CMAKE_COMPILER_IS_GNUCXX OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") - string(APPEND CMAKE_C_FLAGS " -fvisibility=hidden") - string(APPEND CMAKE_CXX_FLAGS " -fvisibility=hidden") +if(APPLE) + if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64") + if(CMAKE_COMPILER_IS_GNUCXX OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") + string(APPEND CMAKE_C_FLAGS " -fvisibility=hidden") + string(APPEND CMAKE_CXX_FLAGS " -fvisibility=hidden") + endif() endif() endif() @@ -261,9 +263,11 @@ set(LIB blender_add_lib(extern_mantaflow "${SRC}" "${INC}" "${INC_SYS}" "${LIB}") -# The VDB libs above are only added to as INTERFACE libs by blender_add_lib, -# meaning extern_mantaflow itself actually does not have a dependency on the -# openvdb libraries, and CMAKE is free to link the vdb libs before -# extern_mantaflow causing linker errors on linux. By explicitly declaring -# a dependency here, cmake will do the right thing. -target_link_libraries(extern_mantaflow PRIVATE ${OPENVDB_LIBRARIES}) +if(WITH_OPENVDB) + # The VDB libs above are only added to as INTERFACE libs by blender_add_lib, + # meaning extern_mantaflow itself actually does not have a dependency on the + # openvdb libraries, and CMAKE is free to link the vdb libs before + # extern_mantaflow causing linker errors on linux. By explicitly declaring + # a dependency here, cmake will do the right thing. 
+ target_link_libraries(extern_mantaflow PRIVATE ${OPENVDB_LIBRARIES}) +endif() diff --git a/extern/vulkan_memory_allocator/CMakeLists.txt b/extern/vulkan_memory_allocator/CMakeLists.txt index 0b709e8dda1..666fb824322 100644 --- a/extern/vulkan_memory_allocator/CMakeLists.txt +++ b/extern/vulkan_memory_allocator/CMakeLists.txt @@ -7,6 +7,7 @@ set(INC set(INC_SYS ${VULKAN_INCLUDE_DIRS} + ${MOLTENVK_INCLUDE_DIRS} ) set(SRC diff --git a/extern/vulkan_memory_allocator/patches/remove_compilation_warning.diff b/extern/vulkan_memory_allocator/patches/remove_compilation_warning.diff new file mode 100644 index 00000000000..de4b8d10878 --- /dev/null +++ b/extern/vulkan_memory_allocator/patches/remove_compilation_warning.diff @@ -0,0 +1,15 @@ +diff --git a/extern/vulkan_memory_allocator/vk_mem_alloc.h b/extern/vulkan_memory_allocator/vk_mem_alloc.h +index 60f572038c0..63a9994ba46 100644 +--- a/extern/vulkan_memory_allocator/vk_mem_alloc.h ++++ b/extern/vulkan_memory_allocator/vk_mem_alloc.h +@@ -13371,8 +13371,8 @@ bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) + // Early return when max found + if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) + { +- VMA_ASSERT(m_PassStats.allocationsMoved == m_MaxPassAllocations || +- m_PassStats.bytesMoved == m_MaxPassBytes && "Exceeded maximal pass threshold!"); ++ VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations || ++ m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!"); + return true; + } + return false; diff --git a/extern/vulkan_memory_allocator/vk_mem_alloc.h b/extern/vulkan_memory_allocator/vk_mem_alloc.h index 60f572038c0..369b6178d2d 100644 --- a/extern/vulkan_memory_allocator/vk_mem_alloc.h +++ b/extern/vulkan_memory_allocator/vk_mem_alloc.h @@ -1,19558 +1,19558 @@ -// -// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. -// - -#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H -#define AMD_VULKAN_MEMORY_ALLOCATOR_H - -/** \mainpage Vulkan Memory Allocator - -Version 3.0.1 (2022-05-26) - -Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. 
\n -License: MIT - -API documentation divided into groups: [Modules](modules.html) - -\section main_table_of_contents Table of contents - -- User guide - - \subpage quick_start - - [Project setup](@ref quick_start_project_setup) - - [Initialization](@ref quick_start_initialization) - - [Resource allocation](@ref quick_start_resource_allocation) - - \subpage choosing_memory_type - - [Usage](@ref choosing_memory_type_usage) - - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) - - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) - - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) - - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) - - \subpage memory_mapping - - [Mapping functions](@ref memory_mapping_mapping_functions) - - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) - - [Cache flush and invalidate](@ref memory_mapping_cache_control) - - \subpage staying_within_budget - - [Querying for budget](@ref staying_within_budget_querying_for_budget) - - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) - - \subpage resource_aliasing - - \subpage custom_memory_pools - - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) - - [Linear allocation algorithm](@ref linear_algorithm) - - [Free-at-once](@ref linear_algorithm_free_at_once) - - [Stack](@ref linear_algorithm_stack) - - [Double stack](@ref linear_algorithm_double_stack) - - [Ring buffer](@ref linear_algorithm_ring_buffer) - - \subpage defragmentation - - \subpage statistics - - [Numeric statistics](@ref statistics_numeric_statistics) - - [JSON dump](@ref statistics_json_dump) - - \subpage allocation_annotation - - [Allocation user data](@ref allocation_user_data) - - [Allocation names](@ref allocation_names) - - \subpage virtual_allocator - - \subpage debugging_memory_usage - - [Memory initialization](@ref debugging_memory_usage_initialization) - - [Margins](@ref debugging_memory_usage_margins) - - [Corruption detection](@ref debugging_memory_usage_corruption_detection) - - \subpage opengl_interop -- \subpage usage_patterns - - [GPU-only resource](@ref usage_patterns_gpu_only) - - [Staging copy for upload](@ref usage_patterns_staging_copy_upload) - - [Readback](@ref usage_patterns_readback) - - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading) - - [Other use cases](@ref usage_patterns_other_use_cases) -- \subpage configuration - - [Pointers to Vulkan functions](@ref config_Vulkan_functions) - - [Custom host memory allocator](@ref custom_memory_allocator) - - [Device memory allocation callbacks](@ref allocation_callbacks) - - [Device heap memory limit](@ref heap_memory_limit) -- Extension support - - \subpage vk_khr_dedicated_allocation - - \subpage enabling_buffer_device_address - - \subpage vk_ext_memory_priority - - \subpage vk_amd_device_coherent_memory -- \subpage general_considerations - - [Thread safety](@ref general_considerations_thread_safety) - - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility) - - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) - - [Allocation algorithm](@ref general_considerations_allocation_algorithm) - - [Features not supported](@ref general_considerations_features_not_supported) - -\section main_see_also See also - -- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/) -- [**Source repository on 
GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) - -\defgroup group_init Library initialization - -\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object. - -\defgroup group_alloc Memory allocation - -\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images. -Most basic ones being: vmaCreateBuffer(), vmaCreateImage(). - -\defgroup group_virtual Virtual allocator - -\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm -for user-defined purpose without allocating any real GPU memory. - -\defgroup group_stats Statistics - -\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format. -See documentation chapter: \ref statistics. -*/ - - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef VULKAN_H_ - #include -#endif - -// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC, -// where AAA = major, BBB = minor, CCC = patch. -// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion. -#if !defined(VMA_VULKAN_VERSION) - #if defined(VK_VERSION_1_3) - #define VMA_VULKAN_VERSION 1003000 - #elif defined(VK_VERSION_1_2) - #define VMA_VULKAN_VERSION 1002000 - #elif defined(VK_VERSION_1_1) - #define VMA_VULKAN_VERSION 1001000 - #else - #define VMA_VULKAN_VERSION 1000000 - #endif -#endif - -#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS - extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; - extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; - extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; - extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; - extern PFN_vkAllocateMemory vkAllocateMemory; - extern PFN_vkFreeMemory vkFreeMemory; - extern PFN_vkMapMemory vkMapMemory; - extern PFN_vkUnmapMemory vkUnmapMemory; - extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; - extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; - extern PFN_vkBindBufferMemory vkBindBufferMemory; - extern PFN_vkBindImageMemory vkBindImageMemory; - extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; - extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; - extern PFN_vkCreateBuffer vkCreateBuffer; - extern PFN_vkDestroyBuffer vkDestroyBuffer; - extern PFN_vkCreateImage vkCreateImage; - extern PFN_vkDestroyImage vkDestroyImage; - extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; - #if VMA_VULKAN_VERSION >= 1001000 - extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; - extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; - extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; - extern PFN_vkBindImageMemory2 vkBindImageMemory2; - extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; - #endif // #if VMA_VULKAN_VERSION >= 1001000 -#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES - -#if !defined(VMA_DEDICATED_ALLOCATION) - #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation - #define VMA_DEDICATED_ALLOCATION 1 - #else - #define VMA_DEDICATED_ALLOCATION 0 - #endif -#endif - -#if !defined(VMA_BIND_MEMORY2) - #if VK_KHR_bind_memory2 - #define VMA_BIND_MEMORY2 1 - #else - #define VMA_BIND_MEMORY2 0 - #endif 
-#endif - -#if !defined(VMA_MEMORY_BUDGET) - #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) - #define VMA_MEMORY_BUDGET 1 - #else - #define VMA_MEMORY_BUDGET 0 - #endif -#endif - -// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. -#if !defined(VMA_BUFFER_DEVICE_ADDRESS) - #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 - #define VMA_BUFFER_DEVICE_ADDRESS 1 - #else - #define VMA_BUFFER_DEVICE_ADDRESS 0 - #endif -#endif - -// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers. -#if !defined(VMA_MEMORY_PRIORITY) - #if VK_EXT_memory_priority - #define VMA_MEMORY_PRIORITY 1 - #else - #define VMA_MEMORY_PRIORITY 0 - #endif -#endif - -// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers. -#if !defined(VMA_EXTERNAL_MEMORY) - #if VK_KHR_external_memory - #define VMA_EXTERNAL_MEMORY 1 - #else - #define VMA_EXTERNAL_MEMORY 0 - #endif -#endif - -// Define these macros to decorate all public functions with additional code, -// before and after returned type, appropriately. This may be useful for -// exporting the functions when compiling VMA as a separate library. Example: -// #define VMA_CALL_PRE __declspec(dllexport) -// #define VMA_CALL_POST __cdecl -#ifndef VMA_CALL_PRE - #define VMA_CALL_PRE -#endif -#ifndef VMA_CALL_POST - #define VMA_CALL_POST -#endif - -// Define this macro to decorate pointers with an attribute specifying the -// length of the array they point to if they are not null. -// -// The length may be one of -// - The name of another parameter in the argument list where the pointer is declared -// - The name of another member in the struct where the pointer is declared -// - The name of a member of a struct type, meaning the value of that member in -// the context of the call. For example -// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"), -// this means the number of memory heaps available in the device associated -// with the VmaAllocator being dealt with. -#ifndef VMA_LEN_IF_NOT_NULL - #define VMA_LEN_IF_NOT_NULL(len) -#endif - -// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang. -// see: https://clang.llvm.org/docs/AttributeReference.html#nullable -#ifndef VMA_NULLABLE - #ifdef __clang__ - #define VMA_NULLABLE _Nullable - #else - #define VMA_NULLABLE - #endif -#endif - -// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang. 
-// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull -#ifndef VMA_NOT_NULL - #ifdef __clang__ - #define VMA_NOT_NULL _Nonnull - #else - #define VMA_NOT_NULL - #endif -#endif - -// If non-dispatchable handles are represented as pointers then we can give -// then nullability annotations -#ifndef VMA_NOT_NULL_NON_DISPATCHABLE - #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) - #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL - #else - #define VMA_NOT_NULL_NON_DISPATCHABLE - #endif -#endif - -#ifndef VMA_NULLABLE_NON_DISPATCHABLE - #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) - #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE - #else - #define VMA_NULLABLE_NON_DISPATCHABLE - #endif -#endif - -#ifndef VMA_STATS_STRING_ENABLED - #define VMA_STATS_STRING_ENABLED 1 -#endif - -//////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////// -// -// INTERFACE -// -//////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////// - -// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE. -#ifndef _VMA_ENUM_DECLARATIONS - -/** -\addtogroup group_init -@{ -*/ - -/// Flags for created #VmaAllocator. -typedef enum VmaAllocatorCreateFlagBits -{ - /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. - - Using this flag may increase performance because internal mutexes are not used. - */ - VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, - /** \brief Enables usage of VK_KHR_dedicated_allocation extension. - - The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. - When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. - - Using this extension will automatically allocate dedicated blocks of memory for - some buffers and images instead of suballocating place for them out of bigger - memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT - flag) when it is recommended by the driver. It may improve performance on some - GPUs. - - You may set this flag only if you found out that following device extensions are - supported, you enabled them while creating Vulkan device passed as - VmaAllocatorCreateInfo::device, and you want them to be used internally by this - library: - - - VK_KHR_get_memory_requirements2 (device extension) - - VK_KHR_dedicated_allocation (device extension) - - When this flag is set, you can experience following warnings reported by Vulkan - validation layer. You can ignore them. - - > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. - */ - VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, - /** - Enables usage of VK_KHR_bind_memory2 extension. - - The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. 
- When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. - - You may set this flag only if you found out that this device extension is supported, - you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - and you want it to be used internally by this library. - - The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, - which allow to pass a chain of `pNext` structures while binding. - This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). - */ - VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, - /** - Enables usage of VK_EXT_memory_budget extension. - - You may set this flag only if you found out that this device extension is supported, - you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - and you want it to be used internally by this library, along with another instance extension - VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). - - The extension provides query for current memory usage and budget, which will probably - be more accurate than an estimation used by the library otherwise. - */ - VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, - /** - Enables usage of VK_AMD_device_coherent_memory extension. - - You may set this flag only if you: - - - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, - - want it to be used internally by this library. - - The extension and accompanying device feature provide access to memory types with - `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. - They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. - - When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. - To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, - returning `VK_ERROR_FEATURE_NOT_PRESENT`. - */ - VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, - /** - Enables usage of "buffer device address" feature, which allows you to use function - `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. - - You may set this flag only if you: - - 1. (For Vulkan version < 1.2) Found as available and enabled device extension - VK_KHR_buffer_device_address. - This extension is promoted to core Vulkan 1.2. - 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. - - When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. - The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to - allocated memory blocks wherever it might be needed. - - For more information, see documentation chapter \ref enabling_buffer_device_address. - */ - VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, - /** - Enables usage of VK_EXT_memory_priority extension in the library. 
- - You may set this flag only if you found available and enabled this device extension, - along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, - while creating Vulkan device passed as VmaAllocatorCreateInfo::device. - - When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority - are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. - - A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. - Larger values are higher priority. The granularity of the priorities is implementation-dependent. - It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. - The value to be used for default priority is 0.5. - For more details, see the documentation of the VK_EXT_memory_priority extension. - */ - VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, - - VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaAllocatorCreateFlagBits; -/// See #VmaAllocatorCreateFlagBits. -typedef VkFlags VmaAllocatorCreateFlags; - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/// \brief Intended usage of the allocated memory. -typedef enum VmaMemoryUsage -{ - /** No intended memory usage specified. - Use other members of VmaAllocationCreateInfo to specify your requirements. - */ - VMA_MEMORY_USAGE_UNKNOWN = 0, - /** - \deprecated Obsolete, preserved for backward compatibility. - Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. - */ - VMA_MEMORY_USAGE_GPU_ONLY = 1, - /** - \deprecated Obsolete, preserved for backward compatibility. - Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. - */ - VMA_MEMORY_USAGE_CPU_ONLY = 2, - /** - \deprecated Obsolete, preserved for backward compatibility. - Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. - */ - VMA_MEMORY_USAGE_CPU_TO_GPU = 3, - /** - \deprecated Obsolete, preserved for backward compatibility. - Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. - */ - VMA_MEMORY_USAGE_GPU_TO_CPU = 4, - /** - \deprecated Obsolete, preserved for backward compatibility. - Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. - */ - VMA_MEMORY_USAGE_CPU_COPY = 5, - /** - Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. - Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. - - Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. - - Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - */ - VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, - /** - Selects best memory type automatically. - This flag is recommended for most common use cases. - - When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), - you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT - in VmaAllocationCreateInfo::flags. - - It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. 
- vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() - and not with generic memory allocation functions. - */ - VMA_MEMORY_USAGE_AUTO = 7, - /** - Selects best memory type automatically with preference for GPU (device) memory. - - When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), - you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT - in VmaAllocationCreateInfo::flags. - - It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. - vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() - and not with generic memory allocation functions. - */ - VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, - /** - Selects best memory type automatically with preference for CPU (host) memory. - - When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), - you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT - in VmaAllocationCreateInfo::flags. - - It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. - vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() - and not with generic memory allocation functions. - */ - VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, - - VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF -} VmaMemoryUsage; - -/// Flags to be passed as VmaAllocationCreateInfo::flags. -typedef enum VmaAllocationCreateFlagBits -{ - /** \brief Set this flag if the allocation should have its own memory block. - - Use it for special, big resources, like fullscreen images used as attachments. - */ - VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, - - /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. - - If new allocation cannot be placed in any of the existing blocks, allocation - fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. - - You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and - #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. - */ - VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, - /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. - - Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. - - It is valid to use this flag for allocation made from memory type that is not - `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is - useful if you need an allocation that is efficient to use on GPU - (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that - support it (e.g. Intel GPU). - */ - VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, - /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. - - Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a - null-terminated string. Instead of copying pointer value, a local copy of the - string is made and stored in allocation's `pName`. The string is automatically - freed together with the allocation. It is also used in vmaBuildStatsString(). 
- */ - VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, - /** Allocation will be created from upper stack in a double stack pool. - - This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. - */ - VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, - /** Create both buffer/image and allocation, but don't bind them together. - It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. - The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). - Otherwise it is ignored. - - If you want to make sure the new buffer/image is not tied to the new memory allocation - through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block, - use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT. - */ - VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, - /** Create allocation only if additional device memory required for it, if any, won't exceed - memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. - */ - VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, - /** \brief Set this flag if the allocated memory will have aliasing resources. - - Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. - Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. - */ - VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, - /** - Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). - - - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, - you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. - - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. - This includes allocations created in \ref custom_memory_pools. - - Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number, - never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. - - \warning Violating this declaration may work correctly, but will likely be very slow. - Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;` - Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. - */ - VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, - /** - Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). - - - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, - you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. - - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. - This includes allocations created in \ref custom_memory_pools. - - Declares that mapped memory can be read, written, and accessed in random order, - so a `HOST_CACHED` memory type is required. 
- */ - VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, - /** - Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, - it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected - if it may improve performance. - - By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type - (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and - issue an explicit transfer to write/read your data. - To prepare for this possibility, don't forget to add appropriate flags like - `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. - */ - VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, - /** Allocation strategy that chooses smallest possible free range for the allocation - to minimize memory usage and fragmentation, possibly at the expense of allocation time. - */ - VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000, - /** Allocation strategy that chooses first suitable free range for the allocation - - not necessarily in terms of the smallest offset but the one that is easiest and fastest to find - to minimize allocation time, possibly at the expense of allocation quality. - */ - VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, - /** Allocation strategy that chooses always the lowest offset in available space. - This is not the most efficient strategy but achieves highly packed data. - Used internally by defragmentation, not recomended in typical usage. - */ - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, - /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. - */ - VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, - /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT. - */ - VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, - /** A bit mask to extract only `STRATEGY` bits from entire set of flags. - */ - VMA_ALLOCATION_CREATE_STRATEGY_MASK = - VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | - VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - - VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaAllocationCreateFlagBits; -/// See #VmaAllocationCreateFlagBits. -typedef VkFlags VmaAllocationCreateFlags; - -/// Flags to be passed as VmaPoolCreateInfo::flags. -typedef enum VmaPoolCreateFlagBits -{ - /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. - - This is an optional optimization flag. - - If you always allocate using vmaCreateBuffer(), vmaCreateImage(), - vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator - knows exact type of your allocations so it can handle Buffer-Image Granularity - in the optimal way. - - If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), - exact type of such allocations is not known, so allocator must be conservative - in handling Buffer-Image Granularity, which can lead to suboptimal allocation - (wasted memory). 
In that case, if you can make sure you always allocate only - buffers and linear images or only optimal images out of this pool, use this flag - to make allocator disregard Buffer-Image Granularity and so make allocations - faster and more optimal. - */ - VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, - - /** \brief Enables alternative, linear allocation algorithm in this pool. - - Specify this flag to enable linear allocation algorithm, which always creates - new allocations after last one and doesn't reuse space from allocations freed in - between. It trades memory consumption for simplified algorithm and data - structure, which has better performance and uses less memory for metadata. - - By using this flag, you can achieve behavior of free-at-once, stack, - ring buffer, and double stack. - For details, see documentation chapter \ref linear_algorithm. - */ - VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, - - /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. - */ - VMA_POOL_CREATE_ALGORITHM_MASK = - VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, - - VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaPoolCreateFlagBits; -/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits. -typedef VkFlags VmaPoolCreateFlags; - -/// Flags to be passed as VmaDefragmentationInfo::flags. -typedef enum VmaDefragmentationFlagBits -{ - /* \brief Use simple but fast algorithm for defragmentation. - May not achieve best results but will require least time to compute and least allocations to copy. - */ - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1, - /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified. - Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved. - */ - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2, - /* \brief Perform full defragmentation of memory. - Can result in notably more time to compute and allocations to copy, but will achieve best memory packing. - */ - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4, - /** \brief Use the most roboust algorithm at the cost of time to compute and number of copies to make. - Only available when bufferImageGranularity is greater than 1, since it aims to reduce - alignment issues between different types of resources. - Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT. - */ - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8, - - /// A bit mask to extract only `ALGORITHM` bits from entire set of flags. - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK = - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT | - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT | - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT | - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT, - - VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaDefragmentationFlagBits; -/// See #VmaDefragmentationFlagBits. -typedef VkFlags VmaDefragmentationFlags; - -/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove. -typedef enum VmaDefragmentationMoveOperation -{ - /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). - VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0, - /// Set this value if you cannot move the allocation. 
New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged. - VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, - /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed. - VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, -} VmaDefragmentationMoveOperation; - -/** @} */ - -/** -\addtogroup group_virtual -@{ -*/ - -/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. -typedef enum VmaVirtualBlockCreateFlagBits -{ - /** \brief Enables alternative, linear allocation algorithm in this virtual block. - - Specify this flag to enable linear allocation algorithm, which always creates - new allocations after last one and doesn't reuse space from allocations freed in - between. It trades memory consumption for simplified algorithm and data - structure, which has better performance and uses less memory for metadata. - - By using this flag, you can achieve behavior of free-at-once, stack, - ring buffer, and double stack. - For details, see documentation chapter \ref linear_algorithm. - */ - VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001, - - /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. - */ - VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = - VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT, - - VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaVirtualBlockCreateFlagBits; -/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits. -typedef VkFlags VmaVirtualBlockCreateFlags; - -/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. -typedef enum VmaVirtualAllocationCreateFlagBits -{ - /** \brief Allocation will be created from upper stack in a double stack pool. - - This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, - /** \brief Allocation strategy that tries to minimize memory usage. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, - /** \brief Allocation strategy that tries to minimize allocation time. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, - /** Allocation strategy that chooses always the lowest offset in available space. - This is not the most efficient strategy but achieves highly packed data. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. - - These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK, - - VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaVirtualAllocationCreateFlagBits; -/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. -typedef VkFlags VmaVirtualAllocationCreateFlags; - -/** @} */ - -#endif // _VMA_ENUM_DECLARATIONS - -#ifndef _VMA_DATA_TYPES_DECLARATIONS - -/** -\addtogroup group_init -@{ */ - -/** \struct VmaAllocator -\brief Represents main object of this library initialized. - -Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it. 
-Call function vmaDestroyAllocator() to destroy it. - -It is recommended to create just one object of this type per `VkDevice` object, -right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. -*/ -VK_DEFINE_HANDLE(VmaAllocator) - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/** \struct VmaPool -\brief Represents custom memory pool - -Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it. -Call function vmaDestroyPool() to destroy it. - -For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). -*/ -VK_DEFINE_HANDLE(VmaPool) - -/** \struct VmaAllocation -\brief Represents single memory allocation. - -It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type -plus unique offset. - -There are multiple ways to create such object. -You need to fill structure VmaAllocationCreateInfo. -For more information see [Choosing memory type](@ref choosing_memory_type). - -Although the library provides convenience functions that create Vulkan buffer or image, -allocate memory for it and bind them together, -binding of the allocation to a buffer or an image is out of scope of the allocation itself. -Allocation object can exist without buffer/image bound, -binding can be done manually by the user, and destruction of it can be done -independently of destruction of the allocation. - -The object also remembers its size and some other information. -To retrieve this information, use function vmaGetAllocationInfo() and inspect -returned structure VmaAllocationInfo. -*/ -VK_DEFINE_HANDLE(VmaAllocation) - -/** \struct VmaDefragmentationContext -\brief An opaque object that represents started defragmentation process. - -Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it. -Call function vmaEndDefragmentation() to destroy it. -*/ -VK_DEFINE_HANDLE(VmaDefragmentationContext) - -/** @} */ - -/** -\addtogroup group_virtual -@{ -*/ - -/** \struct VmaVirtualAllocation -\brief Represents single memory allocation done inside VmaVirtualBlock. - -Use it as a unique identifier to virtual allocation within the single block. - -Use value `VK_NULL_HANDLE` to represent a null/invalid allocation. -*/ -VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation); - -/** @} */ - -/** -\addtogroup group_virtual -@{ -*/ - -/** \struct VmaVirtualBlock -\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory. - -Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it. -For more information, see documentation chapter \ref virtual_allocator. - -This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally. -*/ -VK_DEFINE_HANDLE(VmaVirtualBlock) - -/** @} */ - -/** -\addtogroup group_init -@{ -*/ - -/// Callback function called after successful vkAllocateMemory. -typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryType, - VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, - VkDeviceSize size, - void* VMA_NULLABLE pUserData); - -/// Callback function called before vkFreeMemory. 
-typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryType, - VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, - VkDeviceSize size, - void* VMA_NULLABLE pUserData); - -/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. - -Provided for informative purpose, e.g. to gather statistics about number of -allocations or total amount of memory allocated in Vulkan. - -Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. -*/ -typedef struct VmaDeviceMemoryCallbacks -{ - /// Optional, can be null. - PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; - /// Optional, can be null. - PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; - /// Optional, can be null. - void* VMA_NULLABLE pUserData; -} VmaDeviceMemoryCallbacks; - -/** \brief Pointers to some Vulkan functions - a subset used by the library. - -Used in VmaAllocatorCreateInfo::pVulkanFunctions. -*/ -typedef struct VmaVulkanFunctions -{ - /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. - PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; - /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. - PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; - PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; - PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; - PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; - PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; - PFN_vkMapMemory VMA_NULLABLE vkMapMemory; - PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; - PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; - PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; - PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; - PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; - PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; - PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; - PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; - PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; - PFN_vkCreateImage VMA_NULLABLE vkCreateImage; - PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; - PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. - PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; - /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. - PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; -#endif -#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 - /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. - PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; - /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. 
- PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR; -#endif -#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 - PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR; -#endif -#if VMA_VULKAN_VERSION >= 1003000 - /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. - PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements; - /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. - PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements; -#endif -} VmaVulkanFunctions; - -/// Description of a Allocator to be created. -typedef struct VmaAllocatorCreateInfo -{ - /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum. - VmaAllocatorCreateFlags flags; - /// Vulkan physical device. - /** It must be valid throughout whole lifetime of created allocator. */ - VkPhysicalDevice VMA_NOT_NULL physicalDevice; - /// Vulkan device. - /** It must be valid throughout whole lifetime of created allocator. */ - VkDevice VMA_NOT_NULL device; - /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional. - /** Set to 0 to use default, which is currently 256 MiB. */ - VkDeviceSize preferredLargeHeapBlockSize; - /// Custom CPU memory allocation callbacks. Optional. - /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */ - const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; - /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional. - /** Optional, can be null. */ - const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks; - /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap. - - If not NULL, it must be a pointer to an array of - `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on - maximum number of bytes that can be allocated out of particular Vulkan memory - heap. - - Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that - heap. This is also the default in case of `pHeapSizeLimit` = NULL. - - If there is a limit defined for a heap: - - - If user tries to allocate more memory from that heap using this allocator, - the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. - - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the - value of this limit will be reported instead when using vmaGetMemoryProperties(). - - Warning! Using this feature may not be equivalent to installing a GPU with - smaller amount of memory, because graphics driver doesn't necessary fail new - allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is - exceeded. It may return success and just silently migrate some device memory - blocks to system RAM. This driver behavior can also be controlled using - VK_AMD_memory_overallocation_behavior extension. - */ - const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit; - - /** \brief Pointers to Vulkan functions. Can be null. - - For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions). 
- */ - const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions; - /** \brief Handle to Vulkan instance object. - - Starting from version 3.0.0 this member is no longer optional, it must be set! - */ - VkInstance VMA_NOT_NULL instance; - /** \brief Optional. The highest version of Vulkan that the application is designed to use. - - It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. - The patch version number specified is ignored. Only the major and minor versions are considered. - It must be less or equal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`. - Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation. - Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. - */ - uint32_t vulkanApiVersion; -#if VMA_EXTERNAL_MEMORY - /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type. - - If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount` - elements, defining external memory handle types of particular Vulkan memory type, - to be passed using `VkExportMemoryAllocateInfoKHR`. - - Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type. - This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL. - */ - const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes; -#endif // #if VMA_EXTERNAL_MEMORY -} VmaAllocatorCreateInfo; - -/// Information about existing #VmaAllocator object. -typedef struct VmaAllocatorInfo -{ - /** \brief Handle to Vulkan instance object. - - This is the same value as has been passed through VmaAllocatorCreateInfo::instance. - */ - VkInstance VMA_NOT_NULL instance; - /** \brief Handle to Vulkan physical device object. - - This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. - */ - VkPhysicalDevice VMA_NOT_NULL physicalDevice; - /** \brief Handle to Vulkan device object. - - This is the same value as has been passed through VmaAllocatorCreateInfo::device. - */ - VkDevice VMA_NOT_NULL device; -} VmaAllocatorInfo; - -/** @} */ - -/** -\addtogroup group_stats -@{ -*/ - -/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. - -These are fast to calculate. -See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). -*/ -typedef struct VmaStatistics -{ - /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. - */ - uint32_t blockCount; - /** \brief Number of #VmaAllocation objects allocated. - - Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. - */ - uint32_t allocationCount; - /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. - - \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object - (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls - "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. - */ - VkDeviceSize blockBytes; - /** \brief Total number of bytes occupied by all #VmaAllocation objects. - - Always less or equal than `blockBytes`. 
- Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan - but unused by any #VmaAllocation. - */ - VkDeviceSize allocationBytes; -} VmaStatistics; - -/** \brief More detailed statistics than #VmaStatistics. - -These are slower to calculate. Use for debugging purposes. -See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). - -Previous version of the statistics API provided averages, but they have been removed -because they can be easily calculated as: - -\code -VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; -VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; -VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; -\endcode -*/ -typedef struct VmaDetailedStatistics -{ - /// Basic statistics. - VmaStatistics statistics; - /// Number of free ranges of memory between allocations. - uint32_t unusedRangeCount; - /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. - VkDeviceSize allocationSizeMin; - /// Largest allocation size. 0 if there are 0 allocations. - VkDeviceSize allocationSizeMax; - /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. - VkDeviceSize unusedRangeSizeMin; - /// Largest empty range size. 0 if there are 0 empty ranges. - VkDeviceSize unusedRangeSizeMax; -} VmaDetailedStatistics; - -/** \brief General statistics from current state of the Allocator - -total memory usage across all memory heaps and types. - -These are slower to calculate. Use for debugging purposes. -See function vmaCalculateStatistics(). -*/ -typedef struct VmaTotalStatistics -{ - VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; - VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; - VmaDetailedStatistics total; -} VmaTotalStatistics; - -/** \brief Statistics of current memory usage and available budget for a specific memory heap. - -These are fast to calculate. -See function vmaGetHeapBudgets(). -*/ -typedef struct VmaBudget -{ - /** \brief Statistics fetched from the library. - */ - VmaStatistics statistics; - /** \brief Estimated current memory usage of the program, in bytes. - - Fetched from system using VK_EXT_memory_budget extension if enabled. - - It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects - also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or - `VkDeviceMemory` blocks allocated outside of this library, if any. - */ - VkDeviceSize usage; - /** \brief Estimated amount of memory available to the program, in bytes. - - Fetched from system using VK_EXT_memory_budget extension if enabled. - - It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors - external to the program, decided by the operating system. - Difference `budget - usage` is the amount of additional memory that can probably - be allocated without problems. Exceeding the budget may result in various problems. - */ - VkDeviceSize budget; -} VmaBudget; - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/** \brief Parameters of new #VmaAllocation. - -To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. -*/ -typedef struct VmaAllocationCreateInfo -{ - /// Use #VmaAllocationCreateFlagBits enum. - VmaAllocationCreateFlags flags; - /** \brief Intended usage of memory. 
- - You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n - If `pool` is not null, this member is ignored. - */ - VmaMemoryUsage usage; - /** \brief Flags that must be set in a Memory Type chosen for an allocation. - - Leave 0 if you specify memory requirements in other way. \n - If `pool` is not null, this member is ignored.*/ - VkMemoryPropertyFlags requiredFlags; - /** \brief Flags that preferably should be set in a memory type chosen for an allocation. - - Set to 0 if no additional flags are preferred. \n - If `pool` is not null, this member is ignored. */ - VkMemoryPropertyFlags preferredFlags; - /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. - - Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if - it meets other requirements specified by this structure, with no further - restrictions on memory type index. \n - If `pool` is not null, this member is ignored. - */ - uint32_t memoryTypeBits; - /** \brief Pool that this allocation should be created in. - - Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: - `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. - */ - VmaPool VMA_NULLABLE pool; - /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). - - If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either - null or pointer to a null-terminated string. The string will be then copied to - internal buffer, so it doesn't need to be valid after allocation call. - */ - void* VMA_NULLABLE pUserData; - /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. - - It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object - and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - Otherwise, it has the priority of a memory block where it is placed and this variable is ignored. - */ - float priority; -} VmaAllocationCreateInfo; - -/// Describes parameter of created #VmaPool. -typedef struct VmaPoolCreateInfo -{ - /** \brief Vulkan memory type index to allocate this pool from. - */ - uint32_t memoryTypeIndex; - /** \brief Use combination of #VmaPoolCreateFlagBits. - */ - VmaPoolCreateFlags flags; - /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. - - Specify nonzero to set explicit, constant size of memory blocks used by this - pool. - - Leave 0 to use default and let the library manage block sizes automatically. - Sizes of particular blocks may vary. - In this case, the pool will also support dedicated allocations. - */ - VkDeviceSize blockSize; - /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. - - Set to 0 to have no preallocated blocks and allow the pool be completely empty. - */ - size_t minBlockCount; - /** \brief Maximum number of blocks that can be allocated in this pool. Optional. - - Set to 0 to use default, which is `SIZE_MAX`, which means no limit. - - Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated - throughout whole lifetime of this pool. 
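As a small illustration of VmaAllocationCreateInfo above: a persistently mapped, host-visible staging allocation can be requested purely through flags, without hard-coding a memory type index. A sketch; the exact flag combination depends on the application's needs:

\code
VmaAllocationCreateInfo stagingAllocCreateInfo = {};
stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT; // exposes VmaAllocationInfo::pMappedData
stagingAllocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
stagingAllocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
\endcode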
- */ - size_t maxBlockCount; - /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations. - - It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object. - Otherwise, this variable is ignored. - */ - float priority; - /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0. - - Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. - It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough, - e.g. when doing interop with OpenGL. - */ - VkDeviceSize minAllocationAlignment; - /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional. - - Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`. - It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`. - Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool. - - Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`, - can be attached automatically by this library when using other, more convenient of its features. - */ - void* VMA_NULLABLE pMemoryAllocateNext; -} VmaPoolCreateInfo; - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). -typedef struct VmaAllocationInfo -{ - /** \brief Memory type index that this allocation was allocated from. - - It never changes. - */ - uint32_t memoryType; - /** \brief Handle to Vulkan memory object. - - Same memory object can be shared by multiple allocations. - - It can change after the allocation is moved during \ref defragmentation. - */ - VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; - /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. - - You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function - vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, - not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation - and apply this offset automatically. - - It can change after the allocation is moved during \ref defragmentation. - */ - VkDeviceSize offset; - /** \brief Size of this allocation, in bytes. - - It never changes. - - \note Allocation size returned in this variable may be greater than the size - requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the - allocation is accessible for operations on memory e.g. using a pointer after - mapping with vmaMapMemory(), but operations on the resource e.g. using - `vkCmdCopyBuffer` must be limited to the size of the resource. - */ - VkDeviceSize size; - /** \brief Pointer to the beginning of this allocation as mapped data. - - If the allocation hasn't been mapped using vmaMapMemory() and hasn't been - created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. 
- - It can change after call to vmaMapMemory(), vmaUnmapMemory(). - It can also change after the allocation is moved during \ref defragmentation. - */ - void* VMA_NULLABLE pMappedData; - /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). - - It can change after call to vmaSetAllocationUserData() for this allocation. - */ - void* VMA_NULLABLE pUserData; - /** \brief Custom allocation name that was set with vmaSetAllocationName(). - - It can change after call to vmaSetAllocationName() for this allocation. - - Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with - additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. - */ - const char* VMA_NULLABLE pName; -} VmaAllocationInfo; - -/** \brief Parameters for defragmentation. - -To be used with function vmaBeginDefragmentation(). -*/ -typedef struct VmaDefragmentationInfo -{ - /// \brief Use combination of #VmaDefragmentationFlagBits. - VmaDefragmentationFlags flags; - /** \brief Custom pool to be defragmented. - - If null then default pools will undergo defragmentation process. - */ - VmaPool VMA_NULLABLE pool; - /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places. - - `0` means no limit. - */ - VkDeviceSize maxBytesPerPass; - /** \brief Maximum number of allocations that can be moved during single pass to a different place. - - `0` means no limit. - */ - uint32_t maxAllocationsPerPass; -} VmaDefragmentationInfo; - -/// Single move of an allocation to be done for defragmentation. -typedef struct VmaDefragmentationMove -{ - /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it. - VmaDefragmentationMoveOperation operation; - /// Allocation that should be moved. - VmaAllocation VMA_NOT_NULL srcAllocation; - /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`. - - \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass, - to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory(). - vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory. - */ - VmaAllocation VMA_NOT_NULL dstTmpAllocation; -} VmaDefragmentationMove; - -/** \brief Parameters for incremental defragmentation steps. - -To be used with function vmaBeginDefragmentationPass(). -*/ -typedef struct VmaDefragmentationPassMoveInfo -{ - /// Number of elements in the `pMoves` array. - uint32_t moveCount; - /** \brief Array of moves to be performed by the user in the current defragmentation pass. - - Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass(). - - For each element, you should: - - 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset. - 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`. - 3. Make sure these commands finished executing on the GPU. - 4. Destroy the old buffer/image. - - Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass(). - After this call, the allocation will point to the new place in memory. 
- - Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. - - Alternatively, if you decide you want to completely remove the allocation: - - 1. Destroy its buffer/image. - 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. - - Then, after vmaEndDefragmentationPass() the allocation will be freed. - */ - VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves; -} VmaDefragmentationPassMoveInfo; - -/// Statistics returned for defragmentation process in function vmaEndDefragmentation(). -typedef struct VmaDefragmentationStats -{ - /// Total number of bytes that have been copied while moving allocations to different places. - VkDeviceSize bytesMoved; - /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects. - VkDeviceSize bytesFreed; - /// Number of allocations that have been moved to different places. - uint32_t allocationsMoved; - /// Number of empty `VkDeviceMemory` objects that have been released to the system. - uint32_t deviceMemoryBlocksFreed; -} VmaDefragmentationStats; - -/** @} */ - -/** -\addtogroup group_virtual -@{ -*/ - -/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). -typedef struct VmaVirtualBlockCreateInfo -{ - /** \brief Total size of the virtual block. - - Sizes can be expressed in bytes or any units you want as long as you are consistent in using them. - For example, if you allocate from some array of structures, 1 can mean single instance of entire structure. - */ - VkDeviceSize size; - - /** \brief Use combination of #VmaVirtualBlockCreateFlagBits. - */ - VmaVirtualBlockCreateFlags flags; - - /** \brief Custom CPU memory allocation callbacks. Optional. - - Optional, can be null. When specified, they will be used for all CPU-side memory allocations. - */ - const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; -} VmaVirtualBlockCreateInfo; - -/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate(). -typedef struct VmaVirtualAllocationCreateInfo -{ - /** \brief Size of the allocation. - - Cannot be zero. - */ - VkDeviceSize size; - /** \brief Required alignment of the allocation. Optional. - - Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset. - */ - VkDeviceSize alignment; - /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits. - */ - VmaVirtualAllocationCreateFlags flags; - /** \brief Custom pointer to be associated with the allocation. Optional. - - It can be any value and can be used for user-defined purposes. It can be fetched or changed later. - */ - void* VMA_NULLABLE pUserData; -} VmaVirtualAllocationCreateInfo; - -/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). -typedef struct VmaVirtualAllocationInfo -{ - /** \brief Offset of the allocation. - - Offset at which the allocation was made. - */ - VkDeviceSize offset; - /** \brief Size of the allocation. - - Same value as passed in VmaVirtualAllocationCreateInfo::size. - */ - VkDeviceSize size; - /** \brief Custom pointer associated with the allocation. - - Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData(). 
- */ - void* VMA_NULLABLE pUserData; -} VmaVirtualAllocationInfo; - -/** @} */ - -#endif // _VMA_DATA_TYPES_DECLARATIONS - -#ifndef _VMA_FUNCTION_HEADERS - -/** -\addtogroup group_init -@{ -*/ - -/// Creates #VmaAllocator object. -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( - const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator); - -/// Destroys allocator object. -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( - VmaAllocator VMA_NULLABLE allocator); - -/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. - -It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to -`VkPhysicalDevice`, `VkDevice` etc. every time using this function. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo); - -/** -PhysicalDeviceProperties are fetched from physicalDevice by the allocator. -You can access it here, without fetching it again on your own. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( - VmaAllocator VMA_NOT_NULL allocator, - const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties); - -/** -PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. -You can access it here, without fetching it again on your own. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( - VmaAllocator VMA_NOT_NULL allocator, - const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); - -/** -\brief Given Memory Type Index, returns Property Flags of this memory type. - -This is just a convenience function. Same information can be obtained using -vmaGetMemoryProperties(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryTypeIndex, - VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); - -/** \brief Sets index of the current frame. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t frameIndex); - -/** @} */ - -/** -\addtogroup group_stats -@{ -*/ - -/** \brief Retrieves statistics from current state of the Allocator. - -This function is called "calculate" not "get" because it has to traverse all -internal data structures, so it may be quite slow. Use it for debugging purposes. -For faster but more brief statistics suitable to be called every frame or every allocation, -use vmaGetHeapBudgets(). - -Note that when using allocator from multiple threads, returned information may immediately -become outdated. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( - VmaAllocator VMA_NOT_NULL allocator, - VmaTotalStatistics* VMA_NOT_NULL pStats); - -/** \brief Retrieves information about current memory usage and budget for all memory heaps. - -\param allocator -\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. - -This function is called "get" not "calculate" because it is very fast, suitable to be called -every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). - -Note that when using allocator from multiple threads, returned information may immediately -become outdated. 
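For example, per-heap usage versus budget can be polled every frame. A sketch, assuming `allocator` is a valid #VmaAllocator:

\code
const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
vmaGetMemoryProperties(allocator, &memProps);

VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
vmaGetHeapBudgets(allocator, budgets);

for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
{
    // Compare usage against budget to decide whether further allocations are likely to succeed.
    printf("Heap %u: %llu / %llu bytes\n", heapIndex,
        (unsigned long long)budgets[heapIndex].usage,
        (unsigned long long)budgets[heapIndex].budget);
}
\endcode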
-*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( - VmaAllocator VMA_NOT_NULL allocator, - VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets); - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/** -\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. - -This algorithm tries to find a memory type that: - -- Is allowed by memoryTypeBits. -- Contains all the flags from pAllocationCreateInfo->requiredFlags. -- Matches intended usage. -- Has as many flags from pAllocationCreateInfo->preferredFlags as possible. - -\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result -from this function or any other allocating function probably means that your -device doesn't support any memory type with requested features for the specific -type of resource you want to use it for. Please check parameters of your -resource, like image layout (OPTIMAL versus LINEAR) or mip level count. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - uint32_t* VMA_NOT_NULL pMemoryTypeIndex); - -/** -\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. - -It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. -It internally creates a temporary, dummy buffer that never has memory bound. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( - VmaAllocator VMA_NOT_NULL allocator, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - uint32_t* VMA_NOT_NULL pMemoryTypeIndex); - -/** -\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. - -It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. -It internally creates a temporary, dummy image that never has memory bound. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( - VmaAllocator VMA_NOT_NULL allocator, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - uint32_t* VMA_NOT_NULL pMemoryTypeIndex); - -/** \brief Allocates Vulkan device memory and creates #VmaPool object. - -\param allocator Allocator object. -\param pCreateInfo Parameters of pool to create. -\param[out] pPool Handle to created pool. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( - VmaAllocator VMA_NOT_NULL allocator, - const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool); - -/** \brief Destroys #VmaPool object and frees Vulkan device memory. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NULLABLE pool); - -/** @} */ - -/** -\addtogroup group_stats -@{ -*/ - -/** \brief Retrieves statistics of existing #VmaPool object. - -\param allocator Allocator object. -\param pool Pool object. -\param[out] pPoolStats Statistics of specified pool. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - VmaStatistics* VMA_NOT_NULL pPoolStats); - -/** \brief Retrieves detailed statistics of existing #VmaPool object. - -\param allocator Allocator object. -\param pool Pool object. -\param[out] pPoolStats Statistics of specified pool. 
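Tying vmaFindMemoryTypeIndexForBufferInfo() and vmaCreatePool() together, a custom-pool sketch, assuming `allocator` is a valid #VmaAllocator and the pool is meant for device-local uniform buffers:

\code
VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
sampleBufCreateInfo.size = 1024; // any representative size
sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo sampleAllocCreateInfo = {};
sampleAllocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

uint32_t memTypeIndex = 0;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 0;     // let the library choose block sizes
poolCreateInfo.maxBlockCount = 0; // no limit

VmaPool pool;
if(res == VK_SUCCESS)
    res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
\endcode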
-*/ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - VmaDetailedStatistics* VMA_NOT_NULL pPoolStats); - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ - -/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. - -Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, -`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is -`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). - -Possible return values: - -- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. -- `VK_SUCCESS` - corruption detection has been performed and succeeded. -- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. - `VMA_ASSERT` is also fired in that case. -- Other value: Error returned by Vulkan, e.g. memory mapping failure. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool); - -/** \brief Retrieves name of a custom pool. - -After the call `ppName` is either null or points to an internally-owned null-terminated string -containing name of the pool that was previously set. The pointer becomes invalid when the pool is -destroyed or its name is changed using vmaSetPoolName(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - const char* VMA_NULLABLE* VMA_NOT_NULL ppName); - -/** \brief Sets name of a custom pool. - -`pName` can be either null or pointer to a null-terminated string with new name for the pool. -Function makes internal copy of the string, so it can be changed or freed immediately after this call. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - const char* VMA_NULLABLE pName); - -/** \brief General purpose memory allocation. - -\param allocator -\param pVkMemoryRequirements -\param pCreateInfo -\param[out] pAllocation Handle to allocated memory. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). - -You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). - -It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), -vmaCreateBuffer(), vmaCreateImage() instead whenever possible. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( - VmaAllocator VMA_NOT_NULL allocator, - const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements, - const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); - -/** \brief General purpose memory allocation for multiple allocation objects at once. - -\param allocator Allocator object. -\param pVkMemoryRequirements Memory requirements for each allocation. -\param pCreateInfo Creation parameters for each allocation. -\param allocationCount Number of allocations to make. -\param[out] pAllocations Pointer to array that will be filled with handles to created allocations. -\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. 
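A small usage sketch for vmaSetPoolName()/vmaGetPoolName() declared above, assuming `allocator` and `pool` are valid handles:

\code
vmaSetPoolName(allocator, pool, "Streaming textures"); // an internal copy of the string is made

const char* poolName = nullptr;
vmaGetPoolName(allocator, pool, &poolName); // valid until renamed or the pool is destroyed
\endcode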
- -You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). - -Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. -It is just a general purpose allocation function able to make multiple allocations at once. -It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times. - -All allocations are made using same parameters. All of them are created out of the same memory pool and type. -If any allocation fails, all allocations already made within this function call are also freed, so that when -returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( - VmaAllocator VMA_NOT_NULL allocator, - const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements, - const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo, - size_t allocationCount, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, - VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo); - -/** \brief Allocates memory suitable for given `VkBuffer`. - -\param allocator -\param buffer -\param pCreateInfo -\param[out] pAllocation Handle to allocated memory. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). - -It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory(). - -This is a special-purpose function. In most cases you should use vmaCreateBuffer(). - -You must free the allocation using vmaFreeMemory() when no longer needed. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( - VmaAllocator VMA_NOT_NULL allocator, - VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, - const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); - -/** \brief Allocates memory suitable for given `VkImage`. - -\param allocator -\param image -\param pCreateInfo -\param[out] pAllocation Handle to allocated memory. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). - -It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindImageMemory(). - -This is a special-purpose function. In most cases you should use vmaCreateImage(). - -You must free the allocation using vmaFreeMemory() when no longer needed. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( - VmaAllocator VMA_NOT_NULL allocator, - VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, - const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); - -/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). - -Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( - VmaAllocator VMA_NOT_NULL allocator, - const VmaAllocation VMA_NULLABLE allocation); - -/** \brief Frees memory and destroys multiple allocations. - -Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. 
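A sketch of the separate allocate-and-bind path offered by vmaAllocateMemoryForBuffer(), assuming `allocator` is valid, `buffer` was created earlier with `vkCreateBuffer()`, and using vmaBindBufferMemory() declared further below:

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

VmaAllocation allocation;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, nullptr);
if(res == VK_SUCCESS)
    res = vmaBindBufferMemory(allocator, allocation, buffer);
// ... use the buffer; when done: vkDestroyBuffer(), then vmaFreeMemory(allocator, allocation).
\endcode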
-It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(), -vmaAllocateMemoryPages() and other functions. -It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times. - -Allocations in `pAllocations` array can come from any memory pools and types. -Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( - VmaAllocator VMA_NOT_NULL allocator, - size_t allocationCount, - const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations); - -/** \brief Returns current information about specified allocation. - -Current paramteres of given allocation are returned in `pAllocationInfo`. - -Although this function doesn't lock any mutex, so it should be quite efficient, -you should avoid calling it too often. -You can retrieve same VmaAllocationInfo structure while creating your resource, from function -vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change -(e.g. due to defragmentation). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo); - -/** \brief Sets pUserData in given allocation to new value. - -The value of pointer `pUserData` is copied to allocation's `pUserData`. -It is opaque, so you can use it however you want - e.g. -as a pointer, ordinal number or some handle to you own data. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - void* VMA_NULLABLE pUserData); - -/** \brief Sets pName in given allocation to new value. - -`pName` must be either null, or pointer to a null-terminated string. The function -makes local copy of the string and sets it as allocation's `pName`. String -passed as pName doesn't need to be valid for whole lifetime of the allocation - -you can free it after this call. String previously pointed by allocation's -`pName` is freed from memory. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const char* VMA_NULLABLE pName); - -/** -\brief Given an allocation, returns Property Flags of its memory type. - -This is just a convenience function. Same information can be obtained using -vmaGetAllocationInfo() + vmaGetMemoryProperties(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); - -/** \brief Maps memory represented by given allocation and returns pointer to it. - -Maps memory represented by given allocation to make it accessible to CPU code. -When succeeded, `*ppData` contains pointer to first byte of this memory. - -\warning -If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is -correctly offsetted to the beginning of region assigned to this particular allocation. -Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block. -You should not add VmaAllocationInfo::offset to it! 
- -Mapping is internally reference-counted and synchronized, so despite raw Vulkan -function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory` -multiple times simultaneously, it is safe to call this function on allocations -assigned to the same memory block. Actual Vulkan memory will be mapped on first -mapping and unmapped on last unmapping. - -If the function succeeded, you must call vmaUnmapMemory() to unmap the -allocation when mapping is no longer needed or before freeing the allocation, at -the latest. - -It also safe to call this function multiple times on the same allocation. You -must call vmaUnmapMemory() same number of times as you called vmaMapMemory(). - -It is also safe to call this function on allocation created with -#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time. -You must still call vmaUnmapMemory() same number of times as you called -vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the -"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. - -This function fails when used on allocation made in memory type that is not -`HOST_VISIBLE`. - -This function doesn't automatically flush or invalidate caches. -If the allocation is made from a memory types that is not `HOST_COHERENT`, -you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - void* VMA_NULLABLE* VMA_NOT_NULL ppData); - -/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory(). - -For details, see description of vmaMapMemory(). - -This function doesn't automatically flush or invalidate caches. -If the allocation is made from a memory types that is not `HOST_COHERENT`, -you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation); - -/** \brief Flushes memory of given allocation. - -Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation. -It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`. -Unmap operation doesn't do that automatically. - -- `offset` must be relative to the beginning of allocation. -- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation. -- `offset` and `size` don't have to be aligned. - They are internally rounded down/up to multiply of `nonCoherentAtomSize`. -- If `size` is 0, this call is ignored. -- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`, - this call is ignored. - -Warning! `offset` and `size` are relative to the contents of given `allocation`. -If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively. -Do not pass allocation's offset as `offset`!!! - -This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is -called, otherwise `VK_SUCCESS`. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize offset, - VkDeviceSize size); - -/** \brief Invalidates memory of given allocation. 
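Combining vmaMapMemory(), vmaFlushAllocation() and vmaUnmapMemory() described above, a typical CPU upload sketch, assuming `allocator` and a `HOST_VISIBLE` `allocation` are valid and `srcData`/`dataSize` describe the data to write:

\code
void* mappedData = nullptr;
VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, (size_t)dataSize);
    // Required only for non-HOST_COHERENT memory types; harmless otherwise.
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}
\endcode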
- -Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation. -It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`. -Map operation doesn't do that automatically. - -- `offset` must be relative to the beginning of allocation. -- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation. -- `offset` and `size` don't have to be aligned. - They are internally rounded down/up to multiply of `nonCoherentAtomSize`. -- If `size` is 0, this call is ignored. -- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`, - this call is ignored. - -Warning! `offset` and `size` are relative to the contents of given `allocation`. -If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively. -Do not pass allocation's offset as `offset`!!! - -This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if -it is called, otherwise `VK_SUCCESS`. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize offset, - VkDeviceSize size); - -/** \brief Flushes memory of given set of allocations. - -Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations. -For more information, see documentation of vmaFlushAllocation(). - -\param allocator -\param allocationCount -\param allocations -\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all ofsets are zero. -\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations. - -This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is -called, otherwise `VK_SUCCESS`. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t allocationCount, - const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, - const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, - const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes); - -/** \brief Invalidates memory of given set of allocations. - -Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations. -For more information, see documentation of vmaInvalidateAllocation(). - -\param allocator -\param allocationCount -\param allocations -\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all ofsets are zero. -\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations. - -This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is -called, otherwise `VK_SUCCESS`. 
-*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t allocationCount, - const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, - const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, - const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes); - -/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions. - -\param allocator -\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked. - -Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, -`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are -`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). - -Possible return values: - -- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types. -- `VK_SUCCESS` - corruption detection has been performed and succeeded. -- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. - `VMA_ASSERT` is also fired in that case. -- Other value: Error returned by Vulkan, e.g. memory mapping failure. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryTypeBits); - -/** \brief Begins defragmentation process. - -\param allocator Allocator object. -\param pInfo Structure filled with parameters of defragmentation. -\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. -\returns -- `VK_SUCCESS` if defragmentation can begin. -- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported. - -For more information about defragmentation, see documentation chapter: -[Defragmentation](@ref defragmentation). -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( - VmaAllocator VMA_NOT_NULL allocator, - const VmaDefragmentationInfo* VMA_NOT_NULL pInfo, - VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext); - -/** \brief Ends defragmentation process. - -\param allocator Allocator object. -\param context Context object that has been created by vmaBeginDefragmentation(). -\param[out] pStats Optional stats for the defragmentation. Can be null. - -Use this function to finish defragmentation started by vmaBeginDefragmentation(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationStats* VMA_NULLABLE pStats); - -/** \brief Starts single defragmentation pass. - -\param allocator Allocator object. -\param context Context object that has been created by vmaBeginDefragmentation(). -\param[out] pPassInfo Computed informations for current pass. -\returns -- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation. -- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(), - and then preferably try another pass with vmaBeginDefragmentationPass(). 
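Putting the defragmentation entry points together, the API above is typically driven by a loop like the following sketch, assuming `allocator` is valid; the resource re-creation and copy work is only indicated by a comment:

\code
VmaDefragmentationInfo defragInfo = {};
// Leaving defragInfo.pool null defragments the default pools.

VmaDefragmentationContext defragCtx;
VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
if(res == VK_SUCCESS)
{
    for(;;)
    {
        VmaDefragmentationPassMoveInfo pass = {};
        res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
        if(res == VK_SUCCESS)
            break; // no more moves are possible
        // For each pass.pMoves[i]: create the new resource at dstTmpAllocation, copy from
        // srcAllocation, wait for the GPU, destroy the old resource, or set the operation
        // to IGNORE/DESTROY as described above.
        res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
        if(res == VK_SUCCESS)
            break; // defragmentation finished
        // res == VK_INCOMPLETE: run another pass.
    }
    vmaEndDefragmentation(allocator, defragCtx, nullptr);
}
\endcode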
-*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); - -/** \brief Ends single defragmentation pass. - -\param allocator Allocator object. -\param context Context object that has been created by vmaBeginDefragmentation(). -\param pPassInfo Computed informations for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you. - -Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible. - -Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`. -After this call: - -- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY - (which is the default) will be pointing to the new destination place. -- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY - will be freed. - -If no more moves are possible you can end whole defragmentation. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); - -/** \brief Binds buffer to allocation. - -Binds specified buffer to region of memory represented by specified allocation. -Gets `VkDeviceMemory` handle and offset from the allocation. -If you want to create a buffer, allocate memory for it and bind them together separately, -you should use this function for binding instead of standard `vkBindBufferMemory()`, -because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple -allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously -(which is illegal in Vulkan). - -It is recommended to use function vmaCreateBuffer() instead of this one. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer); - -/** \brief Binds buffer to allocation with additional parameters. - -\param allocator -\param allocation -\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. -\param buffer -\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null. - -This function is similar to vmaBindBufferMemory(), but it provides additional parameters. - -If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag -or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize allocationLocalOffset, - VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, - const void* VMA_NULLABLE pNext); - -/** \brief Binds image to allocation. - -Binds specified image to region of memory represented by specified allocation. -Gets `VkDeviceMemory` handle and offset from the allocation. 
-If you want to create an image, allocate memory for it and bind them together separately, -you should use this function for binding instead of standard `vkBindImageMemory()`, -because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple -allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously -(which is illegal in Vulkan). - -It is recommended to use function vmaCreateImage() instead of this one. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); - -/** \brief Binds image to allocation with additional parameters. - -\param allocator -\param allocation -\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. -\param image -\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. - -This function is similar to vmaBindImageMemory(), but it provides additional parameters. - -If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag -or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize allocationLocalOffset, - VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, - const void* VMA_NULLABLE pNext); - -/** \brief Creates a new `VkBuffer`, allocates and binds memory for it. - -\param allocator -\param pBufferCreateInfo -\param pAllocationCreateInfo -\param[out] pBuffer Buffer that was created. -\param[out] pAllocation Allocation that was created. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). - -This function automatically: - --# Creates buffer. --# Allocates appropriate memory for it. --# Binds the buffer with the memory. - -If any of these operations fail, buffer and allocation are not created, -returned value is negative error code, `*pBuffer` and `*pAllocation` are null. - -If the function succeeded, you must destroy both buffer and allocation when you -no longer need them using either convenience function vmaDestroyBuffer() or -separately, using `vkDestroyBuffer()` and vmaFreeMemory(). - -If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, -VK_KHR_dedicated_allocation extension is used internally to query driver whether -it requires or prefers the new buffer to have dedicated allocation. If yes, -and if dedicated allocation is possible -(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated -allocation for this buffer, just like when using -#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - -\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer, -although recommended as a good practice, is out of scope of this library and could be implemented -by the user as a higher-level logic on top of VMA. 
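A minimal usage sketch, assuming `allocator` is a valid #VmaAllocator and that this VMA 3.x header provides the `VMA_MEMORY_USAGE_AUTO` value:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
// ... later:
vmaDestroyBuffer(allocator, buffer, allocation);
\endcode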
-*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( - VmaAllocator VMA_NOT_NULL allocator, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); - -/** \brief Creates a buffer with additional minimum alignment. - -Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom, -minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g. -for interop with OpenGL. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( - VmaAllocator VMA_NOT_NULL allocator, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - VkDeviceSize minAlignment, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); - -/** \brief Creates a new `VkBuffer`, binds already created memory for it. - -\param allocator -\param allocation Allocation that provides memory to be used for binding new buffer to it. -\param pBufferCreateInfo -\param[out] pBuffer Buffer that was created. - -This function automatically: - --# Creates buffer. --# Binds the buffer with the supplied memory. - -If any of these operations fail, buffer is not created, -returned value is negative error code and `*pBuffer` is null. - -If the function succeeded, you must destroy the buffer when you -no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding -allocation you can use convenience function vmaDestroyBuffer(). -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); - -/** \brief Destroys Vulkan buffer and frees allocated memory. - -This is just a convenience function equivalent to: - -\code -vkDestroyBuffer(device, buffer, allocationCallbacks); -vmaFreeMemory(allocator, allocation); -\endcode - -It it safe to pass null as buffer and/or allocation. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( - VmaAllocator VMA_NOT_NULL allocator, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer, - VmaAllocation VMA_NULLABLE allocation); - -/// Function similar to vmaCreateBuffer(). -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( - VmaAllocator VMA_NOT_NULL allocator, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); - -/// Function similar to vmaCreateAliasingBuffer(). -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); - -/** \brief Destroys Vulkan image and frees allocated memory. 
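vmaCreateImage() follows the same pattern as vmaCreateBuffer(); a sketch under the same assumptions (`allocator` valid, `VMA_MEMORY_USAGE_AUTO` available):

\code
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent = { 1024, 1024, 1 };
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &image, &allocation, nullptr);
// ... later:
vmaDestroyImage(allocator, image, allocation);
\endcode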
- -This is just a convenience function equivalent to: - -\code -vkDestroyImage(device, image, allocationCallbacks); -vmaFreeMemory(allocator, allocation); -\endcode - -It it safe to pass null as image and/or allocation. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( - VmaAllocator VMA_NOT_NULL allocator, - VkImage VMA_NULLABLE_NON_DISPATCHABLE image, - VmaAllocation VMA_NULLABLE allocation); - -/** @} */ - -/** -\addtogroup group_virtual -@{ -*/ - -/** \brief Creates new #VmaVirtualBlock object. - -\param pCreateInfo Parameters for creation. -\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( - const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock); - -/** \brief Destroys #VmaVirtualBlock object. - -Please note that you should consciously handle virtual allocations that could remain unfreed in the block. -You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock() -if you are sure this is what you want. If you do neither, an assert is called. - -If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`, -don't forget to free them. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock( - VmaVirtualBlock VMA_NULLABLE virtualBlock); - -/** \brief Returns true of the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations. -*/ -VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty( - VmaVirtualBlock VMA_NOT_NULL virtualBlock); - -/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo); - -/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock. - -If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned -(despite the function doesn't ever allocate actual GPU memory). -`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, it set to `UINT64_MAX`. - -\param virtualBlock Virtual block -\param pCreateInfo Parameters for the allocation -\param[out] pAllocation Returned handle of the new allocation -\param[out] pOffset Returned offset of the new allocation. Optional, can be null. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, - VkDeviceSize* VMA_NULLABLE pOffset); - -/** \brief Frees virtual allocation inside given #VmaVirtualBlock. - -It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation); - -/** \brief Frees all virtual allocations inside given #VmaVirtualBlock. - -You must either call this function or free each virtual allocation individually with vmaVirtualFree() -before destroying a virtual block. Otherwise, an assert is called. 
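The virtual allocation entry points declared above can be exercised without touching real GPU memory; a minimal sketch, with arbitrary sizes and assuming the `VmaVirtualBlockCreateInfo`/`VmaVirtualAllocationCreateInfo` structures declared earlier in this header:

\code
VmaVirtualBlockCreateInfo blockCreateInfo = {};
blockCreateInfo.size = 1048576; // 1 MiB of purely "virtual" space

VmaVirtualBlock block;
vmaCreateVirtualBlock(&blockCreateInfo, &block);

VmaVirtualAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.size = 4096;

VmaVirtualAllocation alloc;
VkDeviceSize offset;
if(vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset) == VK_SUCCESS)
{
    // Use the sub-range [offset, offset + 4096) of whatever resource this block manages.
    vmaVirtualFree(block, alloc);
}

vmaDestroyVirtualBlock(block); // all allocations must be freed first (or vmaClearVirtualBlock() called)
\endcode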
- -If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`, -don't forget to free it as well. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock( - VmaVirtualBlock VMA_NOT_NULL virtualBlock); - -/** \brief Changes custom pointer associated with given virtual allocation. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, - void* VMA_NULLABLE pUserData); - -/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock. - -This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaStatistics* VMA_NOT_NULL pStats); - -/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock. - -This function is slow to call. Use for debugging purposes. -For less detailed statistics, see vmaGetVirtualBlockStatistics(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaDetailedStatistics* VMA_NOT_NULL pStats); - -/** @} */ - -#if VMA_STATS_STRING_ENABLED -/** -\addtogroup group_stats -@{ -*/ - -/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock. -\param virtualBlock Virtual block. -\param[out] ppStatsString Returned string. -\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces. - -Returned string must be freed using vmaFreeVirtualBlockStatsString(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, - VkBool32 detailedMap); - -/// Frees a string returned by vmaBuildVirtualBlockStatsString(). -VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE pStatsString); - -/** \brief Builds and returns statistics as a null-terminated string in JSON format. -\param allocator -\param[out] ppStatsString Must be freed using vmaFreeStatsString() function. -\param detailedMap -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( - VmaAllocator VMA_NOT_NULL allocator, - char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, - VkBool32 detailedMap); - -VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( - VmaAllocator VMA_NOT_NULL allocator, - char* VMA_NULLABLE pStatsString); - -/** @} */ - -#endif // VMA_STATS_STRING_ENABLED - -#endif // _VMA_FUNCTION_HEADERS - -#ifdef __cplusplus -} -#endif - -#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H - -//////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////// -// -// IMPLEMENTATION -// -//////////////////////////////////////////////////////////////////////////////// -//////////////////////////////////////////////////////////////////////////////// - -// For Visual Studio IntelliSense. 
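For the statistics entry points above, a short sketch of dumping the JSON report, assuming an existing `allocator` and that the library was built with `VMA_STATS_STRING_ENABLED`:

\code
char* statsJson = nullptr;
vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE also includes the detailed allocation map
// ... write statsJson to a log or file ...
vmaFreeStatsString(allocator, statsJson);
\endcode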
-#if defined(__cplusplus) && defined(__INTELLISENSE__) -#define VMA_IMPLEMENTATION -#endif - -#ifdef VMA_IMPLEMENTATION -#undef VMA_IMPLEMENTATION - -#include -#include -#include -#include -#include - -#ifdef _MSC_VER - #include // For functions like __popcnt, _BitScanForward etc. -#endif -#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20 - #include // For std::popcount -#endif - -/******************************************************************************* -CONFIGURATION SECTION - -Define some of these macros before each #include of this header or change them -here if you need other then default behavior depending on your environment. -*/ -#ifndef _VMA_CONFIGURATION - -/* -Define this macro to 1 to make the library fetch pointers to Vulkan functions -internally, like: - - vulkanFunctions.vkAllocateMemory = &vkAllocateMemory; -*/ -#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES) - #define VMA_STATIC_VULKAN_FUNCTIONS 1 -#endif - -/* -Define this macro to 1 to make the library fetch pointers to Vulkan functions -internally, like: - - vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory"); - -To use this feature in new versions of VMA you now have to pass -VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as -VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null. -*/ -#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS) - #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1 -#endif - -#ifndef VMA_USE_STL_SHARED_MUTEX - // Compiler conforms to C++17. - #if __cplusplus >= 201703L - #define VMA_USE_STL_SHARED_MUTEX 1 - // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus - // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2. - #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L - #define VMA_USE_STL_SHARED_MUTEX 1 - #else - #define VMA_USE_STL_SHARED_MUTEX 0 - #endif -#endif - -/* -Define this macro to include custom header files without having to edit this file directly, e.g.: - - // Inside of "my_vma_configuration_user_includes.h": - - #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT - #include "my_custom_min.h" // for my_custom_min - #include - #include - - // Inside a different file, which includes "vk_mem_alloc.h": - - #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h" - #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr) - #define VMA_MIN(v1, v2) (my_custom_min(v1, v2)) - #include "vk_mem_alloc.h" - ... - -The following headers are used in this CONFIGURATION section only, so feel free to -remove them if not needed. -*/ -#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H) - #include // for assert - #include // for min, max - #include -#else - #include VMA_CONFIGURATION_USER_INCLUDES_H -#endif - -#ifndef VMA_NULL - // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0. 
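To make the dynamic-fetching mode described above concrete, a configuration sketch: the defines belong in the single translation unit that compiles the implementation, `instance`, `physicalDevice` and `device` are assumed to exist, and vmaCreateAllocator()/VmaAllocatorCreateInfo are assumed to be declared earlier in this header.

\code
#define VMA_STATIC_VULKAN_FUNCTIONS 0
#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"

VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;

VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
allocatorCreateInfo.instance = instance;
allocatorCreateInfo.physicalDevice = physicalDevice;
allocatorCreateInfo.device = device;
allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorCreateInfo, &allocator);
\endcode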
- #define VMA_NULL nullptr -#endif - -#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16) -#include -static void* vma_aligned_alloc(size_t alignment, size_t size) -{ - // alignment must be >= sizeof(void*) - if(alignment < sizeof(void*)) - { - alignment = sizeof(void*); - } - - return memalign(alignment, size); -} -#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC)) -#include - -#if defined(__APPLE__) -#include -#endif - -static void* vma_aligned_alloc(size_t alignment, size_t size) -{ - // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4) - // Therefore, for now disable this specific exception until a proper solution is found. - //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0)) - //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0 - // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only - // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds - // // MAC_OS_X_VERSION_10_16), even though the function is marked - // // availabe for 10.15. That is why the preprocessor checks for 10.16 but - // // the __builtin_available checks for 10.15. - // // People who use C++17 could call aligned_alloc with the 10.15 SDK already. - // if (__builtin_available(macOS 10.15, iOS 13, *)) - // return aligned_alloc(alignment, size); - //#endif - //#endif - - // alignment must be >= sizeof(void*) - if(alignment < sizeof(void*)) - { - alignment = sizeof(void*); - } - - void *pointer; - if(posix_memalign(&pointer, alignment, size) == 0) - return pointer; - return VMA_NULL; -} -#elif defined(_WIN32) -static void* vma_aligned_alloc(size_t alignment, size_t size) -{ - return _aligned_malloc(size, alignment); -} -#else -static void* vma_aligned_alloc(size_t alignment, size_t size) -{ - return aligned_alloc(alignment, size); -} -#endif - -#if defined(_WIN32) -static void vma_aligned_free(void* ptr) -{ - _aligned_free(ptr); -} -#else -static void vma_aligned_free(void* VMA_NULLABLE ptr) -{ - free(ptr); -} -#endif - -// If your compiler is not compatible with C++11 and definition of -// aligned_alloc() function is missing, uncommeting following line may help: - -//#include - -// Normal assert to check for programmer's errors, especially in Debug configuration. -#ifndef VMA_ASSERT - #ifdef NDEBUG - #define VMA_ASSERT(expr) - #else - #define VMA_ASSERT(expr) assert(expr) - #endif -#endif - -// Assert that will be called very often, like inside data structures e.g. operator[]. -// Making it non-empty can make program slow. 
-#ifndef VMA_HEAVY_ASSERT - #ifdef NDEBUG - #define VMA_HEAVY_ASSERT(expr) - #else - #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr) - #endif -#endif - -#ifndef VMA_ALIGN_OF - #define VMA_ALIGN_OF(type) (__alignof(type)) -#endif - -#ifndef VMA_SYSTEM_ALIGNED_MALLOC - #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size)) -#endif - -#ifndef VMA_SYSTEM_ALIGNED_FREE - // VMA_SYSTEM_FREE is the old name, but might have been defined by the user - #if defined(VMA_SYSTEM_FREE) - #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr) - #else - #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr) - #endif -#endif - -#ifndef VMA_COUNT_BITS_SET - // Returns number of bits set to 1 in (v) - #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) -#endif - -#ifndef VMA_BITSCAN_LSB - // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX - #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) -#endif - -#ifndef VMA_BITSCAN_MSB - // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX - #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) -#endif - -#ifndef VMA_MIN - #define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) -#endif - -#ifndef VMA_MAX - #define VMA_MAX(v1, v2) ((std::max)((v1), (v2))) -#endif - -#ifndef VMA_SWAP - #define VMA_SWAP(v1, v2) std::swap((v1), (v2)) -#endif - -#ifndef VMA_SORT - #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) -#endif - -#ifndef VMA_DEBUG_LOG - #define VMA_DEBUG_LOG(format, ...) - /* - #define VMA_DEBUG_LOG(format, ...) do { \ - printf(format, __VA_ARGS__); \ - printf("\n"); \ - } while(false) - */ -#endif - -// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString. -#if VMA_STATS_STRING_ENABLED - static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num) - { - snprintf(outStr, strLen, "%u", static_cast(num)); - } - static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num) - { - snprintf(outStr, strLen, "%llu", static_cast(num)); - } - static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr) - { - snprintf(outStr, strLen, "%p", ptr); - } -#endif - -#ifndef VMA_MUTEX - class VmaMutex - { - public: - void Lock() { m_Mutex.lock(); } - void Unlock() { m_Mutex.unlock(); } - bool TryLock() { return m_Mutex.try_lock(); } - private: - std::mutex m_Mutex; - }; - #define VMA_MUTEX VmaMutex -#endif - -// Read-write mutex, where "read" is shared access, "write" is exclusive access. -#ifndef VMA_RW_MUTEX - #if VMA_USE_STL_SHARED_MUTEX - // Use std::shared_mutex from C++17. - #include - class VmaRWMutex - { - public: - void LockRead() { m_Mutex.lock_shared(); } - void UnlockRead() { m_Mutex.unlock_shared(); } - bool TryLockRead() { return m_Mutex.try_lock_shared(); } - void LockWrite() { m_Mutex.lock(); } - void UnlockWrite() { m_Mutex.unlock(); } - bool TryLockWrite() { return m_Mutex.try_lock(); } - private: - std::shared_mutex m_Mutex; - }; - #define VMA_RW_MUTEX VmaRWMutex - #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600 - // Use SRWLOCK from WinAPI. - // Minimum supported client = Windows Vista, server = Windows Server 2008. 
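Enabling the logging hook amounts to defining the macro before the implementation include, essentially the commented-out variant shown above; a sketch (the `printf`-based body is taken from that comment and assumes at least one variadic argument per log call):

\code
#include <cstdio>

#define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"
\endcode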
- class VmaRWMutex - { - public: - VmaRWMutex() { InitializeSRWLock(&m_Lock); } - void LockRead() { AcquireSRWLockShared(&m_Lock); } - void UnlockRead() { ReleaseSRWLockShared(&m_Lock); } - bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; } - void LockWrite() { AcquireSRWLockExclusive(&m_Lock); } - void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); } - bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; } - private: - SRWLOCK m_Lock; - }; - #define VMA_RW_MUTEX VmaRWMutex - #else - // Less efficient fallback: Use normal mutex. - class VmaRWMutex - { - public: - void LockRead() { m_Mutex.Lock(); } - void UnlockRead() { m_Mutex.Unlock(); } - bool TryLockRead() { return m_Mutex.TryLock(); } - void LockWrite() { m_Mutex.Lock(); } - void UnlockWrite() { m_Mutex.Unlock(); } - bool TryLockWrite() { return m_Mutex.TryLock(); } - private: - VMA_MUTEX m_Mutex; - }; - #define VMA_RW_MUTEX VmaRWMutex - #endif // #if VMA_USE_STL_SHARED_MUTEX -#endif // #ifndef VMA_RW_MUTEX - -/* -If providing your own implementation, you need to implement a subset of std::atomic. -*/ -#ifndef VMA_ATOMIC_UINT32 - #include - #define VMA_ATOMIC_UINT32 std::atomic -#endif - -#ifndef VMA_ATOMIC_UINT64 - #include - #define VMA_ATOMIC_UINT64 std::atomic -#endif - -#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY - /** - Every allocation will have its own memory block. - Define to 1 for debugging purposes only. - */ - #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0) -#endif - -#ifndef VMA_MIN_ALIGNMENT - /** - Minimum alignment of all allocations, in bytes. - Set to more than 1 for debugging purposes. Must be power of two. - */ - #ifdef VMA_DEBUG_ALIGNMENT // Old name - #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT - #else - #define VMA_MIN_ALIGNMENT (1) - #endif -#endif - -#ifndef VMA_DEBUG_MARGIN - /** - Minimum margin after every allocation, in bytes. - Set nonzero for debugging purposes only. - */ - #define VMA_DEBUG_MARGIN (0) -#endif - -#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS - /** - Define this macro to 1 to automatically fill new allocations and destroyed - allocations with some bit pattern. - */ - #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) -#endif - -#ifndef VMA_DEBUG_DETECT_CORRUPTION - /** - Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to - enable writing magic value to the margin after every allocation and - validating it, so that memory corruptions (out-of-bounds writes) are detected. - */ - #define VMA_DEBUG_DETECT_CORRUPTION (0) -#endif - -#ifndef VMA_DEBUG_GLOBAL_MUTEX - /** - Set this to 1 for debugging purposes only, to enable single mutex protecting all - entry calls to the library. Can be useful for debugging multithreading issues. - */ - #define VMA_DEBUG_GLOBAL_MUTEX (0) -#endif - -#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY - /** - Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. - Set to more than 1 for debugging purposes only. Must be power of two. - */ - #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) -#endif - -#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT - /* - Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount - and return error instead of leaving up to Vulkan implementation what to do in such cases. - */ - #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0) -#endif - -#ifndef VMA_SMALL_HEAP_MAX_SIZE - /// Maximum size of a memory heap in Vulkan to consider it "small". 
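A configuration sketch for the corruption-detection switches documented above; the margin value is illustrative:

\code
#define VMA_DEBUG_MARGIN 16             // reserve 16 bytes of margin after every allocation
#define VMA_DEBUG_DETECT_CORRUPTION 1   // fill the margin with a magic value and validate it
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"
\endcode

Out-of-bounds writes into the margin are then caught when the magic value is validated (see VmaWriteMagicValue()/VmaValidateMagicValue() further below in the implementation).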
- #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) -#endif - -#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE - /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. - #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) -#endif - -/* -Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called -or a persistently mapped allocation is created and destroyed several times in a row. -It keeps additional +1 mapping of a device memory block to prevent calling actual -vkMapMemory/vkUnmapMemory too many times, which may improve performance and help -tools like RenderDOc. -*/ -#ifndef VMA_MAPPING_HYSTERESIS_ENABLED - #define VMA_MAPPING_HYSTERESIS_ENABLED 1 -#endif - -#ifndef VMA_CLASS_NO_COPY - #define VMA_CLASS_NO_COPY(className) \ - private: \ - className(const className&) = delete; \ - className& operator=(const className&) = delete; -#endif - -#define VMA_VALIDATE(cond) do { if(!(cond)) { \ - VMA_ASSERT(0 && "Validation failed: " #cond); \ - return false; \ - } } while(false) - -/******************************************************************************* -END OF CONFIGURATION -*/ -#endif // _VMA_CONFIGURATION - - -static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; -static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF; -// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F. -static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; - -// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants. -static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040; -static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080; -static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000; -static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200; -static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000; -static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u; -static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32; -static const uint32_t VMA_VENDOR_ID_AMD = 4098; - -// This one is tricky. Vulkan specification defines this code as available since -// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131. -// See pull request #207. -#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13) - - -#if VMA_STATS_STRING_ENABLED -// Correspond to values of enum VmaSuballocationType. -static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = -{ - "FREE", - "UNKNOWN", - "BUFFER", - "IMAGE_UNKNOWN", - "IMAGE_LINEAR", - "IMAGE_OPTIMAL", -}; -#endif - -static VkAllocationCallbacks VmaEmptyAllocationCallbacks = - { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL }; - - -#ifndef _VMA_ENUM_DECLARATIONS - -enum VmaSuballocationType -{ - VMA_SUBALLOCATION_TYPE_FREE = 0, - VMA_SUBALLOCATION_TYPE_UNKNOWN = 1, - VMA_SUBALLOCATION_TYPE_BUFFER = 2, - VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, - VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4, - VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5, - VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF -}; - -enum VMA_CACHE_OPERATION -{ - VMA_CACHE_FLUSH, - VMA_CACHE_INVALIDATE -}; - -enum class VmaAllocationRequestType -{ - Normal, - TLSF, - // Used by "Linear" algorithm. - UpperAddress, - EndOf1st, - EndOf2nd, -}; - -#endif // _VMA_ENUM_DECLARATIONS - -#ifndef _VMA_FORWARD_DECLARATIONS -// Opaque handle used by allocation algorithms to identify single allocation in any conforming way. 
-VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle); - -struct VmaMutexLock; -struct VmaMutexLockRead; -struct VmaMutexLockWrite; - -template -struct AtomicTransactionalIncrement; - -template -struct VmaStlAllocator; - -template -class VmaVector; - -template -class VmaSmallVector; - -template -class VmaPoolAllocator; - -template -struct VmaListItem; - -template -class VmaRawList; - -template -class VmaList; - -template -class VmaIntrusiveLinkedList; - -// Unused in this version -#if 0 -template -struct VmaPair; -template -struct VmaPairFirstLess; - -template -class VmaMap; -#endif - -#if VMA_STATS_STRING_ENABLED -class VmaStringBuilder; -class VmaJsonWriter; -#endif - -class VmaDeviceMemoryBlock; - -struct VmaDedicatedAllocationListItemTraits; -class VmaDedicatedAllocationList; - -struct VmaSuballocation; -struct VmaSuballocationOffsetLess; -struct VmaSuballocationOffsetGreater; -struct VmaSuballocationItemSizeLess; - -typedef VmaList> VmaSuballocationList; - -struct VmaAllocationRequest; - -class VmaBlockMetadata; -class VmaBlockMetadata_Linear; -class VmaBlockMetadata_TLSF; - -class VmaBlockVector; - -struct VmaPoolListItemTraits; - -struct VmaCurrentBudgetData; - -class VmaAllocationObjectAllocator; - -#endif // _VMA_FORWARD_DECLARATIONS - - -#ifndef _VMA_FUNCTIONS - -/* -Returns number of bits set to 1 in (v). - -On specific platforms and compilers you can use instrinsics like: - -Visual Studio: - return __popcnt(v); -GCC, Clang: - return static_cast(__builtin_popcount(v)); - -Define macro VMA_COUNT_BITS_SET to provide your optimized implementation. -But you need to check in runtime whether user's CPU supports these, as some old processors don't. -*/ -static inline uint32_t VmaCountBitsSet(uint32_t v) -{ -#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20 - return std::popcount(v); -#else - uint32_t c = v - ((v >> 1) & 0x55555555); - c = ((c >> 2) & 0x33333333) + (c & 0x33333333); - c = ((c >> 4) + c) & 0x0F0F0F0F; - c = ((c >> 8) + c) & 0x00FF00FF; - c = ((c >> 16) + c) & 0x0000FFFF; - return c; -#endif -} - -static inline uint8_t VmaBitScanLSB(uint64_t mask) -{ -#if defined(_MSC_VER) && defined(_WIN64) - unsigned long pos; - if (_BitScanForward64(&pos, mask)) - return static_cast(pos); - return UINT8_MAX; -#elif defined __GNUC__ || defined __clang__ - return static_cast(__builtin_ffsll(mask)) - 1U; -#else - uint8_t pos = 0; - uint64_t bit = 1; - do - { - if (mask & bit) - return pos; - bit <<= 1; - } while (pos++ < 63); - return UINT8_MAX; -#endif -} - -static inline uint8_t VmaBitScanLSB(uint32_t mask) -{ -#ifdef _MSC_VER - unsigned long pos; - if (_BitScanForward(&pos, mask)) - return static_cast(pos); - return UINT8_MAX; -#elif defined __GNUC__ || defined __clang__ - return static_cast(__builtin_ffs(mask)) - 1U; -#else - uint8_t pos = 0; - uint32_t bit = 1; - do - { - if (mask & bit) - return pos; - bit <<= 1; - } while (pos++ < 31); - return UINT8_MAX; -#endif -} - -static inline uint8_t VmaBitScanMSB(uint64_t mask) -{ -#if defined(_MSC_VER) && defined(_WIN64) - unsigned long pos; - if (_BitScanReverse64(&pos, mask)) - return static_cast(pos); -#elif defined __GNUC__ || defined __clang__ - if (mask) - return 63 - static_cast(__builtin_clzll(mask)); -#else - uint8_t pos = 63; - uint64_t bit = 1ULL << 63; - do - { - if (mask & bit) - return pos; - bit >>= 1; - } while (pos-- > 0); -#endif - return UINT8_MAX; -} - -static inline uint8_t VmaBitScanMSB(uint32_t mask) -{ -#ifdef _MSC_VER - unsigned long pos; - if (_BitScanReverse(&pos, mask)) - return static_cast(pos); -#elif 
defined __GNUC__ || defined __clang__ - if (mask) - return 31 - static_cast(__builtin_clz(mask)); -#else - uint8_t pos = 31; - uint32_t bit = 1UL << 31; - do - { - if (mask & bit) - return pos; - bit >>= 1; - } while (pos-- > 0); -#endif - return UINT8_MAX; -} - -/* -Returns true if given number is a power of two. -T must be unsigned integer number or signed integer but always nonnegative. -For 0 returns true. -*/ -template -inline bool VmaIsPow2(T x) -{ - return (x & (x - 1)) == 0; -} - -// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16. -// Use types like uint32_t, uint64_t as T. -template -static inline T VmaAlignUp(T val, T alignment) -{ - VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); - return (val + alignment - 1) & ~(alignment - 1); -} - -// Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8. -// Use types like uint32_t, uint64_t as T. -template -static inline T VmaAlignDown(T val, T alignment) -{ - VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); - return val & ~(alignment - 1); -} - -// Division with mathematical rounding to nearest number. -template -static inline T VmaRoundDiv(T x, T y) -{ - return (x + (y / (T)2)) / y; -} - -// Divide by 'y' and round up to nearest integer. -template -static inline T VmaDivideRoundingUp(T x, T y) -{ - return (x + y - (T)1) / y; -} - -// Returns smallest power of 2 greater or equal to v. -static inline uint32_t VmaNextPow2(uint32_t v) -{ - v--; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - v++; - return v; -} - -static inline uint64_t VmaNextPow2(uint64_t v) -{ - v--; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - v |= v >> 32; - v++; - return v; -} - -// Returns largest power of 2 less or equal to v. -static inline uint32_t VmaPrevPow2(uint32_t v) -{ - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - v = v ^ (v >> 1); - return v; -} - -static inline uint64_t VmaPrevPow2(uint64_t v) -{ - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - v |= v >> 32; - v = v ^ (v >> 1); - return v; -} - -static inline bool VmaStrIsEmpty(const char* pStr) -{ - return pStr == VMA_NULL || *pStr == '\0'; -} - -/* -Returns true if two memory blocks occupy overlapping pages. -ResourceA must be in less memory offset than ResourceB. - -Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)" -chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity". -*/ -static inline bool VmaBlocksOnSamePage( - VkDeviceSize resourceAOffset, - VkDeviceSize resourceASize, - VkDeviceSize resourceBOffset, - VkDeviceSize pageSize) -{ - VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0); - VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; - VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1); - VkDeviceSize resourceBStart = resourceBOffset; - VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); - return resourceAEndPage == resourceBStartPage; -} - -/* -Returns true if given suballocation types could conflict and must respect -VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer -or linear image and another one is optimal image. If type is unknown, behave -conservatively. 
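As a standalone sanity check on the page computation used by VmaBlocksOnSamePage() above (this sketch repeats the bit masking rather than calling the internal function, and picks 1024 as an illustrative bufferImageGranularity):

\code
#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t pageSize = 1024; // stands in for VkPhysicalDeviceLimits::bufferImageGranularity
    auto pageOf = [&](uint64_t addr) { return addr & ~(pageSize - 1); };

    // Resource A occupies [0, 1000): its last byte 999 lies on page 0.
    assert(pageOf(999) == 0);
    // A resource starting at offset 1024 begins on page 1 -> no page shared with A.
    assert(pageOf(1024) == 1024);
    // A resource starting at offset 1000 begins on page 0 -> shares a page with A,
    // so the suballocation-type conflict check applies.
    assert(pageOf(1000) == 0);
    return 0;
}
\endcode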
-*/ -static inline bool VmaIsBufferImageGranularityConflict( - VmaSuballocationType suballocType1, - VmaSuballocationType suballocType2) -{ - if (suballocType1 > suballocType2) - { - VMA_SWAP(suballocType1, suballocType2); - } - - switch (suballocType1) - { - case VMA_SUBALLOCATION_TYPE_FREE: - return false; - case VMA_SUBALLOCATION_TYPE_UNKNOWN: - return true; - case VMA_SUBALLOCATION_TYPE_BUFFER: - return - suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || - suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; - case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: - return - suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || - suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || - suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; - case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: - return - suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; - case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: - return false; - default: - VMA_ASSERT(0); - return true; - } -} - -static void VmaWriteMagicValue(void* pData, VkDeviceSize offset) -{ -#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION - uint32_t* pDst = (uint32_t*)((char*)pData + offset); - const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); - for (size_t i = 0; i < numberCount; ++i, ++pDst) - { - *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; - } -#else - // no-op -#endif -} - -static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) -{ -#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION - const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); - const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); - for (size_t i = 0; i < numberCount; ++i, ++pSrc) - { - if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) - { - return false; - } - } -#endif - return true; -} - -/* -Fills structure with parameters of an example buffer to be used for transfers -during GPU memory defragmentation. -*/ -static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) -{ - memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); - outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; - outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size. -} - - -/* -Performs binary search and returns iterator to first element that is greater or -equal to (key), according to comparison (cmp). - -Cmp should return true if first argument is less than second argument. - -Returned value is the found element, if present in the collection or place where -new element with value (key) should be inserted. -*/ -template -static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp) -{ - size_t down = 0, up = (end - beg); - while (down < up) - { - const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation - if (cmp(*(beg + mid), key)) - { - down = mid + 1; - } - else - { - up = mid; - } - } - return beg + down; -} - -template -IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp) -{ - IterT it = VmaBinaryFindFirstNotLess( - beg, end, value, cmp); - if (it == end || - (!cmp(*it, value) && !cmp(value, *it))) - { - return it; - } - return end; -} - -/* -Returns true if all pointers in the array are not-null and unique. -Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT. -T must be pointer type, e.g. VmaAllocation, VmaPool. 
-*/ -template -static bool VmaValidatePointerArray(uint32_t count, const T* arr) -{ - for (uint32_t i = 0; i < count; ++i) - { - const T iPtr = arr[i]; - if (iPtr == VMA_NULL) - { - return false; - } - for (uint32_t j = i + 1; j < count; ++j) - { - if (iPtr == arr[j]) - { - return false; - } - } - } - return true; -} - -template -static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) -{ - newStruct->pNext = mainStruct->pNext; - mainStruct->pNext = newStruct; -} - -// This is the main algorithm that guides the selection of a memory type best for an allocation - -// converts usage to required/preferred/not preferred flags. -static bool FindMemoryPreferences( - bool isIntegratedGPU, - const VmaAllocationCreateInfo& allocCreateInfo, - VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. - VkMemoryPropertyFlags& outRequiredFlags, - VkMemoryPropertyFlags& outPreferredFlags, - VkMemoryPropertyFlags& outNotPreferredFlags) -{ - outRequiredFlags = allocCreateInfo.requiredFlags; - outPreferredFlags = allocCreateInfo.preferredFlags; - outNotPreferredFlags = 0; - - switch(allocCreateInfo.usage) - { - case VMA_MEMORY_USAGE_UNKNOWN: - break; - case VMA_MEMORY_USAGE_GPU_ONLY: - if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) - { - outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - break; - case VMA_MEMORY_USAGE_CPU_ONLY: - outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; - break; - case VMA_MEMORY_USAGE_CPU_TO_GPU: - outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) - { - outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - break; - case VMA_MEMORY_USAGE_GPU_TO_CPU: - outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; - break; - case VMA_MEMORY_USAGE_CPU_COPY: - outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - break; - case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: - outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; - break; - case VMA_MEMORY_USAGE_AUTO: - case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE: - case VMA_MEMORY_USAGE_AUTO_PREFER_HOST: - { - if(bufImgUsage == UINT32_MAX) - { - VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known."); - return false; - } - // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same VK_BUFFER_IMAGE_TRANSFER*. - const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0; - const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0; - const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0; - const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0; - const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; - const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST; - - // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU. 
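// (Editorial illustration, not part of the original source.) From the caller's side,
// this case corresponds to a request such as:
//
//     VmaAllocationCreateInfo ci = {};
//     ci.usage = VMA_MEMORY_USAGE_AUTO;
//     ci.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
//
// which the branch below turns into required HOST_VISIBLE | HOST_CACHED flags
// (readback-friendly memory), unless VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT
// permits preferring DEVICE_LOCAL memory and doing an explicit transfer instead.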
- if(hostAccessRandom) - { - if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) - { - // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL. - // Omitting HOST_VISIBLE here is intentional. - // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one. - // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list. - outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; - } - else - { - // Always CPU memory, cached. - outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; - } - } - // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined. - else if(hostAccessSequentialWrite) - { - // Want uncached and write-combined. - outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; - - if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) - { - outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - } - else - { - outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame) - if(deviceAccess) - { - // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory. - if(preferHost) - outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - else - outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU) - else - { - // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory. - if(preferDevice) - outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - else - outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - } - } - // No CPU access - else - { - // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory - if(deviceAccess) - { - // ...unless there is a clear preference from the user not to do so. - if(preferHost) - outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - else - outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - // No direct GPU access, no CPU access, just transfers. - // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or - // a "swap file" copy to free some GPU memory (then better CPU memory). - // Up to the user to decide. If no preferece, assume the former and choose GPU memory. - if(preferHost) - outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - else - outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; - } - break; - } - default: - VMA_ASSERT(0); - } - - // Avoid DEVICE_COHERENT unless explicitly requested. 
- if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) & - (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) - { - outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY; - } - - return true; -} - -//////////////////////////////////////////////////////////////////////////////// -// Memory allocation - -static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment) -{ - void* result = VMA_NULL; - if ((pAllocationCallbacks != VMA_NULL) && - (pAllocationCallbacks->pfnAllocation != VMA_NULL)) - { - result = (*pAllocationCallbacks->pfnAllocation)( - pAllocationCallbacks->pUserData, - size, - alignment, - VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); - } - else - { - result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment); - } - VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed."); - return result; -} - -static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr) -{ - if ((pAllocationCallbacks != VMA_NULL) && - (pAllocationCallbacks->pfnFree != VMA_NULL)) - { - (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr); - } - else - { - VMA_SYSTEM_ALIGNED_FREE(ptr); - } -} - -template -static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks) -{ - return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); -} - -template -static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count) -{ - return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); -} - -#define vma_new(allocator, type) new(VmaAllocate(allocator))(type) - -#define vma_new_array(allocator, type, count) new(VmaAllocateArray((allocator), (count)))(type) - -template -static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr) -{ - ptr->~T(); - VmaFree(pAllocationCallbacks, ptr); -} - -template -static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count) -{ - if (ptr != VMA_NULL) - { - for (size_t i = count; i--; ) - { - ptr[i].~T(); - } - VmaFree(pAllocationCallbacks, ptr); - } -} - -static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr) -{ - if (srcStr != VMA_NULL) - { - const size_t len = strlen(srcStr); - char* const result = vma_new_array(allocs, char, len + 1); - memcpy(result, srcStr, len + 1); - return result; - } - return VMA_NULL; -} - -#if VMA_STATS_STRING_ENABLED -static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen) -{ - if (srcStr != VMA_NULL) - { - char* const result = vma_new_array(allocs, char, strLen + 1); - memcpy(result, srcStr, strLen); - result[strLen] = '\0'; - return result; - } - return VMA_NULL; -} -#endif // VMA_STATS_STRING_ENABLED - -static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str) -{ - if (str != VMA_NULL) - { - const size_t len = strlen(str); - vma_delete_array(allocs, str, len + 1); - } -} - -template -size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value) -{ - const size_t indexToInsert = VmaBinaryFindFirstNotLess( - vector.data(), - vector.data() + vector.size(), - value, - CmpLess()) - vector.data(); - VmaVectorInsert(vector, indexToInsert, value); - return indexToInsert; -} - -template -bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value) -{ - CmpLess comparator; - typename VectorT::iterator it = VmaBinaryFindFirstNotLess( - 
vector.begin(), - vector.end(), - value, - comparator); - if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) - { - size_t indexToRemove = it - vector.begin(); - VmaVectorRemove(vector, indexToRemove); - return true; - } - return false; -} -#endif // _VMA_FUNCTIONS - -#ifndef _VMA_STATISTICS_FUNCTIONS - -static void VmaClearStatistics(VmaStatistics& outStats) -{ - outStats.blockCount = 0; - outStats.allocationCount = 0; - outStats.blockBytes = 0; - outStats.allocationBytes = 0; -} - -static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src) -{ - inoutStats.blockCount += src.blockCount; - inoutStats.allocationCount += src.allocationCount; - inoutStats.blockBytes += src.blockBytes; - inoutStats.allocationBytes += src.allocationBytes; -} - -static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) -{ - VmaClearStatistics(outStats.statistics); - outStats.unusedRangeCount = 0; - outStats.allocationSizeMin = VK_WHOLE_SIZE; - outStats.allocationSizeMax = 0; - outStats.unusedRangeSizeMin = VK_WHOLE_SIZE; - outStats.unusedRangeSizeMax = 0; -} - -static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size) -{ - inoutStats.statistics.allocationCount++; - inoutStats.statistics.allocationBytes += size; - inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size); - inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size); -} - -static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size) -{ - inoutStats.unusedRangeCount++; - inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size); - inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size); -} - -static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src) -{ - VmaAddStatistics(inoutStats.statistics, src.statistics); - inoutStats.unusedRangeCount += src.unusedRangeCount; - inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin); - inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax); - inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin); - inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax); -} - -#endif // _VMA_STATISTICS_FUNCTIONS - -#ifndef _VMA_MUTEX_LOCK -// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). -struct VmaMutexLock -{ - VMA_CLASS_NO_COPY(VmaMutexLock) -public: - VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : - m_pMutex(useMutex ? &mutex : VMA_NULL) - { - if (m_pMutex) { m_pMutex->Lock(); } - } - ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } } - -private: - VMA_MUTEX* m_pMutex; -}; - -// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. -struct VmaMutexLockRead -{ - VMA_CLASS_NO_COPY(VmaMutexLockRead) -public: - VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : - m_pMutex(useMutex ? &mutex : VMA_NULL) - { - if (m_pMutex) { m_pMutex->LockRead(); } - } - ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } } - -private: - VMA_RW_MUTEX* m_pMutex; -}; - -// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. 
-struct VmaMutexLockWrite -{ - VMA_CLASS_NO_COPY(VmaMutexLockWrite) -public: - VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) - : m_pMutex(useMutex ? &mutex : VMA_NULL) - { - if (m_pMutex) { m_pMutex->LockWrite(); } - } - ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } } - -private: - VMA_RW_MUTEX* m_pMutex; -}; - -#if VMA_DEBUG_GLOBAL_MUTEX - static VMA_MUTEX gDebugGlobalMutex; - #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); -#else - #define VMA_DEBUG_GLOBAL_MUTEX_LOCK -#endif -#endif // _VMA_MUTEX_LOCK - -#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT -// An object that increments given atomic but decrements it back in the destructor unless Commit() is called. -template -struct AtomicTransactionalIncrement -{ -public: - typedef std::atomic AtomicT; - - ~AtomicTransactionalIncrement() - { - if(m_Atomic) - --(*m_Atomic); - } - - void Commit() { m_Atomic = nullptr; } - T Increment(AtomicT* atomic) - { - m_Atomic = atomic; - return m_Atomic->fetch_add(1); - } - -private: - AtomicT* m_Atomic = nullptr; -}; -#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT - -#ifndef _VMA_STL_ALLOCATOR -// STL-compatible allocator. -template -struct VmaStlAllocator -{ - const VkAllocationCallbacks* const m_pCallbacks; - typedef T value_type; - - VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {} - template - VmaStlAllocator(const VmaStlAllocator& src) : m_pCallbacks(src.m_pCallbacks) {} - VmaStlAllocator(const VmaStlAllocator&) = default; - VmaStlAllocator& operator=(const VmaStlAllocator&) = delete; - - T* allocate(size_t n) { return VmaAllocateArray(m_pCallbacks, n); } - void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); } - - template - bool operator==(const VmaStlAllocator& rhs) const - { - return m_pCallbacks == rhs.m_pCallbacks; - } - template - bool operator!=(const VmaStlAllocator& rhs) const - { - return m_pCallbacks != rhs.m_pCallbacks; - } -}; -#endif // _VMA_STL_ALLOCATOR - -#ifndef _VMA_VECTOR -/* Class with interface compatible with subset of std::vector. -T must be POD because constructors and destructors are not called and memcpy is -used for these objects. */ -template -class VmaVector -{ -public: - typedef T value_type; - typedef T* iterator; - typedef const T* const_iterator; - - VmaVector(const AllocatorT& allocator); - VmaVector(size_t count, const AllocatorT& allocator); - // This version of the constructor is here for compatibility with pre-C++14 std::vector. - // value is unused. 
- VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {} - VmaVector(const VmaVector& src); - VmaVector& operator=(const VmaVector& rhs); - ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); } - - bool empty() const { return m_Count == 0; } - size_t size() const { return m_Count; } - T* data() { return m_pArray; } - T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } - T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } - const T* data() const { return m_pArray; } - const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } - const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } - - iterator begin() { return m_pArray; } - iterator end() { return m_pArray + m_Count; } - const_iterator cbegin() const { return m_pArray; } - const_iterator cend() const { return m_pArray + m_Count; } - const_iterator begin() const { return cbegin(); } - const_iterator end() const { return cend(); } - - void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } - void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } - void push_front(const T& src) { insert(0, src); } - - void push_back(const T& src); - void reserve(size_t newCapacity, bool freeMemory = false); - void resize(size_t newCount); - void clear() { resize(0); } - void shrink_to_fit(); - void insert(size_t index, const T& src); - void remove(size_t index); - - T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } - const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } - -private: - AllocatorT m_Allocator; - T* m_pArray; - size_t m_Count; - size_t m_Capacity; -}; - -#ifndef _VMA_VECTOR_FUNCTIONS -template -VmaVector::VmaVector(const AllocatorT& allocator) - : m_Allocator(allocator), - m_pArray(VMA_NULL), - m_Count(0), - m_Capacity(0) {} - -template -VmaVector::VmaVector(size_t count, const AllocatorT& allocator) - : m_Allocator(allocator), - m_pArray(count ? (T*)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), - m_Count(count), - m_Capacity(count) {} - -template -VmaVector::VmaVector(const VmaVector& src) - : m_Allocator(src.m_Allocator), - m_pArray(src.m_Count ? (T*)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), - m_Count(src.m_Count), - m_Capacity(src.m_Count) -{ - if (m_Count != 0) - { - memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T)); - } -} - -template -VmaVector& VmaVector::operator=(const VmaVector& rhs) -{ - if (&rhs != this) - { - resize(rhs.m_Count); - if (m_Count != 0) - { - memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T)); - } - } - return *this; -} - -template -void VmaVector::push_back(const T& src) -{ - const size_t newIndex = size(); - resize(newIndex + 1); - m_pArray[newIndex] = src; -} - -template -void VmaVector::reserve(size_t newCapacity, bool freeMemory) -{ - newCapacity = VMA_MAX(newCapacity, m_Count); - - if ((newCapacity < m_Capacity) && !freeMemory) - { - newCapacity = m_Capacity; - } - - if (newCapacity != m_Capacity) - { - T* const newArray = newCapacity ? 
VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; - if (m_Count != 0) - { - memcpy(newArray, m_pArray, m_Count * sizeof(T)); - } - VmaFree(m_Allocator.m_pCallbacks, m_pArray); - m_Capacity = newCapacity; - m_pArray = newArray; - } -} - -template -void VmaVector::resize(size_t newCount) -{ - size_t newCapacity = m_Capacity; - if (newCount > m_Capacity) - { - newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8)); - } - - if (newCapacity != m_Capacity) - { - T* const newArray = newCapacity ? VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; - const size_t elementsToCopy = VMA_MIN(m_Count, newCount); - if (elementsToCopy != 0) - { - memcpy(newArray, m_pArray, elementsToCopy * sizeof(T)); - } - VmaFree(m_Allocator.m_pCallbacks, m_pArray); - m_Capacity = newCapacity; - m_pArray = newArray; - } - - m_Count = newCount; -} - -template -void VmaVector::shrink_to_fit() -{ - if (m_Capacity > m_Count) - { - T* newArray = VMA_NULL; - if (m_Count > 0) - { - newArray = VmaAllocateArray(m_Allocator.m_pCallbacks, m_Count); - memcpy(newArray, m_pArray, m_Count * sizeof(T)); - } - VmaFree(m_Allocator.m_pCallbacks, m_pArray); - m_Capacity = m_Count; - m_pArray = newArray; - } -} - -template -void VmaVector::insert(size_t index, const T& src) -{ - VMA_HEAVY_ASSERT(index <= m_Count); - const size_t oldCount = size(); - resize(oldCount + 1); - if (index < oldCount) - { - memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T)); - } - m_pArray[index] = src; -} - -template -void VmaVector::remove(size_t index) -{ - VMA_HEAVY_ASSERT(index < m_Count); - const size_t oldCount = size(); - if (index < oldCount - 1) - { - memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T)); - } - resize(oldCount - 1); -} -#endif // _VMA_VECTOR_FUNCTIONS - -template -static void VmaVectorInsert(VmaVector& vec, size_t index, const T& item) -{ - vec.insert(index, item); -} - -template -static void VmaVectorRemove(VmaVector& vec, size_t index) -{ - vec.remove(index); -} -#endif // _VMA_VECTOR - -#ifndef _VMA_SMALL_VECTOR -/* -This is a vector (a variable-sized array), optimized for the case when the array is small. - -It contains some number of elements in-place, which allows it to avoid heap allocation -when the actual number of elements is below that threshold. This allows normal "small" -cases to be fast without losing generality for large inputs. -*/ -template -class VmaSmallVector -{ -public: - typedef T value_type; - typedef T* iterator; - - VmaSmallVector(const AllocatorT& allocator); - VmaSmallVector(size_t count, const AllocatorT& allocator); - template - VmaSmallVector(const VmaSmallVector&) = delete; - template - VmaSmallVector& operator=(const VmaSmallVector&) = delete; - ~VmaSmallVector() = default; - - bool empty() const { return m_Count == 0; } - size_t size() const { return m_Count; } - T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } - T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } - T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } - const T* data() const { return m_Count > N ? 
m_DynamicArray.data() : m_StaticArray; } - const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } - const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } - - iterator begin() { return data(); } - iterator end() { return data() + m_Count; } - - void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } - void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } - void push_front(const T& src) { insert(0, src); } - - void push_back(const T& src); - void resize(size_t newCount, bool freeMemory = false); - void clear(bool freeMemory = false); - void insert(size_t index, const T& src); - void remove(size_t index); - - T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } - const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } - -private: - size_t m_Count; - T m_StaticArray[N]; // Used when m_Size <= N - VmaVector m_DynamicArray; // Used when m_Size > N -}; - -#ifndef _VMA_SMALL_VECTOR_FUNCTIONS -template -VmaSmallVector::VmaSmallVector(const AllocatorT& allocator) - : m_Count(0), - m_DynamicArray(allocator) {} - -template -VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT& allocator) - : m_Count(count), - m_DynamicArray(count > N ? count : 0, allocator) {} - -template -void VmaSmallVector::push_back(const T& src) -{ - const size_t newIndex = size(); - resize(newIndex + 1); - data()[newIndex] = src; -} - -template -void VmaSmallVector::resize(size_t newCount, bool freeMemory) -{ - if (newCount > N && m_Count > N) - { - // Any direction, staying in m_DynamicArray - m_DynamicArray.resize(newCount); - if (freeMemory) - { - m_DynamicArray.shrink_to_fit(); - } - } - else if (newCount > N && m_Count <= N) - { - // Growing, moving from m_StaticArray to m_DynamicArray - m_DynamicArray.resize(newCount); - if (m_Count > 0) - { - memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); - } - } - else if (newCount <= N && m_Count > N) - { - // Shrinking, moving from m_DynamicArray to m_StaticArray - if (newCount > 0) - { - memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); - } - m_DynamicArray.resize(0); - if (freeMemory) - { - m_DynamicArray.shrink_to_fit(); - } - } - else - { - // Any direction, staying in m_StaticArray - nothing to do here - } - m_Count = newCount; -} - -template -void VmaSmallVector::clear(bool freeMemory) -{ - m_DynamicArray.clear(); - if (freeMemory) - { - m_DynamicArray.shrink_to_fit(); - } - m_Count = 0; -} - -template -void VmaSmallVector::insert(size_t index, const T& src) -{ - VMA_HEAVY_ASSERT(index <= m_Count); - const size_t oldCount = size(); - resize(oldCount + 1); - T* const dataPtr = data(); - if (index < oldCount) - { - // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. - memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); - } - dataPtr[index] = src; -} - -template -void VmaSmallVector::remove(size_t index) -{ - VMA_HEAVY_ASSERT(index < m_Count); - const size_t oldCount = size(); - if (index < oldCount - 1) - { - // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. 
- T* const dataPtr = data(); - memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); - } - resize(oldCount - 1); -} -#endif // _VMA_SMALL_VECTOR_FUNCTIONS -#endif // _VMA_SMALL_VECTOR - -#ifndef _VMA_POOL_ALLOCATOR -/* -Allocator for objects of type T using a list of arrays (pools) to speed up -allocation. Number of elements that can be allocated is not bounded because -allocator can create multiple blocks. -*/ -template -class VmaPoolAllocator -{ - VMA_CLASS_NO_COPY(VmaPoolAllocator) -public: - VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); - ~VmaPoolAllocator(); - template T* Alloc(Types&&... args); - void Free(T* ptr); - -private: - union Item - { - uint32_t NextFreeIndex; - alignas(T) char Value[sizeof(T)]; - }; - struct ItemBlock - { - Item* pItems; - uint32_t Capacity; - uint32_t FirstFreeIndex; - }; - - const VkAllocationCallbacks* m_pAllocationCallbacks; - const uint32_t m_FirstBlockCapacity; - VmaVector> m_ItemBlocks; - - ItemBlock& CreateNewBlock(); -}; - -#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS -template -VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) - : m_pAllocationCallbacks(pAllocationCallbacks), - m_FirstBlockCapacity(firstBlockCapacity), - m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) -{ - VMA_ASSERT(m_FirstBlockCapacity > 1); -} - -template -VmaPoolAllocator::~VmaPoolAllocator() -{ - for (size_t i = m_ItemBlocks.size(); i--;) - vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); - m_ItemBlocks.clear(); -} - -template -template T* VmaPoolAllocator::Alloc(Types&&... args) -{ - for (size_t i = m_ItemBlocks.size(); i--; ) - { - ItemBlock& block = m_ItemBlocks[i]; - // This block has some free items: Use first one. - if (block.FirstFreeIndex != UINT32_MAX) - { - Item* const pItem = &block.pItems[block.FirstFreeIndex]; - block.FirstFreeIndex = pItem->NextFreeIndex; - T* result = (T*)&pItem->Value; - new(result)T(std::forward(args)...); // Explicit constructor call. - return result; - } - } - - // No block has free item: Create new one and use it. - ItemBlock& newBlock = CreateNewBlock(); - Item* const pItem = &newBlock.pItems[0]; - newBlock.FirstFreeIndex = pItem->NextFreeIndex; - T* result = (T*)&pItem->Value; - new(result) T(std::forward(args)...); // Explicit constructor call. - return result; -} - -template -void VmaPoolAllocator::Free(T* ptr) -{ - // Search all memory blocks to find ptr. - for (size_t i = m_ItemBlocks.size(); i--; ) - { - ItemBlock& block = m_ItemBlocks[i]; - - // Casting to union. - Item* pItemPtr; - memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); - - // Check if pItemPtr is in address range of this block. - if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) - { - ptr->~T(); // Explicit destructor call. - const uint32_t index = static_cast(pItemPtr - block.pItems); - pItemPtr->NextFreeIndex = block.FirstFreeIndex; - block.FirstFreeIndex = index; - return; - } - } - VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); -} - -template -typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() -{ - const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? 
- m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; - - const ItemBlock newBlock = - { - vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), - newBlockCapacity, - 0 - }; - - m_ItemBlocks.push_back(newBlock); - - // Setup singly-linked list of all free items in this block. - for (uint32_t i = 0; i < newBlockCapacity - 1; ++i) - newBlock.pItems[i].NextFreeIndex = i + 1; - newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; - return m_ItemBlocks.back(); -} -#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS -#endif // _VMA_POOL_ALLOCATOR - -#ifndef _VMA_RAW_LIST -template -struct VmaListItem -{ - VmaListItem* pPrev; - VmaListItem* pNext; - T Value; -}; - -// Doubly linked list. -template -class VmaRawList -{ - VMA_CLASS_NO_COPY(VmaRawList) -public: - typedef VmaListItem ItemType; - - VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); - // Intentionally not calling Clear, because that would be unnecessary - // computations to return all items to m_ItemAllocator as free. - ~VmaRawList() = default; - - size_t GetCount() const { return m_Count; } - bool IsEmpty() const { return m_Count == 0; } - - ItemType* Front() { return m_pFront; } - ItemType* Back() { return m_pBack; } - const ItemType* Front() const { return m_pFront; } - const ItemType* Back() const { return m_pBack; } - - ItemType* PushFront(); - ItemType* PushBack(); - ItemType* PushFront(const T& value); - ItemType* PushBack(const T& value); - void PopFront(); - void PopBack(); - - // Item can be null - it means PushBack. - ItemType* InsertBefore(ItemType* pItem); - // Item can be null - it means PushFront. - ItemType* InsertAfter(ItemType* pItem); - ItemType* InsertBefore(ItemType* pItem, const T& value); - ItemType* InsertAfter(ItemType* pItem, const T& value); - - void Clear(); - void Remove(ItemType* pItem); - -private: - const VkAllocationCallbacks* const m_pAllocationCallbacks; - VmaPoolAllocator m_ItemAllocator; - ItemType* m_pFront; - ItemType* m_pBack; - size_t m_Count; -}; - -#ifndef _VMA_RAW_LIST_FUNCTIONS -template -VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) - : m_pAllocationCallbacks(pAllocationCallbacks), - m_ItemAllocator(pAllocationCallbacks, 128), - m_pFront(VMA_NULL), - m_pBack(VMA_NULL), - m_Count(0) {} - -template -VmaListItem* VmaRawList::PushFront() -{ - ItemType* const pNewItem = m_ItemAllocator.Alloc(); - pNewItem->pPrev = VMA_NULL; - if (IsEmpty()) - { - pNewItem->pNext = VMA_NULL; - m_pFront = pNewItem; - m_pBack = pNewItem; - m_Count = 1; - } - else - { - pNewItem->pNext = m_pFront; - m_pFront->pPrev = pNewItem; - m_pFront = pNewItem; - ++m_Count; - } - return pNewItem; -} - -template -VmaListItem* VmaRawList::PushBack() -{ - ItemType* const pNewItem = m_ItemAllocator.Alloc(); - pNewItem->pNext = VMA_NULL; - if(IsEmpty()) - { - pNewItem->pPrev = VMA_NULL; - m_pFront = pNewItem; - m_pBack = pNewItem; - m_Count = 1; - } - else - { - pNewItem->pPrev = m_pBack; - m_pBack->pNext = pNewItem; - m_pBack = pNewItem; - ++m_Count; - } - return pNewItem; -} - -template -VmaListItem* VmaRawList::PushFront(const T& value) -{ - ItemType* const pNewItem = PushFront(); - pNewItem->Value = value; - return pNewItem; -} - -template -VmaListItem* VmaRawList::PushBack(const T& value) -{ - ItemType* const pNewItem = PushBack(); - pNewItem->Value = value; - return pNewItem; -} - -template -void VmaRawList::PopFront() -{ - VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const pFrontItem = m_pFront; - ItemType* const pNextItem = pFrontItem->pNext; - if (pNextItem != 
VMA_NULL) - { - pNextItem->pPrev = VMA_NULL; - } - m_pFront = pNextItem; - m_ItemAllocator.Free(pFrontItem); - --m_Count; -} - -template -void VmaRawList::PopBack() -{ - VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const pBackItem = m_pBack; - ItemType* const pPrevItem = pBackItem->pPrev; - if(pPrevItem != VMA_NULL) - { - pPrevItem->pNext = VMA_NULL; - } - m_pBack = pPrevItem; - m_ItemAllocator.Free(pBackItem); - --m_Count; -} - -template -void VmaRawList::Clear() -{ - if (IsEmpty() == false) - { - ItemType* pItem = m_pBack; - while (pItem != VMA_NULL) - { - ItemType* const pPrevItem = pItem->pPrev; - m_ItemAllocator.Free(pItem); - pItem = pPrevItem; - } - m_pFront = VMA_NULL; - m_pBack = VMA_NULL; - m_Count = 0; - } -} - -template -void VmaRawList::Remove(ItemType* pItem) -{ - VMA_HEAVY_ASSERT(pItem != VMA_NULL); - VMA_HEAVY_ASSERT(m_Count > 0); - - if(pItem->pPrev != VMA_NULL) - { - pItem->pPrev->pNext = pItem->pNext; - } - else - { - VMA_HEAVY_ASSERT(m_pFront == pItem); - m_pFront = pItem->pNext; - } - - if(pItem->pNext != VMA_NULL) - { - pItem->pNext->pPrev = pItem->pPrev; - } - else - { - VMA_HEAVY_ASSERT(m_pBack == pItem); - m_pBack = pItem->pPrev; - } - - m_ItemAllocator.Free(pItem); - --m_Count; -} - -template -VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) -{ - if(pItem != VMA_NULL) - { - ItemType* const prevItem = pItem->pPrev; - ItemType* const newItem = m_ItemAllocator.Alloc(); - newItem->pPrev = prevItem; - newItem->pNext = pItem; - pItem->pPrev = newItem; - if(prevItem != VMA_NULL) - { - prevItem->pNext = newItem; - } - else - { - VMA_HEAVY_ASSERT(m_pFront == pItem); - m_pFront = newItem; - } - ++m_Count; - return newItem; - } - else - return PushBack(); -} - -template -VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) -{ - if(pItem != VMA_NULL) - { - ItemType* const nextItem = pItem->pNext; - ItemType* const newItem = m_ItemAllocator.Alloc(); - newItem->pNext = nextItem; - newItem->pPrev = pItem; - pItem->pNext = newItem; - if(nextItem != VMA_NULL) - { - nextItem->pPrev = newItem; - } - else - { - VMA_HEAVY_ASSERT(m_pBack == pItem); - m_pBack = newItem; - } - ++m_Count; - return newItem; - } - else - return PushFront(); -} - -template -VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) -{ - ItemType* const newItem = InsertBefore(pItem); - newItem->Value = value; - return newItem; -} - -template -VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) -{ - ItemType* const newItem = InsertAfter(pItem); - newItem->Value = value; - return newItem; -} -#endif // _VMA_RAW_LIST_FUNCTIONS -#endif // _VMA_RAW_LIST - -#ifndef _VMA_LIST -template -class VmaList -{ - VMA_CLASS_NO_COPY(VmaList) -public: - class reverse_iterator; - class const_iterator; - class const_reverse_iterator; - - class iterator - { - friend class const_iterator; - friend class VmaList; - public: - iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - - T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - - bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - - iterator operator++(int) { iterator result = *this; ++*this; return result; } - iterator operator--(int) { 
iterator result = *this; --*this; return result; } - - iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } - iterator& operator--(); - - private: - VmaRawList* m_pList; - VmaListItem* m_pItem; - - iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} - }; - class reverse_iterator - { - friend class const_reverse_iterator; - friend class VmaList; - public: - reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - - T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - - bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - - reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; } - reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; } - - reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } - reverse_iterator& operator--(); - - private: - VmaRawList* m_pList; - VmaListItem* m_pItem; - - reverse_iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} - }; - class const_iterator - { - friend class VmaList; - public: - const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - - iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } - - const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - - bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - - const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; } - const_iterator operator--(int) { const_iterator result = *this; --* this; return result; } - - const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } - const_iterator& operator--(); - - private: - const VmaRawList* m_pList; - const VmaListItem* m_pItem; - - const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} - }; - class const_reverse_iterator - { - friend class VmaList; - public: - const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - - reverse_iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } - - const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } - - bool operator==(const 
const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } - - const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; } - const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; } - - const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } - const_reverse_iterator& operator--(); - - private: - const VmaRawList* m_pList; - const VmaListItem* m_pItem; - - const_reverse_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} - }; - - VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {} - - bool empty() const { return m_RawList.IsEmpty(); } - size_t size() const { return m_RawList.GetCount(); } - - iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } - iterator end() { return iterator(&m_RawList, VMA_NULL); } - - const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } - const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } - - const_iterator begin() const { return cbegin(); } - const_iterator end() const { return cend(); } - - reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); } - reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); } - - const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); } - const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); } - - const_reverse_iterator rbegin() const { return crbegin(); } - const_reverse_iterator rend() const { return crend(); } - - void push_back(const T& value) { m_RawList.PushBack(value); } - iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } - - void clear() { m_RawList.Clear(); } - void erase(iterator it) { m_RawList.Remove(it.m_pItem); } - -private: - VmaRawList m_RawList; -}; - -#ifndef _VMA_LIST_FUNCTIONS -template -typename VmaList::iterator& VmaList::iterator::operator--() -{ - if (m_pItem != VMA_NULL) - { - m_pItem = m_pItem->pPrev; - } - else - { - VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); - m_pItem = m_pList->Back(); - } - return *this; -} - -template -typename VmaList::reverse_iterator& VmaList::reverse_iterator::operator--() -{ - if (m_pItem != VMA_NULL) - { - m_pItem = m_pItem->pNext; - } - else - { - VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); - m_pItem = m_pList->Front(); - } - return *this; -} - -template -typename VmaList::const_iterator& VmaList::const_iterator::operator--() -{ - if (m_pItem != VMA_NULL) - { - m_pItem = m_pItem->pPrev; - } - else - { - VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); - m_pItem = m_pList->Back(); - } - return *this; -} - -template -typename VmaList::const_reverse_iterator& VmaList::const_reverse_iterator::operator--() -{ - if (m_pItem != VMA_NULL) - { - m_pItem = m_pItem->pNext; - } - else - { - VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); - m_pItem = m_pList->Back(); - } - return *this; -} -#endif // _VMA_LIST_FUNCTIONS -#endif // _VMA_LIST - -#ifndef _VMA_INTRUSIVE_LINKED_LIST -/* -Expected interface of ItemTypeTraits: -struct MyItemTypeTraits -{ - typedef MyItem ItemType; - static ItemType* GetPrev(const ItemType* item) { return 
item->myPrevPtr; } - static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; } - static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; } - static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; } -}; -*/ -template -class VmaIntrusiveLinkedList -{ -public: - typedef typename ItemTypeTraits::ItemType ItemType; - static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); } - static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); } - - // Movable, not copyable. - VmaIntrusiveLinkedList() = default; - VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src); - VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete; - VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src); - VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete; - ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); } - - size_t GetCount() const { return m_Count; } - bool IsEmpty() const { return m_Count == 0; } - ItemType* Front() { return m_Front; } - ItemType* Back() { return m_Back; } - const ItemType* Front() const { return m_Front; } - const ItemType* Back() const { return m_Back; } - - void PushBack(ItemType* item); - void PushFront(ItemType* item); - ItemType* PopBack(); - ItemType* PopFront(); - - // MyItem can be null - it means PushBack. - void InsertBefore(ItemType* existingItem, ItemType* newItem); - // MyItem can be null - it means PushFront. - void InsertAfter(ItemType* existingItem, ItemType* newItem); - void Remove(ItemType* item); - void RemoveAll(); - -private: - ItemType* m_Front = VMA_NULL; - ItemType* m_Back = VMA_NULL; - size_t m_Count = 0; -}; - -#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS -template -VmaIntrusiveLinkedList::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src) - : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count) -{ - src.m_Front = src.m_Back = VMA_NULL; - src.m_Count = 0; -} - -template -VmaIntrusiveLinkedList& VmaIntrusiveLinkedList::operator=(VmaIntrusiveLinkedList&& src) -{ - if (&src != this) - { - VMA_HEAVY_ASSERT(IsEmpty()); - m_Front = src.m_Front; - m_Back = src.m_Back; - m_Count = src.m_Count; - src.m_Front = src.m_Back = VMA_NULL; - src.m_Count = 0; - } - return *this; -} - -template -void VmaIntrusiveLinkedList::PushBack(ItemType* item) -{ - VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); - if (IsEmpty()) - { - m_Front = item; - m_Back = item; - m_Count = 1; - } - else - { - ItemTypeTraits::AccessPrev(item) = m_Back; - ItemTypeTraits::AccessNext(m_Back) = item; - m_Back = item; - ++m_Count; - } -} - -template -void VmaIntrusiveLinkedList::PushFront(ItemType* item) -{ - VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); - if (IsEmpty()) - { - m_Front = item; - m_Back = item; - m_Count = 1; - } - else - { - ItemTypeTraits::AccessNext(item) = m_Front; - ItemTypeTraits::AccessPrev(m_Front) = item; - m_Front = item; - ++m_Count; - } -} - -template -typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopBack() -{ - VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const backItem = m_Back; - ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem); - if (prevItem != VMA_NULL) - { - ItemTypeTraits::AccessNext(prevItem) = VMA_NULL; - } - m_Back = prevItem; - --m_Count; - ItemTypeTraits::AccessPrev(backItem) = VMA_NULL; - ItemTypeTraits::AccessNext(backItem) = VMA_NULL; - return backItem; -} - -template 
-typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopFront() -{ - VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const frontItem = m_Front; - ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem); - if (nextItem != VMA_NULL) - { - ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL; - } - m_Front = nextItem; - --m_Count; - ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL; - ItemTypeTraits::AccessNext(frontItem) = VMA_NULL; - return frontItem; -} - -template -void VmaIntrusiveLinkedList::InsertBefore(ItemType* existingItem, ItemType* newItem) -{ - VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); - if (existingItem != VMA_NULL) - { - ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem); - ItemTypeTraits::AccessPrev(newItem) = prevItem; - ItemTypeTraits::AccessNext(newItem) = existingItem; - ItemTypeTraits::AccessPrev(existingItem) = newItem; - if (prevItem != VMA_NULL) - { - ItemTypeTraits::AccessNext(prevItem) = newItem; - } - else - { - VMA_HEAVY_ASSERT(m_Front == existingItem); - m_Front = newItem; - } - ++m_Count; - } - else - PushBack(newItem); -} - -template -void VmaIntrusiveLinkedList::InsertAfter(ItemType* existingItem, ItemType* newItem) -{ - VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); - if (existingItem != VMA_NULL) - { - ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem); - ItemTypeTraits::AccessNext(newItem) = nextItem; - ItemTypeTraits::AccessPrev(newItem) = existingItem; - ItemTypeTraits::AccessNext(existingItem) = newItem; - if (nextItem != VMA_NULL) - { - ItemTypeTraits::AccessPrev(nextItem) = newItem; - } - else - { - VMA_HEAVY_ASSERT(m_Back == existingItem); - m_Back = newItem; - } - ++m_Count; - } - else - return PushFront(newItem); -} - -template -void VmaIntrusiveLinkedList::Remove(ItemType* item) -{ - VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0); - if (ItemTypeTraits::GetPrev(item) != VMA_NULL) - { - ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item); - } - else - { - VMA_HEAVY_ASSERT(m_Front == item); - m_Front = ItemTypeTraits::GetNext(item); - } - - if (ItemTypeTraits::GetNext(item) != VMA_NULL) - { - ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item); - } - else - { - VMA_HEAVY_ASSERT(m_Back == item); - m_Back = ItemTypeTraits::GetPrev(item); - } - ItemTypeTraits::AccessPrev(item) = VMA_NULL; - ItemTypeTraits::AccessNext(item) = VMA_NULL; - --m_Count; -} - -template -void VmaIntrusiveLinkedList::RemoveAll() -{ - if (!IsEmpty()) - { - ItemType* item = m_Back; - while (item != VMA_NULL) - { - ItemType* const prevItem = ItemTypeTraits::AccessPrev(item); - ItemTypeTraits::AccessPrev(item) = VMA_NULL; - ItemTypeTraits::AccessNext(item) = VMA_NULL; - item = prevItem; - } - m_Front = VMA_NULL; - m_Back = VMA_NULL; - m_Count = 0; - } -} -#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS -#endif // _VMA_INTRUSIVE_LINKED_LIST - -// Unused in this version. 
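// -----------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the VMA sources): a type
// that satisfies the ItemTypeTraits interface expected by VmaIntrusiveLinkedList
// above. The names MyBlock and MyBlockTraits are invented for this example; the
// node carries its own prev/next pointers, so the list itself never allocates.
struct MyBlock
{
    MyBlock* prev = nullptr;
    MyBlock* next = nullptr;
    int payload = 0;
};

struct MyBlockTraits
{
    typedef MyBlock ItemType;
    static ItemType* GetPrev(const ItemType* item) { return item->prev; }
    static ItemType* GetNext(const ItemType* item) { return item->next; }
    static ItemType*& AccessPrev(ItemType* item) { return item->prev; }
    static ItemType*& AccessNext(ItemType* item) { return item->next; }
};

// Usage sketch: nodes are owned by the caller, and because the list's destructor
// asserts that it is empty, they must be detached before destruction.
//   VmaIntrusiveLinkedList<MyBlockTraits> list;
//   MyBlock a, b;
//   list.PushBack(&a);
//   list.PushBack(&b);     // list: a <-> b
//   list.Remove(&a);       // list: b
//   list.RemoveAll();      // detach everything before the nodes go out of scope
// -----------------------------------------------------------------------------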
-#if 0 - -#ifndef _VMA_PAIR -template -struct VmaPair -{ - T1 first; - T2 second; - - VmaPair() : first(), second() {} - VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {} -}; - -template -struct VmaPairFirstLess -{ - bool operator()(const VmaPair& lhs, const VmaPair& rhs) const - { - return lhs.first < rhs.first; - } - bool operator()(const VmaPair& lhs, const FirstT& rhsFirst) const - { - return lhs.first < rhsFirst; - } -}; -#endif // _VMA_PAIR - -#ifndef _VMA_MAP -/* Class compatible with subset of interface of std::unordered_map. -KeyT, ValueT must be POD because they will be stored in VmaVector. -*/ -template -class VmaMap -{ -public: - typedef VmaPair PairType; - typedef PairType* iterator; - - VmaMap(const VmaStlAllocator& allocator) : m_Vector(allocator) {} - - iterator begin() { return m_Vector.begin(); } - iterator end() { return m_Vector.end(); } - size_t size() { return m_Vector.size(); } - - void insert(const PairType& pair); - iterator find(const KeyT& key); - void erase(iterator it); - -private: - VmaVector< PairType, VmaStlAllocator> m_Vector; -}; - -#ifndef _VMA_MAP_FUNCTIONS -template -void VmaMap::insert(const PairType& pair) -{ - const size_t indexToInsert = VmaBinaryFindFirstNotLess( - m_Vector.data(), - m_Vector.data() + m_Vector.size(), - pair, - VmaPairFirstLess()) - m_Vector.data(); - VmaVectorInsert(m_Vector, indexToInsert, pair); -} - -template -VmaPair* VmaMap::find(const KeyT& key) -{ - PairType* it = VmaBinaryFindFirstNotLess( - m_Vector.data(), - m_Vector.data() + m_Vector.size(), - key, - VmaPairFirstLess()); - if ((it != m_Vector.end()) && (it->first == key)) - { - return it; - } - else - { - return m_Vector.end(); - } -} - -template -void VmaMap::erase(iterator it) -{ - VmaVectorRemove(m_Vector, it - m_Vector.begin()); -} -#endif // _VMA_MAP_FUNCTIONS -#endif // _VMA_MAP - -#endif // #if 0 - -#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED -class VmaStringBuilder -{ -public: - VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator(allocationCallbacks)) {} - ~VmaStringBuilder() = default; - - size_t GetLength() const { return m_Data.size(); } - const char* GetData() const { return m_Data.data(); } - void AddNewLine() { Add('\n'); } - void Add(char ch) { m_Data.push_back(ch); } - - void Add(const char* pStr); - void AddNumber(uint32_t num); - void AddNumber(uint64_t num); - void AddPointer(const void* ptr); - -private: - VmaVector> m_Data; -}; - -#ifndef _VMA_STRING_BUILDER_FUNCTIONS -void VmaStringBuilder::Add(const char* pStr) -{ - const size_t strLen = strlen(pStr); - if (strLen > 0) - { - const size_t oldCount = m_Data.size(); - m_Data.resize(oldCount + strLen); - memcpy(m_Data.data() + oldCount, pStr, strLen); - } -} - -void VmaStringBuilder::AddNumber(uint32_t num) -{ - char buf[11]; - buf[10] = '\0'; - char* p = &buf[10]; - do - { - *--p = '0' + (num % 10); - num /= 10; - } while (num); - Add(p); -} - -void VmaStringBuilder::AddNumber(uint64_t num) -{ - char buf[21]; - buf[20] = '\0'; - char* p = &buf[20]; - do - { - *--p = '0' + (num % 10); - num /= 10; - } while (num); - Add(p); -} - -void VmaStringBuilder::AddPointer(const void* ptr) -{ - char buf[21]; - VmaPtrToStr(buf, sizeof(buf), ptr); - Add(buf); -} -#endif //_VMA_STRING_BUILDER_FUNCTIONS -#endif // _VMA_STRING_BUILDER - -#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED -/* -Allows to conveniently build a correct JSON document to be written to the -VmaStringBuilder passed to the constructor. 
-*/ -class VmaJsonWriter -{ - VMA_CLASS_NO_COPY(VmaJsonWriter) -public: - // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object. - VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb); - ~VmaJsonWriter(); - - // Begins object by writing "{". - // Inside an object, you must call pairs of WriteString and a value, e.g.: - // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject(); - // Will write: { "A": 1, "B": 2 } - void BeginObject(bool singleLine = false); - // Ends object by writing "}". - void EndObject(); - - // Begins array by writing "[". - // Inside an array, you can write a sequence of any values. - void BeginArray(bool singleLine = false); - // Ends array by writing "[". - void EndArray(); - - // Writes a string value inside "". - // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped. - void WriteString(const char* pStr); - - // Begins writing a string value. - // Call BeginString, ContinueString, ContinueString, ..., EndString instead of - // WriteString to conveniently build the string content incrementally, made of - // parts including numbers. - void BeginString(const char* pStr = VMA_NULL); - // Posts next part of an open string. - void ContinueString(const char* pStr); - // Posts next part of an open string. The number is converted to decimal characters. - void ContinueString(uint32_t n); - void ContinueString(uint64_t n); - void ContinueString_Size(size_t n); - // Posts next part of an open string. Pointer value is converted to characters - // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00 - void ContinueString_Pointer(const void* ptr); - // Ends writing a string value by writing '"'. - void EndString(const char* pStr = VMA_NULL); - - // Writes a number value. - void WriteNumber(uint32_t n); - void WriteNumber(uint64_t n); - void WriteSize(size_t n); - // Writes a boolean value - false or true. - void WriteBool(bool b); - // Writes a null value. 
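// -----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the VMA sources), assuming the
// VmaStringBuilder/VmaJsonWriter declarations in this file: building a small
// single-line document, including a string value composed incrementally with
// BeginString/ContinueString/EndString.
//
//   VmaStringBuilder sb(allocationCallbacks);
//   {
//       VmaJsonWriter json(allocationCallbacks, sb);
//       json.BeginObject(true);          // single-line object
//       json.WriteString("Blocks");      // key...
//       json.BeginArray(true);           // ...and its array value
//       json.WriteNumber(1u);
//       json.WriteNumber(2u);
//       json.EndArray();
//       json.WriteString("Name");
//       json.BeginString("block #");     // value built from parts
//       json.ContinueString(42u);
//       json.EndString();
//       json.EndObject();
//   }   // the writer asserts on destruction that every object/array was closed
//   // sb.GetData() now holds roughly: {"Blocks": [1, 2], "Name": "block #42"}
// -----------------------------------------------------------------------------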
- void WriteNull(); - -private: - enum COLLECTION_TYPE - { - COLLECTION_TYPE_OBJECT, - COLLECTION_TYPE_ARRAY, - }; - struct StackItem - { - COLLECTION_TYPE type; - uint32_t valueCount; - bool singleLineMode; - }; - - static const char* const INDENT; - - VmaStringBuilder& m_SB; - VmaVector< StackItem, VmaStlAllocator > m_Stack; - bool m_InsideString; - - // Write size_t for less than 64bits - void WriteSize(size_t n, std::integral_constant) { m_SB.AddNumber(static_cast(n)); } - // Write size_t for 64bits - void WriteSize(size_t n, std::integral_constant) { m_SB.AddNumber(static_cast(n)); } - - void BeginValue(bool isString); - void WriteIndent(bool oneLess = false); -}; -const char* const VmaJsonWriter::INDENT = " "; - -#ifndef _VMA_JSON_WRITER_FUNCTIONS -VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) - : m_SB(sb), - m_Stack(VmaStlAllocator(pAllocationCallbacks)), - m_InsideString(false) {} - -VmaJsonWriter::~VmaJsonWriter() -{ - VMA_ASSERT(!m_InsideString); - VMA_ASSERT(m_Stack.empty()); -} - -void VmaJsonWriter::BeginObject(bool singleLine) -{ - VMA_ASSERT(!m_InsideString); - - BeginValue(false); - m_SB.Add('{'); - - StackItem item; - item.type = COLLECTION_TYPE_OBJECT; - item.valueCount = 0; - item.singleLineMode = singleLine; - m_Stack.push_back(item); -} - -void VmaJsonWriter::EndObject() -{ - VMA_ASSERT(!m_InsideString); - - WriteIndent(true); - m_SB.Add('}'); - - VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT); - m_Stack.pop_back(); -} - -void VmaJsonWriter::BeginArray(bool singleLine) -{ - VMA_ASSERT(!m_InsideString); - - BeginValue(false); - m_SB.Add('['); - - StackItem item; - item.type = COLLECTION_TYPE_ARRAY; - item.valueCount = 0; - item.singleLineMode = singleLine; - m_Stack.push_back(item); -} - -void VmaJsonWriter::EndArray() -{ - VMA_ASSERT(!m_InsideString); - - WriteIndent(true); - m_SB.Add(']'); - - VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY); - m_Stack.pop_back(); -} - -void VmaJsonWriter::WriteString(const char* pStr) -{ - BeginString(pStr); - EndString(); -} - -void VmaJsonWriter::BeginString(const char* pStr) -{ - VMA_ASSERT(!m_InsideString); - - BeginValue(true); - m_SB.Add('"'); - m_InsideString = true; - if (pStr != VMA_NULL && pStr[0] != '\0') - { - ContinueString(pStr); - } -} - -void VmaJsonWriter::ContinueString(const char* pStr) -{ - VMA_ASSERT(m_InsideString); - - const size_t strLen = strlen(pStr); - for (size_t i = 0; i < strLen; ++i) - { - char ch = pStr[i]; - if (ch == '\\') - { - m_SB.Add("\\\\"); - } - else if (ch == '"') - { - m_SB.Add("\\\""); - } - else if (ch >= 32) - { - m_SB.Add(ch); - } - else switch (ch) - { - case '\b': - m_SB.Add("\\b"); - break; - case '\f': - m_SB.Add("\\f"); - break; - case '\n': - m_SB.Add("\\n"); - break; - case '\r': - m_SB.Add("\\r"); - break; - case '\t': - m_SB.Add("\\t"); - break; - default: - VMA_ASSERT(0 && "Character not currently supported."); - break; - } - } -} - -void VmaJsonWriter::ContinueString(uint32_t n) -{ - VMA_ASSERT(m_InsideString); - m_SB.AddNumber(n); -} - -void VmaJsonWriter::ContinueString(uint64_t n) -{ - VMA_ASSERT(m_InsideString); - m_SB.AddNumber(n); -} - -void VmaJsonWriter::ContinueString_Size(size_t n) -{ - VMA_ASSERT(m_InsideString); - // Fix for AppleClang incorrect type casting - // TODO: Change to if constexpr when C++17 used as minimal standard - WriteSize(n, std::is_same{}); -} - -void VmaJsonWriter::ContinueString_Pointer(const void* ptr) -{ - 
VMA_ASSERT(m_InsideString); - m_SB.AddPointer(ptr); -} - -void VmaJsonWriter::EndString(const char* pStr) -{ - VMA_ASSERT(m_InsideString); - if (pStr != VMA_NULL && pStr[0] != '\0') - { - ContinueString(pStr); - } - m_SB.Add('"'); - m_InsideString = false; -} - -void VmaJsonWriter::WriteNumber(uint32_t n) -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - m_SB.AddNumber(n); -} - -void VmaJsonWriter::WriteNumber(uint64_t n) -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - m_SB.AddNumber(n); -} - -void VmaJsonWriter::WriteSize(size_t n) -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - // Fix for AppleClang incorrect type casting - // TODO: Change to if constexpr when C++17 used as minimal standard - WriteSize(n, std::is_same{}); -} - -void VmaJsonWriter::WriteBool(bool b) -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - m_SB.Add(b ? "true" : "false"); -} - -void VmaJsonWriter::WriteNull() -{ - VMA_ASSERT(!m_InsideString); - BeginValue(false); - m_SB.Add("null"); -} - -void VmaJsonWriter::BeginValue(bool isString) -{ - if (!m_Stack.empty()) - { - StackItem& currItem = m_Stack.back(); - if (currItem.type == COLLECTION_TYPE_OBJECT && - currItem.valueCount % 2 == 0) - { - VMA_ASSERT(isString); - } - - if (currItem.type == COLLECTION_TYPE_OBJECT && - currItem.valueCount % 2 != 0) - { - m_SB.Add(": "); - } - else if (currItem.valueCount > 0) - { - m_SB.Add(", "); - WriteIndent(); - } - else - { - WriteIndent(); - } - ++currItem.valueCount; - } -} - -void VmaJsonWriter::WriteIndent(bool oneLess) -{ - if (!m_Stack.empty() && !m_Stack.back().singleLineMode) - { - m_SB.AddNewLine(); - - size_t count = m_Stack.size(); - if (count > 0 && oneLess) - { - --count; - } - for (size_t i = 0; i < count; ++i) - { - m_SB.Add(INDENT); - } - } -} -#endif // _VMA_JSON_WRITER_FUNCTIONS - -static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) -{ - json.BeginObject(); - - json.WriteString("BlockCount"); - json.WriteNumber(stat.statistics.blockCount); - json.WriteString("BlockBytes"); - json.WriteNumber(stat.statistics.blockBytes); - json.WriteString("AllocationCount"); - json.WriteNumber(stat.statistics.allocationCount); - json.WriteString("AllocationBytes"); - json.WriteNumber(stat.statistics.allocationBytes); - json.WriteString("UnusedRangeCount"); - json.WriteNumber(stat.unusedRangeCount); - - if (stat.statistics.allocationCount > 1) - { - json.WriteString("AllocationSizeMin"); - json.WriteNumber(stat.allocationSizeMin); - json.WriteString("AllocationSizeMax"); - json.WriteNumber(stat.allocationSizeMax); - } - if (stat.unusedRangeCount > 1) - { - json.WriteString("UnusedRangeSizeMin"); - json.WriteNumber(stat.unusedRangeSizeMin); - json.WriteString("UnusedRangeSizeMax"); - json.WriteNumber(stat.unusedRangeSizeMax); - } - json.EndObject(); -} -#endif // _VMA_JSON_WRITER - -#ifndef _VMA_MAPPING_HYSTERESIS - -class VmaMappingHysteresis -{ - VMA_CLASS_NO_COPY(VmaMappingHysteresis) -public: - VmaMappingHysteresis() = default; - - uint32_t GetExtraMapping() const { return m_ExtraMapping; } - - // Call when Map was called. - // Returns true if switched to extra +1 mapping reference count. 
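// -----------------------------------------------------------------------------
// Call-pattern sketch (editor's addition, not part of the VMA sources): how the
// owning memory block is expected to drive this hysteresis object, based on the
// comments above. PostMap/PostUnmap mirror map/unmap calls, PostAlloc/PostFree
// mirror suballocations, and the boolean results tell the block when to keep one
// extra mapping reference alive and when it may be released again.
//
//   // inside the block's Map():
//   if (m_MappingHysteresis.PostMap())  { /* block keeps an extra mapping reference */ }
//   // inside the block's Unmap():
//   m_MappingHysteresis.PostUnmap();
//   // when an allocation is made / freed in the block:
//   m_MappingHysteresis.PostAlloc();
//   if (m_MappingHysteresis.PostFree()) { /* extra reference dropped; block may unmap if unused */ }
// -----------------------------------------------------------------------------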
- bool PostMap() - { -#if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 0) - { - ++m_MajorCounter; - if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) - { - m_ExtraMapping = 1; - m_MajorCounter = 0; - m_MinorCounter = 0; - return true; - } - } - else // m_ExtraMapping == 1 - PostMinorCounter(); -#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED - return false; - } - - // Call when Unmap was called. - void PostUnmap() - { -#if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 0) - ++m_MajorCounter; - else // m_ExtraMapping == 1 - PostMinorCounter(); -#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED - } - - // Call when allocation was made from the memory block. - void PostAlloc() - { -#if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 1) - ++m_MajorCounter; - else // m_ExtraMapping == 0 - PostMinorCounter(); -#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED - } - - // Call when allocation was freed from the memory block. - // Returns true if switched to extra -1 mapping reference count. - bool PostFree() - { -#if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 1) - { - ++m_MajorCounter; - if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && - m_MajorCounter > m_MinorCounter + 1) - { - m_ExtraMapping = 0; - m_MajorCounter = 0; - m_MinorCounter = 0; - return true; - } - } - else // m_ExtraMapping == 0 - PostMinorCounter(); -#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED - return false; - } - -private: - static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7; - - uint32_t m_MinorCounter = 0; - uint32_t m_MajorCounter = 0; - uint32_t m_ExtraMapping = 0; // 0 or 1. - - void PostMinorCounter() - { - if(m_MinorCounter < m_MajorCounter) - { - ++m_MinorCounter; - } - else if(m_MajorCounter > 0) - { - --m_MajorCounter; - --m_MinorCounter; - } - } -}; - -#endif // _VMA_MAPPING_HYSTERESIS - -#ifndef _VMA_DEVICE_MEMORY_BLOCK -/* -Represents a single block of device memory (`VkDeviceMemory`) with all the -data about its regions (aka suballocations, #VmaAllocation), assigned and free. - -Thread-safety: -- Access to m_pMetadata must be externally synchronized. -- Map, Unmap, Bind* are synchronized internally. -*/ -class VmaDeviceMemoryBlock -{ - VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock) -public: - VmaBlockMetadata* m_pMetadata; - - VmaDeviceMemoryBlock(VmaAllocator hAllocator); - ~VmaDeviceMemoryBlock(); - - // Always call after construction. - void Init( - VmaAllocator hAllocator, - VmaPool hParentPool, - uint32_t newMemoryTypeIndex, - VkDeviceMemory newMemory, - VkDeviceSize newSize, - uint32_t id, - uint32_t algorithm, - VkDeviceSize bufferImageGranularity); - // Always call before destruction. - void Destroy(VmaAllocator allocator); - - VmaPool GetParentPool() const { return m_hParentPool; } - VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } - uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } - uint32_t GetId() const { return m_Id; } - void* GetMappedData() const { return m_pMappedData; } - uint32_t GetMapRefCount() const { return m_MapCount; } - - // Call when allocation/free was made from m_pMetadata. - // Used for m_MappingHysteresis. - void PostAlloc() { m_MappingHysteresis.PostAlloc(); } - void PostFree(VmaAllocator hAllocator); - - // Validates all data structures inside this object. If not valid, returns false. - bool Validate() const; - VkResult CheckCorruption(VmaAllocator hAllocator); - - // ppData can be null. 
- VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); - void Unmap(VmaAllocator hAllocator, uint32_t count); - - VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); - VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); - - VkResult BindBufferMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkBuffer hBuffer, - const void* pNext); - VkResult BindImageMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkImage hImage, - const void* pNext); - -private: - VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. - uint32_t m_MemoryTypeIndex; - uint32_t m_Id; - VkDeviceMemory m_hMemory; - - /* - Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. - Also protects m_MapCount, m_pMappedData. - Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. - */ - VMA_MUTEX m_MapAndBindMutex; - VmaMappingHysteresis m_MappingHysteresis; - uint32_t m_MapCount; - void* m_pMappedData; -}; -#endif // _VMA_DEVICE_MEMORY_BLOCK - -#ifndef _VMA_ALLOCATION_T -struct VmaAllocation_T -{ - friend struct VmaDedicatedAllocationListItemTraits; - - enum FLAGS - { - FLAG_PERSISTENT_MAP = 0x01, - FLAG_MAPPING_ALLOWED = 0x02, - }; - -public: - enum ALLOCATION_TYPE - { - ALLOCATION_TYPE_NONE, - ALLOCATION_TYPE_BLOCK, - ALLOCATION_TYPE_DEDICATED, - }; - - // This struct is allocated using VmaPoolAllocator. - VmaAllocation_T(bool mappingAllowed); - ~VmaAllocation_T(); - - void InitBlockAllocation( - VmaDeviceMemoryBlock* block, - VmaAllocHandle allocHandle, - VkDeviceSize alignment, - VkDeviceSize size, - uint32_t memoryTypeIndex, - VmaSuballocationType suballocationType, - bool mapped); - // pMappedData not null means allocation is created with MAPPED flag. 
- void InitDedicatedAllocation( - VmaPool hParentPool, - uint32_t memoryTypeIndex, - VkDeviceMemory hMemory, - VmaSuballocationType suballocationType, - void* pMappedData, - VkDeviceSize size); - - ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } - VkDeviceSize GetAlignment() const { return m_Alignment; } - VkDeviceSize GetSize() const { return m_Size; } - void* GetUserData() const { return m_pUserData; } - const char* GetName() const { return m_pName; } - VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } - - VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } - uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } - bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } - bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } - - void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } - void SetName(VmaAllocator hAllocator, const char* pName); - void FreeName(VmaAllocator hAllocator); - uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation); - VmaAllocHandle GetAllocHandle() const; - VkDeviceSize GetOffset() const; - VmaPool GetParentPool() const; - VkDeviceMemory GetMemory() const; - void* GetMappedData() const; - - void BlockAllocMap(); - void BlockAllocUnmap(); - VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); - void DedicatedAllocUnmap(VmaAllocator hAllocator); - -#if VMA_STATS_STRING_ENABLED - uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; } - - void InitBufferImageUsage(uint32_t bufferImageUsage); - void PrintParameters(class VmaJsonWriter& json) const; -#endif - -private: - // Allocation out of VmaDeviceMemoryBlock. - struct BlockAllocation - { - VmaDeviceMemoryBlock* m_Block; - VmaAllocHandle m_AllocHandle; - }; - // Allocation for an object that has its own private VkDeviceMemory. - struct DedicatedAllocation - { - VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. - VkDeviceMemory m_hMemory; - void* m_pMappedData; // Not null means memory is mapped. - VmaAllocation_T* m_Prev; - VmaAllocation_T* m_Next; - }; - union - { - // Allocation out of VmaDeviceMemoryBlock. - BlockAllocation m_BlockAllocation; - // Allocation for an object that has its own private VkDeviceMemory. - DedicatedAllocation m_DedicatedAllocation; - }; - - VkDeviceSize m_Alignment; - VkDeviceSize m_Size; - void* m_pUserData; - char* m_pName; - uint32_t m_MemoryTypeIndex; - uint8_t m_Type; // ALLOCATION_TYPE - uint8_t m_SuballocationType; // VmaSuballocationType - // Reference counter for vmaMapMemory()/vmaUnmapMemory(). - uint8_t m_MapCount; - uint8_t m_Flags; // enum FLAGS -#if VMA_STATS_STRING_ENABLED - uint32_t m_BufferImageUsage; // 0 if unknown. 
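// -----------------------------------------------------------------------------
// Usage sketch (editor's addition, not part of the VMA sources) of the public
// API behaviour the members above implement: an allocation created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT stays persistently mapped (FLAG_PERSISTENT_MAP),
// while vmaMapMemory()/vmaUnmapMemory() drive the m_MapCount reference counter.
// The buffer parameters below are placeholders chosen for the example only.
VkResult CreateMappedStagingBuffer(VmaAllocator allocator, VkDeviceSize size,
    VkBuffer* outBuffer, VmaAllocation* outAllocation, void** outMappedData)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = size;
    bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
        VMA_ALLOCATION_CREATE_MAPPED_BIT; // persistently mapped for its whole lifetime

    VmaAllocationInfo allocInfo = {};
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
        outBuffer, outAllocation, &allocInfo);
    if (res == VK_SUCCESS)
        *outMappedData = allocInfo.pMappedData; // valid until the allocation is destroyed
    return res;
}
// -----------------------------------------------------------------------------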
-#endif -}; -#endif // _VMA_ALLOCATION_T - -#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS -struct VmaDedicatedAllocationListItemTraits -{ - typedef VmaAllocation_T ItemType; - - static ItemType* GetPrev(const ItemType* item) - { - VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - return item->m_DedicatedAllocation.m_Prev; - } - static ItemType* GetNext(const ItemType* item) - { - VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - return item->m_DedicatedAllocation.m_Next; - } - static ItemType*& AccessPrev(ItemType* item) - { - VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - return item->m_DedicatedAllocation.m_Prev; - } - static ItemType*& AccessNext(ItemType* item) - { - VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - return item->m_DedicatedAllocation.m_Next; - } -}; -#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS - -#ifndef _VMA_DEDICATED_ALLOCATION_LIST -/* -Stores linked list of VmaAllocation_T objects. -Thread-safe, synchronized internally. -*/ -class VmaDedicatedAllocationList -{ -public: - VmaDedicatedAllocationList() {} - ~VmaDedicatedAllocationList(); - - void Init(bool useMutex) { m_UseMutex = useMutex; } - bool Validate(); - - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); - void AddStatistics(VmaStatistics& inoutStats); -#if VMA_STATS_STRING_ENABLED - // Writes JSON array with the list of allocations. - void BuildStatsString(VmaJsonWriter& json); -#endif - - bool IsEmpty(); - void Register(VmaAllocation alloc); - void Unregister(VmaAllocation alloc); - -private: - typedef VmaIntrusiveLinkedList DedicatedAllocationLinkedList; - - bool m_UseMutex = true; - VMA_RW_MUTEX m_Mutex; - DedicatedAllocationLinkedList m_AllocationList; -}; - -#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS - -VmaDedicatedAllocationList::~VmaDedicatedAllocationList() -{ - VMA_HEAVY_ASSERT(Validate()); - - if (!m_AllocationList.IsEmpty()) - { - VMA_ASSERT(false && "Unfreed dedicated allocations found!"); - } -} - -bool VmaDedicatedAllocationList::Validate() -{ - const size_t declaredCount = m_AllocationList.GetCount(); - size_t actualCount = 0; - VmaMutexLockRead lock(m_Mutex, m_UseMutex); - for (VmaAllocation alloc = m_AllocationList.Front(); - alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) - { - ++actualCount; - } - VMA_VALIDATE(actualCount == declaredCount); - - return true; -} - -void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) -{ - for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) - { - const VkDeviceSize size = item->GetSize(); - inoutStats.statistics.blockCount++; - inoutStats.statistics.blockBytes += size; - VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize()); - } -} - -void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) -{ - VmaMutexLockRead lock(m_Mutex, m_UseMutex); - - const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount(); - inoutStats.blockCount += allocCount; - inoutStats.allocationCount += allocCount; - - for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) - { - const VkDeviceSize size = item->GetSize(); - inoutStats.blockBytes += size; - inoutStats.allocationBytes += size; - } -} - -#if VMA_STATS_STRING_ENABLED -void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json) -{ - VmaMutexLockRead 
lock(m_Mutex, m_UseMutex); - json.BeginArray(); - for (VmaAllocation alloc = m_AllocationList.Front(); - alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) - { - json.BeginObject(true); - alloc->PrintParameters(json); - json.EndObject(); - } - json.EndArray(); -} -#endif // VMA_STATS_STRING_ENABLED - -bool VmaDedicatedAllocationList::IsEmpty() -{ - VmaMutexLockRead lock(m_Mutex, m_UseMutex); - return m_AllocationList.IsEmpty(); -} - -void VmaDedicatedAllocationList::Register(VmaAllocation alloc) -{ - VmaMutexLockWrite lock(m_Mutex, m_UseMutex); - m_AllocationList.PushBack(alloc); -} - -void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc) -{ - VmaMutexLockWrite lock(m_Mutex, m_UseMutex); - m_AllocationList.Remove(alloc); -} -#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS -#endif // _VMA_DEDICATED_ALLOCATION_LIST - -#ifndef _VMA_SUBALLOCATION -/* -Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as -allocated memory block or free. -*/ -struct VmaSuballocation -{ - VkDeviceSize offset; - VkDeviceSize size; - void* userData; - VmaSuballocationType type; -}; - -// Comparator for offsets. -struct VmaSuballocationOffsetLess -{ - bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const - { - return lhs.offset < rhs.offset; - } -}; - -struct VmaSuballocationOffsetGreater -{ - bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const - { - return lhs.offset > rhs.offset; - } -}; - -struct VmaSuballocationItemSizeLess -{ - bool operator()(const VmaSuballocationList::iterator lhs, - const VmaSuballocationList::iterator rhs) const - { - return lhs->size < rhs->size; - } - - bool operator()(const VmaSuballocationList::iterator lhs, - VkDeviceSize rhsSize) const - { - return lhs->size < rhsSize; - } -}; -#endif // _VMA_SUBALLOCATION - -#ifndef _VMA_ALLOCATION_REQUEST -/* -Parameters of planned allocation inside a VmaDeviceMemoryBlock. -item points to a FREE suballocation. -*/ -struct VmaAllocationRequest -{ - VmaAllocHandle allocHandle; - VkDeviceSize size; - VmaSuballocationList::iterator item; - void* customData; - uint64_t algorithmData; - VmaAllocationRequestType type; -}; -#endif // _VMA_ALLOCATION_REQUEST - -#ifndef _VMA_BLOCK_METADATA -/* -Data structure used for bookkeeping of allocations and unused ranges of memory -in a single VkDeviceMemory block. -*/ -class VmaBlockMetadata -{ -public: - // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. - VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); - virtual ~VmaBlockMetadata() = default; - - virtual void Init(VkDeviceSize size) { m_Size = size; } - bool IsVirtual() const { return m_IsVirtual; } - VkDeviceSize GetSize() const { return m_Size; } - - // Validates all data structures inside this object. If not valid, returns false. - virtual bool Validate() const = 0; - virtual size_t GetAllocationCount() const = 0; - virtual size_t GetFreeRegionsCount() const = 0; - virtual VkDeviceSize GetSumFreeSize() const = 0; - // Returns true if this block is empty - contains only single free suballocation. 
- virtual bool IsEmpty() const = 0; - virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; - virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; - virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; - - virtual VmaAllocHandle GetAllocationListBegin() const = 0; - virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; - virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; - - // Shouldn't modify blockCount. - virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; - virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; - -#if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; -#endif - - // Tries to find a place for suballocation with given parameters inside this block. - // If succeeded, fills pAllocationRequest and returns true. - // If failed, returns false. - virtual bool CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) = 0; - - virtual VkResult CheckCorruption(const void* pBlockData) = 0; - - // Makes actual allocation based on request. Request must already be checked and valid. - virtual void Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) = 0; - - // Frees suballocation assigned to given memory region. - virtual void Free(VmaAllocHandle allocHandle) = 0; - - // Frees all allocations. - // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations! - virtual void Clear() = 0; - - virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0; - virtual void DebugLogAllAllocations() const = 0; - -protected: - const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } - VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } - VkDeviceSize GetDebugMargin() const { return IsVirtual() ? 0 : VMA_DEBUG_MARGIN; } - - void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const; -#if VMA_STATS_STRING_ENABLED - // mapRefCount == UINT32_MAX means unspecified. 
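// -----------------------------------------------------------------------------
// Call-pattern sketch (editor's addition, not part of the VMA sources): the
// two-phase allocation protocol this interface describes. A caller first asks
// the metadata to find space, and only commits it when the request succeeded;
// the strategy value is one of the VMA_ALLOCATION_CREATE_STRATEGY_* flags.
//
//   VmaAllocationRequest request = {};
//   if (metadata.CreateAllocationRequest(
//           allocSize, allocAlignment,
//           /*upperAddress=*/false,
//           VMA_SUBALLOCATION_TYPE_BUFFER,
//           VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
//           &request))
//   {
//       metadata.Alloc(request, VMA_SUBALLOCATION_TYPE_BUFFER, userData); // commit
//   }
//   else
//   {
//       // no space in this block - try another block or allocate a new one
//   }
// -----------------------------------------------------------------------------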
- void PrintDetailedMap_Begin(class VmaJsonWriter& json, - VkDeviceSize unusedBytes, - size_t allocationCount, - size_t unusedRangeCount) const; - void PrintDetailedMap_Allocation(class VmaJsonWriter& json, - VkDeviceSize offset, VkDeviceSize size, void* userData) const; - void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, - VkDeviceSize offset, - VkDeviceSize size) const; - void PrintDetailedMap_End(class VmaJsonWriter& json) const; -#endif - -private: - VkDeviceSize m_Size; - const VkAllocationCallbacks* m_pAllocationCallbacks; - const VkDeviceSize m_BufferImageGranularity; - const bool m_IsVirtual; -}; - -#ifndef _VMA_BLOCK_METADATA_FUNCTIONS -VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual) - : m_Size(0), - m_pAllocationCallbacks(pAllocationCallbacks), - m_BufferImageGranularity(bufferImageGranularity), - m_IsVirtual(isVirtual) {} - -void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const -{ - if (IsVirtual()) - { - VMA_DEBUG_LOG("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData); - } - else - { - VMA_ASSERT(userData != VMA_NULL); - VmaAllocation allocation = reinterpret_cast(userData); - - userData = allocation->GetUserData(); - const char* name = allocation->GetName(); - -#if VMA_STATS_STRING_ENABLED - VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u", - offset, size, userData, name ? name : "vma_empty", - VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], - allocation->GetBufferImageUsage()); -#else - VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u", - offset, size, userData, name ? 
name : "vma_empty", - (uint32_t)allocation->GetSuballocationType()); -#endif // VMA_STATS_STRING_ENABLED - } - -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, - VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const -{ - json.WriteString("TotalBytes"); - json.WriteNumber(GetSize()); - - json.WriteString("UnusedBytes"); - json.WriteSize(unusedBytes); - - json.WriteString("Allocations"); - json.WriteSize(allocationCount); - - json.WriteString("UnusedRanges"); - json.WriteSize(unusedRangeCount); - - json.WriteString("Suballocations"); - json.BeginArray(); -} - -void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, - VkDeviceSize offset, VkDeviceSize size, void* userData) const -{ - json.BeginObject(true); - - json.WriteString("Offset"); - json.WriteNumber(offset); - - if (IsVirtual()) - { - json.WriteString("Size"); - json.WriteNumber(size); - if (userData) - { - json.WriteString("CustomData"); - json.BeginString(); - json.ContinueString_Pointer(userData); - json.EndString(); - } - } - else - { - ((VmaAllocation)userData)->PrintParameters(json); - } - - json.EndObject(); -} - -void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, - VkDeviceSize offset, VkDeviceSize size) const -{ - json.BeginObject(true); - - json.WriteString("Offset"); - json.WriteNumber(offset); - - json.WriteString("Type"); - json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]); - - json.WriteString("Size"); - json.WriteNumber(size); - - json.EndObject(); -} - -void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const -{ - json.EndArray(); -} -#endif // VMA_STATS_STRING_ENABLED -#endif // _VMA_BLOCK_METADATA_FUNCTIONS -#endif // _VMA_BLOCK_METADATA - -#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY -// Before deleting object of this class remember to call 'Destroy()' -class VmaBlockBufferImageGranularity final -{ -public: - struct ValidationContext - { - const VkAllocationCallbacks* allocCallbacks; - uint16_t* pageAllocs; - }; - - VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity); - ~VmaBlockBufferImageGranularity(); - - bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; } - - void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size); - // Before destroying object you must call free it's memory - void Destroy(const VkAllocationCallbacks* pAllocationCallbacks); - - void RoundupAllocRequest(VmaSuballocationType allocType, - VkDeviceSize& inOutAllocSize, - VkDeviceSize& inOutAllocAlignment) const; - - bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, - VkDeviceSize allocSize, - VkDeviceSize blockOffset, - VkDeviceSize blockSize, - VmaSuballocationType allocType) const; - - void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size); - void FreePages(VkDeviceSize offset, VkDeviceSize size); - void Clear(); - - ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks, - bool isVirutal) const; - bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const; - bool FinishValidation(ValidationContext& ctx) const; - -private: - static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256; - - struct RegionInfo - { - uint8_t allocType; - uint16_t allocCount; - }; - - VkDeviceSize m_BufferImageGranularity; - uint32_t m_RegionCount; - RegionInfo* m_RegionInfo; - - uint32_t GetStartPage(VkDeviceSize offset) 
const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); } - uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); } - - uint32_t OffsetToPageIndex(VkDeviceSize offset) const; - void AllocPage(RegionInfo& page, uint8_t allocType); -}; - -#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS -VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity) - : m_BufferImageGranularity(bufferImageGranularity), - m_RegionCount(0), - m_RegionInfo(VMA_NULL) {} - -VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity() -{ - VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!"); -} - -void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size) -{ - if (IsEnabled()) - { - m_RegionCount = static_cast(VmaDivideRoundingUp(size, m_BufferImageGranularity)); - m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount); - memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); - } -} - -void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks) -{ - if (m_RegionInfo) - { - vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount); - m_RegionInfo = VMA_NULL; - } -} - -void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType, - VkDeviceSize& inOutAllocSize, - VkDeviceSize& inOutAllocAlignment) const -{ - if (m_BufferImageGranularity > 1 && - m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY) - { - if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || - allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || - allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) - { - inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity); - inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity); - } - } -} - -bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, - VkDeviceSize allocSize, - VkDeviceSize blockOffset, - VkDeviceSize blockSize, - VmaSuballocationType allocType) const -{ - if (IsEnabled()) - { - uint32_t startPage = GetStartPage(inOutAllocOffset); - if (m_RegionInfo[startPage].allocCount > 0 && - VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[startPage].allocType), allocType)) - { - inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity); - if (blockSize < allocSize + inOutAllocOffset - blockOffset) - return true; - ++startPage; - } - uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize); - if (endPage != startPage && - m_RegionInfo[endPage].allocCount > 0 && - VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[endPage].allocType), allocType)) - { - return true; - } - } - return false; -} - -void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size) -{ - if (IsEnabled()) - { - uint32_t startPage = GetStartPage(offset); - AllocPage(m_RegionInfo[startPage], allocType); - - uint32_t endPage = GetEndPage(offset, size); - if (startPage != endPage) - AllocPage(m_RegionInfo[endPage], allocType); - } -} - -void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size) -{ - if (IsEnabled()) - { - uint32_t startPage = GetStartPage(offset); - --m_RegionInfo[startPage].allocCount; - if (m_RegionInfo[startPage].allocCount == 0) - m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; 
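// Illustrative aside (hypothetical helper, not VMA's own code): GetStartPage/GetEndPage above map a
// byte range onto granularity-sized pages by masking the offset to a page boundary and then shifting
// by log2(bufferImageGranularity); the shift form only works because the granularity is treated as a
// power of two (see OffsetToPageIndex below, which uses VMA_BITSCAN_MSB as that shift). A minimal
// standalone sketch of the same mapping, with a plain log2 loop instead of the bit-scan macro:
#if 0
#include <cstdint>

static uint32_t PageIndex(uint64_t offset, uint64_t granularity)
{
    // granularity is assumed to be a power of two here, matching the shift-based code above.
    uint32_t shift = 0;
    while ((granularity >> shift) > 1)
        ++shift;
    return static_cast<uint32_t>(offset >> shift);
}

static void ExamplePages()
{
    const uint64_t granularity = 1024;
    const uint64_t offset = 1500, size = 2000;
    // First page touched: offset masked down to a page boundary (1024) -> page 1.
    const uint32_t startPage = PageIndex(offset & ~(granularity - 1), granularity);
    // Last page touched: last byte is offset + size - 1 = 3499, masked down to 3072 -> page 3.
    const uint32_t endPage = PageIndex((offset + size - 1) & ~(granularity - 1), granularity);
    (void)startPage; (void)endPage;
}
#endif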
- uint32_t endPage = GetEndPage(offset, size); - if (startPage != endPage) - { - --m_RegionInfo[endPage].allocCount; - if (m_RegionInfo[endPage].allocCount == 0) - m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; - } - } -} - -void VmaBlockBufferImageGranularity::Clear() -{ - if (m_RegionInfo) - memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); -} - -VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation( - const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const -{ - ValidationContext ctx{ pAllocationCallbacks, VMA_NULL }; - if (!isVirutal && IsEnabled()) - { - ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount); - memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t)); - } - return ctx; -} - -bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, - VkDeviceSize offset, VkDeviceSize size) const -{ - if (IsEnabled()) - { - uint32_t start = GetStartPage(offset); - ++ctx.pageAllocs[start]; - VMA_VALIDATE(m_RegionInfo[start].allocCount > 0); - - uint32_t end = GetEndPage(offset, size); - if (start != end) - { - ++ctx.pageAllocs[end]; - VMA_VALIDATE(m_RegionInfo[end].allocCount > 0); - } - } - return true; -} - -bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const -{ - // Check proper page structure - if (IsEnabled()) - { - VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!"); - - for (uint32_t page = 0; page < m_RegionCount; ++page) - { - VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount); - } - vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount); - ctx.pageAllocs = VMA_NULL; - } - return true; -} - -uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const -{ - return static_cast(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity)); -} - -void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) -{ - // When current alloc type is free then it can be overriden by new type - if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE)) - page.allocType = allocType; - - ++page.allocCount; -} -#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS -#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY - -#if 0 -#ifndef _VMA_BLOCK_METADATA_GENERIC -class VmaBlockMetadata_Generic : public VmaBlockMetadata -{ - friend class VmaDefragmentationAlgorithm_Generic; - friend class VmaDefragmentationAlgorithm_Fast; - VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic) -public: - VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); - virtual ~VmaBlockMetadata_Generic() = default; - - size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; } - VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } - bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); } - void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); } - VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }; - - void Init(VkDeviceSize size) override; - bool Validate() const override; - - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; - void AddStatistics(VmaStatistics& inoutStats) const override; - -#if VMA_STATS_STRING_ENABLED - void 
PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; -#endif - - bool CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) override; - - VkResult CheckCorruption(const void* pBlockData) override; - - void Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) override; - - void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; - void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; - VmaAllocHandle GetAllocationListBegin() const override; - VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; - void Clear() override; - void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; - void DebugLogAllAllocations() const override; - -private: - uint32_t m_FreeCount; - VkDeviceSize m_SumFreeSize; - VmaSuballocationList m_Suballocations; - // Suballocations that are free. Sorted by size, ascending. - VmaVector> m_FreeSuballocationsBySize; - - VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); } - - VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const; - bool ValidateFreeSuballocationList() const; - - // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. - // If yes, fills pOffset and returns true. If no, returns false. - bool CheckAllocation( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - VmaSuballocationList::const_iterator suballocItem, - VmaAllocHandle* pAllocHandle) const; - - // Given free suballocation, it merges it with following one, which must also be free. - void MergeFreeWithNext(VmaSuballocationList::iterator item); - // Releases given suballocation, making it free. - // Merges it with adjacent free suballocations if applicable. - // Returns iterator to new free suballocation at this place. - VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem); - // Given free suballocation, it inserts it into sorted list of - // m_FreeSuballocationsBySize if it is suitable. - void RegisterFreeSuballocation(VmaSuballocationList::iterator item); - // Given free suballocation, it removes it from sorted list of - // m_FreeSuballocationsBySize if it is suitable. 
- void UnregisterFreeSuballocation(VmaSuballocationList::iterator item); -}; - -#ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS -VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual) - : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), - m_FreeCount(0), - m_SumFreeSize(0), - m_Suballocations(VmaStlAllocator(pAllocationCallbacks)), - m_FreeSuballocationsBySize(VmaStlAllocator(pAllocationCallbacks)) {} - -void VmaBlockMetadata_Generic::Init(VkDeviceSize size) -{ - VmaBlockMetadata::Init(size); - - m_FreeCount = 1; - m_SumFreeSize = size; - - VmaSuballocation suballoc = {}; - suballoc.offset = 0; - suballoc.size = size; - suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - - m_Suballocations.push_back(suballoc); - m_FreeSuballocationsBySize.push_back(m_Suballocations.begin()); -} - -bool VmaBlockMetadata_Generic::Validate() const -{ - VMA_VALIDATE(!m_Suballocations.empty()); - - // Expected offset of new suballocation as calculated from previous ones. - VkDeviceSize calculatedOffset = 0; - // Expected number of free suballocations as calculated from traversing their list. - uint32_t calculatedFreeCount = 0; - // Expected sum size of free suballocations as calculated from traversing their list. - VkDeviceSize calculatedSumFreeSize = 0; - // Expected number of free suballocations that should be registered in - // m_FreeSuballocationsBySize calculated from traversing their list. - size_t freeSuballocationsToRegister = 0; - // True if previous visited suballocation was free. - bool prevFree = false; - - const VkDeviceSize debugMargin = GetDebugMargin(); - - for (const auto& subAlloc : m_Suballocations) - { - // Actual offset of this suballocation doesn't match expected one. - VMA_VALIDATE(subAlloc.offset == calculatedOffset); - - const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE); - // Two adjacent free suballocations are invalid. They should be merged. - VMA_VALIDATE(!prevFree || !currFree); - - VmaAllocation alloc = (VmaAllocation)subAlloc.userData; - if (!IsVirtual()) - { - VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); - } - - if (currFree) - { - calculatedSumFreeSize += subAlloc.size; - ++calculatedFreeCount; - ++freeSuballocationsToRegister; - - // Margin required between allocations - every free space must be at least that large. - VMA_VALIDATE(subAlloc.size >= debugMargin); - } - else - { - if (!IsVirtual()) - { - VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1); - VMA_VALIDATE(alloc->GetSize() == subAlloc.size); - } - - // Margin required between allocations - previous allocation must be free. - VMA_VALIDATE(debugMargin == 0 || prevFree); - } - - calculatedOffset += subAlloc.size; - prevFree = currFree; - } - - // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't - // match expected one. - VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister); - - VkDeviceSize lastSize = 0; - for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i) - { - VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i]; - - // Only free suballocations can be registered in m_FreeSuballocationsBySize. - VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE); - // They must be sorted by size ascending. - VMA_VALIDATE(suballocItem->size >= lastSize); - - lastSize = suballocItem->size; - } - - // Check if totals match calculated values. 
- VMA_VALIDATE(ValidateFreeSuballocationList()); - VMA_VALIDATE(calculatedOffset == GetSize()); - VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize); - VMA_VALIDATE(calculatedFreeCount == m_FreeCount); - - return true; -} - -void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const -{ - const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); - inoutStats.statistics.blockCount++; - inoutStats.statistics.blockBytes += GetSize(); - - for (const auto& suballoc : m_Suballocations) - { - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); - else - VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size); - } -} - -void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const -{ - inoutStats.blockCount++; - inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount; - inoutStats.blockBytes += GetSize(); - inoutStats.allocationBytes += GetSize() - m_SumFreeSize; -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const -{ - PrintDetailedMap_Begin(json, - m_SumFreeSize, // unusedBytes - m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount - m_FreeCount, // unusedRangeCount - mapRefCount); - - for (const auto& suballoc : m_Suballocations) - { - if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) - { - PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size); - } - else - { - PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); - } - } - - PrintDetailedMap_End(json); -} -#endif // VMA_STATS_STRING_ENABLED - -bool VmaBlockMetadata_Generic::CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) -{ - VMA_ASSERT(allocSize > 0); - VMA_ASSERT(!upperAddress); - VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); - VMA_ASSERT(pAllocationRequest != VMA_NULL); - VMA_HEAVY_ASSERT(Validate()); - - allocSize = AlignAllocationSize(allocSize); - - pAllocationRequest->type = VmaAllocationRequestType::Normal; - pAllocationRequest->size = allocSize; - - const VkDeviceSize debugMargin = GetDebugMargin(); - - // There is not enough total free space in this block to fulfill the request: Early return. - if (m_SumFreeSize < allocSize + debugMargin) - { - return false; - } - - // New algorithm, efficiently searching freeSuballocationsBySize. - const size_t freeSuballocCount = m_FreeSuballocationsBySize.size(); - if (freeSuballocCount > 0) - { - if (strategy == 0 || - strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT) - { - // Find first free suballocation with size not less than allocSize + debugMargin. 
- VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( - m_FreeSuballocationsBySize.data(), - m_FreeSuballocationsBySize.data() + freeSuballocCount, - allocSize + debugMargin, - VmaSuballocationItemSizeLess()); - size_t index = it - m_FreeSuballocationsBySize.data(); - for (; index < freeSuballocCount; ++index) - { - if (CheckAllocation( - allocSize, - allocAlignment, - allocType, - m_FreeSuballocationsBySize[index], - &pAllocationRequest->allocHandle)) - { - pAllocationRequest->item = m_FreeSuballocationsBySize[index]; - return true; - } - } - } - else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET) - { - for (VmaSuballocationList::iterator it = m_Suballocations.begin(); - it != m_Suballocations.end(); - ++it) - { - if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation( - allocSize, - allocAlignment, - allocType, - it, - &pAllocationRequest->allocHandle)) - { - pAllocationRequest->item = it; - return true; - } - } - } - else - { - VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )); - // Search staring from biggest suballocations. - for (size_t index = freeSuballocCount; index--; ) - { - if (CheckAllocation( - allocSize, - allocAlignment, - allocType, - m_FreeSuballocationsBySize[index], - &pAllocationRequest->allocHandle)) - { - pAllocationRequest->item = m_FreeSuballocationsBySize[index]; - return true; - } - } - } - } - - return false; -} - -VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData) -{ - for (auto& suballoc : m_Suballocations) - { - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - { - if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) - { - VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN_COPY; - } - } - } - - return VK_SUCCESS; -} - -void VmaBlockMetadata_Generic::Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) -{ - VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); - VMA_ASSERT(request.item != m_Suballocations.end()); - VmaSuballocation& suballoc = *request.item; - // Given suballocation is a free block. - VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - // Given offset is inside this suballocation. - VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset); - const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1; - VMA_ASSERT(suballoc.size >= paddingBegin + request.size); - const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size; - - // Unregister this free suballocation from m_FreeSuballocationsBySize and update - // it to become used. - UnregisterFreeSuballocation(request.item); - - suballoc.offset = (VkDeviceSize)request.allocHandle - 1; - suballoc.size = request.size; - suballoc.type = type; - suballoc.userData = userData; - - // If there are any free bytes remaining at the end, insert new free suballocation after current one. 
- if (paddingEnd) - { - VmaSuballocation paddingSuballoc = {}; - paddingSuballoc.offset = suballoc.offset + suballoc.size; - paddingSuballoc.size = paddingEnd; - paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - VmaSuballocationList::iterator next = request.item; - ++next; - const VmaSuballocationList::iterator paddingEndItem = - m_Suballocations.insert(next, paddingSuballoc); - RegisterFreeSuballocation(paddingEndItem); - } - - // If there are any free bytes remaining at the beginning, insert new free suballocation before current one. - if (paddingBegin) - { - VmaSuballocation paddingSuballoc = {}; - paddingSuballoc.offset = suballoc.offset - paddingBegin; - paddingSuballoc.size = paddingBegin; - paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - const VmaSuballocationList::iterator paddingBeginItem = - m_Suballocations.insert(request.item, paddingSuballoc); - RegisterFreeSuballocation(paddingBeginItem); - } - - // Update totals. - m_FreeCount = m_FreeCount - 1; - if (paddingBegin > 0) - { - ++m_FreeCount; - } - if (paddingEnd > 0) - { - ++m_FreeCount; - } - m_SumFreeSize -= request.size; -} - -void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) -{ - outInfo.offset = (VkDeviceSize)allocHandle - 1; - const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset); - outInfo.size = suballoc.size; - outInfo.pUserData = suballoc.userData; -} - -void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const -{ - return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData; -} - -VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const -{ - if (IsEmpty()) - return VK_NULL_HANDLE; - - for (const auto& suballoc : m_Suballocations) - { - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - return (VmaAllocHandle)(suballoc.offset + 1); - } - VMA_ASSERT(false && "Should contain at least 1 allocation!"); - return VK_NULL_HANDLE; -} - -VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const -{ - VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1); - - for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it) - { - if (it->type != VMA_SUBALLOCATION_TYPE_FREE) - return (VmaAllocHandle)(it->offset + 1); - } - return VK_NULL_HANDLE; -} - -void VmaBlockMetadata_Generic::Clear() -{ - const VkDeviceSize size = GetSize(); - - VMA_ASSERT(IsVirtual()); - m_FreeCount = 1; - m_SumFreeSize = size; - m_Suballocations.clear(); - m_FreeSuballocationsBySize.clear(); - - VmaSuballocation suballoc = {}; - suballoc.offset = 0; - suballoc.size = size; - suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - m_Suballocations.push_back(suballoc); - - m_FreeSuballocationsBySize.push_back(m_Suballocations.begin()); -} - -void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) -{ - VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1); - suballoc.userData = userData; -} - -void VmaBlockMetadata_Generic::DebugLogAllAllocations() const -{ - for (const auto& suballoc : m_Suballocations) - { - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData); - } -} - -VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const -{ - VMA_HEAVY_ASSERT(!m_Suballocations.empty()); - const VkDeviceSize last = m_Suballocations.rbegin()->offset; - if (last == offset) - return 
m_Suballocations.rbegin().drop_const(); - const VkDeviceSize first = m_Suballocations.begin()->offset; - if (first == offset) - return m_Suballocations.begin().drop_const(); - - const size_t suballocCount = m_Suballocations.size(); - const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount; - auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator - { - for (auto suballocItem = begin; - suballocItem != end; - ++suballocItem) - { - if (suballocItem->offset == offset) - return suballocItem.drop_const(); - } - VMA_ASSERT(false && "Not found!"); - return m_Suballocations.end().drop_const(); - }; - // If requested offset is closer to the end of range, search from the end - if (offset - first > suballocCount * step / 2) - { - return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend()); - } - return findSuballocation(m_Suballocations.begin(), m_Suballocations.end()); -} - -bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const -{ - VkDeviceSize lastSize = 0; - for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) - { - const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i]; - - VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE); - VMA_VALIDATE(it->size >= lastSize); - lastSize = it->size; - } - return true; -} - -bool VmaBlockMetadata_Generic::CheckAllocation( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - VmaSuballocationList::const_iterator suballocItem, - VmaAllocHandle* pAllocHandle) const -{ - VMA_ASSERT(allocSize > 0); - VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); - VMA_ASSERT(suballocItem != m_Suballocations.cend()); - VMA_ASSERT(pAllocHandle != VMA_NULL); - - const VkDeviceSize debugMargin = GetDebugMargin(); - const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); - - const VmaSuballocation& suballoc = *suballocItem; - VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - // Size of this suballocation is too small for this request: Early return. - if (suballoc.size < allocSize) - { - return false; - } - - // Start from offset equal to beginning of this suballocation. - VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin()); - - // Apply debugMargin from the end of previous alloc. - if (debugMargin > 0) - { - offset += debugMargin; - } - - // Apply alignment. - offset = VmaAlignUp(offset, allocAlignment); - - // Check previous suballocations for BufferImageGranularity conflicts. - // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment) - { - bool bufferImageGranularityConflict = false; - VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; - while (prevSuballocItem != m_Suballocations.cbegin()) - { - --prevSuballocItem; - const VmaSuballocation& prevSuballoc = *prevSuballocItem; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) - { - bufferImageGranularityConflict = true; - break; - } - } - else - // Already on previous page. - break; - } - if (bufferImageGranularityConflict) - { - offset = VmaAlignUp(offset, bufferImageGranularity); - } - } - - // Calculate padding at the beginning based on current offset. 
- const VkDeviceSize paddingBegin = offset - suballoc.offset; - - // Fail if requested size plus margin after is bigger than size of this suballocation. - if (paddingBegin + allocSize + debugMargin > suballoc.size) - { - return false; - } - - // Check next suballocations for BufferImageGranularity conflicts. - // If conflict exists, allocation cannot be made here. - if (allocSize % bufferImageGranularity || offset % bufferImageGranularity) - { - VmaSuballocationList::const_iterator nextSuballocItem = suballocItem; - ++nextSuballocItem; - while (nextSuballocItem != m_Suballocations.cend()) - { - const VmaSuballocation& nextSuballoc = *nextSuballocItem; - if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) - { - return false; - } - } - else - { - // Already on next page. - break; - } - ++nextSuballocItem; - } - } - - *pAllocHandle = (VmaAllocHandle)(offset + 1); - // All tests passed: Success. pAllocHandle is already filled. - return true; -} - -void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item) -{ - VMA_ASSERT(item != m_Suballocations.end()); - VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); - - VmaSuballocationList::iterator nextItem = item; - ++nextItem; - VMA_ASSERT(nextItem != m_Suballocations.end()); - VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE); - - item->size += nextItem->size; - --m_FreeCount; - m_Suballocations.erase(nextItem); -} - -VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem) -{ - // Change this suballocation to be marked as free. - VmaSuballocation& suballoc = *suballocItem; - suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - suballoc.userData = VMA_NULL; - - // Update totals. - ++m_FreeCount; - m_SumFreeSize += suballoc.size; - - // Merge with previous and/or next suballocation if it's also free. - bool mergeWithNext = false; - bool mergeWithPrev = false; - - VmaSuballocationList::iterator nextItem = suballocItem; - ++nextItem; - if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)) - { - mergeWithNext = true; - } - - VmaSuballocationList::iterator prevItem = suballocItem; - if (suballocItem != m_Suballocations.begin()) - { - --prevItem; - if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE) - { - mergeWithPrev = true; - } - } - - if (mergeWithNext) - { - UnregisterFreeSuballocation(nextItem); - MergeFreeWithNext(suballocItem); - } - - if (mergeWithPrev) - { - UnregisterFreeSuballocation(prevItem); - MergeFreeWithNext(prevItem); - RegisterFreeSuballocation(prevItem); - return prevItem; - } - else - { - RegisterFreeSuballocation(suballocItem); - return suballocItem; - } -} - -void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item) -{ - VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); - VMA_ASSERT(item->size > 0); - - // You may want to enable this validation at the beginning or at the end of - // this function, depending on what do you want to check. 
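// Illustrative aside (hypothetical helper, not VMA's own code): m_FreeSuballocationsBySize keeps
// iterators to free suballocations sorted by size ascending, so the min-memory strategy in
// CreateAllocationRequest can binary-search the first entry whose size is not less than
// allocSize + debugMargin and then walk forward to larger candidates. The same best-fit lookup,
// reduced to a plain vector of sizes and a standard-library binary search:
#if 0
#include <algorithm>
#include <cstdint>
#include <vector>

static bool FindBestFit(const std::vector<uint64_t>& freeSizesAscending,
                        uint64_t requestedSize, size_t& outIndex)
{
    // First element not less than the request; every later element also fits by size.
    const auto it = std::lower_bound(freeSizesAscending.begin(), freeSizesAscending.end(), requestedSize);
    if (it == freeSizesAscending.end())
        return false;
    outIndex = static_cast<size_t>(it - freeSizesAscending.begin());
    return true;
}

static void ExampleBestFit()
{
    // Kept sorted ascending, like m_FreeSuballocationsBySize.
    const std::vector<uint64_t> freeSizes = { 64, 256, 1024, 4096 };
    size_t index = 0;
    if (FindBestFit(freeSizes, 300, index))
    {
        // index == 2: the 1024-byte range is the smallest one that can hold 300 bytes.
    }
}
#endif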
- VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); - - if (m_FreeSuballocationsBySize.empty()) - { - m_FreeSuballocationsBySize.push_back(item); - } - else - { - VmaVectorInsertSorted(m_FreeSuballocationsBySize, item); - } - - //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); -} - -void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item) -{ - VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); - VMA_ASSERT(item->size > 0); - - // You may want to enable this validation at the beginning or at the end of - // this function, depending on what do you want to check. - VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); - - VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( - m_FreeSuballocationsBySize.data(), - m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(), - item, - VmaSuballocationItemSizeLess()); - for (size_t index = it - m_FreeSuballocationsBySize.data(); - index < m_FreeSuballocationsBySize.size(); - ++index) - { - if (m_FreeSuballocationsBySize[index] == item) - { - VmaVectorRemove(m_FreeSuballocationsBySize, index); - return; - } - VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found."); - } - VMA_ASSERT(0 && "Not found."); - - //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); -} -#endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS -#endif // _VMA_BLOCK_METADATA_GENERIC -#endif // #if 0 - -#ifndef _VMA_BLOCK_METADATA_LINEAR -/* -Allocations and their references in internal data structure look like this: - -if(m_2ndVectorMode == SECOND_VECTOR_EMPTY): - - 0 +-------+ - | | - | | - | | - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount] - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount + 1] - +-------+ - | ... | - +-------+ - | Alloc | 1st[1st.size() - 1] - +-------+ - | | - | | - | | -GetSize() +-------+ - -if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER): - - 0 +-------+ - | Alloc | 2nd[0] - +-------+ - | Alloc | 2nd[1] - +-------+ - | ... | - +-------+ - | Alloc | 2nd[2nd.size() - 1] - +-------+ - | | - | | - | | - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount] - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount + 1] - +-------+ - | ... | - +-------+ - | Alloc | 1st[1st.size() - 1] - +-------+ - | | -GetSize() +-------+ - -if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK): - - 0 +-------+ - | | - | | - | | - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount] - +-------+ - | Alloc | 1st[m_1stNullItemsBeginCount + 1] - +-------+ - | ... | - +-------+ - | Alloc | 1st[1st.size() - 1] - +-------+ - | | - | | - | | - +-------+ - | Alloc | 2nd[2nd.size() - 1] - +-------+ - | ... 
| - +-------+ - | Alloc | 2nd[1] - +-------+ - | Alloc | 2nd[0] -GetSize() +-------+ - -*/ -class VmaBlockMetadata_Linear : public VmaBlockMetadata -{ - VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear) -public: - VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); - virtual ~VmaBlockMetadata_Linear() = default; - - VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } - bool IsEmpty() const override { return GetAllocationCount() == 0; } - VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }; - - void Init(VkDeviceSize size) override; - bool Validate() const override; - size_t GetAllocationCount() const override; - size_t GetFreeRegionsCount() const override; - - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; - void AddStatistics(VmaStatistics& inoutStats) const override; - -#if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json) const override; -#endif - - bool CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) override; - - VkResult CheckCorruption(const void* pBlockData) override; - - void Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) override; - - void Free(VmaAllocHandle allocHandle) override; - void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; - void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; - VmaAllocHandle GetAllocationListBegin() const override; - VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; - VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; - void Clear() override; - void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; - void DebugLogAllAllocations() const override; - -private: - /* - There are two suballocation vectors, used in ping-pong way. - The one with index m_1stVectorIndex is called 1st. - The one with index (m_1stVectorIndex ^ 1) is called 2nd. - 2nd can be non-empty only when 1st is not empty. - When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. - */ - typedef VmaVector> SuballocationVectorType; - - enum SECOND_VECTOR_MODE - { - SECOND_VECTOR_EMPTY, - /* - Suballocations in 2nd vector are created later than the ones in 1st, but they - all have smaller offset. - */ - SECOND_VECTOR_RING_BUFFER, - /* - Suballocations in 2nd vector are upper side of double stack. - They all have offsets higher than those in 1st vector. - Top of this stack means smaller offsets, but higher indices in this vector. - */ - SECOND_VECTOR_DOUBLE_STACK, - }; - - VkDeviceSize m_SumFreeSize; - SuballocationVectorType m_Suballocations0, m_Suballocations1; - uint32_t m_1stVectorIndex; - SECOND_VECTOR_MODE m_2ndVectorMode; - // Number of items in 1st vector with hAllocation = null at the beginning. - size_t m_1stNullItemsBeginCount; - // Number of other items in 1st vector with hAllocation = null somewhere in the middle. - size_t m_1stNullItemsMiddleCount; - // Number of items in 2nd vector with hAllocation = null. - size_t m_2ndNullItemsCount; - - SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? 
m_Suballocations1 : m_Suballocations0; } - SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } - const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } - const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } - - VmaSuballocation& FindSuballocation(VkDeviceSize offset) const; - bool ShouldCompact1st() const; - void CleanupAfterFree(); - - bool CreateAllocationRequest_LowerAddress( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest); - bool CreateAllocationRequest_UpperAddress( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest); -}; - -#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS -VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual) - : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), - m_SumFreeSize(0), - m_Suballocations0(VmaStlAllocator(pAllocationCallbacks)), - m_Suballocations1(VmaStlAllocator(pAllocationCallbacks)), - m_1stVectorIndex(0), - m_2ndVectorMode(SECOND_VECTOR_EMPTY), - m_1stNullItemsBeginCount(0), - m_1stNullItemsMiddleCount(0), - m_2ndNullItemsCount(0) {} - -void VmaBlockMetadata_Linear::Init(VkDeviceSize size) -{ - VmaBlockMetadata::Init(size); - m_SumFreeSize = size; -} - -bool VmaBlockMetadata_Linear::Validate() const -{ - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - - VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY)); - VMA_VALIDATE(!suballocations1st.empty() || - suballocations2nd.empty() || - m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); - - if (!suballocations1st.empty()) - { - // Null item at the beginning should be accounted into m_1stNullItemsBeginCount. - VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE); - // Null item at the end should be just pop_back(). - VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE); - } - if (!suballocations2nd.empty()) - { - // Null item at the end should be just pop_back(). 
- VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE); - } - - VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); - VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); - - VkDeviceSize sumUsedSize = 0; - const size_t suballoc1stCount = suballocations1st.size(); - const VkDeviceSize debugMargin = GetDebugMargin(); - VkDeviceSize offset = 0; - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const size_t suballoc2ndCount = suballocations2nd.size(); - size_t nullItem2ndCount = 0; - for (size_t i = 0; i < suballoc2ndCount; ++i) - { - const VmaSuballocation& suballoc = suballocations2nd[i]; - const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - VmaAllocation const alloc = (VmaAllocation)suballoc.userData; - if (!IsVirtual()) - { - VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); - } - VMA_VALIDATE(suballoc.offset >= offset); - - if (!currFree) - { - if (!IsVirtual()) - { - VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); - VMA_VALIDATE(alloc->GetSize() == suballoc.size); - } - sumUsedSize += suballoc.size; - } - else - { - ++nullItem2ndCount; - } - - offset = suballoc.offset + suballoc.size + debugMargin; - } - - VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); - } - - for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) - { - const VmaSuballocation& suballoc = suballocations1st[i]; - VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && - suballoc.userData == VMA_NULL); - } - - size_t nullItem1stCount = m_1stNullItemsBeginCount; - - for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) - { - const VmaSuballocation& suballoc = suballocations1st[i]; - const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - VmaAllocation const alloc = (VmaAllocation)suballoc.userData; - if (!IsVirtual()) - { - VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); - } - VMA_VALIDATE(suballoc.offset >= offset); - VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); - - if (!currFree) - { - if (!IsVirtual()) - { - VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); - VMA_VALIDATE(alloc->GetSize() == suballoc.size); - } - sumUsedSize += suballoc.size; - } - else - { - ++nullItem1stCount; - } - - offset = suballoc.offset + suballoc.size + debugMargin; - } - VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - const size_t suballoc2ndCount = suballocations2nd.size(); - size_t nullItem2ndCount = 0; - for (size_t i = suballoc2ndCount; i--; ) - { - const VmaSuballocation& suballoc = suballocations2nd[i]; - const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); - - VmaAllocation const alloc = (VmaAllocation)suballoc.userData; - if (!IsVirtual()) - { - VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); - } - VMA_VALIDATE(suballoc.offset >= offset); - - if (!currFree) - { - if (!IsVirtual()) - { - VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); - VMA_VALIDATE(alloc->GetSize() == suballoc.size); - } - sumUsedSize += suballoc.size; - } - else - { - ++nullItem2ndCount; - } - - offset = suballoc.offset + suballoc.size + debugMargin; - } - - VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); - } - - VMA_VALIDATE(offset <= GetSize()); - VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); - - return true; -} - -size_t VmaBlockMetadata_Linear::GetAllocationCount() const -{ - 
return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount + - AccessSuballocations2nd().size() - m_2ndNullItemsCount; -} - -size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const -{ - // Function only used for defragmentation, which is disabled for this algorithm - VMA_ASSERT(0); - return SIZE_MAX; -} - -void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const -{ - const VkDeviceSize size = GetSize(); - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - const size_t suballoc1stCount = suballocations1st.size(); - const size_t suballoc2ndCount = suballocations2nd.size(); - - inoutStats.statistics.blockCount++; - inoutStats.statistics.blockBytes += size; - - VkDeviceSize lastOffset = 0; - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; - size_t nextAlloc2ndIndex = 0; - while (lastOffset < freeSpace2ndTo1stEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - ++nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc2ndIndex; - } - // We are at the end. - else - { - // There is free space from lastOffset to freeSpace2ndTo1stEnd. - if (lastOffset < freeSpace2ndTo1stEnd) - { - const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // End of loop. - lastOffset = freeSpace2ndTo1stEnd; - } - } - } - - size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; - const VkDeviceSize freeSpace1stTo2ndEnd = - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; - while (lastOffset < freeSpace1stTo2ndEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) - { - ++nextAlloc1stIndex; - } - - // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) - { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); - - // 3. Prepare for next iteration. 
- lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc1stIndex; - } - // We are at the end. - else - { - // There is free space from lastOffset to freeSpace1stTo2ndEnd. - if (lastOffset < freeSpace1stTo2ndEnd) - { - const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // End of loop. - lastOffset = freeSpace1stTo2ndEnd; - } - } - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - --nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - --nextAlloc2ndIndex; - } - // We are at the end. - else - { - // There is free space from lastOffset to size. - if (lastOffset < size) - { - const VkDeviceSize unusedRangeSize = size - lastOffset; - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); - } - - // End of loop. - lastOffset = size; - } - } - } -} - -void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const -{ - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - const VkDeviceSize size = GetSize(); - const size_t suballoc1stCount = suballocations1st.size(); - const size_t suballoc2ndCount = suballocations2nd.size(); - - inoutStats.blockCount++; - inoutStats.blockBytes += size; - inoutStats.allocationBytes += size - m_SumFreeSize; - - VkDeviceSize lastOffset = 0; - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; - size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount; - while (lastOffset < freeSpace2ndTo1stEnd) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - ++nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++inoutStats.allocationCount; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc2ndIndex; - } - // We are at the end. 
- else - { - if (lastOffset < freeSpace2ndTo1stEnd) - { - // There is free space from lastOffset to freeSpace2ndTo1stEnd. - const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; - } - - // End of loop. - lastOffset = freeSpace2ndTo1stEnd; - } - } - } - - size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; - const VkDeviceSize freeSpace1stTo2ndEnd = - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; - while (lastOffset < freeSpace1stTo2ndEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) - { - ++nextAlloc1stIndex; - } - - // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) - { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++inoutStats.allocationCount; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc1stIndex; - } - // We are at the end. - else - { - if (lastOffset < freeSpace1stTo2ndEnd) - { - // There is free space from lastOffset to freeSpace1stTo2ndEnd. - const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; - } - - // End of loop. - lastOffset = freeSpace1stTo2ndEnd; - } - } - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - --nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++inoutStats.allocationCount; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - --nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < size) - { - // There is free space from lastOffset to size. - const VkDeviceSize unusedRangeSize = size - lastOffset; - } - - // End of loop. 
- lastOffset = size; - } - } - } -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const -{ - const VkDeviceSize size = GetSize(); - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - const size_t suballoc1stCount = suballocations1st.size(); - const size_t suballoc2ndCount = suballocations2nd.size(); - - // FIRST PASS - - size_t unusedRangeCount = 0; - VkDeviceSize usedBytes = 0; - - VkDeviceSize lastOffset = 0; - - size_t alloc2ndCount = 0; - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; - size_t nextAlloc2ndIndex = 0; - while (lastOffset < freeSpace2ndTo1stEnd) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - ++nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - ++unusedRangeCount; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++alloc2ndCount; - usedBytes += suballoc.size; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < freeSpace2ndTo1stEnd) - { - // There is free space from lastOffset to freeSpace2ndTo1stEnd. - ++unusedRangeCount; - } - - // End of loop. - lastOffset = freeSpace2ndTo1stEnd; - } - } - } - - size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; - size_t alloc1stCount = 0; - const VkDeviceSize freeSpace1stTo2ndEnd = - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; - while (lastOffset < freeSpace1stTo2ndEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) - { - ++nextAlloc1stIndex; - } - - // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) - { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - ++unusedRangeCount; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++alloc1stCount; - usedBytes += suballoc.size; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc1stIndex; - } - // We are at the end. - else - { - if (lastOffset < size) - { - // There is free space from lastOffset to freeSpace1stTo2ndEnd. - ++unusedRangeCount; - } - - // End of loop. - lastOffset = freeSpace1stTo2ndEnd; - } - } - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. 
- while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - --nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - ++unusedRangeCount; - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - ++alloc2ndCount; - usedBytes += suballoc.size; - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - --nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < size) - { - // There is free space from lastOffset to size. - ++unusedRangeCount; - } - - // End of loop. - lastOffset = size; - } - } - } - - const VkDeviceSize unusedBytes = size - usedBytes; - PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); - - // SECOND PASS - lastOffset = 0; - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; - size_t nextAlloc2ndIndex = 0; - while (lastOffset < freeSpace2ndTo1stEnd) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - ++nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex < suballoc2ndCount) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < freeSpace2ndTo1stEnd) - { - // There is free space from lastOffset to freeSpace2ndTo1stEnd. - const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // End of loop. - lastOffset = freeSpace2ndTo1stEnd; - } - } - } - - nextAlloc1stIndex = m_1stNullItemsBeginCount; - while (lastOffset < freeSpace1stTo2ndEnd) - { - // Find next non-null allocation or move nextAllocIndex to the end. - while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) - { - ++nextAlloc1stIndex; - } - - // Found non-null allocation. - if (nextAlloc1stIndex < suballoc1stCount) - { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. 
- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - ++nextAlloc1stIndex; - } - // We are at the end. - else - { - if (lastOffset < freeSpace1stTo2ndEnd) - { - // There is free space from lastOffset to freeSpace1stTo2ndEnd. - const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // End of loop. - lastOffset = freeSpace1stTo2ndEnd; - } - } - - if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; - while (lastOffset < size) - { - // Find next non-null allocation or move nextAlloc2ndIndex to the end. - while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) - { - --nextAlloc2ndIndex; - } - - // Found non-null allocation. - if (nextAlloc2ndIndex != SIZE_MAX) - { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; - - // 1. Process free space before this allocation. - if (lastOffset < suballoc.offset) - { - // There is free space from lastOffset to suballoc.offset. - const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // 2. Process this allocation. - // There is allocation with suballoc.offset, suballoc.size. - PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); - - // 3. Prepare for next iteration. - lastOffset = suballoc.offset + suballoc.size; - --nextAlloc2ndIndex; - } - // We are at the end. - else - { - if (lastOffset < size) - { - // There is free space from lastOffset to size. - const VkDeviceSize unusedRangeSize = size - lastOffset; - PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); - } - - // End of loop. - lastOffset = size; - } - } - } - - PrintDetailedMap_End(json); -} -#endif // VMA_STATS_STRING_ENABLED - -bool VmaBlockMetadata_Linear::CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) -{ - VMA_ASSERT(allocSize > 0); - VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); - VMA_ASSERT(pAllocationRequest != VMA_NULL); - VMA_HEAVY_ASSERT(Validate()); - pAllocationRequest->size = allocSize; - return upperAddress ? 
- CreateAllocationRequest_UpperAddress( - allocSize, allocAlignment, allocType, strategy, pAllocationRequest) : - CreateAllocationRequest_LowerAddress( - allocSize, allocAlignment, allocType, strategy, pAllocationRequest); -} - -VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) -{ - VMA_ASSERT(!IsVirtual()); - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) - { - const VmaSuballocation& suballoc = suballocations1st[i]; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - { - if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) - { - VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN_COPY; - } - } - } - - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) - { - const VmaSuballocation& suballoc = suballocations2nd[i]; - if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) - { - if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) - { - VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN_COPY; - } - } - } - - return VK_SUCCESS; -} - -void VmaBlockMetadata_Linear::Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) -{ - const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; - const VmaSuballocation newSuballoc = { offset, request.size, userData, type }; - - switch (request.type) - { - case VmaAllocationRequestType::UpperAddress: - { - VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && - "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - suballocations2nd.push_back(newSuballoc); - m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; - } - break; - case VmaAllocationRequestType::EndOf1st: - { - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - - VMA_ASSERT(suballocations1st.empty() || - offset >= suballocations1st.back().offset + suballocations1st.back().size); - // Check if it fits before the end of the block. - VMA_ASSERT(offset + request.size <= GetSize()); - - suballocations1st.push_back(newSuballoc); - } - break; - case VmaAllocationRequestType::EndOf2nd: - { - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. - VMA_ASSERT(!suballocations1st.empty() && - offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - - switch (m_2ndVectorMode) - { - case SECOND_VECTOR_EMPTY: - // First allocation from second part ring buffer. - VMA_ASSERT(suballocations2nd.empty()); - m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; - break; - case SECOND_VECTOR_RING_BUFFER: - // 2-part ring buffer is already started. 
- VMA_ASSERT(!suballocations2nd.empty()); - break; - case SECOND_VECTOR_DOUBLE_STACK: - VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); - break; - default: - VMA_ASSERT(0); - } - - suballocations2nd.push_back(newSuballoc); - } - break; - default: - VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); - } - - m_SumFreeSize -= newSuballoc.size; -} - -void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) -{ - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - VkDeviceSize offset = (VkDeviceSize)allocHandle - 1; - - if (!suballocations1st.empty()) - { - // First allocation: Mark it as next empty at the beginning. - VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; - if (firstSuballoc.offset == offset) - { - firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; - firstSuballoc.userData = VMA_NULL; - m_SumFreeSize += firstSuballoc.size; - ++m_1stNullItemsBeginCount; - CleanupAfterFree(); - return; - } - } - - // Last allocation in 2-part ring buffer or top of upper stack (same logic). - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || - m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - VmaSuballocation& lastSuballoc = suballocations2nd.back(); - if (lastSuballoc.offset == offset) - { - m_SumFreeSize += lastSuballoc.size; - suballocations2nd.pop_back(); - CleanupAfterFree(); - return; - } - } - // Last allocation in 1st vector. - else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) - { - VmaSuballocation& lastSuballoc = suballocations1st.back(); - if (lastSuballoc.offset == offset) - { - m_SumFreeSize += lastSuballoc.size; - suballocations1st.pop_back(); - CleanupAfterFree(); - return; - } - } - - VmaSuballocation refSuballoc; - refSuballoc.offset = offset; - // Rest of members stays uninitialized intentionally for better performance. - - // Item from the middle of 1st vector. - { - const SuballocationVectorType::iterator it = VmaBinaryFindSorted( - suballocations1st.begin() + m_1stNullItemsBeginCount, - suballocations1st.end(), - refSuballoc, - VmaSuballocationOffsetLess()); - if (it != suballocations1st.end()) - { - it->type = VMA_SUBALLOCATION_TYPE_FREE; - it->userData = VMA_NULL; - ++m_1stNullItemsMiddleCount; - m_SumFreeSize += it->size; - CleanupAfterFree(); - return; - } - } - - if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) - { - // Item from the middle of 2nd vector. - const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : - VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); - if (it != suballocations2nd.end()) - { - it->type = VMA_SUBALLOCATION_TYPE_FREE; - it->userData = VMA_NULL; - ++m_2ndNullItemsCount; - m_SumFreeSize += it->size; - CleanupAfterFree(); - return; - } - } - - VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); -} - -void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) -{ - outInfo.offset = (VkDeviceSize)allocHandle - 1; - VmaSuballocation& suballoc = FindSuballocation(outInfo.offset); - outInfo.size = suballoc.size; - outInfo.pUserData = suballoc.userData; -} - -void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const -{ - return FindSuballocation((VkDeviceSize)allocHandle - 1).userData; -} - -VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const -{ - // Function only used for defragmentation, which is disabled for this algorithm - VMA_ASSERT(0); - return VK_NULL_HANDLE; -} - -VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const -{ - // Function only used for defragmentation, which is disabled for this algorithm - VMA_ASSERT(0); - return VK_NULL_HANDLE; -} - -VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const -{ - // Function only used for defragmentation, which is disabled for this algorithm - VMA_ASSERT(0); - return 0; -} - -void VmaBlockMetadata_Linear::Clear() -{ - m_SumFreeSize = GetSize(); - m_Suballocations0.clear(); - m_Suballocations1.clear(); - // Leaving m_1stVectorIndex unchanged - it doesn't matter. - m_2ndVectorMode = SECOND_VECTOR_EMPTY; - m_1stNullItemsBeginCount = 0; - m_1stNullItemsMiddleCount = 0; - m_2ndNullItemsCount = 0; -} - -void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) -{ - VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1); - suballoc.userData = userData; -} - -void VmaBlockMetadata_Linear::DebugLogAllAllocations() const -{ - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it) - if (it->type != VMA_SUBALLOCATION_TYPE_FREE) - DebugLogAllocation(it->offset, it->size, it->userData); - - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it) - if (it->type != VMA_SUBALLOCATION_TYPE_FREE) - DebugLogAllocation(it->offset, it->size, it->userData); -} - -VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const -{ - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - - VmaSuballocation refSuballoc; - refSuballoc.offset = offset; - // Rest of members stays uninitialized intentionally for better performance. - - // Item from the 1st vector. 
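// Both vectors are kept sorted by offset, so lookup is a binary search: the 1st vector
// (past the leading null items) ascends, while the 2nd vector ascends in ring-buffer
// mode and descends in double-stack mode, hence the two comparators below.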
-    {
-        SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
-            suballocations1st.begin() + m_1stNullItemsBeginCount,
-            suballocations1st.end(),
-            refSuballoc,
-            VmaSuballocationOffsetLess());
-        if (it != suballocations1st.end())
-        {
-            return const_cast<VmaSuballocation&>(*it);
-        }
-    }
-
-    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
-    {
-        // Rest of members stays uninitialized intentionally for better performance.
-        SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
-            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
-            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
-        if (it != suballocations2nd.end())
-        {
-            return const_cast<VmaSuballocation&>(*it);
-        }
-    }
-
-    VMA_ASSERT(0 && "Allocation not found in linear allocator!");
-    return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
-}
-
-bool VmaBlockMetadata_Linear::ShouldCompact1st() const
-{
-    const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-    const size_t suballocCount = AccessSuballocations1st().size();
-    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
-}
-
-void VmaBlockMetadata_Linear::CleanupAfterFree()
-{
-    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-
-    if (IsEmpty())
-    {
-        suballocations1st.clear();
-        suballocations2nd.clear();
-        m_1stNullItemsBeginCount = 0;
-        m_1stNullItemsMiddleCount = 0;
-        m_2ndNullItemsCount = 0;
-        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-    }
-    else
-    {
-        const size_t suballoc1stCount = suballocations1st.size();
-        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
-
-        // Find more null items at the beginning of 1st vector.
-        while (m_1stNullItemsBeginCount < suballoc1stCount &&
-            suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            ++m_1stNullItemsBeginCount;
-            --m_1stNullItemsMiddleCount;
-        }
-
-        // Find more null items at the end of 1st vector.
-        while (m_1stNullItemsMiddleCount > 0 &&
-            suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            --m_1stNullItemsMiddleCount;
-            suballocations1st.pop_back();
-        }
-
-        // Find more null items at the end of 2nd vector.
-        while (m_2ndNullItemsCount > 0 &&
-            suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            --m_2ndNullItemsCount;
-            suballocations2nd.pop_back();
-        }
-
-        // Find more null items at the beginning of 2nd vector.
-        while (m_2ndNullItemsCount > 0 &&
-            suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
-        {
-            --m_2ndNullItemsCount;
-            VmaVectorRemove(suballocations2nd, 0);
-        }
-
-        if (ShouldCompact1st())
-        {
-            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
-            size_t srcIndex = m_1stNullItemsBeginCount;
-            for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
-            {
-                while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
-                {
-                    ++srcIndex;
-                }
-                if (dstIndex != srcIndex)
-                {
-                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
-                }
-                ++srcIndex;
-            }
-            suballocations1st.resize(nonNullItemCount);
-            m_1stNullItemsBeginCount = 0;
-            m_1stNullItemsMiddleCount = 0;
-        }
-
-        // 2nd vector became empty.
-        if (suballocations2nd.empty())
-        {
-            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-        }
-
-        // 1st vector became empty.
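// Once only leading null items remain in the 1st vector it is dropped entirely, and if
// the ring buffer still has live entries in the 2nd vector, the two vectors swap roles
// (m_1stVectorIndex ^= 1) so the ring-buffer contents live in a non-empty "1st" vector again.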
- if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) - { - suballocations1st.clear(); - m_1stNullItemsBeginCount = 0; - - if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - // Swap 1st with 2nd. Now 2nd is empty. - m_2ndVectorMode = SECOND_VECTOR_EMPTY; - m_1stNullItemsMiddleCount = m_2ndNullItemsCount; - while (m_1stNullItemsBeginCount < suballocations2nd.size() && - suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) - { - ++m_1stNullItemsBeginCount; - --m_1stNullItemsMiddleCount; - } - m_2ndNullItemsCount = 0; - m_1stVectorIndex ^= 1; - } - } - } - - VMA_HEAVY_ASSERT(Validate()); -} - -bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) -{ - const VkDeviceSize blockSize = GetSize(); - const VkDeviceSize debugMargin = GetDebugMargin(); - const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - - if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - // Try to allocate at the end of 1st vector. - - VkDeviceSize resultBaseOffset = 0; - if (!suballocations1st.empty()) - { - const VmaSuballocation& lastSuballoc = suballocations1st.back(); - resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; - } - - // Start from offset equal to beginning of free space. - VkDeviceSize resultOffset = resultBaseOffset; - - // Apply alignment. - resultOffset = VmaAlignUp(resultOffset, allocAlignment); - - // Check previous suballocations for BufferImageGranularity conflicts. - // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty()) - { - bool bufferImageGranularityConflict = false; - for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) - { - const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) - { - bufferImageGranularityConflict = true; - break; - } - } - else - // Already on previous page. - break; - } - if (bufferImageGranularityConflict) - { - resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); - } - } - - const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? - suballocations2nd.back().offset : blockSize; - - // There is enough free space at the end after alignment. - if (resultOffset + allocSize + debugMargin <= freeSpaceEnd) - { - // Check next suballocations for BufferImageGranularity conflicts. - // If conflict exists, allocation cannot be made here. 
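// bufferImageGranularity separates linear resources (buffers, linear images) from
// non-linear (optimal-tiling) images: suballocations of conflicting types must not share
// a page of that size. The check is skipped when both the size and the offset are already
// multiples of the granularity; otherwise the candidate block at the end of the 1st vector
// is tested against the bottom of the upper stack (2nd vector).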
- if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) - { - for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) - { - const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) - { - return false; - } - } - else - { - // Already on previous page. - break; - } - } - } - - // All tests passed: Success. - pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); - // pAllocationRequest->item, customData unused. - pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; - return true; - } - } - - // Wrap-around to end of 2nd vector. Try to allocate there, watching for the - // beginning of 1st vector as the end of free space. - if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - VMA_ASSERT(!suballocations1st.empty()); - - VkDeviceSize resultBaseOffset = 0; - if (!suballocations2nd.empty()) - { - const VmaSuballocation& lastSuballoc = suballocations2nd.back(); - resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; - } - - // Start from offset equal to beginning of free space. - VkDeviceSize resultOffset = resultBaseOffset; - - // Apply alignment. - resultOffset = VmaAlignUp(resultOffset, allocAlignment); - - // Check previous suballocations for BufferImageGranularity conflicts. - // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) - { - bool bufferImageGranularityConflict = false; - for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) - { - const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) - { - bufferImageGranularityConflict = true; - break; - } - } - else - // Already on previous page. - break; - } - if (bufferImageGranularityConflict) - { - resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); - } - } - - size_t index1st = m_1stNullItemsBeginCount; - - // There is enough free space at the end after alignment. - if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) || - (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset)) - { - // Check next suballocations for BufferImageGranularity conflicts. - // If conflict exists, allocation cannot be made here. - if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) - { - for (size_t nextSuballocIndex = index1st; - nextSuballocIndex < suballocations1st.size(); - nextSuballocIndex++) - { - const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) - { - return false; - } - } - else - { - // Already on next page. - break; - } - } - } - - // All tests passed: Success. 
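// The returned handle encodes offset + 1 so that a valid allocation at offset 0 can never
// be mistaken for VK_NULL_HANDLE; every consumer decodes it back with (VkDeviceSize)allocHandle - 1.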
- pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); - pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; - // pAllocationRequest->item, customData unused. - return true; - } - } - - return false; -} - -bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) -{ - const VkDeviceSize blockSize = GetSize(); - const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); - - if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) - { - VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); - return false; - } - - // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). - if (allocSize > blockSize) - { - return false; - } - VkDeviceSize resultBaseOffset = blockSize - allocSize; - if (!suballocations2nd.empty()) - { - const VmaSuballocation& lastSuballoc = suballocations2nd.back(); - resultBaseOffset = lastSuballoc.offset - allocSize; - if (allocSize > lastSuballoc.offset) - { - return false; - } - } - - // Start from offset equal to end of free space. - VkDeviceSize resultOffset = resultBaseOffset; - - const VkDeviceSize debugMargin = GetDebugMargin(); - - // Apply debugMargin at the end. - if (debugMargin > 0) - { - if (resultOffset < debugMargin) - { - return false; - } - resultOffset -= debugMargin; - } - - // Apply alignment. - resultOffset = VmaAlignDown(resultOffset, allocAlignment); - - // Check next suballocations from 2nd for BufferImageGranularity conflicts. - // Make bigger alignment if necessary. - if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) - { - bool bufferImageGranularityConflict = false; - for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) - { - const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; - if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) - { - bufferImageGranularityConflict = true; - break; - } - } - else - // Already on previous page. - break; - } - if (bufferImageGranularityConflict) - { - resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); - } - } - - // There is enough free space. - const VkDeviceSize endOf1st = !suballocations1st.empty() ? - suballocations1st.back().offset + suballocations1st.back().size : - 0; - if (endOf1st + debugMargin <= resultOffset) - { - // Check previous suballocations for BufferImageGranularity conflicts. - // If conflict exists, allocation cannot be made here. - if (bufferImageGranularity > 1) - { - for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) - { - const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; - if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) - { - if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) - { - return false; - } - } - else - { - // Already on next page. - break; - } - } - } - - // All tests passed: Success. 
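// As in the lower-address paths, the handle encodes the aligned-down offset + 1. When this
// request is committed in Alloc(), the suballocation is pushed onto the 2nd vector and the
// block switches to SECOND_VECTOR_DOUBLE_STACK mode.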
- pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); - // pAllocationRequest->item unused. - pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; - return true; - } - - return false; -} -#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS -#endif // _VMA_BLOCK_METADATA_LINEAR - -#if 0 -#ifndef _VMA_BLOCK_METADATA_BUDDY -/* -- GetSize() is the original size of allocated memory block. -- m_UsableSize is this size aligned down to a power of two. - All allocations and calculations happen relative to m_UsableSize. -- GetUnusableSize() is the difference between them. - It is reported as separate, unused range, not available for allocations. - -Node at level 0 has size = m_UsableSize. -Each next level contains nodes with size 2 times smaller than current level. -m_LevelCount is the maximum number of levels to use in the current object. -*/ -class VmaBlockMetadata_Buddy : public VmaBlockMetadata -{ - VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy) -public: - VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); - virtual ~VmaBlockMetadata_Buddy(); - - size_t GetAllocationCount() const override { return m_AllocationCount; } - VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); } - bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; } - VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; } - VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }; - void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); } - - void Init(VkDeviceSize size) override; - bool Validate() const override; - - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; - void AddStatistics(VmaStatistics& inoutStats) const override; - -#if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; -#endif - - bool CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) override; - - void Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) override; - - void Free(VmaAllocHandle allocHandle) override; - void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; - void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; - VmaAllocHandle GetAllocationListBegin() const override; - VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; - void Clear() override; - void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; - -private: - static const size_t MAX_LEVELS = 48; - - struct ValidationContext - { - size_t calculatedAllocationCount = 0; - size_t calculatedFreeCount = 0; - VkDeviceSize calculatedSumFreeSize = 0; - }; - struct Node - { - VkDeviceSize offset; - enum TYPE - { - TYPE_FREE, - TYPE_ALLOCATION, - TYPE_SPLIT, - TYPE_COUNT - } type; - Node* parent; - Node* buddy; - - union - { - struct - { - Node* prev; - Node* next; - } free; - struct - { - void* userData; - } allocation; - struct - { - Node* leftChild; - } split; - }; - }; - - // Size of the memory block aligned down to a power of two. 
-    VkDeviceSize m_UsableSize;
-    uint32_t m_LevelCount;
-    VmaPoolAllocator<Node> m_NodeAllocator;
-    Node* m_Root;
-    struct
-    {
-        Node* front;
-        Node* back;
-    } m_FreeList[MAX_LEVELS];
-
-    // Number of nodes in the tree with type == TYPE_ALLOCATION.
-    size_t m_AllocationCount;
-    // Number of nodes in the tree with type == TYPE_FREE.
-    size_t m_FreeCount;
-    // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
-    // Doesn't include unusable size.
-    VkDeviceSize m_SumFreeSize;
-
-    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
-    VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
-
-    VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
-    {
-        if (!IsVirtual())
-        {
-            size = VmaAlignUp(size, (VkDeviceSize)16);
-        }
-        return VmaNextPow2(size);
-    }
-    Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
-    void DeleteNodeChildren(Node* node);
-    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
-    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
-    void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
-    // Adds node to the front of FreeList at given level.
-    // node->type must be FREE.
-    // node->free.prev, next can be undefined.
-    void AddToFreeListFront(uint32_t level, Node* node);
-    // Removes node from FreeList at given level.
-    // node->type must be FREE.
-    // node->free.prev, next stay untouched.
-    void RemoveFromFreeList(uint32_t level, Node* node);
-    void DebugLogAllAllocationNode(Node* node, uint32_t level) const;
-
-#if VMA_STATS_STRING_ENABLED
-    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
-#endif
-};
-
-#ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
-VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
-    VkDeviceSize bufferImageGranularity, bool isVirtual)
-    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-    m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
-    m_Root(VMA_NULL),
-    m_AllocationCount(0),
-    m_FreeCount(1),
-    m_SumFreeSize(0)
-{
-    memset(m_FreeList, 0, sizeof(m_FreeList));
-}
-
-VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
-{
-    DeleteNodeChildren(m_Root);
-    m_NodeAllocator.Free(m_Root);
-}
-
-void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
-{
-    VmaBlockMetadata::Init(size);
-
-    m_UsableSize = VmaPrevPow2(size);
-    m_SumFreeSize = m_UsableSize;
-
-    // Calculate m_LevelCount.
-    const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
-    m_LevelCount = 1;
-    while (m_LevelCount < MAX_LEVELS &&
-        LevelToNodeSize(m_LevelCount) >= minNodeSize)
-    {
-        ++m_LevelCount;
-    }
-
-    Node* rootNode = m_NodeAllocator.Alloc();
-    rootNode->offset = 0;
-    rootNode->type = Node::TYPE_FREE;
-    rootNode->parent = VMA_NULL;
-    rootNode->buddy = VMA_NULL;
-
-    m_Root = rootNode;
-    AddToFreeListFront(0, rootNode);
-}
-
-bool VmaBlockMetadata_Buddy::Validate() const
-{
-    // Validate tree.
-    ValidationContext ctx;
-    if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
-    {
-        VMA_VALIDATE(false && "ValidateNode failed.");
-    }
-    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
-    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
-
-    // Validate free node lists.
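// Each level keeps its free nodes in an intrusive doubly-linked list; the checks below
// confirm that front->prev is null, that prev/next pointers are mutually consistent, and
// that the stored back pointer really is the last element of the list.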
- for (uint32_t level = 0; level < m_LevelCount; ++level) - { - VMA_VALIDATE(m_FreeList[level].front == VMA_NULL || - m_FreeList[level].front->free.prev == VMA_NULL); - - for (Node* node = m_FreeList[level].front; - node != VMA_NULL; - node = node->free.next) - { - VMA_VALIDATE(node->type == Node::TYPE_FREE); - - if (node->free.next == VMA_NULL) - { - VMA_VALIDATE(m_FreeList[level].back == node); - } - else - { - VMA_VALIDATE(node->free.next->free.prev == node); - } - } - } - - // Validate that free lists ar higher levels are empty. - for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level) - { - VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL); - } - - return true; -} - -void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const -{ - inoutStats.statistics.blockCount++; - inoutStats.statistics.blockBytes += GetSize(); - - AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0)); - - const VkDeviceSize unusableSize = GetUnusableSize(); - if (unusableSize > 0) - VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize); -} - -void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const -{ - inoutStats.blockCount++; - inoutStats.allocationCount += (uint32_t)m_AllocationCount; - inoutStats.blockBytes += GetSize(); - inoutStats.allocationBytes += GetSize() - m_SumFreeSize; -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const -{ - VmaDetailedStatistics stats; - VmaClearDetailedStatistics(stats); - AddDetailedStatistics(stats); - - PrintDetailedMap_Begin( - json, - stats.statistics.blockBytes - stats.statistics.allocationBytes, - stats.statistics.allocationCount, - stats.unusedRangeCount, - mapRefCount); - - PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0)); - - const VkDeviceSize unusableSize = GetUnusableSize(); - if (unusableSize > 0) - { - PrintDetailedMap_UnusedRange(json, - m_UsableSize, // offset - unusableSize); // size - } - - PrintDetailedMap_End(json); -} -#endif // VMA_STATS_STRING_ENABLED - -bool VmaBlockMetadata_Buddy::CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) -{ - VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm."); - - allocSize = AlignAllocationSize(allocSize); - - // Simple way to respect bufferImageGranularity. May be optimized some day. - // Whenever it might be an OPTIMAL image... 
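// i.e. for UNKNOWN / IMAGE_UNKNOWN / IMAGE_OPTIMAL allocation types both the alignment and
// the size are rounded up to bufferImageGranularity, so an allocation that may hold an
// optimal-tiling image occupies whole granularity pages and cannot conflict with a
// neighboring buffer.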
- if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || - allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || - allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) - { - allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity()); - allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity()); - } - - if (allocSize > m_UsableSize) - { - return false; - } - - const uint32_t targetLevel = AllocSizeToLevel(allocSize); - for (uint32_t level = targetLevel; level--; ) - { - for (Node* freeNode = m_FreeList[level].front; - freeNode != VMA_NULL; - freeNode = freeNode->free.next) - { - if (freeNode->offset % allocAlignment == 0) - { - pAllocationRequest->type = VmaAllocationRequestType::Normal; - pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1); - pAllocationRequest->size = allocSize; - pAllocationRequest->customData = (void*)(uintptr_t)level; - return true; - } - } - } - - return false; -} - -void VmaBlockMetadata_Buddy::Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) -{ - VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); - - const uint32_t targetLevel = AllocSizeToLevel(request.size); - uint32_t currLevel = (uint32_t)(uintptr_t)request.customData; - - Node* currNode = m_FreeList[currLevel].front; - VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); - const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; - while (currNode->offset != offset) - { - currNode = currNode->free.next; - VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); - } - - // Go down, splitting free nodes. - while (currLevel < targetLevel) - { - // currNode is already first free node at currLevel. - // Remove it from list of free nodes at this currLevel. - RemoveFromFreeList(currLevel, currNode); - - const uint32_t childrenLevel = currLevel + 1; - - // Create two free sub-nodes. - Node* leftChild = m_NodeAllocator.Alloc(); - Node* rightChild = m_NodeAllocator.Alloc(); - - leftChild->offset = currNode->offset; - leftChild->type = Node::TYPE_FREE; - leftChild->parent = currNode; - leftChild->buddy = rightChild; - - rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel); - rightChild->type = Node::TYPE_FREE; - rightChild->parent = currNode; - rightChild->buddy = leftChild; - - // Convert current currNode to split type. - currNode->type = Node::TYPE_SPLIT; - currNode->split.leftChild = leftChild; - - // Add child nodes to free list. Order is important! - AddToFreeListFront(childrenLevel, rightChild); - AddToFreeListFront(childrenLevel, leftChild); - - ++m_FreeCount; - ++currLevel; - currNode = m_FreeList[currLevel].front; - - /* - We can be sure that currNode, as left child of node previously split, - also fulfills the alignment requirement. - */ - } - - // Remove from free list. - VMA_ASSERT(currLevel == targetLevel && - currNode != VMA_NULL && - currNode->type == Node::TYPE_FREE); - RemoveFromFreeList(currLevel, currNode); - - // Convert to allocation node. 
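// Example: with m_UsableSize = 256 and a request already rounded up to 64,
// AllocSizeToLevel() yields level 2 (node size 256 >> 2 == 64); starting from a free
// level-0 root, the loop above splits it into two 128s and the left 128 into two 64s,
// leaving currNode as the level-2 left child whose offset was already alignment-checked.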
- currNode->type = Node::TYPE_ALLOCATION; - currNode->allocation.userData = userData; - - ++m_AllocationCount; - --m_FreeCount; - m_SumFreeSize -= request.size; -} - -void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) -{ - uint32_t level = 0; - outInfo.offset = (VkDeviceSize)allocHandle - 1; - const Node* const node = FindAllocationNode(outInfo.offset, level); - outInfo.size = LevelToNodeSize(level); - outInfo.pUserData = node->allocation.userData; -} - -void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const -{ - uint32_t level = 0; - const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); - return node->allocation.userData; -} - -VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const -{ - // Function only used for defragmentation, which is disabled for this algorithm - return VK_NULL_HANDLE; -} - -VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const -{ - // Function only used for defragmentation, which is disabled for this algorithm - return VK_NULL_HANDLE; -} - -void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node) -{ - if (node->type == Node::TYPE_SPLIT) - { - DeleteNodeChildren(node->split.leftChild->buddy); - DeleteNodeChildren(node->split.leftChild); - const VkAllocationCallbacks* allocationCallbacks = GetAllocationCallbacks(); - m_NodeAllocator.Free(node->split.leftChild->buddy); - m_NodeAllocator.Free(node->split.leftChild); - } -} - -void VmaBlockMetadata_Buddy::Clear() -{ - DeleteNodeChildren(m_Root); - m_Root->type = Node::TYPE_FREE; - m_AllocationCount = 0; - m_FreeCount = 1; - m_SumFreeSize = m_UsableSize; -} - -void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) -{ - uint32_t level = 0; - Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); - node->allocation.userData = userData; -} - -VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const -{ - Node* node = m_Root; - VkDeviceSize nodeOffset = 0; - outLevel = 0; - VkDeviceSize levelNodeSize = LevelToNodeSize(0); - while (node->type == Node::TYPE_SPLIT) - { - const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1; - if (offset < nodeOffset + nextLevelNodeSize) - { - node = node->split.leftChild; - } - else - { - node = node->split.leftChild->buddy; - nodeOffset += nextLevelNodeSize; - } - ++outLevel; - levelNodeSize = nextLevelNodeSize; - } - - VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION); - return node; -} - -bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const -{ - VMA_VALIDATE(level < m_LevelCount); - VMA_VALIDATE(curr->parent == parent); - VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL)); - VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr); - switch (curr->type) - { - case Node::TYPE_FREE: - // curr->free.prev, next are validated separately. 
- ctx.calculatedSumFreeSize += levelNodeSize; - ++ctx.calculatedFreeCount; - break; - case Node::TYPE_ALLOCATION: - ++ctx.calculatedAllocationCount; - if (!IsVirtual()) - { - VMA_VALIDATE(curr->allocation.userData != VMA_NULL); - } - break; - case Node::TYPE_SPLIT: - { - const uint32_t childrenLevel = level + 1; - const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1; - const Node* const leftChild = curr->split.leftChild; - VMA_VALIDATE(leftChild != VMA_NULL); - VMA_VALIDATE(leftChild->offset == curr->offset); - if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize)) - { - VMA_VALIDATE(false && "ValidateNode for left child failed."); - } - const Node* const rightChild = leftChild->buddy; - VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize); - if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize)) - { - VMA_VALIDATE(false && "ValidateNode for right child failed."); - } - } - break; - default: - return false; - } - - return true; -} - -uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const -{ - // I know this could be optimized somehow e.g. by using std::log2p1 from C++20. - uint32_t level = 0; - VkDeviceSize currLevelNodeSize = m_UsableSize; - VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1; - while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount) - { - ++level; - currLevelNodeSize >>= 1; - nextLevelNodeSize >>= 1; - } - return level; -} - -void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle) -{ - uint32_t level = 0; - Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); - - ++m_FreeCount; - --m_AllocationCount; - m_SumFreeSize += LevelToNodeSize(level); - - node->type = Node::TYPE_FREE; - - // Join free nodes if possible. - while (level > 0 && node->buddy->type == Node::TYPE_FREE) - { - RemoveFromFreeList(level, node->buddy); - Node* const parent = node->parent; - - m_NodeAllocator.Free(node->buddy); - m_NodeAllocator.Free(node); - parent->type = Node::TYPE_FREE; - - node = parent; - --level; - --m_FreeCount; - } - - AddToFreeListFront(level, node); -} - -void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const -{ - switch (node->type) - { - case Node::TYPE_FREE: - VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize); - break; - case Node::TYPE_ALLOCATION: - VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize); - break; - case Node::TYPE_SPLIT: - { - const VkDeviceSize childrenNodeSize = levelNodeSize / 2; - const Node* const leftChild = node->split.leftChild; - AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize); - const Node* const rightChild = leftChild->buddy; - AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize); - } - break; - default: - VMA_ASSERT(0); - } -} - -void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node) -{ - VMA_ASSERT(node->type == Node::TYPE_FREE); - - // List is empty. 
- Node* const frontNode = m_FreeList[level].front; - if (frontNode == VMA_NULL) - { - VMA_ASSERT(m_FreeList[level].back == VMA_NULL); - node->free.prev = node->free.next = VMA_NULL; - m_FreeList[level].front = m_FreeList[level].back = node; - } - else - { - VMA_ASSERT(frontNode->free.prev == VMA_NULL); - node->free.prev = VMA_NULL; - node->free.next = frontNode; - frontNode->free.prev = node; - m_FreeList[level].front = node; - } -} - -void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node) -{ - VMA_ASSERT(m_FreeList[level].front != VMA_NULL); - - // It is at the front. - if (node->free.prev == VMA_NULL) - { - VMA_ASSERT(m_FreeList[level].front == node); - m_FreeList[level].front = node->free.next; - } - else - { - Node* const prevFreeNode = node->free.prev; - VMA_ASSERT(prevFreeNode->free.next == node); - prevFreeNode->free.next = node->free.next; - } - - // It is at the back. - if (node->free.next == VMA_NULL) - { - VMA_ASSERT(m_FreeList[level].back == node); - m_FreeList[level].back = node->free.prev; - } - else - { - Node* const nextFreeNode = node->free.next; - VMA_ASSERT(nextFreeNode->free.prev == node); - nextFreeNode->free.prev = node->free.prev; - } -} - -void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const -{ - switch (node->type) - { - case Node::TYPE_FREE: - break; - case Node::TYPE_ALLOCATION: - DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData); - break; - case Node::TYPE_SPLIT: - { - ++level; - DebugLogAllAllocationNode(node->split.leftChild, level); - DebugLogAllAllocationNode(node->split.leftChild->buddy, level); - } - break; - default: - VMA_ASSERT(0); - } -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const -{ - switch (node->type) - { - case Node::TYPE_FREE: - PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize); - break; - case Node::TYPE_ALLOCATION: - PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData); - break; - case Node::TYPE_SPLIT: - { - const VkDeviceSize childrenNodeSize = levelNodeSize / 2; - const Node* const leftChild = node->split.leftChild; - PrintDetailedMapNode(json, leftChild, childrenNodeSize); - const Node* const rightChild = leftChild->buddy; - PrintDetailedMapNode(json, rightChild, childrenNodeSize); - } - break; - default: - VMA_ASSERT(0); - } -} -#endif // VMA_STATS_STRING_ENABLED -#endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS -#endif // _VMA_BLOCK_METADATA_BUDDY -#endif // #if 0 - -#ifndef _VMA_BLOCK_METADATA_TLSF -// To not search current larger region if first allocation won't succeed and skip to smaller range -// use with VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as strategy in CreateAllocationRequest(). -// When fragmentation and reusal of previous blocks doesn't matter then use with -// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for fastest alloc time possible. 
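For context, a minimal sketch of how these strategy bits reach the strategy parameter of CreateAllocationRequest() from user code through the public API; the buffer parameters below are arbitrary illustration, and `allocator` is assumed to be a VmaAllocator created elsewhere:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    // Prefer the best-fit search order described above over raw allocation speed;
    // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT would instead favor the fastest path.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);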
-class VmaBlockMetadata_TLSF : public VmaBlockMetadata -{ - VMA_CLASS_NO_COPY(VmaBlockMetadata_TLSF) -public: - VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); - virtual ~VmaBlockMetadata_TLSF(); - - size_t GetAllocationCount() const override { return m_AllocCount; } - size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; } - VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; } - bool IsEmpty() const override { return m_NullBlock->offset == 0; } - VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }; - - void Init(VkDeviceSize size) override; - bool Validate() const override; - - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; - void AddStatistics(VmaStatistics& inoutStats) const override; - -#if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json) const override; -#endif - - bool CreateAllocationRequest( - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - bool upperAddress, - VmaSuballocationType allocType, - uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) override; - - VkResult CheckCorruption(const void* pBlockData) override; - void Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) override; - - void Free(VmaAllocHandle allocHandle) override; - void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; - void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; - VmaAllocHandle GetAllocationListBegin() const override; - VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; - VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; - void Clear() override; - void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; - void DebugLogAllAllocations() const override; - -private: - // According to original paper it should be preferable 4 or 5: - // M. Masmano, I. Ripoll, A. Crespo, and J. 
Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems" - // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf - static const uint8_t SECOND_LEVEL_INDEX = 5; - static const uint16_t SMALL_BUFFER_SIZE = 256; - static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16; - static const uint8_t MEMORY_CLASS_SHIFT = 7; - static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT; - - class Block - { - public: - VkDeviceSize offset; - VkDeviceSize size; - Block* prevPhysical; - Block* nextPhysical; - - void MarkFree() { prevFree = VMA_NULL; } - void MarkTaken() { prevFree = this; } - bool IsFree() const { return prevFree != this; } - void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; } - Block*& PrevFree() { return prevFree; } - Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; } - - private: - Block* prevFree; // Address of the same block here indicates that block is taken - union - { - Block* nextFree; - void* userData; - }; - }; - - size_t m_AllocCount; - // Total number of free blocks besides null block - size_t m_BlocksFreeCount; - // Total size of free blocks excluding null block - VkDeviceSize m_BlocksFreeSize; - uint32_t m_IsFreeBitmap; - uint8_t m_MemoryClasses; - uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES]; - uint32_t m_ListsCount; - /* - * 0: 0-3 lists for small buffers - * 1+: 0-(2^SLI-1) lists for normal buffers - */ - Block** m_FreeList; - VmaPoolAllocator m_BlockAllocator; - Block* m_NullBlock; - VmaBlockBufferImageGranularity m_GranularityHandler; - - uint8_t SizeToMemoryClass(VkDeviceSize size) const; - uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const; - uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const; - uint32_t GetListIndex(VkDeviceSize size) const; - - void RemoveFreeBlock(Block* block); - void InsertFreeBlock(Block* block); - void MergeBlock(Block* block, Block* prev); - - Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const; - bool CheckBlock( - Block& block, - uint32_t listIndex, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - VmaAllocationRequest* pAllocationRequest); -}; - -#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS -VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual) - : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), - m_AllocCount(0), - m_BlocksFreeCount(0), - m_BlocksFreeSize(0), - m_IsFreeBitmap(0), - m_MemoryClasses(0), - m_ListsCount(0), - m_FreeList(VMA_NULL), - m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT), - m_NullBlock(VMA_NULL), - m_GranularityHandler(bufferImageGranularity) {} - -VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF() -{ - if (m_FreeList) - vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount); - m_GranularityHandler.Destroy(GetAllocationCallbacks()); -} - -void VmaBlockMetadata_TLSF::Init(VkDeviceSize size) -{ - VmaBlockMetadata::Init(size); - - if (!IsVirtual()) - m_GranularityHandler.Init(GetAllocationCallbacks(), size); - - m_NullBlock = m_BlockAllocator.Alloc(); - m_NullBlock->size = size; - m_NullBlock->offset = 0; - m_NullBlock->prevPhysical = VMA_NULL; - m_NullBlock->nextPhysical = VMA_NULL; - m_NullBlock->MarkFree(); - m_NullBlock->NextFree() = VMA_NULL; - m_NullBlock->PrevFree() = VMA_NULL; - uint8_t memoryClass = SizeToMemoryClass(size); - uint16_t sli = SizeToSecondIndex(size, memoryClass); - m_ListsCount = (memoryClass == 0 ? 
0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1; - if (IsVirtual()) - m_ListsCount += 1UL << SECOND_LEVEL_INDEX; - else - m_ListsCount += 4; - - m_MemoryClasses = memoryClass + 2; - memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t)); - - m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount); - memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); -} - -bool VmaBlockMetadata_TLSF::Validate() const -{ - VMA_VALIDATE(GetSumFreeSize() <= GetSize()); - - VkDeviceSize calculatedSize = m_NullBlock->size; - VkDeviceSize calculatedFreeSize = m_NullBlock->size; - size_t allocCount = 0; - size_t freeCount = 0; - - // Check integrity of free lists - for (uint32_t list = 0; list < m_ListsCount; ++list) - { - Block* block = m_FreeList[list]; - if (block != VMA_NULL) - { - VMA_VALIDATE(block->IsFree()); - VMA_VALIDATE(block->PrevFree() == VMA_NULL); - while (block->NextFree()) - { - VMA_VALIDATE(block->NextFree()->IsFree()); - VMA_VALIDATE(block->NextFree()->PrevFree() == block); - block = block->NextFree(); - } - } - } - - VkDeviceSize nextOffset = m_NullBlock->offset; - auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual()); - - VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL); - if (m_NullBlock->prevPhysical) - { - VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock); - } - // Check all blocks - for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) - { - VMA_VALIDATE(prev->offset + prev->size == nextOffset); - nextOffset = prev->offset; - calculatedSize += prev->size; - - uint32_t listIndex = GetListIndex(prev->size); - if (prev->IsFree()) - { - ++freeCount; - // Check if free block belongs to free list - Block* freeBlock = m_FreeList[listIndex]; - VMA_VALIDATE(freeBlock != VMA_NULL); - - bool found = false; - do - { - if (freeBlock == prev) - found = true; - - freeBlock = freeBlock->NextFree(); - } while (!found && freeBlock != VMA_NULL); - - VMA_VALIDATE(found); - calculatedFreeSize += prev->size; - } - else - { - ++allocCount; - // Check if taken block is not on a free list - Block* freeBlock = m_FreeList[listIndex]; - while (freeBlock) - { - VMA_VALIDATE(freeBlock != prev); - freeBlock = freeBlock->NextFree(); - } - - if (!IsVirtual()) - { - VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size)); - } - } - - if (prev->prevPhysical) - { - VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev); - } - } - - if (!IsVirtual()) - { - VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx)); - } - - VMA_VALIDATE(nextOffset == 0); - VMA_VALIDATE(calculatedSize == GetSize()); - VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize()); - VMA_VALIDATE(allocCount == m_AllocCount); - VMA_VALIDATE(freeCount == m_BlocksFreeCount); - - return true; -} - -void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const -{ - inoutStats.statistics.blockCount++; - inoutStats.statistics.blockBytes += GetSize(); - if (m_NullBlock->size > 0) - VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size); - - for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) - { - if (block->IsFree()) - VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size); - else - VmaAddDetailedStatisticsAllocation(inoutStats, block->size); - } -} - -void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const -{ - inoutStats.blockCount++; - inoutStats.allocationCount += 
(uint32_t)m_AllocCount;
-    inoutStats.blockBytes += GetSize();
-    inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
-}
-
-#if VMA_STATS_STRING_ENABLED
-void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
-{
-    size_t blockCount = m_AllocCount + m_BlocksFreeCount;
-    VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
-    VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
-
-    size_t i = blockCount;
-    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-    {
-        blockList[--i] = block;
-    }
-    VMA_ASSERT(i == 0);
-
-    VmaDetailedStatistics stats;
-    VmaClearDetailedStatistics(stats);
-    AddDetailedStatistics(stats);
-
-    PrintDetailedMap_Begin(json,
-        stats.statistics.blockBytes - stats.statistics.allocationBytes,
-        stats.statistics.allocationCount,
-        stats.unusedRangeCount);
-
-    for (; i < blockCount; ++i)
-    {
-        Block* block = blockList[i];
-        if (block->IsFree())
-            PrintDetailedMap_UnusedRange(json, block->offset, block->size);
-        else
-            PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
-    }
-    if (m_NullBlock->size > 0)
-        PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
-
-    PrintDetailedMap_End(json);
-}
-#endif
-
-bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
-    VkDeviceSize allocSize,
-    VkDeviceSize allocAlignment,
-    bool upperAddress,
-    VmaSuballocationType allocType,
-    uint32_t strategy,
-    VmaAllocationRequest* pAllocationRequest)
-{
-    VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
-    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
-
-    // For small granularity round up
-    if (!IsVirtual())
-        m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
-
-    allocSize += GetDebugMargin();
-    // Quick check for too small pool
-    if (allocSize > GetSumFreeSize())
-        return false;
-
-    // If no free blocks in pool then check only null block
-    if (m_BlocksFreeCount == 0)
-        return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
-
-    // Round up to the next block
-    VkDeviceSize sizeForNextList = allocSize;
-    VkDeviceSize smallSizeStep = SMALL_BUFFER_SIZE / (IsVirtual() ?
1 << SECOND_LEVEL_INDEX : 4); - if (allocSize > SMALL_BUFFER_SIZE) - { - sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX)); - } - else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep) - sizeForNextList = SMALL_BUFFER_SIZE + 1; - else - sizeForNextList += smallSizeStep; - - uint32_t nextListIndex = 0; - uint32_t prevListIndex = 0; - Block* nextListBlock = VMA_NULL; - Block* prevListBlock = VMA_NULL; - - // Check blocks according to strategies - if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) - { - // Quick check for larger block first - nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); - if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - - // If not fitted then null block - if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - - // Null block failed, search larger bucket - while (nextListBlock) - { - if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - nextListBlock = nextListBlock->NextFree(); - } - - // Failed again, check best fit bucket - prevListBlock = FindFreeBlock(allocSize, prevListIndex); - while (prevListBlock) - { - if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - prevListBlock = prevListBlock->NextFree(); - } - } - else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT) - { - // Check best fit bucket - prevListBlock = FindFreeBlock(allocSize, prevListIndex); - while (prevListBlock) - { - if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - prevListBlock = prevListBlock->NextFree(); - } - - // If failed check null block - if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - - // Check larger bucket - nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); - while (nextListBlock) - { - if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - nextListBlock = nextListBlock->NextFree(); - } - } - else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT ) - { - // Perform search from the start - VmaStlAllocator allocator(GetAllocationCallbacks()); - VmaVector> blockList(m_BlocksFreeCount, allocator); - - size_t i = m_BlocksFreeCount; - for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) - { - if (block->IsFree() && block->size >= allocSize) - blockList[--i] = block; - } - - for (; i < m_BlocksFreeCount; ++i) - { - Block& block = *blockList[i]; - if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - } - - // If failed check null block - if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - - // Whole range searched, no more memory - return false; - } - else - { - // Check larger bucket - nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); - while (nextListBlock) - { - if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - nextListBlock = nextListBlock->NextFree(); - } - - // If failed check null block - if (CheckBlock(*m_NullBlock, m_ListsCount, 
allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - - // Check best fit bucket - prevListBlock = FindFreeBlock(allocSize, prevListIndex); - while (prevListBlock) - { - if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - prevListBlock = prevListBlock->NextFree(); - } - } - - // Worst case, full search has to be done - while (++nextListIndex < m_ListsCount) - { - nextListBlock = m_FreeList[nextListIndex]; - while (nextListBlock) - { - if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) - return true; - nextListBlock = nextListBlock->NextFree(); - } - } - - // No more memory sadly - return false; -} - -VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData) -{ - for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) - { - if (!block->IsFree()) - { - if (!VmaValidateMagicValue(pBlockData, block->offset + block->size)) - { - VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); - return VK_ERROR_UNKNOWN_COPY; - } - } - } - - return VK_SUCCESS; -} - -void VmaBlockMetadata_TLSF::Alloc( - const VmaAllocationRequest& request, - VmaSuballocationType type, - void* userData) -{ - VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF); - - // Get block and pop it from the free list - Block* currentBlock = (Block*)request.allocHandle; - VkDeviceSize offset = request.algorithmData; - VMA_ASSERT(currentBlock != VMA_NULL); - VMA_ASSERT(currentBlock->offset <= offset); - - if (currentBlock != m_NullBlock) - RemoveFreeBlock(currentBlock); - - VkDeviceSize debugMargin = GetDebugMargin(); - VkDeviceSize misssingAlignment = offset - currentBlock->offset; - - // Append missing alignment to prev block or create new one - if (misssingAlignment) - { - Block* prevBlock = currentBlock->prevPhysical; - VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!"); - - if (prevBlock->IsFree() && prevBlock->size != debugMargin) - { - uint32_t oldList = GetListIndex(prevBlock->size); - prevBlock->size += misssingAlignment; - // Check if new size crosses list bucket - if (oldList != GetListIndex(prevBlock->size)) - { - prevBlock->size -= misssingAlignment; - RemoveFreeBlock(prevBlock); - prevBlock->size += misssingAlignment; - InsertFreeBlock(prevBlock); - } - else - m_BlocksFreeSize += misssingAlignment; - } - else - { - Block* newBlock = m_BlockAllocator.Alloc(); - currentBlock->prevPhysical = newBlock; - prevBlock->nextPhysical = newBlock; - newBlock->prevPhysical = prevBlock; - newBlock->nextPhysical = currentBlock; - newBlock->size = misssingAlignment; - newBlock->offset = currentBlock->offset; - newBlock->MarkTaken(); - - InsertFreeBlock(newBlock); - } - - currentBlock->size -= misssingAlignment; - currentBlock->offset += misssingAlignment; - } - - VkDeviceSize size = request.size + debugMargin; - if (currentBlock->size == size) - { - if (currentBlock == m_NullBlock) - { - // Setup new null block - m_NullBlock = m_BlockAllocator.Alloc(); - m_NullBlock->size = 0; - m_NullBlock->offset = currentBlock->offset + size; - m_NullBlock->prevPhysical = currentBlock; - m_NullBlock->nextPhysical = VMA_NULL; - m_NullBlock->MarkFree(); - m_NullBlock->PrevFree() = VMA_NULL; - m_NullBlock->NextFree() = VMA_NULL; - currentBlock->nextPhysical = m_NullBlock; - currentBlock->MarkTaken(); - } - } - else - { - VMA_ASSERT(currentBlock->size > size && "Proper block already found, 
shouldn't find smaller one!"); - - // Create new free block - Block* newBlock = m_BlockAllocator.Alloc(); - newBlock->size = currentBlock->size - size; - newBlock->offset = currentBlock->offset + size; - newBlock->prevPhysical = currentBlock; - newBlock->nextPhysical = currentBlock->nextPhysical; - currentBlock->nextPhysical = newBlock; - currentBlock->size = size; - - if (currentBlock == m_NullBlock) - { - m_NullBlock = newBlock; - m_NullBlock->MarkFree(); - m_NullBlock->NextFree() = VMA_NULL; - m_NullBlock->PrevFree() = VMA_NULL; - currentBlock->MarkTaken(); - } - else - { - newBlock->nextPhysical->prevPhysical = newBlock; - newBlock->MarkTaken(); - InsertFreeBlock(newBlock); - } - } - currentBlock->UserData() = userData; - - if (debugMargin > 0) - { - currentBlock->size -= debugMargin; - Block* newBlock = m_BlockAllocator.Alloc(); - newBlock->size = debugMargin; - newBlock->offset = currentBlock->offset + currentBlock->size; - newBlock->prevPhysical = currentBlock; - newBlock->nextPhysical = currentBlock->nextPhysical; - newBlock->MarkTaken(); - currentBlock->nextPhysical->prevPhysical = newBlock; - currentBlock->nextPhysical = newBlock; - InsertFreeBlock(newBlock); - } - - if (!IsVirtual()) - m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData, - currentBlock->offset, currentBlock->size); - ++m_AllocCount; -} - -void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) -{ - Block* block = (Block*)allocHandle; - Block* next = block->nextPhysical; - VMA_ASSERT(!block->IsFree() && "Block is already free!"); - - if (!IsVirtual()) - m_GranularityHandler.FreePages(block->offset, block->size); - --m_AllocCount; - - VkDeviceSize debugMargin = GetDebugMargin(); - if (debugMargin > 0) - { - RemoveFreeBlock(next); - MergeBlock(next, block); - block = next; - next = next->nextPhysical; - } - - // Try merging - Block* prev = block->prevPhysical; - if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin) - { - RemoveFreeBlock(prev); - MergeBlock(block, prev); - } - - if (!next->IsFree()) - InsertFreeBlock(block); - else if (next == m_NullBlock) - MergeBlock(m_NullBlock, block); - else - { - RemoveFreeBlock(next); - MergeBlock(next, block); - InsertFreeBlock(next); - } -} - -void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) -{ - Block* block = (Block*)allocHandle; - VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!"); - outInfo.offset = block->offset; - outInfo.size = block->size; - outInfo.pUserData = block->UserData(); -} - -void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const -{ - Block* block = (Block*)allocHandle; - VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!"); - return block->UserData(); -} - -VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const -{ - if (m_AllocCount == 0) - return VK_NULL_HANDLE; - - for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) - { - if (!block->IsFree()) - return (VmaAllocHandle)block; - } - VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!"); - return VK_NULL_HANDLE; -} - -VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const -{ - Block* startBlock = (Block*)prevAlloc; - VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!"); - - for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical) - { - if (!block->IsFree()) - return (VmaAllocHandle)block; - } - return 
VK_NULL_HANDLE; -} - -VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const -{ - Block* block = (Block*)alloc; - VMA_ASSERT(!block->IsFree() && "Incorrect block!"); - - if (block->prevPhysical) - return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0; - return 0; -} - -void VmaBlockMetadata_TLSF::Clear() -{ - m_AllocCount = 0; - m_BlocksFreeCount = 0; - m_BlocksFreeSize = 0; - m_IsFreeBitmap = 0; - m_NullBlock->offset = 0; - m_NullBlock->size = GetSize(); - Block* block = m_NullBlock->prevPhysical; - m_NullBlock->prevPhysical = VMA_NULL; - while (block) - { - Block* prev = block->prevPhysical; - m_BlockAllocator.Free(block); - block = prev; - } - memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); - memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t)); - m_GranularityHandler.Clear(); -} - -void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) -{ - Block* block = (Block*)allocHandle; - VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!"); - block->UserData() = userData; -} - -void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const -{ - for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) - if (!block->IsFree()) - DebugLogAllocation(block->offset, block->size, block->UserData()); -} - -uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const -{ - if (size > SMALL_BUFFER_SIZE) - return VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT; - return 0; -} - -uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const -{ - if (memoryClass == 0) - { - if (IsVirtual()) - return static_cast((size - 1) / 8); - else - return static_cast((size - 1) / 64); - } - return static_cast((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX)); -} - -uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const -{ - if (memoryClass == 0) - return secondIndex; - - const uint32_t index = static_cast(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex; - if (IsVirtual()) - return index + (1 << SECOND_LEVEL_INDEX); - else - return index + 4; -} - -uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const -{ - uint8_t memoryClass = SizeToMemoryClass(size); - return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass)); -} - -void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block) -{ - VMA_ASSERT(block != m_NullBlock); - VMA_ASSERT(block->IsFree()); - - if (block->NextFree() != VMA_NULL) - block->NextFree()->PrevFree() = block->PrevFree(); - if (block->PrevFree() != VMA_NULL) - block->PrevFree()->NextFree() = block->NextFree(); - else - { - uint8_t memClass = SizeToMemoryClass(block->size); - uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); - uint32_t index = GetListIndex(memClass, secondIndex); - VMA_ASSERT(m_FreeList[index] == block); - m_FreeList[index] = block->NextFree(); - if (block->NextFree() == VMA_NULL) - { - m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex); - if (m_InnerIsFreeBitmap[memClass] == 0) - m_IsFreeBitmap &= ~(1UL << memClass); - } - } - block->MarkTaken(); - block->UserData() = VMA_NULL; - --m_BlocksFreeCount; - m_BlocksFreeSize -= block->size; -} - -void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block) -{ - VMA_ASSERT(block != m_NullBlock); - VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!"); - - uint8_t memClass = 
SizeToMemoryClass(block->size); - uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); - uint32_t index = GetListIndex(memClass, secondIndex); - VMA_ASSERT(index < m_ListsCount); - block->PrevFree() = VMA_NULL; - block->NextFree() = m_FreeList[index]; - m_FreeList[index] = block; - if (block->NextFree() != VMA_NULL) - block->NextFree()->PrevFree() = block; - else - { - m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex; - m_IsFreeBitmap |= 1UL << memClass; - } - ++m_BlocksFreeCount; - m_BlocksFreeSize += block->size; -} - -void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev) -{ - VMA_ASSERT(block->prevPhysical == prev && "Cannot merge seperate physical regions!"); - VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!"); - - block->offset = prev->offset; - block->size += prev->size; - block->prevPhysical = prev->prevPhysical; - if (block->prevPhysical) - block->prevPhysical->nextPhysical = block; - m_BlockAllocator.Free(prev); -} - -VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const -{ - uint8_t memoryClass = SizeToMemoryClass(size); - uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass)); - if (!innerFreeMap) - { - // Check higher levels for avaiable blocks - uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1)); - if (!freeMap) - return VMA_NULL; // No more memory avaible - - // Find lowest free region - memoryClass = VMA_BITSCAN_LSB(freeMap); - innerFreeMap = m_InnerIsFreeBitmap[memoryClass]; - VMA_ASSERT(innerFreeMap != 0); - } - // Find lowest free subregion - listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap)); - VMA_ASSERT(m_FreeList[listIndex]); - return m_FreeList[listIndex]; -} - -bool VmaBlockMetadata_TLSF::CheckBlock( - Block& block, - uint32_t listIndex, - VkDeviceSize allocSize, - VkDeviceSize allocAlignment, - VmaSuballocationType allocType, - VmaAllocationRequest* pAllocationRequest) -{ - VMA_ASSERT(block.IsFree() && "Block is already taken!"); - - VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment); - if (block.size < allocSize + alignedOffset - block.offset) - return false; - - // Check for granularity conflicts - if (!IsVirtual() && - m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType)) - return false; - - // Alloc successful - pAllocationRequest->type = VmaAllocationRequestType::TLSF; - pAllocationRequest->allocHandle = (VmaAllocHandle)█ - pAllocationRequest->size = allocSize - GetDebugMargin(); - pAllocationRequest->customData = (void*)allocType; - pAllocationRequest->algorithmData = alignedOffset; - - // Place block at the start of list if it's normal block - if (listIndex != m_ListsCount && block.PrevFree()) - { - block.PrevFree()->NextFree() = block.NextFree(); - if (block.NextFree()) - block.NextFree()->PrevFree() = block.PrevFree(); - block.PrevFree() = VMA_NULL; - block.NextFree() = m_FreeList[listIndex]; - m_FreeList[listIndex] = █ - if (block.NextFree()) - block.NextFree()->PrevFree() = █ - } - - return true; -} -#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS -#endif // _VMA_BLOCK_METADATA_TLSF - -#ifndef _VMA_BLOCK_VECTOR -/* -Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific -Vulkan memory type. - -Synchronized internally with a mutex. 
-*/ -class VmaBlockVector -{ - friend struct VmaDefragmentationContext_T; - VMA_CLASS_NO_COPY(VmaBlockVector) -public: - VmaBlockVector( - VmaAllocator hAllocator, - VmaPool hParentPool, - uint32_t memoryTypeIndex, - VkDeviceSize preferredBlockSize, - size_t minBlockCount, - size_t maxBlockCount, - VkDeviceSize bufferImageGranularity, - bool explicitBlockSize, - uint32_t algorithm, - float priority, - VkDeviceSize minAllocationAlignment, - void* pMemoryAllocateNext); - ~VmaBlockVector(); - - VmaAllocator GetAllocator() const { return m_hAllocator; } - VmaPool GetParentPool() const { return m_hParentPool; } - bool IsCustomPool() const { return m_hParentPool != VMA_NULL; } - uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } - VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; } - VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } - uint32_t GetAlgorithm() const { return m_Algorithm; } - bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; } - float GetPriority() const { return m_Priority; } - const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; } - // To be used only while the m_Mutex is locked. Used during defragmentation. - size_t GetBlockCount() const { return m_Blocks.size(); } - // To be used only while the m_Mutex is locked. Used during defragmentation. - VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } - VMA_RW_MUTEX &GetMutex() { return m_Mutex; } - - VkResult CreateMinBlocks(); - void AddStatistics(VmaStatistics& inoutStats); - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); - bool IsEmpty(); - bool IsCorruptionDetectionEnabled() const; - - VkResult Allocate( - VkDeviceSize size, - VkDeviceSize alignment, - const VmaAllocationCreateInfo& createInfo, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation* pAllocations); - - void Free(const VmaAllocation hAllocation); - -#if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json); -#endif - - VkResult CheckCorruption(); - -private: - const VmaAllocator m_hAllocator; - const VmaPool m_hParentPool; - const uint32_t m_MemoryTypeIndex; - const VkDeviceSize m_PreferredBlockSize; - const size_t m_MinBlockCount; - const size_t m_MaxBlockCount; - const VkDeviceSize m_BufferImageGranularity; - const bool m_ExplicitBlockSize; - const uint32_t m_Algorithm; - const float m_Priority; - const VkDeviceSize m_MinAllocationAlignment; - - void* const m_pMemoryAllocateNext; - VMA_RW_MUTEX m_Mutex; - // Incrementally sorted by sumFreeSize, ascending. - VmaVector> m_Blocks; - uint32_t m_NextBlockId; - bool m_IncrementalSort = true; - - void SetIncrementalSort(bool val) { m_IncrementalSort = val; } - - VkDeviceSize CalcMaxBlockSize() const; - // Finds and removes given block from vector. - void Remove(VmaDeviceMemoryBlock* pBlock); - // Performs single step in sorting m_Blocks. They may not be fully sorted - // after this call. 
- void IncrementallySortBlocks(); - void SortByFreeSize(); - - VkResult AllocatePage( - VkDeviceSize size, - VkDeviceSize alignment, - const VmaAllocationCreateInfo& createInfo, - VmaSuballocationType suballocType, - VmaAllocation* pAllocation); - - VkResult AllocateFromBlock( - VmaDeviceMemoryBlock* pBlock, - VkDeviceSize size, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void* pUserData, - VmaSuballocationType suballocType, - uint32_t strategy, - VmaAllocation* pAllocation); - - VkResult CommitAllocationRequest( - VmaAllocationRequest& allocRequest, - VmaDeviceMemoryBlock* pBlock, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void* pUserData, - VmaSuballocationType suballocType, - VmaAllocation* pAllocation); - - VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); - bool HasEmptyBlock(); -}; -#endif // _VMA_BLOCK_VECTOR - -#ifndef _VMA_DEFRAGMENTATION_CONTEXT -struct VmaDefragmentationContext_T -{ - VMA_CLASS_NO_COPY(VmaDefragmentationContext_T) -public: - VmaDefragmentationContext_T( - VmaAllocator hAllocator, - const VmaDefragmentationInfo& info); - ~VmaDefragmentationContext_T(); - - void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; } - - VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo); - VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo); - -private: - // Max number of allocations to ignore due to size constraints before ending single pass - static const uint8_t MAX_ALLOCS_TO_IGNORE = 16; - enum class CounterStatus { Pass, Ignore, End }; - - struct FragmentedBlock - { - uint32_t data; - VmaDeviceMemoryBlock* block; - }; - struct StateBalanced - { - VkDeviceSize avgFreeSize = 0; - VkDeviceSize avgAllocSize = UINT64_MAX; - }; - struct StateExtensive - { - enum class Operation : uint8_t - { - FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll, - MoveBuffers, MoveTextures, MoveAll, - Cleanup, Done - }; - - Operation operation = Operation::FindFreeBlockTexture; - size_t firstFreeBlock = SIZE_MAX; - }; - struct MoveAllocationData - { - VkDeviceSize size; - VkDeviceSize alignment; - VmaSuballocationType type; - VmaAllocationCreateFlags flags; - VmaDefragmentationMove move = {}; - }; - - const VkDeviceSize m_MaxPassBytes; - const uint32_t m_MaxPassAllocations; - - VmaStlAllocator m_MoveAllocator; - VmaVector> m_Moves; - - uint8_t m_IgnoredAllocs = 0; - uint32_t m_Algorithm; - uint32_t m_BlockVectorCount; - VmaBlockVector* m_PoolBlockVector; - VmaBlockVector** m_pBlockVectors; - size_t m_ImmovableBlockCount = 0; - VmaDefragmentationStats m_GlobalStats = { 0 }; - VmaDefragmentationStats m_PassStats = { 0 }; - void* m_AlgorithmState = VMA_NULL; - - static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata); - CounterStatus CheckCounters(VkDeviceSize bytes); - bool IncrementCounters(VkDeviceSize bytes); - bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block); - bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector); - - bool ComputeDefragmentation(VmaBlockVector& vector, size_t index); - bool ComputeDefragmentation_Fast(VmaBlockVector& vector); - bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update); - bool ComputeDefragmentation_Full(VmaBlockVector& vector); - bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index); - - void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); - bool 
MoveDataToFreeBlocks(VmaSuballocationType currentType, - VmaBlockVector& vector, size_t firstFreeBlock, - bool& texturePresent, bool& bufferPresent, bool& otherPresent); -}; -#endif // _VMA_DEFRAGMENTATION_CONTEXT - -#ifndef _VMA_POOL_T -struct VmaPool_T -{ - friend struct VmaPoolListItemTraits; - VMA_CLASS_NO_COPY(VmaPool_T) -public: - VmaBlockVector m_BlockVector; - VmaDedicatedAllocationList m_DedicatedAllocations; - - VmaPool_T( - VmaAllocator hAllocator, - const VmaPoolCreateInfo& createInfo, - VkDeviceSize preferredBlockSize); - ~VmaPool_T(); - - uint32_t GetId() const { return m_Id; } - void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } - - const char* GetName() const { return m_Name; } - void SetName(const char* pName); - -#if VMA_STATS_STRING_ENABLED - //void PrintDetailedMap(class VmaStringBuilder& sb); -#endif - -private: - uint32_t m_Id; - char* m_Name; - VmaPool_T* m_PrevPool = VMA_NULL; - VmaPool_T* m_NextPool = VMA_NULL; -}; - -struct VmaPoolListItemTraits -{ - typedef VmaPool_T ItemType; - - static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; } - static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; } - static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; } - static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; } -}; -#endif // _VMA_POOL_T - -#ifndef _VMA_CURRENT_BUDGET_DATA -struct VmaCurrentBudgetData -{ - VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS]; - VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS]; - VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; - VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; - -#if VMA_MEMORY_BUDGET - VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; - VMA_RW_MUTEX m_BudgetMutex; - uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; - uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; - uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; -#endif // VMA_MEMORY_BUDGET - - VmaCurrentBudgetData(); - - void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); - void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); -}; - -#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS -VmaCurrentBudgetData::VmaCurrentBudgetData() -{ - for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) - { - m_BlockCount[heapIndex] = 0; - m_AllocationCount[heapIndex] = 0; - m_BlockBytes[heapIndex] = 0; - m_AllocationBytes[heapIndex] = 0; -#if VMA_MEMORY_BUDGET - m_VulkanUsage[heapIndex] = 0; - m_VulkanBudget[heapIndex] = 0; - m_BlockBytesAtBudgetFetch[heapIndex] = 0; -#endif - } - -#if VMA_MEMORY_BUDGET - m_OperationsSinceBudgetFetch = 0; -#endif -} - -void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) -{ - m_AllocationBytes[heapIndex] += allocationSize; - ++m_AllocationCount[heapIndex]; -#if VMA_MEMORY_BUDGET - ++m_OperationsSinceBudgetFetch; -#endif -} - -void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) -{ - VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); - m_AllocationBytes[heapIndex] -= allocationSize; - VMA_ASSERT(m_AllocationCount[heapIndex] > 0); - --m_AllocationCount[heapIndex]; -#if VMA_MEMORY_BUDGET - ++m_OperationsSinceBudgetFetch; -#endif -} -#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS -#endif // _VMA_CURRENT_BUDGET_DATA - -#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR -/* -Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects. 
-*/ -class VmaAllocationObjectAllocator -{ - VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator) -public: - VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) - : m_Allocator(pAllocationCallbacks, 1024) {} - - template VmaAllocation Allocate(Types&&... args); - void Free(VmaAllocation hAlloc); - -private: - VMA_MUTEX m_Mutex; - VmaPoolAllocator m_Allocator; -}; - -template -VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args) -{ - VmaMutexLock mutexLock(m_Mutex); - return m_Allocator.Alloc(std::forward(args)...); -} - -void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc) -{ - VmaMutexLock mutexLock(m_Mutex); - m_Allocator.Free(hAlloc); -} -#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR - -#ifndef _VMA_VIRTUAL_BLOCK_T -struct VmaVirtualBlock_T -{ - VMA_CLASS_NO_COPY(VmaVirtualBlock_T) -public: - const bool m_AllocationCallbacksSpecified; - const VkAllocationCallbacks m_AllocationCallbacks; - - VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo); - ~VmaVirtualBlock_T(); - - VkResult Init() { return VK_SUCCESS; } - bool IsEmpty() const { return m_Metadata->IsEmpty(); } - void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); } - void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); } - void Clear() { m_Metadata->Clear(); } - - const VkAllocationCallbacks* GetAllocationCallbacks() const; - void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo); - VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, - VkDeviceSize* outOffset); - void GetStatistics(VmaStatistics& outStats) const; - void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const; -#if VMA_STATS_STRING_ENABLED - void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const; -#endif - -private: - VmaBlockMetadata* m_Metadata; -}; - -#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS -VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo) - : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL), - m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks) -{ - const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK; - switch (algorithm) - { - default: - VMA_ASSERT(0); - case 0: - m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); - break; - case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT: - m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true); - break; - } - - m_Metadata->Init(createInfo.size); -} - -VmaVirtualBlock_T::~VmaVirtualBlock_T() -{ - // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations - if (!m_Metadata->IsEmpty()) - m_Metadata->DebugLogAllAllocations(); - // This is the most important assert in the entire library. - // Hitting it means you have some memory leak - unreleased virtual allocations. - VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!"); - - vma_delete(GetAllocationCallbacks(), m_Metadata); -} - -const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const -{ - return m_AllocationCallbacksSpecified ? 
&m_AllocationCallbacks : VMA_NULL; -} - -void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) -{ - m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); -} - -VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, - VkDeviceSize* outOffset) -{ - VmaAllocationRequest request = {}; - if (m_Metadata->CreateAllocationRequest( - createInfo.size, // allocSize - VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment - (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress - VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant - createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy - &request)) - { - m_Metadata->Alloc(request, - VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant - createInfo.pUserData); - outAllocation = (VmaVirtualAllocation)request.allocHandle; - if(outOffset) - *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); - return VK_SUCCESS; - } - outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE; - if (outOffset) - *outOffset = UINT64_MAX; - return VK_ERROR_OUT_OF_DEVICE_MEMORY; -} - -void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const -{ - VmaClearStatistics(outStats); - m_Metadata->AddStatistics(outStats); -} - -void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const -{ - VmaClearDetailedStatistics(outStats); - m_Metadata->AddDetailedStatistics(outStats); -} - -#if VMA_STATS_STRING_ENABLED -void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const -{ - VmaJsonWriter json(GetAllocationCallbacks(), sb); - json.BeginObject(); - - VmaDetailedStatistics stats; - CalculateDetailedStatistics(stats); - - json.WriteString("Stats"); - VmaPrintDetailedStatistics(json, stats); - - if (detailedMap) - { - json.WriteString("Details"); - json.BeginObject(); - m_Metadata->PrintDetailedMap(json); - json.EndObject(); - } - - json.EndObject(); -} -#endif // VMA_STATS_STRING_ENABLED -#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS -#endif // _VMA_VIRTUAL_BLOCK_T - - -// Main allocator object. -struct VmaAllocator_T -{ - VMA_CLASS_NO_COPY(VmaAllocator_T) -public: - bool m_UseMutex; - uint32_t m_VulkanApiVersion; - bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). - bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). - bool m_UseExtMemoryBudget; - bool m_UseAmdDeviceCoherentMemory; - bool m_UseKhrBufferDeviceAddress; - bool m_UseExtMemoryPriority; - VkDevice m_hDevice; - VkInstance m_hInstance; - bool m_AllocationCallbacksSpecified; - VkAllocationCallbacks m_AllocationCallbacks; - VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; - VmaAllocationObjectAllocator m_AllocationObjectAllocator; - - // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. - uint32_t m_HeapSizeLimitMask; - - VkPhysicalDeviceProperties m_PhysicalDeviceProperties; - VkPhysicalDeviceMemoryProperties m_MemProps; - - // Default pools. - VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; - VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; - - VmaCurrentBudgetData m_Budget; - VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects. 
- - VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo); - VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo); - ~VmaAllocator_T(); - - const VkAllocationCallbacks* GetAllocationCallbacks() const - { - return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL; - } - const VmaVulkanFunctions& GetVulkanFunctions() const - { - return m_VulkanFunctions; - } - - VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; } - - VkDeviceSize GetBufferImageGranularity() const - { - return VMA_MAX( - static_cast(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY), - m_PhysicalDeviceProperties.limits.bufferImageGranularity); - } - - uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; } - uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; } - - uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const - { - VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount); - return m_MemProps.memoryTypes[memTypeIndex].heapIndex; - } - // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT. - bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const - { - return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) == - VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - } - // Minimum alignment for all allocations in specific memory type. - VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const - { - return IsMemoryTypeNonCoherent(memTypeIndex) ? - VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : - (VkDeviceSize)VMA_MIN_ALIGNMENT; - } - - bool IsIntegratedGpu() const - { - return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; - } - - uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; } - - void GetBufferMemoryRequirements( - VkBuffer hBuffer, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const; - void GetImageMemoryRequirements( - VkImage hImage, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const; - VkResult FindMemoryTypeIndex( - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. - uint32_t* pMemoryTypeIndex) const; - - // Main allocation function. - VkResult AllocateMemory( - const VkMemoryRequirements& vkMemReq, - bool requiresDedicatedAllocation, - bool prefersDedicatedAllocation, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown. - const VmaAllocationCreateInfo& createInfo, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation* pAllocations); - - // Main deallocation function. 
- void FreeMemory( - size_t allocationCount, - const VmaAllocation* pAllocations); - - void CalculateStatistics(VmaTotalStatistics* pStats); - - void GetHeapBudgets( - VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount); - -#if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json); -#endif - - void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); - - VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); - void DestroyPool(VmaPool pool); - void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats); - void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats); - - void SetCurrentFrameIndex(uint32_t frameIndex); - uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } - - VkResult CheckPoolCorruption(VmaPool hPool); - VkResult CheckCorruption(uint32_t memoryTypeBits); - - // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. - VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); - // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. - void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); - // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. - VkResult BindVulkanBuffer( - VkDeviceMemory memory, - VkDeviceSize memoryOffset, - VkBuffer buffer, - const void* pNext); - // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. - VkResult BindVulkanImage( - VkDeviceMemory memory, - VkDeviceSize memoryOffset, - VkImage image, - const void* pNext); - - VkResult Map(VmaAllocation hAllocation, void** ppData); - void Unmap(VmaAllocation hAllocation); - - VkResult BindBufferMemory( - VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkBuffer hBuffer, - const void* pNext); - VkResult BindImageMemory( - VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkImage hImage, - const void* pNext); - - VkResult FlushOrInvalidateAllocation( - VmaAllocation hAllocation, - VkDeviceSize offset, VkDeviceSize size, - VMA_CACHE_OPERATION op); - VkResult FlushOrInvalidateAllocations( - uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, const VkDeviceSize* sizes, - VMA_CACHE_OPERATION op); - - void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); - - /* - Returns bit mask of memory types that can support defragmentation on GPU as - they support creation of required buffer for copy operations. - */ - uint32_t GetGpuDefragmentationMemoryTypeBits(); - -#if VMA_EXTERNAL_MEMORY - VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const - { - return m_TypeExternalMemoryHandleTypes[memTypeIndex]; - } -#endif // #if VMA_EXTERNAL_MEMORY - -private: - VkDeviceSize m_PreferredLargeHeapBlockSize; - - VkPhysicalDevice m_PhysicalDevice; - VMA_ATOMIC_UINT32 m_CurrentFrameIndex; - VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. -#if VMA_EXTERNAL_MEMORY - VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES]; -#endif // #if VMA_EXTERNAL_MEMORY - - VMA_RW_MUTEX m_PoolsMutex; - typedef VmaIntrusiveLinkedList PoolList; - // Protected by m_PoolsMutex. - PoolList m_Pools; - uint32_t m_NextPoolId; - - VmaVulkanFunctions m_VulkanFunctions; - - // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types. 
- uint32_t m_GlobalMemoryTypeBits; - - void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); - -#if VMA_STATIC_VULKAN_FUNCTIONS == 1 - void ImportVulkanFunctions_Static(); -#endif - - void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions); - -#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 - void ImportVulkanFunctions_Dynamic(); -#endif - - void ValidateVulkanFunctions(); - - VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); - - VkResult AllocateMemoryOfType( - VmaPool pool, - VkDeviceSize size, - VkDeviceSize alignment, - bool dedicatedPreferred, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - const VmaAllocationCreateInfo& createInfo, - uint32_t memTypeIndex, - VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - VmaBlockVector& blockVector, - size_t allocationCount, - VmaAllocation* pAllocations); - - // Helper function only to be used inside AllocateDedicatedMemory. - VkResult AllocateDedicatedMemoryPage( - VmaPool pool, - VkDeviceSize size, - VmaSuballocationType suballocType, - uint32_t memTypeIndex, - const VkMemoryAllocateInfo& allocInfo, - bool map, - bool isUserDataString, - bool isMappingAllowed, - void* pUserData, - VmaAllocation* pAllocation); - - // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. - VkResult AllocateDedicatedMemory( - VmaPool pool, - VkDeviceSize size, - VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - uint32_t memTypeIndex, - bool map, - bool isUserDataString, - bool isMappingAllowed, - bool canAliasMemory, - void* pUserData, - float priority, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - size_t allocationCount, - VmaAllocation* pAllocations, - const void* pNextChain = nullptr); - - void FreeDedicatedMemory(const VmaAllocation allocation); - - VkResult CalcMemTypeParams( - VmaAllocationCreateInfo& outCreateInfo, - uint32_t memTypeIndex, - VkDeviceSize size, - size_t allocationCount); - VkResult CalcAllocationParams( - VmaAllocationCreateInfo& outCreateInfo, - bool dedicatedRequired, - bool dedicatedPreferred); - - /* - Calculates and returns bit mask of memory types that can support defragmentation - on GPU as they support creation of required buffer for copy operations. 
- */ - uint32_t CalculateGpuDefragmentationMemoryTypeBits() const; - uint32_t CalculateGlobalMemoryTypeBits() const; - - bool GetFlushOrInvalidateRange( - VmaAllocation allocation, - VkDeviceSize offset, VkDeviceSize size, - VkMappedMemoryRange& outRange) const; - -#if VMA_MEMORY_BUDGET - void UpdateVulkanBudget(); -#endif // #if VMA_MEMORY_BUDGET -}; - - -#ifndef _VMA_MEMORY_FUNCTIONS -static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) -{ - return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment); -} - -static void VmaFree(VmaAllocator hAllocator, void* ptr) -{ - VmaFree(&hAllocator->m_AllocationCallbacks, ptr); -} - -template -static T* VmaAllocate(VmaAllocator hAllocator) -{ - return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); -} - -template -static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count) -{ - return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); -} - -template -static void vma_delete(VmaAllocator hAllocator, T* ptr) -{ - if(ptr != VMA_NULL) - { - ptr->~T(); - VmaFree(hAllocator, ptr); - } -} - -template -static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count) -{ - if(ptr != VMA_NULL) - { - for(size_t i = count; i--; ) - ptr[i].~T(); - VmaFree(hAllocator, ptr); - } -} -#endif // _VMA_MEMORY_FUNCTIONS - -#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS -VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) - : m_pMetadata(VMA_NULL), - m_MemoryTypeIndex(UINT32_MAX), - m_Id(0), - m_hMemory(VK_NULL_HANDLE), - m_MapCount(0), - m_pMappedData(VMA_NULL) {} - -VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock() -{ - VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped."); - VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); -} - -void VmaDeviceMemoryBlock::Init( - VmaAllocator hAllocator, - VmaPool hParentPool, - uint32_t newMemoryTypeIndex, - VkDeviceMemory newMemory, - VkDeviceSize newSize, - uint32_t id, - uint32_t algorithm, - VkDeviceSize bufferImageGranularity) -{ - VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); - - m_hParentPool = hParentPool; - m_MemoryTypeIndex = newMemoryTypeIndex; - m_Id = id; - m_hMemory = newMemory; - - switch (algorithm) - { - case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: - m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(), - bufferImageGranularity, false); // isVirtual - break; - default: - VMA_ASSERT(0); - // Fall-through. - case 0: - m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), - bufferImageGranularity, false); // isVirtual - } - m_pMetadata->Init(newSize); -} - -void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) -{ - // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations - if (!m_pMetadata->IsEmpty()) - m_pMetadata->DebugLogAllAllocations(); - // This is the most important assert in the entire library. - // Hitting it means you have some memory leak - unreleased VmaAllocation objects. 
- VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); - - VMA_ASSERT(m_hMemory != VK_NULL_HANDLE); - allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); - m_hMemory = VK_NULL_HANDLE; - - vma_delete(allocator, m_pMetadata); - m_pMetadata = VMA_NULL; -} - -void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) -{ - if(m_MappingHysteresis.PostFree()) - { - VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0); - if (m_MapCount == 0) - { - m_pMappedData = VMA_NULL; - (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); - } - } -} - -bool VmaDeviceMemoryBlock::Validate() const -{ - VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && - (m_pMetadata->GetSize() != 0)); - - return m_pMetadata->Validate(); -} - -VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) -{ - void* pData = nullptr; - VkResult res = Map(hAllocator, 1, &pData); - if (res != VK_SUCCESS) - { - return res; - } - - res = m_pMetadata->CheckCorruption(pData); - - Unmap(hAllocator, 1); - - return res; -} - -VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) -{ - if (count == 0) - { - return VK_SUCCESS; - } - - VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); - const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); - m_MappingHysteresis.PostMap(); - if (oldTotalMapCount != 0) - { - m_MapCount += count; - VMA_ASSERT(m_pMappedData != VMA_NULL); - if (ppData != VMA_NULL) - { - *ppData = m_pMappedData; - } - return VK_SUCCESS; - } - else - { - VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( - hAllocator->m_hDevice, - m_hMemory, - 0, // offset - VK_WHOLE_SIZE, - 0, // flags - &m_pMappedData); - if (result == VK_SUCCESS) - { - if (ppData != VMA_NULL) - { - *ppData = m_pMappedData; - } - m_MapCount = count; - } - return result; - } -} - -void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) -{ - if (count == 0) - { - return; - } - - VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); - if (m_MapCount >= count) - { - m_MapCount -= count; - const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); - if (totalMapCount == 0) - { - m_pMappedData = VMA_NULL; - (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); - } - m_MappingHysteresis.PostUnmap(); - } - else - { - VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); - } -} - -VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) -{ - VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); - - void* pData; - VkResult res = Map(hAllocator, 1, &pData); - if (res != VK_SUCCESS) - { - return res; - } - - VmaWriteMagicValue(pData, allocOffset + allocSize); - - Unmap(hAllocator, 1); - return VK_SUCCESS; -} - -VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) -{ - VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); - - void* pData; - VkResult res = Map(hAllocator, 1, &pData); - if (res != VK_SUCCESS) - { - return res; - } - - if (!VmaValidateMagicValue(pData, allocOffset + allocSize)) - { - VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); - } - - 
Unmap(hAllocator, 1); - return VK_SUCCESS; -} - -VkResult VmaDeviceMemoryBlock::BindBufferMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkBuffer hBuffer, - const void* pNext) -{ - VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && - hAllocation->GetBlock() == this); - VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && - "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); - const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; - // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. - VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); - return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); -} - -VkResult VmaDeviceMemoryBlock::BindImageMemory( - const VmaAllocator hAllocator, - const VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkImage hImage, - const void* pNext) -{ - VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && - hAllocation->GetBlock() == this); - VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && - "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); - const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; - // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. - VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); - return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); -} -#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS - -#ifndef _VMA_ALLOCATION_T_FUNCTIONS -VmaAllocation_T::VmaAllocation_T(bool mappingAllowed) - : m_Alignment{ 1 }, - m_Size{ 0 }, - m_pUserData{ VMA_NULL }, - m_pName{ VMA_NULL }, - m_MemoryTypeIndex{ 0 }, - m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, - m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, - m_MapCount{ 0 }, - m_Flags{ 0 } -{ - if(mappingAllowed) - m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; - -#if VMA_STATS_STRING_ENABLED - m_BufferImageUsage = 0; -#endif -} - -VmaAllocation_T::~VmaAllocation_T() -{ - VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction."); - - // Check if owned string was freed. - VMA_ASSERT(m_pName == VMA_NULL); -} - -void VmaAllocation_T::InitBlockAllocation( - VmaDeviceMemoryBlock* block, - VmaAllocHandle allocHandle, - VkDeviceSize alignment, - VkDeviceSize size, - uint32_t memoryTypeIndex, - VmaSuballocationType suballocationType, - bool mapped) -{ - VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); - VMA_ASSERT(block != VMA_NULL); - m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; - m_Alignment = alignment; - m_Size = size; - m_MemoryTypeIndex = memoryTypeIndex; - if(mapped) - { - VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; - } - m_SuballocationType = (uint8_t)suballocationType; - m_BlockAllocation.m_Block = block; - m_BlockAllocation.m_AllocHandle = allocHandle; -} - -void VmaAllocation_T::InitDedicatedAllocation( - VmaPool hParentPool, - uint32_t memoryTypeIndex, - VkDeviceMemory hMemory, - VmaSuballocationType suballocationType, - void* pMappedData, - VkDeviceSize size) -{ - VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); - VMA_ASSERT(hMemory != VK_NULL_HANDLE); - m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; - m_Alignment = 0; - m_Size = size; - m_MemoryTypeIndex = memoryTypeIndex; - m_SuballocationType = (uint8_t)suballocationType; - if(pMappedData != VMA_NULL) - { - VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; - } - m_DedicatedAllocation.m_hParentPool = hParentPool; - m_DedicatedAllocation.m_hMemory = hMemory; - m_DedicatedAllocation.m_pMappedData = pMappedData; - m_DedicatedAllocation.m_Prev = VMA_NULL; - m_DedicatedAllocation.m_Next = VMA_NULL; -} - -void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) -{ - VMA_ASSERT(pName == VMA_NULL || pName != m_pName); - - FreeName(hAllocator); - - if (pName != VMA_NULL) - m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName); -} - -uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation) -{ - VMA_ASSERT(allocation != VMA_NULL); - VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); - VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); - - if (m_MapCount != 0) - m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount); - - m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); - VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation); - m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); - -#if VMA_STATS_STRING_ENABLED - VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage); -#endif - return m_MapCount; -} - -VmaAllocHandle VmaAllocation_T::GetAllocHandle() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_AllocHandle; - case ALLOCATION_TYPE_DEDICATED: - return VK_NULL_HANDLE; - default: - VMA_ASSERT(0); - return VK_NULL_HANDLE; - } -} - -VkDeviceSize VmaAllocation_T::GetOffset() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle); - case ALLOCATION_TYPE_DEDICATED: - return 0; - default: - VMA_ASSERT(0); - return 0; - } -} - -VmaPool VmaAllocation_T::GetParentPool() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Block->GetParentPool(); - case ALLOCATION_TYPE_DEDICATED: - return m_DedicatedAllocation.m_hParentPool; - default: - VMA_ASSERT(0); - return VK_NULL_HANDLE; - } -} - -VkDeviceMemory VmaAllocation_T::GetMemory() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - return m_BlockAllocation.m_Block->GetDeviceMemory(); - case ALLOCATION_TYPE_DEDICATED: - return m_DedicatedAllocation.m_hMemory; - default: - VMA_ASSERT(0); - return VK_NULL_HANDLE; - } -} - -void* VmaAllocation_T::GetMappedData() const -{ - switch (m_Type) - { - case ALLOCATION_TYPE_BLOCK: - if (m_MapCount != 0 || 
IsPersistentMap()) - { - void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); - VMA_ASSERT(pBlockData != VMA_NULL); - return (char*)pBlockData + GetOffset(); - } - else - { - return VMA_NULL; - } - break; - case ALLOCATION_TYPE_DEDICATED: - VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap())); - return m_DedicatedAllocation.m_pMappedData; - default: - VMA_ASSERT(0); - return VMA_NULL; - } -} - -void VmaAllocation_T::BlockAllocMap() -{ - VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); - VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - - if (m_MapCount < 0xFF) - { - ++m_MapCount; - } - else - { - VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); - } -} - -void VmaAllocation_T::BlockAllocUnmap() -{ - VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); - - if (m_MapCount > 0) - { - --m_MapCount; - } - else - { - VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); - } -} - -VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) -{ - VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); - VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); - - if (m_MapCount != 0 || IsPersistentMap()) - { - if (m_MapCount < 0xFF) - { - VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL); - *ppData = m_DedicatedAllocation.m_pMappedData; - ++m_MapCount; - return VK_SUCCESS; - } - else - { - VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); - return VK_ERROR_MEMORY_MAP_FAILED; - } - } - else - { - VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( - hAllocator->m_hDevice, - m_DedicatedAllocation.m_hMemory, - 0, // offset - VK_WHOLE_SIZE, - 0, // flags - ppData); - if (result == VK_SUCCESS) - { - m_DedicatedAllocation.m_pMappedData = *ppData; - m_MapCount = 1; - } - return result; - } -} - -void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) -{ - VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); - - if (m_MapCount > 0) - { - --m_MapCount; - if (m_MapCount == 0 && !IsPersistentMap()) - { - m_DedicatedAllocation.m_pMappedData = VMA_NULL; - (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( - hAllocator->m_hDevice, - m_DedicatedAllocation.m_hMemory); - } - } - else - { - VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped."); - } -} - -#if VMA_STATS_STRING_ENABLED -void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage) -{ - VMA_ASSERT(m_BufferImageUsage == 0); - m_BufferImageUsage = bufferImageUsage; -} - -void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const -{ - json.WriteString("Type"); - json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); - - json.WriteString("Size"); - json.WriteNumber(m_Size); - json.WriteString("Usage"); - json.WriteNumber(m_BufferImageUsage); - - if (m_pUserData != VMA_NULL) - { - json.WriteString("CustomData"); - json.BeginString(); - json.ContinueString_Pointer(m_pUserData); - json.EndString(); - } - if (m_pName != VMA_NULL) - { - json.WriteString("Name"); - json.WriteString(m_pName); - } -} -#endif // VMA_STATS_STRING_ENABLED - -void VmaAllocation_T::FreeName(VmaAllocator hAllocator) -{ - if(m_pName) - { - VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName); - m_pName = VMA_NULL; - } -} -#endif // 
_VMA_ALLOCATION_T_FUNCTIONS - -#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS -VmaBlockVector::VmaBlockVector( - VmaAllocator hAllocator, - VmaPool hParentPool, - uint32_t memoryTypeIndex, - VkDeviceSize preferredBlockSize, - size_t minBlockCount, - size_t maxBlockCount, - VkDeviceSize bufferImageGranularity, - bool explicitBlockSize, - uint32_t algorithm, - float priority, - VkDeviceSize minAllocationAlignment, - void* pMemoryAllocateNext) - : m_hAllocator(hAllocator), - m_hParentPool(hParentPool), - m_MemoryTypeIndex(memoryTypeIndex), - m_PreferredBlockSize(preferredBlockSize), - m_MinBlockCount(minBlockCount), - m_MaxBlockCount(maxBlockCount), - m_BufferImageGranularity(bufferImageGranularity), - m_ExplicitBlockSize(explicitBlockSize), - m_Algorithm(algorithm), - m_Priority(priority), - m_MinAllocationAlignment(minAllocationAlignment), - m_pMemoryAllocateNext(pMemoryAllocateNext), - m_Blocks(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), - m_NextBlockId(0) {} - -VmaBlockVector::~VmaBlockVector() -{ - for (size_t i = m_Blocks.size(); i--; ) - { - m_Blocks[i]->Destroy(m_hAllocator); - vma_delete(m_hAllocator, m_Blocks[i]); - } -} - -VkResult VmaBlockVector::CreateMinBlocks() -{ - for (size_t i = 0; i < m_MinBlockCount; ++i) - { - VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL); - if (res != VK_SUCCESS) - { - return res; - } - } - return VK_SUCCESS; -} - -void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats) -{ - VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - - const size_t blockCount = m_Blocks.size(); - for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) - { - const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pBlock); - VMA_HEAVY_ASSERT(pBlock->Validate()); - pBlock->m_pMetadata->AddStatistics(inoutStats); - } -} - -void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) -{ - VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - - const size_t blockCount = m_Blocks.size(); - for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) - { - const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pBlock); - VMA_HEAVY_ASSERT(pBlock->Validate()); - pBlock->m_pMetadata->AddDetailedStatistics(inoutStats); - } -} - -bool VmaBlockVector::IsEmpty() -{ - VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - return m_Blocks.empty(); -} - -bool VmaBlockVector::IsCorruptionDetectionEnabled() const -{ - const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; - return (VMA_DEBUG_DETECT_CORRUPTION != 0) && - (VMA_DEBUG_MARGIN > 0) && - (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) && - (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; -} - -VkResult VmaBlockVector::Allocate( - VkDeviceSize size, - VkDeviceSize alignment, - const VmaAllocationCreateInfo& createInfo, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation* pAllocations) -{ - size_t allocIndex; - VkResult res = VK_SUCCESS; - - alignment = VMA_MAX(alignment, m_MinAllocationAlignment); - - if (IsCorruptionDetectionEnabled()) - { - size = VmaAlignUp(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); - alignment = VmaAlignUp(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); - } - - { - VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); - for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) - { - res = 
AllocatePage( - size, - alignment, - createInfo, - suballocType, - pAllocations + allocIndex); - if (res != VK_SUCCESS) - { - break; - } - } - } - - if (res != VK_SUCCESS) - { - // Free all already created allocations. - while (allocIndex--) - Free(pAllocations[allocIndex]); - memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); - } - - return res; -} - -VkResult VmaBlockVector::AllocatePage( - VkDeviceSize size, - VkDeviceSize alignment, - const VmaAllocationCreateInfo& createInfo, - VmaSuballocationType suballocType, - VmaAllocation* pAllocation) -{ - const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; - - VkDeviceSize freeMemory; - { - const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); - VmaBudget heapBudget = {}; - m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); - freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0; - } - - const bool canFallbackToDedicated = !HasExplicitBlockSize() && - (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0; - const bool canCreateNewBlock = - ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && - (m_Blocks.size() < m_MaxBlockCount) && - (freeMemory >= size || !canFallbackToDedicated); - uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK; - - // Upper address can only be used with linear allocator and within single memory block. - if (isUpperAddress && - (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) - { - return VK_ERROR_FEATURE_NOT_PRESENT; - } - - // Early reject: requested allocation size is larger that maximum block size for this block vector. - if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize) - { - return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } - - // 1. Search existing allocations. Try to allocate. - if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) - { - // Use only last block. - if (!m_Blocks.empty()) - { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); - VMA_ASSERT(pCurrBlock); - VkResult res = AllocateFromBlock( - pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); - if (res == VK_SUCCESS) - { - VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId()); - IncrementallySortBlocks(); - return VK_SUCCESS; - } - } - } - else - { - if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default - { - const bool isHostVisible = - (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; - if(isHostVisible) - { - const bool isMappingAllowed = (createInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; - /* - For non-mappable allocations, check blocks that are not mapped first. - For mappable allocations, check blocks that are already mapped first. - This way, having many blocks, we will separate mappable and non-mappable allocations, - hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc. - */ - for(size_t mappingI = 0; mappingI < 2; ++mappingI) - { - // Forward order in m_Blocks - prefer blocks with smallest amount of free space. 
- for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) - { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pCurrBlock); - const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; - if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) - { - VkResult res = AllocateFromBlock( - pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); - if (res == VK_SUCCESS) - { - VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); - IncrementallySortBlocks(); - return VK_SUCCESS; - } - } - } - } - } - else - { - // Forward order in m_Blocks - prefer blocks with smallest amount of free space. - for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) - { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pCurrBlock); - VkResult res = AllocateFromBlock( - pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); - if (res == VK_SUCCESS) - { - VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); - IncrementallySortBlocks(); - return VK_SUCCESS; - } - } - } - } - else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT - { - // Backward order in m_Blocks - prefer blocks with largest amount of free space. - for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) - { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pCurrBlock); - VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); - if (res == VK_SUCCESS) - { - VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); - IncrementallySortBlocks(); - return VK_SUCCESS; - } - } - } - } - - // 2. Try to create new block. - if (canCreateNewBlock) - { - // Calculate optimal size for new block. - VkDeviceSize newBlockSize = m_PreferredBlockSize; - uint32_t newBlockSizeShift = 0; - const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; - - if (!m_ExplicitBlockSize) - { - // Allocate 1/8, 1/4, 1/2 as first blocks. - const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); - for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) - { - const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; - if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) - { - newBlockSize = smallerNewBlockSize; - ++newBlockSizeShift; - } - else - { - break; - } - } - } - - size_t newBlockIndex = 0; - VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? - CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; - // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. - if (!m_ExplicitBlockSize) - { - while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) - { - const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; - if (smallerNewBlockSize >= size) - { - newBlockSize = smallerNewBlockSize; - ++newBlockSizeShift; - res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
- CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; - } - else - { - break; - } - } - } - - if (res == VK_SUCCESS) - { - VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; - VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); - - res = AllocateFromBlock( - pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); - if (res == VK_SUCCESS) - { - VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize); - IncrementallySortBlocks(); - return VK_SUCCESS; - } - else - { - // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. - return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } - } - } - - return VK_ERROR_OUT_OF_DEVICE_MEMORY; -} - -void VmaBlockVector::Free(const VmaAllocation hAllocation) -{ - VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; - - bool budgetExceeded = false; - { - const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); - VmaBudget heapBudget = {}; - m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); - budgetExceeded = heapBudget.usage >= heapBudget.budget; - } - - // Scope for lock. - { - VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); - - VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); - - if (IsCorruptionDetectionEnabled()) - { - VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize()); - VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value."); - } - - if (hAllocation->IsPersistentMap()) - { - pBlock->Unmap(m_hAllocator, 1); - } - - const bool hadEmptyBlockBeforeFree = HasEmptyBlock(); - pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle()); - pBlock->PostFree(m_hAllocator); - VMA_HEAVY_ASSERT(pBlock->Validate()); - - VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex); - - const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount; - // pBlock became empty after this deallocation. - if (pBlock->m_pMetadata->IsEmpty()) - { - // Already had empty block. We don't want to have two, so delete this one. - if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock) - { - pBlockToDelete = pBlock; - Remove(pBlock); - } - // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth. - } - // pBlock didn't become empty, but we have another empty block - find and free that one. - // (This is optional, heuristics.) - else if (hadEmptyBlockBeforeFree && canDeleteBlock) - { - VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); - if (pLastBlock->m_pMetadata->IsEmpty()) - { - pBlockToDelete = pLastBlock; - m_Blocks.pop_back(); - } - } - - IncrementallySortBlocks(); - } - - // Destruction of a free block. Deferred until this point, outside of mutex - // lock, for performance reason. 
- if (pBlockToDelete != VMA_NULL) - { - VMA_DEBUG_LOG(" Deleted empty block #%u", pBlockToDelete->GetId()); - pBlockToDelete->Destroy(m_hAllocator); - vma_delete(m_hAllocator, pBlockToDelete); - } - - m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize()); - m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation); -} - -VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const -{ - VkDeviceSize result = 0; - for (size_t i = m_Blocks.size(); i--; ) - { - result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); - if (result >= m_PreferredBlockSize) - { - break; - } - } - return result; -} - -void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) -{ - for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) - { - if (m_Blocks[blockIndex] == pBlock) - { - VmaVectorRemove(m_Blocks, blockIndex); - return; - } - } - VMA_ASSERT(0); -} - -void VmaBlockVector::IncrementallySortBlocks() -{ - if (!m_IncrementalSort) - return; - if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) - { - // Bubble sort only until first swap. - for (size_t i = 1; i < m_Blocks.size(); ++i) - { - if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) - { - VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]); - return; - } - } - } -} - -void VmaBlockVector::SortByFreeSize() -{ - VMA_SORT(m_Blocks.begin(), m_Blocks.end(), - [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool - { - return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); - }); -} - -VkResult VmaBlockVector::AllocateFromBlock( - VmaDeviceMemoryBlock* pBlock, - VkDeviceSize size, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void* pUserData, - VmaSuballocationType suballocType, - uint32_t strategy, - VmaAllocation* pAllocation) -{ - const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; - - VmaAllocationRequest currRequest = {}; - if (pBlock->m_pMetadata->CreateAllocationRequest( - size, - alignment, - isUpperAddress, - suballocType, - strategy, - &currRequest)) - { - return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation); - } - return VK_ERROR_OUT_OF_DEVICE_MEMORY; -} - -VkResult VmaBlockVector::CommitAllocationRequest( - VmaAllocationRequest& allocRequest, - VmaDeviceMemoryBlock* pBlock, - VkDeviceSize alignment, - VmaAllocationCreateFlags allocFlags, - void* pUserData, - VmaSuballocationType suballocType, - VmaAllocation* pAllocation) -{ - const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; - const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; - const bool isMappingAllowed = (allocFlags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; - - pBlock->PostAlloc(); - // Allocate from pCurrBlock. - if (mapped) - { - VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); - if (res != VK_SUCCESS) - { - return res; - } - } - - *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed); - pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); - (*pAllocation)->InitBlockAllocation( - pBlock, - allocRequest.allocHandle, - alignment, - allocRequest.size, // Not size, as actual allocation size may be larger than requested! 
- m_MemoryTypeIndex, - suballocType, - mapped); - VMA_HEAVY_ASSERT(pBlock->Validate()); - if (isUserDataString) - (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData); - else - (*pAllocation)->SetUserData(m_hAllocator, pUserData); - m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); - if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) - { - m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); - } - if (IsCorruptionDetectionEnabled()) - { - VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size); - VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); - } - return VK_SUCCESS; -} - -VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) -{ - VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; - allocInfo.pNext = m_pMemoryAllocateNext; - allocInfo.memoryTypeIndex = m_MemoryTypeIndex; - allocInfo.allocationSize = blockSize; - -#if VMA_BUFFER_DEVICE_ADDRESS - // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. - VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; - if (m_hAllocator->m_UseKhrBufferDeviceAddress) - { - allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; - VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); - } -#endif // VMA_BUFFER_DEVICE_ADDRESS - -#if VMA_MEMORY_PRIORITY - VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; - if (m_hAllocator->m_UseExtMemoryPriority) - { - VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f); - priorityInfo.priority = m_Priority; - VmaPnextChainPushFront(&allocInfo, &priorityInfo); - } -#endif // VMA_MEMORY_PRIORITY - -#if VMA_EXTERNAL_MEMORY - // Attach VkExportMemoryAllocateInfoKHR if necessary. - VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; - exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex); - if (exportMemoryAllocInfo.handleTypes != 0) - { - VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); - } -#endif // VMA_EXTERNAL_MEMORY - - VkDeviceMemory mem = VK_NULL_HANDLE; - VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); - if (res < 0) - { - return res; - } - - // New VkDeviceMemory successfully created. - - // Create new Allocation for it. 
- VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); - pBlock->Init( - m_hAllocator, - m_hParentPool, - m_MemoryTypeIndex, - mem, - allocInfo.allocationSize, - m_NextBlockId++, - m_Algorithm, - m_BufferImageGranularity); - - m_Blocks.push_back(pBlock); - if (pNewBlockIndex != VMA_NULL) - { - *pNewBlockIndex = m_Blocks.size() - 1; - } - - return VK_SUCCESS; -} - -bool VmaBlockVector::HasEmptyBlock() -{ - for (size_t index = 0, count = m_Blocks.size(); index < count; ++index) - { - VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; - if (pBlock->m_pMetadata->IsEmpty()) - { - return true; - } - } - return false; -} - -#if VMA_STATS_STRING_ENABLED -void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) -{ - VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - - - json.BeginObject(); - for (size_t i = 0; i < m_Blocks.size(); ++i) - { - json.BeginString(); - json.ContinueString(m_Blocks[i]->GetId()); - json.EndString(); - - json.BeginObject(); - json.WriteString("MapRefCount"); - json.WriteNumber(m_Blocks[i]->GetMapRefCount()); - - m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); - json.EndObject(); - } - json.EndObject(); -} -#endif // VMA_STATS_STRING_ENABLED - -VkResult VmaBlockVector::CheckCorruption() -{ - if (!IsCorruptionDetectionEnabled()) - { - return VK_ERROR_FEATURE_NOT_PRESENT; - } - - VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) - { - VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; - VMA_ASSERT(pBlock); - VkResult res = pBlock->CheckCorruption(m_hAllocator); - if (res != VK_SUCCESS) - { - return res; - } - } - return VK_SUCCESS; -} - -#endif // _VMA_BLOCK_VECTOR_FUNCTIONS - -#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS -VmaDefragmentationContext_T::VmaDefragmentationContext_T( - VmaAllocator hAllocator, - const VmaDefragmentationInfo& info) - : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass), - m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? 
UINT32_MAX : info.maxAllocationsPerPass), - m_MoveAllocator(hAllocator->GetAllocationCallbacks()), - m_Moves(m_MoveAllocator) -{ - m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK; - - if (info.pool != VMA_NULL) - { - m_BlockVectorCount = 1; - m_PoolBlockVector = &info.pool->m_BlockVector; - m_pBlockVectors = &m_PoolBlockVector; - m_PoolBlockVector->SetIncrementalSort(false); - m_PoolBlockVector->SortByFreeSize(); - } - else - { - m_BlockVectorCount = hAllocator->GetMemoryTypeCount(); - m_PoolBlockVector = VMA_NULL; - m_pBlockVectors = hAllocator->m_pBlockVectors; - for (uint32_t i = 0; i < m_BlockVectorCount; ++i) - { - VmaBlockVector* vector = m_pBlockVectors[i]; - if (vector != VMA_NULL) - { - vector->SetIncrementalSort(false); - vector->SortByFreeSize(); - } - } - } - - switch (m_Algorithm) - { - case 0: // Default algorithm - m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT; - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: - { - m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount); - break; - } - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: - { - if (hAllocator->GetBufferImageGranularity() > 1) - { - m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount); - } - break; - } - } -} - -VmaDefragmentationContext_T::~VmaDefragmentationContext_T() -{ - if (m_PoolBlockVector != VMA_NULL) - { - m_PoolBlockVector->SetIncrementalSort(true); - } - else - { - for (uint32_t i = 0; i < m_BlockVectorCount; ++i) - { - VmaBlockVector* vector = m_pBlockVectors[i]; - if (vector != VMA_NULL) - vector->SetIncrementalSort(true); - } - } - - if (m_AlgorithmState) - { - switch (m_Algorithm) - { - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: - vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast(m_AlgorithmState), m_BlockVectorCount); - break; - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: - vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast(m_AlgorithmState), m_BlockVectorCount); - break; - default: - VMA_ASSERT(0); - } - } -} - -VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo) -{ - if (m_PoolBlockVector != VMA_NULL) - { - VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex); - - if (m_PoolBlockVector->GetBlockCount() > 1) - ComputeDefragmentation(*m_PoolBlockVector, 0); - else if (m_PoolBlockVector->GetBlockCount() == 1) - ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0)); - } - else - { - for (uint32_t i = 0; i < m_BlockVectorCount; ++i) - { - if (m_pBlockVectors[i] != VMA_NULL) - { - VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex); - - if (m_pBlockVectors[i]->GetBlockCount() > 1) - { - if (ComputeDefragmentation(*m_pBlockVectors[i], i)) - break; - } - else if (m_pBlockVectors[i]->GetBlockCount() == 1) - { - if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0))) - break; - } - } - } - } - - moveInfo.moveCount = static_cast(m_Moves.size()); - if (moveInfo.moveCount > 0) - { - moveInfo.pMoves = m_Moves.data(); - return VK_INCOMPLETE; - } - - moveInfo.pMoves = VMA_NULL; - return VK_SUCCESS; -} - -VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo) -{ - VMA_ASSERT(moveInfo.moveCount > 0 ? 
moveInfo.pMoves != VMA_NULL : true); - - VkResult result = VK_SUCCESS; - VmaStlAllocator blockAllocator(m_MoveAllocator.m_pCallbacks); - VmaVector> immovableBlocks(blockAllocator); - VmaVector> mappedBlocks(blockAllocator); - - VmaAllocator allocator = VMA_NULL; - for (uint32_t i = 0; i < moveInfo.moveCount; ++i) - { - VmaDefragmentationMove& move = moveInfo.pMoves[i]; - size_t prevCount = 0, currentCount = 0; - VkDeviceSize freedBlockSize = 0; - - uint32_t vectorIndex; - VmaBlockVector* vector; - if (m_PoolBlockVector != VMA_NULL) - { - vectorIndex = 0; - vector = m_PoolBlockVector; - } - else - { - vectorIndex = move.srcAllocation->GetMemoryTypeIndex(); - vector = m_pBlockVectors[vectorIndex]; - VMA_ASSERT(vector != VMA_NULL); - } - - switch (move.operation) - { - case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY: - { - uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation); - if (mapCount > 0) - { - allocator = vector->m_hAllocator; - VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock(); - bool notPresent = true; - for (FragmentedBlock& block : mappedBlocks) - { - if (block.block == newMapBlock) - { - notPresent = false; - block.data += mapCount; - break; - } - } - if (notPresent) - mappedBlocks.push_back({ mapCount, newMapBlock }); - } - - // Scope for locks, Free have it's own lock - { - VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); - prevCount = vector->GetBlockCount(); - freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); - } - vector->Free(move.dstTmpAllocation); - { - VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); - currentCount = vector->GetBlockCount(); - } - - result = VK_INCOMPLETE; - break; - } - case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE: - { - m_PassStats.bytesMoved -= move.srcAllocation->GetSize(); - --m_PassStats.allocationsMoved; - vector->Free(move.dstTmpAllocation); - - VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock(); - bool notPresent = true; - for (const FragmentedBlock& block : immovableBlocks) - { - if (block.block == newBlock) - { - notPresent = false; - break; - } - } - if (notPresent) - immovableBlocks.push_back({ vectorIndex, newBlock }); - break; - } - case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY: - { - m_PassStats.bytesMoved -= move.srcAllocation->GetSize(); - --m_PassStats.allocationsMoved; - // Scope for locks, Free have it's own lock - { - VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); - prevCount = vector->GetBlockCount(); - freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize(); - } - vector->Free(move.srcAllocation); - { - VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); - currentCount = vector->GetBlockCount(); - } - freedBlockSize *= prevCount - currentCount; - - VkDeviceSize dstBlockSize; - { - VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); - dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize(); - } - vector->Free(move.dstTmpAllocation); - { - VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); - freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount()); - currentCount = vector->GetBlockCount(); - } - - result = VK_INCOMPLETE; - break; - } - default: - VMA_ASSERT(0); - } - - if (prevCount > currentCount) - { - size_t freedBlocks = prevCount - currentCount; - m_PassStats.deviceMemoryBlocksFreed += 
static_cast(freedBlocks); - m_PassStats.bytesFreed += freedBlockSize; - } - - switch (m_Algorithm) - { - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: - { - if (m_AlgorithmState != VMA_NULL) - { - // Avoid unnecessary tries to allocate when new free block is avaiable - StateExtensive& state = reinterpret_cast(m_AlgorithmState)[vectorIndex]; - if (state.firstFreeBlock != SIZE_MAX) - { - const size_t diff = prevCount - currentCount; - if (state.firstFreeBlock >= diff) - { - state.firstFreeBlock -= diff; - if (state.firstFreeBlock != 0) - state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty(); - } - else - state.firstFreeBlock = 0; - } - } - } - } - } - moveInfo.moveCount = 0; - moveInfo.pMoves = VMA_NULL; - m_Moves.clear(); - - // Update stats - m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved; - m_GlobalStats.bytesFreed += m_PassStats.bytesFreed; - m_GlobalStats.bytesMoved += m_PassStats.bytesMoved; - m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed; - m_PassStats = { 0 }; - - // Move blocks with immovable allocations according to algorithm - if (immovableBlocks.size() > 0) - { - switch (m_Algorithm) - { - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: - { - if (m_AlgorithmState != VMA_NULL) - { - bool swapped = false; - // Move to the start of free blocks range - for (const FragmentedBlock& block : immovableBlocks) - { - StateExtensive& state = reinterpret_cast(m_AlgorithmState)[block.data]; - if (state.operation != StateExtensive::Operation::Cleanup) - { - VmaBlockVector* vector = m_pBlockVectors[block.data]; - VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); - - for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i) - { - if (vector->GetBlock(i) == block.block) - { - VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]); - if (state.firstFreeBlock != SIZE_MAX) - { - if (i + 1 < state.firstFreeBlock) - { - if (state.firstFreeBlock > 1) - VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]); - else - --state.firstFreeBlock; - } - } - swapped = true; - break; - } - } - } - } - if (swapped) - result = VK_INCOMPLETE; - break; - } - } - default: - { - // Move to the begining - for (const FragmentedBlock& block : immovableBlocks) - { - VmaBlockVector* vector = m_pBlockVectors[block.data]; - VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); - - for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i) - { - if (vector->GetBlock(i) == block.block) - { - VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]); - break; - } - } - } - break; - } - } - } - - // Bulk-map destination blocks - for (const FragmentedBlock& block : mappedBlocks) - { - VkResult res = block.block->Map(allocator, block.data, VMA_NULL); - VMA_ASSERT(res == VK_SUCCESS); - } - return result; -} - -bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index) -{ - switch (m_Algorithm) - { - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT: - return ComputeDefragmentation_Fast(vector); - default: - VMA_ASSERT(0); - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: - return ComputeDefragmentation_Balanced(vector, index, true); - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT: - return ComputeDefragmentation_Full(vector); - case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: - return 
ComputeDefragmentation_Extensive(vector, index); - } -} - -VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData( - VmaAllocHandle handle, VmaBlockMetadata* metadata) -{ - MoveAllocationData moveData; - moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle); - moveData.size = moveData.move.srcAllocation->GetSize(); - moveData.alignment = moveData.move.srcAllocation->GetAlignment(); - moveData.type = moveData.move.srcAllocation->GetSuballocationType(); - moveData.flags = 0; - - if (moveData.move.srcAllocation->IsPersistentMap()) - moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; - if (moveData.move.srcAllocation->IsMappingAllowed()) - moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; - - return moveData; -} - -VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes) -{ - // Ignore allocation if will exceed max size for copy - if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes) - { - if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE) - return CounterStatus::Ignore; - else - return CounterStatus::End; - } - return CounterStatus::Pass; -} - -bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) -{ - m_PassStats.bytesMoved += bytes; - // Early return when max found - if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) - { - VMA_ASSERT(m_PassStats.allocationsMoved == m_MaxPassAllocations || - m_PassStats.bytesMoved == m_MaxPassBytes && "Exceeded maximal pass threshold!"); - return true; - } - return false; -} - -bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) -{ - VmaBlockMetadata* metadata = block->m_pMetadata; - - for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) - { - MoveAllocationData moveData = GetMoveData(handle, metadata); - // Ignore newly created allocations by defragmentation algorithm - if (moveData.move.srcAllocation->GetUserData() == this) - continue; - switch (CheckCounters(moveData.move.srcAllocation->GetSize())) - { - case CounterStatus::Ignore: - continue; - case CounterStatus::End: - return true; - default: - VMA_ASSERT(0); - case CounterStatus::Pass: - break; - } - - VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); - if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) - { - VmaAllocationRequest request = {}; - if (metadata->CreateAllocationRequest( - moveData.size, - moveData.alignment, - false, - moveData.type, - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - &request)) - { - if (metadata->GetAllocationOffset(request.allocHandle) < offset) - { - if (vector.CommitAllocationRequest( - request, - block, - moveData.alignment, - moveData.flags, - this, - moveData.type, - &moveData.move.dstTmpAllocation) == VK_SUCCESS) - { - m_Moves.push_back(moveData.move); - if (IncrementCounters(moveData.size)) - return true; - } - } - } - } - } - return false; -} - -bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) -{ - for (; start < end; ++start) - { - VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start); - if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size) - { - if (vector.AllocateFromBlock(dstBlock, - data.size, - data.alignment, - data.flags, - this, - data.type, - 0, - 
&data.move.dstTmpAllocation) == VK_SUCCESS) - { - m_Moves.push_back(data.move); - if (IncrementCounters(data.size)) - return true; - break; - } - } - } - return false; -} - -bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector) -{ - // Move only between blocks - - // Go through allocations in last blocks and try to fit them inside first ones - for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) - { - VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; - - for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) - { - MoveAllocationData moveData = GetMoveData(handle, metadata); - // Ignore newly created allocations by defragmentation algorithm - if (moveData.move.srcAllocation->GetUserData() == this) - continue; - switch (CheckCounters(moveData.move.srcAllocation->GetSize())) - { - case CounterStatus::Ignore: - continue; - case CounterStatus::End: - return true; - default: - VMA_ASSERT(0); - case CounterStatus::Pass: - break; - } - - // Check all previous blocks for free space - if (AllocInOtherBlock(0, i, moveData, vector)) - return true; - } - } - return false; -} - -bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update) -{ - // Go over every allocation and try to fit it in previous blocks at lowest offsets, - // if not possible: realloc within single block to minimize offset (exclude offset == 0), - // but only if there are noticable gaps between them (some heuristic, ex. average size of allocation in block) - VMA_ASSERT(m_AlgorithmState != VMA_NULL); - - StateBalanced& vectorState = reinterpret_cast(m_AlgorithmState)[index]; - if (update && vectorState.avgAllocSize == UINT64_MAX) - UpdateVectorStatistics(vector, vectorState); - - const size_t startMoveCount = m_Moves.size(); - VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2; - for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) - { - VmaDeviceMemoryBlock* block = vector.GetBlock(i); - VmaBlockMetadata* metadata = block->m_pMetadata; - VkDeviceSize prevFreeRegionSize = 0; - - for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) - { - MoveAllocationData moveData = GetMoveData(handle, metadata); - // Ignore newly created allocations by defragmentation algorithm - if (moveData.move.srcAllocation->GetUserData() == this) - continue; - switch (CheckCounters(moveData.move.srcAllocation->GetSize())) - { - case CounterStatus::Ignore: - continue; - case CounterStatus::End: - return true; - default: - VMA_ASSERT(0); - case CounterStatus::Pass: - break; - } - - // Check all previous blocks for free space - const size_t prevMoveCount = m_Moves.size(); - if (AllocInOtherBlock(0, i, moveData, vector)) - return true; - - VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle); - // If no room found then realloc within block for lower offset - VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); - if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) - { - // Check if realloc will make sense - if (prevFreeRegionSize >= minimalFreeRegion || - nextFreeRegionSize >= minimalFreeRegion || - moveData.size <= vectorState.avgFreeSize || - moveData.size <= vectorState.avgAllocSize) - { - VmaAllocationRequest request = {}; - if 
(metadata->CreateAllocationRequest( - moveData.size, - moveData.alignment, - false, - moveData.type, - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - &request)) - { - if (metadata->GetAllocationOffset(request.allocHandle) < offset) - { - if (vector.CommitAllocationRequest( - request, - block, - moveData.alignment, - moveData.flags, - this, - moveData.type, - &moveData.move.dstTmpAllocation) == VK_SUCCESS) - { - m_Moves.push_back(moveData.move); - if (IncrementCounters(moveData.size)) - return true; - } - } - } - } - } - prevFreeRegionSize = nextFreeRegionSize; - } - } - - // No moves perfomed, update statistics to current vector state - if (startMoveCount == m_Moves.size() && !update) - { - vectorState.avgAllocSize = UINT64_MAX; - return ComputeDefragmentation_Balanced(vector, index, false); - } - return false; -} - -bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector) -{ - // Go over every allocation and try to fit it in previous blocks at lowest offsets, - // if not possible: realloc within single block to minimize offset (exclude offset == 0) - - for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) - { - VmaDeviceMemoryBlock* block = vector.GetBlock(i); - VmaBlockMetadata* metadata = block->m_pMetadata; - - for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) - { - MoveAllocationData moveData = GetMoveData(handle, metadata); - // Ignore newly created allocations by defragmentation algorithm - if (moveData.move.srcAllocation->GetUserData() == this) - continue; - switch (CheckCounters(moveData.move.srcAllocation->GetSize())) - { - case CounterStatus::Ignore: - continue; - case CounterStatus::End: - return true; - default: - VMA_ASSERT(0); - case CounterStatus::Pass: - break; - } - - // Check all previous blocks for free space - const size_t prevMoveCount = m_Moves.size(); - if (AllocInOtherBlock(0, i, moveData, vector)) - return true; - - // If no room found then realloc within block for lower offset - VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); - if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) - { - VmaAllocationRequest request = {}; - if (metadata->CreateAllocationRequest( - moveData.size, - moveData.alignment, - false, - moveData.type, - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - &request)) - { - if (metadata->GetAllocationOffset(request.allocHandle) < offset) - { - if (vector.CommitAllocationRequest( - request, - block, - moveData.alignment, - moveData.flags, - this, - moveData.type, - &moveData.move.dstTmpAllocation) == VK_SUCCESS) - { - m_Moves.push_back(moveData.move); - if (IncrementCounters(moveData.size)) - return true; - } - } - } - } - } - } - return false; -} - -bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index) -{ - // First free single block, then populate it to the brim, then free another block, and so on - - // Fallback to previous algorithm since without granularity conflicts it can achieve max packing - if (vector.m_BufferImageGranularity == 1) - return ComputeDefragmentation_Full(vector); - - VMA_ASSERT(m_AlgorithmState != VMA_NULL); - - StateExtensive& vectorState = reinterpret_cast(m_AlgorithmState)[index]; - - bool texturePresent = false, bufferPresent = false, otherPresent = false; - switch (vectorState.operation) - { - case StateExtensive::Operation::Done: // Vector defragmented - return 
false; - case StateExtensive::Operation::FindFreeBlockBuffer: - case StateExtensive::Operation::FindFreeBlockTexture: - case StateExtensive::Operation::FindFreeBlockAll: - { - // No more blocks to free, just perform fast realloc and move to cleanup - if (vectorState.firstFreeBlock == 0) - { - vectorState.operation = StateExtensive::Operation::Cleanup; - return ComputeDefragmentation_Fast(vector); - } - - // No free blocks, have to clear last one - size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1; - VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata; - - const size_t prevMoveCount = m_Moves.size(); - for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = freeMetadata->GetNextAllocation(handle)) - { - MoveAllocationData moveData = GetMoveData(handle, freeMetadata); - switch (CheckCounters(moveData.move.srcAllocation->GetSize())) - { - case CounterStatus::Ignore: - continue; - case CounterStatus::End: - return true; - default: - VMA_ASSERT(0); - case CounterStatus::Pass: - break; - } - - // Check all previous blocks for free space - if (AllocInOtherBlock(0, last, moveData, vector)) - { - // Full clear performed already - if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE) - reinterpret_cast(m_AlgorithmState)[index] = last; - return true; - } - } - - if (prevMoveCount == m_Moves.size()) - { - // Cannot perform full clear, have to move data in other blocks around - if (last != 0) - { - for (size_t i = last - 1; i; --i) - { - if (ReallocWithinBlock(vector, vector.GetBlock(i))) - return true; - } - } - - if (prevMoveCount == m_Moves.size()) - { - // No possible reallocs within blocks, try to move them around fast - return ComputeDefragmentation_Fast(vector); - } - } - else - { - switch (vectorState.operation) - { - case StateExtensive::Operation::FindFreeBlockBuffer: - vectorState.operation = StateExtensive::Operation::MoveBuffers; - break; - default: - VMA_ASSERT(0); - case StateExtensive::Operation::FindFreeBlockTexture: - vectorState.operation = StateExtensive::Operation::MoveTextures; - break; - case StateExtensive::Operation::FindFreeBlockAll: - vectorState.operation = StateExtensive::Operation::MoveAll; - break; - } - vectorState.firstFreeBlock = last; - // Nothing done, block found without reallocations, can perform another reallocs in same pass - return ComputeDefragmentation_Extensive(vector, index); - } - break; - } - case StateExtensive::Operation::MoveTextures: - { - if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector, - vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) - { - if (texturePresent) - { - vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture; - return ComputeDefragmentation_Extensive(vector, index); - } - - if (!bufferPresent && !otherPresent) - { - vectorState.operation = StateExtensive::Operation::Cleanup; - break; - } - - // No more textures to move, check buffers - vectorState.operation = StateExtensive::Operation::MoveBuffers; - bufferPresent = false; - otherPresent = false; - } - else - break; - } - case StateExtensive::Operation::MoveBuffers: - { - if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector, - vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) - { - if (bufferPresent) - { - vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; - return 
ComputeDefragmentation_Extensive(vector, index); - } - - if (!otherPresent) - { - vectorState.operation = StateExtensive::Operation::Cleanup; - break; - } - - // No more buffers to move, check all others - vectorState.operation = StateExtensive::Operation::MoveAll; - otherPresent = false; - } - else - break; - } - case StateExtensive::Operation::MoveAll: - { - if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector, - vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) - { - if (otherPresent) - { - vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; - return ComputeDefragmentation_Extensive(vector, index); - } - // Everything moved - vectorState.operation = StateExtensive::Operation::Cleanup; - } - break; - } - case StateExtensive::Operation::Cleanup: - // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062). - break; - } - - if (vectorState.operation == StateExtensive::Operation::Cleanup) - { - // All other work done, pack data in blocks even tighter if possible - const size_t prevMoveCount = m_Moves.size(); - for (size_t i = 0; i < vector.GetBlockCount(); ++i) - { - if (ReallocWithinBlock(vector, vector.GetBlock(i))) - return true; - } - - if (prevMoveCount == m_Moves.size()) - vectorState.operation = StateExtensive::Operation::Done; - } - return false; -} - -void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state) -{ - size_t allocCount = 0; - size_t freeCount = 0; - state.avgFreeSize = 0; - state.avgAllocSize = 0; - - for (size_t i = 0; i < vector.GetBlockCount(); ++i) - { - VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; - - allocCount += metadata->GetAllocationCount(); - freeCount += metadata->GetFreeRegionsCount(); - state.avgFreeSize += metadata->GetSumFreeSize(); - state.avgAllocSize += metadata->GetSize(); - } - - state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount; - state.avgFreeSize /= freeCount; -} - -bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, - VmaBlockVector& vector, size_t firstFreeBlock, - bool& texturePresent, bool& bufferPresent, bool& otherPresent) -{ - const size_t prevMoveCount = m_Moves.size(); - for (size_t i = firstFreeBlock ; i;) - { - VmaDeviceMemoryBlock* block = vector.GetBlock(--i); - VmaBlockMetadata* metadata = block->m_pMetadata; - - for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) - { - MoveAllocationData moveData = GetMoveData(handle, metadata); - // Ignore newly created allocations by defragmentation algorithm - if (moveData.move.srcAllocation->GetUserData() == this) - continue; - switch (CheckCounters(moveData.move.srcAllocation->GetSize())) - { - case CounterStatus::Ignore: - continue; - case CounterStatus::End: - return true; - default: - VMA_ASSERT(0); - case CounterStatus::Pass: - break; - } - - // Move only single type of resources at once - if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType)) - { - // Try to fit allocation into free blocks - if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector)) - return false; - } - - if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)) - texturePresent = true; - else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER)) - bufferPresent = 
true; - else - otherPresent = true; - } - } - return prevMoveCount == m_Moves.size(); -} -#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS - -#ifndef _VMA_POOL_T_FUNCTIONS -VmaPool_T::VmaPool_T( - VmaAllocator hAllocator, - const VmaPoolCreateInfo& createInfo, - VkDeviceSize preferredBlockSize) - : m_BlockVector( - hAllocator, - this, // hParentPool - createInfo.memoryTypeIndex, - createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, - createInfo.minBlockCount, - createInfo.maxBlockCount, - (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), - createInfo.blockSize != 0, // explicitBlockSize - createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm - createInfo.priority, - VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), - createInfo.pMemoryAllocateNext), - m_Id(0), - m_Name(VMA_NULL) {} - -VmaPool_T::~VmaPool_T() -{ - VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); -} - -void VmaPool_T::SetName(const char* pName) -{ - const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); - VmaFreeString(allocs, m_Name); - - if (pName != VMA_NULL) - { - m_Name = VmaCreateStringCopy(allocs, pName); - } - else - { - m_Name = VMA_NULL; - } -} -#endif // _VMA_POOL_T_FUNCTIONS - -#ifndef _VMA_ALLOCATOR_T_FUNCTIONS -VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : - m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), - m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), - m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), - m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), - m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), - m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), - m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0), - m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0), - m_hDevice(pCreateInfo->device), - m_hInstance(pCreateInfo->instance), - m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), - m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? - *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), - m_AllocationObjectAllocator(&m_AllocationCallbacks), - m_HeapSizeLimitMask(0), - m_DeviceMemoryCount(0), - m_PreferredLargeHeapBlockSize(0), - m_PhysicalDevice(pCreateInfo->physicalDevice), - m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), - m_NextPoolId(0), - m_GlobalMemoryTypeBits(UINT32_MAX) -{ - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - m_UseKhrDedicatedAllocation = false; - m_UseKhrBindMemory2 = false; - } - - if(VMA_DEBUG_DETECT_CORRUPTION) - { - // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it. 
- VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); - } - - VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); - - if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) - { -#if !(VMA_DEDICATED_ALLOCATION) - if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); - } -#endif -#if !(VMA_BIND_MEMORY2) - if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); - } -#endif - } -#if !(VMA_MEMORY_BUDGET) - if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); - } -#endif -#if !(VMA_BUFFER_DEVICE_ADDRESS) - if(m_UseKhrBufferDeviceAddress) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); - } -#endif -#if VMA_VULKAN_VERSION < 1002000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0)) - { - VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); - } -#endif -#if VMA_VULKAN_VERSION < 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); - } -#endif -#if !(VMA_MEMORY_PRIORITY) - if(m_UseExtMemoryPriority) - { - VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); - } -#endif - - memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); - memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); - memset(&m_MemProps, 0, sizeof(m_MemProps)); - - memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); - memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); - -#if VMA_EXTERNAL_MEMORY - memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes)); -#endif // #if VMA_EXTERNAL_MEMORY - - if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) - { - m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; - m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; - m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree; - } - - ImportVulkanFunctions(pCreateInfo->pVulkanFunctions); - - (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties); - (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps); - - VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT)); - VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY)); - VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); - VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); - - m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? 
- pCreateInfo->preferredLargeHeapBlockSize : static_cast(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); - - m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); - -#if VMA_EXTERNAL_MEMORY - if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL) - { - memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes, - sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount()); - } -#endif // #if VMA_EXTERNAL_MEMORY - - if(pCreateInfo->pHeapSizeLimit != VMA_NULL) - { - for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) - { - const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; - if(limit != VK_WHOLE_SIZE) - { - m_HeapSizeLimitMask |= 1u << heapIndex; - if(limit < m_MemProps.memoryHeaps[heapIndex].size) - { - m_MemProps.memoryHeaps[heapIndex].size = limit; - } - } - } - } - - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - // Create only supported types - if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) - { - const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); - m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( - this, - VK_NULL_HANDLE, // hParentPool - memTypeIndex, - preferredBlockSize, - 0, - SIZE_MAX, - GetBufferImageGranularity(), - false, // explicitBlockSize - 0, // algorithm - 0.5f, // priority (0.5 is the default per Vulkan spec) - GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment - VMA_NULL); // // pMemoryAllocateNext - // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, - // becase minBlockCount is 0. - } - } -} - -VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) -{ - VkResult res = VK_SUCCESS; - -#if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) - { - UpdateVulkanBudget(); - } -#endif // #if VMA_MEMORY_BUDGET - - return res; -} - -VmaAllocator_T::~VmaAllocator_T() -{ - VMA_ASSERT(m_Pools.IsEmpty()); - - for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; ) - { - vma_delete(this, m_pBlockVectors[memTypeIndex]); - } -} - -void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) -{ -#if VMA_STATIC_VULKAN_FUNCTIONS == 1 - ImportVulkanFunctions_Static(); -#endif - - if(pVulkanFunctions != VMA_NULL) - { - ImportVulkanFunctions_Custom(pVulkanFunctions); - } - -#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 - ImportVulkanFunctions_Dynamic(); -#endif - - ValidateVulkanFunctions(); -} - -#if VMA_STATIC_VULKAN_FUNCTIONS == 1 - -void VmaAllocator_T::ImportVulkanFunctions_Static() -{ - // Vulkan 1.0 - m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr; - m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr; - m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; - m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; - m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; - m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory; - m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory; - m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory; - m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges; - m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; - 
m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; - m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; - m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; - m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; - m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; - m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; - m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; - m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; - m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; - - // Vulkan 1.1 -#if VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; - m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; - m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; - m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; - m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; - } -#endif - -#if VMA_VULKAN_VERSION >= 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) - { - m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; - m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; - } -#endif -} - -#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1 - -void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) -{ - VMA_ASSERT(pVulkanFunctions != VMA_NULL); - -#define VMA_COPY_IF_NOT_NULL(funcName) \ - if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; - - VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr); - VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr); - VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); - VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); - VMA_COPY_IF_NOT_NULL(vkAllocateMemory); - VMA_COPY_IF_NOT_NULL(vkFreeMemory); - VMA_COPY_IF_NOT_NULL(vkMapMemory); - VMA_COPY_IF_NOT_NULL(vkUnmapMemory); - VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); - VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); - VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); - VMA_COPY_IF_NOT_NULL(vkBindImageMemory); - VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); - VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); - VMA_COPY_IF_NOT_NULL(vkCreateBuffer); - VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); - VMA_COPY_IF_NOT_NULL(vkCreateImage); - VMA_COPY_IF_NOT_NULL(vkDestroyImage); - VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); - -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); - VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); -#endif - -#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 - VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); - VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); -#endif - -#if VMA_MEMORY_BUDGET - VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); -#endif - -#if VMA_VULKAN_VERSION >= 1003000 - 
VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements); - VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements); -#endif - -#undef VMA_COPY_IF_NOT_NULL -} - -#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 - -void VmaAllocator_T::ImportVulkanFunctions_Dynamic() -{ - VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr && - "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " - "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. " - "Other members can be null."); - -#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ - if(m_VulkanFunctions.memberName == VMA_NULL) \ - m_VulkanFunctions.memberName = \ - (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString); -#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ - if(m_VulkanFunctions.memberName == VMA_NULL) \ - m_VulkanFunctions.memberName = \ - (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString); - - VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); - VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); - VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); - VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); - VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); - VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory"); - VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); - VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); - VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); - VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); - VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); - VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); - VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); - VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); - VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); - VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); - VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); - -#if VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); - VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); - VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); - VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); - VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2"); - } -#endif - -#if VMA_DEDICATED_ALLOCATION - if(m_UseKhrDedicatedAllocation) - { - 
VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); - VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR"); - } -#endif - -#if VMA_BIND_MEMORY2 - if(m_UseKhrBindMemory2) - { - VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); - VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); - } -#endif // #if VMA_BIND_MEMORY2 - -#if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) - { - VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); - } -#endif // #if VMA_MEMORY_BUDGET - -#if VMA_VULKAN_VERSION >= 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) - { - VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements"); - VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); - } -#endif - -#undef VMA_FETCH_DEVICE_FUNC -#undef VMA_FETCH_INSTANCE_FUNC -} - -#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 - -void VmaAllocator_T::ValidateVulkanFunctions() -{ - VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); - -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) - { - VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); - } -#endif - -#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) - { - VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); - VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); - } -#endif - -#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 - if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); - } -#endif - -#if VMA_VULKAN_VERSION >= 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) - { - VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL); - 
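// ---------------------------------------------------------------------------
// Illustrative sketch, not from the source above: the assert at the top of
// ImportVulkanFunctions_Dynamic requires the application to supply
// vkGetInstanceProcAddr and vkGetDeviceProcAddr when the library is built with
// VMA_DYNAMIC_VULKAN_FUNCTIONS == 1; VMA then fetches the remaining entry points
// itself. Assumes the application links the Vulkan loader; handle names are
// placeholders.
#include "vk_mem_alloc.h"

static VkResult CreateAllocatorWithDynamicFunctions(VkInstance instance,
                                                    VkPhysicalDevice physicalDevice,
                                                    VkDevice device,
                                                    VmaAllocator* outAllocator)
{
    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
    vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;

    VmaAllocatorCreateInfo createInfo = {};
    createInfo.vulkanApiVersion = VK_API_VERSION_1_1;
    createInfo.instance = instance;
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    createInfo.pVulkanFunctions = &vulkanFunctions; // only the two ProcAddr members are required

    return vmaCreateAllocator(&createInfo, outAllocator);
}
// ---------------------------------------------------------------------------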
VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL); - } -#endif -} - -VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) -{ - const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); - const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; - const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; - return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32); -} - -VkResult VmaAllocator_T::AllocateMemoryOfType( - VmaPool pool, - VkDeviceSize size, - VkDeviceSize alignment, - bool dedicatedPreferred, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - const VmaAllocationCreateInfo& createInfo, - uint32_t memTypeIndex, - VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - VmaBlockVector& blockVector, - size_t allocationCount, - VmaAllocation* pAllocations) -{ - VMA_ASSERT(pAllocations != VMA_NULL); - VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size); - - VmaAllocationCreateInfo finalCreateInfo = createInfo; - VkResult res = CalcMemTypeParams( - finalCreateInfo, - memTypeIndex, - size, - allocationCount); - if(res != VK_SUCCESS) - return res; - - if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) - { - return AllocateDedicatedMemory( - pool, - size, - suballocType, - dedicatedAllocations, - memTypeIndex, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, - (finalCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, - finalCreateInfo.pUserData, - finalCreateInfo.priority, - dedicatedBuffer, - dedicatedImage, - dedicatedBufferImageUsage, - allocationCount, - pAllocations, - blockVector.GetAllocationNextPtr()); - } - else - { - const bool canAllocateDedicated = - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && - (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize()); - - if(canAllocateDedicated) - { - // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size. - if(size > blockVector.GetPreferredBlockSize() / 2) - { - dedicatedPreferred = true; - } - // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget, - // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above - // 3/4 of the maximum allocation count. 
- if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4) - { - dedicatedPreferred = false; - } - - if(dedicatedPreferred) - { - res = AllocateDedicatedMemory( - pool, - size, - suballocType, - dedicatedAllocations, - memTypeIndex, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, - (finalCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, - finalCreateInfo.pUserData, - finalCreateInfo.priority, - dedicatedBuffer, - dedicatedImage, - dedicatedBufferImageUsage, - allocationCount, - pAllocations, - blockVector.GetAllocationNextPtr()); - if(res == VK_SUCCESS) - { - // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here. - VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); - return VK_SUCCESS; - } - } - } - - res = blockVector.Allocate( - size, - alignment, - finalCreateInfo, - suballocType, - allocationCount, - pAllocations); - if(res == VK_SUCCESS) - return VK_SUCCESS; - - // Try dedicated memory. - if(canAllocateDedicated && !dedicatedPreferred) - { - res = AllocateDedicatedMemory( - pool, - size, - suballocType, - dedicatedAllocations, - memTypeIndex, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, - (finalCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, - (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, - finalCreateInfo.pUserData, - finalCreateInfo.priority, - dedicatedBuffer, - dedicatedImage, - dedicatedBufferImageUsage, - allocationCount, - pAllocations, - blockVector.GetAllocationNextPtr()); - if(res == VK_SUCCESS) - { - // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here. - VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); - return VK_SUCCESS; - } - } - // Everything failed: Return error code. 
- VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); - return res; - } -} - -VkResult VmaAllocator_T::AllocateDedicatedMemory( - VmaPool pool, - VkDeviceSize size, - VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - uint32_t memTypeIndex, - bool map, - bool isUserDataString, - bool isMappingAllowed, - bool canAliasMemory, - void* pUserData, - float priority, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - size_t allocationCount, - VmaAllocation* pAllocations, - const void* pNextChain) -{ - VMA_ASSERT(allocationCount > 0 && pAllocations); - - VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; - allocInfo.memoryTypeIndex = memTypeIndex; - allocInfo.allocationSize = size; - allocInfo.pNext = pNextChain; - -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; - if(!canAliasMemory) - { - if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - if(dedicatedBuffer != VK_NULL_HANDLE) - { - VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); - dedicatedAllocInfo.buffer = dedicatedBuffer; - VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); - } - else if(dedicatedImage != VK_NULL_HANDLE) - { - dedicatedAllocInfo.image = dedicatedImage; - VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); - } - } - } -#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - -#if VMA_BUFFER_DEVICE_ADDRESS - VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; - if(m_UseKhrBufferDeviceAddress) - { - bool canContainBufferWithDeviceAddress = true; - if(dedicatedBuffer != VK_NULL_HANDLE) - { - canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown - (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; - } - else if(dedicatedImage != VK_NULL_HANDLE) - { - canContainBufferWithDeviceAddress = false; - } - if(canContainBufferWithDeviceAddress) - { - allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; - VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); - } - } -#endif // #if VMA_BUFFER_DEVICE_ADDRESS - -#if VMA_MEMORY_PRIORITY - VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; - if(m_UseExtMemoryPriority) - { - VMA_ASSERT(priority >= 0.f && priority <= 1.f); - priorityInfo.priority = priority; - VmaPnextChainPushFront(&allocInfo, &priorityInfo); - } -#endif // #if VMA_MEMORY_PRIORITY - -#if VMA_EXTERNAL_MEMORY - // Attach VkExportMemoryAllocateInfoKHR if necessary. 
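// ---------------------------------------------------------------------------
// Illustrative sketch, not from the source above: requesting a dedicated
// VkDeviceMemory object for a single resource, which reaches the
// AllocateDedicatedMemory path via VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
// Without the flag, the heuristics in AllocateMemoryOfType above may still pick
// a dedicated allocation for large requests. Names and sizes are placeholders.
#include "vk_mem_alloc.h"

static VkResult CreateDedicatedBuffer(VmaAllocator allocator, VkDeviceSize size,
                                      VkBuffer* outBuffer, VmaAllocation* outAllocation)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = size;
    bufferInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    allocCreateInfo.priority = 1.0f; // only honored when VK_EXT_memory_priority is enabled

    return vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
                           outBuffer, outAllocation, nullptr);
}
// ---------------------------------------------------------------------------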
- VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; - exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex); - if(exportMemoryAllocInfo.handleTypes != 0) - { - VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); - } -#endif // #if VMA_EXTERNAL_MEMORY - - size_t allocIndex; - VkResult res = VK_SUCCESS; - for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) - { - res = AllocateDedicatedMemoryPage( - pool, - size, - suballocType, - memTypeIndex, - allocInfo, - map, - isUserDataString, - isMappingAllowed, - pUserData, - pAllocations + allocIndex); - if(res != VK_SUCCESS) - { - break; - } - } - - if(res == VK_SUCCESS) - { - for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) - { - dedicatedAllocations.Register(pAllocations[allocIndex]); - } - VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex); - } - else - { - // Free all already created allocations. - while(allocIndex--) - { - VmaAllocation currAlloc = pAllocations[allocIndex]; - VkDeviceMemory hMemory = currAlloc->GetMemory(); - - /* - There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory - before vkFreeMemory. - - if(currAlloc->GetMappedData() != VMA_NULL) - { - (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); - } - */ - - FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); - m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); - m_AllocationObjectAllocator.Free(currAlloc); - } - - memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); - } - - return res; -} - -VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( - VmaPool pool, - VkDeviceSize size, - VmaSuballocationType suballocType, - uint32_t memTypeIndex, - const VkMemoryAllocateInfo& allocInfo, - bool map, - bool isUserDataString, - bool isMappingAllowed, - void* pUserData, - VmaAllocation* pAllocation) -{ - VkDeviceMemory hMemory = VK_NULL_HANDLE; - VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); - if(res < 0) - { - VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); - return res; - } - - void* pMappedData = VMA_NULL; - if(map) - { - res = (*m_VulkanFunctions.vkMapMemory)( - m_hDevice, - hMemory, - 0, - VK_WHOLE_SIZE, - 0, - &pMappedData); - if(res < 0) - { - VMA_DEBUG_LOG(" vkMapMemory FAILED"); - FreeVulkanMemory(memTypeIndex, size, hMemory); - return res; - } - } - - *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed); - (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size); - if (isUserDataString) - (*pAllocation)->SetName(this, (const char*)pUserData); - else - (*pAllocation)->SetUserData(this, pUserData); - m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); - if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) - { - FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); - } - - return VK_SUCCESS; -} - -void VmaAllocator_T::GetBufferMemoryRequirements( - VkBuffer hBuffer, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const -{ -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; - memReqInfo.buffer = hBuffer; - - VkMemoryDedicatedRequirementsKHR memDedicatedReq = { 
VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; - - VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; - VmaPnextChainPushFront(&memReq2, &memDedicatedReq); - - (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); - - memReq = memReq2.memoryRequirements; - requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); - prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); - } - else -#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - { - (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); - requiresDedicatedAllocation = false; - prefersDedicatedAllocation = false; - } -} - -void VmaAllocator_T::GetImageMemoryRequirements( - VkImage hImage, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const -{ -#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) - { - VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; - memReqInfo.image = hImage; - - VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; - - VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; - VmaPnextChainPushFront(&memReq2, &memDedicatedReq); - - (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); - - memReq = memReq2.memoryRequirements; - requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); - prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); - } - else -#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - { - (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); - requiresDedicatedAllocation = false; - prefersDedicatedAllocation = false; - } -} - -VkResult VmaAllocator_T::FindMemoryTypeIndex( - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkFlags bufImgUsage, - uint32_t* pMemoryTypeIndex) const -{ - memoryTypeBits &= GetGlobalMemoryTypeBits(); - - if(pAllocationCreateInfo->memoryTypeBits != 0) - { - memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; - } - - VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0; - if(!FindMemoryPreferences( - IsIntegratedGpu(), - *pAllocationCreateInfo, - bufImgUsage, - requiredFlags, preferredFlags, notPreferredFlags)) - { - return VK_ERROR_FEATURE_NOT_PRESENT; - } - - *pMemoryTypeIndex = UINT32_MAX; - uint32_t minCost = UINT32_MAX; - for(uint32_t memTypeIndex = 0, memTypeBit = 1; - memTypeIndex < GetMemoryTypeCount(); - ++memTypeIndex, memTypeBit <<= 1) - { - // This memory type is acceptable according to memoryTypeBits bitmask. - if((memTypeBit & memoryTypeBits) != 0) - { - const VkMemoryPropertyFlags currFlags = - m_MemProps.memoryTypes[memTypeIndex].propertyFlags; - // This memory type contains requiredFlags. - if((requiredFlags & ~currFlags) == 0) - { - // Calculate cost as number of bits from preferredFlags not present in this memory type. - uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) + - VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); - // Remember memory type with lowest cost. 
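// ---------------------------------------------------------------------------
// Illustrative sketch, not from the source above: the cost-based search in
// FindMemoryTypeIndex backs the public vmaFindMemoryTypeIndex* helpers. A common
// use is picking a memory type for a custom VmaPool. The buffer parameters here
// are placeholders; only the usage and allocation flags influence the result.
#include "vk_mem_alloc.h"

static VkResult PickUploadMemoryType(VmaAllocator allocator, uint32_t* outMemTypeIndex)
{
    VkBufferCreateInfo sampleBuffer = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    sampleBuffer.size = 1024; // representative size
    sampleBuffer.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;

    // Returns the lowest-cost type whose propertyFlags contain all required flags.
    return vmaFindMemoryTypeIndexForBufferInfo(allocator, &sampleBuffer,
                                               &allocCreateInfo, outMemTypeIndex);
}
// ---------------------------------------------------------------------------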
- if(currCost < minCost) - { - *pMemoryTypeIndex = memTypeIndex; - if(currCost == 0) - { - return VK_SUCCESS; - } - minCost = currCost; - } - } - } - } - return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; -} - -VkResult VmaAllocator_T::CalcMemTypeParams( - VmaAllocationCreateInfo& inoutCreateInfo, - uint32_t memTypeIndex, - VkDeviceSize size, - size_t allocationCount) -{ - // If memory type is not HOST_VISIBLE, disable MAPPED. - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && - (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) - { - inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; - } - - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && - (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0) - { - const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); - VmaBudget heapBudget = {}; - GetHeapBudgets(&heapBudget, heapIndex, 1); - if(heapBudget.usage + size * allocationCount > heapBudget.budget) - { - return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } - } - return VK_SUCCESS; -} - -VkResult VmaAllocator_T::CalcAllocationParams( - VmaAllocationCreateInfo& inoutCreateInfo, - bool dedicatedRequired, - bool dedicatedPreferred) -{ - VMA_ASSERT((inoutCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && - "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); - VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || - (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && - "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); - if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) - { - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) - { - VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && - "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); - } - } - - // If memory is lazily allocated, it should be always dedicated. 
- if(dedicatedRequired || - inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) - { - inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; - } - - if(inoutCreateInfo.pool != VK_NULL_HANDLE) - { - if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && - (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) - { - VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); - return VK_ERROR_FEATURE_NOT_PRESENT; - } - inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); - } - - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && - (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) - { - VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); - return VK_ERROR_FEATURE_NOT_PRESENT; - } - - if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY && - (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) - { - inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; - } - - // Non-auto USAGE values imply HOST_ACCESS flags. - // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. - // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. - // Otherwise they just protect from assert on mapping. - if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && - inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && - inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) - { - if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) - { - inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; - } - } - - return VK_SUCCESS; -} - -VkResult VmaAllocator_T::AllocateMemory( - const VkMemoryRequirements& vkMemReq, - bool requiresDedicatedAllocation, - bool prefersDedicatedAllocation, - VkBuffer dedicatedBuffer, - VkImage dedicatedImage, - VkFlags dedicatedBufferImageUsage, - const VmaAllocationCreateInfo& createInfo, - VmaSuballocationType suballocType, - size_t allocationCount, - VmaAllocation* pAllocations) -{ - memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); - - VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); - - if(vkMemReq.size == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - - VmaAllocationCreateInfo createInfoFinal = createInfo; - VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation); - if(res != VK_SUCCESS) - return res; - - if(createInfoFinal.pool != VK_NULL_HANDLE) - { - VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; - return AllocateMemoryOfType( - createInfoFinal.pool, - vkMemReq.size, - vkMemReq.alignment, - prefersDedicatedAllocation, - dedicatedBuffer, - dedicatedImage, - dedicatedBufferImageUsage, - createInfoFinal, - blockVector.GetMemoryTypeIndex(), - suballocType, - createInfoFinal.pool->m_DedicatedAllocations, - blockVector, - allocationCount, - pAllocations); - } - else - { - // Bit mask of memory Vulkan types acceptable for this allocation. - uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; - uint32_t memTypeIndex = UINT32_MAX; - res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); - // Can't find any single memory type matching requirements. 
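// ---------------------------------------------------------------------------
// Illustrative sketch, not from the source above: the asserts in
// CalcAllocationParams enforce that VMA_MEMORY_USAGE_AUTO* combined with
// VMA_ALLOCATION_CREATE_MAPPED_BIT also carries one of the HOST_ACCESS flags.
// A persistently mapped staging buffer satisfying that rule could look like
// this; names and parameters are placeholders.
#include "vk_mem_alloc.h"
#include <cstring>

static VkResult CreateMappedStagingBuffer(VmaAllocator allocator, const void* srcData,
                                          VkDeviceSize byteCount,
                                          VkBuffer* outBuffer, VmaAllocation* outAllocation)
{
    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = byteCount;
    bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
                            VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VmaAllocationInfo allocationInfo = {};
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo,
                                   outBuffer, outAllocation, &allocationInfo);
    if (res == VK_SUCCESS && srcData != nullptr)
    {
        // pMappedData stays valid for the allocation's lifetime because of MAPPED_BIT.
        std::memcpy(allocationInfo.pMappedData, srcData, (size_t)byteCount);
        res = vmaFlushAllocation(allocator, *outAllocation, 0, VK_WHOLE_SIZE);
    }
    return res;
}
// ---------------------------------------------------------------------------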
res is VK_ERROR_FEATURE_NOT_PRESENT. - if(res != VK_SUCCESS) - return res; - do - { - VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex]; - VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); - res = AllocateMemoryOfType( - VK_NULL_HANDLE, - vkMemReq.size, - vkMemReq.alignment, - requiresDedicatedAllocation || prefersDedicatedAllocation, - dedicatedBuffer, - dedicatedImage, - dedicatedBufferImageUsage, - createInfoFinal, - memTypeIndex, - suballocType, - m_DedicatedAllocations[memTypeIndex], - *blockVector, - allocationCount, - pAllocations); - // Allocation succeeded - if(res == VK_SUCCESS) - return VK_SUCCESS; - - // Remove old memTypeIndex from list of possibilities. - memoryTypeBits &= ~(1u << memTypeIndex); - // Find alternative memTypeIndex. - res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); - } while(res == VK_SUCCESS); - - // No other matching memory type index could be found. - // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. - return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } -} - -void VmaAllocator_T::FreeMemory( - size_t allocationCount, - const VmaAllocation* pAllocations) -{ - VMA_ASSERT(pAllocations); - - for(size_t allocIndex = allocationCount; allocIndex--; ) - { - VmaAllocation allocation = pAllocations[allocIndex]; - - if(allocation != VK_NULL_HANDLE) - { - if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) - { - FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); - } - - allocation->FreeName(this); - - switch(allocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - VmaBlockVector* pBlockVector = VMA_NULL; - VmaPool hPool = allocation->GetParentPool(); - if(hPool != VK_NULL_HANDLE) - { - pBlockVector = &hPool->m_BlockVector; - } - else - { - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - pBlockVector = m_pBlockVectors[memTypeIndex]; - VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); - } - pBlockVector->Free(allocation); - } - break; - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - FreeDedicatedMemory(allocation); - break; - default: - VMA_ASSERT(0); - } - } - } -} - -void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) -{ - // Initialize. - VmaClearDetailedStatistics(pStats->total); - for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) - VmaClearDetailedStatistics(pStats->memoryType[i]); - for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) - VmaClearDetailedStatistics(pStats->memoryHeap[i]); - - // Process default pools. - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; - if (pBlockVector != VMA_NULL) - pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]); - } - - // Process custom pools. - { - VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) - { - VmaBlockVector& blockVector = pool->m_BlockVector; - const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); - blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); - pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); - } - } - - // Process dedicated allocations. 
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]); - } - - // Sum from memory types to memory heaps. - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex; - VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]); - } - - // Sum from memory heaps to total. - for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) - VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]); - - VMA_ASSERT(pStats->total.statistics.allocationCount == 0 || - pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); - VMA_ASSERT(pStats->total.unusedRangeCount == 0 || - pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); -} - -void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount) -{ -#if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) - { - if(m_Budget.m_OperationsSinceBudgetFetch < 30) - { - VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); - for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) - { - const uint32_t heapIndex = firstHeap + i; - - outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; - outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; - outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; - outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; - - if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) - { - outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] + - outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; - } - else - { - outBudgets->usage = 0; - } - - // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. - outBudgets->budget = VMA_MIN( - m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); - } - } - else - { - UpdateVulkanBudget(); // Outside of mutex lock - GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion - } - } - else -#endif - { - for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) - { - const uint32_t heapIndex = firstHeap + i; - - outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; - outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; - outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; - outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; - - outBudgets->usage = outBudgets->statistics.blockBytes; - outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
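// ---------------------------------------------------------------------------
// Illustrative sketch, not from the source above: reading the per-heap numbers
// produced by GetHeapBudgets. With VK_EXT_memory_budget the values come from the
// driver (refreshed after ~30 allocator operations or on vmaSetCurrentFrameIndex);
// otherwise the 80%-of-heap-size heuristic above is used. "allocator" is a
// placeholder for an existing VmaAllocator.
#include "vk_mem_alloc.h"
#include <cstdio>

static void LogHeapBudgets(VmaAllocator allocator)
{
    const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
    vmaGetMemoryProperties(allocator, &memProps);

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetHeapBudgets(allocator, budgets); // fills one entry per memory heap

    for (uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
    {
        std::printf("heap %u: usage %llu / budget %llu bytes (VMA blocks: %llu bytes)\n",
                    heapIndex,
                    (unsigned long long)budgets[heapIndex].usage,
                    (unsigned long long)budgets[heapIndex].budget,
                    (unsigned long long)budgets[heapIndex].statistics.blockBytes);
    }
}
// ---------------------------------------------------------------------------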
- } - } -} - -void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) -{ - pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); - pAllocationInfo->deviceMemory = hAllocation->GetMemory(); - pAllocationInfo->offset = hAllocation->GetOffset(); - pAllocationInfo->size = hAllocation->GetSize(); - pAllocationInfo->pMappedData = hAllocation->GetMappedData(); - pAllocationInfo->pUserData = hAllocation->GetUserData(); - pAllocationInfo->pName = hAllocation->GetName(); -} - -VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) -{ - VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags); - - VmaPoolCreateInfo newCreateInfo = *pCreateInfo; - - // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash. - if(pCreateInfo->pMemoryAllocateNext) - { - VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0); - } - - if(newCreateInfo.maxBlockCount == 0) - { - newCreateInfo.maxBlockCount = SIZE_MAX; - } - if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - // Memory type index out of range or forbidden. - if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || - ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) - { - return VK_ERROR_FEATURE_NOT_PRESENT; - } - if(newCreateInfo.minAllocationAlignment > 0) - { - VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); - } - - const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); - - *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); - - VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); - if(res != VK_SUCCESS) - { - vma_delete(this, *pPool); - *pPool = VMA_NULL; - return res; - } - - // Add to m_Pools. - { - VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); - (*pPool)->SetId(m_NextPoolId++); - m_Pools.PushBack(*pPool); - } - - return VK_SUCCESS; -} - -void VmaAllocator_T::DestroyPool(VmaPool pool) -{ - // Remove from m_Pools. - { - VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); - m_Pools.Remove(pool); - } - - vma_delete(this, pool); -} - -void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats) -{ - VmaClearStatistics(*pPoolStats); - pool->m_BlockVector.AddStatistics(*pPoolStats); - pool->m_DedicatedAllocations.AddStatistics(*pPoolStats); -} - -void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats) -{ - VmaClearDetailedStatistics(*pPoolStats); - pool->m_BlockVector.AddDetailedStatistics(*pPoolStats); - pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats); -} - -void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) -{ - m_CurrentFrameIndex.store(frameIndex); - -#if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) - { - UpdateVulkanBudget(); - } -#endif // #if VMA_MEMORY_BUDGET -} - -VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) -{ - return hPool->m_BlockVector.CheckCorruption(); -} - -VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) -{ - VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; - - // Process default pools. 
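// ---------------------------------------------------------------------------
// Illustrative sketch, not from the source above: creating a custom pool, which
// ends up in the m_Pools list managed by CreatePool/DestroyPool. The memory type
// index would typically come from vmaFindMemoryTypeIndex*; the block counts and
// block size here are placeholder values.
#include "vk_mem_alloc.h"

static VkResult CreateSmallPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* outPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 16ull * 1024 * 1024; // explicit 16 MiB blocks; 0 lets VMA decide
    poolInfo.minBlockCount = 1;               // keep at least one block alive
    poolInfo.maxBlockCount = 8;

    // Allocate from it later via VmaAllocationCreateInfo::pool = *outPool,
    // and release it with vmaDestroyPool(allocator, *outPool).
    return vmaCreatePool(allocator, &poolInfo, outPool);
}
// ---------------------------------------------------------------------------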
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; - if(pBlockVector != VMA_NULL) - { - VkResult localRes = pBlockVector->CheckCorruption(); - switch(localRes) - { - case VK_ERROR_FEATURE_NOT_PRESENT: - break; - case VK_SUCCESS: - finalRes = VK_SUCCESS; - break; - default: - return localRes; - } - } - } - - // Process custom pools. - { - VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) - { - if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) - { - VkResult localRes = pool->m_BlockVector.CheckCorruption(); - switch(localRes) - { - case VK_ERROR_FEATURE_NOT_PRESENT: - break; - case VK_SUCCESS: - finalRes = VK_SUCCESS; - break; - default: - return localRes; - } - } - } - } - - return finalRes; -} - -VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory) -{ - AtomicTransactionalIncrement deviceMemoryCountIncrement; - const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount); -#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT - if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount) - { - return VK_ERROR_TOO_MANY_OBJECTS; - } -#endif - - const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex); - - // HeapSizeLimit is in effect for this heap. - if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0) - { - const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; - VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex]; - for(;;) - { - const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize; - if(blockBytesAfterAllocation > heapSize) - { - return VK_ERROR_OUT_OF_DEVICE_MEMORY; - } - if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) - { - break; - } - } - } - else - { - m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize; - } - ++m_Budget.m_BlockCount[heapIndex]; - - // VULKAN CALL vkAllocateMemory. - VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); - - if(res == VK_SUCCESS) - { -#if VMA_MEMORY_BUDGET - ++m_Budget.m_OperationsSinceBudgetFetch; -#endif - - // Informative callback. - if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) - { - (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData); - } - - deviceMemoryCountIncrement.Commit(); - } - else - { - --m_Budget.m_BlockCount[heapIndex]; - m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; - } - - return res; -} - -void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) -{ - // Informative callback. - if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) - { - (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData); - } - - // VULKAN CALL vkFreeMemory. 
- (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); - - const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType); - --m_Budget.m_BlockCount[heapIndex]; - m_Budget.m_BlockBytes[heapIndex] -= size; - - --m_DeviceMemoryCount; -} - -VkResult VmaAllocator_T::BindVulkanBuffer( - VkDeviceMemory memory, - VkDeviceSize memoryOffset, - VkBuffer buffer, - const void* pNext) -{ - if(pNext != VMA_NULL) - { -#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 - if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && - m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) - { - VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; - bindBufferMemoryInfo.pNext = pNext; - bindBufferMemoryInfo.buffer = buffer; - bindBufferMemoryInfo.memory = memory; - bindBufferMemoryInfo.memoryOffset = memoryOffset; - return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); - } - else -#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 - { - return VK_ERROR_EXTENSION_NOT_PRESENT; - } - } - else - { - return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset); - } -} - -VkResult VmaAllocator_T::BindVulkanImage( - VkDeviceMemory memory, - VkDeviceSize memoryOffset, - VkImage image, - const void* pNext) -{ - if(pNext != VMA_NULL) - { -#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 - if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && - m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) - { - VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; - bindBufferMemoryInfo.pNext = pNext; - bindBufferMemoryInfo.image = image; - bindBufferMemoryInfo.memory = memory; - bindBufferMemoryInfo.memoryOffset = memoryOffset; - return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); - } - else -#endif // #if VMA_BIND_MEMORY2 - { - return VK_ERROR_EXTENSION_NOT_PRESENT; - } - } - else - { - return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); - } -} - -VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) -{ - switch(hAllocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); - char *pBytes = VMA_NULL; - VkResult res = pBlock->Map(this, 1, (void**)&pBytes); - if(res == VK_SUCCESS) - { - *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); - hAllocation->BlockAllocMap(); - } - return res; - } - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - return hAllocation->DedicatedAllocMap(this, ppData); - default: - VMA_ASSERT(0); - return VK_ERROR_MEMORY_MAP_FAILED; - } -} - -void VmaAllocator_T::Unmap(VmaAllocation hAllocation) -{ - switch(hAllocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); - hAllocation->BlockAllocUnmap(); - pBlock->Unmap(this, 1); - } - break; - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - hAllocation->DedicatedAllocUnmap(this); - break; - default: - VMA_ASSERT(0); - } -} - -VkResult VmaAllocator_T::BindBufferMemory( - VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkBuffer hBuffer, - const void* pNext) -{ - VkResult res = VK_SUCCESS; - switch(hAllocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - res = BindVulkanBuffer(hAllocation->GetMemory(), 
allocationLocalOffset, hBuffer, pNext); - break; - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); - VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block."); - res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext); - break; - } - default: - VMA_ASSERT(0); - } - return res; -} - -VkResult VmaAllocator_T::BindImageMemory( - VmaAllocation hAllocation, - VkDeviceSize allocationLocalOffset, - VkImage hImage, - const void* pNext) -{ - VkResult res = VK_SUCCESS; - switch(hAllocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext); - break; - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); - VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block."); - res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext); - break; - } - default: - VMA_ASSERT(0); - } - return res; -} - -VkResult VmaAllocator_T::FlushOrInvalidateAllocation( - VmaAllocation hAllocation, - VkDeviceSize offset, VkDeviceSize size, - VMA_CACHE_OPERATION op) -{ - VkResult res = VK_SUCCESS; - - VkMappedMemoryRange memRange = {}; - if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange)) - { - switch(op) - { - case VMA_CACHE_FLUSH: - res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange); - break; - case VMA_CACHE_INVALIDATE: - res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange); - break; - default: - VMA_ASSERT(0); - } - } - // else: Just ignore this call. - return res; -} - -VkResult VmaAllocator_T::FlushOrInvalidateAllocations( - uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, const VkDeviceSize* sizes, - VMA_CACHE_OPERATION op) -{ - typedef VmaStlAllocator RangeAllocator; - typedef VmaSmallVector RangeVector; - RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks())); - - for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) - { - const VmaAllocation alloc = allocations[allocIndex]; - const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0; - const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE; - VkMappedMemoryRange newRange; - if(GetFlushOrInvalidateRange(alloc, offset, size, newRange)) - { - ranges.push_back(newRange); - } - } - - VkResult res = VK_SUCCESS; - if(!ranges.empty()) - { - switch(op) - { - case VMA_CACHE_FLUSH: - res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); - break; - case VMA_CACHE_INVALIDATE: - res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); - break; - default: - VMA_ASSERT(0); - } - } - // else: Just ignore this call. 
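// ---------------------------------------------------------------------------
// Illustrative sketch, not from the source above: reading back data written by
// the GPU through a host-visible allocation. vmaInvalidateAllocation forwards to
// the FlushOrInvalidateAllocation path, which expands the range to
// nonCoherentAtomSize only for non-HOST_COHERENT memory types and is a no-op
// otherwise. Assumes the allocation was created with
// VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; names are placeholders.
#include "vk_mem_alloc.h"
#include <cstring>

static VkResult ReadBackAllocation(VmaAllocator allocator, VmaAllocation allocation,
                                   void* dst, VkDeviceSize byteCount)
{
    void* mapped = nullptr;
    VkResult res = vmaMapMemory(allocator, allocation, &mapped);
    if (res != VK_SUCCESS)
        return res;

    // Make GPU writes visible to the host before reading (no-op on coherent memory).
    res = vmaInvalidateAllocation(allocator, allocation, 0, byteCount);
    if (res == VK_SUCCESS)
        std::memcpy(dst, mapped, (size_t)byteCount);

    vmaUnmapMemory(allocator, allocation);
    return res;
}
// ---------------------------------------------------------------------------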
- return res; -} - -void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) -{ - VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); - - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - VmaPool parentPool = allocation->GetParentPool(); - if(parentPool == VK_NULL_HANDLE) - { - // Default pool - m_DedicatedAllocations[memTypeIndex].Unregister(allocation); - } - else - { - // Custom pool - parentPool->m_DedicatedAllocations.Unregister(allocation); - } - - VkDeviceMemory hMemory = allocation->GetMemory(); - - /* - There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory - before vkFreeMemory. - - if(allocation->GetMappedData() != VMA_NULL) - { - (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); - } - */ - - FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory); - - m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); - m_AllocationObjectAllocator.Free(allocation); - - VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex); -} - -uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const -{ - VkBufferCreateInfo dummyBufCreateInfo; - VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo); - - uint32_t memoryTypeBits = 0; - - // Create buffer. - VkBuffer buf = VK_NULL_HANDLE; - VkResult res = (*GetVulkanFunctions().vkCreateBuffer)( - m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); - if(res == VK_SUCCESS) - { - // Query for supported memory types. - VkMemoryRequirements memReq; - (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq); - memoryTypeBits = memReq.memoryTypeBits; - - // Destroy buffer. - (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks()); - } - - return memoryTypeBits; -} - -uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const -{ - // Make sure memory information is already fetched. - VMA_ASSERT(GetMemoryTypeCount() > 0); - - uint32_t memoryTypeBits = UINT32_MAX; - - if(!m_UseAmdDeviceCoherentMemory) - { - // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD. 
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) - { - memoryTypeBits &= ~(1u << memTypeIndex); - } - } - } - - return memoryTypeBits; -} - -bool VmaAllocator_T::GetFlushOrInvalidateRange( - VmaAllocation allocation, - VkDeviceSize offset, VkDeviceSize size, - VkMappedMemoryRange& outRange) const -{ - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) - { - const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; - const VkDeviceSize allocationSize = allocation->GetSize(); - VMA_ASSERT(offset <= allocationSize); - - outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; - outRange.pNext = VMA_NULL; - outRange.memory = allocation->GetMemory(); - - switch(allocation->GetType()) - { - case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: - outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); - if(size == VK_WHOLE_SIZE) - { - outRange.size = allocationSize - outRange.offset; - } - else - { - VMA_ASSERT(offset + size <= allocationSize); - outRange.size = VMA_MIN( - VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize), - allocationSize - outRange.offset); - } - break; - case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - // 1. Still within this allocation. - outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); - if(size == VK_WHOLE_SIZE) - { - size = allocationSize - offset; - } - else - { - VMA_ASSERT(offset + size <= allocationSize); - } - outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize); - - // 2. Adjust to whole block. - const VkDeviceSize allocationOffset = allocation->GetOffset(); - VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); - const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize(); - outRange.offset += allocationOffset; - outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset); - - break; - } - default: - VMA_ASSERT(0); - } - return true; - } - return false; -} - -#if VMA_MEMORY_BUDGET -void VmaAllocator_T::UpdateVulkanBudget() -{ - VMA_ASSERT(m_UseExtMemoryBudget); - - VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; - - VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; - VmaPnextChainPushFront(&memProps, &budgetProps); - - GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); - - { - VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); - - for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) - { - m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; - m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); - - // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. - if(m_Budget.m_VulkanBudget[heapIndex] == 0) - { - m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
- } - else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) - { - m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; - } - if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) - { - m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; - } - } - m_Budget.m_OperationsSinceBudgetFetch = 0; - } -} -#endif // VMA_MEMORY_BUDGET - -void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) -{ - if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && - hAllocation->IsMappingAllowed() && - (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) - { - void* pData = VMA_NULL; - VkResult res = Map(hAllocation, &pData); - if(res == VK_SUCCESS) - { - memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); - FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); - Unmap(hAllocation); - } - else - { - VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); - } - } -} - -uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() -{ - uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); - if(memoryTypeBits == UINT32_MAX) - { - memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); - m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); - } - return memoryTypeBits; -} - -#if VMA_STATS_STRING_ENABLED -void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) -{ - json.WriteString("DefaultPools"); - json.BeginObject(); - { - for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; - VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; - if (pBlockVector != VMA_NULL) - { - json.BeginString("Type "); - json.ContinueString(memTypeIndex); - json.EndString(); - json.BeginObject(); - { - json.WriteString("PreferredBlockSize"); - json.WriteNumber(pBlockVector->GetPreferredBlockSize()); - - json.WriteString("Blocks"); - pBlockVector->PrintDetailedMap(json); - - json.WriteString("DedicatedAllocations"); - dedicatedAllocList.BuildStatsString(json); - } - json.EndObject(); - } - } - } - json.EndObject(); - - json.WriteString("CustomPools"); - json.BeginObject(); - { - VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - if (!m_Pools.IsEmpty()) - { - for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) - { - bool displayType = true; - size_t index = 0; - for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) - { - VmaBlockVector& blockVector = pool->m_BlockVector; - if (blockVector.GetMemoryTypeIndex() == memTypeIndex) - { - if (displayType) - { - json.BeginString("Type "); - json.ContinueString(memTypeIndex); - json.EndString(); - json.BeginArray(); - displayType = false; - } - - json.BeginObject(); - { - json.WriteString("Name"); - json.BeginString(); - json.ContinueString_Size(index++); - if (pool->GetName()) - { - json.ContinueString(" - "); - json.ContinueString(pool->GetName()); - } - json.EndString(); - - json.WriteString("PreferredBlockSize"); - json.WriteNumber(blockVector.GetPreferredBlockSize()); - - json.WriteString("Blocks"); - blockVector.PrintDetailedMap(json); - - json.WriteString("DedicatedAllocations"); - pool->m_DedicatedAllocations.BuildStatsString(json); - } - json.EndObject(); - } - } - - if (!displayType) 
- json.EndArray(); - } - } - } - json.EndObject(); -} -#endif // VMA_STATS_STRING_ENABLED -#endif // _VMA_ALLOCATOR_T_FUNCTIONS - - -#ifndef _VMA_PUBLIC_INTERFACE -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( - const VmaAllocatorCreateInfo* pCreateInfo, - VmaAllocator* pAllocator) -{ - VMA_ASSERT(pCreateInfo && pAllocator); - VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || - (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3)); - VMA_DEBUG_LOG("vmaCreateAllocator"); - *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); - VkResult result = (*pAllocator)->Init(pCreateInfo); - if(result < 0) - { - vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator); - *pAllocator = VK_NULL_HANDLE; - } - return result; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( - VmaAllocator allocator) -{ - if(allocator != VK_NULL_HANDLE) - { - VMA_DEBUG_LOG("vmaDestroyAllocator"); - VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying. - vma_delete(&allocationCallbacks, allocator); - } -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo) -{ - VMA_ASSERT(allocator && pAllocatorInfo); - pAllocatorInfo->instance = allocator->m_hInstance; - pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice(); - pAllocatorInfo->device = allocator->m_hDevice; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( - VmaAllocator allocator, - const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) -{ - VMA_ASSERT(allocator && ppPhysicalDeviceProperties); - *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( - VmaAllocator allocator, - const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) -{ - VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); - *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( - VmaAllocator allocator, - uint32_t memoryTypeIndex, - VkMemoryPropertyFlags* pFlags) -{ - VMA_ASSERT(allocator && pFlags); - VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); - *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( - VmaAllocator allocator, - uint32_t frameIndex) -{ - VMA_ASSERT(allocator); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->SetCurrentFrameIndex(frameIndex); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( - VmaAllocator allocator, - VmaTotalStatistics* pStats) -{ - VMA_ASSERT(allocator && pStats); - VMA_DEBUG_GLOBAL_MUTEX_LOCK - allocator->CalculateStatistics(pStats); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( - VmaAllocator allocator, - VmaBudget* pBudgets) -{ - VMA_ASSERT(allocator && pBudgets); - VMA_DEBUG_GLOBAL_MUTEX_LOCK - allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount()); -} - -#if VMA_STATS_STRING_ENABLED - -VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( - VmaAllocator allocator, - char** ppStatsString, - VkBool32 detailedMap) -{ - VMA_ASSERT(allocator && ppStatsString); - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VmaStringBuilder sb(allocator->GetAllocationCallbacks()); - { - VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; - allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount()); - - 
VmaTotalStatistics stats; - allocator->CalculateStatistics(&stats); - - VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); - json.BeginObject(); - { - json.WriteString("General"); - json.BeginObject(); - { - const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties; - const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps; - - json.WriteString("API"); - json.WriteString("Vulkan"); - - json.WriteString("apiVersion"); - json.BeginString(); - json.ContinueString(VK_API_VERSION_MAJOR(deviceProperties.apiVersion)); - json.ContinueString("."); - json.ContinueString(VK_API_VERSION_MINOR(deviceProperties.apiVersion)); - json.ContinueString("."); - json.ContinueString(VK_API_VERSION_PATCH(deviceProperties.apiVersion)); - json.EndString(); - - json.WriteString("GPU"); - json.WriteString(deviceProperties.deviceName); - json.WriteString("deviceType"); - json.WriteNumber(static_cast(deviceProperties.deviceType)); - - json.WriteString("maxMemoryAllocationCount"); - json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount); - json.WriteString("bufferImageGranularity"); - json.WriteNumber(deviceProperties.limits.bufferImageGranularity); - json.WriteString("nonCoherentAtomSize"); - json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize); - - json.WriteString("memoryHeapCount"); - json.WriteNumber(memoryProperties.memoryHeapCount); - json.WriteString("memoryTypeCount"); - json.WriteNumber(memoryProperties.memoryTypeCount); - } - json.EndObject(); - } - { - json.WriteString("Total"); - VmaPrintDetailedStatistics(json, stats.total); - } - { - json.WriteString("MemoryInfo"); - json.BeginObject(); - { - for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) - { - json.BeginString("Heap "); - json.ContinueString(heapIndex); - json.EndString(); - json.BeginObject(); - { - const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex]; - json.WriteString("Flags"); - json.BeginArray(true); - { - if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) - json.WriteString("DEVICE_LOCAL"); - #if VMA_VULKAN_VERSION >= 1001000 - if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT) - json.WriteString("MULTI_INSTANCE"); - #endif - - VkMemoryHeapFlags flags = heapInfo.flags & - ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT - #if VMA_VULKAN_VERSION >= 1001000 - | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT - #endif - ); - if (flags != 0) - json.WriteNumber(flags); - } - json.EndArray(); - - json.WriteString("Size"); - json.WriteNumber(heapInfo.size); - - json.WriteString("Budget"); - json.BeginObject(); - { - json.WriteString("BudgetBytes"); - json.WriteNumber(budgets[heapIndex].budget); - json.WriteString("UsageBytes"); - json.WriteNumber(budgets[heapIndex].usage); - } - json.EndObject(); - - json.WriteString("Stats"); - VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]); - - json.WriteString("MemoryPools"); - json.BeginObject(); - { - for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) - { - if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) - { - json.BeginString("Type "); - json.ContinueString(typeIndex); - json.EndString(); - json.BeginObject(); - { - json.WriteString("Flags"); - json.BeginArray(true); - { - VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; - if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) - json.WriteString("DEVICE_LOCAL"); - if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) - 
json.WriteString("HOST_VISIBLE"); - if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) - json.WriteString("HOST_COHERENT"); - if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) - json.WriteString("HOST_CACHED"); - if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) - json.WriteString("LAZILY_ALLOCATED"); - #if VMA_VULKAN_VERSION >= 1001000 - if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) - json.WriteString("PROTECTED"); - #endif - #if VK_AMD_device_coherent_memory - if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) - json.WriteString("DEVICE_COHERENT_AMD"); - if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) - json.WriteString("DEVICE_UNCACHED_AMD"); - #endif - - flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT - #if VMA_VULKAN_VERSION >= 1001000 - | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT - #endif - #if VK_AMD_device_coherent_memory - | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY - | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY - #endif - | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT - | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT - | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); - if (flags != 0) - json.WriteNumber(flags); - } - json.EndArray(); - - json.WriteString("Stats"); - VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); - } - json.EndObject(); - } - } - - } - json.EndObject(); - } - json.EndObject(); - } - } - json.EndObject(); - } - - if (detailedMap == VK_TRUE) - allocator->PrintDetailedMap(json); - - json.EndObject(); - } - - *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength()); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( - VmaAllocator allocator, - char* pStatsString) -{ - if(pStatsString != VMA_NULL) - { - VMA_ASSERT(allocator); - VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString); - } -} - -#endif // VMA_STATS_STRING_ENABLED - -/* -This function is not protected by any mutex because it just reads immutable data. 
-*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( - VmaAllocator allocator, - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - uint32_t* pMemoryTypeIndex) -{ - VMA_ASSERT(allocator != VK_NULL_HANDLE); - VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); - VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); - - return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( - VmaAllocator allocator, - const VkBufferCreateInfo* pBufferCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - uint32_t* pMemoryTypeIndex) -{ - VMA_ASSERT(allocator != VK_NULL_HANDLE); - VMA_ASSERT(pBufferCreateInfo != VMA_NULL); - VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); - VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); - - const VkDevice hDev = allocator->m_hDevice; - const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); - VkResult res; - -#if VMA_VULKAN_VERSION >= 1003000 - if(funcs->vkGetDeviceBufferMemoryRequirements) - { - // Can query straight from VkBufferCreateInfo :) - VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS}; - devBufMemReq.pCreateInfo = pBufferCreateInfo; - - VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; - (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq); - - res = allocator->FindMemoryTypeIndex( - memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); - } - else -#endif // #if VMA_VULKAN_VERSION >= 1003000 - { - // Must create a dummy buffer to query :( - VkBuffer hBuffer = VK_NULL_HANDLE; - res = funcs->vkCreateBuffer( - hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); - if(res == VK_SUCCESS) - { - VkMemoryRequirements memReq = {}; - funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq); - - res = allocator->FindMemoryTypeIndex( - memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); - - funcs->vkDestroyBuffer( - hDev, hBuffer, allocator->GetAllocationCallbacks()); - } - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( - VmaAllocator allocator, - const VkImageCreateInfo* pImageCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - uint32_t* pMemoryTypeIndex) -{ - VMA_ASSERT(allocator != VK_NULL_HANDLE); - VMA_ASSERT(pImageCreateInfo != VMA_NULL); - VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); - VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); - - const VkDevice hDev = allocator->m_hDevice; - const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); - VkResult res; - -#if VMA_VULKAN_VERSION >= 1003000 - if(funcs->vkGetDeviceImageMemoryRequirements) - { - // Can query straight from VkImageCreateInfo :) - VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS}; - devImgMemReq.pCreateInfo = pImageCreateInfo; - VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && - "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); - - VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; - (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); - - res = 
allocator->FindMemoryTypeIndex( - memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); - } - else -#endif // #if VMA_VULKAN_VERSION >= 1003000 - { - // Must create a dummy image to query :( - VkImage hImage = VK_NULL_HANDLE; - res = funcs->vkCreateImage( - hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); - if(res == VK_SUCCESS) - { - VkMemoryRequirements memReq = {}; - funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); - - res = allocator->FindMemoryTypeIndex( - memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); - - funcs->vkDestroyImage( - hDev, hImage, allocator->GetAllocationCallbacks()); - } - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( - VmaAllocator allocator, - const VmaPoolCreateInfo* pCreateInfo, - VmaPool* pPool) -{ - VMA_ASSERT(allocator && pCreateInfo && pPool); - - VMA_DEBUG_LOG("vmaCreatePool"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->CreatePool(pCreateInfo, pPool); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( - VmaAllocator allocator, - VmaPool pool) -{ - VMA_ASSERT(allocator); - - if(pool == VK_NULL_HANDLE) - { - return; - } - - VMA_DEBUG_LOG("vmaDestroyPool"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->DestroyPool(pool); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( - VmaAllocator allocator, - VmaPool pool, - VmaStatistics* pPoolStats) -{ - VMA_ASSERT(allocator && pool && pPoolStats); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->GetPoolStatistics(pool, pPoolStats); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( - VmaAllocator allocator, - VmaPool pool, - VmaDetailedStatistics* pPoolStats) -{ - VMA_ASSERT(allocator && pool && pPoolStats); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->CalculatePoolStatistics(pool, pPoolStats); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) -{ - VMA_ASSERT(allocator && pool); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VMA_DEBUG_LOG("vmaCheckPoolCorruption"); - - return allocator->CheckPoolCorruption(pool); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( - VmaAllocator allocator, - VmaPool pool, - const char** ppName) -{ - VMA_ASSERT(allocator && pool && ppName); - - VMA_DEBUG_LOG("vmaGetPoolName"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *ppName = pool->GetName(); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( - VmaAllocator allocator, - VmaPool pool, - const char* pName) -{ - VMA_ASSERT(allocator && pool); - - VMA_DEBUG_LOG("vmaSetPoolName"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - pool->SetName(pName); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( - VmaAllocator allocator, - const VkMemoryRequirements* pVkMemoryRequirements, - const VmaAllocationCreateInfo* pCreateInfo, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); - - VMA_DEBUG_LOG("vmaAllocateMemory"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VkResult result = allocator->AllocateMemory( - *pVkMemoryRequirements, - false, // requiresDedicatedAllocation - false, // prefersDedicatedAllocation - VK_NULL_HANDLE, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_UNKNOWN, - 1, // allocationCount - pAllocation); - - if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) - { - allocator->GetAllocationInfo(*pAllocation, 
pAllocationInfo); - } - - return result; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( - VmaAllocator allocator, - const VkMemoryRequirements* pVkMemoryRequirements, - const VmaAllocationCreateInfo* pCreateInfo, - size_t allocationCount, - VmaAllocation* pAllocations, - VmaAllocationInfo* pAllocationInfo) -{ - if(allocationCount == 0) - { - return VK_SUCCESS; - } - - VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); - - VMA_DEBUG_LOG("vmaAllocateMemoryPages"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VkResult result = allocator->AllocateMemory( - *pVkMemoryRequirements, - false, // requiresDedicatedAllocation - false, // prefersDedicatedAllocation - VK_NULL_HANDLE, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_UNKNOWN, - allocationCount, - pAllocations); - - if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) - { - for(size_t i = 0; i < allocationCount; ++i) - { - allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); - } - } - - return result; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( - VmaAllocator allocator, - VkBuffer buffer, - const VmaAllocationCreateInfo* pCreateInfo, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); - - VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetBufferMemoryRequirements(buffer, vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation); - - VkResult result = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - buffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_BUFFER, - 1, // allocationCount - pAllocation); - - if(pAllocationInfo && result == VK_SUCCESS) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return result; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( - VmaAllocator allocator, - VkImage image, - const VmaAllocationCreateInfo* pCreateInfo, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); - - VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetImageMemoryRequirements(image, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); - - VkResult result = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - VK_NULL_HANDLE, // dedicatedBuffer - image, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage - *pCreateInfo, - VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, - 1, // allocationCount - pAllocation); - - if(pAllocationInfo && result == VK_SUCCESS) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return result; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( - VmaAllocator allocator, - VmaAllocation allocation) -{ - VMA_ASSERT(allocator); - - if(allocation == VK_NULL_HANDLE) - { - return; - } - - VMA_DEBUG_LOG("vmaFreeMemory"); - - 
VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->FreeMemory( - 1, // allocationCount - &allocation); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( - VmaAllocator allocator, - size_t allocationCount, - const VmaAllocation* pAllocations) -{ - if(allocationCount == 0) - { - return; - } - - VMA_ASSERT(allocator); - - VMA_DEBUG_LOG("vmaFreeMemoryPages"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->FreeMemory(allocationCount, pAllocations); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( - VmaAllocator allocator, - VmaAllocation allocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && allocation && pAllocationInfo); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->GetAllocationInfo(allocation, pAllocationInfo); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( - VmaAllocator allocator, - VmaAllocation allocation, - void* pUserData) -{ - VMA_ASSERT(allocator && allocation); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocation->SetUserData(allocator, pUserData); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const char* VMA_NULLABLE pName) -{ - allocation->SetName(allocator, pName); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkMemoryPropertyFlags* VMA_NOT_NULL pFlags) -{ - VMA_ASSERT(allocator && allocation && pFlags); - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( - VmaAllocator allocator, - VmaAllocation allocation, - void** ppData) -{ - VMA_ASSERT(allocator && allocation && ppData); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->Map(allocation, ppData); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( - VmaAllocator allocator, - VmaAllocation allocation) -{ - VMA_ASSERT(allocator && allocation); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - allocator->Unmap(allocation); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize offset, - VkDeviceSize size) -{ - VMA_ASSERT(allocator && allocation); - - VMA_DEBUG_LOG("vmaFlushAllocation"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); - - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize offset, - VkDeviceSize size) -{ - VMA_ASSERT(allocator && allocation); - - VMA_DEBUG_LOG("vmaInvalidateAllocation"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); - - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( - VmaAllocator allocator, - uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, - const VkDeviceSize* sizes) -{ - VMA_ASSERT(allocator); - - if(allocationCount == 0) - { - return VK_SUCCESS; - } - - VMA_ASSERT(allocations); - - VMA_DEBUG_LOG("vmaFlushAllocations"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); - - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( - VmaAllocator allocator, 
- uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, - const VkDeviceSize* sizes) -{ - VMA_ASSERT(allocator); - - if(allocationCount == 0) - { - return VK_SUCCESS; - } - - VMA_ASSERT(allocations); - - VMA_DEBUG_LOG("vmaInvalidateAllocations"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); - - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( - VmaAllocator allocator, - uint32_t memoryTypeBits) -{ - VMA_ASSERT(allocator); - - VMA_DEBUG_LOG("vmaCheckCorruption"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->CheckCorruption(memoryTypeBits); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( - VmaAllocator allocator, - const VmaDefragmentationInfo* pInfo, - VmaDefragmentationContext* pContext) -{ - VMA_ASSERT(allocator && pInfo && pContext); - - VMA_DEBUG_LOG("vmaBeginDefragmentation"); - - if (pInfo->pool != VMA_NULL) - { - // Check if run on supported algorithms - if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) - return VK_ERROR_FEATURE_NOT_PRESENT; - } - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); - return VK_SUCCESS; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( - VmaAllocator allocator, - VmaDefragmentationContext context, - VmaDefragmentationStats* pStats) -{ - VMA_ASSERT(allocator && context); - - VMA_DEBUG_LOG("vmaEndDefragmentation"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - if (pStats) - context->GetStats(*pStats); - vma_delete(allocator, context); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) -{ - VMA_ASSERT(context && pPassInfo); - - VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return context->DefragmentPassBegin(*pPassInfo); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) -{ - VMA_ASSERT(context && pPassInfo); - - VMA_DEBUG_LOG("vmaEndDefragmentationPass"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return context->DefragmentPassEnd(*pPassInfo); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( - VmaAllocator allocator, - VmaAllocation allocation, - VkBuffer buffer) -{ - VMA_ASSERT(allocator && allocation && buffer); - - VMA_DEBUG_LOG("vmaBindBufferMemory"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize allocationLocalOffset, - VkBuffer buffer, - const void* pNext) -{ - VMA_ASSERT(allocator && allocation && buffer); - - VMA_DEBUG_LOG("vmaBindBufferMemory2"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( - VmaAllocator allocator, - VmaAllocation allocation, - VkImage image) -{ - VMA_ASSERT(allocator && allocation && image); - - VMA_DEBUG_LOG("vmaBindImageMemory"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->BindImageMemory(allocation, 0, image, 
VMA_NULL); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( - VmaAllocator allocator, - VmaAllocation allocation, - VkDeviceSize allocationLocalOffset, - VkImage image, - const void* pNext) -{ - VMA_ASSERT(allocator && allocation && image); - - VMA_DEBUG_LOG("vmaBindImageMemory2"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( - VmaAllocator allocator, - const VkBufferCreateInfo* pBufferCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkBuffer* pBuffer, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); - - if(pBufferCreateInfo->size == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && - !allocator->m_UseKhrBufferDeviceAddress) - { - VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_LOG("vmaCreateBuffer"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *pBuffer = VK_NULL_HANDLE; - *pAllocation = VK_NULL_HANDLE; - - // 1. Create VkBuffer. - VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( - allocator->m_hDevice, - pBufferCreateInfo, - allocator->GetAllocationCallbacks(), - pBuffer); - if(res >= 0) - { - // 2. vkGetBufferMemoryRequirements. - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); - - // 3. Allocate memory using allocator. - res = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - *pBuffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - pBufferCreateInfo->usage, // dedicatedBufferImageUsage - *pAllocationCreateInfo, - VMA_SUBALLOCATION_TYPE_BUFFER, - 1, // allocationCount - pAllocation); - - if(res >= 0) - { - // 3. Bind buffer with memory. - if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) - { - res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); - } - if(res >= 0) - { - // All steps succeeded. 
- #if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); - #endif - if(pAllocationInfo != VMA_NULL) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return VK_SUCCESS; - } - allocator->FreeMemory( - 1, // allocationCount - pAllocation); - *pAllocation = VK_NULL_HANDLE; - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - *pBuffer = VK_NULL_HANDLE; - return res; - } - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - *pBuffer = VK_NULL_HANDLE; - return res; - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( - VmaAllocator allocator, - const VkBufferCreateInfo* pBufferCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkDeviceSize minAlignment, - VkBuffer* pBuffer, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation); - - if(pBufferCreateInfo->size == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && - !allocator->m_UseKhrBufferDeviceAddress) - { - VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_LOG("vmaCreateBufferWithAlignment"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *pBuffer = VK_NULL_HANDLE; - *pAllocation = VK_NULL_HANDLE; - - // 1. Create VkBuffer. - VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( - allocator->m_hDevice, - pBufferCreateInfo, - allocator->GetAllocationCallbacks(), - pBuffer); - if(res >= 0) - { - // 2. vkGetBufferMemoryRequirements. - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); - - // 2a. Include minAlignment - vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment); - - // 3. Allocate memory using allocator. - res = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - *pBuffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage - pBufferCreateInfo->usage, // dedicatedBufferImageUsage - *pAllocationCreateInfo, - VMA_SUBALLOCATION_TYPE_BUFFER, - 1, // allocationCount - pAllocation); - - if(res >= 0) - { - // 3. Bind buffer with memory. - if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) - { - res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); - } - if(res >= 0) - { - // All steps succeeded. 
- #if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); - #endif - if(pAllocationInfo != VMA_NULL) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return VK_SUCCESS; - } - allocator->FreeMemory( - 1, // allocationCount - pAllocation); - *pAllocation = VK_NULL_HANDLE; - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - *pBuffer = VK_NULL_HANDLE; - return res; - } - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - *pBuffer = VK_NULL_HANDLE; - return res; - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) -{ - VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); - - VMA_DEBUG_LOG("vmaCreateAliasingBuffer"); - - *pBuffer = VK_NULL_HANDLE; - - if (pBufferCreateInfo->size == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && - !allocator->m_UseKhrBufferDeviceAddress) - { - VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - // 1. Create VkBuffer. - VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( - allocator->m_hDevice, - pBufferCreateInfo, - allocator->GetAllocationCallbacks(), - pBuffer); - if (res >= 0) - { - // 2. Bind buffer with memory. - res = allocator->BindBufferMemory(allocation, 0, *pBuffer, VMA_NULL); - if (res >= 0) - { - return VK_SUCCESS; - } - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); - } - return res; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( - VmaAllocator allocator, - VkBuffer buffer, - VmaAllocation allocation) -{ - VMA_ASSERT(allocator); - - if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) - { - return; - } - - VMA_DEBUG_LOG("vmaDestroyBuffer"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - if(buffer != VK_NULL_HANDLE) - { - (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); - } - - if(allocation != VK_NULL_HANDLE) - { - allocator->FreeMemory( - 1, // allocationCount - &allocation); - } -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( - VmaAllocator allocator, - const VkImageCreateInfo* pImageCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkImage* pImage, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) -{ - VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); - - if(pImageCreateInfo->extent.width == 0 || - pImageCreateInfo->extent.height == 0 || - pImageCreateInfo->extent.depth == 0 || - pImageCreateInfo->mipLevels == 0 || - pImageCreateInfo->arrayLayers == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_LOG("vmaCreateImage"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - *pImage = VK_NULL_HANDLE; - *pAllocation = VK_NULL_HANDLE; - - // 1. Create VkImage. 
- VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( - allocator->m_hDevice, - pImageCreateInfo, - allocator->GetAllocationCallbacks(), - pImage); - if(res >= 0) - { - VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? - VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : - VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; - - // 2. Allocate memory using allocator. - VkMemoryRequirements vkMemReq = {}; - bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; - allocator->GetImageMemoryRequirements(*pImage, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); - - res = allocator->AllocateMemory( - vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation, - VK_NULL_HANDLE, // dedicatedBuffer - *pImage, // dedicatedImage - pImageCreateInfo->usage, // dedicatedBufferImageUsage - *pAllocationCreateInfo, - suballocType, - 1, // allocationCount - pAllocation); - - if(res >= 0) - { - // 3. Bind image with memory. - if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) - { - res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); - } - if(res >= 0) - { - // All steps succeeded. - #if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); - #endif - if(pAllocationInfo != VMA_NULL) - { - allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); - } - - return VK_SUCCESS; - } - allocator->FreeMemory( - 1, // allocationCount - pAllocation); - *pAllocation = VK_NULL_HANDLE; - (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); - *pImage = VK_NULL_HANDLE; - return res; - } - (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); - *pImage = VK_NULL_HANDLE; - return res; - } - return res; -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) -{ - VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); - - *pImage = VK_NULL_HANDLE; - - VMA_DEBUG_LOG("vmaCreateImage"); - - if (pImageCreateInfo->extent.width == 0 || - pImageCreateInfo->extent.height == 0 || - pImageCreateInfo->extent.depth == 0 || - pImageCreateInfo->mipLevels == 0 || - pImageCreateInfo->arrayLayers == 0) - { - return VK_ERROR_INITIALIZATION_FAILED; - } - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - // 1. Create VkImage. - VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( - allocator->m_hDevice, - pImageCreateInfo, - allocator->GetAllocationCallbacks(), - pImage); - if (res >= 0) - { - // 2. Bind image with memory. 
- res = allocator->BindImageMemory(allocation, 0, *pImage, VMA_NULL); - if (res >= 0) - { - return VK_SUCCESS; - } - (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); - } - return res; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( - VmaAllocator VMA_NOT_NULL allocator, - VkImage VMA_NULLABLE_NON_DISPATCHABLE image, - VmaAllocation VMA_NULLABLE allocation) -{ - VMA_ASSERT(allocator); - - if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) - { - return; - } - - VMA_DEBUG_LOG("vmaDestroyImage"); - - VMA_DEBUG_GLOBAL_MUTEX_LOCK - - if(image != VK_NULL_HANDLE) - { - (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); - } - if(allocation != VK_NULL_HANDLE) - { - allocator->FreeMemory( - 1, // allocationCount - &allocation); - } -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( - const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock) -{ - VMA_ASSERT(pCreateInfo && pVirtualBlock); - VMA_ASSERT(pCreateInfo->size > 0); - VMA_DEBUG_LOG("vmaCreateVirtualBlock"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo); - VkResult res = (*pVirtualBlock)->Init(); - if(res < 0) - { - vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock); - *pVirtualBlock = VK_NULL_HANDLE; - } - return res; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock) -{ - if(virtualBlock != VK_NULL_HANDLE) - { - VMA_DEBUG_LOG("vmaDestroyVirtualBlock"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying. - vma_delete(&allocationCallbacks, virtualBlock); - } -} - -VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); - VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - return virtualBlock->IsEmpty() ? 
VK_TRUE : VK_FALSE; -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); - VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo); -} - -VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, - VkDeviceSize* VMA_NULLABLE pOffset) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); - VMA_DEBUG_LOG("vmaVirtualAllocate"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) -{ - if(allocation != VK_NULL_HANDLE) - { - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); - VMA_DEBUG_LOG("vmaVirtualFree"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->Free(allocation); - } -} - -VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); - VMA_DEBUG_LOG("vmaClearVirtualBlock"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->Clear(); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); - VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->SetAllocationUserData(allocation, pUserData); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaStatistics* VMA_NOT_NULL pStats) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); - VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->GetStatistics(*pStats); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaDetailedStatistics* VMA_NOT_NULL pStats) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); - VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - virtualBlock->CalculateDetailedStatistics(*pStats); -} - -#if VMA_STATS_STRING_ENABLED - -VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) -{ - VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL); - VMA_DEBUG_GLOBAL_MUTEX_LOCK; - const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks(); - VmaStringBuilder sb(allocationCallbacks); - virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb); - *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength()); -} - -VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE pStatsString) -{ - if(pStatsString != VMA_NULL) - { - VMA_ASSERT(virtualBlock != 
VK_NULL_HANDLE);
- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
- VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
- }
-}
-#endif // VMA_STATS_STRING_ENABLED
-#endif // _VMA_PUBLIC_INTERFACE
-#endif // VMA_IMPLEMENTATION
-
-/**
-\page quick_start Quick start
-
-\section quick_start_project_setup Project setup
-
-Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
-You don't need to build it as a separate library project.
-You can add this file directly to your project and submit it to the code repository next to your other source files.
-
-"Single header" doesn't mean that everything is contained in C/C++ declarations,
-as it tends to be in the case of inline functions or C++ templates.
-It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
-If you don't do it properly, you will get linker errors.
-
-To do it properly:
-
--# Include the "vk_mem_alloc.h" file in each CPP file where you want to use the library.
-   This includes declarations of all members of the library.
--# In exactly one CPP file define the following macro before this include.
-   It also enables internal definitions.
-
-\code
-#define VMA_IMPLEMENTATION
-#include "vk_mem_alloc.h"
-\endcode
-
-It may be a good idea to create a dedicated CPP file just for this purpose.
-
-This library includes header `<vulkan/vulkan.h>`, which in turn
-includes `<windows.h>` on Windows. If you need some specific macros defined
-before including these headers (like `WIN32_LEAN_AND_MEAN` or
-`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
-them before every `#include` of this library.
-
-This library is written in C++, but has a C-compatible interface.
-Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
-implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
-Some features of C++14 are used. STL containers, RTTI, or C++ exceptions are not used.
-
-
-\section quick_start_initialization Initialization
-
-At program startup:
-
--# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` objects.
--# Fill the VmaAllocatorCreateInfo structure and create a #VmaAllocator object by
-   calling vmaCreateAllocator().
-
-Only members `physicalDevice`, `device`, `instance` are required.
-However, you should inform the library which Vulkan version you use by setting
-VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
-by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
-Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
-
-You may need to configure importing Vulkan functions. There are 3 ways to do this:
-
--# **If you link with the Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
-   - You don't need to do anything.
-   - VMA will use these, as the macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
--# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
-   `vkGetDeviceProcAddr` (this is the option presented in the example below):
-   - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
-   - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
-     VmaVulkanFunctions::vkGetDeviceProcAddr.
-   - The library will fetch pointers to all other functions it needs internally.
--# **If you fetch pointers to all Vulkan functions in a custom way**, e.g.
using some loader like - [Volk](https://github.com/zeux/volk): - - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0. - - Pass these pointers via structure #VmaVulkanFunctions. - -\code -VmaVulkanFunctions vulkanFunctions = {}; -vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr; -vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr; - -VmaAllocatorCreateInfo allocatorCreateInfo = {}; -allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2; -allocatorCreateInfo.physicalDevice = physicalDevice; -allocatorCreateInfo.device = device; -allocatorCreateInfo.instance = instance; -allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions; - -VmaAllocator allocator; -vmaCreateAllocator(&allocatorCreateInfo, &allocator); -\endcode - - -\section quick_start_resource_allocation Resource allocation - -When you want to create a buffer or image: - --# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure. --# Fill VmaAllocationCreateInfo structure. --# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory - already allocated and bound to it, plus #VmaAllocation objects that represents its underlying memory. - -\code -VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; -bufferInfo.size = 65536; -bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - -VmaAllocationCreateInfo allocInfo = {}; -allocInfo.usage = VMA_MEMORY_USAGE_AUTO; - -VkBuffer buffer; -VmaAllocation allocation; -vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); -\endcode - -Don't forget to destroy your objects when no longer needed: - -\code -vmaDestroyBuffer(allocator, buffer, allocation); -vmaDestroyAllocator(allocator); -\endcode - - -\page choosing_memory_type Choosing memory type - -Physical devices in Vulkan support various combinations of memory heaps and -types. Help with choosing correct and optimal memory type for your specific -resource is one of the key features of this library. You can use it by filling -appropriate members of VmaAllocationCreateInfo structure, as described below. -You can also combine multiple methods. - --# If you just want to find memory type index that meets your requirements, you - can use function: vmaFindMemoryTypeIndexForBufferInfo(), - vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex(). --# If you want to allocate a region of device memory without association with any - specific image or buffer, you can use function vmaAllocateMemory(). Usage of - this function is not recommended and usually not needed. - vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, - which may be useful for sparse binding. --# If you already have a buffer or an image created, you want to allocate memory - for it and then you will bind it yourself, you can use function - vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). - For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() - or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2(). --# **This is the easiest and recommended way to use this library:** - If you want to create a buffer or an image, allocate memory for it and bind - them together, all in one call, you can use function vmaCreateBuffer(), - vmaCreateImage(). - -When using 3. 
or 4., the library internally queries Vulkan for the memory types
-supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
-and uses only one of these types.
-
-If no memory type can be found that meets all the requirements, these functions
-return `VK_ERROR_FEATURE_NOT_PRESENT`.
-
-You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
-It means no requirements are specified for the memory type.
-It is valid, although not very useful.
-
-\section choosing_memory_type_usage Usage
-
-The easiest way to specify memory requirements is to fill the member
-VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
-It defines high level, common usage types.
-Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select the best memory type for your resource automatically.
-
-For example, if you want to create a uniform buffer that will be filled using
-transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can
-do it using the following code. The buffer will most likely end up in a memory type with
-`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device.
-
-\code
-VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufferInfo.size = 65536;
-bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
-on systems with a discrete graphics card, where these memories are separate, you can use
-#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
-
-When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory,
-you also need to specify one of the host access flags:
-#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
-This will help the library decide on a preferred memory type that has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
-so you can map it.
-
-For example, a staging buffer that will be filled via a mapped pointer and then
-used as a source of transfer to the buffer described previously can be created like this.
-It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
-but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
-
-\code
-VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-stagingBufferInfo.size = 65536;
-stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-VmaAllocationCreateInfo stagingAllocInfo = {};
-stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
-stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
-
-VkBuffer stagingBuffer;
-VmaAllocation stagingAllocation;
-vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
-\endcode
-
-For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
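As an extra illustration of the `*_PREFER_*` values mentioned above, here is a minimal sketch of a readback buffer that the CPU will read from; the variable names, size and usage flags are placeholders chosen for this example rather than taken from the library text:

\code
// Hypothetical readback buffer; size and usage are illustrative only.
VkBufferCreateInfo readbackBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
readbackBufferInfo.size = 65536;
readbackBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; // Written by a GPU copy, then read by the CPU.

VmaAllocationCreateInfo readbackAllocInfo = {};
readbackAllocInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST; // Prefer system RAM on discrete GPUs.
readbackAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; // Needed to map and read the memory.

VkBuffer readbackBuffer;
VmaAllocation readbackAllocation;
vmaCreateBuffer(allocator, &readbackBufferInfo, &readbackAllocInfo, &readbackBuffer, &readbackAllocation, nullptr);
\endcode

The `*_PREFER_*` values express a preference, not a hard requirement, so the library may still pick another memory type when necessary.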
-
-Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
-about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
-so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
-If you allocate raw memory using the function vmaAllocateMemory(), you have to use other means of selecting
-the memory type, as described below.
-
-\note
-Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
-`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
-are still available and work the same way as in previous versions of the library
-for backward compatibility, but they are not recommended.
-
-\section choosing_memory_type_required_preferred_flags Required and preferred flags
-
-You can specify more detailed requirements by filling the members
-VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
-with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
-if you want to create a buffer that will be persistently mapped on the host (so it
-must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
-use the following code:
-
-\code
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-A memory type is chosen that has all the required flags and as many preferred
-flags set as possible.
-
-The value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
-plus some extra "magic" (heuristics).
-
-\section choosing_memory_type_explicit_memory_types Explicit memory types
-
-If you inspected the memory types available on the physical device and you have
-a preference for memory types that you want to use, you can fill the member
-VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
-means that a memory type with that index is allowed to be used for the
-allocation. Special value 0, just like `UINT32_MAX`, means there are no
-restrictions to the memory type index.
-
-Please note that this member is NOT just a memory type index.
-Still, you can use it to choose just one, specific memory type.
-For example, if you already determined that your buffer should be created in
-memory type 2, use the following code:
-
-\code
-uint32_t memoryTypeIndex = 2;
-
-VmaAllocationCreateInfo allocInfo = {};
-allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-\endcode
-
-
-\section choosing_memory_type_custom_memory_pools Custom memory pools
-
-If you allocate from a custom memory pool, all the ways of specifying memory
-requirements described above are not applicable and the aforementioned members
-of the VmaAllocationCreateInfo structure are ignored. The memory type is selected
-explicitly when creating the pool and then used to make all the allocations from
-that pool. For further details, see \ref custom_memory_pools.
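As a quick illustration of that workflow, here is a minimal sketch; `memTypeIndex`, the block size and the buffer parameters are placeholder values, and the full treatment is in \ref custom_memory_pools:

\code
// memTypeIndex is a placeholder, e.g. found earlier with vmaFindMemoryTypeIndexForBufferInfo().
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // Illustrative fixed block size.

VmaPool pool;
vmaCreatePool(allocator, &poolCreateInfo, &pool);

VkBufferCreateInfo poolBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
poolBufCreateInfo.size = 65536;
poolBufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo poolAllocCreateInfo = {};
poolAllocCreateInfo.pool = pool; // Memory type selection members are ignored when a pool is set.

VkBuffer poolBuf;
VmaAllocation poolAlloc;
vmaCreateBuffer(allocator, &poolBufCreateInfo, &poolAllocCreateInfo, &poolBuf, &poolAlloc, nullptr);

// Later, after destroying all buffers and allocations made from it:
// vmaDestroyPool(allocator, pool);
\endcode

Because the pool fixes the memory type at creation time, every allocation made through VmaAllocationCreateInfo::pool ends up in that type, regardless of usage or flags.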
-
-\section choosing_memory_type_dedicated_allocations Dedicated allocations
-
-Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
-allocated from Vulkan internally. That is the main feature of this whole library.
-You can still request a separate memory block to be created for an allocation,
-just like you would do in a trivial solution without using any allocator.
-In that case, a buffer or image is always bound to that memory at offset 0.
-This is called a "dedicated allocation".
-You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-The library can also internally decide to use a dedicated allocation in some cases, e.g.:
-
-- When the size of the allocation is large.
-- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
-  and it reports that a dedicated allocation is required or recommended for the resource.
-- When allocation of the next large memory block fails due to insufficient device memory,
-  but an allocation of the exact requested size succeeds.
-
-
-\page memory_mapping Memory mapping
-
-To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
-to be able to read from it or write to it in CPU code.
-Mapping is possible only for memory allocated from a memory type that has
-the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
-Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
-You can use them directly with memory allocated by this library,
-but it is not recommended because of the following issue:
-Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
-This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
-Because of this, Vulkan Memory Allocator provides the following facilities:
-
-\note If you want to be able to map an allocation, you need to specify one of the flags
-#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
-when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
-For other usage values they are ignored and every such allocation made in a `HOST_VISIBLE` memory type is mappable,
-but they can still be used for consistency.
-
-\section memory_mapping_mapping_functions Mapping functions
-
-The library provides the following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
-They are safer and more convenient to use than standard Vulkan functions.
-You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
-You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
-The way it is implemented is that the library always maps the entire memory block, not just the region of the allocation.
-For further details, see the description of the vmaMapMemory() function.
-Example:
-
-\code
-// Having these objects initialized:
-struct ConstantBuffer
-{
-    ...
-};
-ConstantBuffer constantBufferData = ...
-
-VmaAllocator allocator = ...
-VkBuffer constantBuffer = ...
-VmaAllocation constantBufferAllocation = ...
-
-// You can map and fill your buffer using the following code:
-
-void* mappedData;
-vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
-memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
-vmaUnmapMemory(allocator, constantBufferAllocation);
-\endcode
-
-When mapping, you may see a warning from Vulkan validation layer similar to this one:
-
-Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.
-
-It happens because the library maps the entire `VkDeviceMemory` block, where different
-types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
-You can safely ignore it if you are sure you access only memory of the intended
-object that you wanted to map.
-
-
-\section memory_mapping_persistently_mapped_memory Persistently mapped memory
-
-Keeping your memory persistently mapped is generally OK in Vulkan.
-You don't need to unmap it before using its data on the GPU.
-The library provides a special feature designed for that:
-Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
-VmaAllocationCreateInfo::flags stay mapped all the time,
-so you can just access the CPU pointer to it at any time
-without a need to call any "map" or "unmap" function.
-Example:
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = sizeof(ConstantBuffer);
-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-    VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-// Buffer is already mapped. You can access its memory.
-memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
-\endcode
-
-\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
-in a mappable memory type.
-For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
-#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
-#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
-For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
-
-\section memory_mapping_cache_control Cache flush and invalidate
-
-Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
-but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
-you need to manually **invalidate** the cache before reading from a mapped pointer
-and **flush** the cache after writing to a mapped pointer.
-Map/unmap operations don't do that automatically.
-Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()`,
-`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
-functions that refer to a given allocation object: vmaFlushAllocation(),
-vmaInvalidateAllocation(),
-or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
-
-Regions of memory specified for flush/invalidate must be aligned to
-`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
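-
-Continuing the persistently mapped example above, a flush after writing through the mapped pointer could look like the following minimal sketch. It is strictly needed only if the allocation ended up in a memory type without `HOST_COHERENT`, but calling it unconditionally is also fine:
-
-\code
-// Flush the written range. Offset 0 with VK_WHOLE_SIZE covers the whole allocation;
-// the library takes care of nonCoherentAtomSize alignment internally.
-vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
-
-// Analogously, before reading data that the GPU wrote, through a mapped pointer:
-// vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
-\endcode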
-In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
-within blocks are aligned to `nonCoherentAtomSize`, so their offsets are always a multiple of
-this value and two different allocations never share the same "line" of this size.
-
-Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
-currently provide the `HOST_COHERENT` flag on all memory types that are
-`HOST_VISIBLE`, so on PC you may not need to bother.
-
-
-\page staying_within_budget Staying within budget
-
-When developing a graphics-intensive game or program, it is important to avoid allocating
-more GPU memory than is physically available. When the memory is over-committed,
-various bad things can happen, depending on the specific GPU, graphics driver, and
-operating system:
-
-- It may just work without any problems.
-- The application may slow down because some memory blocks are moved to system RAM
-  and the GPU has to access them through the PCI Express bus.
-- A new allocation may take a very long time to complete, even a few seconds, and possibly
-  freeze the entire system.
-- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-- It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
-  returned somewhere later.
-
-\section staying_within_budget_querying_for_budget Querying for budget
-
-To query for current memory usage and available budget, use function vmaGetHeapBudgets().
-The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
-
-Please note that this function returns different information and works faster than
-vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
-allocation, while vmaCalculateStatistics() is intended to be used rarely,
-only to obtain statistical information, e.g. for debugging purposes.
-
-It is recommended to use the VK_EXT_memory_budget device extension to obtain information
-about the budget from the Vulkan device. VMA is able to use this extension automatically.
-When not enabled, the allocator behaves the same way, but then it estimates current usage
-and available budget based on its internal information and Vulkan memory heap sizes,
-which may be less precise. In order to use this extension:
-
-1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
-   required by it are available and enable them. Please note that the first is a device
-   extension and the second is an instance extension!
-2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
-3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from
-   Vulkan inside of it to avoid overhead of querying it with every allocation.
-
-\section staying_within_budget_controlling_memory_usage Controlling memory usage
-
-There are many ways in which you can try to stay within the budget.
-
-First, when making a new allocation requires allocating a new memory block, the library
-tries not to exceed the budget automatically. If a block with the default recommended size
-(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
-dedicated memory for just this resource.
-
-If the size of the requested resource plus current memory usage is more than the
-budget, by default the library still tries to create it, leaving it to the Vulkan
-implementation whether the allocation succeeds or fails. You can change this behavior
-by using the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag.
With it, the allocation is
-not made if it would exceed the budget or if the budget is already exceeded.
-VMA then tries to make the allocation from the next eligible Vulkan memory type.
-If all of them fail, the call fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
-when creating resources that are not essential for the application (e.g. the texture
-of a specific object) and not to pass it when creating critically important resources
-(e.g. render targets).
-
-On AMD graphics cards there is a custom vendor extension available: VK_AMD_memory_overallocation_behavior
-that allows controlling the behavior of the Vulkan implementation in out-of-memory cases -
-whether it should fail with an error code or still allow the allocation.
-Usage of this extension involves only passing an extra structure on Vulkan device creation,
-so it is out of scope of this library.
-
-Finally, you can also use the #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
-a new allocation is created only when it fits inside one of the existing memory blocks.
-If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-This also ensures that the function call is very fast because it never goes to Vulkan
-to obtain a new block.
-
-\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
-set to more than 0 will currently try to allocate memory blocks without checking whether they
-fit within budget.
-
-
-\page resource_aliasing Resource aliasing (overlap)
-
-New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
-management, give an opportunity to alias (overlap) multiple resources in the
-same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
-It can be useful to save video memory, but it must be used with caution.
-
-For example, if you know the flow of your whole render frame in advance, you
-are going to use some intermediate textures or buffers only during a small range of render passes,
-and you know these ranges don't overlap in time, you can bind these resources to
-the same place in memory, even if they have completely different parameters (width, height, format etc.).
-
-![Resource aliasing (overlap)](../gfx/Aliasing.png)
-
-Such a scenario is possible using VMA, but you need to create your images manually.
-Then you need to calculate parameters of an allocation to be made using the formula:
-
-- allocation size = max(size of each image)
-- allocation alignment = max(alignment of each image)
-- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
-
-The following example shows two different images bound to the same place in memory,
-allocated to fit the largest of them.
-
-\code
-// A 512x512 texture to be sampled.
-VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
-img1CreateInfo.extent.width = 512;
-img1CreateInfo.extent.height = 512;
-img1CreateInfo.extent.depth = 1;
-img1CreateInfo.mipLevels = 10;
-img1CreateInfo.arrayLayers = 1;
-img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
-img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
-img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-
-// A full screen texture to be used as color attachment.
-
-VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
-img2CreateInfo.extent.width = 1920;
-img2CreateInfo.extent.height = 1080;
-img2CreateInfo.extent.depth = 1;
-img2CreateInfo.mipLevels = 1;
-img2CreateInfo.arrayLayers = 1;
-img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
-img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-
-VkImage img1;
-res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
-VkImage img2;
-res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
-
-VkMemoryRequirements img1MemReq;
-vkGetImageMemoryRequirements(device, img1, &img1MemReq);
-VkMemoryRequirements img2MemReq;
-vkGetImageMemoryRequirements(device, img2, &img2MemReq);
-
-VkMemoryRequirements finalMemReq = {};
-finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
-finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
-finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
-// Validate that the images can share memory: finalMemReq.memoryTypeBits must not be 0.
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-
-VmaAllocation alloc;
-res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
-
-res = vmaBindImageMemory(allocator, alloc, img1);
-res = vmaBindImageMemory(allocator, alloc, img2);
-
-// You can use img1, img2 here, but not at the same time!
-
-vmaFreeMemory(allocator, alloc);
-vkDestroyImage(device, img2, nullptr);
-vkDestroyImage(device, img1, nullptr);
-\endcode
-
-Remember that using resources that alias in memory requires proper synchronization.
-You need to issue a memory barrier to make sure commands that use `img1` and `img2`
-don't overlap on the GPU timeline.
-You also need to treat a resource after aliasing as uninitialized - containing garbage data.
-For example, if you use `img1` and then want to use `img2`, you need to issue
-an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
-
-Additional considerations:
-
-- Vulkan also allows interpreting contents of memory between aliasing resources consistently in some cases.
-See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.
-- You can create a more complex layout where different images and buffers are bound
-at different offsets inside one large allocation. For example, one can imagine
-a big texture used in some render passes, aliasing with a set of many small buffers
-used in some further passes. To bind a resource at a non-zero offset in an allocation,
-use vmaBindBufferMemory2() / vmaBindImageMemory2().
-- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
-returned in memory requirements of each resource to make sure the bits overlap.
-Some GPUs may expose multiple memory types suitable e.g. only for buffers or
-images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
-resources may be disjoint. Aliasing them is not possible in that case.
-
-
-\page custom_memory_pools Custom memory pools
-
-A memory pool contains a number of `VkDeviceMemory` blocks.
-The library automatically creates and manages a default pool for each memory type available on the device.
-The default memory pool automatically grows in size.
-Size of allocated blocks is also variable and managed automatically. - -You can create custom pool and allocate memory out of it. -It can be useful if you want to: - -- Keep certain kind of allocations separate from others. -- Enforce particular, fixed size of Vulkan memory blocks. -- Limit maximum amount of Vulkan memory allocated for that pool. -- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool. -- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in - #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain. -- Perform defragmentation on a specific subset of your allocations. - -To use custom memory pools: - --# Fill VmaPoolCreateInfo structure. --# Call vmaCreatePool() to obtain #VmaPool handle. --# When making an allocation, set VmaAllocationCreateInfo::pool to this handle. - You don't need to specify any other parameters of this structure, like `usage`. - -Example: - -\code -// Find memoryTypeIndex for the pool. -VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; -sampleBufCreateInfo.size = 0x10000; // Doesn't matter. -sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - -VmaAllocationCreateInfo sampleAllocCreateInfo = {}; -sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; - -uint32_t memTypeIndex; -VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, - &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex); -// Check res... - -// Create a pool that can have at most 2 blocks, 128 MiB each. -VmaPoolCreateInfo poolCreateInfo = {}; -poolCreateInfo.memoryTypeIndex = memTypeIndex; -poolCreateInfo.blockSize = 128ull * 1024 * 1024; -poolCreateInfo.maxBlockCount = 2; - -VmaPool pool; -res = vmaCreatePool(allocator, &poolCreateInfo, &pool); -// Check res... - -// Allocate a buffer out of it. -VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; -bufCreateInfo.size = 1024; -bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - -VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.pool = pool; - -VkBuffer buf; -VmaAllocation alloc; -res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); -// Check res... -\endcode - -You have to free all allocations made from this pool before destroying it. - -\code -vmaDestroyBuffer(allocator, buf, alloc); -vmaDestroyPool(allocator, pool); -\endcode - -New versions of this library support creating dedicated allocations in custom pools. -It is supported only when VmaPoolCreateInfo::blockSize = 0. -To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and -VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - -\note Excessive use of custom pools is a common mistake when using this library. -Custom pools may be useful for special purposes - when you want to -keep certain type of resources separate e.g. to reserve minimum amount of memory -for them or limit maximum amount of memory they can occupy. For most -resources this is not needed and so it is not recommended to create #VmaPool -objects and allocations out of them. Allocating from the default pool is sufficient. - - -\section custom_memory_pools_MemTypeIndex Choosing memory type index - -When creating a pool, you must explicitly specify memory type index. 
-To find the one suitable for your buffers or images, you can use helper functions -vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(). -You need to provide structures with example parameters of buffers or images -that you are going to create in that pool. - -\code -VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; -exampleBufCreateInfo.size = 1024; // Doesn't matter -exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - -VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; - -uint32_t memTypeIndex; -vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex); - -VmaPoolCreateInfo poolCreateInfo = {}; -poolCreateInfo.memoryTypeIndex = memTypeIndex; -// ... -\endcode - -When creating buffers/images allocated in that pool, provide following parameters: - -- `VkBufferCreateInfo`: Prefer to pass same parameters as above. - Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior. - Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers - or the other way around. -- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member. - Other members are ignored anyway. - -\section linear_algorithm Linear allocation algorithm - -Each Vulkan memory block managed by this library has accompanying metadata that -keeps track of used and unused regions. By default, the metadata structure and -algorithm tries to find best place for new allocations among free regions to -optimize memory usage. This way you can allocate and free objects in any order. - -![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png) - -Sometimes there is a need to use simpler, linear allocation algorithm. You can -create custom pool that uses such algorithm by adding flag -#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating -#VmaPool object. Then an alternative metadata management is used. It always -creates new allocations after last one and doesn't reuse free regions after -allocations freed in the middle. It results in better allocation performance and -less memory consumed by metadata. - -![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png) - -With this one flag, you can create a custom pool that can be used in many ways: -free-at-once, stack, double stack, and ring buffer. See below for details. -You don't need to specify explicitly which of these options you are going to use - it is detected automatically. - -\subsection linear_algorithm_free_at_once Free-at-once - -In a pool that uses linear algorithm, you still need to free all the allocations -individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free -them in any order. New allocations are always made after last one - free space -in the middle is not reused. However, when you release all the allocation and -the pool becomes empty, allocation starts from the beginning again. This way you -can use linear algorithm to speed up creation of allocations that you are going -to release all at once. - -![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png) - -This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount -value that allows multiple memory blocks. 
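-
-For illustration, a pool that uses the linear algorithm can be created like the following minimal sketch, assuming `memTypeIndex` was obtained with the helper functions shown above; the same pool then serves any of the usage patterns described in these subsections:
-
-\code
-VmaPoolCreateInfo linearPoolCreateInfo = {};
-linearPoolCreateInfo.memoryTypeIndex = memTypeIndex;
-linearPoolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
-// blockSize, minBlockCount, maxBlockCount can be set as in the earlier pool example.
-
-VmaPool linearPool;
-VkResult res = vmaCreatePool(allocator, &linearPoolCreateInfo, &linearPool);
-// Check res...
-\endcode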
- -\subsection linear_algorithm_stack Stack - -When you free an allocation that was created last, its space can be reused. -Thanks to this, if you always release allocations in the order opposite to their -creation (LIFO - Last In First Out), you can achieve behavior of a stack. - -![Stack](../gfx/Linear_allocator_4_stack.png) - -This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount -value that allows multiple memory blocks. - -\subsection linear_algorithm_double_stack Double stack - -The space reserved by a custom pool with linear algorithm may be used by two -stacks: - -- First, default one, growing up from offset 0. -- Second, "upper" one, growing down from the end towards lower offsets. - -To make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT -to VmaAllocationCreateInfo::flags. - -![Double stack](../gfx/Linear_allocator_7_double_stack.png) - -Double stack is available only in pools with one memory block - -VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. - -When the two stacks' ends meet so there is not enough space between them for a -new allocation, such allocation fails with usual -`VK_ERROR_OUT_OF_DEVICE_MEMORY` error. - -\subsection linear_algorithm_ring_buffer Ring buffer - -When you free some allocations from the beginning and there is not enough free space -for a new one at the end of a pool, allocator's "cursor" wraps around to the -beginning and starts allocation there. Thanks to this, if you always release -allocations in the same order as you created them (FIFO - First In First Out), -you can achieve behavior of a ring buffer / queue. - -![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png) - -Ring buffer is available only in pools with one memory block - -VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. - -\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. - - -\page defragmentation Defragmentation - -Interleaved allocations and deallocations of many objects of varying size can -cause fragmentation over time, which can lead to a situation where the library is unable -to find a continuous range of free memory for a new allocation despite there is -enough free space, just scattered across many small free ranges between existing -allocations. - -To mitigate this problem, you can use defragmentation feature. -It doesn't happen automatically though and needs your cooperation, -because VMA is a low level library that only allocates memory. -It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures. -It cannot copy their contents as it doesn't record any commands to a command buffer. - -Example: - -\code -VmaDefragmentationInfo defragInfo = {}; -defragInfo.pool = myPool; -defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT; - -VmaDefragmentationContext defragCtx; -VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx); -// Check res... - -for(;;) -{ - VmaDefragmentationPassMoveInfo pass; - res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass); - if(res == VK_SUCCESS) - break; - else if(res != VK_INCOMPLETE) - // Handle error... - - for(uint32_t i = 0; i < pass.moveCount; ++i) - { - // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents. 
-    VmaAllocationInfo allocInfo;
-    vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
-    MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
-
-    // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
-    VkImageCreateInfo imgCreateInfo = ...
-    VkImage newImg;
-    res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
-    // Check res...
-    res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
-    // Check res...
-
-    // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
-    vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
-  }
-
-  // Make sure the copy commands finished executing.
-  vkWaitForFences(...);
-
-  // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
-  for(uint32_t i = 0; i < pass.moveCount; ++i)
-  {
-    // ...
-    vkDestroyImage(device, resData->img, nullptr);
-  }
-
-  // Update appropriate descriptors to point to the new places...
-
-  res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
-  if(res == VK_SUCCESS)
-    break;
-  else if(res != VK_INCOMPLETE)
-    // Handle error...
-}
-
-vmaEndDefragmentation(allocator, defragCtx, nullptr);
-\endcode
-
-Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
-create/destroy an allocation and a buffer/image at once, these are just a shortcut for
-creating the resource, allocating memory, and binding them together.
-Defragmentation works on memory allocations only. You must handle the rest manually.
-Defragmentation is an iterative process that should repeat "passes" as long as the related functions
-return `VK_INCOMPLETE`, not `VK_SUCCESS`.
-In each pass:
-
-1. vmaBeginDefragmentationPass() function call:
-   - Calculates and returns the list of allocations to be moved in this pass.
-     Note this can be a time-consuming process.
-   - Reserves destination memory for them by creating temporary destination allocations
-     that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
-2. Inside the pass, **you should**:
-   - Inspect the returned list of allocations to be moved.
-   - Create new buffers/images and bind them at the returned destination temporary allocations.
-   - Copy data from source to destination resources if necessary.
-   - Destroy the source buffers/images, but NOT their allocations.
-3. vmaEndDefragmentationPass() function call:
-   - Frees the source memory reserved for the allocations that are moved.
-   - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
-   - Frees `VkDeviceMemory` blocks that became empty.
-
-Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
-The defragmentation algorithm tries to move all suitable allocations.
-You can, however, refuse to move some of them inside a defragmentation pass, by setting
-`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
-This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
-If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
-
-Inside a pass, for each allocation that should be moved:
-
-- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
-  - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
-- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
-  filled, and used temporarily in each rendering frame, you can just recreate this image
-  without copying its data.
-- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
-  using `memcpy()`.
-- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
-  This will cancel the move.
-  - vmaEndDefragmentationPass() will then free the destination memory - not the source memory of the allocation, leaving it unchanged.
-- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for a long time),
-  you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
-  - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
-
-You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
-(like in the example above) or all the default pools by setting this member to null.
-
-Defragmentation is always performed in each pool separately.
-Allocations are never moved between different Vulkan memory types.
-The size of the destination memory reserved for a moved allocation is the same as the original one.
-Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
-Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
-
-You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
-in each pass, e.g. to call it in sync with render frames and avoid overly long hitches.
-See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
-
-It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
-usage, possibly from multiple threads, with the exception that allocations
-returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
-
-Mapping is preserved on allocations that are moved during defragmentation.
-Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
-are mapped at their new place. Of course, the pointer to the mapped data changes, so it needs to be queried
-using VmaAllocationInfo::pMappedData.
-
-\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
-
-
-\page statistics Statistics
-
-This library contains several functions that return information about its internal state,
-especially the amount of memory allocated from Vulkan.
-
-\section statistics_numeric_statistics Numeric statistics
-
-If you need to obtain basic statistics about memory usage per heap, together with current budget,
-you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
-This is useful to keep track of memory usage and stay within budget
-(see also \ref staying_within_budget).
-Example:
-
-\code
-uint32_t heapIndex = ...
-
-VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
-vmaGetHeapBudgets(allocator, budgets);
-
-printf("My heap currently has %u allocations taking %llu B,\n",
-    budgets[heapIndex].statistics.allocationCount,
-    budgets[heapIndex].statistics.allocationBytes);
-printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
-    budgets[heapIndex].statistics.blockCount,
-    budgets[heapIndex].statistics.blockBytes);
-printf("Vulkan reports total usage %llu B with budget %llu B.\n",
-    budgets[heapIndex].usage,
-    budgets[heapIndex].budget);
-\endcode
-
-You can query for more detailed statistics per memory heap, type, and totals,
-including minimum and maximum allocation size and unused range size,
-by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
-This function is slower though, as it has to traverse all the internal data structures,
-so it should be used only for debugging purposes.
-
-You can query for statistics of a custom pool using function vmaGetPoolStatistics()
-or vmaCalculatePoolStatistics().
-
-You can query for information about a specific allocation using function vmaGetAllocationInfo().
-It fills structure #VmaAllocationInfo.
-
-\section statistics_json_dump JSON dump
-
-You can dump the internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
-The result is guaranteed to be correct JSON.
-It uses ANSI encoding.
-Any strings provided by the user (see [Allocation names](@ref allocation_names))
-are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
-this JSON string can be treated as using this encoding.
-It must be freed using function vmaFreeStatsString().
-
-The format of this JSON string is not part of the official documentation of the library,
-but it will not change in a backward-incompatible way without increasing the library's major version number
-and an appropriate mention in the changelog.
-
-The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
-It can also contain a detailed map of allocated memory blocks and their regions -
-free and occupied by allocations.
-This allows you, e.g., to visualize the memory or assess fragmentation.
-
-
-\page allocation_annotation Allocation names and user data
-
-\section allocation_user_data Allocation user data
-
-You can annotate allocations with your own information, e.g. for debugging purposes.
-To do that, fill the VmaAllocationCreateInfo::pUserData field when creating
-an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
-some handle, index, key, ordinal number or any other value that would associate
-the allocation with your custom metadata.
-It is useful to identify appropriate data structures in your engine given #VmaAllocation,
-e.g. when doing \ref defragmentation.
-
-\code
-VkBufferCreateInfo bufCreateInfo = ...
-
-MyBufferMetadata* pMetadata = CreateBufferMetadata();
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.pUserData = pMetadata;
-
-VkBuffer buffer;
-VmaAllocation allocation;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
-\endcode
-
-The pointer may be later retrieved as VmaAllocationInfo::pUserData:
-
-\code
-VmaAllocationInfo allocInfo;
-vmaGetAllocationInfo(allocator, allocation, &allocInfo);
-MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
-\endcode
-
-It can also be changed using function vmaSetAllocationUserData().
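-
-For example, a minimal sketch of replacing the pointer later, reusing the hypothetical `MyBufferMetadata` helpers from the example above:
-
-\code
-MyBufferMetadata* pNewMetadata = CreateBufferMetadata();
-vmaSetAllocationUserData(allocator, allocation, pNewMetadata);
-\endcode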
- -Values of (non-zero) allocations' `pUserData` are printed in JSON report created by -vmaBuildStatsString() in hexadecimal form. - -\section allocation_names Allocation names - -An allocation can also carry a null-terminated string, giving a name to the allocation. -To set it, call vmaSetAllocationName(). -The library creates internal copy of the string, so the pointer you pass doesn't need -to be valid for whole lifetime of the allocation. You can free it after the call. - -\code -std::string imageName = "Texture: "; -imageName += fileName; -vmaSetAllocationName(allocator, allocation, imageName.c_str()); -\endcode - -The string can be later retrieved by inspecting VmaAllocationInfo::pName. -It is also printed in JSON report created by vmaBuildStatsString(). - -\note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it. -You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library. - - -\page virtual_allocator Virtual allocator - -As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator". -It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block". -You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan. -A common use case is sub-allocation of pieces of one large GPU buffer. - -\section virtual_allocator_creating_virtual_block Creating virtual block - -To use this functionality, there is no main "allocator" object. -You don't need to have #VmaAllocator object created. -All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator: - --# Fill in #VmaVirtualBlockCreateInfo structure. --# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object. - -Example: - -\code -VmaVirtualBlockCreateInfo blockCreateInfo = {}; -blockCreateInfo.size = 1048576; // 1 MB - -VmaVirtualBlock block; -VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block); -\endcode - -\section virtual_allocator_making_virtual_allocations Making virtual allocations - -#VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions -using the same code as the main Vulkan memory allocator. -Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type -that represents an opaque handle to an allocation withing the virtual block. - -In order to make such allocation: - --# Fill in #VmaVirtualAllocationCreateInfo structure. --# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation. - You can also receive `VkDeviceSize offset` that was assigned to the allocation. - -Example: - -\code -VmaVirtualAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.size = 4096; // 4 KB - -VmaVirtualAllocation alloc; -VkDeviceSize offset; -res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset); -if(res == VK_SUCCESS) -{ - // Use the 4 KB of your memory starting at offset. -} -else -{ - // Allocation failed - no space for it could be found. Handle this error! -} -\endcode - -\section virtual_allocator_deallocation Deallocation - -When no longer needed, an allocation can be freed by calling vmaVirtualFree(). -You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate() -called for the same #VmaVirtualBlock. 
- -When whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock(). -All allocations must be freed before the block is destroyed, which is checked internally by an assert. -However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once - -a feature not available in normal Vulkan memory allocator. Example: - -\code -vmaVirtualFree(block, alloc); -vmaDestroyVirtualBlock(block); -\endcode - -\section virtual_allocator_allocation_parameters Allocation parameters - -You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData(). -Its default value is null. -It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some -larger data structure containing more information. Example: - -\code -struct CustomAllocData -{ - std::string m_AllocName; -}; -CustomAllocData* allocData = new CustomAllocData(); -allocData->m_AllocName = "My allocation 1"; -vmaSetVirtualAllocationUserData(block, alloc, allocData); -\endcode - -The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function -vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo. -If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation! -Example: - -\code -VmaVirtualAllocationInfo allocInfo; -vmaGetVirtualAllocationInfo(block, alloc, &allocInfo); -delete (CustomAllocData*)allocInfo.pUserData; - -vmaVirtualFree(block, alloc); -\endcode - -\section virtual_allocator_alignment_and_units Alignment and units - -It feels natural to express sizes and offsets in bytes. -If an offset of an allocation needs to be aligned to a multiply of some number (e.g. 4 bytes), you can fill optional member -VmaVirtualAllocationCreateInfo::alignment to request it. Example: - -\code -VmaVirtualAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.size = 4096; // 4 KB -allocCreateInfo.alignment = 4; // Returned offset must be a multiply of 4 B - -VmaVirtualAllocation alloc; -res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr); -\endcode - -Alignments of different allocations made from one block may vary. -However, if all alignments and sizes are always multiply of some size e.g. 4 B or `sizeof(MyDataStruct)`, -you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes. -It might be more convenient, but you need to make sure to use this new unit consistently in all the places: - -- VmaVirtualBlockCreateInfo::size -- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment -- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset - -\section virtual_allocator_statistics Statistics - -You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics() -(to get brief statistics that are fast to calculate) -or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate). -The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator. 
-Example: - -\code -VmaStatistics stats; -vmaGetVirtualBlockStatistics(block, &stats); -printf("My virtual block has %llu bytes used by %u virtual allocations\n", - stats.allocationBytes, stats.allocationCount); -\endcode - -You can also request a full list of allocations and free regions as a string in JSON format by calling -vmaBuildVirtualBlockStatsString(). -Returned string must be later freed using vmaFreeVirtualBlockStatsString(). -The format of this string differs from the one returned by the main Vulkan allocator, but it is similar. - -\section virtual_allocator_additional_considerations Additional considerations - -The "virtual allocator" functionality is implemented on a level of individual memory blocks. -Keeping track of a whole collection of blocks, allocating new ones when out of free space, -deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user. - -Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory. -See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT). -You can find their description in chapter \ref custom_memory_pools. -Allocation strategies are also supported. -See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT). - -Following features are supported only by the allocator of the real GPU memory and not by virtual allocations: -buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`. - - -\page debugging_memory_usage Debugging incorrect memory usage - -If you suspect a bug with memory usage, like usage of uninitialized memory or -memory being overwritten out of bounds of an allocation, -you can use debug features of this library to verify this. - -\section debugging_memory_usage_initialization Memory initialization - -If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used, -you can enable automatic memory initialization to verify this. -To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1. - -\code -#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1 -#include "vk_mem_alloc.h" -\endcode - -It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`. -Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`. -Memory is automatically mapped and unmapped if necessary. - -If you find these values while debugging your program, good chances are that you incorrectly -read Vulkan memory that is allocated but not initialized, or already freed, respectively. - -Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped. -It works also with dedicated allocations. - -\section debugging_memory_usage_margins Margins - -By default, allocations are laid out in memory blocks next to each other if possible -(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`). - -![Allocations without margin](../gfx/Margins_1.png) - -Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified -number of bytes as a margin after every allocation. - -\code -#define VMA_DEBUG_MARGIN 16 -#include "vk_mem_alloc.h" -\endcode - -![Allocations with margin](../gfx/Margins_2.png) - -If your bug goes away after enabling margins, it means it may be caused by memory -being overwritten outside of allocation boundaries. 
It is not 100% certain though. -Change in application behavior may also be caused by different order and distribution -of allocations across memory blocks after margins are applied. - -Margins work with all types of memory. - -Margin is applied only to allocations made out of memory blocks and not to dedicated -allocations, which have their own memory block of specific size. -It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag -or those automatically decided to put into dedicated allocations, e.g. due to its -large size or recommended by VK_KHR_dedicated_allocation extension. - -Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space. - -Note that enabling margins increases memory usage and fragmentation. - -Margins do not apply to \ref virtual_allocator. - -\section debugging_memory_usage_corruption_detection Corruption detection - -You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation -of contents of the margins. - -\code -#define VMA_DEBUG_MARGIN 16 -#define VMA_DEBUG_DETECT_CORRUPTION 1 -#include "vk_mem_alloc.h" -\endcode - -When this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN` -(it must be multiply of 4) after every allocation is filled with a magic number. -This idea is also know as "canary". -Memory is automatically mapped and unmapped if necessary. - -This number is validated automatically when the allocation is destroyed. -If it is not equal to the expected value, `VMA_ASSERT()` is executed. -It clearly means that either CPU or GPU overwritten the memory outside of boundaries of the allocation, -which indicates a serious bug. - -You can also explicitly request checking margins of all allocations in all memory blocks -that belong to specified memory types by using function vmaCheckCorruption(), -or in memory blocks that belong to specified custom pool, by using function -vmaCheckPoolCorruption(). - -Margin validation (corruption detection) works only for memory types that are -`HOST_VISIBLE` and `HOST_COHERENT`. - - -\page opengl_interop OpenGL Interop - -VMA provides some features that help with interoperability with OpenGL. - -\section opengl_interop_exporting_memory Exporting memory - -If you want to attach `VkExportMemoryAllocateInfoKHR` structure to `pNext` chain of memory allocations made by the library: - -It is recommended to create \ref custom_memory_pools for such allocations. -Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext -while creating the custom pool. -Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool, -not only while creating it, as no copy of the structure is made, -but its original pointer is used for each allocation instead. - -If you want to export all memory allocated by the library from certain memory types, -also dedicated allocations or other allocations made from default pools, -an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes. -It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library -through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type. -Please note that new versions of the library also support dedicated allocations created in custom pools. - -You should not mix these two methods in a way that allows to apply both to the same memory type. 
-Otherwise, `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`. - - -\section opengl_interop_custom_alignment Custom alignment - -Buffers or images exported to a different API like OpenGL may require a different alignment, -higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`. -To impose such alignment: - -It is recommended to create \ref custom_memory_pools for such allocations. -Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation -to be made out of this pool. -The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image -from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically. - -If you want to create a buffer with a specific minimum alignment out of default pools, -use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`. - -Note the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated -allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block. -Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation. - - -\page usage_patterns Recommended usage patterns - -Vulkan gives great flexibility in memory allocation. -This chapter shows the most common patterns. - -See also slides from talk: -[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New) - - -\section usage_patterns_gpu_only GPU-only resource - -When: -Any resources that you frequently write and read on GPU, -e.g. images used as color attachments (aka "render targets"), depth-stencil attachments, -images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)"). - -What to do: -Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. - -\code -VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; -imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; -imgCreateInfo.extent.width = 3840; -imgCreateInfo.extent.height = 2160; -imgCreateInfo.extent.depth = 1; -imgCreateInfo.mipLevels = 1; -imgCreateInfo.arrayLayers = 1; -imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; -imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; -imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; -imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; -imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; - -VmaAllocationCreateInfo allocCreateInfo = {}; -allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; -allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; -allocCreateInfo.priority = 1.0f; - -VkImage img; -VmaAllocation alloc; -vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr); -\endcode - -Also consider: -Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, -especially if they are large or if you plan to destroy and recreate them with different sizes -e.g. when display resolution changes. -Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later. 
-When the VK_EXT_memory_priority extension is enabled, it is also worth setting a high priority on such an allocation
-to decrease the chances of it being evicted to system memory by the operating system.
-
-\section usage_patterns_staging_copy_upload Staging copy for upload
-
-When:
-A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
-to some GPU resource.
-
-What to do:
-Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
-Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 65536;
-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-    VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-...
-
-memcpy(allocInfo.pMappedData, myData, myDataSize);
-\endcode
-
-Also consider:
-You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
-using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
-
-
-\section usage_patterns_readback Readback
-
-When:
-Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
-e.g. results of some computations.
-
-What to do:
-Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
-Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
-and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 65536;
-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
-    VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-...
-
-const float* downloadedData = (const float*)allocInfo.pMappedData;
-\endcode
-
-
-\section usage_patterns_advanced_data_uploading Advanced data uploading
-
-For resources that you frequently write on the CPU via a mapped pointer and
-frequently read on the GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
-
--# The easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
-  even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
-  and make the device reach out to that resource directly.
-  - Reads performed by the device will then go through the PCI Express bus.
-    The performance of this access may be limited, but it may be fine depending on the size
-    of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
-    of access.
--# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
-  a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
-  (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
--# Systems with a discrete graphics card and separate video memory may or may not expose
-   a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
-   If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
-   that is available to CPU for mapping.
-   - Writes performed by the host to that memory go through PCI Express bus.
-     The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
-     as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
--# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
-   a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
-
-Thankfully, VMA offers an aid to create and use such resources in the way optimal
-for the current Vulkan device. To help the library make the best choice,
-use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
-#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
-It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
-but if no such memory type is available or allocation from it fails
-(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
-it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
-It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
-in which case you need to create another "staging" allocation and perform explicit transfers.
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 65536;
-bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
-    VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-VkBuffer buf;
-VmaAllocation alloc;
-VmaAllocationInfo allocInfo;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-
-VkMemoryPropertyFlags memPropFlags;
-vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
-
-if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
-{
-    // Allocation ended up in a mappable memory and is already mapped - write to it directly.
-
-    // [Executed in runtime]:
-    memcpy(allocInfo.pMappedData, myData, myDataSize);
-}
-else
-{
-    // Allocation ended up in a non-mappable memory - need to transfer.
-    VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-    stagingBufCreateInfo.size = 65536;
-    stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-
-    VmaAllocationCreateInfo stagingAllocCreateInfo = {};
-    stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-    stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-        VMA_ALLOCATION_CREATE_MAPPED_BIT;
-
-    VkBuffer stagingBuf;
-    VmaAllocation stagingAlloc;
-    VmaAllocationInfo stagingAllocInfo;
-    vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
-        &stagingBuf, &stagingAlloc, &stagingAllocInfo);
-
-    // [Executed in runtime]:
-    memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
-    //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
-    VkBufferCopy bufCopy = {
-        0, // srcOffset
-        0, // dstOffset
-        myDataSize }; // size
-    vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
-}
-\endcode
-
-\section usage_patterns_other_use_cases Other use cases
-
-Here are some other, less obvious use cases and their recommended settings:
-
-- An image that is used only as transfer source and destination, but it should stay on the device,
-  as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
-  for temporal antialiasing or other temporal effects.
-  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
-  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
-- An image that is used only as transfer source and destination, but it should be placed
-  in the system RAM even though it doesn't need to be mapped, because it serves as a "swap" copy to evict
-  least recently used textures from VRAM.
-  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
-  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
-    as VMA needs a hint here to differentiate from the previous case.
-- A buffer that you want to map and write from the CPU and directly read from the GPU
-  (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
-  host memory due to its large size.
-  - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
-  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
-  - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
-
-
-\page configuration Configuration
-
-Please check "CONFIGURATION SECTION" in the code to find macros that you can define
-before each include of this file or change directly in this file to provide
-your own implementation of basic facilities like assert, `min()` and `max()` functions,
-mutex, atomic etc.
-The library uses its own implementation of containers by default, but you can switch to using
-STL containers instead.
-
-For example, define `VMA_ASSERT(expr)` before including the library to provide
-custom implementation of the assertion, compatible with your project.
-By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration
-and empty otherwise.
-
-\section config_Vulkan_functions Pointers to Vulkan functions
-
-There are multiple ways to import pointers to Vulkan functions in the library.
-In the simplest case you don't need to do anything.
-If the compilation or linking of your program or the initialization of the #VmaAllocator
-doesn't work for you, you can try to reconfigure it.
-
-First, the allocator tries to fetch pointers to Vulkan functions linked statically,
-like this:
-
-\code
-m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
-\endcode
-
-If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
-
-Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
-You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
-by using a helper library like [volk](https://github.com/zeux/volk).
-
-Third, VMA tries to fetch remaining pointers that are still null by calling
-`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
-You need to only fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
-Other pointers will be fetched automatically.
-If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
-
-Finally, all the function pointers required by the library (considering selected
-Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.
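-
-For example, a minimal setup relying on the dynamic fetching described above could look like the sketch below.
-It assumes the application links the Vulkan loader statically so the global `vkGetInstanceProcAddr` and
-`vkGetDeviceProcAddr` symbols are available, and that `instance`, `physicalDevice`, and `device` were created elsewhere;
-with a loader like volk, pass the pointers obtained from that loader instead.
-
-\code
-VmaVulkanFunctions vulkanFunctions = {};
-vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
-vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
-
-VmaAllocatorCreateInfo allocatorCreateInfo = {};
-allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2; // The version your application targets.
-allocatorCreateInfo.instance = instance;
-allocatorCreateInfo.physicalDevice = physicalDevice;
-allocatorCreateInfo.device = device;
-allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
-
-VmaAllocator allocator;
-vmaCreateAllocator(&allocatorCreateInfo, &allocator);
-\endcode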
-
-
-\section custom_memory_allocator Custom host memory allocator
-
-If you use a custom allocator for CPU memory rather than default operator `new`
-and `delete` from C++, you can make this library use your allocator as well
-by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
-functions will be passed to Vulkan, as well as used by the library itself to
-make any CPU-side allocations.
-
-\section allocation_callbacks Device memory allocation callbacks
-
-The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
-You can set up callbacks to be informed about these calls, e.g. for the purpose
-of gathering some statistics. To do it, fill optional member
-VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
-
-\section heap_memory_limit Device heap memory limit
-
-When device memory of a certain heap runs out of free space, new allocations may
-fail (returning error code) or they may succeed, silently pushing some existing
-memory blocks from GPU VRAM to system RAM (which degrades performance). This
-behavior is implementation-dependent - it depends on GPU vendor and graphics
-driver.
-
-On AMD cards it can be controlled while creating Vulkan device object by using
-VK_AMD_memory_overallocation_behavior extension, if available.
-
-Alternatively, if you want to test how your program behaves with limited amount of Vulkan device
-memory available without switching your graphics card to one that really has
-smaller VRAM, you can use a feature of this library intended for this purpose.
-To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
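-
-For example, a sketch that limits heap 0 to 256 MB while leaving the remaining heaps unlimited
-(the heap index and the limit are arbitrary values chosen for illustration;
-`VK_WHOLE_SIZE` means no limit on a given heap, and the array is indexed by heap, not by memory type):
-
-\code
-VkDeviceSize heapSizeLimit[VK_MAX_MEMORY_HEAPS];
-for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
-    heapSizeLimit[i] = VK_WHOLE_SIZE; // No limit on this heap.
-heapSizeLimit[0] = 256ull * 1024 * 1024; // Limit heap 0 to 256 MB.
-
-VmaAllocatorCreateInfo allocatorCreateInfo = {};
-// ... fill other members as usual ...
-allocatorCreateInfo.pHeapSizeLimit = heapSizeLimit;
-\endcode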
-
-
-
-\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
-
-VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
-performance on some GPUs. It augments Vulkan API with possibility to query
-driver whether it prefers particular buffer or image to have its own, dedicated
-allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
-to do some internal optimizations. The extension is supported by this library.
-It will be used automatically when enabled.
-
-It has been promoted to core Vulkan 1.1, so if you use an eligible Vulkan version
-and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
-you are all set.
-
-Otherwise, if you want to use it as an extension:
-
-1) When creating Vulkan device, check if following 2 device extensions are
-supported (call `vkEnumerateDeviceExtensionProperties()`).
-If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
-
-- VK_KHR_get_memory_requirements2
-- VK_KHR_dedicated_allocation
-
-If you enabled these extensions:
-
-2) Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
-your #VmaAllocator to inform the library that you enabled required extensions
-and you want the library to use them.
-
-\code
-allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
-
-vmaCreateAllocator(&allocatorInfo, &allocator);
-\endcode
-
-That is all. The extension will be automatically used whenever you create a
-buffer using vmaCreateBuffer() or image using vmaCreateImage().
-
-When using the extension together with Vulkan Validation Layer, you will receive
-warnings like this:
-
-_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
-
-It is OK, you should just ignore it. It happens because you use function
-`vkGetBufferMemoryRequirements2KHR()` instead of standard
-`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
-unaware of it.
-
-To learn more about this extension, see:
-
-- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
-- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
-
-
-
-\page vk_ext_memory_priority VK_EXT_memory_priority
-
-VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
-value to Vulkan memory allocations that the implementation may use to prefer certain
-buffers and images that are critical for performance to stay in device-local memory
-in cases when the memory is over-subscribed, while some others may be moved to the system memory.
-
-VMA offers convenient usage of this extension.
-If you enable it, you can pass a "priority" parameter when creating allocations or custom pools
-and the library automatically passes the value to Vulkan using this extension.
-
-If you want to use this extension in connection with VMA, follow these steps:
-
-\section vk_ext_memory_priority_initialization Initialization
-
-1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
-Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
-
-2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
-Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
-Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
-
-3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
-to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
-
-4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
-Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
-Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
-`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
-
-5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
-have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
-to VmaAllocatorCreateInfo::flags.
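-
-As an illustration, steps 2) to 4) could look like the sketch below (error handling, queue setup, and any other
-extensions and features your application needs are omitted; `physicalDevice` is assumed to exist already):
-
-\code
-// Step 2: query support for the feature.
-VkPhysicalDeviceMemoryPriorityFeaturesEXT memoryPriorityFeatures =
-    { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT };
-VkPhysicalDeviceFeatures2 features2 = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 };
-features2.pNext = &memoryPriorityFeatures;
-vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
-// Proceed only if memoryPriorityFeatures.memoryPriority == VK_TRUE.
-
-// Steps 3 and 4: enable the extension and the feature while creating the device.
-const char* enabledExtensionNames[] = { VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME };
-memoryPriorityFeatures.memoryPriority = VK_TRUE;
-
-VkDeviceCreateInfo deviceCreateInfo = { VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO };
-deviceCreateInfo.pNext = &features2; // pEnabledFeatures is left null.
-deviceCreateInfo.enabledExtensionCount = 1;
-deviceCreateInfo.ppEnabledExtensionNames = enabledExtensionNames;
-// ... fill queue create infos etc. and call vkCreateDevice() ...
-\endcode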
-
-\section vk_ext_memory_priority_usage Usage
-
-When using this extension, you should initialize the following members:
-
-- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-- VmaPoolCreateInfo::priority when creating a custom pool.
-
-It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
-Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority
-and so it can have lower chances of being pushed out to system memory, experiencing degraded performance.
-
-It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
-as dedicated and set high priority to them. For example:
-
-\code
-VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
-imgCreateInfo.extent.width = 3840;
-imgCreateInfo.extent.height = 2160;
-imgCreateInfo.extent.depth = 1;
-imgCreateInfo.mipLevels = 1;
-imgCreateInfo.arrayLayers = 1;
-imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
-imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-allocCreateInfo.priority = 1.0f;
-
-VkImage img;
-VmaAllocation alloc;
-vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
-\endcode
-
-`priority` member is ignored in the following situations:
-
-- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters,
-  from the parameters passed in #VmaPoolCreateInfo when the pool was created.
-- Allocations created in default pools: They inherit the priority from the parameters
-  VMA used when creating default pools, which means `priority == 0.5f`.
-
-
-\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
-
-VK_AMD_device_coherent_memory is a device extension that enables access to
-additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
-`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. It is useful mostly for
-allocation of buffers intended for writing "breadcrumb markers" in between passes
-or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
-
-When the extension is available but has not been enabled, Vulkan physical device
-still exposes those memory types, but their usage is forbidden. VMA automatically
-takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
-to allocate memory of such type is made.
-
-If you want to use this extension in connection with VMA, follow these steps:
-
-\section vk_amd_device_coherent_memory_initialization Initialization
-
-1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
-Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
-
-2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
-Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
-Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
-
-3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
-to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
-
-4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
-Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
-Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
-`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
-
-5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
-have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
-to VmaAllocatorCreateInfo::flags.
-
-\section vk_amd_device_coherent_memory_usage Usage
-
-After following the steps described above, you can create VMA allocations and custom pools
-out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
-devices. There are multiple ways to do it, for example (see the sketch after this list):
-
-- You can request or prefer to allocate out of such memory types by adding
-  `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
-  or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
-  other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
-- If you manually found memory type index to use for this purpose, force allocation
-  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
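-
-For example, a sketch of the first approach - creating a small buffer for breadcrumb markers and asking VMA to
-prefer the uncached, device-coherent memory type (the size and usage flags are only illustrative):
-
-\code
-VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-bufCreateInfo.size = 4096;
-bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-
-VmaAllocationCreateInfo allocCreateInfo = {};
-allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD |
-    VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;
-
-VkBuffer buf;
-VmaAllocation alloc;
-vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
-\endcode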
-
-\section vk_amd_device_coherent_memory_more_information More information
-
-To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
-
-Example use of this extension can be found in the code of the sample and test suite
-accompanying this library.
-
-
-\page enabling_buffer_device_address Enabling buffer device address
-
-Device extension VK_KHR_buffer_device_address
-allows fetching a raw GPU pointer to a buffer and passing it for usage in shader code.
-It has been promoted to core Vulkan 1.2.
-
-If you want to use this feature in connection with VMA, follow these steps:
-
-\section enabling_buffer_device_address_initialization Initialization
-
-1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
-Check if the extension is supported - if returned array of `VkExtensionProperties` contains
-"VK_KHR_buffer_device_address".
-
-2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
-Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
-Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
- -3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add -"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. - -4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. -Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. -Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to -`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`. - -5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you -have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT -to VmaAllocatorCreateInfo::flags. - -\section enabling_buffer_device_address_usage Usage - -After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA. -The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to -allocated memory blocks wherever it might be needed. - -Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`. -The second part of this functionality related to "capture and replay" is not supported, -as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage. - -\section enabling_buffer_device_address_more_information More information - -To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address) - -Example use of this extension can be found in the code of the sample and test suite -accompanying this library. - -\page general_considerations General considerations - -\section general_considerations_thread_safety Thread safety - -- The library has no global state, so separate #VmaAllocator objects can be used - independently. - There should be no need to create multiple such objects though - one per `VkDevice` is enough. -- By default, all calls to functions that take #VmaAllocator as first parameter - are safe to call from multiple threads simultaneously because they are - synchronized internally when needed. - This includes allocation and deallocation from default memory pool, as well as custom #VmaPool. -- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT - flag, calls to functions that take such #VmaAllocator object must be - synchronized externally. -- Access to a #VmaAllocation object must be externally synchronized. For example, - you must not call vmaGetAllocationInfo() and vmaMapMemory() from different - threads at the same time if you pass the same #VmaAllocation object to these - functions. -- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously. - -\section general_considerations_versioning_and_compatibility Versioning and compatibility - -The library uses [**Semantic Versioning**](https://semver.org/), -which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), where: - -- Incremented Patch version means a release is backward- and forward-compatible, - introducing only some internal improvements, bug fixes, optimizations etc. - or changes that are out of scope of the official API described in this documentation. 
-- Incremented Minor version means a release is backward-compatible, - so existing code that uses the library should continue to work, while some new - symbols could have been added: new structures, functions, new values in existing - enums and bit flags, new structure members, but not new function parameters. -- Incrementing Major version means a release could break some backward compatibility. - -All changes between official releases are documented in file "CHANGELOG.md". - -\warning Backward compatiblity is considered on the level of C++ source code, not binary linkage. -Adding new members to existing structures is treated as backward compatible if initializing -the new members to binary zero results in the old behavior. -You should always fully initialize all library structures to zeros and not rely on their -exact binary size. - -\section general_considerations_validation_layer_warnings Validation layer warnings - -When using this library, you can meet following types of warnings issued by -Vulkan validation layer. They don't necessarily indicate a bug, so you may need -to just ignore them. - -- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.* - - It happens when VK_KHR_dedicated_allocation extension is enabled. - `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it. -- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.* - - It happens when you map a buffer or image, because the library maps entire - `VkDeviceMemory` block, where different types of images and buffers may end - up together, especially on GPUs with unified memory like Intel. -- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.* - - It may happen when you use [defragmentation](@ref defragmentation). - -\section general_considerations_allocation_algorithm Allocation algorithm - -The library uses following algorithm for allocation, in order: - --# Try to find free range of memory in existing blocks. --# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size. --# If failed, try to create such block with size / 2, size / 4, size / 8. --# If failed, try to allocate separate `VkDeviceMemory` for this allocation, - just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. --# If failed, choose other memory type that meets the requirements specified in - VmaAllocationCreateInfo and go to point 1. --# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. - -\section general_considerations_features_not_supported Features not supported - -Features deliberately excluded from the scope of this library: - --# **Data transfer.** Uploading (streaming) and downloading data of buffers and images - between CPU and GPU memory and related synchronization is responsibility of the user. - Defining some "texture" object that would automatically stream its data from a - staging copy in CPU memory to GPU memory would rather be a feature of another, - higher-level library implemented on top of VMA. - VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory. --# **Recreation of buffers and images.** Although the library has functions for - buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to - recreate these objects yourself after defragmentation. 
That is because the big - structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in - #VmaAllocation object. --# **Handling CPU memory allocation failures.** When dynamically creating small C++ - objects in CPU memory (not Vulkan memory), allocation failures are not checked - and handled gracefully, because that would complicate code significantly and - is usually not needed in desktop PC applications anyway. - Success of an allocation is just checked with an assert. --# **Code free of any compiler warnings.** Maintaining the library to compile and - work correctly on so many different platforms is hard enough. Being free of - any warnings, on any version of any compiler, is simply not feasible. - There are many preprocessor macros that make some variables unused, function parameters unreferenced, - or conditional expressions constant in some configurations. - The code of this library should not be bigger or more complicated just to silence these warnings. - It is recommended to disable such warnings instead. --# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but - are not going to be included into this repository. -*/ +// +// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H +#define AMD_VULKAN_MEMORY_ALLOCATOR_H + +/** \mainpage Vulkan Memory Allocator + +Version 3.0.1 (2022-05-26) + +Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. 
\n +License: MIT + +API documentation divided into groups: [Modules](modules.html) + +\section main_table_of_contents Table of contents + +- User guide + - \subpage quick_start + - [Project setup](@ref quick_start_project_setup) + - [Initialization](@ref quick_start_initialization) + - [Resource allocation](@ref quick_start_resource_allocation) + - \subpage choosing_memory_type + - [Usage](@ref choosing_memory_type_usage) + - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) + - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) + - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) + - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) + - \subpage memory_mapping + - [Mapping functions](@ref memory_mapping_mapping_functions) + - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) + - [Cache flush and invalidate](@ref memory_mapping_cache_control) + - \subpage staying_within_budget + - [Querying for budget](@ref staying_within_budget_querying_for_budget) + - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) + - \subpage resource_aliasing + - \subpage custom_memory_pools + - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) + - [Linear allocation algorithm](@ref linear_algorithm) + - [Free-at-once](@ref linear_algorithm_free_at_once) + - [Stack](@ref linear_algorithm_stack) + - [Double stack](@ref linear_algorithm_double_stack) + - [Ring buffer](@ref linear_algorithm_ring_buffer) + - \subpage defragmentation + - \subpage statistics + - [Numeric statistics](@ref statistics_numeric_statistics) + - [JSON dump](@ref statistics_json_dump) + - \subpage allocation_annotation + - [Allocation user data](@ref allocation_user_data) + - [Allocation names](@ref allocation_names) + - \subpage virtual_allocator + - \subpage debugging_memory_usage + - [Memory initialization](@ref debugging_memory_usage_initialization) + - [Margins](@ref debugging_memory_usage_margins) + - [Corruption detection](@ref debugging_memory_usage_corruption_detection) + - \subpage opengl_interop +- \subpage usage_patterns + - [GPU-only resource](@ref usage_patterns_gpu_only) + - [Staging copy for upload](@ref usage_patterns_staging_copy_upload) + - [Readback](@ref usage_patterns_readback) + - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading) + - [Other use cases](@ref usage_patterns_other_use_cases) +- \subpage configuration + - [Pointers to Vulkan functions](@ref config_Vulkan_functions) + - [Custom host memory allocator](@ref custom_memory_allocator) + - [Device memory allocation callbacks](@ref allocation_callbacks) + - [Device heap memory limit](@ref heap_memory_limit) +- Extension support + - \subpage vk_khr_dedicated_allocation + - \subpage enabling_buffer_device_address + - \subpage vk_ext_memory_priority + - \subpage vk_amd_device_coherent_memory +- \subpage general_considerations + - [Thread safety](@ref general_considerations_thread_safety) + - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility) + - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) + - [Allocation algorithm](@ref general_considerations_allocation_algorithm) + - [Features not supported](@ref general_considerations_features_not_supported) + +\section main_see_also See also + +- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/) +- [**Source repository on 
GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) + +\defgroup group_init Library initialization + +\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object. + +\defgroup group_alloc Memory allocation + +\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images. +Most basic ones being: vmaCreateBuffer(), vmaCreateImage(). + +\defgroup group_virtual Virtual allocator + +\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm +for user-defined purpose without allocating any real GPU memory. + +\defgroup group_stats Statistics + +\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format. +See documentation chapter: \ref statistics. +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef VULKAN_H_ + #include +#endif + +// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC, +// where AAA = major, BBB = minor, CCC = patch. +// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion. +#if !defined(VMA_VULKAN_VERSION) + #if defined(VK_VERSION_1_3) + #define VMA_VULKAN_VERSION 1003000 + #elif defined(VK_VERSION_1_2) + #define VMA_VULKAN_VERSION 1002000 + #elif defined(VK_VERSION_1_1) + #define VMA_VULKAN_VERSION 1001000 + #else + #define VMA_VULKAN_VERSION 1000000 + #endif +#endif + +#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS + extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; + extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; + extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; + extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; + extern PFN_vkAllocateMemory vkAllocateMemory; + extern PFN_vkFreeMemory vkFreeMemory; + extern PFN_vkMapMemory vkMapMemory; + extern PFN_vkUnmapMemory vkUnmapMemory; + extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; + extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; + extern PFN_vkBindBufferMemory vkBindBufferMemory; + extern PFN_vkBindImageMemory vkBindImageMemory; + extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; + extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; + extern PFN_vkCreateBuffer vkCreateBuffer; + extern PFN_vkDestroyBuffer vkDestroyBuffer; + extern PFN_vkCreateImage vkCreateImage; + extern PFN_vkDestroyImage vkDestroyImage; + extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; + #if VMA_VULKAN_VERSION >= 1001000 + extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; + extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; + extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; + extern PFN_vkBindImageMemory2 vkBindImageMemory2; + extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; + #endif // #if VMA_VULKAN_VERSION >= 1001000 +#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES + +#if !defined(VMA_DEDICATED_ALLOCATION) + #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation + #define VMA_DEDICATED_ALLOCATION 1 + #else + #define VMA_DEDICATED_ALLOCATION 0 + #endif +#endif + +#if !defined(VMA_BIND_MEMORY2) + #if VK_KHR_bind_memory2 + #define VMA_BIND_MEMORY2 1 + #else + #define VMA_BIND_MEMORY2 0 + #endif 
+#endif + +#if !defined(VMA_MEMORY_BUDGET) + #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) + #define VMA_MEMORY_BUDGET 1 + #else + #define VMA_MEMORY_BUDGET 0 + #endif +#endif + +// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. +#if !defined(VMA_BUFFER_DEVICE_ADDRESS) + #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 + #define VMA_BUFFER_DEVICE_ADDRESS 1 + #else + #define VMA_BUFFER_DEVICE_ADDRESS 0 + #endif +#endif + +// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers. +#if !defined(VMA_MEMORY_PRIORITY) + #if VK_EXT_memory_priority + #define VMA_MEMORY_PRIORITY 1 + #else + #define VMA_MEMORY_PRIORITY 0 + #endif +#endif + +// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers. +#if !defined(VMA_EXTERNAL_MEMORY) + #if VK_KHR_external_memory + #define VMA_EXTERNAL_MEMORY 1 + #else + #define VMA_EXTERNAL_MEMORY 0 + #endif +#endif + +// Define these macros to decorate all public functions with additional code, +// before and after returned type, appropriately. This may be useful for +// exporting the functions when compiling VMA as a separate library. Example: +// #define VMA_CALL_PRE __declspec(dllexport) +// #define VMA_CALL_POST __cdecl +#ifndef VMA_CALL_PRE + #define VMA_CALL_PRE +#endif +#ifndef VMA_CALL_POST + #define VMA_CALL_POST +#endif + +// Define this macro to decorate pointers with an attribute specifying the +// length of the array they point to if they are not null. +// +// The length may be one of +// - The name of another parameter in the argument list where the pointer is declared +// - The name of another member in the struct where the pointer is declared +// - The name of a member of a struct type, meaning the value of that member in +// the context of the call. For example +// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"), +// this means the number of memory heaps available in the device associated +// with the VmaAllocator being dealt with. +#ifndef VMA_LEN_IF_NOT_NULL + #define VMA_LEN_IF_NOT_NULL(len) +#endif + +// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang. +// see: https://clang.llvm.org/docs/AttributeReference.html#nullable +#ifndef VMA_NULLABLE + #ifdef __clang__ + #define VMA_NULLABLE _Nullable + #else + #define VMA_NULLABLE + #endif +#endif + +// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang. 
+// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull +#ifndef VMA_NOT_NULL + #ifdef __clang__ + #define VMA_NOT_NULL _Nonnull + #else + #define VMA_NOT_NULL + #endif +#endif + +// If non-dispatchable handles are represented as pointers then we can give +// then nullability annotations +#ifndef VMA_NOT_NULL_NON_DISPATCHABLE + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL + #else + #define VMA_NOT_NULL_NON_DISPATCHABLE + #endif +#endif + +#ifndef VMA_NULLABLE_NON_DISPATCHABLE + #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE + #else + #define VMA_NULLABLE_NON_DISPATCHABLE + #endif +#endif + +#ifndef VMA_STATS_STRING_ENABLED + #define VMA_STATS_STRING_ENABLED 1 +#endif + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// +// +// INTERFACE +// +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE. +#ifndef _VMA_ENUM_DECLARATIONS + +/** +\addtogroup group_init +@{ +*/ + +/// Flags for created #VmaAllocator. +typedef enum VmaAllocatorCreateFlagBits +{ + /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. + + Using this flag may increase performance because internal mutexes are not used. + */ + VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, + /** \brief Enables usage of VK_KHR_dedicated_allocation extension. + + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. + + Using this extension will automatically allocate dedicated blocks of memory for + some buffers and images instead of suballocating place for them out of bigger + memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT + flag) when it is recommended by the driver. It may improve performance on some + GPUs. + + You may set this flag only if you found out that following device extensions are + supported, you enabled them while creating Vulkan device passed as + VmaAllocatorCreateInfo::device, and you want them to be used internally by this + library: + + - VK_KHR_get_memory_requirements2 (device extension) + - VK_KHR_dedicated_allocation (device extension) + + When this flag is set, you can experience following warnings reported by Vulkan + validation layer. You can ignore them. + + > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. + */ + VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, + /** + Enables usage of VK_KHR_bind_memory2 extension. + + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. 
+ When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library. + + The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, + which allow to pass a chain of `pNext` structures while binding. + This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). + */ + VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, + /** + Enables usage of VK_EXT_memory_budget extension. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library, along with another instance extension + VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). + + The extension provides query for current memory usage and budget, which will probably + be more accurate than an estimation used by the library otherwise. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, + /** + Enables usage of VK_AMD_device_coherent_memory extension. + + You may set this flag only if you: + + - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, + - want it to be used internally by this library. + + The extension and accompanying device feature provide access to memory types with + `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. + They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. + + When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. + To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, + returning `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, + /** + Enables usage of "buffer device address" feature, which allows you to use function + `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. + + You may set this flag only if you: + + 1. (For Vulkan version < 1.2) Found as available and enabled device extension + VK_KHR_buffer_device_address. + This extension is promoted to core Vulkan 1.2. + 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. + + When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. + The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to + allocated memory blocks wherever it might be needed. + + For more information, see documentation chapter \ref enabling_buffer_device_address. + */ + VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, + /** + Enables usage of VK_EXT_memory_priority extension in the library. 
+ + You may set this flag only if you found available and enabled this device extension, + along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + + When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority + are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. + + A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + Larger values are higher priority. The granularity of the priorities is implementation-dependent. + It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. + The value to be used for default priority is 0.5. + For more details, see the documentation of the VK_EXT_memory_priority extension. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, + + VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocatorCreateFlagBits; +/// See #VmaAllocatorCreateFlagBits. +typedef VkFlags VmaAllocatorCreateFlags; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/// \brief Intended usage of the allocated memory. +typedef enum VmaMemoryUsage +{ + /** No intended memory usage specified. + Use other members of VmaAllocationCreateInfo to specify your requirements. + */ + VMA_MEMORY_USAGE_UNKNOWN = 0, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_GPU_ONLY = 1, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. + */ + VMA_MEMORY_USAGE_CPU_ONLY = 2, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_TO_GPU = 3, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. + */ + VMA_MEMORY_USAGE_GPU_TO_CPU = 4, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_COPY = 5, + /** + Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. + Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. + + Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. + + Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + */ + VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + /** + Selects best memory type automatically. + This flag is recommended for most common use cases. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. 
+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO = 7, + /** + Selects best memory type automatically with preference for GPU (device) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, + /** + Selects best memory type automatically with preference for CPU (host) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, + + VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF +} VmaMemoryUsage; + +/// Flags to be passed as VmaAllocationCreateInfo::flags. +typedef enum VmaAllocationCreateFlagBits +{ + /** \brief Set this flag if the allocation should have its own memory block. + + Use it for special, big resources, like fullscreen images used as attachments. + */ + VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, + + /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. + + If new allocation cannot be placed in any of the existing blocks, allocation + fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + + You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and + #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. + */ + VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, + /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. + + Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. + + It is valid to use this flag for allocation made from memory type that is not + `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is + useful if you need an allocation that is efficient to use on GPU + (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that + support it (e.g. Intel GPU). + */ + VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, + /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. + + Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a + null-terminated string. Instead of copying pointer value, a local copy of the + string is made and stored in allocation's `pName`. The string is automatically + freed together with the allocation. It is also used in vmaBuildStatsString(). 
+ */ + VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, + /** Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, + /** Create both buffer/image and allocation, but don't bind them together. + It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. + The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). + Otherwise it is ignored. + + If you want to make sure the new buffer/image is not tied to the new memory allocation + through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block, + use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT. + */ + VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, + /** Create allocation only if additional device memory required for it, if any, won't exceed + memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + */ + VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, + /** \brief Set this flag if the allocated memory will have aliasing resources. + + Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. + Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. + */ + VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number, + never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. + + \warning Violating this declaration may work correctly, but will likely be very slow. + Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;` + Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory can be read, written, and accessed in random order, + so a `HOST_CACHED` memory type is required. 
+ */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, + /** + Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, + it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected + if it may improve performance. + + By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type + (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and + issue an explicit transfer to write/read your data. + To prepare for this possibility, don't forget to add appropriate flags like + `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, + /** Allocation strategy that chooses smallest possible free range for the allocation + to minimize memory usage and fragmentation, possibly at the expense of allocation time. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000, + /** Allocation strategy that chooses first suitable free range for the allocation - + not necessarily in terms of the smallest offset but the one that is easiest and fastest to find + to minimize allocation time, possibly at the expense of allocation quality. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + Used internally by defragmentation, not recomended in typical usage. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** A bit mask to extract only `STRATEGY` bits from entire set of flags. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MASK = + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + + VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocationCreateFlagBits; +/// See #VmaAllocationCreateFlagBits. +typedef VkFlags VmaAllocationCreateFlags; + +/// Flags to be passed as VmaPoolCreateInfo::flags. +typedef enum VmaPoolCreateFlagBits +{ + /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. + + This is an optional optimization flag. + + If you always allocate using vmaCreateBuffer(), vmaCreateImage(), + vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator + knows exact type of your allocations so it can handle Buffer-Image Granularity + in the optimal way. + + If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), + exact type of such allocations is not known, so allocator must be conservative + in handling Buffer-Image Granularity, which can lead to suboptimal allocation + (wasted memory). 
In that case, if you can make sure you always allocate only + buffers and linear images or only optimal images out of this pool, use this flag + to make allocator disregard Buffer-Image Granularity and so make allocations + faster and more optimal. + */ + VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, + + /** \brief Enables alternative, linear allocation algorithm in this pool. + + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. + + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. + For details, see documentation chapter \ref linear_algorithm. + */ + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, + + /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_POOL_CREATE_ALGORITHM_MASK = + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, + + VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaPoolCreateFlagBits; +/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits. +typedef VkFlags VmaPoolCreateFlags; + +/// Flags to be passed as VmaDefragmentationInfo::flags. +typedef enum VmaDefragmentationFlagBits +{ + /* \brief Use simple but fast algorithm for defragmentation. + May not achieve best results but will require least time to compute and least allocations to copy. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1, + /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified. + Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2, + /* \brief Perform full defragmentation of memory. + Can result in notably more time to compute and allocations to copy, but will achieve best memory packing. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4, + /** \brief Use the most roboust algorithm at the cost of time to compute and number of copies to make. + Only available when bufferImageGranularity is greater than 1, since it aims to reduce + alignment issues between different types of resources. + Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8, + + /// A bit mask to extract only `ALGORITHM` bits from entire set of flags. + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK = + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT, + + VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaDefragmentationFlagBits; +/// See #VmaDefragmentationFlagBits. +typedef VkFlags VmaDefragmentationFlags; + +/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove. +typedef enum VmaDefragmentationMoveOperation +{ + /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). + VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0, + /// Set this value if you cannot move the allocation. 
New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged. + VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, + /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed. + VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, +} VmaDefragmentationMoveOperation; + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. +typedef enum VmaVirtualBlockCreateFlagBits +{ + /** \brief Enables alternative, linear allocation algorithm in this virtual block. + + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. + + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. + For details, see documentation chapter \ref linear_algorithm. + */ + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001, + + /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT, + + VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaVirtualBlockCreateFlagBits; +/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits. +typedef VkFlags VmaVirtualBlockCreateFlags; + +/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. +typedef enum VmaVirtualAllocationCreateFlagBits +{ + /** \brief Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, + /** \brief Allocation strategy that tries to minimize memory usage. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** \brief Allocation strategy that tries to minimize allocation time. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. + + These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK, + + VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaVirtualAllocationCreateFlagBits; +/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. +typedef VkFlags VmaVirtualAllocationCreateFlags; + +/** @} */ + +#endif // _VMA_ENUM_DECLARATIONS + +#ifndef _VMA_DATA_TYPES_DECLARATIONS + +/** +\addtogroup group_init +@{ */ + +/** \struct VmaAllocator +\brief Represents main object of this library initialized. + +Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it. 
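The virtual allocation flags above are exercised through the #VmaVirtualBlock API declared further below. A minimal sketch (sizes and names are illustrative only):

\code
VmaVirtualBlockCreateInfo blockCreateInfo = {};
blockCreateInfo.size = 1048576; // "virtual" space, in whatever units you choose

VmaVirtualBlock block;
VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);

VmaVirtualAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.size = 4096;

VmaVirtualAllocation alloc;
VkDeviceSize offset;
res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);

// ... use the sub-range [offset, offset + 4096) of your own resource ...

vmaVirtualFree(block, alloc);
vmaDestroyVirtualBlock(block);
\endcode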
+Call function vmaDestroyAllocator() to destroy it. + +It is recommended to create just one object of this type per `VkDevice` object, +right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. +*/ +VK_DEFINE_HANDLE(VmaAllocator) + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \struct VmaPool +\brief Represents custom memory pool + +Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it. +Call function vmaDestroyPool() to destroy it. + +For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). +*/ +VK_DEFINE_HANDLE(VmaPool) + +/** \struct VmaAllocation +\brief Represents single memory allocation. + +It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type +plus unique offset. + +There are multiple ways to create such object. +You need to fill structure VmaAllocationCreateInfo. +For more information see [Choosing memory type](@ref choosing_memory_type). + +Although the library provides convenience functions that create Vulkan buffer or image, +allocate memory for it and bind them together, +binding of the allocation to a buffer or an image is out of scope of the allocation itself. +Allocation object can exist without buffer/image bound, +binding can be done manually by the user, and destruction of it can be done +independently of destruction of the allocation. + +The object also remembers its size and some other information. +To retrieve this information, use function vmaGetAllocationInfo() and inspect +returned structure VmaAllocationInfo. +*/ +VK_DEFINE_HANDLE(VmaAllocation) + +/** \struct VmaDefragmentationContext +\brief An opaque object that represents started defragmentation process. + +Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it. +Call function vmaEndDefragmentation() to destroy it. +*/ +VK_DEFINE_HANDLE(VmaDefragmentationContext) + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \struct VmaVirtualAllocation +\brief Represents single memory allocation done inside VmaVirtualBlock. + +Use it as a unique identifier to virtual allocation within the single block. + +Use value `VK_NULL_HANDLE` to represent a null/invalid allocation. +*/ +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation); + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \struct VmaVirtualBlock +\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory. + +Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it. +For more information, see documentation chapter \ref virtual_allocator. + +This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally. +*/ +VK_DEFINE_HANDLE(VmaVirtualBlock) + +/** @} */ + +/** +\addtogroup group_init +@{ +*/ + +/// Callback function called after successful vkAllocateMemory. +typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/// Callback function called before vkFreeMemory. 
+typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. + +Provided for informative purpose, e.g. to gather statistics about number of +allocations or total amount of memory allocated in Vulkan. + +Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. +*/ +typedef struct VmaDeviceMemoryCallbacks +{ + /// Optional, can be null. + PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; + /// Optional, can be null. + PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; + /// Optional, can be null. + void* VMA_NULLABLE pUserData; +} VmaDeviceMemoryCallbacks; + +/** \brief Pointers to some Vulkan functions - a subset used by the library. + +Used in VmaAllocatorCreateInfo::pVulkanFunctions. +*/ +typedef struct VmaVulkanFunctions +{ + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; + PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; + PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; + PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; + PFN_vkMapMemory VMA_NULLABLE vkMapMemory; + PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; + PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; + PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; + PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; + PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; + PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; + PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; + PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; + PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; + PFN_vkCreateImage VMA_NULLABLE vkCreateImage; + PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; + PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; + /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. + PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; + /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. 
+    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
+#endif
+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
+#endif
+#if VMA_VULKAN_VERSION >= 1003000
+    /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+    PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
+    /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+    PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
+#endif
+} VmaVulkanFunctions;
+
+/// Description of an Allocator to be created.
+typedef struct VmaAllocatorCreateInfo
+{
+    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
+    VmaAllocatorCreateFlags flags;
+    /// Vulkan physical device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+    /// Vulkan device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkDevice VMA_NOT_NULL device;
+    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
+    /** Set to 0 to use default, which is currently 256 MiB. */
+    VkDeviceSize preferredLargeHeapBlockSize;
+    /// Custom CPU memory allocation callbacks. Optional.
+    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
+    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
+    /** Optional, can be null. */
+    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
+    /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
+
+    If not NULL, it must be a pointer to an array of
+    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
+    maximum number of bytes that can be allocated out of particular Vulkan memory
+    heap.
+
+    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
+    heap. This is also the default in case of `pHeapSizeLimit` = NULL.
+
+    If there is a limit defined for a heap:
+
+    - If user tries to allocate more memory from that heap using this allocator,
+      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+    - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
+      value of this limit will be reported instead when using vmaGetMemoryProperties().
+
+    Warning! Using this feature may not be equivalent to installing a GPU with
+    smaller amount of memory, because graphics driver doesn't necessarily fail new
+    allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
+    exceeded. It may return success and just silently migrate some device memory
+    blocks to system RAM. This driver behavior can also be controlled using
+    VK_AMD_memory_overallocation_behavior extension.
+    */
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
+
+    /** \brief Pointers to Vulkan functions. Can be null.
+
+    For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
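As a sketch of a typical allocator setup (assuming `instance`, `physicalDevice` and `device` were created by the application and the static Vulkan loader entry points are available; adjust to your own loading scheme):

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;

VmaAllocatorCreateInfo allocatorCreateInfo = {};
allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
allocatorCreateInfo.instance = instance;
allocatorCreateInfo.physicalDevice = physicalDevice;
allocatorCreateInfo.device = device;
allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorCreateInfo, &allocator);

// ... use the allocator for the lifetime of the device ...

vmaDestroyAllocator(allocator);
\endcode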
+ */ + const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions; + /** \brief Handle to Vulkan instance object. + + Starting from version 3.0.0 this member is no longer optional, it must be set! + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Optional. The highest version of Vulkan that the application is designed to use. + + It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. + The patch version number specified is ignored. Only the major and minor versions are considered. + It must be less or equal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`. + Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation. + Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. + */ + uint32_t vulkanApiVersion; +#if VMA_EXTERNAL_MEMORY + /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type. + + If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount` + elements, defining external memory handle types of particular Vulkan memory type, + to be passed using `VkExportMemoryAllocateInfoKHR`. + + Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type. + This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL. + */ + const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes; +#endif // #if VMA_EXTERNAL_MEMORY +} VmaAllocatorCreateInfo; + +/// Information about existing #VmaAllocator object. +typedef struct VmaAllocatorInfo +{ + /** \brief Handle to Vulkan instance object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::instance. + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Handle to Vulkan physical device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. + */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /** \brief Handle to Vulkan device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::device. + */ + VkDevice VMA_NOT_NULL device; +} VmaAllocatorInfo; + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. + +These are fast to calculate. +See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). +*/ +typedef struct VmaStatistics +{ + /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. + */ + uint32_t blockCount; + /** \brief Number of #VmaAllocation objects allocated. + + Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. + */ + uint32_t allocationCount; + /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. + + \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object + (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls + "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. + */ + VkDeviceSize blockBytes; + /** \brief Total number of bytes occupied by all #VmaAllocation objects. + + Always less or equal than `blockBytes`. 
+ Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan + but unused by any #VmaAllocation. + */ + VkDeviceSize allocationBytes; +} VmaStatistics; + +/** \brief More detailed statistics than #VmaStatistics. + +These are slower to calculate. Use for debugging purposes. +See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). + +Previous version of the statistics API provided averages, but they have been removed +because they can be easily calculated as: + +\code +VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; +VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; +VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; +\endcode +*/ +typedef struct VmaDetailedStatistics +{ + /// Basic statistics. + VmaStatistics statistics; + /// Number of free ranges of memory between allocations. + uint32_t unusedRangeCount; + /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. + VkDeviceSize allocationSizeMin; + /// Largest allocation size. 0 if there are 0 allocations. + VkDeviceSize allocationSizeMax; + /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMin; + /// Largest empty range size. 0 if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMax; +} VmaDetailedStatistics; + +/** \brief General statistics from current state of the Allocator - +total memory usage across all memory heaps and types. + +These are slower to calculate. Use for debugging purposes. +See function vmaCalculateStatistics(). +*/ +typedef struct VmaTotalStatistics +{ + VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; + VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; + VmaDetailedStatistics total; +} VmaTotalStatistics; + +/** \brief Statistics of current memory usage and available budget for a specific memory heap. + +These are fast to calculate. +See function vmaGetHeapBudgets(). +*/ +typedef struct VmaBudget +{ + /** \brief Statistics fetched from the library. + */ + VmaStatistics statistics; + /** \brief Estimated current memory usage of the program, in bytes. + + Fetched from system using VK_EXT_memory_budget extension if enabled. + + It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects + also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or + `VkDeviceMemory` blocks allocated outside of this library, if any. + */ + VkDeviceSize usage; + /** \brief Estimated amount of memory available to the program, in bytes. + + Fetched from system using VK_EXT_memory_budget extension if enabled. + + It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors + external to the program, decided by the operating system. + Difference `budget - usage` is the amount of additional memory that can probably + be allocated without problems. Exceeding the budget may result in various problems. + */ + VkDeviceSize budget; +} VmaBudget; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \brief Parameters of new #VmaAllocation. + +To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. +*/ +typedef struct VmaAllocationCreateInfo +{ + /// Use #VmaAllocationCreateFlagBits enum. + VmaAllocationCreateFlags flags; + /** \brief Intended usage of memory. 
+ + You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored. + */ + VmaMemoryUsage usage; + /** \brief Flags that must be set in a Memory Type chosen for an allocation. + + Leave 0 if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored.*/ + VkMemoryPropertyFlags requiredFlags; + /** \brief Flags that preferably should be set in a memory type chosen for an allocation. + + Set to 0 if no additional flags are preferred. \n + If `pool` is not null, this member is ignored. */ + VkMemoryPropertyFlags preferredFlags; + /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. + + Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if + it meets other requirements specified by this structure, with no further + restrictions on memory type index. \n + If `pool` is not null, this member is ignored. + */ + uint32_t memoryTypeBits; + /** \brief Pool that this allocation should be created in. + + Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: + `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). + + If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either + null or pointer to a null-terminated string. The string will be then copied to + internal buffer, so it doesn't need to be valid after allocation call. + */ + void* VMA_NULLABLE pUserData; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object + and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + Otherwise, it has the priority of a memory block where it is placed and this variable is ignored. + */ + float priority; +} VmaAllocationCreateInfo; + +/// Describes parameter of created #VmaPool. +typedef struct VmaPoolCreateInfo +{ + /** \brief Vulkan memory type index to allocate this pool from. + */ + uint32_t memoryTypeIndex; + /** \brief Use combination of #VmaPoolCreateFlagBits. + */ + VmaPoolCreateFlags flags; + /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. + + Specify nonzero to set explicit, constant size of memory blocks used by this + pool. + + Leave 0 to use default and let the library manage block sizes automatically. + Sizes of particular blocks may vary. + In this case, the pool will also support dedicated allocations. + */ + VkDeviceSize blockSize; + /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. + + Set to 0 to have no preallocated blocks and allow the pool be completely empty. + */ + size_t minBlockCount; + /** \brief Maximum number of blocks that can be allocated in this pool. Optional. + + Set to 0 to use default, which is `SIZE_MAX`, which means no limit. + + Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated + throughout whole lifetime of this pool. 
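A sketch of creating a custom pool from the structure above (the buffer parameters are placeholders; in practice use the parameters of the buffers you intend to place in the pool):

\code
// Pick a memory type for the kind of buffers this pool will hold.
VkBufferCreateInfo sampleBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
sampleBufInfo.size = 1024; // dummy size; only usage matters for type selection
sampleBufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo sampleAllocCreateInfo = {};
sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
    &sampleBufInfo, &sampleAllocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
// blockSize, minBlockCount, maxBlockCount left at 0 - library defaults apply.

VmaPool pool;
res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

// Allocate from it by setting VmaAllocationCreateInfo::pool; destroy with vmaDestroyPool().
\endcode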
+ */ + size_t maxBlockCount; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations. + + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object. + Otherwise, this variable is ignored. + */ + float priority; + /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0. + + Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. + It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough, + e.g. when doing interop with OpenGL. + */ + VkDeviceSize minAllocationAlignment; + /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional. + + Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`. + It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`. + Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool. + + Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`, + can be attached automatically by this library when using other, more convenient of its features. + */ + void* VMA_NULLABLE pMemoryAllocateNext; +} VmaPoolCreateInfo; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). +typedef struct VmaAllocationInfo +{ + /** \brief Memory type index that this allocation was allocated from. + + It never changes. + */ + uint32_t memoryType; + /** \brief Handle to Vulkan memory object. + + Same memory object can be shared by multiple allocations. + + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; + /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. + + You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function + vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, + not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation + and apply this offset automatically. + + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceSize offset; + /** \brief Size of this allocation, in bytes. + + It never changes. + + \note Allocation size returned in this variable may be greater than the size + requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the + allocation is accessible for operations on memory e.g. using a pointer after + mapping with vmaMapMemory(), but operations on the resource e.g. using + `vkCmdCopyBuffer` must be limited to the size of the resource. + */ + VkDeviceSize size; + /** \brief Pointer to the beginning of this allocation as mapped data. + + If the allocation hasn't been mapped using vmaMapMemory() and hasn't been + created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. 
+ + It can change after call to vmaMapMemory(), vmaUnmapMemory(). + It can also change after the allocation is moved during \ref defragmentation. + */ + void* VMA_NULLABLE pMappedData; + /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). + + It can change after call to vmaSetAllocationUserData() for this allocation. + */ + void* VMA_NULLABLE pUserData; + /** \brief Custom allocation name that was set with vmaSetAllocationName(). + + It can change after call to vmaSetAllocationName() for this allocation. + + Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with + additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. + */ + const char* VMA_NULLABLE pName; +} VmaAllocationInfo; + +/** \brief Parameters for defragmentation. + +To be used with function vmaBeginDefragmentation(). +*/ +typedef struct VmaDefragmentationInfo +{ + /// \brief Use combination of #VmaDefragmentationFlagBits. + VmaDefragmentationFlags flags; + /** \brief Custom pool to be defragmented. + + If null then default pools will undergo defragmentation process. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places. + + `0` means no limit. + */ + VkDeviceSize maxBytesPerPass; + /** \brief Maximum number of allocations that can be moved during single pass to a different place. + + `0` means no limit. + */ + uint32_t maxAllocationsPerPass; +} VmaDefragmentationInfo; + +/// Single move of an allocation to be done for defragmentation. +typedef struct VmaDefragmentationMove +{ + /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it. + VmaDefragmentationMoveOperation operation; + /// Allocation that should be moved. + VmaAllocation VMA_NOT_NULL srcAllocation; + /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`. + + \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass, + to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory(). + vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory. + */ + VmaAllocation VMA_NOT_NULL dstTmpAllocation; +} VmaDefragmentationMove; + +/** \brief Parameters for incremental defragmentation steps. + +To be used with function vmaBeginDefragmentationPass(). +*/ +typedef struct VmaDefragmentationPassMoveInfo +{ + /// Number of elements in the `pMoves` array. + uint32_t moveCount; + /** \brief Array of moves to be performed by the user in the current defragmentation pass. + + Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass(). + + For each element, you should: + + 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset. + 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`. + 3. Make sure these commands finished executing on the GPU. + 4. Destroy the old buffer/image. + + Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass(). + After this call, the allocation will point to the new place in memory. 
+ + Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. + + Alternatively, if you decide you want to completely remove the allocation: + + 1. Destroy its buffer/image. + 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. + + Then, after vmaEndDefragmentationPass() the allocation will be freed. + */ + VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves; +} VmaDefragmentationPassMoveInfo; + +/// Statistics returned for defragmentation process in function vmaEndDefragmentation(). +typedef struct VmaDefragmentationStats +{ + /// Total number of bytes that have been copied while moving allocations to different places. + VkDeviceSize bytesMoved; + /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects. + VkDeviceSize bytesFreed; + /// Number of allocations that have been moved to different places. + uint32_t allocationsMoved; + /// Number of empty `VkDeviceMemory` objects that have been released to the system. + uint32_t deviceMemoryBlocksFreed; +} VmaDefragmentationStats; + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). +typedef struct VmaVirtualBlockCreateInfo +{ + /** \brief Total size of the virtual block. + + Sizes can be expressed in bytes or any units you want as long as you are consistent in using them. + For example, if you allocate from some array of structures, 1 can mean single instance of entire structure. + */ + VkDeviceSize size; + + /** \brief Use combination of #VmaVirtualBlockCreateFlagBits. + */ + VmaVirtualBlockCreateFlags flags; + + /** \brief Custom CPU memory allocation callbacks. Optional. + + Optional, can be null. When specified, they will be used for all CPU-side memory allocations. + */ + const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; +} VmaVirtualBlockCreateInfo; + +/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate(). +typedef struct VmaVirtualAllocationCreateInfo +{ + /** \brief Size of the allocation. + + Cannot be zero. + */ + VkDeviceSize size; + /** \brief Required alignment of the allocation. Optional. + + Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset. + */ + VkDeviceSize alignment; + /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits. + */ + VmaVirtualAllocationCreateFlags flags; + /** \brief Custom pointer to be associated with the allocation. Optional. + + It can be any value and can be used for user-defined purposes. It can be fetched or changed later. + */ + void* VMA_NULLABLE pUserData; +} VmaVirtualAllocationCreateInfo; + +/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). +typedef struct VmaVirtualAllocationInfo +{ + /** \brief Offset of the allocation. + + Offset at which the allocation was made. + */ + VkDeviceSize offset; + /** \brief Size of the allocation. + + Same value as passed in VmaVirtualAllocationCreateInfo::size. + */ + VkDeviceSize size; + /** \brief Custom pointer associated with the allocation. + + Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData(). 
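The defragmentation structures above are used in a pass loop like the following sketch (the GPU copy step is only hinted at in a comment; see the Defragmentation chapter for the full protocol):

\code
VmaDefragmentationInfo defragInfo = {};
defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;

VmaDefragmentationContext defragCtx;
VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);

for (;;)
{
    VmaDefragmentationPassMoveInfo pass = {};
    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
    if (res == VK_SUCCESS)
        break; // nothing left to move
    // res == VK_INCOMPLETE: for each pass.pMoves[i], recreate the buffer/image at
    // dstTmpAllocation, copy its contents on the GPU and wait for the copy to finish.
    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
    if (res == VK_SUCCESS)
        break;
}

VmaDefragmentationStats stats = {};
vmaEndDefragmentation(allocator, defragCtx, &stats);
\endcode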
+ */ + void* VMA_NULLABLE pUserData; +} VmaVirtualAllocationInfo; + +/** @} */ + +#endif // _VMA_DATA_TYPES_DECLARATIONS + +#ifndef _VMA_FUNCTION_HEADERS + +/** +\addtogroup group_init +@{ +*/ + +/// Creates #VmaAllocator object. +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator); + +/// Destroys allocator object. +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator VMA_NULLABLE allocator); + +/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. + +It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to +`VkPhysicalDevice`, `VkDevice` etc. every time using this function. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo); + +/** +PhysicalDeviceProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties); + +/** +PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. +You can access it here, without fetching it again on your own. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); + +/** +\brief Given Memory Type Index, returns Property Flags of this memory type. + +This is just a convenience function. Same information can be obtained using +vmaGetMemoryProperties(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); + +/** \brief Sets index of the current frame. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t frameIndex); + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Retrieves statistics from current state of the Allocator. + +This function is called "calculate" not "get" because it has to traverse all +internal data structures, so it may be quite slow. Use it for debugging purposes. +For faster but more brief statistics suitable to be called every frame or every allocation, +use vmaGetHeapBudgets(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaTotalStatistics* VMA_NOT_NULL pStats); + +/** \brief Retrieves information about current memory usage and budget for all memory heaps. + +\param allocator +\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. + +This function is called "get" not "calculate" because it is very fast, suitable to be called +every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). + +Note that when using allocator from multiple threads, returned information may immediately +become outdated. 
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( + VmaAllocator VMA_NOT_NULL allocator, + VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets); + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** +\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. + +This algorithm tries to find a memory type that: + +- Is allowed by memoryTypeBits. +- Contains all the flags from pAllocationCreateInfo->requiredFlags. +- Matches intended usage. +- Has as many flags from pAllocationCreateInfo->preferredFlags as possible. + +\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result +from this function or any other allocating function probably means that your +device doesn't support any memory type with requested features for the specific +type of resource you want to use it for. Please check parameters of your +resource, like image layout (OPTIMAL versus LINEAR) or mip level count. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy buffer that never has memory bound. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy image that never has memory bound. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** \brief Allocates Vulkan device memory and creates #VmaPool object. + +\param allocator Allocator object. +\param pCreateInfo Parameters of pool to create. +\param[out] pPool Handle to created pool. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator VMA_NOT_NULL allocator, + const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool); + +/** \brief Destroys #VmaPool object and frees Vulkan device memory. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NULLABLE pool); + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Retrieves statistics of existing #VmaPool object. + +\param allocator Allocator object. +\param pool Pool object. +\param[out] pPoolStats Statistics of specified pool. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaStatistics* VMA_NOT_NULL pPoolStats); + +/** \brief Retrieves detailed statistics of existing #VmaPool object. + +\param allocator Allocator object. +\param pool Pool object. +\param[out] pPoolStats Statistics of specified pool. 
+*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaDetailedStatistics* VMA_NOT_NULL pPoolStats); + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool); + +/** \brief Retrieves name of a custom pool. + +After the call `ppName` is either null or points to an internally-owned null-terminated string +containing name of the pool that was previously set. The pointer becomes invalid when the pool is +destroyed or its name is changed using vmaSetPoolName(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE* VMA_NOT_NULL ppName); + +/** \brief Sets name of a custom pool. + +`pName` can be either null or pointer to a null-terminated string with new name for the pool. +Function makes internal copy of the string, so it can be changed or freed immediately after this call. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char* VMA_NULLABLE pName); + +/** \brief General purpose memory allocation. + +\param allocator +\param pVkMemoryRequirements +\param pCreateInfo +\param[out] pAllocation Handle to allocated memory. +\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + +It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), +vmaCreateBuffer(), vmaCreateImage() instead whenever possible. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator VMA_NOT_NULL allocator, + const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements, + const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, + VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + +/** \brief General purpose memory allocation for multiple allocation objects at once. + +\param allocator Allocator object. +\param pVkMemoryRequirements Memory requirements for each allocation. +\param pCreateInfo Creation parameters for each allocation. +\param allocationCount Number of allocations to make. +\param[out] pAllocations Pointer to array that will be filled with handles to created allocations. +\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. 
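A sketch of the general-purpose path described above, for a buffer created directly with Vulkan (assuming existing `device` and `allocator`; vmaCreateBuffer() is usually the more convenient route):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VkBuffer buf;
VkResult res = vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);

VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buf, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;

VmaAllocation alloc;
res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, nullptr);
res = vmaBindBufferMemory(allocator, alloc, buf);

// vmaFreeMemory(allocator, alloc) and vkDestroyBuffer(device, buf, nullptr) when done.
\endcode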
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
+It is just a general purpose allocation function able to make multiple allocations at once.
+It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
+
+All allocations are made using the same parameters. All of them are created out of the same memory pool and type.
+If any allocation fails, all allocations already made within this function call are also freed, so that when
+returned result is not `VK_SUCCESS`, `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
+    size_t allocationCount,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
+    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkBuffer`.
+
+\param allocator
+\param buffer
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateBuffer().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkImage`.
+
+\param allocator
+\param image
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateImage().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+
+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VmaAllocation VMA_NULLABLE allocation);
+
+/** \brief Frees memory and destroys multiple allocations.
+
+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+vmaAllocateMemoryPages() and other functions.
+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
+
+Allocations in `pAllocations` array can come from any memory pools and types.
+Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+    VmaAllocator VMA_NOT_NULL allocator,
+    size_t allocationCount,
+    const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
+
+/** \brief Returns current information about specified allocation.
+
+Current parameters of given allocation are returned in `pAllocationInfo`.
+
+This function doesn't lock any mutex, so it should be quite efficient;
+still, you should avoid calling it too often.
+You can retrieve the same VmaAllocationInfo structure while creating your resource, from function
+vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
+(e.g. due to defragmentation).
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
+
+/** \brief Sets pUserData in given allocation to new value.
+
+The value of pointer `pUserData` is copied to allocation's `pUserData`.
+It is opaque, so you can use it however you want - e.g.
+as a pointer, ordinal number or some handle to your own data.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    void* VMA_NULLABLE pUserData);
+
+/** \brief Sets pName in given allocation to new value.
+
+`pName` must be either null, or pointer to a null-terminated string. The function
+makes local copy of the string and sets it as allocation's `pName`. String
+passed as pName doesn't need to be valid for whole lifetime of the allocation -
+you can free it after this call. String previously pointed by allocation's
+`pName` is freed from memory.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    const char* VMA_NULLABLE pName);
+
+/**
+\brief Given an allocation, returns Property Flags of its memory type.
+
+This is just a convenience function. Same information can be obtained using
+vmaGetAllocationInfo() + vmaGetMemoryProperties().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
+
+/** \brief Maps memory represented by given allocation and returns pointer to it.
+
+Maps memory represented by given allocation to make it accessible to CPU code.
+When succeeded, `*ppData` contains pointer to first byte of this memory.
+
+\warning
+If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is
+correctly offset to the beginning of the region assigned to this particular allocation.
+Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
+You should not add VmaAllocationInfo::offset to it!
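vmaGetAllocationMemoryProperties() above is the typical way to implement the fallback promised by #VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT. A sketch (assuming `allocator`, `alloc`, `srcData` and `srcDataSize` exist in the application):

\code
VkMemoryPropertyFlags memFlags;
vmaGetAllocationMemoryProperties(allocator, alloc, &memFlags);

if ((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
{
    // The allocation ended up host-visible: write it directly.
    void* mapped = nullptr;
    VkResult res = vmaMapMemory(allocator, alloc, &mapped);
    memcpy(mapped, srcData, srcDataSize);
    vmaUnmapMemory(allocator, alloc);
    // Flush if the memory type is not HOST_COHERENT - see vmaFlushAllocation() below.
}
else
{
    // Not host-visible: upload through a staging buffer and an explicit GPU transfer instead.
}
\endcode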
+
+Mapping is internally reference-counted and synchronized, so although the raw Vulkan
+function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
+multiple times simultaneously, it is safe to call this function on allocations
+assigned to the same memory block. Actual Vulkan memory will be mapped on first
+mapping and unmapped on last unmapping.
+
+If the function succeeded, you must call vmaUnmapMemory() to unmap the
+allocation when mapping is no longer needed or before freeing the allocation, at
+the latest.
+
+It is also safe to call this function multiple times on the same allocation. You
+must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
+
+It is also safe to call this function on allocation created with
+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+You must still call vmaUnmapMemory() same number of times as you called
+vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
+"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
+
+This function fails when used on allocation made in memory type that is not
+`HOST_VISIBLE`.
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    void* VMA_NULLABLE* VMA_NOT_NULL ppData);
+
+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+
+For details, see description of vmaMapMemory().
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Flushes memory of given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
+Unmap operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass allocation's offset as `offset`!!!
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Invalidates memory of given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
+It needs to be called before reading from mapped memory for memory types that are not `HOST_COHERENT`.
+The map operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of given `allocation`.
+If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass the allocation's offset as `offset`!
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+it is called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Flushes memory of given set of allocations.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaFlushAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t allocationCount,
+    const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Invalidates memory of given set of allocations.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaInvalidateAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
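+
+For illustration, a sketch of invalidating two mapped allocations in full before reading them
+on the CPU (assuming `allocator`, `allocA` and `allocB` already exist in your code):
+
+\code
+VmaAllocation allocs[2] = { allocA, allocB };
+// Passing null offsets and sizes means: all offsets are 0, all sizes are VK_WHOLE_SIZE.
+VkResult res = vmaInvalidateAllocations(allocator, 2, allocs, nullptr, nullptr);
+\endcode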
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t allocationCount, + const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, + const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, + const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes); + +/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions. + +\param allocator +\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits); + +/** \brief Begins defragmentation process. + +\param allocator Allocator object. +\param pInfo Structure filled with parameters of defragmentation. +\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. +\returns +- `VK_SUCCESS` if defragmentation can begin. +- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported. + +For more information about defragmentation, see documentation chapter: +[Defragmentation](@ref defragmentation). +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + const VmaDefragmentationInfo* VMA_NOT_NULL pInfo, + VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext); + +/** \brief Ends defragmentation process. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pStats Optional stats for the defragmentation. Can be null. + +Use this function to finish defragmentation started by vmaBeginDefragmentation(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationStats* VMA_NULLABLE pStats); + +/** \brief Starts single defragmentation pass. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param[out] pPassInfo Computed informations for current pass. +\returns +- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation. +- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(), + and then preferably try another pass with vmaBeginDefragmentationPass(). 
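+
+For illustration, the overall pass loop could be sketched like this (assuming `allocator` and a
+`defragCtx` obtained from vmaBeginDefragmentation() exist in your code; how the data of each
+pending move is copied, e.g. via `vkCmdCopyBuffer`, is up to you):
+
+\code
+for(;;)
+{
+    VmaDefragmentationPassMoveInfo pass;
+    VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break; // No more moves - defragmentation can be ended.
+    // res == VK_INCOMPLETE: perform the pending moves listed in `pass`, then commit them.
+    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break;
+}
+vmaEndDefragmentation(allocator, defragCtx, nullptr);
+\endcode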
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); + +/** \brief Ends single defragmentation pass. + +\param allocator Allocator object. +\param context Context object that has been created by vmaBeginDefragmentation(). +\param pPassInfo Computed informations for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you. + +Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible. + +Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`. +After this call: + +- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY + (which is the default) will be pointing to the new destination place. +- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY + will be freed. + +If no more moves are possible you can end whole defragmentation. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); + +/** \brief Binds buffer to allocation. + +Binds specified buffer to region of memory represented by specified allocation. +Gets `VkDeviceMemory` handle and offset from the allocation. +If you want to create a buffer, allocate memory for it and bind them together separately, +you should use this function for binding instead of standard `vkBindBufferMemory()`, +because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple +allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously +(which is illegal in Vulkan). + +It is recommended to use function vmaCreateBuffer() instead of this one. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer); + +/** \brief Binds buffer to allocation with additional parameters. + +\param allocator +\param allocation +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. +\param buffer +\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null. + +This function is similar to vmaBindBufferMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, + const void* VMA_NULLABLE pNext); + +/** \brief Binds image to allocation. + +Binds specified image to region of memory represented by specified allocation. +Gets `VkDeviceMemory` handle and offset from the allocation. 
+If you want to create an image, allocate memory for it and bind them together separately, +you should use this function for binding instead of standard `vkBindImageMemory()`, +because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple +allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously +(which is illegal in Vulkan). + +It is recommended to use function vmaCreateImage() instead of this one. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); + +/** \brief Binds image to allocation with additional parameters. + +\param allocator +\param allocation +\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. +\param image +\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. + +This function is similar to vmaBindImageMemory(), but it provides additional parameters. + +If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag +or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, + const void* VMA_NULLABLE pNext); + +/** \brief Creates a new `VkBuffer`, allocates and binds memory for it. + +\param allocator +\param pBufferCreateInfo +\param pAllocationCreateInfo +\param[out] pBuffer Buffer that was created. +\param[out] pAllocation Allocation that was created. +\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + +This function automatically: + +-# Creates buffer. +-# Allocates appropriate memory for it. +-# Binds the buffer with the memory. + +If any of these operations fail, buffer and allocation are not created, +returned value is negative error code, `*pBuffer` and `*pAllocation` are null. + +If the function succeeded, you must destroy both buffer and allocation when you +no longer need them using either convenience function vmaDestroyBuffer() or +separately, using `vkDestroyBuffer()` and vmaFreeMemory(). + +If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, +VK_KHR_dedicated_allocation extension is used internally to query driver whether +it requires or prefers the new buffer to have dedicated allocation. If yes, +and if dedicated allocation is possible +(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated +allocation for this buffer, just like when using +#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + +\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer, +although recommended as a good practice, is out of scope of this library and could be implemented +by the user as a higher-level logic on top of VMA. 
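+
+For illustration, a minimal call might look like this (a sketch; `allocator` is assumed to have
+been created earlier):
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+// ... use the buffer ...
+vmaDestroyBuffer(allocator, buf, alloc);
+\endcode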
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Creates a buffer with additional minimum alignment.
+
+Similar to vmaCreateBuffer() but provides an additional parameter `minAlignment`, which allows you to specify
+a custom minimum alignment to be used when placing the buffer inside a larger memory block. This may be needed
+e.g. for interop with OpenGL.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkDeviceSize minAlignment,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Creates a new `VkBuffer` and binds already created memory to it.
+
+\param allocator
+\param allocation Allocation that provides the memory to be bound to the new buffer.
+\param pBufferCreateInfo
+\param[out] pBuffer Buffer that was created.
+
+This function automatically:
+
+-# Creates buffer.
+-# Binds the buffer with the supplied memory.
+
+If any of these operations fail, the buffer is not created,
+the returned value is a negative error code and `*pBuffer` is null.
+
+If the function succeeded, you must destroy the buffer when you
+no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
+allocation you can use the convenience function vmaDestroyBuffer().
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
+
+/** \brief Destroys Vulkan buffer and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyBuffer(device, buffer, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as buffer and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
+    VmaAllocation VMA_NULLABLE allocation);
+
+/// Function similar to vmaCreateBuffer().
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/// Function similar to vmaCreateAliasingBuffer().
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
+
+/** \brief Destroys Vulkan image and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyImage(device, image, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as image and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
+    VmaAllocation VMA_NULLABLE allocation);
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/** \brief Creates new #VmaVirtualBlock object.
+
+\param pCreateInfo Parameters for creation.
+\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
+    const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
+
+/** \brief Destroys #VmaVirtualBlock object.
+
+Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
+You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
+if you are sure this is what you want. If you do neither, an assert is called.
+
+If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
+don't forget to free them.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
+    VmaVirtualBlock VMA_NULLABLE virtualBlock);
+
+/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
+*/
+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+
+/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
+
+/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
+
+If the allocation fails because there is not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
+(even though the function never allocates actual GPU memory).
+`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
+
+\param virtualBlock Virtual block
+\param pCreateInfo Parameters for the allocation
+\param[out] pAllocation Returned handle of the new allocation
+\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
+    VkDeviceSize* VMA_NULLABLE pOffset);
+
+/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
+
+It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
+
+/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
+
+You must either call this function or free each virtual allocation individually with vmaVirtualFree()
+before destroying a virtual block. Otherwise, an assert is called.
+ +If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`, +don't forget to free it as well. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock( + VmaVirtualBlock VMA_NOT_NULL virtualBlock); + +/** \brief Changes custom pointer associated with given virtual allocation. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, + void* VMA_NULLABLE pUserData); + +/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock. + +This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats); + +/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock. + +This function is slow to call. Use for debugging purposes. +For less detailed statistics, see vmaGetVirtualBlockStatistics(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics* VMA_NOT_NULL pStats); + +/** @} */ + +#if VMA_STATS_STRING_ENABLED +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock. +\param virtualBlock Virtual block. +\param[out] ppStatsString Returned string. +\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces. + +Returned string must be freed using vmaFreeVirtualBlockStatsString(). +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, + VkBool32 detailedMap); + +/// Frees a string returned by vmaBuildVirtualBlockStatsString(). +VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE pStatsString); + +/** \brief Builds and returns statistics as a null-terminated string in JSON format. +\param allocator +\param[out] ppStatsString Must be freed using vmaFreeStatsString() function. +\param detailedMap +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator VMA_NOT_NULL allocator, + char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, + VkBool32 detailedMap); + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator VMA_NOT_NULL allocator, + char* VMA_NULLABLE pStatsString); + +/** @} */ + +#endif // VMA_STATS_STRING_ENABLED + +#endif // _VMA_FUNCTION_HEADERS + +#ifdef __cplusplus +} +#endif + +#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H + +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION +// +//////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// + +// For Visual Studio IntelliSense. 
+#if defined(__cplusplus) && defined(__INTELLISENSE__)
+#define VMA_IMPLEMENTATION
+#endif
+
+#ifdef VMA_IMPLEMENTATION
+#undef VMA_IMPLEMENTATION
+
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+#include <type_traits>
+
+#ifdef _MSC_VER
+    #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
+#endif
+#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+    #include <bit> // For std::popcount
+#endif
+
+/*******************************************************************************
+CONFIGURATION SECTION
+
+Define some of these macros before each #include of this header or change them
+here if you need other than the default behavior depending on your environment.
+*/
+#ifndef _VMA_CONFIGURATION
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+*/
+#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
+    #define VMA_STATIC_VULKAN_FUNCTIONS 1
+#endif
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
+
+To use this feature in new versions of VMA you now have to pass
+VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
+VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
+*/
+#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
+    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+#endif
+
+#ifndef VMA_USE_STL_SHARED_MUTEX
+    // Compiler conforms to C++17.
+    #if __cplusplus >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    // Visual Studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
+    // Otherwise it is always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
+    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    #else
+        #define VMA_USE_STL_SHARED_MUTEX 0
+    #endif
+#endif
+
+/*
+Define this macro to include custom header files without having to edit this file directly, e.g.:
+
+    // Inside of "my_vma_configuration_user_includes.h":
+
+    #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
+    #include "my_custom_min.h" // for my_custom_min
+    #include <algorithm>
+    #include <mutex>
+
+    // Inside a different file, which includes "vk_mem_alloc.h":
+
+    #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
+    #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
+    #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
+    #include "vk_mem_alloc.h"
+    ...
+
+The following headers are used in this CONFIGURATION section only, so feel free to
+remove them if not needed.
+*/
+#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
+    #include <cassert> // for assert
+    #include <algorithm> // for min, max
+    #include <mutex>
+#else
+    #include VMA_CONFIGURATION_USER_INCLUDES_H
+#endif
+
+#ifndef VMA_NULL
+    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+ #define VMA_NULL nullptr +#endif + +#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16) +#include +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + // alignment must be >= sizeof(void*) + if(alignment < sizeof(void*)) + { + alignment = sizeof(void*); + } + + return memalign(alignment, size); +} +#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC)) +#include + +#if defined(__APPLE__) +#include +#endif + +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4) + // Therefore, for now disable this specific exception until a proper solution is found. + //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0)) + //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0 + // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only + // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds + // // MAC_OS_X_VERSION_10_16), even though the function is marked + // // availabe for 10.15. That is why the preprocessor checks for 10.16 but + // // the __builtin_available checks for 10.15. + // // People who use C++17 could call aligned_alloc with the 10.15 SDK already. + // if (__builtin_available(macOS 10.15, iOS 13, *)) + // return aligned_alloc(alignment, size); + //#endif + //#endif + + // alignment must be >= sizeof(void*) + if(alignment < sizeof(void*)) + { + alignment = sizeof(void*); + } + + void *pointer; + if(posix_memalign(&pointer, alignment, size) == 0) + return pointer; + return VMA_NULL; +} +#elif defined(_WIN32) +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return _aligned_malloc(size, alignment); +} +#else +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return aligned_alloc(alignment, size); +} +#endif + +#if defined(_WIN32) +static void vma_aligned_free(void* ptr) +{ + _aligned_free(ptr); +} +#else +static void vma_aligned_free(void* VMA_NULLABLE ptr) +{ + free(ptr); +} +#endif + +// If your compiler is not compatible with C++11 and definition of +// aligned_alloc() function is missing, uncommeting following line may help: + +//#include + +// Normal assert to check for programmer's errors, especially in Debug configuration. +#ifndef VMA_ASSERT + #ifdef NDEBUG + #define VMA_ASSERT(expr) + #else + #define VMA_ASSERT(expr) assert(expr) + #endif +#endif + +// Assert that will be called very often, like inside data structures e.g. operator[]. +// Making it non-empty can make program slow. 
+#ifndef VMA_HEAVY_ASSERT + #ifdef NDEBUG + #define VMA_HEAVY_ASSERT(expr) + #else + #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr) + #endif +#endif + +#ifndef VMA_ALIGN_OF + #define VMA_ALIGN_OF(type) (__alignof(type)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_MALLOC + #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_FREE + // VMA_SYSTEM_FREE is the old name, but might have been defined by the user + #if defined(VMA_SYSTEM_FREE) + #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr) + #else + #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr) + #endif +#endif + +#ifndef VMA_COUNT_BITS_SET + // Returns number of bits set to 1 in (v) + #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) +#endif + +#ifndef VMA_BITSCAN_LSB + // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) +#endif + +#ifndef VMA_BITSCAN_MSB + // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) +#endif + +#ifndef VMA_MIN + #define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) +#endif + +#ifndef VMA_MAX + #define VMA_MAX(v1, v2) ((std::max)((v1), (v2))) +#endif + +#ifndef VMA_SWAP + #define VMA_SWAP(v1, v2) std::swap((v1), (v2)) +#endif + +#ifndef VMA_SORT + #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) +#endif + +#ifndef VMA_DEBUG_LOG + #define VMA_DEBUG_LOG(format, ...) + /* + #define VMA_DEBUG_LOG(format, ...) do { \ + printf(format, __VA_ARGS__); \ + printf("\n"); \ + } while(false) + */ +#endif + +// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString. +#if VMA_STATS_STRING_ENABLED + static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num) + { + snprintf(outStr, strLen, "%u", static_cast(num)); + } + static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num) + { + snprintf(outStr, strLen, "%llu", static_cast(num)); + } + static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr) + { + snprintf(outStr, strLen, "%p", ptr); + } +#endif + +#ifndef VMA_MUTEX + class VmaMutex + { + public: + void Lock() { m_Mutex.lock(); } + void Unlock() { m_Mutex.unlock(); } + bool TryLock() { return m_Mutex.try_lock(); } + private: + std::mutex m_Mutex; + }; + #define VMA_MUTEX VmaMutex +#endif + +// Read-write mutex, where "read" is shared access, "write" is exclusive access. +#ifndef VMA_RW_MUTEX + #if VMA_USE_STL_SHARED_MUTEX + // Use std::shared_mutex from C++17. + #include + class VmaRWMutex + { + public: + void LockRead() { m_Mutex.lock_shared(); } + void UnlockRead() { m_Mutex.unlock_shared(); } + bool TryLockRead() { return m_Mutex.try_lock_shared(); } + void LockWrite() { m_Mutex.lock(); } + void UnlockWrite() { m_Mutex.unlock(); } + bool TryLockWrite() { return m_Mutex.try_lock(); } + private: + std::shared_mutex m_Mutex; + }; + #define VMA_RW_MUTEX VmaRWMutex + #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600 + // Use SRWLOCK from WinAPI. + // Minimum supported client = Windows Vista, server = Windows Server 2008. 
+ class VmaRWMutex + { + public: + VmaRWMutex() { InitializeSRWLock(&m_Lock); } + void LockRead() { AcquireSRWLockShared(&m_Lock); } + void UnlockRead() { ReleaseSRWLockShared(&m_Lock); } + bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; } + void LockWrite() { AcquireSRWLockExclusive(&m_Lock); } + void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); } + bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; } + private: + SRWLOCK m_Lock; + }; + #define VMA_RW_MUTEX VmaRWMutex + #else + // Less efficient fallback: Use normal mutex. + class VmaRWMutex + { + public: + void LockRead() { m_Mutex.Lock(); } + void UnlockRead() { m_Mutex.Unlock(); } + bool TryLockRead() { return m_Mutex.TryLock(); } + void LockWrite() { m_Mutex.Lock(); } + void UnlockWrite() { m_Mutex.Unlock(); } + bool TryLockWrite() { return m_Mutex.TryLock(); } + private: + VMA_MUTEX m_Mutex; + }; + #define VMA_RW_MUTEX VmaRWMutex + #endif // #if VMA_USE_STL_SHARED_MUTEX +#endif // #ifndef VMA_RW_MUTEX + +/* +If providing your own implementation, you need to implement a subset of std::atomic. +*/ +#ifndef VMA_ATOMIC_UINT32 + #include + #define VMA_ATOMIC_UINT32 std::atomic +#endif + +#ifndef VMA_ATOMIC_UINT64 + #include + #define VMA_ATOMIC_UINT64 std::atomic +#endif + +#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY + /** + Every allocation will have its own memory block. + Define to 1 for debugging purposes only. + */ + #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0) +#endif + +#ifndef VMA_MIN_ALIGNMENT + /** + Minimum alignment of all allocations, in bytes. + Set to more than 1 for debugging purposes. Must be power of two. + */ + #ifdef VMA_DEBUG_ALIGNMENT // Old name + #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT + #else + #define VMA_MIN_ALIGNMENT (1) + #endif +#endif + +#ifndef VMA_DEBUG_MARGIN + /** + Minimum margin after every allocation, in bytes. + Set nonzero for debugging purposes only. + */ + #define VMA_DEBUG_MARGIN (0) +#endif + +#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS + /** + Define this macro to 1 to automatically fill new allocations and destroyed + allocations with some bit pattern. + */ + #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) +#endif + +#ifndef VMA_DEBUG_DETECT_CORRUPTION + /** + Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to + enable writing magic value to the margin after every allocation and + validating it, so that memory corruptions (out-of-bounds writes) are detected. + */ + #define VMA_DEBUG_DETECT_CORRUPTION (0) +#endif + +#ifndef VMA_DEBUG_GLOBAL_MUTEX + /** + Set this to 1 for debugging purposes only, to enable single mutex protecting all + entry calls to the library. Can be useful for debugging multithreading issues. + */ + #define VMA_DEBUG_GLOBAL_MUTEX (0) +#endif + +#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY + /** + Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. + Set to more than 1 for debugging purposes only. Must be power of two. + */ + #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) +#endif + +#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT + /* + Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount + and return error instead of leaving up to Vulkan implementation what to do in such cases. + */ + #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0) +#endif + +#ifndef VMA_SMALL_HEAP_MAX_SIZE + /// Maximum size of a memory heap in Vulkan to consider it "small". 
+ #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) +#endif + +#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE + /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. + #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) +#endif + +/* +Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called +or a persistently mapped allocation is created and destroyed several times in a row. +It keeps additional +1 mapping of a device memory block to prevent calling actual +vkMapMemory/vkUnmapMemory too many times, which may improve performance and help +tools like RenderDOc. +*/ +#ifndef VMA_MAPPING_HYSTERESIS_ENABLED + #define VMA_MAPPING_HYSTERESIS_ENABLED 1 +#endif + +#ifndef VMA_CLASS_NO_COPY + #define VMA_CLASS_NO_COPY(className) \ + private: \ + className(const className&) = delete; \ + className& operator=(const className&) = delete; +#endif + +#define VMA_VALIDATE(cond) do { if(!(cond)) { \ + VMA_ASSERT(0 && "Validation failed: " #cond); \ + return false; \ + } } while(false) + +/******************************************************************************* +END OF CONFIGURATION +*/ +#endif // _VMA_CONFIGURATION + + +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF; +// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F. +static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; + +// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants. +static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040; +static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080; +static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000; +static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200; +static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000; +static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u; +static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32; +static const uint32_t VMA_VENDOR_ID_AMD = 4098; + +// This one is tricky. Vulkan specification defines this code as available since +// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131. +// See pull request #207. +#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13) + + +#if VMA_STATS_STRING_ENABLED +// Correspond to values of enum VmaSuballocationType. +static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = +{ + "FREE", + "UNKNOWN", + "BUFFER", + "IMAGE_UNKNOWN", + "IMAGE_LINEAR", + "IMAGE_OPTIMAL", +}; +#endif + +static VkAllocationCallbacks VmaEmptyAllocationCallbacks = + { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL }; + + +#ifndef _VMA_ENUM_DECLARATIONS + +enum VmaSuballocationType +{ + VMA_SUBALLOCATION_TYPE_FREE = 0, + VMA_SUBALLOCATION_TYPE_UNKNOWN = 1, + VMA_SUBALLOCATION_TYPE_BUFFER = 2, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3, + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4, + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5, + VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF +}; + +enum VMA_CACHE_OPERATION +{ + VMA_CACHE_FLUSH, + VMA_CACHE_INVALIDATE +}; + +enum class VmaAllocationRequestType +{ + Normal, + TLSF, + // Used by "Linear" algorithm. + UpperAddress, + EndOf1st, + EndOf2nd, +}; + +#endif // _VMA_ENUM_DECLARATIONS + +#ifndef _VMA_FORWARD_DECLARATIONS +// Opaque handle used by allocation algorithms to identify single allocation in any conforming way. 
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle); + +struct VmaMutexLock; +struct VmaMutexLockRead; +struct VmaMutexLockWrite; + +template +struct AtomicTransactionalIncrement; + +template +struct VmaStlAllocator; + +template +class VmaVector; + +template +class VmaSmallVector; + +template +class VmaPoolAllocator; + +template +struct VmaListItem; + +template +class VmaRawList; + +template +class VmaList; + +template +class VmaIntrusiveLinkedList; + +// Unused in this version +#if 0 +template +struct VmaPair; +template +struct VmaPairFirstLess; + +template +class VmaMap; +#endif + +#if VMA_STATS_STRING_ENABLED +class VmaStringBuilder; +class VmaJsonWriter; +#endif + +class VmaDeviceMemoryBlock; + +struct VmaDedicatedAllocationListItemTraits; +class VmaDedicatedAllocationList; + +struct VmaSuballocation; +struct VmaSuballocationOffsetLess; +struct VmaSuballocationOffsetGreater; +struct VmaSuballocationItemSizeLess; + +typedef VmaList> VmaSuballocationList; + +struct VmaAllocationRequest; + +class VmaBlockMetadata; +class VmaBlockMetadata_Linear; +class VmaBlockMetadata_TLSF; + +class VmaBlockVector; + +struct VmaPoolListItemTraits; + +struct VmaCurrentBudgetData; + +class VmaAllocationObjectAllocator; + +#endif // _VMA_FORWARD_DECLARATIONS + + +#ifndef _VMA_FUNCTIONS + +/* +Returns number of bits set to 1 in (v). + +On specific platforms and compilers you can use instrinsics like: + +Visual Studio: + return __popcnt(v); +GCC, Clang: + return static_cast(__builtin_popcount(v)); + +Define macro VMA_COUNT_BITS_SET to provide your optimized implementation. +But you need to check in runtime whether user's CPU supports these, as some old processors don't. +*/ +static inline uint32_t VmaCountBitsSet(uint32_t v) +{ +#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20 + return std::popcount(v); +#else + uint32_t c = v - ((v >> 1) & 0x55555555); + c = ((c >> 2) & 0x33333333) + (c & 0x33333333); + c = ((c >> 4) + c) & 0x0F0F0F0F; + c = ((c >> 8) + c) & 0x00FF00FF; + c = ((c >> 16) + c) & 0x0000FFFF; + return c; +#endif +} + +static inline uint8_t VmaBitScanLSB(uint64_t mask) +{ +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long pos; + if (_BitScanForward64(&pos, mask)) + return static_cast(pos); + return UINT8_MAX; +#elif defined __GNUC__ || defined __clang__ + return static_cast(__builtin_ffsll(mask)) - 1U; +#else + uint8_t pos = 0; + uint64_t bit = 1; + do + { + if (mask & bit) + return pos; + bit <<= 1; + } while (pos++ < 63); + return UINT8_MAX; +#endif +} + +static inline uint8_t VmaBitScanLSB(uint32_t mask) +{ +#ifdef _MSC_VER + unsigned long pos; + if (_BitScanForward(&pos, mask)) + return static_cast(pos); + return UINT8_MAX; +#elif defined __GNUC__ || defined __clang__ + return static_cast(__builtin_ffs(mask)) - 1U; +#else + uint8_t pos = 0; + uint32_t bit = 1; + do + { + if (mask & bit) + return pos; + bit <<= 1; + } while (pos++ < 31); + return UINT8_MAX; +#endif +} + +static inline uint8_t VmaBitScanMSB(uint64_t mask) +{ +#if defined(_MSC_VER) && defined(_WIN64) + unsigned long pos; + if (_BitScanReverse64(&pos, mask)) + return static_cast(pos); +#elif defined __GNUC__ || defined __clang__ + if (mask) + return 63 - static_cast(__builtin_clzll(mask)); +#else + uint8_t pos = 63; + uint64_t bit = 1ULL << 63; + do + { + if (mask & bit) + return pos; + bit >>= 1; + } while (pos-- > 0); +#endif + return UINT8_MAX; +} + +static inline uint8_t VmaBitScanMSB(uint32_t mask) +{ +#ifdef _MSC_VER + unsigned long pos; + if (_BitScanReverse(&pos, mask)) + return static_cast(pos); +#elif 
defined __GNUC__ || defined __clang__ + if (mask) + return 31 - static_cast(__builtin_clz(mask)); +#else + uint8_t pos = 31; + uint32_t bit = 1UL << 31; + do + { + if (mask & bit) + return pos; + bit >>= 1; + } while (pos-- > 0); +#endif + return UINT8_MAX; +} + +/* +Returns true if given number is a power of two. +T must be unsigned integer number or signed integer but always nonnegative. +For 0 returns true. +*/ +template +inline bool VmaIsPow2(T x) +{ + return (x & (x - 1)) == 0; +} + +// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16. +// Use types like uint32_t, uint64_t as T. +template +static inline T VmaAlignUp(T val, T alignment) +{ + VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); + return (val + alignment - 1) & ~(alignment - 1); +} + +// Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8. +// Use types like uint32_t, uint64_t as T. +template +static inline T VmaAlignDown(T val, T alignment) +{ + VMA_HEAVY_ASSERT(VmaIsPow2(alignment)); + return val & ~(alignment - 1); +} + +// Division with mathematical rounding to nearest number. +template +static inline T VmaRoundDiv(T x, T y) +{ + return (x + (y / (T)2)) / y; +} + +// Divide by 'y' and round up to nearest integer. +template +static inline T VmaDivideRoundingUp(T x, T y) +{ + return (x + y - (T)1) / y; +} + +// Returns smallest power of 2 greater or equal to v. +static inline uint32_t VmaNextPow2(uint32_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v++; + return v; +} + +static inline uint64_t VmaNextPow2(uint64_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v++; + return v; +} + +// Returns largest power of 2 less or equal to v. +static inline uint32_t VmaPrevPow2(uint32_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v = v ^ (v >> 1); + return v; +} + +static inline uint64_t VmaPrevPow2(uint64_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v = v ^ (v >> 1); + return v; +} + +static inline bool VmaStrIsEmpty(const char* pStr) +{ + return pStr == VMA_NULL || *pStr == '\0'; +} + +/* +Returns true if two memory blocks occupy overlapping pages. +ResourceA must be in less memory offset than ResourceB. + +Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)" +chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity". +*/ +static inline bool VmaBlocksOnSamePage( + VkDeviceSize resourceAOffset, + VkDeviceSize resourceASize, + VkDeviceSize resourceBOffset, + VkDeviceSize pageSize) +{ + VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0); + VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; + VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1); + VkDeviceSize resourceBStart = resourceBOffset; + VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); + return resourceAEndPage == resourceBStartPage; +} + +/* +Returns true if given suballocation types could conflict and must respect +VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer +or linear image and another one is optimal image. If type is unknown, behave +conservatively. 
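+For example, a BUFFER suballocation followed by an IMAGE_OPTIMAL suballocation on the same
+page conflicts and must be kept apart, while two BUFFER suballocations never conflict.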
+*/ +static inline bool VmaIsBufferImageGranularityConflict( + VmaSuballocationType suballocType1, + VmaSuballocationType suballocType2) +{ + if (suballocType1 > suballocType2) + { + VMA_SWAP(suballocType1, suballocType2); + } + + switch (suballocType1) + { + case VMA_SUBALLOCATION_TYPE_FREE: + return false; + case VMA_SUBALLOCATION_TYPE_UNKNOWN: + return true; + case VMA_SUBALLOCATION_TYPE_BUFFER: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: + return false; + default: + VMA_ASSERT(0); + return true; + } +} + +static void VmaWriteMagicValue(void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + uint32_t* pDst = (uint32_t*)((char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for (size_t i = 0; i < numberCount; ++i, ++pDst) + { + *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; + } +#else + // no-op +#endif +} + +static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for (size_t i = 0; i < numberCount; ++i, ++pSrc) + { + if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) + { + return false; + } + } +#endif + return true; +} + +/* +Fills structure with parameters of an example buffer to be used for transfers +during GPU memory defragmentation. +*/ +static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) +{ + memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); + outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size. +} + + +/* +Performs binary search and returns iterator to first element that is greater or +equal to (key), according to comparison (cmp). + +Cmp should return true if first argument is less than second argument. + +Returned value is the found element, if present in the collection or place where +new element with value (key) should be inserted. +*/ +template +static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp) +{ + size_t down = 0, up = (end - beg); + while (down < up) + { + const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation + if (cmp(*(beg + mid), key)) + { + down = mid + 1; + } + else + { + up = mid; + } + } + return beg + down; +} + +template +IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp) +{ + IterT it = VmaBinaryFindFirstNotLess( + beg, end, value, cmp); + if (it == end || + (!cmp(*it, value) && !cmp(value, *it))) + { + return it; + } + return end; +} + +/* +Returns true if all pointers in the array are not-null and unique. +Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT. +T must be pointer type, e.g. VmaAllocation, VmaPool. 
+*/ +template +static bool VmaValidatePointerArray(uint32_t count, const T* arr) +{ + for (uint32_t i = 0; i < count; ++i) + { + const T iPtr = arr[i]; + if (iPtr == VMA_NULL) + { + return false; + } + for (uint32_t j = i + 1; j < count; ++j) + { + if (iPtr == arr[j]) + { + return false; + } + } + } + return true; +} + +template +static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) +{ + newStruct->pNext = mainStruct->pNext; + mainStruct->pNext = newStruct; +} + +// This is the main algorithm that guides the selection of a memory type best for an allocation - +// converts usage to required/preferred/not preferred flags. +static bool FindMemoryPreferences( + bool isIntegratedGPU, + const VmaAllocationCreateInfo& allocCreateInfo, + VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. + VkMemoryPropertyFlags& outRequiredFlags, + VkMemoryPropertyFlags& outPreferredFlags, + VkMemoryPropertyFlags& outNotPreferredFlags) +{ + outRequiredFlags = allocCreateInfo.requiredFlags; + outPreferredFlags = allocCreateInfo.preferredFlags; + outNotPreferredFlags = 0; + + switch(allocCreateInfo.usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + case VMA_MEMORY_USAGE_AUTO: + case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE: + case VMA_MEMORY_USAGE_AUTO_PREFER_HOST: + { + if(bufImgUsage == UINT32_MAX) + { + VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known."); + return false; + } + // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same VK_BUFFER_IMAGE_TRANSFER*. + const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0; + const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0; + const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0; + const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0; + const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; + const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST; + + // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU. 
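+        // For example: a readback buffer created with VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
+        // and only VK_BUFFER_USAGE_TRANSFER_DST_BIT usage has deviceAccess == false, so the branch
+        // below ends up requiring HOST_VISIBLE | HOST_CACHED memory.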
+ if(hostAccessRandom) + { + if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + { + // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL. + // Omitting HOST_VISIBLE here is intentional. + // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one. + // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list. + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + } + else + { + // Always CPU memory, cached. + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + } + } + // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined. + else if(hostAccessSequentialWrite) + { + // Want uncached and write-combined. + outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + + if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + else + { + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame) + if(deviceAccess) + { + // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory. + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU) + else + { + // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory. + if(preferDevice) + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + } + } + // No CPU access + else + { + // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory + if(deviceAccess) + { + // ...unless there is a clear preference from the user not to do so. + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + // No direct GPU access, no CPU access, just transfers. + // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or + // a "swap file" copy to free some GPU memory (then better CPU memory). + // Up to the user to decide. If no preferece, assume the former and choose GPU memory. + if(preferHost) + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + else + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + } + default: + VMA_ASSERT(0); + } + + // Avoid DEVICE_COHERENT unless explicitly requested. 
+ if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) & + (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) + { + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY; + } + + return true; +} + +//////////////////////////////////////////////////////////////////////////////// +// Memory allocation + +static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment) +{ + void* result = VMA_NULL; + if ((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnAllocation != VMA_NULL)) + { + result = (*pAllocationCallbacks->pfnAllocation)( + pAllocationCallbacks->pUserData, + size, + alignment, + VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); + } + else + { + result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment); + } + VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed."); + return result; +} + +static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr) +{ + if ((pAllocationCallbacks != VMA_NULL) && + (pAllocationCallbacks->pfnFree != VMA_NULL)) + { + (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr); + } + else + { + VMA_SYSTEM_ALIGNED_FREE(ptr); + } +} + +template +static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); +} + +template +static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count) +{ + return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); +} + +#define vma_new(allocator, type) new(VmaAllocate(allocator))(type) + +#define vma_new_array(allocator, type, count) new(VmaAllocateArray((allocator), (count)))(type) + +template +static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr) +{ + ptr->~T(); + VmaFree(pAllocationCallbacks, ptr); +} + +template +static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count) +{ + if (ptr != VMA_NULL) + { + for (size_t i = count; i--; ) + { + ptr[i].~T(); + } + VmaFree(pAllocationCallbacks, ptr); + } +} + +static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr) +{ + if (srcStr != VMA_NULL) + { + const size_t len = strlen(srcStr); + char* const result = vma_new_array(allocs, char, len + 1); + memcpy(result, srcStr, len + 1); + return result; + } + return VMA_NULL; +} + +#if VMA_STATS_STRING_ENABLED +static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen) +{ + if (srcStr != VMA_NULL) + { + char* const result = vma_new_array(allocs, char, strLen + 1); + memcpy(result, srcStr, strLen); + result[strLen] = '\0'; + return result; + } + return VMA_NULL; +} +#endif // VMA_STATS_STRING_ENABLED + +static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str) +{ + if (str != VMA_NULL) + { + const size_t len = strlen(str); + vma_delete_array(allocs, str, len + 1); + } +} + +template +size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value) +{ + const size_t indexToInsert = VmaBinaryFindFirstNotLess( + vector.data(), + vector.data() + vector.size(), + value, + CmpLess()) - vector.data(); + VmaVectorInsert(vector, indexToInsert, value); + return indexToInsert; +} + +template +bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value) +{ + CmpLess comparator; + typename VectorT::iterator it = VmaBinaryFindFirstNotLess( + 
vector.begin(), + vector.end(), + value, + comparator); + if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) + { + size_t indexToRemove = it - vector.begin(); + VmaVectorRemove(vector, indexToRemove); + return true; + } + return false; +} +#endif // _VMA_FUNCTIONS + +#ifndef _VMA_STATISTICS_FUNCTIONS + +static void VmaClearStatistics(VmaStatistics& outStats) +{ + outStats.blockCount = 0; + outStats.allocationCount = 0; + outStats.blockBytes = 0; + outStats.allocationBytes = 0; +} + +static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src) +{ + inoutStats.blockCount += src.blockCount; + inoutStats.allocationCount += src.allocationCount; + inoutStats.blockBytes += src.blockBytes; + inoutStats.allocationBytes += src.allocationBytes; +} + +static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) +{ + VmaClearStatistics(outStats.statistics); + outStats.unusedRangeCount = 0; + outStats.allocationSizeMin = VK_WHOLE_SIZE; + outStats.allocationSizeMax = 0; + outStats.unusedRangeSizeMin = VK_WHOLE_SIZE; + outStats.unusedRangeSizeMax = 0; +} + +static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.statistics.allocationCount++; + inoutStats.statistics.allocationBytes += size; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size); +} + +static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.unusedRangeCount++; + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size); +} + +static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src) +{ + VmaAddStatistics(inoutStats.statistics, src.statistics); + inoutStats.unusedRangeCount += src.unusedRangeCount; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax); + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax); +} + +#endif // _VMA_STATISTICS_FUNCTIONS + +#ifndef _VMA_MUTEX_LOCK +// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). +struct VmaMutexLock +{ + VMA_CLASS_NO_COPY(VmaMutexLock) +public: + VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->Lock(); } + } + ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } } + +private: + VMA_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. +struct VmaMutexLockRead +{ + VMA_CLASS_NO_COPY(VmaMutexLockRead) +public: + VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->LockRead(); } + } + ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } } + +private: + VMA_RW_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. 
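+// Illustrative note (not part of the original API surface): all three guards are meant to be
+// used purely by scope, and the bool `useMutex` parameter lets callers skip locking entirely.
+// A hypothetical caller holding a shared structure protected by `m_Mutex` might write:
+//
+//     {
+//         VmaMutexLockRead lock(m_Mutex, m_UseMutex);   // shared access
+//         // ... read the protected data ...
+//     }                                                 // unlocked at end of scope
+//     {
+//         VmaMutexLockWrite lock(m_Mutex, m_UseMutex);  // exclusive access
+//         // ... modify the protected data ...
+//     }
+//
+// The member names m_Mutex / m_UseMutex above are placeholders for whatever the enclosing
+// class actually uses.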
+struct VmaMutexLockWrite +{ + VMA_CLASS_NO_COPY(VmaMutexLockWrite) +public: + VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) + : m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->LockWrite(); } + } + ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } } + +private: + VMA_RW_MUTEX* m_pMutex; +}; + +#if VMA_DEBUG_GLOBAL_MUTEX + static VMA_MUTEX gDebugGlobalMutex; + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); +#else + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK +#endif +#endif // _VMA_MUTEX_LOCK + +#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT +// An object that increments given atomic but decrements it back in the destructor unless Commit() is called. +template +struct AtomicTransactionalIncrement +{ +public: + typedef std::atomic AtomicT; + + ~AtomicTransactionalIncrement() + { + if(m_Atomic) + --(*m_Atomic); + } + + void Commit() { m_Atomic = nullptr; } + T Increment(AtomicT* atomic) + { + m_Atomic = atomic; + return m_Atomic->fetch_add(1); + } + +private: + AtomicT* m_Atomic = nullptr; +}; +#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT + +#ifndef _VMA_STL_ALLOCATOR +// STL-compatible allocator. +template +struct VmaStlAllocator +{ + const VkAllocationCallbacks* const m_pCallbacks; + typedef T value_type; + + VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {} + template + VmaStlAllocator(const VmaStlAllocator& src) : m_pCallbacks(src.m_pCallbacks) {} + VmaStlAllocator(const VmaStlAllocator&) = default; + VmaStlAllocator& operator=(const VmaStlAllocator&) = delete; + + T* allocate(size_t n) { return VmaAllocateArray(m_pCallbacks, n); } + void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); } + + template + bool operator==(const VmaStlAllocator& rhs) const + { + return m_pCallbacks == rhs.m_pCallbacks; + } + template + bool operator!=(const VmaStlAllocator& rhs) const + { + return m_pCallbacks != rhs.m_pCallbacks; + } +}; +#endif // _VMA_STL_ALLOCATOR + +#ifndef _VMA_VECTOR +/* Class with interface compatible with subset of std::vector. +T must be POD because constructors and destructors are not called and memcpy is +used for these objects. */ +template +class VmaVector +{ +public: + typedef T value_type; + typedef T* iterator; + typedef const T* const_iterator; + + VmaVector(const AllocatorT& allocator); + VmaVector(size_t count, const AllocatorT& allocator); + // This version of the constructor is here for compatibility with pre-C++14 std::vector. + // value is unused. 
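+    // Illustrative consequence of the above (assumed example, not library code): because
+    // element constructors are never run for the POD elements, a call such as
+    //     VmaVector<int, VmaStlAllocator<int>> v(8, 0, VmaStlAllocator<int>(pCallbacks));
+    // allocates 8 elements but leaves their contents indeterminate rather than zero-filled,
+    // unlike std::vector.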
+ VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {} + VmaVector(const VmaVector& src); + VmaVector& operator=(const VmaVector& rhs); + ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); } + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_pArray; } + T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } + T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } + const T* data() const { return m_pArray; } + const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } + const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } + + iterator begin() { return m_pArray; } + iterator end() { return m_pArray + m_Count; } + const_iterator cbegin() const { return m_pArray; } + const_iterator cend() const { return m_pArray + m_Count; } + const_iterator begin() const { return cbegin(); } + const_iterator end() const { return cend(); } + + void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } + void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } + void push_front(const T& src) { insert(0, src); } + + void push_back(const T& src); + void reserve(size_t newCapacity, bool freeMemory = false); + void resize(size_t newCount); + void clear() { resize(0); } + void shrink_to_fit(); + void insert(size_t index, const T& src); + void remove(size_t index); + + T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } + const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } + +private: + AllocatorT m_Allocator; + T* m_pArray; + size_t m_Count; + size_t m_Capacity; +}; + +#ifndef _VMA_VECTOR_FUNCTIONS +template +VmaVector::VmaVector(const AllocatorT& allocator) + : m_Allocator(allocator), + m_pArray(VMA_NULL), + m_Count(0), + m_Capacity(0) {} + +template +VmaVector::VmaVector(size_t count, const AllocatorT& allocator) + : m_Allocator(allocator), + m_pArray(count ? (T*)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), + m_Count(count), + m_Capacity(count) {} + +template +VmaVector::VmaVector(const VmaVector& src) + : m_Allocator(src.m_Allocator), + m_pArray(src.m_Count ? (T*)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), + m_Count(src.m_Count), + m_Capacity(src.m_Count) +{ + if (m_Count != 0) + { + memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T)); + } +} + +template +VmaVector& VmaVector::operator=(const VmaVector& rhs) +{ + if (&rhs != this) + { + resize(rhs.m_Count); + if (m_Count != 0) + { + memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T)); + } + } + return *this; +} + +template +void VmaVector::push_back(const T& src) +{ + const size_t newIndex = size(); + resize(newIndex + 1); + m_pArray[newIndex] = src; +} + +template +void VmaVector::reserve(size_t newCapacity, bool freeMemory) +{ + newCapacity = VMA_MAX(newCapacity, m_Count); + + if ((newCapacity < m_Capacity) && !freeMemory) + { + newCapacity = m_Capacity; + } + + if (newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? 
VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; + if (m_Count != 0) + { + memcpy(newArray, m_pArray, m_Count * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } +} + +template +void VmaVector::resize(size_t newCount) +{ + size_t newCapacity = m_Capacity; + if (newCount > m_Capacity) + { + newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8)); + } + + if (newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; + const size_t elementsToCopy = VMA_MIN(m_Count, newCount); + if (elementsToCopy != 0) + { + memcpy(newArray, m_pArray, elementsToCopy * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } + + m_Count = newCount; +} + +template +void VmaVector::shrink_to_fit() +{ + if (m_Capacity > m_Count) + { + T* newArray = VMA_NULL; + if (m_Count > 0) + { + newArray = VmaAllocateArray(m_Allocator.m_pCallbacks, m_Count); + memcpy(newArray, m_pArray, m_Count * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = m_Count; + m_pArray = newArray; + } +} + +template +void VmaVector::insert(size_t index, const T& src) +{ + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + if (index < oldCount) + { + memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T)); + } + m_pArray[index] = src; +} + +template +void VmaVector::remove(size_t index) +{ + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if (index < oldCount - 1) + { + memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); +} +#endif // _VMA_VECTOR_FUNCTIONS + +template +static void VmaVectorInsert(VmaVector& vec, size_t index, const T& item) +{ + vec.insert(index, item); +} + +template +static void VmaVectorRemove(VmaVector& vec, size_t index) +{ + vec.remove(index); +} +#endif // _VMA_VECTOR + +#ifndef _VMA_SMALL_VECTOR +/* +This is a vector (a variable-sized array), optimized for the case when the array is small. + +It contains some number of elements in-place, which allows it to avoid heap allocation +when the actual number of elements is below that threshold. This allows normal "small" +cases to be fast without losing generality for large inputs. +*/ +template +class VmaSmallVector +{ +public: + typedef T value_type; + typedef T* iterator; + + VmaSmallVector(const AllocatorT& allocator); + VmaSmallVector(size_t count, const AllocatorT& allocator); + template + VmaSmallVector(const VmaSmallVector&) = delete; + template + VmaSmallVector& operator=(const VmaSmallVector&) = delete; + ~VmaSmallVector() = default; + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } + T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + const T* data() const { return m_Count > N ? 
m_DynamicArray.data() : m_StaticArray; } + const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } + const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + + iterator begin() { return data(); } + iterator end() { return data() + m_Count; } + + void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } + void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } + void push_front(const T& src) { insert(0, src); } + + void push_back(const T& src); + void resize(size_t newCount, bool freeMemory = false); + void clear(bool freeMemory = false); + void insert(size_t index, const T& src); + void remove(size_t index); + + T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + +private: + size_t m_Count; + T m_StaticArray[N]; // Used when m_Size <= N + VmaVector m_DynamicArray; // Used when m_Size > N +}; + +#ifndef _VMA_SMALL_VECTOR_FUNCTIONS +template +VmaSmallVector::VmaSmallVector(const AllocatorT& allocator) + : m_Count(0), + m_DynamicArray(allocator) {} + +template +VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT& allocator) + : m_Count(count), + m_DynamicArray(count > N ? count : 0, allocator) {} + +template +void VmaSmallVector::push_back(const T& src) +{ + const size_t newIndex = size(); + resize(newIndex + 1); + data()[newIndex] = src; +} + +template +void VmaSmallVector::resize(size_t newCount, bool freeMemory) +{ + if (newCount > N && m_Count > N) + { + // Any direction, staying in m_DynamicArray + m_DynamicArray.resize(newCount); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else if (newCount > N && m_Count <= N) + { + // Growing, moving from m_StaticArray to m_DynamicArray + m_DynamicArray.resize(newCount); + if (m_Count > 0) + { + memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); + } + } + else if (newCount <= N && m_Count > N) + { + // Shrinking, moving from m_DynamicArray to m_StaticArray + if (newCount > 0) + { + memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); + } + m_DynamicArray.resize(0); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else + { + // Any direction, staying in m_StaticArray - nothing to do here + } + m_Count = newCount; +} + +template +void VmaSmallVector::clear(bool freeMemory) +{ + m_DynamicArray.clear(); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + m_Count = 0; +} + +template +void VmaSmallVector::insert(size_t index, const T& src) +{ + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + T* const dataPtr = data(); + if (index < oldCount) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. + memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); + } + dataPtr[index] = src; +} + +template +void VmaSmallVector::remove(size_t index) +{ + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if (index < oldCount - 1) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. 
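+        // Sketch of that possible optimization (not implemented; shown for clarity only):
+        // when the new size drops back to N, the surviving elements could be copied straight
+        // from m_DynamicArray into m_StaticArray while skipping the removed slot, e.g. roughly
+        //     memcpy(m_StaticArray, m_DynamicArray.data(), index * sizeof(T));
+        //     memcpy(m_StaticArray + index, m_DynamicArray.data() + index + 1,
+        //            (oldCount - 1 - index) * sizeof(T));
+        // instead of shifting in place first and then copying again inside resize().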
+ T* const dataPtr = data(); + memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); +} +#endif // _VMA_SMALL_VECTOR_FUNCTIONS +#endif // _VMA_SMALL_VECTOR + +#ifndef _VMA_POOL_ALLOCATOR +/* +Allocator for objects of type T using a list of arrays (pools) to speed up +allocation. Number of elements that can be allocated is not bounded because +allocator can create multiple blocks. +*/ +template +class VmaPoolAllocator +{ + VMA_CLASS_NO_COPY(VmaPoolAllocator) +public: + VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); + ~VmaPoolAllocator(); + template T* Alloc(Types&&... args); + void Free(T* ptr); + +private: + union Item + { + uint32_t NextFreeIndex; + alignas(T) char Value[sizeof(T)]; + }; + struct ItemBlock + { + Item* pItems; + uint32_t Capacity; + uint32_t FirstFreeIndex; + }; + + const VkAllocationCallbacks* m_pAllocationCallbacks; + const uint32_t m_FirstBlockCapacity; + VmaVector> m_ItemBlocks; + + ItemBlock& CreateNewBlock(); +}; + +#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS +template +VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_FirstBlockCapacity(firstBlockCapacity), + m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) +{ + VMA_ASSERT(m_FirstBlockCapacity > 1); +} + +template +VmaPoolAllocator::~VmaPoolAllocator() +{ + for (size_t i = m_ItemBlocks.size(); i--;) + vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); + m_ItemBlocks.clear(); +} + +template +template T* VmaPoolAllocator::Alloc(Types&&... args) +{ + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + // This block has some free items: Use first one. + if (block.FirstFreeIndex != UINT32_MAX) + { + Item* const pItem = &block.pItems[block.FirstFreeIndex]; + block.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result)T(std::forward(args)...); // Explicit constructor call. + return result; + } + } + + // No block has free item: Create new one and use it. + ItemBlock& newBlock = CreateNewBlock(); + Item* const pItem = &newBlock.pItems[0]; + newBlock.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result) T(std::forward(args)...); // Explicit constructor call. + return result; +} + +template +void VmaPoolAllocator::Free(T* ptr) +{ + // Search all memory blocks to find ptr. + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + + // Casting to union. + Item* pItemPtr; + memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); + + // Check if pItemPtr is in address range of this block. + if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) + { + ptr->~T(); // Explicit destructor call. + const uint32_t index = static_cast(pItemPtr - block.pItems); + pItemPtr->NextFreeIndex = block.FirstFreeIndex; + block.FirstFreeIndex = index; + return; + } + } + VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); +} + +template +typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() +{ + const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? 
+ m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; + + const ItemBlock newBlock = + { + vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), + newBlockCapacity, + 0 + }; + + m_ItemBlocks.push_back(newBlock); + + // Setup singly-linked list of all free items in this block. + for (uint32_t i = 0; i < newBlockCapacity - 1; ++i) + newBlock.pItems[i].NextFreeIndex = i + 1; + newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; + return m_ItemBlocks.back(); +} +#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS +#endif // _VMA_POOL_ALLOCATOR + +#ifndef _VMA_RAW_LIST +template +struct VmaListItem +{ + VmaListItem* pPrev; + VmaListItem* pNext; + T Value; +}; + +// Doubly linked list. +template +class VmaRawList +{ + VMA_CLASS_NO_COPY(VmaRawList) +public: + typedef VmaListItem ItemType; + + VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); + // Intentionally not calling Clear, because that would be unnecessary + // computations to return all items to m_ItemAllocator as free. + ~VmaRawList() = default; + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + + ItemType* Front() { return m_pFront; } + ItemType* Back() { return m_pBack; } + const ItemType* Front() const { return m_pFront; } + const ItemType* Back() const { return m_pBack; } + + ItemType* PushFront(); + ItemType* PushBack(); + ItemType* PushFront(const T& value); + ItemType* PushBack(const T& value); + void PopFront(); + void PopBack(); + + // Item can be null - it means PushBack. + ItemType* InsertBefore(ItemType* pItem); + // Item can be null - it means PushFront. + ItemType* InsertAfter(ItemType* pItem); + ItemType* InsertBefore(ItemType* pItem, const T& value); + ItemType* InsertAfter(ItemType* pItem, const T& value); + + void Clear(); + void Remove(ItemType* pItem); + +private: + const VkAllocationCallbacks* const m_pAllocationCallbacks; + VmaPoolAllocator m_ItemAllocator; + ItemType* m_pFront; + ItemType* m_pBack; + size_t m_Count; +}; + +#ifndef _VMA_RAW_LIST_FUNCTIONS +template +VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_ItemAllocator(pAllocationCallbacks, 128), + m_pFront(VMA_NULL), + m_pBack(VMA_NULL), + m_Count(0) {} + +template +VmaListItem* VmaRawList::PushFront() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pPrev = VMA_NULL; + if (IsEmpty()) + { + pNewItem->pNext = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pNext = m_pFront; + m_pFront->pPrev = pNewItem; + m_pFront = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pNext = VMA_NULL; + if(IsEmpty()) + { + pNewItem->pPrev = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pPrev = m_pBack; + m_pBack->pNext = pNewItem; + m_pBack = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushFront(const T& value) +{ + ItemType* const pNewItem = PushFront(); + pNewItem->Value = value; + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack(const T& value) +{ + ItemType* const pNewItem = PushBack(); + pNewItem->Value = value; + return pNewItem; +} + +template +void VmaRawList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pFrontItem = m_pFront; + ItemType* const pNextItem = pFrontItem->pNext; + if (pNextItem != 
VMA_NULL) + { + pNextItem->pPrev = VMA_NULL; + } + m_pFront = pNextItem; + m_ItemAllocator.Free(pFrontItem); + --m_Count; +} + +template +void VmaRawList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pBackItem = m_pBack; + ItemType* const pPrevItem = pBackItem->pPrev; + if(pPrevItem != VMA_NULL) + { + pPrevItem->pNext = VMA_NULL; + } + m_pBack = pPrevItem; + m_ItemAllocator.Free(pBackItem); + --m_Count; +} + +template +void VmaRawList::Clear() +{ + if (IsEmpty() == false) + { + ItemType* pItem = m_pBack; + while (pItem != VMA_NULL) + { + ItemType* const pPrevItem = pItem->pPrev; + m_ItemAllocator.Free(pItem); + pItem = pPrevItem; + } + m_pFront = VMA_NULL; + m_pBack = VMA_NULL; + m_Count = 0; + } +} + +template +void VmaRawList::Remove(ItemType* pItem) +{ + VMA_HEAVY_ASSERT(pItem != VMA_NULL); + VMA_HEAVY_ASSERT(m_Count > 0); + + if(pItem->pPrev != VMA_NULL) + { + pItem->pPrev->pNext = pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = pItem->pNext; + } + + if(pItem->pNext != VMA_NULL) + { + pItem->pNext->pPrev = pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = pItem->pPrev; + } + + m_ItemAllocator.Free(pItem); + --m_Count; +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const prevItem = pItem->pPrev; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pPrev = prevItem; + newItem->pNext = pItem; + pItem->pPrev = newItem; + if(prevItem != VMA_NULL) + { + prevItem->pNext = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = newItem; + } + ++m_Count; + return newItem; + } + else + return PushBack(); +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const nextItem = pItem->pNext; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pNext = nextItem; + newItem->pPrev = pItem; + pItem->pNext = newItem; + if(nextItem != VMA_NULL) + { + nextItem->pPrev = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = newItem; + } + ++m_Count; + return newItem; + } + else + return PushFront(); +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertBefore(pItem); + newItem->Value = value; + return newItem; +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertAfter(pItem); + newItem->Value = value; + return newItem; +} +#endif // _VMA_RAW_LIST_FUNCTIONS +#endif // _VMA_RAW_LIST + +#ifndef _VMA_LIST +template +class VmaList +{ + VMA_CLASS_NO_COPY(VmaList) +public: + class reverse_iterator; + class const_iterator; + class const_reverse_iterator; + + class iterator + { + friend class const_iterator; + friend class VmaList; + public: + iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + iterator operator++(int) { iterator result = *this; ++*this; return result; } + iterator operator--(int) { 
iterator result = *this; --*this; return result; } + + iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } + iterator& operator--(); + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class reverse_iterator + { + friend class const_reverse_iterator; + friend class VmaList; + public: + reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; } + reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; } + + reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } + reverse_iterator& operator--(); + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + reverse_iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class const_iterator + { + friend class VmaList; + public: + const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; } + const_iterator operator--(int) { const_iterator result = *this; --* this; return result; } + + const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } + const_iterator& operator--(); + + private: + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + class const_reverse_iterator + { + friend class VmaList; + public: + const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + reverse_iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + + const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const 
const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; } + const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; } + + const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } + const_reverse_iterator& operator--(); + + private: + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + const_reverse_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + }; + + VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {} + + bool empty() const { return m_RawList.IsEmpty(); } + size_t size() const { return m_RawList.GetCount(); } + + iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } + iterator end() { return iterator(&m_RawList, VMA_NULL); } + + const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } + const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } + + const_iterator begin() const { return cbegin(); } + const_iterator end() const { return cend(); } + + reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); } + reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); } + + const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); } + const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); } + + const_reverse_iterator rbegin() const { return crbegin(); } + const_reverse_iterator rend() const { return crend(); } + + void push_back(const T& value) { m_RawList.PushBack(value); } + iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } + + void clear() { m_RawList.Clear(); } + void erase(iterator it) { m_RawList.Remove(it.m_pItem); } + +private: + VmaRawList m_RawList; +}; + +#ifndef _VMA_LIST_FUNCTIONS +template +typename VmaList::iterator& VmaList::iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} + +template +typename VmaList::reverse_iterator& VmaList::reverse_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Front(); + } + return *this; +} + +template +typename VmaList::const_iterator& VmaList::const_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} + +template +typename VmaList::const_reverse_iterator& VmaList::const_reverse_iterator::operator--() +{ + if (m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; +} +#endif // _VMA_LIST_FUNCTIONS +#endif // _VMA_LIST + +#ifndef _VMA_INTRUSIVE_LINKED_LIST +/* +Expected interface of ItemTypeTraits: +struct MyItemTypeTraits +{ + typedef MyItem ItemType; + static ItemType* GetPrev(const ItemType* item) { return 
item->myPrevPtr; } + static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; } + static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; } + static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; } +}; +*/ +template +class VmaIntrusiveLinkedList +{ +public: + typedef typename ItemTypeTraits::ItemType ItemType; + static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); } + static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); } + + // Movable, not copyable. + VmaIntrusiveLinkedList() = default; + VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src); + VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete; + VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src); + VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete; + ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); } + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + ItemType* Front() { return m_Front; } + ItemType* Back() { return m_Back; } + const ItemType* Front() const { return m_Front; } + const ItemType* Back() const { return m_Back; } + + void PushBack(ItemType* item); + void PushFront(ItemType* item); + ItemType* PopBack(); + ItemType* PopFront(); + + // MyItem can be null - it means PushBack. + void InsertBefore(ItemType* existingItem, ItemType* newItem); + // MyItem can be null - it means PushFront. + void InsertAfter(ItemType* existingItem, ItemType* newItem); + void Remove(ItemType* item); + void RemoveAll(); + +private: + ItemType* m_Front = VMA_NULL; + ItemType* m_Back = VMA_NULL; + size_t m_Count = 0; +}; + +#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS +template +VmaIntrusiveLinkedList::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src) + : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count) +{ + src.m_Front = src.m_Back = VMA_NULL; + src.m_Count = 0; +} + +template +VmaIntrusiveLinkedList& VmaIntrusiveLinkedList::operator=(VmaIntrusiveLinkedList&& src) +{ + if (&src != this) + { + VMA_HEAVY_ASSERT(IsEmpty()); + m_Front = src.m_Front; + m_Back = src.m_Back; + m_Count = src.m_Count; + src.m_Front = src.m_Back = VMA_NULL; + src.m_Count = 0; + } + return *this; +} + +template +void VmaIntrusiveLinkedList::PushBack(ItemType* item) +{ + VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); + if (IsEmpty()) + { + m_Front = item; + m_Back = item; + m_Count = 1; + } + else + { + ItemTypeTraits::AccessPrev(item) = m_Back; + ItemTypeTraits::AccessNext(m_Back) = item; + m_Back = item; + ++m_Count; + } +} + +template +void VmaIntrusiveLinkedList::PushFront(ItemType* item) +{ + VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); + if (IsEmpty()) + { + m_Front = item; + m_Back = item; + m_Count = 1; + } + else + { + ItemTypeTraits::AccessNext(item) = m_Front; + ItemTypeTraits::AccessPrev(m_Front) = item; + m_Front = item; + ++m_Count; + } +} + +template +typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const backItem = m_Back; + ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem); + if (prevItem != VMA_NULL) + { + ItemTypeTraits::AccessNext(prevItem) = VMA_NULL; + } + m_Back = prevItem; + --m_Count; + ItemTypeTraits::AccessPrev(backItem) = VMA_NULL; + ItemTypeTraits::AccessNext(backItem) = VMA_NULL; + return backItem; +} + +template 
+typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const frontItem = m_Front; + ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem); + if (nextItem != VMA_NULL) + { + ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL; + } + m_Front = nextItem; + --m_Count; + ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL; + ItemTypeTraits::AccessNext(frontItem) = VMA_NULL; + return frontItem; +} + +template +void VmaIntrusiveLinkedList::InsertBefore(ItemType* existingItem, ItemType* newItem) +{ + VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); + if (existingItem != VMA_NULL) + { + ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem); + ItemTypeTraits::AccessPrev(newItem) = prevItem; + ItemTypeTraits::AccessNext(newItem) = existingItem; + ItemTypeTraits::AccessPrev(existingItem) = newItem; + if (prevItem != VMA_NULL) + { + ItemTypeTraits::AccessNext(prevItem) = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_Front == existingItem); + m_Front = newItem; + } + ++m_Count; + } + else + PushBack(newItem); +} + +template +void VmaIntrusiveLinkedList::InsertAfter(ItemType* existingItem, ItemType* newItem) +{ + VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); + if (existingItem != VMA_NULL) + { + ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem); + ItemTypeTraits::AccessNext(newItem) = nextItem; + ItemTypeTraits::AccessPrev(newItem) = existingItem; + ItemTypeTraits::AccessNext(existingItem) = newItem; + if (nextItem != VMA_NULL) + { + ItemTypeTraits::AccessPrev(nextItem) = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_Back == existingItem); + m_Back = newItem; + } + ++m_Count; + } + else + return PushFront(newItem); +} + +template +void VmaIntrusiveLinkedList::Remove(ItemType* item) +{ + VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0); + if (ItemTypeTraits::GetPrev(item) != VMA_NULL) + { + ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item); + } + else + { + VMA_HEAVY_ASSERT(m_Front == item); + m_Front = ItemTypeTraits::GetNext(item); + } + + if (ItemTypeTraits::GetNext(item) != VMA_NULL) + { + ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item); + } + else + { + VMA_HEAVY_ASSERT(m_Back == item); + m_Back = ItemTypeTraits::GetPrev(item); + } + ItemTypeTraits::AccessPrev(item) = VMA_NULL; + ItemTypeTraits::AccessNext(item) = VMA_NULL; + --m_Count; +} + +template +void VmaIntrusiveLinkedList::RemoveAll() +{ + if (!IsEmpty()) + { + ItemType* item = m_Back; + while (item != VMA_NULL) + { + ItemType* const prevItem = ItemTypeTraits::AccessPrev(item); + ItemTypeTraits::AccessPrev(item) = VMA_NULL; + ItemTypeTraits::AccessNext(item) = VMA_NULL; + item = prevItem; + } + m_Front = VMA_NULL; + m_Back = VMA_NULL; + m_Count = 0; + } +} +#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS +#endif // _VMA_INTRUSIVE_LINKED_LIST + +// Unused in this version. 
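+// The block compiled out below keeps a simple map implemented as a VmaVector of key/value
+// pairs held sorted by key, so lookup is a binary search (VmaBinaryFindFirstNotLess) rather
+// than hashing. As a rough standard-library analogy (illustrative only, not equivalent code):
+//
+//     std::vector<std::pair<KeyT, ValueT>> v;                        // kept sorted by .first
+//     auto it = std::lower_bound(v.begin(), v.end(), key,
+//         [](const auto& p, const KeyT& k) { return p.first < k; }); // find(key)
+//
+// followed by an insert at that position to keep the ordering.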
+#if 0 + +#ifndef _VMA_PAIR +template +struct VmaPair +{ + T1 first; + T2 second; + + VmaPair() : first(), second() {} + VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {} +}; + +template +struct VmaPairFirstLess +{ + bool operator()(const VmaPair& lhs, const VmaPair& rhs) const + { + return lhs.first < rhs.first; + } + bool operator()(const VmaPair& lhs, const FirstT& rhsFirst) const + { + return lhs.first < rhsFirst; + } +}; +#endif // _VMA_PAIR + +#ifndef _VMA_MAP +/* Class compatible with subset of interface of std::unordered_map. +KeyT, ValueT must be POD because they will be stored in VmaVector. +*/ +template +class VmaMap +{ +public: + typedef VmaPair PairType; + typedef PairType* iterator; + + VmaMap(const VmaStlAllocator& allocator) : m_Vector(allocator) {} + + iterator begin() { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + size_t size() { return m_Vector.size(); } + + void insert(const PairType& pair); + iterator find(const KeyT& key); + void erase(iterator it); + +private: + VmaVector< PairType, VmaStlAllocator> m_Vector; +}; + +#ifndef _VMA_MAP_FUNCTIONS +template +void VmaMap::insert(const PairType& pair) +{ + const size_t indexToInsert = VmaBinaryFindFirstNotLess( + m_Vector.data(), + m_Vector.data() + m_Vector.size(), + pair, + VmaPairFirstLess()) - m_Vector.data(); + VmaVectorInsert(m_Vector, indexToInsert, pair); +} + +template +VmaPair* VmaMap::find(const KeyT& key) +{ + PairType* it = VmaBinaryFindFirstNotLess( + m_Vector.data(), + m_Vector.data() + m_Vector.size(), + key, + VmaPairFirstLess()); + if ((it != m_Vector.end()) && (it->first == key)) + { + return it; + } + else + { + return m_Vector.end(); + } +} + +template +void VmaMap::erase(iterator it) +{ + VmaVectorRemove(m_Vector, it - m_Vector.begin()); +} +#endif // _VMA_MAP_FUNCTIONS +#endif // _VMA_MAP + +#endif // #if 0 + +#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED +class VmaStringBuilder +{ +public: + VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator(allocationCallbacks)) {} + ~VmaStringBuilder() = default; + + size_t GetLength() const { return m_Data.size(); } + const char* GetData() const { return m_Data.data(); } + void AddNewLine() { Add('\n'); } + void Add(char ch) { m_Data.push_back(ch); } + + void Add(const char* pStr); + void AddNumber(uint32_t num); + void AddNumber(uint64_t num); + void AddPointer(const void* ptr); + +private: + VmaVector> m_Data; +}; + +#ifndef _VMA_STRING_BUILDER_FUNCTIONS +void VmaStringBuilder::Add(const char* pStr) +{ + const size_t strLen = strlen(pStr); + if (strLen > 0) + { + const size_t oldCount = m_Data.size(); + m_Data.resize(oldCount + strLen); + memcpy(m_Data.data() + oldCount, pStr, strLen); + } +} + +void VmaStringBuilder::AddNumber(uint32_t num) +{ + char buf[11]; + buf[10] = '\0'; + char* p = &buf[10]; + do + { + *--p = '0' + (num % 10); + num /= 10; + } while (num); + Add(p); +} + +void VmaStringBuilder::AddNumber(uint64_t num) +{ + char buf[21]; + buf[20] = '\0'; + char* p = &buf[20]; + do + { + *--p = '0' + (num % 10); + num /= 10; + } while (num); + Add(p); +} + +void VmaStringBuilder::AddPointer(const void* ptr) +{ + char buf[21]; + VmaPtrToStr(buf, sizeof(buf), ptr); + Add(buf); +} +#endif //_VMA_STRING_BUILDER_FUNCTIONS +#endif // _VMA_STRING_BUILDER + +#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED +/* +Allows to conveniently build a correct JSON document to be written to the +VmaStringBuilder passed to the constructor. 
+*/ +class VmaJsonWriter +{ + VMA_CLASS_NO_COPY(VmaJsonWriter) +public: + // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object. + VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb); + ~VmaJsonWriter(); + + // Begins object by writing "{". + // Inside an object, you must call pairs of WriteString and a value, e.g.: + // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject(); + // Will write: { "A": 1, "B": 2 } + void BeginObject(bool singleLine = false); + // Ends object by writing "}". + void EndObject(); + + // Begins array by writing "[". + // Inside an array, you can write a sequence of any values. + void BeginArray(bool singleLine = false); + // Ends array by writing "[". + void EndArray(); + + // Writes a string value inside "". + // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped. + void WriteString(const char* pStr); + + // Begins writing a string value. + // Call BeginString, ContinueString, ContinueString, ..., EndString instead of + // WriteString to conveniently build the string content incrementally, made of + // parts including numbers. + void BeginString(const char* pStr = VMA_NULL); + // Posts next part of an open string. + void ContinueString(const char* pStr); + // Posts next part of an open string. The number is converted to decimal characters. + void ContinueString(uint32_t n); + void ContinueString(uint64_t n); + void ContinueString_Size(size_t n); + // Posts next part of an open string. Pointer value is converted to characters + // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00 + void ContinueString_Pointer(const void* ptr); + // Ends writing a string value by writing '"'. + void EndString(const char* pStr = VMA_NULL); + + // Writes a number value. + void WriteNumber(uint32_t n); + void WriteNumber(uint64_t n); + void WriteSize(size_t n); + // Writes a boolean value - false or true. + void WriteBool(bool b); + // Writes a null value. 
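+    // Putting the calls above together, an illustrative (assumed, simplified) use of this writer:
+    //
+    //     json.BeginObject();
+    //     json.WriteString("Name");    json.WriteString("Block \"A\"");  // value gets escaped
+    //     json.WriteString("Sizes");
+    //     json.BeginArray(true);       // single-line array
+    //     json.WriteNumber(256u);      json.WriteNumber(uint64_t{1024});
+    //     json.EndArray();
+    //     json.WriteString("Parent");  json.WriteNull();   // WriteNull(), declared just below
+    //     json.EndObject();
+    //
+    // which produces a correctly quoted and indented JSON object.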
+ void WriteNull(); + +private: + enum COLLECTION_TYPE + { + COLLECTION_TYPE_OBJECT, + COLLECTION_TYPE_ARRAY, + }; + struct StackItem + { + COLLECTION_TYPE type; + uint32_t valueCount; + bool singleLineMode; + }; + + static const char* const INDENT; + + VmaStringBuilder& m_SB; + VmaVector< StackItem, VmaStlAllocator > m_Stack; + bool m_InsideString; + + // Write size_t for less than 64bits + void WriteSize(size_t n, std::integral_constant) { m_SB.AddNumber(static_cast(n)); } + // Write size_t for 64bits + void WriteSize(size_t n, std::integral_constant) { m_SB.AddNumber(static_cast(n)); } + + void BeginValue(bool isString); + void WriteIndent(bool oneLess = false); +}; +const char* const VmaJsonWriter::INDENT = " "; + +#ifndef _VMA_JSON_WRITER_FUNCTIONS +VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) + : m_SB(sb), + m_Stack(VmaStlAllocator(pAllocationCallbacks)), + m_InsideString(false) {} + +VmaJsonWriter::~VmaJsonWriter() +{ + VMA_ASSERT(!m_InsideString); + VMA_ASSERT(m_Stack.empty()); +} + +void VmaJsonWriter::BeginObject(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('{'); + + StackItem item; + item.type = COLLECTION_TYPE_OBJECT; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndObject() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add('}'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT); + m_Stack.pop_back(); +} + +void VmaJsonWriter::BeginArray(bool singleLine) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(false); + m_SB.Add('['); + + StackItem item; + item.type = COLLECTION_TYPE_ARRAY; + item.valueCount = 0; + item.singleLineMode = singleLine; + m_Stack.push_back(item); +} + +void VmaJsonWriter::EndArray() +{ + VMA_ASSERT(!m_InsideString); + + WriteIndent(true); + m_SB.Add(']'); + + VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY); + m_Stack.pop_back(); +} + +void VmaJsonWriter::WriteString(const char* pStr) +{ + BeginString(pStr); + EndString(); +} + +void VmaJsonWriter::BeginString(const char* pStr) +{ + VMA_ASSERT(!m_InsideString); + + BeginValue(true); + m_SB.Add('"'); + m_InsideString = true; + if (pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } +} + +void VmaJsonWriter::ContinueString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + + const size_t strLen = strlen(pStr); + for (size_t i = 0; i < strLen; ++i) + { + char ch = pStr[i]; + if (ch == '\\') + { + m_SB.Add("\\\\"); + } + else if (ch == '"') + { + m_SB.Add("\\\""); + } + else if (ch >= 32) + { + m_SB.Add(ch); + } + else switch (ch) + { + case '\b': + m_SB.Add("\\b"); + break; + case '\f': + m_SB.Add("\\f"); + break; + case '\n': + m_SB.Add("\\n"); + break; + case '\r': + m_SB.Add("\\r"); + break; + case '\t': + m_SB.Add("\\t"); + break; + default: + VMA_ASSERT(0 && "Character not currently supported."); + break; + } + } +} + +void VmaJsonWriter::ContinueString(uint32_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString(uint64_t n) +{ + VMA_ASSERT(m_InsideString); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::ContinueString_Size(size_t n) +{ + VMA_ASSERT(m_InsideString); + // Fix for AppleClang incorrect type casting + // TODO: Change to if constexpr when C++17 used as minimal standard + WriteSize(n, std::is_same{}); +} + +void VmaJsonWriter::ContinueString_Pointer(const void* ptr) +{ + 
VMA_ASSERT(m_InsideString); + m_SB.AddPointer(ptr); +} + +void VmaJsonWriter::EndString(const char* pStr) +{ + VMA_ASSERT(m_InsideString); + if (pStr != VMA_NULL && pStr[0] != '\0') + { + ContinueString(pStr); + } + m_SB.Add('"'); + m_InsideString = false; +} + +void VmaJsonWriter::WriteNumber(uint32_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteNumber(uint64_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteSize(size_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + // Fix for AppleClang incorrect type casting + // TODO: Change to if constexpr when C++17 used as minimal standard + WriteSize(n, std::is_same{}); +} + +void VmaJsonWriter::WriteBool(bool b) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add(b ? "true" : "false"); +} + +void VmaJsonWriter::WriteNull() +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add("null"); +} + +void VmaJsonWriter::BeginValue(bool isString) +{ + if (!m_Stack.empty()) + { + StackItem& currItem = m_Stack.back(); + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 == 0) + { + VMA_ASSERT(isString); + } + + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 != 0) + { + m_SB.Add(": "); + } + else if (currItem.valueCount > 0) + { + m_SB.Add(", "); + WriteIndent(); + } + else + { + WriteIndent(); + } + ++currItem.valueCount; + } +} + +void VmaJsonWriter::WriteIndent(bool oneLess) +{ + if (!m_Stack.empty() && !m_Stack.back().singleLineMode) + { + m_SB.AddNewLine(); + + size_t count = m_Stack.size(); + if (count > 0 && oneLess) + { + --count; + } + for (size_t i = 0; i < count; ++i) + { + m_SB.Add(INDENT); + } + } +} +#endif // _VMA_JSON_WRITER_FUNCTIONS + +static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) +{ + json.BeginObject(); + + json.WriteString("BlockCount"); + json.WriteNumber(stat.statistics.blockCount); + json.WriteString("BlockBytes"); + json.WriteNumber(stat.statistics.blockBytes); + json.WriteString("AllocationCount"); + json.WriteNumber(stat.statistics.allocationCount); + json.WriteString("AllocationBytes"); + json.WriteNumber(stat.statistics.allocationBytes); + json.WriteString("UnusedRangeCount"); + json.WriteNumber(stat.unusedRangeCount); + + if (stat.statistics.allocationCount > 1) + { + json.WriteString("AllocationSizeMin"); + json.WriteNumber(stat.allocationSizeMin); + json.WriteString("AllocationSizeMax"); + json.WriteNumber(stat.allocationSizeMax); + } + if (stat.unusedRangeCount > 1) + { + json.WriteString("UnusedRangeSizeMin"); + json.WriteNumber(stat.unusedRangeSizeMin); + json.WriteString("UnusedRangeSizeMax"); + json.WriteNumber(stat.unusedRangeSizeMax); + } + json.EndObject(); +} +#endif // _VMA_JSON_WRITER + +#ifndef _VMA_MAPPING_HYSTERESIS + +class VmaMappingHysteresis +{ + VMA_CLASS_NO_COPY(VmaMappingHysteresis) +public: + VmaMappingHysteresis() = default; + + uint32_t GetExtraMapping() const { return m_ExtraMapping; } + + // Call when Map was called. + // Returns true if switched to extra +1 mapping reference count. 
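+    // Rough intuition (informal summary of the logic in PostMap/PostUnmap/PostAlloc/PostFree
+    // below): while no extra mapping is held, Map and Unmap calls advance m_MajorCounter, and
+    // once PostMap sees it reach COUNTER_MIN_EXTRA_MAPPING the block keeps one persistent "+1"
+    // mapping so a map-heavy workload stops paying for repeated map/unmap of the same memory.
+    // Once the extra mapping is held, it is allocation and free traffic on the block that
+    // advances the counter instead, so PostFree eventually releases the extra mapping again
+    // for blocks that are no longer being mapped frequently.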
+ bool PostMap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) + { + m_ExtraMapping = 1; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + + // Call when Unmap was called. + void PostUnmap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + ++m_MajorCounter; + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was made from the memory block. + void PostAlloc() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + ++m_MajorCounter; + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was freed from the memory block. + // Returns true if switched to extra -1 mapping reference count. + bool PostFree() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && + m_MajorCounter > m_MinorCounter + 1) + { + m_ExtraMapping = 0; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + +private: + static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7; + + uint32_t m_MinorCounter = 0; + uint32_t m_MajorCounter = 0; + uint32_t m_ExtraMapping = 0; // 0 or 1. + + void PostMinorCounter() + { + if(m_MinorCounter < m_MajorCounter) + { + ++m_MinorCounter; + } + else if(m_MajorCounter > 0) + { + --m_MajorCounter; + --m_MinorCounter; + } + } +}; + +#endif // _VMA_MAPPING_HYSTERESIS + +#ifndef _VMA_DEVICE_MEMORY_BLOCK +/* +Represents a single block of device memory (`VkDeviceMemory`) with all the +data about its regions (aka suballocations, #VmaAllocation), assigned and free. + +Thread-safety: +- Access to m_pMetadata must be externally synchronized. +- Map, Unmap, Bind* are synchronized internally. +*/ +class VmaDeviceMemoryBlock +{ + VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock) +public: + VmaBlockMetadata* m_pMetadata; + + VmaDeviceMemoryBlock(VmaAllocator hAllocator); + ~VmaDeviceMemoryBlock(); + + // Always call after construction. + void Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm, + VkDeviceSize bufferImageGranularity); + // Always call before destruction. + void Destroy(VmaAllocator allocator); + + VmaPool GetParentPool() const { return m_hParentPool; } + VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + uint32_t GetId() const { return m_Id; } + void* GetMappedData() const { return m_pMappedData; } + uint32_t GetMapRefCount() const { return m_MapCount; } + + // Call when allocation/free was made from m_pMetadata. + // Used for m_MappingHysteresis. + void PostAlloc() { m_MappingHysteresis.PostAlloc(); } + void PostFree(VmaAllocator hAllocator); + + // Validates all data structures inside this object. If not valid, returns false. + bool Validate() const; + VkResult CheckCorruption(VmaAllocator hAllocator); + + // ppData can be null. 
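+    // Map/Unmap below are reference counted: `count` is added to (or subtracted from) the
+    // internal map reference count and the memory stays mapped until it drops to zero.
+    // A hypothetical internal caller could therefore do, for example:
+    //
+    //     void* ptr = VMA_NULL;
+    //     if (block->Map(hAllocator, 1, &ptr) == VK_SUCCESS)
+    //     {
+    //         // ... read or write through ptr ...
+    //         block->Unmap(hAllocator, 1);
+    //     }
+    //
+    // where `block` is just an illustrative pointer to this class.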
+ VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); + void Unmap(VmaAllocator hAllocator, uint32_t count); + + VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + + VkResult BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + +private: + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + uint32_t m_MemoryTypeIndex; + uint32_t m_Id; + VkDeviceMemory m_hMemory; + + /* + Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. + Also protects m_MapCount, m_pMappedData. + Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. + */ + VMA_MUTEX m_MapAndBindMutex; + VmaMappingHysteresis m_MappingHysteresis; + uint32_t m_MapCount; + void* m_pMappedData; +}; +#endif // _VMA_DEVICE_MEMORY_BLOCK + +#ifndef _VMA_ALLOCATION_T +struct VmaAllocation_T +{ + friend struct VmaDedicatedAllocationListItemTraits; + + enum FLAGS + { + FLAG_PERSISTENT_MAP = 0x01, + FLAG_MAPPING_ALLOWED = 0x02, + }; + +public: + enum ALLOCATION_TYPE + { + ALLOCATION_TYPE_NONE, + ALLOCATION_TYPE_BLOCK, + ALLOCATION_TYPE_DEDICATED, + }; + + // This struct is allocated using VmaPoolAllocator. + VmaAllocation_T(bool mappingAllowed); + ~VmaAllocation_T(); + + void InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped); + // pMappedData not null means allocation is created with MAPPED flag. 
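+    // For orientation (informal): an allocation is initialized exactly once, either with
+    // InitBlockAllocation() when it is a suballocation inside a VmaDeviceMemoryBlock, or with
+    // InitDedicatedAllocation() below when it owns its own VkDeviceMemory. GetType() then
+    // reports ALLOCATION_TYPE_BLOCK or ALLOCATION_TYPE_DEDICATED accordingly, and the matching
+    // member of the union further down holds the type-specific data.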
+ void InitDedicatedAllocation( + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size); + + ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } + VkDeviceSize GetAlignment() const { return m_Alignment; } + VkDeviceSize GetSize() const { return m_Size; } + void* GetUserData() const { return m_pUserData; } + const char* GetName() const { return m_pName; } + VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } + + VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } + bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } + + void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } + void SetName(VmaAllocator hAllocator, const char* pName); + void FreeName(VmaAllocator hAllocator); + uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation); + VmaAllocHandle GetAllocHandle() const; + VkDeviceSize GetOffset() const; + VmaPool GetParentPool() const; + VkDeviceMemory GetMemory() const; + void* GetMappedData() const; + + void BlockAllocMap(); + void BlockAllocUnmap(); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); + void DedicatedAllocUnmap(VmaAllocator hAllocator); + +#if VMA_STATS_STRING_ENABLED + uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; } + + void InitBufferImageUsage(uint32_t bufferImageUsage); + void PrintParameters(class VmaJsonWriter& json) const; +#endif + +private: + // Allocation out of VmaDeviceMemoryBlock. + struct BlockAllocation + { + VmaDeviceMemoryBlock* m_Block; + VmaAllocHandle m_AllocHandle; + }; + // Allocation for an object that has its own private VkDeviceMemory. + struct DedicatedAllocation + { + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + VkDeviceMemory m_hMemory; + void* m_pMappedData; // Not null means memory is mapped. + VmaAllocation_T* m_Prev; + VmaAllocation_T* m_Next; + }; + union + { + // Allocation out of VmaDeviceMemoryBlock. + BlockAllocation m_BlockAllocation; + // Allocation for an object that has its own private VkDeviceMemory. + DedicatedAllocation m_DedicatedAllocation; + }; + + VkDeviceSize m_Alignment; + VkDeviceSize m_Size; + void* m_pUserData; + char* m_pName; + uint32_t m_MemoryTypeIndex; + uint8_t m_Type; // ALLOCATION_TYPE + uint8_t m_SuballocationType; // VmaSuballocationType + // Reference counter for vmaMapMemory()/vmaUnmapMemory(). + uint8_t m_MapCount; + uint8_t m_Flags; // enum FLAGS +#if VMA_STATS_STRING_ENABLED + uint32_t m_BufferImageUsage; // 0 if unknown. 
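+    // Used only when VMA_STATS_STRING_ENABLED, e.g. by PrintParameters() and by
+    // DebugLogAllocation() further below; it never affects allocation decisions.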
+#endif +}; +#endif // _VMA_ALLOCATION_T + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS +struct VmaDedicatedAllocationListItemTraits +{ + typedef VmaAllocation_T ItemType; + + static ItemType* GetPrev(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType* GetNext(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } + static ItemType*& AccessPrev(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType*& AccessNext(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } +}; +#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST +/* +Stores linked list of VmaAllocation_T objects. +Thread-safe, synchronized internally. +*/ +class VmaDedicatedAllocationList +{ +public: + VmaDedicatedAllocationList() {} + ~VmaDedicatedAllocationList(); + + void Init(bool useMutex) { m_UseMutex = useMutex; } + bool Validate(); + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); + void AddStatistics(VmaStatistics& inoutStats); +#if VMA_STATS_STRING_ENABLED + // Writes JSON array with the list of allocations. + void BuildStatsString(VmaJsonWriter& json); +#endif + + bool IsEmpty(); + void Register(VmaAllocation alloc); + void Unregister(VmaAllocation alloc); + +private: + typedef VmaIntrusiveLinkedList DedicatedAllocationLinkedList; + + bool m_UseMutex = true; + VMA_RW_MUTEX m_Mutex; + DedicatedAllocationLinkedList m_AllocationList; +}; + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS + +VmaDedicatedAllocationList::~VmaDedicatedAllocationList() +{ + VMA_HEAVY_ASSERT(Validate()); + + if (!m_AllocationList.IsEmpty()) + { + VMA_ASSERT(false && "Unfreed dedicated allocations found!"); + } +} + +bool VmaDedicatedAllocationList::Validate() +{ + const size_t declaredCount = m_AllocationList.GetCount(); + size_t actualCount = 0; + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + for (VmaAllocation alloc = m_AllocationList.Front(); + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + { + ++actualCount; + } + VMA_VALIDATE(actualCount == declaredCount); + + return true; +} + +void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +{ + for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) + { + const VkDeviceSize size = item->GetSize(); + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize()); + } +} + +void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + + const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount(); + inoutStats.blockCount += allocCount; + inoutStats.allocationCount += allocCount; + + for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) + { + const VkDeviceSize size = item->GetSize(); + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size; + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json) +{ + VmaMutexLockRead 
lock(m_Mutex, m_UseMutex); + json.BeginArray(); + for (VmaAllocation alloc = m_AllocationList.Front(); + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + { + json.BeginObject(true); + alloc->PrintParameters(json); + json.EndObject(); + } + json.EndArray(); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaDedicatedAllocationList::IsEmpty() +{ + VmaMutexLockRead lock(m_Mutex, m_UseMutex); + return m_AllocationList.IsEmpty(); +} + +void VmaDedicatedAllocationList::Register(VmaAllocation alloc) +{ + VmaMutexLockWrite lock(m_Mutex, m_UseMutex); + m_AllocationList.PushBack(alloc); +} + +void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc) +{ + VmaMutexLockWrite lock(m_Mutex, m_UseMutex); + m_AllocationList.Remove(alloc); +} +#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS +#endif // _VMA_DEDICATED_ALLOCATION_LIST + +#ifndef _VMA_SUBALLOCATION +/* +Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as +allocated memory block or free. +*/ +struct VmaSuballocation +{ + VkDeviceSize offset; + VkDeviceSize size; + void* userData; + VmaSuballocationType type; +}; + +// Comparator for offsets. +struct VmaSuballocationOffsetLess +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset < rhs.offset; + } +}; + +struct VmaSuballocationOffsetGreater +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset > rhs.offset; + } +}; + +struct VmaSuballocationItemSizeLess +{ + bool operator()(const VmaSuballocationList::iterator lhs, + const VmaSuballocationList::iterator rhs) const + { + return lhs->size < rhs->size; + } + + bool operator()(const VmaSuballocationList::iterator lhs, + VkDeviceSize rhsSize) const + { + return lhs->size < rhsSize; + } +}; +#endif // _VMA_SUBALLOCATION + +#ifndef _VMA_ALLOCATION_REQUEST +/* +Parameters of planned allocation inside a VmaDeviceMemoryBlock. +item points to a FREE suballocation. +*/ +struct VmaAllocationRequest +{ + VmaAllocHandle allocHandle; + VkDeviceSize size; + VmaSuballocationList::iterator item; + void* customData; + uint64_t algorithmData; + VmaAllocationRequestType type; +}; +#endif // _VMA_ALLOCATION_REQUEST + +#ifndef _VMA_BLOCK_METADATA +/* +Data structure used for bookkeeping of allocations and unused ranges of memory +in a single VkDeviceMemory block. +*/ +class VmaBlockMetadata +{ +public: + // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. + VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata() = default; + + virtual void Init(VkDeviceSize size) { m_Size = size; } + bool IsVirtual() const { return m_IsVirtual; } + VkDeviceSize GetSize() const { return m_Size; } + + // Validates all data structures inside this object. If not valid, returns false. + virtual bool Validate() const = 0; + virtual size_t GetAllocationCount() const = 0; + virtual size_t GetFreeRegionsCount() const = 0; + virtual VkDeviceSize GetSumFreeSize() const = 0; + // Returns true if this block is empty - contains only single free suballocation. 
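+    // (VmaBlockMetadata_Linear below instead defines "empty" as GetAllocationCount() == 0,
+    // since it does not keep an explicit free suballocation.)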
+ virtual bool IsEmpty() const = 0; + virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; + virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; + virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; + + virtual VmaAllocHandle GetAllocationListBegin() const = 0; + virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; + virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; + + // Shouldn't modify blockCount. + virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; + virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; +#endif + + // Tries to find a place for suballocation with given parameters inside this block. + // If succeeded, fills pAllocationRequest and returns true. + // If failed, returns false. + virtual bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual VkResult CheckCorruption(const void* pBlockData) = 0; + + // Makes actual allocation based on request. Request must already be checked and valid. + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) = 0; + + // Frees suballocation assigned to given memory region. + virtual void Free(VmaAllocHandle allocHandle) = 0; + + // Frees all allocations. + // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations! + virtual void Clear() = 0; + + virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0; + virtual void DebugLogAllAllocations() const = 0; + +protected: + const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } + VkDeviceSize GetDebugMargin() const { return IsVirtual() ? 0 : VMA_DEBUG_MARGIN; } + + void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const; +#if VMA_STATS_STRING_ENABLED + // mapRefCount == UINT32_MAX means unspecified. 
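+    // The PrintDetailedMap_* helpers below together emit one JSON object per block,
+    // roughly of this shape (a sketch derived from their implementations; exact key
+    // names and value formats come from VmaJsonWriter):
+    //
+    //     "TotalBytes": ..., "UnusedBytes": ..., "Allocations": ..., "UnusedRanges": ...,
+    //     "Suballocations": [
+    //         { "Offset": ..., "Size": ..., "CustomData": ... },   // used (virtual block)
+    //         { "Offset": ..., "Type": "FREE", "Size": ... },      // unused range
+    //         ...
+    //     ]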
+ void PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const; + void PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size, void* userData) const; + void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, + VkDeviceSize size) const; + void PrintDetailedMap_End(class VmaJsonWriter& json) const; +#endif + +private: + VkDeviceSize m_Size; + const VkAllocationCallbacks* m_pAllocationCallbacks; + const VkDeviceSize m_BufferImageGranularity; + const bool m_IsVirtual; +}; + +#ifndef _VMA_BLOCK_METADATA_FUNCTIONS +VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : m_Size(0), + m_pAllocationCallbacks(pAllocationCallbacks), + m_BufferImageGranularity(bufferImageGranularity), + m_IsVirtual(isVirtual) {} + +void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const +{ + if (IsVirtual()) + { + VMA_DEBUG_LOG("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData); + } + else + { + VMA_ASSERT(userData != VMA_NULL); + VmaAllocation allocation = reinterpret_cast(userData); + + userData = allocation->GetUserData(); + const char* name = allocation->GetName(); + +#if VMA_STATS_STRING_ENABLED + VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u", + offset, size, userData, name ? name : "vma_empty", + VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], + allocation->GetBufferImageUsage()); +#else + VMA_DEBUG_LOG("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u", + offset, size, userData, name ? 
name : "vma_empty", + (uint32_t)allocation->GetSuballocationType()); +#endif // VMA_STATS_STRING_ENABLED + } + +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const +{ + json.WriteString("TotalBytes"); + json.WriteNumber(GetSize()); + + json.WriteString("UnusedBytes"); + json.WriteSize(unusedBytes); + + json.WriteString("Allocations"); + json.WriteSize(allocationCount); + + json.WriteString("UnusedRanges"); + json.WriteSize(unusedRangeCount); + + json.WriteString("Suballocations"); + json.BeginArray(); +} + +void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size, void* userData) const +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + if (IsVirtual()) + { + json.WriteString("Size"); + json.WriteNumber(size); + if (userData) + { + json.WriteString("CustomData"); + json.BeginString(); + json.ContinueString_Pointer(userData); + json.EndString(); + } + } + else + { + ((VmaAllocation)userData)->PrintParameters(json); + } + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size) const +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]); + + json.WriteString("Size"); + json.WriteNumber(size); + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const +{ + json.EndArray(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_BLOCK_METADATA_FUNCTIONS +#endif // _VMA_BLOCK_METADATA + +#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY +// Before deleting object of this class remember to call 'Destroy()' +class VmaBlockBufferImageGranularity final +{ +public: + struct ValidationContext + { + const VkAllocationCallbacks* allocCallbacks; + uint16_t* pageAllocs; + }; + + VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity); + ~VmaBlockBufferImageGranularity(); + + bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; } + + void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size); + // Before destroying object you must call free it's memory + void Destroy(const VkAllocationCallbacks* pAllocationCallbacks); + + void RoundupAllocRequest(VmaSuballocationType allocType, + VkDeviceSize& inOutAllocSize, + VkDeviceSize& inOutAllocAlignment) const; + + bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, + VkDeviceSize allocSize, + VkDeviceSize blockOffset, + VkDeviceSize blockSize, + VmaSuballocationType allocType) const; + + void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size); + void FreePages(VkDeviceSize offset, VkDeviceSize size); + void Clear(); + + ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks, + bool isVirutal) const; + bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const; + bool FinishValidation(ValidationContext& ctx) const; + +private: + static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256; + + struct RegionInfo + { + uint8_t allocType; + uint16_t allocCount; + }; + + VkDeviceSize m_BufferImageGranularity; + uint32_t m_RegionCount; + RegionInfo* m_RegionInfo; + + uint32_t GetStartPage(VkDeviceSize offset) 
const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); } + uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); } + + uint32_t OffsetToPageIndex(VkDeviceSize offset) const; + void AllocPage(RegionInfo& page, uint8_t allocType); +}; + +#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS +VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity) + : m_BufferImageGranularity(bufferImageGranularity), + m_RegionCount(0), + m_RegionInfo(VMA_NULL) {} + +VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity() +{ + VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!"); +} + +void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size) +{ + if (IsEnabled()) + { + m_RegionCount = static_cast(VmaDivideRoundingUp(size, m_BufferImageGranularity)); + m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount); + memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); + } +} + +void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks) +{ + if (m_RegionInfo) + { + vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount); + m_RegionInfo = VMA_NULL; + } +} + +void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType, + VkDeviceSize& inOutAllocSize, + VkDeviceSize& inOutAllocAlignment) const +{ + if (m_BufferImageGranularity > 1 && + m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY) + { + if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) + { + inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity); + inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity); + } + } +} + +bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, + VkDeviceSize allocSize, + VkDeviceSize blockOffset, + VkDeviceSize blockSize, + VmaSuballocationType allocType) const +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(inOutAllocOffset); + if (m_RegionInfo[startPage].allocCount > 0 && + VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[startPage].allocType), allocType)) + { + inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity); + if (blockSize < allocSize + inOutAllocOffset - blockOffset) + return true; + ++startPage; + } + uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize); + if (endPage != startPage && + m_RegionInfo[endPage].allocCount > 0 && + VmaIsBufferImageGranularityConflict(static_cast(m_RegionInfo[endPage].allocType), allocType)) + { + return true; + } + } + return false; +} + +void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size) +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(offset); + AllocPage(m_RegionInfo[startPage], allocType); + + uint32_t endPage = GetEndPage(offset, size); + if (startPage != endPage) + AllocPage(m_RegionInfo[endPage], allocType); + } +} + +void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size) +{ + if (IsEnabled()) + { + uint32_t startPage = GetStartPage(offset); + --m_RegionInfo[startPage].allocCount; + if (m_RegionInfo[startPage].allocCount == 0) + m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; 
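+        // The allocation may also touch a second granularity page at its end; handle that
+        // page separately below (it can coincide with startPage for small allocations, in
+        // which case nothing more needs to be done).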
+ uint32_t endPage = GetEndPage(offset, size); + if (startPage != endPage) + { + --m_RegionInfo[endPage].allocCount; + if (m_RegionInfo[endPage].allocCount == 0) + m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; + } + } +} + +void VmaBlockBufferImageGranularity::Clear() +{ + if (m_RegionInfo) + memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); +} + +VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation( + const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const +{ + ValidationContext ctx{ pAllocationCallbacks, VMA_NULL }; + if (!isVirutal && IsEnabled()) + { + ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount); + memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t)); + } + return ctx; +} + +bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, + VkDeviceSize offset, VkDeviceSize size) const +{ + if (IsEnabled()) + { + uint32_t start = GetStartPage(offset); + ++ctx.pageAllocs[start]; + VMA_VALIDATE(m_RegionInfo[start].allocCount > 0); + + uint32_t end = GetEndPage(offset, size); + if (start != end) + { + ++ctx.pageAllocs[end]; + VMA_VALIDATE(m_RegionInfo[end].allocCount > 0); + } + } + return true; +} + +bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const +{ + // Check proper page structure + if (IsEnabled()) + { + VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!"); + + for (uint32_t page = 0; page < m_RegionCount; ++page) + { + VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount); + } + vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount); + ctx.pageAllocs = VMA_NULL; + } + return true; +} + +uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const +{ + return static_cast(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity)); +} + +void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) +{ + // When current alloc type is free then it can be overriden by new type + if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE)) + page.allocType = allocType; + + ++page.allocCount; +} +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY + +#if 0 +#ifndef _VMA_BLOCK_METADATA_GENERIC +class VmaBlockMetadata_Generic : public VmaBlockMetadata +{ + friend class VmaDefragmentationAlgorithm_Generic; + friend class VmaDefragmentationAlgorithm_Fast; + VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic) +public: + VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_Generic() = default; + + size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; } + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } + bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); } + void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }; + + void Init(VkDeviceSize size) override; + bool Validate() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void 
PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + VkResult CheckCorruption(const void* pBlockData) override; + + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + void DebugLogAllAllocations() const override; + +private: + uint32_t m_FreeCount; + VkDeviceSize m_SumFreeSize; + VmaSuballocationList m_Suballocations; + // Suballocations that are free. Sorted by size, ascending. + VmaVector> m_FreeSuballocationsBySize; + + VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); } + + VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const; + bool ValidateFreeSuballocationList() const; + + // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. + // If yes, fills pOffset and returns true. If no, returns false. + bool CheckAllocation( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + VmaAllocHandle* pAllocHandle) const; + + // Given free suballocation, it merges it with following one, which must also be free. + void MergeFreeWithNext(VmaSuballocationList::iterator item); + // Releases given suballocation, making it free. + // Merges it with adjacent free suballocations if applicable. + // Returns iterator to new free suballocation at this place. + VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem); + // Given free suballocation, it inserts it into sorted list of + // m_FreeSuballocationsBySize if it is suitable. + void RegisterFreeSuballocation(VmaSuballocationList::iterator item); + // Given free suballocation, it removes it from sorted list of + // m_FreeSuballocationsBySize if it is suitable. 
+ void UnregisterFreeSuballocation(VmaSuballocationList::iterator item); +}; + +#ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS +VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_FreeCount(0), + m_SumFreeSize(0), + m_Suballocations(VmaStlAllocator(pAllocationCallbacks)), + m_FreeSuballocationsBySize(VmaStlAllocator(pAllocationCallbacks)) {} + +void VmaBlockMetadata_Generic::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + + m_FreeCount = 1; + m_SumFreeSize = size; + + VmaSuballocation suballoc = {}; + suballoc.offset = 0; + suballoc.size = size; + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + + m_Suballocations.push_back(suballoc); + m_FreeSuballocationsBySize.push_back(m_Suballocations.begin()); +} + +bool VmaBlockMetadata_Generic::Validate() const +{ + VMA_VALIDATE(!m_Suballocations.empty()); + + // Expected offset of new suballocation as calculated from previous ones. + VkDeviceSize calculatedOffset = 0; + // Expected number of free suballocations as calculated from traversing their list. + uint32_t calculatedFreeCount = 0; + // Expected sum size of free suballocations as calculated from traversing their list. + VkDeviceSize calculatedSumFreeSize = 0; + // Expected number of free suballocations that should be registered in + // m_FreeSuballocationsBySize calculated from traversing their list. + size_t freeSuballocationsToRegister = 0; + // True if previous visited suballocation was free. + bool prevFree = false; + + const VkDeviceSize debugMargin = GetDebugMargin(); + + for (const auto& subAlloc : m_Suballocations) + { + // Actual offset of this suballocation doesn't match expected one. + VMA_VALIDATE(subAlloc.offset == calculatedOffset); + + const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE); + // Two adjacent free suballocations are invalid. They should be merged. + VMA_VALIDATE(!prevFree || !currFree); + + VmaAllocation alloc = (VmaAllocation)subAlloc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + + if (currFree) + { + calculatedSumFreeSize += subAlloc.size; + ++calculatedFreeCount; + ++freeSuballocationsToRegister; + + // Margin required between allocations - every free space must be at least that large. + VMA_VALIDATE(subAlloc.size >= debugMargin); + } + else + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == subAlloc.size); + } + + // Margin required between allocations - previous allocation must be free. + VMA_VALIDATE(debugMargin == 0 || prevFree); + } + + calculatedOffset += subAlloc.size; + prevFree = currFree; + } + + // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't + // match expected one. + VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister); + + VkDeviceSize lastSize = 0; + for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i) + { + VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i]; + + // Only free suballocations can be registered in m_FreeSuballocationsBySize. + VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE); + // They must be sorted by size ascending. + VMA_VALIDATE(suballocItem->size >= lastSize); + + lastSize = suballocItem->size; + } + + // Check if totals match calculated values. 
+ VMA_VALIDATE(ValidateFreeSuballocationList()); + VMA_VALIDATE(calculatedOffset == GetSize()); + VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize); + VMA_VALIDATE(calculatedFreeCount == m_FreeCount); + + return true; +} + +void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); + + for (const auto& suballoc : m_Suballocations) + { + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + else + VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size); + } +} + +void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const +{ + inoutStats.blockCount++; + inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount; + inoutStats.blockBytes += GetSize(); + inoutStats.allocationBytes += GetSize() - m_SumFreeSize; +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const +{ + PrintDetailedMap_Begin(json, + m_SumFreeSize, // unusedBytes + m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount + m_FreeCount, // unusedRangeCount + mapRefCount); + + for (const auto& suballoc : m_Suballocations) + { + if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) + { + PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size); + } + else + { + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + } + } + + PrintDetailedMap_End(json); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Generic::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(!upperAddress); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + + allocSize = AlignAllocationSize(allocSize); + + pAllocationRequest->type = VmaAllocationRequestType::Normal; + pAllocationRequest->size = allocSize; + + const VkDeviceSize debugMargin = GetDebugMargin(); + + // There is not enough total free space in this block to fulfill the request: Early return. + if (m_SumFreeSize < allocSize + debugMargin) + { + return false; + } + + // New algorithm, efficiently searching freeSuballocationsBySize. + const size_t freeSuballocCount = m_FreeSuballocationsBySize.size(); + if (freeSuballocCount > 0) + { + if (strategy == 0 || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT) + { + // Find first free suballocation with size not less than allocSize + debugMargin. 
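+            // m_FreeSuballocationsBySize is sorted by size ascending, so a binary search
+            // locates the first large-enough candidate; the loop below then walks forward
+            // past candidates that fail the alignment/granularity checks in CheckAllocation().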
+ VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( + m_FreeSuballocationsBySize.data(), + m_FreeSuballocationsBySize.data() + freeSuballocCount, + allocSize + debugMargin, + VmaSuballocationItemSizeLess()); + size_t index = it - m_FreeSuballocationsBySize.data(); + for (; index < freeSuballocCount; ++index) + { + if (CheckAllocation( + allocSize, + allocAlignment, + allocType, + m_FreeSuballocationsBySize[index], + &pAllocationRequest->allocHandle)) + { + pAllocationRequest->item = m_FreeSuballocationsBySize[index]; + return true; + } + } + } + else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET) + { + for (VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation( + allocSize, + allocAlignment, + allocType, + it, + &pAllocationRequest->allocHandle)) + { + pAllocationRequest->item = it; + return true; + } + } + } + else + { + VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )); + // Search staring from biggest suballocations. + for (size_t index = freeSuballocCount; index--; ) + { + if (CheckAllocation( + allocSize, + allocAlignment, + allocType, + m_FreeSuballocationsBySize[index], + &pAllocationRequest->allocHandle)) + { + pAllocationRequest->item = m_FreeSuballocationsBySize[index]; + return true; + } + } + } + } + + return false; +} + +VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData) +{ + for (auto& suballoc : m_Suballocations) + { + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Generic::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); + VMA_ASSERT(request.item != m_Suballocations.end()); + VmaSuballocation& suballoc = *request.item; + // Given suballocation is a free block. + VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + // Given offset is inside this suballocation. + VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset); + const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1; + VMA_ASSERT(suballoc.size >= paddingBegin + request.size); + const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size; + + // Unregister this free suballocation from m_FreeSuballocationsBySize and update + // it to become used. + UnregisterFreeSuballocation(request.item); + + suballoc.offset = (VkDeviceSize)request.allocHandle - 1; + suballoc.size = request.size; + suballoc.type = type; + suballoc.userData = userData; + + // If there are any free bytes remaining at the end, insert new free suballocation after current one. 
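+    // (The chosen free suballocation is effectively split into
+    // [ paddingBegin | allocation | paddingEnd ]; each non-zero padding piece becomes a
+    // free suballocation again and is registered in m_FreeSuballocationsBySize.)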
+ if (paddingEnd) + { + VmaSuballocation paddingSuballoc = {}; + paddingSuballoc.offset = suballoc.offset + suballoc.size; + paddingSuballoc.size = paddingEnd; + paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + VmaSuballocationList::iterator next = request.item; + ++next; + const VmaSuballocationList::iterator paddingEndItem = + m_Suballocations.insert(next, paddingSuballoc); + RegisterFreeSuballocation(paddingEndItem); + } + + // If there are any free bytes remaining at the beginning, insert new free suballocation before current one. + if (paddingBegin) + { + VmaSuballocation paddingSuballoc = {}; + paddingSuballoc.offset = suballoc.offset - paddingBegin; + paddingSuballoc.size = paddingBegin; + paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + const VmaSuballocationList::iterator paddingBeginItem = + m_Suballocations.insert(request.item, paddingSuballoc); + RegisterFreeSuballocation(paddingBeginItem); + } + + // Update totals. + m_FreeCount = m_FreeCount - 1; + if (paddingBegin > 0) + { + ++m_FreeCount; + } + if (paddingEnd > 0) + { + ++m_FreeCount; + } + m_SumFreeSize -= request.size; +} + +void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + outInfo.offset = (VkDeviceSize)allocHandle - 1; + const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset); + outInfo.size = suballoc.size; + outInfo.pUserData = suballoc.userData; +} + +void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData; +} + +VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const +{ + if (IsEmpty()) + return VK_NULL_HANDLE; + + for (const auto& suballoc : m_Suballocations) + { + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + return (VmaAllocHandle)(suballoc.offset + 1); + } + VMA_ASSERT(false && "Should contain at least 1 allocation!"); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1); + + for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it) + { + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + return (VmaAllocHandle)(it->offset + 1); + } + return VK_NULL_HANDLE; +} + +void VmaBlockMetadata_Generic::Clear() +{ + const VkDeviceSize size = GetSize(); + + VMA_ASSERT(IsVirtual()); + m_FreeCount = 1; + m_SumFreeSize = size; + m_Suballocations.clear(); + m_FreeSuballocationsBySize.clear(); + + VmaSuballocation suballoc = {}; + suballoc.offset = 0; + suballoc.size = size; + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + m_Suballocations.push_back(suballoc); + + m_FreeSuballocationsBySize.push_back(m_Suballocations.begin()); +} + +void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1); + suballoc.userData = userData; +} + +void VmaBlockMetadata_Generic::DebugLogAllAllocations() const +{ + for (const auto& suballoc : m_Suballocations) + { + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData); + } +} + +VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const +{ + VMA_HEAVY_ASSERT(!m_Suballocations.empty()); + const VkDeviceSize last = m_Suballocations.rbegin()->offset; + if (last == offset) + return 
m_Suballocations.rbegin().drop_const(); + const VkDeviceSize first = m_Suballocations.begin()->offset; + if (first == offset) + return m_Suballocations.begin().drop_const(); + + const size_t suballocCount = m_Suballocations.size(); + const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount; + auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator + { + for (auto suballocItem = begin; + suballocItem != end; + ++suballocItem) + { + if (suballocItem->offset == offset) + return suballocItem.drop_const(); + } + VMA_ASSERT(false && "Not found!"); + return m_Suballocations.end().drop_const(); + }; + // If requested offset is closer to the end of range, search from the end + if (offset - first > suballocCount * step / 2) + { + return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend()); + } + return findSuballocation(m_Suballocations.begin(), m_Suballocations.end()); +} + +bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const +{ + VkDeviceSize lastSize = 0; + for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) + { + const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i]; + + VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_VALIDATE(it->size >= lastSize); + lastSize = it->size; + } + return true; +} + +bool VmaBlockMetadata_Generic::CheckAllocation( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + VmaAllocHandle* pAllocHandle) const +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(suballocItem != m_Suballocations.cend()); + VMA_ASSERT(pAllocHandle != VMA_NULL); + + const VkDeviceSize debugMargin = GetDebugMargin(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + + const VmaSuballocation& suballoc = *suballocItem; + VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + // Size of this suballocation is too small for this request: Early return. + if (suballoc.size < allocSize) + { + return false; + } + + // Start from offset equal to beginning of this suballocation. + VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin()); + + // Apply debugMargin from the end of previous alloc. + if (debugMargin > 0) + { + offset += debugMargin; + } + + // Apply alignment. + offset = VmaAlignUp(offset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment) + { + bool bufferImageGranularityConflict = false; + VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; + while (prevSuballocItem != m_Suballocations.cbegin()) + { + --prevSuballocItem; + const VmaSuballocation& prevSuballoc = *prevSuballocItem; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + offset = VmaAlignUp(offset, bufferImageGranularity); + } + } + + // Calculate padding at the beginning based on current offset. 
+ const VkDeviceSize paddingBegin = offset - suballoc.offset; + + // Fail if requested size plus margin after is bigger than size of this suballocation. + if (paddingBegin + allocSize + debugMargin > suballoc.size) + { + return false; + } + + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (allocSize % bufferImageGranularity || offset % bufferImageGranularity) + { + VmaSuballocationList::const_iterator nextSuballocItem = suballocItem; + ++nextSuballocItem; + while (nextSuballocItem != m_Suballocations.cend()) + { + const VmaSuballocation& nextSuballoc = *nextSuballocItem; + if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + ++nextSuballocItem; + } + } + + *pAllocHandle = (VmaAllocHandle)(offset + 1); + // All tests passed: Success. pAllocHandle is already filled. + return true; +} + +void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item != m_Suballocations.end()); + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaSuballocationList::iterator nextItem = item; + ++nextItem; + VMA_ASSERT(nextItem != m_Suballocations.end()); + VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE); + + item->size += nextItem->size; + --m_FreeCount; + m_Suballocations.erase(nextItem); +} + +VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem) +{ + // Change this suballocation to be marked as free. + VmaSuballocation& suballoc = *suballocItem; + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.userData = VMA_NULL; + + // Update totals. + ++m_FreeCount; + m_SumFreeSize += suballoc.size; + + // Merge with previous and/or next suballocation if it's also free. + bool mergeWithNext = false; + bool mergeWithPrev = false; + + VmaSuballocationList::iterator nextItem = suballocItem; + ++nextItem; + if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)) + { + mergeWithNext = true; + } + + VmaSuballocationList::iterator prevItem = suballocItem; + if (suballocItem != m_Suballocations.begin()) + { + --prevItem; + if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + mergeWithPrev = true; + } + } + + if (mergeWithNext) + { + UnregisterFreeSuballocation(nextItem); + MergeFreeWithNext(suballocItem); + } + + if (mergeWithPrev) + { + UnregisterFreeSuballocation(prevItem); + MergeFreeWithNext(prevItem); + RegisterFreeSuballocation(prevItem); + return prevItem; + } + else + { + RegisterFreeSuballocation(suballocItem); + return suballocItem; + } +} + +void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(item->size > 0); + + // You may want to enable this validation at the beginning or at the end of + // this function, depending on what do you want to check. 
+ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); + + if (m_FreeSuballocationsBySize.empty()) + { + m_FreeSuballocationsBySize.push_back(item); + } + else + { + VmaVectorInsertSorted(m_FreeSuballocationsBySize, item); + } + + //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); +} + +void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(item->size > 0); + + // You may want to enable this validation at the beginning or at the end of + // this function, depending on what do you want to check. + VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); + + VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( + m_FreeSuballocationsBySize.data(), + m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(), + item, + VmaSuballocationItemSizeLess()); + for (size_t index = it - m_FreeSuballocationsBySize.data(); + index < m_FreeSuballocationsBySize.size(); + ++index) + { + if (m_FreeSuballocationsBySize[index] == item) + { + VmaVectorRemove(m_FreeSuballocationsBySize, index); + return; + } + VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found."); + } + VMA_ASSERT(0 && "Not found."); + + //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); +} +#endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_GENERIC +#endif // #if 0 + +#ifndef _VMA_BLOCK_METADATA_LINEAR +/* +Allocations and their references in internal data structure look like this: + +if(m_2ndVectorMode == SECOND_VECTOR_EMPTY): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER): + + 0 +-------+ + | Alloc | 2nd[0] + +-------+ + | Alloc | 2nd[1] + +-------+ + | ... | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | ... 
| + +-------+ + | Alloc | 2nd[1] + +-------+ + | Alloc | 2nd[0] +GetSize() +-------+ + +*/ +class VmaBlockMetadata_Linear : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear) +public: + VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_Linear() = default; + + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } + bool IsEmpty() const override { return GetAllocationCount() == 0; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }; + + void Init(VkDeviceSize size) override; + bool Validate() const override; + size_t GetAllocationCount() const override; + size_t GetFreeRegionsCount() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + VkResult CheckCorruption(const void* pBlockData) override; + + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + void DebugLogAllAllocations() const override; + +private: + /* + There are two suballocation vectors, used in ping-pong way. + The one with index m_1stVectorIndex is called 1st. + The one with index (m_1stVectorIndex ^ 1) is called 2nd. + 2nd can be non-empty only when 1st is not empty. + When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. + */ + typedef VmaVector> SuballocationVectorType; + + enum SECOND_VECTOR_MODE + { + SECOND_VECTOR_EMPTY, + /* + Suballocations in 2nd vector are created later than the ones in 1st, but they + all have smaller offset. + */ + SECOND_VECTOR_RING_BUFFER, + /* + Suballocations in 2nd vector are upper side of double stack. + They all have offsets higher than those in 1st vector. + Top of this stack means smaller offsets, but higher indices in this vector. + */ + SECOND_VECTOR_DOUBLE_STACK, + }; + + VkDeviceSize m_SumFreeSize; + SuballocationVectorType m_Suballocations0, m_Suballocations1; + uint32_t m_1stVectorIndex; + SECOND_VECTOR_MODE m_2ndVectorMode; + // Number of items in 1st vector with hAllocation = null at the beginning. + size_t m_1stNullItemsBeginCount; + // Number of other items in 1st vector with hAllocation = null somewhere in the middle. + size_t m_1stNullItemsMiddleCount; + // Number of items in 2nd vector with hAllocation = null. + size_t m_2ndNullItemsCount; + + SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? 
m_Suballocations1 : m_Suballocations0; } + SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + + VmaSuballocation& FindSuballocation(VkDeviceSize offset) const; + bool ShouldCompact1st() const; + void CleanupAfterFree(); + + bool CreateAllocationRequest_LowerAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + bool CreateAllocationRequest_UpperAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); +}; + +#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS +VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_SumFreeSize(0), + m_Suballocations0(VmaStlAllocator(pAllocationCallbacks)), + m_Suballocations1(VmaStlAllocator(pAllocationCallbacks)), + m_1stVectorIndex(0), + m_2ndVectorMode(SECOND_VECTOR_EMPTY), + m_1stNullItemsBeginCount(0), + m_1stNullItemsMiddleCount(0), + m_2ndNullItemsCount(0) {} + +void VmaBlockMetadata_Linear::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + m_SumFreeSize = size; +} + +bool VmaBlockMetadata_Linear::Validate() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY)); + VMA_VALIDATE(!suballocations1st.empty() || + suballocations2nd.empty() || + m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); + + if (!suballocations1st.empty()) + { + // Null item at the beginning should be accounted into m_1stNullItemsBeginCount. + VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE); + // Null item at the end should be just pop_back(). + VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE); + } + if (!suballocations2nd.empty()) + { + // Null item at the end should be just pop_back(). 
+ VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE); + } + + VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); + VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); + + VkDeviceSize sumUsedSize = 0; + const size_t suballoc1stCount = suballocations1st.size(); + const VkDeviceSize debugMargin = GetDebugMargin(); + VkDeviceSize offset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = 0; i < suballoc2ndCount; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && + suballoc.userData == VMA_NULL); + } + + size_t nullItem1stCount = m_1stNullItemsBeginCount; + + for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem1stCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = suballoc2ndCount; i--; ) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + VMA_VALIDATE(offset <= GetSize()); + VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); + + return true; +} + +size_t VmaBlockMetadata_Linear::GetAllocationCount() const +{ + 
return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount + + AccessSuballocations2nd().size() - m_2ndNullItemsCount; +} + +size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return SIZE_MAX; +} + +void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + if (lastOffset < freeSpace2ndTo1stEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. 
+ lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + if (lastOffset < freeSpace1stTo2ndEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to size. + if (lastOffset < size) + { + const VkDeviceSize unusedRangeSize = size - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } +} + +void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VkDeviceSize size = GetSize(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.blockCount++; + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size - m_SumFreeSize; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. 
+ else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. + const VkDeviceSize unusedRangeSize = size - lastOffset; + } + + // End of loop. 
+ lastOffset = size; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + // FIRST PASS + + size_t unusedRangeCount = 0; + VkDeviceSize usedBytes = 0; + + VkDeviceSize lastOffset = 0; + + size_t alloc2ndCount = 0; + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + size_t alloc1stCount = 0; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc1stCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. 
+ while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = size; + } + } + } + + const VkDeviceSize unusedBytes = size - usedBytes; + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); + + // SECOND PASS + lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + nextAlloc1stIndex = m_1stNullItemsBeginCount; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. 
+ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. + const VkDeviceSize unusedRangeSize = size - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } + + PrintDetailedMap_End(json); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Linear::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + pAllocationRequest->size = allocSize; + return upperAddress ? 
+ CreateAllocationRequest_UpperAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest) : + CreateAllocationRequest_LowerAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest); +} + +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +{ + VMA_ASSERT(!IsVirtual()); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Linear::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; + const VmaSuballocation newSuballoc = { offset, request.size, userData, type }; + + switch (request.type) + { + case VmaAllocationRequestType::UpperAddress: + { + VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + suballocations2nd.push_back(newSuballoc); + m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; + } + break; + case VmaAllocationRequestType::EndOf1st: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + + VMA_ASSERT(suballocations1st.empty() || + offset >= suballocations1st.back().offset + suballocations1st.back().size); + // Check if it fits before the end of the block. + VMA_ASSERT(offset + request.size <= GetSize()); + + suballocations1st.push_back(newSuballoc); + } + break; + case VmaAllocationRequestType::EndOf2nd: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. + VMA_ASSERT(!suballocations1st.empty() && + offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + switch (m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + // First allocation from second part ring buffer. + VMA_ASSERT(suballocations2nd.empty()); + m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; + break; + case SECOND_VECTOR_RING_BUFFER: + // 2-part ring buffer is already started. 
+ VMA_ASSERT(!suballocations2nd.empty()); + break; + case SECOND_VECTOR_DOUBLE_STACK: + VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); + break; + default: + VMA_ASSERT(0); + } + + suballocations2nd.push_back(newSuballoc); + } + break; + default: + VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); + } + + m_SumFreeSize -= newSuballoc.size; +} + +void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + VkDeviceSize offset = (VkDeviceSize)allocHandle - 1; + + if (!suballocations1st.empty()) + { + // First allocation: Mark it as next empty at the beginning. + VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + if (firstSuballoc.offset == offset) + { + firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + firstSuballoc.userData = VMA_NULL; + m_SumFreeSize += firstSuballoc.size; + ++m_1stNullItemsBeginCount; + CleanupAfterFree(); + return; + } + } + + // Last allocation in 2-part ring buffer or top of upper stack (same logic). + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + VmaSuballocation& lastSuballoc = suballocations2nd.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations2nd.pop_back(); + CleanupAfterFree(); + return; + } + } + // Last allocation in 1st vector. + else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) + { + VmaSuballocation& lastSuballoc = suballocations1st.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations1st.pop_back(); + CleanupAfterFree(); + return; + } + } + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the middle of 1st vector. + { + const SuballocationVectorType::iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if (it != suballocations1st.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Item from the middle of 2nd vector. + const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if (it != suballocations2nd.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_2ndNullItemsCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); +} + +void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + outInfo.offset = (VkDeviceSize)allocHandle - 1; + VmaSuballocation& suballoc = FindSuballocation(outInfo.offset); + outInfo.size = suballoc.size; + outInfo.pUserData = suballoc.userData; +} + +void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + return FindSuballocation((VkDeviceSize)allocHandle - 1).userData; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return 0; +} + +void VmaBlockMetadata_Linear::Clear() +{ + m_SumFreeSize = GetSize(); + m_Suballocations0.clear(); + m_Suballocations1.clear(); + // Leaving m_1stVectorIndex unchanged - it doesn't matter. + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; +} + +void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1); + suballoc.userData = userData; +} + +void VmaBlockMetadata_Linear::DebugLogAllAllocations() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); + + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); +} + +VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the 1st vector. 
{ + SuballocationVectorType::const_iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if (it != suballocations1st.end()) + { + return const_cast<VmaSuballocation&>(*it); + } + } + + if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Rest of members stays uninitialized intentionally for better performance. + SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if (it != suballocations2nd.end()) + { + return const_cast<VmaSuballocation&>(*it); + } + } + + VMA_ASSERT(0 && "Allocation not found in linear allocator!"); + return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur. +} + +bool VmaBlockMetadata_Linear::ShouldCompact1st() const +{ + const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + const size_t suballocCount = AccessSuballocations1st().size(); + return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3; +} + +void VmaBlockMetadata_Linear::CleanupAfterFree() +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (IsEmpty()) + { + suballocations1st.clear(); + suballocations2nd.clear(); + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + } + else + { + const size_t suballoc1stCount = suballocations1st.size(); + const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + VMA_ASSERT(nullItem1stCount <= suballoc1stCount); + + // Find more null items at the beginning of 1st vector. + while (m_1stNullItemsBeginCount < suballoc1stCount && + suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + + // Find more null items at the end of 1st vector. + while (m_1stNullItemsMiddleCount > 0 && + suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE) + { + --m_1stNullItemsMiddleCount; + suballocations1st.pop_back(); + } + + // Find more null items at the end of 2nd vector. + while (m_2ndNullItemsCount > 0 && + suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE) + { + --m_2ndNullItemsCount; + suballocations2nd.pop_back(); + } + + // Find more null items at the beginning of 2nd vector. + while (m_2ndNullItemsCount > 0 && + suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE) + { + --m_2ndNullItemsCount; + VmaVectorRemove(suballocations2nd, 0); + } + + if (ShouldCompact1st()) + { + const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount; + size_t srcIndex = m_1stNullItemsBeginCount; + for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex) + { + while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++srcIndex; + } + if (dstIndex != srcIndex) + { + suballocations1st[dstIndex] = suballocations1st[srcIndex]; + } + ++srcIndex; + } + suballocations1st.resize(nonNullItemCount); + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + } + + // 2nd vector became empty. + if (suballocations2nd.empty()) + { + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + } + + // 1st vector became empty.
+ if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) + { + suballocations1st.clear(); + m_1stNullItemsBeginCount = 0; + + if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + // Swap 1st with 2nd. Now 2nd is empty. + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsMiddleCount = m_2ndNullItemsCount; + while (m_1stNullItemsBeginCount < suballocations2nd.size() && + suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + m_2ndNullItemsCount = 0; + m_1stVectorIndex ^= 1; + } + } + } + + VMA_HEAVY_ASSERT(Validate()); +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize debugMargin = GetDebugMargin(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + // Try to allocate at the end of 1st vector. + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations1st.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations1st.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? + suballocations2nd.back().offset : blockSize; + + // There is enough free space at the end after alignment. + if (resultOffset + allocSize + debugMargin <= freeSpaceEnd) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. 
+ if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on previous page. + break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + // pAllocationRequest->item, customData unused. + pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; + return true; + } + } + + // Wrap-around to end of 2nd vector. Try to allocate there, watching for the + // beginning of 1st vector as the end of free space. + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(!suballocations1st.empty()); + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + size_t index1st = m_1stNullItemsBeginCount; + + // There is enough free space at the end after alignment. + if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) || + (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset)) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) + { + for (size_t nextSuballocIndex = index1st; + nextSuballocIndex < suballocations1st.size(); + nextSuballocIndex++) + { + const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; + // pAllocationRequest->item, customData unused. + return true; + } + } + + return false; +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); + return false; + } + + // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). + if (allocSize > blockSize) + { + return false; + } + VkDeviceSize resultBaseOffset = blockSize - allocSize; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset - allocSize; + if (allocSize > lastSuballoc.offset) + { + return false; + } + } + + // Start from offset equal to end of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + const VkDeviceSize debugMargin = GetDebugMargin(); + + // Apply debugMargin at the end. + if (debugMargin > 0) + { + if (resultOffset < debugMargin) + { + return false; + } + resultOffset -= debugMargin; + } + + // Apply alignment. + resultOffset = VmaAlignDown(resultOffset, allocAlignment); + + // Check next suballocations from 2nd for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); + } + } + + // There is enough free space. + const VkDeviceSize endOf1st = !suballocations1st.empty() ? + suballocations1st.back().offset + suballocations1st.back().size : + 0; + if (endOf1st + debugMargin <= resultOffset) + { + // Check previous suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (bufferImageGranularity > 1) + { + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + // pAllocationRequest->item unused. + pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; + return true; + } + + return false; +} +#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_LINEAR + +#if 0 +#ifndef _VMA_BLOCK_METADATA_BUDDY +/* +- GetSize() is the original size of allocated memory block. +- m_UsableSize is this size aligned down to a power of two. + All allocations and calculations happen relative to m_UsableSize. +- GetUnusableSize() is the difference between them. + It is reported as separate, unused range, not available for allocations. + +Node at level 0 has size = m_UsableSize. +Each next level contains nodes with size 2 times smaller than current level. +m_LevelCount is the maximum number of levels to use in the current object. +*/ +class VmaBlockMetadata_Buddy : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy) +public: + VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_Buddy(); + + size_t GetAllocationCount() const override { return m_AllocationCount; } + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); } + bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; } + VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }; + void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); } + + void Init(VkDeviceSize size) override; + bool Validate() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + +private: + static const size_t MAX_LEVELS = 48; + + struct ValidationContext + { + size_t calculatedAllocationCount = 0; + size_t calculatedFreeCount = 0; + VkDeviceSize calculatedSumFreeSize = 0; + }; + struct Node + { + VkDeviceSize offset; + enum TYPE + { + TYPE_FREE, + TYPE_ALLOCATION, + TYPE_SPLIT, + TYPE_COUNT + } type; + Node* parent; + Node* buddy; + + union + { + struct + { + Node* prev; + Node* next; + } free; + struct + { + void* userData; + } allocation; + struct + { + Node* leftChild; + } split; + }; + }; + + // Size of the memory block aligned down to a power of two. 
VkDeviceSize m_UsableSize; + uint32_t m_LevelCount; + VmaPoolAllocator<Node> m_NodeAllocator; + Node* m_Root; + struct + { + Node* front; + Node* back; + } m_FreeList[MAX_LEVELS]; + + // Number of nodes in the tree with type == TYPE_ALLOCATION. + size_t m_AllocationCount; + // Number of nodes in the tree with type == TYPE_FREE. + size_t m_FreeCount; + // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes. + // Doesn't include unusable size. + VkDeviceSize m_SumFreeSize; + + VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; } + VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; } + + VkDeviceSize AlignAllocationSize(VkDeviceSize size) const + { + if (!IsVirtual()) + { + size = VmaAlignUp(size, (VkDeviceSize)16); + } + return VmaNextPow2(size); + } + Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const; + void DeleteNodeChildren(Node* node); + bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const; + uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const; + void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const; + // Adds node to the front of FreeList at given level. + // node->type must be FREE. + // node->free.prev, next can be undefined. + void AddToFreeListFront(uint32_t level, Node* node); + // Removes node from FreeList at given level. + // node->type must be FREE. + // node->free.prev, next stay untouched. + void RemoveFromFreeList(uint32_t level, Node* node); + void DebugLogAllAllocationNode(Node* node, uint32_t level) const; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const; +#endif +}; + +#ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS +VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity + m_Root(VMA_NULL), + m_AllocationCount(0), + m_FreeCount(1), + m_SumFreeSize(0) +{ + memset(m_FreeList, 0, sizeof(m_FreeList)); +} + +VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy() +{ + DeleteNodeChildren(m_Root); + m_NodeAllocator.Free(m_Root); +} + +void VmaBlockMetadata_Buddy::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + + m_UsableSize = VmaPrevPow2(size); + m_SumFreeSize = m_UsableSize; + + // Calculate m_LevelCount. + const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16; + m_LevelCount = 1; + while (m_LevelCount < MAX_LEVELS && + LevelToNodeSize(m_LevelCount) >= minNodeSize) + { + ++m_LevelCount; + } + + Node* rootNode = m_NodeAllocator.Alloc(); + rootNode->offset = 0; + rootNode->type = Node::TYPE_FREE; + rootNode->parent = VMA_NULL; + rootNode->buddy = VMA_NULL; + + m_Root = rootNode; + AddToFreeListFront(0, rootNode); +} + +bool VmaBlockMetadata_Buddy::Validate() const +{ + // Validate tree. + ValidationContext ctx; + if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0))) + { + VMA_VALIDATE(false && "ValidateNode failed."); + } + VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount); + VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize); + + // Validate free node lists.
for (uint32_t level = 0; level < m_LevelCount; ++level) + { + VMA_VALIDATE(m_FreeList[level].front == VMA_NULL || + m_FreeList[level].front->free.prev == VMA_NULL); + + for (Node* node = m_FreeList[level].front; + node != VMA_NULL; + node = node->free.next) + { + VMA_VALIDATE(node->type == Node::TYPE_FREE); + + if (node->free.next == VMA_NULL) + { + VMA_VALIDATE(m_FreeList[level].back == node); + } + else + { + VMA_VALIDATE(node->free.next->free.prev == node); + } + } + } + + // Validate that free lists at higher levels are empty. + for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level) + { + VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL); + } + + return true; +} + +void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); + + AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0)); + + const VkDeviceSize unusableSize = GetUnusableSize(); + if (unusableSize > 0) + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize); +} + +void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const +{ + inoutStats.blockCount++; + inoutStats.allocationCount += (uint32_t)m_AllocationCount; + inoutStats.blockBytes += GetSize(); + inoutStats.allocationBytes += GetSize() - m_SumFreeSize; +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const +{ + VmaDetailedStatistics stats; + VmaClearDetailedStatistics(stats); + AddDetailedStatistics(stats); + + PrintDetailedMap_Begin( + json, + stats.statistics.blockBytes - stats.statistics.allocationBytes, + stats.statistics.allocationCount, + stats.unusedRangeCount, + mapRefCount); + + PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0)); + + const VkDeviceSize unusableSize = GetUnusableSize(); + if (unusableSize > 0) + { + PrintDetailedMap_UnusedRange(json, + m_UsableSize, // offset + unusableSize); // size + } + + PrintDetailedMap_End(json); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Buddy::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm."); + + allocSize = AlignAllocationSize(allocSize); + + // Simple way to respect bufferImageGranularity. May be optimized some day. + // Whenever it might be an OPTIMAL image...
+ if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) + { + allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity()); + allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity()); + } + + if (allocSize > m_UsableSize) + { + return false; + } + + const uint32_t targetLevel = AllocSizeToLevel(allocSize); + for (uint32_t level = targetLevel; level--; ) + { + for (Node* freeNode = m_FreeList[level].front; + freeNode != VMA_NULL; + freeNode = freeNode->free.next) + { + if (freeNode->offset % allocAlignment == 0) + { + pAllocationRequest->type = VmaAllocationRequestType::Normal; + pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1); + pAllocationRequest->size = allocSize; + pAllocationRequest->customData = (void*)(uintptr_t)level; + return true; + } + } + } + + return false; +} + +void VmaBlockMetadata_Buddy::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); + + const uint32_t targetLevel = AllocSizeToLevel(request.size); + uint32_t currLevel = (uint32_t)(uintptr_t)request.customData; + + Node* currNode = m_FreeList[currLevel].front; + VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); + const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; + while (currNode->offset != offset) + { + currNode = currNode->free.next; + VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); + } + + // Go down, splitting free nodes. + while (currLevel < targetLevel) + { + // currNode is already first free node at currLevel. + // Remove it from list of free nodes at this currLevel. + RemoveFromFreeList(currLevel, currNode); + + const uint32_t childrenLevel = currLevel + 1; + + // Create two free sub-nodes. + Node* leftChild = m_NodeAllocator.Alloc(); + Node* rightChild = m_NodeAllocator.Alloc(); + + leftChild->offset = currNode->offset; + leftChild->type = Node::TYPE_FREE; + leftChild->parent = currNode; + leftChild->buddy = rightChild; + + rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel); + rightChild->type = Node::TYPE_FREE; + rightChild->parent = currNode; + rightChild->buddy = leftChild; + + // Convert current currNode to split type. + currNode->type = Node::TYPE_SPLIT; + currNode->split.leftChild = leftChild; + + // Add child nodes to free list. Order is important! + AddToFreeListFront(childrenLevel, rightChild); + AddToFreeListFront(childrenLevel, leftChild); + + ++m_FreeCount; + ++currLevel; + currNode = m_FreeList[currLevel].front; + + /* + We can be sure that currNode, as left child of node previously split, + also fulfills the alignment requirement. + */ + } + + // Remove from free list. + VMA_ASSERT(currLevel == targetLevel && + currNode != VMA_NULL && + currNode->type == Node::TYPE_FREE); + RemoveFromFreeList(currLevel, currNode); + + // Convert to allocation node. 
+ currNode->type = Node::TYPE_ALLOCATION; + currNode->allocation.userData = userData; + + ++m_AllocationCount; + --m_FreeCount; + m_SumFreeSize -= request.size; +} + +void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + uint32_t level = 0; + outInfo.offset = (VkDeviceSize)allocHandle - 1; + const Node* const node = FindAllocationNode(outInfo.offset, level); + outInfo.size = LevelToNodeSize(level); + outInfo.pUserData = node->allocation.userData; +} + +void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + uint32_t level = 0; + const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); + return node->allocation.userData; +} + +VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + return VK_NULL_HANDLE; +} + +void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node) +{ + if (node->type == Node::TYPE_SPLIT) + { + DeleteNodeChildren(node->split.leftChild->buddy); + DeleteNodeChildren(node->split.leftChild); + const VkAllocationCallbacks* allocationCallbacks = GetAllocationCallbacks(); + m_NodeAllocator.Free(node->split.leftChild->buddy); + m_NodeAllocator.Free(node->split.leftChild); + } +} + +void VmaBlockMetadata_Buddy::Clear() +{ + DeleteNodeChildren(m_Root); + m_Root->type = Node::TYPE_FREE; + m_AllocationCount = 0; + m_FreeCount = 1; + m_SumFreeSize = m_UsableSize; +} + +void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + uint32_t level = 0; + Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); + node->allocation.userData = userData; +} + +VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const +{ + Node* node = m_Root; + VkDeviceSize nodeOffset = 0; + outLevel = 0; + VkDeviceSize levelNodeSize = LevelToNodeSize(0); + while (node->type == Node::TYPE_SPLIT) + { + const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1; + if (offset < nodeOffset + nextLevelNodeSize) + { + node = node->split.leftChild; + } + else + { + node = node->split.leftChild->buddy; + nodeOffset += nextLevelNodeSize; + } + ++outLevel; + levelNodeSize = nextLevelNodeSize; + } + + VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION); + return node; +} + +bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const +{ + VMA_VALIDATE(level < m_LevelCount); + VMA_VALIDATE(curr->parent == parent); + VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL)); + VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr); + switch (curr->type) + { + case Node::TYPE_FREE: + // curr->free.prev, next are validated separately. 
+ ctx.calculatedSumFreeSize += levelNodeSize; + ++ctx.calculatedFreeCount; + break; + case Node::TYPE_ALLOCATION: + ++ctx.calculatedAllocationCount; + if (!IsVirtual()) + { + VMA_VALIDATE(curr->allocation.userData != VMA_NULL); + } + break; + case Node::TYPE_SPLIT: + { + const uint32_t childrenLevel = level + 1; + const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1; + const Node* const leftChild = curr->split.leftChild; + VMA_VALIDATE(leftChild != VMA_NULL); + VMA_VALIDATE(leftChild->offset == curr->offset); + if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize)) + { + VMA_VALIDATE(false && "ValidateNode for left child failed."); + } + const Node* const rightChild = leftChild->buddy; + VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize); + if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize)) + { + VMA_VALIDATE(false && "ValidateNode for right child failed."); + } + } + break; + default: + return false; + } + + return true; +} + +uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const +{ + // I know this could be optimized somehow e.g. by using std::log2p1 from C++20. + uint32_t level = 0; + VkDeviceSize currLevelNodeSize = m_UsableSize; + VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1; + while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount) + { + ++level; + currLevelNodeSize >>= 1; + nextLevelNodeSize >>= 1; + } + return level; +} + +void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle) +{ + uint32_t level = 0; + Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); + + ++m_FreeCount; + --m_AllocationCount; + m_SumFreeSize += LevelToNodeSize(level); + + node->type = Node::TYPE_FREE; + + // Join free nodes if possible. + while (level > 0 && node->buddy->type == Node::TYPE_FREE) + { + RemoveFromFreeList(level, node->buddy); + Node* const parent = node->parent; + + m_NodeAllocator.Free(node->buddy); + m_NodeAllocator.Free(node); + parent->type = Node::TYPE_FREE; + + node = parent; + --level; + --m_FreeCount; + } + + AddToFreeListFront(level, node); +} + +void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const +{ + switch (node->type) + { + case Node::TYPE_FREE: + VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize); + break; + case Node::TYPE_ALLOCATION: + VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize); + break; + case Node::TYPE_SPLIT: + { + const VkDeviceSize childrenNodeSize = levelNodeSize / 2; + const Node* const leftChild = node->split.leftChild; + AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize); + const Node* const rightChild = leftChild->buddy; + AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize); + } + break; + default: + VMA_ASSERT(0); + } +} + +void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node) +{ + VMA_ASSERT(node->type == Node::TYPE_FREE); + + // List is empty. 
+ Node* const frontNode = m_FreeList[level].front; + if (frontNode == VMA_NULL) + { + VMA_ASSERT(m_FreeList[level].back == VMA_NULL); + node->free.prev = node->free.next = VMA_NULL; + m_FreeList[level].front = m_FreeList[level].back = node; + } + else + { + VMA_ASSERT(frontNode->free.prev == VMA_NULL); + node->free.prev = VMA_NULL; + node->free.next = frontNode; + frontNode->free.prev = node; + m_FreeList[level].front = node; + } +} + +void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node) +{ + VMA_ASSERT(m_FreeList[level].front != VMA_NULL); + + // It is at the front. + if (node->free.prev == VMA_NULL) + { + VMA_ASSERT(m_FreeList[level].front == node); + m_FreeList[level].front = node->free.next; + } + else + { + Node* const prevFreeNode = node->free.prev; + VMA_ASSERT(prevFreeNode->free.next == node); + prevFreeNode->free.next = node->free.next; + } + + // It is at the back. + if (node->free.next == VMA_NULL) + { + VMA_ASSERT(m_FreeList[level].back == node); + m_FreeList[level].back = node->free.prev; + } + else + { + Node* const nextFreeNode = node->free.next; + VMA_ASSERT(nextFreeNode->free.prev == node); + nextFreeNode->free.prev = node->free.prev; + } +} + +void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const +{ + switch (node->type) + { + case Node::TYPE_FREE: + break; + case Node::TYPE_ALLOCATION: + DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData); + break; + case Node::TYPE_SPLIT: + { + ++level; + DebugLogAllAllocationNode(node->split.leftChild, level); + DebugLogAllAllocationNode(node->split.leftChild->buddy, level); + } + break; + default: + VMA_ASSERT(0); + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const +{ + switch (node->type) + { + case Node::TYPE_FREE: + PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize); + break; + case Node::TYPE_ALLOCATION: + PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData); + break; + case Node::TYPE_SPLIT: + { + const VkDeviceSize childrenNodeSize = levelNodeSize / 2; + const Node* const leftChild = node->split.leftChild; + PrintDetailedMapNode(json, leftChild, childrenNodeSize); + const Node* const rightChild = leftChild->buddy; + PrintDetailedMapNode(json, rightChild, childrenNodeSize); + } + break; + default: + VMA_ASSERT(0); + } +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_BUDDY +#endif // #if 0 + +#ifndef _VMA_BLOCK_METADATA_TLSF +// To not search current larger region if first allocation won't succeed and skip to smaller range +// use with VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as strategy in CreateAllocationRequest(). +// When fragmentation and reusal of previous blocks doesn't matter then use with +// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for fastest alloc time possible. 
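+//
+// As an illustrative sketch only, a user selects one of these strategies through the
+// allocation create flags of the public API; a valid `allocator` and a filled
+// `bufCreateInfo` (VkBufferCreateInfo) are assumed here:
+//
+//     VmaAllocationCreateInfo allocCreateInfo = {};
+//     allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+//     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
+//     // ...or VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT to favor low fragmentation.
+//     VkBuffer buf;
+//     VmaAllocation alloc;
+//     vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+//
+// The chosen strategy bits arrive here as the `strategy` parameter of CreateAllocationRequest().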
+class VmaBlockMetadata_TLSF : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_TLSF) +public: + VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_TLSF(); + + size_t GetAllocationCount() const override { return m_AllocCount; } + size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; } + VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; } + bool IsEmpty() const override { return m_NullBlock->offset == 0; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }; + + void Init(VkDeviceSize size) override; + bool Validate() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + VkResult CheckCorruption(const void* pBlockData) override; + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + void DebugLogAllAllocations() const override; + +private: + // According to original paper it should be preferable 4 or 5: + // M. Masmano, I. Ripoll, A. Crespo, and J. 
Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems" + // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf + static const uint8_t SECOND_LEVEL_INDEX = 5; + static const uint16_t SMALL_BUFFER_SIZE = 256; + static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16; + static const uint8_t MEMORY_CLASS_SHIFT = 7; + static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT; + + class Block + { + public: + VkDeviceSize offset; + VkDeviceSize size; + Block* prevPhysical; + Block* nextPhysical; + + void MarkFree() { prevFree = VMA_NULL; } + void MarkTaken() { prevFree = this; } + bool IsFree() const { return prevFree != this; } + void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; } + Block*& PrevFree() { return prevFree; } + Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; } + + private: + Block* prevFree; // Address of the same block here indicates that block is taken + union + { + Block* nextFree; + void* userData; + }; + }; + + size_t m_AllocCount; + // Total number of free blocks besides null block + size_t m_BlocksFreeCount; + // Total size of free blocks excluding null block + VkDeviceSize m_BlocksFreeSize; + uint32_t m_IsFreeBitmap; + uint8_t m_MemoryClasses; + uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES]; + uint32_t m_ListsCount; + /* + * 0: 0-3 lists for small buffers + * 1+: 0-(2^SLI-1) lists for normal buffers + */ + Block** m_FreeList; + VmaPoolAllocator m_BlockAllocator; + Block* m_NullBlock; + VmaBlockBufferImageGranularity m_GranularityHandler; + + uint8_t SizeToMemoryClass(VkDeviceSize size) const; + uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const; + uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const; + uint32_t GetListIndex(VkDeviceSize size) const; + + void RemoveFreeBlock(Block* block); + void InsertFreeBlock(Block* block); + void MergeBlock(Block* block, Block* prev); + + Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const; + bool CheckBlock( + Block& block, + uint32_t listIndex, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaAllocationRequest* pAllocationRequest); +}; + +#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS +VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_AllocCount(0), + m_BlocksFreeCount(0), + m_BlocksFreeSize(0), + m_IsFreeBitmap(0), + m_MemoryClasses(0), + m_ListsCount(0), + m_FreeList(VMA_NULL), + m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT), + m_NullBlock(VMA_NULL), + m_GranularityHandler(bufferImageGranularity) {} + +VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF() +{ + if (m_FreeList) + vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount); + m_GranularityHandler.Destroy(GetAllocationCallbacks()); +} + +void VmaBlockMetadata_TLSF::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + + if (!IsVirtual()) + m_GranularityHandler.Init(GetAllocationCallbacks(), size); + + m_NullBlock = m_BlockAllocator.Alloc(); + m_NullBlock->size = size; + m_NullBlock->offset = 0; + m_NullBlock->prevPhysical = VMA_NULL; + m_NullBlock->nextPhysical = VMA_NULL; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + uint8_t memoryClass = SizeToMemoryClass(size); + uint16_t sli = SizeToSecondIndex(size, memoryClass); + m_ListsCount = (memoryClass == 0 ? 
0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1; + if (IsVirtual()) + m_ListsCount += 1UL << SECOND_LEVEL_INDEX; + else + m_ListsCount += 4; + + m_MemoryClasses = memoryClass + 2; + memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t)); + + m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount); + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); +} + +bool VmaBlockMetadata_TLSF::Validate() const +{ + VMA_VALIDATE(GetSumFreeSize() <= GetSize()); + + VkDeviceSize calculatedSize = m_NullBlock->size; + VkDeviceSize calculatedFreeSize = m_NullBlock->size; + size_t allocCount = 0; + size_t freeCount = 0; + + // Check integrity of free lists + for (uint32_t list = 0; list < m_ListsCount; ++list) + { + Block* block = m_FreeList[list]; + if (block != VMA_NULL) + { + VMA_VALIDATE(block->IsFree()); + VMA_VALIDATE(block->PrevFree() == VMA_NULL); + while (block->NextFree()) + { + VMA_VALIDATE(block->NextFree()->IsFree()); + VMA_VALIDATE(block->NextFree()->PrevFree() == block); + block = block->NextFree(); + } + } + } + + VkDeviceSize nextOffset = m_NullBlock->offset; + auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual()); + + VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL); + if (m_NullBlock->prevPhysical) + { + VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock); + } + // Check all blocks + for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) + { + VMA_VALIDATE(prev->offset + prev->size == nextOffset); + nextOffset = prev->offset; + calculatedSize += prev->size; + + uint32_t listIndex = GetListIndex(prev->size); + if (prev->IsFree()) + { + ++freeCount; + // Check if free block belongs to free list + Block* freeBlock = m_FreeList[listIndex]; + VMA_VALIDATE(freeBlock != VMA_NULL); + + bool found = false; + do + { + if (freeBlock == prev) + found = true; + + freeBlock = freeBlock->NextFree(); + } while (!found && freeBlock != VMA_NULL); + + VMA_VALIDATE(found); + calculatedFreeSize += prev->size; + } + else + { + ++allocCount; + // Check if taken block is not on a free list + Block* freeBlock = m_FreeList[listIndex]; + while (freeBlock) + { + VMA_VALIDATE(freeBlock != prev); + freeBlock = freeBlock->NextFree(); + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size)); + } + } + + if (prev->prevPhysical) + { + VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev); + } + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx)); + } + + VMA_VALIDATE(nextOffset == 0); + VMA_VALIDATE(calculatedSize == GetSize()); + VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize()); + VMA_VALIDATE(allocCount == m_AllocCount); + VMA_VALIDATE(freeCount == m_BlocksFreeCount); + + return true; +} + +void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); + if (m_NullBlock->size > 0) + VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size); + + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (block->IsFree()) + VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size); + else + VmaAddDetailedStatisticsAllocation(inoutStats, block->size); + } +} + +void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const +{ + inoutStats.blockCount++; + inoutStats.allocationCount += 
(uint32_t)m_AllocCount; + inoutStats.blockBytes += GetSize(); + inoutStats.allocationBytes += GetSize() - GetSumFreeSize(); +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const +{ + size_t blockCount = m_AllocCount + m_BlocksFreeCount; + VmaStlAllocator allocator(GetAllocationCallbacks()); + VmaVector> blockList(blockCount, allocator); + + size_t i = blockCount; + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + blockList[--i] = block; + } + VMA_ASSERT(i == 0); + + VmaDetailedStatistics stats; + VmaClearDetailedStatistics(stats); + AddDetailedStatistics(stats); + + PrintDetailedMap_Begin(json, + stats.statistics.blockBytes - stats.statistics.allocationBytes, + stats.statistics.allocationCount, + stats.unusedRangeCount); + + for (; i < blockCount; ++i) + { + Block* block = blockList[i]; + if (block->IsFree()) + PrintDetailedMap_UnusedRange(json, block->offset, block->size); + else + PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData()); + } + if (m_NullBlock->size > 0) + PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size); + + PrintDetailedMap_End(json); +} +#endif + +bool VmaBlockMetadata_TLSF::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!"); + VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm."); + + // For small granularity round up + if (!IsVirtual()) + m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment); + + allocSize += GetDebugMargin(); + // Quick check for too small pool + if (allocSize > GetSumFreeSize()) + return false; + + // If no free blocks in pool then check only null block + if (m_BlocksFreeCount == 0) + return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest); + + // Round up to the next block + VkDeviceSize sizeForNextList = allocSize; + VkDeviceSize smallSizeStep = SMALL_BUFFER_SIZE / (IsVirtual() ? 
1 << SECOND_LEVEL_INDEX : 4); + if (allocSize > SMALL_BUFFER_SIZE) + { + sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX)); + } + else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep) + sizeForNextList = SMALL_BUFFER_SIZE + 1; + else + sizeForNextList += smallSizeStep; + + uint32_t nextListIndex = 0; + uint32_t prevListIndex = 0; + Block* nextListBlock = VMA_NULL; + Block* prevListBlock = VMA_NULL; + + // Check blocks according to strategies + if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) + { + // Quick check for larger block first + nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); + if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // If not fitted then null block + if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Null block failed, search larger bucket + while (nextListBlock) + { + if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + nextListBlock = nextListBlock->NextFree(); + } + + // Failed again, check best fit bucket + prevListBlock = FindFreeBlock(allocSize, prevListIndex); + while (prevListBlock) + { + if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + prevListBlock = prevListBlock->NextFree(); + } + } + else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT) + { + // Check best fit bucket + prevListBlock = FindFreeBlock(allocSize, prevListIndex); + while (prevListBlock) + { + if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + prevListBlock = prevListBlock->NextFree(); + } + + // If failed check null block + if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Check larger bucket + nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); + while (nextListBlock) + { + if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + nextListBlock = nextListBlock->NextFree(); + } + } + else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT ) + { + // Perform search from the start + VmaStlAllocator allocator(GetAllocationCallbacks()); + VmaVector> blockList(m_BlocksFreeCount, allocator); + + size_t i = m_BlocksFreeCount; + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (block->IsFree() && block->size >= allocSize) + blockList[--i] = block; + } + + for (; i < m_BlocksFreeCount; ++i) + { + Block& block = *blockList[i]; + if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + } + + // If failed check null block + if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Whole range searched, no more memory + return false; + } + else + { + // Check larger bucket + nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex); + while (nextListBlock) + { + if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + nextListBlock = nextListBlock->NextFree(); + } + + // If failed check null block + if (CheckBlock(*m_NullBlock, m_ListsCount, 
allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + + // Check best fit bucket + prevListBlock = FindFreeBlock(allocSize, prevListIndex); + while (prevListBlock) + { + if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + prevListBlock = prevListBlock->NextFree(); + } + } + + // Worst case, full search has to be done + while (++nextListIndex < m_ListsCount) + { + nextListBlock = m_FreeList[nextListIndex]; + while (nextListBlock) + { + if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest)) + return true; + nextListBlock = nextListBlock->NextFree(); + } + } + + // No more memory sadly + return false; +} + +VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData) +{ + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (!block->IsFree()) + { + if (!VmaValidateMagicValue(pBlockData, block->offset + block->size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_TLSF::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF); + + // Get block and pop it from the free list + Block* currentBlock = (Block*)request.allocHandle; + VkDeviceSize offset = request.algorithmData; + VMA_ASSERT(currentBlock != VMA_NULL); + VMA_ASSERT(currentBlock->offset <= offset); + + if (currentBlock != m_NullBlock) + RemoveFreeBlock(currentBlock); + + VkDeviceSize debugMargin = GetDebugMargin(); + VkDeviceSize misssingAlignment = offset - currentBlock->offset; + + // Append missing alignment to prev block or create new one + if (misssingAlignment) + { + Block* prevBlock = currentBlock->prevPhysical; + VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!"); + + if (prevBlock->IsFree() && prevBlock->size != debugMargin) + { + uint32_t oldList = GetListIndex(prevBlock->size); + prevBlock->size += misssingAlignment; + // Check if new size crosses list bucket + if (oldList != GetListIndex(prevBlock->size)) + { + prevBlock->size -= misssingAlignment; + RemoveFreeBlock(prevBlock); + prevBlock->size += misssingAlignment; + InsertFreeBlock(prevBlock); + } + else + m_BlocksFreeSize += misssingAlignment; + } + else + { + Block* newBlock = m_BlockAllocator.Alloc(); + currentBlock->prevPhysical = newBlock; + prevBlock->nextPhysical = newBlock; + newBlock->prevPhysical = prevBlock; + newBlock->nextPhysical = currentBlock; + newBlock->size = misssingAlignment; + newBlock->offset = currentBlock->offset; + newBlock->MarkTaken(); + + InsertFreeBlock(newBlock); + } + + currentBlock->size -= misssingAlignment; + currentBlock->offset += misssingAlignment; + } + + VkDeviceSize size = request.size + debugMargin; + if (currentBlock->size == size) + { + if (currentBlock == m_NullBlock) + { + // Setup new null block + m_NullBlock = m_BlockAllocator.Alloc(); + m_NullBlock->size = 0; + m_NullBlock->offset = currentBlock->offset + size; + m_NullBlock->prevPhysical = currentBlock; + m_NullBlock->nextPhysical = VMA_NULL; + m_NullBlock->MarkFree(); + m_NullBlock->PrevFree() = VMA_NULL; + m_NullBlock->NextFree() = VMA_NULL; + currentBlock->nextPhysical = m_NullBlock; + currentBlock->MarkTaken(); + } + } + else + { + VMA_ASSERT(currentBlock->size > size && "Proper block already found, 
shouldn't find smaller one!"); + + // Create new free block + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = currentBlock->size - size; + newBlock->offset = currentBlock->offset + size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + currentBlock->nextPhysical = newBlock; + currentBlock->size = size; + + if (currentBlock == m_NullBlock) + { + m_NullBlock = newBlock; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + currentBlock->MarkTaken(); + } + else + { + newBlock->nextPhysical->prevPhysical = newBlock; + newBlock->MarkTaken(); + InsertFreeBlock(newBlock); + } + } + currentBlock->UserData() = userData; + + if (debugMargin > 0) + { + currentBlock->size -= debugMargin; + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = debugMargin; + newBlock->offset = currentBlock->offset + currentBlock->size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + newBlock->MarkTaken(); + currentBlock->nextPhysical->prevPhysical = newBlock; + currentBlock->nextPhysical = newBlock; + InsertFreeBlock(newBlock); + } + + if (!IsVirtual()) + m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData, + currentBlock->offset, currentBlock->size); + ++m_AllocCount; +} + +void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) +{ + Block* block = (Block*)allocHandle; + Block* next = block->nextPhysical; + VMA_ASSERT(!block->IsFree() && "Block is already free!"); + + if (!IsVirtual()) + m_GranularityHandler.FreePages(block->offset, block->size); + --m_AllocCount; + + VkDeviceSize debugMargin = GetDebugMargin(); + if (debugMargin > 0) + { + RemoveFreeBlock(next); + MergeBlock(next, block); + block = next; + next = next->nextPhysical; + } + + // Try merging + Block* prev = block->prevPhysical; + if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin) + { + RemoveFreeBlock(prev); + MergeBlock(block, prev); + } + + if (!next->IsFree()) + InsertFreeBlock(block); + else if (next == m_NullBlock) + MergeBlock(m_NullBlock, block); + else + { + RemoveFreeBlock(next); + MergeBlock(next, block); + InsertFreeBlock(next); + } +} + +void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!"); + outInfo.offset = block->offset; + outInfo.size = block->size; + outInfo.pUserData = block->UserData(); +} + +void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!"); + return block->UserData(); +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const +{ + if (m_AllocCount == 0) + return VK_NULL_HANDLE; + + for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!"); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + Block* startBlock = (Block*)prevAlloc; + VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!"); + + for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + return 
VK_NULL_HANDLE; +} + +VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + Block* block = (Block*)alloc; + VMA_ASSERT(!block->IsFree() && "Incorrect block!"); + + if (block->prevPhysical) + return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0; + return 0; +} + +void VmaBlockMetadata_TLSF::Clear() +{ + m_AllocCount = 0; + m_BlocksFreeCount = 0; + m_BlocksFreeSize = 0; + m_IsFreeBitmap = 0; + m_NullBlock->offset = 0; + m_NullBlock->size = GetSize(); + Block* block = m_NullBlock->prevPhysical; + m_NullBlock->prevPhysical = VMA_NULL; + while (block) + { + Block* prev = block->prevPhysical; + m_BlockAllocator.Free(block); + block = prev; + } + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); + memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t)); + m_GranularityHandler.Clear(); +} + +void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!"); + block->UserData() = userData; +} + +void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const +{ + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + if (!block->IsFree()) + DebugLogAllocation(block->offset, block->size, block->UserData()); +} + +uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const +{ + if (size > SMALL_BUFFER_SIZE) + return VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT; + return 0; +} + +uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const +{ + if (memoryClass == 0) + { + if (IsVirtual()) + return static_cast((size - 1) / 8); + else + return static_cast((size - 1) / 64); + } + return static_cast((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX)); +} + +uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const +{ + if (memoryClass == 0) + return secondIndex; + + const uint32_t index = static_cast(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex; + if (IsVirtual()) + return index + (1 << SECOND_LEVEL_INDEX); + else + return index + 4; +} + +uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const +{ + uint8_t memoryClass = SizeToMemoryClass(size); + return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass)); +} + +void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block) +{ + VMA_ASSERT(block != m_NullBlock); + VMA_ASSERT(block->IsFree()); + + if (block->NextFree() != VMA_NULL) + block->NextFree()->PrevFree() = block->PrevFree(); + if (block->PrevFree() != VMA_NULL) + block->PrevFree()->NextFree() = block->NextFree(); + else + { + uint8_t memClass = SizeToMemoryClass(block->size); + uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); + uint32_t index = GetListIndex(memClass, secondIndex); + VMA_ASSERT(m_FreeList[index] == block); + m_FreeList[index] = block->NextFree(); + if (block->NextFree() == VMA_NULL) + { + m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex); + if (m_InnerIsFreeBitmap[memClass] == 0) + m_IsFreeBitmap &= ~(1UL << memClass); + } + } + block->MarkTaken(); + block->UserData() = VMA_NULL; + --m_BlocksFreeCount; + m_BlocksFreeSize -= block->size; +} + +void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block) +{ + VMA_ASSERT(block != m_NullBlock); + VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!"); + + uint8_t memClass = 
SizeToMemoryClass(block->size); + uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); + uint32_t index = GetListIndex(memClass, secondIndex); + VMA_ASSERT(index < m_ListsCount); + block->PrevFree() = VMA_NULL; + block->NextFree() = m_FreeList[index]; + m_FreeList[index] = block; + if (block->NextFree() != VMA_NULL) + block->NextFree()->PrevFree() = block; + else + { + m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex; + m_IsFreeBitmap |= 1UL << memClass; + } + ++m_BlocksFreeCount; + m_BlocksFreeSize += block->size; +} + +void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev) +{ + VMA_ASSERT(block->prevPhysical == prev && "Cannot merge seperate physical regions!"); + VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!"); + + block->offset = prev->offset; + block->size += prev->size; + block->prevPhysical = prev->prevPhysical; + if (block->prevPhysical) + block->prevPhysical->nextPhysical = block; + m_BlockAllocator.Free(prev); +} + +VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const +{ + uint8_t memoryClass = SizeToMemoryClass(size); + uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass)); + if (!innerFreeMap) + { + // Check higher levels for avaiable blocks + uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1)); + if (!freeMap) + return VMA_NULL; // No more memory avaible + + // Find lowest free region + memoryClass = VMA_BITSCAN_LSB(freeMap); + innerFreeMap = m_InnerIsFreeBitmap[memoryClass]; + VMA_ASSERT(innerFreeMap != 0); + } + // Find lowest free subregion + listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap)); + VMA_ASSERT(m_FreeList[listIndex]); + return m_FreeList[listIndex]; +} + +bool VmaBlockMetadata_TLSF::CheckBlock( + Block& block, + uint32_t listIndex, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(block.IsFree() && "Block is already taken!"); + + VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment); + if (block.size < allocSize + alignedOffset - block.offset) + return false; + + // Check for granularity conflicts + if (!IsVirtual() && + m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType)) + return false; + + // Alloc successful + pAllocationRequest->type = VmaAllocationRequestType::TLSF; + pAllocationRequest->allocHandle = (VmaAllocHandle)█ + pAllocationRequest->size = allocSize - GetDebugMargin(); + pAllocationRequest->customData = (void*)allocType; + pAllocationRequest->algorithmData = alignedOffset; + + // Place block at the start of list if it's normal block + if (listIndex != m_ListsCount && block.PrevFree()) + { + block.PrevFree()->NextFree() = block.NextFree(); + if (block.NextFree()) + block.NextFree()->PrevFree() = block.PrevFree(); + block.PrevFree() = VMA_NULL; + block.NextFree() = m_FreeList[listIndex]; + m_FreeList[listIndex] = █ + if (block.NextFree()) + block.NextFree()->PrevFree() = █ + } + + return true; +} +#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_TLSF + +#ifndef _VMA_BLOCK_VECTOR +/* +Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific +Vulkan memory type. + +Synchronized internally with a mutex. 
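+
+One such vector serves as the default pool for a single Vulkan memory type
+(VmaAllocator_T::m_pBlockVectors) and one is owned by each custom VmaPool_T.
+Allocate() first tries the existing blocks, which are kept incrementally sorted by
+free size, and only then creates a new VkDeviceMemory block via CreateBlock(),
+up to m_MaxBlockCount.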
+*/ +class VmaBlockVector +{ + friend struct VmaDefragmentationContext_T; + VMA_CLASS_NO_COPY(VmaBlockVector) +public: + VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + bool explicitBlockSize, + uint32_t algorithm, + float priority, + VkDeviceSize minAllocationAlignment, + void* pMemoryAllocateNext); + ~VmaBlockVector(); + + VmaAllocator GetAllocator() const { return m_hAllocator; } + VmaPool GetParentPool() const { return m_hParentPool; } + bool IsCustomPool() const { return m_hParentPool != VMA_NULL; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } + uint32_t GetAlgorithm() const { return m_Algorithm; } + bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; } + float GetPriority() const { return m_Priority; } + const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; } + // To be used only while the m_Mutex is locked. Used during defragmentation. + size_t GetBlockCount() const { return m_Blocks.size(); } + // To be used only while the m_Mutex is locked. Used during defragmentation. + VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } + VMA_RW_MUTEX &GetMutex() { return m_Mutex; } + + VkResult CreateMinBlocks(); + void AddStatistics(VmaStatistics& inoutStats); + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); + bool IsEmpty(); + bool IsCorruptionDetectionEnabled() const; + + VkResult Allocate( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + void Free(const VmaAllocation hAllocation); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + VkResult CheckCorruption(); + +private: + const VmaAllocator m_hAllocator; + const VmaPool m_hParentPool; + const uint32_t m_MemoryTypeIndex; + const VkDeviceSize m_PreferredBlockSize; + const size_t m_MinBlockCount; + const size_t m_MaxBlockCount; + const VkDeviceSize m_BufferImageGranularity; + const bool m_ExplicitBlockSize; + const uint32_t m_Algorithm; + const float m_Priority; + const VkDeviceSize m_MinAllocationAlignment; + + void* const m_pMemoryAllocateNext; + VMA_RW_MUTEX m_Mutex; + // Incrementally sorted by sumFreeSize, ascending. + VmaVector> m_Blocks; + uint32_t m_NextBlockId; + bool m_IncrementalSort = true; + + void SetIncrementalSort(bool val) { m_IncrementalSort = val; } + + VkDeviceSize CalcMaxBlockSize() const; + // Finds and removes given block from vector. + void Remove(VmaDeviceMemoryBlock* pBlock); + // Performs single step in sorting m_Blocks. They may not be fully sorted + // after this call. 
+ void IncrementallySortBlocks(); + void SortByFreeSize(); + + VkResult AllocatePage( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + + VkResult AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation); + + VkResult CommitAllocationRequest( + VmaAllocationRequest& allocRequest, + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + + VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); + bool HasEmptyBlock(); +}; +#endif // _VMA_BLOCK_VECTOR + +#ifndef _VMA_DEFRAGMENTATION_CONTEXT +struct VmaDefragmentationContext_T +{ + VMA_CLASS_NO_COPY(VmaDefragmentationContext_T) +public: + VmaDefragmentationContext_T( + VmaAllocator hAllocator, + const VmaDefragmentationInfo& info); + ~VmaDefragmentationContext_T(); + + void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; } + + VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo); + VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo); + +private: + // Max number of allocations to ignore due to size constraints before ending single pass + static const uint8_t MAX_ALLOCS_TO_IGNORE = 16; + enum class CounterStatus { Pass, Ignore, End }; + + struct FragmentedBlock + { + uint32_t data; + VmaDeviceMemoryBlock* block; + }; + struct StateBalanced + { + VkDeviceSize avgFreeSize = 0; + VkDeviceSize avgAllocSize = UINT64_MAX; + }; + struct StateExtensive + { + enum class Operation : uint8_t + { + FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll, + MoveBuffers, MoveTextures, MoveAll, + Cleanup, Done + }; + + Operation operation = Operation::FindFreeBlockTexture; + size_t firstFreeBlock = SIZE_MAX; + }; + struct MoveAllocationData + { + VkDeviceSize size; + VkDeviceSize alignment; + VmaSuballocationType type; + VmaAllocationCreateFlags flags; + VmaDefragmentationMove move = {}; + }; + + const VkDeviceSize m_MaxPassBytes; + const uint32_t m_MaxPassAllocations; + + VmaStlAllocator m_MoveAllocator; + VmaVector> m_Moves; + + uint8_t m_IgnoredAllocs = 0; + uint32_t m_Algorithm; + uint32_t m_BlockVectorCount; + VmaBlockVector* m_PoolBlockVector; + VmaBlockVector** m_pBlockVectors; + size_t m_ImmovableBlockCount = 0; + VmaDefragmentationStats m_GlobalStats = { 0 }; + VmaDefragmentationStats m_PassStats = { 0 }; + void* m_AlgorithmState = VMA_NULL; + + static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata); + CounterStatus CheckCounters(VkDeviceSize bytes); + bool IncrementCounters(VkDeviceSize bytes); + bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block); + bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector); + + bool ComputeDefragmentation(VmaBlockVector& vector, size_t index); + bool ComputeDefragmentation_Fast(VmaBlockVector& vector); + bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update); + bool ComputeDefragmentation_Full(VmaBlockVector& vector); + bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index); + + void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); + bool 
MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent); +}; +#endif // _VMA_DEFRAGMENTATION_CONTEXT + +#ifndef _VMA_POOL_T +struct VmaPool_T +{ + friend struct VmaPoolListItemTraits; + VMA_CLASS_NO_COPY(VmaPool_T) +public: + VmaBlockVector m_BlockVector; + VmaDedicatedAllocationList m_DedicatedAllocations; + + VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); + ~VmaPool_T(); + + uint32_t GetId() const { return m_Id; } + void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } + + const char* GetName() const { return m_Name; } + void SetName(const char* pName); + +#if VMA_STATS_STRING_ENABLED + //void PrintDetailedMap(class VmaStringBuilder& sb); +#endif + +private: + uint32_t m_Id; + char* m_Name; + VmaPool_T* m_PrevPool = VMA_NULL; + VmaPool_T* m_NextPool = VMA_NULL; +}; + +struct VmaPoolListItemTraits +{ + typedef VmaPool_T ItemType; + + static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; } + static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; } + static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; } + static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; } +}; +#endif // _VMA_POOL_T + +#ifndef _VMA_CURRENT_BUDGET_DATA +struct VmaCurrentBudgetData +{ + VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; + +#if VMA_MEMORY_BUDGET + VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; + VMA_RW_MUTEX m_BudgetMutex; + uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; + uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; + uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; +#endif // VMA_MEMORY_BUDGET + + VmaCurrentBudgetData(); + + void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); + void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); +}; + +#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +VmaCurrentBudgetData::VmaCurrentBudgetData() +{ + for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) + { + m_BlockCount[heapIndex] = 0; + m_AllocationCount[heapIndex] = 0; + m_BlockBytes[heapIndex] = 0; + m_AllocationBytes[heapIndex] = 0; +#if VMA_MEMORY_BUDGET + m_VulkanUsage[heapIndex] = 0; + m_VulkanBudget[heapIndex] = 0; + m_BlockBytesAtBudgetFetch[heapIndex] = 0; +#endif + } + +#if VMA_MEMORY_BUDGET + m_OperationsSinceBudgetFetch = 0; +#endif +} + +void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + m_AllocationBytes[heapIndex] += allocationSize; + ++m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} + +void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); + m_AllocationBytes[heapIndex] -= allocationSize; + VMA_ASSERT(m_AllocationCount[heapIndex] > 0); + --m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} +#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +#endif // _VMA_CURRENT_BUDGET_DATA + +#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR +/* +Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects. 
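+VmaAllocation_T objects are created and destroyed through this object (see
+VmaAllocator_T::m_AllocationObjectAllocator); a VMA_MUTEX guards the underlying
+VmaPoolAllocator so this can safely happen from multiple threads.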
+*/
+class VmaAllocationObjectAllocator
+{
+    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
+public:
+    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
+        : m_Allocator(pAllocationCallbacks, 1024) {}
+
+    template<typename... Types> VmaAllocation Allocate(Types&&... args);
+    void Free(VmaAllocation hAlloc);
+
+private:
+    VMA_MUTEX m_Mutex;
+    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
+};
+
+template<typename... Types>
+VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
+{
+    VmaMutexLock mutexLock(m_Mutex);
+    return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
+}
+
+void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
+{
+    VmaMutexLock mutexLock(m_Mutex);
+    m_Allocator.Free(hAlloc);
+}
+#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
+
+#ifndef _VMA_VIRTUAL_BLOCK_T
+struct VmaVirtualBlock_T
+{
+    VMA_CLASS_NO_COPY(VmaVirtualBlock_T)
+public:
+    const bool m_AllocationCallbacksSpecified;
+    const VkAllocationCallbacks m_AllocationCallbacks;
+
+    VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
+    ~VmaVirtualBlock_T();
+
+    VkResult Init() { return VK_SUCCESS; }
+    bool IsEmpty() const { return m_Metadata->IsEmpty(); }
+    void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
+    void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
+    void Clear() { m_Metadata->Clear(); }
+
+    const VkAllocationCallbacks* GetAllocationCallbacks() const;
+    void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
+    VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
+        VkDeviceSize* outOffset);
+    void GetStatistics(VmaStatistics& outStats) const;
+    void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
+#if VMA_STATS_STRING_ENABLED
+    void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
+#endif
+
+private:
+    VmaBlockMetadata* m_Metadata;
+};
+
+#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
+VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
+    : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
+    m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
+{
+    const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
+    switch (algorithm)
+    {
+    default:
+        VMA_ASSERT(0);
+    case 0:
+        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
+        break;
+    case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
+        m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
+        break;
+    }
+
+    m_Metadata->Init(createInfo.size);
+}
+
+VmaVirtualBlock_T::~VmaVirtualBlock_T()
+{
+    // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations
+    if (!m_Metadata->IsEmpty())
+        m_Metadata->DebugLogAllAllocations();
+    // This is the most important assert in the entire library.
+    // Hitting it means you have some memory leak - unreleased virtual allocations.
+    VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");
+
+    vma_delete(GetAllocationCallbacks(), m_Metadata);
+}
+
+const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
+{
+    return m_AllocationCallbacksSpecified ?
&m_AllocationCallbacks : VMA_NULL; +} + +void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) +{ + m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); +} + +VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, + VkDeviceSize* outOffset) +{ + VmaAllocationRequest request = {}; + if (m_Metadata->CreateAllocationRequest( + createInfo.size, // allocSize + VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment + (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress + VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant + createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy + &request)) + { + m_Metadata->Alloc(request, + VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant + createInfo.pUserData); + outAllocation = (VmaVirtualAllocation)request.allocHandle; + if(outOffset) + *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); + return VK_SUCCESS; + } + outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE; + if (outOffset) + *outOffset = UINT64_MAX; + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const +{ + VmaClearStatistics(outStats); + m_Metadata->AddStatistics(outStats); +} + +void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const +{ + VmaClearDetailedStatistics(outStats); + m_Metadata->AddDetailedStatistics(outStats); +} + +#if VMA_STATS_STRING_ENABLED +void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const +{ + VmaJsonWriter json(GetAllocationCallbacks(), sb); + json.BeginObject(); + + VmaDetailedStatistics stats; + CalculateDetailedStatistics(stats); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats); + + if (detailedMap) + { + json.WriteString("Details"); + json.BeginObject(); + m_Metadata->PrintDetailedMap(json); + json.EndObject(); + } + + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS +#endif // _VMA_VIRTUAL_BLOCK_T + + +// Main allocator object. +struct VmaAllocator_T +{ + VMA_CLASS_NO_COPY(VmaAllocator_T) +public: + bool m_UseMutex; + uint32_t m_VulkanApiVersion; + bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseExtMemoryBudget; + bool m_UseAmdDeviceCoherentMemory; + bool m_UseKhrBufferDeviceAddress; + bool m_UseExtMemoryPriority; + VkDevice m_hDevice; + VkInstance m_hInstance; + bool m_AllocationCallbacksSpecified; + VkAllocationCallbacks m_AllocationCallbacks; + VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; + VmaAllocationObjectAllocator m_AllocationObjectAllocator; + + // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. + uint32_t m_HeapSizeLimitMask; + + VkPhysicalDeviceProperties m_PhysicalDeviceProperties; + VkPhysicalDeviceMemoryProperties m_MemProps; + + // Default pools. + VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; + VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; + + VmaCurrentBudgetData m_Budget; + VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects. 
+ + VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo); + VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo); + ~VmaAllocator_T(); + + const VkAllocationCallbacks* GetAllocationCallbacks() const + { + return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL; + } + const VmaVulkanFunctions& GetVulkanFunctions() const + { + return m_VulkanFunctions; + } + + VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; } + + VkDeviceSize GetBufferImageGranularity() const + { + return VMA_MAX( + static_cast(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY), + m_PhysicalDeviceProperties.limits.bufferImageGranularity); + } + + uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; } + uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; } + + uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const + { + VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount); + return m_MemProps.memoryTypes[memTypeIndex].heapIndex; + } + // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT. + bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const + { + return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) == + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + // Minimum alignment for all allocations in specific memory type. + VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const + { + return IsMemoryTypeNonCoherent(memTypeIndex) ? + VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : + (VkDeviceSize)VMA_MIN_ALIGNMENT; + } + + bool IsIntegratedGpu() const + { + return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; + } + + uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; } + + void GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + void GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + VkResult FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. + uint32_t* pMemoryTypeIndex) const; + + // Main allocation function. + VkResult AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown. + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Main deallocation function. 
+ void FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations); + + void CalculateStatistics(VmaTotalStatistics* pStats); + + void GetHeapBudgets( + VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); + + VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); + void DestroyPool(VmaPool pool); + void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats); + void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats); + + void SetCurrentFrameIndex(uint32_t frameIndex); + uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } + + VkResult CheckPoolCorruption(VmaPool hPool); + VkResult CheckCorruption(uint32_t memoryTypeBits); + + // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. + VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); + // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. + void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); + // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. + VkResult BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext); + // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. + VkResult BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext); + + VkResult Map(VmaAllocation hAllocation, void** ppData); + void Unmap(VmaAllocation hAllocation); + + VkResult BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + + VkResult FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op); + VkResult FlushOrInvalidateAllocations( + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, const VkDeviceSize* sizes, + VMA_CACHE_OPERATION op); + + void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); + + /* + Returns bit mask of memory types that can support defragmentation on GPU as + they support creation of required buffer for copy operations. + */ + uint32_t GetGpuDefragmentationMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const + { + return m_TypeExternalMemoryHandleTypes[memTypeIndex]; + } +#endif // #if VMA_EXTERNAL_MEMORY + +private: + VkDeviceSize m_PreferredLargeHeapBlockSize; + + VkPhysicalDevice m_PhysicalDevice; + VMA_ATOMIC_UINT32 m_CurrentFrameIndex; + VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. +#if VMA_EXTERNAL_MEMORY + VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES]; +#endif // #if VMA_EXTERNAL_MEMORY + + VMA_RW_MUTEX m_PoolsMutex; + typedef VmaIntrusiveLinkedList PoolList; + // Protected by m_PoolsMutex. + PoolList m_Pools; + uint32_t m_NextPoolId; + + VmaVulkanFunctions m_VulkanFunctions; + + // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types. 
+ uint32_t m_GlobalMemoryTypeBits; + + void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Static(); +#endif + + void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Dynamic(); +#endif + + void ValidateVulkanFunctions(); + + VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); + + VkResult AllocateMemoryOfType( + VmaPool pool, + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedPreferred, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + VmaBlockVector& blockVector, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Helper function only to be used inside AllocateDedicatedMemory. + VkResult AllocateDedicatedMemoryPage( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + bool isMappingAllowed, + void* pUserData, + VmaAllocation* pAllocation); + + // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. + VkResult AllocateDedicatedMemory( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + uint32_t memTypeIndex, + bool map, + bool isUserDataString, + bool isMappingAllowed, + bool canAliasMemory, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, + size_t allocationCount, + VmaAllocation* pAllocations, + const void* pNextChain = nullptr); + + void FreeDedicatedMemory(const VmaAllocation allocation); + + VkResult CalcMemTypeParams( + VmaAllocationCreateInfo& outCreateInfo, + uint32_t memTypeIndex, + VkDeviceSize size, + size_t allocationCount); + VkResult CalcAllocationParams( + VmaAllocationCreateInfo& outCreateInfo, + bool dedicatedRequired, + bool dedicatedPreferred); + + /* + Calculates and returns bit mask of memory types that can support defragmentation + on GPU as they support creation of required buffer for copy operations. 
+ */ + uint32_t CalculateGpuDefragmentationMemoryTypeBits() const; + uint32_t CalculateGlobalMemoryTypeBits() const; + + bool GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const; + +#if VMA_MEMORY_BUDGET + void UpdateVulkanBudget(); +#endif // #if VMA_MEMORY_BUDGET +}; + + +#ifndef _VMA_MEMORY_FUNCTIONS +static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) +{ + return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment); +} + +static void VmaFree(VmaAllocator hAllocator, void* ptr) +{ + VmaFree(&hAllocator->m_AllocationCallbacks, ptr); +} + +template +static T* VmaAllocate(VmaAllocator hAllocator) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); +} + +template +static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); +} + +template +static void vma_delete(VmaAllocator hAllocator, T* ptr) +{ + if(ptr != VMA_NULL) + { + ptr->~T(); + VmaFree(hAllocator, ptr); + } +} + +template +static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count) +{ + if(ptr != VMA_NULL) + { + for(size_t i = count; i--; ) + ptr[i].~T(); + VmaFree(hAllocator, ptr); + } +} +#endif // _VMA_MEMORY_FUNCTIONS + +#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS +VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) + : m_pMetadata(VMA_NULL), + m_MemoryTypeIndex(UINT32_MAX), + m_Id(0), + m_hMemory(VK_NULL_HANDLE), + m_MapCount(0), + m_pMappedData(VMA_NULL) {} + +VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock() +{ + VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped."); + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); +} + +void VmaDeviceMemoryBlock::Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm, + VkDeviceSize bufferImageGranularity) +{ + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + + m_hParentPool = hParentPool; + m_MemoryTypeIndex = newMemoryTypeIndex; + m_Id = id; + m_hMemory = newMemory; + + switch (algorithm) + { + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + break; + default: + VMA_ASSERT(0); + // Fall-through. + case 0: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + } + m_pMetadata->Init(newSize); +} + +void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) +{ + // Define macro VMA_DEBUG_LOG to receive the list of the unfreed allocations + if (!m_pMetadata->IsEmpty()) + m_pMetadata->DebugLogAllAllocations(); + // This is the most important assert in the entire library. + // Hitting it means you have some memory leak - unreleased VmaAllocation objects. 
+ VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); + + VMA_ASSERT(m_hMemory != VK_NULL_HANDLE); + allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); + m_hMemory = VK_NULL_HANDLE; + + vma_delete(allocator, m_pMetadata); + m_pMetadata = VMA_NULL; +} + +void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) +{ + if(m_MappingHysteresis.PostFree()) + { + VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0); + if (m_MapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + } +} + +bool VmaDeviceMemoryBlock::Validate() const +{ + VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && + (m_pMetadata->GetSize() != 0)); + + return m_pMetadata->Validate(); +} + +VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) +{ + void* pData = nullptr; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + res = m_pMetadata->CheckCorruption(pData); + + Unmap(hAllocator, 1); + + return res; +} + +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +{ + if (count == 0) + { + return VK_SUCCESS; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + m_MappingHysteresis.PostMap(); + if (oldTotalMapCount != 0) + { + m_MapCount += count; + VMA_ASSERT(m_pMappedData != VMA_NULL); + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + return VK_SUCCESS; + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + &m_pMappedData); + if (result == VK_SUCCESS) + { + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + m_MapCount = count; + } + return result; + } +} + +void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) +{ + if (count == 0) + { + return; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + if (m_MapCount >= count) + { + m_MapCount -= count; + const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + if (totalMapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + m_MappingHysteresis.PostUnmap(); + } + else + { + VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); + } +} + +VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + VmaWriteMagicValue(pData, allocOffset + allocSize); + + Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + if (!VmaValidateMagicValue(pData, allocOffset + allocSize)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); + } + + 
Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); +} + +VkResult VmaDeviceMemoryBlock::BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); +} +#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS + +#ifndef _VMA_ALLOCATION_T_FUNCTIONS +VmaAllocation_T::VmaAllocation_T(bool mappingAllowed) + : m_Alignment{ 1 }, + m_Size{ 0 }, + m_pUserData{ VMA_NULL }, + m_pName{ VMA_NULL }, + m_MemoryTypeIndex{ 0 }, + m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, + m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, + m_MapCount{ 0 }, + m_Flags{ 0 } +{ + if(mappingAllowed) + m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; + +#if VMA_STATS_STRING_ENABLED + m_BufferImageUsage = 0; +#endif +} + +VmaAllocation_T::~VmaAllocation_T() +{ + VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction."); + + // Check if owned string was freed. + VMA_ASSERT(m_pName == VMA_NULL); +} + +void VmaAllocation_T::InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(block != VMA_NULL); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_Alignment = alignment; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + if(mapped) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } + m_SuballocationType = (uint8_t)suballocationType; + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_AllocHandle = allocHandle; +} + +void VmaAllocation_T::InitDedicatedAllocation( + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(hMemory != VK_NULL_HANDLE); + m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; + m_Alignment = 0; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_SuballocationType = (uint8_t)suballocationType; + if(pMappedData != VMA_NULL) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } + m_DedicatedAllocation.m_hParentPool = hParentPool; + m_DedicatedAllocation.m_hMemory = hMemory; + m_DedicatedAllocation.m_pMappedData = pMappedData; + m_DedicatedAllocation.m_Prev = VMA_NULL; + m_DedicatedAllocation.m_Next = VMA_NULL; +} + +void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) +{ + VMA_ASSERT(pName == VMA_NULL || pName != m_pName); + + FreeName(hAllocator); + + if (pName != VMA_NULL) + m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName); +} + +uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation) +{ + VMA_ASSERT(allocation != VMA_NULL); + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount != 0) + m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount); + + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); + VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation); + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); + +#if VMA_STATS_STRING_ENABLED + VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage); +#endif + return m_MapCount; +} + +VmaAllocHandle VmaAllocation_T::GetAllocHandle() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_AllocHandle; + case ALLOCATION_TYPE_DEDICATED: + return VK_NULL_HANDLE; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceSize VmaAllocation_T::GetOffset() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle); + case ALLOCATION_TYPE_DEDICATED: + return 0; + default: + VMA_ASSERT(0); + return 0; + } +} + +VmaPool VmaAllocation_T::GetParentPool() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetParentPool(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hParentPool; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceMemory VmaAllocation_T::GetMemory() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetDeviceMemory(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hMemory; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +void* VmaAllocation_T::GetMappedData() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + if (m_MapCount != 0 || 
IsPersistentMap()) + { + void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); + VMA_ASSERT(pBlockData != VMA_NULL); + return (char*)pBlockData + GetOffset(); + } + else + { + return VMA_NULL; + } + break; + case ALLOCATION_TYPE_DEDICATED: + VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap())); + return m_DedicatedAllocation.m_pMappedData; + default: + VMA_ASSERT(0); + return VMA_NULL; + } +} + +void VmaAllocation_T::BlockAllocMap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + if (m_MapCount < 0xFF) + { + ++m_MapCount; + } + else + { + VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); + } +} + +void VmaAllocation_T::BlockAllocUnmap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount > 0) + { + --m_MapCount; + } + else + { + VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); + } +} + +VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + if (m_MapCount != 0 || IsPersistentMap()) + { + if (m_MapCount < 0xFF) + { + VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL); + *ppData = m_DedicatedAllocation.m_pMappedData; + ++m_MapCount; + return VK_SUCCESS; + } + else + { + VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); + return VK_ERROR_MEMORY_MAP_FAILED; + } + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + ppData); + if (result == VK_SUCCESS) + { + m_DedicatedAllocation.m_pMappedData = *ppData; + m_MapCount = 1; + } + return result; + } +} + +void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + + if (m_MapCount > 0) + { + --m_MapCount; + if (m_MapCount == 0 && !IsPersistentMap()) + { + m_DedicatedAllocation.m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory); + } + } + else + { + VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped."); + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage) +{ + VMA_ASSERT(m_BufferImageUsage == 0); + m_BufferImageUsage = bufferImageUsage; +} + +void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const +{ + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); + + json.WriteString("Size"); + json.WriteNumber(m_Size); + json.WriteString("Usage"); + json.WriteNumber(m_BufferImageUsage); + + if (m_pUserData != VMA_NULL) + { + json.WriteString("CustomData"); + json.BeginString(); + json.ContinueString_Pointer(m_pUserData); + json.EndString(); + } + if (m_pName != VMA_NULL) + { + json.WriteString("Name"); + json.WriteString(m_pName); + } +} +#endif // VMA_STATS_STRING_ENABLED + +void VmaAllocation_T::FreeName(VmaAllocator hAllocator) +{ + if(m_pName) + { + VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName); + m_pName = VMA_NULL; + } +} +#endif // 
_VMA_ALLOCATION_T_FUNCTIONS + +#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS +VmaBlockVector::VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + bool explicitBlockSize, + uint32_t algorithm, + float priority, + VkDeviceSize minAllocationAlignment, + void* pMemoryAllocateNext) + : m_hAllocator(hAllocator), + m_hParentPool(hParentPool), + m_MemoryTypeIndex(memoryTypeIndex), + m_PreferredBlockSize(preferredBlockSize), + m_MinBlockCount(minBlockCount), + m_MaxBlockCount(maxBlockCount), + m_BufferImageGranularity(bufferImageGranularity), + m_ExplicitBlockSize(explicitBlockSize), + m_Algorithm(algorithm), + m_Priority(priority), + m_MinAllocationAlignment(minAllocationAlignment), + m_pMemoryAllocateNext(pMemoryAllocateNext), + m_Blocks(VmaStlAllocator(hAllocator->GetAllocationCallbacks())), + m_NextBlockId(0) {} + +VmaBlockVector::~VmaBlockVector() +{ + for (size_t i = m_Blocks.size(); i--; ) + { + m_Blocks[i]->Destroy(m_hAllocator); + vma_delete(m_hAllocator, m_Blocks[i]); + } +} + +VkResult VmaBlockVector::CreateMinBlocks() +{ + for (size_t i = 0; i < m_MinBlockCount; ++i) + { + VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL); + if (res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + const size_t blockCount = m_Blocks.size(); + for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + pBlock->m_pMetadata->AddStatistics(inoutStats); + } +} + +void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + const size_t blockCount = m_Blocks.size(); + for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + pBlock->m_pMetadata->AddDetailedStatistics(inoutStats); + } +} + +bool VmaBlockVector::IsEmpty() +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + return m_Blocks.empty(); +} + +bool VmaBlockVector::IsCorruptionDetectionEnabled() const +{ + const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + return (VMA_DEBUG_DETECT_CORRUPTION != 0) && + (VMA_DEBUG_MARGIN > 0) && + (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) && + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; +} + +VkResult VmaBlockVector::Allocate( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + size_t allocIndex; + VkResult res = VK_SUCCESS; + + alignment = VMA_MAX(alignment, m_MinAllocationAlignment); + + if (IsCorruptionDetectionEnabled()) + { + size = VmaAlignUp(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); + alignment = VmaAlignUp(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); + } + + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = 
AllocatePage( + size, + alignment, + createInfo, + suballocType, + pAllocations + allocIndex); + if (res != VK_SUCCESS) + { + break; + } + } + } + + if (res != VK_SUCCESS) + { + // Free all already created allocations. + while (allocIndex--) + Free(pAllocations[allocIndex]); + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaBlockVector::AllocatePage( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + + VkDeviceSize freeMemory; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); + freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0; + } + + const bool canFallbackToDedicated = !HasExplicitBlockSize() && + (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0; + const bool canCreateNewBlock = + ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && + (m_Blocks.size() < m_MaxBlockCount) && + (freeMemory >= size || !canFallbackToDedicated); + uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK; + + // Upper address can only be used with linear allocator and within single memory block. + if (isUpperAddress && + (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + // Early reject: requested allocation size is larger that maximum block size for this block vector. + if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + + // 1. Search existing allocations. Try to allocate. + if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Use only last block. + if (!m_Blocks.empty()) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + else + { + if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default + { + const bool isHostVisible = + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; + if(isHostVisible) + { + const bool isMappingAllowed = (createInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + /* + For non-mappable allocations, check blocks that are not mapped first. + For mappable allocations, check blocks that are already mapped first. + This way, having many blocks, we will separate mappable and non-mappable allocations, + hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc. + */ + for(size_t mappingI = 0; mappingI < 2; ++mappingI) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. 
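+                // Selection logic of the loop below (illustrative summary):
+                //     mappingI == 0 -> preferred blocks first: mapped blocks for mappable
+                //                      allocations, unmapped blocks for non-mappable ones;
+                //     mappingI == 1 -> the remaining blocks.
+                // The test `(mappingI == 0) == (isMappingAllowed == isBlockMapped)` below
+                // encodes exactly this.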
+ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; + if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) + { + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + } + else + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + + // 2. Try to create new block. + if (canCreateNewBlock) + { + // Calculate optimal size for new block. + VkDeviceSize newBlockSize = m_PreferredBlockSize; + uint32_t newBlockSizeShift = 0; + const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; + + if (!m_ExplicitBlockSize) + { + // Allocate 1/8, 1/4, 1/2 as first blocks. + const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); + for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + } + else + { + break; + } + } + } + + size_t newBlockIndex = 0; + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. + if (!m_ExplicitBlockSize) + { + while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize >= size) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
+ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { + break; + } + } + } + + if (res == VK_SUCCESS) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; + VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); + + res = AllocateFromBlock( + pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + else + { + // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + } + + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaBlockVector::Free(const VmaAllocation hAllocation) +{ + VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; + + bool budgetExceeded = false; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); + budgetExceeded = heapBudget.usage >= heapBudget.budget; + } + + // Scope for lock. + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + + if (IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize()); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value."); + } + + if (hAllocation->IsPersistentMap()) + { + pBlock->Unmap(m_hAllocator, 1); + } + + const bool hadEmptyBlockBeforeFree = HasEmptyBlock(); + pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle()); + pBlock->PostFree(m_hAllocator); + VMA_HEAVY_ASSERT(pBlock->Validate()); + + VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex); + + const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount; + // pBlock became empty after this deallocation. + if (pBlock->m_pMetadata->IsEmpty()) + { + // Already had empty block. We don't want to have two, so delete this one. + if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock) + { + pBlockToDelete = pBlock; + Remove(pBlock); + } + // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth. + } + // pBlock didn't become empty, but we have another empty block - find and free that one. + // (This is optional, heuristics.) + else if (hadEmptyBlockBeforeFree && canDeleteBlock) + { + VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); + if (pLastBlock->m_pMetadata->IsEmpty()) + { + pBlockToDelete = pLastBlock; + m_Blocks.pop_back(); + } + } + + IncrementallySortBlocks(); + } + + // Destruction of a free block. Deferred until this point, outside of mutex + // lock, for performance reason. 
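+    // Illustrative shape of the pattern (simplified):
+    //     VmaDeviceMemoryBlock* toDelete = VMA_NULL;
+    //     {
+    //         VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+    //         ... metadata updated; possibly: toDelete = pBlock; Remove(pBlock); ...
+    //     }
+    //     if (toDelete != VMA_NULL) { toDelete->Destroy(m_hAllocator); vma_delete(m_hAllocator, toDelete); }
+    // vkFreeMemory (called inside Destroy()) can be slow, so it is kept outside m_Mutex.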
+ if (pBlockToDelete != VMA_NULL) + { + VMA_DEBUG_LOG(" Deleted empty block #%u", pBlockToDelete->GetId()); + pBlockToDelete->Destroy(m_hAllocator); + vma_delete(m_hAllocator, pBlockToDelete); + } + + m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize()); + m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation); +} + +VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const +{ + VkDeviceSize result = 0; + for (size_t i = m_Blocks.size(); i--; ) + { + result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); + if (result >= m_PreferredBlockSize) + { + break; + } + } + return result; +} + +void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) +{ + for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + if (m_Blocks[blockIndex] == pBlock) + { + VmaVectorRemove(m_Blocks, blockIndex); + return; + } + } + VMA_ASSERT(0); +} + +void VmaBlockVector::IncrementallySortBlocks() +{ + if (!m_IncrementalSort) + return; + if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Bubble sort only until first swap. + for (size_t i = 1; i < m_Blocks.size(); ++i) + { + if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize()) + { + VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]); + return; + } + } + } +} + +void VmaBlockVector::SortByFreeSize() +{ + VMA_SORT(m_Blocks.begin(), m_Blocks.end(), + [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool + { + return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); + }); +} + +VkResult VmaBlockVector::AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation) +{ + const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + + VmaAllocationRequest currRequest = {}; + if (pBlock->m_pMetadata->CreateAllocationRequest( + size, + alignment, + isUpperAddress, + suballocType, + strategy, + &currRequest)) + { + return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation); + } + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +VkResult VmaBlockVector::CommitAllocationRequest( + VmaAllocationRequest& allocRequest, + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; + const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; + const bool isMappingAllowed = (allocFlags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + + pBlock->PostAlloc(); + // Allocate from pCurrBlock. + if (mapped) + { + VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); + if (res != VK_SUCCESS) + { + return res; + } + } + + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed); + pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); + (*pAllocation)->InitBlockAllocation( + pBlock, + allocRequest.allocHandle, + alignment, + allocRequest.size, // Not size, as actual allocation size may be larger than requested! 
+ m_MemoryTypeIndex, + suballocType, + mapped); + VMA_HEAVY_ASSERT(pBlock->Validate()); + if (isUserDataString) + (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData); + else + (*pAllocation)->SetUserData(m_hAllocator, pUserData); + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); + if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + if (IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); + } + return VK_SUCCESS; +} + +VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) +{ + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.pNext = m_pMemoryAllocateNext; + allocInfo.memoryTypeIndex = m_MemoryTypeIndex; + allocInfo.allocationSize = blockSize; + +#if VMA_BUFFER_DEVICE_ADDRESS + // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if (m_hAllocator->m_UseKhrBufferDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } +#endif // VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if (m_hAllocator->m_UseExtMemoryPriority) + { + VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f); + priorityInfo.priority = m_Priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // VMA_MEMORY_PRIORITY + +#if VMA_EXTERNAL_MEMORY + // Attach VkExportMemoryAllocateInfoKHR if necessary. + VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex); + if (exportMemoryAllocInfo.handleTypes != 0) + { + VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); + } +#endif // VMA_EXTERNAL_MEMORY + + VkDeviceMemory mem = VK_NULL_HANDLE; + VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); + if (res < 0) + { + return res; + } + + // New VkDeviceMemory successfully created. + + // Create new Allocation for it. 
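+    // Illustrative: the new block takes ownership of `mem`, and Init() selects the metadata
+    // implementation from m_Algorithm, roughly:
+    //     VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT -> VmaBlockMetadata_Linear
+    //     0 (default)                          -> VmaBlockMetadata_TLSF
+    // (see VmaDeviceMemoryBlock::Init() earlier in this file).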
+ VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); + pBlock->Init( + m_hAllocator, + m_hParentPool, + m_MemoryTypeIndex, + mem, + allocInfo.allocationSize, + m_NextBlockId++, + m_Algorithm, + m_BufferImageGranularity); + + m_Blocks.push_back(pBlock); + if (pNewBlockIndex != VMA_NULL) + { + *pNewBlockIndex = m_Blocks.size() - 1; + } + + return VK_SUCCESS; +} + +bool VmaBlockVector::HasEmptyBlock() +{ + for (size_t index = 0, count = m_Blocks.size(); index < count; ++index) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; + if (pBlock->m_pMetadata->IsEmpty()) + { + return true; + } + } + return false; +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + + json.BeginObject(); + for (size_t i = 0; i < m_Blocks.size(); ++i) + { + json.BeginString(); + json.ContinueString(m_Blocks[i]->GetId()); + json.EndString(); + + json.BeginObject(); + json.WriteString("MapRefCount"); + json.WriteNumber(m_Blocks[i]->GetMapRefCount()); + + m_Blocks[i]->m_pMetadata->PrintDetailedMap(json); + json.EndObject(); + } + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED + +VkResult VmaBlockVector::CheckCorruption() +{ + if (!IsCorruptionDetectionEnabled()) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VkResult res = pBlock->CheckCorruption(m_hAllocator); + if (res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +#endif // _VMA_BLOCK_VECTOR_FUNCTIONS + +#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS +VmaDefragmentationContext_T::VmaDefragmentationContext_T( + VmaAllocator hAllocator, + const VmaDefragmentationInfo& info) + : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass), + m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? 
UINT32_MAX : info.maxAllocationsPerPass),
+    m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
+    m_Moves(m_MoveAllocator)
+{
+    m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
+
+    if (info.pool != VMA_NULL)
+    {
+        m_BlockVectorCount = 1;
+        m_PoolBlockVector = &info.pool->m_BlockVector;
+        m_pBlockVectors = &m_PoolBlockVector;
+        m_PoolBlockVector->SetIncrementalSort(false);
+        m_PoolBlockVector->SortByFreeSize();
+    }
+    else
+    {
+        m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
+        m_PoolBlockVector = VMA_NULL;
+        m_pBlockVectors = hAllocator->m_pBlockVectors;
+        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+        {
+            VmaBlockVector* vector = m_pBlockVectors[i];
+            if (vector != VMA_NULL)
+            {
+                vector->SetIncrementalSort(false);
+                vector->SortByFreeSize();
+            }
+        }
+    }
+
+    switch (m_Algorithm)
+    {
+    case 0: // Default algorithm
+        m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+    {
+        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
+        break;
+    }
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+    {
+        if (hAllocator->GetBufferImageGranularity() > 1)
+        {
+            m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
+        }
+        break;
+    }
+    }
+}
+
+VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
+{
+    if (m_PoolBlockVector != VMA_NULL)
+    {
+        m_PoolBlockVector->SetIncrementalSort(true);
+    }
+    else
+    {
+        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+        {
+            VmaBlockVector* vector = m_pBlockVectors[i];
+            if (vector != VMA_NULL)
+                vector->SetIncrementalSort(true);
+        }
+    }
+
+    if (m_AlgorithmState)
+    {
+        switch (m_Algorithm)
+        {
+        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
+            break;
+        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+}
+
+VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
+{
+    if (m_PoolBlockVector != VMA_NULL)
+    {
+        VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
+
+        if (m_PoolBlockVector->GetBlockCount() > 1)
+            ComputeDefragmentation(*m_PoolBlockVector, 0);
+        else if (m_PoolBlockVector->GetBlockCount() == 1)
+            ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
+    }
+    else
+    {
+        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+        {
+            if (m_pBlockVectors[i] != VMA_NULL)
+            {
+                VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
+
+                if (m_pBlockVectors[i]->GetBlockCount() > 1)
+                {
+                    if (ComputeDefragmentation(*m_pBlockVectors[i], i))
+                        break;
+                }
+                else if (m_pBlockVectors[i]->GetBlockCount() == 1)
+                {
+                    if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
+                        break;
+                }
+            }
+        }
+    }
+
+    moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
+    if (moveInfo.moveCount > 0)
+    {
+        moveInfo.pMoves = m_Moves.data();
+        return VK_INCOMPLETE;
+    }
+
+    moveInfo.pMoves = VMA_NULL;
+    return VK_SUCCESS;
+}
+
+VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
+{
+    VMA_ASSERT(moveInfo.moveCount > 0 ? 
moveInfo.pMoves != VMA_NULL : true);
+
+    VkResult result = VK_SUCCESS;
+    VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
+    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
+    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
+
+    VmaAllocator allocator = VMA_NULL;
+    for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
+    {
+        VmaDefragmentationMove& move = moveInfo.pMoves[i];
+        size_t prevCount = 0, currentCount = 0;
+        VkDeviceSize freedBlockSize = 0;
+
+        uint32_t vectorIndex;
+        VmaBlockVector* vector;
+        if (m_PoolBlockVector != VMA_NULL)
+        {
+            vectorIndex = 0;
+            vector = m_PoolBlockVector;
+        }
+        else
+        {
+            vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
+            vector = m_pBlockVectors[vectorIndex];
+            VMA_ASSERT(vector != VMA_NULL);
+        }
+
+        switch (move.operation)
+        {
+        case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
+        {
+            uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
+            if (mapCount > 0)
+            {
+                allocator = vector->m_hAllocator;
+                VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
+                bool notPresent = true;
+                for (FragmentedBlock& block : mappedBlocks)
+                {
+                    if (block.block == newMapBlock)
+                    {
+                        notPresent = false;
+                        block.data += mapCount;
+                        break;
+                    }
+                }
+                if (notPresent)
+                    mappedBlocks.push_back({ mapCount, newMapBlock });
+            }
+
+            // Scope for locks, Free has its own lock
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                prevCount = vector->GetBlockCount();
+                freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
+            }
+            vector->Free(move.dstTmpAllocation);
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                currentCount = vector->GetBlockCount();
+            }
+
+            result = VK_INCOMPLETE;
+            break;
+        }
+        case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
+        {
+            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
+            --m_PassStats.allocationsMoved;
+            vector->Free(move.dstTmpAllocation);
+
+            VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
+            bool notPresent = true;
+            for (const FragmentedBlock& block : immovableBlocks)
+            {
+                if (block.block == newBlock)
+                {
+                    notPresent = false;
+                    break;
+                }
+            }
+            if (notPresent)
+                immovableBlocks.push_back({ vectorIndex, newBlock });
+            break;
+        }
+        case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
+        {
+            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
+            --m_PassStats.allocationsMoved;
+            // Scope for locks, Free has its own lock
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                prevCount = vector->GetBlockCount();
+                freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
+            }
+            vector->Free(move.srcAllocation);
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                currentCount = vector->GetBlockCount();
+            }
+            freedBlockSize *= prevCount - currentCount;
+
+            VkDeviceSize dstBlockSize;
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
+            }
+            vector->Free(move.dstTmpAllocation);
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
+                currentCount = vector->GetBlockCount();
+            }
+
+            result = VK_INCOMPLETE;
+            break;
+        }
+        default:
+            VMA_ASSERT(0);
+        }
+
+        if (prevCount > currentCount)
+        {
+            size_t freedBlocks = prevCount - currentCount;
+            m_PassStats.deviceMemoryBlocksFreed += 
static_cast<uint32_t>(freedBlocks);
+            m_PassStats.bytesFreed += freedBlockSize;
+        }
+
+        switch (m_Algorithm)
+        {
+        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+        {
+            if (m_AlgorithmState != VMA_NULL)
+            {
+                // Avoid unnecessary tries to allocate when new free block is available
+                StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
+                if (state.firstFreeBlock != SIZE_MAX)
+                {
+                    const size_t diff = prevCount - currentCount;
+                    if (state.firstFreeBlock >= diff)
+                    {
+                        state.firstFreeBlock -= diff;
+                        if (state.firstFreeBlock != 0)
+                            state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
+                    }
+                    else
+                        state.firstFreeBlock = 0;
+                }
+            }
+        }
+        }
+    }
+    moveInfo.moveCount = 0;
+    moveInfo.pMoves = VMA_NULL;
+    m_Moves.clear();
+
+    // Update stats
+    m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
+    m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
+    m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
+    m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
+    m_PassStats = { 0 };
+
+    // Move blocks with immovable allocations according to algorithm
+    if (immovableBlocks.size() > 0)
+    {
+        switch (m_Algorithm)
+        {
+        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+        {
+            if (m_AlgorithmState != VMA_NULL)
+            {
+                bool swapped = false;
+                // Move to the start of free blocks range
+                for (const FragmentedBlock& block : immovableBlocks)
+                {
+                    StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
+                    if (state.operation != StateExtensive::Operation::Cleanup)
+                    {
+                        VmaBlockVector* vector = m_pBlockVectors[block.data];
+                        VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+
+                        for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
+                        {
+                            if (vector->GetBlock(i) == block.block)
+                            {
+                                VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
+                                if (state.firstFreeBlock != SIZE_MAX)
+                                {
+                                    if (i + 1 < state.firstFreeBlock)
+                                    {
+                                        if (state.firstFreeBlock > 1)
+                                            VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
+                                        else
+                                            --state.firstFreeBlock;
+                                    }
+                                }
+                                swapped = true;
+                                break;
+                            }
+                        }
+                    }
+                }
+                if (swapped)
+                    result = VK_INCOMPLETE;
+                break;
+            }
+        }
+        default:
+        {
+            // Move to the beginning
+            for (const FragmentedBlock& block : immovableBlocks)
+            {
+                VmaBlockVector* vector = m_pBlockVectors[block.data];
+                VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+
+                for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
+                {
+                    if (vector->GetBlock(i) == block.block)
+                    {
+                        VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
+                        break;
+                    }
+                }
+            }
+            break;
+        }
+        }
+    }
+
+    // Bulk-map destination blocks
+    for (const FragmentedBlock& block : mappedBlocks)
+    {
+        VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
+        VMA_ASSERT(res == VK_SUCCESS);
+    }
+    return result;
+}
+
+bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
+{
+    switch (m_Algorithm)
+    {
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
+        return ComputeDefragmentation_Fast(vector);
+    default:
+        VMA_ASSERT(0);
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+        return ComputeDefragmentation_Balanced(vector, index, true);
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
+        return ComputeDefragmentation_Full(vector);
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+        return 
ComputeDefragmentation_Extensive(vector, index); + } +} + +VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData( + VmaAllocHandle handle, VmaBlockMetadata* metadata) +{ + MoveAllocationData moveData; + moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle); + moveData.size = moveData.move.srcAllocation->GetSize(); + moveData.alignment = moveData.move.srcAllocation->GetAlignment(); + moveData.type = moveData.move.srcAllocation->GetSuballocationType(); + moveData.flags = 0; + + if (moveData.move.srcAllocation->IsPersistentMap()) + moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT; + if (moveData.move.srcAllocation->IsMappingAllowed()) + moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + + return moveData; +} + +VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes) +{ + // Ignore allocation if will exceed max size for copy + if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes) + { + if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE) + return CounterStatus::Ignore; + else + return CounterStatus::End; + } + return CounterStatus::Pass; +} + +bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) +{ + m_PassStats.bytesMoved += bytes; + // Early return when max found + if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) + { + VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations || + m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!"); + return true; + } + return false; +} + +bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) +{ + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } + + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) +{ + for (; start < end; ++start) + { + VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start); + if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size) + { + if (vector.AllocateFromBlock(dstBlock, + data.size, + data.alignment, + data.flags, + this, + data.type, + 0, + 
&data.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(data.move); + if (IncrementCounters(data.size)) + return true; + break; + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector) +{ + // Move only between blocks + + // Go through allocations in last blocks and try to fit them inside first ones + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } + + // Check all previous blocks for free space + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update) +{ + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0), + // but only if there are noticable gaps between them (some heuristic, ex. average size of allocation in block) + VMA_ASSERT(m_AlgorithmState != VMA_NULL); + + StateBalanced& vectorState = reinterpret_cast(m_AlgorithmState)[index]; + if (update && vectorState.avgAllocSize == UINT64_MAX) + UpdateVectorStatistics(vector, vectorState); + + const size_t startMoveCount = m_Moves.size(); + VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2; + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + VkDeviceSize prevFreeRegionSize = 0; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } + + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle); + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + // Check if realloc will make sense + if (prevFreeRegionSize >= minimalFreeRegion || + nextFreeRegionSize >= minimalFreeRegion || + moveData.size <= vectorState.avgFreeSize || + moveData.size <= vectorState.avgAllocSize) + { + VmaAllocationRequest request = {}; + if 
(metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + prevFreeRegionSize = nextFreeRegionSize; + } + } + + // No moves perfomed, update statistics to current vector state + if (startMoveCount == m_Moves.size() && !update) + { + vectorState.avgAllocSize = UINT64_MAX; + return ComputeDefragmentation_Balanced(vector, index, false); + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector) +{ + // Go over every allocation and try to fit it in previous blocks at lowest offsets, + // if not possible: realloc within single block to minimize offset (exclude offset == 0) + + for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } + + // Check all previous blocks for free space + const size_t prevMoveCount = m_Moves.size(); + if (AllocInOtherBlock(0, i, moveData, vector)) + return true; + + // If no room found then realloc within block for lower offset + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); + if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size) + { + VmaAllocationRequest request = {}; + if (metadata->CreateAllocationRequest( + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) + { + if (metadata->GetAllocationOffset(request.allocHandle) < offset) + { + if (vector.CommitAllocationRequest( + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) + { + m_Moves.push_back(moveData.move); + if (IncrementCounters(moveData.size)) + return true; + } + } + } + } + } + } + return false; +} + +bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index) +{ + // First free single block, then populate it to the brim, then free another block, and so on + + // Fallback to previous algorithm since without granularity conflicts it can achieve max packing + if (vector.m_BufferImageGranularity == 1) + return ComputeDefragmentation_Full(vector); + + VMA_ASSERT(m_AlgorithmState != VMA_NULL); + + StateExtensive& vectorState = reinterpret_cast(m_AlgorithmState)[index]; + + bool texturePresent = false, bufferPresent = false, otherPresent = false; + switch (vectorState.operation) + { + case StateExtensive::Operation::Done: // Vector defragmented + return 
false; + case StateExtensive::Operation::FindFreeBlockBuffer: + case StateExtensive::Operation::FindFreeBlockTexture: + case StateExtensive::Operation::FindFreeBlockAll: + { + // No more blocks to free, just perform fast realloc and move to cleanup + if (vectorState.firstFreeBlock == 0) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + return ComputeDefragmentation_Fast(vector); + } + + // No free blocks, have to clear last one + size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1; + VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata; + + const size_t prevMoveCount = m_Moves.size(); + for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = freeMetadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, freeMetadata); + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } + + // Check all previous blocks for free space + if (AllocInOtherBlock(0, last, moveData, vector)) + { + // Full clear performed already + if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE) + reinterpret_cast(m_AlgorithmState)[index] = last; + return true; + } + } + + if (prevMoveCount == m_Moves.size()) + { + // Cannot perform full clear, have to move data in other blocks around + if (last != 0) + { + for (size_t i = last - 1; i; --i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; + } + } + + if (prevMoveCount == m_Moves.size()) + { + // No possible reallocs within blocks, try to move them around fast + return ComputeDefragmentation_Fast(vector); + } + } + else + { + switch (vectorState.operation) + { + case StateExtensive::Operation::FindFreeBlockBuffer: + vectorState.operation = StateExtensive::Operation::MoveBuffers; + break; + default: + VMA_ASSERT(0); + case StateExtensive::Operation::FindFreeBlockTexture: + vectorState.operation = StateExtensive::Operation::MoveTextures; + break; + case StateExtensive::Operation::FindFreeBlockAll: + vectorState.operation = StateExtensive::Operation::MoveAll; + break; + } + vectorState.firstFreeBlock = last; + // Nothing done, block found without reallocations, can perform another reallocs in same pass + return ComputeDefragmentation_Extensive(vector, index); + } + break; + } + case StateExtensive::Operation::MoveTextures: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (texturePresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture; + return ComputeDefragmentation_Extensive(vector, index); + } + + if (!bufferPresent && !otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } + + // No more textures to move, check buffers + vectorState.operation = StateExtensive::Operation::MoveBuffers; + bufferPresent = false; + otherPresent = false; + } + else + break; + } + case StateExtensive::Operation::MoveBuffers: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (bufferPresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return 
ComputeDefragmentation_Extensive(vector, index); + } + + if (!otherPresent) + { + vectorState.operation = StateExtensive::Operation::Cleanup; + break; + } + + // No more buffers to move, check all others + vectorState.operation = StateExtensive::Operation::MoveAll; + otherPresent = false; + } + else + break; + } + case StateExtensive::Operation::MoveAll: + { + if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector, + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + { + if (otherPresent) + { + vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer; + return ComputeDefragmentation_Extensive(vector, index); + } + // Everything moved + vectorState.operation = StateExtensive::Operation::Cleanup; + } + break; + } + case StateExtensive::Operation::Cleanup: + // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062). + break; + } + + if (vectorState.operation == StateExtensive::Operation::Cleanup) + { + // All other work done, pack data in blocks even tighter if possible + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + if (ReallocWithinBlock(vector, vector.GetBlock(i))) + return true; + } + + if (prevMoveCount == m_Moves.size()) + vectorState.operation = StateExtensive::Operation::Done; + } + return false; +} + +void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state) +{ + size_t allocCount = 0; + size_t freeCount = 0; + state.avgFreeSize = 0; + state.avgAllocSize = 0; + + for (size_t i = 0; i < vector.GetBlockCount(); ++i) + { + VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + + allocCount += metadata->GetAllocationCount(); + freeCount += metadata->GetFreeRegionsCount(); + state.avgFreeSize += metadata->GetSumFreeSize(); + state.avgAllocSize += metadata->GetSize(); + } + + state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount; + state.avgFreeSize /= freeCount; +} + +bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent) +{ + const size_t prevMoveCount = m_Moves.size(); + for (size_t i = firstFreeBlock ; i;) + { + VmaDeviceMemoryBlock* block = vector.GetBlock(--i); + VmaBlockMetadata* metadata = block->m_pMetadata; + + for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) + { + MoveAllocationData moveData = GetMoveData(handle, metadata); + // Ignore newly created allocations by defragmentation algorithm + if (moveData.move.srcAllocation->GetUserData() == this) + continue; + switch (CheckCounters(moveData.move.srcAllocation->GetSize())) + { + case CounterStatus::Ignore: + continue; + case CounterStatus::End: + return true; + default: + VMA_ASSERT(0); + case CounterStatus::Pass: + break; + } + + // Move only single type of resources at once + if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType)) + { + // Try to fit allocation into free blocks + if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector)) + return false; + } + + if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)) + texturePresent = true; + else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER)) + bufferPresent = 
true; + else + otherPresent = true; + } + } + return prevMoveCount == m_Moves.size(); +} +#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS + +#ifndef _VMA_POOL_T_FUNCTIONS +VmaPool_T::VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize) + : m_BlockVector( + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), + createInfo.blockSize != 0, // explicitBlockSize + createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm + createInfo.priority, + VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), + createInfo.pMemoryAllocateNext), + m_Id(0), + m_Name(VMA_NULL) {} + +VmaPool_T::~VmaPool_T() +{ + VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); +} + +void VmaPool_T::SetName(const char* pName) +{ + const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + VmaFreeString(allocs, m_Name); + + if (pName != VMA_NULL) + { + m_Name = VmaCreateStringCopy(allocs, pName); + } + else + { + m_Name = VMA_NULL; + } +} +#endif // _VMA_POOL_T_FUNCTIONS + +#ifndef _VMA_ALLOCATOR_T_FUNCTIONS +VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : + m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), + m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), + m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), + m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), + m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), + m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), + m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0), + m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0), + m_hDevice(pCreateInfo->device), + m_hInstance(pCreateInfo->instance), + m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), + m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? + *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), + m_AllocationObjectAllocator(&m_AllocationCallbacks), + m_HeapSizeLimitMask(0), + m_DeviceMemoryCount(0), + m_PreferredLargeHeapBlockSize(0), + m_PhysicalDevice(pCreateInfo->physicalDevice), + m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), + m_NextPoolId(0), + m_GlobalMemoryTypeBits(UINT32_MAX) +{ + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_UseKhrDedicatedAllocation = false; + m_UseKhrBindMemory2 = false; + } + + if(VMA_DEBUG_DETECT_CORRUPTION) + { + // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it. 
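+ // For example, a VMA_DEBUG_MARGIN of 16 bytes satisfies the assert below
+ // (16 % sizeof(uint32_t) == 0), while a value such as 6 would fail it.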
+ VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); + } + + VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); + + if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + { +#if !(VMA_DEDICATED_ALLOCATION) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); + } +#endif +#if !(VMA_BIND_MEMORY2) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); + } +#endif + } +#if !(VMA_MEMORY_BUDGET) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); + } +#endif +#if !(VMA_BUFFER_DEVICE_ADDRESS) + if(m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if VMA_VULKAN_VERSION < 1002000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if VMA_VULKAN_VERSION < 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if !(VMA_MEMORY_PRIORITY) + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif + + memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); + memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); + memset(&m_MemProps, 0, sizeof(m_MemProps)); + + memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); + memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); + +#if VMA_EXTERNAL_MEMORY + memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes)); +#endif // #if VMA_EXTERNAL_MEMORY + + if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) + { + m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; + m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; + m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree; + } + + ImportVulkanFunctions(pCreateInfo->pVulkanFunctions); + + (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties); + (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps); + + VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT)); + VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); + + m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? 
+ pCreateInfo->preferredLargeHeapBlockSize : static_cast(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL) + { + memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes, + sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount()); + } +#endif // #if VMA_EXTERNAL_MEMORY + + if(pCreateInfo->pHeapSizeLimit != VMA_NULL) + { + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; + if(limit != VK_WHOLE_SIZE) + { + m_HeapSizeLimitMask |= 1u << heapIndex; + if(limit < m_MemProps.memoryHeaps[heapIndex].size) + { + m_MemProps.memoryHeaps[heapIndex].size = limit; + } + } + } + } + + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + // Create only supported types + if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) + { + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); + m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( + this, + VK_NULL_HANDLE, // hParentPool + memTypeIndex, + preferredBlockSize, + 0, + SIZE_MAX, + GetBufferImageGranularity(), + false, // explicitBlockSize + 0, // algorithm + 0.5f, // priority (0.5 is the default per Vulkan spec) + GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment + VMA_NULL); // // pMemoryAllocateNext + // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, + // becase minBlockCount is 0. + } + } +} + +VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) +{ + VkResult res = VK_SUCCESS; + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET + + return res; +} + +VmaAllocator_T::~VmaAllocator_T() +{ + VMA_ASSERT(m_Pools.IsEmpty()); + + for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; ) + { + vma_delete(this, m_pBlockVectors[memTypeIndex]); + } +} + +void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) +{ +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Static(); +#endif + + if(pVulkanFunctions != VMA_NULL) + { + ImportVulkanFunctions_Custom(pVulkanFunctions); + } + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Dynamic(); +#endif + + ValidateVulkanFunctions(); +} + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Static() +{ + // Vulkan 1.0 + m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr; + m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr; + m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; + m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; + m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory; + m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory; + m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory; + m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges; + m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; + 
m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; + m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; + m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; + m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; + m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; + m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; + m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; + m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; + m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; + + // Vulkan 1.1 +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; + m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; + m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; + } +#endif + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; + m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; + } +#endif +} + +#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) +{ + VMA_ASSERT(pVulkanFunctions != VMA_NULL); + +#define VMA_COPY_IF_NOT_NULL(funcName) \ + if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; + + VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); + VMA_COPY_IF_NOT_NULL(vkAllocateMemory); + VMA_COPY_IF_NOT_NULL(vkFreeMemory); + VMA_COPY_IF_NOT_NULL(vkMapMemory); + VMA_COPY_IF_NOT_NULL(vkUnmapMemory); + VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory); + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkCreateBuffer); + VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); + VMA_COPY_IF_NOT_NULL(vkCreateImage); + VMA_COPY_IF_NOT_NULL(vkDestroyImage); + VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); +#endif + +#if VMA_MEMORY_BUDGET + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); +#endif + +#if VMA_VULKAN_VERSION >= 1003000 + 
VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements); +#endif + +#undef VMA_COPY_IF_NOT_NULL +} + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Dynamic() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr && + "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " + "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. " + "Other members can be null."); + +#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString); +#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString); + + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); + VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); + VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); + VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); + VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory"); + VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); + VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); + VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); + VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); + VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2"); + } +#endif + +#if VMA_DEDICATED_ALLOCATION + if(m_UseKhrDedicatedAllocation) + { + 
VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR"); + } +#endif + +#if VMA_BIND_MEMORY2 + if(m_UseKhrBindMemory2) + { + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); + } +#endif // #if VMA_BIND_MEMORY2 + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif // #if VMA_MEMORY_BUDGET + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); + } +#endif + +#undef VMA_FETCH_DEVICE_FUNC +#undef VMA_FETCH_INSTANCE_FUNC +} + +#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ValidateVulkanFunctions() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + { + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); + } +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + { + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); + } +#endif + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL); + 
VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL); + } +#endif +} + +VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; + return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32); +} + +VkResult VmaAllocator_T::AllocateMemoryOfType( + VmaPool pool, + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedPreferred, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + VmaBlockVector& blockVector, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations != VMA_NULL); + VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size); + + VmaAllocationCreateInfo finalCreateInfo = createInfo; + VkResult res = CalcMemTypeParams( + finalCreateInfo, + memTypeIndex, + size, + allocationCount); + if(res != VK_SUCCESS) + return res; + + if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + return AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + blockVector.GetAllocationNextPtr()); + } + else + { + const bool canAllocateDedicated = + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && + (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize()); + + if(canAllocateDedicated) + { + // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size. + if(size > blockVector.GetPreferredBlockSize() / 2) + { + dedicatedPreferred = true; + } + // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget, + // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above + // 3/4 of the maximum allocation count. 
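+ // For example, at the spec-guaranteed minimum maxMemoryAllocationCount of 4096, dedicated
+ // allocations stop being preferred once more than 3072 VkDeviceMemory objects are alive.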
+ if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4) + { + dedicatedPreferred = false; + } + + if(dedicatedPreferred) + { + res = AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + blockVector.GetAllocationNextPtr()); + if(res == VK_SUCCESS) + { + // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here. + VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + } + } + + res = blockVector.Allocate( + size, + alignment, + finalCreateInfo, + suballocType, + allocationCount, + pAllocations); + if(res == VK_SUCCESS) + return VK_SUCCESS; + + // Try dedicated memory. + if(canAllocateDedicated && !dedicatedPreferred) + { + res = AllocateDedicatedMemory( + pool, + size, + suballocType, + dedicatedAllocations, + memTypeIndex, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, + (finalCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, + finalCreateInfo.pUserData, + finalCreateInfo.priority, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + allocationCount, + pAllocations, + blockVector.GetAllocationNextPtr()); + if(res == VK_SUCCESS) + { + // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here. + VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + } + // Everything failed: Return error code. 
+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } +} + +VkResult VmaAllocator_T::AllocateDedicatedMemory( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + uint32_t memTypeIndex, + bool map, + bool isUserDataString, + bool isMappingAllowed, + bool canAliasMemory, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, + size_t allocationCount, + VmaAllocation* pAllocations, + const void* pNextChain) +{ + VMA_ASSERT(allocationCount > 0 && pAllocations); + + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.memoryTypeIndex = memTypeIndex; + allocInfo.allocationSize = size; + allocInfo.pNext = pNextChain; + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; + if(!canAliasMemory) + { + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + if(dedicatedBuffer != VK_NULL_HANDLE) + { + VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); + dedicatedAllocInfo.buffer = dedicatedBuffer; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + dedicatedAllocInfo.image = dedicatedImage; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + } + } +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + +#if VMA_BUFFER_DEVICE_ADDRESS + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if(m_UseKhrBufferDeviceAddress) + { + bool canContainBufferWithDeviceAddress = true; + if(dedicatedBuffer != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown + (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = false; + } + if(canContainBufferWithDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } + } +#endif // #if VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(priority >= 0.f && priority <= 1.f); + priorityInfo.priority = priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // #if VMA_MEMORY_PRIORITY + +#if VMA_EXTERNAL_MEMORY + // Attach VkExportMemoryAllocateInfoKHR if necessary. 
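+ // The handle types returned by GetExternalMemoryHandleTypeFlags() come from
+ // VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes for this memory type,
+ // e.g. VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT or VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT.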
+ VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex); + if(exportMemoryAllocInfo.handleTypes != 0) + { + VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); + } +#endif // #if VMA_EXTERNAL_MEMORY + + size_t allocIndex; + VkResult res = VK_SUCCESS; + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = AllocateDedicatedMemoryPage( + pool, + size, + suballocType, + memTypeIndex, + allocInfo, + map, + isUserDataString, + isMappingAllowed, + pUserData, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { + break; + } + } + + if(res == VK_SUCCESS) + { + for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + dedicatedAllocations.Register(pAllocations[allocIndex]); + } + VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex); + } + else + { + // Free all already created allocations. + while(allocIndex--) + { + VmaAllocation currAlloc = pAllocations[allocIndex]; + VkDeviceMemory hMemory = currAlloc->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + + if(currAlloc->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory); + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize()); + m_AllocationObjectAllocator.Free(currAlloc); + } + + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + bool isMappingAllowed, + void* pUserData, + VmaAllocation* pAllocation) +{ + VkDeviceMemory hMemory = VK_NULL_HANDLE; + VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); + if(res < 0) + { + VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } + + void* pMappedData = VMA_NULL; + if(map) + { + res = (*m_VulkanFunctions.vkMapMemory)( + m_hDevice, + hMemory, + 0, + VK_WHOLE_SIZE, + 0, + &pMappedData); + if(res < 0) + { + VMA_DEBUG_LOG(" vkMapMemory FAILED"); + FreeVulkanMemory(memTypeIndex, size, hMemory); + return res; + } + } + + *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed); + (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size); + if (isUserDataString) + (*pAllocation)->SetName(this, (const char*)pUserData); + else + (*pAllocation)->SetUserData(this, pUserData); + m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.buffer = hBuffer; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { 
VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +void VmaAllocator_T::GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.image = hImage; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +VkResult VmaAllocator_T::FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkFlags bufImgUsage, + uint32_t* pMemoryTypeIndex) const +{ + memoryTypeBits &= GetGlobalMemoryTypeBits(); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { + memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; + } + + VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0; + if(!FindMemoryPreferences( + IsIntegratedGpu(), + *pAllocationCreateInfo, + bufImgUsage, + requiredFlags, preferredFlags, notPreferredFlags)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + *pMemoryTypeIndex = UINT32_MAX; + uint32_t minCost = UINT32_MAX; + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { + // This memory type is acceptable according to memoryTypeBits bitmask. + if((memTypeBit & memoryTypeBits) != 0) + { + const VkMemoryPropertyFlags currFlags = + m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + // This memory type contains requiredFlags. + if((requiredFlags & ~currFlags) == 0) + { + // Calculate cost as number of bits from preferredFlags not present in this memory type. + uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) + + VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); + // Remember memory type with lowest cost. 
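+ // Worked example: if preferredFlags requests HOST_VISIBLE | HOST_CACHED and this type
+ // provides only HOST_VISIBLE (and none of notPreferredFlags), currCost is 1.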
+ if(currCost < minCost) + { + *pMemoryTypeIndex = memTypeIndex; + if(currCost == 0) + { + return VK_SUCCESS; + } + minCost = currCost; + } + } + } + } + return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; +} + +VkResult VmaAllocator_T::CalcMemTypeParams( + VmaAllocationCreateInfo& inoutCreateInfo, + uint32_t memTypeIndex, + VkDeviceSize size, + size_t allocationCount) +{ + // If memory type is not HOST_VISIBLE, disable MAPPED. + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0) + { + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaBudget heapBudget = {}; + GetHeapBudgets(&heapBudget, heapIndex, 1); + if(heapBudget.usage + size * allocationCount > heapBudget.budget) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::CalcAllocationParams( + VmaAllocationCreateInfo& inoutCreateInfo, + bool dedicatedRequired, + bool dedicatedPreferred) +{ + VMA_ASSERT((inoutCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && + "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); + VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || + (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && + "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) + { + VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && + "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + } + } + + // If memory is lazily allocated, it should be always dedicated. 
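+ // VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED targets VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT memory
+ // (typically transient attachments), which is therefore always allocated as dedicated memory
+ // rather than suballocated from larger blocks.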
+ if(dedicatedRequired || + inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + if(inoutCreateInfo.pool != VK_NULL_HANDLE) + { + if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + // Non-auto USAGE values imply HOST_ACCESS flags. + // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. + // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. + // Otherwise they just protect from assert on mapping. + if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + } + } + + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + + VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); + + if(vkMemReq.size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VmaAllocationCreateInfo createInfoFinal = createInfo; + VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation); + if(res != VK_SUCCESS) + return res; + + if(createInfoFinal.pool != VK_NULL_HANDLE) + { + VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; + return AllocateMemoryOfType( + createInfoFinal.pool, + vkMemReq.size, + vkMemReq.alignment, + prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + createInfoFinal, + blockVector.GetMemoryTypeIndex(), + suballocType, + createInfoFinal.pool->m_DedicatedAllocations, + blockVector, + allocationCount, + pAllocations); + } + else + { + // Bit mask of memory Vulkan types acceptable for this allocation. + uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; + uint32_t memTypeIndex = UINT32_MAX; + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + // Can't find any single memory type matching requirements. 
res is VK_ERROR_FEATURE_NOT_PRESENT. + if(res != VK_SUCCESS) + return res; + do + { + VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); + res = AllocateMemoryOfType( + VK_NULL_HANDLE, + vkMemReq.size, + vkMemReq.alignment, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + createInfoFinal, + memTypeIndex, + suballocType, + m_DedicatedAllocations[memTypeIndex], + *blockVector, + allocationCount, + pAllocations); + // Allocation succeeded + if(res == VK_SUCCESS) + return VK_SUCCESS; + + // Remove old memTypeIndex from list of possibilities. + memoryTypeBits &= ~(1u << memTypeIndex); + // Find alternative memTypeIndex. + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + } while(res == VK_SUCCESS); + + // No other matching memory type index could be found. + // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } +} + +void VmaAllocator_T::FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations); + + for(size_t allocIndex = allocationCount; allocIndex--; ) + { + VmaAllocation allocation = pAllocations[allocIndex]; + + if(allocation != VK_NULL_HANDLE) + { + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); + } + + allocation->FreeName(this); + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaBlockVector* pBlockVector = VMA_NULL; + VmaPool hPool = allocation->GetParentPool(); + if(hPool != VK_NULL_HANDLE) + { + pBlockVector = &hPool->m_BlockVector; + } + else + { + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); + } + pBlockVector->Free(allocation); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + FreeDedicatedMemory(allocation); + break; + default: + VMA_ASSERT(0); + } + } + } +} + +void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) +{ + // Initialize. + VmaClearDetailedStatistics(pStats->total); + for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + VmaClearDetailedStatistics(pStats->memoryType[i]); + for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + VmaClearDetailedStatistics(pStats->memoryHeap[i]); + + // Process default pools. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + if (pBlockVector != VMA_NULL) + pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Process custom pools. + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); + blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + } + + // Process dedicated allocations. 
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Sum from memory types to memory heaps. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex; + VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]); + } + + // Sum from memory heaps to total. + for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) + VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]); + + VMA_ASSERT(pStats->total.statistics.allocationCount == 0 || + pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); + VMA_ASSERT(pStats->total.unusedRangeCount == 0 || + pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); +} + +void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount) +{ +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + if(m_Budget.m_OperationsSinceBudgetFetch < 30) + { + VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + { + outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] + + outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + else + { + outBudgets->usage = 0; + } + + // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. + outBudgets->budget = VMA_MIN( + m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); + } + } + else + { + UpdateVulkanBudget(); // Outside of mutex lock + GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion + } + } + else +#endif + { + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + outBudgets->usage = outBudgets->statistics.blockBytes; + outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
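+ // Without VK_EXT_memory_budget the real usage and budget are unknown, so 80% of the heap size
+ // serves as a conservative estimate, e.g. an 8 GiB heap reports a budget of 6.4 GiB.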
+ } + } +} + +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +{ + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = hAllocation->GetMappedData(); + pAllocationInfo->pUserData = hAllocation->GetUserData(); + pAllocationInfo->pName = hAllocation->GetName(); +} + +VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) +{ + VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags); + + VmaPoolCreateInfo newCreateInfo = *pCreateInfo; + + // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash. + if(pCreateInfo->pMemoryAllocateNext) + { + VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0); + } + + if(newCreateInfo.maxBlockCount == 0) + { + newCreateInfo.maxBlockCount = SIZE_MAX; + } + if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + // Memory type index out of range or forbidden. + if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || + ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + if(newCreateInfo.minAllocationAlignment > 0) + { + VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); + } + + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); + + *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); + + VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); + if(res != VK_SUCCESS) + { + vma_delete(this, *pPool); + *pPool = VMA_NULL; + return res; + } + + // Add to m_Pools. + { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + (*pPool)->SetId(m_NextPoolId++); + m_Pools.PushBack(*pPool); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::DestroyPool(VmaPool pool) +{ + // Remove from m_Pools. + { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + m_Pools.Remove(pool); + } + + vma_delete(this, pool); +} + +void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats) +{ + VmaClearStatistics(*pPoolStats); + pool->m_BlockVector.AddStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddStatistics(*pPoolStats); +} + +void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats) +{ + VmaClearDetailedStatistics(*pPoolStats); + pool->m_BlockVector.AddDetailedStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats); +} + +void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) +{ + m_CurrentFrameIndex.store(frameIndex); + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET +} + +VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) +{ + return hPool->m_BlockVector.CheckCorruption(); +} + +VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) +{ + VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; + + // Process default pools. 
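+    // VK_ERROR_FEATURE_NOT_PRESENT from a block vector means corruption detection is not
+    // available for that memory type (typically it requires VMA_DEBUG_DETECT_CORRUPTION with
+    // a non-zero VMA_DEBUG_MARGIN and host-visible memory) and is skipped; VK_SUCCESS from at
+    // least one vector makes the whole call succeed, any other error is returned immediately.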
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + if(pBlockVector != VMA_NULL) + { + VkResult localRes = pBlockVector->CheckCorruption(); + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; + } + } + } + + // Process custom pools. + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) + { + VkResult localRes = pool->m_BlockVector.CheckCorruption(); + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; + } + } + } + } + + return finalRes; +} + +VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory) +{ + AtomicTransactionalIncrement deviceMemoryCountIncrement; + const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount); +#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT + if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount) + { + return VK_ERROR_TOO_MANY_OBJECTS; + } +#endif + + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex); + + // HeapSizeLimit is in effect for this heap. + if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0) + { + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex]; + for(;;) + { + const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize; + if(blockBytesAfterAllocation > heapSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) + { + break; + } + } + } + else + { + m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize; + } + ++m_Budget.m_BlockCount[heapIndex]; + + // VULKAN CALL vkAllocateMemory. + VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); + + if(res == VK_SUCCESS) + { +#if VMA_MEMORY_BUDGET + ++m_Budget.m_OperationsSinceBudgetFetch; +#endif + + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData); + } + + deviceMemoryCountIncrement.Commit(); + } + else + { + --m_Budget.m_BlockCount[heapIndex]; + m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; + } + + return res; +} + +void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) +{ + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData); + } + + // VULKAN CALL vkFreeMemory. 
+ (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); + + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType); + --m_Budget.m_BlockCount[heapIndex]; + m_Budget.m_BlockBytes[heapIndex] -= size; + + --m_DeviceMemoryCount; +} + +VkResult VmaAllocator_T::BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) + { + VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.buffer = buffer; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) + { + VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.image = image; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + char *pBytes = VMA_NULL; + VkResult res = pBlock->Map(this, 1, (void**)&pBytes); + if(res == VK_SUCCESS) + { + *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); + hAllocation->BlockAllocMap(); + } + return res; + } + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + return hAllocation->DedicatedAllocMap(this, ppData); + default: + VMA_ASSERT(0); + return VK_ERROR_MEMORY_MAP_FAILED; + } +} + +void VmaAllocator_T::Unmap(VmaAllocation hAllocation) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + hAllocation->BlockAllocUnmap(); + pBlock->Unmap(this, 1); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + hAllocation->DedicatedAllocUnmap(this); + break; + default: + VMA_ASSERT(0); + } +} + +VkResult VmaAllocator_T::BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VkResult res = VK_SUCCESS; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanBuffer(hAllocation->GetMemory(), 
allocationLocalOffset, hBuffer, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block."); + res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext); + break; + } + default: + VMA_ASSERT(0); + } + return res; +} + +VkResult VmaAllocator_T::BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VkResult res = VK_SUCCESS; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext); + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block."); + res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext); + break; + } + default: + VMA_ASSERT(0); + } + return res; +} + +VkResult VmaAllocator_T::FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op) +{ + VkResult res = VK_SUCCESS; + + VkMappedMemoryRange memRange = {}; + if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange)) + { + switch(op) + { + case VMA_CACHE_FLUSH: + res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange); + break; + case VMA_CACHE_INVALIDATE: + res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange); + break; + default: + VMA_ASSERT(0); + } + } + // else: Just ignore this call. + return res; +} + +VkResult VmaAllocator_T::FlushOrInvalidateAllocations( + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, const VkDeviceSize* sizes, + VMA_CACHE_OPERATION op) +{ + typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator; + typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector; + RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks())); + + for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + const VmaAllocation alloc = allocations[allocIndex]; + const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0; + const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE; + VkMappedMemoryRange newRange; + if(GetFlushOrInvalidateRange(alloc, offset, size, newRange)) + { + ranges.push_back(newRange); + } + } + + VkResult res = VK_SUCCESS; + if(!ranges.empty()) + { + switch(op) + { + case VMA_CACHE_FLUSH: + res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); + break; + case VMA_CACHE_INVALIDATE: + res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); + break; + default: + VMA_ASSERT(0); + } + } + // else: Just ignore this call.
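+    // Only allocations living in non-coherent (HOST_VISIBLE without HOST_COHERENT) memory
+    // types contribute a VkMappedMemoryRange above; for coherent memory the batch can end up
+    // empty and the function returns VK_SUCCESS without calling into Vulkan.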
+ return res; +} + +void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) +{ + VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + VmaPool parentPool = allocation->GetParentPool(); + if(parentPool == VK_NULL_HANDLE) + { + // Default pool + m_DedicatedAllocations[memTypeIndex].Unregister(allocation); + } + else + { + // Custom pool + parentPool->m_DedicatedAllocations.Unregister(allocation); + } + + VkDeviceMemory hMemory = allocation->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + + if(allocation->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory); + + m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize()); + m_AllocationObjectAllocator.Free(allocation); + + VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex); +} + +uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const +{ + VkBufferCreateInfo dummyBufCreateInfo; + VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo); + + uint32_t memoryTypeBits = 0; + + // Create buffer. + VkBuffer buf = VK_NULL_HANDLE; + VkResult res = (*GetVulkanFunctions().vkCreateBuffer)( + m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); + if(res == VK_SUCCESS) + { + // Query for supported memory types. + VkMemoryRequirements memReq; + (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq); + memoryTypeBits = memReq.memoryTypeBits; + + // Destroy buffer. + (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks()); + } + + return memoryTypeBits; +} + +uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const +{ + // Make sure memory information is already fetched. + VMA_ASSERT(GetMemoryTypeCount() > 0); + + uint32_t memoryTypeBits = UINT32_MAX; + + if(!m_UseAmdDeviceCoherentMemory) + { + // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD. 
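+        // The resulting mask is later applied when picking memory types (see e.g. the
+        // m_GlobalMemoryTypeBits check in CreatePool above), so DEVICE_COHERENT_AMD types are
+        // only usable when the allocator was created with m_UseAmdDeviceCoherentMemory set.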
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + memoryTypeBits &= ~(1u << memTypeIndex); + } + } + } + + return memoryTypeBits; +} + +bool VmaAllocator_T::GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const +{ + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) + { + const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; + const VkDeviceSize allocationSize = allocation->GetSize(); + VMA_ASSERT(offset <= allocationSize); + + outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; + outRange.pNext = VMA_NULL; + outRange.memory = allocation->GetMemory(); + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + outRange.size = allocationSize - outRange.offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + outRange.size = VMA_MIN( + VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize), + allocationSize - outRange.offset); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + // 1. Still within this allocation. + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + size = allocationSize - offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + } + outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize); + + // 2. Adjust to whole block. + const VkDeviceSize allocationOffset = allocation->GetOffset(); + VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); + const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize(); + outRange.offset += allocationOffset; + outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset); + + break; + } + default: + VMA_ASSERT(0); + } + return true; + } + return false; +} + +#if VMA_MEMORY_BUDGET +void VmaAllocator_T::UpdateVulkanBudget() +{ + VMA_ASSERT(m_UseExtMemoryBudget); + + VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + VmaPnextChainPushFront(&memProps, &budgetProps); + + GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); + + { + VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); + + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; + m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; + m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); + + // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. + if(m_Budget.m_VulkanBudget[heapIndex] == 0) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
+ } + else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; + } + if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) + { + m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + } + m_Budget.m_OperationsSinceBudgetFetch = 0; + } +} +#endif // VMA_MEMORY_BUDGET + +void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) +{ + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + hAllocation->IsMappingAllowed() && + (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + void* pData = VMA_NULL; + VkResult res = Map(hAllocation, &pData); + if(res == VK_SUCCESS) + { + memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); + FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); + Unmap(hAllocation); + } + else + { + VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); + } + } +} + +uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() +{ + uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); + if(memoryTypeBits == UINT32_MAX) + { + memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); + m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); + } + return memoryTypeBits; +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +{ + json.WriteString("DefaultPools"); + json.BeginObject(); + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; + VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; + if (pBlockVector != VMA_NULL) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginObject(); + { + json.WriteString("PreferredBlockSize"); + json.WriteNumber(pBlockVector->GetPreferredBlockSize()); + + json.WriteString("Blocks"); + pBlockVector->PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + dedicatedAllocList.BuildStatsString(json); + } + json.EndObject(); + } + } + } + json.EndObject(); + + json.WriteString("CustomPools"); + json.BeginObject(); + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + if (!m_Pools.IsEmpty()) + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + bool displayType = true; + size_t index = 0; + for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + if (blockVector.GetMemoryTypeIndex() == memTypeIndex) + { + if (displayType) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginArray(); + displayType = false; + } + + json.BeginObject(); + { + json.WriteString("Name"); + json.BeginString(); + json.ContinueString_Size(index++); + if (pool->GetName()) + { + json.ContinueString(" - "); + json.ContinueString(pool->GetName()); + } + json.EndString(); + + json.WriteString("PreferredBlockSize"); + json.WriteNumber(blockVector.GetPreferredBlockSize()); + + json.WriteString("Blocks"); + blockVector.PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + pool->m_DedicatedAllocations.BuildStatsString(json); + } + json.EndObject(); + } + } + + if (!displayType) 
+ json.EndArray(); + } + } + } + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_ALLOCATOR_T_FUNCTIONS + + +#ifndef _VMA_PUBLIC_INTERFACE +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator) +{ + VMA_ASSERT(pCreateInfo && pAllocator); + VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3)); + VMA_DEBUG_LOG("vmaCreateAllocator"); + *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); + VkResult result = (*pAllocator)->Init(pCreateInfo); + if(result < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator); + *pAllocator = VK_NULL_HANDLE; + } + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator) +{ + if(allocator != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyAllocator"); + VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying. + vma_delete(&allocationCallbacks, allocator); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo) +{ + VMA_ASSERT(allocator && pAllocatorInfo); + pAllocatorInfo->instance = allocator->m_hInstance; + pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice(); + pAllocatorInfo->device = allocator->m_hDevice; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator allocator, + const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceProperties); + *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator allocator, + const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); + *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* pFlags) +{ + VMA_ASSERT(allocator && pFlags); + VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); + *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator allocator, + uint32_t frameIndex) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->SetCurrentFrameIndex(frameIndex); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( + VmaAllocator allocator, + VmaTotalStatistics* pStats) +{ + VMA_ASSERT(allocator && pStats); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->CalculateStatistics(pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( + VmaAllocator allocator, + VmaBudget* pBudgets) +{ + VMA_ASSERT(allocator && pBudgets); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount()); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator allocator, + char** ppStatsString, + VkBool32 detailedMap) +{ + VMA_ASSERT(allocator && ppStatsString); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VmaStringBuilder sb(allocator->GetAllocationCallbacks()); + { + VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; + allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount()); + + 
VmaTotalStatistics stats; + allocator->CalculateStatistics(&stats); + + VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); + json.BeginObject(); + { + json.WriteString("General"); + json.BeginObject(); + { + const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties; + const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps; + + json.WriteString("API"); + json.WriteString("Vulkan"); + + json.WriteString("apiVersion"); + json.BeginString(); + json.ContinueString(VK_API_VERSION_MAJOR(deviceProperties.apiVersion)); + json.ContinueString("."); + json.ContinueString(VK_API_VERSION_MINOR(deviceProperties.apiVersion)); + json.ContinueString("."); + json.ContinueString(VK_API_VERSION_PATCH(deviceProperties.apiVersion)); + json.EndString(); + + json.WriteString("GPU"); + json.WriteString(deviceProperties.deviceName); + json.WriteString("deviceType"); + json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType)); + + json.WriteString("maxMemoryAllocationCount"); + json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount); + json.WriteString("bufferImageGranularity"); + json.WriteNumber(deviceProperties.limits.bufferImageGranularity); + json.WriteString("nonCoherentAtomSize"); + json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize); + + json.WriteString("memoryHeapCount"); + json.WriteNumber(memoryProperties.memoryHeapCount); + json.WriteString("memoryTypeCount"); + json.WriteNumber(memoryProperties.memoryTypeCount); + } + json.EndObject(); + } + { + json.WriteString("Total"); + VmaPrintDetailedStatistics(json, stats.total); + } + { + json.WriteString("MemoryInfo"); + json.BeginObject(); + { + for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) + { + json.BeginString("Heap "); + json.ContinueString(heapIndex); + json.EndString(); + json.BeginObject(); + { + const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex]; + json.WriteString("Flags"); + json.BeginArray(true); + { + if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) + json.WriteString("DEVICE_LOCAL"); + #if VMA_VULKAN_VERSION >= 1001000 + if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT) + json.WriteString("MULTI_INSTANCE"); + #endif + + VkMemoryHeapFlags flags = heapInfo.flags & + ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT + #endif + ); + if (flags != 0) + json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Size"); + json.WriteNumber(heapInfo.size); + + json.WriteString("Budget"); + json.BeginObject(); + { + json.WriteString("BudgetBytes"); + json.WriteNumber(budgets[heapIndex].budget); + json.WriteString("UsageBytes"); + json.WriteNumber(budgets[heapIndex].usage); + } + json.EndObject(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]); + + json.WriteString("MemoryPools"); + json.BeginObject(); + { + for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + { + if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + { + json.BeginString("Type "); + json.ContinueString(typeIndex); + json.EndString(); + json.BeginObject(); + { + json.WriteString("Flags"); + json.BeginArray(true); + { + VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; + if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) + json.WriteString("DEVICE_LOCAL"); + if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) +
json.WriteString("HOST_VISIBLE"); + if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) + json.WriteString("HOST_COHERENT"); + if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) + json.WriteString("HOST_CACHED"); + if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) + json.WriteString("LAZILY_ALLOCATED"); + #if VMA_VULKAN_VERSION >= 1001000 + if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) + json.WriteString("PROTECTED"); + #endif + #if VK_AMD_device_coherent_memory + if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) + json.WriteString("DEVICE_COHERENT_AMD"); + if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) + json.WriteString("DEVICE_UNCACHED_AMD"); + #endif + + flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT + #endif + #if VK_AMD_device_coherent_memory + | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY + | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY + #endif + | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT + | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT + | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); + if (flags != 0) + json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); + } + json.EndObject(); + } + } + + } + json.EndObject(); + } + json.EndObject(); + } + } + json.EndObject(); + } + + if (detailedMap == VK_TRUE) + allocator->PrintDetailedMap(json); + + json.EndObject(); + } + + *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(allocator); + VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString); + } +} + +#endif // VMA_STATS_STRING_ENABLED + +/* +This function is not protected by any mutex because it just reads immutable data. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pBufferCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res; + +#if VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceBufferMemoryRequirements) + { + // Can query straight from VkBufferCreateInfo :) + VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS}; + devBufMemReq.pCreateInfo = pBufferCreateInfo; + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1003000 + { + // Must create a dummy buffer to query :( + VkBuffer hBuffer = VK_NULL_HANDLE; + res = funcs->vkCreateBuffer( + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); + + funcs->vkDestroyBuffer( + hDev, hBuffer, allocator->GetAllocationCallbacks()); + } + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pImageCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res; + +#if VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceImageMemoryRequirements) + { + // Can query straight from VkImageCreateInfo :) + VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS}; + devImgMemReq.pCreateInfo = pImageCreateInfo; + VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && + "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); + + res = 
allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1003000 + { + // Must create a dummy image to query :( + VkImage hImage = VK_NULL_HANDLE; + res = funcs->vkCreateImage( + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); + + funcs->vkDestroyImage( + hDev, hImage, allocator->GetAllocationCallbacks()); + } + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool) +{ + VMA_ASSERT(allocator && pCreateInfo && pPool); + + VMA_DEBUG_LOG("vmaCreatePool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CreatePool(pCreateInfo, pPool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool) +{ + VMA_ASSERT(allocator); + + if(pool == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyPool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->DestroyPool(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetPoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaDetailedStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->CalculatePoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VMA_DEBUG_LOG("vmaCheckPoolCorruption"); + + return allocator->CheckPoolCorruption(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName) +{ + VMA_ASSERT(allocator && pool && ppName); + + VMA_DEBUG_LOG("vmaGetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *ppName = pool->GetName(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaSetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + pool->SetName(pName); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, 
pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); + + VMA_DEBUG_LOG("vmaAllocateMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { + allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); + } + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(buffer, vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + buffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(image, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + image, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaFreeMemory"); + + 
VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory( + 1, // allocationCount + &allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { + return; + } + + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaFreeMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory(allocationCount, pAllocations); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetAllocationInfo(allocation, pAllocationInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocation->SetUserData(allocator, pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char* VMA_NULLABLE pName) +{ + allocation->SetName(allocator, pName); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags) +{ + VMA_ASSERT(allocator && allocation && pFlags); + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData) +{ + VMA_ASSERT(allocator && allocation && ppData); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->Map(allocation, ppData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->Unmap(allocation); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaFlushAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaInvalidateAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaFlushAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( + VmaAllocator allocator, 
+ uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaInvalidateAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator allocator, + uint32_t memoryTypeBits) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaCheckCorruption"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CheckCorruption(memoryTypeBits); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( + VmaAllocator allocator, + const VmaDefragmentationInfo* pInfo, + VmaDefragmentationContext* pContext) +{ + VMA_ASSERT(allocator && pInfo && pContext); + + VMA_DEBUG_LOG("vmaBeginDefragmentation"); + + if (pInfo->pool != VMA_NULL) + { + // Check if run on supported algorithms + if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); + return VK_SUCCESS; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationStats* pStats) +{ + VMA_ASSERT(allocator && context); + + VMA_DEBUG_LOG("vmaEndDefragmentation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if (pStats) + context->GetStats(*pStats); + vma_delete(allocator, context); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassBegin(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaEndDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassEnd(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, 0, image, 
VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + pBufferCreateInfo->usage, // dedicatedBufferImageUsage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
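+            // Success path: record the buffer usage for statistics dumps, optionally fill
+            // *pAllocationInfo and return VK_SUCCESS; the error paths below free the
+            // allocation and destroy the buffer again so the call never leaks on failure.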
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkDeviceSize minAlignment, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateBufferWithAlignment"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 2a. Include minAlignment + vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + pBufferCreateInfo->usage, // dedicatedBufferImageUsage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); + + VMA_DEBUG_LOG("vmaCreateAliasingBuffer"); + + *pBuffer = VK_NULL_HANDLE; + + if (pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if (res >= 0) + { + // 2. Bind buffer with memory. + res = allocator->BindBufferMemory(allocation, 0, *pBuffer, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(buffer != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); + } + + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); + + if(pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pImage = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkImage. 
+ VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if(res >= 0) + { + VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + + // 2. Allocate memory using allocator. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(*pImage, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + *pImage, // dedicatedImage + pImageCreateInfo->usage, // dedicatedBufferImageUsage + *pAllocationCreateInfo, + suballocType, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind image with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. + #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); + + *pImage = VK_NULL_HANDLE; + + VMA_DEBUG_LOG("vmaCreateImage"); + + if (pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if (res >= 0) + { + // 2. Bind image with memory. 
+ res = allocator->BindImageMemory(allocation, 0, *pImage, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NULLABLE_NON_DISPATCHABLE image, + VmaAllocation VMA_NULLABLE allocation) +{ + VMA_ASSERT(allocator); + + if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(image != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); + } + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( + const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock) +{ + VMA_ASSERT(pCreateInfo && pVirtualBlock); + VMA_ASSERT(pCreateInfo->size > 0); + VMA_DEBUG_LOG("vmaCreateVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo); + VkResult res = (*pVirtualBlock)->Init(); + if(res < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock); + *pVirtualBlock = VK_NULL_HANDLE; + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock) +{ + if(virtualBlock != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying. + vma_delete(&allocationCallbacks, virtualBlock); + } +} + +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->IsEmpty() ? 
VK_TRUE : VK_FALSE; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, + VkDeviceSize* VMA_NULLABLE pOffset) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); + VMA_DEBUG_LOG("vmaVirtualAllocate"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) +{ + if(allocation != VK_NULL_HANDLE) + { + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaVirtualFree"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Free(allocation); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaClearVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Clear(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->SetAllocationUserData(allocation, pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetStatistics(*pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->CalculateDetailedStatistics(*pStats); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks(); + VmaStringBuilder sb(allocationCallbacks); + virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb); + *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(virtualBlock != 
VK_NULL_HANDLE);
+        VMA_DEBUG_GLOBAL_MUTEX_LOCK;
+        VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
+    }
+}
+#endif // VMA_STATS_STRING_ENABLED
+#endif // _VMA_PUBLIC_INTERFACE
+#endif // VMA_IMPLEMENTATION
+
+/**
+\page quick_start Quick start
+
+\section quick_start_project_setup Project setup
+
+Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
+You don't need to build it as a separate library project.
+You can add this file directly to your project and commit it to your code repository next to your other source files.
+
+"Single header" doesn't mean that everything is contained in C/C++ declarations,
+as it tends to be in the case of inline functions or C++ templates.
+It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
+If you don't do it properly, you will get linker errors.
+
+To do it properly:
+
+-# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
+   This includes declarations of all members of the library.
+-# In exactly one CPP file define the following macro before this include.
+   It also enables the internal definitions.
+
+\code
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
+\endcode
+
+It may be a good idea to create a dedicated CPP file just for this purpose.
+
+This library includes header `<vulkan/vulkan.h>`, which in turn
+includes `<windows.h>` on Windows. If you need some specific macros defined
+before including these headers (like `WIN32_LEAN_AND_MEAN` or
+`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
+them before every `#include` of this library.
+
+This library is written in C++, but has a C-compatible interface.
+Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
+implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
+Some features of C++14 are used. STL containers, RTTI, or C++ exceptions are not used.
+
+
+\section quick_start_initialization Initialization
+
+At program startup:
+
+-# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` objects.
+-# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
+   calling vmaCreateAllocator().
+
+Only members `physicalDevice`, `device`, `instance` are required.
+However, you should inform the library which Vulkan version you use by setting
+VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
+by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
+Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
+
+You may need to configure importing Vulkan functions. There are 3 ways to do this:
+
+-# **If you link with the Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
+   - You don't need to do anything.
+   - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
+-# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
+   `vkGetDeviceProcAddr` (this is the option presented in the example below):
+   - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
+   - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
+     VmaVulkanFunctions::vkGetDeviceProcAddr.
+   - The library will fetch pointers to all other functions it needs internally.
+-# **If you fetch pointers to all Vulkan functions in a custom way**, e.g.
using some loader like + [Volk](https://github.com/zeux/volk): + - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0. + - Pass these pointers via structure #VmaVulkanFunctions. + +\code +VmaVulkanFunctions vulkanFunctions = {}; +vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr; +vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr; + +VmaAllocatorCreateInfo allocatorCreateInfo = {}; +allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2; +allocatorCreateInfo.physicalDevice = physicalDevice; +allocatorCreateInfo.device = device; +allocatorCreateInfo.instance = instance; +allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions; + +VmaAllocator allocator; +vmaCreateAllocator(&allocatorCreateInfo, &allocator); +\endcode + + +\section quick_start_resource_allocation Resource allocation + +When you want to create a buffer or image: + +-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure. +-# Fill VmaAllocationCreateInfo structure. +-# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory + already allocated and bound to it, plus #VmaAllocation objects that represents its underlying memory. + +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +Don't forget to destroy your objects when no longer needed: + +\code +vmaDestroyBuffer(allocator, buffer, allocation); +vmaDestroyAllocator(allocator); +\endcode + + +\page choosing_memory_type Choosing memory type + +Physical devices in Vulkan support various combinations of memory heaps and +types. Help with choosing correct and optimal memory type for your specific +resource is one of the key features of this library. You can use it by filling +appropriate members of VmaAllocationCreateInfo structure, as described below. +You can also combine multiple methods. + +-# If you just want to find memory type index that meets your requirements, you + can use function: vmaFindMemoryTypeIndexForBufferInfo(), + vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex(). +-# If you want to allocate a region of device memory without association with any + specific image or buffer, you can use function vmaAllocateMemory(). Usage of + this function is not recommended and usually not needed. + vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, + which may be useful for sparse binding. +-# If you already have a buffer or an image created, you want to allocate memory + for it and then you will bind it yourself, you can use function + vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(). + For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory() + or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2(). +-# **This is the easiest and recommended way to use this library:** + If you want to create a buffer or an image, allocate memory for it and bind + them together, all in one call, you can use function vmaCreateBuffer(), + vmaCreateImage(). + +When using 3. 
or 4., the library internally queries Vulkan for memory types
+supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
+and uses only one of these types.
+
+If no memory type can be found that meets all the requirements, these functions
+return `VK_ERROR_FEATURE_NOT_PRESENT`.
+
+You can leave VmaAllocationCreateInfo structure completely filled with zeros.
+It means no requirements are specified for the memory type.
+It is valid, although not very useful.
+
+\section choosing_memory_type_usage Usage
+
+The easiest way to specify memory requirements is to fill member
+VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
+It defines high level, common usage types.
+Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select the best memory type for your resource automatically.
+
+For example, if you want to create a uniform buffer that will be filled using
+transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can
+do it using the following code. The buffer will most likely end up in a memory type with
+`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device.
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
+on systems with a discrete graphics card that have the memories separate, you can use
+#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
+
+When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory,
+you also need to specify one of the host access flags:
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+This will help the library decide about the preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+so you can map it.
+
+For example, a staging buffer that will be filled via a mapped pointer and then
+used as a source of transfer to the buffer described previously can be created like this.
+It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
+but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
+
+\code
+VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+stagingBufferInfo.size = 65536;
+stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo stagingAllocInfo = {};
+stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
+stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
+
+VkBuffer stagingBuffer;
+VmaAllocation stagingAllocation;
+vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
+\endcode
+
+For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
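+
+As one more brief illustration along the same lines, a readback buffer used to download data from the GPU
+and then read it on the CPU can prefer host memory and request random host access
+(a minimal sketch; the names used here are placeholders):
+
+\code
+VkBufferCreateInfo readbackBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+readbackBufferInfo.size = 65536;
+readbackBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo readbackAllocInfo = {};
+readbackAllocInfo.usage = VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
+readbackAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
+
+VkBuffer readbackBuffer;
+VmaAllocation readbackAllocation;
+vmaCreateBuffer(allocator, &readbackBufferInfo, &readbackAllocInfo, &readbackBuffer, &readbackAllocation, nullptr);
+\endcode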
+ +Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows +about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed, +so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc. +If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting +memory type, as decribed below. + +\note +Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`, +`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`) +are still available and work same way as in previous versions of the library +for backward compatibility, but they are not recommended. + +\section choosing_memory_type_required_preferred_flags Required and preferred flags + +You can specify more detailed requirements by filling members +VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags +with a combination of bits from enum `VkMemoryPropertyFlags`. For example, +if you want to create a buffer that will be persistently mapped on host (so it +must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`, +use following code: + +\code +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; +allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; +allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +A memory type is chosen that has all the required flags and as many preferred +flags set as possible. + +Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags, +plus some extra "magic" (heuristics). + +\section choosing_memory_type_explicit_memory_types Explicit memory types + +If you inspected memory types available on the physical device and you have +a preference for memory types that you want to use, you can fill member +VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set +means that a memory type with that index is allowed to be used for the +allocation. Special value 0, just like `UINT32_MAX`, means there are no +restrictions to memory type index. + +Please note that this member is NOT just a memory type index. +Still you can use it to choose just one, specific memory type. +For example, if you already determined that your buffer should be created in +memory type 2, use following code: + +\code +uint32_t memoryTypeIndex = 2; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.memoryTypeBits = 1u << memoryTypeIndex; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + + +\section choosing_memory_type_custom_memory_pools Custom memory pools + +If you allocate from custom memory pool, all the ways of specifying memory +requirements described above are not applicable and the aforementioned members +of VmaAllocationCreateInfo structure are ignored. Memory type is selected +explicitly when creating the pool and then used to make all the allocations from +that pool. For further details, see \ref custom_memory_pools. 
+ +\section choosing_memory_type_dedicated_allocations Dedicated allocations + +Memory for allocations is reserved out of larger block of `VkDeviceMemory` +allocated from Vulkan internally. That is the main feature of this whole library. +You can still request a separate memory block to be created for an allocation, +just like you would do in a trivial solution without using any allocator. +In that case, a buffer or image is always bound to that memory at offset 0. +This is called a "dedicated allocation". +You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. +The library can also internally decide to use dedicated allocation in some cases, e.g.: + +- When the size of the allocation is large. +- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled + and it reports that dedicated allocation is required or recommended for the resource. +- When allocation of next big memory block fails due to not enough device memory, + but allocation with the exact requested size succeeds. + + +\page memory_mapping Memory mapping + +To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`, +to be able to read from it or write to it in CPU code. +Mapping is possible only of memory allocated from a memory type that has +`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag. +Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose. +You can use them directly with memory allocated by this library, +but it is not recommended because of following issue: +Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed. +This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan. +Because of this, Vulkan Memory Allocator provides following facilities: + +\note If you want to be able to map an allocation, you need to specify one of the flags +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT +in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable +when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values. +For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable, +but they can still be used for consistency. + +\section memory_mapping_mapping_functions Mapping functions + +The library provides following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory(). +They are safer and more convenient to use than standard Vulkan functions. +You can map an allocation multiple times simultaneously - mapping is reference-counted internally. +You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block. +The way it is implemented is that the library always maps entire memory block, not just region of the allocation. +For further details, see description of vmaMapMemory() function. +Example: + +\code +// Having these objects initialized: +struct ConstantBuffer +{ + ... +}; +ConstantBuffer constantBufferData = ... + +VmaAllocator allocator = ... +VkBuffer constantBuffer = ... +VmaAllocation constantBufferAllocation = ... 
+ +// You can map and fill your buffer using following code: + +void* mappedData; +vmaMapMemory(allocator, constantBufferAllocation, &mappedData); +memcpy(mappedData, &constantBufferData, sizeof(constantBufferData)); +vmaUnmapMemory(allocator, constantBufferAllocation); +\endcode + +When mapping, you may see a warning from Vulkan validation layer similar to this one: + +Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used. + +It happens because the library maps entire `VkDeviceMemory` block, where different +types of images and buffers may end up together, especially on GPUs with unified memory like Intel. +You can safely ignore it if you are sure you access only memory of the intended +object that you wanted to map. + + +\section memory_mapping_persistently_mapped_memory Persistently mapped memory + +Kepping your memory persistently mapped is generally OK in Vulkan. +You don't need to unmap it before using its data on the GPU. +The library provides a special feature designed for that: +Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in +VmaAllocationCreateInfo::flags stay mapped all the time, +so you can just access CPU pointer to it any time +without a need to call any "map" or "unmap" function. +Example: + +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = sizeof(ConstantBuffer); +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); + +// Buffer is already mapped. You can access its memory. +memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData)); +\endcode + +\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up +in a mappable memory type. +For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or +#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. +#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation. +For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading. + +\section memory_mapping_cache_control Cache flush and invalidate + +Memory in Vulkan doesn't need to be unmapped before using it on GPU, +but unless a memory types has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set, +you need to manually **invalidate** cache before reading of mapped pointer +and **flush** cache after writing to mapped pointer. +Map/unmap operations don't do that automatically. +Vulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`, +`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient +functions that refer to given allocation object: vmaFlushAllocation(), +vmaInvalidateAllocation(), +or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations(). + +Regions of memory specified for flush/invalidate must be aligned to +`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library. 
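+
+For example, a write through the persistently mapped pointer from the previous example could be
+followed by a flush like this (a minimal sketch; it reuses `alloc`, `allocInfo` and `constantBufferData`
+from the example above and ignores the returned `VkResult` for brevity):
+
+\code
+memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+// No-op on HOST_COHERENT memory types; otherwise performs vkFlushMappedMemoryRanges().
+vmaFlushAllocation(allocator, alloc, 0, sizeof(constantBufferData));
+\endcode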
+In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations +within blocks are aligned to this value, so their offsets are always multiply of +`nonCoherentAtomSize` and two different allocations never share same "line" of this size. + +Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA) +currently provide `HOST_COHERENT` flag on all memory types that are +`HOST_VISIBLE`, so on PC you may not need to bother. + + +\page staying_within_budget Staying within budget + +When developing a graphics-intensive game or program, it is important to avoid allocating +more GPU memory than it is physically available. When the memory is over-committed, +various bad things can happen, depending on the specific GPU, graphics driver, and +operating system: + +- It may just work without any problems. +- The application may slow down because some memory blocks are moved to system RAM + and the GPU has to access them through PCI Express bus. +- A new allocation may take very long time to complete, even few seconds, and possibly + freeze entire system. +- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. +- It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST` + returned somewhere later. + +\section staying_within_budget_querying_for_budget Querying for budget + +To query for current memory usage and available budget, use function vmaGetHeapBudgets(). +Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap. + +Please note that this function returns different information and works faster than +vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every +allocation, while vmaCalculateStatistics() is intended to be used rarely, +only to obtain statistical information, e.g. for debugging purposes. + +It is recommended to use VK_EXT_memory_budget device extension to obtain information +about the budget from Vulkan device. VMA is able to use this extension automatically. +When not enabled, the allocator behaves same way, but then it estimates current usage +and available budget based on its internal information and Vulkan memory heap sizes, +which may be less precise. In order to use this extension: + +1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2 + required by it are available and enable them. Please note that the first is a device + extension and the second is instance extension! +2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object. +3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from + Vulkan inside of it to avoid overhead of querying it with every allocation. + +\section staying_within_budget_controlling_memory_usage Controlling memory usage + +There are many ways in which you can try to stay within the budget. + +First, when making new allocation requires allocating a new memory block, the library +tries not to exceed the budget automatically. If a block with default recommended size +(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even +dedicated memory for just this resource. + +If the size of the requested resource plus current memory usage is more than the +budget, by default the library still tries to create it, leaving it to the Vulkan +implementation whether the allocation succeeds or fails. You can change this behavior +by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. 
With it, the allocation is
+not made if it would exceed the budget or if the budget is already exceeded.
+VMA then tries to make the allocation from the next eligible Vulkan memory type.
+If all of them fail, the call fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
+when creating resources that are not essential for the application (e.g. the texture
+of a specific object) and not to pass it when creating critically important resources
+(e.g. render targets).
+
+On AMD graphics cards there is a custom vendor extension available: VK_AMD_memory_overallocation_behavior
+that allows controlling the behavior of the Vulkan implementation in out-of-memory cases -
+whether it should fail with an error code or still allow the allocation.
+Usage of this extension involves only passing an extra structure on Vulkan device creation,
+so it is out of scope of this library.
+
+Finally, you can also use the #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
+a new allocation is created only when it fits inside one of the existing memory blocks.
+If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+This also ensures that the function call is very fast because it never goes to Vulkan
+to obtain a new block.
+
+\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
+set to more than 0 will currently try to allocate memory blocks without checking whether they
+fit within budget.
+
+
+\page resource_aliasing Resource aliasing (overlap)
+
+New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
+management, give an opportunity to alias (overlap) multiple resources in the
+same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
+It can be useful to save video memory, but it must be used with caution.
+
+For example, if you know the flow of your whole render frame in advance, you
+are going to use some intermediate textures or buffers only during a small range of render passes,
+and you know these ranges don't overlap in time, you can bind these resources to
+the same place in memory, even if they have completely different parameters (width, height, format etc.).
+
+![Resource aliasing (overlap)](../gfx/Aliasing.png)
+
+Such a scenario is possible using VMA, but you need to create your images manually.
+Then you need to calculate the parameters of an allocation to be made using the formula:
+
+- allocation size = max(size of each image)
+- allocation alignment = max(alignment of each image)
+- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
+
+The following example shows two different images bound to the same place in memory,
+allocated to fit the largest of them.
+
+\code
+// A 512x512 texture to be sampled.
+VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img1CreateInfo.extent.width = 512;
+img1CreateInfo.extent.height = 512;
+img1CreateInfo.extent.depth = 1;
+img1CreateInfo.mipLevels = 10;
+img1CreateInfo.arrayLayers = 1;
+img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
+img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+// A full screen texture to be used as color attachment.
+VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img2CreateInfo.extent.width = 1920;
+img2CreateInfo.extent.height = 1080;
+img2CreateInfo.extent.depth = 1;
+img2CreateInfo.mipLevels = 1;
+img2CreateInfo.arrayLayers = 1;
+img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VkImage img1;
+res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
+VkImage img2;
+res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
+
+VkMemoryRequirements img1MemReq;
+vkGetImageMemoryRequirements(device, img1, &img1MemReq);
+VkMemoryRequirements img2MemReq;
+vkGetImageMemoryRequirements(device, img2, &img2MemReq);
+
+VkMemoryRequirements finalMemReq = {};
+finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
+finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
+finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
+// Validate that finalMemReq.memoryTypeBits != 0 - otherwise the images cannot share memory.
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+VmaAllocation alloc;
+res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
+
+res = vmaBindImageMemory(allocator, alloc, img1);
+res = vmaBindImageMemory(allocator, alloc, img2);
+
+// You can use img1, img2 here, but not at the same time!
+
+vmaFreeMemory(allocator, alloc);
+vkDestroyImage(device, img2, nullptr);
+vkDestroyImage(device, img1, nullptr);
+\endcode
+
+Remember that using resources that alias in memory requires proper synchronization.
+You need to issue a memory barrier to make sure commands that use `img1` and `img2`
+don't overlap on the GPU timeline.
+You also need to treat a resource after aliasing as uninitialized - containing garbage data.
+For example, if you use `img1` and then want to use `img2`, you need to issue
+an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
+
+Additional considerations:
+
+- Vulkan also allows interpreting the contents of memory between aliasing resources consistently in some cases.
+See chapter 11.8. "Memory Aliasing" of the Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.
+- You can create a more complex layout where different images and buffers are bound
+at different offsets inside one large allocation. For example, one can imagine
+a big texture used in some render passes, aliasing with a set of many small buffers
+used in some further passes. To bind a resource at a non-zero offset in an allocation,
+use vmaBindBufferMemory2() / vmaBindImageMemory2().
+- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
+returned in the memory requirements of each resource to make sure the bits overlap.
+Some GPUs may expose multiple memory types suitable e.g. only for buffers or
+images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
+resources may be disjoint. Aliasing them is not possible in that case.
+
+
+\page custom_memory_pools Custom memory pools
+
+A memory pool contains a number of `VkDeviceMemory` blocks.
+The library automatically creates and manages a default pool for each memory type available on the device.
+The default memory pool automatically grows in size.
+Size of allocated blocks is also variable and managed automatically. + +You can create custom pool and allocate memory out of it. +It can be useful if you want to: + +- Keep certain kind of allocations separate from others. +- Enforce particular, fixed size of Vulkan memory blocks. +- Limit maximum amount of Vulkan memory allocated for that pool. +- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool. +- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in + #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain. +- Perform defragmentation on a specific subset of your allocations. + +To use custom memory pools: + +-# Fill VmaPoolCreateInfo structure. +-# Call vmaCreatePool() to obtain #VmaPool handle. +-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle. + You don't need to specify any other parameters of this structure, like `usage`. + +Example: + +\code +// Find memoryTypeIndex for the pool. +VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +sampleBufCreateInfo.size = 0x10000; // Doesn't matter. +sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo sampleAllocCreateInfo = {}; +sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + +uint32_t memTypeIndex; +VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, + &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex); +// Check res... + +// Create a pool that can have at most 2 blocks, 128 MiB each. +VmaPoolCreateInfo poolCreateInfo = {}; +poolCreateInfo.memoryTypeIndex = memTypeIndex; +poolCreateInfo.blockSize = 128ull * 1024 * 1024; +poolCreateInfo.maxBlockCount = 2; + +VmaPool pool; +res = vmaCreatePool(allocator, &poolCreateInfo, &pool); +// Check res... + +// Allocate a buffer out of it. +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = 1024; +bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.pool = pool; + +VkBuffer buf; +VmaAllocation alloc; +res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr); +// Check res... +\endcode + +You have to free all allocations made from this pool before destroying it. + +\code +vmaDestroyBuffer(allocator, buf, alloc); +vmaDestroyPool(allocator, pool); +\endcode + +New versions of this library support creating dedicated allocations in custom pools. +It is supported only when VmaPoolCreateInfo::blockSize = 0. +To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and +VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + +\note Excessive use of custom pools is a common mistake when using this library. +Custom pools may be useful for special purposes - when you want to +keep certain type of resources separate e.g. to reserve minimum amount of memory +for them or limit maximum amount of memory they can occupy. For most +resources this is not needed and so it is not recommended to create #VmaPool +objects and allocations out of them. Allocating from the default pool is sufficient. + + +\section custom_memory_pools_MemTypeIndex Choosing memory type index + +When creating a pool, you must explicitly specify memory type index. 
+To find the one suitable for your buffers or images, you can use helper functions +vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo(). +You need to provide structures with example parameters of buffers or images +that you are going to create in that pool. + +\code +VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +exampleBufCreateInfo.size = 1024; // Doesn't matter +exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; + +uint32_t memTypeIndex; +vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex); + +VmaPoolCreateInfo poolCreateInfo = {}; +poolCreateInfo.memoryTypeIndex = memTypeIndex; +// ... +\endcode + +When creating buffers/images allocated in that pool, provide following parameters: + +- `VkBufferCreateInfo`: Prefer to pass same parameters as above. + Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior. + Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers + or the other way around. +- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member. + Other members are ignored anyway. + +\section linear_algorithm Linear allocation algorithm + +Each Vulkan memory block managed by this library has accompanying metadata that +keeps track of used and unused regions. By default, the metadata structure and +algorithm tries to find best place for new allocations among free regions to +optimize memory usage. This way you can allocate and free objects in any order. + +![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png) + +Sometimes there is a need to use simpler, linear allocation algorithm. You can +create custom pool that uses such algorithm by adding flag +#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating +#VmaPool object. Then an alternative metadata management is used. It always +creates new allocations after last one and doesn't reuse free regions after +allocations freed in the middle. It results in better allocation performance and +less memory consumed by metadata. + +![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png) + +With this one flag, you can create a custom pool that can be used in many ways: +free-at-once, stack, double stack, and ring buffer. See below for details. +You don't need to specify explicitly which of these options you are going to use - it is detected automatically. + +\subsection linear_algorithm_free_at_once Free-at-once + +In a pool that uses linear algorithm, you still need to free all the allocations +individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free +them in any order. New allocations are always made after last one - free space +in the middle is not reused. However, when you release all the allocation and +the pool becomes empty, allocation starts from the beginning again. This way you +can use linear algorithm to speed up creation of allocations that you are going +to release all at once. + +![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png) + +This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount +value that allows multiple memory blocks. 
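+
+For reference, a minimal sketch of creating a pool that uses the linear algorithm
+(it assumes `memTypeIndex` was found with vmaFindMemoryTypeIndexForBufferInfo() as shown earlier;
+the block size is an arbitrary example value):
+
+\code
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
+poolCreateInfo.blockSize = 64ull * 1024 * 1024; // One 64 MiB block.
+poolCreateInfo.maxBlockCount = 1; // A single block is required for the double stack and ring buffer usage below.
+
+VmaPool linearPool;
+VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &linearPool);
+// Check res...
+\endcode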
+ +\subsection linear_algorithm_stack Stack + +When you free an allocation that was created last, its space can be reused. +Thanks to this, if you always release allocations in the order opposite to their +creation (LIFO - Last In First Out), you can achieve behavior of a stack. + +![Stack](../gfx/Linear_allocator_4_stack.png) + +This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount +value that allows multiple memory blocks. + +\subsection linear_algorithm_double_stack Double stack + +The space reserved by a custom pool with linear algorithm may be used by two +stacks: + +- First, default one, growing up from offset 0. +- Second, "upper" one, growing down from the end towards lower offsets. + +To make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT +to VmaAllocationCreateInfo::flags. + +![Double stack](../gfx/Linear_allocator_7_double_stack.png) + +Double stack is available only in pools with one memory block - +VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. + +When the two stacks' ends meet so there is not enough space between them for a +new allocation, such allocation fails with usual +`VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + +\subsection linear_algorithm_ring_buffer Ring buffer + +When you free some allocations from the beginning and there is not enough free space +for a new one at the end of a pool, allocator's "cursor" wraps around to the +beginning and starts allocation there. Thanks to this, if you always release +allocations in the same order as you created them (FIFO - First In First Out), +you can achieve behavior of a ring buffer / queue. + +![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png) + +Ring buffer is available only in pools with one memory block - +VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined. + +\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. + + +\page defragmentation Defragmentation + +Interleaved allocations and deallocations of many objects of varying size can +cause fragmentation over time, which can lead to a situation where the library is unable +to find a continuous range of free memory for a new allocation despite there is +enough free space, just scattered across many small free ranges between existing +allocations. + +To mitigate this problem, you can use defragmentation feature. +It doesn't happen automatically though and needs your cooperation, +because VMA is a low level library that only allocates memory. +It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures. +It cannot copy their contents as it doesn't record any commands to a command buffer. + +Example: + +\code +VmaDefragmentationInfo defragInfo = {}; +defragInfo.pool = myPool; +defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT; + +VmaDefragmentationContext defragCtx; +VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx); +// Check res... + +for(;;) +{ + VmaDefragmentationPassMoveInfo pass; + res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass); + if(res == VK_SUCCESS) + break; + else if(res != VK_INCOMPLETE) + // Handle error... + + for(uint32_t i = 0; i < pass.moveCount; ++i) + { + // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents. 
+        VmaAllocationInfo allocInfo;
+        vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
+        MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
+
+        // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
+        VkImageCreateInfo imgCreateInfo = ...
+        VkImage newImg;
+        res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
+        // Check res...
+        res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
+        // Check res...
+
+        // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
+        vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
+    }
+
+    // Make sure the copy commands finished executing.
+    vkWaitForFences(...);
+
+    // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
+    for(uint32_t i = 0; i < pass.moveCount; ++i)
+    {
+        // ...
+        vkDestroyImage(device, resData->img, nullptr);
+    }
+
+    // Update appropriate descriptors to point to the new places...
+
+    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break;
+    else if(res != VK_INCOMPLETE)
+        // Handle error...
+}
+
+vmaEndDefragmentation(allocator, defragCtx, nullptr);
+\endcode
+
+Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
+create/destroy an allocation and a buffer/image at once, these are just a shortcut for
+creating the resource, allocating memory, and binding them together.
+Defragmentation works on memory allocations only. You must handle the rest manually.
+Defragmentation is an iterative process that should repeat "passes" as long as related functions
+return `VK_INCOMPLETE`, not `VK_SUCCESS`.
+In each pass:
+
+1. vmaBeginDefragmentationPass() function call:
+   - Calculates and returns the list of allocations to be moved in this pass.
+     Note this can be a time-consuming process.
+   - Reserves destination memory for them by creating temporary destination allocations
+     that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
+2. Inside the pass, **you should**:
+   - Inspect the returned list of allocations to be moved.
+   - Create new buffers/images and bind them at the returned destination temporary allocations.
+   - Copy data from source to destination resources if necessary.
+   - Destroy the source buffers/images, but NOT their allocations.
+3. vmaEndDefragmentationPass() function call:
+   - Frees the source memory reserved for the allocations that are moved.
+   - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
+   - Frees `VkDeviceMemory` blocks that became empty.
+
+Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
+The defragmentation algorithm tries to move all suitable allocations.
+You can, however, refuse to move some of them inside a defragmentation pass, by setting
+`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
+If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
+
+Inside a pass, for each allocation that should be moved:
+
+- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
+ - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass(). +- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared, + filled, and used temporarily in each rendering frame, you can just recreate this image + without copying its data. +- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU + using `memcpy()`. +- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. + This will cancel the move. + - vmaEndDefragmentationPass() will then free the destination memory + not the source memory of the allocation, leaving it unchanged. +- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time), + you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. + - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object. + +You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool +(like in the example above) or all the default pools by setting this member to null. + +Defragmentation is always performed in each pool separately. +Allocations are never moved between different Vulkan memory types. +The size of the destination memory reserved for a moved allocation is the same as the original one. +Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation. +Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones. + +You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved +in each pass, e.g. to call it in sync with render frames and not to experience too big hitches. +See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass. + +It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA +usage, possibly from multiple threads, with the exception that allocations +returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended. + +Mapping is preserved on allocations that are moved during defragmentation. +Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations +are mapped at their new place. Of course, pointer to the mapped data changes, so it needs to be queried +using VmaAllocationInfo::pMappedData. + +\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. + + +\page statistics Statistics + +This library contains several functions that return information about its internal state, +especially the amount of memory allocated from Vulkan. + +\section statistics_numeric_statistics Numeric statistics + +If you need to obtain basic statistics about memory usage per heap, together with current budget, +you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget. +This is useful to keep track of memory usage and stay withing budget +(see also \ref staying_within_budget). +Example: + +\code +uint32_t heapIndex = ... 
+ +VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; +vmaGetHeapBudgets(allocator, budgets); + +printf("My heap currently has %u allocations taking %llu B,\n", + budgets[heapIndex].statistics.allocationCount, + budgets[heapIndex].statistics.allocationBytes); +printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n", + budgets[heapIndex].statistics.blockCount, + budgets[heapIndex].statistics.blockBytes); +printf("Vulkan reports total usage %llu B with budget %llu B.\n", + budgets[heapIndex].usage, + budgets[heapIndex].budget); +\endcode + +You can query for more detailed statistics per memory heap, type, and totals, +including minimum and maximum allocation size and unused range size, +by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics. +This function is slower though, as it has to traverse all the internal data structures, +so it should be used only for debugging purposes. + +You can query for statistics of a custom pool using function vmaGetPoolStatistics() +or vmaCalculatePoolStatistics(). + +You can query for information about a specific allocation using function vmaGetAllocationInfo(). +It fill structure #VmaAllocationInfo. + +\section statistics_json_dump JSON dump + +You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString(). +The result is guaranteed to be correct JSON. +It uses ANSI encoding. +Any strings provided by user (see [Allocation names](@ref allocation_names)) +are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding, +this JSON string can be treated as using this encoding. +It must be freed using function vmaFreeStatsString(). + +The format of this JSON string is not part of official documentation of the library, +but it will not change in backward-incompatible way without increasing library major version number +and appropriate mention in changelog. + +The JSON string contains all the data that can be obtained using vmaCalculateStatistics(). +It can also contain detailed map of allocated memory blocks and their regions - +free and occupied by allocations. +This allows e.g. to visualize the memory or assess fragmentation. + + +\page allocation_annotation Allocation names and user data + +\section allocation_user_data Allocation user data + +You can annotate allocations with your own information, e.g. for debugging purposes. +To do that, fill VmaAllocationCreateInfo::pUserData field when creating +an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer, +some handle, index, key, ordinal number or any other value that would associate +the allocation with your custom metadata. +It it useful to identify appropriate data structures in your engine given #VmaAllocation, +e.g. when doing \ref defragmentation. + +\code +VkBufferCreateInfo bufCreateInfo = ... + +MyBufferMetadata* pMetadata = CreateBufferMetadata(); + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.pUserData = pMetadata; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr); +\endcode + +The pointer may be later retrieved as VmaAllocationInfo::pUserData: + +\code +VmaAllocationInfo allocInfo; +vmaGetAllocationInfo(allocator, allocation, &allocInfo); +MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData; +\endcode + +It can also be changed using function vmaSetAllocationUserData(). 
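+
+For example (a minimal sketch reusing the hypothetical `MyBufferMetadata` type from the snippet above):
+
+\code
+MyBufferMetadata* pNewMetadata = CreateBufferMetadata();
+vmaSetAllocationUserData(allocator, allocation, pNewMetadata);
+\endcode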
+
+Values of (non-zero) allocations' `pUserData` are printed in the JSON report created by
+vmaBuildStatsString() in hexadecimal form.
+
+\section allocation_names Allocation names
+
+An allocation can also carry a null-terminated string, giving a name to the allocation.
+To set it, call vmaSetAllocationName().
+The library creates an internal copy of the string, so the pointer you pass doesn't need
+to be valid for the whole lifetime of the allocation. You can free it after the call.
+
+\code
+std::string imageName = "Texture: ";
+imageName += fileName;
+vmaSetAllocationName(allocator, allocation, imageName.c_str());
+\endcode
+
+The string can later be retrieved by inspecting VmaAllocationInfo::pName.
+It is also printed in the JSON report created by vmaBuildStatsString().
+
+\note Setting a string name on a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
+You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
+
+
+\page virtual_allocator Virtual allocator
+
+As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator".
+It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
+You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
+A common use case is sub-allocation of pieces of one large GPU buffer.
+
+\section virtual_allocator_creating_virtual_block Creating virtual block
+
+To use this functionality, there is no main "allocator" object.
+You don't need to have a #VmaAllocator object created.
+All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
+
+-# Fill in #VmaVirtualBlockCreateInfo structure.
+-# Call vmaCreateVirtualBlock(). Get a new #VmaVirtualBlock object.
+
+Example:
+
+\code
+VmaVirtualBlockCreateInfo blockCreateInfo = {};
+blockCreateInfo.size = 1048576; // 1 MB
+
+VmaVirtualBlock block;
+VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
+\endcode
+
+\section virtual_allocator_making_virtual_allocations Making virtual allocations
+
+A #VmaVirtualBlock object contains an internal data structure that keeps track of free and occupied regions
+using the same code as the main Vulkan memory allocator.
+Similarly to #VmaAllocation for standard GPU allocations, there is a #VmaVirtualAllocation type
+that represents an opaque handle to an allocation within the virtual block.
+
+In order to make such an allocation:
+
+-# Fill in #VmaVirtualAllocationCreateInfo structure.
+-# Call vmaVirtualAllocate(). Get a new #VmaVirtualAllocation object that represents the allocation.
+   You can also receive `VkDeviceSize offset` that was assigned to the allocation.
+
+Example:
+
+\code
+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.size = 4096; // 4 KB
+
+VmaVirtualAllocation alloc;
+VkDeviceSize offset;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
+if(res == VK_SUCCESS)
+{
+    // Use the 4 KB of your memory starting at offset.
+}
+else
+{
+    // Allocation failed - no space for it could be found. Handle this error!
+}
+\endcode
+
+\section virtual_allocator_deallocation Deallocation
+
+When no longer needed, an allocation can be freed by calling vmaVirtualFree().
+You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
+called for the same #VmaVirtualBlock.
+
+When the whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
+All allocations must be freed before the block is destroyed, which is checked internally by an assert.
+However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
+a feature not available in the normal Vulkan memory allocator. Example:
+
+\code
+vmaVirtualFree(block, alloc);
+vmaDestroyVirtualBlock(block);
+\endcode
+
+\section virtual_allocator_allocation_parameters Allocation parameters
+
+You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
+Its default value is null.
+It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
+larger data structure containing more information. Example:
+
+\code
+struct CustomAllocData
+{
+    std::string m_AllocName;
+};
+CustomAllocData* allocData = new CustomAllocData();
+allocData->m_AllocName = "My allocation 1";
+vmaSetVirtualAllocationUserData(block, alloc, allocData);
+\endcode
+
+The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
+vmaGetVirtualAllocationInfo() and inspecting the returned structure #VmaVirtualAllocationInfo.
+If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
+Example:
+
+\code
+VmaVirtualAllocationInfo allocInfo;
+vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
+delete (CustomAllocData*)allocInfo.pUserData;
+
+vmaVirtualFree(block, alloc);
+\endcode
+
+\section virtual_allocator_alignment_and_units Alignment and units
+
+It feels natural to express sizes and offsets in bytes.
+If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill optional member
+VmaVirtualAllocationCreateInfo::alignment to request it. Example:
+
+\code
+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.size = 4096; // 4 KB
+allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
+
+VmaVirtualAllocation alloc;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
+\endcode
+
+Alignments of different allocations made from one block may vary.
+However, if all alignments and sizes are always a multiple of some size e.g. 4 B or `sizeof(MyDataStruct)`,
+you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
+It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
+
+- VmaVirtualBlockCreateInfo::size
+- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
+- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
+
+\section virtual_allocator_statistics Statistics
+
+You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
+(to get brief statistics that are fast to calculate)
+or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
+The functions fill structures #VmaStatistics and #VmaDetailedStatistics respectively - the same as used by the normal Vulkan memory allocator.
+Example: + +\code +VmaStatistics stats; +vmaGetVirtualBlockStatistics(block, &stats); +printf("My virtual block has %llu bytes used by %u virtual allocations\n", + stats.allocationBytes, stats.allocationCount); +\endcode + +You can also request a full list of allocations and free regions as a string in JSON format by calling +vmaBuildVirtualBlockStatsString(). +Returned string must be later freed using vmaFreeVirtualBlockStatsString(). +The format of this string differs from the one returned by the main Vulkan allocator, but it is similar. + +\section virtual_allocator_additional_considerations Additional considerations + +The "virtual allocator" functionality is implemented on a level of individual memory blocks. +Keeping track of a whole collection of blocks, allocating new ones when out of free space, +deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user. + +Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory. +See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT). +You can find their description in chapter \ref custom_memory_pools. +Allocation strategies are also supported. +See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT). + +Following features are supported only by the allocator of the real GPU memory and not by virtual allocations: +buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`. + + +\page debugging_memory_usage Debugging incorrect memory usage + +If you suspect a bug with memory usage, like usage of uninitialized memory or +memory being overwritten out of bounds of an allocation, +you can use debug features of this library to verify this. + +\section debugging_memory_usage_initialization Memory initialization + +If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used, +you can enable automatic memory initialization to verify this. +To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1. + +\code +#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1 +#include "vk_mem_alloc.h" +\endcode + +It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`. +Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`. +Memory is automatically mapped and unmapped if necessary. + +If you find these values while debugging your program, good chances are that you incorrectly +read Vulkan memory that is allocated but not initialized, or already freed, respectively. + +Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped. +It works also with dedicated allocations. + +\section debugging_memory_usage_margins Margins + +By default, allocations are laid out in memory blocks next to each other if possible +(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`). + +![Allocations without margin](../gfx/Margins_1.png) + +Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified +number of bytes as a margin after every allocation. + +\code +#define VMA_DEBUG_MARGIN 16 +#include "vk_mem_alloc.h" +\endcode + +![Allocations with margin](../gfx/Margins_2.png) + +If your bug goes away after enabling margins, it means it may be caused by memory +being overwritten outside of allocation boundaries. 
It is not 100% certain though.
+A change in application behavior may also be caused by a different order and distribution
+of allocations across memory blocks after margins are applied.
+
+Margins work with all types of memory.
+
+The margin is applied only to allocations made out of memory blocks and not to dedicated
+allocations, which have their own memory block of a specific size.
+It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
+or those automatically placed in dedicated allocations, e.g. due to their
+large size or because the VK_KHR_dedicated_allocation extension recommended it.
+
+Margins appear in the [JSON dump](@ref statistics_json_dump) as part of free space.
+
+Note that enabling margins increases memory usage and fragmentation.
+
+Margins do not apply to \ref virtual_allocator.
+
+\section debugging_memory_usage_corruption_detection Corruption detection
+
+You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+of contents of the margins.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#define VMA_DEBUG_DETECT_CORRUPTION 1
+#include "vk_mem_alloc.h"
+\endcode
+
+When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
+(it must be a multiple of 4) after every allocation is filled with a magic number.
+This idea is also known as a "canary".
+Memory is automatically mapped and unmapped if necessary.
+
+This number is validated automatically when the allocation is destroyed.
+If it is not equal to the expected value, `VMA_ASSERT()` is executed.
+It clearly means that either the CPU or the GPU overwrote the memory outside the boundaries of the allocation,
+which indicates a serious bug.
+
+You can also explicitly request checking margins of all allocations in all memory blocks
+that belong to specified memory types by using function vmaCheckCorruption(),
+or in memory blocks that belong to a specified custom pool, by using function
+vmaCheckPoolCorruption().
+
+Margin validation (corruption detection) works only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`.
+
+
+\page opengl_interop OpenGL Interop
+
+VMA provides some features that help with interoperability with OpenGL.
+
+\section opengl_interop_exporting_memory Exporting memory
+
+If you want to attach the `VkExportMemoryAllocateInfoKHR` structure to the `pNext` chain of memory allocations made by the library:
+
+It is recommended to create \ref custom_memory_pools for such allocations.
+Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
+while creating the custom pool.
+Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
+not only while creating it, as no copy of the structure is made,
+but its original pointer is used for each allocation instead.
+
+If you want to export all memory allocated by the library from certain memory types,
+also dedicated allocations or other allocations made from default pools,
+an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
+It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
+through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
+Please note that new versions of the library also support dedicated allocations created in custom pools.
+
+You should not mix these two methods in a way that would apply both to the same memory type.
+Otherwise, `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`. + + +\section opengl_interop_custom_alignment Custom alignment + +Buffers or images exported to a different API like OpenGL may require a different alignment, +higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`. +To impose such alignment: + +It is recommended to create \ref custom_memory_pools for such allocations. +Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation +to be made out of this pool. +The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image +from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically. + +If you want to create a buffer with a specific minimum alignment out of default pools, +use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`. + +Note the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated +allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block. +Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation. + + +\page usage_patterns Recommended usage patterns + +Vulkan gives great flexibility in memory allocation. +This chapter shows the most common patterns. + +See also slides from talk: +[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New) + + +\section usage_patterns_gpu_only GPU-only resource + +When: +Any resources that you frequently write and read on GPU, +e.g. images used as color attachments (aka "render targets"), depth-stencil attachments, +images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)"). + +What to do: +Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + +\code +VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +imgCreateInfo.imageType = VK_IMAGE_TYPE_2D; +imgCreateInfo.extent.width = 3840; +imgCreateInfo.extent.height = 2160; +imgCreateInfo.extent.depth = 1; +imgCreateInfo.mipLevels = 1; +imgCreateInfo.arrayLayers = 1; +imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM; +imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; +imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; +imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; +imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; +allocCreateInfo.priority = 1.0f; + +VkImage img; +VmaAllocation alloc; +vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr); +\endcode + +Also consider: +Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, +especially if they are large or if you plan to destroy and recreate them with different sizes +e.g. when display resolution changes. +Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later. 
+
+When the VK_EXT_memory_priority extension is enabled, it is also worth setting a high priority on such allocations
+to decrease their chances of being evicted to system memory by the operating system.
+
+\section usage_patterns_staging_copy_upload Staging copy for upload
+
+When:
+A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
+to some GPU resource.
+
+What to do:
+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+...
+
+memcpy(allocInfo.pMappedData, myData, myDataSize);
+\endcode
+
+Also consider:
+You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
+using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
+
+
+\section usage_patterns_readback Readback
+
+When:
+Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
+e.g. results of some computations.
+
+What to do:
+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+...
+
+const float* downloadedData = (const float*)allocInfo.pMappedData;
+\endcode
+
+
+\section usage_patterns_advanced_data_uploading Advanced data uploading
+
+For resources that you frequently write on the CPU via a mapped pointer and
+frequently read on the GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
+
+-# The easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
+   even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
+   and make the device reach out to that resource directly.
+   - Reads performed by the device will then go through the PCI Express bus.
+     The performance of this access may be limited, but it may be fine depending on the size
+     of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
+     of access.
+-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
+   a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
+   (fast to access from the GPU). Then, it is likely the best choice for such a type of resource.
+
+-# Systems with a discrete graphics card and separate video memory may or may not expose
+   a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
+   If they do, it represents a piece of VRAM (or the entire VRAM, if ReBAR is enabled in the motherboard BIOS)
+   that is available to the CPU for mapping.
+   - Writes performed by the host to that memory go through the PCI Express bus.
+     The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
+     as long as the rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
+-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory
+   and a separate "staging" copy in `HOST_VISIBLE` memory, and perform an explicit transfer command between them.
+
+Thankfully, VMA offers an aid to create and use such resources in the way optimal
+for the current Vulkan device. To help the library make the best choice,
+use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
+It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
+but if no such memory type is available or allocation from it fails
+(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in the BIOS),
+it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
+It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
+so you need to create another "staging" allocation and perform explicit transfers.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+VkMemoryPropertyFlags memPropFlags;
+vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
+
+if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
+{
+    // Allocation ended up in a mappable memory and is already mapped - write to it directly.
+
+    // [Executed in runtime]:
+    memcpy(allocInfo.pMappedData, myData, myDataSize);
+}
+else
+{
+    // Allocation ended up in a non-mappable memory - need to transfer.
+    VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    stagingBufCreateInfo.size = 65536;
+    stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+    VmaAllocationCreateInfo stagingAllocCreateInfo = {};
+    stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+    stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+        VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+    VkBuffer stagingBuf;
+    VmaAllocation stagingAlloc;
+    VmaAllocationInfo stagingAllocInfo;
+    vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
+        &stagingBuf, &stagingAlloc, &stagingAllocInfo);
+
+    // [Executed in runtime]:
+    memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
+    //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
+    VkBufferCopy bufCopy = {
+        0, // srcOffset
+        0, // dstOffset
+        myDataSize }; // size
+    vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
+}
+\endcode
+
+\section usage_patterns_other_use_cases Other use cases
+
+Here are some other, less obvious use cases and their recommended settings:
+
+- An image that is used only as a transfer source and destination, but should stay on the device,
+  as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
+  for temporal antialiasing or other temporal effects.
+  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
+- An image that is used only as a transfer source and destination, but should be placed
+  in system RAM even though it doesn't need to be mapped, because it serves as a "swap" copy to evict
+  least recently used textures from VRAM.
+  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
+    as VMA needs a hint here to differentiate from the previous case.
+- A buffer that you want to map and write from the CPU and directly read from the GPU
+  (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
+  host memory due to its large size.
+  - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
+  - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
+
+
+\page configuration Configuration
+
+Please check "CONFIGURATION SECTION" in the code to find macros that you can define
+before each include of this file or change directly in this file to provide
+your own implementation of basic facilities like assert, `min()` and `max()` functions,
+mutex, atomic etc.
+The library uses its own implementation of containers by default, but you can switch to using
+STL containers instead.
+
+For example, define `VMA_ASSERT(expr)` before including the library to provide
+a custom implementation of the assertion, compatible with your project.
+By default it is defined to the standard C `assert(expr)` in the `_DEBUG` configuration
+and empty otherwise.
+
+\section config_Vulkan_functions Pointers to Vulkan functions
+
+There are multiple ways to import pointers to Vulkan functions in the library.
+In the simplest case you don't need to do anything.
+
+If the compilation or linking of your program or the initialization of the #VmaAllocator
+doesn't work for you, you can try to reconfigure it.
+
+First, the allocator tries to fetch pointers to Vulkan functions linked statically,
+like this:
+
+\code
+m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+\endcode
+
+If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
+
+Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
+You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
+by using a helper library like [volk](https://github.com/zeux/volk).
+
+Third, VMA tries to fetch remaining pointers that are still null by calling
+`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
+You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
+Other pointers will be fetched automatically.
+If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
+
+Finally, all the function pointers required by the library (considering selected
+Vulkan version and enabled extensions) are checked with `VMA_ASSERT` to make sure they are not null.
+
+
+\section custom_memory_allocator Custom host memory allocator
+
+If you use a custom allocator for CPU memory rather than the default operators `new`
+and `delete` from C++, you can make this library use your allocator as well
+by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
+functions will be passed to Vulkan, as well as used by the library itself to
+make any CPU-side allocations.
+
+\section allocation_callbacks Device memory allocation callbacks
+
+The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
+You can set up callbacks to be informed about these calls, e.g. for the purpose
+of gathering some statistics. To do it, fill optional member
+VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+
+\section heap_memory_limit Device heap memory limit
+
+When device memory of a certain heap runs out of free space, new allocations may
+fail (returning an error code) or they may succeed, silently pushing some existing
+memory blocks from GPU VRAM to system RAM (which degrades performance). This
+behavior is implementation-dependent - it depends on the GPU vendor and graphics
+driver.
+
+On AMD cards it can be controlled while creating the Vulkan device object by using
+the VK_AMD_memory_overallocation_behavior extension, if available.
+
+Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
+memory available without switching your graphics card to one that really has
+smaller VRAM, you can use a feature of this library intended for this purpose.
+To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
+
+
+
+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+
+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+performance on some GPUs. It augments the Vulkan API with the possibility to query
+the driver whether it prefers a particular buffer or image to have its own, dedicated
+allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
+to do some internal optimizations. The extension is supported by this library.
+It will be used automatically when enabled.
+
+It has been promoted to core Vulkan 1.1, so if you use an eligible Vulkan version
+and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
+you are all set.
+
+Otherwise, if you want to use it as an extension:
+
+1 . When creating the Vulkan device, check if the following 2 device extensions are
+supported (call `vkEnumerateDeviceExtensionProperties()`).
+If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+
+- VK_KHR_get_memory_requirements2
+- VK_KHR_dedicated_allocation
+
+If you enabled these extensions:
+
+2 . Use the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+your #VmaAllocator to inform the library that you enabled the required extensions
+and you want the library to use them.
+
+\code
+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+That is all. The extension will be automatically used whenever you create a
+buffer using vmaCreateBuffer() or an image using vmaCreateImage().
+
+When using the extension together with the Vulkan Validation Layer, you will receive
+warnings like this:
+
+_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
+
+It is OK, you should just ignore it. It happens because you use function
+`vkGetBufferMemoryRequirements2KHR()` instead of standard
+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+unaware of it.
+
+To learn more about this extension, see:
+
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+
+
+
+\page vk_ext_memory_priority VK_EXT_memory_priority
+
+VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
+value to Vulkan memory allocations, which the implementation may use to prefer certain
+buffers and images that are critical for performance to stay in device-local memory
+in cases when the memory is over-subscribed, while some others may be moved to system memory.
+
+VMA offers convenient usage of this extension.
+If you enable it, you can pass a "priority" parameter when creating allocations or custom pools
+and the library automatically passes the value to Vulkan using this extension.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_ext_memory_priority_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - check if the returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
+
+3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
+the `VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
+
+5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section vk_ext_memory_priority_usage Usage
+
+When using this extension, you should initialize the following members:
+
+- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+- VmaPoolCreateInfo::priority when creating a custom pool.
+
+It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
+Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority
+and so it can have a lower chance of being pushed out to system memory and experiencing degraded performance.
+
+It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
+as dedicated and set a high priority on them. For example:
+
+\code
+VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+imgCreateInfo.extent.width = 3840;
+imgCreateInfo.extent.height = 2160;
+imgCreateInfo.extent.depth = 1;
+imgCreateInfo.mipLevels = 1;
+imgCreateInfo.arrayLayers = 1;
+imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+allocCreateInfo.priority = 1.0f;
+
+VkImage img;
+VmaAllocation alloc;
+vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+\endcode
+
+The `priority` member is ignored in the following situations:
+
+- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters,
+  from the parameters passed in #VmaPoolCreateInfo when the pool was created.
+- Allocations created in default pools: They inherit the priority from the parameters
+  VMA used when creating the default pools, which means `priority == 0.5f`.
+
+
+\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
+
+VK_AMD_device_coherent_memory is a device extension that enables access to
+additional memory types with the `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
+`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
+allocation of buffers intended for writing "breadcrumb markers" in between passes
+or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
+
+When the extension is available but has not been enabled, the Vulkan physical device
+still exposes those memory types, but their usage is forbidden. VMA automatically
+takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
+to allocate memory of such a type is made.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_amd_device_coherent_memory_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - check if the returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
+
+3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
+`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
+
+5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section vk_amd_device_coherent_memory_usage Usage
+
+After following the steps described above, you can create VMA allocations and custom pools
+out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
+devices. There are multiple ways to do it, for example:
+
+- You can request or prefer to allocate out of such memory types by adding
+  `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
+  or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
+  other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
+- If you manually found a memory type index to use for this purpose, force allocation
+  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
+
+\section vk_amd_device_coherent_memory_more_information More information
+
+To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
+
+\page enabling_buffer_device_address Enabling buffer device address
+
+The device extension VK_KHR_buffer_device_address
+allows fetching a raw GPU pointer to a buffer and passing it for use in shader code.
+It has been promoted to core Vulkan 1.2.
+
+If you want to use this feature in connection with VMA, follow these steps:
+
+\section enabling_buffer_device_address_initialization Initialization
+
+1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - check if the returned array of `VkExtensionProperties` contains
+"VK_KHR_buffer_device_address".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach the additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
+ +3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add +"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to +`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT +to VmaAllocatorCreateInfo::flags. + +\section enabling_buffer_device_address_usage Usage + +After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA. +The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to +allocated memory blocks wherever it might be needed. + +Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`. +The second part of this functionality related to "capture and replay" is not supported, +as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage. + +\section enabling_buffer_device_address_more_information More information + +To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address) + +Example use of this extension can be found in the code of the sample and test suite +accompanying this library. + +\page general_considerations General considerations + +\section general_considerations_thread_safety Thread safety + +- The library has no global state, so separate #VmaAllocator objects can be used + independently. + There should be no need to create multiple such objects though - one per `VkDevice` is enough. +- By default, all calls to functions that take #VmaAllocator as first parameter + are safe to call from multiple threads simultaneously because they are + synchronized internally when needed. + This includes allocation and deallocation from default memory pool, as well as custom #VmaPool. +- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT + flag, calls to functions that take such #VmaAllocator object must be + synchronized externally. +- Access to a #VmaAllocation object must be externally synchronized. For example, + you must not call vmaGetAllocationInfo() and vmaMapMemory() from different + threads at the same time if you pass the same #VmaAllocation object to these + functions. +- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously. + +\section general_considerations_versioning_and_compatibility Versioning and compatibility + +The library uses [**Semantic Versioning**](https://semver.org/), +which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), where: + +- Incremented Patch version means a release is backward- and forward-compatible, + introducing only some internal improvements, bug fixes, optimizations etc. + or changes that are out of scope of the official API described in this documentation. 
+- Incremented Minor version means a release is backward-compatible,
+  so existing code that uses the library should continue to work, while some new
+  symbols could have been added: new structures, functions, new values in existing
+  enums and bit flags, new structure members, but not new function parameters.
+- Incremented Major version means a release could break some backward compatibility.
+
+All changes between official releases are documented in the file "CHANGELOG.md".
+
+\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
+Adding new members to existing structures is treated as backward compatible if initializing
+the new members to binary zero results in the old behavior.
+You should always fully initialize all library structures to zeros and not rely on their
+exact binary size.
+
+\section general_considerations_validation_layer_warnings Validation layer warnings
+
+When using this library, you can meet the following types of warnings issued by
+the Vulkan validation layer. They don't necessarily indicate a bug, so you may need
+to just ignore them.
+
+- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
+  - It happens when the VK_KHR_dedicated_allocation extension is enabled.
+    The `vkGetBufferMemoryRequirements2KHR` function is used instead, while the validation layer seems to be unaware of it.
+- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
+  - It happens when you map a buffer or image, because the library maps the entire
+    `VkDeviceMemory` block, where different types of images and buffers may end
+    up together, especially on GPUs with unified memory like Intel.
+- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
+  - It may happen when you use [defragmentation](@ref defragmentation).
+
+\section general_considerations_allocation_algorithm Allocation algorithm
+
+The library uses the following algorithm for allocation, in order:
+
+-# Try to find a free range of memory in existing blocks.
+-# If failed, try to create a new block of `VkDeviceMemory`, with the preferred block size.
+-# If failed, try to create such a block with size / 2, size / 4, size / 8.
+-# If failed, try to allocate a separate `VkDeviceMemory` for this allocation,
+   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+-# If failed, choose another memory type that meets the requirements specified in
+   VmaAllocationCreateInfo and go to point 1.
+-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+
+\section general_considerations_features_not_supported Features not supported
+
+Features deliberately excluded from the scope of this library:
+
+-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
+   between CPU and GPU memory and the related synchronization is the responsibility of the user.
+   Defining some "texture" object that would automatically stream its data from a
+   staging copy in CPU memory to GPU memory would rather be a feature of another,
+   higher-level library implemented on top of VMA.
+   VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
+-# **Recreation of buffers and images.** Although the library has functions for
+   buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
+   recreate these objects yourself after defragmentation. 
That is because the big + structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in + #VmaAllocation object. +-# **Handling CPU memory allocation failures.** When dynamically creating small C++ + objects in CPU memory (not Vulkan memory), allocation failures are not checked + and handled gracefully, because that would complicate code significantly and + is usually not needed in desktop PC applications anyway. + Success of an allocation is just checked with an assert. +-# **Code free of any compiler warnings.** Maintaining the library to compile and + work correctly on so many different platforms is hard enough. Being free of + any warnings, on any version of any compiler, is simply not feasible. + There are many preprocessor macros that make some variables unused, function parameters unreferenced, + or conditional expressions constant in some configurations. + The code of this library should not be bigger or more complicated just to silence these warnings. + It is recommended to disable such warnings instead. +-# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but + are not going to be included into this repository. +*/ diff --git a/intern/cycles/CMakeLists.txt b/intern/cycles/CMakeLists.txt index 53e87fc5c3a..366d38cc94c 100644 --- a/intern/cycles/CMakeLists.txt +++ b/intern/cycles/CMakeLists.txt @@ -85,15 +85,11 @@ elseif(WIN32 AND MSVC AND NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") # there is no /arch:SSE3, but intrinsics are available anyway if(CMAKE_CL_64) set(CYCLES_SSE2_KERNEL_FLAGS "${CYCLES_KERNEL_FLAGS}") - set(CYCLES_SSE3_KERNEL_FLAGS "${CYCLES_KERNEL_FLAGS}") set(CYCLES_SSE41_KERNEL_FLAGS "${CYCLES_KERNEL_FLAGS}") - set(CYCLES_AVX_KERNEL_FLAGS "${CYCLES_AVX_ARCH_FLAGS} ${CYCLES_KERNEL_FLAGS}") set(CYCLES_AVX2_KERNEL_FLAGS "${CYCLES_AVX2_ARCH_FLAGS} ${CYCLES_KERNEL_FLAGS}") else() set(CYCLES_SSE2_KERNEL_FLAGS "/arch:SSE2 ${CYCLES_KERNEL_FLAGS}") - set(CYCLES_SSE3_KERNEL_FLAGS "/arch:SSE2 ${CYCLES_KERNEL_FLAGS}") set(CYCLES_SSE41_KERNEL_FLAGS "/arch:SSE2 ${CYCLES_KERNEL_FLAGS}") - set(CYCLES_AVX_KERNEL_FLAGS "${CYCLES_AVX_ARCH_FLAGS} ${CYCLES_KERNEL_FLAGS}") set(CYCLES_AVX2_KERNEL_FLAGS "${CYCLES_AVX2_ARCH_FLAGS} ${CYCLES_KERNEL_FLAGS}") endif() @@ -126,11 +122,7 @@ elseif(CMAKE_COMPILER_IS_GNUCC OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang")) endif() set(CYCLES_SSE2_KERNEL_FLAGS "${CYCLES_KERNEL_FLAGS} -msse -msse2") - set(CYCLES_SSE3_KERNEL_FLAGS "${CYCLES_SSE2_KERNEL_FLAGS} -msse3 -mssse3") - set(CYCLES_SSE41_KERNEL_FLAGS "${CYCLES_SSE3_KERNEL_FLAGS} -msse4.1") - if(CXX_HAS_AVX) - set(CYCLES_AVX_KERNEL_FLAGS "${CYCLES_SSE41_KERNEL_FLAGS} -mavx") - endif() + set(CYCLES_SSE41_KERNEL_FLAGS "${CYCLES_SSE2_KERNEL_FLAGS} -msse3 -mssse3 -msse4.1") if(CXX_HAS_AVX2) set(CYCLES_AVX2_KERNEL_FLAGS "${CYCLES_SSE41_KERNEL_FLAGS} -mavx -mavx2 -mfma -mlzcnt -mbmi -mbmi2 -mf16c") endif() @@ -144,13 +136,8 @@ elseif(WIN32 AND CMAKE_CXX_COMPILER_ID MATCHES "Intel") if(CXX_HAS_SSE) set(CYCLES_SSE2_KERNEL_FLAGS "/QxSSE2") - set(CYCLES_SSE3_KERNEL_FLAGS "/QxSSSE3") set(CYCLES_SSE41_KERNEL_FLAGS "/QxSSE4.1") - if(CXX_HAS_AVX) - set(CYCLES_AVX_KERNEL_FLAGS "/arch:AVX") - endif() - if(CXX_HAS_AVX2) set(CYCLES_AVX2_KERNEL_FLAGS "/QxCORE-AVX2") endif() @@ -174,13 +161,8 @@ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Intel") set(CYCLES_SSE2_KERNEL_FLAGS "-xsse2") endif() - set(CYCLES_SSE3_KERNEL_FLAGS "-xssse3") set(CYCLES_SSE41_KERNEL_FLAGS "-xsse4.1") - if(CXX_HAS_AVX) - set(CYCLES_AVX_KERNEL_FLAGS "-xavx") - endif() - 
if(CXX_HAS_AVX2) set(CYCLES_AVX2_KERNEL_FLAGS "-xcore-avx2") endif() @@ -190,15 +172,10 @@ endif() if(CXX_HAS_SSE) add_definitions( -DWITH_KERNEL_SSE2 - -DWITH_KERNEL_SSE3 -DWITH_KERNEL_SSE41 ) endif() -if(CXX_HAS_AVX) - add_definitions(-DWITH_KERNEL_AVX) -endif() - if(CXX_HAS_AVX2) add_definitions(-DWITH_KERNEL_AVX2) endif() diff --git a/intern/cycles/blender/addon/properties.py b/intern/cycles/blender/addon/properties.py index 9ec663eb258..eed51eed95f 100644 --- a/intern/cycles/blender/addon/properties.py +++ b/intern/cycles/blender/addon/properties.py @@ -951,9 +951,7 @@ class CyclesRenderSettings(bpy.types.PropertyGroup): return _cycles.debug_flags_update(scene) debug_use_cpu_avx2: BoolProperty(name="AVX2", default=True) - debug_use_cpu_avx: BoolProperty(name="AVX", default=True) debug_use_cpu_sse41: BoolProperty(name="SSE41", default=True) - debug_use_cpu_sse3: BoolProperty(name="SSE3", default=True) debug_use_cpu_sse2: BoolProperty(name="SSE2", default=True) debug_bvh_layout: EnumProperty( name="BVH Layout", @@ -1673,19 +1671,19 @@ class CyclesPreferences(bpy.types.AddonPreferences): elif device_type == 'HIP': import sys if sys.platform[:3] == "win": - col.label(text="Requires AMD GPU with Vega or RDNA architecture", icon='BLANK1') + col.label(text="Requires AMD GPU with RDNA architecture", icon='BLANK1') col.label(text="and AMD Radeon Pro 21.Q4 driver or newer", icon='BLANK1') elif sys.platform.startswith("linux"): - col.label(text="Requires AMD GPU with Vega or RDNA architecture", icon='BLANK1') + col.label(text="Requires AMD GPU with RDNA architecture", icon='BLANK1') col.label(text="and AMD driver version 22.10 or newer", icon='BLANK1') elif device_type == 'ONEAPI': import sys if sys.platform.startswith("win"): col.label(text="Requires Intel GPU with Xe-HPG architecture", icon='BLANK1') - col.label(text="and Windows driver version 101.3430 or newer", icon='BLANK1') + col.label(text="and Windows driver version 101.4032 or newer", icon='BLANK1') elif sys.platform.startswith("linux"): col.label(text="Requires Intel GPU with Xe-HPG architecture and", icon='BLANK1') - col.label(text=" - intel-level-zero-gpu version 1.3.23904 or newer", icon='BLANK1') + col.label(text=" - intel-level-zero-gpu version 1.3.24931 or newer", icon='BLANK1') col.label(text=" - oneAPI Level-Zero Loader", icon='BLANK1') elif device_type == 'METAL': col.label(text="Requires Apple Silicon with macOS 12.2 or newer", icon='BLANK1') diff --git a/intern/cycles/blender/addon/ui.py b/intern/cycles/blender/addon/ui.py index 102e014297f..81f940529d1 100644 --- a/intern/cycles/blender/addon/ui.py +++ b/intern/cycles/blender/addon/ui.py @@ -2112,9 +2112,7 @@ class CYCLES_RENDER_PT_debug(CyclesDebugButtonsPanel, Panel): row = col.row(align=True) row.prop(cscene, "debug_use_cpu_sse2", toggle=True) - row.prop(cscene, "debug_use_cpu_sse3", toggle=True) row.prop(cscene, "debug_use_cpu_sse41", toggle=True) - row.prop(cscene, "debug_use_cpu_avx", toggle=True) row.prop(cscene, "debug_use_cpu_avx2", toggle=True) col.prop(cscene, "debug_bvh_layout", text="BVH") diff --git a/intern/cycles/blender/light.cpp b/intern/cycles/blender/light.cpp index b8db4c24eb3..d5aba2041ad 100644 --- a/intern/cycles/blender/light.cpp +++ b/intern/cycles/blender/light.cpp @@ -48,6 +48,8 @@ void BlenderSync::sync_light(BL::Object &b_parent, case BL::Light::type_SPOT: { BL::SpotLight b_spot_light(b_light); light->set_size(b_spot_light.shadow_soft_size()); + light->set_axisu(transform_get_column(&tfm, 0)); + light->set_axisv(transform_get_column(&tfm, 1)); 
light->set_light_type(LIGHT_SPOT); light->set_spot_angle(b_spot_light.spot_size()); light->set_spot_smooth(b_spot_light.spot_blend()); diff --git a/intern/cycles/blender/python.cpp b/intern/cycles/blender/python.cpp index 96cb204be4b..ebbdc8abf7f 100644 --- a/intern/cycles/blender/python.cpp +++ b/intern/cycles/blender/python.cpp @@ -63,9 +63,7 @@ static void debug_flags_sync_from_scene(BL::Scene b_scene) PointerRNA cscene = RNA_pointer_get(&b_scene.ptr, "cycles"); /* Synchronize CPU flags. */ flags.cpu.avx2 = get_boolean(cscene, "debug_use_cpu_avx2"); - flags.cpu.avx = get_boolean(cscene, "debug_use_cpu_avx"); flags.cpu.sse41 = get_boolean(cscene, "debug_use_cpu_sse41"); - flags.cpu.sse3 = get_boolean(cscene, "debug_use_cpu_sse3"); flags.cpu.sse2 = get_boolean(cscene, "debug_use_cpu_sse2"); flags.cpu.bvh_layout = (BVHLayout)get_enum(cscene, "debug_bvh_layout"); /* Synchronize CUDA flags. */ diff --git a/intern/cycles/cmake/macros.cmake b/intern/cycles/cmake/macros.cmake index d1a929f2b35..0753e8cc592 100644 --- a/intern/cycles/cmake/macros.cmake +++ b/intern/cycles/cmake/macros.cmake @@ -111,8 +111,10 @@ macro(cycles_external_libraries_append libraries) endif() if(WITH_OPENIMAGEDENOISE) list(APPEND ${libraries} ${OPENIMAGEDENOISE_LIBRARIES}) - if(APPLE AND "${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64") - list(APPEND ${libraries} "-framework Accelerate") + if(APPLE) + if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64") + list(APPEND ${libraries} "-framework Accelerate") + endif() endif() endif() if(WITH_ALEMBIC) @@ -136,7 +138,15 @@ macro(cycles_external_libraries_append libraries) ${PYTHON_LIBRARIES} ${ZLIB_LIBRARIES} ${CMAKE_DL_LIBS} - ${PTHREADS_LIBRARIES} + ) + + if(DEFINED PTHREADS_LIBRARIES) + list(APPEND ${libraries} + ${PTHREADS_LIBRARIES} + ) + endif() + + list(APPEND ${libraries} ${PLATFORM_LINKLIBS} ) diff --git a/intern/cycles/device/cpu/device.cpp b/intern/cycles/device/cpu/device.cpp index 9b249063aec..580f70b25d7 100644 --- a/intern/cycles/device/cpu/device.cpp +++ b/intern/cycles/device/cpu/device.cpp @@ -45,9 +45,7 @@ string device_cpu_capabilities() { string capabilities = ""; capabilities += system_cpu_support_sse2() ? "SSE2 " : ""; - capabilities += system_cpu_support_sse3() ? "SSE3 " : ""; capabilities += system_cpu_support_sse41() ? "SSE41 " : ""; - capabilities += system_cpu_support_avx() ? "AVX " : ""; capabilities += system_cpu_support_avx2() ? 
"AVX2" : ""; if (capabilities[capabilities.size() - 1] == ' ') capabilities.resize(capabilities.size() - 1); diff --git a/intern/cycles/device/cpu/kernel.cpp b/intern/cycles/device/cpu/kernel.cpp index 3e078129bca..4ca68e875a3 100644 --- a/intern/cycles/device/cpu/kernel.cpp +++ b/intern/cycles/device/cpu/kernel.cpp @@ -9,8 +9,7 @@ CCL_NAMESPACE_BEGIN #define KERNEL_FUNCTIONS(name) \ KERNEL_NAME_EVAL(cpu, name), KERNEL_NAME_EVAL(cpu_sse2, name), \ - KERNEL_NAME_EVAL(cpu_sse3, name), KERNEL_NAME_EVAL(cpu_sse41, name), \ - KERNEL_NAME_EVAL(cpu_avx, name), KERNEL_NAME_EVAL(cpu_avx2, name) + KERNEL_NAME_EVAL(cpu_sse41, name), KERNEL_NAME_EVAL(cpu_avx2, name) #define REGISTER_KERNEL(name) name(KERNEL_FUNCTIONS(name)) #define REGISTER_KERNEL_FILM_CONVERT(name) \ diff --git a/intern/cycles/device/cpu/kernel_function.h b/intern/cycles/device/cpu/kernel_function.h index 6171f582518..4875f66f8a8 100644 --- a/intern/cycles/device/cpu/kernel_function.h +++ b/intern/cycles/device/cpu/kernel_function.h @@ -17,13 +17,10 @@ template class CPUKernelFunction { public: CPUKernelFunction(FunctionType kernel_default, FunctionType kernel_sse2, - FunctionType kernel_sse3, FunctionType kernel_sse41, - FunctionType kernel_avx, FunctionType kernel_avx2) { - kernel_info_ = get_best_kernel_info( - kernel_default, kernel_sse2, kernel_sse3, kernel_sse41, kernel_avx, kernel_avx2); + kernel_info_ = get_best_kernel_info(kernel_default, kernel_sse2, kernel_sse41, kernel_avx2); } template inline auto operator()(Args... args) const @@ -60,16 +57,12 @@ template class CPUKernelFunction { KernelInfo get_best_kernel_info(FunctionType kernel_default, FunctionType kernel_sse2, - FunctionType kernel_sse3, FunctionType kernel_sse41, - FunctionType kernel_avx, FunctionType kernel_avx2) { /* Silence warnings about unused variables when compiling without some architectures. 
*/ (void)kernel_sse2; - (void)kernel_sse3; (void)kernel_sse41; - (void)kernel_avx; (void)kernel_avx2; #ifdef WITH_CYCLES_OPTIMIZED_KERNEL_AVX2 @@ -78,24 +71,12 @@ template class CPUKernelFunction { } #endif -#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_AVX - if (DebugFlags().cpu.has_avx() && system_cpu_support_avx()) { - return KernelInfo("AVX", kernel_avx); - } -#endif - #ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE41 if (DebugFlags().cpu.has_sse41() && system_cpu_support_sse41()) { return KernelInfo("SSE4.1", kernel_sse41); } #endif -#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE3 - if (DebugFlags().cpu.has_sse3() && system_cpu_support_sse3()) { - return KernelInfo("SSE3", kernel_sse3); - } -#endif - #ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE2 if (DebugFlags().cpu.has_sse2() && system_cpu_support_sse2()) { return KernelInfo("SSE2", kernel_sse2); diff --git a/intern/cycles/device/hip/util.h b/intern/cycles/device/hip/util.h index 4e4906171d1..c8b4b67ded8 100644 --- a/intern/cycles/device/hip/util.h +++ b/intern/cycles/device/hip/util.h @@ -51,7 +51,7 @@ static inline bool hipSupportsDevice(const int hipDevId) hipDeviceGetAttribute(&major, hipDeviceAttributeComputeCapabilityMajor, hipDevId); hipDeviceGetAttribute(&minor, hipDeviceAttributeComputeCapabilityMinor, hipDevId); - return (major >= 9); + return (major >= 10); } CCL_NAMESPACE_END diff --git a/intern/cycles/device/metal/device.mm b/intern/cycles/device/metal/device.mm index 51e3323370a..5ffd3a09d56 100644 --- a/intern/cycles/device/metal/device.mm +++ b/intern/cycles/device/metal/device.mm @@ -55,6 +55,10 @@ void device_metal_info(vector &devices) info.denoisers = DENOISER_NONE; info.id = id; + if (MetalInfo::get_device_vendor(device) == METAL_GPU_AMD) { + info.has_light_tree = false; + } + devices.push_back(info); device_index++; } diff --git a/intern/cycles/device/metal/device_impl.mm b/intern/cycles/device/metal/device_impl.mm index 87614f656c3..453418386a4 100644 --- a/intern/cycles/device/metal/device_impl.mm +++ b/intern/cycles/device/metal/device_impl.mm @@ -327,10 +327,21 @@ void MetalDevice::make_source(MetalPipelineType pso_type, const uint kernel_feat # define KERNEL_STRUCT_BEGIN(name, parent) \ string_replace_same_length(source, "kernel_data." #parent ".", "kernel_data_" #parent "_"); + bool next_member_is_specialized = true; + +# define KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE next_member_is_specialized = false; + /* Add constants to md5 so that 'get_best_pipeline' is able to return a suitable match. */ # define KERNEL_STRUCT_MEMBER(parent, _type, name) \ - baked_constants += string(#parent "." #name "=") + \ - to_string(_type(launch_params.data.parent.name)) + "\n"; + if (next_member_is_specialized) { \ + baked_constants += string(#parent "." #name "=") + \ + to_string(_type(launch_params.data.parent.name)) + "\n"; \ + } \ + else { \ + string_replace( \ + source, "kernel_data_" #parent "_" #name, "kernel_data." 
#parent ".__unused_" #name); \ + next_member_is_specialized = true; \ + } # include "kernel/data_template.h" diff --git a/intern/cycles/device/metal/kernel.mm b/intern/cycles/device/metal/kernel.mm index e4ce5e19f63..6312c5f88ee 100644 --- a/intern/cycles/device/metal/kernel.mm +++ b/intern/cycles/device/metal/kernel.mm @@ -49,6 +49,18 @@ struct ShaderCache { if (MetalInfo::get_device_vendor(mtlDevice) == METAL_GPU_APPLE) { switch (MetalInfo::get_apple_gpu_architecture(mtlDevice)) { default: + case APPLE_M2_BIG: + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_STATES] = {384, 128}; + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_INIT_FROM_CAMERA] = {640, 128}; + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_INTERSECT_CLOSEST] = {1024, 64}; + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW] = {704, 704}; + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_INTERSECT_SUBSURFACE] = {640, 32}; + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_QUEUED_PATHS_ARRAY] = {896, 768}; + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_SHADE_BACKGROUND] = {512, 128}; + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_SHADE_SHADOW] = {32, 32}; + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE] = {768, 576}; + occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_SORTED_PATHS_ARRAY] = {896, 768}; + break; case APPLE_M2: occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_COMPACT_SHADOW_STATES] = {32, 32}; occupancy_tuning[DEVICE_KERNEL_INTEGRATOR_INIT_FROM_CAMERA] = {832, 32}; @@ -448,13 +460,18 @@ static MTLFunctionConstantValues *GetConstantValues(KernelData const *data = nul if (!data) { data = &zero_data; } - int zero_int = 0; - [constant_values setConstantValue:&zero_int type:MTLDataType_int atIndex:Kernel_DummyConstant]; + [constant_values setConstantValue:&zero_data type:MTLDataType_int atIndex:Kernel_DummyConstant]; + + bool next_member_is_specialized = true; + +# define KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE next_member_is_specialized = false; # define KERNEL_STRUCT_MEMBER(parent, _type, name) \ - [constant_values setConstantValue:&data->parent.name \ + [constant_values setConstantValue:next_member_is_specialized ? (void *)&data->parent.name : \ + (void *)&zero_data \ type:MTLDataType_##_type \ - atIndex:KernelData_##parent##_##name]; + atIndex:KernelData_##parent##_##name]; \ + next_member_is_specialized = true; # include "kernel/data_template.h" diff --git a/intern/cycles/device/metal/queue.mm b/intern/cycles/device/metal/queue.mm index 837be0b0c23..f335844c3f9 100644 --- a/intern/cycles/device/metal/queue.mm +++ b/intern/cycles/device/metal/queue.mm @@ -278,7 +278,8 @@ int MetalDeviceQueue::num_concurrent_states(const size_t state_size) const if (metal_device_->device_vendor == METAL_GPU_APPLE) { result *= 4; - if (MetalInfo::get_apple_gpu_architecture(metal_device_->mtlDevice) == APPLE_M2) { + /* Increasing the state count doesn't notably benefit M1-family systems. */ + if (MetalInfo::get_apple_gpu_architecture(metal_device_->mtlDevice) != APPLE_M1) { size_t system_ram = system_physical_ram(); size_t allocated_so_far = [metal_device_->mtlDevice currentAllocatedSize]; size_t max_recommended_working_set = [metal_device_->mtlDevice recommendedMaxWorkingSetSize]; diff --git a/intern/cycles/device/metal/util.h b/intern/cycles/device/metal/util.h index a988d01d361..c30c4ccd9bc 100644 --- a/intern/cycles/device/metal/util.h +++ b/intern/cycles/device/metal/util.h @@ -29,6 +29,7 @@ enum AppleGPUArchitecture { APPLE_UNKNOWN, APPLE_M1, APPLE_M2, + APPLE_M2_BIG, }; /* Contains static Metal helper functions. 
*/ diff --git a/intern/cycles/device/metal/util.mm b/intern/cycles/device/metal/util.mm index f47638fac15..984e7a70c76 100644 --- a/intern/cycles/device/metal/util.mm +++ b/intern/cycles/device/metal/util.mm @@ -52,7 +52,7 @@ AppleGPUArchitecture MetalInfo::get_apple_gpu_architecture(id device) return APPLE_M1; } else if (strstr(device_name, "M2")) { - return APPLE_M2; + return get_apple_gpu_core_count(device) <= 10 ? APPLE_M2 : APPLE_M2_BIG; } return APPLE_UNKNOWN; } diff --git a/intern/cycles/device/oneapi/device_impl.cpp b/intern/cycles/device/oneapi/device_impl.cpp index edffd9525b1..0aec8268bd5 100644 --- a/intern/cycles/device/oneapi/device_impl.cpp +++ b/intern/cycles/device/oneapi/device_impl.cpp @@ -377,7 +377,7 @@ void OneapiDevice::tex_alloc(device_texture &mem) generic_alloc(mem); generic_copy_to(mem); - /* Resize if needed. Also, in case of resize - allocate in advance for future allocs. */ + /* Resize if needed. Also, in case of resize - allocate in advance for future allocations. */ const uint slot = mem.slot; if (slot >= texture_info_.size()) { texture_info_.resize(slot + 128); @@ -631,9 +631,9 @@ bool OneapiDevice::enqueue_kernel(KernelContext *kernel_context, /* Compute-runtime (ie. NEO) version is what gets returned by sycl/L0 on Windows * since Windows driver 101.3268. */ /* The same min compute-runtime version is currently required across Windows and Linux. - * For Windows driver 101.3430, compute-runtime version is 23904. */ -static const int lowest_supported_driver_version_win = 1013430; -static const int lowest_supported_driver_version_neo = 23904; + * For Windows driver 101.4032, compute-runtime version is 24931. */ +static const int lowest_supported_driver_version_win = 1014032; +static const int lowest_supported_driver_version_neo = 24931; int OneapiDevice::parse_driver_build_version(const sycl::device &device) { diff --git a/intern/cycles/graph/CMakeLists.txt b/intern/cycles/graph/CMakeLists.txt index ca4f996ed5d..d183b77c6ab 100644 --- a/intern/cycles/graph/CMakeLists.txt +++ b/intern/cycles/graph/CMakeLists.txt @@ -5,6 +5,9 @@ set(INC .. ) +set(INC_SYS +) + set(SRC node.cpp node_type.cpp diff --git a/intern/cycles/integrator/CMakeLists.txt b/intern/cycles/integrator/CMakeLists.txt index 9869a8744a3..0559a3e9401 100644 --- a/intern/cycles/integrator/CMakeLists.txt +++ b/intern/cycles/integrator/CMakeLists.txt @@ -5,6 +5,9 @@ set(INC .. ) +set(INC_SYS +) + set(SRC adaptive_sampling.cpp denoiser.cpp diff --git a/intern/cycles/kernel/CMakeLists.txt b/intern/cycles/kernel/CMakeLists.txt index a3ef08f4644..50d2ff3b898 100644 --- a/intern/cycles/kernel/CMakeLists.txt +++ b/intern/cycles/kernel/CMakeLists.txt @@ -14,9 +14,7 @@ set(INC_SYS set(SRC_KERNEL_DEVICE_CPU device/cpu/kernel.cpp device/cpu/kernel_sse2.cpp - device/cpu/kernel_sse3.cpp device/cpu/kernel_sse41.cpp - device/cpu/kernel_avx.cpp device/cpu/kernel_avx2.cpp ) @@ -736,25 +734,25 @@ if(WITH_CYCLES_DEVICE_ONEAPI) endif() # SYCL_CPP_FLAGS is a variable that the user can set to pass extra compiler options set(sycl_compiler_flags - ${CMAKE_CURRENT_SOURCE_DIR}/${SRC_KERNEL_DEVICE_ONEAPI} - -fsycl - -fsycl-unnamed-lambda - -fdelayed-template-parsing - -mllvm -inlinedefault-threshold=250 - -mllvm -inlinehint-threshold=350 - -fsycl-device-code-split=per_kernel - -fsycl-max-parallel-link-jobs=${SYCL_OFFLINE_COMPILER_PARALLEL_JOBS} - -shared - -DWITH_ONEAPI - -ffast-math - -DNDEBUG - -O2 - -o ${cycles_kernel_oneapi_lib} - -I${CMAKE_CURRENT_SOURCE_DIR}/.. 
- ${SYCL_CPP_FLAGS} - ) + ${CMAKE_CURRENT_SOURCE_DIR}/${SRC_KERNEL_DEVICE_ONEAPI} + -fsycl + -fsycl-unnamed-lambda + -fdelayed-template-parsing + -mllvm -inlinedefault-threshold=250 + -mllvm -inlinehint-threshold=350 + -fsycl-device-code-split=per_kernel + -fsycl-max-parallel-link-jobs=${SYCL_OFFLINE_COMPILER_PARALLEL_JOBS} + -shared + -DWITH_ONEAPI + -ffast-math + -DNDEBUG + -O2 + -o ${cycles_kernel_oneapi_lib} + -I${CMAKE_CURRENT_SOURCE_DIR}/.. + ${SYCL_CPP_FLAGS} + ) - if (WITH_CYCLES_ONEAPI_HOST_TASK_EXECUTION) + if(WITH_CYCLES_ONEAPI_HOST_TASK_EXECUTION) list(APPEND sycl_compiler_flags -DWITH_ONEAPI_SYCL_HOST_TASK) endif() @@ -942,14 +940,9 @@ set_source_files_properties(device/cpu/kernel.cpp PROPERTIES COMPILE_FLAGS "${CY if(CXX_HAS_SSE) set_source_files_properties(device/cpu/kernel_sse2.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_SSE2_KERNEL_FLAGS}") - set_source_files_properties(device/cpu/kernel_sse3.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_SSE3_KERNEL_FLAGS}") set_source_files_properties(device/cpu/kernel_sse41.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_SSE41_KERNEL_FLAGS}") endif() -if(CXX_HAS_AVX) - set_source_files_properties(device/cpu/kernel_avx.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_AVX_KERNEL_FLAGS}") -endif() - if(CXX_HAS_AVX2) set_source_files_properties(device/cpu/kernel_avx2.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_AVX2_KERNEL_FLAGS}") endif() diff --git a/intern/cycles/kernel/bake/bake.h b/intern/cycles/kernel/bake/bake.h index 384ca9168f0..899aa783289 100644 --- a/intern/cycles/kernel/bake/bake.h +++ b/intern/cycles/kernel/bake/bake.h @@ -63,8 +63,9 @@ ccl_device void kernel_background_evaluate(KernelGlobals kg, shader_setup_from_background(kg, &sd, ray_P, ray_D, ray_time); /* Evaluate shader. - * This is being evaluated for all BSDFs, so path flag does not contain a specific type. */ - const uint32_t path_flag = PATH_RAY_EMISSION; + * This is being evaluated for all BSDFs, so path flag does not contain a specific type. + * However, we want to flag the ray visibility to ignore the sun in the background map. 
*/ + const uint32_t path_flag = PATH_RAY_EMISSION | PATH_RAY_IMPORTANCE_BAKE; surface_shader_eval( kg, INTEGRATOR_STATE_NULL, &sd, NULL, path_flag); diff --git a/intern/cycles/kernel/closure/bsdf.h b/intern/cycles/kernel/closure/bsdf.h index 6de645cd1fe..e7754bde7c5 100644 --- a/intern/cycles/kernel/closure/bsdf.h +++ b/intern/cycles/kernel/closure/bsdf.h @@ -102,10 +102,9 @@ ccl_device_inline float shift_cos_in(float cos_in, const float frequency_multipl return val; } -ccl_device_inline bool bsdf_is_transmission(ccl_private const ShaderClosure *sc, - const float3 omega_in) +ccl_device_inline bool bsdf_is_transmission(ccl_private const ShaderClosure *sc, const float3 wo) { - return dot(sc->N, omega_in) < 0.0f; + return dot(sc->N, wo) < 0.0f; } ccl_device_inline int bsdf_sample(KernelGlobals kg, @@ -114,7 +113,7 @@ ccl_device_inline int bsdf_sample(KernelGlobals kg, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float2 *sampled_roughness, ccl_private float *eta) @@ -126,43 +125,43 @@ ccl_device_inline int bsdf_sample(KernelGlobals kg, switch (sc->type) { case CLOSURE_BSDF_DIFFUSE_ID: - label = bsdf_diffuse_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_diffuse_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); *sampled_roughness = one_float2(); *eta = 1.0f; break; #if defined(__SVM__) || defined(__OSL__) case CLOSURE_BSDF_OREN_NAYAR_ID: - label = bsdf_oren_nayar_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_oren_nayar_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); *sampled_roughness = one_float2(); *eta = 1.0f; break; # ifdef __OSL__ case CLOSURE_BSDF_PHONG_RAMP_ID: label = bsdf_phong_ramp_sample( - sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness); + sc, Ng, sd->wi, randu, randv, eval, wo, pdf, sampled_roughness); *eta = 1.0f; break; case CLOSURE_BSDF_DIFFUSE_RAMP_ID: - label = bsdf_diffuse_ramp_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_diffuse_ramp_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); *sampled_roughness = one_float2(); *eta = 1.0f; break; # endif case CLOSURE_BSDF_TRANSLUCENT_ID: - label = bsdf_translucent_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_translucent_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); *sampled_roughness = one_float2(); *eta = 1.0f; break; case CLOSURE_BSDF_REFLECTION_ID: - label = bsdf_reflection_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, eta); + label = bsdf_reflection_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf, eta); *sampled_roughness = zero_float2(); break; case CLOSURE_BSDF_REFRACTION_ID: - label = bsdf_refraction_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, eta); + label = bsdf_refraction_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf, eta); *sampled_roughness = zero_float2(); break; case CLOSURE_BSDF_TRANSPARENT_ID: - label = bsdf_transparent_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_transparent_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); *sampled_roughness = zero_float2(); *eta = 1.0f; break; @@ -171,85 +170,65 @@ ccl_device_inline int bsdf_sample(KernelGlobals kg, case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID: case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID: label = bsdf_microfacet_ggx_sample( - kg, sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness, eta); + sc, Ng, sd->wi, randu, randv, eval, wo, pdf, 
sampled_roughness, eta); break; case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID: case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID: - label = bsdf_microfacet_multi_ggx_sample(kg, - sc, - Ng, - sd->I, - randu, - randv, - eval, - omega_in, - pdf, - &sd->lcg_state, - sampled_roughness, - eta); + label = bsdf_microfacet_multi_ggx_sample( + kg, sc, Ng, sd->wi, randu, randv, eval, wo, pdf, &sd->lcg_state, sampled_roughness, eta); break; case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID: case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID: - label = bsdf_microfacet_multi_ggx_glass_sample(kg, - sc, - Ng, - sd->I, - randu, - randv, - eval, - omega_in, - pdf, - &sd->lcg_state, - sampled_roughness, - eta); + label = bsdf_microfacet_multi_ggx_glass_sample( + kg, sc, Ng, sd->wi, randu, randv, eval, wo, pdf, &sd->lcg_state, sampled_roughness, eta); break; case CLOSURE_BSDF_MICROFACET_BECKMANN_ID: case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID: label = bsdf_microfacet_beckmann_sample( - kg, sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness, eta); + sc, Ng, sd->wi, randu, randv, eval, wo, pdf, sampled_roughness, eta); break; case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID: label = bsdf_ashikhmin_shirley_sample( - sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness); + sc, Ng, sd->wi, randu, randv, eval, wo, pdf, sampled_roughness); *eta = 1.0f; break; case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID: - label = bsdf_ashikhmin_velvet_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_ashikhmin_velvet_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); *sampled_roughness = one_float2(); *eta = 1.0f; break; case CLOSURE_BSDF_DIFFUSE_TOON_ID: - label = bsdf_diffuse_toon_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_diffuse_toon_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); *sampled_roughness = one_float2(); *eta = 1.0f; break; case CLOSURE_BSDF_GLOSSY_TOON_ID: - label = bsdf_glossy_toon_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_glossy_toon_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); // double check if this is valid *sampled_roughness = one_float2(); *eta = 1.0f; break; case CLOSURE_BSDF_HAIR_REFLECTION_ID: label = bsdf_hair_reflection_sample( - sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness); + sc, Ng, sd->wi, randu, randv, eval, wo, pdf, sampled_roughness); *eta = 1.0f; break; case CLOSURE_BSDF_HAIR_TRANSMISSION_ID: label = bsdf_hair_transmission_sample( - sc, Ng, sd->I, randu, randv, eval, omega_in, pdf, sampled_roughness); + sc, Ng, sd->wi, randu, randv, eval, wo, pdf, sampled_roughness); *eta = 1.0f; break; case CLOSURE_BSDF_HAIR_PRINCIPLED_ID: label = bsdf_principled_hair_sample( - kg, sc, sd, randu, randv, eval, omega_in, pdf, sampled_roughness, eta); + kg, sc, sd, randu, randv, eval, wo, pdf, sampled_roughness, eta); break; case CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID: - label = bsdf_principled_diffuse_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_principled_diffuse_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); *sampled_roughness = one_float2(); *eta = 1.0f; break; case CLOSURE_BSDF_PRINCIPLED_SHEEN_ID: - label = bsdf_principled_sheen_sample(sc, Ng, sd->I, randu, randv, eval, omega_in, pdf); + label = bsdf_principled_sheen_sample(sc, Ng, sd->wi, randu, randv, eval, wo, pdf); *sampled_roughness = one_float2(); *eta = 1.0f; break; @@ -274,12 +253,12 @@ ccl_device_inline int bsdf_sample(KernelGlobals kg, const float frequency_multiplier = 
kernel_data_fetch(objects, sd->object).shadow_terminator_shading_offset; if (frequency_multiplier > 1.0f) { - const float cosNI = dot(*omega_in, sc->N); - *eval *= shift_cos_in(cosNI, frequency_multiplier); + const float cosNO = dot(*wo, sc->N); + *eval *= shift_cos_in(cosNO, frequency_multiplier); } if (label & LABEL_DIFFUSE) { if (!isequal(sc->N, sd->N)) { - *eval *= bump_shadowing_term(sd->N, sc->N, *omega_in); + *eval *= bump_shadowing_term(sd->N, sc->N, *wo); } } } @@ -426,7 +405,7 @@ ccl_device_inline void bsdf_roughness_eta(const KernelGlobals kg, ccl_device_inline int bsdf_label(const KernelGlobals kg, ccl_private const ShaderClosure *sc, - const float3 omega_in) + const float3 wo) { /* For curves use the smooth normal, particularly for ribbons the geometric * normal gives too much darkening otherwise. */ @@ -482,8 +461,8 @@ ccl_device_inline int bsdf_label(const KernelGlobals kg, } case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID: case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID: - label = (bsdf_is_transmission(sc, omega_in)) ? LABEL_TRANSMIT | LABEL_GLOSSY : - LABEL_REFLECT | LABEL_GLOSSY; + label = (bsdf_is_transmission(sc, wo)) ? LABEL_TRANSMIT | LABEL_GLOSSY : + LABEL_REFLECT | LABEL_GLOSSY; break; case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID: label = LABEL_REFLECT | LABEL_GLOSSY; @@ -504,7 +483,7 @@ ccl_device_inline int bsdf_label(const KernelGlobals kg, label = LABEL_TRANSMIT | LABEL_GLOSSY; break; case CLOSURE_BSDF_HAIR_PRINCIPLED_ID: - if (bsdf_is_transmission(sc, omega_in)) + if (bsdf_is_transmission(sc, wo)) label = LABEL_TRANSMIT | LABEL_GLOSSY; else label = LABEL_REFLECT | LABEL_GLOSSY; @@ -543,83 +522,83 @@ ccl_device_inline bsdf_eval(KernelGlobals kg, ccl_private ShaderData *sd, ccl_private const ShaderClosure *sc, - const float3 omega_in, + const float3 wo, ccl_private float *pdf) { Spectrum eval = zero_spectrum(); switch (sc->type) { case CLOSURE_BSDF_DIFFUSE_ID: - eval = bsdf_diffuse_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_diffuse_eval(sc, sd->wi, wo, pdf); break; #if defined(__SVM__) || defined(__OSL__) case CLOSURE_BSDF_OREN_NAYAR_ID: - eval = bsdf_oren_nayar_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_oren_nayar_eval(sc, sd->wi, wo, pdf); break; # ifdef __OSL__ case CLOSURE_BSDF_PHONG_RAMP_ID: - eval = bsdf_phong_ramp_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_phong_ramp_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_DIFFUSE_RAMP_ID: - eval = bsdf_diffuse_ramp_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_diffuse_ramp_eval(sc, sd->wi, wo, pdf); break; # endif case CLOSURE_BSDF_TRANSLUCENT_ID: - eval = bsdf_translucent_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_translucent_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_REFLECTION_ID: - eval = bsdf_reflection_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_reflection_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_REFRACTION_ID: - eval = bsdf_refraction_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_refraction_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_TRANSPARENT_ID: - eval = bsdf_transparent_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_transparent_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_MICROFACET_GGX_ID: case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID: case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID: case CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID: - eval = bsdf_microfacet_ggx_eval(sc, sd->N, sd->I, omega_in, pdf); + eval = bsdf_microfacet_ggx_eval(sc, sd->N, sd->wi, wo, pdf); break; case CLOSURE_BSDF_MICROFACET_MULTI_GGX_ID: case 
CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID: - eval = bsdf_microfacet_multi_ggx_eval(sc, sd->N, sd->I, omega_in, pdf, &sd->lcg_state); + eval = bsdf_microfacet_multi_ggx_eval(sc, sd->N, sd->wi, wo, pdf, &sd->lcg_state); break; case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID: case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID: - eval = bsdf_microfacet_multi_ggx_glass_eval(sc, sd->I, omega_in, pdf, &sd->lcg_state); + eval = bsdf_microfacet_multi_ggx_glass_eval(sc, sd->wi, wo, pdf, &sd->lcg_state); break; case CLOSURE_BSDF_MICROFACET_BECKMANN_ID: case CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID: - eval = bsdf_microfacet_beckmann_eval(sc, sd->N, sd->I, omega_in, pdf); + eval = bsdf_microfacet_beckmann_eval(sc, sd->N, sd->wi, wo, pdf); break; case CLOSURE_BSDF_ASHIKHMIN_SHIRLEY_ID: - eval = bsdf_ashikhmin_shirley_eval(sc, sd->N, sd->I, omega_in, pdf); + eval = bsdf_ashikhmin_shirley_eval(sc, sd->N, sd->wi, wo, pdf); break; case CLOSURE_BSDF_ASHIKHMIN_VELVET_ID: - eval = bsdf_ashikhmin_velvet_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_ashikhmin_velvet_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_DIFFUSE_TOON_ID: - eval = bsdf_diffuse_toon_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_diffuse_toon_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_GLOSSY_TOON_ID: - eval = bsdf_glossy_toon_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_glossy_toon_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_HAIR_PRINCIPLED_ID: - eval = bsdf_principled_hair_eval(kg, sd, sc, omega_in, pdf); + eval = bsdf_principled_hair_eval(kg, sd, sc, wo, pdf); break; case CLOSURE_BSDF_HAIR_REFLECTION_ID: - eval = bsdf_hair_reflection_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_hair_reflection_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_HAIR_TRANSMISSION_ID: - eval = bsdf_hair_transmission_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_hair_transmission_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID: - eval = bsdf_principled_diffuse_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_principled_diffuse_eval(sc, sd->wi, wo, pdf); break; case CLOSURE_BSDF_PRINCIPLED_SHEEN_ID: - eval = bsdf_principled_sheen_eval(sc, sd->I, omega_in, pdf); + eval = bsdf_principled_sheen_eval(sc, sd->wi, wo, pdf); break; #endif default: @@ -628,7 +607,7 @@ ccl_device_inline if (CLOSURE_IS_BSDF_DIFFUSE(sc->type)) { if (!isequal(sc->N, sd->N)) { - eval *= bump_shadowing_term(sd->N, sc->N, omega_in); + eval *= bump_shadowing_term(sd->N, sc->N, wo); } } @@ -636,9 +615,9 @@ ccl_device_inline const float frequency_multiplier = kernel_data_fetch(objects, sd->object).shadow_terminator_shading_offset; if (frequency_multiplier > 1.0f) { - const float cosNI = dot(omega_in, sc->N); - if (cosNI >= 0.0f) { - eval *= shift_cos_in(cosNI, frequency_multiplier); + const float cosNO = dot(wo, sc->N); + if (cosNO >= 0.0f) { + eval *= shift_cos_in(cosNO, frequency_multiplier); } } @@ -682,4 +661,38 @@ ccl_device void bsdf_blur(KernelGlobals kg, ccl_private ShaderClosure *sc, float #endif } +ccl_device_inline Spectrum bsdf_albedo(ccl_private const ShaderData *sd, + ccl_private const ShaderClosure *sc) +{ + Spectrum albedo = sc->weight; + /* Some closures include additional components such as Fresnel terms that cause their albedo to + * be below 1. The point of this function is to return a best-effort estimation of their albedo, + * meaning the amount of reflected/refracted light that would be expected when illuminated by a + * uniform white background. 
+ * This is used for the denoising albedo pass and diffuse/glossy/transmission color passes. + * NOTE: This should always match the sample_weight of the closure - as in, if there's an albedo + * adjustment in here, the sample_weight should also be reduced accordingly. + * TODO(lukas): Consider calling this function to determine the sample_weight? Would be a bit of + * extra overhead though. */ +#if defined(__SVM__) || defined(__OSL__) + switch (sc->type) { + case CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID: + case CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID: + case CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID: + case CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID: + albedo *= microfacet_fresnel((ccl_private const MicrofacetBsdf *)sc, sd->wi, sc->N); + break; + case CLOSURE_BSDF_PRINCIPLED_SHEEN_ID: + albedo *= ((ccl_private const PrincipledSheenBsdf *)sc)->avg_value; + break; + case CLOSURE_BSDF_HAIR_PRINCIPLED_ID: + albedo *= bsdf_principled_hair_albedo(sc); + break; + default: + break; + } +#endif + return albedo; +} + CCL_NAMESPACE_END diff --git a/intern/cycles/kernel/closure/bsdf_ashikhmin_shirley.h b/intern/cycles/kernel/closure/bsdf_ashikhmin_shirley.h index db02dad3909..32f6ae17d1c 100644 --- a/intern/cycles/kernel/closure/bsdf_ashikhmin_shirley.h +++ b/intern/cycles/kernel/closure/bsdf_ashikhmin_shirley.h @@ -41,20 +41,20 @@ ccl_device_inline float bsdf_ashikhmin_shirley_roughness_to_exponent(float rough ccl_device_forceinline Spectrum bsdf_ashikhmin_shirley_eval(ccl_private const ShaderClosure *sc, const float3 Ng, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc; - const float cosNgI = dot(Ng, omega_in); + const float cosNgO = dot(Ng, wo); float3 N = bsdf->N; - float NdotI = dot(N, I); /* in Cycles/OSL convention I is omega_out */ - float NdotO = dot(N, omega_in); /* and consequently we use for O omaga_in ;) */ + float NdotI = dot(N, wi); + float NdotO = dot(N, wo); float out = 0.0f; - if ((cosNgI < 0.0f) || fmaxf(bsdf->alpha_x, bsdf->alpha_y) <= 1e-4f || + if ((cosNgO < 0.0f) || fmaxf(bsdf->alpha_x, bsdf->alpha_y) <= 1e-4f || !(NdotI > 0.0f && NdotO > 0.0f)) { *pdf = 0.0f; return zero_spectrum(); @@ -62,15 +62,15 @@ ccl_device_forceinline Spectrum bsdf_ashikhmin_shirley_eval(ccl_private const Sh NdotI = fmaxf(NdotI, 1e-6f); NdotO = fmaxf(NdotO, 1e-6f); - float3 H = normalize(omega_in + I); - float HdotI = fmaxf(fabsf(dot(H, I)), 1e-6f); + float3 H = normalize(wi + wo); + float HdotI = fmaxf(fabsf(dot(H, wi)), 1e-6f); float HdotN = fmaxf(dot(H, N), 1e-6f); /* pump from original paper * (first derivative disc., but cancels the HdotI in the pdf nicely) */ - float pump = 1.0f / fmaxf(1e-6f, (HdotI * fmaxf(NdotO, NdotI))); + float pump = 1.0f / fmaxf(1e-6f, (HdotI * fmaxf(NdotI, NdotO))); /* pump from d-brdf paper */ - /*float pump = 1.0f / fmaxf(1e-4f, ((NdotO + NdotI) * (NdotO*NdotI))); */ + /*float pump = 1.0f / fmaxf(1e-4f, ((NdotI + NdotO) * (NdotI * NdotO))); */ float n_x = bsdf_ashikhmin_shirley_roughness_to_exponent(bsdf->alpha_x); float n_y = bsdf_ashikhmin_shirley_roughness_to_exponent(bsdf->alpha_y); @@ -124,11 +124,11 @@ ccl_device_inline void bsdf_ashikhmin_shirley_sample_first_quadrant(float n_x, ccl_device int bsdf_ashikhmin_shirley_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private 
float *pdf, ccl_private float2 *sampled_roughness) { @@ -137,7 +137,7 @@ ccl_device int bsdf_ashikhmin_shirley_sample(ccl_private const ShaderClosure *sc float3 N = bsdf->N; int label = LABEL_REFLECT | LABEL_GLOSSY; - float NdotI = dot(N, I); + float NdotI = dot(N, wi); if (!(NdotI > 0.0f)) { *pdf = 0.0f; *eval = zero_spectrum(); @@ -198,12 +198,12 @@ ccl_device int bsdf_ashikhmin_shirley_sample(ccl_private const ShaderClosure *sc /* half vector to world space */ float3 H = h.x * X + h.y * Y + h.z * N; - float HdotI = dot(H, I); + float HdotI = dot(H, wi); if (HdotI < 0.0f) H = -H; - /* reflect I on H to get omega_in */ - *omega_in = -I + (2.0f * HdotI) * H; + /* reflect wi on H to get wo */ + *wo = -wi + (2.0f * HdotI) * H; if (fmaxf(bsdf->alpha_x, bsdf->alpha_y) <= 1e-4f) { /* Some high number for MIS. */ @@ -213,7 +213,7 @@ ccl_device int bsdf_ashikhmin_shirley_sample(ccl_private const ShaderClosure *sc } else { /* leave the rest to eval */ - *eval = bsdf_ashikhmin_shirley_eval(sc, N, I, *omega_in, pdf); + *eval = bsdf_ashikhmin_shirley_eval(sc, N, wi, *wo, pdf); } return label; diff --git a/intern/cycles/kernel/closure/bsdf_ashikhmin_velvet.h b/intern/cycles/kernel/closure/bsdf_ashikhmin_velvet.h index ac2183e0848..382686cde1c 100644 --- a/intern/cycles/kernel/closure/bsdf_ashikhmin_velvet.h +++ b/intern/cycles/kernel/closure/bsdf_ashikhmin_velvet.h @@ -32,35 +32,35 @@ ccl_device int bsdf_ashikhmin_velvet_setup(ccl_private VelvetBsdf *bsdf) } ccl_device Spectrum bsdf_ashikhmin_velvet_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const VelvetBsdf *bsdf = (ccl_private const VelvetBsdf *)sc; float m_invsigma2 = bsdf->invsigma2; float3 N = bsdf->N; - float cosNO = dot(N, I); - float cosNI = dot(N, omega_in); - if (!(cosNO > 0 && cosNI > 0)) { + float cosNI = dot(N, wi); + float cosNO = dot(N, wo); + if (!(cosNI > 0 && cosNO > 0)) { *pdf = 0.0f; return zero_spectrum(); } - float3 H = normalize(omega_in + I); + float3 H = normalize(wi + wo); float cosNH = dot(N, H); - float cosHO = fabsf(dot(I, H)); + float cosHI = fabsf(dot(wi, H)); - if (!(fabsf(cosNH) < 1.0f - 1e-5f && cosHO > 1e-5f)) { + if (!(fabsf(cosNH) < 1.0f - 1e-5f && cosHI > 1e-5f)) { *pdf = 0.0f; return zero_spectrum(); } - float cosNHdivHO = cosNH / cosHO; - cosNHdivHO = fmaxf(cosNHdivHO, 1e-5f); + float cosNHdivHI = cosNH / cosHI; + cosNHdivHI = fmaxf(cosNHdivHI, 1e-5f); - float fac1 = 2 * fabsf(cosNHdivHO * cosNO); - float fac2 = 2 * fabsf(cosNHdivHO * cosNI); + float fac1 = 2 * fabsf(cosNHdivHI * cosNI); + float fac2 = 2 * fabsf(cosNHdivHI * cosNO); float sinNH2 = 1 - cosNH * cosNH; float sinNH4 = sinNH2 * sinNH2; @@ -69,7 +69,7 @@ ccl_device Spectrum bsdf_ashikhmin_velvet_eval(ccl_private const ShaderClosure * float D = expf(-cotangent2 * m_invsigma2) * m_invsigma2 * M_1_PI_F / sinNH4; float G = fminf(1.0f, fminf(fac1, fac2)); // TODO: derive G from D analytically - float out = 0.25f * (D * G) / cosNO; + float out = 0.25f * (D * G) / cosNI; *pdf = 0.5f * M_1_PI_F; return make_spectrum(out); @@ -77,11 +77,11 @@ ccl_device Spectrum bsdf_ashikhmin_velvet_eval(ccl_private const ShaderClosure * ccl_device int bsdf_ashikhmin_velvet_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { ccl_private const VelvetBsdf *bsdf = (ccl_private const VelvetBsdf *)sc; 
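/* Editor's aside (illustrative, not part of the patch): throughout this change the closure API
 * is renamed from (I, omega_in) to (wi, wo): `wi` is the direction the surface is viewed from
 * (previously sd->I) and `wo` is the direction being sampled or evaluated toward the light
 * (previously omega_in). A minimal sketch of the convention, using only expressions that
 * already appear in these functions:
 *
 *   const float3 H = normalize(wi + wo);                  // half vector between both directions
 *   const float cosNI = dot(N, wi);                       // cosine with the viewing direction
 *   const float cosNO = dot(N, wo);                       // cosine with the sampled direction
 *   const bool is_transmission = dot(sc->N, wo) < 0.0f;   // matches bsdf_is_transmission()
 */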
@@ -90,32 +90,32 @@ ccl_device int bsdf_ashikhmin_velvet_sample(ccl_private const ShaderClosure *sc, // we are viewing the surface from above - send a ray out with uniform // distribution over the hemisphere - sample_uniform_hemisphere(N, randu, randv, omega_in, pdf); + sample_uniform_hemisphere(N, randu, randv, wo, pdf); - if (!(dot(Ng, *omega_in) > 0)) { + if (!(dot(Ng, *wo) > 0)) { *pdf = 0.0f; *eval = zero_spectrum(); return LABEL_NONE; } - float3 H = normalize(*omega_in + I); + float3 H = normalize(wi + *wo); - float cosNI = dot(N, *omega_in); - float cosNO = dot(N, I); + float cosNI = dot(N, wi); + float cosNO = dot(N, *wo); + float cosHI = fabsf(dot(wi, H)); float cosNH = dot(N, H); - float cosHO = fabsf(dot(I, H)); - if (!(fabsf(cosNO) > 1e-5f && fabsf(cosNH) < 1.0f - 1e-5f && cosHO > 1e-5f)) { + if (!(fabsf(cosNI) > 1e-5f && fabsf(cosNH) < 1.0f - 1e-5f && cosHI > 1e-5f)) { *pdf = 0.0f; *eval = zero_spectrum(); return LABEL_NONE; } - float cosNHdivHO = cosNH / cosHO; - cosNHdivHO = fmaxf(cosNHdivHO, 1e-5f); + float cosNHdivHI = cosNH / cosHI; + cosNHdivHI = fmaxf(cosNHdivHI, 1e-5f); - float fac1 = 2 * fabsf(cosNHdivHO * cosNO); - float fac2 = 2 * fabsf(cosNHdivHO * cosNI); + float fac1 = 2 * fabsf(cosNHdivHI * cosNI); + float fac2 = 2 * fabsf(cosNHdivHI * cosNO); float sinNH2 = 1 - cosNH * cosNH; float sinNH4 = sinNH2 * sinNH2; @@ -124,7 +124,7 @@ ccl_device int bsdf_ashikhmin_velvet_sample(ccl_private const ShaderClosure *sc, float D = expf(-cotangent2 * m_invsigma2) * m_invsigma2 * M_1_PI_F / sinNH4; float G = fminf(1.0f, fminf(fac1, fac2)); // TODO: derive G from D analytically - float power = 0.25f * (D * G) / cosNO; + float power = 0.25f * (D * G) / cosNI; *eval = make_spectrum(power); diff --git a/intern/cycles/kernel/closure/bsdf_diffuse.h b/intern/cycles/kernel/closure/bsdf_diffuse.h index 827b762f4c7..ac6cba76c30 100644 --- a/intern/cycles/kernel/closure/bsdf_diffuse.h +++ b/intern/cycles/kernel/closure/bsdf_diffuse.h @@ -27,34 +27,34 @@ ccl_device int bsdf_diffuse_setup(ccl_private DiffuseBsdf *bsdf) } ccl_device Spectrum bsdf_diffuse_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const DiffuseBsdf *bsdf = (ccl_private const DiffuseBsdf *)sc; float3 N = bsdf->N; - float cos_pi = fmaxf(dot(N, omega_in), 0.0f) * M_1_PI_F; - *pdf = cos_pi; - return make_spectrum(cos_pi); + float cosNO = fmaxf(dot(N, wo), 0.0f) * M_1_PI_F; + *pdf = cosNO; + return make_spectrum(cosNO); } ccl_device int bsdf_diffuse_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { ccl_private const DiffuseBsdf *bsdf = (ccl_private const DiffuseBsdf *)sc; float3 N = bsdf->N; // distribution over the hemisphere - sample_cos_hemisphere(N, randu, randv, omega_in, pdf); + sample_cos_hemisphere(N, randu, randv, wo, pdf); - if (dot(Ng, *omega_in) > 0.0f) { + if (dot(Ng, *wo) > 0.0f) { *eval = make_spectrum(*pdf); } else { @@ -73,25 +73,25 @@ ccl_device int bsdf_translucent_setup(ccl_private DiffuseBsdf *bsdf) } ccl_device Spectrum bsdf_translucent_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const DiffuseBsdf *bsdf = (ccl_private const DiffuseBsdf *)sc; float3 N = bsdf->N; - float cos_pi = fmaxf(-dot(N, omega_in), 0.0f) * 
M_1_PI_F; - *pdf = cos_pi; - return make_spectrum(cos_pi); + float cosNO = fmaxf(-dot(N, wo), 0.0f) * M_1_PI_F; + *pdf = cosNO; + return make_spectrum(cosNO); } ccl_device int bsdf_translucent_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { ccl_private const DiffuseBsdf *bsdf = (ccl_private const DiffuseBsdf *)sc; @@ -99,8 +99,8 @@ ccl_device int bsdf_translucent_sample(ccl_private const ShaderClosure *sc, // we are viewing the surface from the right side - send a ray out with cosine // distribution over the hemisphere - sample_cos_hemisphere(-N, randu, randv, omega_in, pdf); - if (dot(Ng, *omega_in) < 0) { + sample_cos_hemisphere(-N, randu, randv, wo, pdf); + if (dot(Ng, *wo) < 0) { *eval = make_spectrum(*pdf); } else { diff --git a/intern/cycles/kernel/closure/bsdf_diffuse_ramp.h b/intern/cycles/kernel/closure/bsdf_diffuse_ramp.h index e955ed00b92..6721f497646 100644 --- a/intern/cycles/kernel/closure/bsdf_diffuse_ramp.h +++ b/intern/cycles/kernel/closure/bsdf_diffuse_ramp.h @@ -48,17 +48,17 @@ ccl_device void bsdf_diffuse_ramp_blur(ccl_private ShaderClosure *sc, float roug } ccl_device Spectrum bsdf_diffuse_ramp_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { const DiffuseRampBsdf *bsdf = (const DiffuseRampBsdf *)sc; float3 N = bsdf->N; - float cos_pi = fmaxf(dot(N, omega_in), 0.0f); - if (cos_pi >= 0.0f) { - *pdf = cos_pi * M_1_PI_F; - return rgb_to_spectrum(bsdf_diffuse_ramp_get_color(bsdf->colors, cos_pi) * M_1_PI_F); + float cosNO = fmaxf(dot(N, wo), 0.0f); + if (cosNO >= 0.0f) { + *pdf = cosNO * M_1_PI_F; + return rgb_to_spectrum(bsdf_diffuse_ramp_get_color(bsdf->colors, cosNO) * M_1_PI_F); } else { *pdf = 0.0f; @@ -68,20 +68,20 @@ ccl_device Spectrum bsdf_diffuse_ramp_eval(ccl_private const ShaderClosure *sc, ccl_device int bsdf_diffuse_ramp_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { const DiffuseRampBsdf *bsdf = (const DiffuseRampBsdf *)sc; float3 N = bsdf->N; // distribution over the hemisphere - sample_cos_hemisphere(N, randu, randv, omega_in, pdf); + sample_cos_hemisphere(N, randu, randv, wo, pdf); - if (dot(Ng, *omega_in) > 0.0f) { + if (dot(Ng, *wo) > 0.0f) { *eval = rgb_to_spectrum(bsdf_diffuse_ramp_get_color(bsdf->colors, *pdf * M_PI_F) * M_1_PI_F); } else { diff --git a/intern/cycles/kernel/closure/bsdf_hair.h b/intern/cycles/kernel/closure/bsdf_hair.h index 989714bd695..029408ff7e4 100644 --- a/intern/cycles/kernel/closure/bsdf_hair.h +++ b/intern/cycles/kernel/closure/bsdf_hair.h @@ -38,12 +38,12 @@ ccl_device int bsdf_hair_transmission_setup(ccl_private HairBsdf *bsdf) } ccl_device Spectrum bsdf_hair_reflection_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const HairBsdf *bsdf = (ccl_private const HairBsdf *)sc; - if (dot(bsdf->N, omega_in) < 0.0f) { + if (dot(bsdf->N, wo) < 0.0f) { *pdf = 0.0f; return zero_spectrum(); } @@ -53,16 +53,16 @@ ccl_device Spectrum bsdf_hair_reflection_eval(ccl_private const ShaderClosure *s float roughness1 = bsdf->roughness1; float roughness2 = bsdf->roughness2; - float Iz = dot(Tg, I); - 
float3 locy = normalize(I - Tg * Iz); + float Iz = dot(Tg, wi); + float3 locy = normalize(wi - Tg * Iz); float theta_r = M_PI_2_F - fast_acosf(Iz); - float omega_in_z = dot(Tg, omega_in); - float3 omega_in_y = normalize(omega_in - Tg * omega_in_z); + float wo_z = dot(Tg, wo); + float3 wo_y = normalize(wo - Tg * wo_z); - float theta_i = M_PI_2_F - fast_acosf(omega_in_z); - float cosphi_i = dot(omega_in_y, locy); + float theta_i = M_PI_2_F - fast_acosf(wo_z); + float cosphi_i = dot(wo_y, locy); if (M_PI_2_F - fabsf(theta_i) < 0.001f || cosphi_i < 0.0f) { *pdf = 0.0f; @@ -90,12 +90,12 @@ ccl_device Spectrum bsdf_hair_reflection_eval(ccl_private const ShaderClosure *s } ccl_device Spectrum bsdf_hair_transmission_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const HairBsdf *bsdf = (ccl_private const HairBsdf *)sc; - if (dot(bsdf->N, omega_in) >= 0.0f) { + if (dot(bsdf->N, wo) >= 0.0f) { *pdf = 0.0f; return zero_spectrum(); } @@ -104,16 +104,16 @@ ccl_device Spectrum bsdf_hair_transmission_eval(ccl_private const ShaderClosure float3 Tg = bsdf->T; float roughness1 = bsdf->roughness1; float roughness2 = bsdf->roughness2; - float Iz = dot(Tg, I); - float3 locy = normalize(I - Tg * Iz); + float Iz = dot(Tg, wi); + float3 locy = normalize(wi - Tg * Iz); float theta_r = M_PI_2_F - fast_acosf(Iz); - float omega_in_z = dot(Tg, omega_in); - float3 omega_in_y = normalize(omega_in - Tg * omega_in_z); + float wo_z = dot(Tg, wo); + float3 wo_y = normalize(wo - Tg * wo_z); - float theta_i = M_PI_2_F - fast_acosf(omega_in_z); - float phi_i = fast_acosf(dot(omega_in_y, locy)); + float theta_i = M_PI_2_F - fast_acosf(wo_z); + float phi_i = fast_acosf(dot(wo_y, locy)); if (M_PI_2_F - fabsf(theta_i) < 0.001f) { *pdf = 0.0f; @@ -142,11 +142,11 @@ ccl_device Spectrum bsdf_hair_transmission_eval(ccl_private const ShaderClosure ccl_device int bsdf_hair_reflection_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float2 *sampled_roughness) { @@ -156,8 +156,8 @@ ccl_device int bsdf_hair_reflection_sample(ccl_private const ShaderClosure *sc, float roughness1 = bsdf->roughness1; float roughness2 = bsdf->roughness2; *sampled_roughness = make_float2(roughness1, roughness2); - float Iz = dot(Tg, I); - float3 locy = normalize(I - Tg * Iz); + float Iz = dot(Tg, wi); + float3 locy = normalize(wi - Tg * Iz); float3 locx = cross(locy, Tg); float theta_r = M_PI_2_F - fast_acosf(Iz); @@ -182,7 +182,7 @@ ccl_device int bsdf_hair_reflection_sample(ccl_private const ShaderClosure *sc, float sinphi, cosphi; fast_sincosf(phi, &sinphi, &cosphi); - *omega_in = (cosphi * costheta_i) * locy - (sinphi * costheta_i) * locx + (sintheta_i)*Tg; + *wo = (cosphi * costheta_i) * locy - (sinphi * costheta_i) * locx + (sintheta_i)*Tg; *pdf = fabsf(phi_pdf * theta_pdf); if (M_PI_2_F - fabsf(theta_i) < 0.001f) @@ -195,11 +195,11 @@ ccl_device int bsdf_hair_reflection_sample(ccl_private const ShaderClosure *sc, ccl_device int bsdf_hair_transmission_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float2 *sampled_roughness) { @@ -209,8 +209,8 @@ ccl_device int 
bsdf_hair_transmission_sample(ccl_private const ShaderClosure *sc float roughness1 = bsdf->roughness1; float roughness2 = bsdf->roughness2; *sampled_roughness = make_float2(roughness1, roughness2); - float Iz = dot(Tg, I); - float3 locy = normalize(I - Tg * Iz); + float Iz = dot(Tg, wi); + float3 locy = normalize(wi - Tg * Iz); float3 locx = cross(locy, Tg); float theta_r = M_PI_2_F - fast_acosf(Iz); @@ -235,7 +235,7 @@ ccl_device int bsdf_hair_transmission_sample(ccl_private const ShaderClosure *sc float sinphi, cosphi; fast_sincosf(phi, &sinphi, &cosphi); - *omega_in = (cosphi * costheta_i) * locy - (sinphi * costheta_i) * locx + (sintheta_i)*Tg; + *wo = (cosphi * costheta_i) * locy - (sinphi * costheta_i) * locx + (sintheta_i)*Tg; *pdf = fabsf(phi_pdf * theta_pdf); if (M_PI_2_F - fabsf(theta_i) < 0.001f) { @@ -247,7 +247,7 @@ ccl_device int bsdf_hair_transmission_sample(ccl_private const ShaderClosure *sc /* TODO(sergey): Should always be negative, but seems some precision issue * is involved here. */ - kernel_assert(dot(locy, *omega_in) < 1e-4f); + kernel_assert(dot(locy, *wo) < 1e-4f); return LABEL_TRANSMIT | LABEL_GLOSSY; } diff --git a/intern/cycles/kernel/closure/bsdf_hair_principled.h b/intern/cycles/kernel/closure/bsdf_hair_principled.h index 5a6465c7af6..f7cf3b716f6 100644 --- a/intern/cycles/kernel/closure/bsdf_hair_principled.h +++ b/intern/cycles/kernel/closure/bsdf_hair_principled.h @@ -41,11 +41,6 @@ static_assert(sizeof(ShaderClosure) >= sizeof(PrincipledHairBSDF), static_assert(sizeof(ShaderClosure) >= sizeof(PrincipledHairExtra), "PrincipledHairExtra is too large!"); -ccl_device_inline float cos_from_sin(const float s) -{ - return safe_sqrtf(1.0f - s * s); -} - /* Gives the change in direction in the normal plane for the given angles and p-th-order * scattering. */ ccl_device_inline float delta_phi(int p, float gamma_o, float gamma_t) @@ -179,7 +174,7 @@ ccl_device int bsdf_principled_hair_setup(ccl_private ShaderData *sd, /* Compute local frame, aligned to curve tangent and ray direction. */ float3 X = safe_normalize(sd->dPdu); - float3 Y = safe_normalize(cross(X, sd->I)); + float3 Y = safe_normalize(cross(X, sd->wi)); float3 Z = safe_normalize(cross(X, Y)); /* h -1..0..1 means the rays goes from grazing the hair, to hitting it at @@ -259,7 +254,7 @@ ccl_device_inline void hair_alpha_angles(float sin_theta_i, ccl_device Spectrum bsdf_principled_hair_eval(KernelGlobals kg, ccl_private const ShaderData *sd, ccl_private const ShaderClosure *sc, - const float3 omega_in, + const float3 wo, ccl_private float *pdf) { kernel_assert(isfinite_safe(sd->P) && isfinite_safe(sd->ray_length)); @@ -271,12 +266,13 @@ ccl_device Spectrum bsdf_principled_hair_eval(KernelGlobals kg, kernel_assert(fabsf(dot(X, Y)) < 1e-3f); const float3 Z = safe_normalize(cross(X, Y)); - const float3 wo = make_float3(dot(sd->I, X), dot(sd->I, Y), dot(sd->I, Z)); - const float3 wi = make_float3(dot(omega_in, X), dot(omega_in, Y), dot(omega_in, Z)); + /* local_I is the illumination direction. 
*/ + const float3 local_O = make_float3(dot(sd->wi, X), dot(sd->wi, Y), dot(sd->wi, Z)); + const float3 local_I = make_float3(dot(wo, X), dot(wo, Y), dot(wo, Z)); - const float sin_theta_o = wo.x; + const float sin_theta_o = local_O.x; const float cos_theta_o = cos_from_sin(sin_theta_o); - const float phi_o = atan2f(wo.z, wo.y); + const float phi_o = atan2f(local_O.z, local_O.y); const float sin_theta_t = sin_theta_o / bsdf->eta; const float cos_theta_t = cos_from_sin(sin_theta_t); @@ -295,9 +291,9 @@ ccl_device Spectrum bsdf_principled_hair_eval(KernelGlobals kg, hair_attenuation( kg, fresnel_dielectric_cos(cos_theta_o * cos_gamma_o, bsdf->eta), T, Ap, Ap_energy); - const float sin_theta_i = wi.x; + const float sin_theta_i = local_I.x; const float cos_theta_i = cos_from_sin(sin_theta_i); - const float phi_i = atan2f(wi.z, wi.y); + const float phi_i = atan2f(local_I.z, local_I.y); const float phi = phi_i - phi_o; @@ -343,7 +339,7 @@ ccl_device int bsdf_principled_hair_sample(KernelGlobals kg, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float2 *sampled_roughness, ccl_private float *eta) @@ -359,16 +355,16 @@ ccl_device int bsdf_principled_hair_sample(KernelGlobals kg, kernel_assert(fabsf(dot(X, Y)) < 1e-3f); const float3 Z = safe_normalize(cross(X, Y)); - const float3 wo = make_float3(dot(sd->I, X), dot(sd->I, Y), dot(sd->I, Z)); + const float3 local_O = make_float3(dot(sd->wi, X), dot(sd->wi, Y), dot(sd->wi, Z)); float2 u[2]; u[0] = make_float2(randu, randv); u[1].x = lcg_step_float(&sd->lcg_state); u[1].y = lcg_step_float(&sd->lcg_state); - const float sin_theta_o = wo.x; + const float sin_theta_o = local_O.x; const float cos_theta_o = cos_from_sin(sin_theta_o); - const float phi_o = atan2f(wo.z, wo.y); + const float phi_o = atan2f(local_O.z, local_O.y); const float sin_theta_t = sin_theta_o / bsdf->eta; const float cos_theta_t = cos_from_sin(sin_theta_t); @@ -458,7 +454,7 @@ ccl_device int bsdf_principled_hair_sample(KernelGlobals kg, *eval = F; *pdf = F_energy; - *omega_in = X * sin_theta_i + Y * cos_theta_i * cosf(phi_i) + Z * cos_theta_i * sinf(phi_i); + *wo = X * sin_theta_i + Y * cos_theta_i * cosf(phi_i) + Z * cos_theta_i * sinf(phi_i); return LABEL_GLOSSY | ((p == 0) ? LABEL_REFLECT : LABEL_TRANSMIT); } diff --git a/intern/cycles/kernel/closure/bsdf_microfacet.h b/intern/cycles/kernel/closure/bsdf_microfacet.h index dcd55dc9bd7..d7c3d3651fc 100644 --- a/intern/cycles/kernel/closure/bsdf_microfacet.h +++ b/intern/cycles/kernel/closure/bsdf_microfacet.h @@ -16,10 +16,13 @@ CCL_NAMESPACE_BEGIN +enum MicrofacetType { + BECKMANN, + GGX, +}; + typedef struct MicrofacetExtra { Spectrum color, cspec0; - Spectrum fresnel_color; - float clearcoat; } MicrofacetExtra; typedef struct MicrofacetBsdf { @@ -32,190 +35,99 @@ typedef struct MicrofacetBsdf { static_assert(sizeof(ShaderClosure) >= sizeof(MicrofacetBsdf), "MicrofacetBsdf is too large!"); -/* Beckmann and GGX microfacet importance sampling. */ +/* Beckmann VNDF importance sampling algorithm from: + * Importance Sampling Microfacet-Based BSDFs using the Distribution of Visible Normals. + * Eric Heitz and Eugene d'Eon, EGSR 2014. 
+ * https://hal.inria.fr/hal-00996995v2/document */ -ccl_device_inline void microfacet_beckmann_sample_slopes(KernelGlobals kg, - const float cos_theta_i, - const float sin_theta_i, - float randu, - float randv, - ccl_private float *slope_x, - ccl_private float *slope_y, - ccl_private float *G1i) +ccl_device_forceinline float3 microfacet_beckmann_sample_vndf(const float3 wi, + const float alpha_x, + const float alpha_y, + const float randu, + const float randv) { - /* special case (normal incidence) */ - if (cos_theta_i >= 0.99999f) { + /* 1. stretch wi */ + float3 wi_ = make_float3(alpha_x * wi.x, alpha_y * wi.y, wi.z); + wi_ = normalize(wi_); + + /* 2. sample P22_{wi}(x_slope, y_slope, 1, 1) */ + float slope_x, slope_y; + float cos_phi_i = 1.0f; + float sin_phi_i = 0.0f; + + if (wi_.z >= 0.99999f) { + /* Special case (normal incidence). */ const float r = sqrtf(-logf(randu)); const float phi = M_2PI_F * randv; - *slope_x = r * cosf(phi); - *slope_y = r * sinf(phi); - *G1i = 1.0f; - return; - } - - /* precomputations */ - const float tan_theta_i = sin_theta_i / cos_theta_i; - const float inv_a = tan_theta_i; - const float cot_theta_i = 1.0f / tan_theta_i; - const float erf_a = fast_erff(cot_theta_i); - const float exp_a2 = expf(-cot_theta_i * cot_theta_i); - const float SQRT_PI_INV = 0.56418958354f; - const float Lambda = 0.5f * (erf_a - 1.0f) + (0.5f * SQRT_PI_INV) * (exp_a2 * inv_a); - const float G1 = 1.0f / (1.0f + Lambda); /* masking */ - - *G1i = G1; - -#if defined(__KERNEL_GPU__) - /* Based on paper from Wenzel Jakob - * An Improved Visible Normal Sampling Routine for the Beckmann Distribution - * - * http://www.mitsuba-renderer.org/~wenzel/files/visnormal.pdf - * - * Reformulation from OpenShadingLanguage which avoids using inverse - * trigonometric functions. - */ - - /* Sample slope X. - * - * Compute a coarse approximation using the approximation: - * exp(-ierf(x)^2) ~= 1 - x * x - * solve y = 1 + b + K * (1 - b * b) - */ - float K = tan_theta_i * SQRT_PI_INV; - float y_approx = randu * (1.0f + erf_a + K * (1 - erf_a * erf_a)); - float y_exact = randu * (1.0f + erf_a + K * exp_a2); - float b = K > 0 ? (0.5f - sqrtf(K * (K - y_approx + 1.0f) + 0.25f)) / K : y_approx - 1.0f; - - /* Perform newton step to refine toward the true root. */ - float inv_erf = fast_ierff(b); - float value = 1.0f + b + K * expf(-inv_erf * inv_erf) - y_exact; - /* Check if we are close enough already, - * this also avoids NaNs as we get close to the root. - */ - if (fabsf(value) > 1e-6f) { - b -= value / (1.0f - inv_erf * tan_theta_i); /* newton step 1. */ - inv_erf = fast_ierff(b); - value = 1.0f + b + K * expf(-inv_erf * inv_erf) - y_exact; - b -= value / (1.0f - inv_erf * tan_theta_i); /* newton step 2. */ - /* Compute the slope from the refined value. */ - *slope_x = fast_ierff(b); + slope_x = r * cosf(phi); + slope_y = r * sinf(phi); } else { - /* We are close enough already. */ - *slope_x = inv_erf; - } - *slope_y = fast_ierff(2.0f * randv - 1.0f); -#else - /* Use precomputed table on CPU, it gives better performance. */ - int beckmann_table_offset = kernel_data.tables.beckmann_offset; + /* Precomputations. 
*/ + const float cos_theta_i = wi_.z; + const float sin_theta_i = sin_from_cos(cos_theta_i); + const float tan_theta_i = sin_theta_i / cos_theta_i; + const float cot_theta_i = 1.0f / tan_theta_i; + const float erf_a = fast_erff(cot_theta_i); + const float exp_a2 = expf(-cot_theta_i * cot_theta_i); + const float SQRT_PI_INV = 0.56418958354f; - *slope_x = lookup_table_read_2D( - kg, randu, cos_theta_i, beckmann_table_offset, BECKMANN_TABLE_SIZE, BECKMANN_TABLE_SIZE); - *slope_y = fast_ierff(2.0f * randv - 1.0f); -#endif -} + float invlen = 1.0f / sin_theta_i; + cos_phi_i = wi_.x * invlen; + sin_phi_i = wi_.y * invlen; -/* GGX microfacet importance sampling from: - * - * Importance Sampling Microfacet-Based BSDFs using the Distribution of Visible Normals. - * E. Heitz and E. d'Eon, EGSR 2014 - */ + /* Based on paper from Wenzel Jakob + * An Improved Visible Normal Sampling Routine for the Beckmann Distribution + * + * http://www.mitsuba-renderer.org/~wenzel/files/visnormal.pdf + * + * Reformulation from OpenShadingLanguage which avoids using inverse + * trigonometric functions. + */ -ccl_device_inline void microfacet_ggx_sample_slopes(const float cos_theta_i, - const float sin_theta_i, - float randu, - float randv, - ccl_private float *slope_x, - ccl_private float *slope_y, - ccl_private float *G1i) -{ - /* special case (normal incidence) */ - if (cos_theta_i >= 0.99999f) { - const float r = sqrtf(randu / (1.0f - randu)); - const float phi = M_2PI_F * randv; - *slope_x = r * cosf(phi); - *slope_y = r * sinf(phi); - *G1i = 1.0f; + /* Sample slope X. + * + * Compute a coarse approximation using the approximation: + * exp(-ierf(x)^2) ~= 1 - x * x + * solve y = 1 + b + K * (1 - b * b) + */ + const float K = tan_theta_i * SQRT_PI_INV; + const float y_approx = randu * (1.0f + erf_a + K * (1 - erf_a * erf_a)); + const float y_exact = randu * (1.0f + erf_a + K * exp_a2); + float b = K > 0 ? (0.5f - sqrtf(K * (K - y_approx + 1.0f) + 0.25f)) / K : y_approx - 1.0f; - return; - } + float inv_erf = fast_ierff(b); + float2 begin = make_float2(-1.0f, -y_exact); + float2 end = make_float2(erf_a, 1.0f + erf_a + K * exp_a2 - y_exact); + float2 current = make_float2(b, 1.0f + b + K * expf(-sqr(inv_erf)) - y_exact); - /* precomputations */ - const float tan_theta_i = sin_theta_i / cos_theta_i; - const float G1_inv = 0.5f * (1.0f + safe_sqrtf(1.0f + tan_theta_i * tan_theta_i)); + /* Find root in a monotonic interval using newton method, under given precision and maximal + * iterations. Falls back to bisection if newton step produces results outside of the valid + * interval.*/ + const float precision = 1e-6f; + const int max_iter = 3; + int iter = 0; + while (fabsf(current.y) > precision && iter++ < max_iter) { + if (signf(begin.y) == signf(current.y)) { + begin.x = current.x; + begin.y = current.y; + } + else { + end.x = current.x; + } + const float newton_x = current.x - current.y / (1.0f - inv_erf * tan_theta_i); + current.x = (newton_x >= begin.x && newton_x <= end.x) ? 
newton_x : 0.5f * (begin.x + end.x); + inv_erf = fast_ierff(current.x); + current.y = 1.0f + current.x + K * expf(-sqr(inv_erf)) - y_exact; + } - *G1i = 1.0f / G1_inv; - - /* sample slope_x */ - const float A = 2.0f * randu * G1_inv - 1.0f; - const float AA = A * A; - const float tmp = 1.0f / (AA - 1.0f); - const float B = tan_theta_i; - const float BB = B * B; - const float D = safe_sqrtf(BB * (tmp * tmp) - (AA - BB) * tmp); - const float slope_x_1 = B * tmp - D; - const float slope_x_2 = B * tmp + D; - *slope_x = (A < 0.0f || slope_x_2 * tan_theta_i > 1.0f) ? slope_x_1 : slope_x_2; - - /* sample slope_y */ - float S; - - if (randv > 0.5f) { - S = 1.0f; - randv = 2.0f * (randv - 0.5f); - } - else { - S = -1.0f; - randv = 2.0f * (0.5f - randv); - } - - const float z = (randv * (randv * (randv * 0.27385f - 0.73369f) + 0.46341f)) / - (randv * (randv * (randv * 0.093073f + 0.309420f) - 1.000000f) + 0.597999f); - *slope_y = S * z * safe_sqrtf(1.0f + (*slope_x) * (*slope_x)); -} - -ccl_device_forceinline float3 microfacet_sample_stretched(KernelGlobals kg, - const float3 omega_i, - const float alpha_x, - const float alpha_y, - const float randu, - const float randv, - bool beckmann, - ccl_private float *G1i) -{ - /* 1. stretch omega_i */ - float3 omega_i_ = make_float3(alpha_x * omega_i.x, alpha_y * omega_i.y, omega_i.z); - omega_i_ = normalize(omega_i_); - - /* get polar coordinates of omega_i_ */ - float costheta_ = 1.0f; - float sintheta_ = 0.0f; - float cosphi_ = 1.0f; - float sinphi_ = 0.0f; - - if (omega_i_.z < 0.99999f) { - costheta_ = omega_i_.z; - sintheta_ = safe_sqrtf(1.0f - costheta_ * costheta_); - - float invlen = 1.0f / sintheta_; - cosphi_ = omega_i_.x * invlen; - sinphi_ = omega_i_.y * invlen; - } - - /* 2. sample P22_{omega_i}(x_slope, y_slope, 1, 1) */ - float slope_x, slope_y; - - if (beckmann) { - microfacet_beckmann_sample_slopes( - kg, costheta_, sintheta_, randu, randv, &slope_x, &slope_y, G1i); - } - else { - microfacet_ggx_sample_slopes(costheta_, sintheta_, randu, randv, &slope_x, &slope_y, G1i); + slope_x = inv_erf; + slope_y = fast_ierff(2.0f * randv - 1.0f); } /* 3. rotate */ - float tmp = cosphi_ * slope_x - sinphi_ * slope_y; - slope_y = sinphi_ * slope_x + cosphi_ * slope_y; + float tmp = cos_phi_i * slope_x - sin_phi_i * slope_y; + slope_y = sin_phi_i * slope_x + cos_phi_i * slope_y; slope_x = tmp; /* 4. unstretch */ @@ -226,6 +138,43 @@ ccl_device_forceinline float3 microfacet_sample_stretched(KernelGlobals kg, return normalize(make_float3(-slope_x, -slope_y, 1.0f)); } +/* GGX VNDF importance sampling algorithm from: + * Sampling the GGX Distribution of Visible Normals. + * Eric Heitz, JCGT Vol. 7, No. 4, 2018. + * https://jcgt.org/published/0007/04/01/ */ +ccl_device_forceinline float3 microfacet_ggx_sample_vndf(const float3 wi, + const float alpha_x, + const float alpha_y, + const float randu, + const float randv) +{ + /* Section 3.2: Transforming the view direction to the hemisphere configuration. */ + float3 wi_ = normalize(make_float3(alpha_x * wi.x, alpha_y * wi.y, wi.z)); + + /* Section 4.1: Orthonormal basis. */ + float lensq = sqr(wi_.x) + sqr(wi_.y); + float3 T1, T2; + if (lensq > 1e-7f) { + T1 = make_float3(-wi_.y, wi_.x, 0.0f) * inversesqrtf(lensq); + T2 = cross(wi_, T1); + } + else { + /* Normal incidence, any basis is fine. */ + T1 = make_float3(1.0f, 0.0f, 0.0f); + T2 = make_float3(0.0f, 1.0f, 0.0f); + } + + /* Section 4.2: Parameterization of the projected area. 
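+ * A point on the unit disk is warped towards its upper half by a factor of
+ * 0.5 * (1 + wi_.z): at normal incidence the full disk is used, at grazing
+ * incidence only the half that projects onto the visible side of the surface.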
*/ + float2 t = concentric_sample_disk(randu, randv); + t.y = mix(safe_sqrtf(1.0f - sqr(t.x)), t.y, 0.5f * (1.0f + wi_.z)); + + /* Section 4.3: Reprojection onto hemisphere. */ + float3 H_ = t.x * T1 + t.y * T2 + safe_sqrtf(1.0f - len_squared(t)) * wi_; + + /* Section 3.4: Transforming the normal back to the ellipsoid configuration. */ + return normalize(make_float3(alpha_x * H_.x, alpha_y * H_.y, max(0.0f, H_.z))); +} + /* Calculate the reflection color * * If fresnel is used, the color is an interpolation of the F0 color and white @@ -233,45 +182,316 @@ ccl_device_forceinline float3 microfacet_sample_stretched(KernelGlobals kg, * * Else it is simply white */ -ccl_device_forceinline Spectrum reflection_color(ccl_private const MicrofacetBsdf *bsdf, - float3 L, - float3 H) +ccl_device_forceinline Spectrum microfacet_fresnel(ccl_private const MicrofacetBsdf *bsdf, + float3 wi, + float3 H) { - Spectrum F = one_spectrum(); - bool use_fresnel = (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID || - bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID); - if (use_fresnel) { - float F0 = fresnel_dielectric_cos(1.0f, bsdf->ior); - - F = interpolate_fresnel_color(L, H, bsdf->ior, F0, bsdf->extra->cspec0); + if (CLOSURE_IS_BSDF_MICROFACET_FRESNEL(bsdf->type)) { + return interpolate_fresnel_color(wi, H, bsdf->ior, bsdf->extra->cspec0); + } + else if (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID) { + return make_spectrum(fresnel_dielectric_cos(dot(wi, H), bsdf->ior)); + } + else { + return one_spectrum(); } - - return F; } -ccl_device_forceinline float D_GTR1(float NdotH, float alpha) +ccl_device_forceinline void bsdf_microfacet_adjust_weight(ccl_private const ShaderData *sd, + ccl_private MicrofacetBsdf *bsdf) { - if (alpha >= 1.0f) + bsdf->sample_weight *= average(microfacet_fresnel(bsdf, sd->wi, bsdf->N)); +} + +/* Generalized Trowbridge-Reitz for clearcoat. */ +ccl_device_forceinline float bsdf_clearcoat_D(float alpha2, float cos_NH) +{ + if (alpha2 >= 1.0f) { return M_1_PI_F; - float alpha2 = alpha * alpha; - float t = 1.0f + (alpha2 - 1.0f) * NdotH * NdotH; + } + + const float t = 1.0f + (alpha2 - 1.0f) * cos_NH * cos_NH; return (alpha2 - 1.0f) / (M_PI_F * logf(alpha2) * t); } -ccl_device_forceinline void bsdf_microfacet_fresnel_color(ccl_private const ShaderData *sd, - ccl_private MicrofacetBsdf *bsdf) +/* Smith shadowing-masking term, here in the non-separable form. + * For details, see: + * Understanding the Masking-Shadowing Function in Microfacet-Based BRDFs. + * Eric Heitz, JCGT Vol. 3, No. 2, 2014. + * https://jcgt.org/published/0003/02/03/ */ +template +ccl_device_inline float bsdf_lambda_from_sqr_alpha_tan_n(float sqr_alpha_tan_n) { - kernel_assert(CLOSURE_IS_BSDF_MICROFACET_FRESNEL(bsdf->type)); + if (m_type == MicrofacetType::GGX) { + /* Equation 72. */ + return 0.5f * (sqrtf(1.0f + sqr_alpha_tan_n) - 1.0f); + } + else { + /* m_type == MicrofacetType::BECKMANN + * Approximation from below Equation 69. */ + if (sqr_alpha_tan_n < 0.39f) { + /* Equivalent to a >= 1.6f, but also handles sqr_alpha_tan_n == 0.0f cleanly. 
*/ + return 0.0f; + } - float F0 = fresnel_dielectric_cos(1.0f, bsdf->ior); - bsdf->extra->fresnel_color = interpolate_fresnel_color( - sd->I, bsdf->N, bsdf->ior, F0, bsdf->extra->cspec0); + const float a = inversesqrtf(sqr_alpha_tan_n); + return ((0.396f * a - 1.259f) * a + 1.0f) / ((2.181f * a + 3.535f) * a); + } +} - if (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID) { - bsdf->extra->fresnel_color *= 0.25f * bsdf->extra->clearcoat; +template ccl_device_inline float bsdf_lambda(float alpha2, float cos_N) +{ + return bsdf_lambda_from_sqr_alpha_tan_n(alpha2 * fmaxf(1.0f / sqr(cos_N) - 1.0f, 0.0f)); +} + +template +ccl_device_inline float bsdf_aniso_lambda(float alpha_x, float alpha_y, float3 V) +{ + const float sqr_alpha_tan_n = (sqr(alpha_x * V.x) + sqr(alpha_y * V.y)) / sqr(V.z); + return bsdf_lambda_from_sqr_alpha_tan_n(sqr_alpha_tan_n); +} + +/* Combined shadowing-masking term. */ +template +ccl_device_inline float bsdf_G(float alpha2, float cos_NI, float cos_NO) +{ + return 1.0f / (1.0f + bsdf_lambda(alpha2, cos_NI) + bsdf_lambda(alpha2, cos_NO)); +} + +/* Normal distribution function. */ +template ccl_device_inline float bsdf_D(float alpha2, float cos_NH) +{ + const float cos_NH2 = sqr(cos_NH); + + if (m_type == MicrofacetType::BECKMANN) { + return expf((1.0f - 1.0f / cos_NH2) / alpha2) / (M_PI_F * alpha2 * sqr(cos_NH2)); + } + else { + /* m_type == MicrofacetType::GGX */ + return alpha2 / (M_PI_F * sqr(1.0f + (alpha2 - 1.0f) * cos_NH2)); + } +} + +template +ccl_device_inline float bsdf_aniso_D(float alpha_x, float alpha_y, float3 H) +{ + H /= make_float3(alpha_x, alpha_y, 1.0f); + + const float cos_NH2 = sqr(H.z); + const float alpha2 = alpha_x * alpha_y; + + if (m_type == MicrofacetType::BECKMANN) { + return expf(-(sqr(H.x) + sqr(H.y)) / cos_NH2) / (M_PI_F * alpha2 * sqr(cos_NH2)); + } + else { + /* m_type == MicrofacetType::GGX */ + return M_1_PI_F / (alpha2 * sqr(len_squared(H))); + } +} + +template +ccl_device Spectrum bsdf_microfacet_eval(ccl_private const ShaderClosure *sc, + const float3 Ng, + const float3 wi, + const float3 wo, + ccl_private float *pdf) +{ + ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc; + const bool m_refractive = (bsdf->type == CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID) || + (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID); + + const float3 N = bsdf->N; + const float cos_NI = dot(N, wi); + const float cos_NO = dot(N, wo); + const float cos_NgO = dot(Ng, wo); + + const float alpha_x = bsdf->alpha_x; + const float alpha_y = bsdf->alpha_y; + + if ((cos_NI <= 0) || ((cos_NgO < 0.0f) != m_refractive) || ((cos_NO < 0.0f) != m_refractive) || + (alpha_x * alpha_y <= 1e-7f)) { + *pdf = 0.0f; + return zero_spectrum(); } - bsdf->sample_weight *= average(bsdf->extra->fresnel_color); + /* Compute half vector. */ + float3 H = m_refractive ? -(bsdf->ior * wo + wi) : (wi + wo); + const float inv_len_H = 1.0f / len(H); + H *= inv_len_H; + + const float cos_NH = dot(N, H); + float D, lambdaI, lambdaO; + + /* TODO: add support for anisotropic transmission. */ + if (alpha_x == alpha_y || m_refractive) { /* Isotropic. 
*/ + float alpha2 = alpha_x * alpha_y; + + if (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID) { + D = bsdf_clearcoat_D(alpha2, cos_NH); + + /* The masking-shadowing term for clearcoat has a fixed alpha of 0.25 + * => alpha2 = 0.25 * 0.25 */ + alpha2 = 0.0625f; + } + else { + D = bsdf_D(alpha2, cos_NH); + } + + lambdaI = bsdf_lambda(alpha2, cos_NI); + lambdaO = bsdf_lambda(alpha2, cos_NO); + } + else { /* Anisotropic. */ + float3 X, Y; + make_orthonormals_tangent(N, bsdf->T, &X, &Y); + + const float3 local_H = make_float3(dot(X, H), dot(Y, H), cos_NH); + const float3 local_I = make_float3(dot(X, wi), dot(Y, wi), cos_NI); + const float3 local_O = make_float3(dot(X, wo), dot(Y, wo), cos_NO); + + D = bsdf_aniso_D(alpha_x, alpha_y, local_H); + + lambdaI = bsdf_aniso_lambda(alpha_x, alpha_y, local_I); + lambdaO = bsdf_aniso_lambda(alpha_x, alpha_y, local_O); + } + + const float common = D / cos_NI * + (m_refractive ? + sqr(bsdf->ior * inv_len_H) * fabsf(dot(H, wi) * dot(H, wo)) : + 0.25f); + + *pdf = common / (1.0f + lambdaI); + + const Spectrum F = microfacet_fresnel(bsdf, wo, H); + return F * common / (1.0f + lambdaO + lambdaI); +} + +template +ccl_device int bsdf_microfacet_sample(ccl_private const ShaderClosure *sc, + float3 Ng, + float3 wi, + float randu, + float randv, + ccl_private Spectrum *eval, + ccl_private float3 *wo, + ccl_private float *pdf, + ccl_private float2 *sampled_roughness, + ccl_private float *eta) +{ + ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc; + + const float m_eta = bsdf->ior; + const bool m_refractive = (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID) || + (bsdf->type == CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID); + int label = m_refractive ? LABEL_TRANSMIT : LABEL_REFLECT; + + const float3 N = bsdf->N; + const float cos_NI = dot(N, wi); + if (cos_NI <= 0) { + return label | LABEL_GLOSSY; + } + + float3 X, Y; + const float alpha_x = bsdf->alpha_x; + const float alpha_y = bsdf->alpha_y; + if (alpha_x == alpha_y) { + make_orthonormals(N, &X, &Y); + } + else { + make_orthonormals_tangent(N, bsdf->T, &X, &Y); + } + + /* Importance sampling with distribution of visible normals. Vectors are transformed to local + * space before and after sampling. */ + const float3 local_I = make_float3(dot(X, wi), dot(Y, wi), cos_NI); + float3 local_H; + if (m_type == MicrofacetType::GGX) { + local_H = microfacet_ggx_sample_vndf(local_I, alpha_x, alpha_y, randu, randv); + } + else { + /* m_type == MicrofacetType::BECKMANN */ + local_H = microfacet_beckmann_sample_vndf(local_I, alpha_x, alpha_y, randu, randv); + } + + const float3 H = X * local_H.x + Y * local_H.y + N * local_H.z; + const float cos_NH = local_H.z; + const float cos_HI = dot(H, wi); + + bool valid = false; + if (m_refractive) { + float3 R, T; + bool inside; + + float fresnel = fresnel_dielectric(m_eta, H, wi, &R, &T, &inside); + *wo = T; + + valid = !inside && fresnel != 1.0f; + } + else { + /* Eq. 39 - compute actual reflected direction */ + *wo = 2 * cos_HI * H - wi; + + valid = dot(Ng, *wo) > 0; + } + + if (!valid) { + *eval = zero_spectrum(); + *pdf = 0.0f; + return label | LABEL_GLOSSY; + } + + if (alpha_x * alpha_y <= 1e-7f || (m_refractive && fabsf(m_eta - 1.0f) < 1e-4f)) { + label |= LABEL_SINGULAR; + /* Some high number for MIS. 
*/ + *pdf = 1e6f; + *eval = make_spectrum(1e6f) * microfacet_fresnel(bsdf, *wo, H); + } + else { + label |= LABEL_GLOSSY; + float cos_NO = dot(N, *wo); + float D, lambdaI, lambdaO; + + /* TODO: add support for anisotropic transmission. */ + if (alpha_x == alpha_y || m_refractive) { /* Isotropic. */ + float alpha2 = alpha_x * alpha_y; + + if (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID) { + D = bsdf_clearcoat_D(alpha2, cos_NH); + + /* The masking-shadowing term for clearcoat has a fixed alpha of 0.25 + * => alpha2 = 0.25 * 0.25 */ + alpha2 = 0.0625f; + } + else { + D = bsdf_D(alpha2, cos_NH); + } + + lambdaO = bsdf_lambda(alpha2, cos_NO); + lambdaI = bsdf_lambda(alpha2, cos_NI); + } + else { /* Anisotropic. */ + const float3 local_O = make_float3(dot(X, *wo), dot(Y, *wo), cos_NO); + + D = bsdf_aniso_D(alpha_x, alpha_y, local_H); + + lambdaO = bsdf_aniso_lambda(alpha_x, alpha_y, local_O); + lambdaI = bsdf_aniso_lambda(alpha_x, alpha_y, local_I); + } + + const float cos_HO = dot(H, *wo); + const float common = D / cos_NI * + (m_refractive ? fabsf(cos_HI * cos_HO) / sqr(cos_HO + cos_HI / m_eta) : + 0.25f); + + *pdf = common / (1.0f + lambdaI); + + Spectrum F = microfacet_fresnel(bsdf, *wo, H); + *eval = F * common / (1.0f + lambdaI + lambdaO); + } + + *sampled_roughness = make_float2(alpha_x, alpha_y); + *eta = m_refractive ? 1.0f / m_eta : m_eta; + + return label; } /* GGX microfacet with Smith shadow-masking from: @@ -299,14 +519,6 @@ ccl_device int bsdf_microfacet_ggx_setup(ccl_private MicrofacetBsdf *bsdf) return SD_BSDF | SD_BSDF_HAS_EVAL; } -/* Required to maintain OSL interface. */ -ccl_device int bsdf_microfacet_ggx_isotropic_setup(ccl_private MicrofacetBsdf *bsdf) -{ - bsdf->alpha_y = bsdf->alpha_x; - - return bsdf_microfacet_ggx_setup(bsdf); -} - ccl_device int bsdf_microfacet_ggx_fresnel_setup(ccl_private MicrofacetBsdf *bsdf, ccl_private const ShaderData *sd) { @@ -317,7 +529,7 @@ ccl_device int bsdf_microfacet_ggx_fresnel_setup(ccl_private MicrofacetBsdf *bsd bsdf->type = CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID; - bsdf_microfacet_fresnel_color(sd, bsdf); + bsdf_microfacet_adjust_weight(sd, bsdf); return SD_BSDF | SD_BSDF_HAS_EVAL; } @@ -325,14 +537,12 @@ ccl_device int bsdf_microfacet_ggx_fresnel_setup(ccl_private MicrofacetBsdf *bsd ccl_device int bsdf_microfacet_ggx_clearcoat_setup(ccl_private MicrofacetBsdf *bsdf, ccl_private const ShaderData *sd) { - bsdf->extra->cspec0 = saturate(bsdf->extra->cspec0); - bsdf->alpha_x = saturatef(bsdf->alpha_x); bsdf->alpha_y = bsdf->alpha_x; bsdf->type = CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID; - bsdf_microfacet_fresnel_color(sd, bsdf); + bsdf_microfacet_adjust_weight(sd, bsdf); return SD_BSDF | SD_BSDF_HAS_EVAL; } @@ -357,398 +567,28 @@ ccl_device void bsdf_microfacet_ggx_blur(ccl_private ShaderClosure *sc, float ro bsdf->alpha_y = fmaxf(roughness, bsdf->alpha_y); } -ccl_device Spectrum bsdf_microfacet_ggx_eval_reflect(ccl_private const MicrofacetBsdf *bsdf, - const float3 N, - const float3 I, - const float3 omega_in, - ccl_private float *pdf, - const float alpha_x, - const float alpha_y, - const float cosNO, - const float cosNI) -{ - if (!(cosNI > 0 && cosNO > 0)) { - *pdf = 0.0f; - return zero_spectrum(); - } - - /* get half vector */ - float3 m = normalize(omega_in + I); - float alpha2 = alpha_x * alpha_y; - float D, G1o, G1i; - - if (alpha_x == alpha_y) { - /* isotropic - * eq. 20: (F*G*D)/(4*in*on) - * eq. 
33: first we calculate D(m) */ - float cosThetaM = dot(N, m); - float cosThetaM2 = cosThetaM * cosThetaM; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - float tanThetaM2 = (1 - cosThetaM2) / cosThetaM2; - - if (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID) { - /* use GTR1 for clearcoat */ - D = D_GTR1(cosThetaM, bsdf->alpha_x); - - /* the alpha value for clearcoat is a fixed 0.25 => alpha2 = 0.25 * 0.25 */ - alpha2 = 0.0625f; - } - else { - /* use GTR2 otherwise */ - D = alpha2 / (M_PI_F * cosThetaM4 * (alpha2 + tanThetaM2) * (alpha2 + tanThetaM2)); - } - - /* eq. 34: now calculate G1(i,m) and G1(o,m) */ - G1o = 2 / (1 + safe_sqrtf(1 + alpha2 * (1 - cosNO * cosNO) / (cosNO * cosNO))); - G1i = 2 / (1 + safe_sqrtf(1 + alpha2 * (1 - cosNI * cosNI) / (cosNI * cosNI))); - } - else { - /* anisotropic */ - float3 X, Y, Z = N; - make_orthonormals_tangent(Z, bsdf->T, &X, &Y); - - /* distribution */ - float3 local_m = make_float3(dot(X, m), dot(Y, m), dot(Z, m)); - float slope_x = -local_m.x / (local_m.z * alpha_x); - float slope_y = -local_m.y / (local_m.z * alpha_y); - float slope_len = 1 + slope_x * slope_x + slope_y * slope_y; - - float cosThetaM = local_m.z; - float cosThetaM2 = cosThetaM * cosThetaM; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - - D = 1 / ((slope_len * slope_len) * M_PI_F * alpha2 * cosThetaM4); - - /* G1(i,m) and G1(o,m) */ - float tanThetaO2 = (1 - cosNO * cosNO) / (cosNO * cosNO); - float cosPhiO = dot(I, X); - float sinPhiO = dot(I, Y); - - float alphaO2 = (cosPhiO * cosPhiO) * (alpha_x * alpha_x) + - (sinPhiO * sinPhiO) * (alpha_y * alpha_y); - alphaO2 /= cosPhiO * cosPhiO + sinPhiO * sinPhiO; - - G1o = 2 / (1 + safe_sqrtf(1 + alphaO2 * tanThetaO2)); - - float tanThetaI2 = (1 - cosNI * cosNI) / (cosNI * cosNI); - float cosPhiI = dot(omega_in, X); - float sinPhiI = dot(omega_in, Y); - - float alphaI2 = (cosPhiI * cosPhiI) * (alpha_x * alpha_x) + - (sinPhiI * sinPhiI) * (alpha_y * alpha_y); - alphaI2 /= cosPhiI * cosPhiI + sinPhiI * sinPhiI; - - G1i = 2 / (1 + safe_sqrtf(1 + alphaI2 * tanThetaI2)); - } - - float G = G1o * G1i; - - /* eq. 20 */ - float common = D * 0.25f / cosNO; - - Spectrum F = reflection_color(bsdf, omega_in, m); - if (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID) { - F *= 0.25f * bsdf->extra->clearcoat; - } - - Spectrum out = F * G * common; - - /* eq. 2 in distribution of visible normals sampling - * `pm = Dw = G1o * dot(m, I) * D / dot(N, I);` */ - - /* eq. 38 - but see also: - * eq. 17 in http://www.graphics.cornell.edu/~bjw/wardnotes.pdf - * `pdf = pm * 0.25 / dot(m, I);` */ - *pdf = G1o * common; - - return out; -} - -ccl_device Spectrum bsdf_microfacet_ggx_eval_transmit(ccl_private const MicrofacetBsdf *bsdf, - const float3 N, - const float3 I, - const float3 omega_in, - ccl_private float *pdf, - const float alpha_x, - const float alpha_y, - const float cosNO, - const float cosNI) -{ - if (cosNO <= 0 || cosNI >= 0) { - *pdf = 0.0f; - return zero_spectrum(); /* vectors on same side -- not possible */ - } - /* compute half-vector of the refraction (eq. 16) */ - float m_eta = bsdf->ior; - float3 ht = -(m_eta * omega_in + I); - float3 Ht = normalize(ht); - float cosHO = dot(Ht, I); - float cosHI = dot(Ht, omega_in); - - float D, G1o, G1i; - - /* eq. 
33: first we calculate D(m) with m=Ht: */ - float alpha2 = alpha_x * alpha_y; - float cosThetaM = dot(N, Ht); - float cosThetaM2 = cosThetaM * cosThetaM; - float tanThetaM2 = (1 - cosThetaM2) / cosThetaM2; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - D = alpha2 / (M_PI_F * cosThetaM4 * (alpha2 + tanThetaM2) * (alpha2 + tanThetaM2)); - - /* eq. 34: now calculate G1(i,m) and G1(o,m) */ - G1o = 2 / (1 + safe_sqrtf(1 + alpha2 * (1 - cosNO * cosNO) / (cosNO * cosNO))); - G1i = 2 / (1 + safe_sqrtf(1 + alpha2 * (1 - cosNI * cosNI) / (cosNI * cosNI))); - - float G = G1o * G1i; - - /* probability */ - float Ht2 = dot(ht, ht); - - /* eq. 2 in distribution of visible normals sampling - * pm = Dw = G1o * dot(m, I) * D / dot(N, I); */ - - /* out = fabsf(cosHI * cosHO) * (m_eta * m_eta) * G * D / (cosNO * Ht2) - * pdf = pm * (m_eta * m_eta) * fabsf(cosHI) / Ht2 */ - float common = D * (m_eta * m_eta) / (cosNO * Ht2); - float out = G * fabsf(cosHI * cosHO) * common; - *pdf = G1o * fabsf(cosHO * cosHI) * common; - - return make_spectrum(out); -} - ccl_device Spectrum bsdf_microfacet_ggx_eval(ccl_private const ShaderClosure *sc, const float3 Ng, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { - ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc; - const bool m_refractive = bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID; - const float alpha_x = bsdf->alpha_x; - const float alpha_y = bsdf->alpha_y; - const float cosNgI = dot(Ng, omega_in); - - if (((cosNgI < 0.0f) != m_refractive) || alpha_x * alpha_y <= 1e-7f) { - *pdf = 0.0f; - return zero_spectrum(); - } - - const float3 N = bsdf->N; - const float cosNO = dot(N, I); - const float cosNI = dot(N, omega_in); - - return (cosNgI < 0.0f) ? bsdf_microfacet_ggx_eval_transmit( - bsdf, N, I, omega_in, pdf, alpha_x, alpha_y, cosNO, cosNI) : - bsdf_microfacet_ggx_eval_reflect( - bsdf, N, I, omega_in, pdf, alpha_x, alpha_y, cosNO, cosNI); + return bsdf_microfacet_eval(sc, Ng, wi, wo, pdf); } -ccl_device int bsdf_microfacet_ggx_sample(KernelGlobals kg, - ccl_private const ShaderClosure *sc, +ccl_device int bsdf_microfacet_ggx_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float2 *sampled_roughness, ccl_private float *eta) { - ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc; - float alpha_x = bsdf->alpha_x; - float alpha_y = bsdf->alpha_y; - bool m_refractive = bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID; - - *sampled_roughness = make_float2(alpha_x, alpha_y); - *eta = m_refractive ? 1.0f / bsdf->ior : bsdf->ior; - - float3 N = bsdf->N; - int label; - - float cosNO = dot(N, I); - if (cosNO > 0) { - float3 X, Y, Z = N; - - if (alpha_x == alpha_y) - make_orthonormals(Z, &X, &Y); - else - make_orthonormals_tangent(Z, bsdf->T, &X, &Y); - - /* importance sampling with distribution of visible normals. vectors are - * transformed to local space before and after */ - float3 local_I = make_float3(dot(X, I), dot(Y, I), cosNO); - float3 local_m; - float G1o; - - local_m = microfacet_sample_stretched( - kg, local_I, alpha_x, alpha_y, randu, randv, false, &G1o); - - float3 m = X * local_m.x + Y * local_m.y + Z * local_m.z; - float cosThetaM = local_m.z; - - /* reflection or refraction? 
*/ - if (!m_refractive) { - float cosMO = dot(m, I); - label = LABEL_REFLECT | LABEL_GLOSSY; - - if (cosMO > 0) { - /* eq. 39 - compute actual reflected direction */ - *omega_in = 2 * cosMO * m - I; - - if (dot(Ng, *omega_in) > 0) { - if (alpha_x * alpha_y <= 1e-7f) { - /* some high number for MIS */ - *pdf = 1e6f; - *eval = make_spectrum(1e6f); - - bool use_fresnel = (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID || - bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID); - - /* if fresnel is used, calculate the color with reflection_color(...) */ - if (use_fresnel) { - *eval *= reflection_color(bsdf, *omega_in, m); - } - - label = LABEL_REFLECT | LABEL_SINGULAR; - } - else { - /* microfacet normal is visible to this ray */ - /* eq. 33 */ - float alpha2 = alpha_x * alpha_y; - float D, G1i; - - if (alpha_x == alpha_y) { - /* isotropic */ - float cosThetaM2 = cosThetaM * cosThetaM; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - float tanThetaM2 = 1 / (cosThetaM2)-1; - - /* eval BRDF*cosNI */ - float cosNI = dot(N, *omega_in); - - if (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID) { - /* use GTR1 for clearcoat */ - D = D_GTR1(cosThetaM, bsdf->alpha_x); - - /* the alpha value for clearcoat is a fixed 0.25 => alpha2 = 0.25 * 0.25 */ - alpha2 = 0.0625f; - - /* recalculate G1o */ - G1o = 2 / (1 + safe_sqrtf(1 + alpha2 * (1 - cosNO * cosNO) / (cosNO * cosNO))); - } - else { - /* use GTR2 otherwise */ - D = alpha2 / (M_PI_F * cosThetaM4 * (alpha2 + tanThetaM2) * (alpha2 + tanThetaM2)); - } - - /* eq. 34: now calculate G1(i,m) */ - G1i = 2 / (1 + safe_sqrtf(1 + alpha2 * (1 - cosNI * cosNI) / (cosNI * cosNI))); - } - else { - /* anisotropic distribution */ - float3 local_m = make_float3(dot(X, m), dot(Y, m), dot(Z, m)); - float slope_x = -local_m.x / (local_m.z * alpha_x); - float slope_y = -local_m.y / (local_m.z * alpha_y); - float slope_len = 1 + slope_x * slope_x + slope_y * slope_y; - - float cosThetaM = local_m.z; - float cosThetaM2 = cosThetaM * cosThetaM; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - - D = 1 / ((slope_len * slope_len) * M_PI_F * alpha2 * cosThetaM4); - - /* calculate G1(i,m) */ - float cosNI = dot(N, *omega_in); - - float tanThetaI2 = (1 - cosNI * cosNI) / (cosNI * cosNI); - float cosPhiI = dot(*omega_in, X); - float sinPhiI = dot(*omega_in, Y); - - float alphaI2 = (cosPhiI * cosPhiI) * (alpha_x * alpha_x) + - (sinPhiI * sinPhiI) * (alpha_y * alpha_y); - alphaI2 /= cosPhiI * cosPhiI + sinPhiI * sinPhiI; - - G1i = 2 / (1 + safe_sqrtf(1 + alphaI2 * tanThetaI2)); - } - - /* see eval function for derivation */ - float common = (G1o * D) * 0.25f / cosNO; - *pdf = common; - - Spectrum F = reflection_color(bsdf, *omega_in, m); - - *eval = G1i * common * F; - } - - if (bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID) { - *eval *= 0.25f * bsdf->extra->clearcoat; - } - } - else { - *eval = zero_spectrum(); - *pdf = 0.0f; - } - } - } - else { - label = LABEL_TRANSMIT | LABEL_GLOSSY; - - /* CAUTION: the i and o variables are inverted relative to the paper - * eq. 39 - compute actual refractive direction */ - float3 R, T; - float m_eta = bsdf->ior, fresnel; - bool inside; - - fresnel = fresnel_dielectric(m_eta, m, I, &R, &T, &inside); - - if (!inside && fresnel != 1.0f) { - *omega_in = T; - - if (alpha_x * alpha_y <= 1e-7f || fabsf(m_eta - 1.0f) < 1e-4f) { - /* some high number for MIS */ - *pdf = 1e6f; - *eval = make_spectrum(1e6f); - label = LABEL_TRANSMIT | LABEL_SINGULAR; - } - else { - /* eq. 
33 */ - float alpha2 = alpha_x * alpha_y; - float cosThetaM2 = cosThetaM * cosThetaM; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - float tanThetaM2 = 1 / (cosThetaM2)-1; - float D = alpha2 / (M_PI_F * cosThetaM4 * (alpha2 + tanThetaM2) * (alpha2 + tanThetaM2)); - - /* eval BRDF*cosNI */ - float cosNI = dot(N, *omega_in); - - /* eq. 34: now calculate G1(i,m) */ - float G1i = 2 / (1 + safe_sqrtf(1 + alpha2 * (1 - cosNI * cosNI) / (cosNI * cosNI))); - - /* eq. 21 */ - float cosHI = dot(m, *omega_in); - float cosHO = dot(m, I); - float Ht2 = m_eta * cosHI + cosHO; - Ht2 *= Ht2; - - /* see eval function for derivation */ - float common = (G1o * D) * (m_eta * m_eta) / (cosNO * Ht2); - float out = G1i * fabsf(cosHI * cosHO) * common; - *pdf = cosHO * fabsf(cosHI) * common; - - *eval = make_spectrum(out); - } - } - else { - *eval = zero_spectrum(); - *pdf = 0.0f; - } - } - } - else { - label = (m_refractive) ? LABEL_TRANSMIT | LABEL_GLOSSY : LABEL_REFLECT | LABEL_GLOSSY; - } - return label; + return bsdf_microfacet_sample( + sc, Ng, wi, randu, randv, eval, wo, pdf, sampled_roughness, eta); } /* Beckmann microfacet with Smith shadow-masking from: @@ -765,14 +605,6 @@ ccl_device int bsdf_microfacet_beckmann_setup(ccl_private MicrofacetBsdf *bsdf) return SD_BSDF | SD_BSDF_HAS_EVAL; } -/* Required to maintain OSL interface. */ -ccl_device int bsdf_microfacet_beckmann_isotropic_setup(ccl_private MicrofacetBsdf *bsdf) -{ - bsdf->alpha_y = bsdf->alpha_x; - - return bsdf_microfacet_beckmann_setup(bsdf); -} - ccl_device int bsdf_microfacet_beckmann_refraction_setup(ccl_private MicrofacetBsdf *bsdf) { bsdf->alpha_x = saturatef(bsdf->alpha_x); @@ -790,356 +622,28 @@ ccl_device void bsdf_microfacet_beckmann_blur(ccl_private ShaderClosure *sc, flo bsdf->alpha_y = fmaxf(roughness, bsdf->alpha_y); } -ccl_device_inline float bsdf_beckmann_G1(float alpha, float cos_n) -{ - cos_n *= cos_n; - float invA = alpha * safe_sqrtf((1.0f - cos_n) / cos_n); - if (invA < 0.625f) { - return 1.0f; - } - - float a = 1.0f / invA; - return ((2.181f * a + 3.535f) * a) / ((2.577f * a + 2.276f) * a + 1.0f); -} - -ccl_device_inline float bsdf_beckmann_aniso_G1( - float alpha_x, float alpha_y, float cos_n, float cos_phi, float sin_phi) -{ - cos_n *= cos_n; - sin_phi *= sin_phi; - cos_phi *= cos_phi; - alpha_x *= alpha_x; - alpha_y *= alpha_y; - - float alphaO2 = (cos_phi * alpha_x + sin_phi * alpha_y) / (cos_phi + sin_phi); - float invA = safe_sqrtf(alphaO2 * (1 - cos_n) / cos_n); - if (invA < 0.625f) { - return 1.0f; - } - - float a = 1.0f / invA; - return ((2.181f * a + 3.535f) * a) / ((2.577f * a + 2.276f) * a + 1.0f); -} - -ccl_device Spectrum bsdf_microfacet_beckmann_eval_reflect(ccl_private const MicrofacetBsdf *bsdf, - const float3 N, - const float3 I, - const float3 omega_in, - ccl_private float *pdf, - const float alpha_x, - const float alpha_y, - const float cosNO, - const float cosNI) -{ - if (!(cosNO > 0 && cosNI > 0)) { - *pdf = 0.0f; - return zero_spectrum(); - } - - /* get half vector */ - float3 m = normalize(omega_in + I); - - float alpha2 = alpha_x * alpha_y; - float D, G1o, G1i; - - if (alpha_x == alpha_y) { - /* isotropic - * eq. 20: (F*G*D)/(4*in*on) - * eq. 25: first we calculate D(m) */ - float cosThetaM = dot(N, m); - float cosThetaM2 = cosThetaM * cosThetaM; - float tanThetaM2 = (1 - cosThetaM2) / cosThetaM2; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - D = expf(-tanThetaM2 / alpha2) / (M_PI_F * alpha2 * cosThetaM4); - - /* eq. 
26, 27: now calculate G1(i,m) and G1(o,m) */ - G1o = bsdf_beckmann_G1(alpha_x, cosNO); - G1i = bsdf_beckmann_G1(alpha_x, cosNI); - } - else { - /* anisotropic */ - float3 X, Y, Z = N; - make_orthonormals_tangent(Z, bsdf->T, &X, &Y); - - /* distribution */ - float3 local_m = make_float3(dot(X, m), dot(Y, m), dot(Z, m)); - float slope_x = -local_m.x / (local_m.z * alpha_x); - float slope_y = -local_m.y / (local_m.z * alpha_y); - - float cosThetaM = local_m.z; - float cosThetaM2 = cosThetaM * cosThetaM; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - - D = expf(-slope_x * slope_x - slope_y * slope_y) / (M_PI_F * alpha2 * cosThetaM4); - - /* G1(i,m) and G1(o,m) */ - G1o = bsdf_beckmann_aniso_G1(alpha_x, alpha_y, cosNO, dot(I, X), dot(I, Y)); - G1i = bsdf_beckmann_aniso_G1(alpha_x, alpha_y, cosNI, dot(omega_in, X), dot(omega_in, Y)); - } - - float G = G1o * G1i; - - /* eq. 20 */ - float common = D * 0.25f / cosNO; - float out = G * common; - - /* eq. 2 in distribution of visible normals sampling - * pm = Dw = G1o * dot(m, I) * D / dot(N, I); */ - - /* eq. 38 - but see also: - * eq. 17 in http://www.graphics.cornell.edu/~bjw/wardnotes.pdf - * pdf = pm * 0.25 / dot(m, I); */ - *pdf = G1o * common; - - return make_spectrum(out); -} - -ccl_device Spectrum bsdf_microfacet_beckmann_eval_transmit(ccl_private const MicrofacetBsdf *bsdf, - const float3 N, - const float3 I, - const float3 omega_in, - ccl_private float *pdf, - const float alpha_x, - const float alpha_y, - const float cosNO, - const float cosNI) -{ - if (cosNO <= 0 || cosNI >= 0) { - *pdf = 0.0f; - return zero_spectrum(); - } - - const float m_eta = bsdf->ior; - /* compute half-vector of the refraction (eq. 16) */ - float3 ht = -(m_eta * omega_in + I); - float3 Ht = normalize(ht); - float cosHO = dot(Ht, I); - float cosHI = dot(Ht, omega_in); - - /* eq. 25: first we calculate D(m) with m=Ht: */ - float alpha2 = alpha_x * alpha_y; - float cosThetaM = min(dot(N, Ht), 1.0f); - float cosThetaM2 = cosThetaM * cosThetaM; - float tanThetaM2 = (1 - cosThetaM2) / cosThetaM2; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - float D = expf(-tanThetaM2 / alpha2) / (M_PI_F * alpha2 * cosThetaM4); - - /* eq. 26, 27: now calculate G1(i,m) and G1(o,m) */ - float G1o = bsdf_beckmann_G1(alpha_x, cosNO); - float G1i = bsdf_beckmann_G1(alpha_x, cosNI); - float G = G1o * G1i; - - /* probability */ - float Ht2 = dot(ht, ht); - - /* eq. 
2 in distribution of visible normals sampling - * pm = Dw = G1o * dot(m, I) * D / dot(N, I); */ - - /* out = fabsf(cosHI * cosHO) * (m_eta * m_eta) * G * D / (cosNO * Ht2) - * pdf = pm * (m_eta * m_eta) * fabsf(cosHI) / Ht2 */ - float common = D * (m_eta * m_eta) / (cosNO * Ht2); - float out = G * fabsf(cosHI * cosHO) * common; - *pdf = G1o * fabsf(cosHO * cosHI) * common; - - return make_spectrum(out); -} - ccl_device Spectrum bsdf_microfacet_beckmann_eval(ccl_private const ShaderClosure *sc, const float3 Ng, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { - ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc; - const bool m_refractive = bsdf->type == CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID; - const float alpha_x = bsdf->alpha_x; - const float alpha_y = bsdf->alpha_y; - const float cosNgI = dot(Ng, omega_in); - - if (((cosNgI < 0.0f) != m_refractive) || alpha_x * alpha_y <= 1e-7f) { - *pdf = 0.0f; - return zero_spectrum(); - } - - const float3 N = bsdf->N; - const float cosNO = dot(N, I); - const float cosNI = dot(N, omega_in); - - return (cosNI < 0.0f) ? bsdf_microfacet_beckmann_eval_transmit( - bsdf, N, I, omega_in, pdf, alpha_x, alpha_y, cosNO, cosNI) : - bsdf_microfacet_beckmann_eval_reflect( - bsdf, N, I, omega_in, pdf, alpha_x, alpha_y, cosNO, cosNI); + return bsdf_microfacet_eval(sc, Ng, wi, wo, pdf); } -ccl_device int bsdf_microfacet_beckmann_sample(KernelGlobals kg, - ccl_private const ShaderClosure *sc, +ccl_device int bsdf_microfacet_beckmann_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float2 *sampled_roughness, ccl_private float *eta) { - ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc; - float alpha_x = bsdf->alpha_x; - float alpha_y = bsdf->alpha_y; - bool m_refractive = bsdf->type == CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID; - float3 N = bsdf->N; - int label; - - *sampled_roughness = make_float2(alpha_x, alpha_y); - *eta = m_refractive ? 1.0f / bsdf->ior : bsdf->ior; - - float cosNO = dot(N, I); - if (cosNO > 0) { - float3 X, Y, Z = N; - - if (alpha_x == alpha_y) - make_orthonormals(Z, &X, &Y); - else - make_orthonormals_tangent(Z, bsdf->T, &X, &Y); - - /* importance sampling with distribution of visible normals. vectors are - * transformed to local space before and after */ - float3 local_I = make_float3(dot(X, I), dot(Y, I), cosNO); - float3 local_m; - float G1o; - - local_m = microfacet_sample_stretched(kg, local_I, alpha_x, alpha_x, randu, randv, true, &G1o); - - float3 m = X * local_m.x + Y * local_m.y + Z * local_m.z; - float cosThetaM = local_m.z; - - /* reflection or refraction? */ - if (!m_refractive) { - label = LABEL_REFLECT | LABEL_GLOSSY; - float cosMO = dot(m, I); - - if (cosMO > 0) { - /* eq. 39 - compute actual reflected direction */ - *omega_in = 2 * cosMO * m - I; - - if (dot(Ng, *omega_in) > 0) { - if (alpha_x * alpha_y <= 1e-7f) { - /* some high number for MIS */ - *pdf = 1e6f; - *eval = make_spectrum(1e6f); - label = LABEL_REFLECT | LABEL_SINGULAR; - } - else { - /* microfacet normal is visible to this ray - * eq. 25 */ - float alpha2 = alpha_x * alpha_y; - float D, G1i; - - if (alpha_x == alpha_y) { - /* Isotropic distribution. 
*/ - float cosThetaM2 = cosThetaM * cosThetaM; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - float tanThetaM2 = 1 / (cosThetaM2)-1; - D = expf(-tanThetaM2 / alpha2) / (M_PI_F * alpha2 * cosThetaM4); - - /* eval BRDF*cosNI */ - float cosNI = dot(N, *omega_in); - - /* eq. 26, 27: now calculate G1(i,m) */ - G1i = bsdf_beckmann_G1(alpha_x, cosNI); - } - else { - /* anisotropic distribution */ - float3 local_m = make_float3(dot(X, m), dot(Y, m), dot(Z, m)); - float slope_x = -local_m.x / (local_m.z * alpha_x); - float slope_y = -local_m.y / (local_m.z * alpha_y); - - float cosThetaM = local_m.z; - float cosThetaM2 = cosThetaM * cosThetaM; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - - D = expf(-slope_x * slope_x - slope_y * slope_y) / (M_PI_F * alpha2 * cosThetaM4); - - /* G1(i,m) */ - G1i = bsdf_beckmann_aniso_G1( - alpha_x, alpha_y, dot(*omega_in, N), dot(*omega_in, X), dot(*omega_in, Y)); - } - - float G = G1o * G1i; - - /* see eval function for derivation */ - float common = D * 0.25f / cosNO; - float out = G * common; - *pdf = G1o * common; - - *eval = make_spectrum(out); - } - } - else { - *eval = zero_spectrum(); - *pdf = 0.0f; - } - } - } - else { - label = LABEL_TRANSMIT | LABEL_GLOSSY; - - /* CAUTION: the i and o variables are inverted relative to the paper - * eq. 39 - compute actual refractive direction */ - float3 R, T; - float m_eta = bsdf->ior, fresnel; - bool inside; - - fresnel = fresnel_dielectric(m_eta, m, I, &R, &T, &inside); - - if (!inside && fresnel != 1.0f) { - *omega_in = T; - - if (alpha_x * alpha_y <= 1e-7f || fabsf(m_eta - 1.0f) < 1e-4f) { - /* some high number for MIS */ - *pdf = 1e6f; - *eval = make_spectrum(1e6f); - label = LABEL_TRANSMIT | LABEL_SINGULAR; - } - else { - /* eq. 33 */ - float alpha2 = alpha_x * alpha_y; - float cosThetaM2 = cosThetaM * cosThetaM; - float cosThetaM4 = cosThetaM2 * cosThetaM2; - float tanThetaM2 = 1 / (cosThetaM2)-1; - float D = expf(-tanThetaM2 / alpha2) / (M_PI_F * alpha2 * cosThetaM4); - - /* eval BRDF*cosNI */ - float cosNI = dot(N, *omega_in); - - /* eq. 26, 27: now calculate G1(i,m) */ - float G1i = bsdf_beckmann_G1(alpha_x, cosNI); - float G = G1o * G1i; - - /* eq. 21 */ - float cosHI = dot(m, *omega_in); - float cosHO = dot(m, I); - float Ht2 = m_eta * cosHI + cosHO; - Ht2 *= Ht2; - - /* see eval function for derivation */ - float common = D * (m_eta * m_eta) / (cosNO * Ht2); - float out = G * fabsf(cosHI * cosHO) * common; - *pdf = G1o * cosHO * fabsf(cosHI) * common; - - *eval = make_spectrum(out); - } - } - else { - *eval = zero_spectrum(); - *pdf = 0.0f; - } - } - } - else { - label = (m_refractive) ? 
LABEL_TRANSMIT | LABEL_GLOSSY : LABEL_REFLECT | LABEL_GLOSSY; - } - return label; + return bsdf_microfacet_sample( + sc, Ng, wi, randu, randv, eval, wo, pdf, sampled_roughness, eta); } CCL_NAMESPACE_END diff --git a/intern/cycles/kernel/closure/bsdf_microfacet_multi.h b/intern/cycles/kernel/closure/bsdf_microfacet_multi.h index 29e1473160e..c2ef383f79f 100644 --- a/intern/cycles/kernel/closure/bsdf_microfacet_multi.h +++ b/intern/cycles/kernel/closure/bsdf_microfacet_multi.h @@ -43,7 +43,7 @@ ccl_device_forceinline float2 mf_sampleP22_11(const float cosI, return make_float2(r * cosf(phi), r * sinf(phi)); } - const float sinI = safe_sqrtf(1.0f - cosI * cosI); + const float sinI = sin_from_cos(cosI); const float tanI = sinI / cosI; const float projA = 0.5f * (cosI + 1.0f); if (projA < 0.0001f) @@ -401,7 +401,7 @@ ccl_device int bsdf_microfacet_multi_ggx_fresnel_setup(ccl_private MicrofacetBsd bsdf->type = CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID; - bsdf_microfacet_fresnel_color(sd, bsdf); + bsdf_microfacet_adjust_weight(sd, bsdf); return bsdf_microfacet_multi_ggx_common_setup(bsdf); } @@ -417,15 +417,15 @@ ccl_device int bsdf_microfacet_multi_ggx_refraction_setup(ccl_private Microfacet ccl_device Spectrum bsdf_microfacet_multi_ggx_eval(ccl_private const ShaderClosure *sc, const float3 Ng, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf, ccl_private uint *lcg_state) { ccl_private const MicrofacetBsdf *bsdf = (ccl_private const MicrofacetBsdf *)sc; - const float cosNgI = dot(Ng, omega_in); + const float cosNgO = dot(Ng, wo); - if ((cosNgI < 0.0f) || bsdf->alpha_x * bsdf->alpha_y < 1e-7f) { + if ((cosNgO < 0.0f) || bsdf->alpha_x * bsdf->alpha_y < 1e-7f) { *pdf = 0.0f; return zero_spectrum(); } @@ -434,7 +434,7 @@ ccl_device Spectrum bsdf_microfacet_multi_ggx_eval(ccl_private const ShaderClosu Z = bsdf->N; /* Ensure that the both directions are on the outside w.r.t. the shading normal. 
*/ - if (dot(Z, I) <= 0.0f || dot(Z, omega_in) <= 0.0f) { + if (dot(Z, wi) <= 0.0f || dot(Z, wo) <= 0.0f) { *pdf = 0.0f; return zero_spectrum(); } @@ -447,21 +447,21 @@ ccl_device Spectrum bsdf_microfacet_multi_ggx_eval(ccl_private const ShaderClosu else make_orthonormals(Z, &X, &Y); - float3 localI = make_float3(dot(I, X), dot(I, Y), dot(I, Z)); - float3 localO = make_float3(dot(omega_in, X), dot(omega_in, Y), dot(omega_in, Z)); + float3 local_I = make_float3(dot(wi, X), dot(wi, Y), dot(wi, Z)); + float3 local_O = make_float3(dot(wo, X), dot(wo, Y), dot(wo, Z)); if (is_aniso) - *pdf = mf_ggx_aniso_pdf(localI, localO, make_float2(bsdf->alpha_x, bsdf->alpha_y)); + *pdf = mf_ggx_aniso_pdf(local_I, local_O, make_float2(bsdf->alpha_x, bsdf->alpha_y)); else - *pdf = mf_ggx_pdf(localI, localO, bsdf->alpha_x); + *pdf = mf_ggx_pdf(local_I, local_O, bsdf->alpha_x); if (*pdf <= 0.f) { *pdf = 0.f; return make_float3(0.f, 0.f, 0.f); } - return mf_eval_glossy(localI, - localO, + return mf_eval_glossy(local_I, + local_O, true, bsdf->extra->color, bsdf->alpha_x, @@ -475,11 +475,11 @@ ccl_device Spectrum bsdf_microfacet_multi_ggx_eval(ccl_private const ShaderClosu ccl_device int bsdf_microfacet_multi_ggx_sample(KernelGlobals kg, ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private uint *lcg_state, ccl_private float2 *sampled_roughness, @@ -491,7 +491,7 @@ ccl_device int bsdf_microfacet_multi_ggx_sample(KernelGlobals kg, Z = bsdf->N; /* Ensure that the view direction is on the outside w.r.t. the shading normal. */ - if (dot(Z, I) <= 0.0f) { + if (dot(Z, wi) <= 0.0f) { *pdf = 0.0f; return LABEL_NONE; } @@ -499,8 +499,8 @@ ccl_device int bsdf_microfacet_multi_ggx_sample(KernelGlobals kg, /* Special case: Extremely low roughness. * Don't bother with microfacets, just do specular reflection. */ if (bsdf->alpha_x * bsdf->alpha_y < 1e-7f) { - *omega_in = 2 * dot(Z, I) * Z - I; - if (dot(Ng, *omega_in) <= 0.0f) { + *wo = 2 * dot(Z, wi) * Z - wi; + if (dot(Ng, *wo) <= 0.0f) { *pdf = 0.0f; return LABEL_NONE; } @@ -520,11 +520,11 @@ ccl_device int bsdf_microfacet_multi_ggx_sample(KernelGlobals kg, else make_orthonormals(Z, &X, &Y); - float3 localI = make_float3(dot(I, X), dot(I, Y), dot(I, Z)); - float3 localO; + float3 local_I = make_float3(dot(wi, X), dot(wi, Y), dot(wi, Z)); + float3 local_O; - *eval = mf_sample_glossy(localI, - &localO, + *eval = mf_sample_glossy(local_I, + &local_O, bsdf->extra->color, bsdf->alpha_x, bsdf->alpha_y, @@ -532,18 +532,18 @@ ccl_device int bsdf_microfacet_multi_ggx_sample(KernelGlobals kg, bsdf->ior, use_fresnel, bsdf->extra->cspec0); - *omega_in = X * localO.x + Y * localO.y + Z * localO.z; + *wo = X * local_O.x + Y * local_O.y + Z * local_O.z; /* Ensure that the light direction is on the outside w.r.t. the geometry normal. 
*/ - if (dot(Ng, *omega_in) <= 0.0f) { + if (dot(Ng, *wo) <= 0.0f) { *pdf = 0.0f; return LABEL_NONE; } if (is_aniso) - *pdf = mf_ggx_aniso_pdf(localI, localO, make_float2(bsdf->alpha_x, bsdf->alpha_y)); + *pdf = mf_ggx_aniso_pdf(local_I, local_O, make_float2(bsdf->alpha_x, bsdf->alpha_y)); else - *pdf = mf_ggx_pdf(localI, localO, bsdf->alpha_x); + *pdf = mf_ggx_pdf(local_I, local_O, bsdf->alpha_x); *pdf = fmaxf(0.f, *pdf); *eval *= *pdf; @@ -575,14 +575,14 @@ ccl_device int bsdf_microfacet_multi_ggx_glass_fresnel_setup(ccl_private Microfa bsdf->type = CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID; - bsdf_microfacet_fresnel_color(sd, bsdf); + bsdf_microfacet_adjust_weight(sd, bsdf); return SD_BSDF | SD_BSDF_HAS_EVAL | SD_BSDF_NEEDS_LCG; } ccl_device Spectrum bsdf_microfacet_multi_ggx_glass_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf, ccl_private uint *lcg_state) { @@ -597,17 +597,17 @@ ccl_device Spectrum bsdf_microfacet_multi_ggx_glass_eval(ccl_private const Shade Z = bsdf->N; make_orthonormals(Z, &X, &Y); - float3 localI = make_float3(dot(I, X), dot(I, Y), dot(I, Z)); - float3 localO = make_float3(dot(omega_in, X), dot(omega_in, Y), dot(omega_in, Z)); + float3 local_I = make_float3(dot(wi, X), dot(wi, Y), dot(wi, Z)); + float3 local_O = make_float3(dot(wo, X), dot(wo, Y), dot(wo, Z)); - const bool is_transmission = localO.z < 0.0f; + const bool is_transmission = local_O.z < 0.0f; const bool use_fresnel = !is_transmission && (bsdf->type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID); - *pdf = mf_glass_pdf(localI, localO, bsdf->alpha_x, bsdf->ior); + *pdf = mf_glass_pdf(local_I, local_O, bsdf->alpha_x, bsdf->ior); kernel_assert(*pdf >= 0.f); - return mf_eval_glass(localI, - localO, + return mf_eval_glass(local_I, + local_O, !is_transmission, bsdf->extra->color, bsdf->alpha_x, @@ -621,11 +621,11 @@ ccl_device Spectrum bsdf_microfacet_multi_ggx_glass_eval(ccl_private const Shade ccl_device int bsdf_microfacet_multi_ggx_glass_sample(KernelGlobals kg, ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private uint *lcg_state, ccl_private float2 *sampled_roughness, @@ -642,16 +642,16 @@ ccl_device int bsdf_microfacet_multi_ggx_glass_sample(KernelGlobals kg, if (bsdf->alpha_x * bsdf->alpha_y < 1e-7f) { float3 R, T; bool inside; - float fresnel = fresnel_dielectric(bsdf->ior, Z, I, &R, &T, &inside); + float fresnel = fresnel_dielectric(bsdf->ior, Z, wi, &R, &T, &inside); *pdf = 1e6f; *eval = make_spectrum(1e6f); if (randu < fresnel) { - *omega_in = R; + *wo = R; return LABEL_REFLECT | LABEL_SINGULAR; } else { - *omega_in = T; + *wo = T; return LABEL_TRANSMIT | LABEL_SINGULAR; } } @@ -660,11 +660,11 @@ ccl_device int bsdf_microfacet_multi_ggx_glass_sample(KernelGlobals kg, make_orthonormals(Z, &X, &Y); - float3 localI = make_float3(dot(I, X), dot(I, Y), dot(I, Z)); - float3 localO; + float3 local_I = make_float3(dot(wi, X), dot(wi, Y), dot(wi, Z)); + float3 local_O; - *eval = mf_sample_glass(localI, - &localO, + *eval = mf_sample_glass(local_I, + &local_O, bsdf->extra->color, bsdf->alpha_x, bsdf->alpha_y, @@ -672,12 +672,12 @@ ccl_device int bsdf_microfacet_multi_ggx_glass_sample(KernelGlobals kg, bsdf->ior, use_fresnel, bsdf->extra->cspec0); - *pdf = mf_glass_pdf(localI, localO, bsdf->alpha_x, bsdf->ior); + *pdf = 
mf_glass_pdf(local_I, local_O, bsdf->alpha_x, bsdf->ior); kernel_assert(*pdf >= 0.f); *eval *= *pdf; - *omega_in = X * localO.x + Y * localO.y + Z * localO.z; - if (localO.z * localI.z > 0.0f) { + *wo = X * local_O.x + Y * local_O.y + Z * local_O.z; + if (local_O.z * local_I.z > 0.0f) { return LABEL_REFLECT | LABEL_GLOSSY; } else { diff --git a/intern/cycles/kernel/closure/bsdf_microfacet_multi_impl.h b/intern/cycles/kernel/closure/bsdf_microfacet_multi_impl.h index 91fb9158050..8b595ebfa3b 100644 --- a/intern/cycles/kernel/closure/bsdf_microfacet_multi_impl.h +++ b/intern/cycles/kernel/closure/bsdf_microfacet_multi_impl.h @@ -73,9 +73,8 @@ ccl_device_forceinline Spectrum MF_FUNCTION_FULL_NAME(mf_eval)(float3 wi, eval = make_spectrum(val); #endif - float F0 = fresnel_dielectric_cos(1.0f, eta); if (use_fresnel) { - throughput = interpolate_fresnel_color(wi, wh, eta, F0, cspec0); + throughput = interpolate_fresnel_color(wi, wh, eta, cspec0); eval *= throughput; } @@ -144,11 +143,11 @@ ccl_device_forceinline Spectrum MF_FUNCTION_FULL_NAME(mf_eval)(float3 wi, throughput *= color; } else if (use_fresnel && order > 0) { - throughput *= interpolate_fresnel_color(wi_prev, wm, eta, F0, cspec0); + throughput *= interpolate_fresnel_color(wi_prev, wm, eta, cspec0); } #else /* MF_MULTI_GLOSSY */ if (use_fresnel && order > 0) { - throughput *= interpolate_fresnel_color(-wr, wm, eta, F0, cspec0); + throughput *= interpolate_fresnel_color(-wr, wm, eta, cspec0); } wr = mf_sample_phase_glossy(-wr, &throughput, wm); #endif @@ -192,8 +191,6 @@ ccl_device_forceinline Spectrum MF_FUNCTION_FULL_NAME(mf_sample)(float3 wi, float G1_r = 0.0f; bool outside = true; - float F0 = fresnel_dielectric_cos(1.0f, eta); - int order; for (order = 0; order < 10; order++) { /* Sample microfacet height. 
*/ @@ -229,22 +226,12 @@ ccl_device_forceinline Spectrum MF_FUNCTION_FULL_NAME(mf_sample)(float3 wi, throughput *= color; } else { - Spectrum t_color = interpolate_fresnel_color(wi_prev, wm, eta, F0, cspec0); - - if (order == 0) - throughput = t_color; - else - throughput *= t_color; + throughput *= interpolate_fresnel_color(wi_prev, wm, eta, cspec0); } } #else /* MF_MULTI_GLOSSY */ if (use_fresnel) { - Spectrum t_color = interpolate_fresnel_color(-wr, wm, eta, F0, cspec0); - - if (order == 0) - throughput = t_color; - else - throughput *= t_color; + throughput *= interpolate_fresnel_color(-wr, wm, eta, cspec0); } wr = mf_sample_phase_glossy(-wr, &throughput, wm); #endif diff --git a/intern/cycles/kernel/closure/bsdf_oren_nayar.h b/intern/cycles/kernel/closure/bsdf_oren_nayar.h index 6912d5b3f18..3f6074be53c 100644 --- a/intern/cycles/kernel/closure/bsdf_oren_nayar.h +++ b/intern/cycles/kernel/closure/bsdf_oren_nayar.h @@ -48,14 +48,14 @@ ccl_device int bsdf_oren_nayar_setup(ccl_private OrenNayarBsdf *bsdf) } ccl_device Spectrum bsdf_oren_nayar_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const OrenNayarBsdf *bsdf = (ccl_private const OrenNayarBsdf *)sc; - if (dot(bsdf->N, omega_in) > 0.0f) { + if (dot(bsdf->N, wo) > 0.0f) { *pdf = 0.5f * M_1_PI_F; - return bsdf_oren_nayar_get_intensity(sc, bsdf->N, I, omega_in); + return bsdf_oren_nayar_get_intensity(sc, bsdf->N, wi, wo); } else { *pdf = 0.0f; @@ -65,18 +65,18 @@ ccl_device Spectrum bsdf_oren_nayar_eval(ccl_private const ShaderClosure *sc, ccl_device int bsdf_oren_nayar_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { ccl_private const OrenNayarBsdf *bsdf = (ccl_private const OrenNayarBsdf *)sc; - sample_uniform_hemisphere(bsdf->N, randu, randv, omega_in, pdf); + sample_uniform_hemisphere(bsdf->N, randu, randv, wo, pdf); - if (dot(Ng, *omega_in) > 0.0f) { - *eval = bsdf_oren_nayar_get_intensity(sc, bsdf->N, I, *omega_in); + if (dot(Ng, *wo) > 0.0f) { + *eval = bsdf_oren_nayar_get_intensity(sc, bsdf->N, wi, *wo); } else { *pdf = 0.0f; diff --git a/intern/cycles/kernel/closure/bsdf_phong_ramp.h b/intern/cycles/kernel/closure/bsdf_phong_ramp.h index 04bc165af30..5e2b9aad233 100644 --- a/intern/cycles/kernel/closure/bsdf_phong_ramp.h +++ b/intern/cycles/kernel/closure/bsdf_phong_ramp.h @@ -45,23 +45,23 @@ ccl_device int bsdf_phong_ramp_setup(ccl_private PhongRampBsdf *bsdf) } ccl_device Spectrum bsdf_phong_ramp_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const PhongRampBsdf *bsdf = (ccl_private const PhongRampBsdf *)sc; float m_exponent = bsdf->exponent; - float cosNI = dot(bsdf->N, omega_in); - float cosNO = dot(bsdf->N, I); + float cosNI = dot(bsdf->N, wi); + float cosNO = dot(bsdf->N, wo); if (cosNI > 0 && cosNO > 0) { // reflect the view vector - float3 R = (2 * cosNO) * bsdf->N - I; - float cosRI = dot(R, omega_in); - if (cosRI > 0) { - float cosp = powf(cosRI, m_exponent); + float3 R = (2 * cosNI) * bsdf->N - wi; + float cosRO = dot(R, wo); + if (cosRO > 0) { + float cosp = powf(cosRO, m_exponent); float common = 0.5f * M_1_PI_F * cosp; - float out = cosNI * (m_exponent + 2) * common; + float out = cosNO * (m_exponent + 2) * common; *pdf = 
(m_exponent + 1) * common; return rgb_to_spectrum(bsdf_phong_ramp_get_color(bsdf->colors, cosp) * out); } @@ -77,39 +77,39 @@ ccl_device_inline float phong_ramp_exponent_to_roughness(float exponent) ccl_device int bsdf_phong_ramp_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float2 *sampled_roughness) { ccl_private const PhongRampBsdf *bsdf = (ccl_private const PhongRampBsdf *)sc; - float cosNO = dot(bsdf->N, I); + float cosNI = dot(bsdf->N, wi); float m_exponent = bsdf->exponent; const float m_roughness = phong_ramp_exponent_to_roughness(m_exponent); *sampled_roughness = make_float2(m_roughness, m_roughness); - if (cosNO > 0) { + if (cosNI > 0) { // reflect the view vector - float3 R = (2 * cosNO) * bsdf->N - I; + float3 R = (2 * cosNI) * bsdf->N - wi; float3 T, B; make_orthonormals(R, &T, &B); float phi = M_2PI_F * randu; float cosTheta = powf(randv, 1 / (m_exponent + 1)); float sinTheta2 = 1 - cosTheta * cosTheta; float sinTheta = sinTheta2 > 0 ? sqrtf(sinTheta2) : 0; - *omega_in = (cosf(phi) * sinTheta) * T + (sinf(phi) * sinTheta) * B + (cosTheta)*R; - if (dot(Ng, *omega_in) > 0.0f) { + *wo = (cosf(phi) * sinTheta) * T + (sinf(phi) * sinTheta) * B + (cosTheta)*R; + if (dot(Ng, *wo) > 0.0f) { // common terms for pdf and eval - float cosNI = dot(bsdf->N, *omega_in); + float cosNO = dot(bsdf->N, *wo); // make sure the direction we chose is still in the right hemisphere - if (cosNI > 0) { + if (cosNO > 0) { float cosp = powf(cosTheta, m_exponent); float common = 0.5f * M_1_PI_F * cosp; *pdf = (m_exponent + 1) * common; - float out = cosNI * (m_exponent + 2) * common; + float out = cosNO * (m_exponent + 2) * common; *eval = rgb_to_spectrum(bsdf_phong_ramp_get_color(bsdf->colors, cosp) * out); } } diff --git a/intern/cycles/kernel/closure/bsdf_principled_diffuse.h b/intern/cycles/kernel/closure/bsdf_principled_diffuse.h index be8ee78fcac..a63492eadd9 100644 --- a/intern/cycles/kernel/closure/bsdf_principled_diffuse.h +++ b/intern/cycles/kernel/closure/bsdf_principled_diffuse.h @@ -110,17 +110,17 @@ ccl_device int bsdf_principled_diffuse_setup(ccl_private PrincipledDiffuseBsdf * } ccl_device Spectrum bsdf_principled_diffuse_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const PrincipledDiffuseBsdf *bsdf = (ccl_private const PrincipledDiffuseBsdf *)sc; const float3 N = bsdf->N; - if (dot(N, omega_in) > 0.0f) { - const float3 V = I; // outgoing - const float3 L = omega_in; // incoming - *pdf = fmaxf(dot(N, omega_in), 0.0f) * M_1_PI_F; + if (dot(N, wo) > 0.0f) { + const float3 V = wi; + const float3 L = wo; + *pdf = fmaxf(dot(N, wo), 0.0f) * M_1_PI_F; return bsdf_principled_diffuse_compute_brdf(bsdf, N, V, L, pdf); } else { @@ -131,21 +131,21 @@ ccl_device Spectrum bsdf_principled_diffuse_eval(ccl_private const ShaderClosure ccl_device int bsdf_principled_diffuse_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { ccl_private const PrincipledDiffuseBsdf *bsdf = (ccl_private const PrincipledDiffuseBsdf *)sc; float3 N = bsdf->N; - sample_cos_hemisphere(N, randu, randv, omega_in, pdf); + sample_cos_hemisphere(N, randu, randv, wo, pdf); - if 
(dot(Ng, *omega_in) > 0) { - *eval = bsdf_principled_diffuse_compute_brdf(bsdf, N, I, *omega_in, pdf); + if (dot(Ng, *wo) > 0) { + *eval = bsdf_principled_diffuse_compute_brdf(bsdf, N, wi, *wo, pdf); } else { *pdf = 0.0f; diff --git a/intern/cycles/kernel/closure/bsdf_principled_sheen.h b/intern/cycles/kernel/closure/bsdf_principled_sheen.h index f6499cc437c..06296f1cef5 100644 --- a/intern/cycles/kernel/closure/bsdf_principled_sheen.h +++ b/intern/cycles/kernel/closure/bsdf_principled_sheen.h @@ -54,25 +54,25 @@ ccl_device int bsdf_principled_sheen_setup(ccl_private const ShaderData *sd, ccl_private PrincipledSheenBsdf *bsdf) { bsdf->type = CLOSURE_BSDF_PRINCIPLED_SHEEN_ID; - bsdf->avg_value = calculate_avg_principled_sheen_brdf(bsdf->N, sd->I); + bsdf->avg_value = calculate_avg_principled_sheen_brdf(bsdf->N, sd->wi); bsdf->sample_weight *= bsdf->avg_value; return SD_BSDF | SD_BSDF_HAS_EVAL; } ccl_device Spectrum bsdf_principled_sheen_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const PrincipledSheenBsdf *bsdf = (ccl_private const PrincipledSheenBsdf *)sc; const float3 N = bsdf->N; - if (dot(N, omega_in) > 0.0f) { - const float3 V = I; // outgoing - const float3 L = omega_in; // incoming + if (dot(N, wo) > 0.0f) { + const float3 V = wi; + const float3 L = wo; const float3 H = normalize(L + V); - *pdf = fmaxf(dot(N, omega_in), 0.0f) * M_1_PI_F; + *pdf = fmaxf(dot(N, wo), 0.0f) * M_1_PI_F; return calculate_principled_sheen_brdf(N, V, L, H, pdf); } else { @@ -83,23 +83,23 @@ ccl_device Spectrum bsdf_principled_sheen_eval(ccl_private const ShaderClosure * ccl_device int bsdf_principled_sheen_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { ccl_private const PrincipledSheenBsdf *bsdf = (ccl_private const PrincipledSheenBsdf *)sc; float3 N = bsdf->N; - sample_cos_hemisphere(N, randu, randv, omega_in, pdf); + sample_cos_hemisphere(N, randu, randv, wo, pdf); - if (dot(Ng, *omega_in) > 0) { - float3 H = normalize(I + *omega_in); + if (dot(Ng, *wo) > 0) { + float3 H = normalize(wi + *wo); - *eval = calculate_principled_sheen_brdf(N, I, *omega_in, H, pdf); + *eval = calculate_principled_sheen_brdf(N, wi, *wo, H, pdf); } else { *eval = zero_spectrum(); diff --git a/intern/cycles/kernel/closure/bsdf_reflection.h b/intern/cycles/kernel/closure/bsdf_reflection.h index 2f761974e9a..71ee5f389d2 100644 --- a/intern/cycles/kernel/closure/bsdf_reflection.h +++ b/intern/cycles/kernel/closure/bsdf_reflection.h @@ -19,8 +19,8 @@ ccl_device int bsdf_reflection_setup(ccl_private MicrofacetBsdf *bsdf) } ccl_device Spectrum bsdf_reflection_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { *pdf = 0.0f; @@ -29,11 +29,11 @@ ccl_device Spectrum bsdf_reflection_eval(ccl_private const ShaderClosure *sc, ccl_device int bsdf_reflection_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float *eta) { @@ -42,10 +42,10 @@ ccl_device int bsdf_reflection_sample(ccl_private const ShaderClosure *sc, *eta = bsdf->ior; // only one direction is possible - float cosNO = dot(N, I); - 
if (cosNO > 0) { - *omega_in = (2 * cosNO) * N - I; - if (dot(Ng, *omega_in) > 0) { + float cosNI = dot(N, wi); + if (cosNI > 0) { + *wo = (2 * cosNI) * N - wi; + if (dot(Ng, *wo) > 0) { /* Some high number for MIS. */ *pdf = 1e6f; *eval = make_spectrum(1e6f); diff --git a/intern/cycles/kernel/closure/bsdf_refraction.h b/intern/cycles/kernel/closure/bsdf_refraction.h index e4f66245a0b..f4beff81dd1 100644 --- a/intern/cycles/kernel/closure/bsdf_refraction.h +++ b/intern/cycles/kernel/closure/bsdf_refraction.h @@ -19,8 +19,8 @@ ccl_device int bsdf_refraction_setup(ccl_private MicrofacetBsdf *bsdf) } ccl_device Spectrum bsdf_refraction_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { *pdf = 0.0f; @@ -29,11 +29,11 @@ ccl_device Spectrum bsdf_refraction_eval(ccl_private const ShaderClosure *sc, ccl_device int bsdf_refraction_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float *eta) { @@ -46,13 +46,13 @@ ccl_device int bsdf_refraction_sample(ccl_private const ShaderClosure *sc, float3 R, T; bool inside; float fresnel; - fresnel = fresnel_dielectric(m_eta, N, I, &R, &T, &inside); + fresnel = fresnel_dielectric(m_eta, N, wi, &R, &T, &inside); if (!inside && fresnel != 1.0f) { /* Some high number for MIS. */ *pdf = 1e6f; *eval = make_spectrum(1e6f); - *omega_in = T; + *wo = T; } else { *pdf = 0.0f; diff --git a/intern/cycles/kernel/closure/bsdf_toon.h b/intern/cycles/kernel/closure/bsdf_toon.h index 9f78c86b3b7..1b109da0e4f 100644 --- a/intern/cycles/kernel/closure/bsdf_toon.h +++ b/intern/cycles/kernel/closure/bsdf_toon.h @@ -50,17 +50,17 @@ ccl_device float bsdf_toon_get_sample_angle(float max_angle, float smooth) } ccl_device Spectrum bsdf_diffuse_toon_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const ToonBsdf *bsdf = (ccl_private const ToonBsdf *)sc; - float cosNI = dot(bsdf->N, omega_in); + float cosNO = dot(bsdf->N, wo); - if (cosNI >= 0.0f) { + if (cosNO >= 0.0f) { float max_angle = bsdf->size * M_PI_2_F; float smooth = bsdf->smooth * M_PI_2_F; - float angle = safe_acosf(fmaxf(cosNI, 0.0f)); + float angle = safe_acosf(fmaxf(cosNO, 0.0f)); float eval = bsdf_toon_get_intensity(max_angle, smooth, angle); @@ -78,11 +78,11 @@ ccl_device Spectrum bsdf_diffuse_toon_eval(ccl_private const ShaderClosure *sc, ccl_device int bsdf_diffuse_toon_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { ccl_private const ToonBsdf *bsdf = (ccl_private const ToonBsdf *)sc; @@ -92,9 +92,9 @@ ccl_device int bsdf_diffuse_toon_sample(ccl_private const ShaderClosure *sc, float angle = sample_angle * randu; if (sample_angle > 0.0f) { - sample_uniform_cone(bsdf->N, sample_angle, randu, randv, omega_in, pdf); + sample_uniform_cone(bsdf->N, sample_angle, randu, randv, wo, pdf); - if (dot(Ng, *omega_in) > 0.0f) { + if (dot(Ng, *wo) > 0.0f) { *eval = make_spectrum(*pdf * bsdf_toon_get_intensity(max_angle, smooth, angle)); } else { @@ -122,22 +122,22 @@ ccl_device int bsdf_glossy_toon_setup(ccl_private ToonBsdf *bsdf) } ccl_device Spectrum 
bsdf_glossy_toon_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { ccl_private const ToonBsdf *bsdf = (ccl_private const ToonBsdf *)sc; float max_angle = bsdf->size * M_PI_2_F; float smooth = bsdf->smooth * M_PI_2_F; - float cosNI = dot(bsdf->N, omega_in); - float cosNO = dot(bsdf->N, I); + float cosNI = dot(bsdf->N, wi); + float cosNO = dot(bsdf->N, wo); if (cosNI > 0 && cosNO > 0) { /* reflect the view vector */ - float3 R = (2 * cosNO) * bsdf->N - I; - float cosRI = dot(R, omega_in); + float3 R = (2 * cosNI) * bsdf->N - wi; + float cosRO = dot(R, wo); - float angle = safe_acosf(fmaxf(cosRI, 0.0f)); + float angle = safe_acosf(fmaxf(cosRO, 0.0f)); float eval = bsdf_toon_get_intensity(max_angle, smooth, angle); float sample_angle = bsdf_toon_get_sample_angle(max_angle, smooth); @@ -151,32 +151,32 @@ ccl_device Spectrum bsdf_glossy_toon_eval(ccl_private const ShaderClosure *sc, ccl_device int bsdf_glossy_toon_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { ccl_private const ToonBsdf *bsdf = (ccl_private const ToonBsdf *)sc; float max_angle = bsdf->size * M_PI_2_F; float smooth = bsdf->smooth * M_PI_2_F; - float cosNO = dot(bsdf->N, I); + float cosNI = dot(bsdf->N, wi); - if (cosNO > 0) { + if (cosNI > 0) { /* reflect the view vector */ - float3 R = (2 * cosNO) * bsdf->N - I; + float3 R = (2 * cosNI) * bsdf->N - wi; float sample_angle = bsdf_toon_get_sample_angle(max_angle, smooth); float angle = sample_angle * randu; - sample_uniform_cone(R, sample_angle, randu, randv, omega_in, pdf); + sample_uniform_cone(R, sample_angle, randu, randv, wo, pdf); - if (dot(Ng, *omega_in) > 0.0f) { - float cosNI = dot(bsdf->N, *omega_in); + if (dot(Ng, *wo) > 0.0f) { + float cosNO = dot(bsdf->N, *wo); /* make sure the direction we chose is still in the right hemisphere */ - if (cosNI > 0) { + if (cosNO > 0) { *eval = make_spectrum(*pdf * bsdf_toon_get_intensity(max_angle, smooth, angle)); } else { diff --git a/intern/cycles/kernel/closure/bsdf_transparent.h b/intern/cycles/kernel/closure/bsdf_transparent.h index 9306e82b579..9c7607e2e97 100644 --- a/intern/cycles/kernel/closure/bsdf_transparent.h +++ b/intern/cycles/kernel/closure/bsdf_transparent.h @@ -60,8 +60,8 @@ ccl_device void bsdf_transparent_setup(ccl_private ShaderData *sd, } ccl_device Spectrum bsdf_transparent_eval(ccl_private const ShaderClosure *sc, - const float3 I, - const float3 omega_in, + const float3 wi, + const float3 wo, ccl_private float *pdf) { *pdf = 0.0f; @@ -70,15 +70,15 @@ ccl_device Spectrum bsdf_transparent_eval(ccl_private const ShaderClosure *sc, ccl_device int bsdf_transparent_sample(ccl_private const ShaderClosure *sc, float3 Ng, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { // only one direction is possible - *omega_in = -I; + *wo = -wi; *pdf = 1; *eval = one_spectrum(); return LABEL_TRANSMIT | LABEL_TRANSPARENT; diff --git a/intern/cycles/kernel/closure/bsdf_util.h b/intern/cycles/kernel/closure/bsdf_util.h index 3c48b98fed9..ee992375914 100644 --- a/intern/cycles/kernel/closure/bsdf_util.h +++ b/intern/cycles/kernel/closure/bsdf_util.h @@ -89,19 +89,21 @@ ccl_device float schlick_fresnel(float u) return m2 * m2 * m; // pow(m, 5) } -/* Calculate 
the fresnel color which is a blend between white and the F0 color (cspec0) */ -ccl_device_forceinline Spectrum -interpolate_fresnel_color(float3 L, float3 H, float ior, float F0, Spectrum cspec0) +/* Calculate the fresnel color, which is a blend between white and the F0 color */ +ccl_device_forceinline Spectrum interpolate_fresnel_color(float3 L, + float3 H, + float ior, + Spectrum F0) { - /* Calculate the fresnel interpolation factor - * The value from fresnel_dielectric_cos(...) has to be normalized because - * the cspec0 keeps the F0 color - */ - float F0_norm = 1.0f / (1.0f - F0); - float FH = (fresnel_dielectric_cos(dot(L, H), ior) - F0) * F0_norm; + /* Compute the real Fresnel term and remap it from real_F0..1 to F0..1. + * The reason why we use this remapping instead of directly doing the + * Schlick approximation lerp(F0, 1.0, (1.0-cosLH)^5) is that for cases + * with similar IORs (e.g. ice in water), the relative IOR can be close + * enough to 1.0 that the Schlick approximation becomes inaccurate. */ + float real_F = fresnel_dielectric_cos(dot(L, H), ior); + float real_F0 = fresnel_dielectric_cos(1.0f, ior); - /* Blend between white and a specular color with respect to the fresnel */ - return cspec0 * (1.0f - FH) + make_spectrum(FH); + return mix(F0, one_spectrum(), inverse_lerp(real_F0, 1.0f, real_F)); } ccl_device float3 ensure_valid_reflection(float3 Ng, float3 I, float3 N) diff --git a/intern/cycles/kernel/closure/bssrdf.h b/intern/cycles/kernel/closure/bssrdf.h index 7131d9d8f38..f03e3ad507e 100644 --- a/intern/cycles/kernel/closure/bssrdf.h +++ b/intern/cycles/kernel/closure/bssrdf.h @@ -293,7 +293,7 @@ ccl_device int bssrdf_setup(ccl_private ShaderData *sd, /* Ad-hoc weight adjustment to avoid retro-reflection taking away half the * samples from BSSRDF. */ - bsdf->sample_weight *= bsdf_principled_diffuse_retro_reflection_sample_weight(bsdf, sd->I); + bsdf->sample_weight *= bsdf_principled_diffuse_retro_reflection_sample_weight(bsdf, sd->wi); } } diff --git a/intern/cycles/kernel/closure/emissive.h b/intern/cycles/kernel/closure/emissive.h index d896721f77b..36af7377504 100644 --- a/intern/cycles/kernel/closure/emissive.h +++ b/intern/cycles/kernel/closure/emissive.h @@ -36,27 +36,24 @@ ccl_device void emission_setup(ccl_private ShaderData *sd, const Spectrum weight } } -/* return the probability distribution function in the direction I, +/* return the probability distribution function in the direction wi, * given the parameters and the light's surface normal. This MUST match * the PDF computed by sample(). */ -ccl_device float emissive_pdf(const float3 Ng, const float3 I) +ccl_device float emissive_pdf(const float3 Ng, const float3 wi) { - float cosNO = fabsf(dot(Ng, I)); - return (cosNO > 0.0f) ? 1.0f : 0.0f; + float cosNI = fabsf(dot(Ng, wi)); + return (cosNI > 0.0f) ? 
1.0f : 0.0f; } -ccl_device void emissive_sample(const float3 Ng, - float randu, - float randv, - ccl_private float3 *omega_out, - ccl_private float *pdf) +ccl_device void emissive_sample( + const float3 Ng, float randu, float randv, ccl_private float3 *wi, ccl_private float *pdf) { /* todo: not implemented and used yet */ } -ccl_device Spectrum emissive_simple_eval(const float3 Ng, const float3 I) +ccl_device Spectrum emissive_simple_eval(const float3 Ng, const float3 wi) { - float res = emissive_pdf(Ng, I); + float res = emissive_pdf(Ng, wi); return make_spectrum(res); } diff --git a/intern/cycles/kernel/closure/volume.h b/intern/cycles/kernel/closure/volume.h index 9dbb5154457..a9a28c2fa4a 100644 --- a/intern/cycles/kernel/closure/volume.h +++ b/intern/cycles/kernel/closure/volume.h @@ -49,18 +49,18 @@ ccl_device int volume_henyey_greenstein_setup(ccl_private HenyeyGreensteinVolume } ccl_device Spectrum volume_henyey_greenstein_eval_phase(ccl_private const ShaderVolumeClosure *svc, - const float3 I, - float3 omega_in, + const float3 wi, + float3 wo, ccl_private float *pdf) { float g = svc->g; - /* note that I points towards the viewer */ + /* note that wi points towards the viewer */ if (fabsf(g) < 1e-3f) { *pdf = M_1_PI_F * 0.25f; } else { - float cos_theta = dot(-I, omega_in); + float cos_theta = dot(-wi, wo); *pdf = single_peaked_henyey_greenstein(cos_theta, g); } @@ -88,7 +88,7 @@ henyey_greenstrein_sample(float3 D, float g, float randu, float randv, ccl_priva } } - float sin_theta = safe_sqrtf(1.0f - cos_theta * cos_theta); + float sin_theta = sin_from_cos(cos_theta); float phi = M_2PI_F * randv; float3 dir = make_float3(sin_theta * cosf(phi), sin_theta * sinf(phi), cos_theta); @@ -100,17 +100,17 @@ henyey_greenstrein_sample(float3 D, float g, float randu, float randv, ccl_priva } ccl_device int volume_henyey_greenstein_sample(ccl_private const ShaderVolumeClosure *svc, - float3 I, + float3 wi, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { float g = svc->g; - /* note that I points towards the viewer and so is used negated */ - *omega_in = henyey_greenstrein_sample(-I, g, randu, randv, pdf); + /* note that wi points towards the viewer and so is used negated */ + *wo = henyey_greenstrein_sample(-wi, g, randu, randv, pdf); *eval = make_spectrum(*pdf); /* perfect importance sampling */ return LABEL_VOLUME_SCATTER; @@ -120,10 +120,10 @@ ccl_device int volume_henyey_greenstein_sample(ccl_private const ShaderVolumeClo ccl_device Spectrum volume_phase_eval(ccl_private const ShaderData *sd, ccl_private const ShaderVolumeClosure *svc, - float3 omega_in, + float3 wo, ccl_private float *pdf) { - return volume_henyey_greenstein_eval_phase(svc, sd->I, omega_in, pdf); + return volume_henyey_greenstein_eval_phase(svc, sd->wi, wo, pdf); } ccl_device int volume_phase_sample(ccl_private const ShaderData *sd, @@ -131,10 +131,10 @@ ccl_device int volume_phase_sample(ccl_private const ShaderData *sd, float randu, float randv, ccl_private Spectrum *eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { - return volume_henyey_greenstein_sample(svc, sd->I, randu, randv, eval, omega_in, pdf); + return volume_henyey_greenstein_sample(svc, sd->wi, randu, randv, eval, wo, pdf); } /* Volume sampling utilities. 
*/ diff --git a/intern/cycles/kernel/data_template.h b/intern/cycles/kernel/data_template.h index ddc462e02f6..fd25644e56b 100644 --- a/intern/cycles/kernel/data_template.h +++ b/intern/cycles/kernel/data_template.h @@ -10,6 +10,9 @@ #ifndef KERNEL_STRUCT_MEMBER # define KERNEL_STRUCT_MEMBER(parent, type, name) #endif +#ifndef KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE +# define KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE +#endif /* Background. */ @@ -179,9 +182,12 @@ KERNEL_STRUCT_MEMBER(integrator, float, sample_clamp_indirect) KERNEL_STRUCT_MEMBER(integrator, int, use_caustics) /* Sampling pattern. */ KERNEL_STRUCT_MEMBER(integrator, int, sampling_pattern) -KERNEL_STRUCT_MEMBER(integrator, int, tabulated_sobol_sequence_size) -KERNEL_STRUCT_MEMBER(integrator, int, sobol_index_mask) KERNEL_STRUCT_MEMBER(integrator, float, scrambling_distance) +/* Sobol pattern. */ +KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE +KERNEL_STRUCT_MEMBER(integrator, int, tabulated_sobol_sequence_size) +KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE +KERNEL_STRUCT_MEMBER(integrator, int, sobol_index_mask) /* Volume render. */ KERNEL_STRUCT_MEMBER(integrator, int, use_volumes) KERNEL_STRUCT_MEMBER(integrator, int, volume_max_steps) @@ -216,4 +222,5 @@ KERNEL_STRUCT_END(KernelSVMUsage) #undef KERNEL_STRUCT_BEGIN #undef KERNEL_STRUCT_MEMBER +#undef KERNEL_STRUCT_MEMBER_DONT_SPECIALIZE #undef KERNEL_STRUCT_END diff --git a/intern/cycles/kernel/device/cpu/kernel.h b/intern/cycles/kernel/device/cpu/kernel.h index 647b405140a..e43d7375eea 100644 --- a/intern/cycles/kernel/device/cpu/kernel.h +++ b/intern/cycles/kernel/device/cpu/kernel.h @@ -35,15 +35,9 @@ void kernel_global_memory_copy(KernelGlobalsCPU *kg, const char *name, void *mem #define KERNEL_ARCH cpu_sse2 #include "kernel/device/cpu/kernel_arch.h" -#define KERNEL_ARCH cpu_sse3 -#include "kernel/device/cpu/kernel_arch.h" - #define KERNEL_ARCH cpu_sse41 #include "kernel/device/cpu/kernel_arch.h" -#define KERNEL_ARCH cpu_avx -#include "kernel/device/cpu/kernel_arch.h" - #define KERNEL_ARCH cpu_avx2 #include "kernel/device/cpu/kernel_arch.h" diff --git a/intern/cycles/kernel/device/cpu/kernel_avx.cpp b/intern/cycles/kernel/device/cpu/kernel_avx.cpp deleted file mode 100644 index 872ad5ce7e2..00000000000 --- a/intern/cycles/kernel/device/cpu/kernel_avx.cpp +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: Apache-2.0 - * Copyright 2011-2022 Blender Foundation */ - -/* Optimized CPU kernel entry points. This file is compiled with AVX - * optimization flags and nearly all functions inlined, while kernel.cpp - * is compiled without for other CPU's. */ - -#include "util/optimization.h" - -#ifndef WITH_CYCLES_OPTIMIZED_KERNEL_AVX -# define KERNEL_STUB -#else -/* SSE optimization disabled for now on 32 bit, see bug T36316. */ -# if !(defined(__GNUC__) && (defined(i386) || defined(_M_IX86))) -# define __KERNEL_SSE__ -# define __KERNEL_SSE2__ -# define __KERNEL_SSE3__ -# define __KERNEL_SSSE3__ -# define __KERNEL_SSE41__ -# define __KERNEL_AVX__ -# endif -#endif /* WITH_CYCLES_OPTIMIZED_KERNEL_AVX */ - -#include "kernel/device/cpu/kernel.h" -#define KERNEL_ARCH cpu_avx -#include "kernel/device/cpu/kernel_arch_impl.h" diff --git a/intern/cycles/kernel/device/cpu/kernel_sse3.cpp b/intern/cycles/kernel/device/cpu/kernel_sse3.cpp deleted file mode 100644 index eb78b61a723..00000000000 --- a/intern/cycles/kernel/device/cpu/kernel_sse3.cpp +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: Apache-2.0 - * Copyright 2011-2022 Blender Foundation */ - -/* Optimized CPU kernel entry points. 
This file is compiled with SSE3/SSSE3 - * optimization flags and nearly all functions inlined, while kernel.cpp - * is compiled without for other CPU's. */ - -#include "util/optimization.h" - -#ifndef WITH_CYCLES_OPTIMIZED_KERNEL_SSE3 -# define KERNEL_STUB -#else -/* SSE optimization disabled for now on 32 bit, see bug T36316. */ -# if !(defined(__GNUC__) && (defined(i386) || defined(_M_IX86))) -# define __KERNEL_SSE2__ -# define __KERNEL_SSE3__ -# define __KERNEL_SSSE3__ -# endif -#endif /* WITH_CYCLES_OPTIMIZED_KERNEL_SSE3 */ - -#include "kernel/device/cpu/kernel.h" -#define KERNEL_ARCH cpu_sse3 -#include "kernel/device/cpu/kernel_arch_impl.h" diff --git a/intern/cycles/kernel/device/metal/context_begin.h b/intern/cycles/kernel/device/metal/context_begin.h index 4bde9be455a..60d7e36589c 100644 --- a/intern/cycles/kernel/device/metal/context_begin.h +++ b/intern/cycles/kernel/device/metal/context_begin.h @@ -34,7 +34,7 @@ class MetalKernelContext { kernel_assert(0); return 0; } - + #ifdef __KERNEL_METAL_INTEL__ template inline __attribute__((__always_inline__)) @@ -55,7 +55,7 @@ class MetalKernelContext { } } #endif - + // texture2d template<> inline __attribute__((__always_inline__)) diff --git a/intern/cycles/kernel/device/oneapi/compat.h b/intern/cycles/kernel/device/oneapi/compat.h index 0691c01b3b5..73384b7be59 100644 --- a/intern/cycles/kernel/device/oneapi/compat.h +++ b/intern/cycles/kernel/device/oneapi/compat.h @@ -195,7 +195,15 @@ using sycl::half; #define fmodf(x, y) sycl::fmod((x), (y)) #define lgammaf(x) sycl::lgamma((x)) -#define cosf(x) sycl::native::cos(((float)(x))) +/* `sycl::native::cos` precision is not sufficient and `-ffast-math` lets + * the current DPC++ compiler overload `sycl::cos` with it. + * We work around this issue by directly calling the SPIRV implementation which + * provides greater precision. */ +#if defined(__SYCL_DEVICE_ONLY__) && defined(__SPIR__) +# define cosf(x) __spirv_ocl_cos(((float)(x))) +#else +# define cosf(x) sycl::cos(((float)(x))) +#endif #define sinf(x) sycl::native::sin(((float)(x))) #define powf(x, y) sycl::native::powr(((float)(x)), ((float)(y))) #define tanf(x) sycl::native::tan(((float)(x))) diff --git a/intern/cycles/kernel/film/denoising_passes.h b/intern/cycles/kernel/film/denoising_passes.h index 0a32df19a3e..b81e65c2ab2 100644 --- a/intern/cycles/kernel/film/denoising_passes.h +++ b/intern/cycles/kernel/film/denoising_passes.h @@ -58,23 +58,7 @@ ccl_device_forceinline void film_write_denoising_features_surface(KernelGlobals normal += sc->N * sc->sample_weight; sum_weight += sc->sample_weight; - Spectrum closure_albedo = sc->weight; - /* Closures that include a Fresnel term typically have weights close to 1 even though their - * actual contribution is significantly lower. - * To account for this, we scale their weight by the average fresnel factor (the same is also - * done for the sample weight in the BSDF setup, so we don't need to scale that here). 
*/ - if (CLOSURE_IS_BSDF_MICROFACET_FRESNEL(sc->type)) { - ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)sc; - closure_albedo *= bsdf->extra->fresnel_color; - } - else if (sc->type == CLOSURE_BSDF_PRINCIPLED_SHEEN_ID) { - ccl_private PrincipledSheenBsdf *bsdf = (ccl_private PrincipledSheenBsdf *)sc; - closure_albedo *= bsdf->avg_value; - } - else if (sc->type == CLOSURE_BSDF_HAIR_PRINCIPLED_ID) { - closure_albedo *= bsdf_principled_hair_albedo(sc); - } - else if (sc->type == CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID) { + if (sc->type == CLOSURE_BSDF_PRINCIPLED_DIFFUSE_ID) { /* BSSRDF already accounts for weight, retro-reflection would double up. */ ccl_private const PrincipledDiffuseBsdf *bsdf = (ccl_private const PrincipledDiffuseBsdf *) sc; @@ -83,6 +67,7 @@ ccl_device_forceinline void film_write_denoising_features_surface(KernelGlobals } } + Spectrum closure_albedo = bsdf_albedo(sd, sc); if (bsdf_get_specular_roughness_squared(sc) > sqr(0.075f)) { diffuse_albedo += closure_albedo; sum_nonspecular_weight += sc->sample_weight; diff --git a/intern/cycles/kernel/geom/curve.h b/intern/cycles/kernel/geom/curve.h index e243adfde21..307da6e8ac8 100644 --- a/intern/cycles/kernel/geom/curve.h +++ b/intern/cycles/kernel/geom/curve.h @@ -252,7 +252,7 @@ ccl_device float3 curve_tangent_normal(KernelGlobals kg, ccl_private const Shade if (sd->type & PRIMITIVE_CURVE) { - tgN = -(-sd->I - sd->dPdu * (dot(sd->dPdu, -sd->I) / len_squared(sd->dPdu))); + tgN = -(-sd->wi - sd->dPdu * (dot(sd->dPdu, -sd->wi) / len_squared(sd->dPdu))); tgN = normalize(tgN); /* need to find suitable scaled gd for corrected normal */ diff --git a/intern/cycles/kernel/geom/curve_intersect.h b/intern/cycles/kernel/geom/curve_intersect.h index 97644aacaa8..e8ce0c58c1e 100644 --- a/intern/cycles/kernel/geom/curve_intersect.h +++ b/intern/cycles/kernel/geom/curve_intersect.h @@ -720,7 +720,7 @@ ccl_device_inline void curve_shader_setup(KernelGlobals kg, const float3 tangent = normalize(dPdu); const float3 bitangent = normalize(cross(tangent, -D)); const float sine = sd->v; - const float cosine = safe_sqrtf(1.0f - sine * sine); + const float cosine = cos_from_sin(sine); sd->N = normalize(sine * bitangent - cosine * normalize(cross(tangent, bitangent))); # if 0 @@ -738,7 +738,7 @@ ccl_device_inline void curve_shader_setup(KernelGlobals kg, /* NOTE: It is possible that P will be the same as P_inside (precision issues, or very small * radius). In this case use the view direction to approximate the normal. */ const float3 P_inside = float4_to_float3(catmull_rom_basis_eval(P_curve, sd->u)); - const float3 N = (!isequal(P, P_inside)) ? normalize(P - P_inside) : -sd->I; + const float3 N = (!isequal(P, P_inside)) ? normalize(P - P_inside) : -sd->wi; sd->N = N; sd->v = 0.0f; @@ -757,7 +757,7 @@ ccl_device_inline void curve_shader_setup(KernelGlobals kg, } sd->P = P; - sd->Ng = (sd->type & PRIMITIVE_CURVE_RIBBON) ? sd->I : sd->N; + sd->Ng = (sd->type & PRIMITIVE_CURVE_RIBBON) ? sd->wi : sd->N; sd->dPdv = cross(sd->dPdu, sd->Ng); sd->shader = kernel_data_fetch(curves, sd->prim).shader_id; } diff --git a/intern/cycles/kernel/geom/shader_data.h b/intern/cycles/kernel/geom/shader_data.h index b67d19365a3..eb05121b7cd 100644 --- a/intern/cycles/kernel/geom/shader_data.h +++ b/intern/cycles/kernel/geom/shader_data.h @@ -55,7 +55,7 @@ ccl_device_inline void shader_setup_from_ray(KernelGlobals kg, #endif /* Read ray data into shader globals. 
*/ - sd->I = -ray->D; + sd->wi = -ray->D; #ifdef __HAIR__ if (sd->type & PRIMITIVE_CURVE) { @@ -111,7 +111,7 @@ ccl_device_inline void shader_setup_from_ray(KernelGlobals kg, sd->flag = kernel_data_fetch(shaders, (sd->shader & SHADER_MASK)).flags; /* backfacing test */ - bool backfacing = (dot(sd->Ng, sd->I) < 0.0f); + bool backfacing = (dot(sd->Ng, sd->wi) < 0.0f); if (backfacing) { sd->flag |= SD_BACKFACING; @@ -152,7 +152,7 @@ ccl_device_inline void shader_setup_from_sample(KernelGlobals kg, sd->P = P; sd->N = Ng; sd->Ng = Ng; - sd->I = I; + sd->wi = I; sd->shader = shader; if (prim != PRIM_NONE) sd->type = PRIMITIVE_TRIANGLE; @@ -185,7 +185,7 @@ ccl_device_inline void shader_setup_from_sample(KernelGlobals kg, object_position_transform_auto(kg, sd, &sd->P); object_normal_transform_auto(kg, sd, &sd->Ng); sd->N = sd->Ng; - object_dir_transform_auto(kg, sd, &sd->I); + object_dir_transform_auto(kg, sd, &sd->wi); } if (sd->type == PRIMITIVE_TRIANGLE) { @@ -227,7 +227,7 @@ ccl_device_inline void shader_setup_from_sample(KernelGlobals kg, /* backfacing test */ if (sd->prim != PRIM_NONE) { - bool backfacing = (dot(sd->Ng, sd->I) < 0.0f); + bool backfacing = (dot(sd->Ng, sd->wi) < 0.0f); if (backfacing) { sd->flag |= SD_BACKFACING; @@ -341,7 +341,7 @@ ccl_device void shader_setup_from_curve(KernelGlobals kg, } /* No view direction, normals or bitangent. */ - sd->I = zero_float3(); + sd->wi = zero_float3(); sd->N = zero_float3(); sd->Ng = zero_float3(); #ifdef __DPDU__ @@ -372,7 +372,7 @@ ccl_device_inline void shader_setup_from_background(KernelGlobals kg, sd->P = ray_D; sd->N = -ray_D; sd->Ng = -ray_D; - sd->I = -ray_D; + sd->wi = -ray_D; sd->shader = kernel_data.background.surface_shader; sd->flag = kernel_data_fetch(shaders, (sd->shader & SHADER_MASK)).flags; sd->object_flag = 0; @@ -412,7 +412,7 @@ ccl_device_inline void shader_setup_from_volume(KernelGlobals kg, sd->P = ray->P + ray->D * ray->tmin; sd->N = -ray->D; sd->Ng = -ray->D; - sd->I = -ray->D; + sd->wi = -ray->D; sd->shader = SHADER_NONE; sd->flag = 0; sd->object_flag = 0; diff --git a/intern/cycles/kernel/integrator/guiding.h b/intern/cycles/kernel/integrator/guiding.h index 634bba2a9b4..93c80539140 100644 --- a/intern/cycles/kernel/integrator/guiding.h +++ b/intern/cycles/kernel/integrator/guiding.h @@ -44,7 +44,7 @@ ccl_device_forceinline void guiding_record_surface_segment(KernelGlobals kg, state->guiding.path_segment = kg->opgl_path_segment_storage->NextSegment(); openpgl::cpp::SetPosition(state->guiding.path_segment, guiding_point3f(sd->P)); - openpgl::cpp::SetDirectionOut(state->guiding.path_segment, guiding_vec3f(sd->I)); + openpgl::cpp::SetDirectionOut(state->guiding.path_segment, guiding_vec3f(sd->wi)); openpgl::cpp::SetVolumeScatter(state->guiding.path_segment, false); openpgl::cpp::SetScatteredContribution(state->guiding.path_segment, zero); openpgl::cpp::SetDirectContribution(state->guiding.path_segment, zero); @@ -60,7 +60,7 @@ ccl_device_forceinline void guiding_record_surface_bounce(KernelGlobals kg, const Spectrum weight, const float pdf, const float3 N, - const float3 omega_in, + const float3 wo, const float2 roughness, const float eta) { @@ -78,7 +78,7 @@ ccl_device_forceinline void guiding_record_surface_bounce(KernelGlobals kg, openpgl::cpp::SetTransmittanceWeight(state->guiding.path_segment, guiding_vec3f(one_float3())); openpgl::cpp::SetVolumeScatter(state->guiding.path_segment, false); openpgl::cpp::SetNormal(state->guiding.path_segment, guiding_vec3f(normal)); - 
openpgl::cpp::SetDirectionIn(state->guiding.path_segment, guiding_vec3f(omega_in)); + openpgl::cpp::SetDirectionIn(state->guiding.path_segment, guiding_vec3f(wo)); openpgl::cpp::SetPDFDirectionIn(state->guiding.path_segment, pdf); openpgl::cpp::SetScatteringWeight(state->guiding.path_segment, guiding_vec3f(weight_rgb)); openpgl::cpp::SetIsDelta(state->guiding.path_segment, is_delta); @@ -113,7 +113,7 @@ ccl_device_forceinline void guiding_record_surface_emission(KernelGlobals kg, ccl_device_forceinline void guiding_record_bssrdf_segment(KernelGlobals kg, IntegratorState state, const float3 P, - const float3 I) + const float3 wi) { #if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 1 if (!kernel_data.integrator.train_guiding) { @@ -124,7 +124,7 @@ ccl_device_forceinline void guiding_record_bssrdf_segment(KernelGlobals kg, state->guiding.path_segment = kg->opgl_path_segment_storage->NextSegment(); openpgl::cpp::SetPosition(state->guiding.path_segment, guiding_point3f(P)); - openpgl::cpp::SetDirectionOut(state->guiding.path_segment, guiding_vec3f(I)); + openpgl::cpp::SetDirectionOut(state->guiding.path_segment, guiding_vec3f(wi)); openpgl::cpp::SetVolumeScatter(state->guiding.path_segment, true); openpgl::cpp::SetScatteredContribution(state->guiding.path_segment, zero); openpgl::cpp::SetDirectContribution(state->guiding.path_segment, zero); @@ -166,7 +166,7 @@ ccl_device_forceinline void guiding_record_bssrdf_bounce(KernelGlobals kg, IntegratorState state, const float pdf, const float3 N, - const float3 omega_in, + const float3 wo, const Spectrum weight, const Spectrum albedo) { @@ -181,7 +181,7 @@ ccl_device_forceinline void guiding_record_bssrdf_bounce(KernelGlobals kg, openpgl::cpp::SetVolumeScatter(state->guiding.path_segment, false); openpgl::cpp::SetNormal(state->guiding.path_segment, guiding_vec3f(normal)); - openpgl::cpp::SetDirectionIn(state->guiding.path_segment, guiding_vec3f(omega_in)); + openpgl::cpp::SetDirectionIn(state->guiding.path_segment, guiding_vec3f(wo)); openpgl::cpp::SetPDFDirectionIn(state->guiding.path_segment, pdf); openpgl::cpp::SetTransmittanceWeight(state->guiding.path_segment, guiding_vec3f(weight_rgb)); #endif @@ -222,7 +222,7 @@ ccl_device_forceinline void guiding_record_volume_bounce(KernelGlobals kg, ccl_private const ShaderData *sd, const Spectrum weight, const float pdf, - const float3 omega_in, + const float3 wo, const float roughness) { #if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 4 @@ -237,7 +237,7 @@ ccl_device_forceinline void guiding_record_volume_bounce(KernelGlobals kg, openpgl::cpp::SetVolumeScatter(state->guiding.path_segment, true); openpgl::cpp::SetTransmittanceWeight(state->guiding.path_segment, guiding_vec3f(one_float3())); openpgl::cpp::SetNormal(state->guiding.path_segment, guiding_vec3f(normal)); - openpgl::cpp::SetDirectionIn(state->guiding.path_segment, guiding_vec3f(omega_in)); + openpgl::cpp::SetDirectionIn(state->guiding.path_segment, guiding_vec3f(wo)); openpgl::cpp::SetPDFDirectionIn(state->guiding.path_segment, pdf); openpgl::cpp::SetScatteringWeight(state->guiding.path_segment, guiding_vec3f(weight_rgb)); openpgl::cpp::SetIsDelta(state->guiding.path_segment, false); @@ -467,13 +467,13 @@ ccl_device_forceinline bool guiding_bsdf_init(KernelGlobals kg, ccl_device_forceinline float guiding_bsdf_sample(KernelGlobals kg, IntegratorState state, const float2 rand_bsdf, - ccl_private float3 *omega_in) + ccl_private float3 *wo) { #if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 4 - pgl_vec3f wo; + pgl_vec3f pgl_wo; 
const pgl_point2f rand = openpgl::cpp::Point2(rand_bsdf.x, rand_bsdf.y); - const float pdf = kg->opgl_surface_sampling_distribution->SamplePDF(rand, wo); - *omega_in = make_float3(wo.x, wo.y, wo.z); + const float pdf = kg->opgl_surface_sampling_distribution->SamplePDF(rand, pgl_wo); + *wo = make_float3(pgl_wo.x, pgl_wo.y, pgl_wo.z); return pdf; #else return 0.0f; @@ -482,10 +482,10 @@ ccl_device_forceinline float guiding_bsdf_sample(KernelGlobals kg, ccl_device_forceinline float guiding_bsdf_pdf(KernelGlobals kg, IntegratorState state, - const float3 omega_in) + const float3 wo) { #if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 4 - return kg->opgl_surface_sampling_distribution->PDF(guiding_vec3f(omega_in)); + return kg->opgl_surface_sampling_distribution->PDF(guiding_vec3f(wo)); #else return 0.0f; #endif @@ -520,13 +520,13 @@ ccl_device_forceinline bool guiding_phase_init(KernelGlobals kg, ccl_device_forceinline float guiding_phase_sample(KernelGlobals kg, IntegratorState state, const float2 rand_phase, - ccl_private float3 *omega_in) + ccl_private float3 *wo) { #if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 4 - pgl_vec3f wo; + pgl_vec3f pgl_wo; const pgl_point2f rand = openpgl::cpp::Point2(rand_phase.x, rand_phase.y); - const float pdf = kg->opgl_volume_sampling_distribution->SamplePDF(rand, wo); - *omega_in = make_float3(wo.x, wo.y, wo.z); + const float pdf = kg->opgl_volume_sampling_distribution->SamplePDF(rand, pgl_wo); + *wo = make_float3(pgl_wo.x, pgl_wo.y, pgl_wo.z); return pdf; #else return 0.0f; @@ -535,10 +535,10 @@ ccl_device_forceinline float guiding_phase_sample(KernelGlobals kg, ccl_device_forceinline float guiding_phase_pdf(KernelGlobals kg, IntegratorState state, - const float3 omega_in) + const float3 wo) { #if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 4 - return kg->opgl_volume_sampling_distribution->PDF(guiding_vec3f(omega_in)); + return kg->opgl_volume_sampling_distribution->PDF(guiding_vec3f(wo)); #else return 0.0f; #endif diff --git a/intern/cycles/kernel/integrator/mnee.h b/intern/cycles/kernel/integrator/mnee.h index debbce497dc..f50e3bc6e0c 100644 --- a/intern/cycles/kernel/integrator/mnee.h +++ b/intern/cycles/kernel/integrator/mnee.h @@ -607,24 +607,22 @@ ccl_device_forceinline Spectrum mnee_eval_bsdf_contribution(ccl_private ShaderCl { ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)closure; - float cosNO = dot(bsdf->N, wi); - float cosNI = dot(bsdf->N, wo); + float cosNI = dot(bsdf->N, wi); + float cosNO = dot(bsdf->N, wo); float3 Ht = normalize(-(bsdf->ior * wo + wi)); - float cosHO = dot(Ht, wi); + float cosHI = dot(Ht, wi); float alpha2 = bsdf->alpha_x * bsdf->alpha_y; float cosThetaM = dot(bsdf->N, Ht); + /* Now calculate G1(i, m) and G1(o, m). */ float G; if (bsdf->type == CLOSURE_BSDF_MICROFACET_BECKMANN_REFRACTION_ID) { - /* Eq. 26, 27: now calculate G1(i,m) and G1(o,m). */ - G = bsdf_beckmann_G1(bsdf->alpha_x, cosNO) * bsdf_beckmann_G1(bsdf->alpha_x, cosNI); + G = bsdf_G(alpha2, cosNI, cosNO); } else { /* bsdf->type == CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID assumed */ - /* Eq. 34: now calculate G1(i,m) and G1(o,m). 
*/ - G = (2.f / (1.f + safe_sqrtf(1.f + alpha2 * (1.f - cosNO * cosNO) / (cosNO * cosNO)))) * - (2.f / (1.f + safe_sqrtf(1.f + alpha2 * (1.f - cosNI * cosNI) / (cosNI * cosNI)))); + G = bsdf_G(alpha2, cosNI, cosNO); } /* @@ -635,7 +633,7 @@ ccl_device_forceinline Spectrum mnee_eval_bsdf_contribution(ccl_private ShaderCl * contribution = bsdf_do * |do/dh| * |n.wo / n.h| / pdf_dh * = (1 - F) * G * |h.wi / (n.wi * n.h^2)| */ - return bsdf->weight * G * fabsf(cosHO / (cosNO * sqr(cosThetaM))); + return bsdf->weight * G * fabsf(cosHI / (cosNI * sqr(cosThetaM))); } /* Compute transfer matrix determinant |T1| = |dx1/dxn| (and |dh/dx| in the process) */ @@ -706,9 +704,9 @@ ccl_device_forceinline bool mnee_compute_transfer_matrix(ccl_private const Shade float ilo = -eta * ilh; float cos_theta = dot(wo, m.n); - float sin_theta = safe_sqrtf(1.f - sqr(cos_theta)); + float sin_theta = sin_from_cos(cos_theta); float cos_phi = dot(wo, s); - float sin_phi = safe_sqrtf(1.f - sqr(cos_phi)); + float sin_phi = sin_from_cos(cos_phi); /* Wo = (cos_phi * sin_theta) * s + (sin_phi * sin_theta) * t + cos_theta * n. */ float3 dH_dtheta = ilo * (cos_theta * (cos_phi * s + sin_phi * t) - sin_theta * m.n); diff --git a/intern/cycles/kernel/integrator/shade_surface.h b/intern/cycles/kernel/integrator/shade_surface.h index 3410195cd19..dbeb5f91ce7 100644 --- a/intern/cycles/kernel/integrator/shade_surface.h +++ b/intern/cycles/kernel/integrator/shade_surface.h @@ -235,8 +235,6 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg, light_sample_to_surface_shadow_ray(kg, sd, &ls, &ray); } - const bool is_light = light_sample_is_light(&ls); - /* Branch off shadow kernel. */ IntegratorShadowState shadow_state = integrator_shadow_path_init( kg, state, DEVICE_KERNEL_INTEGRATOR_INTERSECT_SHADOW, false); @@ -264,7 +262,6 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg, /* Copy state from main path to shadow path. */ uint32_t shadow_flag = INTEGRATOR_STATE(state, path, flag); - shadow_flag |= (is_light) ? PATH_RAY_SHADOW_FOR_LIGHT : 0; const Spectrum unlit_throughput = INTEGRATOR_STATE(state, path, throughput); const Spectrum throughput = unlit_throughput * bsdf_eval_sum(&bsdf_eval); @@ -364,7 +361,7 @@ ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce( /* BSDF closure, sample direction. */ float bsdf_pdf = 0.0f, unguided_bsdf_pdf = 0.0f; BsdfEval bsdf_eval ccl_optional_struct_init; - float3 bsdf_omega_in ccl_optional_struct_init; + float3 bsdf_wo ccl_optional_struct_init; int label; float2 bsdf_sampled_roughness = make_float2(1.0f, 1.0f); @@ -378,7 +375,7 @@ ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce( sc, rand_bsdf, &bsdf_eval, - &bsdf_omega_in, + &bsdf_wo, &bsdf_pdf, &unguided_bsdf_pdf, &bsdf_sampled_roughness, @@ -398,7 +395,7 @@ ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce( sc, rand_bsdf, &bsdf_eval, - &bsdf_omega_in, + &bsdf_wo, &bsdf_pdf, &bsdf_sampled_roughness, &bsdf_eta); @@ -416,7 +413,7 @@ ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce( } else { /* Setup ray with changed origin and direction. 
*/ - const float3 D = normalize(bsdf_omega_in); + const float3 D = normalize(bsdf_wo); INTEGRATOR_STATE_WRITE(state, ray, P) = integrate_surface_ray_offset(kg, sd, sd->P, D); INTEGRATOR_STATE_WRITE(state, ray, D) = D; INTEGRATOR_STATE_WRITE(state, ray, tmin) = 0.0f; @@ -455,7 +452,7 @@ ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce( bsdf_weight, bsdf_pdf, sd->N, - normalize(bsdf_omega_in), + normalize(bsdf_wo), bsdf_sampled_roughness, bsdf_eta); diff --git a/intern/cycles/kernel/integrator/shade_volume.h b/intern/cycles/kernel/integrator/shade_volume.h index 624aeb5edee..5b460a2fe7a 100644 --- a/intern/cycles/kernel/integrator/shade_volume.h +++ b/intern/cycles/kernel/integrator/shade_volume.h @@ -821,7 +821,6 @@ ccl_device_forceinline void integrate_volume_direct_light( /* Create shadow ray. */ Ray ray ccl_optional_struct_init; light_sample_to_volume_shadow_ray(kg, sd, &ls, P, &ray); - const bool is_light = light_sample_is_light(&ls); /* Branch off shadow kernel. */ IntegratorShadowState shadow_state = integrator_shadow_path_init( @@ -838,7 +837,6 @@ ccl_device_forceinline void integrate_volume_direct_light( const uint16_t bounce = INTEGRATOR_STATE(state, path, bounce); const uint16_t transparent_bounce = INTEGRATOR_STATE(state, path, transparent_bounce); uint32_t shadow_flag = INTEGRATOR_STATE(state, path, flag); - shadow_flag |= (is_light) ? PATH_RAY_SHADOW_FOR_LIGHT : 0; const Spectrum throughput_phase = throughput * bsdf_eval_sum(&phase_eval); if (kernel_data.kernel_features & KERNEL_FEATURE_LIGHT_PASSES) { @@ -912,7 +910,7 @@ ccl_device_forceinline bool integrate_volume_phase_scatter( /* Phase closure, sample direction. */ float phase_pdf = 0.0f, unguided_phase_pdf = 0.0f; BsdfEval phase_eval ccl_optional_struct_init; - float3 phase_omega_in ccl_optional_struct_init; + float3 phase_wo ccl_optional_struct_init; float sampled_roughness = 1.0f; int label; @@ -924,7 +922,7 @@ ccl_device_forceinline bool integrate_volume_phase_scatter( svc, rand_phase, &phase_eval, - &phase_omega_in, + &phase_wo, &phase_pdf, &unguided_phase_pdf, &sampled_roughness); @@ -938,15 +936,8 @@ ccl_device_forceinline bool integrate_volume_phase_scatter( else # endif { - label = volume_shader_phase_sample(kg, - sd, - phases, - svc, - rand_phase, - &phase_eval, - &phase_omega_in, - &phase_pdf, - &sampled_roughness); + label = volume_shader_phase_sample( + kg, sd, phases, svc, rand_phase, &phase_eval, &phase_wo, &phase_pdf, &sampled_roughness); if (phase_pdf == 0.0f || bsdf_eval_is_zero(&phase_eval)) { return false; @@ -957,7 +948,7 @@ ccl_device_forceinline bool integrate_volume_phase_scatter( /* Setup ray. */ INTEGRATOR_STATE_WRITE(state, ray, P) = sd->P; - INTEGRATOR_STATE_WRITE(state, ray, D) = normalize(phase_omega_in); + INTEGRATOR_STATE_WRITE(state, ray, D) = normalize(phase_wo); INTEGRATOR_STATE_WRITE(state, ray, tmin) = 0.0f; INTEGRATOR_STATE_WRITE(state, ray, tmax) = FLT_MAX; # ifdef __RAY_DIFFERENTIALS__ @@ -971,7 +962,7 @@ ccl_device_forceinline bool integrate_volume_phase_scatter( /* Add phase function sampling data to the path segment. */ guiding_record_volume_bounce( - kg, state, sd, phase_weight, phase_pdf, normalize(phase_omega_in), sampled_roughness); + kg, state, sd, phase_weight, phase_pdf, normalize(phase_wo), sampled_roughness); /* Update throughput. 
*/ const Spectrum throughput = INTEGRATOR_STATE(state, path, throughput); @@ -1076,7 +1067,7 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg, float3 transmittance_weight = spectrum_to_rgb( safe_divide_color(result.indirect_throughput, initial_throughput)); guiding_record_volume_transmission(kg, state, transmittance_weight); - guiding_record_volume_segment(kg, state, direct_P, sd.I); + guiding_record_volume_segment(kg, state, direct_P, sd.wi); guiding_generated_new_segment = true; unlit_throughput = result.indirect_throughput / continuation_probability; rand_phase_guiding = path_state_rng_1D(kg, &rng_state, PRNG_VOLUME_PHASE_GUIDING_DISTANCE); @@ -1139,7 +1130,7 @@ ccl_device VolumeIntegrateEvent volume_integrate(KernelGlobals kg, # if defined(__PATH_GUIDING__) # if PATH_GUIDING_LEVEL >= 1 if (!guiding_generated_new_segment) { - guiding_record_volume_segment(kg, state, sd.P, sd.I); + guiding_record_volume_segment(kg, state, sd.P, sd.wi); } # endif # if PATH_GUIDING_LEVEL >= 4 diff --git a/intern/cycles/kernel/integrator/subsurface_random_walk.h b/intern/cycles/kernel/integrator/subsurface_random_walk.h index fdcb66c32f5..70e2920349f 100644 --- a/intern/cycles/kernel/integrator/subsurface_random_walk.h +++ b/intern/cycles/kernel/integrator/subsurface_random_walk.h @@ -136,7 +136,7 @@ ccl_device_forceinline float diffusion_length_dwivedi(float alpha) ccl_device_forceinline float3 direction_from_cosine(float3 D, float cos_theta, float randv) { - float sin_theta = safe_sqrtf(1.0f - cos_theta * cos_theta); + float sin_theta = sin_from_cos(cos_theta); float phi = M_2PI_F * randv; float3 dir = make_float3(sin_theta * cosf(phi), sin_theta * sinf(phi), cos_theta); diff --git a/intern/cycles/kernel/integrator/surface_shader.h b/intern/cycles/kernel/integrator/surface_shader.h index 5e47a34f77e..4255b512b94 100644 --- a/intern/cycles/kernel/integrator/surface_shader.h +++ b/intern/cycles/kernel/integrator/surface_shader.h @@ -174,14 +174,14 @@ ccl_device_inline void surface_shader_prepare_closures(KernelGlobals kg, #if 0 ccl_device_inline void surface_shader_validate_bsdf_sample(const KernelGlobals kg, const ShaderClosure *sc, - const float3 omega_in, + const float3 wo, const int org_label, const float2 org_roughness, const float org_eta) { /* Validate the the bsdf_label and bsdf_roughness_eta functions * by estimating the values after a bsdf sample. 
*/ - const int comp_label = bsdf_label(kg, sc, omega_in); + const int comp_label = bsdf_label(kg, sc, wo); kernel_assert(org_label == comp_label); float2 comp_roughness; @@ -218,7 +218,7 @@ ccl_device_forceinline bool _surface_shader_exclude(ClosureType type, uint light ccl_device_inline float _surface_shader_bsdf_eval_mis(KernelGlobals kg, ccl_private ShaderData *sd, - const float3 omega_in, + const float3 wo, ccl_private const ShaderClosure *skip_sc, ccl_private BsdfEval *result_eval, float sum_pdf, @@ -237,7 +237,7 @@ ccl_device_inline float _surface_shader_bsdf_eval_mis(KernelGlobals kg, if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) { if (CLOSURE_IS_BSDF(sc->type) && !_surface_shader_exclude(sc->type, light_shader_flags)) { float bsdf_pdf = 0.0f; - Spectrum eval = bsdf_eval(kg, sd, sc, omega_in, &bsdf_pdf); + Spectrum eval = bsdf_eval(kg, sd, sc, wo, &bsdf_pdf); if (bsdf_pdf != 0.0f) { bsdf_eval_accum(result_eval, sc->type, eval * sc->weight); @@ -254,7 +254,7 @@ ccl_device_inline float _surface_shader_bsdf_eval_mis(KernelGlobals kg, ccl_device_inline float surface_shader_bsdf_eval_pdfs(const KernelGlobals kg, ccl_private ShaderData *sd, - const float3 omega_in, + const float3 wo, ccl_private BsdfEval *result_eval, ccl_private float *pdfs, const uint light_shader_flags) @@ -270,7 +270,7 @@ ccl_device_inline float surface_shader_bsdf_eval_pdfs(const KernelGlobals kg, if (CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) { if (CLOSURE_IS_BSDF(sc->type) && !_surface_shader_exclude(sc->type, light_shader_flags)) { float bsdf_pdf = 0.0f; - Spectrum eval = bsdf_eval(kg, sd, sc, omega_in, &bsdf_pdf); + Spectrum eval = bsdf_eval(kg, sd, sc, wo, &bsdf_pdf); kernel_assert(bsdf_pdf >= 0.0f); if (bsdf_pdf != 0.0f) { bsdf_eval_accum(result_eval, sc->type, eval * sc->weight); @@ -310,20 +310,20 @@ ccl_device_inline surface_shader_bsdf_eval(KernelGlobals kg, IntegratorState state, ccl_private ShaderData *sd, - const float3 omega_in, + const float3 wo, ccl_private BsdfEval *bsdf_eval, const uint light_shader_flags) { bsdf_eval_init(bsdf_eval, CLOSURE_NONE_ID, zero_spectrum()); float pdf = _surface_shader_bsdf_eval_mis( - kg, sd, omega_in, NULL, bsdf_eval, 0.0f, 0.0f, light_shader_flags); + kg, sd, wo, NULL, bsdf_eval, 0.0f, 0.0f, light_shader_flags); #if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 4 if (state->guiding.use_surface_guiding) { const float guiding_sampling_prob = state->guiding.surface_guiding_sampling_prob; const float bssrdf_sampling_prob = state->guiding.bssrdf_sampling_prob; - const float guide_pdf = guiding_bsdf_pdf(kg, state, omega_in); + const float guide_pdf = guiding_bsdf_pdf(kg, state, wo); pdf = (guiding_sampling_prob * guide_pdf * (1.0f - bssrdf_sampling_prob)) + (1.0f - guiding_sampling_prob) * pdf; } @@ -407,7 +407,7 @@ ccl_device int surface_shader_bsdf_guided_sample_closure(KernelGlobals kg, ccl_private const ShaderClosure *sc, const float2 rand_bsdf, ccl_private BsdfEval *bsdf_eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *bsdf_pdf, ccl_private float *unguided_bsdf_pdf, ccl_private float2 *sampled_rougness, @@ -443,14 +443,14 @@ ccl_device int surface_shader_bsdf_guided_sample_closure(KernelGlobals kg, if (sample_guiding) { /* Sample guiding distribution. 
*/ - guide_pdf = guiding_bsdf_sample(kg, state, rand_bsdf, omega_in); + guide_pdf = guiding_bsdf_sample(kg, state, rand_bsdf, wo); *bsdf_pdf = 0.0f; if (guide_pdf != 0.0f) { float unguided_bsdf_pdfs[MAX_CLOSURE]; *unguided_bsdf_pdf = surface_shader_bsdf_eval_pdfs( - kg, sd, *omega_in, bsdf_eval, unguided_bsdf_pdfs, 0); + kg, sd, *wo, bsdf_eval, unguided_bsdf_pdfs, 0); *bsdf_pdf = (guiding_sampling_prob * guide_pdf * (1.0f - bssrdf_sampling_prob)) + ((1.0f - guiding_sampling_prob) * (*unguided_bsdf_pdf)); float sum_pdfs = 0.0f; @@ -471,7 +471,7 @@ ccl_device int surface_shader_bsdf_guided_sample_closure(KernelGlobals kg, * the sum of all unguided_bsdf_pdfs is just < 1.0f. */ idx = (rand_bsdf_guiding > sum_pdfs) ? sd->num_closure - 1 : idx; - label = bsdf_label(kg, &sd->closure[idx], *omega_in); + label = bsdf_label(kg, &sd->closure[idx], *wo); } } @@ -483,19 +483,11 @@ ccl_device int surface_shader_bsdf_guided_sample_closure(KernelGlobals kg, else { /* Sample BSDF. */ *bsdf_pdf = 0.0f; - label = bsdf_sample(kg, - sd, - sc, - rand_bsdf.x, - rand_bsdf.y, - &eval, - omega_in, - unguided_bsdf_pdf, - sampled_rougness, - eta); + label = bsdf_sample( + kg, sd, sc, rand_bsdf.x, rand_bsdf.y, &eval, wo, unguided_bsdf_pdf, sampled_rougness, eta); # if 0 if (*unguided_bsdf_pdf > 0.0f) { - surface_shader_validate_bsdf_sample(kg, sc, *omega_in, label, sampled_roughness, eta); + surface_shader_validate_bsdf_sample(kg, sc, *wo, label, sampled_roughness, eta); } # endif @@ -507,13 +499,13 @@ ccl_device int surface_shader_bsdf_guided_sample_closure(KernelGlobals kg, if (sd->num_closure > 1) { float sweight = sc->sample_weight; *unguided_bsdf_pdf = _surface_shader_bsdf_eval_mis( - kg, sd, *omega_in, sc, bsdf_eval, (*unguided_bsdf_pdf) * sweight, sweight, 0); + kg, sd, *wo, sc, bsdf_eval, (*unguided_bsdf_pdf) * sweight, sweight, 0); kernel_assert(reduce_min(bsdf_eval_sum(bsdf_eval)) >= 0.0f); } *bsdf_pdf = *unguided_bsdf_pdf; if (use_surface_guiding) { - guide_pdf = guiding_bsdf_pdf(kg, state, *omega_in); + guide_pdf = guiding_bsdf_pdf(kg, state, *wo); *bsdf_pdf *= 1.0f - guiding_sampling_prob; *bsdf_pdf += guiding_sampling_prob * guide_pdf * (1.0f - bssrdf_sampling_prob); } @@ -533,7 +525,7 @@ ccl_device int surface_shader_bsdf_sample_closure(KernelGlobals kg, ccl_private const ShaderClosure *sc, const float2 rand_bsdf, ccl_private BsdfEval *bsdf_eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float2 *sampled_roughness, ccl_private float *eta) @@ -546,15 +538,14 @@ ccl_device int surface_shader_bsdf_sample_closure(KernelGlobals kg, *pdf = 0.0f; label = bsdf_sample( - kg, sd, sc, rand_bsdf.x, rand_bsdf.y, &eval, omega_in, pdf, sampled_roughness, eta); + kg, sd, sc, rand_bsdf.x, rand_bsdf.y, &eval, wo, pdf, sampled_roughness, eta); if (*pdf != 0.0f) { bsdf_eval_init(bsdf_eval, sc->type, eval * sc->weight); if (sd->num_closure > 1) { float sweight = sc->sample_weight; - *pdf = _surface_shader_bsdf_eval_mis( - kg, sd, *omega_in, sc, bsdf_eval, *pdf * sweight, sweight, 0); + *pdf = _surface_shader_bsdf_eval_mis(kg, sd, *wo, sc, bsdf_eval, *pdf * sweight, sweight, 0); } } else { @@ -630,7 +621,7 @@ ccl_device Spectrum surface_shader_diffuse(KernelGlobals kg, ccl_private const S ccl_private const ShaderClosure *sc = &sd->closure[i]; if (CLOSURE_IS_BSDF_DIFFUSE(sc->type) || CLOSURE_IS_BSSRDF(sc->type)) - eval += sc->weight; + eval += bsdf_albedo(sd, sc); } return eval; @@ -644,7 +635,7 @@ ccl_device Spectrum surface_shader_glossy(KernelGlobals kg, ccl_private const 
Sh ccl_private const ShaderClosure *sc = &sd->closure[i]; if (CLOSURE_IS_BSDF_GLOSSY(sc->type)) - eval += sc->weight; + eval += bsdf_albedo(sd, sc); } return eval; @@ -658,7 +649,7 @@ ccl_device Spectrum surface_shader_transmission(KernelGlobals kg, ccl_private co ccl_private const ShaderClosure *sc = &sd->closure[i]; if (CLOSURE_IS_BSDF_TRANSMISSION(sc->type)) - eval += sc->weight; + eval += bsdf_albedo(sd, sc); } return eval; @@ -758,7 +749,7 @@ ccl_device Spectrum surface_shader_background(ccl_private const ShaderData *sd) ccl_device Spectrum surface_shader_emission(ccl_private const ShaderData *sd) { if (sd->flag & SD_EMISSION) { - return emissive_simple_eval(sd->Ng, sd->I) * sd->closure_emission_background; + return emissive_simple_eval(sd->Ng, sd->wi) * sd->closure_emission_background; } else { return zero_spectrum(); diff --git a/intern/cycles/kernel/integrator/volume_shader.h b/intern/cycles/kernel/integrator/volume_shader.h index 625205e9931..45804859d7c 100644 --- a/intern/cycles/kernel/integrator/volume_shader.h +++ b/intern/cycles/kernel/integrator/volume_shader.h @@ -202,7 +202,7 @@ ccl_device_inline ccl_private const ShaderVolumeClosure *volume_shader_phase_pic ccl_device_inline float _volume_shader_phase_eval_mis(ccl_private const ShaderData *sd, ccl_private const ShaderVolumePhases *phases, - const float3 omega_in, + const float3 wo, int skip_phase, ccl_private BsdfEval *result_eval, float sum_pdf, @@ -214,7 +214,7 @@ ccl_device_inline float _volume_shader_phase_eval_mis(ccl_private const ShaderDa ccl_private const ShaderVolumeClosure *svc = &phases->closure[i]; float phase_pdf = 0.0f; - Spectrum eval = volume_phase_eval(sd, svc, omega_in, &phase_pdf); + Spectrum eval = volume_phase_eval(sd, svc, wo, &phase_pdf); if (phase_pdf != 0.0f) { bsdf_eval_accum(result_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval); @@ -230,11 +230,11 @@ ccl_device_inline float _volume_shader_phase_eval_mis(ccl_private const ShaderDa ccl_device float volume_shader_phase_eval(KernelGlobals kg, ccl_private const ShaderData *sd, ccl_private const ShaderVolumeClosure *svc, - const float3 omega_in, + const float3 wo, ccl_private BsdfEval *phase_eval) { float phase_pdf = 0.0f; - Spectrum eval = volume_phase_eval(sd, svc, omega_in, &phase_pdf); + Spectrum eval = volume_phase_eval(sd, svc, wo, &phase_pdf); if (phase_pdf != 0.0f) { bsdf_eval_accum(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval); @@ -247,17 +247,17 @@ ccl_device float volume_shader_phase_eval(KernelGlobals kg, IntegratorState state, ccl_private const ShaderData *sd, ccl_private const ShaderVolumePhases *phases, - const float3 omega_in, + const float3 wo, ccl_private BsdfEval *phase_eval) { bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, zero_spectrum()); - float pdf = _volume_shader_phase_eval_mis(sd, phases, omega_in, -1, phase_eval, 0.0f, 0.0f); + float pdf = _volume_shader_phase_eval_mis(sd, phases, wo, -1, phase_eval, 0.0f, 0.0f); # if defined(__PATH_GUIDING__) && PATH_GUIDING_LEVEL >= 4 if (state->guiding.use_volume_guiding) { const float guiding_sampling_prob = state->guiding.volume_guiding_sampling_prob; - const float guide_pdf = guiding_phase_pdf(kg, state, omega_in); + const float guide_pdf = guiding_phase_pdf(kg, state, wo); pdf = (guiding_sampling_prob * guide_pdf) + (1.0f - guiding_sampling_prob) * pdf; } # endif @@ -272,7 +272,7 @@ ccl_device int volume_shader_phase_guided_sample(KernelGlobals kg, ccl_private const ShaderVolumeClosure *svc, const float2 rand_phase, ccl_private BsdfEval *phase_eval, - 
ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *phase_pdf, ccl_private float *unguided_phase_pdf, ccl_private float *sampled_roughness) @@ -304,11 +304,11 @@ ccl_device int volume_shader_phase_guided_sample(KernelGlobals kg, if (sample_guiding) { /* Sample guiding distribution. */ - guide_pdf = guiding_phase_sample(kg, state, rand_phase, omega_in); + guide_pdf = guiding_phase_sample(kg, state, rand_phase, wo); *phase_pdf = 0.0f; if (guide_pdf != 0.0f) { - *unguided_phase_pdf = volume_shader_phase_eval(kg, sd, svc, *omega_in, phase_eval); + *unguided_phase_pdf = volume_shader_phase_eval(kg, sd, svc, *wo, phase_eval); *phase_pdf = (guiding_sampling_prob * guide_pdf) + ((1.0f - guiding_sampling_prob) * (*unguided_phase_pdf)); label = LABEL_VOLUME_SCATTER; @@ -318,14 +318,14 @@ ccl_device int volume_shader_phase_guided_sample(KernelGlobals kg, /* Sample phase. */ *phase_pdf = 0.0f; label = volume_phase_sample( - sd, svc, rand_phase.x, rand_phase.y, &eval, omega_in, unguided_phase_pdf); + sd, svc, rand_phase.x, rand_phase.y, &eval, wo, unguided_phase_pdf); if (*unguided_phase_pdf != 0.0f) { bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval); *phase_pdf = *unguided_phase_pdf; if (use_volume_guiding) { - guide_pdf = guiding_phase_pdf(kg, state, *omega_in); + guide_pdf = guiding_phase_pdf(kg, state, *wo); *phase_pdf *= 1.0f - guiding_sampling_prob; *phase_pdf += guiding_sampling_prob * guide_pdf; } @@ -349,7 +349,7 @@ ccl_device int volume_shader_phase_sample(KernelGlobals kg, ccl_private const ShaderVolumeClosure *svc, float2 rand_phase, ccl_private BsdfEval *phase_eval, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf, ccl_private float *sampled_roughness) { @@ -357,7 +357,7 @@ ccl_device int volume_shader_phase_sample(KernelGlobals kg, Spectrum eval = zero_spectrum(); *pdf = 0.0f; - int label = volume_phase_sample(sd, svc, rand_phase.x, rand_phase.y, &eval, omega_in, pdf); + int label = volume_phase_sample(sd, svc, rand_phase.x, rand_phase.y, &eval, wo, pdf); if (*pdf != 0.0f) { bsdf_eval_init(phase_eval, CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID, eval); diff --git a/intern/cycles/kernel/light/area.h b/intern/cycles/kernel/light/area.h index 9c0ca0c8a70..a4badf907a0 100644 --- a/intern/cycles/kernel/light/area.h +++ b/intern/cycles/kernel/light/area.h @@ -102,7 +102,7 @@ ccl_device float area_light_spread_attenuation(const float3 D, /* The factor M_PI_F comes from integrating the radiance over the hemisphere */ return (cos_a > 0.9999997f) ? M_PI_F : 0.0f; } - const float sin_a = safe_sqrtf(1.0f - sqr(cos_a)); + const float sin_a = sin_from_cos(cos_a); const float tan_a = sin_a / cos_a; return max((tan_half_spread - tan_a) * normalize_spread, 0.0f); } diff --git a/intern/cycles/kernel/light/sample.h b/intern/cycles/kernel/light/sample.h index 423024c6b3d..f56ca19e968 100644 --- a/intern/cycles/kernel/light/sample.h +++ b/intern/cycles/kernel/light/sample.h @@ -88,13 +88,6 @@ light_sample_shader_eval(KernelGlobals kg, return eval; } -/* Test if light sample is from a light or emission from geometry. */ -ccl_device_inline bool light_sample_is_light(ccl_private const LightSample *ccl_restrict ls) -{ - /* return if it's a lamp for shadow pass */ - return (ls->prim == PRIM_NONE && ls->type != LIGHT_BACKGROUND); -} - /* Early path termination of shadow rays. 
*/ ccl_device_inline bool light_sample_terminate(KernelGlobals kg, ccl_private const LightSample *ccl_restrict ls, diff --git a/intern/cycles/kernel/light/spot.h b/intern/cycles/kernel/light/spot.h index b1d652f13f9..1ffaebe7c17 100644 --- a/intern/cycles/kernel/light/spot.h +++ b/intern/cycles/kernel/light/spot.h @@ -7,24 +7,13 @@ CCL_NAMESPACE_BEGIN -ccl_device float spot_light_attenuation(float3 dir, - float cos_half_spot_angle, - float spot_smooth, - float3 N) +ccl_device float spot_light_attenuation(const ccl_global KernelSpotLight *spot, float3 ray) { - float attenuation = dot(dir, N); + const float3 scaled_ray = safe_normalize( + make_float3(dot(ray, spot->axis_u), dot(ray, spot->axis_v), dot(ray, spot->dir)) / + spot->len); - if (attenuation <= cos_half_spot_angle) { - attenuation = 0.0f; - } - else { - float t = attenuation - cos_half_spot_angle; - - if (t < spot_smooth && spot_smooth != 0.0f) - attenuation *= smoothstepf(t / spot_smooth); - } - - return attenuation; + return smoothstepf((scaled_ray.z - spot->cos_half_spot_angle) / spot->spot_smooth); } template @@ -57,8 +46,7 @@ ccl_device_inline bool spot_light_sample(const ccl_global KernelLight *klight, ls->eval_fac = (0.25f * M_1_PI_F) * invarea; /* spot light attenuation */ - ls->eval_fac *= spot_light_attenuation( - klight->spot.dir, klight->spot.cos_half_spot_angle, klight->spot.spot_smooth, -ls->D); + ls->eval_fac *= spot_light_attenuation(&klight->spot, -ls->D); if (!in_volume_segment && ls->eval_fac == 0.0f) { return false; } @@ -87,8 +75,7 @@ ccl_device_forceinline void spot_light_update_position(const ccl_global KernelLi ls->pdf = invarea; /* spot light attenuation */ - ls->eval_fac *= spot_light_attenuation( - klight->spot.dir, klight->spot.cos_half_spot_angle, klight->spot.spot_smooth, ls->Ng); + ls->eval_fac *= spot_light_attenuation(&klight->spot, ls->Ng); } ccl_device_inline bool spot_light_intersect(const ccl_global KernelLight *klight, @@ -129,8 +116,7 @@ ccl_device_inline bool spot_light_sample_from_intersection( ls->pdf = invarea; /* spot light attenuation */ - ls->eval_fac *= spot_light_attenuation( - klight->spot.dir, klight->spot.cos_half_spot_angle, klight->spot.spot_smooth, -ls->D); + ls->eval_fac *= spot_light_attenuation(&klight->spot, -ls->D); if (ls->eval_fac == 0.0f) { return false; diff --git a/intern/cycles/kernel/light/tree.h b/intern/cycles/kernel/light/tree.h index 423879bcddc..441e9758088 100644 --- a/intern/cycles/kernel/light/tree.h +++ b/intern/cycles/kernel/light/tree.h @@ -47,11 +47,6 @@ ccl_device float light_tree_cos_bounding_box_angle(const BoundingBox bbox, return cos_theta_u; } -ccl_device_forceinline float sin_from_cos(const float c) -{ - return safe_sqrtf(1.0f - sqr(c)); -} - /* Compute vector v as in Fig .8. P_v is the corresponding point along the ray. 
*/ ccl_device float3 compute_v( const float3 centroid, const float3 P, const float3 D, const float3 bcone_axis, const float t) diff --git a/intern/cycles/kernel/light/triangle.h b/intern/cycles/kernel/light/triangle.h index 7a9a395c2b6..b6724c6d6e1 100644 --- a/intern/cycles/kernel/light/triangle.h +++ b/intern/cycles/kernel/light/triangle.h @@ -63,7 +63,7 @@ ccl_device_forceinline float triangle_light_pdf(KernelGlobals kg, const float3 e2 = V[2] - V[1]; const float longest_edge_squared = max(len_squared(e0), max(len_squared(e1), len_squared(e2))); const float3 N = cross(e0, e1); - const float distance_to_plane = fabsf(dot(N, sd->I * t)) / dot(N, N); + const float distance_to_plane = fabsf(dot(N, sd->wi * t)) / dot(N, N); const float area = 0.5f * len(N); float pdf; @@ -71,7 +71,7 @@ ccl_device_forceinline float triangle_light_pdf(KernelGlobals kg, if (longest_edge_squared > distance_to_plane * distance_to_plane) { /* sd contains the point on the light source * calculate Px, the point that we're shading */ - const float3 Px = sd->P + sd->I * t; + const float3 Px = sd->P + sd->wi * t; const float3 v0_p = V[0] - Px; const float3 v1_p = V[1] - Px; const float3 v2_p = V[2] - Px; @@ -99,7 +99,7 @@ ccl_device_forceinline float triangle_light_pdf(KernelGlobals kg, return 0.0f; } - pdf = triangle_light_pdf_area_sampling(sd->Ng, sd->I, t) / area; + pdf = triangle_light_pdf_area_sampling(sd->Ng, sd->wi, t) / area; } /* Belongs in distribution.h but can reuse computations here. */ @@ -218,7 +218,7 @@ ccl_device_forceinline bool triangle_light_sample(KernelGlobals kg, /* Finally, select a random point along the edge of the new triangle * That point on the spherical triangle is the sampled ray direction */ const float z = 1.0f - randv * (1.0f - dot(C_, B)); - ls->D = z * B + safe_sqrtf(1.0f - z * z) * safe_normalize(C_ - dot(C_, B) * B); + ls->D = z * B + sin_from_cos(z) * safe_normalize(C_ - dot(C_, B) * B); /* calculate intersection with the planar triangle */ if (!ray_triangle_intersect( diff --git a/intern/cycles/kernel/osl/closures_setup.h b/intern/cycles/kernel/osl/closures_setup.h index ceaf56ccba6..5ea8f080410 100644 --- a/intern/cycles/kernel/osl/closures_setup.h +++ b/intern/cycles/kernel/osl/closures_setup.h @@ -80,7 +80,7 @@ ccl_device void osl_closure_diffuse_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); sd->flag |= bsdf_diffuse_setup(bsdf); } @@ -101,7 +101,7 @@ ccl_device void osl_closure_oren_nayar_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->roughness = closure->roughness; sd->flag |= bsdf_oren_nayar_setup(bsdf); @@ -123,7 +123,7 @@ ccl_device void osl_closure_translucent_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); sd->flag |= bsdf_translucent_setup(bsdf); } @@ -144,7 +144,7 @@ ccl_device void osl_closure_reflection_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); sd->flag |= bsdf_reflection_setup(bsdf); } @@ -165,7 +165,7 @@ ccl_device void osl_closure_refraction_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, 
closure->N); bsdf->ior = closure->ior; sd->flag |= bsdf_refraction_setup(bsdf); @@ -199,7 +199,7 @@ ccl_device void osl_closure_microfacet_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = closure->alpha_y; bsdf->ior = closure->ior; @@ -209,14 +209,7 @@ ccl_device void osl_closure_microfacet_setup(KernelGlobals kg, if (closure->distribution == make_string("ggx", 11253504724482777663ull) || closure->distribution == make_string("default", 4430693559278735917ull)) { if (!closure->refract) { - if (closure->alpha_x == closure->alpha_y) { - /* Isotropic */ - sd->flag |= bsdf_microfacet_ggx_isotropic_setup(bsdf); - } - else { - /* Anisotropic */ - sd->flag |= bsdf_microfacet_ggx_setup(bsdf); - } + sd->flag |= bsdf_microfacet_ggx_setup(bsdf); } else { sd->flag |= bsdf_microfacet_ggx_refraction_setup(bsdf); @@ -225,14 +218,7 @@ ccl_device void osl_closure_microfacet_setup(KernelGlobals kg, /* Beckmann */ else { if (!closure->refract) { - if (closure->alpha_x == closure->alpha_y) { - /* Isotropic */ - sd->flag |= bsdf_microfacet_beckmann_isotropic_setup(bsdf); - } - else { - /* Anisotropic */ - sd->flag |= bsdf_microfacet_beckmann_setup(bsdf); - } + sd->flag |= bsdf_microfacet_beckmann_setup(bsdf); } else { sd->flag |= bsdf_microfacet_beckmann_refraction_setup(bsdf); @@ -257,10 +243,10 @@ ccl_device void osl_closure_microfacet_ggx_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); - bsdf->alpha_x = closure->alpha_x; + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); + bsdf->alpha_x = bsdf->alpha_y = closure->alpha_x; - sd->flag |= bsdf_microfacet_ggx_isotropic_setup(bsdf); + sd->flag |= bsdf_microfacet_ggx_setup(bsdf); } ccl_device void osl_closure_microfacet_ggx_aniso_setup( @@ -280,7 +266,7 @@ ccl_device void osl_closure_microfacet_ggx_aniso_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = closure->alpha_y; bsdf->T = closure->T; @@ -305,7 +291,7 @@ ccl_device void osl_closure_microfacet_ggx_refraction_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->ior = closure->ior; @@ -337,7 +323,7 @@ ccl_device void osl_closure_microfacet_ggx_fresnel_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = bsdf->alpha_x; bsdf->ior = closure->ior; @@ -345,7 +331,6 @@ ccl_device void osl_closure_microfacet_ggx_fresnel_setup( bsdf->extra = extra; bsdf->extra->color = rgb_to_spectrum(closure->color); bsdf->extra->cspec0 = rgb_to_spectrum(closure->cspec0); - bsdf->extra->clearcoat = 0.0f; bsdf->T = zero_float3(); @@ -375,7 +360,7 @@ ccl_device void osl_closure_microfacet_ggx_aniso_fresnel_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = closure->alpha_y; bsdf->ior = closure->ior; @@ -383,7 +368,6 @@ ccl_device void osl_closure_microfacet_ggx_aniso_fresnel_setup( bsdf->extra = extra; bsdf->extra->color = rgb_to_spectrum(closure->color); 
bsdf->extra->cspec0 = rgb_to_spectrum(closure->cspec0); - bsdf->extra->clearcoat = 0.0f; bsdf->T = closure->T; @@ -418,7 +402,7 @@ ccl_device void osl_closure_microfacet_multi_ggx_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = bsdf->alpha_x; bsdf->ior = 1.0f; @@ -426,7 +410,6 @@ ccl_device void osl_closure_microfacet_multi_ggx_setup( bsdf->extra = extra; bsdf->extra->color = rgb_to_spectrum(closure->color); bsdf->extra->cspec0 = zero_spectrum(); - bsdf->extra->clearcoat = 0.0f; bsdf->T = zero_float3(); @@ -459,7 +442,7 @@ ccl_device void osl_closure_microfacet_multi_ggx_glass_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = bsdf->alpha_x; bsdf->ior = closure->ior; @@ -467,7 +450,6 @@ ccl_device void osl_closure_microfacet_multi_ggx_glass_setup( bsdf->extra = extra; bsdf->extra->color = rgb_to_spectrum(closure->color); bsdf->extra->cspec0 = zero_spectrum(); - bsdf->extra->clearcoat = 0.0f; bsdf->T = zero_float3(); @@ -500,7 +482,7 @@ ccl_device void osl_closure_microfacet_multi_ggx_aniso_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = closure->alpha_y; bsdf->ior = 1.0f; @@ -508,7 +490,6 @@ ccl_device void osl_closure_microfacet_multi_ggx_aniso_setup( bsdf->extra = extra; bsdf->extra->color = rgb_to_spectrum(closure->color); bsdf->extra->cspec0 = zero_spectrum(); - bsdf->extra->clearcoat = 0.0f; bsdf->T = closure->T; @@ -543,7 +524,7 @@ ccl_device void osl_closure_microfacet_multi_ggx_fresnel_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = bsdf->alpha_x; bsdf->ior = closure->ior; @@ -551,7 +532,6 @@ ccl_device void osl_closure_microfacet_multi_ggx_fresnel_setup( bsdf->extra = extra; bsdf->extra->color = rgb_to_spectrum(closure->color); bsdf->extra->cspec0 = rgb_to_spectrum(closure->cspec0); - bsdf->extra->clearcoat = 0.0f; bsdf->T = zero_float3(); @@ -584,7 +564,7 @@ ccl_device void osl_closure_microfacet_multi_ggx_glass_fresnel_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = bsdf->alpha_x; bsdf->ior = closure->ior; @@ -592,7 +572,6 @@ ccl_device void osl_closure_microfacet_multi_ggx_glass_fresnel_setup( bsdf->extra = extra; bsdf->extra->color = rgb_to_spectrum(closure->color); bsdf->extra->cspec0 = rgb_to_spectrum(closure->cspec0); - bsdf->extra->clearcoat = 0.0f; bsdf->T = zero_float3(); @@ -625,7 +604,7 @@ ccl_device void osl_closure_microfacet_multi_ggx_aniso_fresnel_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = closure->alpha_y; bsdf->ior = closure->ior; @@ -633,7 +612,6 @@ ccl_device void osl_closure_microfacet_multi_ggx_aniso_fresnel_setup( bsdf->extra = extra; bsdf->extra->color = rgb_to_spectrum(closure->color); bsdf->extra->cspec0 = rgb_to_spectrum(closure->cspec0); - bsdf->extra->clearcoat = 0.0f; bsdf->T = 
closure->T; @@ -659,10 +637,10 @@ ccl_device void osl_closure_microfacet_beckmann_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); - bsdf->alpha_x = closure->alpha_x; + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); + bsdf->alpha_x = bsdf->alpha_y = closure->alpha_x; - sd->flag |= bsdf_microfacet_beckmann_isotropic_setup(bsdf); + sd->flag |= bsdf_microfacet_beckmann_setup(bsdf); } ccl_device void osl_closure_microfacet_beckmann_aniso_setup( @@ -682,7 +660,7 @@ ccl_device void osl_closure_microfacet_beckmann_aniso_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = closure->alpha_y; bsdf->T = closure->T; @@ -707,7 +685,7 @@ ccl_device void osl_closure_microfacet_beckmann_refraction_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->ior = closure->ior; @@ -733,7 +711,7 @@ ccl_device void osl_closure_ashikhmin_velvet_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->sigma = closure->sigma; sd->flag |= bsdf_ashikhmin_velvet_setup(bsdf); @@ -756,7 +734,7 @@ ccl_device void osl_closure_ashikhmin_shirley_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->alpha_x; bsdf->alpha_y = closure->alpha_y; bsdf->T = closure->T; @@ -780,7 +758,7 @@ ccl_device void osl_closure_diffuse_toon_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->size = closure->size; bsdf->smooth = closure->smooth; @@ -803,7 +781,7 @@ ccl_device void osl_closure_glossy_toon_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->size = closure->size; bsdf->smooth = closure->smooth; @@ -829,7 +807,7 @@ ccl_device void osl_closure_principled_diffuse_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->roughness = closure->roughness; sd->flag |= bsdf_principled_diffuse_setup(bsdf); @@ -852,7 +830,7 @@ ccl_device void osl_closure_principled_sheen_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->avg_value = 0.0f; sd->flag |= bsdf_principled_sheen_setup(sd, bsdf); @@ -865,27 +843,18 @@ ccl_device void osl_closure_principled_clearcoat_setup( float3 weight, ccl_private const PrincipledClearcoatClosure *closure) { + weight *= 0.25f * closure->clearcoat; ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc( sd, sizeof(MicrofacetBsdf), rgb_to_spectrum(weight)); if (!bsdf) { return; } - MicrofacetExtra *extra = (MicrofacetExtra *)closure_alloc_extra(sd, sizeof(MicrofacetExtra)); - if (!extra) { - return; - } - - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->alpha_x = closure->clearcoat_roughness; bsdf->alpha_y = closure->clearcoat_roughness; bsdf->ior = 1.5f; - 
bsdf->extra = extra; - bsdf->extra->color = zero_spectrum(); - bsdf->extra->cspec0 = make_spectrum(0.04f); - bsdf->extra->clearcoat = closure->clearcoat; - bsdf->T = zero_float3(); sd->flag |= bsdf_microfacet_ggx_clearcoat_setup(bsdf, sd); @@ -948,7 +917,7 @@ ccl_device void osl_closure_diffuse_ramp_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->colors = (float3 *)closure_alloc_extra(sd, sizeof(float3) * 8); if (!bsdf->colors) { @@ -973,7 +942,7 @@ ccl_device void osl_closure_phong_ramp_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->exponent = closure->exponent; bsdf->colors = (float3 *)closure_alloc_extra(sd, sizeof(float3) * 8); @@ -1024,7 +993,7 @@ ccl_device void osl_closure_bssrdf_setup(KernelGlobals kg, /* create one closure per color channel */ bssrdf->albedo = closure->albedo; - bssrdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bssrdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bssrdf->roughness = closure->roughness; bssrdf->anisotropy = clamp(closure->anisotropy, 0.0f, 0.9f); @@ -1049,7 +1018,7 @@ ccl_device void osl_closure_hair_reflection_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->T = closure->T; bsdf->roughness1 = closure->roughness1; bsdf->roughness2 = closure->roughness2; @@ -1075,7 +1044,7 @@ ccl_device void osl_closure_hair_transmission_setup( return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->T = closure->T; bsdf->roughness1 = closure->roughness1; bsdf->roughness2 = closure->roughness2; @@ -1107,7 +1076,7 @@ ccl_device void osl_closure_principled_hair_setup(KernelGlobals kg, return; } - bsdf->N = ensure_valid_reflection(sd->Ng, sd->I, closure->N); + bsdf->N = ensure_valid_reflection(sd->Ng, sd->wi, closure->N); bsdf->sigma = closure->sigma; bsdf->v = closure->v; bsdf->s = closure->s; diff --git a/intern/cycles/kernel/osl/osl.h b/intern/cycles/kernel/osl/osl.h index cc5c81ad027..18288d202b5 100644 --- a/intern/cycles/kernel/osl/osl.h +++ b/intern/cycles/kernel/osl/osl.h @@ -25,13 +25,13 @@ ccl_device_inline void shaderdata_to_shaderglobals(KernelGlobals kg, ccl_private ShaderGlobals *globals) { const differential3 dP = differential_from_compact(sd->Ng, sd->dP); - const differential3 dI = differential_from_compact(sd->I, sd->dI); + const differential3 dI = differential_from_compact(sd->wi, sd->dI); /* copy from shader data to shader globals */ globals->P = sd->P; globals->dPdx = dP.dx; globals->dPdy = dP.dy; - globals->I = sd->I; + globals->I = sd->wi; globals->dIdx = dI.dx; globals->dIdy = dI.dy; globals->N = sd->N; @@ -161,7 +161,10 @@ ccl_device_inline void osl_eval_nodes(KernelGlobals kg, /* shadeindex = */ 0); # endif - if (globals.Ci) { + if constexpr (type == SHADER_TYPE_DISPLACEMENT) { + sd->P = globals.P; + } + else if (globals.Ci) { flatten_closure_tree(kg, sd, path_flag, globals.Ci); } } diff --git a/intern/cycles/kernel/osl/services.cpp b/intern/cycles/kernel/osl/services.cpp index 3fd098de4bb..92708df3162 100644 --- a/intern/cycles/kernel/osl/services.cpp +++ b/intern/cycles/kernel/osl/services.cpp @@ -20,6 +20,7 @@ #include "kernel/osl/globals.h" #include 
"kernel/osl/services.h" +#include "kernel/osl/types.h" #include "util/foreach.h" #include "util/log.h" @@ -119,6 +120,8 @@ ustring OSLRenderServices::u_u("u"); ustring OSLRenderServices::u_v("v"); ustring OSLRenderServices::u_empty; +ImageManager *OSLRenderServices::image_manager = nullptr; + OSLRenderServices::OSLRenderServices(OSL::TextureSystem *texture_system, int device_type) : OSL::RendererServices(texture_system), device_type_(device_type) { @@ -1154,7 +1157,7 @@ TextureSystem::TextureHandle *OSLRenderServices::get_texture_handle(ustring file /* For non-OIIO textures, just return a pointer to our own OSLTextureHandle. */ if (it != textures.end()) { if (it->second->type != OSLTextureHandle::OIIO) { - return (TextureSystem::TextureHandle *)it->second.get(); + return reinterpret_cast(it->second.get()); } } @@ -1173,16 +1176,53 @@ TextureSystem::TextureHandle *OSLRenderServices::get_texture_handle(ustring file /* Assign OIIO texture handle and return. */ it->second->oiio_handle = handle; - return (TextureSystem::TextureHandle *)it->second.get(); + return reinterpret_cast(it->second.get()); } else { - if (it != textures.end() && it->second->type == OSLTextureHandle::SVM && - it->second->svm_slots[0].w == -1) { - return reinterpret_cast( - static_cast(it->second->svm_slots[0].y + 1)); + /* Construct GPU texture handle for existing textures. */ + if (it != textures.end()) { + switch (it->second->type) { + case OSLTextureHandle::OIIO: + return NULL; + case OSLTextureHandle::SVM: + if (!it->second->handle.empty() && it->second->handle.get_manager() != image_manager) { + it.clear(); + break; + } + return reinterpret_cast(OSL_TEXTURE_HANDLE_TYPE_SVM | + it->second->svm_slots[0].y); + case OSLTextureHandle::IES: + if (!it->second->handle.empty() && it->second->handle.get_manager() != image_manager) { + it.clear(); + break; + } + return reinterpret_cast(OSL_TEXTURE_HANDLE_TYPE_IES | + it->second->svm_slots[0].y); + case OSLTextureHandle::AO: + return reinterpret_cast( + OSL_TEXTURE_HANDLE_TYPE_AO_OR_BEVEL | 1); + case OSLTextureHandle::BEVEL: + return reinterpret_cast( + OSL_TEXTURE_HANDLE_TYPE_AO_OR_BEVEL | 2); + } } - return NULL; + if (!image_manager) { + return NULL; + } + + /* Load new textures using SVM image manager. 
*/ + ImageHandle handle = image_manager->add_image(filename.string(), ImageParams()); + if (handle.empty()) { + return NULL; + } + + if (!textures.insert(filename, new OSLTextureHandle(handle))) { + return NULL; + } + + return reinterpret_cast(OSL_TEXTURE_HANDLE_TYPE_SVM | + handle.svm_slot()); } } @@ -1720,8 +1760,8 @@ bool OSLRenderServices::getmessage(OSL::ShaderGlobals *sg, return set_attribute_float3(f, type, derivatives, val); } else if (name == u_I) { - const differential3 dI = differential_from_compact(sd->I, sd->dI); - float3 f[3] = {sd->I, dI.dx, dI.dy}; + const differential3 dI = differential_from_compact(sd->wi, sd->dI); + float3 f[3] = {sd->wi, dI.dx, dI.dy}; return set_attribute_float3(f, type, derivatives, val); } else if (name == u_u) { diff --git a/intern/cycles/kernel/osl/services.h b/intern/cycles/kernel/osl/services.h index 9d875ae8e94..747eb242d8c 100644 --- a/intern/cycles/kernel/osl/services.h +++ b/intern/cycles/kernel/osl/services.h @@ -16,6 +16,8 @@ #include #include +#include "scene/image.h" + #ifdef WITH_PTEX class PtexCache; #endif @@ -54,10 +56,20 @@ struct OSLTextureHandle : public OIIO::RefCnt { { } + OSLTextureHandle(const ImageHandle &handle) + : type(SVM), + svm_slots(handle.get_svm_slots()), + oiio_handle(nullptr), + processor(nullptr), + handle(handle) + { + } + Type type; vector svm_slots; OSL::TextureSystem::TextureHandle *oiio_handle; ColorSpaceProcessor *processor; + ImageHandle handle; }; typedef OIIO::intrusive_ptr OSLTextureHandleRef; @@ -324,6 +336,8 @@ class OSLRenderServices : public OSL::RendererServices { * shading system. */ OSLTextureHandleMap textures; + static ImageManager *image_manager; + private: int device_type_; }; diff --git a/intern/cycles/kernel/osl/services_gpu.h b/intern/cycles/kernel/osl/services_gpu.h index b9ffd959f1a..2fa4299d3f9 100644 --- a/intern/cycles/kernel/osl/services_gpu.h +++ b/intern/cycles/kernel/osl/services_gpu.h @@ -1443,6 +1443,8 @@ OSL_NOISE_IMPL(osl_snoise, snoise) /* Texturing */ +#include "kernel/svm/ies.h" + ccl_device_extern ccl_private OSLTextureOptions *osl_get_texture_options( ccl_private ShaderGlobals *sg) { @@ -1548,25 +1550,31 @@ ccl_device_extern bool osl_texture(ccl_private ShaderGlobals *sg, ccl_private float *dalphady, ccl_private void *errormessage) { - if (!texture_handle) { - return false; + const unsigned int type = OSL_TEXTURE_HANDLE_TYPE(texture_handle); + const unsigned int slot = OSL_TEXTURE_HANDLE_SLOT(texture_handle); + + switch (type) { + case OSL_TEXTURE_HANDLE_TYPE_SVM: { + const float4 rgba = kernel_tex_image_interp(nullptr, slot, s, 1.0f - t); + if (nchannels > 0) + result[0] = rgba.x; + if (nchannels > 1) + result[1] = rgba.y; + if (nchannels > 2) + result[2] = rgba.z; + if (alpha) + *alpha = rgba.w; + return true; + } + case OSL_TEXTURE_HANDLE_TYPE_IES: { + if (nchannels > 0) + result[0] = kernel_ies_interp(nullptr, slot, s, t); + return true; + } + default: { + return false; + } } - - /* Only SVM textures are supported. 
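/* Illustration only, not part of the patch: on the GPU path, get_texture_handle() above no
 * longer returns a real pointer. It packs a 2-bit texture type and a 30-bit slot index into
 * the pointer value, which osl_texture()/osl_texture3d() decode with the
 * OSL_TEXTURE_HANDLE_TYPE/_SLOT macros added to kernel/osl/types.h further below. The
 * standalone snippet restates that encoding; the names here are local, only the bit layout
 * mirrors the patch. */

#include <cassert>
#include <cstdint>

enum : std::uintptr_t {
  HANDLE_TYPE_SVM = std::uintptr_t(0x1) << 30,         /* SVM image slot */
  HANDLE_TYPE_IES = std::uintptr_t(0x2) << 30,         /* IES light profile slot */
  HANDLE_TYPE_AO_OR_BEVEL = std::uintptr_t(0x3) << 30, /* built-in AO/bevel pseudo-textures */
  HANDLE_TYPE_MASK = std::uintptr_t(0x3) << 30,
  HANDLE_SLOT_MASK = std::uintptr_t(0x3FFFFFFF),
};

static void *encode_handle(std::uintptr_t type, unsigned int slot)
{
  /* The slot must fit in the low 30 bits for the round trip to be lossless. */
  assert((slot & ~HANDLE_SLOT_MASK) == 0);
  return reinterpret_cast<void *>(type | slot);
}

static unsigned int handle_type(const void *handle)
{
  return (unsigned int)(reinterpret_cast<std::uintptr_t>(handle) & HANDLE_TYPE_MASK);
}

static unsigned int handle_slot(const void *handle)
{
  return (unsigned int)(reinterpret_cast<std::uintptr_t>(handle) & HANDLE_SLOT_MASK);
}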
*/ - int id = static_cast(reinterpret_cast(texture_handle) - 1); - - const float4 rgba = kernel_tex_image_interp(nullptr, id, s, 1.0f - t); - - if (nchannels > 0) - result[0] = rgba.x; - if (nchannels > 1) - result[1] = rgba.y; - if (nchannels > 2) - result[2] = rgba.z; - if (alpha) - *alpha = rgba.w; - - return true; } ccl_device_extern bool osl_texture3d(ccl_private ShaderGlobals *sg, @@ -1586,25 +1594,26 @@ ccl_device_extern bool osl_texture3d(ccl_private ShaderGlobals *sg, ccl_private float *dalphady, ccl_private void *errormessage) { - if (!texture_handle) { - return false; + const unsigned int type = OSL_TEXTURE_HANDLE_TYPE(texture_handle); + const unsigned int slot = OSL_TEXTURE_HANDLE_SLOT(texture_handle); + + switch (type) { + case OSL_TEXTURE_HANDLE_TYPE_SVM: { + const float4 rgba = kernel_tex_image_interp_3d(nullptr, slot, *P, INTERPOLATION_NONE); + if (nchannels > 0) + result[0] = rgba.x; + if (nchannels > 1) + result[1] = rgba.y; + if (nchannels > 2) + result[2] = rgba.z; + if (alpha) + *alpha = rgba.w; + return true; + } + default: { + return false; + } } - - /* Only SVM textures are supported. */ - int id = static_cast(reinterpret_cast(texture_handle) - 1); - - const float4 rgba = kernel_tex_image_interp_3d(nullptr, id, *P, INTERPOLATION_NONE); - - if (nchannels > 0) - result[0] = rgba.x; - if (nchannels > 1) - result[1] = rgba.y; - if (nchannels > 2) - result[2] = rgba.z; - if (alpha) - *alpha = rgba.w; - - return true; } ccl_device_extern bool osl_environment(ccl_private ShaderGlobals *sg, diff --git a/intern/cycles/kernel/osl/shaders/node_principled_bsdf.osl b/intern/cycles/kernel/osl/shaders/node_principled_bsdf.osl index 9f8e7a68b9b..2499f90bc03 100644 --- a/intern/cycles/kernel/osl/shaders/node_principled_bsdf.osl +++ b/intern/cycles/kernel/osl/shaders/node_principled_bsdf.osl @@ -111,8 +111,8 @@ shader node_principled_bsdf(string distribution = "Multiscatter GGX", float eta = backfacing() ? 1.0 / f : f; if (distribution == "GGX" || Roughness <= 5e-2) { - float cosNO = dot(Normal, I); - float Fr = fresnel_dielectric_cos(cosNO, eta); + float cosNI = dot(Normal, I); + float Fr = fresnel_dielectric_cos(cosNI, eta); float refl_roughness = Roughness; if (Roughness <= 1e-2) diff --git a/intern/cycles/kernel/osl/shaders/node_sky_texture.osl b/intern/cycles/kernel/osl/shaders/node_sky_texture.osl index 1373db04a31..763f73a35d7 100644 --- a/intern/cycles/kernel/osl/shaders/node_sky_texture.osl +++ b/intern/cycles/kernel/osl/shaders/node_sky_texture.osl @@ -135,8 +135,9 @@ color sky_radiance_nishita(vector dir, float nishita_data[10], string filename) float half_angular = angular_diameter / 2.0; float dir_elevation = M_PI_2 - direction[0]; - /* if ray inside sun disc render it, otherwise render sky */ - if (sun_dir_angle < half_angular && sun_disc == 1) { + /* if ray inside sun disc render it, otherwise render sky. + * alternatively, ignore the sun if we're evaluating the background texture. 
*/ + if (sun_dir_angle < half_angular && sun_disc == 1 && raytype("importance_bake") != 1) { /* get 2 pixels data */ color pixel_bottom = color(nishita_data[0], nishita_data[1], nishita_data[2]); color pixel_top = color(nishita_data[3], nishita_data[4], nishita_data[5]); diff --git a/intern/cycles/kernel/osl/types.h b/intern/cycles/kernel/osl/types.h index 692c2349a30..67d0073f48d 100644 --- a/intern/cycles/kernel/osl/types.h +++ b/intern/cycles/kernel/osl/types.h @@ -96,4 +96,13 @@ struct OSLNoiseOptions { struct OSLTextureOptions { }; +#define OSL_TEXTURE_HANDLE_TYPE_IES ((uintptr_t)0x2 << 30) +#define OSL_TEXTURE_HANDLE_TYPE_SVM ((uintptr_t)0x1 << 30) +#define OSL_TEXTURE_HANDLE_TYPE_AO_OR_BEVEL ((uintptr_t)0x3 << 30) + +#define OSL_TEXTURE_HANDLE_TYPE(handle) \ + ((unsigned int)((uintptr_t)(handle) & ((uintptr_t)0x3 << 30))) +#define OSL_TEXTURE_HANDLE_SLOT(handle) \ + ((unsigned int)((uintptr_t)(handle) & ((uintptr_t)0x3FFFFFFF))) + CCL_NAMESPACE_END diff --git a/intern/cycles/kernel/sample/mapping.h b/intern/cycles/kernel/sample/mapping.h index fc0e512b803..1cd7bce11d2 100644 --- a/intern/cycles/kernel/sample/mapping.h +++ b/intern/cycles/kernel/sample/mapping.h @@ -33,19 +33,19 @@ ccl_device void make_orthonormals_tangent(const float3 N, /* sample direction with cosine weighted distributed in hemisphere */ ccl_device_inline void sample_cos_hemisphere( - const float3 N, float randu, float randv, ccl_private float3 *omega_in, ccl_private float *pdf) + const float3 N, float randu, float randv, ccl_private float3 *wo, ccl_private float *pdf) { to_unit_disk(&randu, &randv); float costheta = sqrtf(max(1.0f - randu * randu - randv * randv, 0.0f)); float3 T, B; make_orthonormals(N, &T, &B); - *omega_in = randu * T + randv * B + costheta * N; + *wo = randu * T + randv * B + costheta * N; *pdf = costheta * M_1_PI_F; } /* sample direction uniformly distributed in hemisphere */ ccl_device_inline void sample_uniform_hemisphere( - const float3 N, float randu, float randv, ccl_private float3 *omega_in, ccl_private float *pdf) + const float3 N, float randu, float randv, ccl_private float3 *wo, ccl_private float *pdf) { float z = randu; float r = sqrtf(max(0.0f, 1.0f - z * z)); @@ -55,7 +55,7 @@ ccl_device_inline void sample_uniform_hemisphere( float3 T, B; make_orthonormals(N, &T, &B); - *omega_in = x * T + y * B + z * N; + *wo = x * T + y * B + z * N; *pdf = 0.5f * M_1_PI_F; } @@ -64,20 +64,21 @@ ccl_device_inline void sample_uniform_cone(const float3 N, float angle, float randu, float randv, - ccl_private float3 *omega_in, + ccl_private float3 *wo, ccl_private float *pdf) { - float zMin = cosf(angle); - float z = zMin - zMin * randu + randu; - float r = safe_sqrtf(1.0f - sqr(z)); - float phi = M_2PI_F * randv; - float x = r * cosf(phi); - float y = r * sinf(phi); + const float cosThetaMin = cosf(angle); + const float cosTheta = mix(cosThetaMin, 1.0f, randu); + const float sinTheta = sin_from_cos(cosTheta); + const float phi = M_2PI_F * randv; + const float x = sinTheta * cosf(phi); + const float y = sinTheta * sinf(phi); + const float z = cosTheta; float3 T, B; make_orthonormals(N, &T, &B); - *omega_in = x * T + y * B + z * N; - *pdf = M_1_2PI_F / (1.0f - zMin); + *wo = x * T + y * B + z * N; + *pdf = M_1_2PI_F / (1.0f - cosThetaMin); } ccl_device_inline float pdf_uniform_cone(const float3 N, float3 D, float angle) diff --git a/intern/cycles/kernel/svm/brick.h b/intern/cycles/kernel/svm/brick.h index f8fa4a4a84a..e64fc636334 100644 --- a/intern/cycles/kernel/svm/brick.h +++ 
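/* Illustration only, not part of the patch: the rewritten sample_uniform_cone() above keeps the
 * same distribution, just expressed via cos(theta) directly. For a cone of half-angle theta_max
 * around N the solid angle is Omega = 2*pi*(1 - cos(theta_max)); since d_omega = d(cos theta) * d_phi,
 * drawing cos(theta) uniformly from [cos(theta_max), 1] and phi uniformly from [0, 2*pi) is uniform
 * over the cap, with constant pdf = 1 / Omega = M_1_2PI_F / (1 - cosThetaMin) as written above.
 * The helper below restates that in plain C++ with local names, not the kernel API. */

#include <cmath>

/* Constant solid-angle pdf of uniform sampling inside a cone of half-angle `angle` (radians),
 * matching the expression used in the kernel code above. */
static float uniform_cone_pdf(float angle)
{
  const float kPi = 3.14159265358979323846f;
  const float cos_theta_min = std::cos(angle);
  return 1.0f / (2.0f * kPi * (1.0f - cos_theta_min));
}

/* Sanity check: for angle = pi/2 the cap is a hemisphere and this reduces to 1 / (2*pi),
 * the same pdf returned by sample_uniform_hemisphere() above. */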
b/intern/cycles/kernel/svm/brick.h @@ -46,17 +46,8 @@ ccl_device_noinline_cpu float2 svm_brick(float3 p, float tint = saturatef((brick_noise((rownum << 16) + (bricknum & 0xFFFF)) + bias)); float min_dist = min(min(x, y), min(brick_width - x, row_height - y)); - float mortar; - if (min_dist >= mortar_size) { - mortar = 0.0f; - } - else if (mortar_smooth == 0.0f) { - mortar = 1.0f; - } - else { - min_dist = 1.0f - min_dist / mortar_size; - mortar = (min_dist < mortar_smooth) ? smoothstepf(min_dist / mortar_smooth) : 1.0f; - } + min_dist = 1.0f - min_dist / mortar_size; + float mortar = smoothstepf(min_dist / mortar_smooth); return make_float2(tint, mortar); } diff --git a/intern/cycles/kernel/svm/closure.h b/intern/cycles/kernel/svm/closure.h index d18f2cc0854..f52e13e37d8 100644 --- a/intern/cycles/kernel/svm/closure.h +++ b/intern/cycles/kernel/svm/closure.h @@ -102,7 +102,7 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, float3 N = stack_valid(data_node.x) ? stack_load_float3(stack, data_node.x) : sd->N; if (!(sd->type & PRIMITIVE_CURVE)) { - N = ensure_valid_reflection(sd->Ng, sd->I, N); + N = ensure_valid_reflection(sd->Ng, sd->wi, N); } float param1 = (stack_valid(param1_offset)) ? stack_load_float(stack, param1_offset) : @@ -162,8 +162,8 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, float ior = (sd->flag & SD_BACKFACING) ? 1.0f / eta : eta; // calculate fresnel for refraction - float cosNO = dot(N, sd->I); - float fresnel = fresnel_dielectric_cos(cosNO, ior); + float cosNI = dot(N, sd->wi); + float fresnel = fresnel_dielectric_cos(cosNI, ior); // calculate weights of the diffuse and specular part float diffuse_weight = (1.0f - saturatef(metallic)) * (1.0f - saturatef(transmission)); @@ -185,7 +185,7 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, stack_load_float3(stack, data_cn_ssr.x) : sd->N; if (!(sd->type & PRIMITIVE_CURVE)) { - clearcoat_normal = ensure_valid_reflection(sd->Ng, sd->I, clearcoat_normal); + clearcoat_normal = ensure_valid_reflection(sd->Ng, sd->wi, clearcoat_normal); } float3 subsurface_radius = stack_valid(data_cn_ssr.y) ? 
stack_load_float3(stack, data_cn_ssr.y) : @@ -333,7 +333,6 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, bsdf->extra->cspec0 = rgb_to_spectrum( (specular * 0.08f * tmp_col) * (1.0f - metallic) + base_color * metallic); bsdf->extra->color = rgb_to_spectrum(base_color); - bsdf->extra->clearcoat = 0.0f; /* setup bsdf */ if (distribution == CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID || @@ -383,7 +382,6 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, bsdf->extra->color = rgb_to_spectrum(base_color); bsdf->extra->cspec0 = rgb_to_spectrum(cspec0); - bsdf->extra->clearcoat = 0.0f; /* setup bsdf */ sd->flag |= bsdf_microfacet_ggx_fresnel_setup(bsdf, sd); @@ -440,7 +438,6 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, bsdf->extra->color = rgb_to_spectrum(base_color); bsdf->extra->cspec0 = rgb_to_spectrum(cspec0); - bsdf->extra->clearcoat = 0.0f; /* setup bsdf */ sd->flag |= bsdf_microfacet_multi_ggx_glass_fresnel_setup(bsdf, sd); @@ -455,30 +452,20 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, #ifdef __CAUSTICS_TRICKS__ if (kernel_data.integrator.caustics_reflective || (path_flag & PATH_RAY_DIFFUSE) == 0) { #endif - if (clearcoat > CLOSURE_WEIGHT_CUTOFF) { - ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc( - sd, sizeof(MicrofacetBsdf), weight); - ccl_private MicrofacetExtra *extra = - (bsdf != NULL) ? - (ccl_private MicrofacetExtra *)closure_alloc_extra(sd, sizeof(MicrofacetExtra)) : - NULL; + Spectrum clearcoat_weight = 0.25f * clearcoat * weight; + ccl_private MicrofacetBsdf *bsdf = (ccl_private MicrofacetBsdf *)bsdf_alloc( + sd, sizeof(MicrofacetBsdf), clearcoat_weight); - if (bsdf && extra) { - bsdf->N = clearcoat_normal; - bsdf->T = zero_float3(); - bsdf->ior = 1.5f; - bsdf->extra = extra; + if (bsdf) { + bsdf->N = clearcoat_normal; + bsdf->T = zero_float3(); + bsdf->ior = 1.5f; - bsdf->alpha_x = clearcoat_roughness * clearcoat_roughness; - bsdf->alpha_y = clearcoat_roughness * clearcoat_roughness; + bsdf->alpha_x = clearcoat_roughness * clearcoat_roughness; + bsdf->alpha_y = clearcoat_roughness * clearcoat_roughness; - bsdf->extra->color = zero_spectrum(); - bsdf->extra->cspec0 = make_spectrum(0.04f); - bsdf->extra->clearcoat = clearcoat; - - /* setup bsdf */ - sd->flag |= bsdf_microfacet_ggx_clearcoat_setup(bsdf, sd); - } + /* setup bsdf */ + sd->flag |= bsdf_microfacet_ggx_clearcoat_setup(bsdf, sd); } #ifdef __CAUSTICS_TRICKS__ } @@ -584,7 +571,6 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, if (bsdf->extra) { bsdf->extra->color = rgb_to_spectrum(stack_load_float3(stack, data_node.w)); bsdf->extra->cspec0 = zero_spectrum(); - bsdf->extra->clearcoat = 0.0f; sd->flag |= bsdf_microfacet_multi_ggx_setup(bsdf); } } @@ -652,8 +638,8 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, eta = (sd->flag & SD_BACKFACING) ? 
1.0f / eta : eta; /* fresnel */ - float cosNO = dot(N, sd->I); - float fresnel = fresnel_dielectric_cos(cosNO, eta); + float cosNI = dot(N, sd->wi); + float fresnel = fresnel_dielectric_cos(cosNI, eta); float roughness = sqr(param1); /* reflection */ @@ -724,7 +710,6 @@ ccl_device_noinline int svm_node_closure_bsdf(KernelGlobals kg, kernel_assert(stack_valid(data_node.z)); bsdf->extra->color = rgb_to_spectrum(stack_load_float3(stack, data_node.z)); bsdf->extra->cspec0 = zero_spectrum(); - bsdf->extra->clearcoat = 0.0f; /* setup bsdf */ sd->flag |= bsdf_microfacet_multi_ggx_glass_setup(bsdf); diff --git a/intern/cycles/kernel/svm/displace.h b/intern/cycles/kernel/svm/displace.h index 230f8c73820..d2be8c4844b 100644 --- a/intern/cycles/kernel/svm/displace.h +++ b/intern/cycles/kernel/svm/displace.h @@ -71,7 +71,7 @@ ccl_device_noinline void svm_node_set_bump(KernelGlobals kg, object_normal_transform(kg, sd, &normal_out); } - normal_out = ensure_valid_reflection(sd->Ng, sd->I, normal_out); + normal_out = ensure_valid_reflection(sd->Ng, sd->wi, normal_out); stack_store_float3(stack, node.w, normal_out); } else diff --git a/intern/cycles/kernel/svm/fresnel.h b/intern/cycles/kernel/svm/fresnel.h index 4b68b70799b..e5970740798 100644 --- a/intern/cycles/kernel/svm/fresnel.h +++ b/intern/cycles/kernel/svm/fresnel.h @@ -22,7 +22,7 @@ ccl_device_noinline void svm_node_fresnel(ccl_private ShaderData *sd, eta = fmaxf(eta, 1e-5f); eta = (sd->flag & SD_BACKFACING) ? 1.0f / eta : eta; - float f = fresnel_dielectric_cos(dot(sd->I, normal_in), eta); + float f = fresnel_dielectric_cos(dot(sd->wi, normal_in), eta); stack_store_float(stack, out_offset, f); } @@ -50,10 +50,10 @@ ccl_device_noinline void svm_node_layer_weight(ccl_private ShaderData *sd, float eta = fmaxf(1.0f - blend, 1e-5f); eta = (sd->flag & SD_BACKFACING) ? 
eta : 1.0f / eta; - f = fresnel_dielectric_cos(dot(sd->I, normal_in), eta); + f = fresnel_dielectric_cos(dot(sd->wi, normal_in), eta); } else { - f = fabsf(dot(sd->I, normal_in)); + f = fabsf(dot(sd->wi, normal_in)); if (blend != 0.5f) { blend = clamp(blend, 0.0f, 1.0f - 1e-5f); diff --git a/intern/cycles/kernel/svm/geometry.h b/intern/cycles/kernel/svm/geometry.h index cbd87d84409..829b0ab8566 100644 --- a/intern/cycles/kernel/svm/geometry.h +++ b/intern/cycles/kernel/svm/geometry.h @@ -28,7 +28,7 @@ ccl_device_noinline void svm_node_geometry(KernelGlobals kg, break; #endif case NODE_GEOM_I: - data = sd->I; + data = sd->wi; break; case NODE_GEOM_Ng: data = sd->Ng; diff --git a/intern/cycles/kernel/svm/ies.h b/intern/cycles/kernel/svm/ies.h index 3648cb580d5..b40e04df62c 100644 --- a/intern/cycles/kernel/svm/ies.h +++ b/intern/cycles/kernel/svm/ies.h @@ -84,6 +84,7 @@ ccl_device_inline float kernel_ies_interp(KernelGlobals kg, int slot, float h_an return max(cubic_interp(a, b, c, d, h_frac), 0.0f); } +#ifdef __SVM__ ccl_device_noinline void svm_node_ies(KernelGlobals kg, ccl_private ShaderData *sd, ccl_private float *stack, @@ -105,5 +106,6 @@ ccl_device_noinline void svm_node_ies(KernelGlobals kg, stack_store_float(stack, fac_offset, fac); } } +#endif CCL_NAMESPACE_END diff --git a/intern/cycles/kernel/svm/sky.h b/intern/cycles/kernel/svm/sky.h index 1638e783a69..92b292d660d 100644 --- a/intern/cycles/kernel/svm/sky.h +++ b/intern/cycles/kernel/svm/sky.h @@ -118,6 +118,7 @@ ccl_device float3 geographical_to_direction(float lat, float lon) ccl_device float3 sky_radiance_nishita(KernelGlobals kg, float3 dir, + uint32_t path_flag, float3 pixel_bottom, float3 pixel_top, ccl_private float *nishita_data, @@ -140,8 +141,9 @@ ccl_device float3 sky_radiance_nishita(KernelGlobals kg, float half_angular = angular_diameter / 2.0f; float dir_elevation = M_PI_2_F - direction.x; - /* if ray inside sun disc render it, otherwise render sky */ - if (sun_disc && sun_dir_angle < half_angular) { + /* if ray inside sun disc render it, otherwise render sky. + * alternatively, ignore the sun if we're evaluating the background texture. 
*/ + if (sun_disc && sun_dir_angle < half_angular && !(path_flag & PATH_RAY_IMPORTANCE_BAKE)) { /* get 2 pixels data */ float y; @@ -197,8 +199,12 @@ ccl_device float3 sky_radiance_nishita(KernelGlobals kg, return xyz_to_rgb_clamped(kg, xyz); } -ccl_device_noinline int svm_node_tex_sky( - KernelGlobals kg, ccl_private ShaderData *sd, ccl_private float *stack, uint4 node, int offset) +ccl_device_noinline int svm_node_tex_sky(KernelGlobals kg, + ccl_private ShaderData *sd, + uint32_t path_flag, + ccl_private float *stack, + uint4 node, + int offset) { /* Load data */ uint dir_offset = node.y; @@ -310,7 +316,8 @@ ccl_device_noinline int svm_node_tex_sky( uint texture_id = __float_as_uint(data.z); /* Compute Sky */ - f = sky_radiance_nishita(kg, dir, pixel_bottom, pixel_top, nishita_data, texture_id); + f = sky_radiance_nishita( + kg, dir, path_flag, pixel_bottom, pixel_top, nishita_data, texture_id); } stack_store_float3(stack, out_offset, f); diff --git a/intern/cycles/kernel/svm/svm.h b/intern/cycles/kernel/svm/svm.h index 3ca632c5f0b..96b2b82d8af 100644 --- a/intern/cycles/kernel/svm/svm.h +++ b/intern/cycles/kernel/svm/svm.h @@ -463,7 +463,7 @@ ccl_device void svm_eval_nodes(KernelGlobals kg, svm_node_tex_environment(kg, sd, stack, node); break; SVM_CASE(NODE_TEX_SKY) - offset = svm_node_tex_sky(kg, sd, stack, node, offset); + offset = svm_node_tex_sky(kg, sd, path_flag, stack, node, offset); break; SVM_CASE(NODE_TEX_GRADIENT) svm_node_tex_gradient(sd, stack, node); diff --git a/intern/cycles/kernel/svm/tex_coord.h b/intern/cycles/kernel/svm/tex_coord.h index 8154c542e6f..b294616603d 100644 --- a/intern/cycles/kernel/svm/tex_coord.h +++ b/intern/cycles/kernel/svm/tex_coord.h @@ -64,9 +64,9 @@ ccl_device_noinline int svm_node_tex_coord(KernelGlobals kg, } case NODE_TEXCO_REFLECTION: { if (sd->object != OBJECT_NONE) - data = 2.0f * dot(sd->N, sd->I) * sd->N - sd->I; + data = 2.0f * dot(sd->N, sd->wi) * sd->N - sd->wi; else - data = sd->I; + data = sd->wi; break; } case NODE_TEXCO_DUPLI_GENERATED: { @@ -146,9 +146,9 @@ ccl_device_noinline int svm_node_tex_coord_bump_dx(KernelGlobals kg, } case NODE_TEXCO_REFLECTION: { if (sd->object != OBJECT_NONE) - data = 2.0f * dot(sd->N, sd->I) * sd->N - sd->I; + data = 2.0f * dot(sd->N, sd->wi) * sd->N - sd->wi; else - data = sd->I; + data = sd->wi; break; } case NODE_TEXCO_DUPLI_GENERATED: { @@ -231,9 +231,9 @@ ccl_device_noinline int svm_node_tex_coord_bump_dy(KernelGlobals kg, } case NODE_TEXCO_REFLECTION: { if (sd->object != OBJECT_NONE) - data = 2.0f * dot(sd->N, sd->I) * sd->N - sd->I; + data = 2.0f * dot(sd->N, sd->wi) * sd->N - sd->wi; else - data = sd->I; + data = sd->wi; break; } case NODE_TEXCO_DUPLI_GENERATED: { diff --git a/intern/cycles/kernel/svm/types.h b/intern/cycles/kernel/svm/types.h index 7e956505c7f..12c5a17072b 100644 --- a/intern/cycles/kernel/svm/types.h +++ b/intern/cycles/kernel/svm/types.h @@ -489,8 +489,7 @@ typedef enum ClosureType { #define CLOSURE_IS_BSDF_MICROFACET_FRESNEL(type) \ (type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_FRESNEL_ID || \ type == CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_FRESNEL_ID || \ - type == CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID || \ - type == CLOSURE_BSDF_MICROFACET_GGX_CLEARCOAT_ID) + type == CLOSURE_BSDF_MICROFACET_GGX_FRESNEL_ID) #define CLOSURE_IS_BSDF_OR_BSSRDF(type) (type <= CLOSURE_BSSRDF_RANDOM_WALK_FIXED_RADIUS_ID) #define CLOSURE_IS_BSSRDF(type) \ (type >= CLOSURE_BSSRDF_BURLEY_ID && type <= CLOSURE_BSSRDF_RANDOM_WALK_FIXED_RADIUS_ID) diff --git a/intern/cycles/kernel/svm/wireframe.h 
b/intern/cycles/kernel/svm/wireframe.h index 91fadf4cfc4..cae850419e9 100644 --- a/intern/cycles/kernel/svm/wireframe.h +++ b/intern/cycles/kernel/svm/wireframe.h @@ -47,8 +47,8 @@ ccl_device_inline float wireframe(KernelGlobals kg, if (pixel_size) { // Project the derivatives of P to the viewing plane defined // by I so we have a measure of how big is a pixel at this point - float pixelwidth_x = len(dP.dx - dot(dP.dx, sd->I) * sd->I); - float pixelwidth_y = len(dP.dy - dot(dP.dy, sd->I) * sd->I); + float pixelwidth_x = len(dP.dx - dot(dP.dx, sd->wi) * sd->wi); + float pixelwidth_y = len(dP.dy - dot(dP.dy, sd->wi) * sd->wi); // Take the average of both axis' length pixelwidth = (pixelwidth_x + pixelwidth_y) * 0.5f; } diff --git a/intern/cycles/kernel/types.h b/intern/cycles/kernel/types.h index ff75e70ba6b..8637c717ddc 100644 --- a/intern/cycles/kernel/types.h +++ b/intern/cycles/kernel/types.h @@ -34,8 +34,6 @@ CCL_NAMESPACE_BEGIN #define VOLUME_BOUNDS_MAX 1024 -#define BECKMANN_TABLE_SIZE 256 - #define SHADER_NONE (~0) #define OBJECT_NONE (~0) #define PRIM_NONE (~0) @@ -76,7 +74,8 @@ CCL_NAMESPACE_BEGIN #define __VOLUME__ /* TODO: solve internal compiler errors and enable light tree on HIP. */ -#ifdef __KERNEL_HIP__ +/* TODO: solve internal compiler perf issue and enable light tree on Metal/AMD. */ +#if defined(__KERNEL_HIP__) || defined(__KERNEL_METAL_AMD__) # undef __LIGHT_TREE__ #endif @@ -208,23 +207,24 @@ enum PathRayFlag : uint32_t { PATH_RAY_SINGULAR = (1U << 5U), PATH_RAY_TRANSPARENT = (1U << 6U), PATH_RAY_VOLUME_SCATTER = (1U << 7U), + PATH_RAY_IMPORTANCE_BAKE = (1U << 8U), /* Shadow ray visibility. */ - PATH_RAY_SHADOW_OPAQUE = (1U << 8U), - PATH_RAY_SHADOW_TRANSPARENT = (1U << 9U), + PATH_RAY_SHADOW_OPAQUE = (1U << 9U), + PATH_RAY_SHADOW_TRANSPARENT = (1U << 10U), PATH_RAY_SHADOW = (PATH_RAY_SHADOW_OPAQUE | PATH_RAY_SHADOW_TRANSPARENT), /* Subset of flags used for ray visibility for intersection. * * NOTE: SHADOW_CATCHER macros below assume there are no more than * 16 visibility bits. */ - PATH_RAY_ALL_VISIBILITY = ((1U << 10U) - 1U), + PATH_RAY_ALL_VISIBILITY = ((1U << 11U) - 1U), /* Special flag to tag unaligned BVH nodes. * Only set and used in BVH nodes to distinguish how to interpret bounding box information stored * in the node (either it should be intersected as AABB or as OBBU). * So this can overlap with path flags. */ - PATH_RAY_NODE_UNALIGNED = (1U << 10U), + PATH_RAY_NODE_UNALIGNED = (1U << 11U), /* -------------------------------------------------------------------- * Path flags. @@ -232,60 +232,59 @@ enum PathRayFlag : uint32_t { /* Surface had transmission component at previous bounce. Used for light tree * traversal and culling to be consistent with MIS PDF at the next bounce. */ - PATH_RAY_MIS_HAD_TRANSMISSION = (1U << 10U), + PATH_RAY_MIS_HAD_TRANSMISSION = (1U << 11U), /* Don't apply multiple importance sampling weights to emission from * lamp or surface hits, because they were not direct light sampled. */ - PATH_RAY_MIS_SKIP = (1U << 11U), + PATH_RAY_MIS_SKIP = (1U << 12U), /* Diffuse bounce earlier in the path, skip SSS to improve performance * and avoid branching twice with disk sampling SSS. */ - PATH_RAY_DIFFUSE_ANCESTOR = (1U << 12U), + PATH_RAY_DIFFUSE_ANCESTOR = (1U << 13U), /* Single pass has been written. */ - PATH_RAY_SINGLE_PASS_DONE = (1U << 13U), + PATH_RAY_SINGLE_PASS_DONE = (1U << 14U), /* Zero background alpha, for camera or transparent glass rays. 
*/ - PATH_RAY_TRANSPARENT_BACKGROUND = (1U << 14U), + PATH_RAY_TRANSPARENT_BACKGROUND = (1U << 15U), /* Terminate ray immediately at next bounce. */ - PATH_RAY_TERMINATE_ON_NEXT_SURFACE = (1U << 15U), - PATH_RAY_TERMINATE_IN_NEXT_VOLUME = (1U << 16U), + PATH_RAY_TERMINATE_ON_NEXT_SURFACE = (1U << 16U), + PATH_RAY_TERMINATE_IN_NEXT_VOLUME = (1U << 17U), /* Ray is to be terminated, but continue with transparent bounces and * emission as long as we encounter them. This is required to make the * MIS between direct and indirect light rays match, as shadow rays go * through transparent surfaces to reach emission too. */ - PATH_RAY_TERMINATE_AFTER_TRANSPARENT = (1U << 17U), + PATH_RAY_TERMINATE_AFTER_TRANSPARENT = (1U << 18U), /* Terminate ray immediately after volume shading. */ - PATH_RAY_TERMINATE_AFTER_VOLUME = (1U << 18U), + PATH_RAY_TERMINATE_AFTER_VOLUME = (1U << 19U), /* Ray is to be terminated. */ PATH_RAY_TERMINATE = (PATH_RAY_TERMINATE_ON_NEXT_SURFACE | PATH_RAY_TERMINATE_IN_NEXT_VOLUME | PATH_RAY_TERMINATE_AFTER_TRANSPARENT | PATH_RAY_TERMINATE_AFTER_VOLUME), /* Path and shader is being evaluated for direct lighting emission. */ - PATH_RAY_EMISSION = (1U << 19U), + PATH_RAY_EMISSION = (1U << 20U), /* Perform subsurface scattering. */ - PATH_RAY_SUBSURFACE_RANDOM_WALK = (1U << 20U), - PATH_RAY_SUBSURFACE_DISK = (1U << 21U), - PATH_RAY_SUBSURFACE_USE_FRESNEL = (1U << 22U), - PATH_RAY_SUBSURFACE_BACKFACING = (1U << 23U), + PATH_RAY_SUBSURFACE_RANDOM_WALK = (1U << 21U), + PATH_RAY_SUBSURFACE_DISK = (1U << 22U), + PATH_RAY_SUBSURFACE_USE_FRESNEL = (1U << 23U), + PATH_RAY_SUBSURFACE_BACKFACING = (1U << 24U), PATH_RAY_SUBSURFACE = (PATH_RAY_SUBSURFACE_RANDOM_WALK | PATH_RAY_SUBSURFACE_DISK | PATH_RAY_SUBSURFACE_USE_FRESNEL | PATH_RAY_SUBSURFACE_BACKFACING), /* Contribute to denoising features. */ - PATH_RAY_DENOISING_FEATURES = (1U << 24U), + PATH_RAY_DENOISING_FEATURES = (1U << 25U), /* Render pass categories. */ - PATH_RAY_SURFACE_PASS = (1U << 25U), - PATH_RAY_VOLUME_PASS = (1U << 26U), + PATH_RAY_SURFACE_PASS = (1U << 26U), + PATH_RAY_VOLUME_PASS = (1U << 27U), PATH_RAY_ANY_PASS = (PATH_RAY_SURFACE_PASS | PATH_RAY_VOLUME_PASS), - /* Shadow ray is for a light or surface, or AO. */ - PATH_RAY_SHADOW_FOR_LIGHT = (1U << 27U), + /* Shadow ray is for AO. */ PATH_RAY_SHADOW_FOR_AO = (1U << 28U), /* A shadow catcher object was hit and the path was split into two. */ @@ -888,7 +887,7 @@ typedef struct ccl_align(16) ShaderData /* true geometric normal */ float3 Ng; /* view/incoming direction */ - float3 I; + float3 wi; /* shader id */ int shader; /* booleans describing shader, see ShaderDataFlag */ @@ -920,7 +919,7 @@ typedef struct ccl_align(16) ShaderData #ifdef __RAY_DIFFERENTIALS__ /* Radius of differential of P. */ float dP; - /* Radius of differential of I. */ + /* Radius of differential of wi. 
*/ float dI; /* differential of u, v */ differential du; @@ -1187,9 +1186,8 @@ typedef enum KernelBVHLayout { #include "kernel/data_template.h" typedef struct KernelTables { - int beckmann_offset; int filter_table_offset; - int pad1, pad2; + int pad1, pad2, pad3; } KernelTables; static_assert_align(KernelTables, 16); @@ -1293,12 +1291,14 @@ typedef struct KernelCurveSegment { static_assert_align(KernelCurveSegment, 8); typedef struct KernelSpotLight { + packed_float3 axis_u; float radius; + packed_float3 axis_v; float invarea; - float cos_half_spot_angle; - float spot_smooth; packed_float3 dir; - float pad; + float cos_half_spot_angle; + packed_float3 len; + float spot_smooth; } KernelSpotLight; /* PointLight is SpotLight with only radius and invarea being used. */ diff --git a/intern/cycles/scene/geometry.cpp b/intern/cycles/scene/geometry.cpp index 8e831187477..4c5013b5a9f 100644 --- a/intern/cycles/scene/geometry.cpp +++ b/intern/cycles/scene/geometry.cpp @@ -23,7 +23,10 @@ #include "subd/patch_table.h" #include "subd/split.h" -#include "kernel/osl/globals.h" +#ifdef WITH_OSL +# include "kernel/osl/globals.h" +# include "kernel/osl/services.h" +#endif #include "util/foreach.h" #include "util/log.h" @@ -306,6 +309,11 @@ void GeometryManager::update_osl_globals(Device *device, Scene *scene) { #ifdef WITH_OSL OSLGlobals *og = (OSLGlobals *)device->get_cpu_osl_memory(); + if (og == nullptr) { + /* Can happen when rendering with multiple GPUs, but no CPU (in which case the name maps filled + * below are not used anyway) */ + return; + } og->object_name_map.clear(); og->object_names.clear(); @@ -1666,6 +1674,7 @@ void GeometryManager::device_update_displacement_images(Device *device, TaskPool pool; ImageManager *image_manager = scene->image_manager; set bump_images; + bool has_osl_node = false; foreach (Geometry *geom, scene->geometry) { if (geom->is_modified()) { /* Geometry-level check for hair shadow transparency. @@ -1685,6 +1694,9 @@ void GeometryManager::device_update_displacement_images(Device *device, continue; } foreach (ShaderNode *node, shader->graph->nodes) { + if (node->special_type == SHADER_SPECIAL_TYPE_OSL) { + has_osl_node = true; + } if (node->special_type != SHADER_SPECIAL_TYPE_IMAGE_SLOT) { continue; } @@ -1700,6 +1712,28 @@ void GeometryManager::device_update_displacement_images(Device *device, } } } + +#ifdef WITH_OSL + /* If any OSL node is used for displacement, it may reference a texture. But it's + * unknown which ones, so have to load them all. 
*/ + if (has_osl_node) { + set services_shared; + device->foreach_device([&services_shared](Device *sub_device) { + OSLGlobals *og = (OSLGlobals *)sub_device->get_cpu_osl_memory(); + services_shared.insert(og->services); + }); + + for (OSLRenderServices *services : services_shared) { + for (auto it = services->textures.begin(); it != services->textures.end(); ++it) { + if (it->second->handle.get_manager() == image_manager) { + const int slot = it->second->handle.svm_slot(); + bump_images.insert(slot); + } + } + } + } +#endif + foreach (int slot, bump_images) { pool.push(function_bind( &ImageManager::device_update_slot, image_manager, device, scene, slot, &progress)); diff --git a/intern/cycles/scene/image.cpp b/intern/cycles/scene/image.cpp index a5c794bc762..925583f88b5 100644 --- a/intern/cycles/scene/image.cpp +++ b/intern/cycles/scene/image.cpp @@ -222,6 +222,11 @@ VDBImageLoader *ImageHandle::vdb_loader(const int tile_index) const return NULL; } +ImageManager *ImageHandle::get_manager() const +{ + return manager; +} + bool ImageHandle::operator==(const ImageHandle &other) const { return manager == other.manager && tile_slots == other.tile_slots; diff --git a/intern/cycles/scene/image.h b/intern/cycles/scene/image.h index da47d8144bc..36bfe17a69d 100644 --- a/intern/cycles/scene/image.h +++ b/intern/cycles/scene/image.h @@ -153,6 +153,8 @@ class ImageHandle { VDBImageLoader *vdb_loader(const int tile_index = 0) const; + ImageManager *get_manager() const; + protected: vector tile_slots; ImageManager *manager; diff --git a/intern/cycles/scene/light.cpp b/intern/cycles/scene/light.cpp index 9070c444f63..3c5698b4218 100644 --- a/intern/cycles/scene/light.cpp +++ b/intern/cycles/scene/light.cpp @@ -721,6 +721,7 @@ void LightManager::device_update_background(Device *device, int2 environment_res = make_int2(0, 0); Shader *shader = scene->background->get_shader(scene); int num_suns = 0; + float sun_average_radiance = 0.0f; foreach (ShaderNode *node, shader->graph->nodes) { if (node->type == EnvironmentTextureNode::get_node_type()) { EnvironmentTextureNode *env = (EnvironmentTextureNode *)node; @@ -762,6 +763,7 @@ void LightManager::device_update_background(Device *device, /* empirical value */ kbackground->sun_weight = 4.0f; + sun_average_radiance = sky->get_sun_average_radiance(); environment_res.x = max(environment_res.x, 512); environment_res.y = max(environment_res.y, 256); num_suns++; @@ -830,7 +832,18 @@ void LightManager::device_update_background(Device *device, float cdf_total = marg_cdf[res.y - 1].y + marg_cdf[res.y - 1].x / res.y; marg_cdf[res.y].x = cdf_total; - background_light->set_average_radiance(cdf_total * M_PI_2_F); + float map_average_radiance = cdf_total * M_PI_2_F; + if (sun_average_radiance > 0.0f) { + /* The weighting here is just a heuristic that was empirically determined. + * The sun's average radiance is much higher than the map's average radiance, + * but we don't want to weight the background light too much because + * visibility is not accounted for anyway. 
*/ + background_light->set_average_radiance(0.8f * map_average_radiance + + 0.2f * sun_average_radiance); + } + else { + background_light->set_average_radiance(map_average_radiance); + } if (cdf_total > 0.0f) for (int i = 1; i < res.y; i++) @@ -1063,23 +1076,31 @@ void LightManager::device_update_lights(Device *device, DeviceScene *dscene, Sce else if (light->light_type == LIGHT_SPOT) { shader_id &= ~SHADER_AREA_LIGHT; + float3 len; + float3 axis_u = normalize_len(light->axisu, &len.x); + float3 axis_v = normalize_len(light->axisv, &len.y); + float3 dir = normalize_len(light->dir, &len.z); + if (len.z == 0.0f) { + dir = zero_float3(); + } + float radius = light->size; float invarea = (radius > 0.0f) ? 1.0f / (M_PI_F * radius * radius) : 1.0f; float cos_half_spot_angle = cosf(light->spot_angle * 0.5f); float spot_smooth = (1.0f - cos_half_spot_angle) * light->spot_smooth; - float3 dir = light->dir; - - dir = safe_normalize(dir); if (light->use_mis && radius > 0.0f) shader_id |= SHADER_USE_MIS; klights[light_index].co = co; + klights[light_index].spot.axis_u = axis_u; klights[light_index].spot.radius = radius; + klights[light_index].spot.axis_v = axis_v; klights[light_index].spot.invarea = invarea; - klights[light_index].spot.cos_half_spot_angle = cos_half_spot_angle; - klights[light_index].spot.spot_smooth = spot_smooth; klights[light_index].spot.dir = dir; + klights[light_index].spot.cos_half_spot_angle = cos_half_spot_angle; + klights[light_index].spot.len = len; + klights[light_index].spot.spot_smooth = spot_smooth; } klights[light_index].shader_id = shader_id; diff --git a/intern/cycles/scene/light_tree.cpp b/intern/cycles/scene/light_tree.cpp index 4fa4755479b..1261f09cda0 100644 --- a/intern/cycles/scene/light_tree.cpp +++ b/intern/cycles/scene/light_tree.cpp @@ -156,7 +156,13 @@ LightTreePrimitive::LightTreePrimitive(Scene *scene, int prim_id, int object_id) } else if (type == LIGHT_SPOT) { bcone.theta_o = 0; - bcone.theta_e = lamp->get_spot_angle() * 0.5f; + + const float unscaled_theta_e = lamp->get_spot_angle() * 0.5f; + const float len_u = len(lamp->get_axisu()); + const float len_v = len(lamp->get_axisv()); + const float len_w = len(lamp->get_dir()); + + bcone.theta_e = fast_atanf(fast_tanf(unscaled_theta_e) * fmaxf(len_u, len_v) / len_w); /* Point and spot lights can emit light from any point within its radius. */ const float3 radius = make_float3(size); diff --git a/intern/cycles/scene/osl.cpp b/intern/cycles/scene/osl.cpp index 9afd6577b10..53e993b8135 100644 --- a/intern/cycles/scene/osl.cpp +++ b/intern/cycles/scene/osl.cpp @@ -184,9 +184,19 @@ void OSLShaderManager::device_update_specific(Device *device, * is being freed after the Session is freed. */ thread_scoped_lock lock(ss_shared_mutex); + + /* Set current image manager during the lock, so that there is no conflict with other shader + * manager instances. + * + * It is used in "OSLRenderServices::get_texture_handle" called during optimization below to + * load images for the GPU. */ + OSLRenderServices::image_manager = scene->image_manager; + for (const auto &[device_type, ss] : ss_shared) { ss->optimize_all_groups(); } + + OSLRenderServices::image_manager = nullptr; } /* load kernels */ @@ -213,6 +223,22 @@ void OSLShaderManager::device_free(Device *device, DeviceScene *dscene, Scene *s og->bump_state.clear(); og->background_state.reset(); }); + + /* Remove any textures specific to an image manager from shared render services textures, since + * the image manager may get destroyed next. 
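/* Illustration only, not part of the patch: bcone.theta_e in the light_tree.cpp hunk above
 * adjusts the spot's emission half-angle for non-uniform object scale. A boundary ray at unit
 * distance along the axis sits at lateral offset tan(theta); scaling the light by
 * (len_u, len_v) across the axis and len_w along it turns that slope into
 * tan(theta) * max(len_u, len_v) / len_w, and taking the larger lateral length keeps the
 * bounding cone conservative. Plain C++ restatement with local names only: */

#include <algorithm>
#include <cmath>

static float scaled_spot_half_angle(float half_angle, float len_u, float len_v, float len_w)
{
  /* Map the cone's slope through the anisotropic scale, then convert back to an angle. */
  const float slope = std::tan(half_angle) * std::max(len_u, len_v) / len_w;
  return std::atan(slope);
}

/* With uniform scale (len_u == len_v == len_w) this returns half_angle unchanged. */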
*/ + for (const auto &[device_type, ss] : ss_shared) { + OSLRenderServices *services = static_cast(ss->renderer()); + + for (auto it = services->textures.begin(); it != services->textures.end(); ++it) { + if (it->second->handle.get_manager() == scene->image_manager) { + /* Don't lock again, since the iterator already did so. */ + services->textures.erase(it->first, false); + it.clear(); + /* Iterator was invalidated, start from the beginning again. */ + it = services->textures.begin(); + } + } + } } void OSLShaderManager::texture_system_init() @@ -279,14 +305,15 @@ void OSLShaderManager::shading_system_init() /* our own ray types */ static const char *raytypes[] = { - "camera", /* PATH_RAY_CAMERA */ - "reflection", /* PATH_RAY_REFLECT */ - "refraction", /* PATH_RAY_TRANSMIT */ - "diffuse", /* PATH_RAY_DIFFUSE */ - "glossy", /* PATH_RAY_GLOSSY */ - "singular", /* PATH_RAY_SINGULAR */ - "transparent", /* PATH_RAY_TRANSPARENT */ - "volume_scatter", /* PATH_RAY_VOLUME_SCATTER */ + "camera", /* PATH_RAY_CAMERA */ + "reflection", /* PATH_RAY_REFLECT */ + "refraction", /* PATH_RAY_TRANSMIT */ + "diffuse", /* PATH_RAY_DIFFUSE */ + "glossy", /* PATH_RAY_GLOSSY */ + "singular", /* PATH_RAY_SINGULAR */ + "transparent", /* PATH_RAY_TRANSPARENT */ + "volume_scatter", /* PATH_RAY_VOLUME_SCATTER */ + "importance_bake", /* PATH_RAY_IMPORTANCE_BAKE */ "shadow", /* PATH_RAY_SHADOW_OPAQUE */ "shadow", /* PATH_RAY_SHADOW_TRANSPARENT */ @@ -315,7 +342,6 @@ void OSLShaderManager::shading_system_init() "__unused__", "__unused__", "__unused__", - "__unused__", }; const int nraytypes = sizeof(raytypes) / sizeof(raytypes[0]); @@ -1215,6 +1241,7 @@ void OSLCompiler::compile(OSLGlobals *og, Shader *shader) shader->has_surface = false; shader->has_surface_transparent = false; + shader->has_surface_raytrace = false; shader->has_surface_bssrdf = false; shader->has_bump = has_bump; shader->has_bssrdf_bump = has_bump; diff --git a/intern/cycles/scene/shader.cpp b/intern/cycles/scene/shader.cpp index e03b77917ef..f0faa91b4be 100644 --- a/intern/cycles/scene/shader.cpp +++ b/intern/cycles/scene/shader.cpp @@ -32,114 +32,6 @@ namespace OCIO = OCIO_NAMESPACE; CCL_NAMESPACE_BEGIN thread_mutex ShaderManager::lookup_table_mutex; -vector ShaderManager::beckmann_table; -bool ShaderManager::beckmann_table_ready = false; - -/* Beckmann sampling precomputed table, see bsdf_microfacet.h */ - -/* 2D slope distribution (alpha = 1.0) */ -static float beckmann_table_P22(const float slope_x, const float slope_y) -{ - return expf(-(slope_x * slope_x + slope_y * slope_y)); -} - -/* maximal slope amplitude (range that contains 99.99% of the distribution) */ -static float beckmann_table_slope_max() -{ - return 6.0; -} - -/* MSVC 2015 needs this ugly hack to prevent a codegen bug on x86 - * see T50176 for details - */ -#if defined(_MSC_VER) && (_MSC_VER == 1900) -# define MSVC_VOLATILE volatile -#else -# define MSVC_VOLATILE -#endif - -/* Paper used: Importance Sampling Microfacet-Based BSDFs with the - * Distribution of Visible Normals. Supplemental Material 2/2. 
- * - * http://hal.inria.fr/docs/01/00/66/20/ANNEX/supplemental2.pdf - */ -static void beckmann_table_rows(float *table, int row_from, int row_to) -{ - /* allocate temporary data */ - const int DATA_TMP_SIZE = 512; - vector slope_x(DATA_TMP_SIZE); - vector CDF_P22_omega_i(DATA_TMP_SIZE); - - /* loop over incident directions */ - for (int index_theta = row_from; index_theta < row_to; index_theta++) { - /* incident vector */ - const float cos_theta = index_theta / (BECKMANN_TABLE_SIZE - 1.0f); - const float sin_theta = safe_sqrtf(1.0f - cos_theta * cos_theta); - - /* for a given incident vector - * integrate P22_{omega_i}(x_slope, 1, 1), Eq. (10) */ - slope_x[0] = (double)-beckmann_table_slope_max(); - CDF_P22_omega_i[0] = 0; - - for (MSVC_VOLATILE int index_slope_x = 1; index_slope_x < DATA_TMP_SIZE; ++index_slope_x) { - /* slope_x */ - slope_x[index_slope_x] = (double)(-beckmann_table_slope_max() + - 2.0f * beckmann_table_slope_max() * index_slope_x / - (DATA_TMP_SIZE - 1.0f)); - - /* dot product with incident vector */ - float dot_product = fmaxf(0.0f, -(float)slope_x[index_slope_x] * sin_theta + cos_theta); - /* marginalize P22_{omega_i}(x_slope, 1, 1), Eq. (10) */ - float P22_omega_i = 0.0f; - - for (int j = 0; j < 100; ++j) { - float slope_y = -beckmann_table_slope_max() + - 2.0f * beckmann_table_slope_max() * j * (1.0f / 99.0f); - P22_omega_i += dot_product * beckmann_table_P22((float)slope_x[index_slope_x], slope_y); - } - - /* CDF of P22_{omega_i}(x_slope, 1, 1), Eq. (10) */ - CDF_P22_omega_i[index_slope_x] = CDF_P22_omega_i[index_slope_x - 1] + (double)P22_omega_i; - } - - /* renormalize CDF_P22_omega_i */ - for (int index_slope_x = 1; index_slope_x < DATA_TMP_SIZE; ++index_slope_x) - CDF_P22_omega_i[index_slope_x] /= CDF_P22_omega_i[DATA_TMP_SIZE - 1]; - - /* loop over random number U1 */ - int index_slope_x = 0; - - for (int index_U = 0; index_U < BECKMANN_TABLE_SIZE; ++index_U) { - const double U = 0.0000001 + 0.9999998 * index_U / (double)(BECKMANN_TABLE_SIZE - 1); - - /* inverse CDF_P22_omega_i, solve Eq.(11) */ - while (CDF_P22_omega_i[index_slope_x] <= U) - ++index_slope_x; - - const double interp = (CDF_P22_omega_i[index_slope_x] - U) / - (CDF_P22_omega_i[index_slope_x] - CDF_P22_omega_i[index_slope_x - 1]); - - /* store value */ - table[index_U + index_theta * BECKMANN_TABLE_SIZE] = - (float)(interp * slope_x[index_slope_x - 1] + (1.0 - interp) * slope_x[index_slope_x]); - } - } -} - -#undef MSVC_VOLATILE - -static void beckmann_table_build(vector &table) -{ - table.resize(BECKMANN_TABLE_SIZE * BECKMANN_TABLE_SIZE); - - /* multithreaded build */ - TaskPool pool; - - for (int i = 0; i < BECKMANN_TABLE_SIZE; i += 8) - pool.push(function_bind(&beckmann_table_rows, &table[0], i, i + 8)); - - pool.wait_work(); -} /* Shader */ @@ -491,7 +383,6 @@ bool Shader::need_update_geometry() const ShaderManager::ShaderManager() { update_flags = UPDATE_ALL; - beckmann_table_offset = TABLE_OFFSET_INVALID; init_xyz_transforms(); } @@ -663,22 +554,6 @@ void ShaderManager::device_update_common(Device * /*device*/, dscene->shaders.copy_to_device(); - /* lookup tables */ - KernelTables *ktables = &dscene->data.tables; - - /* beckmann lookup table */ - if (beckmann_table_offset == TABLE_OFFSET_INVALID) { - if (!beckmann_table_ready) { - thread_scoped_lock lock(lookup_table_mutex); - if (!beckmann_table_ready) { - beckmann_table_build(beckmann_table); - beckmann_table_ready = true; - } - } - beckmann_table_offset = scene->lookup_tables->add_table(dscene, beckmann_table); - } - 
ktables->beckmann_offset = (int)beckmann_table_offset; - /* integrator */ KernelIntegrator *kintegrator = &dscene->data.integrator; kintegrator->use_volumes = has_volumes; @@ -700,8 +575,6 @@ void ShaderManager::device_update_common(Device * /*device*/, void ShaderManager::device_free_common(Device *, DeviceScene *dscene, Scene *scene) { - scene->lookup_tables->remove_table(&beckmann_table_offset); - dscene->shaders.free(); } @@ -844,7 +717,6 @@ uint ShaderManager::get_kernel_features(Scene *scene) void ShaderManager::free_memory() { - beckmann_table.free_memory(); #ifdef WITH_OSL OSLShaderManager::free_memory(); diff --git a/intern/cycles/scene/shader.h b/intern/cycles/scene/shader.h index 8f59eefae05..fab09fcd9d3 100644 --- a/intern/cycles/scene/shader.h +++ b/intern/cycles/scene/shader.h @@ -232,10 +232,6 @@ class ShaderManager { AttributeIDMap unique_attribute_id; static thread_mutex lookup_table_mutex; - static vector beckmann_table; - static bool beckmann_table_ready; - - size_t beckmann_table_offset; uint get_graph_kernel_features(ShaderGraph *graph); diff --git a/intern/cycles/scene/shader_nodes.cpp b/intern/cycles/scene/shader_nodes.cpp index 8cd64cd189e..1b7d66eeff1 100644 --- a/intern/cycles/scene/shader_nodes.cpp +++ b/intern/cycles/scene/shader_nodes.cpp @@ -779,6 +779,68 @@ static void sky_texture_precompute_nishita(SunSky *sunsky, sunsky->nishita_data[9] = sun_intensity; } +float SkyTextureNode::get_sun_average_radiance() +{ + float clamped_altitude = clamp(altitude, 1.0f, 59999.0f); + float angular_diameter = get_sun_size(); + + float pix_bottom[3]; + float pix_top[3]; + SKY_nishita_skymodel_precompute_sun(sun_elevation, + angular_diameter, + clamped_altitude, + air_density, + dust_density, + pix_bottom, + pix_top); + + /* Approximate the direction's elevation as the sun's elevation. */ + float dir_elevation = sun_elevation; + float half_angular = angular_diameter / 2.0f; + float3 pixel_bottom = make_float3(pix_bottom[0], pix_bottom[1], pix_bottom[2]); + float3 pixel_top = make_float3(pix_top[0], pix_top[1], pix_top[2]); + + /* Same code as in the sun evaluation shader. */ + float3 xyz = make_float3(0.0f, 0.0f, 0.0f); + float y = 0.0f; + if (sun_elevation - half_angular > 0.0f) { + if (sun_elevation + half_angular > 0.0f) { + y = ((dir_elevation - sun_elevation) / angular_diameter) + 0.5f; + xyz = interp(pixel_bottom, pixel_top, y) * sun_intensity; + } + } + else { + if (sun_elevation + half_angular > 0.0f) { + y = dir_elevation / (sun_elevation + half_angular); + xyz = interp(pixel_bottom, pixel_top, y) * sun_intensity; + } + } + + /* We first approximate the sun's contribution by + * multiplying the evaluated point by the square of the angular diameter. + * Then we scale the approximation using a piecewise function (determined empirically). 
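As a worked example of the scaling that follows: the real sun subtends roughly 0.53 degrees, which is below the 0.8 degree breakpoint, so its contribution keeps the full factor of 1.0 and is simply average(xyz) * sqr(angular_diameter). Between 0.8 and 1.0 degrees the factor ramps linearly from 1.0 down to 0.8, and from 1.0 degree up to 90 degrees it ramps from 0.8 down to 0.45.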
*/ + float sun_contribution = average(xyz) * sqr(angular_diameter); + + float first_point = 0.8f / 180.0f * M_PI_F; + float second_point = 1.0f / 180.0f * M_PI_F; + float third_point = M_PI_2_F; + if (angular_diameter < first_point) { + sun_contribution *= 1.0f; + } + else if (angular_diameter < second_point) { + float diff = angular_diameter - first_point; + float slope = (0.8f - 1.0f) / (second_point - first_point); + sun_contribution *= 1.0f + slope * diff; + } + else { + float diff = angular_diameter - 1.0f / 180.0f * M_PI_F; + float slope = (0.45f - 0.8f) / (third_point - second_point); + sun_contribution *= 0.8f + slope * diff; + } + + return sun_contribution; +} + NODE_DEFINE(SkyTextureNode) { NodeType *type = NodeType::add("sky_texture", create, NodeType::SHADER); diff --git a/intern/cycles/scene/shader_nodes.h b/intern/cycles/scene/shader_nodes.h index fa9210ff5cc..35c5a7a61ac 100644 --- a/intern/cycles/scene/shader_nodes.h +++ b/intern/cycles/scene/shader_nodes.h @@ -174,6 +174,8 @@ class SkyTextureNode : public TextureNode { /* Clamping for numerical precision. */ return fmaxf(sun_size, 0.0005f); } + + float get_sun_average_radiance(); }; class OutputNode : public ShaderNode { diff --git a/intern/cycles/session/CMakeLists.txt b/intern/cycles/session/CMakeLists.txt index 4f3a0a99ee1..9f4b4e3cc36 100644 --- a/intern/cycles/session/CMakeLists.txt +++ b/intern/cycles/session/CMakeLists.txt @@ -5,6 +5,9 @@ set(INC .. ) +set(INC_SYS +) + set(SRC buffers.cpp denoising.cpp diff --git a/intern/cycles/test/CMakeLists.txt b/intern/cycles/test/CMakeLists.txt index 34e5a4770ea..cdf8f7db0bb 100644 --- a/intern/cycles/test/CMakeLists.txt +++ b/intern/cycles/test/CMakeLists.txt @@ -45,19 +45,6 @@ set(SRC # Disable AVX tests on macOS. Rosetta has problems running them, and other # platforms should be enough to verify AVX operations are implemented correctly. if(NOT APPLE) - if(CXX_HAS_SSE) - list(APPEND SRC - util_float8_sse2_test.cpp - ) - set_source_files_properties(util_float8_avx_test.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_SSE2_KERNEL_FLAGS}") - endif() - - if(CXX_HAS_AVX) - list(APPEND SRC - util_float8_avx_test.cpp - ) - set_source_files_properties(util_float8_avx_test.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_AVX_KERNEL_FLAGS}") - endif() if(CXX_HAS_AVX2) list(APPEND SRC util_float8_avx2_test.cpp diff --git a/intern/cycles/util/debug.cpp b/intern/cycles/util/debug.cpp index 8210e21f951..e7cf7b545b8 100644 --- a/intern/cycles/util/debug.cpp +++ b/intern/cycles/util/debug.cpp @@ -29,9 +29,7 @@ void DebugFlags::CPU::reset() } while (0) CHECK_CPU_FLAGS(avx2, "CYCLES_CPU_NO_AVX2"); - CHECK_CPU_FLAGS(avx, "CYCLES_CPU_NO_AVX"); CHECK_CPU_FLAGS(sse41, "CYCLES_CPU_NO_SSE41"); - CHECK_CPU_FLAGS(sse3, "CYCLES_CPU_NO_SSE3"); CHECK_CPU_FLAGS(sse2, "CYCLES_CPU_NO_SSE2"); #undef STRINGIFY diff --git a/intern/cycles/util/debug.h b/intern/cycles/util/debug.h index ab200649f59..9ee09f08581 100644 --- a/intern/cycles/util/debug.h +++ b/intern/cycles/util/debug.h @@ -26,9 +26,7 @@ class DebugFlags { /* Flags describing which instructions sets are allowed for use. 
*/ bool avx2 = true; - bool avx = true; bool sse41 = true; - bool sse3 = true; bool sse2 = true; /* Check functions to see whether instructions up to the given one @@ -36,19 +34,11 @@ class DebugFlags { */ bool has_avx2() { - return has_avx() && avx2; - } - bool has_avx() - { - return has_sse41() && avx; + return has_sse41() && avx2; } bool has_sse41() { - return has_sse3() && sse41; - } - bool has_sse3() - { - return has_sse2() && sse3; + return has_sse2() && sse41; } bool has_sse2() { diff --git a/intern/cycles/util/math.h b/intern/cycles/util/math.h index 2eeb4aebd54..df75e7bf2b5 100644 --- a/intern/cycles/util/math.h +++ b/intern/cycles/util/math.h @@ -483,6 +483,12 @@ ccl_device_inline float compatible_signf(float f) ccl_device_inline float smoothstepf(float f) { + if (f <= 0.0f) { + return 0.0f; + } + if (f >= 1.0f) { + return 1.0f; + } float ff = f * f; return (3.0f * ff - 2.0f * ff * f); } @@ -750,6 +756,16 @@ ccl_device_inline float sqr(float a) return a * a; } +ccl_device_inline float sin_from_cos(const float c) +{ + return safe_sqrtf(1.0f - sqr(c)); +} + +ccl_device_inline float cos_from_sin(const float s) +{ + return safe_sqrtf(1.0f - sqr(s)); +} + ccl_device_inline float pow20(float a) { return sqr(sqr(sqr(sqr(a)) * a)); diff --git a/intern/cycles/util/math_float2.h b/intern/cycles/util/math_float2.h index ad806d0f08a..45391f6848a 100644 --- a/intern/cycles/util/math_float2.h +++ b/intern/cycles/util/math_float2.h @@ -134,6 +134,11 @@ ccl_device_inline float len(const float2 a) return sqrtf(dot(a, a)); } +ccl_device_inline float len_squared(const float2 a) +{ + return dot(a, a); +} + #if !defined(__KERNEL_METAL__) ccl_device_inline float distance(const float2 a, const float2 b) { diff --git a/intern/cycles/util/optimization.h b/intern/cycles/util/optimization.h index 19b96510c47..b6194dc0382 100644 --- a/intern/cycles/util/optimization.h +++ b/intern/cycles/util/optimization.h @@ -17,9 +17,6 @@ # ifdef WITH_KERNEL_SSE2 # define WITH_CYCLES_OPTIMIZED_KERNEL_SSE2 # endif -# ifdef WITH_KERNEL_SSE3 -# define WITH_CYCLES_OPTIMIZED_KERNEL_SSE3 -# endif /* x86-64 * @@ -30,15 +27,9 @@ /* SSE2 is always available on x86-64 CPUs, so auto enable */ # define __KERNEL_SSE2__ /* no SSE2 kernel on x86-64, part of regular kernel */ -# ifdef WITH_KERNEL_SSE3 -# define WITH_CYCLES_OPTIMIZED_KERNEL_SSE3 -# endif # ifdef WITH_KERNEL_SSE41 # define WITH_CYCLES_OPTIMIZED_KERNEL_SSE41 # endif -# ifdef WITH_KERNEL_AVX -# define WITH_CYCLES_OPTIMIZED_KERNEL_AVX -# endif # ifdef WITH_KERNEL_AVX2 # define WITH_CYCLES_OPTIMIZED_KERNEL_AVX2 # endif diff --git a/intern/cycles/util/system.cpp b/intern/cycles/util/system.cpp index 3183ac06f26..c1c496380c5 100644 --- a/intern/cycles/util/system.cpp +++ b/intern/cycles/util/system.cpp @@ -204,24 +204,12 @@ bool system_cpu_support_sse2() return caps.sse2; } -bool system_cpu_support_sse3() -{ - CPUCapabilities &caps = system_cpu_capabilities(); - return caps.sse3; -} - bool system_cpu_support_sse41() { CPUCapabilities &caps = system_cpu_capabilities(); return caps.sse41; } -bool system_cpu_support_avx() -{ - CPUCapabilities &caps = system_cpu_capabilities(); - return caps.avx; -} - bool system_cpu_support_avx2() { CPUCapabilities &caps = system_cpu_capabilities(); @@ -234,20 +222,11 @@ bool system_cpu_support_sse2() return false; } -bool system_cpu_support_sse3() -{ - return false; -} - bool system_cpu_support_sse41() { return false; } -bool system_cpu_support_avx() -{ - return false; -} bool system_cpu_support_avx2() { return false; diff --git 
a/intern/cycles/util/system.h b/intern/cycles/util/system.h index 2152b89ed24..66140cabf84 100644 --- a/intern/cycles/util/system.h +++ b/intern/cycles/util/system.h @@ -17,9 +17,7 @@ int system_console_width(); std::string system_cpu_brand_string(); int system_cpu_bits(); bool system_cpu_support_sse2(); -bool system_cpu_support_sse3(); bool system_cpu_support_sse41(); -bool system_cpu_support_avx(); bool system_cpu_support_avx2(); size_t system_physical_ram(); diff --git a/intern/cycles/util/thread.cpp b/intern/cycles/util/thread.cpp index f4949aa2b44..7f1e8eb4159 100644 --- a/intern/cycles/util/thread.cpp +++ b/intern/cycles/util/thread.cpp @@ -6,6 +6,8 @@ #include "util/system.h" #include "util/windows.h" +#include + CCL_NAMESPACE_BEGIN thread::thread(function run_cb) : run_cb_(run_cb), joined_(false) diff --git a/intern/ghost/CMakeLists.txt b/intern/ghost/CMakeLists.txt index c8b576023c2..06256004010 100644 --- a/intern/ghost/CMakeLists.txt +++ b/intern/ghost/CMakeLists.txt @@ -85,10 +85,12 @@ if(WITH_VULKAN_BACKEND) list(APPEND INC_SYS ${VULKAN_INCLUDE_DIRS} + ${MOLTENVK_INCLUDE_DIRS} ) list(APPEND LIB ${VULKAN_LIBRARIES} + ${MOLTENVK_LIBRARIES} ) add_definitions(-DWITH_VULKAN_BACKEND) @@ -360,6 +362,15 @@ elseif(WITH_GHOST_X11 OR WITH_GHOST_WAYLAND) COMMAND ${WAYLAND_SCANNER} private-code ${PROT_DEF} ${INC_DST}/${_name}-client-protocol.c DEPENDS ${INC_DST}/${_name}-client-protocol.h ) + + if(CMAKE_C_COMPILER_ID MATCHES "Clang") + # Prevent warnings/failure to compile with generated `WL_PRIVATE` declarations. + set_source_files_properties( + "${INC_DST}/${_name}-client-protocol.c" + PROPERTIES COMPILE_FLAGS "-Wno-missing-variable-declarations" + ) + endif() + list(APPEND SRC ${INC_DST}/${_name}-client-protocol.c ${INC_DST}/${_name}-client-protocol.h diff --git a/intern/ghost/GHOST_C-api.h b/intern/ghost/GHOST_C-api.h index 5c113a7e328..cf810caf778 100644 --- a/intern/ghost/GHOST_C-api.h +++ b/intern/ghost/GHOST_C-api.h @@ -1201,7 +1201,7 @@ void GHOST_GetVulkanHandles(GHOST_ContextHandle context, void *r_instance, void *r_physical_device, void *r_device, - uint32_t *r_graphic_queue_familly); + uint32_t *r_graphic_queue_family); /** * Return VULKAN back-buffer resources handles for the given window. diff --git a/intern/ghost/GHOST_Types.h b/intern/ghost/GHOST_Types.h index ee664557f9d..b20fc42f144 100644 --- a/intern/ghost/GHOST_Types.h +++ b/intern/ghost/GHOST_Types.h @@ -210,6 +210,8 @@ typedef enum { GHOST_kEventWindowActivate, GHOST_kEventWindowDeactivate, GHOST_kEventWindowUpdate, + /** Client side window decorations have changed and need to be redrawn. 
*/ + GHOST_kEventWindowUpdateDecor, GHOST_kEventWindowSize, GHOST_kEventWindowMove, GHOST_kEventWindowDPIHintChanged, diff --git a/intern/ghost/intern/GHOST_C-api.cpp b/intern/ghost/intern/GHOST_C-api.cpp index bdeb6208b91..a3c1eedc9c0 100644 --- a/intern/ghost/intern/GHOST_C-api.cpp +++ b/intern/ghost/intern/GHOST_C-api.cpp @@ -1203,10 +1203,10 @@ void GHOST_GetVulkanHandles(GHOST_ContextHandle contexthandle, void *r_instance, void *r_physical_device, void *r_device, - uint32_t *r_graphic_queue_familly) + uint32_t *r_graphic_queue_family) { GHOST_IContext *context = (GHOST_IContext *)contexthandle; - context->getVulkanHandles(r_instance, r_physical_device, r_device, r_graphic_queue_familly); + context->getVulkanHandles(r_instance, r_physical_device, r_device, r_graphic_queue_family); } void GHOST_GetVulkanBackbuffer(GHOST_WindowHandle windowhandle, diff --git a/intern/ghost/intern/GHOST_Context.h b/intern/ghost/intern/GHOST_Context.h index 59c5b08c45c..4349e4601b9 100644 --- a/intern/ghost/intern/GHOST_Context.h +++ b/intern/ghost/intern/GHOST_Context.h @@ -150,7 +150,7 @@ class GHOST_Context : public GHOST_IContext { virtual GHOST_TSuccess getVulkanHandles(void * /*r_instance*/, void * /*r_physical_device*/, void * /*r_device*/, - uint32_t * /*r_graphic_queue_familly*/) override + uint32_t * /*r_graphic_queue_family*/) override { return GHOST_kFailure; }; diff --git a/intern/ghost/intern/GHOST_ContextCGL.h b/intern/ghost/intern/GHOST_ContextCGL.h index d19fffffb43..3134606d59b 100644 --- a/intern/ghost/intern/GHOST_ContextCGL.h +++ b/intern/ghost/intern/GHOST_ContextCGL.h @@ -23,6 +23,22 @@ @class NSView; class GHOST_ContextCGL : public GHOST_Context { + + public: + /* Defines the number of simultaneous command buffers which can be in flight. + * The default limit of `64` is considered to be optimal for Blender. Too many command buffers + * will result in workload fragmentation and additional system-level overhead. This limit should + * also only be increased if the application is consistently exceeding the limit, and there are + * no command buffer leaks. + * + * If this limit is reached, starting a new command buffer will fail. The Metal back-end will + * therefore stall until completion and log a warning when this limit is reached in order to + * ensure correct function of the app. + * + * It is generally preferable to reduce the prevalence of GPU_flush or GPU Context switches + * (which will both break command submissions), rather than increasing this limit. */ + static const int max_command_buffer_count = 64; + public: /** * Constructor. diff --git a/intern/ghost/intern/GHOST_ContextCGL.mm b/intern/ghost/intern/GHOST_ContextCGL.mm index 9dad337a5d6..1aa0cb9def4 100644 --- a/intern/ghost/intern/GHOST_ContextCGL.mm +++ b/intern/ghost/intern/GHOST_ContextCGL.mm @@ -529,7 +529,8 @@ void GHOST_ContextCGL::metalInit() id device = m_metalLayer.device; /* Create a command queue for blit/present operation. */ - m_metalCmdQueue = (MTLCommandQueue *)[device newCommandQueue]; + m_metalCmdQueue = (MTLCommandQueue *)[device + newCommandQueueWithMaxCommandBufferCount:GHOST_ContextCGL::max_command_buffer_count]; [m_metalCmdQueue retain]; /* Create shaders for blit operation. 
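The queue created just above is limited to GHOST_ContextCGL::max_command_buffer_count in-flight command buffers; per the comment in GHOST_ContextCGL.h, hitting the limit makes the back-end stall until earlier buffers complete rather than fragmenting work further. Stripped of Metal, the same budgeting idea can be sketched as a counting guard (purely illustrative, not the back-end's code):

#include <condition_variable>
#include <mutex>

/* Illustrative budget: at most `max_in_flight` submissions outstanding at once.
 * acquire() blocks when the budget is exhausted; release() plays the role of
 * the completion handler. */
class CommandBufferBudget {
 public:
  explicit CommandBufferBudget(const int max_in_flight) : available_(max_in_flight) {}

  void acquire()
  {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return available_ > 0; });
    --available_;
  }

  void release()
  {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      ++available_;
    }
    cv_.notify_one();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int available_;
};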
*/ diff --git a/intern/ghost/intern/GHOST_ContextVK.cpp b/intern/ghost/intern/GHOST_ContextVK.cpp index 4ec045ccee2..4ee48243234 100644 --- a/intern/ghost/intern/GHOST_ContextVK.cpp +++ b/intern/ghost/intern/GHOST_ContextVK.cpp @@ -311,12 +311,12 @@ GHOST_TSuccess GHOST_ContextVK::getVulkanBackbuffer(void *image, GHOST_TSuccess GHOST_ContextVK::getVulkanHandles(void *r_instance, void *r_physical_device, void *r_device, - uint32_t *r_graphic_queue_familly) + uint32_t *r_graphic_queue_family) { *((VkInstance *)r_instance) = m_instance; *((VkPhysicalDevice *)r_physical_device) = m_physical_device; *((VkDevice *)r_device) = m_device; - *r_graphic_queue_familly = m_queue_family_graphic; + *r_graphic_queue_family = m_queue_family_graphic; return GHOST_kSuccess; } @@ -520,13 +520,14 @@ static GHOST_TSuccess getGraphicQueueFamily(VkPhysicalDevice device, uint32_t *r *r_queue_index = 0; for (const auto &queue_family : queue_families) { - if (queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) { + if ((queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) && + (queue_family.queueFlags & VK_QUEUE_COMPUTE_BIT)) { return GHOST_kSuccess; } (*r_queue_index)++; } - fprintf(stderr, "Couldn't find any Graphic queue familly on selected device\n"); + fprintf(stderr, "Couldn't find any Graphic queue family on selected device\n"); return GHOST_kFailure; } @@ -551,7 +552,7 @@ static GHOST_TSuccess getPresetQueueFamily(VkPhysicalDevice device, (*r_queue_index)++; } - fprintf(stderr, "Couldn't find any Present queue familly on selected device\n"); + fprintf(stderr, "Couldn't find any Present queue family on selected device\n"); return GHOST_kFailure; } diff --git a/intern/ghost/intern/GHOST_ContextVK.h b/intern/ghost/intern/GHOST_ContextVK.h index e26808cc317..1a2d38bc701 100644 --- a/intern/ghost/intern/GHOST_ContextVK.h +++ b/intern/ghost/intern/GHOST_ContextVK.h @@ -113,7 +113,7 @@ class GHOST_ContextVK : public GHOST_Context { GHOST_TSuccess getVulkanHandles(void *r_instance, void *r_physical_device, void *r_device, - uint32_t *r_graphic_queue_familly); + uint32_t *r_graphic_queue_family); /** * Gets the Vulkan framebuffer related resource handles associated with the Vulkan context. * Needs to be called after each swap events as the framebuffer will change. diff --git a/intern/ghost/intern/GHOST_NDOFManager.cpp b/intern/ghost/intern/GHOST_NDOFManager.cpp index 5484da82a18..ffd9c57803c 100644 --- a/intern/ghost/intern/GHOST_NDOFManager.cpp +++ b/intern/ghost/intern/GHOST_NDOFManager.cpp @@ -464,12 +464,17 @@ void GHOST_NDOFManager::updateButton(int button_number, bool press, uint64_t tim ndof_button_names[button]); GHOST_IWindow *window = system_.getWindowManager()->getActiveWindow(); - const GHOST_TKey key = ghost_map_keyboard_from_ndof_buttom(button); - if (key != GHOST_kKeyUnknown) { - sendKeyEvent(key, press, time, window); - } - else { - sendButtonEvent(button, press, time, window); + + /* Delivery will fail, so don't bother sending. + * Do, however update the buttons internal depressed state. */ + if (window != nullptr) { + const GHOST_TKey key = ghost_map_keyboard_from_ndof_buttom(button); + if (key != GHOST_kKeyUnknown) { + sendKeyEvent(key, press, time, window); + } + else { + sendButtonEvent(button, press, time, window); + } } int mask = 1 << button_number; @@ -547,9 +552,11 @@ bool GHOST_NDOFManager::sendMotionEvent() GHOST_IWindow *window = system_.getWindowManager()->getActiveWindow(); + /* Delivery will fail, so don't bother sending. 
*/ if (window == nullptr) { - motion_state_ = GHOST_kNotStarted; /* Avoid large `dt` times when changing windows. */ - return false; /* Delivery will fail, so don't bother sending. */ + /* Avoid large `dt` times when changing windows. */ + motion_state_ = GHOST_kNotStarted; + return false; } GHOST_EventNDOFMotion *event = new GHOST_EventNDOFMotion(motion_time_, window); diff --git a/intern/ghost/intern/GHOST_SystemWayland.cpp b/intern/ghost/intern/GHOST_SystemWayland.cpp index d9e5b0c4e9d..014f3d24bae 100644 --- a/intern/ghost/intern/GHOST_SystemWayland.cpp +++ b/intern/ghost/intern/GHOST_SystemWayland.cpp @@ -82,6 +82,8 @@ #include "CLG_log.h" #ifdef USE_EVENT_BACKGROUND_THREAD +# include "GHOST_TimerTask.h" + # include #endif @@ -215,13 +217,15 @@ static bool use_gnome_confine_hack = false; /** * The event codes are used to differentiate from which mouse button an event comes from. */ -#define BTN_LEFT 0x110 -#define BTN_RIGHT 0x111 -#define BTN_MIDDLE 0x112 -#define BTN_SIDE 0x113 -#define BTN_EXTRA 0x114 -#define BTN_FORWARD 0x115 -#define BTN_BACK 0x116 +enum { + BTN_LEFT = 0x110, + BTN_RIGHT = 0x111, + BTN_MIDDLE = 0x112, + BTN_SIDE = 0x113, + BTN_EXTRA = 0x114, + BTN_FORWARD = 0x115, + BTN_BACK = 0x116 +}; // #define BTN_TASK 0x117 /* UNUSED. */ /** @@ -232,28 +236,34 @@ static bool use_gnome_confine_hack = false; * at the Blender studio, having the button closest to the nib be MMB is preferable, * so use this as a default. If needs be - swapping these could be a preference. */ -#define BTN_STYLUS 0x14b /* Use as middle-mouse. */ -#define BTN_STYLUS2 0x14c /* Use as right-mouse. */ -/* NOTE(@campbellbarton): Map to an additional button (not sure which hardware uses this). */ -#define BTN_STYLUS3 0x149 +enum { + /** Use as middle-mouse. */ + BTN_STYLUS = 0x14b, + /** Use as right-mouse. */ + BTN_STYLUS2 = 0x14c, + /** NOTE(@campbellbarton): Map to an additional button (not sure which hardware uses this). */ + BTN_STYLUS3 = 0x149, +}; /** * Keyboard scan-codes. 
*/ -#define KEY_GRAVE 41 +enum { + KEY_GRAVE = 41, #ifdef USE_NON_LATIN_KB_WORKAROUND -# define KEY_1 2 -# define KEY_2 3 -# define KEY_3 4 -# define KEY_4 5 -# define KEY_5 6 -# define KEY_6 7 -# define KEY_7 8 -# define KEY_8 9 -# define KEY_9 10 -# define KEY_0 11 + KEY_1 = 2, + KEY_2 = 3, + KEY_3 = 4, + KEY_4 = 5, + KEY_5 = 6, + KEY_6 = 7, + KEY_7 = 8, + KEY_8 = 9, + KEY_9 = 10, + KEY_0 = 11, #endif +}; /** \} */ @@ -280,41 +290,41 @@ struct GWL_ModifierInfo { }; static const GWL_ModifierInfo g_modifier_info_table[MOD_INDEX_NUM] = { - /* MOD_INDEX_SHIFT */ + /*MOD_INDEX_SHIFT*/ { - /* display_name */ "Shift", - /* xkb_id */ XKB_MOD_NAME_SHIFT, - /* key_l */ GHOST_kKeyLeftShift, - /* key_r */ GHOST_kKeyRightShift, - /* mod_l */ GHOST_kModifierKeyLeftShift, - /* mod_r */ GHOST_kModifierKeyRightShift, + /*display_name*/ "Shift", + /*xkb_id*/ XKB_MOD_NAME_SHIFT, + /*key_l*/ GHOST_kKeyLeftShift, + /*key_r*/ GHOST_kKeyRightShift, + /*mod_l*/ GHOST_kModifierKeyLeftShift, + /*mod_r*/ GHOST_kModifierKeyRightShift, }, - /* MOD_INDEX_ALT */ + /*MOD_INDEX_ALT*/ { - /* display_name */ "Alt", - /* xkb_id */ XKB_MOD_NAME_ALT, - /* key_l */ GHOST_kKeyLeftAlt, - /* key_r */ GHOST_kKeyRightAlt, - /* mod_l */ GHOST_kModifierKeyLeftAlt, - /* mod_r */ GHOST_kModifierKeyRightAlt, + /*display_name*/ "Alt", + /*xkb_id*/ XKB_MOD_NAME_ALT, + /*key_l*/ GHOST_kKeyLeftAlt, + /*key_r*/ GHOST_kKeyRightAlt, + /*mod_l*/ GHOST_kModifierKeyLeftAlt, + /*mod_r*/ GHOST_kModifierKeyRightAlt, }, - /* MOD_INDEX_CTRL */ + /*MOD_INDEX_CTRL*/ { - /* display_name */ "Control", - /* xkb_id */ XKB_MOD_NAME_CTRL, - /* key_l */ GHOST_kKeyLeftControl, - /* key_r */ GHOST_kKeyRightControl, - /* mod_l */ GHOST_kModifierKeyLeftControl, - /* mod_r */ GHOST_kModifierKeyRightControl, + /*display_name*/ "Control", + /*xkb_id*/ XKB_MOD_NAME_CTRL, + /*key_l*/ GHOST_kKeyLeftControl, + /*key_r*/ GHOST_kKeyRightControl, + /*mod_l*/ GHOST_kModifierKeyLeftControl, + /*mod_r*/ GHOST_kModifierKeyRightControl, }, - /* MOD_INDEX_OS */ + /*MOD_INDEX_OS*/ { - /* display_name */ "OS", - /* xkb_id */ XKB_MOD_NAME_LOGO, - /* key_l */ GHOST_kKeyLeftOS, - /* key_r */ GHOST_kKeyRightOS, - /* mod_l */ GHOST_kModifierKeyLeftOS, - /* mod_r */ GHOST_kModifierKeyRightOS, + /*display_name*/ "OS", + /*xkb_id*/ XKB_MOD_NAME_LOGO, + /*key_l*/ GHOST_kKeyLeftOS, + /*key_r*/ GHOST_kKeyRightOS, + /*mod_l*/ GHOST_kModifierKeyLeftOS, + /*mod_r*/ GHOST_kModifierKeyRightOS, }, }; @@ -760,7 +770,12 @@ struct GWL_Seat { int32_t rate = 0; /** Time (milliseconds) after which to start repeating keys. */ int32_t delay = 0; - /** Timer for key repeats. */ + /** + * Timer for key repeats. + * + * \note For as long as #USE_EVENT_BACKGROUND_THREAD is defined, any access to this + * (including null checks, must lock `timer_mutex` first. + */ GHOST_ITimerTask *timer = nullptr; } key_repeat; @@ -824,6 +839,42 @@ static bool gwl_seat_key_depressed_suppress_warning(const GWL_Seat *seat) return suppress_warning; } +/** + * \note Caller must lock `timer_mutex`. + */ +static void gwl_seat_key_repeat_timer_add(GWL_Seat *seat, + GHOST_TimerProcPtr key_repeat_fn, + GHOST_TUserDataPtr payload, + const bool use_delay) +{ + GHOST_SystemWayland *system = seat->system; + const uint64_t time_step = 1000 / seat->key_repeat.rate; + const uint64_t time_start = use_delay ? 
seat->key_repeat.delay : time_step; +#ifdef USE_EVENT_BACKGROUND_THREAD + GHOST_TimerTask *timer = new GHOST_TimerTask( + system->getMilliSeconds() + time_start, time_step, key_repeat_fn, payload); + seat->key_repeat.timer = timer; + system->ghost_timer_manager()->addTimer(timer); +#else + seat->key_repeat.timer = system->installTimer(time_start, time_step, key_repeat_fn, payload); +#endif +} + +/** + * \note The caller must lock `timer_mutex`. + */ +static void gwl_seat_key_repeat_timer_remove(GWL_Seat *seat) +{ + GHOST_SystemWayland *system = seat->system; +#ifdef USE_EVENT_BACKGROUND_THREAD + system->ghost_timer_manager()->removeTimer( + static_cast(seat->key_repeat.timer)); +#else + system->removeTimer(seat->key_repeat.timer); +#endif + seat->key_repeat.timer = nullptr; +} + /** \} */ /* -------------------------------------------------------------------- */ @@ -898,6 +949,16 @@ struct GWL_Display { /** Guard against multiple threads accessing `events_pending` at once. */ std::mutex events_pending_mutex; + /** + * A separate timer queue, needed so the WAYLAND thread can lock access. + * Using the system's #GHOST_Sysem::getTimerManager is not thread safe because + * access to the timer outside of WAYLAND specific logic will not lock. + * + * Needed because #GHOST_System::dispatchEvents fires timers + * outside of WAYLAND (without locking the `timer_mutex`). + */ + GHOST_TimerManager *ghost_timer_manager; + #endif /* USE_EVENT_BACKGROUND_THREAD */ }; @@ -914,6 +975,9 @@ static void gwl_display_destroy(GWL_Display *display) ghost_wl_display_lock_without_input(display->wl_display, display->system->server_mutex); display->events_pthread_is_active = false; } + + delete display->ghost_timer_manager; + display->ghost_timer_manager = nullptr; #endif /* For typical WAYLAND use this will always be set. @@ -2586,8 +2650,6 @@ static void pointer_handle_enter(void *data, GHOST_WindowWayland *win = ghost_wl_surface_user_data(wl_surface); - win->activate(); - GWL_Seat *seat = static_cast(data); seat->cursor_source_serial = serial; seat->pointer.serial = serial; @@ -2627,8 +2689,6 @@ static void pointer_handle_leave(void *data, static_cast(data)->pointer.wl_surface_window = nullptr; if (wl_surface && ghost_wl_surface_own(wl_surface)) { CLOG_INFO(LOG, 2, "leave"); - GHOST_WindowWayland *win = ghost_wl_surface_user_data(wl_surface); - win->deactivate(); } else { CLOG_INFO(LOG, 2, "leave (skipped)"); @@ -3714,9 +3774,14 @@ static void keyboard_handle_leave(void *data, GWL_Seat *seat = static_cast(data); seat->keyboard.wl_surface_window = nullptr; - /* Losing focus must stop repeating text. */ - if (seat->key_repeat.timer) { - keyboard_handle_key_repeat_cancel(seat); + { +#ifdef USE_EVENT_BACKGROUND_THREAD + std::lock_guard lock_timer_guard{*seat->system->timer_mutex}; +#endif + /* Losing focus must stop repeating text. */ + if (seat->key_repeat.timer) { + keyboard_handle_key_repeat_cancel(seat); + } } #ifdef USE_GNOME_KEYBOARD_SUPPRESS_WARNING @@ -3776,36 +3841,32 @@ static xkb_keysym_t xkb_state_key_get_one_sym_without_modifiers( return sym; } +/** + * \note Caller must lock `timer_mutex`. 
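Both helpers above, and the cancel/reset functions that follow, assume the caller already holds `timer_mutex`; even the null check on `key_repeat.timer` counts as access. A minimal usage sketch of the pattern the call sites below follow (the wrapper name is made up for illustration):

/* Sketch: guard every access to GWL_Seat::key_repeat.timer with the WAYLAND
 * timer lock when the background event thread is enabled. */
static void gwl_seat_key_repeat_cancel_if_active(GWL_Seat *seat)
{
#ifdef USE_EVENT_BACKGROUND_THREAD
  std::lock_guard lock_timer_guard{*seat->system->timer_mutex};
#endif
  if (seat->key_repeat.timer) {
    keyboard_handle_key_repeat_cancel(seat);
  }
}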
+ */ static void keyboard_handle_key_repeat_cancel(GWL_Seat *seat) { -#ifdef USE_EVENT_BACKGROUND_THREAD - std::lock_guard lock_timer_guard{*seat->system->timer_mutex}; -#endif GHOST_ASSERT(seat->key_repeat.timer != nullptr, "Caller much check for timer"); delete static_cast(seat->key_repeat.timer->getUserData()); - seat->system->removeTimer(seat->key_repeat.timer); - seat->key_repeat.timer = nullptr; + + gwl_seat_key_repeat_timer_remove(seat); } /** * Restart the key-repeat timer. * \param use_delay: When false, use the interval * (prevents pause when the setting changes while the key is held). + * + * \note Caller must lock `timer_mutex`. */ static void keyboard_handle_key_repeat_reset(GWL_Seat *seat, const bool use_delay) { -#ifdef USE_EVENT_BACKGROUND_THREAD - std::lock_guard lock_timer_guard{*seat->system->timer_mutex}; -#endif GHOST_ASSERT(seat->key_repeat.timer != nullptr, "Caller much check for timer"); - GHOST_SystemWayland *system = seat->system; - GHOST_ITimerTask *timer = seat->key_repeat.timer; - GHOST_TimerProcPtr key_repeat_fn = timer->getTimerProc(); + GHOST_TimerProcPtr key_repeat_fn = seat->key_repeat.timer->getTimerProc(); GHOST_TUserDataPtr payload = seat->key_repeat.timer->getUserData(); - seat->system->removeTimer(seat->key_repeat.timer); - const uint64_t time_step = 1000 / seat->key_repeat.rate; - const uint64_t time_start = use_delay ? seat->key_repeat.delay : time_step; - seat->key_repeat.timer = system->installTimer(time_start, time_step, key_repeat_fn, payload); + + gwl_seat_key_repeat_timer_remove(seat); + gwl_seat_key_repeat_timer_add(seat, key_repeat_fn, payload, use_delay); } static void keyboard_handle_key(void *data, @@ -3844,6 +3905,11 @@ static void keyboard_handle_key(void *data, break; } +#ifdef USE_EVENT_BACKGROUND_THREAD + /* Any access to `seat->key_repeat.timer` must lock. */ + std::lock_guard lock_timer_guard{*seat->system->timer_mutex}; +#endif + struct GWL_KeyRepeatPlayload *key_repeat_payload = nullptr; /* Delete previous timer. */ @@ -3882,23 +3948,14 @@ static void keyboard_handle_key(void *data, break; } case RESET: { -#ifdef USE_EVENT_BACKGROUND_THREAD - std::lock_guard lock_timer_guard{*seat->system->timer_mutex}; -#endif /* The payload will be added again. */ - seat->system->removeTimer(seat->key_repeat.timer); - seat->key_repeat.timer = nullptr; + gwl_seat_key_repeat_timer_remove(seat); break; } case CANCEL: { -#ifdef USE_EVENT_BACKGROUND_THREAD - std::lock_guard lock_timer_guard{*seat->system->timer_mutex}; -#endif delete key_repeat_payload; key_repeat_payload = nullptr; - - seat->system->removeTimer(seat->key_repeat.timer); - seat->key_repeat.timer = nullptr; + gwl_seat_key_repeat_timer_remove(seat); break; } } @@ -3952,8 +4009,8 @@ static void keyboard_handle_key(void *data, utf8_buf)); } }; - seat->key_repeat.timer = seat->system->installTimer( - seat->key_repeat.delay, 1000 / seat->key_repeat.rate, key_repeat_fn, key_repeat_payload); + + gwl_seat_key_repeat_timer_add(seat, key_repeat_fn, key_repeat_payload, true); } } @@ -3978,8 +4035,13 @@ static void keyboard_handle_modifiers(void *data, /* A modifier changed so reset the timer, * see comment in #keyboard_handle_key regarding this behavior. 
*/ - if (seat->key_repeat.timer) { - keyboard_handle_key_repeat_reset(seat, true); + { +#ifdef USE_EVENT_BACKGROUND_THREAD + std::lock_guard lock_timer_guard{*seat->system->timer_mutex}; +#endif + if (seat->key_repeat.timer) { + keyboard_handle_key_repeat_reset(seat, true); + } } #ifdef USE_GNOME_KEYBOARD_SUPPRESS_WARNING @@ -3998,9 +4060,14 @@ static void keyboard_repeat_handle_info(void *data, seat->key_repeat.rate = rate; seat->key_repeat.delay = delay; - /* Unlikely possible this setting changes while repeating. */ - if (seat->key_repeat.timer) { - keyboard_handle_key_repeat_reset(seat, false); + { +#ifdef USE_EVENT_BACKGROUND_THREAD + std::lock_guard lock_timer_guard{*seat->system->timer_mutex}; +#endif + /* Unlikely possible this setting changes while repeating. */ + if (seat->key_repeat.timer) { + keyboard_handle_key_repeat_reset(seat, false); + } } } @@ -4271,8 +4338,14 @@ static void gwl_seat_capability_keyboard_disable(GWL_Seat *seat) if (!seat->wl_keyboard) { return; } - if (seat->key_repeat.timer) { - keyboard_handle_key_repeat_cancel(seat); + + { +#ifdef USE_EVENT_BACKGROUND_THREAD + std::lock_guard lock_timer_guard{*seat->system->timer_mutex}; +#endif + if (seat->key_repeat.timer) { + keyboard_handle_key_repeat_cancel(seat); + } } wl_keyboard_destroy(seat->wl_keyboard); seat->wl_keyboard = nullptr; @@ -5328,7 +5401,7 @@ GHOST_SystemWayland::GHOST_SystemWayland(bool background) /* Connect to the Wayland server. */ display_->wl_display = wl_display_connect(nullptr); if (!display_->wl_display) { - this->~GHOST_SystemWayland(); + display_destroy_and_free_all(); throw std::runtime_error("Wayland: unable to connect to display!"); } @@ -5372,7 +5445,7 @@ GHOST_SystemWayland::GHOST_SystemWayland(bool background) "WAYLAND found but libdecor was not, install libdecor for Wayland support, " "falling back to X11\n"); # endif - this->~GHOST_SystemWayland(); + display_destroy_and_free_all(); throw std::runtime_error("Wayland: unable to find libdecor!"); use_libdecor = true; @@ -5389,7 +5462,7 @@ GHOST_SystemWayland::GHOST_SystemWayland(bool background) GWL_LibDecor_System &decor = *display_->libdecor; decor.context = libdecor_new(display_->wl_display, &libdecor_interface); if (!decor.context) { - this->~GHOST_SystemWayland(); + display_destroy_and_free_all(); throw std::runtime_error("Wayland: unable to create window decorations!"); } } @@ -5400,17 +5473,19 @@ GHOST_SystemWayland::GHOST_SystemWayland(bool background) { GWL_XDG_Decor_System &decor = *display_->xdg_decor; if (!decor.shell) { - this->~GHOST_SystemWayland(); + display_destroy_and_free_all(); throw std::runtime_error("Wayland: unable to access xdg_shell!"); } } #ifdef USE_EVENT_BACKGROUND_THREAD gwl_display_event_thread_create(display_); + + display_->ghost_timer_manager = new GHOST_TimerManager(); #endif } -GHOST_SystemWayland::~GHOST_SystemWayland() +void GHOST_SystemWayland::display_destroy_and_free_all() { gwl_display_destroy(display_); @@ -5420,6 +5495,11 @@ GHOST_SystemWayland::~GHOST_SystemWayland() #endif } +GHOST_SystemWayland::~GHOST_SystemWayland() +{ + display_destroy_and_free_all(); +} + GHOST_TSuccess GHOST_SystemWayland::init() { GHOST_TSuccess success = GHOST_System::init(); @@ -5482,10 +5562,16 @@ bool GHOST_SystemWayland::processEvents(bool waitForEvent) #endif /* USE_EVENT_BACKGROUND_THREAD */ { + const uint64_t now = getMilliSeconds(); #ifdef USE_EVENT_BACKGROUND_THREAD - std::lock_guard lock_timer_guard{*display_->system->timer_mutex}; + { + std::lock_guard lock_timer_guard{*display_->system->timer_mutex}; 
+ if (ghost_timer_manager()->fireTimers(now)) { + any_processed = true; + } + } #endif - if (getTimerManager()->fireTimers(getMilliSeconds())) { + if (getTimerManager()->fireTimers(now)) { any_processed = true; } } @@ -6708,6 +6794,13 @@ struct wl_shm *GHOST_SystemWayland::wl_shm() const return display_->wl_shm; } +#ifdef USE_EVENT_BACKGROUND_THREAD +GHOST_TimerManager *GHOST_SystemWayland::ghost_timer_manager() +{ + return display_->ghost_timer_manager; +} +#endif + /** \} */ /* -------------------------------------------------------------------- */ diff --git a/intern/ghost/intern/GHOST_SystemWayland.h b/intern/ghost/intern/GHOST_SystemWayland.h index c745d3b1d36..153931a0a39 100644 --- a/intern/ghost/intern/GHOST_SystemWayland.h +++ b/intern/ghost/intern/GHOST_SystemWayland.h @@ -165,6 +165,16 @@ class GHOST_SystemWayland : public GHOST_System { bool cursor_grab_use_software_display_get(const GHOST_TGrabCursorMode mode); +#ifdef USE_EVENT_BACKGROUND_THREAD + /** + * Return a separate WAYLAND local timer manager to #GHOST_System::getTimerManager + * Manipulation & access must lock with #GHOST_WaylandSystem::server_mutex. + * + * See #GWL_Display::ghost_timer_manager doc-string for details on why this is needed. + */ + GHOST_TimerManager *ghost_timer_manager(); +#endif + /* WAYLAND direct-data access. */ struct wl_display *wl_display(); @@ -233,7 +243,14 @@ class GHOST_SystemWayland : public GHOST_System { * from running at the same time. */ std::mutex *server_mutex = nullptr; - /** Threads must lock this before manipulating timers. */ + /** + * Threads must lock this before manipulating #GWL_Display::ghost_timer_manager. + * + * \note Using a separate lock to `server_mutex` is necessary because the + * server lock is already held when calling `ghost_wl_display_event_pump`. + * If manipulating the timer used the `server_mutex`, event pump can indirectly + * handle key up/down events which would lock `server_mutex` causing a dead-lock. + */ std::mutex *timer_mutex = nullptr; std::thread::id main_thread_id; @@ -242,5 +259,11 @@ class GHOST_SystemWayland : public GHOST_System { #endif private: + /** + * Support freeing the internal data separately from the destructor + * so it can be called when WAYLAND isn't running (immediately before raising an exception). 
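The rationale for the separate tear-down function declared below: if a constructor throws, C++ never runs the destructor of the partially constructed object, so GHOST_SystemWayland's constructor has to free the display itself before raising. This is exactly the error path used earlier in the constructor, roughly:

/* Constructor error path (as used above): free what was allocated, then throw. */
if (!display_->wl_display) {
  display_destroy_and_free_all();
  throw std::runtime_error("Wayland: unable to connect to display!");
}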
+ */ + void display_destroy_and_free_all(); + struct GWL_Display *display_; }; diff --git a/intern/ghost/intern/GHOST_WindowWayland.cpp b/intern/ghost/intern/GHOST_WindowWayland.cpp index 9c179a6394f..f486fc319c8 100644 --- a/intern/ghost/intern/GHOST_WindowWayland.cpp +++ b/intern/ghost/intern/GHOST_WindowWayland.cpp @@ -311,10 +311,9 @@ enum eGWL_PendingWindowActions { # ifdef GHOST_OPENGL_ALPHA PENDING_OPAQUE_SET, # endif - PENDING_SWAP_BUFFERS, PENDING_SCALE_UPDATE, }; -# define PENDING_NUM (PENDING_SWAP_BUFFERS + 1) +# define PENDING_NUM (PENDING_SCALE_UPDATE + 1) static void gwl_window_pending_actions_tag(GWL_Window *win, enum eGWL_PendingWindowActions type) { @@ -338,9 +337,6 @@ static void gwl_window_pending_actions_handle(GWL_Window *win) if (win->pending_actions[PENDING_SCALE_UPDATE].exchange(false)) { win->ghost_window->outputs_changed_update_scale(); } - if (win->pending_actions[PENDING_SWAP_BUFFERS].exchange(false)) { - win->ghost_window->swapBuffers(); - } } #endif /* USE_EVENT_BACKGROUND_THREAD */ @@ -356,8 +352,6 @@ static void gwl_window_frame_update_from_pending_lockfree(GWL_Window *win) #endif - bool do_redraw = false; - if (win->frame_pending.size[0] != 0 && win->frame_pending.size[1] != 0) { if ((win->frame.size[0] != win->frame_pending.size[0]) || (win->frame.size[1] != win->frame_pending.size[1])) { @@ -365,9 +359,6 @@ static void gwl_window_frame_update_from_pending_lockfree(GWL_Window *win) } } - bool is_active_ghost = (win->ghost_window == - win->ghost_system->getWindowManager()->getActiveWindow()); - if (win->frame_pending.is_active) { win->ghost_window->activate(); } @@ -375,10 +366,6 @@ static void gwl_window_frame_update_from_pending_lockfree(GWL_Window *win) win->ghost_window->deactivate(); } - if (is_active_ghost != win->frame_pending.is_active) { - do_redraw = true; - } - win->frame_pending.size[0] = win->frame.size[0]; win->frame_pending.size[1] = win->frame.size[1]; @@ -387,15 +374,6 @@ static void gwl_window_frame_update_from_pending_lockfree(GWL_Window *win) /* Signal not to apply the scale unless it's configured. */ win->frame_pending.size[0] = 0; win->frame_pending.size[1] = 0; - - if (do_redraw) { -#ifdef USE_EVENT_BACKGROUND_THREAD - /* Could swap buffers, use pending to a redundant call in some cases. */ - gwl_window_pending_actions_tag(win, PENDING_SWAP_BUFFERS); -#else - win->ghost_window->swapBuffers(); -#endif - } } static void gwl_window_frame_update_from_pending(GWL_Window *win) @@ -621,12 +599,11 @@ static void frame_handle_commit(struct libdecor_frame * /*frame*/, void *data) { CLOG_INFO(LOG, 2, "commit"); +# if 0 GWL_Window *win = static_cast(data); - -# ifdef USE_EVENT_BACKGROUND_THREAD - gwl_window_pending_actions_tag(win, PENDING_SWAP_BUFFERS); + win->ghost_window->notify_decor_redraw(); # else - win->ghost_window->swapBuffers(); + (void)data; # endif } @@ -1321,8 +1298,17 @@ GHOST_TSuccess GHOST_WindowWayland::activate() return GHOST_kFailure; } } - return system_->pushEvent_maybe_pending( + const GHOST_TSuccess success = system_->pushEvent_maybe_pending( new GHOST_Event(system_->getMilliSeconds(), GHOST_kEventWindowActivate, this)); +#ifdef WITH_GHOST_WAYLAND_LIBDECOR + if (success == GHOST_kSuccess) { + if (use_libdecor) { + /* Ensure there is a swap-buffers, needed for the updated window borders to refresh. 
*/ + notify_decor_redraw(); + } + } +#endif + return success; } GHOST_TSuccess GHOST_WindowWayland::deactivate() @@ -1335,8 +1321,17 @@ GHOST_TSuccess GHOST_WindowWayland::deactivate() { system_->getWindowManager()->setWindowInactive(this); } - return system_->pushEvent_maybe_pending( + const GHOST_TSuccess success = system_->pushEvent_maybe_pending( new GHOST_Event(system_->getMilliSeconds(), GHOST_kEventWindowDeactivate, this)); +#ifdef WITH_GHOST_WAYLAND_LIBDECOR + if (success == GHOST_kSuccess) { + if (use_libdecor) { + /* Ensure there is a swap-buffers, needed for the updated window borders to refresh. */ + notify_decor_redraw(); + } + } +#endif + return success; } GHOST_TSuccess GHOST_WindowWayland::notify_size() @@ -1358,6 +1353,14 @@ GHOST_TSuccess GHOST_WindowWayland::notify_size() new GHOST_Event(system_->getMilliSeconds(), GHOST_kEventWindowSize, this)); } +GHOST_TSuccess GHOST_WindowWayland::notify_decor_redraw() +{ + /* NOTE: we want to `swapBuffers`, however this may run from a thread and + * when this windows OpenGL context is not active, so send and update event instead. */ + return system_->pushEvent_maybe_pending( + new GHOST_Event(system_->getMilliSeconds(), GHOST_kEventWindowUpdateDecor, this)); +} + /** \} */ /* -------------------------------------------------------------------- */ diff --git a/intern/ghost/intern/GHOST_WindowWayland.h b/intern/ghost/intern/GHOST_WindowWayland.h index 326c1d5e994..c5554f70200 100644 --- a/intern/ghost/intern/GHOST_WindowWayland.h +++ b/intern/ghost/intern/GHOST_WindowWayland.h @@ -150,6 +150,7 @@ class GHOST_WindowWayland : public GHOST_Window { GHOST_TSuccess activate(); GHOST_TSuccess deactivate(); GHOST_TSuccess notify_size(); + GHOST_TSuccess notify_decor_redraw(); /* WAYLAND utility functions. */ diff --git a/intern/guardedalloc/CMakeLists.txt b/intern/guardedalloc/CMakeLists.txt index 8e66e3d98d4..1aa300f9844 100644 --- a/intern/guardedalloc/CMakeLists.txt +++ b/intern/guardedalloc/CMakeLists.txt @@ -42,10 +42,11 @@ if(WIN32 AND NOT UNIX) list(APPEND INC_SYS ${PTHREADS_INC} ) - - list(APPEND LIB - ${PTHREADS_LIBRARIES} - ) + if(DEFINED PTHREADS_LIBRARIES) + list(APPEND LIB + ${PTHREADS_LIBRARIES} + ) + endif() endif() # Jemalloc 5.0.0+ needs extra configuration. diff --git a/intern/rigidbody/CMakeLists.txt b/intern/rigidbody/CMakeLists.txt index b6911129de4..ef5d7f66040 100644 --- a/intern/rigidbody/CMakeLists.txt +++ b/intern/rigidbody/CMakeLists.txt @@ -20,7 +20,6 @@ set(SRC ) set(LIB - extern_bullet ${BULLET_LIBRARIES} ) diff --git a/make.bat b/make.bat index 394b2d0dad5..239be60ebf0 100644 --- a/make.bat +++ b/make.bat @@ -69,7 +69,7 @@ if "%BUILD_UPDATE%" == "1" ( REM Then update SVN platform libraries, since updating python while python is REM running tends to be problematic. The python script that update_sources REM calls later on may still try to switch branches and run into trouble, - REM but for *most* people this will side step the problem. + REM but for *most* people this will side step the problem. call "%BLENDER_DIR%\build_files\windows\svn_update.cmd" ) REM Finally call the python script shared between all platforms that updates git diff --git a/readme.rst b/readme.rst deleted file mode 100644 index 6d03d004be4..00000000000 --- a/readme.rst +++ /dev/null @@ -1,41 +0,0 @@ - -.. Keep this document short & concise, - linking to external resources instead of including content in-line. - See 'release/text/readme.html' for the end user read-me. 
- - -Blender -======= - -Blender is the free and open source 3D creation suite. -It supports the entirety of the 3D pipeline-modeling, rigging, animation, simulation, rendering, compositing, -motion tracking and video editing. - -.. figure:: https://code.blender.org/wp-content/uploads/2018/12/springrg.jpg - :scale: 50 % - :align: center - - -Project Pages -------------- - -- `Main Website `__ -- `Reference Manual `__ -- `User Community `__ - -Development ------------ - -- `Build Instructions `__ -- `Code Review & Bug Tracker `__ -- `Developer Forum `__ -- `Developer Documentation `__ - - -License -------- - -Blender as a whole is licensed under the GNU General Public License, Version 3. -Individual files may have a different, but compatible license. - -See `blender.org/about/license `__ for details. diff --git a/release/datafiles/blender_icons_geom.py b/release/datafiles/blender_icons_geom.py index faf4a5c5fe6..9277f0b3e79 100644 --- a/release/datafiles/blender_icons_geom.py +++ b/release/datafiles/blender_icons_geom.py @@ -128,10 +128,14 @@ def get_active_vcol(me): def mesh_data_lists_from_mesh(me, material_colors): me_loops = me.loops[:] - me_loops_color = me.attributes.active_color.data[:] me_verts = me.vertices[:] me_polys = me.polygons[:] - + + if me.attributes.active_color: + me_loops_color = me_loops_color_active.data[:] + else: + me_loops_color = None + tris_data = [] class white: @@ -148,7 +152,8 @@ def mesh_data_lists_from_mesh(me, material_colors): l_sta = p.loop_start l_len = p.loop_total loops_poly = me_loops[l_sta:l_sta + l_len] - color_poly = me_loops_color[l_sta:l_sta + l_len] if me_loops_color else None + if me_loops_color is not None: + color_poly = me_loops_color[l_sta:l_sta + l_len] i0 = 0 i1 = 1 @@ -159,14 +164,14 @@ def mesh_data_lists_from_mesh(me, material_colors): l0 = loops_poly[i0] l1 = loops_poly[i1] l2 = loops_poly[i2] - - if color_poly: - c0 = color_poly[i0] - c1 = color_poly[i1] - c2 = color_poly[i2] + + if me_loops_color is not None: + c0 = color_poly[i0].color + c1 = color_poly[i1].color + c2 = color_poly[i2].color else: - c0 = c1 = c2 = white - + c0 = c1 = c2 = (1.0, 1.0, 1.0, 1.0) + v0 = me_verts[l0.vertex_index] v1 = me_verts[l1.vertex_index] v2 = me_verts[l2.vertex_index] @@ -199,7 +204,7 @@ def color_multiply_and_from_linear_to_srgb(base_color, vertex_color): The final color is the product between the base color and the vertex color. 
""" import mathutils - color_linear = [c * b for c, b in zip(vertex_color.color, base_color)] + color_linear = [c * b for c, b in zip(vertex_color, base_color)] color_srgb = mathutils.Color(color_linear[:3]).from_scene_linear_to_srgb() return tuple(round(c * 255) for c in (*color_srgb, color_linear[3])) diff --git a/release/lts/create_download_urls.py b/release/lts/create_download_urls.py index 753e05c98b4..c6b01acdf60 100755 --- a/release/lts/create_download_urls.py +++ b/release/lts/create_download_urls.py @@ -41,7 +41,7 @@ def get_download_url(version: Version, file_name: str) -> str: """ Get the download url for the given version and file_name """ - return (f"https://www.blender.org/download/Blender{version.major}" + return (f"https://www.blender.org/download/release/Blender{version.major}" f".{version.minor}/{file_name}") diff --git a/release/scripts/modules/bl_keymap_utils/keymap_hierarchy.py b/release/scripts/modules/bl_keymap_utils/keymap_hierarchy.py index 7172d7809f2..f183877749c 100644 --- a/release/scripts/modules/bl_keymap_utils/keymap_hierarchy.py +++ b/release/scripts/modules/bl_keymap_utils/keymap_hierarchy.py @@ -55,6 +55,7 @@ _km_hierarchy = [ ('Curve', 'EMPTY', 'WINDOW', [ _km_expand_from_toolsystem('VIEW_3D', 'EDIT_CURVE'), ]), + ('Curves', 'EMPTY', 'WINDOW', []), ('Armature', 'EMPTY', 'WINDOW', [ _km_expand_from_toolsystem('VIEW_3D', 'EDIT_ARMATURE'), ]), diff --git a/release/scripts/modules/rna_manual_reference.py b/release/scripts/modules/rna_manual_reference.py index 28091e119cb..fd7b7dbb786 100644 --- a/release/scripts/modules/rna_manual_reference.py +++ b/release/scripts/modules/rna_manual_reference.py @@ -50,6 +50,8 @@ url_manual_mapping = ( ("bpy.types.cyclesobjectsettings.shadow_terminator_geometry_offset*", "render/cycles/object_settings/object_data.html#bpy-types-cyclesobjectsettings-shadow-terminator-geometry-offset"), ("bpy.types.sequencertoolsettings.use_snap_current_frame_to_strips*", "video_editing/edit/montage/editing.html#bpy-types-sequencertoolsettings-use-snap-current-frame-to-strips"), ("bpy.types.clothcollisionsettings.vertex_group_object_collisions*", "physics/cloth/settings/collisions.html#bpy-types-clothcollisionsettings-vertex-group-object-collisions"), + ("bpy.types.gpencilsculptsettings.use_automasking_material_active*", "grease_pencil/modes/sculpting/introduction.html#bpy-types-gpencilsculptsettings-use-automasking-material-active"), + ("bpy.types.gpencilsculptsettings.use_automasking_material_stroke*", "grease_pencil/modes/sculpting/introduction.html#bpy-types-gpencilsculptsettings-use-automasking-material-stroke"), ("bpy.types.cycleslightsettings.use_multiple_importance_sampling*", "render/cycles/light_settings.html#bpy-types-cycleslightsettings-use-multiple-importance-sampling"), ("bpy.types.fluiddomainsettings.sndparticle_potential_max_energy*", "physics/fluid/type/domain/liquid/particles.html#bpy-types-fluiddomainsettings-sndparticle-potential-max-energy"), ("bpy.types.fluiddomainsettings.sndparticle_potential_min_energy*", "physics/fluid/type/domain/liquid/particles.html#bpy-types-fluiddomainsettings-sndparticle-potential-min-energy"), @@ -61,6 +63,8 @@ url_manual_mapping = ( ("bpy.types.cyclesrendersettings.preview_denoising_start_sample*", "render/cycles/render_settings/sampling.html#bpy-types-cyclesrendersettings-preview-denoising-start-sample"), ("bpy.types.fluiddomainsettings.sndparticle_sampling_trappedair*", "physics/fluid/type/domain/liquid/particles.html#bpy-types-fluiddomainsettings-sndparticle-sampling-trappedair"), 
("bpy.types.fluiddomainsettings.sndparticle_sampling_wavecrest*", "physics/fluid/type/domain/liquid/particles.html#bpy-types-fluiddomainsettings-sndparticle-sampling-wavecrest"), + ("bpy.types.gpencilsculptsettings.use_automasking_layer_active*", "grease_pencil/modes/sculpting/introduction.html#bpy-types-gpencilsculptsettings-use-automasking-layer-active"), + ("bpy.types.gpencilsculptsettings.use_automasking_layer_stroke*", "grease_pencil/modes/sculpting/introduction.html#bpy-types-gpencilsculptsettings-use-automasking-layer-stroke"), ("bpy.types.lineartgpencilmodifier.use_image_boundary_trimming*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-use-image-boundary-trimming"), ("bpy.types.materiallineart.use_intersection_priority_override*", "render/materials/line_art.html#bpy-types-materiallineart-use-intersection-priority-override"), ("bpy.types.rigidbodyconstraint.use_override_solver_iterations*", "physics/rigid_body/constraints/introduction.html#bpy-types-rigidbodyconstraint-use-override-solver-iterations"), @@ -125,6 +129,7 @@ url_manual_mapping = ( ("bpy.types.cyclesrendersettings.min_transparent_bounces*", "render/cycles/render_settings/sampling.html#bpy-types-cyclesrendersettings-min-transparent-bounces"), ("bpy.types.fluiddomainsettings.use_collision_border_top*", "physics/fluid/type/domain/settings.html#bpy-types-fluiddomainsettings-use-collision-border-top"), ("bpy.types.gpencilsculptsettings.intersection_threshold*", "grease_pencil/modes/draw/tools/cutter.html#bpy-types-gpencilsculptsettings-intersection-threshold"), + ("bpy.types.gpencilsculptsettings.use_automasking_stroke*", "grease_pencil/modes/sculpting/introduction.html#bpy-types-gpencilsculptsettings-use-automasking-stroke"), ("bpy.types.gpencilsculptsettings.use_multiframe_falloff*", "grease_pencil/multiframe.html#bpy-types-gpencilsculptsettings-use-multiframe-falloff"), ("bpy.types.lineartgpencilmodifier.use_back_face_culling*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-use-back-face-culling"), ("bpy.types.lineartgpencilmodifier.use_intersection_mask*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-use-intersection-mask"), @@ -152,6 +157,7 @@ url_manual_mapping = ( ("bpy.types.linestylegeometrymodifier_backbonestretcher*", "render/freestyle/view_layer/line_style/modifiers/geometry/backbone_stretcher.html#bpy-types-linestylegeometrymodifier-backbonestretcher"), ("bpy.types.linestylegeometrymodifier_sinusdisplacement*", "render/freestyle/view_layer/line_style/modifiers/geometry/sinus_displacement.html#bpy-types-linestylegeometrymodifier-sinusdisplacement"), ("bpy.types.sequencertoolsettings.snap_to_current_frame*", "video_editing/edit/montage/editing.html#bpy-types-sequencertoolsettings-snap-to-current-frame"), + ("bpy.types.view3doverlay.sculpt_mode_face_sets_opacity*", "sculpt_paint/sculpting/editing/face_sets.html#bpy-types-view3doverlay-sculpt-mode-face-sets-opacity"), ("bpy.ops.object.geometry_nodes_input_attribute_toggle*", "modeling/modifiers/generate/geometry_nodes.html#bpy-ops-object-geometry-nodes-input-attribute-toggle"), ("bpy.types.animvizmotionpaths.show_keyframe_highlight*", "animation/motion_paths.html#bpy-types-animvizmotionpaths-show-keyframe-highlight"), ("bpy.types.brushcurvessculptsettings.minimum_distance*", "sculpt_paint/curves_sculpting/tools/density_curves.html#bpy-types-brushcurvessculptsettings-minimum-distance"), @@ -290,8 +296,8 @@ url_manual_mapping = ( 
("bpy.types.fluiddomainsettings.vector_display_type*", "physics/fluid/type/domain/gas/viewport_display.html#bpy-types-fluiddomainsettings-vector-display-type"), ("bpy.types.freestylelineset.select_by_image_border*", "render/freestyle/view_layer/line_set.html#bpy-types-freestylelineset-select-by-image-border"), ("bpy.types.freestylesettings.kr_derivative_epsilon*", "render/freestyle/view_layer/freestyle.html#bpy-types-freestylesettings-kr-derivative-epsilon"), - ("bpy.types.geometrynodecurveprimitivebeziersegment*", "modeling/geometry_nodes/curve_primitives/bezier_segment.html#bpy-types-geometrynodecurveprimitivebeziersegment"), - ("bpy.types.geometrynodecurveprimitivequadrilateral*", "modeling/geometry_nodes/curve_primitives/quadrilateral.html#bpy-types-geometrynodecurveprimitivequadrilateral"), + ("bpy.types.geometrynodecurveprimitivebeziersegment*", "modeling/geometry_nodes/curve/primitives/bezier_segment.html#bpy-types-geometrynodecurveprimitivebeziersegment"), + ("bpy.types.geometrynodecurveprimitivequadrilateral*", "modeling/geometry_nodes/curve/primitives/quadrilateral.html#bpy-types-geometrynodecurveprimitivequadrilateral"), ("bpy.types.lineartgpencilmodifier.crease_threshold*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-crease-threshold"), ("bpy.types.lineartgpencilmodifier.smooth_tolerance*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-smooth-tolerance"), ("bpy.types.lineartgpencilmodifier.use_intersection*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-use-intersection"), @@ -403,8 +409,8 @@ url_manual_mapping = ( ("bpy.types.freestylelineset.select_by_collection*", "render/freestyle/view_layer/line_set.html#bpy-types-freestylelineset-select-by-collection"), ("bpy.types.freestylelineset.select_by_edge_types*", "render/freestyle/view_layer/line_set.html#bpy-types-freestylelineset-select-by-edge-types"), ("bpy.types.freestylelineset.select_by_face_marks*", "render/freestyle/view_layer/line_set.html#bpy-types-freestylelineset-select-by-face-marks"), - ("bpy.types.geometrynodeinputcurvehandlepositions*", "modeling/geometry_nodes/curve/curve_handle_position.html#bpy-types-geometrynodeinputcurvehandlepositions"), - ("bpy.types.geometrynodeinputedgepathstoselection*", "modeling/geometry_nodes/mesh/edge_paths_to_selection.html#bpy-types-geometrynodeinputedgepathstoselection"), + ("bpy.types.geometrynodeinputcurvehandlepositions*", "modeling/geometry_nodes/curve/read/curve_handle_position.html#bpy-types-geometrynodeinputcurvehandlepositions"), + ("bpy.types.geometrynodeinputedgepathstoselection*", "modeling/geometry_nodes/mesh/operations/edge_paths_to_selection.html#bpy-types-geometrynodeinputedgepathstoselection"), ("bpy.types.linestyle*modifier_distancefromcamera*", "render/freestyle/view_layer/line_style/modifiers/color/distance_from_camera.html#bpy-types-linestyle-modifier-distancefromcamera"), ("bpy.types.linestyle*modifier_distancefromobject*", "render/freestyle/view_layer/line_style/modifiers/color/distance_from_object.html#bpy-types-linestyle-modifier-distancefromobject"), ("bpy.types.linestylegeometrymodifier_2dtransform*", "render/freestyle/view_layer/line_style/modifiers/geometry/2d_transform.html#bpy-types-linestylegeometrymodifier-2dtransform"), @@ -414,6 +420,7 @@ url_manual_mapping = ( ("bpy.types.movietrackingplanetrack.image_opacity*", "movie_clip/tracking/clip/sidebar/track/plane_track.html#bpy-types-movietrackingplanetrack-image-opacity"), 
("bpy.types.particlesettings.use_parent_particles*", "physics/particles/emitter/render.html#bpy-types-particlesettings-use-parent-particles"), ("bpy.types.rigidbodyconstraint.solver_iterations*", "physics/rigid_body/constraints/introduction.html#bpy-types-rigidbodyconstraint-solver-iterations"), + ("bpy.types.rigidbodyobject.collision_collections*", "physics/rigid_body/properties/collisions.html#bpy-types-rigidbodyobject-collision-collections"), ("bpy.types.sculpt.automasking_start_normal_limit*", "sculpt_paint/sculpting/controls.html#bpy-types-sculpt-automasking-start-normal-limit"), ("bpy.types.sculpt.use_automasking_boundary_edges*", "sculpt_paint/sculpting/controls.html#bpy-types-sculpt-use-automasking-boundary-edges"), ("bpy.types.sequenceeditor.use_overlay_frame_lock*", "editors/video_sequencer/preview/sidebar.html#bpy-types-sequenceeditor-use-overlay-frame-lock"), @@ -456,9 +463,9 @@ url_manual_mapping = ( ("bpy.types.freestylelinestyle.material_boundary*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-material-boundary"), ("bpy.types.freestylelinestyle.use_split_pattern*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-split-pattern"), ("bpy.types.freestylesettings.use_view_map_cache*", "render/freestyle/view_layer/freestyle.html#bpy-types-freestylesettings-use-view-map-cache"), - ("bpy.types.geometrynodecurvehandletypeselection*", "modeling/geometry_nodes/curve/handle_type_selection.html#bpy-types-geometrynodecurvehandletypeselection"), + ("bpy.types.geometrynodecurvehandletypeselection*", "modeling/geometry_nodes/curve/read/handle_type_selection.html#bpy-types-geometrynodecurvehandletypeselection"), ("bpy.types.geometrynodedistributepointsinvolume*", "modeling/geometry_nodes/point/distribute_points_in_volume.html#bpy-types-geometrynodedistributepointsinvolume"), - ("bpy.types.geometrynodeinputmeshvertexneighbors*", "modeling/geometry_nodes/mesh/vertex_neighbors.html#bpy-types-geometrynodeinputmeshvertexneighbors"), + ("bpy.types.geometrynodeinputmeshvertexneighbors*", "modeling/geometry_nodes/mesh/read/vertex_neighbors.html#bpy-types-geometrynodeinputmeshvertexneighbors"), ("bpy.types.greasepencil.curve_edit_corner_angle*", "grease_pencil/modes/edit/curve_editing.html#bpy-types-greasepencil-curve-edit-corner-angle"), ("bpy.types.imageformatsettings.color_management*", "render/output/properties/output.html#bpy-types-imageformatsettings-color-management"), ("bpy.types.lineartgpencilmodifier.source_camera*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-source-camera"), @@ -472,6 +479,8 @@ url_manual_mapping = ( ("bpy.types.movietrackingtrack.use_green_channel*", "movie_clip/tracking/clip/sidebar/track/track.html#bpy-types-movietrackingtrack-use-green-channel"), ("bpy.types.rendersettings.resolution_percentage*", "render/output/properties/format.html#bpy-types-rendersettings-resolution-percentage"), ("bpy.types.rendersettings_simplify_gpencil_tint*", "render/cycles/render_settings/simplify.html#bpy-types-rendersettings-simplify-gpencil-tint"), + ("bpy.types.softbodysettings.use_estimate_matrix*", "physics/soft_body/settings/solver.html#bpy-types-softbodysettings-use-estimate-matrix"), + ("bpy.types.softbodysettings.vertex_group_spring*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-vertex-group-spring"), ("bpy.types.spaceimageeditor.show_gizmo_navigate*", "editors/image/introduction.html#bpy-types-spaceimageeditor-show-gizmo-navigate"), 
("bpy.types.spaceoutliner.lib_override_view_mode*", "editors/outliner/interface.html#bpy-types-spaceoutliner-lib-override-view-mode"), ("bpy.types.spaceoutliner.use_filter_object_mesh*", "editors/outliner/interface.html#bpy-types-spaceoutliner-use-filter-object-mesh"), @@ -527,7 +536,7 @@ url_manual_mapping = ( ("bpy.types.freestylelinestyle.integration_type*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-integration-type"), ("bpy.types.freestylelinestyle.use_split_length*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-split-length"), ("bpy.types.geometrynodedistributepointsonfaces*", "modeling/geometry_nodes/point/distribute_points_on_faces.html#bpy-types-geometrynodedistributepointsonfaces"), - ("bpy.types.geometrynodesetcurvehandlepositions*", "modeling/geometry_nodes/curve/set_handle_positions.html#bpy-types-geometrynodesetcurvehandlepositions"), + ("bpy.types.geometrynodesetcurvehandlepositions*", "modeling/geometry_nodes/curve/write/set_handle_positions.html#bpy-types-geometrynodesetcurvehandlepositions"), ("bpy.types.greasepencil.stroke_thickness_space*", "grease_pencil/properties/strokes.html#bpy-types-greasepencil-stroke-thickness-space"), ("bpy.types.lineartgpencilmodifier.use_material*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-use-material"), ("bpy.types.linestylegeometrymodifier_blueprint*", "render/freestyle/view_layer/line_style/modifiers/geometry/blueprint.html#bpy-types-linestylegeometrymodifier-blueprint"), @@ -540,6 +549,9 @@ url_manual_mapping = ( ("bpy.types.rendersettings.simplify_subdivision*", "render/cycles/render_settings/simplify.html#bpy-types-rendersettings-simplify-subdivision"), ("bpy.types.sculpt.use_automasking_start_normal*", "sculpt_paint/sculpting/controls.html#bpy-types-sculpt-use-automasking-start-normal"), ("bpy.types.sequencerpreviewoverlay.show_cursor*", "editors/video_sequencer/preview/display/overlays.html#bpy-types-sequencerpreviewoverlay-show-cursor"), + ("bpy.types.softbodysettings.use_edge_collision*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-use-edge-collision"), + ("bpy.types.softbodysettings.use_face_collision*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-use-face-collision"), + ("bpy.types.softbodysettings.use_self_collision*", "physics/soft_body/settings/self_collision.html#bpy-types-softbodysettings-use-self-collision"), ("bpy.types.spacegrapheditor.show_extrapolation*", "editors/graph_editor/introduction.html#bpy-types-spacegrapheditor-show-extrapolation"), ("bpy.types.spaceoutliner.use_filter_collection*", "editors/outliner/interface.html#bpy-types-spaceoutliner-use-filter-collection"), ("bpy.types.spacesequenceeditor.cursor_location*", "editors/video_sequencer/preview/sidebar.html#bpy-types-spacesequenceeditor-cursor-location"), @@ -591,12 +603,12 @@ url_manual_mapping = ( ("bpy.types.freestylelinestyle.use_chain_count*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-chain-count"), ("bpy.types.freestylelinestyle.use_dashed_line*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-dashed-line"), ("bpy.types.freestylelinestyle.use_same_object*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-same-object"), - ("bpy.types.functionnodeinputspecialcharacters*", "modeling/geometry_nodes/text/special_characters.html#bpy-types-functionnodeinputspecialcharacters"), - 
("bpy.types.geometrynodecurveendpointselection*", "modeling/geometry_nodes/curve/endpoint_selection.html#bpy-types-geometrynodecurveendpointselection"), - ("bpy.types.geometrynodeinputedgepathstocurves*", "modeling/geometry_nodes/mesh/edge_paths_to_curves.html#bpy-types-geometrynodeinputedgepathstocurves"), - ("bpy.types.geometrynodeinputmeshedgeneighbors*", "modeling/geometry_nodes/mesh/edge_neighbors.html#bpy-types-geometrynodeinputmeshedgeneighbors"), - ("bpy.types.geometrynodeinputmeshfaceneighbors*", "modeling/geometry_nodes/mesh/face_neighbors.html#bpy-types-geometrynodeinputmeshfaceneighbors"), - ("bpy.types.geometrynodeinputshortestedgepaths*", "modeling/geometry_nodes/mesh/shortest_edge_paths.html#bpy-types-geometrynodeinputshortestedgepaths"), + ("bpy.types.functionnodeinputspecialcharacters*", "modeling/geometry_nodes/utilities/text/special_characters.html#bpy-types-functionnodeinputspecialcharacters"), + ("bpy.types.geometrynodecurveendpointselection*", "modeling/geometry_nodes/curve/read/endpoint_selection.html#bpy-types-geometrynodecurveendpointselection"), + ("bpy.types.geometrynodeinputedgepathstocurves*", "modeling/geometry_nodes/mesh/operations/edge_paths_to_curves.html#bpy-types-geometrynodeinputedgepathstocurves"), + ("bpy.types.geometrynodeinputmeshedgeneighbors*", "modeling/geometry_nodes/mesh/read/edge_neighbors.html#bpy-types-geometrynodeinputmeshedgeneighbors"), + ("bpy.types.geometrynodeinputmeshfaceneighbors*", "modeling/geometry_nodes/mesh/read/face_neighbors.html#bpy-types-geometrynodeinputmeshfaceneighbors"), + ("bpy.types.geometrynodeinputshortestedgepaths*", "modeling/geometry_nodes/mesh/read/shortest_edge_paths.html#bpy-types-geometrynodeinputshortestedgepaths"), ("bpy.types.gpencilsculptguide.reference_point*", "grease_pencil/modes/draw/guides.html#bpy-types-gpencilsculptguide-reference-point"), ("bpy.types.greasepencil.edit_curve_resolution*", "grease_pencil/modes/edit/curve_editing.html#bpy-types-greasepencil-edit-curve-resolution"), ("bpy.types.lineartgpencilmodifier.use_contour*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-use-contour"), @@ -613,6 +625,9 @@ url_manual_mapping = ( ("bpy.types.sculpt.use_automasking_view_normal*", "sculpt_paint/sculpting/controls.html#bpy-types-sculpt-use-automasking-view-normal"), ("bpy.types.sequencertimelineoverlay.show_grid*", "editors/video_sequencer/sequencer/display.html#bpy-types-sequencertimelineoverlay-show-grid"), ("bpy.types.sequencertoolsettings.overlap_mode*", "video_editing/edit/montage/editing.html#bpy-types-sequencertoolsettings-overlap-mode"), + ("bpy.types.softbodysettings.aerodynamics_type*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-aerodynamics-type"), + ("bpy.types.softbodysettings.vertex_group_goal*", "physics/soft_body/settings/goal.html#bpy-types-softbodysettings-vertex-group-goal"), + ("bpy.types.softbodysettings.vertex_group_mass*", "physics/soft_body/settings/object.html#bpy-types-softbodysettings-vertex-group-mass"), ("bpy.types.spaceclipeditor.show_green_channel*", "editors/clip/display/clip_display.html#bpy-types-spaceclipeditor-show-green-channel"), ("bpy.types.spacenodeoverlay.show_context_path*", "interface/controls/nodes/introduction.html#bpy-types-spacenodeoverlay-show-context-path"), ("bpy.types.spaceoutliner.show_restrict_column*", "editors/outliner/interface.html#bpy-types-spaceoutliner-show-restrict-column"), @@ -652,12 +667,12 @@ url_manual_mapping = ( ("bpy.types.freestylelineset.select_edge_mark*", 
"render/freestyle/view_layer/line_set.html#bpy-types-freestylelineset-select-edge-mark"), ("bpy.types.freestylelinestyle.use_length_max*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-length-max"), ("bpy.types.freestylelinestyle.use_length_min*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-length-min"), - ("bpy.types.geometrynodedeformcurvesonsurface*", "modeling/geometry_nodes/curve/deform_curves_on_surface.html#bpy-types-geometrynodedeformcurvesonsurface"), + ("bpy.types.geometrynodedeformcurvesonsurface*", "modeling/geometry_nodes/curve/operations/deform_curves_on_surface.html#bpy-types-geometrynodedeformcurvesonsurface"), ("bpy.types.geometrynodeinputinstancerotation*", "modeling/geometry_nodes/instances/instance_rotation.html#bpy-types-geometrynodeinputinstancerotation"), - ("bpy.types.geometrynodeinputmeshedgevertices*", "modeling/geometry_nodes/mesh/edge_vertices.html#bpy-types-geometrynodeinputmeshedgevertices"), - ("bpy.types.geometrynodeinputmeshfaceisplanar*", "modeling/geometry_nodes/mesh/face_is_planar.html#bpy-types-geometrynodeinputmeshfaceisplanar"), - ("bpy.types.geometrynodeinputsplineresolution*", "modeling/geometry_nodes/curve/spline_resolution.html#bpy-types-geometrynodeinputsplineresolution"), - ("bpy.types.geometrynodemeshfacesetboundaries*", "modeling/geometry_nodes/mesh/face_set_boundaries.html#bpy-types-geometrynodemeshfacesetboundaries"), + ("bpy.types.geometrynodeinputmeshedgevertices*", "modeling/geometry_nodes/mesh/read/edge_vertices.html#bpy-types-geometrynodeinputmeshedgevertices"), + ("bpy.types.geometrynodeinputmeshfaceisplanar*", "modeling/geometry_nodes/mesh/read/face_is_planar.html#bpy-types-geometrynodeinputmeshfaceisplanar"), + ("bpy.types.geometrynodeinputsplineresolution*", "modeling/geometry_nodes/curve/read/spline_resolution.html#bpy-types-geometrynodeinputsplineresolution"), + ("bpy.types.geometrynodemeshfacesetboundaries*", "modeling/geometry_nodes/mesh/read/face_set_boundaries.html#bpy-types-geometrynodemeshfacesetboundaries"), ("bpy.types.greasepencil.curve_edit_threshold*", "grease_pencil/modes/edit/curve_editing.html#bpy-types-greasepencil-curve-edit-threshold"), ("bpy.types.lineartgpencilmodifier.use_crease*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-use-crease"), ("bpy.types.lineartgpencilmodifier.use_shadow*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-use-shadow"), @@ -669,6 +684,7 @@ url_manual_mapping = ( ("bpy.types.rendersettings.use_crop_to_border*", "render/output/properties/format.html#bpy-types-rendersettings-use-crop-to-border"), ("bpy.types.rendersettings.use_file_extension*", "render/output/properties/output.html#bpy-types-rendersettings-use-file-extension"), ("bpy.types.sculpt.constant_detail_resolution*", "sculpt_paint/sculpting/tool_settings/dyntopo.html#bpy-types-sculpt-constant-detail-resolution"), + ("bpy.types.sequencemodifier.input_mask_strip*", "editors/video_sequencer/sequencer/sidebar/modifiers.html#bpy-types-sequencemodifier-input-mask-strip"), ("bpy.types.sequencertoolsettings.pivot_point*", "editors/video_sequencer/preview/controls/pivot_point.html#bpy-types-sequencertoolsettings-pivot-point"), ("bpy.types.spaceclipeditor.annotation_source*", "movie_clip/tracking/clip/sidebar/view.html#bpy-types-spaceclipeditor-annotation-source"), ("bpy.types.spaceclipeditor.mask_display_type*", 
"editors/clip/display/mask_display.html#bpy-types-spaceclipeditor-mask-display-type"), @@ -676,6 +692,7 @@ url_manual_mapping = ( ("bpy.types.spaceclipeditor.show_blue_channel*", "editors/clip/display/clip_display.html#bpy-types-spaceclipeditor-show-blue-channel"), ("bpy.types.spaceclipeditor.show_mask_overlay*", "editors/clip/display/mask_display.html#bpy-types-spaceclipeditor-show-mask-overlay"), ("bpy.types.spacefilebrowser.system_bookmarks*", "editors/file_browser.html#bpy-types-spacefilebrowser-system-bookmarks"), + ("bpy.types.spaceimageeditor.display_channels*", "editors/image/introduction.html#bpy-types-spaceimageeditor-display-channels"), ("bpy.types.spaceoutliner.use_filter_children*", "editors/outliner/interface.html#bpy-types-spaceoutliner-use-filter-children"), ("bpy.types.spaceoutliner.use_filter_complete*", "editors/outliner/interface.html#bpy-types-spaceoutliner-use-filter-complete"), ("bpy.types.spacespreadsheet.attribute_domain*", "editors/spreadsheet.html#bpy-types-spacespreadsheet-attribute-domain"), @@ -730,10 +747,10 @@ url_manual_mapping = ( ("bpy.types.freestylelinestyle.use_angle_min*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-angle-min"), ("bpy.types.freestylesettings.as_render_pass*", "render/freestyle/view_layer/freestyle.html#bpy-types-freestylesettings-as-render-pass"), ("bpy.types.freestylesettings.use_smoothness*", "render/freestyle/view_layer/freestyle.html#bpy-types-freestylesettings-use-smoothness"), - ("bpy.types.geometrynodecurveprimitivecircle*", "modeling/geometry_nodes/curve_primitives/curve_circle.html#bpy-types-geometrynodecurveprimitivecircle"), - ("bpy.types.geometrynodecurvequadraticbezier*", "modeling/geometry_nodes/curve_primitives/quadratic_bezier.html#bpy-types-geometrynodecurvequadraticbezier"), + ("bpy.types.geometrynodecurveprimitivecircle*", "modeling/geometry_nodes/curve/primitives/curve_circle.html#bpy-types-geometrynodecurveprimitivecircle"), + ("bpy.types.geometrynodecurvequadraticbezier*", "modeling/geometry_nodes/curve/primitives/quadratic_bezier.html#bpy-types-geometrynodecurvequadraticbezier"), ("bpy.types.geometrynoderemovenamedattribute*", "modeling/geometry_nodes/attribute/remove_named_attribute.html#bpy-types-geometrynoderemovenamedattribute"), - ("bpy.types.geometrynodesamplenearestsurface*", "modeling/geometry_nodes/mesh/sample_nearest_surface.html#bpy-types-geometrynodesamplenearestsurface"), + ("bpy.types.geometrynodesamplenearestsurface*", "modeling/geometry_nodes/mesh/operations/sample_nearest_surface.html#bpy-types-geometrynodesamplenearestsurface"), ("bpy.types.gpencillayer.use_viewlayer_masks*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-use-viewlayer-masks"), ("bpy.types.greasepencil.onion_keyframe_type*", "grease_pencil/properties/onion_skinning.html#bpy-types-greasepencil-onion-keyframe-type"), ("bpy.types.lineartgpencilmodifier.use_cache*", "grease_pencil/modifiers/generate/line_art.html#bpy-types-lineartgpencilmodifier-use-cache"), @@ -744,8 +761,12 @@ url_manual_mapping = ( ("bpy.types.movietrackingcamera.sensor_width*", "movie_clip/tracking/clip/sidebar/track/camera.html#bpy-types-movietrackingcamera-sensor-width"), ("bpy.types.posebone.use_ik_rotation_control*", "animation/armatures/posing/bone_constraints/inverse_kinematics/introduction.html#bpy-types-posebone-use-ik-rotation-control"), ("bpy.types.rendersettings.use_bake_multires*", "render/cycles/baking.html#bpy-types-rendersettings-use-bake-multires"), + 
("bpy.types.rigidbodyobject.collision_margin*", "physics/rigid_body/properties/collisions.html#bpy-types-rigidbodyobject-collision-margin"), ("bpy.types.scenegpencil.antialias_threshold*", "render/cycles/render_settings/grease_pencil.html#bpy-types-scenegpencil-antialias-threshold"), ("bpy.types.sculpt.use_automasking_face_sets*", "sculpt_paint/sculpting/controls.html#bpy-types-sculpt-use-automasking-face-sets"), + ("bpy.types.sequencemodifier.input_mask_type*", "editors/video_sequencer/sequencer/sidebar/modifiers.html#bpy-types-sequencemodifier-input-mask-type"), + ("bpy.types.softbodysettings.error_threshold*", "physics/soft_body/settings/solver.html#bpy-types-softbodysettings-error-threshold"), + ("bpy.types.softbodysettings.use_stiff_quads*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-use-stiff-quads"), ("bpy.types.spaceclipeditor.show_mask_spline*", "editors/clip/display/mask_display.html#bpy-types-spaceclipeditor-show-mask-spline"), ("bpy.types.spaceclipeditor.show_red_channel*", "editors/clip/display/clip_display.html#bpy-types-spaceclipeditor-show-red-channel"), ("bpy.types.spaceclipeditor.use_mute_footage*", "editors/clip/display/clip_display.html#bpy-types-spaceclipeditor-use-mute-footage"), @@ -803,7 +824,7 @@ url_manual_mapping = ( ("bpy.types.freestylelinestyle.use_chaining*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-chaining"), ("bpy.types.freestylesettings.sphere_radius*", "render/freestyle/view_layer/freestyle.html#bpy-types-freestylesettings-sphere-radius"), ("bpy.types.geometrynodeattributedomainsize*", "modeling/geometry_nodes/attribute/domain_size.html#bpy-types-geometrynodeattributedomainsize"), - ("bpy.types.geometrynodesetsplineresolution*", "modeling/geometry_nodes/curve/set_spline_resolution.html#bpy-types-geometrynodesetsplineresolution"), + ("bpy.types.geometrynodesetsplineresolution*", "modeling/geometry_nodes/curve/write/set_spline_resolution.html#bpy-types-geometrynodesetsplineresolution"), ("bpy.types.geometrynodestorenamedattribute*", "modeling/geometry_nodes/attribute/store_named_attribute.html#bpy-types-geometrynodestorenamedattribute"), ("bpy.types.gpencillayer.annotation_opacity*", "interface/annotate_tool.html#bpy-types-gpencillayer-annotation-opacity"), ("bpy.types.gpencillayer.use_onion_skinning*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-use-onion-skinning"), @@ -825,8 +846,10 @@ url_manual_mapping = ( ("bpy.types.rendersettings.simplify_volumes*", "render/cycles/render_settings/simplify.html#bpy-types-rendersettings-simplify-volumes"), ("bpy.types.rendersettings.use_render_cache*", "render/output/properties/output.html#bpy-types-rendersettings-use-render-cache"), ("bpy.types.rendersettings.use_single_layer*", "render/layers/view_layer.html#bpy-types-rendersettings-use-single-layer"), + ("bpy.types.rigidbodyobject.collision_shape*", "physics/rigid_body/properties/collisions.html#bpy-types-rigidbodyobject-collision-shape"), ("bpy.types.sceneeevee.use_taa_reprojection*", "render/eevee/render_settings/sampling.html#bpy-types-sceneeevee-use-taa-reprojection"), ("bpy.types.sculpt.use_automasking_topology*", "sculpt_paint/sculpting/controls.html#bpy-types-sculpt-use-automasking-topology"), + ("bpy.types.softbodysettings.collision_type*", "physics/soft_body/settings/self_collision.html#bpy-types-softbodysettings-collision-type"), ("bpy.types.spaceclipeditor.cursor_location*", "editors/clip/sidebar.html#bpy-types-spaceclipeditor-cursor-location"), 
("bpy.types.spacefilebrowser.recent_folders*", "editors/file_browser.html#bpy-types-spacefilebrowser-recent-folders"), ("bpy.types.spacefilebrowser.system_folders*", "editors/file_browser.html#bpy-types-spacefilebrowser-system-folders"), @@ -885,17 +908,17 @@ url_manual_mapping = ( ("bpy.types.freestylelinestyle.chain_count*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-chain-count"), ("bpy.types.freestylelinestyle.use_sorting*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-use-sorting"), ("bpy.types.freestylesettings.crease_angle*", "render/freestyle/view_layer/freestyle.html#bpy-types-freestylesettings-crease-angle"), - ("bpy.types.functionnodealigneulertovector*", "modeling/geometry_nodes/utilities/align_euler_to_vector.html#bpy-types-functionnodealigneulertovector"), + ("bpy.types.functionnodealigneulertovector*", "modeling/geometry_nodes/utilities/rotation/align_euler_to_vector.html#bpy-types-functionnodealigneulertovector"), ("bpy.types.geometrynodeattributestatistic*", "modeling/geometry_nodes/attribute/attribute_statistic.html#bpy-types-geometrynodeattributestatistic"), - ("bpy.types.geometrynodecurveprimitiveline*", "modeling/geometry_nodes/curve_primitives/curve_line.html#bpy-types-geometrynodecurveprimitiveline"), + ("bpy.types.geometrynodecurveprimitiveline*", "modeling/geometry_nodes/curve/primitives/curve_line.html#bpy-types-geometrynodecurveprimitiveline"), ("bpy.types.geometrynodegeometrytoinstance*", "modeling/geometry_nodes/geometry/geometry_to_instance.html#bpy-types-geometrynodegeometrytoinstance"), ("bpy.types.geometrynodeinputinstancescale*", "modeling/geometry_nodes/instances/instance_scale.html#bpy-types-geometrynodeinputinstancescale"), ("bpy.types.geometrynodeinputmaterialindex*", "modeling/geometry_nodes/material/material_index.html#bpy-types-geometrynodeinputmaterialindex"), - ("bpy.types.geometrynodeinputmeshedgeangle*", "modeling/geometry_nodes/mesh/edge_angle.html#bpy-types-geometrynodeinputmeshedgeangle"), - ("bpy.types.geometrynodeoffsetcornerinface*", "modeling/geometry_nodes/mesh_topology/offset_corner_in_face.html#bpy-types-geometrynodeoffsetcornerinface"), - ("bpy.types.geometrynodeoffsetpointincurve*", "modeling/geometry_nodes/curve_topology/offset_point_in_curve.html#bpy-types-geometrynodeoffsetpointincurve"), - ("bpy.types.geometrynodeseparatecomponents*", "modeling/geometry_nodes/geometry/separate_components.html#bpy-types-geometrynodeseparatecomponents"), - ("bpy.types.geometrynodesubdivisionsurface*", "modeling/geometry_nodes/mesh/subdivision_surface.html#bpy-types-geometrynodesubdivisionsurface"), + ("bpy.types.geometrynodeinputmeshedgeangle*", "modeling/geometry_nodes/mesh/read/edge_angle.html#bpy-types-geometrynodeinputmeshedgeangle"), + ("bpy.types.geometrynodeoffsetcornerinface*", "modeling/geometry_nodes/mesh/topology/offset_corner_in_face.html#bpy-types-geometrynodeoffsetcornerinface"), + ("bpy.types.geometrynodeoffsetpointincurve*", "modeling/geometry_nodes/curve/topology/offset_point_in_curve.html#bpy-types-geometrynodeoffsetpointincurve"), + ("bpy.types.geometrynodeseparatecomponents*", "modeling/geometry_nodes/geometry/operations/separate_components.html#bpy-types-geometrynodeseparatecomponents"), + ("bpy.types.geometrynodesubdivisionsurface*", "modeling/geometry_nodes/mesh/operations/subdivision_surface.html#bpy-types-geometrynodesubdivisionsurface"), ("bpy.types.geometrynodetranslateinstances*", 
"modeling/geometry_nodes/instances/translate_instances.html#bpy-types-geometrynodetranslateinstances"), ("bpy.types.greasepencil.ghost_after_range*", "grease_pencil/properties/onion_skinning.html#bpy-types-greasepencil-ghost-after-range"), ("bpy.types.greasepencil.use_ghosts_always*", "grease_pencil/properties/onion_skinning.html#bpy-types-greasepencil-use-ghosts-always"), @@ -920,7 +943,11 @@ url_manual_mapping = ( ("bpy.types.rendersettings.use_compositing*", "render/output/properties/post_processing.html#bpy-types-rendersettings-use-compositing"), ("bpy.types.rendersettings.use_motion_blur*", "render/cycles/render_settings/motion_blur.html#bpy-types-rendersettings-use-motion-blur"), ("bpy.types.rendersettings.use_placeholder*", "render/output/properties/output.html#bpy-types-rendersettings-use-placeholder"), + ("bpy.types.sequencemodifier.input_mask_id*", "editors/video_sequencer/sequencer/sidebar/modifiers.html#bpy-types-sequencemodifier-input-mask-id"), ("bpy.types.shadernodesubsurfacescattering*", "render/shader_nodes/shader/sss.html#bpy-types-shadernodesubsurfacescattering"), + ("bpy.types.softbodysettings.goal_friction*", "physics/soft_body/settings/goal.html#bpy-types-softbodysettings-goal-friction"), + ("bpy.types.softbodysettings.spring_length*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-spring-length"), + ("bpy.types.softbodysettings.use_auto_step*", "physics/soft_body/settings/solver.html#bpy-types-softbodysettings-use-auto-step"), ("bpy.types.spaceclipeditor.lock_selection*", "editors/clip/introduction.html#bpy-types-spaceclipeditor-lock-selection"), ("bpy.types.spacedopesheeteditor.auto_snap*", "editors/dope_sheet/editing.html#bpy-types-spacedopesheeteditor-auto-snap"), ("bpy.types.spacenodeoverlay.show_overlays*", "interface/controls/nodes/introduction.html#bpy-types-spacenodeoverlay-show-overlays"), @@ -989,9 +1016,9 @@ url_manual_mapping = ( ("bpy.types.freestylelinestyle.sort_order*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-sort-order"), ("bpy.types.freestylelinestyle.split_dash*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-split-dash"), ("bpy.types.freestylesettings.use_culling*", "render/freestyle/view_layer/freestyle.html#bpy-types-freestylesettings-use-culling"), - ("bpy.types.geometrynodeduplicateelements*", "modeling/geometry_nodes/geometry/duplicate_elements.html#bpy-types-geometrynodeduplicateelements"), - ("bpy.types.geometrynodeinputmeshfacearea*", "modeling/geometry_nodes/mesh/face_area.html#bpy-types-geometrynodeinputmeshfacearea"), - ("bpy.types.geometrynodeinputsplinecyclic*", "modeling/geometry_nodes/curve/is_spline_cyclic.html#bpy-types-geometrynodeinputsplinecyclic"), + ("bpy.types.geometrynodeduplicateelements*", "modeling/geometry_nodes/geometry/operations/duplicate_elements.html#bpy-types-geometrynodeduplicateelements"), + ("bpy.types.geometrynodeinputmeshfacearea*", "modeling/geometry_nodes/mesh/read/face_area.html#bpy-types-geometrynodeinputmeshfacearea"), + ("bpy.types.geometrynodeinputsplinecyclic*", "modeling/geometry_nodes/curve/read/is_spline_cyclic.html#bpy-types-geometrynodeinputsplinecyclic"), ("bpy.types.geometrynodeinstancestopoints*", "modeling/geometry_nodes/instances/instances_to_points.html#bpy-types-geometrynodeinstancestopoints"), ("bpy.types.gpencillayer.viewlayer_render*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-viewlayer-render"), ("bpy.types.imagepaint.use_normal_falloff*", 
"sculpt_paint/brush/falloff.html#bpy-types-imagepaint-use-normal-falloff"), @@ -1011,6 +1038,8 @@ url_manual_mapping = ( ("bpy.types.sceneeevee.taa_render_samples*", "render/eevee/render_settings/sampling.html#bpy-types-sceneeevee-taa-render-samples"), ("bpy.types.sculpt.use_automasking_cavity*", "sculpt_paint/sculpting/controls.html#bpy-types-sculpt-use-automasking-cavity"), ("bpy.types.sequence.frame_final_duration*", "editors/video_sequencer/sequencer/sidebar/strip.html#bpy-types-sequence-frame-final-duration"), + ("bpy.types.softbodysettings.goal_default*", "physics/soft_body/settings/goal.html#bpy-types-softbodysettings-goal-default"), + ("bpy.types.softbodysettings.use_diagnose*", "physics/soft_body/settings/solver.html#bpy-types-softbodysettings-use-diagnose"), ("bpy.types.spaceoutliner.use_sync_select*", "editors/outliner/interface.html#bpy-types-spaceoutliner-use-sync-select"), ("bpy.types.spaceproperties.outliner_sync*", "editors/properties_editor.html#bpy-types-spaceproperties-outliner-sync"), ("bpy.types.spaceproperties.search_filter*", "editors/properties_editor.html#bpy-types-spaceproperties-search-filter"), @@ -1069,11 +1098,11 @@ url_manual_mapping = ( ("bpy.types.freestylelinestyle.split_gap*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-split-gap"), ("bpy.types.freestylelinestyle.use_nodes*", "render/freestyle/view_layer/line_style/texture.html#bpy-types-freestylelinestyle-use-nodes"), ("bpy.types.geometrynodecaptureattribute*", "modeling/geometry_nodes/attribute/capture_attribute.html#bpy-types-geometrynodecaptureattribute"), - ("bpy.types.geometrynodeinputshadesmooth*", "modeling/geometry_nodes/mesh/is_shade_smooth.html#bpy-types-geometrynodeinputshadesmooth"), + ("bpy.types.geometrynodeinputshadesmooth*", "modeling/geometry_nodes/mesh/read/is_shade_smooth.html#bpy-types-geometrynodeinputshadesmooth"), ("bpy.types.geometrynodeinstanceonpoints*", "modeling/geometry_nodes/instances/instance_on_points.html#bpy-types-geometrynodeinstanceonpoints"), ("bpy.types.geometrynodepointstovertices*", "modeling/geometry_nodes/point/points_to_vertices.html#bpy-types-geometrynodepointstovertices"), ("bpy.types.geometrynoderealizeinstances*", "modeling/geometry_nodes/instances/realize_instances.html#bpy-types-geometrynoderealizeinstances"), - ("bpy.types.geometrynodeseparategeometry*", "modeling/geometry_nodes/geometry/separate_geometry.html#bpy-types-geometrynodeseparategeometry"), + ("bpy.types.geometrynodeseparategeometry*", "modeling/geometry_nodes/geometry/operations/separate_geometry.html#bpy-types-geometrynodeseparategeometry"), ("bpy.types.geometrynodesetmaterialindex*", "modeling/geometry_nodes/material/set_material_index.html#bpy-types-geometrynodesetmaterialindex"), ("bpy.types.greasepencil.edit_line_color*", "grease_pencil/properties/display.html#bpy-types-greasepencil-edit-line-color"), ("bpy.types.material.preview_render_type*", "render/materials/preview.html#bpy-types-material-preview-render-type"), @@ -1094,6 +1123,7 @@ url_manual_mapping = ( ("bpy.types.sequencetimelinechannel.name*", "editors/video_sequencer/sequencer/channels.html#bpy-types-sequencetimelinechannel-name"), ("bpy.types.shadernodebsdfhairprincipled*", "render/shader_nodes/shader/hair_principled.html#bpy-types-shadernodebsdfhairprincipled"), ("bpy.types.shadernodevectordisplacement*", "render/shader_nodes/vector/vector_displacement.html#bpy-types-shadernodevectordisplacement"), + ("bpy.types.softbodysettings.goal_spring*", 
"physics/soft_body/settings/goal.html#bpy-types-softbodysettings-goal-spring"), ("bpy.types.spaceclipeditor.blend_factor*", "editors/clip/display/mask_display.html#bpy-types-spaceclipeditor-blend-factor"), ("bpy.types.spacegrapheditor.show_cursor*", "editors/graph_editor/introduction.html#bpy-types-spacegrapheditor-show-cursor"), ("bpy.types.spaceimageeditor.show_repeat*", "editors/image/sidebar.html#bpy-types-spaceimageeditor-show-repeat"), @@ -1141,6 +1171,7 @@ url_manual_mapping = ( ("bpy.types.brush.boundary_falloff_type*", "sculpt_paint/sculpting/tools/boundary.html#bpy-types-brush-boundary-falloff-type"), ("bpy.types.brush.cursor_color_subtract*", "sculpt_paint/brush/cursor.html#bpy-types-brush-cursor-color-subtract"), ("bpy.types.brush.texture_overlay_alpha*", "sculpt_paint/brush/cursor.html#bpy-types-brush-texture-overlay-alpha"), + ("bpy.types.brush.use_frontface_falloff*", "sculpt_paint/brush/falloff.html#bpy-types-brush-use-frontface-falloff"), ("bpy.types.brush.use_space_attenuation*", "sculpt_paint/brush/stroke.html#bpy-types-brush-use-space-attenuation"), ("bpy.types.brushgpencilsettings.aspect*", "grease_pencil/modes/draw/tools/draw.html#bpy-types-brushgpencilsettings-aspect"), ("bpy.types.brushgpencilsettings.dilate*", "grease_pencil/modes/draw/tools/fill.html#bpy-types-brushgpencilsettings-dilate"), @@ -1172,17 +1203,17 @@ url_manual_mapping = ( ("bpy.types.freestylelineset.visibility*", "render/freestyle/view_layer/line_set.html#bpy-types-freestylelineset-visibility"), ("bpy.types.freestylelinestyle.chaining*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-chaining"), ("bpy.types.freestylelinestyle.sort_key*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-sort-key"), - ("bpy.types.geometrynodeaccumulatefield*", "modeling/geometry_nodes/utilities/accumulate_field.html#bpy-types-geometrynodeaccumulatefield"), - ("bpy.types.geometrynodecornersofvertex*", "modeling/geometry_nodes/mesh_topology/corners_of_vertex.html#bpy-types-geometrynodecornersofvertex"), - ("bpy.types.geometrynodecurvesethandles*", "modeling/geometry_nodes/curve/set_handle_type.html#bpy-types-geometrynodecurvesethandles"), - ("bpy.types.geometrynodecurvesplinetype*", "modeling/geometry_nodes/curve/set_spline_type.html#bpy-types-geometrynodecurvesplinetype"), - ("bpy.types.geometrynodeinputmeshisland*", "modeling/geometry_nodes/mesh/mesh_island.html#bpy-types-geometrynodeinputmeshisland"), - ("bpy.types.geometrynodemergebydistance*", "modeling/geometry_nodes/geometry/merge_by_distance.html#bpy-types-geometrynodemergebydistance"), + ("bpy.types.geometrynodeaccumulatefield*", "modeling/geometry_nodes/utilities/field/accumulate_field.html#bpy-types-geometrynodeaccumulatefield"), + ("bpy.types.geometrynodecornersofvertex*", "modeling/geometry_nodes/mesh/topology/corners_of_vertex.html#bpy-types-geometrynodecornersofvertex"), + ("bpy.types.geometrynodecurvesethandles*", "modeling/geometry_nodes/curve/write/set_handle_type.html#bpy-types-geometrynodecurvesethandles"), + ("bpy.types.geometrynodecurvesplinetype*", "modeling/geometry_nodes/curve/write/set_spline_type.html#bpy-types-geometrynodecurvesplinetype"), + ("bpy.types.geometrynodeinputmeshisland*", "modeling/geometry_nodes/mesh/read/mesh_island.html#bpy-types-geometrynodeinputmeshisland"), + ("bpy.types.geometrynodemergebydistance*", "modeling/geometry_nodes/geometry/operations/merge_by_distance.html#bpy-types-geometrynodemergebydistance"), ("bpy.types.geometrynodereplacematerial*", 
"modeling/geometry_nodes/material/replace_material.html#bpy-types-geometrynodereplacematerial"), ("bpy.types.geometrynoderotateinstances*", "modeling/geometry_nodes/instances/rotate_instances.html#bpy-types-geometrynoderotateinstances"), - ("bpy.types.geometrynodesampleuvsurface*", "modeling/geometry_nodes/mesh/sample_uv_surface.html#bpy-types-geometrynodesampleuvsurface"), - ("bpy.types.geometrynodesetsplinecyclic*", "modeling/geometry_nodes/curve/set_spline_cyclic.html#bpy-types-geometrynodesetsplinecyclic"), - ("bpy.types.geometrynodesplineparameter*", "modeling/geometry_nodes/curve/spline_parameter.html#bpy-types-geometrynodesplineparameter"), + ("bpy.types.geometrynodesampleuvsurface*", "modeling/geometry_nodes/mesh/operations/sample_uv_surface.html#bpy-types-geometrynodesampleuvsurface"), + ("bpy.types.geometrynodesetsplinecyclic*", "modeling/geometry_nodes/curve/write/set_spline_cyclic.html#bpy-types-geometrynodesetsplinecyclic"), + ("bpy.types.geometrynodesplineparameter*", "modeling/geometry_nodes/curve/read/spline_parameter.html#bpy-types-geometrynodesplineparameter"), ("bpy.types.gpencillayer.use_mask_layer*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-use-mask-layer"), ("bpy.types.greasepencil.use_curve_edit*", "grease_pencil/modes/edit/curve_editing.html#bpy-types-greasepencil-use-curve-edit"), ("bpy.types.greasepencil.use_onion_fade*", "grease_pencil/properties/onion_skinning.html#bpy-types-greasepencil-use-onion-fade"), @@ -1202,12 +1233,15 @@ url_manual_mapping = ( ("bpy.types.rigidbodyconstraint.enabled*", "physics/rigid_body/constraints/introduction.html#bpy-types-rigidbodyconstraint-enabled"), ("bpy.types.rigidbodyconstraint.object1*", "physics/rigid_body/constraints/introduction.html#bpy-types-rigidbodyconstraint-object1"), ("bpy.types.rigidbodyconstraint.object2*", "physics/rigid_body/constraints/introduction.html#bpy-types-rigidbodyconstraint-object2"), + ("bpy.types.rigidbodyobject.mesh_source*", "physics/rigid_body/properties/collisions.html#bpy-types-rigidbodyobject-mesh-source"), + ("bpy.types.rigidbodyobject.restitution*", "physics/rigid_body/properties/collisions.html#bpy-types-rigidbodyobject-restitution"), ("bpy.types.sceneeevee.volumetric_light*", "render/eevee/render_settings/volumetrics.html#bpy-types-sceneeevee-volumetric-light"), ("bpy.types.sculpt.detail_refine_method*", "sculpt_paint/sculpting/tool_settings/dyntopo.html#bpy-types-sculpt-detail-refine-method"), ("bpy.types.sculpt.symmetrize_direction*", "sculpt_paint/sculpting/tool_settings/symmetry.html#bpy-types-sculpt-symmetrize-direction"), ("bpy.types.sequence.frame_offset_start*", "editors/video_sequencer/sequencer/sidebar/strip.html#bpy-types-sequence-frame-offset-start"), ("bpy.types.sequenceeditor.show_overlay*", "editors/video_sequencer/preview/sidebar.html#bpy-types-sequenceeditor-show-overlay"), ("bpy.types.sequenceeditor.use_prefetch*", "editors/video_sequencer/preview/sidebar.html#bpy-types-sequenceeditor-use-prefetch"), + ("bpy.types.softbodysettings.ball_stiff*", "physics/soft_body/settings/self_collision.html#bpy-types-softbodysettings-ball-stiff"), ("bpy.types.soundsequence.show_waveform*", "editors/video_sequencer/sequencer/sidebar/strip.html#bpy-types-soundsequence-show-waveform"), ("bpy.types.spaceclipeditor.show_stable*", "editors/clip/display/clip_display.html#bpy-types-spaceclipeditor-show-stable"), ("bpy.types.spaceimageeditor.show_gizmo*", "editors/image/introduction.html#bpy-types-spaceimageeditor-show-gizmo"), @@ -1267,20 +1301,20 @@ url_manual_mapping = ( 
("bpy.types.fileselectparams.directory*", "editors/file_browser.html#bpy-types-fileselectparams-directory"), ("bpy.types.fluidflowsettings.use_flow*", "physics/fluid/type/flow.html#bpy-types-fluidflowsettings-use-flow"), ("bpy.types.fmodifierfunctiongenerator*", "editors/graph_editor/fcurves/modifiers.html#bpy-types-fmodifierfunctiongenerator"), - ("bpy.types.geometrynodecollectioninfo*", "modeling/geometry_nodes/input/collection_info.html#bpy-types-geometrynodecollectioninfo"), - ("bpy.types.geometrynodedeletegeometry*", "modeling/geometry_nodes/geometry/delete_geometry.html#bpy-types-geometrynodedeletegeometry"), - ("bpy.types.geometrynodeinputcurvetilt*", "modeling/geometry_nodes/curve/curve_tilt.html#bpy-types-geometrynodeinputcurvetilt"), - ("bpy.types.geometrynodeinputscenetime*", "modeling/geometry_nodes/input/scene_time.html#bpy-types-geometrynodeinputscenetime"), - ("bpy.types.geometrynodenamedattribute*", "modeling/geometry_nodes/input/named_attribute.html#bpy-types-geometrynodenamedattribute"), + ("bpy.types.geometrynodecollectioninfo*", "modeling/geometry_nodes/input/scene/collection_info.html#bpy-types-geometrynodecollectioninfo"), + ("bpy.types.geometrynodedeletegeometry*", "modeling/geometry_nodes/geometry/operations/delete_geometry.html#bpy-types-geometrynodedeletegeometry"), + ("bpy.types.geometrynodeinputcurvetilt*", "modeling/geometry_nodes/curve/read/curve_tilt.html#bpy-types-geometrynodeinputcurvetilt"), + ("bpy.types.geometrynodeinputscenetime*", "modeling/geometry_nodes/input/scene/scene_time.html#bpy-types-geometrynodeinputscenetime"), + ("bpy.types.geometrynodenamedattribute*", "modeling/geometry_nodes/geometry/read/named_attribute.html#bpy-types-geometrynodenamedattribute"), ("bpy.types.geometrynodepointstovolume*", "modeling/geometry_nodes/point/points_to_volume.html#bpy-types-geometrynodepointstovolume"), ("bpy.types.geometrynodescaleinstances*", "modeling/geometry_nodes/instances/scale_instances.html#bpy-types-geometrynodescaleinstances"), - ("bpy.types.geometrynodesetcurvenormal*", "modeling/geometry_nodes/curve/set_curve_normal.html#bpy-types-geometrynodesetcurvenormal"), - ("bpy.types.geometrynodesetcurveradius*", "modeling/geometry_nodes/curve/set_curve_radius.html#bpy-types-geometrynodesetcurveradius"), + ("bpy.types.geometrynodesetcurvenormal*", "modeling/geometry_nodes/curve/write/set_curve_normal.html#bpy-types-geometrynodesetcurvenormal"), + ("bpy.types.geometrynodesetcurveradius*", "modeling/geometry_nodes/curve/write/set_curve_radius.html#bpy-types-geometrynodesetcurveradius"), ("bpy.types.geometrynodesetpointradius*", "modeling/geometry_nodes/point/set_point_radius.html#bpy-types-geometrynodesetpointradius"), - ("bpy.types.geometrynodesetshadesmooth*", "modeling/geometry_nodes/mesh/set_shade_smooth.html#bpy-types-geometrynodesetshadesmooth"), - ("bpy.types.geometrynodestringtocurves*", "modeling/geometry_nodes/text/string_to_curves.html#bpy-types-geometrynodestringtocurves"), - ("bpy.types.geometrynodesubdividecurve*", "modeling/geometry_nodes/curve/subdivide_curve.html#bpy-types-geometrynodesubdividecurve"), - ("bpy.types.geometrynodevertexofcorner*", "modeling/geometry_nodes/mesh_topology/vertex_of_corner.html#bpy-types-geometrynodevertexofcorner"), + ("bpy.types.geometrynodesetshadesmooth*", "modeling/geometry_nodes/mesh/write/set_shade_smooth.html#bpy-types-geometrynodesetshadesmooth"), + ("bpy.types.geometrynodestringtocurves*", "modeling/geometry_nodes/utilities/text/string_to_curves.html#bpy-types-geometrynodestringtocurves"), + 
("bpy.types.geometrynodesubdividecurve*", "modeling/geometry_nodes/curve/operations/subdivide_curve.html#bpy-types-geometrynodesubdividecurve"), + ("bpy.types.geometrynodevertexofcorner*", "modeling/geometry_nodes/mesh/topology/vertex_of_corner.html#bpy-types-geometrynodevertexofcorner"), ("bpy.types.gpencillayer.channel_color*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-channel-color"), ("bpy.types.gpencillayer.use_solo_mode*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-use-solo-mode"), ("bpy.types.greasepencil.use_multiedit*", "grease_pencil/multiframe.html#bpy-types-greasepencil-use-multiedit"), @@ -1292,12 +1326,17 @@ url_manual_mapping = ( ("bpy.types.object.show_only_shape_key*", "animation/shape_keys/shape_keys_panel.html#bpy-types-object-show-only-shape-key"), ("bpy.types.regionview3d.lock_rotation*", "editors/3dview/navigate/views.html#bpy-types-regionview3d-lock-rotation"), ("bpy.types.rendersettings.hair_subdiv*", "render/eevee/render_settings/hair.html#bpy-types-rendersettings-hair-subdiv"), + ("bpy.types.rigidbodyobject.use_deform*", "physics/rigid_body/properties/collisions.html#bpy-types-rigidbodyobject-use-deform"), ("bpy.types.scene.audio_distance_model*", "scene_layout/scene/properties.html#bpy-types-scene-audio-distance-model"), ("bpy.types.scene.audio_doppler_factor*", "scene_layout/scene/properties.html#bpy-types-scene-audio-doppler-factor"), + ("bpy.types.sequencemodifier.mask_time*", "editors/video_sequencer/sequencer/sidebar/modifiers.html#bpy-types-sequencemodifier-mask-time"), ("bpy.types.sequencetransform.rotation*", "editors/video_sequencer/sequencer/sidebar/strip.html#bpy-types-sequencetransform-rotation"), ("bpy.types.shadernodeambientocclusion*", "render/shader_nodes/input/ao.html#bpy-types-shadernodeambientocclusion"), ("bpy.types.shadernodevolumeabsorption*", "render/shader_nodes/shader/volume_absorption.html#bpy-types-shadernodevolumeabsorption"), ("bpy.types.shadernodevolumeprincipled*", "render/shader_nodes/shader/volume_principled.html#bpy-types-shadernodevolumeprincipled"), + ("bpy.types.softbodysettings.ball_damp*", "physics/soft_body/settings/self_collision.html#bpy-types-softbodysettings-ball-damp"), + ("bpy.types.softbodysettings.ball_size*", "physics/soft_body/settings/self_collision.html#bpy-types-softbodysettings-ball-size"), + ("bpy.types.softbodysettings.use_edges*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-use-edges"), ("bpy.types.spacefilebrowser.bookmarks*", "editors/file_browser.html#bpy-types-spacefilebrowser-bookmarks"), ("bpy.types.spaceoutliner.display_mode*", "editors/outliner/interface.html#bpy-types-spaceoutliner-display-mode"), ("bpy.types.spaceoutliner.filter_state*", "editors/outliner/interface.html#bpy-types-spaceoutliner-filter-state"), @@ -1375,22 +1414,22 @@ url_manual_mapping = ( ("bpy.types.fluidflowsettings.density*", "physics/fluid/type/flow.html#bpy-types-fluidflowsettings-density"), ("bpy.types.freestylelineset.qi_start*", "render/freestyle/view_layer/line_set.html#bpy-types-freestylelineset-qi-start"), ("bpy.types.freestylelinestyle.rounds*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-rounds"), - ("bpy.types.functionnodereplacestring*", "modeling/geometry_nodes/text/replace_string.html#bpy-types-functionnodereplacestring"), - ("bpy.types.functionnodeseparatecolor*", "modeling/geometry_nodes/color/separate_color.html#bpy-types-functionnodeseparatecolor"), - ("bpy.types.functionnodevaluetostring*", 
"modeling/geometry_nodes/text/value_to_string.html#bpy-types-functionnodevaluetostring"), - ("bpy.types.geometrynodecurvetopoints*", "modeling/geometry_nodes/curve/curve_to_points.html#bpy-types-geometrynodecurvetopoints"), - ("bpy.types.geometrynodeedgesofcorner*", "modeling/geometry_nodes/mesh_topology/edges_of_corner.html#bpy-types-geometrynodeedgesofcorner"), - ("bpy.types.geometrynodeedgesofvertex*", "modeling/geometry_nodes/mesh_topology/edges_of_vertex.html#bpy-types-geometrynodeedgesofvertex"), - ("bpy.types.geometrynodefieldondomain*", "modeling/geometry_nodes/utilities/interpolate_domain.html#bpy-types-geometrynodefieldondomain"), - ("bpy.types.geometrynodeinputmaterial*", "modeling/geometry_nodes/input/material.html#bpy-types-geometrynodeinputmaterial"), - ("bpy.types.geometrynodeinputposition*", "modeling/geometry_nodes/input/position.html#bpy-types-geometrynodeinputposition"), - ("bpy.types.geometrynodemeshicosphere*", "modeling/geometry_nodes/mesh_primitives/icosphere.html#bpy-types-geometrynodemeshicosphere"), - ("bpy.types.geometrynodepointsofcurve*", "modeling/geometry_nodes/curve_topology/points_of_curve.html#bpy-types-geometrynodepointsofcurve"), - ("bpy.types.geometrynoderesamplecurve*", "modeling/geometry_nodes/curve/resample_curve.html#bpy-types-geometrynoderesamplecurve"), - ("bpy.types.geometrynodesamplenearest*", "modeling/geometry_nodes/geometry/sample_nearest.html#bpy-types-geometrynodesamplenearest"), - ("bpy.types.geometrynodescaleelements*", "modeling/geometry_nodes/mesh/scale_elements.html#bpy-types-geometrynodescaleelements"), - ("bpy.types.geometrynodesubdividemesh*", "modeling/geometry_nodes/mesh/subdivide_mesh.html#bpy-types-geometrynodesubdividemesh"), - ("bpy.types.geometrynodeuvpackislands*", "modeling/geometry_nodes/uv/pack_uv_islands.html#bpy-types-geometrynodeuvpackislands"), + ("bpy.types.functionnodereplacestring*", "modeling/geometry_nodes/utilities/text/replace_string.html#bpy-types-functionnodereplacestring"), + ("bpy.types.functionnodeseparatecolor*", "modeling/geometry_nodes/utilities/color/separate_color.html#bpy-types-functionnodeseparatecolor"), + ("bpy.types.functionnodevaluetostring*", "modeling/geometry_nodes/utilities/text/value_to_string.html#bpy-types-functionnodevaluetostring"), + ("bpy.types.geometrynodecurvetopoints*", "modeling/geometry_nodes/curve/operations/curve_to_points.html#bpy-types-geometrynodecurvetopoints"), + ("bpy.types.geometrynodeedgesofcorner*", "modeling/geometry_nodes/mesh/topology/edges_of_corner.html#bpy-types-geometrynodeedgesofcorner"), + ("bpy.types.geometrynodeedgesofvertex*", "modeling/geometry_nodes/mesh/topology/edges_of_vertex.html#bpy-types-geometrynodeedgesofvertex"), + ("bpy.types.geometrynodefieldondomain*", "modeling/geometry_nodes/utilities/field/evaluate_on_domain.html#bpy-types-geometrynodefieldondomain"), + ("bpy.types.geometrynodeinputmaterial*", "modeling/geometry_nodes/input/constant/material.html#bpy-types-geometrynodeinputmaterial"), + ("bpy.types.geometrynodeinputposition*", "modeling/geometry_nodes/geometry/read/position.html#bpy-types-geometrynodeinputposition"), + ("bpy.types.geometrynodemeshicosphere*", "modeling/geometry_nodes/mesh/primitives/icosphere.html#bpy-types-geometrynodemeshicosphere"), + ("bpy.types.geometrynodepointsofcurve*", "modeling/geometry_nodes/curve/topology/points_of_curve.html#bpy-types-geometrynodepointsofcurve"), + ("bpy.types.geometrynoderesamplecurve*", "modeling/geometry_nodes/curve/operations/resample_curve.html#bpy-types-geometrynoderesamplecurve"), + 
("bpy.types.geometrynodesamplenearest*", "modeling/geometry_nodes/geometry/sample/sample_nearest.html#bpy-types-geometrynodesamplenearest"), + ("bpy.types.geometrynodescaleelements*", "modeling/geometry_nodes/mesh/operations/scale_elements.html#bpy-types-geometrynodescaleelements"), + ("bpy.types.geometrynodesubdividemesh*", "modeling/geometry_nodes/mesh/operations/subdivide_mesh.html#bpy-types-geometrynodesubdividemesh"), + ("bpy.types.geometrynodeuvpackislands*", "modeling/geometry_nodes/mesh/uv/pack_uv_islands.html#bpy-types-geometrynodeuvpackislands"), ("bpy.types.greasepencil.before_color*", "grease_pencil/properties/onion_skinning.html#bpy-types-greasepencil-before-color"), ("bpy.types.greasepencil.onion_factor*", "grease_pencil/properties/onion_skinning.html#bpy-types-greasepencil-onion-factor"), ("bpy.types.greasepencil.pixel_factor*", "grease_pencil/properties/strokes.html#bpy-types-greasepencil-pixel-factor"), @@ -1421,6 +1460,12 @@ url_manual_mapping = ( ("bpy.types.shadernodetexpointdensity*", "render/shader_nodes/textures/point_density.html#bpy-types-shadernodetexpointdensity"), ("bpy.types.shadernodevectortransform*", "render/shader_nodes/vector/transform.html#bpy-types-shadernodevectortransform"), ("bpy.types.shrinkwrapgpencilmodifier*", "grease_pencil/modifiers/deform/shrinkwrap.html#bpy-types-shrinkwrapgpencilmodifier"), + ("bpy.types.softbodysettings.friction*", "physics/soft_body/settings/object.html#bpy-types-softbodysettings-friction"), + ("bpy.types.softbodysettings.goal_max*", "physics/soft_body/settings/goal.html#bpy-types-softbodysettings-goal-max"), + ("bpy.types.softbodysettings.goal_min*", "physics/soft_body/settings/goal.html#bpy-types-softbodysettings-goal-min"), + ("bpy.types.softbodysettings.step_max*", "physics/soft_body/settings/solver.html#bpy-types-softbodysettings-step-max"), + ("bpy.types.softbodysettings.step_min*", "physics/soft_body/settings/solver.html#bpy-types-softbodysettings-step-min"), + ("bpy.types.softbodysettings.use_goal*", "physics/soft_body/settings/goal.html#bpy-types-softbodysettings-use-goal"), ("bpy.types.spaceclipeditor.show_grid*", "editors/clip/display/clip_display.html#bpy-types-spaceclipeditor-show-grid"), ("bpy.types.spaceoutliner.filter_text*", "editors/outliner/interface.html#bpy-types-spaceoutliner-filter-text"), ("bpy.types.spacetexteditor.find_text*", "editors/text_editor.html#bpy-types-spacetexteditor-find-text"), @@ -1496,21 +1541,21 @@ url_manual_mapping = ( ("bpy.types.freestylelineset.exclude*", "render/freestyle/view_layer/line_set.html#bpy-types-freestylelineset-exclude"), ("bpy.types.freestylelinestyle.alpha*", "render/freestyle/view_layer/line_style/alpha.html#bpy-types-freestylelinestyle-alpha"), ("bpy.types.freestylelinestyle.color*", "render/freestyle/view_layer/line_style/color.html#bpy-types-freestylelinestyle-color"), - ("bpy.types.functionnodecombinecolor*", "modeling/geometry_nodes/color/combine_color.html#bpy-types-functionnodecombinecolor"), - ("bpy.types.functionnodestringlength*", "modeling/geometry_nodes/text/string_length.html#bpy-types-functionnodestringlength"), - ("bpy.types.geometrynodecurveofpoint*", "modeling/geometry_nodes/curve_topology/curve_of_point.html#bpy-types-geometrynodecurveofpoint"), - ("bpy.types.geometrynodefaceofcorner*", "modeling/geometry_nodes/mesh_topology/face_of_corner.html#bpy-types-geometrynodefaceofcorner"), - ("bpy.types.geometrynodefieldatindex*", "modeling/geometry_nodes/utilities/field_at_index.html#bpy-types-geometrynodefieldatindex"), + 
("bpy.types.functionnodecombinecolor*", "modeling/geometry_nodes/utilities/color/combine_color.html#bpy-types-functionnodecombinecolor"), + ("bpy.types.functionnodestringlength*", "modeling/geometry_nodes/utilities/text/string_length.html#bpy-types-functionnodestringlength"), + ("bpy.types.geometrynodecurveofpoint*", "modeling/geometry_nodes/curve/topology/curve_of_point.html#bpy-types-geometrynodecurveofpoint"), + ("bpy.types.geometrynodefaceofcorner*", "modeling/geometry_nodes/mesh/topology/face_of_corner.html#bpy-types-geometrynodefaceofcorner"), + ("bpy.types.geometrynodefieldatindex*", "modeling/geometry_nodes/utilities/field/evaluate_at_index.html#bpy-types-geometrynodefieldatindex"), ("bpy.types.geometrynodeimagetexture*", "modeling/geometry_nodes/texture/image.html#bpy-types-geometrynodeimagetexture"), - ("bpy.types.geometrynodeinputtangent*", "modeling/geometry_nodes/curve/curve_tangent.html#bpy-types-geometrynodeinputtangent"), + ("bpy.types.geometrynodeinputtangent*", "modeling/geometry_nodes/curve/read/curve_tangent.html#bpy-types-geometrynodeinputtangent"), ("bpy.types.geometrynodejoingeometry*", "modeling/geometry_nodes/geometry/join_geometry.html#bpy-types-geometrynodejoingeometry"), - ("bpy.types.geometrynodemeshcylinder*", "modeling/geometry_nodes/mesh_primitives/cylinder.html#bpy-types-geometrynodemeshcylinder"), - ("bpy.types.geometrynodemeshtopoints*", "modeling/geometry_nodes/mesh/mesh_to_points.html#bpy-types-geometrynodemeshtopoints"), - ("bpy.types.geometrynodemeshtovolume*", "modeling/geometry_nodes/mesh/mesh_to_volume.html#bpy-types-geometrynodemeshtovolume"), - ("bpy.types.geometrynodemeshuvsphere*", "modeling/geometry_nodes/mesh_primitives/uv_sphere.html#bpy-types-geometrynodemeshuvsphere"), - ("bpy.types.geometrynodereversecurve*", "modeling/geometry_nodes/curve/reverse_curve.html#bpy-types-geometrynodereversecurve"), - ("bpy.types.geometrynodesetcurvetilt*", "modeling/geometry_nodes/curve/set_curve_tilt.html#bpy-types-geometrynodesetcurvetilt"), - ("bpy.types.geometrynodesplinelength*", "modeling/geometry_nodes/curve/spline_length.html#bpy-types-geometrynodesplinelength"), + ("bpy.types.geometrynodemeshcylinder*", "modeling/geometry_nodes/mesh/primitives/cylinder.html#bpy-types-geometrynodemeshcylinder"), + ("bpy.types.geometrynodemeshtopoints*", "modeling/geometry_nodes/mesh/operations/mesh_to_points.html#bpy-types-geometrynodemeshtopoints"), + ("bpy.types.geometrynodemeshtovolume*", "modeling/geometry_nodes/mesh/operations/mesh_to_volume.html#bpy-types-geometrynodemeshtovolume"), + ("bpy.types.geometrynodemeshuvsphere*", "modeling/geometry_nodes/mesh/primitives/uv_sphere.html#bpy-types-geometrynodemeshuvsphere"), + ("bpy.types.geometrynodereversecurve*", "modeling/geometry_nodes/curve/operations/reverse_curve.html#bpy-types-geometrynodereversecurve"), + ("bpy.types.geometrynodesetcurvetilt*", "modeling/geometry_nodes/curve/write/set_curve_tilt.html#bpy-types-geometrynodesetcurvetilt"), + ("bpy.types.geometrynodesplinelength*", "modeling/geometry_nodes/curve/read/spline_length.html#bpy-types-geometrynodesplinelength"), ("bpy.types.geometrynodevolumetomesh*", "modeling/geometry_nodes/volume/volume_to_mesh.html#bpy-types-geometrynodevolumetomesh"), ("bpy.types.gpencillayer.line_change*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-line-change"), ("bpy.types.gpencillayer.parent_type*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-parent-type"), @@ -1530,6 +1575,7 @@ url_manual_mapping = ( ("bpy.types.particleinstancemodifier*", 
"modeling/modifiers/physics/particle_instance.html#bpy-types-particleinstancemodifier"), ("bpy.types.rendersettings.hair_type*", "render/eevee/render_settings/hair.html#bpy-types-rendersettings-hair-type"), ("bpy.types.rendersettings.tile_size*", "render/cycles/render_settings/performance.html#bpy-types-rendersettings-tile-size"), + ("bpy.types.rigidbodyobject.friction*", "physics/rigid_body/properties/collisions.html#bpy-types-rigidbodyobject-friction"), ("bpy.types.scenedisplay.viewport_aa*", "render/workbench/sampling.html#bpy-types-scenedisplay-viewport-aa"), ("bpy.types.sequencertimelineoverlay*", "editors/video_sequencer/sequencer/display.html#bpy-types-sequencertimelineoverlay"), ("bpy.types.sequencetransform.filter*", "editors/video_sequencer/sequencer/sidebar/strip.html#bpy-types-sequencetransform-filter"), @@ -1539,6 +1585,8 @@ url_manual_mapping = ( ("bpy.types.shadernodebsdfrefraction*", "render/shader_nodes/shader/refraction.html#bpy-types-shadernodebsdfrefraction"), ("bpy.types.shadernodeoutputmaterial*", "render/shader_nodes/output/material.html#bpy-types-shadernodeoutputmaterial"), ("bpy.types.shadernodetexenvironment*", "render/shader_nodes/textures/environment.html#bpy-types-shadernodetexenvironment"), + ("bpy.types.softbodysettings.damping*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-damping"), + ("bpy.types.softbodysettings.plastic*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-plastic"), ("bpy.types.spacesequenceeditor.show*", "editors/video_sequencer/preview/header.html#bpy-types-spacesequenceeditor-show"), ("bpy.types.spaceuveditor.show_faces*", "editors/uv/overlays.html#bpy-types-spaceuveditor-show-faces"), ("bpy.types.spaceuveditor.uv_opacity*", "editors/uv/overlays.html#bpy-types-spaceuveditor-uv-opacity"), @@ -1625,26 +1673,26 @@ url_manual_mapping = ( ("bpy.types.freestylelinestyle.caps*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-caps"), ("bpy.types.freestylelinestyle.dash*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-dash"), ("bpy.types.freestylemodulesettings*", "render/freestyle/python.html#bpy-types-freestylemodulesettings"), - ("bpy.types.functionnodebooleanmath*", "modeling/geometry_nodes/utilities/boolean_math.html#bpy-types-functionnodebooleanmath"), - ("bpy.types.functionnodeinputstring*", "modeling/geometry_nodes/input/string.html#bpy-types-functionnodeinputstring"), - ("bpy.types.functionnodeinputvector*", "modeling/geometry_nodes/input/vector.html#bpy-types-functionnodeinputvector"), + ("bpy.types.functionnodebooleanmath*", "modeling/geometry_nodes/utilities/math/boolean_math.html#bpy-types-functionnodebooleanmath"), + ("bpy.types.functionnodeinputstring*", "modeling/geometry_nodes/input/constant/string.html#bpy-types-functionnodeinputstring"), + ("bpy.types.functionnodeinputvector*", "modeling/geometry_nodes/input/constant/vector.html#bpy-types-functionnodeinputvector"), ("bpy.types.functionnoderandomvalue*", "modeling/geometry_nodes/utilities/random_value.html#bpy-types-functionnoderandomvalue"), - ("bpy.types.functionnoderotateeuler*", "modeling/geometry_nodes/utilities/rotate_euler.html#bpy-types-functionnoderotateeuler"), - ("bpy.types.functionnodeslicestring*", "modeling/geometry_nodes/text/slice_string.html#bpy-types-functionnodeslicestring"), - ("bpy.types.geometrynodecurvelength*", "modeling/geometry_nodes/curve/curve_length.html#bpy-types-geometrynodecurvelength"), - ("bpy.types.geometrynodecurvespiral*", 
"modeling/geometry_nodes/curve_primitives/curve_spiral.html#bpy-types-geometrynodecurvespiral"), - ("bpy.types.geometrynodecurvetomesh*", "modeling/geometry_nodes/curve/curve_to_mesh.html#bpy-types-geometrynodecurvetomesh"), - ("bpy.types.geometrynodeextrudemesh*", "modeling/geometry_nodes/mesh/extrude_mesh.html#bpy-types-geometrynodeextrudemesh"), - ("bpy.types.geometrynodefilletcurve*", "modeling/geometry_nodes/curve/fillet_curve.html#bpy-types-geometrynodefilletcurve"), - ("bpy.types.geometrynodeinputnormal*", "modeling/geometry_nodes/input/normal.html#bpy-types-geometrynodeinputnormal"), - ("bpy.types.geometrynodeinputradius*", "modeling/geometry_nodes/input/radius.html#bpy-types-geometrynodeinputradius"), - ("bpy.types.geometrynodemeshboolean*", "modeling/geometry_nodes/mesh/mesh_boolean.html#bpy-types-geometrynodemeshboolean"), - ("bpy.types.geometrynodemeshtocurve*", "modeling/geometry_nodes/mesh/mesh_to_curve.html#bpy-types-geometrynodemeshtocurve"), - ("bpy.types.geometrynodesamplecurve*", "modeling/geometry_nodes/curve/sample_curve.html#bpy-types-geometrynodesamplecurve"), - ("bpy.types.geometrynodesampleindex*", "modeling/geometry_nodes/geometry/sample_index.html#bpy-types-geometrynodesampleindex"), + ("bpy.types.functionnoderotateeuler*", "modeling/geometry_nodes/utilities/rotation/rotate_euler.html#bpy-types-functionnoderotateeuler"), + ("bpy.types.functionnodeslicestring*", "modeling/geometry_nodes/utilities/text/slice_string.html#bpy-types-functionnodeslicestring"), + ("bpy.types.geometrynodecurvelength*", "modeling/geometry_nodes/curve/read/curve_length.html#bpy-types-geometrynodecurvelength"), + ("bpy.types.geometrynodecurvespiral*", "modeling/geometry_nodes/curve/primitives/curve_spiral.html#bpy-types-geometrynodecurvespiral"), + ("bpy.types.geometrynodecurvetomesh*", "modeling/geometry_nodes/curve/operations/curve_to_mesh.html#bpy-types-geometrynodecurvetomesh"), + ("bpy.types.geometrynodeextrudemesh*", "modeling/geometry_nodes/mesh/operations/extrude_mesh.html#bpy-types-geometrynodeextrudemesh"), + ("bpy.types.geometrynodefilletcurve*", "modeling/geometry_nodes/curve/operations/fillet_curve.html#bpy-types-geometrynodefilletcurve"), + ("bpy.types.geometrynodeinputnormal*", "modeling/geometry_nodes/geometry/read/normal.html#bpy-types-geometrynodeinputnormal"), + ("bpy.types.geometrynodeinputradius*", "modeling/geometry_nodes/geometry/read/radius.html#bpy-types-geometrynodeinputradius"), + ("bpy.types.geometrynodemeshboolean*", "modeling/geometry_nodes/mesh/operations/mesh_boolean.html#bpy-types-geometrynodemeshboolean"), + ("bpy.types.geometrynodemeshtocurve*", "modeling/geometry_nodes/mesh/operations/mesh_to_curve.html#bpy-types-geometrynodemeshtocurve"), + ("bpy.types.geometrynodesamplecurve*", "modeling/geometry_nodes/curve/operations/sample_curve.html#bpy-types-geometrynodesamplecurve"), + ("bpy.types.geometrynodesampleindex*", "modeling/geometry_nodes/geometry/sample/sample_index.html#bpy-types-geometrynodesampleindex"), ("bpy.types.geometrynodesetmaterial*", "modeling/geometry_nodes/material/set_material.html#bpy-types-geometrynodesetmaterial"), - ("bpy.types.geometrynodesetposition*", "modeling/geometry_nodes/geometry/set_position.html#bpy-types-geometrynodesetposition"), - ("bpy.types.geometrynodetriangulate*", "modeling/geometry_nodes/mesh/triangulate.html#bpy-types-geometrynodetriangulate"), + ("bpy.types.geometrynodesetposition*", "modeling/geometry_nodes/geometry/write/set_position.html#bpy-types-geometrynodesetposition"), + 
("bpy.types.geometrynodetriangulate*", "modeling/geometry_nodes/mesh/operations/triangulate.html#bpy-types-geometrynodetriangulate"), ("bpy.types.gpencillayer.blend_mode*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-blend-mode"), ("bpy.types.gpencillayer.pass_index*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-pass-index"), ("bpy.types.gpencillayer.tint_color*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-tint-color"), @@ -1724,7 +1772,6 @@ url_manual_mapping = ( ("bpy.ops.rigidbody.mass_calculate*", "scene_layout/object/editing/rigid_body.html#bpy-ops-rigidbody-mass-calculate"), ("bpy.ops.screen.spacedata_cleanup*", "advanced/operators.html#bpy-ops-screen-spacedata-cleanup"), ("bpy.ops.sculpt.detail_flood_fill*", "sculpt_paint/sculpting/tool_settings/dyntopo.html#bpy-ops-sculpt-detail-flood-fill"), - ("bpy.ops.sculpt_curves.select_all*", "sculpt_paint/curves_sculpting/introduction.html#bpy-ops-sculpt-curves-select-all"), ("bpy.ops.sculpt_curves.select_end*", "sculpt_paint/curves_sculpting/introduction.html#bpy-ops-sculpt-curves-select-end"), ("bpy.ops.sequencer.duplicate_move*", "video_editing/edit/montage/editing.html#bpy-ops-sequencer-duplicate-move"), ("bpy.ops.sequencer.select_grouped*", "video_editing/edit/montage/selecting.html#bpy-ops-sequencer-select-grouped"), @@ -1779,17 +1826,17 @@ url_manual_mapping = ( ("bpy.types.fieldsettings.strength*", "physics/forces/force_fields/introduction.html#bpy-types-fieldsettings-strength"), ("bpy.types.freestylelinestyle.gap*", "render/freestyle/view_layer/line_style/strokes.html#bpy-types-freestylelinestyle-gap"), ("bpy.types.freestylesettings.mode*", "render/freestyle/view_layer/freestyle.html#bpy-types-freestylesettings-mode"), - ("bpy.types.functionnodefloattoint*", "modeling/geometry_nodes/utilities/float_to_integer.html#bpy-types-functionnodefloattoint"), - ("bpy.types.functionnodeinputcolor*", "modeling/geometry_nodes/input/color.html#bpy-types-functionnodeinputcolor"), - ("bpy.types.geometrynodeconvexhull*", "modeling/geometry_nodes/geometry/convex_hull.html#bpy-types-geometrynodeconvexhull"), - ("bpy.types.geometrynodeimageinput*", "modeling/geometry_nodes/input/image_input.html#bpy-types-geometrynodeimageinput"), - ("bpy.types.geometrynodeinputindex*", "modeling/geometry_nodes/input/input_index.html#bpy-types-geometrynodeinputindex"), - ("bpy.types.geometrynodeisviewport*", "modeling/geometry_nodes/input/is_viewport.html#bpy-types-geometrynodeisviewport"), - ("bpy.types.geometrynodemeshcircle*", "modeling/geometry_nodes/mesh_primitives/mesh_circle.html#bpy-types-geometrynodemeshcircle"), - ("bpy.types.geometrynodeobjectinfo*", "modeling/geometry_nodes/input/object_info.html#bpy-types-geometrynodeobjectinfo"), - ("bpy.types.geometrynodeselfobject*", "modeling/geometry_nodes/input/self_object.html#bpy-types-geometrynodeselfobject"), - ("bpy.types.geometrynodesplitedges*", "modeling/geometry_nodes/mesh/split_edges.html#bpy-types-geometrynodesplitedges"), - ("bpy.types.geometrynodestringjoin*", "modeling/geometry_nodes/text/join_strings.html#bpy-types-geometrynodestringjoin"), + ("bpy.types.functionnodefloattoint*", "modeling/geometry_nodes/utilities/math/float_to_integer.html#bpy-types-functionnodefloattoint"), + ("bpy.types.functionnodeinputcolor*", "modeling/geometry_nodes/input/constant/color.html#bpy-types-functionnodeinputcolor"), + ("bpy.types.geometrynodeconvexhull*", "modeling/geometry_nodes/geometry/operations/convex_hull.html#bpy-types-geometrynodeconvexhull"), + 
("bpy.types.geometrynodeimageinput*", "modeling/geometry_nodes/input/constant/image.html#bpy-types-geometrynodeimageinput"), + ("bpy.types.geometrynodeinputindex*", "modeling/geometry_nodes/geometry/read/input_index.html#bpy-types-geometrynodeinputindex"), + ("bpy.types.geometrynodeisviewport*", "modeling/geometry_nodes/input/scene/is_viewport.html#bpy-types-geometrynodeisviewport"), + ("bpy.types.geometrynodemeshcircle*", "modeling/geometry_nodes/mesh/primitives/mesh_circle.html#bpy-types-geometrynodemeshcircle"), + ("bpy.types.geometrynodeobjectinfo*", "modeling/geometry_nodes/input/scene/object_info.html#bpy-types-geometrynodeobjectinfo"), + ("bpy.types.geometrynodeselfobject*", "modeling/geometry_nodes/input/scene/self_object.html#bpy-types-geometrynodeselfobject"), + ("bpy.types.geometrynodesplitedges*", "modeling/geometry_nodes/mesh/operations/split_edges.html#bpy-types-geometrynodesplitedges"), + ("bpy.types.geometrynodestringjoin*", "modeling/geometry_nodes/utilities/text/join_strings.html#bpy-types-geometrynodestringjoin"), ("bpy.types.geometrynodevolumecube*", "modeling/geometry_nodes/volume/volume_cube.html#bpy-types-geometrynodevolumecube"), ("bpy.types.greasepencilgrid.color*", "grease_pencil/properties/display.html#bpy-types-greasepencilgrid-color"), ("bpy.types.greasepencilgrid.lines*", "grease_pencil/properties/display.html#bpy-types-greasepencilgrid-lines"), @@ -1821,6 +1868,10 @@ url_manual_mapping = ( ("bpy.types.shadernodeparticleinfo*", "render/shader_nodes/input/particle_info.html#bpy-types-shadernodeparticleinfo"), ("bpy.types.shadernodevectorrotate*", "render/shader_nodes/vector/vector_rotate.html#bpy-types-shadernodevectorrotate"), ("bpy.types.shapekey.interpolation*", "animation/shape_keys/shape_keys_panel.html#bpy-types-shapekey-interpolation"), + ("bpy.types.softbodysettings.choke*", "physics/soft_body/settings/solver.html#bpy-types-softbodysettings-choke"), + ("bpy.types.softbodysettings.fuzzy*", "physics/soft_body/settings/solver.html#bpy-types-softbodysettings-fuzzy"), + ("bpy.types.softbodysettings.shear*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-shear"), + ("bpy.types.softbodysettings.speed*", "physics/soft_body/settings/simulation.html#bpy-types-softbodysettings-speed"), ("bpy.types.sound.use_memory_cache*", "editors/video_sequencer/sequencer/sidebar/strip.html#bpy-types-sound-use-memory-cache"), ("bpy.types.spaceview3d.clip_start*", "editors/3dview/sidebar.html#bpy-types-spaceview3d-clip-start"), ("bpy.types.spaceview3d.show_gizmo*", "editors/3dview/display/gizmo.html#bpy-types-spaceview3d-show-gizmo"), @@ -1930,15 +1981,15 @@ url_manual_mapping = ( ("bpy.types.ffmpegsettings.format*", "render/output/properties/output.html#bpy-types-ffmpegsettings-format"), ("bpy.types.fluideffectorsettings*", "physics/fluid/type/effector.html#bpy-types-fluideffectorsettings"), ("bpy.types.followtrackconstraint*", "animation/constraints/motion_tracking/follow_track.html#bpy-types-followtrackconstraint"), - ("bpy.types.functionnodeinputbool*", "modeling/geometry_nodes/input/boolean.html#bpy-types-functionnodeinputbool"), - ("bpy.types.geometrycornersofface*", "modeling/geometry_nodes/mesh_topology/corners_of_face.html#bpy-types-geometrycornersofface"), - ("bpy.types.geometrynodecurvestar*", "modeling/geometry_nodes/curve_primitives/star.html#bpy-types-geometrynodecurvestar"), - ("bpy.types.geometrynodefillcurve*", "modeling/geometry_nodes/curve/fill_curve.html#bpy-types-geometrynodefillcurve"), - ("bpy.types.geometrynodeflipfaces*", 
"modeling/geometry_nodes/mesh/flip_faces.html#bpy-types-geometrynodeflipfaces"), - ("bpy.types.geometrynodeimageinfo*", "modeling/geometry_nodes/input/image_info.html#bpy-types-geometrynodeimageinfo"), - ("bpy.types.geometrynodeproximity*", "modeling/geometry_nodes/geometry/geometry_proximity.html#bpy-types-geometrynodeproximity"), - ("bpy.types.geometrynodetransform*", "modeling/geometry_nodes/geometry/transform_geometry.html#bpy-types-geometrynodetransform"), - ("bpy.types.geometrynodetrimcurve*", "modeling/geometry_nodes/curve/trim_curve.html#bpy-types-geometrynodetrimcurve"), + ("bpy.types.functionnodeinputbool*", "modeling/geometry_nodes/input/constant/boolean.html#bpy-types-functionnodeinputbool"), + ("bpy.types.geometrycornersofface*", "modeling/geometry_nodes/mesh/topology/corners_of_face.html#bpy-types-geometrycornersofface"), + ("bpy.types.geometrynodecurvestar*", "modeling/geometry_nodes/curve/primitives/star.html#bpy-types-geometrynodecurvestar"), + ("bpy.types.geometrynodefillcurve*", "modeling/geometry_nodes/curve/operations/fill_curve.html#bpy-types-geometrynodefillcurve"), + ("bpy.types.geometrynodeflipfaces*", "modeling/geometry_nodes/mesh/operations/flip_faces.html#bpy-types-geometrynodeflipfaces"), + ("bpy.types.geometrynodeimageinfo*", "modeling/geometry_nodes/input/scene/image_info.html#bpy-types-geometrynodeimageinfo"), + ("bpy.types.geometrynodeproximity*", "modeling/geometry_nodes/geometry/sample/geometry_proximity.html#bpy-types-geometrynodeproximity"), + ("bpy.types.geometrynodetransform*", "modeling/geometry_nodes/geometry/operations/transform_geometry.html#bpy-types-geometrynodetransform"), + ("bpy.types.geometrynodetrimcurve*", "modeling/geometry_nodes/curve/operations/trim_curve.html#bpy-types-geometrynodetrimcurve"), ("bpy.types.gpencillayer.location*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-location"), ("bpy.types.gpencillayer.rotation*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-rotation"), ("bpy.types.gpencilsculptsettings*", "grease_pencil/properties/index.html#bpy-types-gpencilsculptsettings"), @@ -1961,6 +2012,7 @@ url_manual_mapping = ( ("bpy.types.screen.show_statusbar*", "interface/window_system/topbar.html#bpy-types-screen-show-statusbar"), ("bpy.types.sculpt.detail_percent*", "sculpt_paint/sculpting/tool_settings/dyntopo.html#bpy-types-sculpt-detail-percent"), ("bpy.types.sculpt.gravity_object*", "sculpt_paint/sculpting/tool_settings/options.html#bpy-types-sculpt-gravity-object"), + ("bpy.types.sculpt.show_face_sets*", "sculpt_paint/sculpting/editing/face_sets.html#bpy-types-sculpt-show-face-sets"), ("bpy.types.shadernodebsdfdiffuse*", "render/shader_nodes/shader/diffuse.html#bpy-types-shadernodebsdfdiffuse"), ("bpy.types.shadernodelayerweight*", "render/shader_nodes/input/layer_weight.html#bpy-types-shadernodelayerweight"), ("bpy.types.shadernodenewgeometry*", "render/shader_nodes/input/geometry.html#bpy-types-shadernodenewgeometry"), @@ -1974,6 +2026,11 @@ url_manual_mapping = ( ("bpy.types.shapekey.relative_key*", "animation/shape_keys/shape_keys_panel.html#bpy-types-shapekey-relative-key"), ("bpy.types.shapekey.vertex_group*", "animation/shape_keys/shape_keys_panel.html#bpy-types-shapekey-vertex-group"), ("bpy.types.smoothgpencilmodifier*", "grease_pencil/modifiers/deform/smooth.html#bpy-types-smoothgpencilmodifier"), + ("bpy.types.softbodysettings.aero*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-aero"), + ("bpy.types.softbodysettings.bend*", 
"physics/soft_body/settings/edges.html#bpy-types-softbodysettings-bend"), + ("bpy.types.softbodysettings.mass*", "physics/soft_body/settings/object.html#bpy-types-softbodysettings-mass"), + ("bpy.types.softbodysettings.pull*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-pull"), + ("bpy.types.softbodysettings.push*", "physics/soft_body/settings/edges.html#bpy-types-softbodysettings-push"), ("bpy.types.spline.use_endpoint_u*", "modeling/curves/properties/active_spline.html#bpy-types-spline-use-endpoint-u"), ("bpy.types.surfacedeformmodifier*", "modeling/modifiers/deform/surface_deform.html#bpy-types-surfacedeformmodifier"), ("bpy.types.texturenodetexvoronoi*", "editors/texture_node/types/textures/voronoi.html#bpy-types-texturenodetexvoronoi"), @@ -2003,6 +2060,7 @@ url_manual_mapping = ( ("bpy.ops.mesh.loop_multi_select*", "modeling/meshes/selecting/loops.html#bpy-ops-mesh-loop-multi-select"), ("bpy.ops.mesh.vert_connect_path*", "modeling/meshes/editing/vertex/connect_vertex_path.html#bpy-ops-mesh-vert-connect-path"), ("bpy.ops.nla.action_sync_length*", "editors/nla/editing.html#bpy-ops-nla-action-sync-length"), + ("bpy.ops.node.move_detach_links*", "interface/controls/nodes/editing.html#bpy-ops-node-move-detach-links"), ("bpy.ops.object.make_links_data*", "scene_layout/object/editing/link_transfer/link_data.html#bpy-ops-object-make-links-data"), ("bpy.ops.object.modifier_remove*", "modeling/modifiers/introduction.html#bpy-ops-object-modifier-remove"), ("bpy.ops.object.paths_calculate*", "animation/motion_paths.html#bpy-ops-object-paths-calculate"), @@ -2048,17 +2106,17 @@ url_manual_mapping = ( ("bpy.types.editbone.use_connect*", "animation/armatures/bones/properties/relations.html#bpy-types-editbone-use-connect"), ("bpy.types.ffmpegsettings.codec*", "render/output/properties/output.html#bpy-types-ffmpegsettings-codec"), ("bpy.types.followpathconstraint*", "animation/constraints/relationship/follow_path.html#bpy-types-followpathconstraint"), - ("bpy.types.functionnodeinputint*", "modeling/geometry_nodes/input/integer.html#bpy-types-functionnodeinputint"), + ("bpy.types.functionnodeinputint*", "modeling/geometry_nodes/input/constant/integer.html#bpy-types-functionnodeinputint"), ("bpy.types.gaussianblursequence*", "video_editing/edit/montage/strips/effects/blur.html#bpy-types-gaussianblursequence"), - ("bpy.types.geometrynodeboundbox*", "modeling/geometry_nodes/geometry/bounding_box.html#bpy-types-geometrynodeboundbox"), - ("bpy.types.geometrynodecurvearc*", "modeling/geometry_nodes/curve_primitives/arc.html#bpy-types-geometrynodecurvearc"), - ("bpy.types.geometrynodedualmesh*", "modeling/geometry_nodes/mesh/dual_mesh.html#bpy-types-geometrynodedualmesh"), + ("bpy.types.geometrynodeboundbox*", "modeling/geometry_nodes/geometry/operations/bounding_box.html#bpy-types-geometrynodeboundbox"), + ("bpy.types.geometrynodecurvearc*", "modeling/geometry_nodes/curve/primitives/arc.html#bpy-types-geometrynodecurvearc"), + ("bpy.types.geometrynodedualmesh*", "modeling/geometry_nodes/mesh/operations/dual_mesh.html#bpy-types-geometrynodedualmesh"), ("bpy.types.geometrynodematerial*", "-1"), - ("bpy.types.geometrynodemeshcone*", "modeling/geometry_nodes/mesh_primitives/cone.html#bpy-types-geometrynodemeshcone"), - ("bpy.types.geometrynodemeshcube*", "modeling/geometry_nodes/mesh_primitives/cube.html#bpy-types-geometrynodemeshcube"), - ("bpy.types.geometrynodemeshgrid*", "modeling/geometry_nodes/mesh_primitives/grid.html#bpy-types-geometrynodemeshgrid"), - 
("bpy.types.geometrynodemeshline*", "modeling/geometry_nodes/mesh_primitives/mesh_line.html#bpy-types-geometrynodemeshline"), - ("bpy.types.geometrynodeuvunwrap*", "modeling/geometry_nodes/uv/uv_unwrap.html#bpy-types-geometrynodeuvunwrap"), + ("bpy.types.geometrynodemeshcone*", "modeling/geometry_nodes/mesh/primitives/cone.html#bpy-types-geometrynodemeshcone"), + ("bpy.types.geometrynodemeshcube*", "modeling/geometry_nodes/mesh/primitives/cube.html#bpy-types-geometrynodemeshcube"), + ("bpy.types.geometrynodemeshgrid*", "modeling/geometry_nodes/mesh/primitives/grid.html#bpy-types-geometrynodemeshgrid"), + ("bpy.types.geometrynodemeshline*", "modeling/geometry_nodes/mesh/primitives/mesh_line.html#bpy-types-geometrynodemeshline"), + ("bpy.types.geometrynodeuvunwrap*", "modeling/geometry_nodes/mesh/uv/uv_unwrap.html#bpy-types-geometrynodeuvunwrap"), ("bpy.types.gpencillayer.opacity*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-opacity"), ("bpy.types.image.display_aspect*", "editors/image/sidebar.html#bpy-types-image-display-aspect"), ("bpy.types.keyframe.handle_left*", "editors/graph_editor/fcurves/properties.html#bpy-types-keyframe-handle-left"), @@ -2198,9 +2256,9 @@ url_manual_mapping = ( ("bpy.types.fieldsettings.noise*", "physics/forces/force_fields/introduction.html#bpy-types-fieldsettings-noise"), ("bpy.types.fieldsettings.shape*", "physics/forces/force_fields/introduction.html#bpy-types-fieldsettings-shape"), ("bpy.types.fluiddomainsettings*", "physics/fluid/type/domain/index.html#bpy-types-fluiddomainsettings"), - ("bpy.types.functionnodecompare*", "modeling/geometry_nodes/utilities/compare.html#bpy-types-functionnodecompare"), - ("bpy.types.geometrynodeinputid*", "modeling/geometry_nodes/input/id.html#bpy-types-geometrynodeinputid"), - ("bpy.types.geometrynoderaycast*", "modeling/geometry_nodes/geometry/raycast.html#bpy-types-geometrynoderaycast"), + ("bpy.types.functionnodecompare*", "modeling/geometry_nodes/utilities/math/compare.html#bpy-types-functionnodecompare"), + ("bpy.types.geometrynodeinputid*", "modeling/geometry_nodes/geometry/read/id.html#bpy-types-geometrynodeinputid"), + ("bpy.types.geometrynoderaycast*", "modeling/geometry_nodes/geometry/sample/raycast.html#bpy-types-geometrynoderaycast"), ("bpy.types.gpencillayer.parent*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-parent"), ("bpy.types.hookgpencilmodifier*", "grease_pencil/modifiers/deform/hook.html#bpy-types-hookgpencilmodifier"), ("bpy.types.imageformatsettings*", "files/media/image_formats.html#bpy-types-imageformatsettings"), @@ -2372,7 +2430,7 @@ url_manual_mapping = ( ("bpy.types.shadernodeemission*", "render/shader_nodes/shader/emission.html#bpy-types-shadernodeemission"), ("bpy.types.shadernodehairinfo*", "render/shader_nodes/input/hair_info.html#bpy-types-shadernodehairinfo"), ("bpy.types.shadernodemaprange*", "render/shader_nodes/converter/map_range.html#bpy-types-shadernodemaprange"), - ("bpy.types.shadernodergbcurve*", "modeling/geometry_nodes/color/rgb_curves.html#bpy-types-shadernodergbcurve"), + ("bpy.types.shadernodergbcurve*", "modeling/geometry_nodes/utilities/color/rgb_curves.html#bpy-types-shadernodergbcurve"), ("bpy.types.shadernodetexbrick*", "render/shader_nodes/textures/brick.html#bpy-types-shadernodetexbrick"), ("bpy.types.shadernodetexcoord*", "render/shader_nodes/input/texture_coordinate.html#bpy-types-shadernodetexcoord"), ("bpy.types.shadernodeteximage*", "render/shader_nodes/textures/image.html#bpy-types-shadernodeteximage"), @@ -2479,7 +2537,7 @@ 
url_manual_mapping = ( ("bpy.types.fmodifierenvelope*", "editors/graph_editor/fcurves/modifiers.html#bpy-types-fmodifierenvelope"), ("bpy.types.freestylesettings*", "render/freestyle/view_layer/freestyle.html#bpy-types-freestylesettings"), ("bpy.types.geometrynodegroup*", "modeling/geometry_nodes/group.html#bpy-types-geometrynodegroup"), - ("bpy.types.geometrynodesetid*", "modeling/geometry_nodes/geometry/set_id.html#bpy-types-geometrynodesetid"), + ("bpy.types.geometrynodesetid*", "modeling/geometry_nodes/geometry/write/set_id.html#bpy-types-geometrynodesetid"), ("bpy.types.gpencillayer.hide*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-hide"), ("bpy.types.gpencillayer.lock*", "grease_pencil/properties/layers.html#bpy-types-gpencillayer-lock"), ("bpy.types.imagepaint.dither*", "sculpt_paint/texture_paint/tool_settings/options.html#bpy-types-imagepaint-dither"), @@ -2570,7 +2628,7 @@ url_manual_mapping = ( ("bpy.ops.pose.select_mirror*", "animation/armatures/posing/selecting.html#bpy-ops-pose-select-mirror"), ("bpy.ops.screen.marker_jump*", "animation/markers.html#bpy-ops-screen-marker-jump"), ("bpy.ops.screen.repeat_last*", "interface/undo_redo.html#bpy-ops-screen-repeat-last"), - ("bpy.ops.sculpt.mask_expand*", "sculpt_paint/sculpting/editing/mask.html#bpy-ops-sculpt-mask-expand"), + ("bpy.ops.sculpt.mask_expand*", "sculpt_paint/sculpting/editing/expand.html#bpy-ops-sculpt-mask-expand"), ("bpy.ops.sculpt.mask_filter*", "sculpt_paint/sculpting/editing/mask.html#bpy-ops-sculpt-mask-filter"), ("bpy.ops.transform.tosphere*", "modeling/meshes/editing/mesh/transform/to_sphere.html#bpy-ops-transform-tosphere"), ("bpy.ops.view3d.clip_border*", "editors/3dview/navigate/regions.html#bpy-ops-view3d-clip-border"), @@ -2627,7 +2685,7 @@ url_manual_mapping = ( ("bpy.types.shaderfxcolorize*", "grease_pencil/visual_effects/colorize.html#bpy-types-shaderfxcolorize"), ("bpy.types.shaderfxpixelate*", "grease_pencil/visual_effects/pixelate.html#bpy-types-shaderfxpixelate"), ("bpy.types.shadernodeinvert*", "render/shader_nodes/color/invert.html#bpy-types-shadernodeinvert"), - ("bpy.types.shadernodemixrgb*", "modeling/geometry_nodes/color/mix_rgb.html#bpy-types-shadernodemixrgb"), + ("bpy.types.shadernodemixrgb*", "modeling/geometry_nodes/utilities/color/mix_rgb.html#bpy-types-shadernodemixrgb"), ("bpy.types.shadernodenormal*", "render/shader_nodes/vector/normal.html#bpy-types-shadernodenormal"), ("bpy.types.shadernodescript*", "render/shader_nodes/osl.html#bpy-types-shadernodescript"), ("bpy.types.shadernodetexies*", "render/shader_nodes/textures/ies.html#bpy-types-shadernodetexies"), @@ -2658,6 +2716,7 @@ url_manual_mapping = ( ("bpy.ops.clip.solve_camera*", "movie_clip/tracking/clip/editing/track.html#bpy-ops-clip-solve-camera"), ("bpy.ops.constraint.delete*", "animation/constraints/interface/header.html#bpy-ops-constraint-delete"), ("bpy.ops.curve.smooth_tilt*", "modeling/curves/editing/control_points.html#bpy-ops-curve-smooth-tilt"), + ("bpy.ops.curves.select_all*", "sculpt_paint/curves_sculpting/introduction.html#bpy-ops-curves-select-all"), ("bpy.ops.file.reset_recent*", "editors/file_browser.html#bpy-ops-file-reset-recent"), ("bpy.ops.fluid.bake_guides*", "physics/fluid/type/domain/guides.html#bpy-ops-fluid-bake-guides"), ("bpy.ops.fluid.free_guides*", "physics/fluid/type/domain/guides.html#bpy-ops-fluid-free-guides"), @@ -3053,7 +3112,7 @@ url_manual_mapping = ( ("bpy.ops.render.opengl*", "editors/3dview/viewport_render.html#bpy-ops-render-opengl"), ("bpy.ops.screen.header*", 
"interface/window_system/regions.html#bpy-ops-screen-header"), ("bpy.ops.script.reload*", "advanced/operators.html#bpy-ops-script-reload"), - ("bpy.ops.sculpt.expand*", "sculpt_paint/sculpting/editing/mask.html#bpy-ops-sculpt-expand"), + ("bpy.ops.sculpt.expand*", "sculpt_paint/sculpting/editing/expand.html#bpy-ops-sculpt-expand"), ("bpy.ops.sculpt_curves*", "sculpt_paint/curves_sculpting/index.html#bpy-ops-sculpt-curves"), ("bpy.ops.ui.eyedropper*", "interface/controls/buttons/eyedropper.html#bpy-ops-ui-eyedropper"), ("bpy.ops.view3d.select*", "editors/3dview/selecting.html#bpy-ops-view3d-select"), diff --git a/release/scripts/modules/rna_prop_ui.py b/release/scripts/modules/rna_prop_ui.py index bbbb526495d..180fe785f8e 100644 --- a/release/scripts/modules/rna_prop_ui.py +++ b/release/scripts/modules/rna_prop_ui.py @@ -184,6 +184,7 @@ def draw(layout, context, context_member, property_type, *, use_edit=True): value_column.prop(rna_item, '["%s"]' % escape_identifier(key), text="") operator_row = value_row.row() + operator_row.alignment = 'RIGHT' # Do not allow editing of overridden properties (we cannot use a poll function # of the operators here since they's have no access to the specific property). diff --git a/release/scripts/presets/keyconfig/Blender.py b/release/scripts/presets/keyconfig/Blender.py index 2e77f233ca4..e77fde4c39c 100644 --- a/release/scripts/presets/keyconfig/Blender.py +++ b/release/scripts/presets/keyconfig/Blender.py @@ -352,8 +352,7 @@ def load(): use_v3d_tab_menu=kc_prefs.use_v3d_tab_menu, use_v3d_shade_ex_pie=kc_prefs.use_v3d_shade_ex_pie, use_gizmo_drag=(is_select_left and kc_prefs.gizmo_action == 'DRAG'), - use_fallback_tool=True, - use_fallback_tool_rmb=(False if is_select_left else kc_prefs.rmb_action == 'FALLBACK_TOOL'), + use_fallback_tool=True if is_select_left else (kc_prefs.rmb_action == 'FALLBACK_TOOL'), use_tweak_select_passthrough=(show_developer_ui and kc_prefs.use_tweak_select_passthrough), use_tweak_tool_lmb_interaction=( False if is_select_left else diff --git a/release/scripts/presets/keyconfig/keymap_data/blender_default.py b/release/scripts/presets/keyconfig/keymap_data/blender_default.py index 6b24452131d..d336a5042d0 100644 --- a/release/scripts/presets/keyconfig/keymap_data/blender_default.py +++ b/release/scripts/presets/keyconfig/keymap_data/blender_default.py @@ -11,7 +11,7 @@ __all__ = ( # - This script should run without Blender (no references to the `bpy` module for example). # - All configuration must be passed into the `generate_keymaps` function (via `Params`). # - Supporting some combinations of options is becoming increasingly complex, -# especially `Params.select_mouse` & `Params.use_fallback_tool_rmb`. +# especially `Params.select_mouse` & `Params.use_fallback_tool`. # To ensure changes don't unintentionally break other configurations, see: # `source/tools/utils/blender_keyconfig_export_permutations.py --help` # @@ -52,8 +52,6 @@ class Params: "use_gizmo_drag", # Use the fallback tool instead of tweak for RMB select. "use_fallback_tool", - # Only set for RMB select. - "use_fallback_tool_rmb", # Use pie menu for tab by default (swap 'Tab/Ctrl-Tab'). "use_v3d_tab_menu", # Use extended pie menu for shading. @@ -78,9 +76,9 @@ class Params: # (derived from other settings). # # The fallback tool is activated on the same button as selection. 
- # Shorthand for: `(True if (select_mouse == 'LEFT') else self.use_fallback_tool_rmb)` + # Shorthand for: `(True if (select_mouse == 'LEFT') else self.use_fallback_tool)` "use_fallback_tool_select_mouse", - # Shorthand for: `('CLICK' if self.use_fallback_tool_rmb else self.select_mouse_value)`. + # Shorthand for: `('CLICK' if self.use_fallback_tool and select_mouse == 'RIGHT' else self.select_mouse_value)`. "select_mouse_value_fallback", # Shorthand for: `{"type": params.select_mouse, "value": 'CLICK_DRAG'}`. "select_tweak_event", @@ -110,7 +108,6 @@ class Params: use_select_all_toggle=False, use_gizmo_drag=True, use_fallback_tool=False, - use_fallback_tool_rmb=False, use_tweak_select_passthrough=False, use_tweak_tool_lmb_interaction=False, use_v3d_tab_menu=False, @@ -202,11 +199,12 @@ class Params: self.use_tweak_select_passthrough = use_tweak_select_passthrough self.use_fallback_tool = use_fallback_tool - self.use_fallback_tool_rmb = use_fallback_tool_rmb # Convenience variables: - self.use_fallback_tool_select_mouse = True if (select_mouse == 'LEFT') else self.use_fallback_tool_rmb - self.select_mouse_value_fallback = 'CLICK' if self.use_fallback_tool_rmb else self.select_mouse_value + self.use_fallback_tool_select_mouse = True if (select_mouse == 'LEFT') else self.use_fallback_tool + self.select_mouse_value_fallback = ( + 'CLICK' if (self.use_fallback_tool and select_mouse == 'RIGHT') else self.select_mouse_value + ) self.select_tweak_event = {"type": self.select_mouse, "value": 'CLICK_DRAG'} self.pie_value = 'CLICK_DRAG' if use_pie_click_drag else 'PRESS' self.tool_tweak_event = {"type": self.tool_mouse, "value": 'CLICK_DRAG'} @@ -4423,6 +4421,11 @@ def km_weight_paint_vertex_selection(params): ("view3d.select_lasso", {"type": params.action_mouse, "value": 'CLICK_DRAG', "shift": True, "ctrl": True}, {"properties": [("mode", 'SUB')]}), ("view3d.select_circle", {"type": 'C', "value": 'PRESS'}, None), + ("paint.vert_select_linked", {"type": 'L', "value": 'PRESS', "ctrl": True}, None), + ("paint.vert_select_linked_pick", {"type": 'L', "value": 'PRESS'}, + {"properties": [("select", True)]}), + ("paint.vert_select_linked_pick", {"type": 'L', "value": 'PRESS', "shift": True}, + {"properties": [("select", False)]}), ]) return keymap @@ -4768,7 +4771,7 @@ def _template_paint_radial_control_channels(paint, rotation=False, secondary_rot def _template_view3d_select(*, type, value, legacy, select_passthrough, exclude_mod=None): # NOTE: `exclude_mod` is needed since we don't want this tool to exclude Control-RMB actions when this is used - # as a tool key-map with RMB-select and `use_fallback_tool_rmb` is enabled. See T92467. + # as a tool key-map with RMB-select and `use_fallback_tool` is enabled with RMB select. See T92467. 
props_vert_without_handles = () if select_passthrough: @@ -4963,7 +4966,6 @@ def km_image_paint(params): {"properties": [("data_path", 'image_paint_object.data.use_paint_mask')]}), ("wm.context_toggle", {"type": 'S', "value": 'PRESS', "shift": True}, {"properties": [("data_path", 'tool_settings.image_paint.brush.use_smooth_stroke')]}), - op_menu("VIEW3D_MT_angle_control", {"type": 'R', "value": 'PRESS'}), ("wm.context_menu_enum", {"type": 'E', "value": 'PRESS'}, {"properties": [("data_path", 'tool_settings.image_paint.brush.stroke_method')]}), *_template_items_context_panel("VIEW3D_PT_paint_texture_context_menu", params.context_menu_event), @@ -5012,7 +5014,6 @@ def km_vertex_paint(params): {"properties": [("data_path", 'vertex_paint_object.data.use_paint_mask')]}), ("wm.context_toggle", {"type": 'S', "value": 'PRESS', "shift": True}, {"properties": [("data_path", 'tool_settings.vertex_paint.brush.use_smooth_stroke')]}), - op_menu("VIEW3D_MT_angle_control", {"type": 'R', "value": 'PRESS'}), ("wm.context_menu_enum", {"type": 'E', "value": 'PRESS'}, {"properties": [("data_path", 'tool_settings.vertex_paint.brush.stroke_method')]}), ("paint.face_vert_reveal", {"type": 'H', "value": 'PRESS', "alt": True}, None), @@ -5103,7 +5104,7 @@ def km_sculpt(params): {"properties": [ ("target", "MASK"), ("falloff_type", "GEODESIC"), - ("invert", True), + ("invert", False), ("use_auto_mask", False), ("use_mask_preserve", True), ]}), @@ -5168,11 +5169,11 @@ def km_sculpt(params): {"properties": [("data_path", 'scene.tool_settings.sculpt.show_mask')]}), # Dynamic topology ("sculpt.dynamic_topology_toggle", {"type": 'D', "value": 'PRESS', "ctrl": True}, None), - ("sculpt.dyntopo_detail_size_edit", {"type": 'D', "value": 'PRESS', "shift": True}, None), + ("sculpt.dyntopo_detail_size_edit", {"type": 'R', "value": 'PRESS'}, None), ("sculpt.set_detail_size", {"type": 'D', "value": 'PRESS', "shift": True, "alt": True}, None), # Remesh ("object.voxel_remesh", {"type": 'R', "value": 'PRESS', "ctrl": True}, None), - ("object.voxel_size_edit", {"type": 'R', "value": 'PRESS', "shift": True}, None), + ("object.voxel_size_edit", {"type": 'R', "value": 'PRESS'}, None), ("object.quadriflow_remesh", {"type": 'R', "value": 'PRESS', "ctrl": True, "alt": True}, None), # Color ("sculpt.sample_color", {"type": 'S', "value": 'PRESS'}, None), @@ -5223,7 +5224,6 @@ def km_sculpt(params): {"properties": [("data_path", 'tool_settings.sculpt.brush.stroke_method')]}), ("wm.context_toggle", {"type": 'S', "value": 'PRESS', "shift": True}, {"properties": [("data_path", 'tool_settings.sculpt.brush.use_smooth_stroke')]}), - op_menu("VIEW3D_MT_angle_control", {"type": 'R', "value": 'PRESS'}), op_menu_pie("VIEW3D_MT_sculpt_mask_edit_pie", {"type": 'A', "value": 'PRESS'}), op_menu_pie("VIEW3D_MT_sculpt_automasking_pie", {"type": 'A', "alt": True, "value": 'PRESS'}), op_menu_pie("VIEW3D_MT_sculpt_face_sets_edit_pie", {"type": 'W', "value": 'PRESS'}), @@ -5673,6 +5673,14 @@ def km_curves(params): {"items": items}, ) + items.extend([ + ("curves.set_selection_domain", {"type": 'ONE', "value": 'PRESS'}, {"properties": [("domain", 'POINT')]}), + ("curves.set_selection_domain", {"type": 'TWO', "value": 'PRESS'}, {"properties": [("domain", 'CURVE')]}), + ("curves.disable_selection", {"type": 'ONE', "value": 'PRESS', "alt": True}, None), + ("curves.disable_selection", {"type": 'TWO', "value": 'PRESS', "alt": True}, None), + *_template_items_select_actions(params, "curves.select_all"), + ]) + return keymap @@ -5695,7 +5703,7 @@ def 
km_sculpt_curves(params): ("curves.set_selection_domain", {"type": 'TWO', "value": 'PRESS'}, {"properties": [("domain", 'CURVE')]}), *_template_paint_radial_control("curves_sculpt"), *_template_items_select_actions(params, "curves.select_all"), - ("sculpt_curves.min_distance_edit", {"type": 'R', "value": 'PRESS', "shift": True}, {}), + ("sculpt_curves.min_distance_edit", {"type": 'R', "value": 'PRESS'}, {}), ("sculpt_curves.select_grow", {"type": 'A', "value": 'PRESS', "shift": True}, {}), ]) @@ -6327,6 +6335,7 @@ def km_sculpt_expand_modal(_params): ("CANCEL", {"type": 'ESC', "value": 'PRESS', "any": True}, None), ("CANCEL", {"type": 'RIGHTMOUSE', "value": 'PRESS', "any": True}, None), ("CONFIRM", {"type": 'LEFTMOUSE', "value": 'PRESS', "any": True}, None), + ("CONFIRM", {"type": 'LEFTMOUSE', "value": 'RELEASE', "any": True}, None), ("INVERT", {"type": 'F', "value": 'PRESS', "any": True}, None), ("PRESERVE", {"type": 'E', "value": 'PRESS', "any": True}, None), ("GRADIENT", {"type": 'G', "value": 'PRESS', "any": True}, None), @@ -6335,6 +6344,8 @@ def km_sculpt_expand_modal(_params): ("MOVE_TOGGLE", {"type": 'SPACE', "value": 'ANY', "any": True}, None), *((e, {"type": NUMBERS_1[i], "value": 'PRESS', "any": True}, None) for i, e in enumerate( ("FALLOFF_GEODESICS", "FALLOFF_TOPOLOGY", "FALLOFF_TOPOLOGY_DIAGONALS", "FALLOFF_SPHERICAL"))), + *((e, {"type": "NUMPAD_%i" % (i + 1), "value": 'PRESS', "any": True}, None) for i, e in enumerate( + ("FALLOFF_GEODESICS", "FALLOFF_TOPOLOGY", "FALLOFF_TOPOLOGY_DIAGONALS", "FALLOFF_SPHERICAL"))), ("SNAP_TOGGLE", {"type": 'LEFT_CTRL', "value": 'ANY'}, None), ("SNAP_TOGGLE", {"type": 'RIGHT_CTRL', "value": 'ANY'}, None), ("LOOP_COUNT_INCREASE", {"type": 'W', "value": 'PRESS', "any": True, "repeat": True}, None), @@ -6365,6 +6376,25 @@ def km_curve_pen_modal_map(_params): return keymap +def km_node_link_modal_map(_params): + items = [] + keymap = ( + "Node Link Modal Map", + {"space_type": 'EMPTY', "region_type": 'WINDOW', "modal": True}, + {"items": items}, + ) + + items.extend([ + ("BEGIN", {"type": 'LEFTMOUSE', "value": 'PRESS', "any": True}, None), + ("CONFIRM", {"type": 'LEFTMOUSE', "value": 'RELEASE', "any": True}, None), + ("CANCEL", {"type": 'RIGHTMOUSE', "value": 'PRESS', "any": True}, None), + ("CANCEL", {"type": 'ESC', "value": 'PRESS', "any": True}, None), + ("SWAP", {"type": 'LEFT_ALT', "value": 'ANY', "any": True}, None), + ("SWAP", {"type": 'RIGHT_ALT', "value": 'ANY', "any": True}, None), + ]) + + return keymap + # Fallback for gizmos that don't have a custom key-map. 
def km_generic_gizmo(_params): keymap = ( @@ -6550,11 +6580,12 @@ def km_image_editor_tool_uv_select(params, *, fallback): {"items": [ *([] if (fallback and (params.select_mouse == 'RIGHTMOUSE')) else _template_items_tool_select( params, "uv.select", "uv.cursor_set", fallback=fallback)), - *([] if (not params.use_fallback_tool_rmb) else _template_uv_select( - type=params.select_mouse, - value=params.select_mouse_value, - select_passthrough=params.use_tweak_select_passthrough, - legacy=params.legacy, + *([] if (not (params.use_fallback_tool and params.select_mouse == 'RIGHTMOUSE')) else + _template_uv_select( + type=params.select_mouse, + value=params.select_mouse_value, + select_passthrough=params.use_tweak_select_passthrough, + legacy=params.legacy, )), ]}, ) @@ -6769,12 +6800,13 @@ def km_3d_view_tool_select(params, *, fallback): {"items": [ *([] if (fallback and (params.select_mouse == 'RIGHTMOUSE')) else _template_items_tool_select( params, "view3d.select", "view3d.cursor3d", operator_props=operator_props, fallback=fallback)), - *([] if (not params.use_fallback_tool_rmb) else _template_view3d_select( - type=params.select_mouse, - value=params.select_mouse_value, - legacy=params.legacy, - select_passthrough=params.use_tweak_select_passthrough, - exclude_mod="ctrl", + *([] if (not (params.use_fallback_tool and params.select_mouse == 'RIGHTMOUSE')) else + _template_view3d_select( + type=params.select_mouse, + value=params.select_mouse_value, + legacy=params.legacy, + select_passthrough=params.use_tweak_select_passthrough, + exclude_mod="ctrl", )), ]}, ) @@ -7747,8 +7779,12 @@ def km_3d_view_tool_edit_gpencil_select(params, *, fallback): {"items": [ *([] if (fallback and (params.select_mouse == 'RIGHTMOUSE')) else _template_items_tool_select( params, "gpencil.select", "view3d.cursor3d", fallback=fallback)), - *([] if (not params.use_fallback_tool_rmb) else _template_view3d_gpencil_select( - type=params.select_mouse, value=params.select_mouse_value, legacy=params.legacy)), + *([] if (not (params.use_fallback_tool and params.select_mouse == 'RIGHTMOUSE')) else + _template_view3d_gpencil_select( + type=params.select_mouse, + value=params.select_mouse_value, + legacy=params.legacy, + )), ]}, ) @@ -7926,8 +7962,9 @@ def km_sequencer_editor_tool_generic_select(params, *, fallback): *([] if (fallback and (params.select_mouse == 'RIGHTMOUSE')) else _template_items_tool_select( params, "sequencer.select", "sequencer.cursor_set", cursor_prioritize=True, fallback=fallback)), - *([] if (not params.use_fallback_tool_rmb) else _template_sequencer_preview_select( - type=params.select_mouse, value=params.select_mouse_value, legacy=params.legacy)), + *([] if (not (params.use_fallback_tool and params.select_mouse == 'RIGHTMOUSE')) else + _template_sequencer_preview_select( + type=params.select_mouse, value=params.select_mouse_value, legacy=params.legacy)), # Ignored for preview. *_template_items_change_frame(params), ]}, @@ -8161,6 +8198,7 @@ def generate_keymaps(params=None): km_paint_stroke_modal(params), km_sculpt_expand_modal(params), km_curve_pen_modal_map(params), + km_node_link_modal_map(params), # Gizmos. 
km_generic_gizmo(params), diff --git a/release/scripts/presets/keyconfig/keymap_data/industry_compatible_data.py b/release/scripts/presets/keyconfig/keymap_data/industry_compatible_data.py index 2f15d908364..98cee34519f 100644 --- a/release/scripts/presets/keyconfig/keymap_data/industry_compatible_data.py +++ b/release/scripts/presets/keyconfig/keymap_data/industry_compatible_data.py @@ -2972,6 +2972,11 @@ def km_weight_paint_vertex_selection(params): ("paint.vert_select_hide", {"type": 'H', "value": 'PRESS', "shift": True}, {"properties": [("unselected", True)]}), ("paint.face_vert_reveal", {"type": 'H', "value": 'PRESS', "alt": True}, None), + ("paint.vert_select_linked", {"type": 'L', "value": 'PRESS', "ctrl": True}, None), + ("paint.vert_select_linked_pick", {"type": 'L', "value": 'PRESS'}, + {"properties": [("select", True)]}), + ("paint.vert_select_linked_pick", {"type": 'L', "value": 'PRESS', "shift": True}, + {"properties": [("select", False)]}), ]) return keymap @@ -3281,7 +3286,6 @@ def km_image_paint(params): {"properties": [("data_path", 'image_paint_object.data.use_paint_mask')]}), ("wm.context_toggle", {"type": 'S', "value": 'PRESS', "shift": True}, {"properties": [("data_path", 'tool_settings.image_paint.brush.use_smooth_stroke')]}), - op_menu("VIEW3D_MT_angle_control", {"type": 'R', "value": 'PRESS'}), *_template_items_context_panel("VIEW3D_PT_paint_texture_context_menu", {"type": 'RIGHTMOUSE', "value": 'PRESS'}), # Tools @@ -3332,7 +3336,6 @@ def km_vertex_paint(params): {"properties": [("data_path", 'vertex_paint_object.data.use_paint_mask')]}), ("wm.context_toggle", {"type": 'S', "value": 'PRESS', "shift": True}, {"properties": [("data_path", 'tool_settings.vertex_paint.brush.use_smooth_stroke')]}), - op_menu("VIEW3D_MT_angle_control", {"type": 'R', "value": 'PRESS'}), ("paint.face_vert_reveal", {"type": 'H', "value": 'PRESS', "alt": True}, None), *_template_items_context_panel("VIEW3D_PT_paint_vertex_context_menu", {"type": 'RIGHTMOUSE', "value": 'PRESS'}), # Tools diff --git a/release/scripts/startup/bl_operators/presets.py b/release/scripts/startup/bl_operators/presets.py index e4b9021926e..f8146308600 100644 --- a/release/scripts/startup/bl_operators/presets.py +++ b/release/scripts/startup/bl_operators/presets.py @@ -317,11 +317,11 @@ class AddPresetCamera(AddPresetBase, Operator): return preset_values -class AddPresetSafeAreas(AddPresetBase, Operator): +class AddPresetCameraSafeAreas(AddPresetBase, Operator): """Add or remove a Safe Areas Preset""" - bl_idname = "safe_areas.preset_add" + bl_idname = "camera.safe_areas_preset_add" bl_label = "Add Safe Area Preset" - preset_menu = "SAFE_AREAS_PT_presets" + preset_menu = "CAMERA_PT_safe_areas_presets" preset_defines = [ "safe_areas = bpy.context.scene.safe_areas" @@ -691,7 +691,7 @@ classes = ( AddPresetNodeColor, AddPresetOperator, AddPresetRender, - AddPresetSafeAreas, + AddPresetCameraSafeAreas, AddPresetTrackingCamera, AddPresetTrackingSettings, AddPresetTrackingTrackColor, diff --git a/release/scripts/startup/bl_operators/wm.py b/release/scripts/startup/bl_operators/wm.py index 7f96626974a..c41b42846ff 100644 --- a/release/scripts/startup/bl_operators/wm.py +++ b/release/scripts/startup/bl_operators/wm.py @@ -1587,7 +1587,7 @@ class WM_OT_properties_edit(Operator): elif self.property_type == 'STRING': self.default_string = rna_data["default"] elif self.property_type in {'BOOL', 'BOOL_ARRAY'}: - self.default_int = self._convert_new_value_array(rna_data["default"], bool, 32) + self.default_bool = 
self._convert_new_value_array(rna_data["default"], bool, 32) if self.property_type in {'FLOAT_ARRAY', 'INT_ARRAY', 'BOOL_ARRAY'}: self.array_length = len(item[name]) @@ -1604,33 +1604,26 @@ class WM_OT_properties_edit(Operator): def _get_converted_value(self, item, name_old, prop_type_new): if prop_type_new == 'INT': return self._convert_new_value_single(item[name_old], int) - - if prop_type_new == 'FLOAT': + elif prop_type_new == 'FLOAT': return self._convert_new_value_single(item[name_old], float) - - if prop_type_new == 'BOOL': + elif prop_type_new == 'BOOL': return self._convert_new_value_single(item[name_old], bool) - - if prop_type_new == 'INT_ARRAY': + elif prop_type_new == 'INT_ARRAY': prop_type_old = self.get_property_type(item, name_old) if prop_type_old in {'INT', 'FLOAT', 'INT_ARRAY', 'FLOAT_ARRAY', 'BOOL_ARRAY'}: return self._convert_new_value_array(item[name_old], int, self.array_length) - - if prop_type_new == 'FLOAT_ARRAY': + elif prop_type_new == 'FLOAT_ARRAY': prop_type_old = self.get_property_type(item, name_old) if prop_type_old in {'INT', 'FLOAT', 'FLOAT_ARRAY', 'INT_ARRAY', 'BOOL_ARRAY'}: return self._convert_new_value_array(item[name_old], float, self.array_length) - - if prop_type_new == 'BOOL_ARRAY': + elif prop_type_new == 'BOOL_ARRAY': prop_type_old = self.get_property_type(item, name_old) - if prop_type_old in {'INT', 'FLOAT', 'FLOAT_ARRAY', 'INT_ARRAY'}: + if prop_type_old in {'INT', 'FLOAT', 'FLOAT_ARRAY', 'INT_ARRAY', 'BOOL_ARRAY'}: return self._convert_new_value_array(item[name_old], bool, self.array_length) else: return [False] * self.array_length - - if prop_type_new == 'STRING': + elif prop_type_new == 'STRING': return self.convert_custom_property_to_string(item, name_old) - # If all else fails, create an empty string property. That should avoid errors later on anyway. return "" @@ -1667,7 +1660,7 @@ class WM_OT_properties_edit(Operator): default=self.default_int[0] if prop_type_new == 'INT' else self.default_int[:self.array_length], description=self.description, ) - if prop_type_new in {'BOOL', 'BOOL_ARRAY'}: + elif prop_type_new in {'BOOL', 'BOOL_ARRAY'}: ui_data = item.id_properties_ui(name) ui_data.update( default=self.default_bool[0] if prop_type_new == 'BOOL' else self.default_bool[:self.array_length], diff --git a/release/scripts/startup/bl_ui/__init__.py b/release/scripts/startup/bl_ui/__init__.py index 8df1a4ebe5d..da9096f460e 100644 --- a/release/scripts/startup/bl_ui/__init__.py +++ b/release/scripts/startup/bl_ui/__init__.py @@ -52,6 +52,7 @@ _modules = [ "properties_texture", "properties_world", "properties_collection", + "generic_ui_list", "sculpt_ui", diff --git a/release/scripts/startup/bl_ui/generic_ui_list.py b/release/scripts/startup/bl_ui/generic_ui_list.py new file mode 100644 index 00000000000..ef9c22d55eb --- /dev/null +++ b/release/scripts/startup/bl_ui/generic_ui_list.py @@ -0,0 +1,258 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +This module (in particular the draw_ui_list function) lets you draw the commonly +used UIList layout, seen all over Blender. + +This includes the list itself, and a column of buttons to the right of it, which +contains buttons to add, remove, and move entries up or down, as well as a +drop-down menu. + +You can get an example of how to use this via the Blender Text Editor-> +Templates->Ui List Generic. 
+""" + +import bpy +from bpy.types import Operator +from bpy.props import ( + EnumProperty, + StringProperty, +) + +__all__ = ( + "draw_ui_list", +) + + +def draw_ui_list( + layout, + context, + class_name="UI_UL_list", + *, + unique_id="", + list_path, + active_index_path, + insertion_operators=True, + move_operators=True, + menu_class_name="", + **kwargs, +): + """ + Draw a UIList with Add/Remove/Move buttons and a menu. + + :arg layout: UILayout to draw the list in. + :type layout: :class:`UILayout` + :arg context: Blender context to get the list data from. + :type context: :class:`Context` + :arg class_name: Name of the UIList class to draw. The default is the UIList class that ships with Blender. + :type class_name: str + :arg unique_id: Optional identifier, in case wanting to draw multiple unique copies of a list. + :type unique_id: str + :arg list_path: Data path of the list relative to context, eg. "object.vertex_groups". + :type list_path: str + :arg active_index_path: Data path of the list active index integer relative to context, + eg. "object.vertex_groups.active_index". + :type active_index_path: str + :arg insertion_operators: Whether to draw Add/Remove buttons. + :type insertion_operators: bool + :arg move_operators: Whether to draw Move Up/Down buttons. + :type move_operators: str + :arg menu_class_name: Identifier of a Menu that should be drawn as a drop-down. + :type menu_class_name: str + + :returns: The right side column. + :rtype: :class:`UILayout`. + + Additional keyword arguments are passed to :class:`UIList.template_list`. + """ + + row = layout.row() + + list_owner_path, list_prop_name = list_path.rsplit('.', 1) + list_owner = _get_context_attr(context, list_owner_path) + + index_owner_path, index_prop_name = active_index_path.rsplit('.', 1) + index_owner = _get_context_attr(context, index_owner_path) + + list_to_draw = _get_context_attr(context, list_path) + + row.template_list( + class_name, + unique_id, + list_owner, list_prop_name, + index_owner, index_prop_name, + rows=4 if list_to_draw else 1, + **kwargs + ) + + col = row.column() + + if insertion_operators: + _draw_add_remove_buttons( + layout=col, + list_path=list_path, + active_index_path=active_index_path, + list_length=len(list_to_draw) + ) + layout.separator() + + if menu_class_name: + col.menu(menu_class_name, icon='DOWNARROW_HLT', text="") + col.separator() + + if move_operators and list_to_draw: + _draw_move_buttons( + layout=col, + list_path=list_path, + active_index_path=active_index_path, + list_length=len(list_to_draw) + ) + + # Return the right-side column. 
+ return col + + +def _draw_add_remove_buttons( + *, + layout, + list_path, + active_index_path, + list_length, +): + """Draw the +/- buttons to add and remove list entries.""" + add_op = layout.operator(UILIST_OT_entry_add.bl_idname, text="", icon='ADD') + add_op.list_path = list_path + add_op.active_index_path = active_index_path + + row = layout.row() + row.enabled = list_length > 0 + remove_op = row.operator(UILIST_OT_entry_remove.bl_idname, text="", icon='REMOVE') + remove_op.list_path = list_path + remove_op.active_index_path = active_index_path + + +def _draw_move_buttons( + *, + layout, + list_path, + active_index_path, + list_length, +): + """Draw the up/down arrows to move elements in the list.""" + col = layout.column() + col.enabled = list_length > 1 + move_up_op = layout.operator(UILIST_OT_entry_move.bl_idname, text="", icon='TRIA_UP') + move_up_op.direction = 'UP' + move_up_op.list_path = list_path + move_up_op.active_index_path = active_index_path + + move_down_op = layout.operator(UILIST_OT_entry_move.bl_idname, text="", icon='TRIA_DOWN') + move_down_op.direction = 'DOWN' + move_down_op.list_path = list_path + move_down_op.active_index_path = active_index_path + + +def _get_context_attr(context, data_path): + """Return the value of a context member based on its data path.""" + return context.path_resolve(data_path) + + +def _set_context_attr(context, data_path, value) -> None: + """Set the value of a context member based on its data path.""" + owner_path, attr_name = data_path.rsplit('.', 1) + owner = context.path_resolve(owner_path) + setattr(owner, attr_name, value) + + +class GenericUIListOperator: + """Mix-in class containing functionality shared by operators + that deal with managing Blender list entries.""" + bl_options = {'REGISTER', 'UNDO', 'INTERNAL'} + + list_path: StringProperty() + active_index_path: StringProperty() + + def get_list(self, context): + return _get_context_attr(context, self.list_path) + + def get_active_index(self, context) -> int: + return _get_context_attr(context, self.active_index_path) + + def set_active_index(self, context, index): + _set_context_attr(context, self.active_index_path, index) + + +class UILIST_OT_entry_remove(GenericUIListOperator, Operator): + """Remove the selected entry from the list""" + + bl_idname = "uilist.entry_remove" + bl_label = "Remove Selected Entry" + + def execute(self, context): + my_list = self.get_list(context) + active_index = self.get_active_index(context) + + my_list.remove(active_index) + to_index = min(active_index, len(my_list) - 1) + self.set_active_index(context, to_index) + + return {'FINISHED'} + + +class UILIST_OT_entry_add(GenericUIListOperator, Operator): + """Add an entry to the list after the current active item""" + + bl_idname = "uilist.entry_add" + bl_label = "Add Entry" + + def execute(self, context): + my_list = self.get_list(context) + active_index = self.get_active_index(context) + + to_index = min(len(my_list), active_index + 1) + + my_list.add() + my_list.move(len(my_list) - 1, to_index) + self.set_active_index(context, to_index) + + return {'FINISHED'} + + +class UILIST_OT_entry_move(GenericUIListOperator, Operator): + """Move an entry in the list up or down""" + + bl_idname = "uilist.entry_move" + bl_label = "Move Entry" + + direction: EnumProperty( + name="Direction", + items=(('UP', 'UP', 'UP'), + ('DOWN', 'DOWN', 'DOWN')), + default='UP' + ) + + def execute(self, context): + my_list = self.get_list(context) + active_index = self.get_active_index(context) + + delta = { 
"DOWN": 1, + "UP": -1, + }[self.direction] + + to_index = (active_index + delta) % len(my_list) + + my_list.move(active_index, to_index) + self.set_active_index(context, to_index) + + return {'FINISHED'} + + +# Registration. +classes = ( + UILIST_OT_entry_remove, + UILIST_OT_entry_add, + UILIST_OT_entry_move, +) + +register, unregister = bpy.utils.register_classes_factory(classes) diff --git a/release/scripts/startup/bl_ui/node_add_menu_geometry.py b/release/scripts/startup/bl_ui/node_add_menu_geometry.py index f86601da88c..b829795a232 100644 --- a/release/scripts/startup/bl_ui/node_add_menu_geometry.py +++ b/release/scripts/startup/bl_ui/node_add_menu_geometry.py @@ -46,6 +46,7 @@ class NODE_MT_geometry_node_GEO_CURVE(Menu): def draw(self, _context): layout = self.layout layout.menu("NODE_MT_geometry_node_GEO_CURVE_READ") + layout.menu("NODE_MT_geometry_node_GEO_CURVE_SAMPLE") layout.menu("NODE_MT_geometry_node_GEO_CURVE_WRITE") layout.separator() layout.menu("NODE_MT_geometry_node_GEO_CURVE_OPERATIONS") @@ -72,6 +73,17 @@ class NODE_MT_geometry_node_GEO_CURVE_READ(Menu): node_add_menu.add_node_type(layout, "GeometryNodeInputSplineResolution") node_add_menu.draw_assets_for_catalog(layout, self.bl_label) + +class NODE_MT_geometry_node_GEO_CURVE_SAMPLE(Menu): + bl_idname = "NODE_MT_geometry_node_GEO_CURVE_SAMPLE" + bl_label = "Sample" + + def draw(self, _context): + layout = self.layout + node_add_menu.add_node_type(layout, "GeometryNodeSampleCurve") + node_add_menu.draw_assets_for_catalog(layout, self.bl_label) + + class NODE_MT_geometry_node_GEO_CURVE_WRITE(Menu): bl_idname = "NODE_MT_geometry_node_GEO_CURVE_WRITE" bl_label = "Write" @@ -100,9 +112,9 @@ class NODE_MT_geometry_node_GEO_CURVE_OPERATIONS(Menu): node_add_menu.add_node_type(layout, "GeometryNodeDeformCurvesOnSurface") node_add_menu.add_node_type(layout, "GeometryNodeFillCurve") node_add_menu.add_node_type(layout, "GeometryNodeFilletCurve") + node_add_menu.add_node_type(layout, "GeometryNodeInterpolateCurves") node_add_menu.add_node_type(layout, "GeometryNodeResampleCurve") node_add_menu.add_node_type(layout, "GeometryNodeReverseCurve") - node_add_menu.add_node_type(layout, "GeometryNodeSampleCurve") node_add_menu.add_node_type(layout, "GeometryNodeSubdivideCurve") node_add_menu.add_node_type(layout, "GeometryNodeTrimCurve") node_add_menu.draw_assets_for_catalog(layout, self.bl_label) @@ -144,14 +156,16 @@ class NODE_MT_geometry_node_GEO_GEOMETRY(Menu): def draw(self, _context): layout = self.layout layout.menu("NODE_MT_geometry_node_GEO_GEOMETRY_READ") - layout.menu("NODE_MT_geometry_node_GEO_GEOMETRY_WRITE") - layout.menu("NODE_MT_geometry_node_GEO_GEOMETRY_OPERATIONS") layout.menu("NODE_MT_geometry_node_GEO_GEOMETRY_SAMPLE") + layout.menu("NODE_MT_geometry_node_GEO_GEOMETRY_WRITE") + layout.separator() + layout.menu("NODE_MT_geometry_node_GEO_GEOMETRY_OPERATIONS") layout.separator() node_add_menu.add_node_type(layout, "GeometryNodeJoinGeometry") node_add_menu.add_node_type(layout, "GeometryNodeGeometryToInstance") node_add_menu.draw_assets_for_catalog(layout, self.bl_label) + class NODE_MT_geometry_node_GEO_GEOMETRY_READ(Menu): bl_idname = "NODE_MT_geometry_node_GEO_GEOMETRY_READ" bl_label = "Read" @@ -177,6 +191,7 @@ class NODE_MT_geometry_node_GEO_GEOMETRY_WRITE(Menu): node_add_menu.add_node_type(layout, "GeometryNodeSetPosition") node_add_menu.draw_assets_for_catalog(layout, self.bl_label) + class NODE_MT_geometry_node_GEO_GEOMETRY_OPERATIONS(Menu): bl_idname = "NODE_MT_geometry_node_GEO_GEOMETRY_OPERATIONS" bl_label = 
"Operations" @@ -194,6 +209,7 @@ class NODE_MT_geometry_node_GEO_GEOMETRY_OPERATIONS(Menu): node_add_menu.add_node_type(layout, "GeometryNodeSeparateGeometry") node_add_menu.draw_assets_for_catalog(layout, self.bl_label) + class NODE_MT_geometry_node_GEO_GEOMETRY_SAMPLE(Menu): bl_idname = "NODE_MT_geometry_node_GEO_GEOMETRY_SAMPLE" bl_label = "Sample" @@ -303,6 +319,7 @@ class NODE_MT_geometry_node_GEO_MESH(Menu): def draw(self, _context): layout = self.layout layout.menu("NODE_MT_geometry_node_GEO_MESH_READ") + layout.menu("NODE_MT_geometry_node_GEO_MESH_SAMPLE") layout.menu("NODE_MT_geometry_node_GEO_MESH_WRITE") layout.separator() layout.menu("NODE_MT_geometry_node_GEO_MESH_OPERATIONS") @@ -332,6 +349,17 @@ class NODE_MT_geometry_node_GEO_MESH_READ(Menu): node_add_menu.draw_assets_for_catalog(layout, self.bl_label) +class NODE_MT_geometry_node_GEO_MESH_SAMPLE(Menu): + bl_idname = "NODE_MT_geometry_node_GEO_MESH_SAMPLE" + bl_label = "Sample" + + def draw(self, _context): + layout = self.layout + node_add_menu.add_node_type(layout, "GeometryNodeSampleNearestSurface") + node_add_menu.add_node_type(layout, "GeometryNodeSampleUVSurface") + node_add_menu.draw_assets_for_catalog(layout, self.bl_label) + + class NODE_MT_geometry_node_GEO_MESH_WRITE(Menu): bl_idname = "NODE_MT_geometry_node_GEO_MESH_WRITE" bl_label = "Write" @@ -357,8 +385,6 @@ class NODE_MT_geometry_node_GEO_MESH_OPERATIONS(Menu): node_add_menu.add_node_type(layout, "GeometryNodeMeshToCurve") node_add_menu.add_node_type(layout, "GeometryNodeMeshToPoints") node_add_menu.add_node_type(layout, "GeometryNodeMeshToVolume") - node_add_menu.add_node_type(layout, "GeometryNodeSampleNearestSurface") - node_add_menu.add_node_type(layout, "GeometryNodeSampleUVSurface") node_add_menu.add_node_type(layout, "GeometryNodeScaleElements") node_add_menu.add_node_type(layout, "GeometryNodeSplitEdges") node_add_menu.add_node_type(layout, "GeometryNodeSubdivideMesh") @@ -506,6 +532,7 @@ class NODE_MT_category_GEO_UTILITIES_ROTATION(Menu): node_add_menu.add_node_type(layout, "FunctionNodeRotateEuler") node_add_menu.draw_assets_for_catalog(layout, self.bl_label) + class NODE_MT_category_GEO_UTILITIES_MATH(Menu): bl_idname = "NODE_MT_category_GEO_UTILITIES_MATH" bl_label = "Math" @@ -622,6 +649,7 @@ classes = ( NODE_MT_category_GEO_OUTPUT, NODE_MT_geometry_node_GEO_CURVE, NODE_MT_geometry_node_GEO_CURVE_READ, + NODE_MT_geometry_node_GEO_CURVE_SAMPLE, NODE_MT_geometry_node_GEO_CURVE_WRITE, NODE_MT_geometry_node_GEO_CURVE_OPERATIONS, NODE_MT_geometry_node_GEO_PRIMITIVES_CURVE, @@ -634,6 +662,7 @@ classes = ( NODE_MT_geometry_node_GEO_INSTANCE, NODE_MT_geometry_node_GEO_MESH, NODE_MT_geometry_node_GEO_MESH_READ, + NODE_MT_geometry_node_GEO_MESH_SAMPLE, NODE_MT_geometry_node_GEO_MESH_WRITE, NODE_MT_geometry_node_GEO_MESH_OPERATIONS, NODE_MT_category_GEO_UV, diff --git a/release/scripts/startup/bl_ui/properties_data_armature.py b/release/scripts/startup/bl_ui/properties_data_armature.py index d1a6c0165e0..06e44f7f09f 100644 --- a/release/scripts/startup/bl_ui/properties_data_armature.py +++ b/release/scripts/startup/bl_ui/properties_data_armature.py @@ -248,7 +248,7 @@ class DATA_PT_motion_paths_display(MotionPathButtonsPanel_display, Panel): class DATA_PT_custom_props_arm(ArmatureButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.Armature 
diff --git a/release/scripts/startup/bl_ui/properties_data_bone.py b/release/scripts/startup/bl_ui/properties_data_bone.py index 14f6da83be2..8eedcc4961b 100644 --- a/release/scripts/startup/bl_ui/properties_data_bone.py +++ b/release/scripts/startup/bl_ui/properties_data_bone.py @@ -444,7 +444,12 @@ class BONE_PT_deform(BoneButtonsPanel, Panel): class BONE_PT_custom_props(BoneButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _property_type = bpy.types.Bone, bpy.types.EditBone, bpy.types.PoseBone @property diff --git a/release/scripts/startup/bl_ui/properties_data_camera.py b/release/scripts/startup/bl_ui/properties_data_camera.py index 963ffc60806..7043c24ace1 100644 --- a/release/scripts/startup/bl_ui/properties_data_camera.py +++ b/release/scripts/startup/bl_ui/properties_data_camera.py @@ -21,21 +21,36 @@ class CAMERA_PT_presets(PresetPanel, Panel): preset_subdir = "camera" preset_operator = "script.execute_preset" preset_add_operator = "camera.preset_add" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} -class SAFE_AREAS_PT_presets(PresetPanel, Panel): +class CAMERA_PT_safe_areas_presets(PresetPanel, Panel): bl_label = "Camera Presets" preset_subdir = "safe_areas" preset_operator = "script.execute_preset" - preset_add_operator = "safe_areas.preset_add" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + preset_add_operator = "camera.safe_areas_preset_add" + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} class DATA_PT_context_camera(CameraButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -52,7 +67,12 @@ class DATA_PT_context_camera(CameraButtonsPanel, Panel): class DATA_PT_lens(CameraButtonsPanel, Panel): bl_label = "Lens" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -100,7 +120,7 @@ class DATA_PT_lens(CameraButtonsPanel, Panel): col.prop(ccam, "fisheye_polynomial_k3", text="K3") col.prop(ccam, "fisheye_polynomial_k4", text="K4") - elif engine in {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'}: + elif engine in {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'}: if cam.lens_unit == 'MILLIMETERS': col.prop(cam, "lens") elif cam.lens_unit == 'FOV': @@ -122,7 +142,12 @@ class DATA_PT_lens(CameraButtonsPanel, Panel): class DATA_PT_camera_stereoscopy(CameraButtonsPanel, Panel): bl_label = "Stereoscopy" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 
'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -171,7 +196,12 @@ class DATA_PT_camera_stereoscopy(CameraButtonsPanel, Panel): class DATA_PT_camera(CameraButtonsPanel, Panel): bl_label = "Camera" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header_preset(self, _context): CAMERA_PT_presets.draw_panel_header(self.layout) @@ -201,7 +231,7 @@ class DATA_PT_camera(CameraButtonsPanel, Panel): class DATA_PT_camera_dof(CameraButtonsPanel, Panel): bl_label = "Depth of Field" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): cam = context.camera @@ -228,7 +258,7 @@ class DATA_PT_camera_dof(CameraButtonsPanel, Panel): class DATA_PT_camera_dof_aperture(CameraButtonsPanel, Panel): bl_label = "Aperture" bl_parent_id = "DATA_PT_camera_dof" - COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -252,7 +282,12 @@ class DATA_PT_camera_dof_aperture(CameraButtonsPanel, Panel): class DATA_PT_camera_background_image(CameraButtonsPanel, Panel): bl_label = "Background Images" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): cam = context.camera @@ -359,7 +394,12 @@ class DATA_PT_camera_background_image(CameraButtonsPanel, Panel): class DATA_PT_camera_display(CameraButtonsPanel, Panel): bl_label = "Viewport Display" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -392,7 +432,12 @@ class DATA_PT_camera_display_composition_guides(CameraButtonsPanel, Panel): bl_label = "Composition Guides" bl_parent_id = "DATA_PT_camera_display" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -419,7 +464,12 @@ class DATA_PT_camera_display_composition_guides(CameraButtonsPanel, Panel): class DATA_PT_camera_safe_areas(CameraButtonsPanel, Panel): bl_label = "Safe Areas" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): cam = context.camera @@ -427,7 +477,7 @@ class DATA_PT_camera_safe_areas(CameraButtonsPanel, Panel): self.layout.prop(cam, 
"show_safe_areas", text="") def draw_header_preset(self, _context): - SAFE_AREAS_PT_presets.draw_panel_header(self.layout) + CAMERA_PT_safe_areas_presets.draw_panel_header(self.layout) def draw(self, context): layout = self.layout @@ -449,7 +499,12 @@ class DATA_PT_camera_safe_areas_center_cut(CameraButtonsPanel, Panel): bl_label = "Center-Cut Safe Areas" bl_parent_id = "DATA_PT_camera_safe_areas" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): cam = context.camera @@ -473,7 +528,12 @@ class DATA_PT_camera_safe_areas_center_cut(CameraButtonsPanel, Panel): class DATA_PT_custom_props_camera(CameraButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.Camera @@ -503,7 +563,7 @@ def draw_display_safe_settings(layout, safe_data, settings): classes = ( CAMERA_PT_presets, - SAFE_AREAS_PT_presets, + CAMERA_PT_safe_areas_presets, DATA_PT_context_camera, DATA_PT_lens, DATA_PT_camera_dof, diff --git a/release/scripts/startup/bl_ui/properties_data_curve.py b/release/scripts/startup/bl_ui/properties_data_curve.py index 88dd3caaa74..bd239203123 100644 --- a/release/scripts/startup/bl_ui/properties_data_curve.py +++ b/release/scripts/startup/bl_ui/properties_data_curve.py @@ -116,7 +116,12 @@ class DATA_PT_shape_curve(CurveButtonsPanel, Panel): class DATA_PT_curve_texture_space(CurveButtonsPanel, Panel): bl_label = "Texture Space" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -475,7 +480,12 @@ class DATA_PT_text_boxes(CurveButtonsPanelText, Panel): class DATA_PT_custom_props_curve(CurveButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.Curve diff --git a/release/scripts/startup/bl_ui/properties_data_curves.py b/release/scripts/startup/bl_ui/properties_data_curves.py index e17d749acd5..8064a9b5696 100644 --- a/release/scripts/startup/bl_ui/properties_data_curves.py +++ b/release/scripts/startup/bl_ui/properties_data_curves.py @@ -18,7 +18,12 @@ class DataButtonsPanel: class DATA_PT_context_curves(DataButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -35,7 +40,12 @@ class DATA_PT_context_curves(DataButtonsPanel, Panel): class DATA_PT_curves_surface(DataButtonsPanel, Panel): bl_label = "Surface" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 
'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -118,7 +128,12 @@ class CURVES_UL_attributes(UIList): class DATA_PT_CURVES_attributes(DataButtonsPanel, Panel): bl_label = "Attributes" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): curves = context.curves @@ -143,7 +158,12 @@ class DATA_PT_CURVES_attributes(DataButtonsPanel, Panel): class DATA_PT_custom_props_curves(DataButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.Curves if hasattr(bpy.types, "Curves") else None diff --git a/release/scripts/startup/bl_ui/properties_data_lattice.py b/release/scripts/startup/bl_ui/properties_data_lattice.py index e57b46989fe..f9b451a772a 100644 --- a/release/scripts/startup/bl_ui/properties_data_lattice.py +++ b/release/scripts/startup/bl_ui/properties_data_lattice.py @@ -64,7 +64,12 @@ class DATA_PT_lattice(DataButtonsPanel, Panel): class DATA_PT_custom_props_lattice(DataButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.Lattice diff --git a/release/scripts/startup/bl_ui/properties_data_light.py b/release/scripts/startup/bl_ui/properties_data_light.py index b313ae4dcb9..272191aebe5 100644 --- a/release/scripts/startup/bl_ui/properties_data_light.py +++ b/release/scripts/startup/bl_ui/properties_data_light.py @@ -18,7 +18,12 @@ class DataButtonsPanel: class DATA_PT_context_light(DataButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE_NEXT', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_EEVEE', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -44,7 +49,7 @@ class DATA_PT_preview(DataButtonsPanel, Panel): class DATA_PT_light(DataButtonsPanel, Panel): bl_label = "Light" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -230,7 +235,12 @@ class DATA_PT_EEVEE_shadow_contact(DataButtonsPanel, Panel): class DATA_PT_spot(DataButtonsPanel, Panel): bl_label = "Spot Shape" bl_parent_id = "DATA_PT_EEVEE_light" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE_NEXT', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_EEVEE', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -275,7 +285,12 @@ class DATA_PT_falloff_curve(DataButtonsPanel, Panel): class DATA_PT_custom_props_light(DataButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE_NEXT', 
'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_EEVEE', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.Light diff --git a/release/scripts/startup/bl_ui/properties_data_mesh.py b/release/scripts/startup/bl_ui/properties_data_mesh.py index 6bc328e4f90..73bc3bb2280 100644 --- a/release/scripts/startup/bl_ui/properties_data_mesh.py +++ b/release/scripts/startup/bl_ui/properties_data_mesh.py @@ -171,7 +171,7 @@ class MeshButtonsPanel: class DATA_PT_context_mesh(MeshButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -189,7 +189,7 @@ class DATA_PT_context_mesh(MeshButtonsPanel, Panel): class DATA_PT_normals(MeshButtonsPanel, Panel): bl_label = "Normals" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -211,7 +211,7 @@ class DATA_PT_normals(MeshButtonsPanel, Panel): class DATA_PT_texture_space(MeshButtonsPanel, Panel): bl_label = "Texture Space" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -231,7 +231,7 @@ class DATA_PT_texture_space(MeshButtonsPanel, Panel): class DATA_PT_vertex_groups(MeshButtonsPanel, Panel): bl_label = "Vertex Groups" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -288,7 +288,7 @@ class DATA_PT_vertex_groups(MeshButtonsPanel, Panel): class DATA_PT_face_maps(MeshButtonsPanel, Panel): bl_label = "Face Maps" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -331,7 +331,7 @@ class DATA_PT_face_maps(MeshButtonsPanel, Panel): class DATA_PT_shape_keys(MeshButtonsPanel, Panel): bl_label = "Shape Keys" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -428,7 +428,7 @@ class DATA_PT_shape_keys(MeshButtonsPanel, Panel): class DATA_PT_uv_texture(MeshButtonsPanel, Panel): bl_label = "UV Maps" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -448,7 +448,7 @@ class DATA_PT_uv_texture(MeshButtonsPanel, Panel): class DATA_PT_remesh(MeshButtonsPanel, Panel): bl_label = "Remesh" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} 
def draw(self, context): layout = self.layout @@ -478,7 +478,7 @@ class DATA_PT_remesh(MeshButtonsPanel, Panel): class DATA_PT_customdata(MeshButtonsPanel, Panel): bl_label = "Geometry Data" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -520,7 +520,7 @@ class DATA_PT_customdata(MeshButtonsPanel, Panel): class DATA_PT_custom_props_mesh(MeshButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.Mesh @@ -571,7 +571,7 @@ class MESH_UL_attributes(UIList): class DATA_PT_mesh_attributes(MeshButtonsPanel, Panel): bl_label = "Attributes" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): mesh = context.mesh @@ -695,7 +695,7 @@ class MESH_UL_color_attributes_selector(UIList, ColorAttributesListBase): class DATA_PT_vertex_colors(DATA_PT_mesh_attributes, Panel): bl_label = "Color Attributes" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): mesh = context.mesh diff --git a/release/scripts/startup/bl_ui/properties_data_metaball.py b/release/scripts/startup/bl_ui/properties_data_metaball.py index eba5676535f..2fc37617a21 100644 --- a/release/scripts/startup/bl_ui/properties_data_metaball.py +++ b/release/scripts/startup/bl_ui/properties_data_metaball.py @@ -56,7 +56,12 @@ class DATA_PT_metaball(DataButtonsPanel, Panel): class DATA_PT_mball_texture_space(DataButtonsPanel, Panel): bl_label = "Texture Space" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -111,7 +116,12 @@ class DATA_PT_metaball_element(DataButtonsPanel, Panel): class DATA_PT_custom_props_metaball(DataButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.MetaBall diff --git a/release/scripts/startup/bl_ui/properties_data_pointcloud.py b/release/scripts/startup/bl_ui/properties_data_pointcloud.py index db1b83f0cf4..7b9dd18cfa3 100644 --- a/release/scripts/startup/bl_ui/properties_data_pointcloud.py +++ b/release/scripts/startup/bl_ui/properties_data_pointcloud.py @@ -18,7 +18,12 @@ class DataButtonsPanel: class DATA_PT_context_pointcloud(DataButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 
'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -97,7 +102,12 @@ class POINTCLOUD_UL_attributes(UIList): class DATA_PT_pointcloud_attributes(DataButtonsPanel, Panel): bl_label = "Attributes" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): pointcloud = context.pointcloud @@ -122,7 +132,12 @@ class DATA_PT_pointcloud_attributes(DataButtonsPanel, Panel): class DATA_PT_custom_props_pointcloud(DataButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.PointCloud if hasattr(bpy.types, "PointCloud") else None diff --git a/release/scripts/startup/bl_ui/properties_data_speaker.py b/release/scripts/startup/bl_ui/properties_data_speaker.py index 9bdf0e22c2f..9bd62106f74 100644 --- a/release/scripts/startup/bl_ui/properties_data_speaker.py +++ b/release/scripts/startup/bl_ui/properties_data_speaker.py @@ -18,7 +18,12 @@ class DataButtonsPanel: class DATA_PT_context_speaker(DataButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -35,7 +40,12 @@ class DATA_PT_context_speaker(DataButtonsPanel, Panel): class DATA_PT_speaker(DataButtonsPanel, Panel): bl_label = "Sound" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -57,7 +67,12 @@ class DATA_PT_speaker(DataButtonsPanel, Panel): class DATA_PT_distance(DataButtonsPanel, Panel): bl_label = "Distance" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -81,7 +96,12 @@ class DATA_PT_distance(DataButtonsPanel, Panel): class DATA_PT_cone(DataButtonsPanel, Panel): bl_label = "Cone" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -103,7 +123,12 @@ class DATA_PT_cone(DataButtonsPanel, Panel): class DATA_PT_custom_props_speaker(DataButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.Speaker diff --git a/release/scripts/startup/bl_ui/properties_data_volume.py 
b/release/scripts/startup/bl_ui/properties_data_volume.py index 148bb60de85..e79cebb606a 100644 --- a/release/scripts/startup/bl_ui/properties_data_volume.py +++ b/release/scripts/startup/bl_ui/properties_data_volume.py @@ -18,7 +18,12 @@ class DataButtonsPanel: class DATA_PT_context_volume(DataButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -35,7 +40,12 @@ class DATA_PT_context_volume(DataButtonsPanel, Panel): class DATA_PT_volume_file(DataButtonsPanel, Panel): bl_label = "OpenVDB File" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -80,7 +90,12 @@ class VOLUME_UL_grids(UIList): class DATA_PT_volume_grids(DataButtonsPanel, Panel): bl_label = "Grids" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -93,7 +108,12 @@ class DATA_PT_volume_grids(DataButtonsPanel, Panel): class DATA_PT_volume_render(DataButtonsPanel, Panel): bl_label = "Render" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -125,7 +145,12 @@ class DATA_PT_volume_render(DataButtonsPanel, Panel): class DATA_PT_volume_viewport_display(DataButtonsPanel, Panel): bl_label = "Viewport Display" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -149,7 +174,12 @@ class DATA_PT_volume_viewport_display(DataButtonsPanel, Panel): class DATA_PT_volume_viewport_display_slicing(DataButtonsPanel, Panel): bl_label = "" bl_parent_id = 'DATA_PT_volume_viewport_display' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): layout = self.layout @@ -175,7 +205,12 @@ class DATA_PT_volume_viewport_display_slicing(DataButtonsPanel, Panel): class DATA_PT_custom_props_volume(DataButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.data" _property_type = bpy.types.Volume diff --git a/release/scripts/startup/bl_ui/properties_freestyle.py b/release/scripts/startup/bl_ui/properties_freestyle.py index e74e1725aa2..4d5a8e45207 100644 --- a/release/scripts/startup/bl_ui/properties_freestyle.py +++ 
b/release/scripts/startup/bl_ui/properties_freestyle.py @@ -21,7 +21,7 @@ class RENDER_PT_freestyle(RenderFreestyleButtonsPanel, Panel): bl_label = "Freestyle" bl_options = {'DEFAULT_CLOSED'} bl_order = 10 - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): rd = context.scene.render @@ -79,7 +79,7 @@ class ViewLayerFreestyleEditorButtonsPanel(ViewLayerFreestyleButtonsPanel): class ViewLayerFreestyleLineStyle(ViewLayerFreestyleEditorButtonsPanel): # Freestyle Linestyle Panels - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -123,7 +123,7 @@ class RENDER_MT_lineset_context_menu(Menu): class VIEWLAYER_PT_freestyle(ViewLayerFreestyleButtonsPanel, Panel): bl_label = "Freestyle" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): view_layer = context.view_layer @@ -153,7 +153,7 @@ class VIEWLAYER_PT_freestyle(ViewLayerFreestyleButtonsPanel, Panel): class VIEWLAYER_PT_freestyle_edge_detection(ViewLayerFreestyleButtonsPanel, Panel): bl_label = "Edge Detection" bl_parent_id = "VIEWLAYER_PT_freestyle" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -183,7 +183,7 @@ class VIEWLAYER_PT_freestyle_edge_detection(ViewLayerFreestyleButtonsPanel, Pane class VIEWLAYER_PT_freestyle_style_modules(ViewLayerFreestyleButtonsPanel, Panel): bl_label = "Style Modules" bl_parent_id = "VIEWLAYER_PT_freestyle" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -219,7 +219,7 @@ class VIEWLAYER_PT_freestyle_style_modules(ViewLayerFreestyleButtonsPanel, Panel class VIEWLAYER_PT_freestyle_lineset(ViewLayerFreestyleEditorButtonsPanel, Panel): bl_label = "Freestyle Line Set" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw_edge_type_buttons(self, box, lineset, edge_type): # property names @@ -282,7 +282,7 @@ class VIEWLAYER_PT_freestyle_lineset(ViewLayerFreestyleEditorButtonsPanel, Panel class VIEWLAYER_PT_freestyle_lineset_visibilty(ViewLayerFreestyleLineStyle, Panel): bl_label = "Visibility" bl_parent_id = "VIEWLAYER_PT_freestyle_lineset" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): layout = self.layout @@ -316,7 +316,7 @@ class VIEWLAYER_PT_freestyle_lineset_visibilty(ViewLayerFreestyleLineStyle, Pane class VIEWLAYER_PT_freestyle_lineset_edgetype(ViewLayerFreestyleLineStyle, Panel): bl_label = "Edge Type" bl_parent_id = "VIEWLAYER_PT_freestyle_lineset" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 
'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): layout = self.layout @@ -366,7 +366,7 @@ class VIEWLAYER_PT_freestyle_lineset_edgetype(ViewLayerFreestyleLineStyle, Panel class VIEWLAYER_PT_freestyle_lineset_facemarks(ViewLayerFreestyleLineStyle, Panel): bl_label = "Face Marks" bl_parent_id = "VIEWLAYER_PT_freestyle_lineset" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} bl_options = {'DEFAULT_CLOSED'} def draw_header(self, context): @@ -395,7 +395,7 @@ class VIEWLAYER_PT_freestyle_lineset_facemarks(ViewLayerFreestyleLineStyle, Pane class VIEWLAYER_PT_freestyle_lineset_collection(ViewLayerFreestyleLineStyle, Panel): bl_label = "Collection" bl_parent_id = "VIEWLAYER_PT_freestyle_lineset" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} bl_options = {'DEFAULT_CLOSED'} def draw_header(self, context): @@ -1236,7 +1236,7 @@ class MaterialFreestyleButtonsPanel: class MATERIAL_PT_freestyle_line(MaterialFreestyleButtonsPanel, Panel): bl_label = "Freestyle Line" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout diff --git a/release/scripts/startup/bl_ui/properties_grease_pencil_common.py b/release/scripts/startup/bl_ui/properties_grease_pencil_common.py index e0f5d65db16..1403bb0d07e 100644 --- a/release/scripts/startup/bl_ui/properties_grease_pencil_common.py +++ b/release/scripts/startup/bl_ui/properties_grease_pencil_common.py @@ -565,9 +565,11 @@ class GreasePencilMaterialsPanel: if is_view3d and ma is not None and ma.grease_pencil is not None: gpcolor = ma.grease_pencil - if gpcolor.stroke_style == 'SOLID': - row = layout.row() - row.prop(gpcolor, "color", text="Stroke Color") + col = layout.column(align=True) + if gpcolor.show_stroke and gpcolor.stroke_style == 'SOLID': + col.prop(gpcolor, "color", text="Stroke Color") + if gpcolor.show_fill and gpcolor.fill_style == 'SOLID': + col.prop(gpcolor, "fill_color", text="Fill Color") else: space = context.space_data diff --git a/release/scripts/startup/bl_ui/properties_material.py b/release/scripts/startup/bl_ui/properties_material.py index afcd1b753d2..efe67a0c294 100644 --- a/release/scripts/startup/bl_ui/properties_material.py +++ b/release/scripts/startup/bl_ui/properties_material.py @@ -60,7 +60,12 @@ class MATERIAL_PT_preview(MaterialButtonsPanel, Panel): class MATERIAL_PT_custom_props(MaterialButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "material" _property_type = bpy.types.Material @@ -69,7 +74,7 @@ class EEVEE_MATERIAL_PT_context_material(MaterialButtonsPanel, Panel): bl_label = "" bl_context = "material" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): diff --git a/release/scripts/startup/bl_ui/properties_material_gpencil.py 
b/release/scripts/startup/bl_ui/properties_material_gpencil.py index de1df732b7e..3db7b9fb484 100644 --- a/release/scripts/startup/bl_ui/properties_material_gpencil.py +++ b/release/scripts/startup/bl_ui/properties_material_gpencil.py @@ -228,7 +228,7 @@ class MATERIAL_PT_gpencil_preview(GPMaterialButtonsPanel, Panel): class MATERIAL_PT_gpencil_custom_props(GPMaterialButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} _context_path = "object.active_material" _property_type = bpy.types.Material diff --git a/release/scripts/startup/bl_ui/properties_object.py b/release/scripts/startup/bl_ui/properties_object.py index 7afa2f81b97..836ac2dcb75 100644 --- a/release/scripts/startup/bl_ui/properties_object.py +++ b/release/scripts/startup/bl_ui/properties_object.py @@ -200,7 +200,7 @@ class OBJECT_PT_display(ObjectButtonsPanel, Panel): col = layout.column(heading="Show") col.prop(obj, "show_name", text="Name") - col.prop(obj, "show_axis", text="Axis") + col.prop(obj, "show_axis", text="Axes") # Makes no sense for cameras, armatures, etc.! # but these settings do apply to dupli instances @@ -366,7 +366,7 @@ class OBJECT_PT_motion_paths_display(MotionPathButtonsPanel_display, Panel): class OBJECT_PT_visibility(ObjectButtonsPanel, Panel): bl_label = "Visibility" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -395,7 +395,7 @@ class OBJECT_PT_visibility(ObjectButtonsPanel, Panel): class OBJECT_PT_custom_props(ObjectButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} _context_path = "object" _property_type = bpy.types.Object diff --git a/release/scripts/startup/bl_ui/properties_output.py b/release/scripts/startup/bl_ui/properties_output.py index 00c81d0dc87..9d6e527dbd8 100644 --- a/release/scripts/startup/bl_ui/properties_output.py +++ b/release/scripts/startup/bl_ui/properties_output.py @@ -42,7 +42,12 @@ class RenderOutputButtonsPanel: class RENDER_PT_format(RenderOutputButtonsPanel, Panel): bl_label = "Format" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _frame_rate_args_prev = None _preset_class = None @@ -120,7 +125,12 @@ class RENDER_PT_format(RenderOutputButtonsPanel, Panel): class RENDER_PT_frame_range(RenderOutputButtonsPanel, Panel): bl_label = "Frame Range" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -139,7 +149,12 @@ class RENDER_PT_time_stretching(RenderOutputButtonsPanel, Panel): bl_label = "Time Stretching" bl_parent_id = "RENDER_PT_frame_range" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 
'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -156,7 +171,12 @@ class RENDER_PT_time_stretching(RenderOutputButtonsPanel, Panel): class RENDER_PT_post_processing(RenderOutputButtonsPanel, Panel): bl_label = "Post Processing" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -174,7 +194,12 @@ class RENDER_PT_post_processing(RenderOutputButtonsPanel, Panel): class RENDER_PT_stamp(RenderOutputButtonsPanel, Panel): bl_label = "Metadata" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -208,7 +233,12 @@ class RENDER_PT_stamp_note(RenderOutputButtonsPanel, Panel): bl_label = "Note" bl_parent_id = "RENDER_PT_stamp" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): rd = context.scene.render @@ -228,7 +258,12 @@ class RENDER_PT_stamp_burn(RenderOutputButtonsPanel, Panel): bl_label = "Burn Into Image" bl_parent_id = "RENDER_PT_stamp" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): rd = context.scene.render @@ -252,7 +287,12 @@ class RENDER_PT_stamp_burn(RenderOutputButtonsPanel, Panel): class RENDER_PT_output(RenderOutputButtonsPanel, Panel): bl_label = "Output" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -281,7 +321,12 @@ class RENDER_PT_output(RenderOutputButtonsPanel, Panel): class RENDER_PT_output_views(RenderOutputButtonsPanel, Panel): bl_label = "Views" bl_parent_id = "RENDER_PT_output" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -301,7 +346,12 @@ class RENDER_PT_output_color_management(RenderOutputButtonsPanel, Panel): bl_label = "Color Management" bl_options = {'DEFAULT_CLOSED'} bl_parent_id = "RENDER_PT_output" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): scene = context.scene @@ -336,7 +386,12 @@ class RENDER_PT_encoding(RenderOutputButtonsPanel, Panel): bl_label = "Encoding" bl_parent_id = "RENDER_PT_output" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 
'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header_preset(self, _context): RENDER_PT_ffmpeg_presets.draw_panel_header(self.layout) @@ -361,7 +416,12 @@ class RENDER_PT_encoding(RenderOutputButtonsPanel, Panel): class RENDER_PT_encoding_video(RenderOutputButtonsPanel, Panel): bl_label = "Video" bl_parent_id = "RENDER_PT_encoding" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -437,7 +497,12 @@ class RENDER_PT_encoding_video(RenderOutputButtonsPanel, Panel): class RENDER_PT_encoding_audio(RenderOutputButtonsPanel, Panel): bl_label = "Audio" bl_parent_id = "RENDER_PT_encoding" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -479,7 +544,12 @@ class RENDER_UL_renderviews(UIList): class RENDER_PT_stereoscopy(RenderOutputButtonsPanel, Panel): bl_label = "Stereoscopy" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} bl_options = {'DEFAULT_CLOSED'} def draw_header(self, context): diff --git a/release/scripts/startup/bl_ui/properties_particle.py b/release/scripts/startup/bl_ui/properties_particle.py index 8464578ea25..cbe6b9f3e6d 100644 --- a/release/scripts/startup/bl_ui/properties_particle.py +++ b/release/scripts/startup/bl_ui/properties_particle.py @@ -52,7 +52,12 @@ def particle_get_settings(context): class PARTICLE_MT_context_menu(Menu): bl_label = "Particle Specials" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -92,7 +97,12 @@ class PARTICLE_PT_hair_dynamics_presets(PresetPanel, Panel): preset_subdir = "hair_dynamics" preset_operator = "script.execute_preset" preset_add_operator = "particle.hair_dynamics_preset_add" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} class ParticleButtonsPanel: @@ -146,7 +156,12 @@ class PARTICLE_UL_particle_systems(bpy.types.UIList): class PARTICLE_PT_context_particles(ParticleButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -240,7 +255,12 @@ class PARTICLE_PT_context_particles(ParticleButtonsPanel, Panel): class PARTICLE_PT_emission(ParticleButtonsPanel, Panel): bl_label = "Emission" bl_translation_context = i18n_contexts.id_particlesettings - COMPAT_ENGINES = {'BLENDER_RENDER', 
'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -292,7 +312,12 @@ class PARTICLE_PT_emission_source(ParticleButtonsPanel, Panel): bl_label = "Source" bl_parent_id = "PARTICLE_PT_emission" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -329,7 +354,12 @@ class PARTICLE_PT_emission_source(ParticleButtonsPanel, Panel): class PARTICLE_PT_hair_dynamics(ParticleButtonsPanel, Panel): bl_label = "Hair Dynamics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -412,7 +442,12 @@ class PARTICLE_PT_hair_dynamics_collision(ParticleButtonsPanel, Panel): bl_label = "Collisions" bl_parent_id = "PARTICLE_PT_hair_dynamics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -444,7 +479,12 @@ class PARTICLE_PT_hair_dynamics_structure(ParticleButtonsPanel, Panel): bl_label = "Structure" bl_parent_id = "PARTICLE_PT_hair_dynamics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -475,7 +515,12 @@ class PARTICLE_PT_hair_dynamics_volume(ParticleButtonsPanel, Panel): bl_label = "Volume" bl_parent_id = "PARTICLE_PT_hair_dynamics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -506,7 +551,12 @@ class PARTICLE_PT_hair_dynamics_volume(ParticleButtonsPanel, Panel): class PARTICLE_PT_cache(ParticleButtonsPanel, Panel): bl_label = "Cache" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -539,7 +589,12 @@ class PARTICLE_PT_cache(ParticleButtonsPanel, Panel): class PARTICLE_PT_velocity(ParticleButtonsPanel, Panel): bl_label = "Velocity" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -588,7 +643,12 @@ class PARTICLE_PT_velocity(ParticleButtonsPanel, Panel): class 
PARTICLE_PT_rotation(ParticleButtonsPanel, Panel): bl_label = "Rotation" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -643,7 +703,12 @@ class PARTICLE_PT_rotation_angular_velocity(ParticleButtonsPanel, Panel): bl_label = "Angular Velocity" bl_parent_id = "PARTICLE_PT_rotation" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -668,7 +733,12 @@ class PARTICLE_PT_rotation_angular_velocity(ParticleButtonsPanel, Panel): class PARTICLE_PT_physics(ParticleButtonsPanel, Panel): bl_label = "Physics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -721,7 +791,12 @@ class PARTICLE_PT_physics_fluid_advanced(ParticleButtonsPanel, Panel): bl_label = "Advanced" bl_parent_id = "PARTICLE_PT_physics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -766,7 +841,12 @@ class PARTICLE_PT_physics_fluid_springs(ParticleButtonsPanel, Panel): bl_label = "Springs" bl_parent_id = "PARTICLE_PT_physics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -790,7 +870,12 @@ class PARTICLE_PT_physics_fluid_springs_viscoelastic(ParticleButtonsPanel, Panel bl_label = "Viscoelastic Springs" bl_parent_id = "PARTICLE_PT_physics_fluid_springs" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -826,7 +911,12 @@ class PARTICLE_PT_physics_fluid_springs_advanced(ParticleButtonsPanel, Panel): bl_label = "Advanced" bl_parent_id = "PARTICLE_PT_physics_fluid_springs" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -850,7 +940,12 @@ class PARTICLE_PT_physics_boids_movement(ParticleButtonsPanel, Panel): bl_label = "Movement" bl_parent_id = "PARTICLE_PT_physics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 
'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -903,7 +998,12 @@ class PARTICLE_PT_physics_boids_battle(ParticleButtonsPanel, Panel): bl_label = "Battle" bl_parent_id = "PARTICLE_PT_physics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -930,7 +1030,12 @@ class PARTICLE_PT_physics_boids_misc(ParticleButtonsPanel, Panel): bl_label = "Misc" bl_parent_id = "PARTICLE_PT_physics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -955,7 +1060,12 @@ class PARTICLE_PT_physics_relations(ParticleButtonsPanel, Panel): bl_label = "Relations" bl_parent_id = "PARTICLE_PT_physics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1010,7 +1120,12 @@ class PARTICLE_PT_physics_fluid_interaction(ParticleButtonsPanel, Panel): bl_label = "Fluid Interaction" bl_parent_id = "PARTICLE_PT_physics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1051,7 +1166,12 @@ class PARTICLE_PT_physics_deflection(ParticleButtonsPanel, Panel): bl_label = "Deflection" bl_parent_id = "PARTICLE_PT_physics" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1077,7 +1197,12 @@ class PARTICLE_PT_physics_deflection(ParticleButtonsPanel, Panel): class PARTICLE_PT_physics_forces(ParticleButtonsPanel, Panel): bl_label = "Forces" bl_parent_id = "PARTICLE_PT_physics" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1104,7 +1229,12 @@ class PARTICLE_PT_physics_integration(ParticleButtonsPanel, Panel): bl_label = "Integration" bl_options = {'DEFAULT_CLOSED'} bl_parent_id = "PARTICLE_PT_physics" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1138,7 +1268,12 @@ class PARTICLE_PT_boidbrain(ParticleButtonsPanel, Panel): bl_options = {'DEFAULT_CLOSED'} bl_parent_id = "PARTICLE_PT_physics" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 
'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1236,7 +1371,12 @@ class PARTICLE_PT_boidbrain(ParticleButtonsPanel, Panel): class PARTICLE_PT_render(ParticleButtonsPanel, Panel): bl_label = "Render" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1283,7 +1423,12 @@ class PARTICLE_PT_render_extra(ParticleButtonsPanel, Panel): bl_label = "Extra" bl_parent_id = "PARTICLE_PT_render" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1307,7 +1452,12 @@ class PARTICLE_PT_render_extra(ParticleButtonsPanel, Panel): class PARTICLE_PT_render_path(ParticleButtonsPanel, Panel): bl_label = "Path" bl_parent_id = "PARTICLE_PT_render" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1329,7 +1479,12 @@ class PARTICLE_PT_render_path_timing(ParticleButtonsPanel, Panel): bl_label = "Timing" bl_parent_id = "PARTICLE_PT_render" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1357,7 +1512,12 @@ class PARTICLE_PT_render_path_timing(ParticleButtonsPanel, Panel): class PARTICLE_PT_render_object(ParticleButtonsPanel, Panel): bl_label = "Object" bl_parent_id = "PARTICLE_PT_render" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1382,7 +1542,12 @@ class PARTICLE_PT_render_object(ParticleButtonsPanel, Panel): class PARTICLE_PT_render_collection(ParticleButtonsPanel, Panel): bl_label = "Collection" bl_parent_id = "PARTICLE_PT_render" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1412,7 +1577,12 @@ class PARTICLE_PT_render_collection_use_count(ParticleButtonsPanel, Panel): bl_label = "Use Count" bl_parent_id = "PARTICLE_PT_render_collection" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1460,7 +1630,12 @@ class PARTICLE_PT_render_collection_use_count(ParticleButtonsPanel, Panel): class 
PARTICLE_PT_draw(ParticleButtonsPanel, Panel): bl_label = "Viewport Display" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1519,7 +1694,12 @@ class PARTICLE_PT_children(ParticleButtonsPanel, Panel): bl_label = "Children" bl_translation_context = i18n_contexts.id_particlesettings bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1572,7 +1752,12 @@ class PARTICLE_PT_children_parting(ParticleButtonsPanel, Panel): bl_label = "Parting" bl_parent_id = "PARTICLE_PT_children" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1603,7 +1788,12 @@ class PARTICLE_PT_children_clumping(ParticleButtonsPanel, Panel): bl_label = "Clumping" bl_parent_id = "PARTICLE_PT_children" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1639,7 +1829,12 @@ class PARTICLE_PT_children_clumping_noise(ParticleButtonsPanel, Panel): bl_label = "Clump Noise" bl_parent_id = "PARTICLE_PT_children_clumping" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): @@ -1663,7 +1858,12 @@ class PARTICLE_PT_children_roughness(ParticleButtonsPanel, Panel): bl_translation_context = i18n_contexts.id_particlesettings bl_parent_id = "PARTICLE_PT_children" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1704,7 +1904,12 @@ class PARTICLE_PT_children_kink(ParticleButtonsPanel, Panel): bl_label = "Kink" bl_parent_id = "PARTICLE_PT_children" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1754,7 +1959,12 @@ class PARTICLE_PT_children_kink(ParticleButtonsPanel, Panel): class PARTICLE_PT_field_weights(ParticleButtonsPanel, Panel): bl_label = "Field Weights" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 
'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1775,7 +1985,12 @@ class PARTICLE_PT_field_weights(ParticleButtonsPanel, Panel): class PARTICLE_PT_force_fields(ParticleButtonsPanel, Panel): bl_label = "Force Field Settings" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -1791,7 +2006,12 @@ class PARTICLE_PT_force_fields(ParticleButtonsPanel, Panel): class PARTICLE_PT_force_fields_type1(ParticleButtonsPanel, Panel): bl_label = "Type 1" bl_parent_id = "PARTICLE_PT_force_fields" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -1808,7 +2028,12 @@ class PARTICLE_PT_force_fields_type1(ParticleButtonsPanel, Panel): class PARTICLE_PT_force_fields_type2(ParticleButtonsPanel, Panel): bl_label = "Type 2" bl_parent_id = "PARTICLE_PT_force_fields" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -1826,7 +2051,12 @@ class PARTICLE_PT_force_fields_type1_falloff(ParticleButtonsPanel, Panel): bl_label = "Falloff" bl_options = {'DEFAULT_CLOSED'} bl_parent_id = "PARTICLE_PT_force_fields_type1" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -1842,7 +2072,12 @@ class PARTICLE_PT_force_fields_type2_falloff(ParticleButtonsPanel, Panel): bl_label = "Falloff" bl_options = {'DEFAULT_CLOSED'} bl_parent_id = "PARTICLE_PT_force_fields_type2" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -1857,7 +2092,12 @@ class PARTICLE_PT_force_fields_type2_falloff(ParticleButtonsPanel, Panel): class PARTICLE_PT_vertexgroups(ParticleButtonsPanel, Panel): bl_label = "Vertex Groups" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1946,7 +2186,12 @@ class PARTICLE_PT_vertexgroups(ParticleButtonsPanel, Panel): class PARTICLE_PT_textures(ParticleButtonsPanel, Panel): bl_label = "Textures" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1978,7 +2223,12 @@ class PARTICLE_PT_textures(ParticleButtonsPanel, Panel): class 
PARTICLE_PT_hair_shape(ParticleButtonsPanel, Panel): bl_label = "Hair Shape" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -2006,7 +2256,12 @@ class PARTICLE_PT_hair_shape(ParticleButtonsPanel, Panel): class PARTICLE_PT_custom_props(ParticleButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "particle_system.settings" _property_type = bpy.types.ParticleSettings diff --git a/release/scripts/startup/bl_ui/properties_physics_cloth.py b/release/scripts/startup/bl_ui/properties_physics_cloth.py index 335cf08a715..fb6eb653f25 100644 --- a/release/scripts/startup/bl_ui/properties_physics_cloth.py +++ b/release/scripts/startup/bl_ui/properties_physics_cloth.py @@ -35,7 +35,12 @@ class PhysicButtonsPanel: class PHYSICS_PT_cloth(PhysicButtonsPanel, Panel): bl_label = "Cloth" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header_preset(self, _context): CLOTH_PT_presets.draw_panel_header(self.layout) @@ -60,7 +65,12 @@ class PHYSICS_PT_cloth(PhysicButtonsPanel, Panel): class PHYSICS_PT_cloth_physical_properties(PhysicButtonsPanel, Panel): bl_label = "Physical Properties" bl_parent_id = 'PHYSICS_PT_cloth' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -84,7 +94,12 @@ class PHYSICS_PT_cloth_physical_properties(PhysicButtonsPanel, Panel): class PHYSICS_PT_cloth_stiffness(PhysicButtonsPanel, Panel): bl_label = "Stiffness" bl_parent_id = 'PHYSICS_PT_cloth_physical_properties' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -115,7 +130,12 @@ class PHYSICS_PT_cloth_stiffness(PhysicButtonsPanel, Panel): class PHYSICS_PT_cloth_damping(PhysicButtonsPanel, Panel): bl_label = "Damping" bl_parent_id = 'PHYSICS_PT_cloth_physical_properties' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -146,7 +166,12 @@ class PHYSICS_PT_cloth_damping(PhysicButtonsPanel, Panel): class PHYSICS_PT_cloth_internal_springs(PhysicButtonsPanel, Panel): bl_label = "Internal Springs" bl_parent_id = 'PHYSICS_PT_cloth_physical_properties' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def 
draw_header(self, context): cloth = context.cloth.settings @@ -188,7 +213,12 @@ class PHYSICS_PT_cloth_internal_springs(PhysicButtonsPanel, Panel): class PHYSICS_PT_cloth_pressure(PhysicButtonsPanel, Panel): bl_label = "Pressure" bl_parent_id = 'PHYSICS_PT_cloth_physical_properties' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): cloth = context.cloth.settings @@ -232,7 +262,12 @@ class PHYSICS_PT_cloth_cache(PhysicButtonsPanel, Panel): bl_label = "Cache" bl_parent_id = 'PHYSICS_PT_cloth' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): md = context.cloth @@ -243,7 +278,12 @@ class PHYSICS_PT_cloth_shape(PhysicButtonsPanel, Panel): bl_label = "Shape" bl_parent_id = 'PHYSICS_PT_cloth' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -293,7 +333,12 @@ class PHYSICS_PT_cloth_collision(PhysicButtonsPanel, Panel): bl_label = "Collisions" bl_parent_id = 'PHYSICS_PT_cloth' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -313,7 +358,12 @@ class PHYSICS_PT_cloth_collision(PhysicButtonsPanel, Panel): class PHYSICS_PT_cloth_object_collision(PhysicButtonsPanel, Panel): bl_label = "Object Collisions" bl_parent_id = 'PHYSICS_PT_cloth_collision' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): cloth = context.cloth.collision_settings @@ -349,7 +399,12 @@ class PHYSICS_PT_cloth_object_collision(PhysicButtonsPanel, Panel): class PHYSICS_PT_cloth_self_collision(PhysicButtonsPanel, Panel): bl_label = "Self Collisions" bl_parent_id = 'PHYSICS_PT_cloth_collision' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): cloth = context.cloth.collision_settings @@ -386,7 +441,12 @@ class PHYSICS_PT_cloth_property_weights(PhysicButtonsPanel, Panel): bl_label = "Property Weights" bl_parent_id = 'PHYSICS_PT_cloth' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -440,7 +500,12 @@ class PHYSICS_PT_cloth_field_weights(PhysicButtonsPanel, Panel): bl_label = "Field Weights" 
bl_parent_id = 'PHYSICS_PT_cloth' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): cloth = context.cloth.settings diff --git a/release/scripts/startup/bl_ui/properties_physics_common.py b/release/scripts/startup/bl_ui/properties_physics_common.py index 4146a8ca51a..cae4687faac 100644 --- a/release/scripts/startup/bl_ui/properties_physics_common.py +++ b/release/scripts/startup/bl_ui/properties_physics_common.py @@ -50,7 +50,12 @@ def physics_add_special(layout, data, name, addop, removeop, typeicon): class PHYSICS_PT_add(PhysicButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout diff --git a/release/scripts/startup/bl_ui/properties_physics_dynamicpaint.py b/release/scripts/startup/bl_ui/properties_physics_dynamicpaint.py index f71fc56a9f0..2dc79088167 100644 --- a/release/scripts/startup/bl_ui/properties_physics_dynamicpaint.py +++ b/release/scripts/startup/bl_ui/properties_physics_dynamicpaint.py @@ -83,7 +83,12 @@ class PhysicButtonsPanel: class PHYSICS_PT_dynamic_paint(PhysicButtonsPanel, Panel): bl_label = "Dynamic Paint" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -104,7 +109,12 @@ class PHYSICS_PT_dynamic_paint(PhysicButtonsPanel, Panel): class PHYSICS_PT_dynamic_paint_settings(PhysicButtonsPanel, Panel): bl_label = "Settings" bl_parent_id = 'PHYSICS_PT_dynamic_paint' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -188,7 +198,12 @@ class PHYSICS_PT_dynamic_paint_settings(PhysicButtonsPanel, Panel): class PHYSICS_PT_dp_surface_canvas(PhysicButtonsPanel, Panel): bl_label = "Surface" bl_parent_id = "PHYSICS_PT_dynamic_paint" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -251,7 +266,12 @@ class PHYSICS_PT_dp_surface_canvas_paint_dry(PhysicButtonsPanel, Panel): bl_label = "Dry" bl_parent_id = "PHYSICS_PT_dp_surface_canvas" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -287,7 +307,12 @@ class PHYSICS_PT_dp_surface_canvas_paint_dissolve(PhysicButtonsPanel, Panel): bl_label = "Dissolve" bl_parent_id = "PHYSICS_PT_dp_surface_canvas" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 
'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -324,7 +349,12 @@ class PHYSICS_PT_dp_canvas_output(PhysicButtonsPanel, Panel): bl_label = "Output" bl_parent_id = "PHYSICS_PT_dynamic_paint" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -400,7 +430,12 @@ class PHYSICS_PT_dp_canvas_output_paintmaps(PhysicButtonsPanel, Panel): bl_label = "Paintmaps" bl_parent_id = "PHYSICS_PT_dp_canvas_output" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -430,7 +465,12 @@ class PHYSICS_PT_dp_canvas_output_wetmaps(PhysicButtonsPanel, Panel): bl_label = "Wetmaps" bl_parent_id = "PHYSICS_PT_dp_canvas_output" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -460,7 +500,12 @@ class PHYSICS_PT_dp_canvas_initial_color(PhysicButtonsPanel, Panel): bl_label = "Initial Color" bl_parent_id = "PHYSICS_PT_dynamic_paint" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -500,7 +545,12 @@ class PHYSICS_PT_dp_effects(PhysicButtonsPanel, Panel): bl_label = "Effects" bl_parent_id = 'PHYSICS_PT_dynamic_paint' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -517,7 +567,12 @@ class PHYSICS_PT_dp_effects_spread(PhysicButtonsPanel, Panel): bl_label = "Spread" bl_parent_id = "PHYSICS_PT_dp_effects" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -552,7 +607,12 @@ class PHYSICS_PT_dp_effects_drip(PhysicButtonsPanel, Panel): bl_label = "Drip" bl_parent_id = "PHYSICS_PT_dp_effects" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -588,7 +648,12 @@ class PHYSICS_PT_dp_effects_drip_weights(PhysicButtonsPanel, Panel): bl_label = "Weights" bl_parent_id = "PHYSICS_PT_dp_effects_drip" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES 
= {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -612,7 +677,12 @@ class PHYSICS_PT_dp_effects_shrink(PhysicButtonsPanel, Panel): bl_label = "Shrink" bl_parent_id = "PHYSICS_PT_dp_effects" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -642,7 +712,12 @@ class PHYSICS_PT_dp_cache(PhysicButtonsPanel, Panel): bl_label = "Cache" bl_parent_id = "PHYSICS_PT_dynamic_paint" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -662,7 +737,12 @@ class PHYSICS_PT_dp_cache(PhysicButtonsPanel, Panel): class PHYSICS_PT_dp_brush_source(PhysicButtonsPanel, Panel): bl_label = "Source" bl_parent_id = "PHYSICS_PT_dynamic_paint" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -725,7 +805,12 @@ class PHYSICS_PT_dp_brush_source(PhysicButtonsPanel, Panel): class PHYSICS_PT_dp_brush_source_color_ramp(PhysicButtonsPanel, Panel): bl_label = "Falloff Ramp" bl_parent_id = "PHYSICS_PT_dp_brush_source" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -752,7 +837,12 @@ class PHYSICS_PT_dp_brush_velocity(PhysicButtonsPanel, Panel): bl_label = "Velocity" bl_parent_id = "PHYSICS_PT_dynamic_paint" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -783,7 +873,12 @@ class PHYSICS_PT_dp_brush_velocity_color_ramp(PhysicButtonsPanel, Panel): bl_label = "Ramp" bl_parent_id = "PHYSICS_PT_dp_brush_velocity" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -804,7 +899,12 @@ class PHYSICS_PT_dp_brush_velocity_smudge(PhysicButtonsPanel, Panel): bl_label = "Smudge" bl_parent_id = "PHYSICS_PT_dp_brush_velocity" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -832,7 +932,12 @@ class PHYSICS_PT_dp_brush_wave(PhysicButtonsPanel, Panel): 
bl_label = "Waves" bl_parent_id = "PHYSICS_PT_dynamic_paint" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): diff --git a/release/scripts/startup/bl_ui/properties_physics_field.py b/release/scripts/startup/bl_ui/properties_physics_field.py index 36d5dc7f68d..757ac57c171 100644 --- a/release/scripts/startup/bl_ui/properties_physics_field.py +++ b/release/scripts/startup/bl_ui/properties_physics_field.py @@ -27,7 +27,12 @@ class PhysicButtonsPanel: class PHYSICS_PT_field(PhysicButtonsPanel, Panel): bl_label = "Force Fields" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -49,7 +54,12 @@ class PHYSICS_PT_field(PhysicButtonsPanel, Panel): class PHYSICS_PT_field_settings(PhysicButtonsPanel, Panel): bl_label = "Settings" bl_parent_id = 'PHYSICS_PT_field' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -136,7 +146,12 @@ class PHYSICS_PT_field_settings(PhysicButtonsPanel, Panel): class PHYSICS_PT_field_settings_kink(PhysicButtonsPanel, Panel): bl_label = "Kink" bl_parent_id = 'PHYSICS_PT_field_settings' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -170,7 +185,12 @@ class PHYSICS_PT_field_settings_kink(PhysicButtonsPanel, Panel): class PHYSICS_PT_field_settings_texture_select(PhysicButtonsPanel, Panel): bl_label = "Texture" bl_parent_id = 'PHYSICS_PT_field_settings' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -192,7 +212,12 @@ class PHYSICS_PT_field_settings_texture_select(PhysicButtonsPanel, Panel): class PHYSICS_PT_field_falloff(PhysicButtonsPanel, Panel): bl_label = "Falloff" bl_parent_id = "PHYSICS_PT_field" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -217,7 +242,12 @@ class PHYSICS_PT_field_falloff(PhysicButtonsPanel, Panel): class PHYSICS_PT_field_falloff_angular(PhysicButtonsPanel, Panel): bl_label = "Angular" bl_parent_id = "PHYSICS_PT_field_falloff" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -256,7 +286,12 @@ class PHYSICS_PT_field_falloff_angular(PhysicButtonsPanel, Panel): class 
PHYSICS_PT_field_falloff_radial(PhysicButtonsPanel, Panel): bl_label = "Radial" bl_parent_id = "PHYSICS_PT_field_falloff" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -300,7 +335,12 @@ def collision_warning(layout): class PHYSICS_PT_collision(PhysicButtonsPanel, Panel): bl_label = "Collision" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -331,7 +371,12 @@ class PHYSICS_PT_collision(PhysicButtonsPanel, Panel): class PHYSICS_PT_collision_particle(PhysicButtonsPanel, Panel): bl_label = "Particle" bl_parent_id = "PHYSICS_PT_collision" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -377,7 +422,12 @@ class PHYSICS_PT_collision_particle(PhysicButtonsPanel, Panel): class PHYSICS_PT_collision_softbody(PhysicButtonsPanel, Panel): bl_label = "Softbody & Cloth" bl_parent_id = "PHYSICS_PT_collision" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): diff --git a/release/scripts/startup/bl_ui/properties_physics_fluid.py b/release/scripts/startup/bl_ui/properties_physics_fluid.py index ef8ee7712e5..cbf385adbc8 100644 --- a/release/scripts/startup/bl_ui/properties_physics_fluid.py +++ b/release/scripts/startup/bl_ui/properties_physics_fluid.py @@ -98,7 +98,12 @@ class PhysicButtonsPanel: class PHYSICS_PT_fluid(PhysicButtonsPanel, Panel): bl_label = "Fluid" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -122,7 +127,12 @@ class PHYSICS_PT_fluid(PhysicButtonsPanel, Panel): class PHYSICS_PT_settings(PhysicButtonsPanel, Panel): bl_label = "Settings" bl_parent_id = 'PHYSICS_PT_fluid' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -285,7 +295,12 @@ class PHYSICS_PT_settings(PhysicButtonsPanel, Panel): class PHYSICS_PT_borders(PhysicButtonsPanel, Panel): bl_label = "Border Collisions" bl_parent_id = 'PHYSICS_PT_settings' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -318,7 +333,12 @@ class PHYSICS_PT_borders(PhysicButtonsPanel, Panel): class PHYSICS_PT_smoke(PhysicButtonsPanel, Panel): bl_label = "Gas" bl_parent_id = 'PHYSICS_PT_fluid' - COMPAT_ENGINES = 
{'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -351,7 +371,12 @@ class PHYSICS_PT_smoke_dissolve(PhysicButtonsPanel, Panel): bl_label = "Dissolve" bl_parent_id = 'PHYSICS_PT_smoke' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -395,7 +420,12 @@ class PHYSICS_PT_fire(PhysicButtonsPanel, Panel): bl_label = "Fire" bl_parent_id = 'PHYSICS_PT_smoke' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -434,7 +464,12 @@ class PHYSICS_PT_fire(PhysicButtonsPanel, Panel): class PHYSICS_PT_liquid(PhysicButtonsPanel, Panel): bl_label = "Liquid" bl_parent_id = 'PHYSICS_PT_fluid' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -497,7 +532,12 @@ class PHYSICS_PT_flow_source(PhysicButtonsPanel, Panel): bl_label = "Flow Source" bl_parent_id = 'PHYSICS_PT_settings' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -538,7 +578,12 @@ class PHYSICS_PT_flow_source(PhysicButtonsPanel, Panel): class PHYSICS_PT_flow_initial_velocity(PhysicButtonsPanel, Panel): bl_label = "Initial Velocity" bl_parent_id = 'PHYSICS_PT_settings' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -580,7 +625,12 @@ class PHYSICS_PT_flow_texture(PhysicButtonsPanel, Panel): bl_label = "Texture" bl_parent_id = 'PHYSICS_PT_settings' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -631,7 +681,12 @@ class PHYSICS_PT_adaptive_domain(PhysicButtonsPanel, Panel): bl_label = "Adaptive Domain" bl_parent_id = 'PHYSICS_PT_settings' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -683,7 +738,12 @@ class PHYSICS_PT_noise(PhysicButtonsPanel, Panel): bl_label = "Noise" bl_parent_id = 'PHYSICS_PT_smoke' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = 
{'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -763,7 +823,12 @@ class PHYSICS_PT_mesh(PhysicButtonsPanel, Panel): bl_label = "Mesh" bl_parent_id = 'PHYSICS_PT_liquid' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -858,7 +923,12 @@ class PHYSICS_PT_particles(PhysicButtonsPanel, Panel): bl_label = "Particles" bl_parent_id = 'PHYSICS_PT_liquid' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -989,7 +1059,12 @@ class PHYSICS_PT_viscosity(PhysicButtonsPanel, Panel): bl_label = "Viscosity" bl_parent_id = 'PHYSICS_PT_liquid' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1029,7 +1104,12 @@ class PHYSICS_PT_diffusion(PhysicButtonsPanel, Panel): bl_label = "Diffusion" bl_parent_id = 'PHYSICS_PT_liquid' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1076,7 +1156,12 @@ class PHYSICS_PT_guide(PhysicButtonsPanel, Panel): bl_label = "Guides" bl_parent_id = 'PHYSICS_PT_fluid' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1142,7 +1227,12 @@ class PHYSICS_PT_collections(PhysicButtonsPanel, Panel): bl_label = "Collections" bl_parent_id = 'PHYSICS_PT_fluid' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1169,7 +1259,12 @@ class PHYSICS_PT_collections(PhysicButtonsPanel, Panel): class PHYSICS_PT_cache(PhysicButtonsPanel, Panel): bl_label = "Cache" bl_parent_id = 'PHYSICS_PT_fluid' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1253,7 +1348,12 @@ class PHYSICS_PT_export(PhysicButtonsPanel, Panel): bl_label = "Advanced" bl_parent_id = 'PHYSICS_PT_cache' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 
'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1298,7 +1398,12 @@ class PHYSICS_PT_field_weights(PhysicButtonsPanel, Panel): bl_label = "Field Weights" bl_parent_id = 'PHYSICS_PT_fluid' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -1487,7 +1592,12 @@ class PHYSICS_PT_fluid_domain_render(PhysicButtonsPanel, Panel): bl_label = "Render" bl_parent_id = 'PHYSICS_PT_fluid' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): diff --git a/release/scripts/startup/bl_ui/properties_physics_rigidbody.py b/release/scripts/startup/bl_ui/properties_physics_rigidbody.py index 85d1c883b50..26e55311fa8 100644 --- a/release/scripts/startup/bl_ui/properties_physics_rigidbody.py +++ b/release/scripts/startup/bl_ui/properties_physics_rigidbody.py @@ -19,7 +19,12 @@ class PHYSICS_PT_rigidbody_panel: class PHYSICS_PT_rigid_body(PHYSICS_PT_rigidbody_panel, Panel): bl_label = "Rigid Body" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -54,7 +59,12 @@ class PHYSICS_PT_rigid_body(PHYSICS_PT_rigidbody_panel, Panel): class PHYSICS_PT_rigid_body_settings(PHYSICS_PT_rigidbody_panel, Panel): bl_label = "Settings" bl_parent_id = 'PHYSICS_PT_rigid_body' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -86,7 +96,12 @@ class PHYSICS_PT_rigid_body_settings(PHYSICS_PT_rigidbody_panel, Panel): class PHYSICS_PT_rigid_body_collisions(PHYSICS_PT_rigidbody_panel, Panel): bl_label = "Collisions" bl_parent_id = 'PHYSICS_PT_rigid_body' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -136,7 +151,12 @@ class PHYSICS_PT_rigid_body_collisions_surface(PHYSICS_PT_rigidbody_panel, Panel bl_label = "Surface Response" bl_parent_id = 'PHYSICS_PT_rigid_body_collisions' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -164,7 +184,12 @@ class PHYSICS_PT_rigid_body_collisions_sensitivity(PHYSICS_PT_rigidbody_panel, P bl_label = "Sensitivity" bl_parent_id = 'PHYSICS_PT_rigid_body_collisions' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 
'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -201,7 +226,12 @@ class PHYSICS_PT_rigid_body_collisions_collections(PHYSICS_PT_rigidbody_panel, P bl_label = "Collections" bl_parent_id = 'PHYSICS_PT_rigid_body_collisions' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -223,7 +253,12 @@ class PHYSICS_PT_rigid_body_dynamics(PHYSICS_PT_rigidbody_panel, Panel): bl_label = "Dynamics" bl_parent_id = 'PHYSICS_PT_rigid_body' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -256,7 +291,12 @@ class PHYSICS_PT_rigid_body_dynamics_deactivation(PHYSICS_PT_rigidbody_panel, Pa bl_label = "Deactivation" bl_parent_id = 'PHYSICS_PT_rigid_body_dynamics' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): diff --git a/release/scripts/startup/bl_ui/properties_physics_rigidbody_constraint.py b/release/scripts/startup/bl_ui/properties_physics_rigidbody_constraint.py index 12b64abec8f..4c4c2f1f27a 100644 --- a/release/scripts/startup/bl_ui/properties_physics_rigidbody_constraint.py +++ b/release/scripts/startup/bl_ui/properties_physics_rigidbody_constraint.py @@ -13,7 +13,12 @@ class PHYSICS_PT_rigidbody_constraint_panel: class PHYSICS_PT_rigid_body_constraint(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Rigid Body Constraint" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -33,7 +38,12 @@ class PHYSICS_PT_rigid_body_constraint(PHYSICS_PT_rigidbody_constraint_panel, Pa class PHYSICS_PT_rigid_body_constraint_settings(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Settings" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -64,7 +74,12 @@ class PHYSICS_PT_rigid_body_constraint_settings(PHYSICS_PT_rigidbody_constraint_ class PHYSICS_PT_rigid_body_constraint_objects(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Objects" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -85,7 +100,12 @@ class 
PHYSICS_PT_rigid_body_constraint_objects(PHYSICS_PT_rigidbody_constraint_p class PHYSICS_PT_rigid_body_constraint_override_iterations(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Override Iterations" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -111,7 +131,12 @@ class PHYSICS_PT_rigid_body_constraint_override_iterations(PHYSICS_PT_rigidbody_ class PHYSICS_PT_rigid_body_constraint_limits(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Limits" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -128,7 +153,12 @@ class PHYSICS_PT_rigid_body_constraint_limits(PHYSICS_PT_rigidbody_constraint_pa class PHYSICS_PT_rigid_body_constraint_limits_linear(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Linear" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint_limits' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -185,7 +215,12 @@ class PHYSICS_PT_rigid_body_constraint_limits_linear(PHYSICS_PT_rigidbody_constr class PHYSICS_PT_rigid_body_constraint_limits_angular(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Angular" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint_limits' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -251,7 +286,12 @@ class PHYSICS_PT_rigid_body_constraint_limits_angular(PHYSICS_PT_rigidbody_const class PHYSICS_PT_rigid_body_constraint_motor(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Motor" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -268,7 +308,12 @@ class PHYSICS_PT_rigid_body_constraint_motor(PHYSICS_PT_rigidbody_constraint_pan class PHYSICS_PT_rigid_body_constraint_motor_angular(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Angular" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint_motor' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -304,7 +349,12 @@ class PHYSICS_PT_rigid_body_constraint_motor_angular(PHYSICS_PT_rigidbody_constr class PHYSICS_PT_rigid_body_constraint_motor_linear(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Linear" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint_motor' - COMPAT_ENGINES = 
{'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -340,7 +390,12 @@ class PHYSICS_PT_rigid_body_constraint_motor_linear(PHYSICS_PT_rigidbody_constra class PHYSICS_PT_rigid_body_constraint_springs(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Springs" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -364,7 +419,12 @@ class PHYSICS_PT_rigid_body_constraint_springs(PHYSICS_PT_rigidbody_constraint_p class PHYSICS_PT_rigid_body_constraint_springs_angular(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Angular" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint_springs' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -412,7 +472,12 @@ class PHYSICS_PT_rigid_body_constraint_springs_angular(PHYSICS_PT_rigidbody_cons class PHYSICS_PT_rigid_body_constraint_springs_linear(PHYSICS_PT_rigidbody_constraint_panel, Panel): bl_label = "Linear" bl_parent_id = 'PHYSICS_PT_rigid_body_constraint_springs' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): diff --git a/release/scripts/startup/bl_ui/properties_physics_softbody.py b/release/scripts/startup/bl_ui/properties_physics_softbody.py index ade331ac649..988ec0659dd 100644 --- a/release/scripts/startup/bl_ui/properties_physics_softbody.py +++ b/release/scripts/startup/bl_ui/properties_physics_softbody.py @@ -28,7 +28,12 @@ class PhysicButtonsPanel: class PHYSICS_PT_softbody(PhysicButtonsPanel, Panel): bl_label = "Soft Body" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -44,7 +49,12 @@ class PHYSICS_PT_softbody_object(PhysicButtonsPanel, Panel): bl_label = "Object" bl_parent_id = 'PHYSICS_PT_softbody' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -72,7 +82,12 @@ class PHYSICS_PT_softbody_simulation(PhysicButtonsPanel, Panel): bl_label = "Simulation" bl_parent_id = 'PHYSICS_PT_softbody' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -90,7 +105,12 @@ class 
PHYSICS_PT_softbody_cache(PhysicButtonsPanel, Panel): bl_label = "Cache" bl_parent_id = 'PHYSICS_PT_softbody' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): md = context.soft_body @@ -101,7 +121,12 @@ class PHYSICS_PT_softbody_goal(PhysicButtonsPanel, Panel): bl_label = "Goal" bl_parent_id = 'PHYSICS_PT_softbody' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): softbody = context.soft_body.settings @@ -126,7 +151,12 @@ class PHYSICS_PT_softbody_goal_strengths(PhysicButtonsPanel, Panel): bl_label = "Strengths" bl_parent_id = 'PHYSICS_PT_softbody_goal' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -152,7 +182,12 @@ class PHYSICS_PT_softbody_goal_settings(PhysicButtonsPanel, Panel): bl_label = "Settings" bl_parent_id = 'PHYSICS_PT_softbody_goal' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -175,7 +210,12 @@ class PHYSICS_PT_softbody_edge(PhysicButtonsPanel, Panel): bl_label = "Edges" bl_parent_id = 'PHYSICS_PT_softbody' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): softbody = context.soft_body.settings @@ -226,7 +266,12 @@ class PHYSICS_PT_softbody_edge_aerodynamics(PhysicButtonsPanel, Panel): bl_label = "Aerodynamics" bl_parent_id = 'PHYSICS_PT_softbody_edge' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -249,7 +294,12 @@ class PHYSICS_PT_softbody_edge_stiffness(PhysicButtonsPanel, Panel): bl_label = "Stiffness" bl_parent_id = 'PHYSICS_PT_softbody_edge' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): softbody = context.soft_body.settings @@ -273,7 +323,12 @@ class PHYSICS_PT_softbody_collision(PhysicButtonsPanel, Panel): bl_label = "Self Collision" bl_parent_id = 'PHYSICS_PT_softbody' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = 
{ + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): softbody = context.soft_body.settings @@ -308,7 +363,12 @@ class PHYSICS_PT_softbody_solver(PhysicButtonsPanel, Panel): bl_label = "Solver" bl_parent_id = 'PHYSICS_PT_softbody' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -333,7 +393,12 @@ class PHYSICS_PT_softbody_solver_diagnostics(PhysicButtonsPanel, Panel): bl_label = "Diagnostics" bl_parent_id = 'PHYSICS_PT_softbody_solver' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -352,7 +417,12 @@ class PHYSICS_PT_softbody_solver_helpers(PhysicButtonsPanel, Panel): bl_label = "Helpers" bl_parent_id = 'PHYSICS_PT_softbody_solver' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -375,7 +445,12 @@ class PHYSICS_PT_softbody_field_weights(PhysicButtonsPanel, Panel): bl_label = "Field Weights" bl_parent_id = 'PHYSICS_PT_softbody' bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): md = context.soft_body diff --git a/release/scripts/startup/bl_ui/properties_render.py b/release/scripts/startup/bl_ui/properties_render.py index dafe32c5e5d..2296d7ad157 100644 --- a/release/scripts/startup/bl_ui/properties_render.py +++ b/release/scripts/startup/bl_ui/properties_render.py @@ -47,7 +47,12 @@ class RENDER_PT_color_management(RenderButtonsPanel, Panel): bl_label = "Color Management" bl_options = {'DEFAULT_CLOSED'} bl_order = 100 - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -80,7 +85,12 @@ class RENDER_PT_color_management_curves(RenderButtonsPanel, Panel): bl_label = "Use Curves" bl_parent_id = "RENDER_PT_color_management" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): @@ -640,7 +650,7 @@ class RENDER_PT_eevee_hair(RenderButtonsPanel, Panel): class RENDER_PT_eevee_performance(RenderButtonsPanel, Panel): bl_label = "Performance" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH', 
'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -661,7 +671,12 @@ class RENDER_PT_gpencil(RenderButtonsPanel, Panel): bl_label = "Grease Pencil" bl_options = {'DEFAULT_CLOSED'} bl_order = 10 - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -677,7 +692,7 @@ class RENDER_PT_gpencil(RenderButtonsPanel, Panel): class RENDER_PT_opengl_sampling(RenderButtonsPanel, Panel): bl_label = "Sampling" - COMPAT_ENGINES = {'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -699,7 +714,7 @@ class RENDER_PT_opengl_sampling(RenderButtonsPanel, Panel): class RENDER_PT_opengl_film(RenderButtonsPanel, Panel): bl_label = "Film" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -712,7 +727,7 @@ class RENDER_PT_opengl_film(RenderButtonsPanel, Panel): class RENDER_PT_opengl_lighting(RenderButtonsPanel, Panel): bl_label = "Lighting" - COMPAT_ENGINES = {'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -724,7 +739,7 @@ class RENDER_PT_opengl_lighting(RenderButtonsPanel, Panel): class RENDER_PT_opengl_color(RenderButtonsPanel, Panel): bl_label = "Color" - COMPAT_ENGINES = {'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -736,7 +751,7 @@ class RENDER_PT_opengl_color(RenderButtonsPanel, Panel): class RENDER_PT_opengl_options(RenderButtonsPanel, Panel): bl_label = "Options" - COMPAT_ENGINES = {'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -749,7 +764,12 @@ class RENDER_PT_opengl_options(RenderButtonsPanel, Panel): class RENDER_PT_simplify(RenderButtonsPanel, Panel): bl_label = "Simplify" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): rd = context.scene.render @@ -762,7 +782,12 @@ class RENDER_PT_simplify(RenderButtonsPanel, Panel): class RENDER_PT_simplify_viewport(RenderButtonsPanel, Panel): bl_label = "Viewport" bl_parent_id = "RENDER_PT_simplify" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -787,7 +812,12 @@ class RENDER_PT_simplify_viewport(RenderButtonsPanel, Panel): class RENDER_PT_simplify_render(RenderButtonsPanel, Panel): bl_label = "Render" bl_parent_id = "RENDER_PT_simplify" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -815,7 +845,7 @@ class RENDER_PT_simplify_greasepencil(RenderButtonsPanel, Panel, 
GreasePencilSim 'BLENDER_CLAY', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', - 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT', } bl_options = {'DEFAULT_CLOSED'} diff --git a/release/scripts/startup/bl_ui/properties_texture.py b/release/scripts/startup/bl_ui/properties_texture.py index d9c51397d6e..8abd8b61839 100644 --- a/release/scripts/startup/bl_ui/properties_texture.py +++ b/release/scripts/startup/bl_ui/properties_texture.py @@ -67,7 +67,12 @@ class TextureButtonsPanel: class TEXTURE_PT_preview(TextureButtonsPanel, Panel): bl_label = "Preview" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -96,7 +101,12 @@ class TEXTURE_PT_context(TextureButtonsPanel, Panel): bl_label = "" bl_context = "texture" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -135,7 +145,12 @@ class TEXTURE_PT_context(TextureButtonsPanel, Panel): class TEXTURE_PT_node(TextureButtonsPanel, Panel): bl_label = "Node" bl_context = "texture" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -164,7 +179,12 @@ class TextureTypePanel(TextureButtonsPanel): class TEXTURE_PT_clouds(TextureTypePanel, Panel): bl_label = "Clouds" tex_type = 'CLOUDS' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -196,7 +216,12 @@ class TEXTURE_PT_clouds(TextureTypePanel, Panel): class TEXTURE_PT_wood(TextureTypePanel, Panel): bl_label = "Wood" tex_type = 'WOOD' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -233,7 +258,12 @@ class TEXTURE_PT_wood(TextureTypePanel, Panel): class TEXTURE_PT_marble(TextureTypePanel, Panel): bl_label = "Marble" tex_type = 'MARBLE' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -267,7 +297,12 @@ class TEXTURE_PT_marble(TextureTypePanel, Panel): class TEXTURE_PT_magic(TextureTypePanel, Panel): bl_label = "Magic" tex_type = 'MAGIC' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -286,7 +321,12 @@ class TEXTURE_PT_magic(TextureTypePanel, Panel): class TEXTURE_PT_blend(TextureTypePanel, 
Panel): bl_label = "Blend" tex_type = 'BLEND' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -308,7 +348,12 @@ class TEXTURE_PT_blend(TextureTypePanel, Panel): class TEXTURE_PT_stucci(TextureTypePanel, Panel): bl_label = "Stucci" tex_type = 'STUCCI' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -339,7 +384,12 @@ class TEXTURE_PT_stucci(TextureTypePanel, Panel): class TEXTURE_PT_image(TextureTypePanel, Panel): bl_label = "Image" tex_type = 'IMAGE' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, _context): # TODO: maybe expose the template_ID from the template image here. @@ -351,7 +401,12 @@ class TEXTURE_PT_image_settings(TextureTypePanel, Panel): bl_label = "Settings" bl_parent_id = 'TEXTURE_PT_image' tex_type = 'IMAGE' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -506,7 +561,12 @@ class TEXTURE_PT_image_mapping_crop(TextureTypePanel, Panel): class TEXTURE_PT_musgrave(TextureTypePanel, Panel): bl_label = "Musgrave" tex_type = 'MUSGRAVE' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -551,7 +611,12 @@ class TEXTURE_PT_musgrave(TextureTypePanel, Panel): class TEXTURE_PT_voronoi(TextureTypePanel, Panel): bl_label = "Voronoi" tex_type = 'VORONOI' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -584,7 +649,12 @@ class TEXTURE_PT_voronoi_feature_weights(TextureTypePanel, Panel): bl_label = "Feature Weights" bl_parent_id = "TEXTURE_PT_voronoi" tex_type = 'VORONOI' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -605,7 +675,12 @@ class TEXTURE_PT_voronoi_feature_weights(TextureTypePanel, Panel): class TEXTURE_PT_distortednoise(TextureTypePanel, Panel): bl_label = "Distorted Noise" tex_type = 'DISTORTED_NOISE' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -630,7 +705,12 @@ class 
TEXTURE_PT_distortednoise(TextureTypePanel, Panel): class TextureSlotPanel(TextureButtonsPanel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -642,7 +722,12 @@ class TextureSlotPanel(TextureButtonsPanel): class TEXTURE_PT_mapping(TextureSlotPanel, Panel): bl_label = "Mapping" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -710,7 +795,12 @@ class TEXTURE_PT_mapping(TextureSlotPanel, Panel): class TEXTURE_PT_influence(TextureSlotPanel, Panel): bl_label = "Influence" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -792,7 +882,12 @@ class TextureColorsPoll: class TEXTURE_PT_colors(TextureButtonsPanel, TextureColorsPoll, Panel): bl_label = "Colors" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -821,7 +916,12 @@ class TEXTURE_PT_colors_ramp(TextureButtonsPanel, TextureColorsPoll, Panel): bl_label = "Color Ramp" bl_options = {'DEFAULT_CLOSED'} bl_parent_id = 'TEXTURE_PT_colors' - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw_header(self, context): tex = context.texture @@ -842,7 +942,12 @@ class TEXTURE_PT_colors_ramp(TextureButtonsPanel, TextureColorsPoll, Panel): class TEXTURE_PT_custom_props(TextureButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "texture" _property_type = Texture diff --git a/release/scripts/startup/bl_ui/properties_view_layer.py b/release/scripts/startup/bl_ui/properties_view_layer.py index c6d1ee2a065..50579fe2da2 100644 --- a/release/scripts/startup/bl_ui/properties_view_layer.py +++ b/release/scripts/startup/bl_ui/properties_view_layer.py @@ -26,7 +26,12 @@ class ViewLayerButtonsPanel: class VIEWLAYER_PT_layer(ViewLayerButtonsPanel, Panel): bl_label = "View Layer" - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): layout = self.layout @@ -44,7 +49,7 @@ class VIEWLAYER_PT_layer(ViewLayerButtonsPanel, Panel): class VIEWLAYER_PT_layer_passes(ViewLayerButtonsPanel, Panel): bl_label = "Passes" - COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'} + COMPAT_ENGINES = {'BLENDER_EEVEE', 
'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH_NEXT'} def draw(self, context): pass @@ -95,6 +100,24 @@ class VIEWLAYER_PT_eevee_next_layer_passes_data(ViewLayerButtonsPanel, Panel): sub.prop(view_layer, "use_pass_vector") +class VIEWLAYER_PT_eevee_next_layer_passes_data(ViewLayerButtonsPanel, Panel): + bl_label = "Data" + bl_parent_id = "VIEWLAYER_PT_layer_passes" + + COMPAT_ENGINES = {'BLENDER_WORKBENCH_NEXT'} + + def draw(self, context): + layout = self.layout + layout.use_property_split = True + layout.use_property_decorate = False + + view_layer = context.view_layer + + col = layout.column() + col.prop(view_layer, "use_pass_combined") + col.prop(view_layer, "use_pass_z") + + class VIEWLAYER_PT_eevee_layer_passes_light(ViewLayerButtonsPanel, Panel): bl_label = "Light" bl_parent_id = "VIEWLAYER_PT_layer_passes" diff --git a/release/scripts/startup/bl_ui/properties_world.py b/release/scripts/startup/bl_ui/properties_world.py index b0ea36abd6b..691666f346a 100644 --- a/release/scripts/startup/bl_ui/properties_world.py +++ b/release/scripts/startup/bl_ui/properties_world.py @@ -19,7 +19,12 @@ class WorldButtonsPanel: class WORLD_PT_context_world(WorldButtonsPanel, Panel): bl_label = "" bl_options = {'HIDE_HEADER'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): @@ -63,7 +68,12 @@ class EEVEE_WORLD_PT_mist(WorldButtonsPanel, Panel): class WORLD_PT_custom_props(WorldButtonsPanel, PropertyPanel, Panel): - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = { + 'BLENDER_RENDER', + 'BLENDER_EEVEE', + 'BLENDER_EEVEE_NEXT', + 'BLENDER_WORKBENCH', + 'BLENDER_WORKBENCH_NEXT'} _context_path = "world" _property_type = bpy.types.World diff --git a/release/scripts/startup/bl_ui/space_clip.py b/release/scripts/startup/bl_ui/space_clip.py index 5bf68cb04de..076cd322360 100644 --- a/release/scripts/startup/bl_ui/space_clip.py +++ b/release/scripts/startup/bl_ui/space_clip.py @@ -1750,11 +1750,11 @@ class CLIP_MT_marker_pie(Menu): # Match Keyframe prop = pie.operator("wm.context_set_enum", text="Match Previous", icon='KEYFRAME_HLT') prop.data_path = "space_data.clip.tracking.tracks.active.pattern_match" - prop.value = 'KEYFRAME' + prop.value = 'PREV_FRAME' # Match Previous Frame prop = pie.operator("wm.context_set_enum", text="Match Keyframe", icon='KEYFRAME') prop.data_path = "space_data.clip.tracking.tracks.active.pattern_match" - prop.value = 'PREV_FRAME' + prop.value = 'KEYFRAME' class CLIP_MT_tracking_pie(Menu): diff --git a/release/scripts/startup/bl_ui/space_dopesheet.py b/release/scripts/startup/bl_ui/space_dopesheet.py index 99b33840051..af9b9e527e6 100644 --- a/release/scripts/startup/bl_ui/space_dopesheet.py +++ b/release/scripts/startup/bl_ui/space_dopesheet.py @@ -708,6 +708,9 @@ class DOPESHEET_MT_channel_context_menu(Menu): operator = "action.extrapolation_type" layout.operator_menu_enum(operator, "type", text="Extrapolation Mode") + if is_graph_editor: + layout.operator_menu_enum("graph.fmodifier_add", "type", text="Add F-Curve Modifier").only_active = False + layout.separator() layout.operator("anim.channels_expand") layout.operator("anim.channels_collapse") diff --git a/release/scripts/startup/bl_ui/space_filebrowser.py b/release/scripts/startup/bl_ui/space_filebrowser.py index a7e9663d186..85313b14341 100644 --- 
a/release/scripts/startup/bl_ui/space_filebrowser.py +++ b/release/scripts/startup/bl_ui/space_filebrowser.py @@ -8,6 +8,8 @@ from bpy_extras import ( asset_utils, ) +from bpy.app.translations import contexts as i18n_contexts + class FILEBROWSER_HT_header(Header): bl_space_type = 'FILE_BROWSER' @@ -229,6 +231,7 @@ class FILEBROWSER_PT_bookmarks_volumes(Panel): bl_region_type = 'TOOLS' bl_category = "Bookmarks" bl_label = "Volumes" + bl_translation_context = i18n_contexts.editor_filebrowser @classmethod def poll(cls, context): diff --git a/release/scripts/startup/bl_ui/space_graph.py b/release/scripts/startup/bl_ui/space_graph.py index b2c8822a75d..5a550acd107 100644 --- a/release/scripts/startup/bl_ui/space_graph.py +++ b/release/scripts/startup/bl_ui/space_graph.py @@ -222,6 +222,7 @@ class GRAPH_MT_channel(Menu): layout.separator() layout.operator("anim.channels_editable_toggle") layout.operator_menu_enum("graph.extrapolation_type", "type", text="Extrapolation Mode") + layout.operator_menu_enum("graph.fmodifier_add", "type", text="Add F-Curve Modifier").only_active = False layout.separator() layout.operator("graph.hide", text="Hide Selected Curves").unselected = False diff --git a/release/scripts/startup/bl_ui/space_node.py b/release/scripts/startup/bl_ui/space_node.py index bf8a39ea26f..7e221d63d4a 100644 --- a/release/scripts/startup/bl_ui/space_node.py +++ b/release/scripts/startup/bl_ui/space_node.py @@ -670,7 +670,7 @@ class NODE_PT_texture_mapping(Panel): bl_category = "Node" bl_label = "Texture Mapping" bl_options = {'DEFAULT_CLOSED'} - COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'} + COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'} @classmethod def poll(cls, context): diff --git a/release/scripts/startup/bl_ui/space_sequencer.py b/release/scripts/startup/bl_ui/space_sequencer.py index 56781b8c6d7..8b08ec99d89 100644 --- a/release/scripts/startup/bl_ui/space_sequencer.py +++ b/release/scripts/startup/bl_ui/space_sequencer.py @@ -946,6 +946,7 @@ class SEQUENCER_MT_strip(Menu): if strip and strip.type == 'SCENE': layout.operator("sequencer.delete", text="Delete Strip & Data").delete_data = True + layout.operator("sequencer.scene_frame_range_update") if has_sequencer: if strip: @@ -1068,6 +1069,7 @@ class SEQUENCER_MT_context_menu(Menu): strip = context.active_sequence_strip if strip and strip.type == 'SCENE': layout.operator("sequencer.delete", text="Delete Strip & Data").delete_data = True + layout.operator("sequencer.scene_frame_range_update") layout.separator() @@ -1577,7 +1579,7 @@ class SEQUENCER_PT_effect_text_style(SequencerButtonsPanel, Panel): subsub.prop(strip, "shadow_color", text="") row.prop_decorator(strip, "shadow_color") - row = layout.row(align=True, heading="Box") + row = layout.row(align=True, heading="Box", heading_ctxt=i18n_contexts.id_sequence) row.use_property_decorate = False sub = row.row(align=True) sub.prop(strip, "use_box", text="") @@ -1845,17 +1847,17 @@ class SEQUENCER_PT_time(SequencerButtonsPanel, Panel): frame_offset_end = strip.frame_offset_end length_list = ( - str(frame_start), - str(frame_final_end), - str(frame_final_duration), - str(frame_offset_start), - str(frame_offset_end), + str(round(frame_start, 0)), + str(round(frame_final_end, 0)), + str(round(frame_final_duration, 0)), + str(round(frame_offset_start, 0)), + str(round(frame_offset_end, 0)), ) if not is_effect: length_list = length_list + ( - str(strip.animation_offset_start), - 
                str(strip.animation_offset_end),
+                str(round(strip.animation_offset_start, 0)),
+                str(round(strip.animation_offset_end, 0)),
             )
 
         max_length = max(len(x) for x in length_list)
@@ -1994,7 +1996,7 @@ class SEQUENCER_PT_adjust_sound(SequencerButtonsPanel, Panel):
 
         split = col.split(factor=0.4)
         split.alignment = 'RIGHT'
-        split.label(text="Pan")
+        split.label(text="Pan", heading_ctxt=i18n_contexts.id_sound)
         split.prop(strip, "pan", text="")
         split.enabled = pan_enabled
 
@@ -2607,7 +2609,7 @@ class SEQUENCER_PT_annotation_onion(AnnotationOnionSkin, SequencerButtonsPanel_O
 
 
 class SEQUENCER_PT_custom_props(SequencerButtonsPanel, PropertyPanel, Panel):
-    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
+    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH', 'BLENDER_WORKBENCH_NEXT'}
     _context_path = "active_sequence_strip"
     _property_type = (bpy.types.Sequence,)
     bl_category = "Strip"
diff --git a/release/scripts/startup/bl_ui/space_topbar.py b/release/scripts/startup/bl_ui/space_topbar.py
index 97f8a1bfad1..50bb1e42602 100644
--- a/release/scripts/startup/bl_ui/space_topbar.py
+++ b/release/scripts/startup/bl_ui/space_topbar.py
@@ -468,7 +468,7 @@ class TOPBAR_MT_file_import(Menu):
             self.layout.operator("wm.alembic_import", text="Alembic (.abc)")
         if bpy.app.build_options.usd:
             self.layout.operator(
-                "wm.usd_import", text="Universal Scene Description (.usd, .usdc, .usda)")
+                "wm.usd_import", text="Universal Scene Description (.usd*)")
 
         if bpy.app.build_options.io_gpencil:
             self.layout.operator("wm.gpencil_import_svg", text="SVG as Grease Pencil")
diff --git a/release/scripts/startup/bl_ui/space_userpref.py b/release/scripts/startup/bl_ui/space_userpref.py
index 6fed8ce846b..2ce8dd55b0b 100644
--- a/release/scripts/startup/bl_ui/space_userpref.py
+++ b/release/scripts/startup/bl_ui/space_userpref.py
@@ -598,7 +598,7 @@ class USERPREF_PT_system_cycles_devices(SystemPanel, CenterAlignMixIn, Panel):
 
 
 class USERPREF_PT_system_gpu_backend(SystemPanel, CenterAlignMixIn, Panel):
-    bl_label = "GPU Back end"
+    bl_label = "GPU Backend"
 
     @classmethod
     def poll(cls, _context):
@@ -2283,6 +2283,7 @@ class USERPREF_PT_experimental_prototypes(ExperimentalPanel, Panel):
                 ({"property": "use_sculpt_texture_paint"}, "T96225"),
                 ({"property": "use_full_frame_compositor"}, "T88150"),
                 ({"property": "enable_eevee_next"}, "T93220"),
+                ({"property": "enable_workbench_next"}, "T101619"),
             ),
         )
diff --git a/release/scripts/startup/bl_ui/space_view3d.py b/release/scripts/startup/bl_ui/space_view3d.py
index 374dc1831cf..032fccf7d91 100644
--- a/release/scripts/startup/bl_ui/space_view3d.py
+++ b/release/scripts/startup/bl_ui/space_view3d.py
@@ -758,7 +758,7 @@ class VIEW3D_HT_header(Header):
         if object_mode == 'PARTICLE_EDIT':
             row = layout.row()
             row.prop(tool_settings.particle_edit, "select_mode", text="", expand=True)
-        elif object_mode == 'SCULPT_CURVES' and obj.type == 'CURVES':
+        elif object_mode in {'EDIT', 'SCULPT_CURVES'} and obj.type == 'CURVES':
             curves = obj.data
 
             row = layout.row(align=True)
@@ -2051,13 +2051,20 @@ class VIEW3D_MT_select_paint_mask_vertex(Menu):
         layout.separator()
 
         layout.operator("paint.vert_select_ungrouped", text="Ungrouped Vertices")
+        layout.operator("paint.vert_select_linked", text="Select Linked")
 
 
 class VIEW3D_MT_select_edit_curves(Menu):
     bl_label = "Select"
 
     def draw(self, _context):
-        pass
+        layout = self.layout
+
+        layout.operator("curves.select_all", text="All").action = 'SELECT'
+        layout.operator("curves.select_all", text="None").action = 'DESELECT'
+
layout.operator("curves.select_all", text="Invert").action = 'INVERT' + layout.operator("curves.select_random", text="Random") + layout.operator("curves.select_end", text="Endpoints") class VIEW3D_MT_select_sculpt_curves(Menu): @@ -2070,44 +2077,10 @@ class VIEW3D_MT_select_sculpt_curves(Menu): layout.operator("curves.select_all", text="None").action = 'DESELECT' layout.operator("curves.select_all", text="Invert").action = 'INVERT' layout.operator("sculpt_curves.select_random", text="Random") - layout.operator("sculpt_curves.select_end", text="Endpoints") + layout.operator("curves.select_end", text="Endpoints") layout.operator("sculpt_curves.select_grow", text="Grow") -class VIEW3D_MT_angle_control(Menu): - bl_label = "Angle Control" - - @classmethod - def poll(cls, context): - settings = UnifiedPaintPanel.paint_settings(context) - if not settings: - return False - - brush = settings.brush - tex_slot = brush.texture_slot - - return tex_slot.has_texture_angle and tex_slot.has_texture_angle_source - - def draw(self, context): - layout = self.layout - - settings = UnifiedPaintPanel.paint_settings(context) - brush = settings.brush - - sculpt = (context.sculpt_object is not None) - - tex_slot = brush.texture_slot - - layout.prop(tex_slot, "use_rake", text="Rake") - - if brush.brush_capabilities.has_random_texture_angle and tex_slot.has_random_texture_angle: - if sculpt: - if brush.sculpt_capabilities.has_random_texture_angle: - layout.prop(tex_slot, "use_random", text="Random") - else: - layout.prop(tex_slot, "use_random", text="Random") - - class VIEW3D_MT_mesh_add(Menu): bl_idname = "VIEW3D_MT_mesh_add" bl_label = "Mesh" @@ -2287,6 +2260,7 @@ class VIEW3D_MT_camera_add(Menu): class VIEW3D_MT_volume_add(Menu): bl_idname = "VIEW3D_MT_volume_add" bl_label = "Volume" + bl_translation_context = i18n_contexts.id_id def draw(self, _context): layout = self.layout @@ -4299,7 +4273,10 @@ class VIEW3D_MT_edit_mesh_faces_data(Menu): layout.separator() + layout.operator("mesh.flip_quad_tessellation") + if with_freestyle: + layout.separator() layout.operator("mesh.mark_freestyle_face").clear = False layout.operator("mesh.mark_freestyle_face", text="Clear Freestyle Face").clear = True @@ -5321,7 +5298,10 @@ class VIEW3D_MT_edit_curves(Menu): bl_label = "Curves" def draw(self, _context): - pass + layout = self.layout + + layout.menu("VIEW3D_MT_transform") + layout.separator() class VIEW3D_MT_object_mode_pie(Menu): @@ -6215,14 +6195,15 @@ class VIEW3D_PT_shading_compositor(Panel): def draw(self, context): shading = context.space_data.shading - import sys - is_macos = sys.platform == "darwin" + import gpu + is_supported = (gpu.capabilities.compute_shader_support_get() + and gpu.capabilities.shader_image_load_store_support_get()) row = self.layout.row() - row.active = not is_macos + row.active = is_supported row.prop(shading, "use_compositor", expand=True) - if is_macos and shading.use_compositor != "DISABLED": - self.layout.label(text="Compositor not supported on MacOS", icon='ERROR') + if shading.use_compositor != "DISABLED" and not is_supported: + self.layout.label(text="Compositor not supported on this platform", icon='ERROR') class VIEW3D_PT_gizmo_display(Panel): @@ -6311,7 +6292,7 @@ class VIEW3D_PT_overlay_guides(Panel): row_el.prop(overlay, "show_ortho_grid", text="Grid") grid_active = bool(view.region_quadviews or (view.region_3d.is_orthographic_side_view and not view.region_3d.is_perspective)) row_el.active = grid_active - row.prop(overlay, "show_floor", text="Floor") + row.prop(overlay, "show_floor", 
text="Floor", text_ctxt=i18n_contexts.editor_view3d) if overlay.show_floor or overlay.show_ortho_grid: sub = col.row(align=True) @@ -8069,7 +8050,6 @@ classes = ( VIEW3D_MT_select_paint_mask_vertex, VIEW3D_MT_select_edit_curves, VIEW3D_MT_select_sculpt_curves, - VIEW3D_MT_angle_control, VIEW3D_MT_mesh_add, VIEW3D_MT_curve_add, VIEW3D_MT_surface_add, diff --git a/release/scripts/templates_py/ui_list_generic.py b/release/scripts/templates_py/ui_list_generic.py new file mode 100644 index 00000000000..ec6135a62ac --- /dev/null +++ b/release/scripts/templates_py/ui_list_generic.py @@ -0,0 +1,46 @@ +import bpy +from bl_ui.generic_ui_list import draw_ui_list + + +class MyPropGroup(bpy.types.PropertyGroup): + name: bpy.props.StringProperty() + + +class MyPanel(bpy.types.Panel): + bl_label = "My Label" + bl_idname = "SCENE_PT_list_demo" + bl_space_type = 'VIEW_3D' + bl_region_type = 'UI' + bl_category = 'My Category' + + def draw(self, context): + layout = self.layout + draw_ui_list( + layout, + context, + list_context_path="scene.my_list", + active_index_context_path="scene.my_list_active_index" + ) + + +classes = [ + MyPropGroup, + MyPanel +] + +class_register, class_unregister = bpy.utils.register_classes_factory(classes) + + +def register(): + class_register() + bpy.types.Scene.my_list = bpy.props.CollectionProperty(type=MyPropGroup) + bpy.types.Scene.my_list_active_index = bpy.props.IntProperty() + + +def unregister(): + class_unregister() + del bpy.types.Scene.my_list + del bpy.types.Scene.my_list_active_index + + +register() diff --git a/source/blender/asset_system/AS_asset_library.h b/source/blender/asset_system/AS_asset_library.h index 0a67df2ecbf..870d2041655 100644 --- a/source/blender/asset_system/AS_asset_library.h +++ b/source/blender/asset_system/AS_asset_library.h @@ -7,7 +7,6 @@ #pragma once struct IDRemapper; -struct Main; #ifdef __cplusplus extern "C" { diff --git a/source/blender/blendthumb/CMakeLists.txt b/source/blender/blendthumb/CMakeLists.txt index 6160d225d45..5a5bc20fed7 100644 --- a/source/blender/blendthumb/CMakeLists.txt +++ b/source/blender/blendthumb/CMakeLists.txt @@ -50,5 +50,7 @@ else() add_executable(blender-thumbnailer ${SRC} ${SRC_CMD}) setup_platform_linker_flags(blender-thumbnailer) target_link_libraries(blender-thumbnailer bf_blenlib) - target_link_libraries(blender-thumbnailer ${PTHREADS_LIBRARIES}) + if(DEFINED PTHREADS_LIBRARIES) + target_link_libraries(blender-thumbnailer ${PTHREADS_LIBRARIES}) + endif() endif() diff --git a/source/blender/blenfont/BLF_api.h b/source/blender/blenfont/BLF_api.h index 01b6d1d8942..558fd5f1dc2 100644 --- a/source/blender/blenfont/BLF_api.h +++ b/source/blender/blenfont/BLF_api.h @@ -28,7 +28,6 @@ extern "C" { struct ColorManagedDisplay; struct ResultBLF; -struct rctf; struct rcti; int BLF_init(void); diff --git a/source/blender/blenfont/intern/blf_internal.h b/source/blender/blenfont/intern/blf_internal.h index 2f3f7b52233..cbdfa191877 100644 --- a/source/blender/blenfont/intern/blf_internal.h +++ b/source/blender/blenfont/intern/blf_internal.h @@ -11,7 +11,6 @@ struct FontBLF; struct GlyphBLF; struct GlyphCacheBLF; struct ResultBLF; -struct rctf; struct rcti; /* Max number of FontBLFs in memory. 
Take care that every font has a glyph cache per size/dpi, diff --git a/source/blender/blenkernel/BKE_animsys.h b/source/blender/blenkernel/BKE_animsys.h index 91ecfe09f38..46aacf1e7fd 100644 --- a/source/blender/blenkernel/BKE_animsys.h +++ b/source/blender/blenkernel/BKE_animsys.h @@ -33,7 +33,6 @@ struct PointerRNA; struct PropertyRNA; struct bAction; struct bActionGroup; -struct bContext; /* Container for data required to do FCurve and Driver evaluation. */ typedef struct AnimationEvalContext { diff --git a/source/blender/blenkernel/BKE_attribute.hh b/source/blender/blenkernel/BKE_attribute.hh index 0ff120328d3..afa9f1fba23 100644 --- a/source/blender/blenkernel/BKE_attribute.hh +++ b/source/blender/blenkernel/BKE_attribute.hh @@ -753,20 +753,6 @@ Vector retrieve_attributes_for_transfer( const AnonymousAttributePropagationInfo &propagation_info, const Set &skip = {}); -/** - * Copy attributes for the domain based on the elementwise mask. - * - * \param mask_indices: Indexed elements to copy from the source data-block. - * \param domain: Attribute domain to transfer. - * \param skip: Named attributes to ignore/skip. - */ -void copy_attribute_domain(AttributeAccessor src_attributes, - MutableAttributeAccessor dst_attributes, - IndexMask selection, - eAttrDomain domain, - const AnonymousAttributePropagationInfo &propagation_info, - const Set &skip = {}); - bool allow_procedural_attribute_access(StringRef attribute_name); extern const char *no_procedural_access_message; diff --git a/source/blender/blenkernel/BKE_blendfile_link_append.h b/source/blender/blenkernel/BKE_blendfile_link_append.h index bd00ed51d99..0f54760e03a 100644 --- a/source/blender/blenkernel/BKE_blendfile_link_append.h +++ b/source/blender/blenkernel/BKE_blendfile_link_append.h @@ -13,11 +13,7 @@ struct BlendHandle; struct ID; struct Library; struct LibraryLink_Params; -struct Main; struct ReportList; -struct Scene; -struct View3D; -struct ViewLayer; typedef struct BlendfileLinkAppendContext BlendfileLinkAppendContext; typedef struct BlendfileLinkAppendContextItem BlendfileLinkAppendContextItem; diff --git a/source/blender/blenkernel/BKE_bpath.h b/source/blender/blenkernel/BKE_bpath.h index 555cddd34bd..20fd0758370 100644 --- a/source/blender/blenkernel/BKE_bpath.h +++ b/source/blender/blenkernel/BKE_bpath.h @@ -19,7 +19,6 @@ extern "C" { #endif struct ID; -struct ListBase; struct Main; struct ReportList; diff --git a/source/blender/blenkernel/BKE_brush.h b/source/blender/blenkernel/BKE_brush.h index c206e999e38..23612b7b93b 100644 --- a/source/blender/blenkernel/BKE_brush.h +++ b/source/blender/blenkernel/BKE_brush.h @@ -18,7 +18,6 @@ extern "C" { struct Brush; struct ImBuf; struct ImagePool; -struct Object; struct Main; struct MTex; struct Scene; diff --git a/source/blender/blenkernel/BKE_collection.h b/source/blender/blenkernel/BKE_collection.h index dd7866d83e5..61f3e675391 100644 --- a/source/blender/blenkernel/BKE_collection.h +++ b/source/blender/blenkernel/BKE_collection.h @@ -153,6 +153,14 @@ bool BKE_collection_object_remove(struct Main *bmain, struct Collection *collection, struct Object *object, bool free_us); +/** + * Replace one object with another in a collection (managing user counts). 
+ */ +bool BKE_collection_object_replace(struct Main *bmain, + struct Collection *collection, + struct Object *ob_old, + struct Object *ob_new); + /** * Move object from a collection into another * diff --git a/source/blender/blenkernel/BKE_collision.h b/source/blender/blenkernel/BKE_collision.h index 01e0a0ce062..8e566f08b5d 100644 --- a/source/blender/blenkernel/BKE_collision.h +++ b/source/blender/blenkernel/BKE_collision.h @@ -16,7 +16,6 @@ struct CollisionModifierData; struct Depsgraph; struct MVertTri; struct Object; -struct Scene; //////////////////////////////////////// // used for collisions in collision.c diff --git a/source/blender/blenkernel/BKE_curve_legacy_convert.hh b/source/blender/blenkernel/BKE_curve_legacy_convert.hh index 88f93282f25..e7996faa0c5 100644 --- a/source/blender/blenkernel/BKE_curve_legacy_convert.hh +++ b/source/blender/blenkernel/BKE_curve_legacy_convert.hh @@ -13,7 +13,14 @@ struct Curves; namespace blender::bke { +/** + * Convert the old curve type to the new data type. Caller owns the returned pointer. + */ Curves *curve_legacy_to_curves(const Curve &curve_legacy); +/** + * Convert the old curve type to the new data type using a specific list of #Nurb for the actual + * geometry data. Caller owns the returned pointer. + */ Curves *curve_legacy_to_curves(const Curve &curve_legacy, const ListBase &nurbs_list); } // namespace blender::bke diff --git a/source/blender/blenkernel/BKE_curvemapping_cache.h b/source/blender/blenkernel/BKE_curvemapping_cache.h index cdbd99bb476..ecdde624cb3 100644 --- a/source/blender/blenkernel/BKE_curvemapping_cache.h +++ b/source/blender/blenkernel/BKE_curvemapping_cache.h @@ -1,3 +1,9 @@ +#pragma once; + +#ifdef __cplusplus +extern "C" { +#endif + struct GHash; struct CurveMapping; @@ -22,3 +28,6 @@ bool BKE_curvemapping_in_cache(CurveMapping *curve); void BKE_curvemapping_cache_release_or_free(CurveMappingCache *cache, CurveMapping *curve); void BKE_curvemapping_cache_exit(); +#ifdef __cplusplus +} +#endif diff --git a/source/blender/blenkernel/BKE_curves.h b/source/blender/blenkernel/BKE_curves.h index 71a0562e1df..83a0ba201f8 100644 --- a/source/blender/blenkernel/BKE_curves.h +++ b/source/blender/blenkernel/BKE_curves.h @@ -15,7 +15,6 @@ extern "C" { struct BoundBox; struct Curves; -struct CustomDataLayer; struct Depsgraph; struct Main; struct Object; diff --git a/source/blender/blenkernel/BKE_curves.hh b/source/blender/blenkernel/BKE_curves.hh index 1ed872c8ab8..04898f6d2d6 100644 --- a/source/blender/blenkernel/BKE_curves.hh +++ b/source/blender/blenkernel/BKE_curves.hh @@ -18,6 +18,7 @@ #include "BLI_generic_virtual_array.hh" #include "BLI_index_mask.hh" #include "BLI_math_vector_types.hh" +#include "BLI_offset_indices.hh" #include "BLI_shared_cache.hh" #include "BLI_span.hh" #include "BLI_task.hh" @@ -29,17 +30,6 @@ namespace blender::bke { -template)> -constexpr IndexRange offsets_to_range(Span offsets, int64_t index) -{ - BLI_assert(index >= 0); - BLI_assert(index < offsets.size()); - - const int offset = offsets[index]; - const int offset_next = offsets[index + 1]; - return {offset, offset_next - offset}; -} - namespace curves::nurbs { struct BasisCache { @@ -79,21 +69,24 @@ class CurvesGeometryRuntime { * Cache of offsets into the evaluated array for each curve, accounting for all previous * evaluated points, Bezier curve vector segments, different resolutions per curve, etc. 
*/ - mutable Vector evaluated_offsets_cache; - mutable Vector bezier_evaluated_offsets; - mutable CacheMutex offsets_cache_mutex; + struct EvaluatedOffsets { + Vector evaluated_offsets; + Vector all_bezier_offsets; + }; + mutable SharedCache evaluated_offsets_cache; - mutable Vector nurbs_basis_cache; - mutable CacheMutex nurbs_basis_cache_mutex; + mutable SharedCache> nurbs_basis_cache; /** Cache of evaluated positions. */ - mutable Vector evaluated_position_cache; - mutable CacheMutex position_cache_mutex; - /** - * The evaluated positions result, using a separate span in case all curves are poly curves, - * in which case a separate array of evaluated positions is unnecessary. - */ - mutable Span evaluated_positions_span; + struct EvaluatedPositions { + Vector vector; + /** + * The evaluated positions result, using a separate span in case all curves are poly curves, + * in which case a separate array of evaluated positions is unnecessary. + */ + Span span; + }; + mutable SharedCache evaluated_position_cache; /** * A cache of bounds shared between data-blocks with unchanged positions and radii. @@ -107,16 +100,13 @@ class CurvesGeometryRuntime { * cyclic, it needs one more length value to correspond to the last segment, so in order to * make slicing this array for a curve fast, an extra float is stored for every curve. */ - mutable Vector evaluated_length_cache; - mutable CacheMutex length_cache_mutex; + mutable SharedCache> evaluated_length_cache; /** Direction of the curve at each evaluated point. */ - mutable Vector evaluated_tangent_cache; - mutable CacheMutex tangent_cache_mutex; + mutable SharedCache> evaluated_tangent_cache; /** Normal direction vectors for each evaluated point. */ - mutable Vector evaluated_normal_cache; - mutable CacheMutex normal_cache_mutex; + mutable SharedCache> evaluated_normal_cache; }; /** @@ -138,17 +128,6 @@ class CurvesGeometry : public ::CurvesGeometry { CurvesGeometry &operator=(CurvesGeometry &&other); ~CurvesGeometry(); - static CurvesGeometry &wrap(::CurvesGeometry &dna_struct) - { - CurvesGeometry *geometry = reinterpret_cast(&dna_struct); - return *geometry; - } - static const CurvesGeometry &wrap(const ::CurvesGeometry &dna_struct) - { - const CurvesGeometry *geometry = reinterpret_cast(&dna_struct); - return *geometry; - } - /* -------------------------------------------------------------------- * Accessors. */ @@ -164,23 +143,17 @@ class CurvesGeometry : public ::CurvesGeometry { IndexRange points_range() const; IndexRange curves_range() const; - /** - * Number of control points in the indexed curve. - */ - int points_num_for_curve(const int index) const; - /** * The index of the first point in every curve. The size of this span is one larger than the - * number of curves. Consider using #points_for_curve rather than using the offsets directly. + * number of curves. Consider using #points_by_curve rather than using the offsets directly. */ Span offsets() const; MutableSpan offsets_for_write(); /** - * Access a range of indices of point data for a specific curve. + * The offsets of every curve into arrays on the points domain. */ - IndexRange points_for_curve(int index) const; - IndexRange points_for_curves(IndexRange curves) const; + OffsetIndices points_by_curve() const; /** The type (#CurveType) of each curve, or potentially a single if all are the same type. 
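The hunk above removes CurvesGeometry::points_for_curve()/points_num_for_curve() and replaces them with the OffsetIndices-based points_by_curve() accessor. A minimal sketch of the new access pattern, under the assumption that BKE_curves.hh is available as declared in this patch; the function name and the `curves` argument below are illustrative only, not part of the patch:

#include "BKE_curves.hh"

/* Sketch only: iterate per-curve point ranges with the new accessor. */
static int64_t count_points_by_iterating_curves(const blender::bke::CurvesGeometry &curves)
{
  const blender::OffsetIndices<int> points_by_curve = curves.points_by_curve();
  int64_t total = 0;
  for (const int curve_i : curves.curves_range()) {
    /* Was: curves.points_for_curve(curve_i). */
    const blender::IndexRange points = points_by_curve[curve_i];
    total += points.size();
  }
  return total;
}

The same pattern would apply to the evaluated-point domain via evaluated_points_by_curve(), which this patch introduces alongside points_by_curve().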
*/ VArray curve_types() const; @@ -308,25 +281,14 @@ class CurvesGeometry : public ::CurvesGeometry { int evaluated_points_num() const; /** - * Access a range of indices of point data for a specific curve. - * Call #evaluated_offsets() first to ensure that the evaluated offsets cache is current. + * The offsets of every curve's evaluated points. */ - IndexRange evaluated_points_for_curve(int index) const; - IndexRange evaluated_points_for_curves(IndexRange curves) const; + OffsetIndices evaluated_points_by_curve() const; /** - * The index of the first evaluated point for every curve. The size of this span is one larger - * than the number of curves. Consider using #evaluated_points_for_curve rather than using the - * offsets directly. - */ - Span evaluated_offsets() const; - - /** Makes sure the data described by #evaluated_offsets if necessary. */ - void ensure_evaluated_offsets() const; - - /** - * Retrieve offsets into a Bezier curve's evaluated points for each control point. - * Call #ensure_evaluated_offsets() first to ensure that the evaluated offsets cache is current. + * Retrieve offsets into a Bezier curve's evaluated points for each control point. Stored in the + * same format as #OffsetIndices. Call #evaluated_points_by_curve() first to ensure that the + * evaluated offsets cache is current. */ Span bezier_evaluated_offsets_for_curve(int curve_index) const; @@ -435,6 +397,8 @@ class CurvesGeometry : public ::CurvesGeometry { } }; +static_assert(sizeof(blender::bke::CurvesGeometry) == sizeof(::CurvesGeometry)); + /** * Used to propagate deformation data through modifier evaluation so that sculpt tools can work on * evaluated data. @@ -494,6 +458,17 @@ inline float3 decode_surface_bary_coord(const float2 &v) return {v.x, v.y, 1.0f - v.x - v.y}; } +/** + * Return a range used to retrieve values from an array of values stored per point, but with an + * extra element at the end of each curve. This is useful for offsets within curves, where it is + * convenient to store the first 0 and have the last offset be the total result curve size, using + * the same rules as #OffsetIndices. + */ +inline IndexRange per_curve_point_offsets_range(const IndexRange points, const int curve_index) +{ + return {curve_index + points.start(), points.size() + 1}; +} + /** \} */ /* -------------------------------------------------------------------- */ @@ -572,8 +547,9 @@ bool point_is_sharp(Span handle_types_left, Span handle_types_ri * point edges generate the number of edges specified by the resolution, vector segments only * generate one edge. * - * The size of the offsets array must be the same as the number of points. The value at each index - * is the evaluated point offset including the following segment. + * The expectations for the result \a evaluated_offsets are the same as for #OffsetIndices, so the + * size must be one greater than the number of points. The value at each index is the evaluated + * point at the start of that segment. */ void calculate_evaluated_offsets(Span handle_types_left, Span handle_types_right, @@ -673,7 +649,7 @@ void evaluate_segment(const float3 &point_0, void calculate_evaluated_positions(Span positions, Span handles_left, Span handles_right, - Span evaluated_offsets, + OffsetIndices evaluated_offsets, MutableSpan evaluated_positions); /** @@ -681,7 +657,7 @@ void calculate_evaluated_positions(Span positions, * #evaluated_offsets. 
Unlike other curve types, for Bezier curves generic data and positions * are treated separately, since attribute values aren't stored for the handle control points. */ -void interpolate_to_evaluated(GSpan src, Span evaluated_offsets, GMutableSpan dst); +void interpolate_to_evaluated(GSpan src, OffsetIndices evaluated_offsets, GMutableSpan dst); } // namespace bezier @@ -707,12 +683,12 @@ int calculate_evaluated_num(int points_num, bool cyclic, int resolution); void interpolate_to_evaluated(GSpan src, bool cyclic, int resolution, GMutableSpan dst); /** - * Evaluate the Catmull Rom curve. The size of each segment and its offset in the #dst span - * is encoded in #evaluated_offsets, with the same method as #CurvesGeometry::offsets(). + * Evaluate the Catmull Rom curve. The placement of each segment in the #dst span is described by + * #evaluated_offsets. */ void interpolate_to_evaluated(const GSpan src, const bool cyclic, - const Span evaluated_offsets, + const OffsetIndices evaluated_offsets, GMutableSpan dst); void calculate_basis(const float parameter, float4 &r_weights); @@ -852,16 +828,6 @@ inline IndexRange CurvesGeometry::curves_range() const return IndexRange(this->curves_num()); } -inline int CurvesGeometry::points_num_for_curve(const int index) const -{ - BLI_assert(this->curve_num > 0); - BLI_assert(this->curve_num > index); - BLI_assert(this->curve_offsets != nullptr); - const int offset = this->curve_offsets[index]; - const int offset_next = this->curve_offsets[index + 1]; - return offset_next - offset; -} - inline bool CurvesGeometry::is_single_type(const CurveType type) const { return this->curve_type_counts()[type] == this->curves_num(); @@ -884,59 +850,31 @@ inline const std::array &CurvesGeometry::curve_type_counts return this->runtime->type_counts; } -inline IndexRange CurvesGeometry::points_for_curve(const int index) const +inline OffsetIndices CurvesGeometry::points_by_curve() const { - /* Offsets are not allocated when there are no curves. */ - BLI_assert(this->curve_num > 0); - BLI_assert(this->curve_num > index); - BLI_assert(this->curve_offsets != nullptr); - const int offset = this->curve_offsets[index]; - const int offset_next = this->curve_offsets[index + 1]; - return {offset, offset_next - offset}; -} - -inline IndexRange CurvesGeometry::points_for_curves(const IndexRange curves) const -{ - /* Offsets are not allocated when there are no curves. */ - BLI_assert(this->curve_num > 0); - BLI_assert(this->curve_offsets != nullptr); - const int offset = this->curve_offsets[curves.start()]; - const int offset_next = this->curve_offsets[curves.one_after_last()]; - return {offset, offset_next - offset}; + return OffsetIndices({this->curve_offsets, this->curve_num + 1}); } inline int CurvesGeometry::evaluated_points_num() const { /* This could avoid calculating offsets in the future in simple circumstances. 
*/ - return this->evaluated_offsets().last(); -} - -inline IndexRange CurvesGeometry::evaluated_points_for_curve(int index) const -{ - BLI_assert(this->runtime->offsets_cache_mutex.is_cached()); - return offsets_to_range(this->runtime->evaluated_offsets_cache.as_span(), index); -} - -inline IndexRange CurvesGeometry::evaluated_points_for_curves(const IndexRange curves) const -{ - BLI_assert(this->runtime->offsets_cache_mutex.is_cached()); - BLI_assert(this->curve_num > 0); - const int offset = this->runtime->evaluated_offsets_cache[curves.start()]; - const int offset_next = this->runtime->evaluated_offsets_cache[curves.one_after_last()]; - return {offset, offset_next - offset}; + return this->evaluated_points_by_curve().total_size(); } inline Span CurvesGeometry::bezier_evaluated_offsets_for_curve(const int curve_index) const { - const IndexRange points = this->points_for_curve(curve_index); - return this->runtime->bezier_evaluated_offsets.as_span().slice(points); + const OffsetIndices points_by_curve = this->points_by_curve(); + const IndexRange points = points_by_curve[curve_index]; + const IndexRange range = curves::per_curve_point_offsets_range(points, curve_index); + const Span offsets = this->runtime->evaluated_offsets_cache.data().all_bezier_offsets; + return offsets.slice(range); } inline IndexRange CurvesGeometry::lengths_range_for_curve(const int curve_index, const bool cyclic) const { BLI_assert(cyclic == this->cyclic()[curve_index]); - const IndexRange points = this->evaluated_points_for_curve(curve_index); + const IndexRange points = this->evaluated_points_by_curve()[curve_index]; const int start = points.start() + curve_index; return {start, curves::segments_num(points.size(), cyclic)}; } @@ -944,9 +882,8 @@ inline IndexRange CurvesGeometry::lengths_range_for_curve(const int curve_index, inline Span CurvesGeometry::evaluated_lengths_for_curve(const int curve_index, const bool cyclic) const { - BLI_assert(this->runtime->length_cache_mutex.is_cached()); const IndexRange range = this->lengths_range_for_curve(curve_index, cyclic); - return this->runtime->evaluated_length_cache.as_span().slice(range); + return this->runtime->evaluated_length_cache.data().as_span().slice(range); } inline float CurvesGeometry::evaluated_length_total_for_curve(const int curve_index, @@ -1020,3 +957,12 @@ struct CurvesSurfaceTransforms { }; } // namespace blender::bke + +inline blender::bke::CurvesGeometry &CurvesGeometry::wrap() +{ + return *reinterpret_cast(this); +} +inline const blender::bke::CurvesGeometry &CurvesGeometry::wrap() const +{ + return *reinterpret_cast(this); +} diff --git a/source/blender/blenkernel/BKE_curves_utils.hh b/source/blender/blenkernel/BKE_curves_utils.hh index 1e06cb2d4c7..9549a643c05 100644 --- a/source/blender/blenkernel/BKE_curves_utils.hh +++ b/source/blender/blenkernel/BKE_curves_utils.hh @@ -76,14 +76,17 @@ struct CurvePoint : public CurveSegment { * [0, range_size) can be iterated over an arbitrary amount of times in between. */ class IndexRangeCyclic { - /* Index to the start and end of the iterated range. + /** + * Index to the start and end of the iterated range. */ int start_ = 0; int end_ = 0; - /* Size of the underlying iterable range. + /** + * Size of the underlying iterable range. */ int range_size_ = 0; - /* Number of times the range end is passed when the range is iterated. + /** + * Number of times the range end is passed when the range is iterated. 
*/ int cycles_ = 0; @@ -468,74 +471,83 @@ class IndexRangeCyclic { * ranges, assuming that all curves have the same number of control points in #src_curves * and #dst_curves. */ -void copy_point_data(const CurvesGeometry &src_curves, - const CurvesGeometry &dst_curves, +void copy_point_data(OffsetIndices src_points_by_curve, + OffsetIndices dst_points_by_curve, Span curve_ranges, GSpan src, GMutableSpan dst); -void copy_point_data(const CurvesGeometry &src_curves, - const CurvesGeometry &dst_curves, +void copy_point_data(OffsetIndices src_points_by_curve, + OffsetIndices dst_points_by_curve, IndexMask src_curve_selection, GSpan src, GMutableSpan dst); template -void copy_point_data(const CurvesGeometry &src_curves, - const CurvesGeometry &dst_curves, +void copy_point_data(OffsetIndices src_points_by_curve, + OffsetIndices dst_points_by_curve, IndexMask src_curve_selection, Span src, MutableSpan dst) { - copy_point_data(src_curves, dst_curves, src_curve_selection, GSpan(src), GMutableSpan(dst)); + copy_point_data(src_points_by_curve, + dst_points_by_curve, + src_curve_selection, + GSpan(src), + GMutableSpan(dst)); } -void fill_points(const CurvesGeometry &curves, +void fill_points(OffsetIndices points_by_curve, IndexMask curve_selection, GPointer value, GMutableSpan dst); template -void fill_points(const CurvesGeometry &curves, +void fill_points(const OffsetIndices points_by_curve, IndexMask curve_selection, const T &value, MutableSpan dst) { - fill_points(curves, curve_selection, &value, dst); + fill_points(points_by_curve, curve_selection, &value, dst); } -void fill_points(const CurvesGeometry &curves, +void fill_points(const OffsetIndices points_by_curve, Span curve_ranges, GPointer value, GMutableSpan dst); template -void fill_points(const CurvesGeometry &curves, +void fill_points(const OffsetIndices points_by_curve, Span curve_ranges, const T &value, MutableSpan dst) { - fill_points(curves, curve_ranges, &value, dst); + fill_points(points_by_curve, curve_ranges, &value, dst); } /** - * Copy only the information on the point domain, but not the offsets or any point attributes, - * meant for operations that change the number of points but not the number of curves. + * Create new curves with the same number of curves as the input, but no points. Copy all curve + * domain attributes to the new curves, except the offsets encoding the size of each curve. + * + * Used for operations that change the number of points but not the number of curves, allowing + * creation of the new offsets directly inside the new array. + * * \warning The returned curves have invalid offsets! */ bke::CurvesGeometry copy_only_curve_domain(const bke::CurvesGeometry &src_curves); /** - * Copy the size of every curve in #curve_ranges to the corresponding index in #counts. + * Copy the number of points in every curve in the mask to the corresponding index in #sizes. */ -void fill_curve_counts(const bke::CurvesGeometry &curves, - Span curve_ranges, - MutableSpan counts); +void copy_curve_sizes(OffsetIndices points_by_curve, IndexMask mask, MutableSpan sizes); /** - * Turn an array of sizes into the offset at each index including all previous sizes. + * Copy the number of points in every curve in #curve_ranges to the corresponding index in + * #sizes. 
*/ -void accumulate_counts_to_offsets(MutableSpan counts_to_offsets, int start_offset = 0); +void copy_curve_sizes(OffsetIndices points_by_curve, + Span curve_ranges, + MutableSpan sizes); IndexMask indices_for_type(const VArray &types, const std::array &type_counts, diff --git a/source/blender/blenkernel/BKE_customdata.h b/source/blender/blenkernel/BKE_customdata.h index 2f95c5d6fa6..62ca82f7265 100644 --- a/source/blender/blenkernel/BKE_customdata.h +++ b/source/blender/blenkernel/BKE_customdata.h @@ -723,7 +723,7 @@ typedef struct CustomDataTransferLayerMap { /** If non-NULL, array of weights, one for each dest item, replaces mix_factor. */ const float *mix_weights; - /** Data source array (can be regular CD data, vertices/edges/etc., keyblocks...). */ + /** Data source array (can be regular CD data, vertices/edges/etc., key-blocks...). */ const void *data_src; /** Data dest array (same type as dat_src). */ void *data_dst; @@ -741,7 +741,7 @@ typedef struct CustomDataTransferLayerMap { /** For bit-flag transfer, flag(s) to affect in transferred data. */ uint64_t data_flag; - /** Opaque pointer, to be used by specific interp callback (e.g. transformspace for normals). */ + /** Opaque pointer, to be used by specific interp callback (e.g. transform-space for normals). */ void *interp_data; cd_datatransfer_interp interp; diff --git a/source/blender/blenkernel/BKE_data_transfer.h b/source/blender/blenkernel/BKE_data_transfer.h index 1b6c1dc4205..518b5146b18 100644 --- a/source/blender/blenkernel/BKE_data_transfer.h +++ b/source/blender/blenkernel/BKE_data_transfer.h @@ -92,8 +92,10 @@ int BKE_object_data_transfer_dttype_to_srcdst_index(int dtdata_type); DT_TYPE_SHAPEKEY, \ DT_TYPE_MPROPCOL_VERT, \ DT_TYPE_MLOOPCOL_VERT, \ + DT_TYPE_MPROPCOL_VERT | DT_TYPE_MLOOPCOL_VERT, \ DT_TYPE_MPROPCOL_LOOP, \ DT_TYPE_MLOOPCOL_LOOP, \ + DT_TYPE_MPROPCOL_LOOP | DT_TYPE_MLOOPCOL_LOOP, \ DT_TYPE_UV) enum { diff --git a/source/blender/blenkernel/BKE_displist.h b/source/blender/blenkernel/BKE_displist.h index 6551e732300..410cc0b473a 100644 --- a/source/blender/blenkernel/BKE_displist.h +++ b/source/blender/blenkernel/BKE_displist.h @@ -43,7 +43,6 @@ enum { struct Depsgraph; struct ListBase; -struct Mesh; struct Object; struct Scene; diff --git a/source/blender/blenkernel/BKE_geometry_fields.hh b/source/blender/blenkernel/BKE_geometry_fields.hh index 5f5333beb63..019ff41184b 100644 --- a/source/blender/blenkernel/BKE_geometry_fields.hh +++ b/source/blender/blenkernel/BKE_geometry_fields.hh @@ -8,7 +8,6 @@ * Common field utilities and field definitions for geometry components. 
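For the fields added to Global in BKE_global.h above, a purely illustrative sketch (not actual Blender code) of how a deprecated-OpenGL call site could record itself once, assuming `G` is the usual global instance:

if (!G.opengl_deprecation_usage_detected) {
  G.opengl_deprecation_usage_detected = true;
  /* Remember the first offending call site for later reporting. */
  G.opengl_deprecation_usage_filename = __FILE__;
  G.opengl_deprecation_usage_lineno = __LINE__;
}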
*/ -#include "BKE_attribute.h" #include "BKE_geometry_set.hh" #include "FN_field.hh" diff --git a/source/blender/blenkernel/BKE_geometry_set.h b/source/blender/blenkernel/BKE_geometry_set.h index 97e69f3fe1f..c3403e66cab 100644 --- a/source/blender/blenkernel/BKE_geometry_set.h +++ b/source/blender/blenkernel/BKE_geometry_set.h @@ -10,7 +10,6 @@ extern "C" { #endif -struct Collection; struct GeometrySet; struct Object; diff --git a/source/blender/blenkernel/BKE_geometry_set.hh b/source/blender/blenkernel/BKE_geometry_set.hh index 42d773055fa..72f77c889c1 100644 --- a/source/blender/blenkernel/BKE_geometry_set.hh +++ b/source/blender/blenkernel/BKE_geometry_set.hh @@ -10,24 +10,18 @@ #include #include -#include "BLI_float4x4.hh" #include "BLI_function_ref.hh" -#include "BLI_hash.hh" #include "BLI_map.hh" #include "BLI_math_vector_types.hh" -#include "BLI_set.hh" #include "BLI_user_counter.hh" #include "BLI_vector_set.hh" -#include "BKE_anonymous_attribute_id.hh" #include "BKE_attribute.hh" #include "BKE_geometry_set.h" struct Curves; -struct Collection; struct Curve; struct Mesh; -struct Object; struct PointCloud; struct Volume; diff --git a/source/blender/blenkernel/BKE_geometry_set_instances.hh b/source/blender/blenkernel/BKE_geometry_set_instances.hh index 6d4b9a2128c..3e9fb1d0379 100644 --- a/source/blender/blenkernel/BKE_geometry_set_instances.hh +++ b/source/blender/blenkernel/BKE_geometry_set_instances.hh @@ -2,6 +2,8 @@ #pragma once +#include "BLI_float4x4.hh" + #include "BKE_geometry_set.hh" namespace blender::bke { diff --git a/source/blender/blenkernel/BKE_global.h b/source/blender/blenkernel/BKE_global.h index f3acb7d3746..23cbd73cf36 100644 --- a/source/blender/blenkernel/BKE_global.h +++ b/source/blender/blenkernel/BKE_global.h @@ -139,6 +139,13 @@ typedef struct Global { * Typically Python drivers. */ char autoexec_fail[200]; + + /** + * Has there been an opengl deprecation call detected when running on a none OpenGL backend. + */ + bool opengl_deprecation_usage_detected; + const char *opengl_deprecation_usage_filename; + int opengl_deprecation_usage_lineno; } Global; /* **************** GLOBAL ********************* */ diff --git a/source/blender/blenkernel/BKE_gpencil.h b/source/blender/blenkernel/BKE_gpencil.h index dc7a5ab003a..55ca1c38af4 100644 --- a/source/blender/blenkernel/BKE_gpencil.h +++ b/source/blender/blenkernel/BKE_gpencil.h @@ -16,7 +16,6 @@ struct Brush; struct CurveMapping; struct Depsgraph; struct GHash; -struct GPencilUpdateCache; struct ListBase; struct MDeformVert; struct Main; diff --git a/source/blender/blenkernel/BKE_gpencil_geom.h b/source/blender/blenkernel/BKE_gpencil_geom.h index 976961f27ae..70b12477b43 100644 --- a/source/blender/blenkernel/BKE_gpencil_geom.h +++ b/source/blender/blenkernel/BKE_gpencil_geom.h @@ -14,7 +14,6 @@ extern "C" { struct Depsgraph; struct Main; struct Object; -struct RegionView3D; struct Scene; struct bGPDcurve; struct bGPDframe; diff --git a/source/blender/blenkernel/BKE_idprop.h b/source/blender/blenkernel/BKE_idprop.h index 84412fd139f..32239a30312 100644 --- a/source/blender/blenkernel/BKE_idprop.h +++ b/source/blender/blenkernel/BKE_idprop.h @@ -28,18 +28,17 @@ typedef union IDPropertyTemplate { double d; struct { const char *str; + /** String length (including the null byte): `strlen(str) + 1`. 
*/ int len; + /** #eIDPropertySubType */ char subtype; } string; struct ID *id; struct { int len; + /** #eIDPropertyType */ char type; } array; - struct { - int matvec_size; - const float *example; - } matrix_or_vector; } IDPropertyTemplate; /* ----------- Property Array Type ---------- */ diff --git a/source/blender/blenkernel/BKE_idprop.hh b/source/blender/blenkernel/BKE_idprop.hh index ce11a56ad5f..10110517402 100644 --- a/source/blender/blenkernel/BKE_idprop.hh +++ b/source/blender/blenkernel/BKE_idprop.hh @@ -32,6 +32,9 @@ class IDPropertyDeleter { } }; +/** \brief Allocate a new IDProperty of type IDP_BOOLEAN, set its name and value. */ +std::unique_ptr create_bool(StringRefNull prop_name, bool value); + /** \brief Allocate a new IDProperty of type IDP_INT, set its name and value. */ std::unique_ptr create(StringRefNull prop_name, int32_t value); diff --git a/source/blender/blenkernel/BKE_idtype.h b/source/blender/blenkernel/BKE_idtype.h index 256ddec5505..be13a3c1d2a 100644 --- a/source/blender/blenkernel/BKE_idtype.h +++ b/source/blender/blenkernel/BKE_idtype.h @@ -39,6 +39,15 @@ enum { IDTYPE_FLAGS_APPEND_IS_REUSABLE = 1 << 3, /** Indicates that the given IDType does not have animation data. */ IDTYPE_FLAGS_NO_ANIMDATA = 1 << 4, + /** + * Indicates that the given IDType is not handled through memfile (aka global) undo. + * + * \note This currently only affect local data-blocks. + * + * \note Current readfile undo code expects these data-blocks to not be used by any 'regular' + * data-blocks. + */ + IDTYPE_FLAGS_NO_MEMFILE_UNDO = 1 << 5, }; typedef struct IDCacheKey { diff --git a/source/blender/blenkernel/BKE_image.h b/source/blender/blenkernel/BKE_image.h index eb43ce823ac..f00da370ccf 100644 --- a/source/blender/blenkernel/BKE_image.h +++ b/source/blender/blenkernel/BKE_image.h @@ -565,7 +565,6 @@ struct PartialUpdateUser *BKE_image_partial_update_create(const struct Image *im void BKE_image_partial_update_free(struct PartialUpdateUser *user); /* --- partial updater (image side) --- */ -struct PartialUpdateRegister; void BKE_image_partial_update_register_free(struct Image *image); /** \brief Mark a region of the image to update. 
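A hedged sketch tying together the ID-property changes above: filling the string template consistently with its new length comment, and using the new create_bool() helper (assumed to live in the same blender::bke::idprop namespace as the existing create() overloads); the property names are illustrative:

#include <cstring>
#include "BKE_idprop.h"
#include "BKE_idprop.hh"

static void add_example_properties(IDProperty *group)
{
  /* String template: `len` includes the terminating null byte. */
  IDPropertyTemplate val = {0};
  val.string.str = "text";
  val.string.len = int(strlen("text")) + 1;
  val.string.subtype = IDP_STRING_SUB_UTF8;
  IDP_AddToGroup(group, IDP_New(IDP_STRING, &val, "example_string"));

  /* New boolean helper: ownership is released from the unique_ptr into the group. */
  auto bool_prop = blender::bke::idprop::create_bool("example_flag", true);
  IDP_AddToGroup(group, bool_prop.release());
}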
*/ diff --git a/source/blender/blenkernel/BKE_image_partial_update.hh b/source/blender/blenkernel/BKE_image_partial_update.hh index 8f962ace268..8e914940b3a 100644 --- a/source/blender/blenkernel/BKE_image_partial_update.hh +++ b/source/blender/blenkernel/BKE_image_partial_update.hh @@ -23,7 +23,6 @@ #include "DNA_image_types.h" extern "C" { -struct PartialUpdateRegister; struct PartialUpdateUser; } diff --git a/source/blender/blenkernel/BKE_mesh.h b/source/blender/blenkernel/BKE_mesh.h index 8eb55706ff5..346292d431a 100644 --- a/source/blender/blenkernel/BKE_mesh.h +++ b/source/blender/blenkernel/BKE_mesh.h @@ -44,7 +44,6 @@ struct MPoly; struct Main; struct MemArena; struct Mesh; -struct ModifierData; struct Object; struct PointCloud; struct Scene; @@ -218,7 +217,7 @@ void BKE_mesh_to_curve(struct Main *bmain, struct Depsgraph *depsgraph, struct Scene *scene, struct Object *ob); -void BKE_pointcloud_from_mesh(struct Mesh *me, struct PointCloud *pointcloud); +void BKE_pointcloud_from_mesh(const struct Mesh *me, struct PointCloud *pointcloud); void BKE_mesh_to_pointcloud(struct Main *bmain, struct Depsgraph *depsgraph, struct Scene *scene, @@ -252,11 +251,13 @@ struct BoundBox *BKE_mesh_boundbox_get(struct Object *ob); void BKE_mesh_texspace_calc(struct Mesh *me); void BKE_mesh_texspace_ensure(struct Mesh *me); -void BKE_mesh_texspace_get(struct Mesh *me, float r_loc[3], float r_size[3]); +void BKE_mesh_texspace_get(struct Mesh *me, + float r_texspace_location[3], + float r_texspace_size[3]); void BKE_mesh_texspace_get_reference(struct Mesh *me, - char **r_texflag, - float **r_loc, - float **r_size); + char **r_texspace_flag, + float **r_texspace_location, + float **r_texspace_size); void BKE_mesh_texspace_copy_from_object(struct Mesh *me, struct Object *ob); /** diff --git a/source/blender/blenkernel/BKE_mesh_legacy_convert.h b/source/blender/blenkernel/BKE_mesh_legacy_convert.h index 5b72d85a433..087716706e1 100644 --- a/source/blender/blenkernel/BKE_mesh_legacy_convert.h +++ b/source/blender/blenkernel/BKE_mesh_legacy_convert.h @@ -36,7 +36,7 @@ void BKE_mesh_legacy_convert_uvs_to_generic(Mesh *mesh); * Move face sets to the legacy type from a generic type. */ void BKE_mesh_legacy_face_set_from_generic( - Mesh *mesh, blender::MutableSpan poly_layers_to_write); + blender::MutableSpan poly_layers_to_write); /** * Copy face sets to the generic data type from the legacy type. 
*/ diff --git a/source/blender/blenkernel/BKE_mesh_wrapper.h b/source/blender/blenkernel/BKE_mesh_wrapper.h index b4742583b03..a9b9bb96178 100644 --- a/source/blender/blenkernel/BKE_mesh_wrapper.h +++ b/source/blender/blenkernel/BKE_mesh_wrapper.h @@ -8,7 +8,6 @@ struct BMEditMesh; struct CustomData_MeshMasks; struct Mesh; -struct Object; #ifdef __cplusplus extern "C" { diff --git a/source/blender/blenkernel/BKE_multires.h b/source/blender/blenkernel/BKE_multires.h index fbbcf287979..ef6845bf451 100644 --- a/source/blender/blenkernel/BKE_multires.h +++ b/source/blender/blenkernel/BKE_multires.h @@ -25,8 +25,6 @@ struct Scene; struct SubdivCCG; struct BMesh; -struct MLoop; -struct MLoopTri; struct MPoly; /** @@ -44,7 +42,7 @@ void multires_flush_sculpt_updates(struct Object *object); void multires_force_sculpt_rebuild(struct Object *object); void multires_force_external_reload(struct Object *object); -/* internal, only called in subsurf_ccg.c */ +/* internal, only called in subsurf_ccg.cc */ void multires_modifier_update_mdisps(struct DerivedMesh *dm, struct Scene *scene); void multires_modifier_update_hidden(struct DerivedMesh *dm); @@ -159,7 +157,7 @@ void old_mdisps_bilinear(float out[3], float (*disps)[3], int st, float u, float int mdisp_rot_face_to_crn( struct MPoly *mpoly, int face_side, float u, float v, float *x, float *y); -/* Reshaping, define in multires_reshape.c */ +/* Reshaping, define in multires_reshape.cc */ bool multiresModifier_reshapeFromVertcos(struct Depsgraph *depsgraph, struct Object *object, @@ -207,7 +205,7 @@ void multiresModifier_subdivide_to_level(struct Object *object, int top_level, eMultiresSubdivideModeType mode); -/* Subdivision integration, defined in multires_subdiv.c */ +/* Subdivision integration, defined in multires_subdiv.cc */ struct SubdivSettings; struct SubdivToMeshSettings; diff --git a/source/blender/blenkernel/BKE_nla.h b/source/blender/blenkernel/BKE_nla.h index 41980999a18..086f21aa897 100644 --- a/source/blender/blenkernel/BKE_nla.h +++ b/source/blender/blenkernel/BKE_nla.h @@ -10,6 +10,8 @@ /** Temp constant defined for these functions only. */ #define NLASTRIP_MIN_LEN_THRESH 0.1f +#include "DNA_listBase.h" + #ifdef __cplusplus extern "C" { #endif @@ -131,7 +133,19 @@ void BKE_nlastrips_sort_strips(ListBase *strips); /** * Add the given NLA-Strip to the given list of strips, assuming that it - * isn't currently a member of another list + * isn't currently a member of another list, NULL, or conflicting with existing + * strips position. + */ +void BKE_nlastrips_add_strip_unsafe(ListBase *strips, struct NlaStrip *strip); + +/** + * \brief NULL checks incoming strip and verifies no overlap / invalid + * configuration against other strips in NLA Track. + * + * \param strips: + * \param strip: + * \return true + * \return false */ bool BKE_nlastrips_add_strip(ListBase *strips, struct NlaStrip *strip); @@ -295,6 +309,11 @@ void BKE_nlastrip_recalculate_bounds(struct NlaStrip *strip); */ void BKE_nlastrip_recalculate_bounds_sync_action(struct NlaStrip *strip); +/** + * Recalculate the blend-in and blend-out values after a strip transform update. 
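A hedged sketch of how the two strip-insertion helpers declared in BKE_nla.h above differ in use; `track`, `strip` and `reports` are assumed to exist and the error handling is illustrative:

if (!BKE_nlastrips_add_strip(&track->strips, strip)) {
  /* NULL input or an overlap with existing strips: report instead of inserting.
   * BKE_nlastrips_add_strip_unsafe() would have skipped this validation. */
  BKE_report(reports, RPT_ERROR, "Cannot add strip: it overlaps an existing strip");
}
else {
  BKE_nlastrip_recalculate_bounds(strip);
}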
+ */ +void BKE_nlastrip_recalculate_blend(struct NlaStrip *strip); + /** * Find (and set) a unique name for a strip from the whole AnimData block * Uses a similar method to the BLI method, but is implemented differently diff --git a/source/blender/blenkernel/BKE_node.h b/source/blender/blenkernel/BKE_node.h index c99f5cb076e..c358f56c0d9 100644 --- a/source/blender/blenkernel/BKE_node.h +++ b/source/blender/blenkernel/BKE_node.h @@ -29,27 +29,19 @@ extern "C" { /* not very important, but the stack solver likes to know a maximum */ #define MAX_SOCKET 512 -struct ARegion; struct BlendDataReader; struct BlendExpander; struct BlendLibReader; struct BlendWriter; -struct ColorManagedDisplaySettings; -struct ColorManagedViewSettings; -struct CryptomatteSession; struct FreestyleLineStyle; struct GPUMaterial; struct GPUNodeStack; struct ID; struct ImBuf; -struct ImageFormatData; struct Light; -struct ListBase; -struct MTex; struct Main; struct Material; struct PointerRNA; -struct RenderData; struct Scene; struct SpaceNode; struct Tex; @@ -104,6 +96,7 @@ namespace nodes { class DNode; class NodeMultiFunctionBuilder; class GeoNodeExecParams; +class NodeDeclaration; class NodeDeclarationBuilder; class GatherLinkSearchOpParams; } // namespace nodes @@ -118,6 +111,9 @@ using CPPTypeHandle = blender::CPPType; using NodeMultiFunctionBuildFunction = void (*)(blender::nodes::NodeMultiFunctionBuilder &builder); using NodeGeometryExecFunction = void (*)(blender::nodes::GeoNodeExecParams params); using NodeDeclareFunction = void (*)(blender::nodes::NodeDeclarationBuilder &builder); +using NodeDeclareDynamicFunction = void (*)(const bNodeTree &tree, + const bNode &node, + blender::nodes::NodeDeclaration &r_declaration); using SocketGetCPPValueFunction = void (*)(const struct bNodeSocket &socket, void *r_value); using SocketGetGeometryNodesCPPValueFunction = void (*)(const struct bNodeSocket &socket, void *r_value); @@ -137,6 +133,7 @@ typedef void *NodeGetCompositorShaderNodeFunction; typedef void *NodeMultiFunctionBuildFunction; typedef void *NodeGeometryExecFunction; typedef void *NodeDeclareFunction; +typedef void *NodeDeclareDynamicFunction; typedef void *NodeGatherSocketLinkOperationsFunction; typedef void *SocketGetCPPTypeFunction; typedef void *SocketGetGeometryNodesCPPTypeFunction; @@ -173,11 +170,6 @@ typedef struct bNodeSocketType { struct bNode *node, struct bNodeSocket *sock, const char *data_path); - void (*interface_verify_socket)(struct bNodeTree *ntree, - const struct bNodeSocket *interface_socket, - struct bNode *node, - struct bNodeSocket *sock, - const char *data_path); void (*interface_from_socket)(struct bNodeTree *ntree, struct bNodeSocket *interface_socket, const struct bNode *node, @@ -306,8 +298,8 @@ typedef struct bNodeType { const struct bNodeTree *nodetree, const char **r_disabled_hint); - /* optional handling of link insertion */ - void (*insert_link)(struct bNodeTree *ntree, struct bNode *node, struct bNodeLink *link); + /* Optional handling of link insertion. Returns false if the link shouldn't be created. */ + bool (*insert_link)(struct bNodeTree *ntree, struct bNode *node, struct bNodeLink *link); void (*free_self)(struct bNodeType *ntype); @@ -344,8 +336,13 @@ typedef struct bNodeType { /* Declares which sockets the node has. */ NodeDeclareFunction declare; - /* Different nodes of this type can have different declarations. */ - bool declaration_is_dynamic; + /** + * Declare which sockets the node has for declarations that aren't static per node type. 
+ * In other words, defining this callback means that different nodes of this type can have + * different declarations and different sockets. + */ + NodeDeclareDynamicFunction declare_dynamic; + /* Declaration to be used when it is not dynamic. */ NodeDeclarationHandle *fixed_declaration; @@ -363,7 +360,7 @@ typedef struct bNodeType { ExtensionRNA rna_ext; } bNodeType; -/* nodetype->nclass, for add-menu and themes */ +/** #bNodeType.nclass (for add-menu and themes). */ #define NODE_CLASS_INPUT 0 #define NODE_CLASS_OUTPUT 1 #define NODE_CLASS_OP_COLOR 3 @@ -1350,8 +1347,6 @@ void BKE_nodetree_remove_layer_n(struct bNodeTree *ntree, struct Scene *scene, i /** \name Texture Nodes * \{ */ -struct TexResult; - #define TEX_NODE_OUTPUT 401 #define TEX_NODE_CHECKER 402 #define TEX_NODE_TEXTURE 403 @@ -1517,7 +1512,7 @@ struct TexResult; #define GEO_NODE_INPUT_SHORTEST_EDGE_PATHS 1168 #define GEO_NODE_EDGE_PATHS_TO_CURVES 1169 #define GEO_NODE_EDGE_PATHS_TO_SELECTION 1170 -#define GEO_NODE_MESH_FACE_SET_BOUNDARIES 1171 +#define GEO_NODE_MESH_FACE_GROUP_BOUNDARIES 1171 #define GEO_NODE_DISTRIBUTE_POINTS_IN_VOLUME 1172 #define GEO_NODE_SELF_OBJECT 1173 #define GEO_NODE_SAMPLE_INDEX 1174 @@ -1538,6 +1533,7 @@ struct TexResult; #define GEO_NODE_IMAGE_INFO 1189 #define GEO_NODE_BLUR_ATTRIBUTE 1190 #define GEO_NODE_IMAGE 1191 +#define GEO_NODE_INTERPOLATE_CURVES 1192 /** \} */ diff --git a/source/blender/blenkernel/BKE_object.h b/source/blender/blenkernel/BKE_object.h index bbc45eabbd0..cfadf8f2060 100644 --- a/source/blender/blenkernel/BKE_object.h +++ b/source/blender/blenkernel/BKE_object.h @@ -22,7 +22,6 @@ struct Base; struct BoundBox; struct Curve; struct Depsgraph; -struct GeometrySet; struct GpencilModifierData; struct HookGpencilModifierData; struct HookModifierData; @@ -35,7 +34,6 @@ struct Object; struct RegionView3D; struct RigidBodyWorld; struct Scene; -struct ShaderFxData; struct SubsurfModifierData; struct View3D; struct ViewLayer; @@ -509,9 +507,9 @@ void BKE_object_handle_update_ex(struct Depsgraph *depsgraph, void BKE_object_sculpt_data_create(struct Object *ob); bool BKE_object_obdata_texspace_get(struct Object *ob, - char **r_texflag, - float **r_loc, - float **r_size); + char **r_texspace_flag, + float **r_texspace_location, + float **r_texspace_size); struct Mesh *BKE_object_get_evaluated_mesh_no_subsurf(const struct Object *object); /** Get evaluated mesh for given object. */ diff --git a/source/blender/blenkernel/BKE_paint.h b/source/blender/blenkernel/BKE_paint.h index 31f466e5223..3f348e89004 100644 --- a/source/blender/blenkernel/BKE_paint.h +++ b/source/blender/blenkernel/BKE_paint.h @@ -489,9 +489,6 @@ typedef struct SculptClothSimulation { } SculptClothSimulation; typedef struct SculptVertexInfo { - /* Indexed by vertex, stores and ID of its topologically connected component. */ - int *connected_component; - /* Indexed by base mesh vertex index, stores if that vertex is a boundary. */ BLI_bitmap *boundary; @@ -733,6 +730,8 @@ typedef struct SculptAttributePointers { SculptAttribute *automasking_stroke_id; SculptAttribute *automasking_cavity; + SculptAttribute *topology_island_key; /* CD_PROP_INT8 */ + /* BMesh */ SculptAttribute *dyntopo_node_id_vertex; SculptAttribute *dyntopo_node_id_face; @@ -1014,6 +1013,7 @@ typedef struct SculptSession { int last_automasking_settings_hash; uchar last_automask_stroke_id; bool *sharp_edge; + bool islands_valid; /* Is attrs.topology_island_key valid? 
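For the declare_dynamic callback added to bNodeType in BKE_node.h above, a hedged sketch of the expected shape; the registration helper and the per-node condition are illustrative, and the socket-building API from the nodes module is intentionally left out:

static void node_declare_dynamic(const bNodeTree & /*tree*/,
                                 const bNode &node,
                                 blender::nodes::NodeDeclaration &r_declaration)
{
  /* Unlike the static `declare` callback, the sockets built into r_declaration may
   * depend on per-node state such as node.custom1 or node.storage. */
  (void)node;
  (void)r_declaration;
}

static void register_example_node(bNodeType *ntype)
{
  ntype->declare_dynamic = node_declare_dynamic;
}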
*/ } SculptSession; typedef enum eSculptBoundary { @@ -1051,7 +1051,7 @@ int BKE_sculptsession_vertex_count(const SculptSession *ss); void BKE_sculpt_ensure_idmap(struct Object *ob); - /* Ensure an attribute layer exists. */ +/* Ensure an attribute layer exists. */ SculptAttribute *BKE_sculpt_attribute_ensure(struct Object *ob, eAttrDomain domain, eCustomDataType proptype, diff --git a/source/blender/blenkernel/BKE_particle.h b/source/blender/blenkernel/BKE_particle.h index fc9769a94a4..ad7045a8c2e 100644 --- a/source/blender/blenkernel/BKE_particle.h +++ b/source/blender/blenkernel/BKE_particle.h @@ -31,7 +31,6 @@ struct CustomData_MeshMasks; struct Depsgraph; struct EdgeHash; struct KDTree_3d; -struct LatticeDeformData; struct LinkNode; struct MCol; struct MFace; @@ -210,10 +209,12 @@ typedef struct ParticleCollision { ParticleCollisionElement pce; - /* total_time is the amount of time in this subframe - * inv_total_time is the opposite - * inv_timestep is the inverse of the amount of time in this frame */ - float total_time, inv_total_time, inv_timestep; + /** The amount of time in this sub-frame. */ + float total_time; + /** The inverse of `total_time`. */ + float inv_total_time; + /** The inverse of the amount of time in this frame. */ + float inv_timestep; float radius; float co1[3], co2[3]; diff --git a/source/blender/blenkernel/BKE_pbvh.h b/source/blender/blenkernel/BKE_pbvh.h index c3aafb88da5..2439d6799a6 100644 --- a/source/blender/blenkernel/BKE_pbvh.h +++ b/source/blender/blenkernel/BKE_pbvh.h @@ -30,28 +30,6 @@ extern "C" { /* Experimental feature to detect quad diagonals and mark (but not dissolve) them. */ //#define SCULPT_DIAGONAL_EDGE_MARKS - -/* - These structs represent logical verts/edges/faces. - for PBVH_GRIDS and PBVH_FACES they store integer - offsets, PBVH_BMESH stores pointers. - - The idea is to enforce stronger type checking by encapsulating - intptr_t's in structs.*/ -typedef struct PBVHVertRef { - intptr_t i; -} PBVHVertRef; - -typedef struct PBVHEdgeRef { - intptr_t i; -} PBVHEdgeRef; - -typedef struct PBVHFaceRef { - intptr_t i; -} PBVHFaceRef; - -#define PBVH_REF_NONE ((intptr_t)-1) - typedef struct SculptPMap { struct MeshElemMap *pmap; int *pmap_mem; @@ -68,30 +46,6 @@ typedef struct SculptLoopRef { # include "BLI_smallhash.h" #endif -typedef struct PBVHTri { - int v[3]; // references into PBVHTriBuf->verts - int eflag; // bitmask of which edges in the tri are real edges in the mesh - intptr_t l[3]; // loops - - float no[3]; - PBVHFaceRef f; -} PBVHTri; - -typedef struct PBVHTriBuf { - PBVHTri *tris; - PBVHVertRef *verts; - int *edges; - int totvert, totedge, tottri; - int verts_size, edges_size, tris_size; - - SmallHash vertmap; // maps vertex ptrs to indices within verts - - // private field - intptr_t *loops; - int totloop, mat_nr; - float min[3], max[3]; -} PBVHTriBuf; - //#define WITH_PBVH_CACHE struct BMesh; @@ -212,6 +166,83 @@ struct PBVHPublic { BMesh *bm; }; +/* + * These structs represent logical verts/edges/faces. + * for PBVH_GRIDS and PBVH_FACES they store integer + * offsets, PBVH_BMESH stores pointers. + * + * The idea is to enforce stronger type checking by encapsulating + * intptr_t's in structs. + */ + +/* A generic PBVH vertex. + * + * NOTE: in PBVH_GRIDS we consider the final grid points + * to be vertices. This is not true of edges or faces which are pulled from + * the base mesh. + */ + +#ifdef __cplusplus +/* A few C++ methods to play nice with sets and maps. 
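A hedged sketch of why the operator==/hash() members (added by the PBVH_REF_CXX_METHODS macro defined just below) matter: they let the reference wrappers be used directly as keys in BLI's C++ containers, assuming BLI_set.hh's default hasher picks up the hash() member:

static blender::Set<PBVHVertRef> visited_verts;

static void visit_once(PBVHVertRef vertex)
{
  if (visited_verts.add(vertex)) { /* Uses operator== and hash() from the macro. */
    /* First time this vertex is seen. */
  }
}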
*/ +# define PBVH_REF_CXX_METHODS(Class) \ + bool operator==(const Class b) const \ + { \ + return i == b.i; \ + } \ + uint64_t hash() const \ + { \ + return i; \ + } +#else +# define PBVH_REF_CXX_METHODS(cls) +#endif + +typedef struct PBVHVertRef { + intptr_t i; + + PBVH_REF_CXX_METHODS(PBVHVertRef) +} PBVHVertRef; + +/* NOTE: edges in PBVH_GRIDS are always pulled from the base mesh. */ +typedef struct PBVHEdgeRef { + intptr_t i; + + PBVH_REF_CXX_METHODS(PBVHVertRef) +} PBVHEdgeRef; + +/* NOTE: faces in PBVH_GRIDS are always puled from the base mesh. */ +typedef struct PBVHFaceRef { + intptr_t i; + + PBVH_REF_CXX_METHODS(PBVHVertRef) +} PBVHFaceRef; + +#define PBVH_REF_NONE -1LL + +typedef struct PBVHTri { + int v[3]; // references into PBVHTriBuf->verts + int eflag; // bitmask of which edges in the tri are real edges in the mesh + intptr_t l[3]; // loops + + float no[3]; + PBVHFaceRef f; +} PBVHTri; + +typedef struct PBVHTriBuf { + PBVHTri *tris; + PBVHVertRef *verts; + int *edges; + int totvert, totedge, tottri; + int verts_size, edges_size, tris_size; + + SmallHash vertmap; // maps vertex ptrs to indices within verts + + // private field + intptr_t *loops; + int totloop, mat_nr; + float min[3], max[3]; +} PBVHTriBuf; + typedef struct { float (*co)[3]; } PBVHProxyNode; @@ -273,10 +304,11 @@ typedef enum { /* tri areas are not guaranteed to be up to date, tools should update all nodes on first step of brush*/ PBVH_UpdateTriAreas = 1 << 20, - PBVH_UpdateOtherVerts = 1 << 21 + PBVH_UpdateOtherVerts = 1 << 21, + PBVH_TexLeaf = 1 << 22, + PBVH_TopologyUpdated = 1 << 23, /* Used internally by pbvh_bmesh.c */ } PBVHNodeFlags; - -ENUM_OPERATORS(PBVHNodeFlags, PBVH_UpdateOtherVerts); +ENUM_OPERATORS(PBVHNodeFlags, PBVH_TopologyUpdated); typedef struct PBVHFrustumPlanes { float (*planes)[4]; @@ -496,7 +528,12 @@ void BKE_pbvh_search_callback(PBVH *pbvh, void BKE_pbvh_search_gather( PBVH *pbvh, BKE_pbvh_SearchCallback scb, void *search_data, PBVHNode ***array, int *tot); - +void BKE_pbvh_search_gather_ex(PBVH *pbvh, + BKE_pbvh_SearchCallback scb, + void *search_data, + PBVHNode ***r_array, + int *r_tot, + PBVHNodeFlags leaf_flag); /* Ray-cast * the hit callback is called for all leaf nodes intersecting the ray; * it's up to the callback to find the primitive within the leaves that is diff --git a/source/blender/blenkernel/BKE_pbvh_pixels.hh b/source/blender/blenkernel/BKE_pbvh_pixels.hh index b6e006805ec..31dbad0abe1 100644 --- a/source/blender/blenkernel/BKE_pbvh_pixels.hh +++ b/source/blender/blenkernel/BKE_pbvh_pixels.hh @@ -200,6 +200,10 @@ struct NodeData { { undo_regions.clear(); for (UDIMTilePixels &tile : tiles) { + if (tile.pixel_rows.size() == 0) { + continue; + } + rcti region; BLI_rcti_init_minmax(®ion); for (PackedPixelRow &pixel_row : tile.pixel_rows) { diff --git a/source/blender/blenkernel/BKE_pointcloud.h b/source/blender/blenkernel/BKE_pointcloud.h index 48be080968d..c6a72af6fab 100644 --- a/source/blender/blenkernel/BKE_pointcloud.h +++ b/source/blender/blenkernel/BKE_pointcloud.h @@ -20,7 +20,6 @@ extern "C" { #endif struct BoundBox; -struct CustomDataLayer; struct Depsgraph; struct Main; struct Object; diff --git a/source/blender/blenkernel/BKE_subsurf.h b/source/blender/blenkernel/BKE_subsurf.h index 557a71fd06b..3630c95ec76 100644 --- a/source/blender/blenkernel/BKE_subsurf.h +++ b/source/blender/blenkernel/BKE_subsurf.h @@ -24,7 +24,6 @@ struct CCGVert; struct DMFlagMat; struct DerivedMesh; struct EdgeHash; -struct MPoly; struct Mesh; struct MeshElemMap; struct Object; diff 
--git a/source/blender/blenkernel/BKE_undo_system.h b/source/blender/blenkernel/BKE_undo_system.h index 5d1a27f8ba0..365a3a86b7c 100644 --- a/source/blender/blenkernel/BKE_undo_system.h +++ b/source/blender/blenkernel/BKE_undo_system.h @@ -93,6 +93,7 @@ typedef enum eUndoPushReturn { UNDO_PUSH_RET_SUCCESS = (1 << 0), UNDO_PUSH_RET_OVERRIDE_CHANGED = (1 << 1), } eUndoPushReturn; +ENUM_OPERATORS(eUndoPushReturn, UNDO_PUSH_RET_OVERRIDE_CHANGED) typedef void (*UndoTypeForEachIDRefFn)(void *user_data, struct UndoRefID *id_ref); @@ -137,7 +138,7 @@ typedef struct UndoType { /** * The size of the undo struct 'inherited' from #UndoStep for that specific type. Used for - * generic allocation in BKE's `undo_system.c`. */ + * generic allocation in BKE's `undo_system.cc`. */ size_t step_size; } UndoType; diff --git a/source/blender/blenkernel/BKE_volume.h b/source/blender/blenkernel/BKE_volume.h index 00b5993c5eb..d300d08da91 100644 --- a/source/blender/blenkernel/BKE_volume.h +++ b/source/blender/blenkernel/BKE_volume.h @@ -114,7 +114,9 @@ int BKE_volume_grid_channels(const struct VolumeGrid *grid); * Transformation from index space to object space. */ void BKE_volume_grid_transform_matrix(const struct VolumeGrid *grid, float mat[4][4]); -void BKE_volume_grid_transform_matrix_set(struct VolumeGrid *volume_grid, const float mat[4][4]); +void BKE_volume_grid_transform_matrix_set(const struct Volume *volume, + struct VolumeGrid *volume_grid, + const float mat[4][4]); /* Volume Editing * diff --git a/source/blender/blenkernel/CMakeLists.txt b/source/blender/blenkernel/CMakeLists.txt index b70bedc471a..d027d756c5c 100644 --- a/source/blender/blenkernel/CMakeLists.txt +++ b/source/blender/blenkernel/CMakeLists.txt @@ -81,9 +81,9 @@ set(SRC intern/bassrelief.cc intern/blender.c intern/blender_copybuffer.c - intern/blender_undo.c + intern/blender_undo.cc intern/blender_user_menu.c - intern/blendfile.c + intern/blendfile.cc intern/blendfile_link_append.c intern/boids.c intern/bpath.c @@ -97,7 +97,7 @@ set(SRC intern/cachefile.c intern/callbacks.c intern/camera.c - intern/cdderivedmesh.c + intern/cdderivedmesh.cc intern/cloth.cc intern/collection.c intern/collision.c @@ -128,11 +128,11 @@ set(SRC intern/customdata.cc intern/customdata_file.c intern/data_transfer.cc - intern/deform.c + intern/deform.cc intern/displist.cc - intern/dynamicpaint.c intern/dyntopo.cc intern/dyntopo_collapse.cc + intern/dynamicpaint.cc intern/editlattice.c intern/editmesh.cc intern/editmesh_bvh.c @@ -142,7 +142,7 @@ set(SRC intern/fcurve.c intern/fcurve_cache.c intern/fcurve_driver.c - intern/fluid.c + intern/fluid.cc intern/fmodifier.c intern/freestyle.c intern/geometry_component_curves.cc @@ -227,16 +227,16 @@ set(SRC intern/modifier.cc intern/movieclip.c intern/multires.cc - intern/multires_reshape.c - intern/multires_reshape_apply_base.c - intern/multires_reshape_ccg.c - intern/multires_reshape_smooth.c - intern/multires_reshape_subdivide.c - intern/multires_reshape_util.c - intern/multires_reshape_vertcos.c - intern/multires_subdiv.c - intern/multires_unsubdivide.c - intern/multires_versioning.c + intern/multires_reshape.cc + intern/multires_reshape_apply_base.cc + intern/multires_reshape_ccg.cc + intern/multires_reshape_smooth.cc + intern/multires_reshape_subdivide.cc + intern/multires_reshape_util.cc + intern/multires_reshape_vertcos.cc + intern/multires_subdiv.cc + intern/multires_unsubdivide.cc + intern/multires_versioning.cc intern/nla.c intern/node.cc intern/node_runtime.cc @@ -259,10 +259,10 @@ set(SRC 
intern/particle_child.c intern/particle_distribute.c intern/particle_system.c - intern/pbvh.c intern/pbvh.cc - intern/pbvh_bmesh.c + intern/pbvh_bmesh.cc intern/pbvh_displacement.c + intern/pbvh_colors.cc intern/pbvh_pixels.cc intern/pbvh_uv_islands.cc intern/pointcache.c @@ -280,22 +280,22 @@ set(SRC intern/sound.c intern/speaker.c intern/studiolight.c - intern/subdiv.c + intern/subdiv.cc intern/subdiv_ccg.cc - intern/subdiv_ccg_mask.c - intern/subdiv_ccg_material.c - intern/subdiv_converter.c - intern/subdiv_converter_mesh.c - intern/subdiv_deform.c - intern/subdiv_displacement.c - intern/subdiv_displacement_multires.c - intern/subdiv_eval.c - intern/subdiv_foreach.c + intern/subdiv_ccg_mask.cc + intern/subdiv_ccg_material.cc + intern/subdiv_converter.cc + intern/subdiv_converter_mesh.cc + intern/subdiv_deform.cc + intern/subdiv_displacement.cc + intern/subdiv_displacement_multires.cc + intern/subdiv_eval.cc + intern/subdiv_foreach.cc intern/subdiv_mesh.cc intern/subdiv_modifier.cc - intern/subdiv_stats.c - intern/subdiv_topology.c - intern/subsurf_ccg.c + intern/subdiv_stats.cc + intern/subdiv_topology.cc + intern/subsurf_ccg.cc intern/text.c intern/text_suggestions.c intern/texture.cc @@ -308,7 +308,7 @@ set(SRC intern/tracking_stabilize.c intern/tracking_util.c intern/type_conversions.cc - intern/undo_system.c + intern/undo_system.cc intern/unit.c intern/vfont.c intern/vfontdata_freetype.c @@ -520,10 +520,10 @@ set(SRC intern/data_transfer_intern.h intern/lib_intern.h intern/multires_inline.h - intern/multires_reshape.h + intern/multires_reshape.hh intern/multires_unsubdivide.h intern/ocean_intern.h - intern/pbvh_intern.h + intern/pbvh_intern.hh intern/pbvh_uv_islands.hh intern/subdiv_converter.h intern/subdiv_inline.h @@ -577,15 +577,16 @@ if(WIN32) endif() if(WITH_AUDASPACE) - add_definitions(-DWITH_AUDASPACE) - list(APPEND INC_SYS ${AUDASPACE_C_INCLUDE_DIRS} ) - list(APPEND LIB - ${AUDASPACE_C_LIBRARIES} - ${AUDASPACE_PY_LIBRARIES} - ) + if(WITH_SYSTEM_AUDASPACE) + list(APPEND LIB + ${AUDASPACE_C_LIBRARIES} + ${AUDASPACE_PY_LIBRARIES} + ) + endif() + add_definitions(-DWITH_AUDASPACE) endif() if(WITH_BULLET) @@ -595,13 +596,6 @@ if(WITH_BULLET) list(APPEND INC ../../../intern/rigidbody ) - - if(NOT WITH_SYSTEM_BULLET) - list(APPEND LIB - extern_bullet - ) - endif() - list(APPEND LIB bf_intern_rigidbody @@ -871,6 +865,7 @@ if(WITH_GTESTS) intern/lib_id_remapper_test.cc intern/lib_id_test.cc intern/lib_remap_test.cc + intern/nla_test.cc intern/tracking_test.cc ) set(TEST_INC diff --git a/source/blender/blenkernel/intern/DerivedMesh.cc b/source/blender/blenkernel/intern/DerivedMesh.cc index d09e4728744..ff55ad4d244 100644 --- a/source/blender/blenkernel/intern/DerivedMesh.cc +++ b/source/blender/blenkernel/intern/DerivedMesh.cc @@ -1220,11 +1220,12 @@ static void editbmesh_calc_modifier_final_normals(Mesh *mesh_final, } } else { - /* Same as mesh_calc_modifiers. If using loop normals, poly nors have already been computed. */ + /* Same as #mesh_calc_modifiers. + * If using loop normals, poly normals have already been computed. */ BKE_mesh_ensure_normals_for_display(mesh_final); /* Some modifiers, like data-transfer, may generate those data, we do not want to keep them, - * as they are used by display code when available (i.e. even if autosmooth is disabled). */ + * as they are used by display code when available (i.e. even if auto-smooth is disabled). 
*/ if (CustomData_has_layer(&mesh_final->ldata, CD_NORMAL)) { CustomData_free_layers(&mesh_final->ldata, CD_NORMAL, mesh_final->totloop); } diff --git a/source/blender/blenkernel/intern/action.c b/source/blender/blenkernel/intern/action.c index dcd0c25967b..d4a7a7cbb1e 100644 --- a/source/blender/blenkernel/intern/action.c +++ b/source/blender/blenkernel/intern/action.c @@ -294,7 +294,7 @@ static void action_asset_pre_save(void *asset_ptr, struct AssetMetaData *asset_d } static AssetTypeInfo AssetType_AC = { - /* pre_save_fn */ action_asset_pre_save, + /*pre_save_fn*/ action_asset_pre_save, }; IDTypeInfo IDType_ID_AC = { diff --git a/source/blender/blenkernel/intern/anim_visualization.c b/source/blender/blenkernel/intern/anim_visualization.c index 799845cbce8..31335931704 100644 --- a/source/blender/blenkernel/intern/anim_visualization.c +++ b/source/blender/blenkernel/intern/anim_visualization.c @@ -128,7 +128,7 @@ bMotionPath *animviz_verify_motionpaths(ReportList *reports, dst = &ob->mpath; } - /* avoid 0 size allocs */ + /* Avoid 0 size allocations. */ if (avs->path_sf >= avs->path_ef) { BKE_reportf(reports, RPT_ERROR, diff --git a/source/blender/blenkernel/intern/attribute.cc b/source/blender/blenkernel/intern/attribute.cc index 0348c3da387..12e7056cbe4 100644 --- a/source/blender/blenkernel/intern/attribute.cc +++ b/source/blender/blenkernel/intern/attribute.cc @@ -112,7 +112,7 @@ static std::optional get_attribute_acces } case ID_CV: { Curves &curves_id = reinterpret_cast(id); - CurvesGeometry &curves = CurvesGeometry::wrap(curves_id.geometry); + CurvesGeometry &curves = curves_id.geometry.wrap(); return curves.attributes_for_write(); } default: { diff --git a/source/blender/blenkernel/intern/attribute_access.cc b/source/blender/blenkernel/intern/attribute_access.cc index 92bb7247673..8a6e1486701 100644 --- a/source/blender/blenkernel/intern/attribute_access.cc +++ b/source/blender/blenkernel/intern/attribute_access.cc @@ -913,38 +913,6 @@ Vector retrieve_attributes_for_transfer( return attributes; } -void copy_attribute_domain(const AttributeAccessor src_attributes, - MutableAttributeAccessor dst_attributes, - const IndexMask selection, - const eAttrDomain domain, - const AnonymousAttributePropagationInfo &propagation_info, - const Set &skip) -{ - src_attributes.for_all( - [&](const bke::AttributeIDRef &id, const bke::AttributeMetaData &meta_data) { - if (meta_data.domain != domain) { - return true; - } - if (id.is_anonymous() && !propagation_info.propagate(id.anonymous_id())) { - return true; - } - if (skip.contains(id.name())) { - return true; - } - - const GVArray src = src_attributes.lookup(id, meta_data.domain); - BLI_assert(src); - - /* Copy attribute. 
*/ - GSpanAttributeWriter dst = dst_attributes.lookup_or_add_for_write_only_span( - id, domain, meta_data.data_type); - array_utils::copy(src, selection, dst.span); - dst.finish(); - - return true; - }); -} - } // namespace blender::bke /** \} */ diff --git a/source/blender/blenkernel/intern/blender_undo.c b/source/blender/blenkernel/intern/blender_undo.cc similarity index 85% rename from source/blender/blenkernel/intern/blender_undo.c rename to source/blender/blenkernel/intern/blender_undo.cc index f22dfc6054a..9f548c539c7 100644 --- a/source/blender/blenkernel/intern/blender_undo.c +++ b/source/blender/blenkernel/intern/blender_undo.cc @@ -63,10 +63,10 @@ bool BKE_memfile_undo_decode(MemFileUndoData *mfu, G.fileflags |= G_FILE_NO_UI; if (UNDO_DISK) { - const struct BlendFileReadParams params = {0}; - BlendFileReadReport bf_reports = {.reports = NULL}; + const BlendFileReadParams params{}; + BlendFileReadReport bf_reports{}; struct BlendFileData *bfd = BKE_blendfile_read(mfu->filepath, ¶ms, &bf_reports); - if (bfd != NULL) { + if (bfd != nullptr) { BKE_blendfile_read_setup(C, bfd, ¶ms, &bf_reports); success = true; } @@ -77,10 +77,11 @@ bool BKE_memfile_undo_decode(MemFileUndoData *mfu, if (!use_old_bmain_data) { params.skip_flags |= BLO_READ_SKIP_UNDO_OLD_MAIN; } + BlendFileReadReport blend_file_read_report{}; struct BlendFileData *bfd = BKE_blendfile_read_from_memfile( - bmain, &mfu->memfile, ¶ms, NULL); - if (bfd != NULL) { - BKE_blendfile_read_setup(C, bfd, ¶ms, &(BlendFileReadReport){NULL}); + bmain, &mfu->memfile, ¶ms, nullptr); + if (bfd != nullptr) { + BKE_blendfile_read_setup(C, bfd, ¶ms, &blend_file_read_report); success = true; } } @@ -100,7 +101,7 @@ bool BKE_memfile_undo_decode(MemFileUndoData *mfu, MemFileUndoData *BKE_memfile_undo_encode(Main *bmain, MemFileUndoData *mfu_prev) { - MemFileUndoData *mfu = MEM_callocN(sizeof(MemFileUndoData), __func__); + MemFileUndoData *mfu = MEM_cnew(__func__); /* Include recovery information since undo-data is written out as #BLENDER_QUIT_FILE. */ const int fileflags = G.fileflags | G_FILE_RECOVER_WRITE; @@ -118,13 +119,14 @@ MemFileUndoData *BKE_memfile_undo_encode(Main *bmain, MemFileUndoData *mfu_prev) BLI_snprintf(numstr, sizeof(numstr), "%d.blend", counter); BLI_path_join(filepath, sizeof(filepath), BKE_tempdir_session(), numstr); + const BlendFileWriteParams blend_file_write_params{}; /* success = */ /* UNUSED */ BLO_write_file( - bmain, filepath, fileflags, &(const struct BlendFileWriteParams){0}, NULL); + bmain, filepath, fileflags, &blend_file_write_params, nullptr); BLI_strncpy(mfu->filepath, filepath, sizeof(mfu->filepath)); } else { - MemFile *prevfile = (mfu_prev) ? &(mfu_prev->memfile) : NULL; + MemFile *prevfile = (mfu_prev) ? 
&(mfu_prev->memfile) : nullptr; if (prevfile) { BLO_memfile_clear_future(prevfile); } diff --git a/source/blender/blenkernel/intern/blendfile.c b/source/blender/blenkernel/intern/blendfile.cc similarity index 87% rename from source/blender/blenkernel/intern/blendfile.c rename to source/blender/blenkernel/intern/blendfile.cc index f4421fc0d1c..e151c671b06 100644 --- a/source/blender/blenkernel/intern/blendfile.c +++ b/source/blender/blenkernel/intern/blendfile.cc @@ -83,7 +83,7 @@ static bool blendfile_or_libraries_versions_atleast(Main *bmain, return true; } -static bool foreach_path_clean_cb(BPathForeachPathData *UNUSED(bpath_data), +static bool foreach_path_clean_cb(BPathForeachPathData * /*bpath_data*/, char *path_dst, const char *path_src) { @@ -95,12 +95,13 @@ static bool foreach_path_clean_cb(BPathForeachPathData *UNUSED(bpath_data), /* make sure path names are correct for OS */ static void clean_paths(Main *bmain) { - BKE_bpath_foreach_path_main(&(BPathForeachPathData){ - .bmain = bmain, - .callback_function = foreach_path_clean_cb, - .flag = BKE_BPATH_FOREACH_PATH_SKIP_MULTIFILE, - .user_data = NULL, - }); + BPathForeachPathData foreach_path_data{}; + foreach_path_data.bmain = bmain; + foreach_path_data.callback_function = foreach_path_clean_cb; + foreach_path_data.flag = BKE_BPATH_FOREACH_PATH_SKIP_MULTIFILE; + foreach_path_data.user_data = nullptr; + + BKE_bpath_foreach_path_main(&foreach_path_data); LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) { BLI_path_slash_native(scene->r.pic); @@ -110,7 +111,7 @@ static void clean_paths(Main *bmain) static bool wm_scene_is_visible(wmWindowManager *wm, Scene *scene) { wmWindow *win; - for (win = wm->windows.first; win; win = win->next) { + for (win = static_cast(wm->windows.first); win; win = win->next) { if (win->scene == scene) { return true; } @@ -123,7 +124,7 @@ static void setup_app_userdef(BlendFileData *bfd) if (bfd->user) { /* only here free userdef themes... */ BKE_blender_userdef_data_set_and_free(bfd->user); - bfd->user = NULL; + bfd->user = nullptr; /* Security issue: any blend file could include a USER block. * @@ -151,7 +152,7 @@ static void setup_app_data(bContext *C, BlendFileReadReport *reports) { Main *bmain = G_MAIN; - Scene *curscene = NULL; + Scene *curscene = nullptr; const bool recover = (G.fileflags & G_FILE_RECOVER_READ) != 0; const bool is_startup = params->is_startup; enum { @@ -161,12 +162,12 @@ static void setup_app_data(bContext *C, } mode; if (params->undo_direction != STEP_INVALID) { - BLI_assert(bfd->curscene != NULL); + BLI_assert(bfd->curscene != nullptr); mode = LOAD_UNDO; } - /* may happen with library files - UNDO file should never have NULL curscene (but may have a - * NULL curscreen)... */ - else if (ELEM(NULL, bfd->curscreen, bfd->curscene)) { + /* may happen with library files - UNDO file should never have nullptr curscene (but may have a + * nullptr curscreen)... 
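The blendfile.cc conversion above keeps applying one pattern: C99 compound literals and designated initializers, which C++ rejects, become named value-initialized structs. A hedged before/after sketch with a placeholder call:

/* C (before): */
/*   some_call(&(const struct BlendFileReadParams){0}); */

/* C++ (after): value-initialization zeroes the struct and the named variable
 * outlives the call. */
const BlendFileReadParams params{};
some_call(&params);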
*/ + else if (ELEM(nullptr, bfd->curscreen, bfd->curscene)) { BKE_report(reports->reports, RPT_WARNING, "Library file, loading empty scene"); mode = LOAD_UI_OFF; } @@ -205,7 +206,7 @@ static void setup_app_data(bContext *C, * see: T43424 */ wmWindow *win; - bScreen *curscreen = NULL; + bScreen *curscreen = nullptr; ViewLayer *cur_view_layer; bool track_undo_scene; @@ -213,10 +214,10 @@ static void setup_app_data(bContext *C, SWAP(ListBase, bmain->wm, bfd->main->wm); SWAP(ListBase, bmain->workspaces, bfd->main->workspaces); SWAP(ListBase, bmain->screens, bfd->main->screens); - if (bmain->name_map != NULL) { + if (bmain->name_map != nullptr) { BKE_main_namemap_destroy(&bmain->name_map); } - if (bfd->main->name_map != NULL) { + if (bfd->main->name_map != nullptr) { BKE_main_namemap_destroy(&bfd->main->name_map); } @@ -250,14 +251,14 @@ static void setup_app_data(bContext *C, track_undo_scene = (mode == LOAD_UNDO && curscreen && curscene && bfd->main->wm.first); - if (curscene == NULL) { - curscene = bfd->main->scenes.first; + if (curscene == nullptr) { + curscene = static_cast(bfd->main->scenes.first); } /* empty file, we add a scene to make Blender work */ - if (curscene == NULL) { + if (curscene == nullptr) { curscene = BKE_scene_add(bfd->main, "Empty"); } - if (cur_view_layer == NULL) { + if (cur_view_layer == nullptr) { /* fallback to scene layer */ cur_view_layer = BKE_view_layer_default_view(curscene); } @@ -267,7 +268,7 @@ static void setup_app_data(bContext *C, * replace it with 'curscene' if its needed */ } /* and we enforce curscene to be in current screen */ - else if (win) { /* The window may be NULL in background-mode. */ + else if (win) { /* The window may be nullptr in background-mode. */ win->scene = curscene; } @@ -278,7 +279,7 @@ static void setup_app_data(bContext *C, } if (track_undo_scene) { - wmWindowManager *wm = bfd->main->wm.first; + wmWindowManager *wm = static_cast(bfd->main->wm.first); if (wm_scene_is_visible(wm, bfd->curscene) == false) { curscene = bfd->curscene; win->scene = curscene; @@ -296,7 +297,7 @@ static void setup_app_data(bContext *C, BKE_blender_globals_main_replace(bfd->main); bmain = G_MAIN; - bfd->main = NULL; + bfd->main = nullptr; CTX_data_main_set(C, bmain); @@ -306,12 +307,12 @@ static void setup_app_data(bContext *C, CTX_data_scene_set(C, curscene); } else { - CTX_wm_manager_set(C, bmain->wm.first); + CTX_wm_manager_set(C, static_cast(bmain->wm.first)); CTX_wm_screen_set(C, bfd->curscreen); CTX_data_scene_set(C, bfd->curscene); - CTX_wm_area_set(C, NULL); - CTX_wm_region_set(C, NULL); - CTX_wm_menu_set(C, NULL); + CTX_wm_area_set(C, nullptr); + CTX_wm_region_set(C, nullptr); + CTX_wm_menu_set(C, nullptr); curscene = bfd->curscene; } @@ -320,7 +321,7 @@ static void setup_app_data(bContext *C, G.fileflags = (G.fileflags & fileflags_keep) | (bfd->fileflags & ~fileflags_keep); /* this can happen when active scene was lib-linked, and doesn't exist anymore */ - if (CTX_data_scene(C) == NULL) { + if (CTX_data_scene(C) == nullptr) { wmWindow *win = CTX_wm_window(C); /* in case we don't even have a local scene, add one */ @@ -328,7 +329,7 @@ static void setup_app_data(bContext *C, BKE_scene_add(bmain, "Empty"); } - CTX_data_scene_set(C, bmain->scenes.first); + CTX_data_scene_set(C, static_cast(bmain->scenes.first)); win->scene = CTX_data_scene(C); curscene = CTX_data_scene(C); } @@ -381,10 +382,10 @@ static void setup_app_data(bContext *C, STRNCPY(bmain->filepath, bfd->filepath); } - /* baseflags, groups, make depsgraph, etc */ + /* Base-flags, groups, 
make depsgraph, etc. */ /* first handle case if other windows have different scenes visible */ if (mode == LOAD_UI) { - wmWindowManager *wm = bmain->wm.first; + wmWindowManager *wm = static_cast(bmain->wm.first); if (wm) { LISTBASE_FOREACH (wmWindow *, win, &wm->windows) { @@ -435,7 +436,7 @@ static void setup_app_data(bContext *C, reports->duration.lib_overrides_resync; /* We need to rebuild some of the deleted override rules (for UI feedback purpose). */ - BKE_lib_override_library_main_operations_create(bmain, true, NULL); + BKE_lib_override_library_main_operations_create(bmain, true, nullptr); } } @@ -487,7 +488,7 @@ void BKE_blendfile_read_setup(bContext *C, const struct BlendFileReadParams *params, BlendFileReadReport *reports) { - BKE_blendfile_read_setup_ex(C, bfd, params, reports, false, NULL); + BKE_blendfile_read_setup_ex(C, bfd, params, reports, false, nullptr); } struct BlendFileData *BKE_blendfile_read(const char *filepath, @@ -499,7 +500,7 @@ struct BlendFileData *BKE_blendfile_read(const char *filepath, printf("Read blend: %s\n", filepath); } - BlendFileData *bfd = BLO_read_from_file(filepath, params->skip_flags, reports); + BlendFileData *bfd = BLO_read_from_file(filepath, eBLOReadSkip(params->skip_flags), reports); if (bfd) { handle_subversion_warning(bfd->main, reports); } @@ -514,7 +515,8 @@ struct BlendFileData *BKE_blendfile_read_from_memory(const void *filebuf, const struct BlendFileReadParams *params, ReportList *reports) { - BlendFileData *bfd = BLO_read_from_memory(filebuf, filelength, params->skip_flags, reports); + BlendFileData *bfd = BLO_read_from_memory( + filebuf, filelength, eBLOReadSkip(params->skip_flags), reports); if (bfd) { /* Pass. */ } @@ -595,11 +597,13 @@ void BKE_blendfile_read_make_empty(bContext *C) UserDef *BKE_blendfile_userdef_read(const char *filepath, ReportList *reports) { BlendFileData *bfd; - UserDef *userdef = NULL; + UserDef *userdef = nullptr; - bfd = BLO_read_from_file(filepath, - BLO_READ_SKIP_ALL & ~BLO_READ_SKIP_USERDEF, - &(struct BlendFileReadReport){.reports = reports}); + BlendFileReadReport blend_file_read_reports{}; + blend_file_read_reports.reports = reports; + + bfd = BLO_read_from_file( + filepath, BLO_READ_SKIP_ALL & ~BLO_READ_SKIP_USERDEF, &blend_file_read_reports); if (bfd) { if (bfd->user) { userdef = bfd->user; @@ -616,7 +620,7 @@ UserDef *BKE_blendfile_userdef_read_from_memory(const void *filebuf, ReportList *reports) { BlendFileData *bfd; - UserDef *userdef = NULL; + UserDef *userdef = nullptr; bfd = BLO_read_from_memory( filebuf, filelength, BLO_READ_SKIP_ALL & ~BLO_READ_SKIP_USERDEF, reports); @@ -636,8 +640,8 @@ UserDef *BKE_blendfile_userdef_read_from_memory(const void *filebuf, UserDef *BKE_blendfile_userdef_from_defaults(void) { - UserDef *userdef = MEM_mallocN(sizeof(*userdef), __func__); - memcpy(userdef, &U_default, sizeof(*userdef)); + UserDef *userdef = static_cast(MEM_callocN(sizeof(UserDef), __func__)); + *userdef = blender::dna::shallow_copy(U_default); /* Add-ons. */ { @@ -663,7 +667,7 @@ UserDef *BKE_blendfile_userdef_from_defaults(void) /* Theme. */ { - bTheme *btheme = MEM_mallocN(sizeof(*btheme), __func__); + bTheme *btheme = static_cast(MEM_mallocN(sizeof(*btheme), __func__)); memcpy(btheme, &U_theme_default, sizeof(*btheme)); BLI_addtail(&userdef->themes, btheme); @@ -696,16 +700,13 @@ UserDef *BKE_blendfile_userdef_from_defaults(void) bool BKE_blendfile_userdef_write(const char *filepath, ReportList *reports) { - Main *mainb = MEM_callocN(sizeof(Main), "empty main"); + Main *mainb = MEM_cnew
("empty main"); bool ok = false; - if (BLO_write_file(mainb, - filepath, - 0, - &(const struct BlendFileWriteParams){ - .use_userdef = true, - }, - reports)) { + BlendFileWriteParams params{}; + params.use_userdef = true; + + if (BLO_write_file(mainb, filepath, 0, ¶ms, reports)) { ok = true; } @@ -721,9 +722,9 @@ bool BKE_blendfile_userdef_write_app_template(const char *filepath, ReportList * * falling back to the defaults. * If the preferences exists but file reading fails - the file can be assumed corrupt * so overwriting the file is OK. */ - UserDef *userdef_default = BLI_exists(filepath) ? BKE_blendfile_userdef_read(filepath, NULL) : - NULL; - if (userdef_default == NULL) { + UserDef *userdef_default = BLI_exists(filepath) ? BKE_blendfile_userdef_read(filepath, nullptr) : + nullptr; + if (userdef_default == nullptr) { userdef_default = BKE_blendfile_userdef_from_defaults(); } @@ -742,7 +743,7 @@ bool BKE_blendfile_userdef_write_all(ReportList *reports) bool ok = true; const bool use_template_userpref = BKE_appdir_app_template_has_userpref(U.app_template); - if ((cfgdir = BKE_appdir_folder_id_create(BLENDER_USER_CONFIG, NULL))) { + if ((cfgdir = BKE_appdir_folder_id_create(BLENDER_USER_CONFIG, nullptr))) { bool ok_write; BLI_path_join(filepath, sizeof(filepath), cfgdir, BLENDER_USERPREF_FILE); @@ -806,18 +807,19 @@ WorkspaceConfigFileData *BKE_blendfile_workspace_config_read(const char *filepat ReportList *reports) { BlendFileData *bfd; - WorkspaceConfigFileData *workspace_config = NULL; + WorkspaceConfigFileData *workspace_config = nullptr; if (filepath) { - bfd = BLO_read_from_file( - filepath, BLO_READ_SKIP_USERDEF, &(struct BlendFileReadReport){.reports = reports}); + BlendFileReadReport blend_file_read_reports{}; + blend_file_read_reports.reports = reports; + bfd = BLO_read_from_file(filepath, BLO_READ_SKIP_USERDEF, &blend_file_read_reports); } else { bfd = BLO_read_from_memory(filebuf, filelength, BLO_READ_SKIP_USERDEF, reports); } if (bfd) { - workspace_config = MEM_callocN(sizeof(*workspace_config), __func__); + workspace_config = MEM_cnew(__func__); workspace_config->main = bfd->main; /* Only 2.80+ files have actual workspaces, don't try to use screens @@ -839,7 +841,8 @@ bool BKE_blendfile_workspace_config_write(Main *bmain, const char *filepath, Rep BKE_blendfile_write_partial_begin(bmain); - for (WorkSpace *workspace = bmain->workspaces.first; workspace; workspace = workspace->id.next) { + for (WorkSpace *workspace = static_cast(bmain->workspaces.first); workspace; + workspace = static_cast(workspace->id.next)) { BKE_blendfile_write_partial_tag_ID(&workspace->id, true); } @@ -880,10 +883,10 @@ void BKE_blendfile_write_partial_tag_ID(ID *id, bool set) } } -static void blendfile_write_partial_cb(void *UNUSED(handle), Main *UNUSED(bmain), void *vid) +static void blendfile_write_partial_cb(void * /*handle*/, Main * /*bmain*/, void *vid) { if (vid) { - ID *id = vid; + ID *id = static_cast(vid); /* only tag for need-expand if not done, prevents eternal loops */ if ((id->tag & LIB_TAG_DOIT) == 0) { id->tag |= LIB_TAG_NEED_EXPAND | LIB_TAG_DOIT; @@ -901,11 +904,11 @@ bool BKE_blendfile_write_partial(Main *bmain_src, const int remap_mode, ReportList *reports) { - Main *bmain_dst = MEM_callocN(sizeof(Main), "copybuffer"); + Main *bmain_dst = MEM_cnew
("copybuffer"); ListBase *lbarray_dst[INDEX_ID_MAX], *lbarray_src[INDEX_ID_MAX]; int a, retval; - void *path_list_backup = NULL; + void *path_list_backup = nullptr; const eBPathForeachFlag path_list_flag = (BKE_BPATH_FOREACH_PATH_SKIP_LINKED | BKE_BPATH_FOREACH_PATH_SKIP_MULTIFILE); @@ -914,7 +917,7 @@ bool BKE_blendfile_write_partial(Main *bmain_src, STRNCPY(bmain_dst->filepath, bmain_src->filepath); BLO_main_expander(blendfile_write_partial_cb); - BLO_expand_main(NULL, bmain_src); + BLO_expand_main(nullptr, bmain_src); /* move over all tagged blocks */ set_listbasepointers(bmain_src, lbarray_src); @@ -923,8 +926,8 @@ bool BKE_blendfile_write_partial(Main *bmain_src, ID *id, *nextid; ListBase *lb_dst = lbarray_dst[a], *lb_src = lbarray_src[a]; - for (id = lb_src->first; id; id = nextid) { - nextid = id->next; + for (id = static_cast(lb_src->first); id; id = nextid) { + nextid = static_cast(id->next); if (id->tag & LIB_TAG_DOIT) { BLI_remlink(lb_src, id); BLI_addtail(lb_dst, id); @@ -946,13 +949,9 @@ bool BKE_blendfile_write_partial(Main *bmain_src, } /* save the buffer */ - retval = BLO_write_file(bmain_dst, - filepath, - write_flags, - &(const struct BlendFileWriteParams){ - .remap_mode = remap_mode, - }, - reports); + BlendFileWriteParams blend_file_write_params{}; + blend_file_write_params.remap_mode = eBLO_WritePathRemap(remap_mode); + retval = BLO_write_file(bmain_dst, filepath, write_flags, &blend_file_write_params, reports); if (path_list_backup) { BKE_bpath_list_restore(bmain_dst, path_list_flag, path_list_backup); @@ -966,9 +965,9 @@ bool BKE_blendfile_write_partial(Main *bmain_src, ID *id; ListBase *lb_dst = lbarray_dst[a], *lb_src = lbarray_src[a]; - while ((id = BLI_pophead(lb_src))) { + while ((id = static_cast(BLI_pophead(lb_src)))) { BLI_addtail(lb_dst, id); - id_sort_by_name(lb_dst, id, NULL); + id_sort_by_name(lb_dst, id, nullptr); } } diff --git a/source/blender/blenkernel/intern/blendfile_link_append.c b/source/blender/blenkernel/intern/blendfile_link_append.c index 3f3c1028d10..49b480fcb64 100644 --- a/source/blender/blenkernel/intern/blendfile_link_append.c +++ b/source/blender/blenkernel/intern/blendfile_link_append.c @@ -574,7 +574,8 @@ static void loose_data_instantiate_obdata_preprocess( * (return false). 
*/ static bool loose_data_instantiate_collection_parents_check_recursive(Collection *collection) { - for (CollectionParent *parent_collection = collection->parents.first; parent_collection != NULL; + for (CollectionParent *parent_collection = collection->runtime.parents.first; + parent_collection != NULL; parent_collection = parent_collection->next) { if ((parent_collection->collection->id.tag & LIB_TAG_DOIT) != 0) { return true; diff --git a/source/blender/blenkernel/intern/brush.cc b/source/blender/blenkernel/intern/brush.cc index 2725a420e40..f4542ee4862 100644 --- a/source/blender/blenkernel/intern/brush.cc +++ b/source/blender/blenkernel/intern/brush.cc @@ -83,7 +83,8 @@ static void brush_copy_data(Main * /*bmain*/, ID *id_dst, const ID *id_src, cons brush_dst->automasking_cavity_curve = BKE_curvemapping_copy(brush_src->automasking_cavity_curve); if (brush_src->gpencil_settings != nullptr) { - brush_dst->gpencil_settings = MEM_cnew(__func__, *(brush_src->gpencil_settings)); + brush_dst->gpencil_settings = MEM_cnew(__func__, + *(brush_src->gpencil_settings)); brush_dst->gpencil_settings->curve_sensitivity = BKE_curvemapping_copy( brush_src->gpencil_settings->curve_sensitivity); brush_dst->gpencil_settings->curve_strength = BKE_curvemapping_copy( @@ -105,7 +106,8 @@ static void brush_copy_data(Main * /*bmain*/, ID *id_dst, const ID *id_src, cons brush_src->gpencil_settings->curve_rand_value); } if (brush_src->curves_sculpt_settings != nullptr) { - brush_dst->curves_sculpt_settings = MEM_cnew(__func__, *(brush_src->curves_sculpt_settings)); + brush_dst->curves_sculpt_settings = MEM_cnew( + __func__, *(brush_src->curves_sculpt_settings)); } /* enable fake user by default */ @@ -173,7 +175,8 @@ static void brush_make_local(Main *bmain, ID *id, const int flags) BKE_lib_id_make_local_generic_action_define(bmain, id, flags, &force_local, &force_copy); if (brush->clone.image) { - /* Special case: ima always local immediately. Clone image should only have one user anyway. */ + /* Special case: `ima` always local immediately. + * Clone image should only have one user anyway. */ /* FIXME: Recursive calls affecting other non-embedded IDs are really bad and should be avoided * in IDType callbacks. Higher-level ID management code usually does not expect such things and * does not deal properly with it. 
*/ @@ -536,33 +539,33 @@ static void brush_undo_preserve(BlendLibReader *reader, ID *id_new, ID *id_old) } IDTypeInfo IDType_ID_BR = { - /* id_code */ ID_BR, - /* id_filter */ FILTER_ID_BR, - /* main_listbase_index */ INDEX_ID_BR, - /* struct_size */ sizeof(Brush), - /* name */ "Brush", - /* name_plural */ "brushes", - /* translation_context */ BLT_I18NCONTEXT_ID_BRUSH, - /* flags */ IDTYPE_FLAGS_NO_ANIMDATA, - /* asset_type_info */ nullptr, + /*id_code*/ ID_BR, + /*id_filter*/ FILTER_ID_BR, + /*main_listbase_index*/ INDEX_ID_BR, + /*struct_size*/ sizeof(Brush), + /*name*/ "Brush", + /*name_plural*/ "brushes", + /*translation_context*/ BLT_I18NCONTEXT_ID_BRUSH, + /*flags*/ IDTYPE_FLAGS_NO_ANIMDATA, + /*asset_type_info*/ nullptr, - /* init_data */ brush_init_data, - /* copy_data */ brush_copy_data, - /* free_data */ brush_free_data, - /* make_local */ brush_make_local, - /* foreach_id */ brush_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ brush_foreach_path, - /* owner_pointer_get */ nullptr, + /*init_data*/ brush_init_data, + /*copy_data*/ brush_copy_data, + /*free_data*/ brush_free_data, + /*make_local*/ brush_make_local, + /*foreach_id*/ brush_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ brush_foreach_path, + /*owner_pointer_get*/ nullptr, - /* blend_write */ brush_blend_write, - /* blend_read_data */ brush_blend_read_data, - /* blend_read_lib */ brush_blend_read_lib, - /* blend_read_expand */ brush_blend_read_expand, + /*blend_write*/ brush_blend_write, + /*blend_read_data*/ brush_blend_read_data, + /*blend_read_lib*/ brush_blend_read_lib, + /*blend_read_expand*/ brush_blend_read_expand, - /* blend_read_undo_preserve */ brush_undo_preserve, + /*blend_read_undo_preserve*/ brush_undo_preserve, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; static RNG *brush_rng; diff --git a/source/blender/blenkernel/intern/cachefile.c b/source/blender/blenkernel/intern/cachefile.c index 5d19db323f8..5968a6b7296 100644 --- a/source/blender/blenkernel/intern/cachefile.c +++ b/source/blender/blenkernel/intern/cachefile.c @@ -366,7 +366,7 @@ void BKE_cachefile_eval(Main *bmain, Depsgraph *depsgraph, CacheFile *cache_file } #endif #ifdef WITH_USD - if (BLI_path_extension_check_glob(filepath, "*.usd;*.usda;*.usdc")) { + if (BLI_path_extension_check_glob(filepath, "*.usd;*.usda;*.usdc;*.usdz")) { cache_file->type = CACHEFILE_TYPE_USD; cache_file->handle = USD_create_handle(bmain, filepath, &cache_file->object_paths); BLI_strncpy(cache_file->handle_filepath, filepath, FILE_MAX); diff --git a/source/blender/blenkernel/intern/cdderivedmesh.c b/source/blender/blenkernel/intern/cdderivedmesh.cc similarity index 89% rename from source/blender/blenkernel/intern/cdderivedmesh.c rename to source/blender/blenkernel/intern/cdderivedmesh.cc index a5d179fb2cb..2a98dbe6fb9 100644 --- a/source/blender/blenkernel/intern/cdderivedmesh.c +++ b/source/blender/blenkernel/intern/cdderivedmesh.cc @@ -7,6 +7,10 @@ * BKE_cdderivedmesh.h contains the function prototypes for this file. 
*/ +#include +#include +#include + #include "atomic_ops.h" #include "BLI_math.h" @@ -29,11 +33,7 @@ #include "MEM_guardedalloc.h" -#include -#include -#include - -typedef struct { +struct CDDerivedMesh { DerivedMesh dm; /* these point to data in the DerivedMesh custom data layers, @@ -52,7 +52,7 @@ typedef struct { /* Mesh connectivity */ MeshElemMap *pmap; int *pmap_mem; -} CDDerivedMesh; +}; /**************** DerivedMesh interface functions ****************/ static int cdDM_getNumVerts(DerivedMesh *dm) @@ -131,7 +131,7 @@ static void cdDM_recalc_looptri(DerivedMesh *dm) BLI_assert(cddm->dm.looptris.array == NULL); atomic_cas_ptr( (void **)&cddm->dm.looptris.array, cddm->dm.looptris.array, cddm->dm.looptris.array_wip); - cddm->dm.looptris.array_wip = NULL; + cddm->dm.looptris.array_wip = nullptr; } static void cdDM_free_internal(CDDerivedMesh *cddm) @@ -157,11 +157,8 @@ static void cdDM_release(DerivedMesh *dm) /**************** CDDM interface functions ****************/ static CDDerivedMesh *cdDM_create(const char *desc) { - CDDerivedMesh *cddm; - DerivedMesh *dm; - - cddm = MEM_callocN(sizeof(*cddm), desc); - dm = &cddm->dm; + CDDerivedMesh *cddm = MEM_cnew<CDDerivedMesh>(desc); + DerivedMesh *dm = &cddm->dm; dm->getNumVerts = cdDM_getNumVerts; dm->getNumEdges = cdDM_getNumEdges; @@ -221,18 +218,21 @@ static DerivedMesh *cdDM_from_mesh_ex(Mesh *mesh, CustomData_merge(&mesh->ldata, &dm->loopData, cddata_masks.lmask, alloctype, mesh->totloop); CustomData_merge(&mesh->pdata, &dm->polyData, cddata_masks.pmask, alloctype, mesh->totpoly); - cddm->vert_positions = CustomData_get_layer_named_for_write( - &dm->vertData, CD_PROP_FLOAT3, "position", mesh->totvert); + cddm->vert_positions = static_cast<float(*)[3]>(CustomData_get_layer_named_for_write( + &dm->vertData, CD_PROP_FLOAT3, "position", mesh->totvert)); /* Though this may be an unnecessary calculation, simply retrieving the layer may return nothing * or dirty normals.
*/ cddm->vert_normals = BKE_mesh_vertex_normals_ensure(mesh); - cddm->medge = CustomData_get_layer_for_write(&dm->edgeData, CD_MEDGE, mesh->totedge); - cddm->mloop = CustomData_get_layer_for_write(&dm->loopData, CD_MLOOP, mesh->totloop); - cddm->mpoly = CustomData_get_layer_for_write(&dm->polyData, CD_MPOLY, mesh->totpoly); + cddm->medge = static_cast<MEdge *>( CustomData_get_layer_for_write(&dm->edgeData, CD_MEDGE, mesh->totedge)); + cddm->mloop = static_cast<MLoop *>( CustomData_get_layer_for_write(&dm->loopData, CD_MLOOP, mesh->totloop)); + cddm->mpoly = static_cast<MPoly *>( CustomData_get_layer_for_write(&dm->polyData, CD_MPOLY, mesh->totpoly)); #if 0 cddm->mface = CustomData_get_layer(&dm->faceData, CD_MFACE); #else - cddm->mface = NULL; + cddm->mface = nullptr; #endif /* commented since even when CD_ORIGINDEX was first added this line fails diff --git a/source/blender/blenkernel/intern/collection.c b/source/blender/blenkernel/intern/collection.c index 862098768d0..b9c56085274 100644 --- a/source/blender/blenkernel/intern/collection.c +++ b/source/blender/blenkernel/intern/collection.c @@ -112,14 +112,13 @@ static void collection_copy_data(Main *bmain, ID *id_dst, const ID *id_src, cons collection_dst->preview = NULL; } - collection_dst->flag &= ~COLLECTION_HAS_OBJECT_CACHE; - collection_dst->flag &= ~COLLECTION_HAS_OBJECT_CACHE_INSTANCED; - BLI_listbase_clear(&collection_dst->object_cache); - BLI_listbase_clear(&collection_dst->object_cache_instanced); + collection_dst->flag &= ~(COLLECTION_HAS_OBJECT_CACHE | COLLECTION_HAS_OBJECT_CACHE_INSTANCED); + BLI_listbase_clear(&collection_dst->runtime.object_cache); + BLI_listbase_clear(&collection_dst->runtime.object_cache_instanced); BLI_listbase_clear(&collection_dst->gobject); BLI_listbase_clear(&collection_dst->children); - BLI_listbase_clear(&collection_dst->parents); + BLI_listbase_clear(&collection_dst->runtime.parents); LISTBASE_FOREACH (CollectionChild *, child, &collection_src->children) { collection_child_add(collection_dst, child->collection, flag, false); @@ -138,7 +137,7 @@ static void collection_free_data(ID *id) BLI_freelistN(&collection->gobject); BLI_freelistN(&collection->children); - BLI_freelistN(&collection->parents); + BLI_freelistN(&collection->runtime.parents); BKE_collection_object_cache_free(collection); } @@ -148,7 +147,7 @@ static void collection_foreach_id(ID *id, LibraryForeachIDData *data) Collection *collection = (Collection *)id; BKE_LIB_FOREACHID_PROCESS_ID( - data, collection->owner_id, IDWALK_CB_LOOPBACK | IDWALK_CB_NEVER_SELF); + data, collection->runtime.owner_id, IDWALK_CB_LOOPBACK | IDWALK_CB_NEVER_SELF); LISTBASE_FOREACH (CollectionObject *, cob, &collection->gobject) { BKE_LIB_FOREACHID_PROCESS_IDSUPER(data, cob->ob, IDWALK_CB_USER); @@ -157,7 +156,7 @@ static void collection_foreach_id(ID *id, LibraryForeachIDData *data) BKE_LIB_FOREACHID_PROCESS_IDSUPER( data, child->collection, IDWALK_CB_NEVER_SELF | IDWALK_CB_USER); } - LISTBASE_FOREACH (CollectionParent *, parent, &collection->parents) { + LISTBASE_FOREACH (CollectionParent *, parent, &collection->runtime.parents) { /* XXX This is very weak. The whole idea of keeping pointers to private IDs is very bad * anyway...
*/ const int cb_flag = ((parent->collection != NULL && @@ -178,11 +177,12 @@ static ID **collection_owner_pointer_get(ID *id) Collection *master_collection = (Collection *)id; BLI_assert((master_collection->flag & COLLECTION_IS_MASTER) != 0); - BLI_assert(master_collection->owner_id != NULL); - BLI_assert(GS(master_collection->owner_id->name) == ID_SCE); - BLI_assert(((Scene *)master_collection->owner_id)->master_collection == master_collection); + BLI_assert(master_collection->runtime.owner_id != NULL); + BLI_assert(GS(master_collection->runtime.owner_id->name) == ID_SCE); + BLI_assert(((Scene *)master_collection->runtime.owner_id)->master_collection == + master_collection); - return &master_collection->owner_id; + return &master_collection->runtime.owner_id; } void BKE_collection_blend_write_nolib(BlendWriter *writer, Collection *collection) @@ -205,13 +205,9 @@ static void collection_blend_write(BlendWriter *writer, ID *id, const void *id_a { Collection *collection = (Collection *)id; + memset(&collection->runtime, 0, sizeof(collection->runtime)); /* Clean up, important in undo case to reduce false detection of changed data-blocks. */ - collection->flag &= ~COLLECTION_HAS_OBJECT_CACHE; - collection->flag &= ~COLLECTION_HAS_OBJECT_CACHE_INSTANCED; - collection->tag = 0; - BLI_listbase_clear(&collection->object_cache); - BLI_listbase_clear(&collection->object_cache_instanced); - BLI_listbase_clear(&collection->parents); + collection->flag &= ~COLLECTION_FLAG_ALL_RUNTIME; /* write LibData */ BLO_write_id_struct(writer, Collection, id_address, &collection->id); @@ -258,7 +254,11 @@ void BKE_collection_blend_read_data(BlendDataReader *reader, Collection *collect } collection->id.flag |= LIB_EMBEDDED_DATA; } - collection->owner_id = owner_id; + + memset(&collection->runtime, 0, sizeof(collection->runtime)); + collection->flag &= ~COLLECTION_FLAG_ALL_RUNTIME; + + collection->runtime.owner_id = owner_id; BLO_read_list(reader, &collection->gobject); BLO_read_list(reader, &collection->children); @@ -266,13 +266,6 @@ void BKE_collection_blend_read_data(BlendDataReader *reader, Collection *collect BLO_read_data_address(reader, &collection->preview); BKE_previewimg_blend_read(reader, collection->preview); - collection->flag &= ~COLLECTION_HAS_OBJECT_CACHE; - collection->flag &= ~COLLECTION_HAS_OBJECT_CACHE_INSTANCED; - collection->tag = 0; - BLI_listbase_clear(&collection->object_cache); - BLI_listbase_clear(&collection->object_cache_instanced); - BLI_listbase_clear(&collection->parents); - #ifdef USE_COLLECTION_COMPAT_28 /* This runs before the very first doversion. */ BLO_read_data_address(reader, &collection->collection); @@ -543,7 +536,7 @@ bool BKE_collection_delete(Main *bmain, Collection *collection, bool hierarchy) else { /* Link child collections into parent collection. */ LISTBASE_FOREACH (CollectionChild *, child, &collection->children) { - LISTBASE_FOREACH (CollectionParent *, cparent, &collection->parents) { + LISTBASE_FOREACH (CollectionParent *, cparent, &collection->runtime.parents) { Collection *parent = cparent->collection; collection_child_add(parent, child->collection, 0, true); } @@ -552,7 +545,7 @@ bool BKE_collection_delete(Main *bmain, Collection *collection, bool hierarchy) CollectionObject *cob = collection->gobject.first; while (cob != NULL) { /* Link child object into parent collections. 
*/ - LISTBASE_FOREACH (CollectionParent *, cparent, &collection->parents) { + LISTBASE_FOREACH (CollectionParent *, cparent, &collection->runtime.parents) { Collection *parent = cparent->collection; collection_object_add(bmain, parent, cob->ob, 0, true); } @@ -819,13 +812,13 @@ ListBase BKE_collection_object_cache_get(Collection *collection) BLI_mutex_lock(&cache_lock); if (!(collection->flag & COLLECTION_HAS_OBJECT_CACHE)) { - collection_object_cache_fill(&collection->object_cache, collection, 0, false); + collection_object_cache_fill(&collection->runtime.object_cache, collection, 0, false); collection->flag |= COLLECTION_HAS_OBJECT_CACHE; } BLI_mutex_unlock(&cache_lock); } - return collection->object_cache; + return collection->runtime.object_cache; } ListBase BKE_collection_object_cache_instanced_get(Collection *collection) @@ -835,24 +828,24 @@ ListBase BKE_collection_object_cache_instanced_get(Collection *collection) BLI_mutex_lock(&cache_lock); if (!(collection->flag & COLLECTION_HAS_OBJECT_CACHE_INSTANCED)) { - collection_object_cache_fill(&collection->object_cache_instanced, collection, 0, true); + collection_object_cache_fill( + &collection->runtime.object_cache_instanced, collection, 0, true); collection->flag |= COLLECTION_HAS_OBJECT_CACHE_INSTANCED; } BLI_mutex_unlock(&cache_lock); } - return collection->object_cache_instanced; + return collection->runtime.object_cache_instanced; } static void collection_object_cache_free(Collection *collection) { /* Clear own cache an for all parents, since those are affected by changes as well. */ - collection->flag &= ~COLLECTION_HAS_OBJECT_CACHE; - collection->flag &= ~COLLECTION_HAS_OBJECT_CACHE_INSTANCED; - BLI_freelistN(&collection->object_cache); - BLI_freelistN(&collection->object_cache_instanced); + collection->flag &= ~(COLLECTION_HAS_OBJECT_CACHE | COLLECTION_HAS_OBJECT_CACHE_INSTANCED); + BLI_freelistN(&collection->runtime.object_cache); + BLI_freelistN(&collection->runtime.object_cache_instanced); - LISTBASE_FOREACH (CollectionParent *, parent, &collection->parents) { + LISTBASE_FOREACH (CollectionParent *, parent, &collection->runtime.parents) { collection_object_cache_free(parent->collection); } } @@ -887,7 +880,7 @@ Collection *BKE_collection_master_add(Scene *scene) Collection *master_collection = BKE_libblock_alloc( NULL, ID_GR, BKE_SCENE_COLLECTION_NAME, LIB_ID_CREATE_NO_MAIN); master_collection->id.flag |= LIB_EMBEDDED_DATA; - master_collection->owner_id = &scene->id; + master_collection->runtime.owner_id = &scene->id; master_collection->flag |= COLLECTION_IS_MASTER; master_collection->color_tag = COLLECTION_COLOR_NONE; @@ -1027,7 +1020,7 @@ static void collection_tag_update_parent_recursive(Main *bmain, DEG_id_tag_update_ex(bmain, &collection->id, flag); - LISTBASE_FOREACH (CollectionParent *, collection_parent, &collection->parents) { + LISTBASE_FOREACH (CollectionParent *, collection_parent, &collection->runtime.parents) { if (collection_parent->collection->flag & COLLECTION_IS_MASTER) { /* We don't care about scene/master collection here. 
*/ continue; @@ -1048,7 +1041,7 @@ static Collection *collection_parent_editable_find_recursive(const ViewLayer *vi return NULL; } - LISTBASE_FOREACH (CollectionParent *, collection_parent, &collection->parents) { + LISTBASE_FOREACH (CollectionParent *, collection_parent, &collection->runtime.parents) { if (!ID_IS_LINKED(collection_parent->collection) && !ID_IS_OVERRIDE_LIBRARY(collection_parent->collection)) { if (view_layer != NULL && @@ -1074,8 +1067,8 @@ static bool collection_object_add( { if (ob->instance_collection) { /* Cyclic dependency check. */ - if (collection_find_child_recursive(ob->instance_collection, collection) || - ob->instance_collection == collection) { + if ((ob->instance_collection == collection) || + collection_find_child_recursive(ob->instance_collection, collection)) { return false; } } @@ -1224,6 +1217,28 @@ bool BKE_collection_object_remove(Main *bmain, return true; } +bool BKE_collection_object_replace(Main *bmain, + Collection *collection, + Object *ob_old, + Object *ob_new) +{ + CollectionObject *cob = BLI_findptr( + &collection->gobject, ob_old, offsetof(CollectionObject, ob)); + if (cob == NULL) { + return false; + } + + id_us_min(&cob->ob->id); + cob->ob = ob_new; + id_us_plus(&cob->ob->id); + + if (BKE_collection_is_in_scene(collection)) { + BKE_main_collection_sync(bmain); + } + + return true; +} + /** * Remove object from all collections of scene * \param collection_skip: Don't remove base from this collection. @@ -1347,9 +1362,9 @@ static void collection_null_children_remove(Collection *collection) static void collection_missing_parents_remove(Collection *collection) { - LISTBASE_FOREACH_MUTABLE (CollectionParent *, parent, &collection->parents) { + LISTBASE_FOREACH_MUTABLE (CollectionParent *, parent, &collection->runtime.parents) { if ((parent->collection == NULL) || !collection_find_child(parent->collection, collection)) { - BLI_freelinkN(&collection->parents, parent); + BLI_freelinkN(&collection->runtime.parents, parent); } } } @@ -1383,11 +1398,11 @@ void BKE_collections_child_remove_nulls(Main *bmain, } } else { - LISTBASE_FOREACH_MUTABLE (CollectionParent *, parent, &child_collection->parents) { + LISTBASE_FOREACH_MUTABLE (CollectionParent *, parent, &child_collection->runtime.parents) { collection_null_children_remove(parent->collection); if (!collection_find_child(parent->collection, child_collection)) { - BLI_freelinkN(&child_collection->parents, parent); + BLI_freelinkN(&child_collection->runtime.parents, parent); } } } @@ -1423,7 +1438,7 @@ bool BKE_collection_is_in_scene(Collection *collection) return true; } - LISTBASE_FOREACH (CollectionParent *, cparent, &collection->parents) { + LISTBASE_FOREACH (CollectionParent *, cparent, &collection->runtime.parents) { if (BKE_collection_is_in_scene(cparent->collection)) { return true; } @@ -1477,7 +1492,7 @@ bool BKE_collection_cycle_find(Collection *new_ancestor, Collection *collection) collection = new_ancestor; } - LISTBASE_FOREACH (CollectionParent *, parent, &new_ancestor->parents) { + LISTBASE_FOREACH (CollectionParent *, parent, &new_ancestor->runtime.parents) { if (BKE_collection_cycle_find(parent->collection, collection)) { return true; } @@ -1517,7 +1532,7 @@ static bool collection_cycle_fix_recursive(Main *bmain, { bool cycles_found = false; - LISTBASE_FOREACH_MUTABLE (CollectionParent *, parent, &parent_collection->parents) { + LISTBASE_FOREACH_MUTABLE (CollectionParent *, parent, &parent_collection->runtime.parents) { if (BKE_collection_cycle_find(parent->collection, collection)) { 
BKE_collection_child_remove(bmain, parent->collection, parent_collection); cycles_found = true; @@ -1563,7 +1578,7 @@ bool BKE_collection_has_collection(const Collection *parent, const Collection *c static CollectionParent *collection_find_parent(Collection *child, Collection *collection) { - return BLI_findptr(&child->parents, collection, offsetof(CollectionParent, collection)); + return BLI_findptr(&child->runtime.parents, collection, offsetof(CollectionParent, collection)); } static bool collection_child_add(Collection *parent, @@ -1587,7 +1602,7 @@ static bool collection_child_add(Collection *parent, if ((flag & LIB_ID_CREATE_NO_MAIN) == 0) { CollectionParent *cparent = MEM_callocN(sizeof(CollectionParent), "CollectionParent"); cparent->collection = parent; - BLI_addtail(&collection->parents, cparent); + BLI_addtail(&collection->runtime.parents, cparent); } if (add_us) { @@ -1607,7 +1622,7 @@ static bool collection_child_remove(Collection *parent, Collection *collection) } CollectionParent *cparent = collection_find_parent(collection, parent); - BLI_freelinkN(&collection->parents, cparent); + BLI_freelinkN(&collection->runtime.parents, cparent); BLI_freelinkN(&parent->children, child); id_us_min(&collection->id); @@ -1667,19 +1682,19 @@ void BKE_collection_parent_relations_rebuild(Collection *collection) BLI_assert(collection_find_parent(child->collection, collection) == NULL); CollectionParent *cparent = MEM_callocN(sizeof(CollectionParent), __func__); cparent->collection = collection; - BLI_addtail(&child->collection->parents, cparent); + BLI_addtail(&child->collection->runtime.parents, cparent); } } static void collection_parents_rebuild_recursive(Collection *collection) { /* A same collection may be child of several others, no need to process it more than once. */ - if ((collection->tag & COLLECTION_TAG_RELATION_REBUILD) == 0) { + if ((collection->runtime.tag & COLLECTION_TAG_RELATION_REBUILD) == 0) { return; } BKE_collection_parent_relations_rebuild(collection); - collection->tag &= ~COLLECTION_TAG_RELATION_REBUILD; + collection->runtime.tag &= ~COLLECTION_TAG_RELATION_REBUILD; LISTBASE_FOREACH (CollectionChild *, child, &collection->children) { /* See comment above in `BKE_collection_parent_relations_rebuild`. */ @@ -1694,9 +1709,9 @@ void BKE_main_collections_parent_relations_rebuild(Main *bmain) { /* Only collections not in bmain (master ones in scenes) have no parent... */ LISTBASE_FOREACH (Collection *, collection, &bmain->collections) { - BLI_freelistN(&collection->parents); + BLI_freelistN(&collection->runtime.parents); - collection->tag |= COLLECTION_TAG_RELATION_REBUILD; + collection->runtime.tag |= COLLECTION_TAG_RELATION_REBUILD; } /* Scene's master collections will be 'root' parent of most of our collections, so start with @@ -1705,8 +1720,8 @@ void BKE_main_collections_parent_relations_rebuild(Main *bmain) /* This function can be called from readfile.c, when this pointer is not guaranteed to be NULL. */ if (scene->master_collection != NULL) { - BLI_assert(BLI_listbase_is_empty(&scene->master_collection->parents)); - scene->master_collection->tag |= COLLECTION_TAG_RELATION_REBUILD; + BLI_assert(BLI_listbase_is_empty(&scene->master_collection->runtime.parents)); + scene->master_collection->runtime.tag |= COLLECTION_TAG_RELATION_REBUILD; collection_parents_rebuild_recursive(scene->master_collection); } } @@ -1714,7 +1729,7 @@ void BKE_main_collections_parent_relations_rebuild(Main *bmain) /* We may have parent chains outside of scene's master_collection context? 
At least, readfile's * lib_link_collection_data() seems to assume that, so do the same here. */ LISTBASE_FOREACH (Collection *, collection, &bmain->collections) { - if (collection->tag & COLLECTION_TAG_RELATION_REBUILD) { + if (collection->runtime.tag & COLLECTION_TAG_RELATION_REBUILD) { /* NOTE: we do not have easy access to 'which collections is root' info in that case, which * means test for cycles in collection relationships may fail here. I don't think that is an * issue in practice here, but worth keeping in mind... */ diff --git a/source/blender/blenkernel/intern/constraint.c b/source/blender/blenkernel/intern/constraint.c index 4ebf7d184b3..fa9a18859b8 100644 --- a/source/blender/blenkernel/intern/constraint.c +++ b/source/blender/blenkernel/intern/constraint.c @@ -124,7 +124,7 @@ bConstraintOb *BKE_constraints_make_evalob( /* create regardless of whether we have any data! */ cob = MEM_callocN(sizeof(bConstraintOb), "bConstraintOb"); - /* for system time, part of deglobalization, code nicer later with local time (ton) */ + /* NOTE(@ton): For system time, part of de-globalization, code nicer later with local time. */ cob->scene = scene; cob->depsgraph = depsgraph; @@ -236,7 +236,7 @@ void BKE_constraints_clear_evalob(bConstraintOb *cob) } } - /* free tempolary struct */ + /* Free temporary struct. */ MEM_freeN(cob); } @@ -787,18 +787,18 @@ static void constraint_target_to_mat4(Object *ob, */ #if 0 static bConstraintTypeInfo CTI_CONSTRNAME = { - CONSTRAINT_TYPE_CONSTRNAME, /* type */ - sizeof(bConstrNameConstraint), /* size */ - "ConstrName", /* name */ - "bConstrNameConstraint", /* struct name */ - constrname_free, /* free data */ - constrname_id_looper, /* id looper */ - constrname_copy, /* copy data */ - constrname_new_data, /* new data */ - constrname_get_tars, /* get constraint targets */ - constrname_flush_tars, /* flush constraint targets */ - constrname_get_tarmat, /* get target matrix */ - constrname_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_CONSTRNAME, + /*size*/ sizeof(bConstrNameConstraint), + /*name*/ "ConstrName", + /*structName*/ "bConstrNameConstraint", + /*free_data*/ constrname_free, + /*id_looper*/ constrname_id_looper, + /*copy_data*/ constrname_copy, + /*new_data*/ constrname_new_data, + /*get_constraint_targets*/ constrname_get_tars, + /*flush_constraint_targets*/ constrname_flush_tars, + /*get_target_matrix*/ constrname_get_tarmat, + /*evaluate_constraint*/ constrname_evaluate, }; #endif @@ -1109,18 +1109,18 @@ static void childof_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *tar /* XXX NOTE: con->flag should be CONSTRAINT_SPACEONCE for bone-childof, patched in `readfile.c`. 
*/ static bConstraintTypeInfo CTI_CHILDOF = { - CONSTRAINT_TYPE_CHILDOF, /* type */ - sizeof(bChildOfConstraint), /* size */ - N_("Child Of"), /* name */ - "bChildOfConstraint", /* struct name */ - NULL, /* free data */ - childof_id_looper, /* id looper */ - NULL, /* copy data */ - childof_new_data, /* new data */ - childof_get_tars, /* get constraint targets */ - childof_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get a target matrix */ - childof_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_CHILDOF, + /*size*/ sizeof(bChildOfConstraint), + /*name*/ N_("Child Of"), + /*structName*/ "bChildOfConstraint", + /*free_data*/ NULL, + /*id_looper*/ childof_id_looper, + /*copy_data*/ NULL, + /*new_data*/ childof_new_data, + /*get_constraint_targets*/ childof_get_tars, + /*flush_constraint_targets*/ childof_flush_tars, + /* get a target matrix */ default_get_tarmat, + /*evaluate_constraint*/ childof_evaluate, }; /* -------- TrackTo Constraint ------- */ @@ -1293,18 +1293,18 @@ static void trackto_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *tar } static bConstraintTypeInfo CTI_TRACKTO = { - CONSTRAINT_TYPE_TRACKTO, /* type */ - sizeof(bTrackToConstraint), /* size */ - N_("Track To"), /* name */ - "bTrackToConstraint", /* struct name */ - NULL, /* free data */ - trackto_id_looper, /* id looper */ - NULL, /* copy data */ - trackto_new_data, /* new data */ - trackto_get_tars, /* get constraint targets */ - trackto_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get target matrix */ - trackto_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_TRACKTO, + /*size*/ sizeof(bTrackToConstraint), + /*name*/ N_("Track To"), + /*structName*/ "bTrackToConstraint", + /*free_data*/ NULL, + /*id_looper*/ trackto_id_looper, + /*copy_data*/ NULL, + /*new_data*/ trackto_new_data, + /*get_constraint_targets*/ trackto_get_tars, + /*flush_constraint_targets*/ trackto_flush_tars, + /*get_target_matrix*/ default_get_tarmat, + /*evaluate_constraint*/ trackto_evaluate, }; /* --------- Inverse-Kinematics --------- */ @@ -1399,18 +1399,18 @@ static void kinematic_get_tarmat(struct Depsgraph *UNUSED(depsgraph), } static bConstraintTypeInfo CTI_KINEMATIC = { - CONSTRAINT_TYPE_KINEMATIC, /* type */ - sizeof(bKinematicConstraint), /* size */ - N_("IK"), /* name */ - "bKinematicConstraint", /* struct name */ - NULL, /* free data */ - kinematic_id_looper, /* id looper */ - NULL, /* copy data */ - kinematic_new_data, /* new data */ - kinematic_get_tars, /* get constraint targets */ - kinematic_flush_tars, /* flush constraint targets */ - kinematic_get_tarmat, /* get target matrix */ - NULL, /* evaluate - solved as separate loop */ + /*type*/ CONSTRAINT_TYPE_KINEMATIC, + /*size*/ sizeof(bKinematicConstraint), + /*name*/ N_("IK"), + /*structName*/ "bKinematicConstraint", + /*free_data*/ NULL, + /*id_looper*/ kinematic_id_looper, + /*copy_data*/ NULL, + /*new_data*/ kinematic_new_data, + /*get_constraint_targets*/ kinematic_get_tars, + /*flush_constraint_targets*/ kinematic_flush_tars, + /*get_target_matrix*/ kinematic_get_tarmat, + /* evaluate - solved as separate loop */ NULL, }; /* -------- Follow-Path Constraint ---------- */ @@ -1577,18 +1577,18 @@ static void followpath_evaluate(bConstraint *con, bConstraintOb *cob, ListBase * } static bConstraintTypeInfo CTI_FOLLOWPATH = { - CONSTRAINT_TYPE_FOLLOWPATH, /* type */ - sizeof(bFollowPathConstraint), /* size */ - N_("Follow Path"), /* name */ - "bFollowPathConstraint", /* struct name */ - NULL, /* free data */ - 
followpath_id_looper, /* id looper */ - NULL, /* copy data */ - followpath_new_data, /* new data */ - followpath_get_tars, /* get constraint targets */ - followpath_flush_tars, /* flush constraint targets */ - followpath_get_tarmat, /* get target matrix */ - followpath_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_FOLLOWPATH, + /*size*/ sizeof(bFollowPathConstraint), + /*name*/ N_("Follow Path"), + /*structName*/ "bFollowPathConstraint", + /*free_data*/ NULL, + /*id_looper*/ followpath_id_looper, + /*copy_data*/ NULL, + /*new_data*/ followpath_new_data, + /*get_constraint_targets*/ followpath_get_tars, + /*flush_constraint_targets*/ followpath_flush_tars, + /*get_target_matrix*/ followpath_get_tarmat, + /*evaluate_constraint*/ followpath_evaluate, }; /* --------- Limit Location --------- */ @@ -1630,18 +1630,18 @@ static void loclimit_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *UN } static bConstraintTypeInfo CTI_LOCLIMIT = { - CONSTRAINT_TYPE_LOCLIMIT, /* type */ - sizeof(bLocLimitConstraint), /* size */ - N_("Limit Location"), /* name */ - "bLocLimitConstraint", /* struct name */ - NULL, /* free data */ - NULL, /* id looper */ - NULL, /* copy data */ - NULL, /* new data */ - NULL, /* get constraint targets */ - NULL, /* flush constraint targets */ - NULL, /* get target matrix */ - loclimit_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_LOCLIMIT, + /*size*/ sizeof(bLocLimitConstraint), + /*name*/ N_("Limit Location"), + /*structName*/ "bLocLimitConstraint", + /*free_data*/ NULL, + /*id_looper*/ NULL, + /*copy_data*/ NULL, + /*new_data*/ NULL, + /*get_constraint_targets*/ NULL, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ NULL, + /*evaluate_constraint*/ loclimit_evaluate, }; /* -------- Limit Rotation --------- */ @@ -1711,18 +1711,18 @@ static void rotlimit_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *UN } static bConstraintTypeInfo CTI_ROTLIMIT = { - CONSTRAINT_TYPE_ROTLIMIT, /* type */ - sizeof(bRotLimitConstraint), /* size */ - N_("Limit Rotation"), /* name */ - "bRotLimitConstraint", /* struct name */ - NULL, /* free data */ - NULL, /* id looper */ - NULL, /* copy data */ - NULL, /* new data */ - NULL, /* get constraint targets */ - NULL, /* flush constraint targets */ - NULL, /* get target matrix */ - rotlimit_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_ROTLIMIT, + /*size*/ sizeof(bRotLimitConstraint), + /*name*/ N_("Limit Rotation"), + /*structName*/ "bRotLimitConstraint", + /*free_data*/ NULL, + /*id_looper*/ NULL, + /*copy_data*/ NULL, + /*new_data*/ NULL, + /*get_constraint_targets*/ NULL, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ NULL, + /*evaluate_constraint*/ rotlimit_evaluate, }; /* --------- Limit Scale --------- */ @@ -1778,18 +1778,18 @@ static void sizelimit_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *U } static bConstraintTypeInfo CTI_SIZELIMIT = { - CONSTRAINT_TYPE_SIZELIMIT, /* type */ - sizeof(bSizeLimitConstraint), /* size */ - N_("Limit Scale"), /* name */ - "bSizeLimitConstraint", /* struct name */ - NULL, /* free data */ - NULL, /* id looper */ - NULL, /* copy data */ - NULL, /* new data */ - NULL, /* get constraint targets */ - NULL, /* flush constraint targets */ - NULL, /* get target matrix */ - sizelimit_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_SIZELIMIT, + /*size*/ sizeof(bSizeLimitConstraint), + /*name*/ N_("Limit Scale"), + /*structName*/ "bSizeLimitConstraint", + /*free_data*/ NULL, + /*id_looper*/ NULL, + /*copy_data*/ NULL, + /*new_data*/ NULL, + 
/*get_constraint_targets*/ NULL, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ NULL, + /*evaluate_constraint*/ sizelimit_evaluate, }; /* ----------- Copy Location ------------- */ @@ -1875,18 +1875,18 @@ static void loclike_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *tar } static bConstraintTypeInfo CTI_LOCLIKE = { - CONSTRAINT_TYPE_LOCLIKE, /* type */ - sizeof(bLocateLikeConstraint), /* size */ - N_("Copy Location"), /* name */ - "bLocateLikeConstraint", /* struct name */ - NULL, /* free data */ - loclike_id_looper, /* id looper */ - NULL, /* copy data */ - loclike_new_data, /* new data */ - loclike_get_tars, /* get constraint targets */ - loclike_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get target matrix */ - loclike_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_LOCLIKE, + /*size*/ sizeof(bLocateLikeConstraint), + /*name*/ N_("Copy Location"), + /*structName*/ "bLocateLikeConstraint", + /*free_data*/ NULL, + /*id_looper*/ loclike_id_looper, + /*copy_data*/ NULL, + /*new_data*/ loclike_new_data, + /*get_constraint_targets*/ loclike_get_tars, + /*flush_constraint_targets*/ loclike_flush_tars, + /*get_target_matrix*/ default_get_tarmat, + /*evaluate_constraint*/ loclike_evaluate, }; /* ----------- Copy Rotation ------------- */ @@ -2052,18 +2052,18 @@ static void rotlike_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *tar } static bConstraintTypeInfo CTI_ROTLIKE = { - CONSTRAINT_TYPE_ROTLIKE, /* type */ - sizeof(bRotateLikeConstraint), /* size */ - N_("Copy Rotation"), /* name */ - "bRotateLikeConstraint", /* struct name */ - NULL, /* free data */ - rotlike_id_looper, /* id looper */ - NULL, /* copy data */ - rotlike_new_data, /* new data */ - rotlike_get_tars, /* get constraint targets */ - rotlike_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get target matrix */ - rotlike_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_ROTLIKE, + /*size*/ sizeof(bRotateLikeConstraint), + /*name*/ N_("Copy Rotation"), + /*structName*/ "bRotateLikeConstraint", + /*free_data*/ NULL, + /*id_looper*/ rotlike_id_looper, + /*copy_data*/ NULL, + /*new_data*/ rotlike_new_data, + /*get_constraint_targets*/ rotlike_get_tars, + /*flush_constraint_targets*/ rotlike_flush_tars, + /*get_target_matrix*/ default_get_tarmat, + /*evaluate_constraint*/ rotlike_evaluate, }; /* ---------- Copy Scale ---------- */ @@ -2182,18 +2182,18 @@ static void sizelike_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *ta } static bConstraintTypeInfo CTI_SIZELIKE = { - CONSTRAINT_TYPE_SIZELIKE, /* type */ - sizeof(bSizeLikeConstraint), /* size */ - N_("Copy Scale"), /* name */ - "bSizeLikeConstraint", /* struct name */ - NULL, /* free data */ - sizelike_id_looper, /* id looper */ - NULL, /* copy data */ - sizelike_new_data, /* new data */ - sizelike_get_tars, /* get constraint targets */ - sizelike_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get target matrix */ - sizelike_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_SIZELIKE, + /*size*/ sizeof(bSizeLikeConstraint), + /*name*/ N_("Copy Scale"), + /*structName*/ "bSizeLikeConstraint", + /*free_data*/ NULL, + /*id_looper*/ sizelike_id_looper, + /*copy_data*/ NULL, + /*new_data*/ sizelike_new_data, + /*get_constraint_targets*/ sizelike_get_tars, + /*flush_constraint_targets*/ sizelike_flush_tars, + /*get_target_matrix*/ default_get_tarmat, + /*evaluate_constraint*/ sizelike_evaluate, }; /* ----------- Copy Transforms ------------- */ @@ -2288,18 +2288,18 @@ 
static void translike_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *t } static bConstraintTypeInfo CTI_TRANSLIKE = { - CONSTRAINT_TYPE_TRANSLIKE, /* type */ - sizeof(bTransLikeConstraint), /* size */ - N_("Copy Transforms"), /* name */ - "bTransLikeConstraint", /* struct name */ - NULL, /* free data */ - translike_id_looper, /* id looper */ - NULL, /* copy data */ - NULL, /* new data */ - translike_get_tars, /* get constraint targets */ - translike_flush_tars, /* flush constraint targets */ - default_get_tarmat_full_bbone, /* get target matrix */ - translike_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_TRANSLIKE, + /*size*/ sizeof(bTransLikeConstraint), + /*name*/ N_("Copy Transforms"), + /*structName*/ "bTransLikeConstraint", + /*free_data*/ NULL, + /*id_looper*/ translike_id_looper, + /*copy_data*/ NULL, + /*new_data*/ NULL, + /*get_constraint_targets*/ translike_get_tars, + /*flush_constraint_targets*/ translike_flush_tars, + /*get_target_matrix*/ default_get_tarmat_full_bbone, + /*evaluate_constraint*/ translike_evaluate, }; /* ---------- Maintain Volume ---------- */ @@ -2357,18 +2357,18 @@ static void samevolume_evaluate(bConstraint *con, bConstraintOb *cob, ListBase * } static bConstraintTypeInfo CTI_SAMEVOL = { - CONSTRAINT_TYPE_SAMEVOL, /* type */ - sizeof(bSameVolumeConstraint), /* size */ - N_("Maintain Volume"), /* name */ - "bSameVolumeConstraint", /* struct name */ - NULL, /* free data */ - NULL, /* id looper */ - NULL, /* copy data */ - samevolume_new_data, /* new data */ - NULL, /* get constraint targets */ - NULL, /* flush constraint targets */ - NULL, /* get target matrix */ - samevolume_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_SAMEVOL, + /*size*/ sizeof(bSameVolumeConstraint), + /*name*/ N_("Maintain Volume"), + /*structName*/ "bSameVolumeConstraint", + /*free_data*/ NULL, + /*id_looper*/ NULL, + /*copy_data*/ NULL, + /*new_data*/ samevolume_new_data, + /*get_constraint_targets*/ NULL, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ NULL, + /*evaluate_constraint*/ samevolume_evaluate, }; /* ----------- Python Constraint -------------- */ @@ -2489,18 +2489,18 @@ static void pycon_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *targe } static bConstraintTypeInfo CTI_PYTHON = { - CONSTRAINT_TYPE_PYTHON, /* type */ - sizeof(bPythonConstraint), /* size */ - N_("Script"), /* name */ - "bPythonConstraint", /* struct name */ - pycon_free, /* free data */ - pycon_id_looper, /* id looper */ - pycon_copy, /* copy data */ - pycon_new_data, /* new data */ - pycon_get_tars, /* get constraint targets */ - NULL, /* flush constraint targets */ - pycon_get_tarmat, /* get target matrix */ - pycon_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_PYTHON, + /*size*/ sizeof(bPythonConstraint), + /*name*/ N_("Script"), + /*structName*/ "bPythonConstraint", + /*free_data*/ pycon_free, + /*id_looper*/ pycon_id_looper, + /*copy_data*/ pycon_copy, + /*new_data*/ pycon_new_data, + /*get_constraint_targets*/ pycon_get_tars, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ pycon_get_tarmat, + /*evaluate_constraint*/ pycon_evaluate, }; /* ----------- Armature Constraint -------------- */ @@ -2741,18 +2741,18 @@ static void armdef_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *targ } static bConstraintTypeInfo CTI_ARMATURE = { - CONSTRAINT_TYPE_ARMATURE, /* type */ - sizeof(bArmatureConstraint), /* size */ - N_("Armature"), /* name */ - "bArmatureConstraint", /* struct name */ - armdef_free, /* free data */ - armdef_id_looper, 
/* id looper */ - armdef_copy, /* copy data */ - NULL, /* new data */ - armdef_get_tars, /* get constraint targets */ - NULL, /* flush constraint targets */ - armdef_get_tarmat, /* get target matrix */ - armdef_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_ARMATURE, + /*size*/ sizeof(bArmatureConstraint), + /*name*/ N_("Armature"), + /*structName*/ "bArmatureConstraint", + /*free_data*/ armdef_free, + /*id_looper*/ armdef_id_looper, + /*copy_data*/ armdef_copy, + /*new_data*/ NULL, + /*get_constraint_targets*/ armdef_get_tars, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ armdef_get_tarmat, + /*evaluate_constraint*/ armdef_evaluate, }; /* -------- Action Constraint ----------- */ @@ -2957,18 +2957,18 @@ static void actcon_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *targ } static bConstraintTypeInfo CTI_ACTION = { - CONSTRAINT_TYPE_ACTION, /* type */ - sizeof(bActionConstraint), /* size */ - N_("Action"), /* name */ - "bActionConstraint", /* struct name */ - NULL, /* free data */ - actcon_id_looper, /* id looper */ - NULL, /* copy data */ - actcon_new_data, /* new data */ - actcon_get_tars, /* get constraint targets */ - actcon_flush_tars, /* flush constraint targets */ - actcon_get_tarmat, /* get target matrix */ - actcon_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_ACTION, + /*size*/ sizeof(bActionConstraint), + /*name*/ N_("Action"), + /*structName*/ "bActionConstraint", + /*free_data*/ NULL, + /*id_looper*/ actcon_id_looper, + /*copy_data*/ NULL, + /*new_data*/ actcon_new_data, + /*get_constraint_targets*/ actcon_get_tars, + /*flush_constraint_targets*/ actcon_flush_tars, + /*get_target_matrix*/ actcon_get_tarmat, + /*evaluate_constraint*/ actcon_evaluate, }; /* --------- Locked Track ---------- */ @@ -3273,18 +3273,18 @@ static void locktrack_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *t } static bConstraintTypeInfo CTI_LOCKTRACK = { - CONSTRAINT_TYPE_LOCKTRACK, /* type */ - sizeof(bLockTrackConstraint), /* size */ - N_("Locked Track"), /* name */ - "bLockTrackConstraint", /* struct name */ - NULL, /* free data */ - locktrack_id_looper, /* id looper */ - NULL, /* copy data */ - locktrack_new_data, /* new data */ - locktrack_get_tars, /* get constraint targets */ - locktrack_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get target matrix */ - locktrack_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_LOCKTRACK, + /*size*/ sizeof(bLockTrackConstraint), + /*name*/ N_("Locked Track"), + /*structName*/ "bLockTrackConstraint", + /*free_data*/ NULL, + /*id_looper*/ locktrack_id_looper, + /*copy_data*/ NULL, + /*new_data*/ locktrack_new_data, + /*get_constraint_targets*/ locktrack_get_tars, + /*flush_constraint_targets*/ locktrack_flush_tars, + /*get_target_matrix*/ default_get_tarmat, + /*evaluate_constraint*/ locktrack_evaluate, }; /* ---------- Limit Distance Constraint ----------- */ @@ -3416,18 +3416,18 @@ static void distlimit_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *t } static bConstraintTypeInfo CTI_DISTLIMIT = { - CONSTRAINT_TYPE_DISTLIMIT, /* type */ - sizeof(bDistLimitConstraint), /* size */ - N_("Limit Distance"), /* name */ - "bDistLimitConstraint", /* struct name */ - NULL, /* free data */ - distlimit_id_looper, /* id looper */ - NULL, /* copy data */ - distlimit_new_data, /* new data */ - distlimit_get_tars, /* get constraint targets */ - distlimit_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get a target matrix */ - distlimit_evaluate, /* evaluate */ + /*type*/ 
CONSTRAINT_TYPE_DISTLIMIT, + /*size*/ sizeof(bDistLimitConstraint), + /*name*/ N_("Limit Distance"), + /*structName*/ "bDistLimitConstraint", + /*free_data*/ NULL, + /*id_looper*/ distlimit_id_looper, + /*copy_data*/ NULL, + /*new_data*/ distlimit_new_data, + /*get_constraint_targets*/ distlimit_get_tars, + /*flush_constraint_targets*/ distlimit_flush_tars, + /* get a target matrix */ default_get_tarmat, + /*evaluate_constraint*/ distlimit_evaluate, }; /* ---------- Stretch To ------------ */ @@ -3624,18 +3624,18 @@ static void stretchto_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *t } static bConstraintTypeInfo CTI_STRETCHTO = { - CONSTRAINT_TYPE_STRETCHTO, /* type */ - sizeof(bStretchToConstraint), /* size */ - N_("Stretch To"), /* name */ - "bStretchToConstraint", /* struct name */ - NULL, /* free data */ - stretchto_id_looper, /* id looper */ - NULL, /* copy data */ - stretchto_new_data, /* new data */ - stretchto_get_tars, /* get constraint targets */ - stretchto_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get target matrix */ - stretchto_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_STRETCHTO, + /*size*/ sizeof(bStretchToConstraint), + /*name*/ N_("Stretch To"), + /*structName*/ "bStretchToConstraint", + /*free_data*/ NULL, + /*id_looper*/ stretchto_id_looper, + /*copy_data*/ NULL, + /*new_data*/ stretchto_new_data, + /*get_constraint_targets*/ stretchto_get_tars, + /*flush_constraint_targets*/ stretchto_flush_tars, + /*get_target_matrix*/ default_get_tarmat, + /*evaluate_constraint*/ stretchto_evaluate, }; /* ---------- Floor ------------ */ @@ -3755,18 +3755,18 @@ static void minmax_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *targ } static bConstraintTypeInfo CTI_MINMAX = { - CONSTRAINT_TYPE_MINMAX, /* type */ - sizeof(bMinMaxConstraint), /* size */ - N_("Floor"), /* name */ - "bMinMaxConstraint", /* struct name */ - NULL, /* free data */ - minmax_id_looper, /* id looper */ - NULL, /* copy data */ - minmax_new_data, /* new data */ - minmax_get_tars, /* get constraint targets */ - minmax_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get target matrix */ - minmax_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_MINMAX, + /*size*/ sizeof(bMinMaxConstraint), + /*name*/ N_("Floor"), + /*structName*/ "bMinMaxConstraint", + /*free_data*/ NULL, + /*id_looper*/ minmax_id_looper, + /*copy_data*/ NULL, + /*new_data*/ minmax_new_data, + /*get_constraint_targets*/ minmax_get_tars, + /*flush_constraint_targets*/ minmax_flush_tars, + /*get_target_matrix*/ default_get_tarmat, + /*evaluate_constraint*/ minmax_evaluate, }; /* -------- Clamp To ---------- */ @@ -3842,7 +3842,7 @@ static void clampto_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *tar * be used that doesn't lazy initialize to avoid thread safety issues in the future. */ BKE_object_minmax(ct->tar, curveMin, curveMax, true); - /* get targetmatrix */ + /* Get target-matrix. 
*/ if (data->tar->runtime.curve_cache && data->tar->runtime.curve_cache->anim_path_accum_length) { float vec[4], totmat[4][4]; float curvetime; @@ -3941,18 +3941,18 @@ static void clampto_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *tar } static bConstraintTypeInfo CTI_CLAMPTO = { - CONSTRAINT_TYPE_CLAMPTO, /* type */ - sizeof(bClampToConstraint), /* size */ - N_("Clamp To"), /* name */ - "bClampToConstraint", /* struct name */ - NULL, /* free data */ - clampto_id_looper, /* id looper */ - NULL, /* copy data */ - NULL, /* new data */ - clampto_get_tars, /* get constraint targets */ - clampto_flush_tars, /* flush constraint targets */ - clampto_get_tarmat, /* get target matrix */ - clampto_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_CLAMPTO, + /*size*/ sizeof(bClampToConstraint), + /*name*/ N_("Clamp To"), + /*structName*/ "bClampToConstraint", + /*free_data*/ NULL, + /*id_looper*/ clampto_id_looper, + /*copy_data*/ NULL, + /*new_data*/ NULL, + /*get_constraint_targets*/ clampto_get_tars, + /*flush_constraint_targets*/ clampto_flush_tars, + /*get_target_matrix*/ clampto_get_tarmat, + /*evaluate_constraint*/ clampto_evaluate, }; /* ---------- Transform Constraint ----------- */ @@ -4150,18 +4150,18 @@ static void transform_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *t } static bConstraintTypeInfo CTI_TRANSFORM = { - CONSTRAINT_TYPE_TRANSFORM, /* type */ - sizeof(bTransformConstraint), /* size */ - N_("Transformation"), /* name */ - "bTransformConstraint", /* struct name */ - NULL, /* free data */ - transform_id_looper, /* id looper */ - NULL, /* copy data */ - transform_new_data, /* new data */ - transform_get_tars, /* get constraint targets */ - transform_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get a target matrix */ - transform_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_TRANSFORM, + /*size*/ sizeof(bTransformConstraint), + /*name*/ N_("Transformation"), + /*structName*/ "bTransformConstraint", + /*free_data*/ NULL, + /*id_looper*/ transform_id_looper, + /*copy_data*/ NULL, + /*new_data*/ transform_new_data, + /*get_constraint_targets*/ transform_get_tars, + /*flush_constraint_targets*/ transform_flush_tars, + /* get a target matrix */ default_get_tarmat, + /*evaluate_constraint*/ transform_evaluate, }; /* ---------- Shrinkwrap Constraint ----------- */ @@ -4381,18 +4381,18 @@ static void shrinkwrap_evaluate(bConstraint *UNUSED(con), bConstraintOb *cob, Li } static bConstraintTypeInfo CTI_SHRINKWRAP = { - CONSTRAINT_TYPE_SHRINKWRAP, /* type */ - sizeof(bShrinkwrapConstraint), /* size */ - N_("Shrinkwrap"), /* name */ - "bShrinkwrapConstraint", /* struct name */ - NULL, /* free data */ - shrinkwrap_id_looper, /* id looper */ - NULL, /* copy data */ - shrinkwrap_new_data, /* new data */ - shrinkwrap_get_tars, /* get constraint targets */ - shrinkwrap_flush_tars, /* flush constraint targets */ - shrinkwrap_get_tarmat, /* get a target matrix */ - shrinkwrap_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_SHRINKWRAP, + /*size*/ sizeof(bShrinkwrapConstraint), + /*name*/ N_("Shrinkwrap"), + /*structName*/ "bShrinkwrapConstraint", + /*free_data*/ NULL, + /*id_looper*/ shrinkwrap_id_looper, + /*copy_data*/ NULL, + /*new_data*/ shrinkwrap_new_data, + /*get_constraint_targets*/ shrinkwrap_get_tars, + /*flush_constraint_targets*/ shrinkwrap_flush_tars, + /* get a target matrix */ shrinkwrap_get_tarmat, + /*evaluate_constraint*/ shrinkwrap_evaluate, }; /* --------- Damped Track ---------- */ @@ -4528,7 +4528,7 @@ static void 
damptrack_do_transform(float matrix[4][4], const float tarvec_in[3], } } else if (norm < 0.1f) { - /* near 0 and Pi arcsin has way better precision than arccos */ + /* Near 0 and Pi `arcsin` has way better precision than `arccos`. */ rangle = (rangle > M_PI_2) ? M_PI - asinf(norm) : asinf(norm); } @@ -4546,18 +4546,18 @@ static void damptrack_do_transform(float matrix[4][4], const float tarvec_in[3], } static bConstraintTypeInfo CTI_DAMPTRACK = { - CONSTRAINT_TYPE_DAMPTRACK, /* type */ - sizeof(bDampTrackConstraint), /* size */ - N_("Damped Track"), /* name */ - "bDampTrackConstraint", /* struct name */ - NULL, /* free data */ - damptrack_id_looper, /* id looper */ - NULL, /* copy data */ - damptrack_new_data, /* new data */ - damptrack_get_tars, /* get constraint targets */ - damptrack_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get target matrix */ - damptrack_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_DAMPTRACK, + /*size*/ sizeof(bDampTrackConstraint), + /*name*/ N_("Damped Track"), + /*structName*/ "bDampTrackConstraint", + /*free_data*/ NULL, + /*id_looper*/ damptrack_id_looper, + /*copy_data*/ NULL, + /*new_data*/ damptrack_new_data, + /*get_constraint_targets*/ damptrack_get_tars, + /*flush_constraint_targets*/ damptrack_flush_tars, + /*get_target_matrix*/ default_get_tarmat, + /*evaluate_constraint*/ damptrack_evaluate, }; /* ----------- Spline IK ------------ */ @@ -4641,18 +4641,18 @@ static void splineik_get_tarmat(struct Depsgraph *UNUSED(depsgraph), } static bConstraintTypeInfo CTI_SPLINEIK = { - CONSTRAINT_TYPE_SPLINEIK, /* type */ - sizeof(bSplineIKConstraint), /* size */ - N_("Spline IK"), /* name */ - "bSplineIKConstraint", /* struct name */ - splineik_free, /* free data */ - splineik_id_looper, /* id looper */ - splineik_copy, /* copy data */ - splineik_new_data, /* new data */ - splineik_get_tars, /* get constraint targets */ - splineik_flush_tars, /* flush constraint targets */ - splineik_get_tarmat, /* get target matrix */ - NULL, /* evaluate - solved as separate loop */ + /*type*/ CONSTRAINT_TYPE_SPLINEIK, + /*size*/ sizeof(bSplineIKConstraint), + /*name*/ N_("Spline IK"), + /*structName*/ "bSplineIKConstraint", + /*free_data*/ splineik_free, + /*id_looper*/ splineik_id_looper, + /*copy_data*/ splineik_copy, + /*new_data*/ splineik_new_data, + /*get_constraint_targets*/ splineik_get_tars, + /*flush_constraint_targets*/ splineik_flush_tars, + /*get_target_matrix*/ splineik_get_tarmat, + /* evaluate - solved as separate loop */ NULL, }; /* ----------- Pivot ------------- */ @@ -4765,19 +4765,19 @@ static void pivotcon_evaluate(bConstraint *con, bConstraintOb *cob, ListBase *ta } static bConstraintTypeInfo CTI_PIVOT = { - CONSTRAINT_TYPE_PIVOT, /* type */ - sizeof(bPivotConstraint), /* size */ - N_("Pivot"), /* name */ - "bPivotConstraint", /* struct name */ - NULL, /* free data */ - pivotcon_id_looper, /* id looper */ - NULL, /* copy data */ - NULL, - /* new data */ /* XXX: might be needed to get 'normal' pivot behavior... */ - pivotcon_get_tars, /* get constraint targets */ - pivotcon_flush_tars, /* flush constraint targets */ - default_get_tarmat, /* get target matrix */ - pivotcon_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_PIVOT, + /*size*/ sizeof(bPivotConstraint), + /*name*/ N_("Pivot"), + /*structName*/ "bPivotConstraint", + /*free_data*/ NULL, + /*id_looper*/ pivotcon_id_looper, + /*copy_data*/ NULL, + /*new_data*/ NULL, + /* XXX: might be needed to get 'normal' pivot behavior. 
*/ + /*get_constraint_targets*/ pivotcon_get_tars, + /*flush_constraint_targets*/ pivotcon_flush_tars, + /*get_target_matrix*/ default_get_tarmat, + /*evaluate_constraint*/ pivotcon_evaluate, }; /* ----------- Follow Track ------------- */ @@ -5185,18 +5185,18 @@ static void followtrack_evaluate(bConstraint *con, bConstraintOb *cob, ListBase } static bConstraintTypeInfo CTI_FOLLOWTRACK = { - CONSTRAINT_TYPE_FOLLOWTRACK, /* type */ - sizeof(bFollowTrackConstraint), /* size */ - N_("Follow Track"), /* name */ - "bFollowTrackConstraint", /* struct name */ - NULL, /* free data */ - followtrack_id_looper, /* id looper */ - NULL, /* copy data */ - followtrack_new_data, /* new data */ - NULL, /* get constraint targets */ - NULL, /* flush constraint targets */ - NULL, /* get target matrix */ - followtrack_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_FOLLOWTRACK, + /*size*/ sizeof(bFollowTrackConstraint), + /*name*/ N_("Follow Track"), + /*structName*/ "bFollowTrackConstraint", + /*free_data*/ NULL, + /*id_looper*/ followtrack_id_looper, + /*copy_data*/ NULL, + /*new_data*/ followtrack_new_data, + /*get_constraint_targets*/ NULL, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ NULL, + /*evaluate_constraint*/ followtrack_evaluate, }; /* ----------- Camera Solver ------------- */ @@ -5243,18 +5243,18 @@ static void camerasolver_evaluate(bConstraint *con, bConstraintOb *cob, ListBase } static bConstraintTypeInfo CTI_CAMERASOLVER = { - CONSTRAINT_TYPE_CAMERASOLVER, /* type */ - sizeof(bCameraSolverConstraint), /* size */ - N_("Camera Solver"), /* name */ - "bCameraSolverConstraint", /* struct name */ - NULL, /* free data */ - camerasolver_id_looper, /* id looper */ - NULL, /* copy data */ - camerasolver_new_data, /* new data */ - NULL, /* get constraint targets */ - NULL, /* flush constraint targets */ - NULL, /* get target matrix */ - camerasolver_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_CAMERASOLVER, + /*size*/ sizeof(bCameraSolverConstraint), + /*name*/ N_("Camera Solver"), + /*structName*/ "bCameraSolverConstraint", + /*free_data*/ NULL, + /*id_looper*/ camerasolver_id_looper, + /*copy_data*/ NULL, + /*new_data*/ camerasolver_new_data, + /*get_constraint_targets*/ NULL, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ NULL, + /*evaluate_constraint*/ camerasolver_evaluate, }; /* ----------- Object Solver ------------- */ @@ -5329,18 +5329,18 @@ static void objectsolver_evaluate(bConstraint *con, bConstraintOb *cob, ListBase } static bConstraintTypeInfo CTI_OBJECTSOLVER = { - CONSTRAINT_TYPE_OBJECTSOLVER, /* type */ - sizeof(bObjectSolverConstraint), /* size */ - N_("Object Solver"), /* name */ - "bObjectSolverConstraint", /* struct name */ - NULL, /* free data */ - objectsolver_id_looper, /* id looper */ - NULL, /* copy data */ - objectsolver_new_data, /* new data */ - NULL, /* get constraint targets */ - NULL, /* flush constraint targets */ - NULL, /* get target matrix */ - objectsolver_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_OBJECTSOLVER, + /*size*/ sizeof(bObjectSolverConstraint), + /*name*/ N_("Object Solver"), + /*structName*/ "bObjectSolverConstraint", + /*free_data*/ NULL, + /*id_looper*/ objectsolver_id_looper, + /*copy_data*/ NULL, + /*new_data*/ objectsolver_new_data, + /*get_constraint_targets*/ NULL, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ NULL, + /*evaluate_constraint*/ objectsolver_evaluate, }; /* ----------- Transform Cache ------------- */ @@ -5426,18 +5426,18 @@ static void transformcache_new_data(void 
*cdata) } static bConstraintTypeInfo CTI_TRANSFORM_CACHE = { - CONSTRAINT_TYPE_TRANSFORM_CACHE, /* type */ - sizeof(bTransformCacheConstraint), /* size */ - N_("Transform Cache"), /* name */ - "bTransformCacheConstraint", /* struct name */ - transformcache_free, /* free data */ - transformcache_id_looper, /* id looper */ - transformcache_copy, /* copy data */ - transformcache_new_data, /* new data */ - NULL, /* get constraint targets */ - NULL, /* flush constraint targets */ - NULL, /* get target matrix */ - transformcache_evaluate, /* evaluate */ + /*type*/ CONSTRAINT_TYPE_TRANSFORM_CACHE, + /*size*/ sizeof(bTransformCacheConstraint), + /*name*/ N_("Transform Cache"), + /*structName*/ "bTransformCacheConstraint", + /*free_data*/ transformcache_free, + /*id_looper*/ transformcache_id_looper, + /*copy_data*/ transformcache_copy, + /*new_data*/ transformcache_new_data, + /*get_constraint_targets*/ NULL, + /*flush_constraint_targets*/ NULL, + /*get_target_matrix*/ NULL, + /*evaluate_constraint*/ transformcache_evaluate, }; /* ************************* Constraints Type-Info *************************** */ diff --git a/source/blender/blenkernel/intern/crazyspace.cc b/source/blender/blenkernel/intern/crazyspace.cc index 2b1772540f2..d77f831166a 100644 --- a/source/blender/blenkernel/intern/crazyspace.cc +++ b/source/blender/blenkernel/intern/crazyspace.cc @@ -604,7 +604,7 @@ GeometryDeformation get_evaluated_curves_deformation(const Depsgraph &depsgraph, { BLI_assert(ob_orig.type == OB_CURVES); const Curves &curves_id_orig = *static_cast(ob_orig.data); - const CurvesGeometry &curves_orig = CurvesGeometry::wrap(curves_id_orig.geometry); + const CurvesGeometry &curves_orig = curves_id_orig.geometry.wrap(); const int points_num = curves_orig.points_num(); GeometryDeformation deformation; @@ -646,7 +646,7 @@ GeometryDeformation get_evaluated_curves_deformation(const Depsgraph &depsgraph, if (curves_component_eval != nullptr) { const Curves *curves_id_eval = curves_component_eval->get_for_read(); if (curves_id_eval != nullptr) { - const CurvesGeometry &curves_eval = CurvesGeometry::wrap(curves_id_eval->geometry); + const CurvesGeometry &curves_eval = curves_id_eval->geometry.wrap(); if (curves_eval.points_num() == points_num) { deformation.positions = curves_eval.positions(); } diff --git a/source/blender/blenkernel/intern/curve.cc b/source/blender/blenkernel/intern/curve.cc index 139d4a45553..61dc8a22077 100644 --- a/source/blender/blenkernel/intern/curve.cc +++ b/source/blender/blenkernel/intern/curve.cc @@ -258,7 +258,7 @@ static void curve_blend_read_data(BlendDataReader *reader, ID *id) switch_endian_knots(nu); } } - cu->texflag &= ~CU_AUTOSPACE_EVALUATED; + cu->texspace_flag &= ~CU_TEXSPACE_FLAG_AUTO_EVALUATED; BLO_read_data_address(reader, &cu->bevel_profile); if (cu->bevel_profile != nullptr) { @@ -304,33 +304,33 @@ static void curve_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_CU_LEGACY = { - /* id_code */ ID_CU_LEGACY, - /* id_filter */ FILTER_ID_CU_LEGACY, - /* main_listbase_index */ INDEX_ID_CU_LEGACY, - /* struct_size */ sizeof(Curve), - /* name */ "Curve", - /* name_plural */ "curves", - /* translation_context */ BLT_I18NCONTEXT_ID_CURVE_LEGACY, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_CU_LEGACY, + /*id_filter*/ FILTER_ID_CU_LEGACY, + /*main_listbase_index*/ INDEX_ID_CU_LEGACY, + /*struct_size*/ sizeof(Curve), + /*name*/ "Curve", + /*name_plural*/ "curves", + /*translation_context*/ 
BLT_I18NCONTEXT_ID_CURVE_LEGACY, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ curve_init_data, - /* copy_data */ curve_copy_data, - /* free_data */ curve_free_data, - /* make_local */ nullptr, - /* foreach_id */ curve_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ curve_init_data, + /*copy_data*/ curve_copy_data, + /*free_data*/ curve_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ curve_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ curve_blend_write, - /* blend_read_data */ curve_blend_read_data, - /* blend_read_lib */ curve_blend_read_lib, - /* blend_read_expand */ curve_blend_read_expand, + /*blend_write*/ curve_blend_write, + /*blend_read_data*/ curve_blend_read_data, + /*blend_read_lib*/ curve_blend_read_lib, + /*blend_read_expand*/ curve_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; void BKE_curve_editfont_free(Curve *cu) @@ -517,7 +517,7 @@ BoundBox *BKE_curve_boundbox_get(Object *ob) void BKE_curve_texspace_calc(Curve *cu) { - if (cu->texflag & CU_AUTOSPACE) { + if (cu->texspace_flag & CU_TEXSPACE_FLAG_AUTO) { float min[3], max[3]; INIT_MINMAX(min, max); @@ -526,35 +526,36 @@ void BKE_curve_texspace_calc(Curve *cu) max[0] = max[1] = max[2] = 1.0f; } - float loc[3], size[3]; - mid_v3_v3v3(loc, min, max); + float texspace_location[3], texspace_size[3]; + mid_v3_v3v3(texspace_location, min, max); - size[0] = (max[0] - min[0]) / 2.0f; - size[1] = (max[1] - min[1]) / 2.0f; - size[2] = (max[2] - min[2]) / 2.0f; + texspace_size[0] = (max[0] - min[0]) / 2.0f; + texspace_size[1] = (max[1] - min[1]) / 2.0f; + texspace_size[2] = (max[2] - min[2]) / 2.0f; for (int a = 0; a < 3; a++) { - if (size[a] == 0.0f) { - size[a] = 1.0f; + if (texspace_size[a] == 0.0f) { + texspace_size[a] = 1.0f; } - else if (size[a] > 0.0f && size[a] < 0.00001f) { - size[a] = 0.00001f; + else if (texspace_size[a] > 0.0f && texspace_size[a] < 0.00001f) { + texspace_size[a] = 0.00001f; } - else if (size[a] < 0.0f && size[a] > -0.00001f) { - size[a] = -0.00001f; + else if (texspace_size[a] < 0.0f && texspace_size[a] > -0.00001f) { + texspace_size[a] = -0.00001f; } } - copy_v3_v3(cu->loc, loc); - copy_v3_v3(cu->size, size); + copy_v3_v3(cu->texspace_location, texspace_location); + copy_v3_v3(cu->texspace_size, texspace_size); - cu->texflag |= CU_AUTOSPACE_EVALUATED; + cu->texspace_flag |= CU_TEXSPACE_FLAG_AUTO_EVALUATED; } } void BKE_curve_texspace_ensure(Curve *cu) { - if ((cu->texflag & CU_AUTOSPACE) && !(cu->texflag & CU_AUTOSPACE_EVALUATED)) { + if ((cu->texspace_flag & CU_TEXSPACE_FLAG_AUTO) && + (cu->texspace_flag & CU_TEXSPACE_FLAG_AUTO_EVALUATED) == 0) { BKE_curve_texspace_calc(cu); } } @@ -2062,7 +2063,7 @@ static void bevel_list_calc_bisect(BevList *bl) { BevPoint *bevp2, *bevp1, *bevp0; int nr; - bool is_cyclic = bl->poly != -1; + const bool is_cyclic = bl->poly != -1; if (is_cyclic) { bevp2 = bl->bevpoints; @@ -2228,19 +2229,19 @@ static void make_bevel_list_3D_zup(BevList *bl) } } -static void minimum_twist_between_two_points(BevPoint *current_point, BevPoint *previous_point) +static void minimum_twist_between_two_points(BevPoint *bevp_curr, const BevPoint *bevp_prev) { - float angle = angle_normalized_v3v3(previous_point->dir, current_point->dir); - float q[4]; + float 
angle = angle_normalized_v3v3(bevp_prev->dir, bevp_curr->dir); if (angle > 0.0f) { /* otherwise we can keep as is */ + float q[4]; float cross_tmp[3]; - cross_v3_v3v3(cross_tmp, previous_point->dir, current_point->dir); + cross_v3_v3v3(cross_tmp, bevp_prev->dir, bevp_curr->dir); axis_angle_to_quat(q, cross_tmp, angle); - mul_qt_qtqt(current_point->quat, q, previous_point->quat); + mul_qt_qtqt(bevp_curr->quat, q, bevp_prev->quat); } else { - copy_qt_qt(current_point->quat, previous_point->quat); + copy_qt_qt(bevp_curr->quat, bevp_prev->quat); } } @@ -2249,6 +2250,19 @@ static void make_bevel_list_3D_minimum_twist(BevList *bl) BevPoint *bevp2, *bevp1, *bevp0; /* Standard for all make_bevel_list_3D_* functions. */ int nr; float q[4]; + const bool is_cyclic = bl->poly != -1; + /* NOTE(@campbellbarton): For non-cyclic curves only initialize the first direction + * (via `vec_to_quat`), necessary for symmetry, see T71137. + * Otherwise initialize the first and second points before propagating rotation forward. + * This is historical as changing this can cause significantly different output. + * Specifically: `deform_modifiers` test: (`CurveMeshDeform`). + * + * While it would seem correct to only use the first point for non-cyclic curves as well + * the end-points direction is initialized from the input handles (instead of the directions + * between points), there is often a bigger difference in the first and second directions + * than you'd otherwise expect. So using only the first direction breaks compatibility + * enough it's best to leave it as-is. */ + const int nr_init = bl->nr - (is_cyclic ? 1 : 2); bevel_list_calc_bisect(bl); @@ -2256,21 +2270,18 @@ static void make_bevel_list_3D_minimum_twist(BevList *bl) bevp1 = bevp2 + (bl->nr - 1); bevp0 = bevp1 - 1; - /* The ordinal of the point being adjusted (bevp2). First point is 1. */ + nr = bl->nr; + while (nr--) { - /* First point is the reference, don't adjust. - * Skip this point in the following loop. */ - if (bl->nr > 0) { - vec_to_quat(bevp2->quat, bevp2->dir, 5, 1); + if (nr >= nr_init) { + /* Initialize the rotation, otherwise propagate the previous rotation forward. */ + vec_to_quat(bevp1->quat, bevp1->dir, 5, 1); + } + else { + minimum_twist_between_two_points(bevp1, bevp0); + } - bevp0 = bevp1; /* bevp0 is unused */ - bevp1 = bevp2; - bevp2++; - } - for (nr = 1; nr < bl->nr; nr++) { - minimum_twist_between_two_points(bevp2, bevp1); - - bevp0 = bevp1; /* bevp0 is unused */ + bevp0 = bevp1; bevp1 = bevp2; bevp2++; } @@ -2897,7 +2908,7 @@ void BKE_curve_bevelList_make(Object *ob, const ListBase *nurbs, const bool for_ /* Scale the threshold so high resolution shapes don't get over reduced, see: T49850. 
*/ const float threshold_resolu = 0.00001f / resolu; - bool is_cyclic = bl->poly != -1; + const bool is_cyclic = bl->poly != -1; nr = bl->nr; if (is_cyclic) { bevp1 = bl->bevpoints; @@ -5508,10 +5519,10 @@ void BKE_curve_eval_geometry(Depsgraph *depsgraph, Curve *curve) BKE_curve_texspace_calc(curve); if (DEG_is_active(depsgraph)) { Curve *curve_orig = (Curve *)DEG_get_original_id(&curve->id); - if (curve->texflag & CU_AUTOSPACE_EVALUATED) { - curve_orig->texflag |= CU_AUTOSPACE_EVALUATED; - copy_v3_v3(curve_orig->loc, curve->loc); - copy_v3_v3(curve_orig->size, curve->size); + if (curve->texspace_flag & CU_TEXSPACE_FLAG_AUTO_EVALUATED) { + curve_orig->texspace_flag |= CU_TEXSPACE_FLAG_AUTO_EVALUATED; + copy_v3_v3(curve_orig->texspace_location, curve->texspace_location); + copy_v3_v3(curve_orig->texspace_size, curve->texspace_size); } } } diff --git a/source/blender/blenkernel/intern/curve_bezier.cc b/source/blender/blenkernel/intern/curve_bezier.cc index 3aa87be3787..460c96196ea 100644 --- a/source/blender/blenkernel/intern/curve_bezier.cc +++ b/source/blender/blenkernel/intern/curve_bezier.cc @@ -33,20 +33,21 @@ void calculate_evaluated_offsets(const Span handle_types_left, MutableSpan evaluated_offsets) { const int size = handle_types_left.size(); - BLI_assert(evaluated_offsets.size() == size); + BLI_assert(evaluated_offsets.size() == size + 1); + evaluated_offsets.first() = 0; if (size == 1) { - evaluated_offsets.first() = 1; + evaluated_offsets.last() = 1; return; } int offset = 0; - for (const int i : IndexRange(size - 1)) { - offset += segment_is_vector(handle_types_left, handle_types_right, i) ? 1 : resolution; evaluated_offsets[i] = offset; + offset += segment_is_vector(handle_types_left, handle_types_right, i) ? 1 : resolution; } + evaluated_offsets.last(1) = offset; if (cyclic) { offset += last_cyclic_segment_is_vector(handle_types_left, handle_types_right) ? 1 : resolution; @@ -233,12 +234,11 @@ void evaluate_segment(const float3 &point_0, void calculate_evaluated_positions(const Span positions, const Span handles_left, const Span handles_right, - const Span evaluated_offsets, + const OffsetIndices evaluated_offsets, MutableSpan evaluated_positions) { - BLI_assert(evaluated_offsets.last() == evaluated_positions.size()); - BLI_assert(evaluated_offsets.size() == positions.size()); - if (evaluated_offsets.last() == 1) { + BLI_assert(evaluated_offsets.total_size() == evaluated_positions.size()); + if (evaluated_offsets.total_size() == 1) { evaluated_positions.first() = positions.first(); return; } @@ -248,29 +248,29 @@ void calculate_evaluated_positions(const Span positions, handles_right.first(), handles_left[1], positions[1], - evaluated_positions.take_front(evaluated_offsets.first())); + evaluated_positions.slice(evaluated_offsets[0])); /* Give each task fewer segments as the resolution gets larger. 
*/ const int grain_size = std::max(evaluated_positions.size() / positions.size() * 32, 1); - threading::parallel_for( - positions.index_range().drop_back(1).drop_front(1), grain_size, [&](IndexRange range) { - for (const int i : range) { - const IndexRange evaluated_range = offsets_to_range(evaluated_offsets, i - 1); - if (evaluated_range.size() == 1) { - evaluated_positions[evaluated_range.first()] = positions[i]; - } - else { - evaluate_segment(positions[i], - handles_right[i], - handles_left[i + 1], - positions[i + 1], - evaluated_positions.slice(evaluated_range)); - } - } - }); + const IndexRange inner_segments = positions.index_range().drop_back(1).drop_front(1); + threading::parallel_for(inner_segments, grain_size, [&](IndexRange range) { + for (const int i : range) { + const IndexRange evaluated_range = evaluated_offsets[i]; + if (evaluated_range.size() == 1) { + evaluated_positions[evaluated_range.first()] = positions[i]; + } + else { + evaluate_segment(positions[i], + handles_right[i], + handles_left[i + 1], + positions[i + 1], + evaluated_positions.slice(evaluated_range)); + } + } + }); /* Evaluate the final cyclic segment if necessary. */ - const IndexRange last_segment_points = offsets_to_range(evaluated_offsets, positions.size() - 2); + const IndexRange last_segment_points = evaluated_offsets[positions.index_range().last()]; if (last_segment_points.size() == 1) { evaluated_positions.last() = positions.last(); } @@ -295,34 +295,34 @@ static inline void linear_interpolation(const T &a, const T &b, MutableSpan d template static void interpolate_to_evaluated(const Span src, - const Span evaluated_offsets, + const OffsetIndices evaluated_offsets, MutableSpan dst) { BLI_assert(!src.is_empty()); - BLI_assert(evaluated_offsets.size() == src.size()); - BLI_assert(evaluated_offsets.last() == dst.size()); + BLI_assert(evaluated_offsets.total_size() == dst.size()); if (src.size() == 1) { BLI_assert(dst.size() == 1); dst.first() = src.first(); return; } - linear_interpolation(src.first(), src[1], dst.take_front(evaluated_offsets.first())); + linear_interpolation(src.first(), src[1], dst.slice(evaluated_offsets[0])); threading::parallel_for( src.index_range().drop_back(1).drop_front(1), 512, [&](IndexRange range) { for (const int i : range) { - const IndexRange segment_points = offsets_to_range(evaluated_offsets, i - 1); - linear_interpolation(src[i], src[i + 1], dst.slice(segment_points)); + const IndexRange segment = evaluated_offsets[i]; + linear_interpolation(src[i], src[i + 1], dst.slice(segment)); } }); - const IndexRange last_segment_points(evaluated_offsets.last(1), - evaluated_offsets.last() - evaluated_offsets.last(1)); - linear_interpolation(src.last(), src.first(), dst.slice(last_segment_points)); + const IndexRange last_segment = evaluated_offsets[src.index_range().last()]; + linear_interpolation(src.last(), src.first(), dst.slice(last_segment)); } -void interpolate_to_evaluated(const GSpan src, const Span evaluated_offsets, GMutableSpan dst) +void interpolate_to_evaluated(const GSpan src, + const OffsetIndices evaluated_offsets, + GMutableSpan dst) { attribute_math::convert_to_static_type(src.type(), [&](auto dummy) { using T = decltype(dummy); diff --git a/source/blender/blenkernel/intern/curve_catmull_rom.cc b/source/blender/blenkernel/intern/curve_catmull_rom.cc index 8247d9451e4..28f213ff68c 100644 --- a/source/blender/blenkernel/intern/curve_catmull_rom.cc +++ b/source/blender/blenkernel/intern/curve_catmull_rom.cc @@ -123,7 +123,7 @@ static void 
interpolate_to_evaluated(const Span src, template static void interpolate_to_evaluated(const Span src, const bool cyclic, - const Span evaluated_offsets, + const OffsetIndices evaluated_offsets, MutableSpan dst) { @@ -131,7 +131,7 @@ static void interpolate_to_evaluated(const Span src, src, cyclic, [evaluated_offsets](const int segment_i) -> IndexRange { - return bke::offsets_to_range(evaluated_offsets, segment_i); + return evaluated_offsets[segment_i]; }, dst); } @@ -149,7 +149,7 @@ void interpolate_to_evaluated(const GSpan src, void interpolate_to_evaluated(const GSpan src, const bool cyclic, - const Span evaluated_offsets, + const OffsetIndices evaluated_offsets, GMutableSpan dst) { attribute_math::convert_to_static_type(src.type(), [&](auto dummy) { diff --git a/source/blender/blenkernel/intern/curve_legacy_convert.cc b/source/blender/blenkernel/intern/curve_legacy_convert.cc index 938dcbd6269..c8ae56220c3 100644 --- a/source/blender/blenkernel/intern/curve_legacy_convert.cc +++ b/source/blender/blenkernel/intern/curve_legacy_convert.cc @@ -82,7 +82,7 @@ Curves *curve_legacy_to_curves(const Curve &curve_legacy, const ListBase &nurbs_ const Vector src_curves(nurbs_list); Curves *curves_id = curves_new_nomain(0, src_curves.size()); - CurvesGeometry &curves = CurvesGeometry::wrap(curves_id->geometry); + CurvesGeometry &curves = curves_id->geometry.wrap(); MutableAttributeAccessor curves_attributes = curves.attributes_for_write(); MutableSpan types = curves.curve_types_for_write(); @@ -108,6 +108,7 @@ Curves *curve_legacy_to_curves(const Curve &curve_legacy, const ListBase &nurbs_ return curves_id; } + const OffsetIndices points_by_curve = curves.points_by_curve(); MutableSpan positions = curves.positions_for_write(); SpanAttributeWriter radius_attribute = curves_attributes.lookup_or_add_for_write_only_span("radius", ATTR_DOMAIN_POINT); @@ -119,7 +120,7 @@ Curves *curve_legacy_to_curves(const Curve &curve_legacy, const ListBase &nurbs_ for (const int curve_i : selection.slice(range)) { const Nurb &src_curve = *src_curves[curve_i]; const Span src_points(src_curve.bp, src_curve.pntsu); - const IndexRange points = curves.points_for_curve(curve_i); + const IndexRange points = points_by_curve[curve_i]; for (const int i : src_points.index_range()) { const BPoint &bp = src_points[i]; @@ -146,7 +147,7 @@ Curves *curve_legacy_to_curves(const Curve &curve_legacy, const ListBase &nurbs_ for (const int curve_i : selection.slice(range)) { const Nurb &src_curve = *src_curves[curve_i]; const Span src_points(src_curve.bezt, src_curve.pntsu); - const IndexRange points = curves.points_for_curve(curve_i); + const IndexRange points = points_by_curve[curve_i]; resolutions[curve_i] = src_curve.resolu; @@ -174,7 +175,7 @@ Curves *curve_legacy_to_curves(const Curve &curve_legacy, const ListBase &nurbs_ for (const int curve_i : selection.slice(range)) { const Nurb &src_curve = *src_curves[curve_i]; const Span src_points(src_curve.bp, src_curve.pntsu); - const IndexRange points = curves.points_for_curve(curve_i); + const IndexRange points = points_by_curve[curve_i]; resolutions[curve_i] = src_curve.resolu; nurbs_orders[curve_i] = src_curve.orderu; diff --git a/source/blender/blenkernel/intern/curve_to_mesh_convert.cc b/source/blender/blenkernel/intern/curve_to_mesh_convert.cc index 481e3018942..23dcda10d8a 100644 --- a/source/blender/blenkernel/intern/curve_to_mesh_convert.cc +++ b/source/blender/blenkernel/intern/curve_to_mesh_convert.cc @@ -166,7 +166,7 @@ static void mark_bezier_vector_edges_sharp(const int 
profile_point_num, for (const int i : IndexRange(profile_point_num).drop_front(1)) { if (curves::bezier::point_is_sharp(handle_types_left, handle_types_right, i)) { - const int offset = main_edges_start + main_segment_num * control_point_offsets[i - 1]; + const int offset = main_edges_start + main_segment_num * control_point_offsets[i]; sharp_edges.slice(offset, main_segment_num).fill(true); } } @@ -246,8 +246,8 @@ static ResultOffsets calculate_result_offsets(const CurvesInfo &info, const bool result.main_indices.reinitialize(result.total); result.profile_indices.reinitialize(result.total); - info.main.ensure_evaluated_offsets(); - info.profile.ensure_evaluated_offsets(); + const OffsetIndices main_offsets = info.main.evaluated_points_by_curve(); + const OffsetIndices profile_offsets = info.profile.evaluated_points_by_curve(); int mesh_index = 0; int vert_offset = 0; @@ -256,7 +256,7 @@ static ResultOffsets calculate_result_offsets(const CurvesInfo &info, const bool int poly_offset = 0; for (const int i_main : info.main.curves_range()) { const bool main_cyclic = info.main_cyclic[i_main]; - const int main_point_num = info.main.evaluated_points_for_curve(i_main).size(); + const int main_point_num = main_offsets.size(i_main); const int main_segment_num = curves::segments_num(main_point_num, main_cyclic); for (const int i_profile : info.profile.curves_range()) { result.vert[mesh_index] = vert_offset; @@ -268,7 +268,7 @@ static ResultOffsets calculate_result_offsets(const CurvesInfo &info, const bool result.profile_indices[mesh_index] = i_profile; const bool profile_cyclic = info.profile_cyclic[i_profile]; - const int profile_point_num = info.profile.evaluated_points_for_curve(i_profile).size(); + const int profile_point_num = profile_offsets.size(i_profile); const int profile_segment_num = curves::segments_num(profile_point_num, profile_cyclic); const bool has_caps = fill_caps && !main_cyclic && profile_cyclic; @@ -377,13 +377,19 @@ static void foreach_curve_combination(const CurvesInfo &info, const ResultOffsets &offsets, const Fn &fn) { + const OffsetIndices main_offsets = info.main.evaluated_points_by_curve(); + const OffsetIndices profile_offsets = info.profile.evaluated_points_by_curve(); + const OffsetIndices vert_offsets(offsets.vert); + const OffsetIndices edge_offsets(offsets.edge); + const OffsetIndices poly_offsets(offsets.poly); + const OffsetIndices loop_offsets(offsets.loop); threading::parallel_for(IndexRange(offsets.total), 512, [&](IndexRange range) { for (const int i : range) { const int i_main = offsets.main_indices[i]; const int i_profile = offsets.profile_indices[i]; - const IndexRange main_points = info.main.evaluated_points_for_curve(i_main); - const IndexRange profile_points = info.profile.evaluated_points_for_curve(i_profile); + const IndexRange main_points = main_offsets[i_main]; + const IndexRange profile_points = profile_offsets[i_profile]; const bool main_cyclic = info.main_cyclic[i_main]; const bool profile_cyclic = info.profile_cyclic[i_profile]; @@ -399,10 +405,10 @@ static void foreach_curve_combination(const CurvesInfo &info, profile_cyclic, curves::segments_num(main_points.size(), main_cyclic), curves::segments_num(profile_points.size(), profile_cyclic), - offsets_to_range(offsets.vert.as_span(), i), - offsets_to_range(offsets.edge.as_span(), i), - offsets_to_range(offsets.poly.as_span(), i), - offsets_to_range(offsets.loop.as_span(), i)}); + vert_offsets[i], + edge_offsets[i], + poly_offsets[i], + loop_offsets[i]}); } }); } @@ -570,7 +576,7 @@ static void 
copy_profile_point_domain_attribute_to_mesh(const CurvesInfo &curves template static void copy_indices_to_offset_ranges(const VArray &src, const Span curve_indices, - const Span mesh_offsets, + const OffsetIndices mesh_offsets, MutableSpan dst) { /* This unnecessarily instantiates the "is single" case (which should be handled elsewhere if @@ -579,7 +585,7 @@ static void copy_indices_to_offset_ranges(const VArray &src, devirtualize_varray(src, [&](const auto &src) { threading::parallel_for(curve_indices.index_range(), 512, [&](IndexRange range) { for (const int i : range) { - dst.slice(offsets_to_range(mesh_offsets, i)).fill(src[curve_indices[i]]); + dst.slice(mesh_offsets[i]).fill(src[curve_indices[i]]); } }); }); @@ -633,10 +639,11 @@ static void write_sharp_bezier_edges(const CurvesInfo &curves_info, sharp_edges = mesh_attributes.lookup_or_add_for_write_span("sharp_edge", ATTR_DOMAIN_EDGE); + const OffsetIndices profile_points_by_curve = profile.points_by_curve(); const VArray types = profile.curve_types(); foreach_curve_combination(curves_info, offsets, [&](const CombinationInfo &info) { if (types[info.i_profile] == CURVE_TYPE_BEZIER) { - const IndexRange points = profile.points_for_curve(info.i_profile); + const IndexRange points = profile_points_by_curve[info.i_profile]; mark_bezier_vector_edges_sharp(points.size(), info.main_segment_num, profile.bezier_evaluated_offsets_for_curve(info.i_profile), diff --git a/source/blender/blenkernel/intern/curveprofile.cc b/source/blender/blenkernel/intern/curveprofile.cc index db0cf16d467..11fc4238e6a 100644 --- a/source/blender/blenkernel/intern/curveprofile.cc +++ b/source/blender/blenkernel/intern/curveprofile.cc @@ -760,6 +760,7 @@ static void create_samples(CurveProfile *profile, } BLI_assert(n_added == n_segments); /* n_added is just used for this assert, could remove it. */ + UNUSED_VARS_NDEBUG(n_added); /* Sample the points and add them to the locations table. 
*/ for (int i_sample = 0, i = 0; i < totedges; i++) { diff --git a/source/blender/blenkernel/intern/curves.cc b/source/blender/blenkernel/intern/curves.cc index 84ffb5e5314..1519c0addd2 100644 --- a/source/blender/blenkernel/intern/curves.cc +++ b/source/blender/blenkernel/intern/curves.cc @@ -70,8 +70,8 @@ static void curves_copy_data(Main * /*bmain*/, ID *id_dst, const ID *id_src, con const Curves *curves_src = (const Curves *)id_src; curves_dst->mat = static_cast(MEM_dupallocN(curves_src->mat)); - const bke::CurvesGeometry &src = bke::CurvesGeometry::wrap(curves_src->geometry); - bke::CurvesGeometry &dst = bke::CurvesGeometry::wrap(curves_dst->geometry); + const bke::CurvesGeometry &src = curves_src->geometry.wrap(); + bke::CurvesGeometry &dst = curves_dst->geometry.wrap(); /* We need special handling here because the generic ID management code has already done a * shallow copy from the source to the destination, and because the copy-on-write functionality @@ -93,7 +93,13 @@ static void curves_copy_data(Main * /*bmain*/, ID *id_dst, const ID *id_src, con dst.runtime = MEM_new(__func__); dst.runtime->type_counts = src.runtime->type_counts; + dst.runtime->evaluated_offsets_cache = src.runtime->evaluated_offsets_cache; + dst.runtime->nurbs_basis_cache = src.runtime->nurbs_basis_cache; + dst.runtime->evaluated_position_cache = src.runtime->evaluated_position_cache; dst.runtime->bounds_cache = src.runtime->bounds_cache; + dst.runtime->evaluated_length_cache = src.runtime->evaluated_length_cache; + dst.runtime->evaluated_tangent_cache = src.runtime->evaluated_tangent_cache; + dst.runtime->evaluated_normal_cache = src.runtime->evaluated_normal_cache; curves_dst->batch_cache = nullptr; } @@ -103,7 +109,7 @@ static void curves_free_data(ID *id) Curves *curves = (Curves *)id; BKE_animdata_free(&curves->id, false); - blender::bke::CurvesGeometry::wrap(curves->geometry).~CurvesGeometry(); + curves->geometry.wrap().~CurvesGeometry(); BKE_curves_batch_cache_free(curves); @@ -174,7 +180,7 @@ static void curves_blend_read_data(BlendDataReader *reader, ID *id) curves->geometry.runtime = MEM_new(__func__); /* Recalculate curve type count cache that isn't saved in files. 
*/ - blender::bke::CurvesGeometry::wrap(curves->geometry).update_curve_types(); + curves->geometry.wrap().update_curve_types(); /* Materials */ BLO_read_pointer_array(reader, (void **)&curves->mat); @@ -199,33 +205,33 @@ static void curves_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_CV = { - /* id_code */ ID_CV, - /* id_filter */ FILTER_ID_CV, - /* main_listbase_index */ INDEX_ID_CV, - /* struct_size */ sizeof(Curves), - /* name */ "Curves", - /* name_plural */ "hair_curves", - /* translation_context */ BLT_I18NCONTEXT_ID_CURVES, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_CV, + /*id_filter*/ FILTER_ID_CV, + /*main_listbase_index*/ INDEX_ID_CV, + /*struct_size*/ sizeof(Curves), + /*name*/ "Curves", + /*name_plural*/ "hair_curves", + /*translation_context*/ BLT_I18NCONTEXT_ID_CURVES, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ curves_init_data, - /* copy_data */ curves_copy_data, - /* free_data */ curves_free_data, - /* make_local */ nullptr, - /* foreach_id */ curves_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ curves_init_data, + /*copy_data*/ curves_copy_data, + /*free_data*/ curves_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ curves_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ curves_blend_write, - /* blend_read_data */ curves_blend_read_data, - /* blend_read_lib */ curves_blend_read_lib, - /* blend_read_expand */ curves_blend_read_expand, + /*blend_write*/ curves_blend_write, + /*blend_read_data*/ curves_blend_read_data, + /*blend_read_lib*/ curves_blend_read_lib, + /*blend_read_expand*/ curves_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; void *BKE_curves_add(Main *bmain, const char *name) @@ -247,8 +253,7 @@ BoundBox *BKE_curves_boundbox_get(Object *ob) if (ob->runtime.bb == nullptr) { ob->runtime.bb = MEM_cnew(__func__); - const blender::bke::CurvesGeometry &curves = blender::bke::CurvesGeometry::wrap( - curves_id->geometry); + const blender::bke::CurvesGeometry &curves = curves_id->geometry.wrap(); float3 min(FLT_MAX); float3 max(-FLT_MAX); @@ -372,7 +377,7 @@ Curves *curves_new_nomain(const int points_num, const int curves_num) BLI_assert(points_num >= 0); BLI_assert(curves_num >= 0); Curves *curves_id = static_cast(BKE_id_new_nomain(ID_CV, nullptr)); - CurvesGeometry &curves = CurvesGeometry::wrap(curves_id->geometry); + CurvesGeometry &curves = curves_id->geometry.wrap(); curves.resize(points_num, curves_num); return curves_id; } @@ -380,7 +385,7 @@ Curves *curves_new_nomain(const int points_num, const int curves_num) Curves *curves_new_nomain_single(const int points_num, const CurveType type) { Curves *curves_id = curves_new_nomain(points_num, 1); - CurvesGeometry &curves = CurvesGeometry::wrap(curves_id->geometry); + CurvesGeometry &curves = curves_id->geometry.wrap(); curves.offsets_for_write().last() = points_num; curves.fill_curve_types(type); return curves_id; @@ -389,7 +394,7 @@ Curves *curves_new_nomain_single(const int points_num, const CurveType type) Curves *curves_new_nomain(CurvesGeometry curves) { Curves *curves_id = static_cast(BKE_id_new_nomain(ID_CV, nullptr)); - bke::CurvesGeometry::wrap(curves_id->geometry) = std::move(curves); + 
curves_id->geometry.wrap() = std::move(curves); return curves_id; } diff --git a/source/blender/blenkernel/intern/curves_geometry.cc b/source/blender/blenkernel/intern/curves_geometry.cc index 65117ab00bb..380b3c54b80 100644 --- a/source/blender/blenkernel/intern/curves_geometry.cc +++ b/source/blender/blenkernel/intern/curves_geometry.cc @@ -93,7 +93,13 @@ static void copy_curves_geometry(CurvesGeometry &dst, const CurvesGeometry &src) /* Though type counts are a cache, they must be copied because they are calculated eagerly. */ dst.runtime->type_counts = src.runtime->type_counts; + dst.runtime->evaluated_offsets_cache = src.runtime->evaluated_offsets_cache; + dst.runtime->nurbs_basis_cache = src.runtime->nurbs_basis_cache; + dst.runtime->evaluated_position_cache = src.runtime->evaluated_position_cache; dst.runtime->bounds_cache = src.runtime->bounds_cache; + dst.runtime->evaluated_length_cache = src.runtime->evaluated_length_cache; + dst.runtime->evaluated_tangent_cache = src.runtime->evaluated_tangent_cache; + dst.runtime->evaluated_normal_cache = src.runtime->evaluated_normal_cache; } CurvesGeometry::CurvesGeometry(const CurvesGeometry &other) @@ -242,7 +248,14 @@ MutableSpan CurvesGeometry::curve_types_for_write() void CurvesGeometry::fill_curve_types(const CurveType type) { - this->curve_types_for_write().fill(type); + if (type == CURVE_TYPE_CATMULL_ROM) { + /* Avoid creating the attribute for Catmull Rom which is the default when the attribute doesn't + * exist anyway. */ + this->attributes_for_write().remove("curve_type"); + } + else { + this->curve_types_for_write().fill(type); + } this->runtime->type_counts.fill(0); this->runtime->type_counts[type] = this->curves_num(); this->tag_topology_changed(); @@ -256,7 +269,7 @@ void CurvesGeometry::fill_curve_types(const IndexMask selection, const CurveType } if (std::optional single_type = this->curve_types().get_if_single()) { if (single_type == type) { - /* No need for an array if the types are already a single with the correct type. 
*/ + this->fill_curve_types(type); return; } } @@ -448,33 +461,36 @@ template void build_offsets(MutableSpan offsets, const Co static void calculate_evaluated_offsets(const CurvesGeometry &curves, MutableSpan offsets, - MutableSpan bezier_evaluated_offsets) + MutableSpan all_bezier_offsets) { - VArray types = curves.curve_types(); - VArray resolution = curves.resolution(); - VArray cyclic = curves.cyclic(); + const OffsetIndices points_by_curve = curves.points_by_curve(); + const VArray types = curves.curve_types(); + const VArray resolution = curves.resolution(); + const VArray cyclic = curves.cyclic(); - VArraySpan handle_types_left{curves.handle_types_left()}; - VArraySpan handle_types_right{curves.handle_types_right()}; + const VArraySpan handle_types_left{curves.handle_types_left()}; + const VArraySpan handle_types_right{curves.handle_types_right()}; - VArray nurbs_orders = curves.nurbs_orders(); - VArray nurbs_knots_modes = curves.nurbs_knots_modes(); + const VArray nurbs_orders = curves.nurbs_orders(); + const VArray nurbs_knots_modes = curves.nurbs_knots_modes(); build_offsets(offsets, [&](const int curve_index) -> int { - const IndexRange points = curves.points_for_curve(curve_index); + const IndexRange points = points_by_curve[curve_index]; switch (types[curve_index]) { case CURVE_TYPE_CATMULL_ROM: return curves::catmull_rom::calculate_evaluated_num( points.size(), cyclic[curve_index], resolution[curve_index]); case CURVE_TYPE_POLY: return points.size(); - case CURVE_TYPE_BEZIER: + case CURVE_TYPE_BEZIER: { + const IndexRange offsets = curves::per_curve_point_offsets_range(points, curve_index); curves::bezier::calculate_evaluated_offsets(handle_types_left.slice(points), handle_types_right.slice(points), cyclic[curve_index], resolution[curve_index], - bezier_evaluated_offsets.slice(points)); - return bezier_evaluated_offsets[points.last()]; + all_bezier_offsets.slice(offsets)); + return all_bezier_offsets[offsets.last()]; + } case CURVE_TYPE_NURBS: return curves::nurbs::calculate_evaluated_num(points.size(), nurbs_orders[curve_index], @@ -487,27 +503,33 @@ static void calculate_evaluated_offsets(const CurvesGeometry &curves, }); } -void CurvesGeometry::ensure_evaluated_offsets() const +OffsetIndices CurvesGeometry::evaluated_points_by_curve() const { - this->runtime->offsets_cache_mutex.ensure([&]() { - this->runtime->evaluated_offsets_cache.resize(this->curves_num() + 1); + const bke::CurvesGeometryRuntime &runtime = *this->runtime; + if (this->is_single_type(CURVE_TYPE_POLY)) { + /* When all the curves are poly curves, the evaluated offsets are the same as the control + * point offsets, so it's possible to completely avoid building a new offsets array. 
*/ + runtime.evaluated_offsets_cache.ensure([&](CurvesGeometryRuntime::EvaluatedOffsets &r_data) { + r_data.evaluated_offsets.clear_and_shrink(); + r_data.all_bezier_offsets.clear_and_shrink(); + }); + return this->points_by_curve(); + } + + runtime.evaluated_offsets_cache.ensure([&](CurvesGeometryRuntime::EvaluatedOffsets &r_data) { + r_data.evaluated_offsets.resize(this->curves_num() + 1); if (this->has_curve_with_type(CURVE_TYPE_BEZIER)) { - this->runtime->bezier_evaluated_offsets.resize(this->points_num()); + r_data.all_bezier_offsets.resize(this->points_num() + this->curves_num()); } else { - this->runtime->bezier_evaluated_offsets.clear_and_shrink(); + r_data.all_bezier_offsets.clear_and_shrink(); } - calculate_evaluated_offsets( - *this, this->runtime->evaluated_offsets_cache, this->runtime->bezier_evaluated_offsets); + calculate_evaluated_offsets(*this, r_data.evaluated_offsets, r_data.all_bezier_offsets); }); -} -Span CurvesGeometry::evaluated_offsets() const -{ - this->ensure_evaluated_offsets(); - return this->runtime->evaluated_offsets_cache; + return OffsetIndices(runtime.evaluated_offsets_cache.data().evaluated_offsets); } IndexMask CurvesGeometry::indices_for_curve_type(const CurveType type, @@ -526,52 +548,54 @@ IndexMask CurvesGeometry::indices_for_curve_type(const CurveType type, Array CurvesGeometry::point_to_curve_map() const { + const OffsetIndices points_by_curve = this->points_by_curve(); Array map(this->points_num()); - for (const int i : this->curves_range()) { - map.as_mutable_span().slice(this->points_for_curve(i)).fill(i); - } + threading::parallel_for(this->curves_range(), 1024, [&](const IndexRange range) { + for (const int i_curve : range) { + map.as_mutable_span().slice(points_by_curve[i_curve]).fill(i_curve); + } + }); return map; } void CurvesGeometry::ensure_nurbs_basis_cache() const { - this->runtime->nurbs_basis_cache_mutex.ensure([&]() { + const bke::CurvesGeometryRuntime &runtime = *this->runtime; + runtime.nurbs_basis_cache.ensure([&](Vector &r_data) { Vector nurbs_indices; const IndexMask nurbs_mask = this->indices_for_curve_type(CURVE_TYPE_NURBS, nurbs_indices); if (nurbs_mask.is_empty()) { + r_data.clear_and_shrink(); return; } - this->runtime->nurbs_basis_cache.resize(this->curves_num()); - MutableSpan basis_caches(this->runtime->nurbs_basis_cache); + r_data.resize(this->curves_num()); - VArray cyclic = this->cyclic(); - VArray orders = this->nurbs_orders(); - VArray knots_modes = this->nurbs_knots_modes(); + const OffsetIndices points_by_curve = this->points_by_curve(); + const OffsetIndices evaluated_points_by_curve = this->evaluated_points_by_curve(); + const VArray cyclic = this->cyclic(); + const VArray orders = this->nurbs_orders(); + const VArray knots_modes = this->nurbs_knots_modes(); threading::parallel_for(nurbs_mask.index_range(), 64, [&](const IndexRange range) { + Vector knots; for (const int curve_index : nurbs_mask.slice(range)) { - const IndexRange points = this->points_for_curve(curve_index); - const IndexRange evaluated_points = this->evaluated_points_for_curve(curve_index); + const IndexRange points = points_by_curve[curve_index]; + const IndexRange evaluated_points = evaluated_points_by_curve[curve_index]; const int8_t order = orders[curve_index]; const bool is_cyclic = cyclic[curve_index]; const KnotsMode mode = KnotsMode(knots_modes[curve_index]); if (!curves::nurbs::check_valid_num_and_order(points.size(), order, is_cyclic, mode)) { - basis_caches[curve_index].invalid = true; + r_data[curve_index].invalid = true; continue; 
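A minimal sketch of the lazily-computed cache pattern behind the ensure()/data()/tag_dirty() calls in the hunks above, assuming they behave as compute-once, invalidate-on-change caches; the class and every name below are illustrative stand-ins, not Blender's actual CurvesGeometryRuntime cache types.

#include <functional>
#include <mutex>
#include <vector>

/* Minimal illustrative cache: compute the value once, reuse it until tagged dirty. */
template<typename T> class SimpleCache {
  mutable T value_;
  mutable bool dirty_ = true;
  mutable std::mutex mutex_;

 public:
  /* Run the computation only if the cached value is out of date. */
  void ensure(const std::function<void(T &)> &compute) const
  {
    std::lock_guard<std::mutex> lock(mutex_);
    if (dirty_) {
      compute(value_);
      dirty_ = false;
    }
  }
  const T &data() const
  {
    return value_;
  }
  void tag_dirty()
  {
    dirty_ = true;
  }
};

/* Hypothetical payload type, only for the usage example below. */
struct EvaluatedOffsetsExample {
  std::vector<int> evaluated_offsets;
};

/* Usage in the style seen above (all names hypothetical). */
void example(const SimpleCache<EvaluatedOffsetsExample> &cache)
{
  cache.ensure([](EvaluatedOffsetsExample &r_data) { r_data.evaluated_offsets = {0, 4, 6}; });
  const int total = cache.data().evaluated_offsets.back(); /* 6 */
  (void)total;
}

The sketch mirrors how the diff replaces per-field cache mutexes with per-cache objects: readers call ensure() with the computation, access the result through data(), and writers invalidate with tag_dirty().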
} - const int knots_num = curves::nurbs::knots_num(points.size(), order, is_cyclic); - Array knots(knots_num); + knots.reinitialize(curves::nurbs::knots_num(points.size(), order, is_cyclic)); curves::nurbs::calculate_knots(points.size(), mode, order, is_cyclic, knots); - curves::nurbs::calculate_basis_cache(points.size(), - evaluated_points.size(), - order, - is_cyclic, - knots, - basis_caches[curve_index]); + curves::nurbs::calculate_basis_cache( + points.size(), evaluated_points.size(), order, is_cyclic, knots, r_data[curve_index]); } }); }); @@ -579,35 +603,38 @@ void CurvesGeometry::ensure_nurbs_basis_cache() const Span CurvesGeometry::evaluated_positions() const { - this->runtime->position_cache_mutex.ensure([&]() { + const bke::CurvesGeometryRuntime &runtime = *this->runtime; + this->ensure_nurbs_basis_cache(); + runtime.evaluated_position_cache.ensure([&](CurvesGeometryRuntime::EvaluatedPositions &r_data) { if (this->is_single_type(CURVE_TYPE_POLY)) { - this->runtime->evaluated_positions_span = this->positions(); - this->runtime->evaluated_position_cache.clear_and_shrink(); + r_data.span = this->positions(); + r_data.vector.clear_and_shrink(); return; } - this->runtime->evaluated_position_cache.resize(this->evaluated_points_num()); - MutableSpan evaluated_positions = this->runtime->evaluated_position_cache; - this->runtime->evaluated_positions_span = evaluated_positions; + r_data.vector.resize(this->evaluated_points_num()); + r_data.span = r_data.vector; + MutableSpan evaluated_positions = r_data.vector; - VArray types = this->curve_types(); - VArray cyclic = this->cyclic(); - VArray resolution = this->resolution(); - Span positions = this->positions(); + const OffsetIndices points_by_curve = this->points_by_curve(); + const OffsetIndices evaluated_points_by_curve = this->evaluated_points_by_curve(); + const VArray types = this->curve_types(); + const VArray cyclic = this->cyclic(); + const VArray resolution = this->resolution(); + const Span positions = this->positions(); - Span handle_positions_left = this->handle_positions_left(); - Span handle_positions_right = this->handle_positions_right(); - Span bezier_evaluated_offsets = this->runtime->bezier_evaluated_offsets; + const Span handle_positions_left = this->handle_positions_left(); + const Span handle_positions_right = this->handle_positions_right(); + const Span all_bezier_offsets = runtime.evaluated_offsets_cache.data().all_bezier_offsets; - VArray nurbs_orders = this->nurbs_orders(); - Span nurbs_weights = this->nurbs_weights(); - - this->ensure_nurbs_basis_cache(); + const VArray nurbs_orders = this->nurbs_orders(); + const Span nurbs_weights = this->nurbs_weights(); + const Span nurbs_basis_cache = runtime.nurbs_basis_cache.data(); threading::parallel_for(this->curves_range(), 128, [&](IndexRange curves_range) { for (const int curve_index : curves_range) { - const IndexRange points = this->points_for_curve(curve_index); - const IndexRange evaluated_points = this->evaluated_points_for_curve(curve_index); + const IndexRange points = points_by_curve[curve_index]; + const IndexRange evaluated_points = evaluated_points_by_curve[curve_index]; switch (types[curve_index]) { case CURVE_TYPE_CATMULL_ROM: @@ -620,22 +647,23 @@ Span CurvesGeometry::evaluated_positions() const case CURVE_TYPE_POLY: evaluated_positions.slice(evaluated_points).copy_from(positions.slice(points)); break; - case CURVE_TYPE_BEZIER: + case CURVE_TYPE_BEZIER: { + const IndexRange offsets = curves::per_curve_point_offsets_range(points, curve_index); 
curves::bezier::calculate_evaluated_positions( positions.slice(points), handle_positions_left.slice(points), handle_positions_right.slice(points), - bezier_evaluated_offsets.slice(points), + all_bezier_offsets.slice(offsets), evaluated_positions.slice(evaluated_points)); break; - case CURVE_TYPE_NURBS: { - curves::nurbs::interpolate_to_evaluated(this->runtime->nurbs_basis_cache[curve_index], + } + case CURVE_TYPE_NURBS: + curves::nurbs::interpolate_to_evaluated(nurbs_basis_cache[curve_index], nurbs_orders[curve_index], nurbs_weights.slice_safe(points), positions.slice(points), evaluated_positions.slice(evaluated_points)); break; - } default: BLI_assert_unreachable(); break; @@ -643,21 +671,23 @@ Span CurvesGeometry::evaluated_positions() const } }); }); - return this->runtime->evaluated_positions_span; + return runtime.evaluated_position_cache.data().span; } Span CurvesGeometry::evaluated_tangents() const { - this->runtime->tangent_cache_mutex.ensure([&]() { + const bke::CurvesGeometryRuntime &runtime = *this->runtime; + runtime.evaluated_tangent_cache.ensure([&](Vector &r_data) { + const OffsetIndices evaluated_points_by_curve = this->evaluated_points_by_curve(); const Span evaluated_positions = this->evaluated_positions(); const VArray cyclic = this->cyclic(); - this->runtime->evaluated_tangent_cache.resize(this->evaluated_points_num()); - MutableSpan tangents = this->runtime->evaluated_tangent_cache; + r_data.resize(this->evaluated_points_num()); + MutableSpan tangents = r_data; threading::parallel_for(this->curves_range(), 128, [&](IndexRange curves_range) { for (const int curve_index : curves_range) { - const IndexRange evaluated_points = this->evaluated_points_for_curve(curve_index); + const IndexRange evaluated_points = evaluated_points_by_curve[curve_index]; curves::poly::calculate_tangents(evaluated_positions.slice(evaluated_points), cyclic[curve_index], tangents.slice(evaluated_points)); @@ -670,6 +700,7 @@ Span CurvesGeometry::evaluated_tangents() const Vector bezier_indices; const IndexMask bezier_mask = this->indices_for_curve_type(CURVE_TYPE_BEZIER, bezier_indices); if (!bezier_mask.is_empty()) { + const OffsetIndices points_by_curve = this->points_by_curve(); const Span positions = this->positions(); const Span handles_left = this->handle_positions_left(); const Span handles_right = this->handle_positions_right(); @@ -679,8 +710,8 @@ Span CurvesGeometry::evaluated_tangents() const if (cyclic[curve_index]) { continue; } - const IndexRange points = this->points_for_curve(curve_index); - const IndexRange evaluated_points = this->evaluated_points_for_curve(curve_index); + const IndexRange points = points_by_curve[curve_index]; + const IndexRange evaluated_points = evaluated_points_by_curve[curve_index]; const float epsilon = 1e-6f; if (!math::almost_equal_relative( @@ -697,7 +728,7 @@ Span CurvesGeometry::evaluated_tangents() const }); } }); - return this->runtime->evaluated_tangent_cache; + return runtime.evaluated_tangent_cache.data(); } static void rotate_directions_around_axes(MutableSpan directions, @@ -709,24 +740,75 @@ static void rotate_directions_around_axes(MutableSpan directions, } } +static void evaluate_generic_data_for_curve( + const int curve_index, + const IndexRange points, + const VArray &types, + const VArray &cyclic, + const VArray &resolution, + const Span all_bezier_evaluated_offsets, + const Span nurbs_basis_cache, + const VArray &nurbs_orders, + const Span nurbs_weights, + const GSpan src, + GMutableSpan dst) +{ + switch (types[curve_index]) { + case 
CURVE_TYPE_CATMULL_ROM: + curves::catmull_rom::interpolate_to_evaluated( + src, cyclic[curve_index], resolution[curve_index], dst); + break; + case CURVE_TYPE_POLY: + dst.copy_from(src); + break; + case CURVE_TYPE_BEZIER: { + const IndexRange offsets = curves::per_curve_point_offsets_range(points, curve_index); + curves::bezier::interpolate_to_evaluated( + src, all_bezier_evaluated_offsets.slice(offsets), dst); + break; + } + case CURVE_TYPE_NURBS: + curves::nurbs::interpolate_to_evaluated(nurbs_basis_cache[curve_index], + nurbs_orders[curve_index], + nurbs_weights.slice_safe(points), + src, + dst); + break; + } +} + Span CurvesGeometry::evaluated_normals() const { - this->runtime->normal_cache_mutex.ensure([&]() { - const Span evaluated_tangents = this->evaluated_tangents(); + const bke::CurvesGeometryRuntime &runtime = *this->runtime; + runtime.evaluated_normal_cache.ensure([&](Vector &r_data) { + const OffsetIndices points_by_curve = this->points_by_curve(); + const OffsetIndices evaluated_points_by_curve = this->evaluated_points_by_curve(); + const VArray types = this->curve_types(); const VArray cyclic = this->cyclic(); const VArray normal_mode = this->normal_mode(); - const VArray types = this->curve_types(); - const VArray tilt = this->tilt(); + const VArray resolution = this->resolution(); + const VArray nurbs_orders = this->nurbs_orders(); + const Span nurbs_weights = this->nurbs_weights(); + const Span all_bezier_offsets = runtime.evaluated_offsets_cache.data().all_bezier_offsets; + const Span nurbs_basis_cache = runtime.nurbs_basis_cache.data(); - this->runtime->evaluated_normal_cache.resize(this->evaluated_points_num()); - MutableSpan evaluated_normals = this->runtime->evaluated_normal_cache; + const Span evaluated_tangents = this->evaluated_tangents(); + const VArray tilt = this->tilt(); + VArraySpan tilt_span; + const bool use_tilt = !(tilt.is_single() && tilt.get_internal_single() == 0.0f); + if (use_tilt) { + tilt_span = tilt; + } + + r_data.resize(this->evaluated_points_num()); + MutableSpan evaluated_normals = r_data; threading::parallel_for(this->curves_range(), 128, [&](IndexRange curves_range) { /* Reuse a buffer for the evaluated tilts. */ Vector evaluated_tilts; for (const int curve_index : curves_range) { - const IndexRange evaluated_points = this->evaluated_points_for_curve(curve_index); + const IndexRange evaluated_points = evaluated_points_by_curve[curve_index]; switch (normal_mode[curve_index]) { case NORMAL_MODE_Z_UP: curves::poly::calculate_normals_z_up(evaluated_tangents.slice(evaluated_points), @@ -741,19 +823,26 @@ Span CurvesGeometry::evaluated_normals() const /* If the "tilt" attribute exists, rotate the normals around the tangents by the * evaluated angles. We can avoid copying the tilts to evaluate them for poly curves. 
*/ - if (!(tilt.is_single() && tilt.get_internal_single() == 0.0f)) { - const IndexRange points = this->points_for_curve(curve_index); - Span curve_tilt = tilt.get_internal_span().slice(points); + if (use_tilt) { + const IndexRange points = points_by_curve[curve_index]; if (types[curve_index] == CURVE_TYPE_POLY) { rotate_directions_around_axes(evaluated_normals.slice(evaluated_points), evaluated_tangents.slice(evaluated_points), - curve_tilt); + tilt_span.slice(points)); } else { - evaluated_tilts.clear(); - evaluated_tilts.resize(evaluated_points.size()); - this->interpolate_to_evaluated( - curve_index, curve_tilt, evaluated_tilts.as_mutable_span()); + evaluated_tilts.reinitialize(evaluated_points.size()); + evaluate_generic_data_for_curve(curve_index, + points, + types, + cyclic, + resolution, + all_bezier_offsets, + nurbs_basis_cache, + nurbs_orders, + nurbs_weights, + tilt_span.slice(points), + evaluated_tilts.as_mutable_span()); rotate_directions_around_axes(evaluated_normals.slice(evaluated_points), evaluated_tangents.slice(evaluated_points), evaluated_tilts.as_span()); @@ -762,99 +851,81 @@ Span CurvesGeometry::evaluated_normals() const } }); }); - return this->runtime->evaluated_normal_cache; + return this->runtime->evaluated_normal_cache.data(); } void CurvesGeometry::interpolate_to_evaluated(const int curve_index, const GSpan src, GMutableSpan dst) const { - BLI_assert(this->runtime->offsets_cache_mutex.is_cached()); - BLI_assert(this->runtime->nurbs_basis_cache_mutex.is_cached()); - const IndexRange points = this->points_for_curve(curve_index); + const bke::CurvesGeometryRuntime &runtime = *this->runtime; + const OffsetIndices points_by_curve = this->points_by_curve(); + const IndexRange points = points_by_curve[curve_index]; BLI_assert(src.size() == points.size()); - BLI_assert(dst.size() == this->evaluated_points_for_curve(curve_index).size()); - switch (this->curve_types()[curve_index]) { - case CURVE_TYPE_CATMULL_ROM: - curves::catmull_rom::interpolate_to_evaluated( - src, this->cyclic()[curve_index], this->resolution()[curve_index], dst); - return; - case CURVE_TYPE_POLY: - dst.type().copy_assign_n(src.data(), dst.data(), src.size()); - return; - case CURVE_TYPE_BEZIER: - curves::bezier::interpolate_to_evaluated( - src, this->runtime->bezier_evaluated_offsets.as_span().slice(points), dst); - return; - case CURVE_TYPE_NURBS: - curves::nurbs::interpolate_to_evaluated(this->runtime->nurbs_basis_cache[curve_index], - this->nurbs_orders()[curve_index], - this->nurbs_weights().slice_safe(points), - src, - dst); - return; - } - BLI_assert_unreachable(); + BLI_assert(dst.size() == this->evaluated_points_by_curve().size(curve_index)); + evaluate_generic_data_for_curve(curve_index, + points, + this->curve_types(), + this->cyclic(), + this->resolution(), + runtime.evaluated_offsets_cache.data().all_bezier_offsets, + runtime.nurbs_basis_cache.data(), + this->nurbs_orders(), + this->nurbs_weights(), + src, + dst); } void CurvesGeometry::interpolate_to_evaluated(const GSpan src, GMutableSpan dst) const { - BLI_assert(this->runtime->offsets_cache_mutex.is_cached()); - BLI_assert(this->runtime->nurbs_basis_cache_mutex.is_cached()); + const bke::CurvesGeometryRuntime &runtime = *this->runtime; + const OffsetIndices points_by_curve = this->points_by_curve(); + const OffsetIndices evaluated_points_by_curve = this->evaluated_points_by_curve(); const VArray types = this->curve_types(); const VArray resolution = this->resolution(); const VArray cyclic = this->cyclic(); const VArray nurbs_orders 
= this->nurbs_orders(); const Span nurbs_weights = this->nurbs_weights(); + const Span all_bezier_offsets = runtime.evaluated_offsets_cache.data().all_bezier_offsets; + const Span nurbs_basis_cache = runtime.nurbs_basis_cache.data(); threading::parallel_for(this->curves_range(), 512, [&](IndexRange curves_range) { for (const int curve_index : curves_range) { - const IndexRange points = this->points_for_curve(curve_index); - const IndexRange evaluated_points = this->evaluated_points_for_curve(curve_index); - switch (types[curve_index]) { - case CURVE_TYPE_CATMULL_ROM: - curves::catmull_rom::interpolate_to_evaluated(src.slice(points), - cyclic[curve_index], - resolution[curve_index], - dst.slice(evaluated_points)); - continue; - case CURVE_TYPE_POLY: - dst.slice(evaluated_points).copy_from(src.slice(points)); - continue; - case CURVE_TYPE_BEZIER: - curves::bezier::interpolate_to_evaluated( - src.slice(points), - this->runtime->bezier_evaluated_offsets.as_span().slice(points), - dst.slice(evaluated_points)); - continue; - case CURVE_TYPE_NURBS: - curves::nurbs::interpolate_to_evaluated(this->runtime->nurbs_basis_cache[curve_index], - nurbs_orders[curve_index], - nurbs_weights.slice_safe(points), - src.slice(points), - dst.slice(evaluated_points)); - continue; - } + const IndexRange points = points_by_curve[curve_index]; + const IndexRange evaluated_points = evaluated_points_by_curve[curve_index]; + evaluate_generic_data_for_curve(curve_index, + points, + types, + cyclic, + resolution, + all_bezier_offsets, + nurbs_basis_cache, + nurbs_orders, + nurbs_weights, + src.slice(points), + dst.slice(evaluated_points)); } }); } void CurvesGeometry::ensure_evaluated_lengths() const { - this->runtime->length_cache_mutex.ensure([&]() { + const bke::CurvesGeometryRuntime &runtime = *this->runtime; + runtime.evaluated_length_cache.ensure([&](Vector &r_data) { /* Use an extra length value for the final cyclic segment for a consistent size * (see comment on #evaluated_length_cache). 
*/ const int total_num = this->evaluated_points_num() + this->curves_num(); - this->runtime->evaluated_length_cache.resize(total_num); - MutableSpan evaluated_lengths = this->runtime->evaluated_length_cache; + r_data.resize(total_num); + MutableSpan evaluated_lengths = r_data; - Span evaluated_positions = this->evaluated_positions(); - VArray curves_cyclic = this->cyclic(); + const OffsetIndices evaluated_points_by_curve = this->evaluated_points_by_curve(); + const Span evaluated_positions = this->evaluated_positions(); + const VArray curves_cyclic = this->cyclic(); threading::parallel_for(this->curves_range(), 128, [&](IndexRange curves_range) { for (const int curve_index : curves_range) { const bool cyclic = curves_cyclic[curve_index]; - const IndexRange evaluated_points = this->evaluated_points_for_curve(curve_index); + const IndexRange evaluated_points = evaluated_points_by_curve[curve_index]; const IndexRange lengths_range = this->lengths_range_for_curve(curve_index, cyclic); length_parameterize::accumulate_lengths(evaluated_positions.slice(evaluated_points), cyclic, @@ -866,7 +937,7 @@ void CurvesGeometry::ensure_evaluated_lengths() const void CurvesGeometry::ensure_can_interpolate_to_evaluated() const { - this->ensure_evaluated_offsets(); + this->evaluated_points_by_curve(); this->ensure_nurbs_basis_cache(); } @@ -892,21 +963,21 @@ void CurvesGeometry::resize(const int points_num, const int curves_num) void CurvesGeometry::tag_positions_changed() { - this->runtime->position_cache_mutex.tag_dirty(); - this->runtime->tangent_cache_mutex.tag_dirty(); - this->runtime->normal_cache_mutex.tag_dirty(); - this->runtime->length_cache_mutex.tag_dirty(); + this->runtime->evaluated_position_cache.tag_dirty(); + this->runtime->evaluated_tangent_cache.tag_dirty(); + this->runtime->evaluated_normal_cache.tag_dirty(); + this->runtime->evaluated_length_cache.tag_dirty(); this->runtime->bounds_cache.tag_dirty(); } void CurvesGeometry::tag_topology_changed() { this->tag_positions_changed(); - this->runtime->offsets_cache_mutex.tag_dirty(); - this->runtime->nurbs_basis_cache_mutex.tag_dirty(); + this->runtime->evaluated_offsets_cache.tag_dirty(); + this->runtime->nurbs_basis_cache.tag_dirty(); } void CurvesGeometry::tag_normals_changed() { - this->runtime->normal_cache_mutex.tag_dirty(); + this->runtime->evaluated_normal_cache.tag_dirty(); } void CurvesGeometry::tag_radii_changed() { @@ -939,6 +1010,7 @@ void CurvesGeometry::calculate_bezier_auto_handles() if (this->handle_positions_left().is_empty() || this->handle_positions_right().is_empty()) { return; } + const OffsetIndices points_by_curve = this->points_by_curve(); const VArray types = this->curve_types(); const VArray cyclic = this->cyclic(); const VArraySpan types_left{this->handle_types_left()}; @@ -950,7 +1022,7 @@ void CurvesGeometry::calculate_bezier_auto_handles() threading::parallel_for(this->curves_range(), 128, [&](IndexRange range) { for (const int i_curve : range) { if (types[i_curve] == CURVE_TYPE_BEZIER) { - const IndexRange points = this->points_for_curve(i_curve); + const IndexRange points = points_by_curve[i_curve]; curves::bezier::calculate_auto_handles(cyclic[i_curve], types_left.slice(points), types_right.slice(points), @@ -1032,27 +1104,13 @@ static void copy_with_map(const GSpan src, const Span map, GMutableSpan dst }); } -/** - * Builds an array that for every point, contains the corresponding curve index. 
- */ -static Array build_point_to_curve_map(const CurvesGeometry &curves) -{ - Array point_to_curve_map(curves.points_num()); - threading::parallel_for(curves.curves_range(), 1024, [&](const IndexRange curves_range) { - for (const int i_curve : curves_range) { - point_to_curve_map.as_mutable_span().slice(curves.points_for_curve(i_curve)).fill(i_curve); - } - }); - return point_to_curve_map; -} - static CurvesGeometry copy_with_removed_points( const CurvesGeometry &curves, const IndexMask points_to_delete, const AnonymousAttributePropagationInfo &propagation_info) { /* Use a map from points to curves to facilitate using an #IndexMask input. */ - const Array point_to_curve_map = build_point_to_curve_map(curves); + const Array point_to_curve_map = curves.point_to_curve_map(); const Vector copy_point_ranges = points_to_delete.extract_ranges_invert( curves.points_range()); @@ -1168,6 +1226,7 @@ static CurvesGeometry copy_with_removed_curves( const IndexMask curves_to_delete, const AnonymousAttributePropagationInfo &propagation_info) { + const OffsetIndices old_points_by_curve = curves.points_by_curve(); const Span old_offsets = curves.offsets(); const Vector old_curve_ranges = curves_to_delete.extract_ranges_invert( curves.curves_range(), nullptr); @@ -1180,7 +1239,7 @@ static CurvesGeometry copy_with_removed_curves( new_curve_ranges.append(IndexRange(new_tot_curves, curve_range.size())); new_tot_curves += curve_range.size(); - const IndexRange old_point_range = curves.points_for_curves(curve_range); + const IndexRange old_point_range = old_points_by_curve[curve_range]; old_point_ranges.append(old_point_range); new_point_ranges.append(IndexRange(new_tot_points, old_point_range.size())); new_tot_points += old_point_range.size(); @@ -1285,9 +1344,10 @@ static void reverse_curve_point_data(const CurvesGeometry &curves, const IndexMask curve_selection, MutableSpan data) { + const OffsetIndices points_by_curve = curves.points_by_curve(); threading::parallel_for(curve_selection.index_range(), 256, [&](IndexRange range) { for (const int curve_i : curve_selection.slice(range)) { - data.slice(curves.points_for_curve(curve_i)).reverse(); + data.slice(points_by_curve[curve_i]).reverse(); } }); } @@ -1298,9 +1358,10 @@ static void reverse_swap_curve_point_data(const CurvesGeometry &curves, MutableSpan data_a, MutableSpan data_b) { + const OffsetIndices points_by_curve = curves.points_by_curve(); threading::parallel_for(curve_selection.index_range(), 256, [&](IndexRange range) { for (const int curve_i : curve_selection.slice(range)) { - const IndexRange points = curves.points_for_curve(curve_i); + const IndexRange points = points_by_curve[curve_i]; MutableSpan a = data_a.slice(points); MutableSpan b = data_b.slice(points); for (const int i : IndexRange(points.size() / 2)) { @@ -1406,9 +1467,10 @@ static void adapt_curve_domain_point_to_curve_impl(const CurvesGeometry &curves, { attribute_math::DefaultMixer mixer(r_values); + const OffsetIndices points_by_curve = curves.points_by_curve(); threading::parallel_for(curves.curves_range(), 128, [&](const IndexRange range) { for (const int i_curve : range) { - for (const int i_point : curves.points_for_curve(i_curve)) { + for (const int i_point : points_by_curve[i_curve]) { mixer.mix_in(i_curve, old_values[i_point]); } } @@ -1428,9 +1490,10 @@ void adapt_curve_domain_point_to_curve_impl(const CurvesGeometry &curves, const VArray &old_values, MutableSpan r_values) { + const OffsetIndices points_by_curve = curves.points_by_curve(); r_values.fill(true); for (const 
int i_curve : IndexRange(curves.curves_num())) { - for (const int i_point : curves.points_for_curve(i_curve)) { + for (const int i_point : points_by_curve[i_curve]) { if (!old_values[i_point]) { r_values[i_curve] = false; break; @@ -1466,8 +1529,9 @@ static void adapt_curve_domain_curve_to_point_impl(const CurvesGeometry &curves, const VArray &old_values, MutableSpan r_values) { + const OffsetIndices points_by_curve = curves.points_by_curve(); for (const int i_curve : IndexRange(curves.curves_num())) { - r_values.slice(curves.points_for_curve(i_curve)).fill(old_values[i_curve]); + r_values.slice(points_by_curve[i_curve]).fill(old_values[i_curve]); } } diff --git a/source/blender/blenkernel/intern/curves_utils.cc b/source/blender/blenkernel/intern/curves_utils.cc index f5a69a995a3..55f41e412a6 100644 --- a/source/blender/blenkernel/intern/curves_utils.cc +++ b/source/blender/blenkernel/intern/curves_utils.cc @@ -10,66 +10,65 @@ namespace blender::bke::curves { -void fill_curve_counts(const bke::CurvesGeometry &curves, - const Span curve_ranges, - MutableSpan counts) +void copy_curve_sizes(const OffsetIndices points_by_curve, + const IndexMask mask, + MutableSpan sizes) +{ + threading::parallel_for(mask.index_range(), 4096, [&](IndexRange ranges_range) { + for (const int64_t i : mask.slice(ranges_range)) { + sizes[i] = points_by_curve.size(i); + } + }); +} + +void copy_curve_sizes(const OffsetIndices points_by_curve, + const Span curve_ranges, + MutableSpan sizes) { threading::parallel_for(curve_ranges.index_range(), 512, [&](IndexRange ranges_range) { for (const IndexRange curves_range : curve_ranges.slice(ranges_range)) { threading::parallel_for(curves_range, 4096, [&](IndexRange range) { for (const int i : range) { - counts[i] = curves.points_for_curve(i).size(); + sizes[i] = points_by_curve.size(i); } }); } }); } -void accumulate_counts_to_offsets(MutableSpan counts_to_offsets, const int start_offset) -{ - int offset = start_offset; - for (const int i : counts_to_offsets.index_range().drop_back(1)) { - const int count = counts_to_offsets[i]; - BLI_assert(count > 0); - counts_to_offsets[i] = offset; - offset += count; - } - counts_to_offsets.last() = offset; -} - -void copy_point_data(const CurvesGeometry &src_curves, - const CurvesGeometry &dst_curves, +void copy_point_data(const OffsetIndices src_points_by_curve, + const OffsetIndices dst_points_by_curve, const Span curve_ranges, const GSpan src, GMutableSpan dst) { threading::parallel_for(curve_ranges.index_range(), 512, [&](IndexRange range) { for (const IndexRange range : curve_ranges.slice(range)) { - const IndexRange src_points = src_curves.points_for_curves(range); - const IndexRange dst_points = dst_curves.points_for_curves(range); + const IndexRange src_points = src_points_by_curve[range]; + const IndexRange dst_points = dst_points_by_curve[range]; /* The arrays might be large, so a threaded copy might make sense here too. 
*/ dst.slice(dst_points).copy_from(src.slice(src_points)); } }); } -void copy_point_data(const CurvesGeometry &src_curves, - const CurvesGeometry &dst_curves, +void copy_point_data(const OffsetIndices src_points_by_curve, + const OffsetIndices dst_points_by_curve, const IndexMask src_curve_selection, const GSpan src, GMutableSpan dst) { threading::parallel_for(src_curve_selection.index_range(), 512, [&](IndexRange range) { for (const int i : src_curve_selection.slice(range)) { - const IndexRange src_points = src_curves.points_for_curve(i); - const IndexRange dst_points = dst_curves.points_for_curve(i); + const IndexRange src_points = src_points_by_curve[i]; + const IndexRange dst_points = dst_points_by_curve[i]; /* The arrays might be large, so a threaded copy might make sense here too. */ dst.slice(dst_points).copy_from(src.slice(src_points)); } }); } -void fill_points(const CurvesGeometry &curves, +void fill_points(const OffsetIndices points_by_curve, const IndexMask curve_selection, const GPointer value, GMutableSpan dst) @@ -78,13 +77,13 @@ void fill_points(const CurvesGeometry &curves, const CPPType &type = dst.type(); threading::parallel_for(curve_selection.index_range(), 512, [&](IndexRange range) { for (const int i : curve_selection.slice(range)) { - const IndexRange points = curves.points_for_curve(i); - type.fill_assign_n(value.get(), dst.slice(curves.points_for_curve(i)).data(), points.size()); + const IndexRange points = points_by_curve[i]; + type.fill_assign_n(value.get(), dst.slice(points).data(), points.size()); } }); } -void fill_points(const CurvesGeometry &curves, +void fill_points(const OffsetIndices points_by_curve, Span curve_ranges, GPointer value, GMutableSpan dst) @@ -93,7 +92,7 @@ void fill_points(const CurvesGeometry &curves, const CPPType &type = dst.type(); threading::parallel_for(curve_ranges.index_range(), 512, [&](IndexRange range) { for (const IndexRange range : curve_ranges.slice(range)) { - const IndexRange points = curves.points_for_curves(range); + const IndexRange points = points_by_curve[range]; type.fill_assign_n(value.get(), dst.slice(points).data(), points.size()); } }); diff --git a/source/blender/blenkernel/intern/customdata.cc b/source/blender/blenkernel/intern/customdata.cc index 45bb3874ede..1b20cef4224 100644 --- a/source/blender/blenkernel/intern/customdata.cc +++ b/source/blender/blenkernel/intern/customdata.cc @@ -271,8 +271,8 @@ static void layerInterp_mdeformvert(const void **sources, const int count, void *dest) { - /* a single linked list of MDeformWeight's - * use this to avoid double allocs (which LinkNode would do) */ + /* A single linked list of #MDeformWeight's. + * use this to avoid double allocations (which #LinkNode would do). 
*/ struct MDeformWeight_Link { struct MDeformWeight_Link *next; MDeformWeight dw; @@ -2197,80 +2197,81 @@ static const char *LAYERTYPENAMES[CD_NUMTYPES] = {/* 0-4 */ "CDPropInt16"}; const CustomData_MeshMasks CD_MASK_BAREMESH = { - /* vmask */ CD_MASK_PROP_FLOAT3 | CD_MASK_MESH_ID, - /* emask */ CD_MASK_MEDGE | CD_MASK_MESH_ID, - /* fmask */ 0, - /* pmask */ CD_MASK_MPOLY | CD_MASK_FACEMAP | CD_MASK_MESH_ID, - /* lmask */ CD_MASK_MLOOP | CD_MASK_MESH_ID, + /*vmask*/ CD_MASK_PROP_FLOAT3 | CD_MASK_MESH_ID, + /*emask*/ CD_MASK_MEDGE | CD_MASK_MESH_ID, + /*fmask*/ 0, + /*pmask*/ CD_MASK_MPOLY | CD_MASK_FACEMAP | CD_MASK_MESH_ID, + /*lmask*/ CD_MASK_MLOOP | CD_MASK_MESH_ID, }; const CustomData_MeshMasks CD_MASK_BAREMESH_ORIGINDEX = { - /* vmask */ CD_MASK_PROP_FLOAT3 | CD_MASK_ORIGINDEX | CD_MASK_MESH_ID, - /* emask */ CD_MASK_MEDGE | CD_MASK_ORIGINDEX | CD_MASK_MESH_ID, - /* fmask */ 0, - /* pmask */ CD_MASK_MPOLY | CD_MASK_FACEMAP | CD_MASK_ORIGINDEX | CD_MASK_MESH_ID, - /* lmask */ CD_MASK_MLOOP | CD_MASK_MESH_ID, + /*vmask*/ CD_MASK_PROP_FLOAT3 | CD_MASK_ORIGINDEX | CD_MASK_MESH_ID, + /*emask*/ CD_MASK_MEDGE | CD_MASK_ORIGINDEX | CD_MASK_MESH_ID, + /*fmask*/ 0, + /*pmask*/ CD_MASK_MPOLY | CD_MASK_FACEMAP | CD_MASK_ORIGINDEX | CD_MASK_MESH_ID, + /*lmask*/ CD_MASK_MLOOP | CD_MASK_MESH_ID, }; const CustomData_MeshMasks CD_MASK_MESH = { - /* vmask */ (CD_MASK_PROP_FLOAT3 | CD_MASK_MDEFORMVERT | CD_MASK_MVERT_SKIN | - CD_MASK_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_CREASE | CD_MASK_BWEIGHT | - CD_MASK_MESH_ID), - /* emask */ + /*vmask*/ (CD_MASK_PROP_FLOAT3 | CD_MASK_MDEFORMVERT | CD_MASK_MVERT_SKIN | + CD_MASK_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_CREASE | CD_MASK_BWEIGHT | + CD_MASK_MESH_ID), + /*emask*/ (CD_MASK_MEDGE | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL | CD_MASK_BWEIGHT | CD_MASK_CREASE | CD_MASK_MESH_ID), - /* fmask */ 0, - /* pmask */ + /*fmask*/ 0, + /*pmask*/ (CD_MASK_MPOLY | CD_MASK_FACEMAP | CD_MASK_FREESTYLE_FACE | CD_MASK_PROP_ALL | CD_MASK_MESH_ID), - /* lmask */ + /*lmask*/ (CD_MASK_MLOOP | CD_MASK_MDISPS | CD_MASK_CUSTOMLOOPNORMAL | CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_MESH_ID), }; const CustomData_MeshMasks CD_MASK_DERIVEDMESH = { - /* vmask */ (CD_MASK_ORIGINDEX | CD_MASK_MDEFORMVERT | CD_MASK_SHAPEKEY | CD_MASK_MVERT_SKIN | - CD_MASK_PAINT_MASK | CD_MASK_ORCO | CD_MASK_CLOTH_ORCO | CD_MASK_PROP_ALL | - CD_MASK_CREASE | CD_MASK_BWEIGHT | CD_MASK_MESH_ID), - /* emask */ + /*vmask*/ (CD_MASK_ORIGINDEX | CD_MASK_MDEFORMVERT | CD_MASK_SHAPEKEY | CD_MASK_MVERT_SKIN | + CD_MASK_PAINT_MASK | CD_MASK_ORCO | CD_MASK_CLOTH_ORCO | CD_MASK_PROP_ALL | + CD_MASK_CREASE | CD_MASK_BWEIGHT | CD_MASK_MESH_ID), + /*emask*/ (CD_MASK_ORIGINDEX | CD_MASK_FREESTYLE_EDGE | CD_MASK_BWEIGHT | CD_MASK_PROP_ALL | CD_MASK_CREASE | CD_MASK_MESH_ID), - /* fmask */ (CD_MASK_ORIGINDEX | CD_MASK_ORIGSPACE | CD_MASK_PREVIEW_MCOL | CD_MASK_TANGENT), - /* pmask */ + /*fmask*/ (CD_MASK_ORIGINDEX | CD_MASK_ORIGSPACE | CD_MASK_PREVIEW_MCOL | CD_MASK_TANGENT), + /*pmask*/ (CD_MASK_ORIGINDEX | CD_MASK_FREESTYLE_FACE | CD_MASK_FACEMAP | CD_MASK_PROP_ALL | CD_MASK_MESH_ID), - /* lmask */ + /*lmask*/ (CD_MASK_CUSTOMLOOPNORMAL | CD_MASK_PREVIEW_MLOOPCOL | CD_MASK_ORIGSPACE_MLOOP | - CD_MASK_PROP_ALL | CD_MASK_MESH_ID), /* XXX MISSING CD_MASK_MLOOPTANGENT ? */ + CD_MASK_PROP_ALL | CD_MASK_MESH_ID), /* XXX: MISSING #CD_MASK_MLOOPTANGENT ? 
*/ }; const CustomData_MeshMasks CD_MASK_BMESH = { - /* vmask */ (CD_MASK_MDEFORMVERT | CD_MASK_BWEIGHT | CD_MASK_MVERT_SKIN | CD_MASK_SHAPEKEY | - CD_MASK_SHAPE_KEYINDEX | CD_MASK_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_CREASE | - CD_MASK_MESH_ID | CD_MASK_DYNTOPO_VERT), - /* emask */ + /*vmask*/ (CD_MASK_MDEFORMVERT | CD_MASK_BWEIGHT | CD_MASK_MVERT_SKIN | CD_MASK_SHAPEKEY | + CD_MASK_SHAPE_KEYINDEX | CD_MASK_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_CREASE | + CD_MASK_MESH_ID), + /*emask*/ (CD_MASK_BWEIGHT | CD_MASK_CREASE | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL | CD_MASK_MESH_ID), - /* fmask */ 0, - /* pmask */ + /*fmask*/ 0, + /*pmask*/ (CD_MASK_FREESTYLE_FACE | CD_MASK_FACEMAP | CD_MASK_PROP_ALL | CD_MASK_MESH_ID), - /* lmask */ + /*lmask*/ (CD_MASK_MDISPS | CD_MASK_CUSTOMLOOPNORMAL | CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_MESH_ID), }; const CustomData_MeshMasks CD_MASK_EVERYTHING = { - /* vmask */ (CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_MDEFORMVERT | - CD_MASK_BWEIGHT | CD_MASK_MVERT_SKIN | CD_MASK_ORCO | CD_MASK_CLOTH_ORCO | - CD_MASK_SHAPEKEY | CD_MASK_SHAPE_KEYINDEX | CD_MASK_PAINT_MASK | - CD_MASK_PROP_ALL | CD_MASK_CREASE | CD_MASK_MESH_ID | CD_MASK_DYNTOPO_VERT), - /* emask */ + /*vmask*/ (CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_MDEFORMVERT | CD_MASK_BWEIGHT | + CD_MASK_MVERT_SKIN | CD_MASK_ORCO | CD_MASK_CLOTH_ORCO | CD_MASK_SHAPEKEY | + CD_MASK_SHAPE_KEYINDEX | CD_MASK_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_CREASE | + CD_MASK_MESH_ID), + /*emask*/ (CD_MASK_MEDGE | CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_BWEIGHT | CD_MASK_CREASE | CD_MASK_FREESTYLE_EDGE | CD_MASK_PROP_ALL | CD_MASK_MESH_ID), - /* fmask */ + /*fmask*/ (CD_MASK_MFACE | CD_MASK_ORIGINDEX | CD_MASK_NORMAL | CD_MASK_MTFACE | CD_MASK_MCOL | CD_MASK_ORIGSPACE | CD_MASK_TANGENT | CD_MASK_TESSLOOPNORMAL | CD_MASK_PREVIEW_MCOL | - CD_MASK_PROP_ALL), - /* pmask */ + CD_MASK_PROP_ALL | CD_MASK_MESH_ID), + /*pmask*/ (CD_MASK_MPOLY | CD_MASK_BM_ELEM_PYPTR | CD_MASK_ORIGINDEX | CD_MASK_FACEMAP | + CD_MASK_FREESTYLE_FACE | CD_MASK_PROP_ALL | CD_MASK_MESH_ID), - /* lmask */ + /*lmask*/ (CD_MASK_MLOOP | CD_MASK_BM_ELEM_PYPTR | CD_MASK_MDISPS | CD_MASK_NORMAL | CD_MASK_CUSTOMLOOPNORMAL | CD_MASK_MLOOPTANGENT | CD_MASK_PREVIEW_MLOOPCOL | CD_MASK_ORIGSPACE_MLOOP | CD_MASK_GRID_PAINT_MASK | CD_MASK_PROP_ALL | CD_MASK_MESH_ID), @@ -6096,7 +6097,6 @@ const blender::CPPType *custom_data_type_to_cpp_type(const eCustomDataType type) default: return nullptr; } - return nullptr; } eCustomDataType cpp_type_to_custom_data_type(const blender::CPPType &type) diff --git a/source/blender/blenkernel/intern/data_transfer.cc b/source/blender/blenkernel/intern/data_transfer.cc index 39d6cde31e7..740af7fa4db 100644 --- a/source/blender/blenkernel/intern/data_transfer.cc +++ b/source/blender/blenkernel/intern/data_transfer.cc @@ -237,9 +237,11 @@ int BKE_object_data_transfer_dttype_to_srcdst_index(const int dtdata_type) return DT_MULTILAYER_INDEX_UV; case DT_TYPE_MPROPCOL_VERT: case DT_TYPE_MLOOPCOL_VERT: + case DT_TYPE_MPROPCOL_VERT | DT_TYPE_MLOOPCOL_VERT: return DT_MULTILAYER_INDEX_VCOL_VERT; case DT_TYPE_MPROPCOL_LOOP: case DT_TYPE_MLOOPCOL_LOOP: + case DT_TYPE_MPROPCOL_LOOP | DT_TYPE_MLOOPCOL_LOOP: return DT_MULTILAYER_INDEX_VCOL_LOOP; default: return DT_MULTILAYER_INDEX_INVALID; diff --git a/source/blender/blenkernel/intern/deform.c b/source/blender/blenkernel/intern/deform.cc similarity index 88% rename from source/blender/blenkernel/intern/deform.c rename to 
source/blender/blenkernel/intern/deform.cc index 8bc98270054..dda4d098126 100644 --- a/source/blender/blenkernel/intern/deform.c +++ b/source/blender/blenkernel/intern/deform.cc @@ -5,11 +5,11 @@ * \ingroup bke */ -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include "MEM_guardedalloc.h" @@ -46,7 +46,7 @@ bDeformGroup *BKE_object_defgroup_new(Object *ob, const char *name) BLI_assert(OB_TYPE_SUPPORT_VGROUP(ob->type)); - defgroup = MEM_callocN(sizeof(bDeformGroup), __func__); + defgroup = MEM_cnew(__func__); BLI_strncpy(defgroup->name, name, sizeof(defgroup->name)); @@ -62,31 +62,26 @@ bDeformGroup *BKE_object_defgroup_new(Object *ob, const char *name) void BKE_defgroup_copy_list(ListBase *outbase, const ListBase *inbase) { - bDeformGroup *defgroup, *defgroupn; - BLI_listbase_clear(outbase); - - for (defgroup = inbase->first; defgroup; defgroup = defgroup->next) { - defgroupn = BKE_defgroup_duplicate(defgroup); + LISTBASE_FOREACH (const bDeformGroup *, defgroup, inbase) { + bDeformGroup *defgroupn = BKE_defgroup_duplicate(defgroup); BLI_addtail(outbase, defgroupn); } } bDeformGroup *BKE_defgroup_duplicate(const bDeformGroup *ingroup) { - bDeformGroup *outgroup; - if (!ingroup) { BLI_assert(0); - return NULL; + return nullptr; } - outgroup = MEM_callocN(sizeof(bDeformGroup), "copy deformGroup"); + bDeformGroup *outgroup = MEM_cnew(__func__); /* For now, just copy everything over. */ memcpy(outgroup, ingroup, sizeof(bDeformGroup)); - outgroup->next = outgroup->prev = NULL; + outgroup->next = outgroup->prev = nullptr; return outgroup; } @@ -132,10 +127,10 @@ void BKE_defvert_copy(MDeformVert *dvert_dst, const MDeformVert *dvert_src) } if (dvert_src->totweight) { - dvert_dst->dw = MEM_dupallocN(dvert_src->dw); + dvert_dst->dw = static_cast(MEM_dupallocN(dvert_src->dw)); } else { - dvert_dst->dw = NULL; + dvert_dst->dw = nullptr; } dvert_dst->totweight = dvert_src->totweight; @@ -157,7 +152,7 @@ void BKE_defvert_copy_index(MDeformVert *dvert_dst, dw_dst->weight = dw_src->weight; } else { - /* Source was NULL, assign zero (could also remove). */ + /* Source was nullptr, assign zero (could also remove). 
*/ dw_dst = BKE_defvert_find_index(dvert_dst, defgroup_dst); if (dw_dst) { @@ -307,7 +302,7 @@ void BKE_defvert_normalize_lock_single(MDeformVert *dvert, } } else { - MDeformWeight *dw_lock = NULL; + MDeformWeight *dw_lock = nullptr; MDeformWeight *dw; uint i; float tot_weight = 0.0f; @@ -423,7 +418,7 @@ void BKE_defvert_flip_merged(MDeformVert *dvert, const int *flip_map, const int for (dw = dvert->dw, i = 0; i < totweight; dw++, i++) { if (dw->def_nr < flip_map_num) { if (flip_map[dw->def_nr] >= 0) { - /* error checkers complain of this but we'll never get NULL return */ + /* error checkers complain of this but we'll never get nullptr return */ dw_cpy = BKE_defvert_ensure_index(dvert, flip_map[dw->def_nr]); dw = &dvert->dw[i]; /* in case array got realloced */ @@ -441,7 +436,7 @@ void BKE_defvert_flip_merged(MDeformVert *dvert, const int *flip_map, const int bool BKE_object_supports_vertex_groups(const Object *ob) { const ID *id = (const ID *)ob->data; - if (id == NULL) { + if (id == nullptr) { return false; } @@ -467,7 +462,7 @@ const ListBase *BKE_id_defgroup_list_get(const ID *id) BLI_assert_unreachable(); } } - return NULL; + return nullptr; } static const int *object_defgroup_active_index_get_p(const Object *ob) @@ -487,7 +482,7 @@ static const int *object_defgroup_active_index_get_p(const Object *ob) return &gpd->vertex_group_active_index; } } - return NULL; + return nullptr; } ListBase *BKE_id_defgroup_list_get_mutable(ID *id) @@ -498,38 +493,38 @@ ListBase *BKE_id_defgroup_list_get_mutable(ID *id) bDeformGroup *BKE_object_defgroup_find_name(const Object *ob, const char *name) { - if (name == NULL || name[0] == '\0') { - return NULL; + if (name == nullptr || name[0] == '\0') { + return nullptr; } const ListBase *defbase = BKE_object_defgroup_list(ob); - return BLI_findstring(defbase, name, offsetof(bDeformGroup, name)); + return static_cast(BLI_findstring(defbase, name, offsetof(bDeformGroup, name))); } int BKE_id_defgroup_name_index(const ID *id, const char *name) { int index; - if (!BKE_id_defgroup_name_find(id, name, &index, NULL)) { + if (!BKE_id_defgroup_name_find(id, name, &index, nullptr)) { return -1; } return index; } -bool BKE_id_defgroup_name_find(const struct ID *id, +bool BKE_id_defgroup_name_find(const ID *id, const char *name, int *r_index, - struct bDeformGroup **r_group) + bDeformGroup **r_group) { - if (name == NULL || name[0] == '\0') { + if (name == nullptr || name[0] == '\0') { return false; } const ListBase *defbase = BKE_id_defgroup_list_get(id); int index; LISTBASE_FOREACH_INDEX (bDeformGroup *, group, defbase, index) { if (STREQ(name, group->name)) { - if (r_index != NULL) { + if (r_index != nullptr) { *r_index = index; } - if (r_group != NULL) { + if (r_group != nullptr) { *r_group = group; } return true; @@ -582,19 +577,19 @@ static int *object_defgroup_unlocked_flip_map_ex(const Object *ob, *r_flip_map_num = defbase_num; if (defbase_num == 0) { - return NULL; + return nullptr; } bDeformGroup *dg; char name_flip[sizeof(dg->name)]; int i, flip_num; - int *map = MEM_mallocN(defbase_num * sizeof(int), __func__); + int *map = static_cast(MEM_mallocN(defbase_num * sizeof(int), __func__)); for (i = 0; i < defbase_num; i++) { map[i] = -1; } - for (dg = defbase->first, i = 0; dg; dg = dg->next, i++) { + for (dg = static_cast(defbase->first), i = 0; dg; dg = dg->next, i++) { if (map[i] == -1) { /* may be calculated previously */ /* in case no valid value is found, use this */ @@ -642,18 +637,17 @@ int *BKE_object_defgroup_flip_map_single(const Object *ob, 
*r_flip_map_num = defbase_num; if (defbase_num == 0) { - return NULL; + return nullptr; } - bDeformGroup *dg; - char name_flip[sizeof(dg->name)]; - int i, flip_num, *map = MEM_mallocN(defbase_num * sizeof(int), __func__); + char name_flip[sizeof(bDeformGroup::name)]; + int i, flip_num, *map = static_cast(MEM_mallocN(defbase_num * sizeof(int), __func__)); for (i = 0; i < defbase_num; i++) { map[i] = use_default ? i : -1; } - dg = BLI_findlink(defbase, defgroup); + bDeformGroup *dg = static_cast(BLI_findlink(defbase, defgroup)); BLI_string_flip_side_name(name_flip, dg->name, false, sizeof(name_flip)); if (!STREQ(name_flip, dg->name)) { @@ -671,7 +665,7 @@ int *BKE_object_defgroup_flip_map_single(const Object *ob, int BKE_object_defgroup_flip_index(const Object *ob, int index, const bool use_default) { const ListBase *defbase = BKE_object_defgroup_list(ob); - bDeformGroup *dg = BLI_findlink(defbase, index); + bDeformGroup *dg = static_cast(BLI_findlink(defbase, index)); int flip_index = -1; if (dg) { @@ -691,7 +685,7 @@ static bool defgroup_find_name_dupe(const char *name, bDeformGroup *dg, Object * const ListBase *defbase = BKE_object_defgroup_list(ob); bDeformGroup *curdef; - for (curdef = defbase->first; curdef; curdef = curdef->next) { + for (curdef = static_cast(defbase->first); curdef; curdef = curdef->next) { if (dg != curdef) { if (STREQ(curdef->name, name)) { return true; @@ -702,46 +696,42 @@ static bool defgroup_find_name_dupe(const char *name, bDeformGroup *dg, Object * return false; } +struct DeformGroupUniqueNameData { + Object *ob; + bDeformGroup *dg; +}; + static bool defgroup_unique_check(void *arg, const char *name) { - struct { - Object *ob; - void *dg; - } *data = arg; + DeformGroupUniqueNameData *data = static_cast(arg); return defgroup_find_name_dupe(name, data->dg, data->ob); } void BKE_object_defgroup_unique_name(bDeformGroup *dg, Object *ob) { - struct { - Object *ob; - void *dg; - } data; - data.ob = ob; - data.dg = dg; - + DeformGroupUniqueNameData data{ob, dg}; BLI_uniquename_cb(defgroup_unique_check, &data, DATA_("Group"), '.', dg->name, sizeof(dg->name)); } -float BKE_defvert_find_weight(const struct MDeformVert *dvert, const int defgroup) +float BKE_defvert_find_weight(const MDeformVert *dvert, const int defgroup) { MDeformWeight *dw = BKE_defvert_find_index(dvert, defgroup); return dw ? dw->weight : 0.0f; } -float BKE_defvert_array_find_weight_safe(const struct MDeformVert *dvert, +float BKE_defvert_array_find_weight_safe(const MDeformVert *dvert, const int index, const int defgroup) { /* Invalid defgroup index means the vgroup selected is invalid, * does not exist, in that case it is OK to return 1.0 * (i.e. maximum weight, as if no vgroup was selected). - * But in case of valid defgroup and NULL dvert data pointer, it means that vgroup **is** valid, - * and just totally empty, so we shall return '0.0' value then! */ + * But in case of valid defgroup and nullptr dvert data pointer, it means that vgroup **is** + * valid, and just totally empty, so we shall return '0.0' value then! 
*/ if (defgroup == -1) { return 1.0f; } - if (dvert == NULL) { + if (dvert == nullptr) { return 0.0f; } @@ -764,7 +754,7 @@ MDeformWeight *BKE_defvert_find_index(const MDeformVert *dvert, const int defgro BLI_assert(0); } - return NULL; + return nullptr; } MDeformWeight *BKE_defvert_ensure_index(MDeformVert *dvert, const int defgroup) @@ -774,7 +764,7 @@ MDeformWeight *BKE_defvert_ensure_index(MDeformVert *dvert, const int defgroup) /* do this check always, this function is used to check for it */ if (!dvert || defgroup < 0) { BLI_assert(0); - return NULL; + return nullptr; } dw_new = BKE_defvert_find_index(dvert, defgroup); @@ -782,7 +772,8 @@ MDeformWeight *BKE_defvert_ensure_index(MDeformVert *dvert, const int defgroup) return dw_new; } - dw_new = MEM_mallocN(sizeof(MDeformWeight) * (dvert->totweight + 1), "deformWeight"); + dw_new = static_cast( + MEM_mallocN(sizeof(MDeformWeight) * (dvert->totweight + 1), __func__)); if (dvert->dw) { memcpy(dw_new, dvert->dw, sizeof(MDeformWeight) * dvert->totweight); MEM_freeN(dvert->dw); @@ -810,8 +801,8 @@ void BKE_defvert_add_index_notest(MDeformVert *dvert, const int defgroup, const return; } - dw_new = MEM_callocN(sizeof(MDeformWeight) * (dvert->totweight + 1), - "defvert_add_to group, new deformWeight"); + dw_new = static_cast( + MEM_callocN(sizeof(MDeformWeight) * (dvert->totweight + 1), __func__)); if (dvert->dw) { memcpy(dw_new, dvert->dw, sizeof(MDeformWeight) * dvert->totweight); MEM_freeN(dvert->dw); @@ -838,18 +829,19 @@ void BKE_defvert_remove_group(MDeformVert *dvert, MDeformWeight *dw) * this deform weight, and reshuffle the others. */ if (dvert->totweight) { - BLI_assert(dvert->dw != NULL); + BLI_assert(dvert->dw != nullptr); if (i != dvert->totweight) { dvert->dw[i] = dvert->dw[dvert->totweight]; } - dvert->dw = MEM_reallocN(dvert->dw, sizeof(MDeformWeight) * dvert->totweight); + dvert->dw = static_cast( + MEM_reallocN(dvert->dw, sizeof(MDeformWeight) * dvert->totweight)); } else { /* If there are no other deform weights left then just remove this one. 
*/ MEM_freeN(dvert->dw); - dvert->dw = NULL; + dvert->dw = nullptr; } } } @@ -877,7 +869,7 @@ int BKE_defvert_find_shared(const MDeformVert *dvert_a, const MDeformVert *dvert return -1; } -bool BKE_defvert_is_weight_zero(const struct MDeformVert *dvert, const int defgroup_tot) +bool BKE_defvert_is_weight_zero(const MDeformVert *dvert, const int defgroup_tot) { MDeformWeight *dw = dvert->dw; for (int i = dvert->totweight; i != 0; i--, dw++) { @@ -891,14 +883,14 @@ bool BKE_defvert_is_weight_zero(const struct MDeformVert *dvert, const int defgr return true; } -float BKE_defvert_total_selected_weight(const struct MDeformVert *dv, +float BKE_defvert_total_selected_weight(const MDeformVert *dv, int defbase_num, const bool *defbase_sel) { float total = 0.0f; const MDeformWeight *dw = dv->dw; - if (defbase_sel == NULL) { + if (defbase_sel == nullptr) { return total; } @@ -913,7 +905,7 @@ float BKE_defvert_total_selected_weight(const struct MDeformVert *dv, return total; } -float BKE_defvert_multipaint_collective_weight(const struct MDeformVert *dv, +float BKE_defvert_multipaint_collective_weight(const MDeformVert *dv, const int defbase_num, const bool *defbase_sel, const int defbase_sel_num, @@ -959,7 +951,7 @@ float BKE_defvert_calc_lock_relative_weight(float weight, } float BKE_defvert_lock_relative_weight(const float weight, - const struct MDeformVert *dv, + const MDeformVert *dv, const int defbase_num, const bool *defbase_locked, const bool *defbase_unlocked) @@ -991,7 +983,8 @@ void BKE_defvert_array_copy(MDeformVert *dst, const MDeformVert *src, int totver for (int i = 0; i < totvert; i++) { if (src[i].dw) { - dst[i].dw = MEM_mallocN(sizeof(MDeformWeight) * src[i].totweight, "copy_deformWeight"); + dst[i].dw = static_cast( + MEM_mallocN(sizeof(MDeformWeight) * src[i].totweight, __func__)); memcpy(dst[i].dw, src[i].dw, sizeof(MDeformWeight) * src[i].totweight); } } @@ -1059,7 +1052,8 @@ void BKE_defvert_extract_vgroup_to_edgeweights(const MDeformVert *dvert, { if (dvert && defgroup != -1) { int i = edges_num; - float *tmp_weights = MEM_mallocN(sizeof(*tmp_weights) * (size_t)verts_num, __func__); + float *tmp_weights = static_cast( + MEM_mallocN(sizeof(*tmp_weights) * size_t(verts_num), __func__)); BKE_defvert_extract_vgroup_to_vertweights( dvert, defgroup, verts_num, invert_vgroup, tmp_weights); @@ -1087,7 +1081,8 @@ void BKE_defvert_extract_vgroup_to_loopweights(const MDeformVert *dvert, { if (dvert && defgroup != -1) { int i = loops_num; - float *tmp_weights = MEM_mallocN(sizeof(*tmp_weights) * (size_t)verts_num, __func__); + float *tmp_weights = static_cast( + MEM_mallocN(sizeof(*tmp_weights) * size_t(verts_num), __func__)); BKE_defvert_extract_vgroup_to_vertweights( dvert, defgroup, verts_num, invert_vgroup, tmp_weights); @@ -1109,7 +1104,7 @@ void BKE_defvert_extract_vgroup_to_polyweights(const MDeformVert *dvert, const int defgroup, const int verts_num, const MLoop *loops, - const int UNUSED(loops_num), + const int /*loops_num*/, const MPoly *polys, const int polys_num, const bool invert_vgroup, @@ -1117,7 +1112,8 @@ void BKE_defvert_extract_vgroup_to_polyweights(const MDeformVert *dvert, { if (dvert && defgroup != -1) { int i = polys_num; - float *tmp_weights = MEM_mallocN(sizeof(*tmp_weights) * (size_t)verts_num, __func__); + float *tmp_weights = static_cast( + MEM_mallocN(sizeof(*tmp_weights) * size_t(verts_num), __func__)); BKE_defvert_extract_vgroup_to_vertweights( dvert, defgroup, verts_num, invert_vgroup, tmp_weights); @@ -1131,7 +1127,7 @@ void 
BKE_defvert_extract_vgroup_to_polyweights(const MDeformVert *dvert, for (; j--; ml++) { w += tmp_weights[ml->v]; } - r_weights[i] = w / (float)mp->totloop; + r_weights[i] = w / float(mp->totloop); } MEM_freeN(tmp_weights); @@ -1217,9 +1213,9 @@ static bool data_transfer_layersmapping_vgroups_multisrc_to_dst(ListBase *r_map, Object *ob_dst, const MDeformVert *data_src, MDeformVert *data_dst, - const CustomData *UNUSED(cd_src), + const CustomData * /*cd_src*/, CustomData *cd_dst, - const bool UNUSED(use_dupref_dst), + const bool /*use_dupref_dst*/, const int tolayers, const bool *use_layers_src, const int num_layers_src) @@ -1231,7 +1227,7 @@ static bool data_transfer_layersmapping_vgroups_multisrc_to_dst(ListBase *r_map, const int tot_dst = BLI_listbase_count(dst_defbase); - const size_t elem_size = sizeof(*((MDeformVert *)NULL)); + const size_t elem_size = sizeof(*((MDeformVert *)nullptr)); switch (tolayers) { case DT_LAYERS_INDEX_DST: @@ -1258,15 +1254,15 @@ static bool data_transfer_layersmapping_vgroups_multisrc_to_dst(ListBase *r_map, } else if (use_delete && idx_dst > idx_src) { while (idx_dst-- > idx_src) { - BKE_object_defgroup_remove(ob_dst, dst_defbase->last); + BKE_object_defgroup_remove(ob_dst, static_cast(dst_defbase->last)); } } if (r_map) { /* At this stage, we **need** a valid CD_MDEFORMVERT layer on dest! * Again, use_create is not relevant in this case */ if (!data_dst) { - data_dst = CustomData_add_layer( - cd_dst, CD_MDEFORMVERT, CD_SET_DEFAULT, NULL, num_elem_dst); + data_dst = static_cast( + CustomData_add_layer(cd_dst, CD_MDEFORMVERT, CD_SET_DEFAULT, nullptr, num_elem_dst)); } while (idx_src--) { @@ -1287,7 +1283,7 @@ static bool data_transfer_layersmapping_vgroups_multisrc_to_dst(ListBase *r_map, 0, 0, vgroups_datatransfer_interp, - NULL); + nullptr); } } break; @@ -1296,7 +1292,7 @@ static bool data_transfer_layersmapping_vgroups_multisrc_to_dst(ListBase *r_map, if (use_delete) { /* Remove all unused dst vgroups first, simpler in this case. */ - for (dg_dst = dst_defbase->first; dg_dst;) { + for (dg_dst = static_cast(dst_defbase->first); dg_dst;) { bDeformGroup *dg_dst_next = dg_dst->next; if (BKE_object_defgroup_name_index(ob_src, dg_dst->name) == -1) { @@ -1306,7 +1302,8 @@ static bool data_transfer_layersmapping_vgroups_multisrc_to_dst(ListBase *r_map, } } - for (idx_src = 0, dg_src = src_list->first; idx_src < num_layers_src; + for (idx_src = 0, dg_src = static_cast(src_list->first); + idx_src < num_layers_src; idx_src++, dg_src = dg_src->next) { if (!use_layers_src[idx_src]) { continue; @@ -1326,8 +1323,8 @@ static bool data_transfer_layersmapping_vgroups_multisrc_to_dst(ListBase *r_map, /* At this stage, we **need** a valid CD_MDEFORMVERT layer on dest! 
* use_create is not relevant in this case */ if (!data_dst) { - data_dst = CustomData_add_layer( - cd_dst, CD_MDEFORMVERT, CD_SET_DEFAULT, NULL, num_elem_dst); + data_dst = static_cast(CustomData_add_layer( + cd_dst, CD_MDEFORMVERT, CD_SET_DEFAULT, nullptr, num_elem_dst)); } data_transfer_layersmapping_add_item(r_map, @@ -1344,7 +1341,7 @@ static bool data_transfer_layersmapping_vgroups_multisrc_to_dst(ListBase *r_map, 0, 0, vgroups_datatransfer_interp, - NULL); + nullptr); } } break; @@ -1373,13 +1370,13 @@ bool data_transfer_layersmapping_vgroups(ListBase *r_map, { int idx_src, idx_dst; - const size_t elem_size = sizeof(*((MDeformVert *)NULL)); + const size_t elem_size = sizeof(*((MDeformVert *)nullptr)); /* NOTE: * VGroups are a bit hairy, since their layout is defined on object level (ob->defbase), * while their actual data is a (mesh) CD layer. - * This implies we may have to handle data layout itself while having NULL data itself, - * and even have to support NULL data_src in transfer data code + * This implies we may have to handle data layout itself while having nullptr data itself, + * and even have to support nullptr data_src in transfer data code * (we always create a data_dst, though). * * NOTE: Above comment is outdated, but this function was written when that was true. @@ -1393,12 +1390,15 @@ bool data_transfer_layersmapping_vgroups(ListBase *r_map, return true; } - const MDeformVert *data_src = CustomData_get_layer(cd_src, CD_MDEFORMVERT); + const MDeformVert *data_src = static_cast( + CustomData_get_layer(cd_src, CD_MDEFORMVERT)); - MDeformVert *data_dst = CustomData_get_layer_for_write(cd_dst, CD_MDEFORMVERT, num_elem_dst); + MDeformVert *data_dst = static_cast( + CustomData_get_layer_for_write(cd_dst, CD_MDEFORMVERT, num_elem_dst)); if (data_dst && use_dupref_dst && r_map) { /* If dest is a derivedmesh, we do not want to overwrite cdlayers of org mesh! */ - data_dst = CustomData_get_layer_for_write(cd_dst, CD_MDEFORMVERT, num_elem_dst); + data_dst = static_cast( + CustomData_get_layer_for_write(cd_dst, CD_MDEFORMVERT, num_elem_dst)); } if (fromlayers == DT_LAYERS_ACTIVE_SRC || fromlayers >= 0) { @@ -1430,7 +1430,7 @@ bool data_transfer_layersmapping_vgroups(ListBase *r_map, if (!use_create) { return true; } - dg_src = BLI_findlink(src_defbase, idx_src); + dg_src = static_cast(BLI_findlink(src_defbase, idx_src)); BKE_object_defgroup_add_name(ob_dst, dg_src->name); idx_dst = BKE_object_defgroup_active_index_get(ob_dst) - 1; } @@ -1449,7 +1449,7 @@ bool data_transfer_layersmapping_vgroups(ListBase *r_map, } } else if (tolayers == DT_LAYERS_NAME_DST) { - bDeformGroup *dg_src = BLI_findlink(src_defbase, idx_src); + bDeformGroup *dg_src = static_cast(BLI_findlink(src_defbase, idx_src)); if ((idx_dst = BKE_object_defgroup_name_index(ob_dst, dg_src->name)) == -1) { if (!use_create) { return true; @@ -1466,8 +1466,8 @@ bool data_transfer_layersmapping_vgroups(ListBase *r_map, /* At this stage, we **need** a valid CD_MDEFORMVERT layer on dest! 
* use_create is not relevant in this case */ if (!data_dst) { - data_dst = CustomData_add_layer( - cd_dst, CD_MDEFORMVERT, CD_SET_DEFAULT, NULL, num_elem_dst); + data_dst = static_cast( + CustomData_add_layer(cd_dst, CD_MDEFORMVERT, CD_SET_DEFAULT, nullptr, num_elem_dst)); } data_transfer_layersmapping_add_item(r_map, @@ -1484,12 +1484,12 @@ bool data_transfer_layersmapping_vgroups(ListBase *r_map, 0, 0, vgroups_datatransfer_interp, - NULL); + nullptr); } } else { int num_src, num_sel_unused; - bool *use_layers_src = NULL; + bool *use_layers_src = nullptr; bool ret = false; switch (fromlayers) { @@ -1588,7 +1588,7 @@ void BKE_defbase_blend_write(BlendWriter *writer, const ListBase *defbase) void BKE_defvert_blend_write(BlendWriter *writer, int count, const MDeformVert *dvlist) { - if (dvlist == NULL) { + if (dvlist == nullptr) { return; } @@ -1605,22 +1605,23 @@ void BKE_defvert_blend_write(BlendWriter *writer, int count, const MDeformVert * void BKE_defvert_blend_read(BlendDataReader *reader, int count, MDeformVert *mdverts) { - if (mdverts == NULL) { + if (mdverts == nullptr) { return; } for (int i = count; i > 0; i--, mdverts++) { /* Convert to vertex group allocation system. */ MDeformWeight *dw; - if (mdverts->dw && (dw = BLO_read_get_new_data_address(reader, mdverts->dw))) { + if (mdverts->dw && + (dw = static_cast(BLO_read_get_new_data_address(reader, mdverts->dw)))) { const size_t dw_len = sizeof(MDeformWeight) * mdverts->totweight; void *dw_tmp = MEM_mallocN(dw_len, __func__); memcpy(dw_tmp, dw, dw_len); - mdverts->dw = dw_tmp; + mdverts->dw = static_cast(dw_tmp); MEM_freeN(dw); } else { - mdverts->dw = NULL; + mdverts->dw = nullptr; mdverts->totweight = 0; } } diff --git a/source/blender/blenkernel/intern/dynamicpaint.c b/source/blender/blenkernel/intern/dynamicpaint.cc similarity index 87% rename from source/blender/blenkernel/intern/dynamicpaint.c rename to source/blender/blenkernel/intern/dynamicpaint.cc index 4f24a503a4d..9b30d34aabb 100644 --- a/source/blender/blenkernel/intern/dynamicpaint.c +++ b/source/blender/blenkernel/intern/dynamicpaint.cc @@ -127,16 +127,16 @@ BLI_INLINE void value_dissolve(float *r_value, /***************************** Internal Structs ***************************/ -typedef struct Bounds2D { +struct Bounds2D { float min[2], max[2]; -} Bounds2D; +}; -typedef struct Bounds3D { +struct Bounds3D { float min[3], max[3]; bool valid; -} Bounds3D; +}; -typedef struct VolumeGrid { +struct VolumeGrid { int dim[3]; /** whole grid bounds */ Bounds3D grid_bounds; @@ -151,29 +151,29 @@ typedef struct VolumeGrid { int *t_index; int *temp_t_index; -} VolumeGrid; +}; -typedef struct Vec3f { +struct Vec3f { float v[3]; -} Vec3f; +}; -typedef struct BakeAdjPoint { +struct BakeAdjPoint { /** vector pointing towards this neighbor */ float dir[3]; /** distance to */ float dist; -} BakeAdjPoint; +}; /** Surface data used while processing a frame */ -typedef struct PaintBakeNormal { +struct PaintBakeNormal { /** current pixel world-space inverted normal */ float invNorm[3]; /** normal directional scale for displace mapping */ float normal_scale; -} PaintBakeNormal; +}; /** Temp surface data used to process a frame */ -typedef struct PaintBakeData { +struct PaintBakeData { /* point space data */ PaintBakeNormal *bNormal; /** index to start reading point sample realCoord */ @@ -206,10 +206,10 @@ typedef struct PaintBakeData { float prev_obmat[4][4]; /** flag to check if surface was cleared/reset -> have to redo velocity etc. 
*/ int clear; -} PaintBakeData; +}; /** UV Image sequence format point */ -typedef struct PaintUVPoint { +struct PaintUVPoint { /* Pixel / mesh data */ /** tri index on domain derived mesh */ uint tri_index; @@ -219,18 +219,18 @@ typedef struct PaintUVPoint { /** If this pixel isn't uv mapped to any face, but its neighboring pixel is. */ uint neighbor_pixel; -} PaintUVPoint; +}; -typedef struct ImgSeqFormatData { +struct ImgSeqFormatData { PaintUVPoint *uv_p; Vec3f *barycentricWeights; /* b-weights for all pixel samples */ -} ImgSeqFormatData; +}; /* adjacency data flags */ #define ADJ_ON_MESH_EDGE (1 << 0) #define ADJ_BORDER_PIXEL (1 << 1) -typedef struct PaintAdjData { +struct PaintAdjData { /** Array of neighboring point indexes, for single sample use (n_index + neigh_num). */ int *n_target; /** Index to start reading n_target for each point. */ @@ -245,27 +245,27 @@ typedef struct PaintAdjData { int *border; /** Size of border. */ int total_border; -} PaintAdjData; +}; /************************* Runtime evaluation store ***************************/ void dynamicPaint_Modifier_free_runtime(DynamicPaintRuntime *runtime_data) { - if (runtime_data == NULL) { + if (runtime_data == nullptr) { return; } if (runtime_data->canvas_mesh) { - BKE_id_free(NULL, runtime_data->canvas_mesh); + BKE_id_free(nullptr, runtime_data->canvas_mesh); } if (runtime_data->brush_mesh) { - BKE_id_free(NULL, runtime_data->brush_mesh); + BKE_id_free(nullptr, runtime_data->brush_mesh); } MEM_freeN(runtime_data); } static DynamicPaintRuntime *dynamicPaint_Modifier_runtime_ensure(DynamicPaintModifierData *pmd) { - if (pmd->modifier.runtime == NULL) { + if (pmd->modifier.runtime == nullptr) { pmd->modifier.runtime = MEM_callocN(sizeof(DynamicPaintRuntime), "dynamic paint runtime"); } return (DynamicPaintRuntime *)pmd->modifier.runtime; @@ -273,8 +273,8 @@ static DynamicPaintRuntime *dynamicPaint_Modifier_runtime_ensure(DynamicPaintMod static Mesh *dynamicPaint_canvas_mesh_get(DynamicPaintCanvasSettings *canvas) { - if (canvas->pmd->modifier.runtime == NULL) { - return NULL; + if (canvas->pmd->modifier.runtime == nullptr) { + return nullptr; } DynamicPaintRuntime *runtime_data = (DynamicPaintRuntime *)canvas->pmd->modifier.runtime; return runtime_data->canvas_mesh; @@ -282,8 +282,8 @@ static Mesh *dynamicPaint_canvas_mesh_get(DynamicPaintCanvasSettings *canvas) static Mesh *dynamicPaint_brush_mesh_get(DynamicPaintBrushSettings *brush) { - if (brush->pmd->modifier.runtime == NULL) { - return NULL; + if (brush->pmd->modifier.runtime == nullptr) { + return nullptr; } DynamicPaintRuntime *runtime_data = (DynamicPaintRuntime *)brush->pmd->modifier.runtime; return runtime_data->brush_mesh; @@ -316,10 +316,10 @@ static int dynamicPaint_surfaceNumOfPoints(DynamicPaintSurface *surface) DynamicPaintSurface *get_activeSurface(DynamicPaintCanvasSettings *canvas) { - return BLI_findlink(&canvas->surfaces, canvas->active_sur); + return static_cast(BLI_findlink(&canvas->surfaces, canvas->active_sur)); } -bool dynamicPaint_outputLayerExists(struct DynamicPaintSurface *surface, Object *ob, int output) +bool dynamicPaint_outputLayerExists(DynamicPaintSurface *surface, Object *ob, int output) { const char *name; @@ -335,7 +335,7 @@ bool dynamicPaint_outputLayerExists(struct DynamicPaintSurface *surface, Object if (surface->format == MOD_DPAINT_SURFACE_F_VERTEX) { if (surface->type == MOD_DPAINT_SURFACE_T_PAINT) { - Mesh *me = ob->data; + Mesh *me = static_cast(ob->data); return (CustomData_get_named_layer_index(&me->ldata, 
CD_PROP_BYTE_COLOR, name) != -1); } if (surface->type == MOD_DPAINT_SURFACE_T_WEIGHT) { @@ -348,8 +348,9 @@ bool dynamicPaint_outputLayerExists(struct DynamicPaintSurface *surface, Object static bool surface_duplicateOutputExists(void *arg, const char *name) { - DynamicPaintSurface *t_surface = arg; - DynamicPaintSurface *surface = t_surface->canvas->surfaces.first; + DynamicPaintSurface *t_surface = static_cast(arg); + DynamicPaintSurface *surface = static_cast( + t_surface->canvas->surfaces.first); for (; surface; surface = surface->next) { if (surface != t_surface && surface->type == t_surface->type && @@ -387,8 +388,9 @@ static void surface_setUniqueOutputName(DynamicPaintSurface *surface, char *base static bool surface_duplicateNameExists(void *arg, const char *name) { - DynamicPaintSurface *t_surface = arg; - DynamicPaintSurface *surface = t_surface->canvas->surfaces.first; + DynamicPaintSurface *t_surface = static_cast(arg); + DynamicPaintSurface *surface = static_cast( + t_surface->canvas->surfaces.first); for (; surface; surface = surface->next) { if (surface != t_surface && STREQ(name, surface->name)) { @@ -406,7 +408,7 @@ void dynamicPaintSurface_setUniqueName(DynamicPaintSurface *surface, const char surface_duplicateNameExists, surface, name, '.', surface->name, sizeof(surface->name)); } -void dynamicPaintSurface_updateType(struct DynamicPaintSurface *surface) +void dynamicPaintSurface_updateType(DynamicPaintSurface *surface) { if (surface->format == MOD_DPAINT_SURFACE_F_IMAGESEQ) { surface->output_name[0] = '\0'; @@ -507,7 +509,7 @@ static float mixColors( static void scene_setSubframe(Scene *scene, float subframe) { - /* dynamic paint subframes must be done on previous frame */ + /* Dynamic paint sub-frames must be done on previous frame. 
*/ scene->r.cfra -= 1; scene->r.subframe = subframe; } @@ -516,7 +518,7 @@ static int surface_getBrushFlags(DynamicPaintSurface *surface, Depsgraph *depsgr { uint numobjects; Object **objects = BKE_collision_objects_create( - depsgraph, NULL, surface->brush_group, &numobjects, eModifierType_DynamicPaint); + depsgraph, nullptr, surface->brush_group, &numobjects, eModifierType_DynamicPaint); int flags = 0; @@ -622,26 +624,26 @@ static void freeGrid(PaintSurfaceData *data) } MEM_freeN(bData->grid); - bData->grid = NULL; + bData->grid = nullptr; } static void grid_bound_insert_cb_ex(void *__restrict userdata, const int i, const TaskParallelTLS *__restrict tls) { - PaintBakeData *bData = userdata; + PaintBakeData *bData = static_cast(userdata); - Bounds3D *grid_bound = tls->userdata_chunk; + Bounds3D *grid_bound = static_cast(tls->userdata_chunk); boundInsert(grid_bound, bData->realCoord[bData->s_pos[i]].v); } -static void grid_bound_insert_reduce(const void *__restrict UNUSED(userdata), +static void grid_bound_insert_reduce(const void *__restrict /*userdata*/, void *__restrict chunk_join, void *__restrict chunk) { - Bounds3D *join = chunk_join; - Bounds3D *grid_bound = chunk; + Bounds3D *join = static_cast(chunk_join); + Bounds3D *grid_bound = static_cast(chunk); boundInsert(join, grid_bound->min); boundInsert(join, grid_bound->max); @@ -651,10 +653,10 @@ static void grid_cell_points_cb_ex(void *__restrict userdata, const int i, const TaskParallelTLS *__restrict tls) { - PaintBakeData *bData = userdata; + PaintBakeData *bData = static_cast(userdata); VolumeGrid *grid = bData->grid; int *temp_t_index = grid->temp_t_index; - int *s_num = tls->userdata_chunk; + int *s_num = static_cast(tls->userdata_chunk); int co[3]; @@ -672,12 +674,12 @@ static void grid_cell_points_reduce(const void *__restrict userdata, void *__restrict chunk_join, void *__restrict chunk) { - const PaintBakeData *bData = userdata; + const PaintBakeData *bData = static_cast(userdata); const VolumeGrid *grid = bData->grid; const int grid_cells = grid->dim[0] * grid->dim[1] * grid->dim[2]; - int *join_s_num = chunk_join; - int *s_num = chunk; + int *join_s_num = static_cast(chunk_join); + int *s_num = static_cast(chunk); /* calculate grid indexes */ for (int i = 0; i < grid_cells; i++) { @@ -687,9 +689,9 @@ static void grid_cell_points_reduce(const void *__restrict userdata, static void grid_cell_bounds_cb(void *__restrict userdata, const int x, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - PaintBakeData *bData = userdata; + PaintBakeData *bData = static_cast(userdata); VolumeGrid *grid = bData->grid; float *dim = bData->dim; int *grid_dim = grid->dim; @@ -708,20 +710,20 @@ static void grid_cell_bounds_cb(void *__restrict userdata, } } -static void surfaceGenerateGrid(struct DynamicPaintSurface *surface) +static void surfaceGenerateGrid(DynamicPaintSurface *surface) { PaintSurfaceData *sData = surface->data; PaintBakeData *bData = sData->bData; VolumeGrid *grid; int grid_cells, axis = 3; - int *temp_t_index = NULL; - int *temp_s_num = NULL; + int *temp_t_index = nullptr; + int *temp_s_num = nullptr; if (bData->grid) { freeGrid(sData); } - bData->grid = MEM_callocN(sizeof(VolumeGrid), "Surface Grid"); + bData->grid = MEM_cnew(__func__); grid = bData->grid; { @@ -758,7 +760,7 @@ static void surfaceGenerateGrid(struct DynamicPaintSurface *surface) if (axis == 0 || max_fff(td[0], td[1], td[2]) < 0.0001f) { MEM_freeN(bData->grid); - bData->grid = NULL; + bData->grid = nullptr; 
return; } @@ -766,25 +768,29 @@ static void surfaceGenerateGrid(struct DynamicPaintSurface *surface) volume = td[0] * td[1] * td[2]; /* determine final grid size by trying to fit average 10.000 points per grid cell */ - dim_factor = (float)pow((double)volume / ((double)sData->total_points / 10000.0), - 1.0 / (double)axis); + dim_factor = (float)pow(double(volume) / (double(sData->total_points) / 10000.0), + 1.0 / double(axis)); /* define final grid size using dim_factor, use min 3 for active axes */ for (i = 0; i < 3; i++) { - grid->dim[i] = (int)floor(td[i] / dim_factor); + grid->dim[i] = int(floor(td[i] / dim_factor)); CLAMP(grid->dim[i], (dim[i] >= min_dim) ? 3 : 1, 100); } grid_cells = grid->dim[0] * grid->dim[1] * grid->dim[2]; /* allocate memory for grids */ - grid->bounds = MEM_callocN(sizeof(Bounds3D) * grid_cells, "Surface Grid Bounds"); - grid->s_pos = MEM_callocN(sizeof(int) * grid_cells, "Surface Grid Position"); + grid->bounds = static_cast( + MEM_callocN(sizeof(Bounds3D) * grid_cells, "Surface Grid Bounds")); + grid->s_pos = static_cast( + MEM_callocN(sizeof(int) * grid_cells, "Surface Grid Position")); - grid->s_num = MEM_callocN(sizeof(int) * grid_cells, "Surface Grid Points"); - temp_s_num = MEM_callocN(sizeof(int) * grid_cells, "Temp Surface Grid Points"); - grid->t_index = MEM_callocN(sizeof(int) * sData->total_points, "Surface Grid Target Ids"); - grid->temp_t_index = temp_t_index = MEM_callocN(sizeof(int) * sData->total_points, - "Temp Surface Grid Target Ids"); + grid->s_num = static_cast(MEM_callocN(sizeof(int) * grid_cells, "Surface Grid Points")); + temp_s_num = static_cast( + MEM_callocN(sizeof(int) * grid_cells, "Temp Surface Grid Points")); + grid->t_index = static_cast( + MEM_callocN(sizeof(int) * sData->total_points, "Surface Grid Target Ids")); + grid->temp_t_index = temp_t_index = static_cast( + MEM_callocN(sizeof(int) * sData->total_points, "Temp Surface Grid Target Ids")); /* in case of an allocation failure abort here */ if (!grid->bounds || !grid->s_pos || !grid->s_num || !grid->t_index || !temp_s_num || @@ -840,7 +846,7 @@ static void surfaceGenerateGrid(struct DynamicPaintSurface *surface) /***************************** Freeing data ******************************/ -void dynamicPaint_freeBrush(struct DynamicPaintModifierData *pmd) +void dynamicPaint_freeBrush(DynamicPaintModifierData *pmd) { if (pmd->brush) { if (pmd->brush->paint_ramp) { @@ -851,7 +857,7 @@ void dynamicPaint_freeBrush(struct DynamicPaintModifierData *pmd) } MEM_freeN(pmd->brush); - pmd->brush = NULL; + pmd->brush = nullptr; } } @@ -874,7 +880,7 @@ static void dynamicPaint_freeAdjData(PaintSurfaceData *data) MEM_freeN(data->adj_data->border); } MEM_freeN(data->adj_data); - data->adj_data = NULL; + data->adj_data = nullptr; } } @@ -911,7 +917,7 @@ static void free_bakeData(PaintSurfaceData *data) } MEM_freeN(data->bData); - data->bData = NULL; + data->bData = nullptr; } } @@ -922,7 +928,7 @@ static void surface_freeUnusedData(DynamicPaintSurface *surface) return; } - /* free bakedata if not active or surface is baked */ + /* Free bake-data if not active or surface is baked. 
*/ if (!(surface->flags & MOD_DPAINT_ACTIVE) || (surface->pointcache && surface->pointcache->flag & PTCACHE_BAKED)) { free_bakeData(surface->data); @@ -958,7 +964,7 @@ void dynamicPaint_freeSurfaceData(DynamicPaintSurface *surface) free_bakeData(data); MEM_freeN(surface->data); - surface->data = NULL; + surface->data = nullptr; } void dynamicPaint_freeSurface(const DynamicPaintModifierData *pmd, DynamicPaintSurface *surface) @@ -967,7 +973,7 @@ void dynamicPaint_freeSurface(const DynamicPaintModifierData *pmd, DynamicPaintS if ((pmd->modifier.flag & eModifierFlag_SharedCaches) == 0) { BKE_ptcache_free_list(&(surface->ptcaches)); } - surface->pointcache = NULL; + surface->pointcache = nullptr; MEM_SAFE_FREE(surface->effector_weights); @@ -980,8 +986,8 @@ void dynamicPaint_freeCanvas(DynamicPaintModifierData *pmd) { if (pmd->canvas) { /* Free surface data */ - DynamicPaintSurface *surface = pmd->canvas->surfaces.first; - DynamicPaintSurface *next_surface = NULL; + DynamicPaintSurface *surface = static_cast(pmd->canvas->surfaces.first); + DynamicPaintSurface *next_surface = nullptr; while (surface) { next_surface = surface->next; @@ -990,18 +996,18 @@ void dynamicPaint_freeCanvas(DynamicPaintModifierData *pmd) } MEM_freeN(pmd->canvas); - pmd->canvas = NULL; + pmd->canvas = nullptr; } } void dynamicPaint_Modifier_free(DynamicPaintModifierData *pmd) { - if (pmd == NULL) { + if (pmd == nullptr) { return; } dynamicPaint_freeCanvas(pmd); dynamicPaint_freeBrush(pmd); - dynamicPaint_Modifier_free_runtime(pmd->modifier.runtime); + dynamicPaint_Modifier_free_runtime(static_cast(pmd->modifier.runtime)); } /***************************** Initialize and reset ******************************/ @@ -1009,9 +1015,9 @@ void dynamicPaint_Modifier_free(DynamicPaintModifierData *pmd) DynamicPaintSurface *dynamicPaint_createNewSurface(DynamicPaintCanvasSettings *canvas, Scene *scene) { - DynamicPaintSurface *surface = MEM_callocN(sizeof(DynamicPaintSurface), "DynamicPaintSurface"); + DynamicPaintSurface *surface = MEM_cnew(__func__); if (!surface) { - return NULL; + return nullptr; } surface->canvas = canvas; @@ -1071,10 +1077,10 @@ DynamicPaintSurface *dynamicPaint_createNewSurface(DynamicPaintCanvasSettings *c BKE_modifier_path_init( surface->image_output_path, sizeof(surface->image_output_path), "cache_dynamicpaint"); - /* Using ID_BRUSH i18n context, as we have no physics/dpaint one for now... */ + /* Using ID_BRUSH i18n context, as we have no physics/dynamic-paint one for now. 
*/ dynamicPaintSurface_setUniqueName(surface, CTX_DATA_(BLT_I18NCONTEXT_ID_BRUSH, "Surface")); - surface->effector_weights = BKE_effector_add_weights(NULL); + surface->effector_weights = BKE_effector_add_weights(nullptr); dynamicPaintSurface_updateType(surface); @@ -1083,7 +1089,7 @@ DynamicPaintSurface *dynamicPaint_createNewSurface(DynamicPaintCanvasSettings *c return surface; } -bool dynamicPaint_createType(struct DynamicPaintModifierData *pmd, int type, struct Scene *scene) +bool dynamicPaint_createType(DynamicPaintModifierData *pmd, int type, Scene *scene) { if (pmd) { if (type == MOD_DYNAMICPAINT_TYPE_CANVAS) { @@ -1092,8 +1098,7 @@ bool dynamicPaint_createType(struct DynamicPaintModifierData *pmd, int type, str dynamicPaint_freeCanvas(pmd); } - canvas = pmd->canvas = MEM_callocN(sizeof(DynamicPaintCanvasSettings), - "DynamicPaint Canvas"); + canvas = pmd->canvas = MEM_cnew(__func__); if (!canvas) { return false; } @@ -1110,13 +1115,13 @@ bool dynamicPaint_createType(struct DynamicPaintModifierData *pmd, int type, str dynamicPaint_freeBrush(pmd); } - brush = pmd->brush = MEM_callocN(sizeof(DynamicPaintBrushSettings), "DynamicPaint Paint"); + brush = pmd->brush = MEM_cnew(__func__); if (!brush) { return false; } brush->pmd = pmd; - brush->psys = NULL; + brush->psys = nullptr; brush->flags = MOD_DPAINT_ABS_ALPHA | MOD_DPAINT_RAMP_ALPHA; brush->collision = MOD_DPAINT_COL_VOLUME; @@ -1139,7 +1144,7 @@ bool dynamicPaint_createType(struct DynamicPaintModifierData *pmd, int type, str brush->smudge_strength = 0.3f; brush->max_velocity = 1.0f; - /* Paint proximity falloff colorramp. */ + /* Paint proximity falloff color-ramp. */ { CBData *ramp; @@ -1178,17 +1183,17 @@ bool dynamicPaint_createType(struct DynamicPaintModifierData *pmd, int type, str return true; } -void dynamicPaint_Modifier_copy(const struct DynamicPaintModifierData *pmd, - struct DynamicPaintModifierData *tpmd, +void dynamicPaint_Modifier_copy(const DynamicPaintModifierData *pmd, + DynamicPaintModifierData *tpmd, int flag) { /* Init modifier */ tpmd->type = pmd->type; if (pmd->canvas) { - dynamicPaint_createType(tpmd, MOD_DYNAMICPAINT_TYPE_CANVAS, NULL); + dynamicPaint_createType(tpmd, MOD_DYNAMICPAINT_TYPE_CANVAS, nullptr); } if (pmd->brush) { - dynamicPaint_createType(tpmd, MOD_DYNAMICPAINT_TYPE_BRUSH, NULL); + dynamicPaint_createType(tpmd, MOD_DYNAMICPAINT_TYPE_BRUSH, nullptr); } /* Copy data */ @@ -1197,14 +1202,16 @@ void dynamicPaint_Modifier_copy(const struct DynamicPaintModifierData *pmd, tpmd->canvas->pmd = tpmd; /* free default surface */ if (tpmd->canvas->surfaces.first) { - dynamicPaint_freeSurface(tpmd, tpmd->canvas->surfaces.first); + dynamicPaint_freeSurface(tpmd, + static_cast(tpmd->canvas->surfaces.first)); } tpmd->canvas->active_sur = pmd->canvas->active_sur; /* copy existing surfaces */ - for (surface = pmd->canvas->surfaces.first; surface; surface = surface->next) { - DynamicPaintSurface *t_surface = dynamicPaint_createNewSurface(tpmd->canvas, NULL); + for (surface = static_cast(pmd->canvas->surfaces.first); surface; + surface = surface->next) { + DynamicPaintSurface *t_surface = dynamicPaint_createNewSurface(tpmd->canvas, nullptr); if (flag & LIB_ID_COPY_SET_COPIED_ON_WRITE) { /* TODO(sergey): Consider passing some tips to the surface * creation to avoid this allocate-and-free cache behavior. 
*/ @@ -1217,7 +1224,8 @@ void dynamicPaint_Modifier_copy(const struct DynamicPaintModifierData *pmd, /* surface settings */ t_surface->brush_group = surface->brush_group; MEM_freeN(t_surface->effector_weights); - t_surface->effector_weights = MEM_dupallocN(surface->effector_weights); + t_surface->effector_weights = static_cast( + MEM_dupallocN(surface->effector_weights)); BLI_strncpy(t_surface->name, surface->name, sizeof(t_surface->name)); t_surface->format = surface->format; @@ -1332,7 +1340,7 @@ static void dynamicPaint_allocateSurfaceType(DynamicPaintSurface *surface) break; } - if (sData->type_data == NULL) { + if (sData->type_data == nullptr) { setError(surface->canvas, N_("Not enough free memory")); } } @@ -1375,17 +1383,21 @@ static void dynamicPaint_initAdjacencyData(DynamicPaintSurface *surface, const b } /* allocate memory */ - ad = sData->adj_data = MEM_callocN(sizeof(PaintAdjData), "Surface Adj Data"); + ad = sData->adj_data = MEM_cnew(__func__); if (!ad) { return; } - ad->n_index = MEM_callocN(sizeof(int) * sData->total_points, "Surface Adj Index"); - ad->n_num = MEM_callocN(sizeof(int) * sData->total_points, "Surface Adj Counts"); - temp_data = MEM_callocN(sizeof(int) * sData->total_points, "Temp Adj Data"); - ad->n_target = MEM_callocN(sizeof(int) * neigh_points, "Surface Adj Targets"); - ad->flags = MEM_callocN(sizeof(int) * sData->total_points, "Surface Adj Flags"); + ad->n_index = static_cast( + MEM_callocN(sizeof(int) * sData->total_points, "Surface Adj Index")); + ad->n_num = static_cast( + MEM_callocN(sizeof(int) * sData->total_points, "Surface Adj Counts")); + temp_data = static_cast(MEM_callocN(sizeof(int) * sData->total_points, "Temp Adj Data")); + ad->n_target = static_cast( + MEM_callocN(sizeof(int) * neigh_points, "Surface Adj Targets")); + ad->flags = static_cast( + MEM_callocN(sizeof(int) * sData->total_points, "Surface Adj Flags")); ad->total_targets = neigh_points; - ad->border = NULL; + ad->border = nullptr; ad->total_border = 0; /* in case of allocation error, free memory */ @@ -1464,22 +1476,23 @@ static void dynamicPaint_initAdjacencyData(DynamicPaintSurface *surface, const b MEM_freeN(temp_data); } -typedef struct DynamicPaintSetInitColorData { +struct DynamicPaintSetInitColorData { const DynamicPaintSurface *surface; const MLoop *mloop; const float (*mloopuv)[2]; const MLoopTri *mlooptri; const MLoopCol *mloopcol; - struct ImagePool *pool; + ImagePool *pool; - const bool scene_color_manage; -} DynamicPaintSetInitColorData; + bool scene_color_manage; +}; -static void dynamic_paint_set_init_color_tex_to_vcol_cb( - void *__restrict userdata, const int i, const TaskParallelTLS *__restrict UNUSED(tls)) +static void dynamic_paint_set_init_color_tex_to_vcol_cb(void *__restrict userdata, + const int i, + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintSetInitColorData *data = userdata; + const DynamicPaintSetInitColorData *data = static_cast(userdata); const PaintSurfaceData *sData = data->surface->data; PaintPoint *pPoint = (PaintPoint *)sData->type_data; @@ -1487,7 +1500,7 @@ static void dynamic_paint_set_init_color_tex_to_vcol_cb( const MLoop *mloop = data->mloop; const MLoopTri *mlooptri = data->mlooptri; const float(*mloopuv)[2] = data->mloopuv; - struct ImagePool *pool = data->pool; + ImagePool *pool = data->pool; Tex *tex = data->surface->init_texture; const bool scene_color_manage = data->scene_color_manage; @@ -1511,10 +1524,11 @@ static void dynamic_paint_set_init_color_tex_to_vcol_cb( } } -static void 
dynamic_paint_set_init_color_tex_to_imseq_cb( - void *__restrict userdata, const int i, const TaskParallelTLS *__restrict UNUSED(tls)) +static void dynamic_paint_set_init_color_tex_to_imseq_cb(void *__restrict userdata, + const int i, + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintSetInitColorData *data = userdata; + const DynamicPaintSetInitColorData *data = static_cast<const DynamicPaintSetInitColorData *>(userdata); const PaintSurfaceData *sData = data->surface->data; PaintPoint *pPoint = (PaintPoint *)sData->type_data; @@ -1543,7 +1557,7 @@ static void dynamic_paint_set_init_color_tex_to_imseq_cb( uv_final[0] = uv_final[0] * 2.0f - 1.0f; uv_final[1] = uv_final[1] * 2.0f - 1.0f; - multitex_ext_safe(tex, uv_final, &texres, NULL, scene_color_manage, false); + multitex_ext_safe(tex, uv_final, &texres, nullptr, scene_color_manage, false); /* apply color */ copy_v3_v3(pPoint[i].color, texres.trgba); @@ -1551,9 +1565,9 @@ } static void dynamic_paint_set_init_color_vcol_to_imseq_cb( - void *__restrict userdata, const int i, const TaskParallelTLS *__restrict UNUSED(tls)) + void *__restrict userdata, const int i, const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintSetInitColorData *data = userdata; + const DynamicPaintSetInitColorData *data = static_cast<const DynamicPaintSetInitColorData *>(userdata); const PaintSurfaceData *sData = data->surface->data; PaintPoint *pPoint = (PaintPoint *)sData->type_data; @@ -1616,7 +1630,8 @@ static void dynamicPaint_setInitialColor(const Scene *scene, DynamicPaintSurface /* get uv map */ CustomData_validate_layer_name(&mesh->ldata, CD_PROP_FLOAT2, surface->init_layername, uvname); - const float(*mloopuv)[2] = CustomData_get_layer_named(&mesh->ldata, CD_PROP_FLOAT2, uvname); + const float(*mloopuv)[2] = static_cast<const float(*)[2]>( + CustomData_get_layer_named(&mesh->ldata, CD_PROP_FLOAT2, uvname)); if (!mloopuv) { return; @@ -1627,14 +1642,14 @@ if (surface->format == MOD_DPAINT_SURFACE_F_VERTEX) { struct ImagePool *pool = BKE_image_pool_new(); - DynamicPaintSetInitColorData data = { - .surface = surface, - .mloop = mloop, - .mlooptri = mlooptri, - .mloopuv = mloopuv, - .pool = pool, - .scene_color_manage = scene_color_manage, - }; + DynamicPaintSetInitColorData data{}; + data.surface = surface; + data.mloop = mloop; + data.mlooptri = mlooptri; + data.mloopuv = mloopuv; + data.pool = pool; + data.scene_color_manage = scene_color_manage; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (tottri > 1000); @@ -1643,12 +1658,12 @@ static void dynamicPaint_setInitialColor(const Scene *scene, DynamicPaintSurface BKE_image_pool_free(pool); } else if (surface->format == MOD_DPAINT_SURFACE_F_IMAGESEQ) { - DynamicPaintSetInitColorData data = { - .surface = surface, - .mlooptri = mlooptri, - .mloopuv = mloopuv, - .scene_color_manage = scene_color_manage, - }; + DynamicPaintSetInitColorData data{}; + data.surface = surface; + data.mlooptri = mlooptri; + data.mloopuv = mloopuv; + data.scene_color_manage = scene_color_manage; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -1659,12 +1674,12 @@ static void dynamicPaint_setInitialColor(const Scene *scene, DynamicPaintSurface /* vertex color layer */ else if (surface->init_color_type == MOD_DPAINT_INITIAL_VERTEXCOLOR) { - /* for vertex surface, just copy colors from mcol */ + /* For vertex surface,
just copy colors from #MLoopCol. */ if (surface->format == MOD_DPAINT_SURFACE_F_VERTEX) { const MLoop *mloop = BKE_mesh_loops(mesh); const int totloop = mesh->totloop; - const MLoopCol *col = CustomData_get_layer_named( - &mesh->ldata, CD_PROP_BYTE_COLOR, surface->init_layername); + const MLoopCol *col = static_cast( + CustomData_get_layer_named(&mesh->ldata, CD_PROP_BYTE_COLOR, surface->init_layername)); if (!col) { return; } @@ -1675,17 +1690,17 @@ static void dynamicPaint_setInitialColor(const Scene *scene, DynamicPaintSurface } else if (surface->format == MOD_DPAINT_SURFACE_F_IMAGESEQ) { const MLoopTri *mlooptri = BKE_mesh_runtime_looptri_ensure(mesh); - const MLoopCol *col = CustomData_get_layer_named( - &mesh->ldata, CD_PROP_BYTE_COLOR, surface->init_layername); + const MLoopCol *col = static_cast( + CustomData_get_layer_named(&mesh->ldata, CD_PROP_BYTE_COLOR, surface->init_layername)); if (!col) { return; } - DynamicPaintSetInitColorData data = { - .surface = surface, - .mlooptri = mlooptri, - .mloopcol = col, - }; + DynamicPaintSetInitColorData data{}; + data.surface = surface; + data.mlooptri = mlooptri; + data.mloopcol = col; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -1741,7 +1756,7 @@ bool dynamicPaint_resetSurface(const Scene *scene, DynamicPaintSurface *surface) } /* allocate memory */ - surface->data = MEM_callocN(sizeof(PaintSurfaceData), "PaintSurfaceData"); + surface->data = MEM_cnew(__func__); if (!surface->data) { return false; } @@ -1771,7 +1786,7 @@ static bool dynamicPaint_checkSurfaceData(const Scene *scene, DynamicPaintSurfac /***************************** Modifier processing ******************************/ -typedef struct DynamicPaintModifierApplyData { +struct DynamicPaintModifierApplyData { const DynamicPaintSurface *surface; Object *ob; @@ -1783,13 +1798,14 @@ typedef struct DynamicPaintModifierApplyData { float (*fcolor)[4]; MLoopCol *mloopcol; MLoopCol *mloopcol_wet; -} DynamicPaintModifierApplyData; +}; static void dynamic_paint_apply_surface_displace_cb(void *__restrict userdata, const int i, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintModifierApplyData *data = userdata; + const DynamicPaintModifierApplyData *data = static_cast( + userdata); const DynamicPaintSurface *surface = data->surface; @@ -1810,11 +1826,11 @@ static void dynamicPaint_applySurfaceDisplace(DynamicPaintSurface *surface, Mesh /* displace paint */ if (surface->type == MOD_DPAINT_SURFACE_T_DISPLACE) { - DynamicPaintModifierApplyData data = { - .surface = surface, - .vert_positions = BKE_mesh_vert_positions_for_write(result), - .vert_normals = BKE_mesh_vertex_normals_ensure(result), - }; + DynamicPaintModifierApplyData data{}; + data.surface = surface; + data.vert_positions = BKE_mesh_vert_positions_for_write(result); + data.vert_normals = BKE_mesh_vertex_normals_ensure(result); + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 10000); @@ -1823,10 +1839,12 @@ static void dynamicPaint_applySurfaceDisplace(DynamicPaintSurface *surface, Mesh } } -static void dynamic_paint_apply_surface_vpaint_blend_cb( - void *__restrict userdata, const int i, const TaskParallelTLS *__restrict UNUSED(tls)) +static void dynamic_paint_apply_surface_vpaint_blend_cb(void *__restrict userdata, + const int i, + const TaskParallelTLS *__restrict /*tls*/) { - const 
DynamicPaintModifierApplyData *data = userdata; + const DynamicPaintModifierApplyData *data = static_cast( + userdata); PaintPoint *pPoint = (PaintPoint *)data->surface->data->type_data; float(*fcolor)[4] = data->fcolor; @@ -1838,9 +1856,10 @@ static void dynamic_paint_apply_surface_vpaint_blend_cb( static void dynamic_paint_apply_surface_vpaint_cb(void *__restrict userdata, const int p_index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintModifierApplyData *data = userdata; + const DynamicPaintModifierApplyData *data = static_cast( + userdata); const MLoop *mloop = data->mloop; const MPoly *mpoly = data->mpoly; @@ -1874,9 +1893,10 @@ static void dynamic_paint_apply_surface_vpaint_cb(void *__restrict userdata, static void dynamic_paint_apply_surface_wave_cb(void *__restrict userdata, const int i, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintModifierApplyData *data = userdata; + const DynamicPaintModifierApplyData *data = static_cast( + userdata); PaintWavePoint *wPoint = (PaintWavePoint *)data->surface->data->type_data; @@ -1896,7 +1916,8 @@ static Mesh *dynamicPaint_Modifier_apply(DynamicPaintModifierData *pmd, Object * DynamicPaintSurface *surface; /* loop through surfaces */ - for (surface = pmd->canvas->surfaces.first; surface; surface = surface->next) { + for (surface = static_cast(pmd->canvas->surfaces.first); surface; + surface = surface->next) { PaintSurfaceData *sData = surface->data; if (surface->format != MOD_DPAINT_SURFACE_F_IMAGESEQ && sData) { @@ -1915,13 +1936,13 @@ static Mesh *dynamicPaint_Modifier_apply(DynamicPaintModifierData *pmd, Object * const int totpoly = result->totpoly; /* paint is stored on dry and wet layers, so mix final color first */ - float(*fcolor)[4] = MEM_callocN(sizeof(*fcolor) * sData->total_points, - "Temp paint color"); + float(*fcolor)[4] = static_cast( + MEM_callocN(sizeof(*fcolor) * sData->total_points, "Temp paint color")); + + DynamicPaintModifierApplyData data{}; + data.surface = surface; + data.fcolor = fcolor; - DynamicPaintModifierApplyData data = { - .surface = surface, - .fcolor = fcolor, - }; { TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); @@ -1934,29 +1955,30 @@ static Mesh *dynamicPaint_Modifier_apply(DynamicPaintModifierData *pmd, Object * } /* paint layer */ - MLoopCol *mloopcol = CustomData_get_layer_named_for_write( - &result->ldata, CD_PROP_BYTE_COLOR, surface->output_name, result->totloop); + MLoopCol *mloopcol = static_cast(CustomData_get_layer_named_for_write( + &result->ldata, CD_PROP_BYTE_COLOR, surface->output_name, result->totloop)); /* if output layer is lost from a constructive modifier, re-add it */ if (!mloopcol && dynamicPaint_outputLayerExists(surface, ob, 0)) { - mloopcol = CustomData_add_layer_named(&result->ldata, - CD_PROP_BYTE_COLOR, - CD_SET_DEFAULT, - NULL, - totloop, - surface->output_name); + mloopcol = static_cast(CustomData_add_layer_named(&result->ldata, + CD_PROP_BYTE_COLOR, + CD_SET_DEFAULT, + nullptr, + totloop, + surface->output_name)); } /* wet layer */ - MLoopCol *mloopcol_wet = CustomData_get_layer_named_for_write( - &result->ldata, CD_PROP_BYTE_COLOR, surface->output_name2, result->totloop); + MLoopCol *mloopcol_wet = static_cast(CustomData_get_layer_named_for_write( + &result->ldata, CD_PROP_BYTE_COLOR, surface->output_name2, result->totloop)); /* if output layer is lost from a constructive modifier, re-add it */ if 
(!mloopcol_wet && dynamicPaint_outputLayerExists(surface, ob, 1)) { - mloopcol_wet = CustomData_add_layer_named(&result->ldata, - CD_PROP_BYTE_COLOR, - CD_SET_DEFAULT, - NULL, - totloop, - surface->output_name2); + mloopcol_wet = static_cast( + CustomData_add_layer_named(&result->ldata, + CD_PROP_BYTE_COLOR, + CD_SET_DEFAULT, + nullptr, + totloop, + surface->output_name2)); } data.ob = ob; @@ -1978,14 +2000,14 @@ static Mesh *dynamicPaint_Modifier_apply(DynamicPaintModifierData *pmd, Object * /* vertex group paint */ else if (surface->type == MOD_DPAINT_SURFACE_T_WEIGHT) { int defgrp_index = BKE_object_defgroup_name_index(ob, surface->output_name); - MDeformVert *dvert = CustomData_get_layer_for_write( - &result->vdata, CD_MDEFORMVERT, result->totvert); + MDeformVert *dvert = static_cast( + CustomData_get_layer_for_write(&result->vdata, CD_MDEFORMVERT, result->totvert)); float *weight = (float *)sData->type_data; /* apply weights into a vertex group, if doesn't exists add a new layer */ if (defgrp_index != -1 && !dvert && (surface->output_name[0] != '\0')) { - dvert = CustomData_add_layer( - &result->vdata, CD_MDEFORMVERT, CD_SET_DEFAULT, NULL, sData->total_points); + dvert = static_cast(CustomData_add_layer( + &result->vdata, CD_MDEFORMVERT, CD_SET_DEFAULT, nullptr, sData->total_points)); } if (defgrp_index != -1 && dvert) { for (int i = 0; i < sData->total_points; i++) { @@ -1993,9 +2015,9 @@ static Mesh *dynamicPaint_Modifier_apply(DynamicPaintModifierData *pmd, Object * MDeformWeight *def_weight = BKE_defvert_find_index(dv, defgrp_index); /* skip if weight value is 0 and no existing weight is found */ - if ((def_weight != NULL) || (weight[i] != 0.0f)) { + if ((def_weight != nullptr) || (weight[i] != 0.0f)) { /* if not found, add a weight for it */ - if (def_weight == NULL) { + if (def_weight == nullptr) { def_weight = BKE_defvert_ensure_index(dv, defgrp_index); } @@ -2007,11 +2029,11 @@ static Mesh *dynamicPaint_Modifier_apply(DynamicPaintModifierData *pmd, Object * } /* wave simulation */ else if (surface->type == MOD_DPAINT_SURFACE_T_WAVE) { - DynamicPaintModifierApplyData data = { - .surface = surface, - .vert_positions = BKE_mesh_vert_positions_for_write(result), - .vert_normals = BKE_mesh_vertex_normals_ensure(result), - }; + DynamicPaintModifierApplyData data{}; + data.surface = surface; + data.vert_positions = BKE_mesh_vert_positions_for_write(result); + data.vert_normals = BKE_mesh_vertex_normals_ensure(result); + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -2032,8 +2054,8 @@ static Mesh *dynamicPaint_Modifier_apply(DynamicPaintModifierData *pmd, Object * /* make a copy of mesh to use as brush data */ else if (pmd->brush && pmd->type == MOD_DYNAMICPAINT_TYPE_BRUSH) { DynamicPaintRuntime *runtime_data = dynamicPaint_Modifier_runtime_ensure(pmd); - if (runtime_data->brush_mesh != NULL) { - BKE_id_free(NULL, runtime_data->brush_mesh); + if (runtime_data->brush_mesh != nullptr) { + BKE_id_free(nullptr, runtime_data->brush_mesh); } runtime_data->brush_mesh = BKE_mesh_copy_for_eval(result, false); } @@ -2052,8 +2074,8 @@ void dynamicPaint_cacheUpdateFrames(DynamicPaintSurface *surface) static void canvas_copyMesh(DynamicPaintCanvasSettings *canvas, Mesh *mesh) { DynamicPaintRuntime *runtime = dynamicPaint_Modifier_runtime_ensure(canvas->pmd); - if (runtime->canvas_mesh != NULL) { - BKE_id_free(NULL, runtime->canvas_mesh); + if (runtime->canvas_mesh != nullptr) { + BKE_id_free(nullptr, 
runtime->canvas_mesh); } runtime->canvas_mesh = BKE_mesh_copy_for_eval(mesh, false); @@ -2062,15 +2084,12 @@ static void canvas_copyMesh(DynamicPaintCanvasSettings *canvas, Mesh *mesh) /* * Updates derived mesh copy and processes dynamic paint step / caches. */ -static void dynamicPaint_frameUpdate(DynamicPaintModifierData *pmd, - struct Depsgraph *depsgraph, - Scene *scene, - Object *ob, - Mesh *mesh) +static void dynamicPaint_frameUpdate( + DynamicPaintModifierData *pmd, Depsgraph *depsgraph, Scene *scene, Object *ob, Mesh *mesh) { if (pmd->canvas) { DynamicPaintCanvasSettings *canvas = pmd->canvas; - DynamicPaintSurface *surface = canvas->surfaces.first; + DynamicPaintSurface *surface = static_cast(canvas->surfaces.first); /* update derived mesh copy */ canvas_copyMesh(canvas, mesh); @@ -2082,7 +2101,7 @@ static void dynamicPaint_frameUpdate(DynamicPaintModifierData *pmd, /* loop through surfaces */ for (; surface; surface = surface->next) { - int current_frame = (int)scene->r.cfra; + int current_frame = int(scene->r.cfra); bool no_surface_data; /* free bake data if not required anymore */ @@ -2095,7 +2114,7 @@ static void dynamicPaint_frameUpdate(DynamicPaintModifierData *pmd, } /* make sure surface is valid */ - no_surface_data = surface->data == NULL; + no_surface_data = surface->data == nullptr; if (!dynamicPaint_checkSurfaceData(scene, surface)) { continue; } @@ -2104,7 +2123,7 @@ static void dynamicPaint_frameUpdate(DynamicPaintModifierData *pmd, CLAMP(current_frame, surface->start_frame, surface->end_frame); if (no_surface_data || current_frame != surface->current_frame || - (int)scene->r.cfra == surface->start_frame) { + int(scene->r.cfra) == surface->start_frame) { PointCache *cache = surface->pointcache; PTCacheID pid; surface->current_frame = current_frame; @@ -2113,21 +2132,21 @@ static void dynamicPaint_frameUpdate(DynamicPaintModifierData *pmd, BKE_ptcache_id_from_dynamicpaint(&pid, ob, surface); pid.cache->startframe = surface->start_frame; pid.cache->endframe = surface->end_frame; - BKE_ptcache_id_time(&pid, scene, (float)scene->r.cfra, NULL, NULL, NULL); + BKE_ptcache_id_time(&pid, scene, float(scene->r.cfra), nullptr, nullptr, nullptr); /* reset non-baked cache at first frame */ - if ((int)scene->r.cfra == surface->start_frame && !(cache->flag & PTCACHE_BAKED)) { + if (int(scene->r.cfra) == surface->start_frame && !(cache->flag & PTCACHE_BAKED)) { cache->flag |= PTCACHE_REDO_NEEDED; BKE_ptcache_id_reset(scene, &pid, PTCACHE_RESET_OUTDATED); cache->flag &= ~PTCACHE_REDO_NEEDED; } /* try to read from cache */ - bool can_simulate = ((int)scene->r.cfra == current_frame) && + bool can_simulate = (int(scene->r.cfra) == current_frame) && !(cache->flag & PTCACHE_BAKED); - if (BKE_ptcache_read(&pid, (float)scene->r.cfra, can_simulate)) { - BKE_ptcache_validate(cache, (int)scene->r.cfra); + if (BKE_ptcache_read(&pid, float(scene->r.cfra), can_simulate)) { + BKE_ptcache_validate(cache, int(scene->r.cfra)); } /* if read failed and we're on surface range do recalculate */ else if (can_simulate) { @@ -2150,11 +2169,8 @@ static void dynamicPaint_frameUpdate(DynamicPaintModifierData *pmd, } } -Mesh *dynamicPaint_Modifier_do(DynamicPaintModifierData *pmd, - struct Depsgraph *depsgraph, - Scene *scene, - Object *ob, - Mesh *mesh) +Mesh *dynamicPaint_Modifier_do( + DynamicPaintModifierData *pmd, Depsgraph *depsgraph, Scene *scene, Object *ob, Mesh *mesh) { /* Update canvas data for a new frame */ dynamicPaint_frameUpdate(pmd, depsgraph, scene, ob, mesh); @@ -2173,7 +2189,7 @@ Mesh 
*dynamicPaint_Modifier_do(DynamicPaintModifierData *pmd, 0.0f, 0.0f, -0.2f, -0.4f, 0.2f, 0.4f, 0.4f, -0.2f, -0.4f, 0.3f, \ } -typedef struct DynamicPaintCreateUVSurfaceData { +struct DynamicPaintCreateUVSurfaceData { const DynamicPaintSurface *surface; PaintUVPoint *tempPoints; @@ -2182,16 +2198,18 @@ typedef struct DynamicPaintCreateUVSurfaceData { const MLoopTri *mlooptri; const float (*mloopuv)[2]; const MLoop *mloop; - const int tottri; + int tottri; const Bounds2D *faceBB; uint32_t *active_points; -} DynamicPaintCreateUVSurfaceData; +}; -static void dynamic_paint_create_uv_surface_direct_cb( - void *__restrict userdata, const int ty, const TaskParallelTLS *__restrict UNUSED(tls)) +static void dynamic_paint_create_uv_surface_direct_cb(void *__restrict userdata, + const int ty, + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintCreateUVSurfaceData *data = userdata; + const DynamicPaintCreateUVSurfaceData *data = + static_cast(userdata); const DynamicPaintSurface *surface = data->surface; PaintUVPoint *tempPoints = data->tempPoints; @@ -2220,24 +2238,24 @@ static void dynamic_paint_create_uv_surface_direct_cb( tPoint->pixel_index = index; /* Actual pixel center, used when collision is found */ - point[0][0] = ((float)tx + 0.5f) / w; - point[0][1] = ((float)ty + 0.5f) / h; + point[0][0] = (float(tx) + 0.5f) / w; + point[0][1] = (float(ty) + 0.5f) / h; /* * A pixel middle sample isn't enough to find very narrow polygons * So using 4 samples of each corner too */ - point[1][0] = ((float)tx) / w; - point[1][1] = ((float)ty) / h; + point[1][0] = (float(tx)) / w; + point[1][1] = (float(ty)) / h; - point[2][0] = ((float)tx + 1) / w; - point[2][1] = ((float)ty) / h; + point[2][0] = (float(tx) + 1) / w; + point[2][1] = (float(ty)) / h; - point[3][0] = ((float)tx) / w; - point[3][1] = ((float)ty + 1) / h; + point[3][0] = (float(tx)) / w; + point[3][1] = (float(ty) + 1) / h; - point[4][0] = ((float)tx + 1) / w; - point[4][1] = ((float)ty + 1) / h; + point[4][0] = (float(tx) + 1) / w; + point[4][1] = (float(ty) + 1) / h; /* Loop through samples, starting from middle point */ for (int sample = 0; sample < 5; sample++) { @@ -2283,10 +2301,12 @@ static void dynamic_paint_create_uv_surface_direct_cb( } } -static void dynamic_paint_create_uv_surface_neighbor_cb( - void *__restrict userdata, const int ty, const TaskParallelTLS *__restrict UNUSED(tls)) +static void dynamic_paint_create_uv_surface_neighbor_cb(void *__restrict userdata, + const int ty, + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintCreateUVSurfaceData *data = userdata; + const DynamicPaintCreateUVSurfaceData *data = + static_cast(userdata); const DynamicPaintSurface *surface = data->surface; PaintUVPoint *tempPoints = data->tempPoints; @@ -2317,8 +2337,8 @@ static void dynamic_paint_create_uv_surface_neighbor_cb( const int v_min = (ty > 0) ? -1 : 0; const int v_max = (ty < (h - 1)) ? 
1 : 0; - point[0] = ((float)tx + 0.5f) / w; - point[1] = ((float)ty + 0.5f) / h; + point[0] = (float(tx) + 0.5f) / w; + point[1] = (float(ty) + 0.5f) / h; /* search through defined area for neighbor, checking grid directions first */ for (int ni = 0; ni < 8; ni++) { @@ -2402,13 +2422,13 @@ static float dist_squared_to_looptri_uv_edges(const MLoopTri *mlooptri, return min_distance; } -typedef struct DynamicPaintFindIslandBorderData { +struct DynamicPaintFindIslandBorderData { const MeshElemMap *vert_to_looptri_map; int w, h, px, py; int best_index; float best_weight; -} DynamicPaintFindIslandBorderData; +}; static void dynamic_paint_find_island_border(const DynamicPaintCreateUVSurfaceData *data, DynamicPaintFindIslandBorderData *bdata, @@ -2474,20 +2494,19 @@ static int dynamic_paint_find_neighbor_pixel(const DynamicPaintCreateUVSurfaceDa * TODO: Implement something more accurate / optimized? */ { - DynamicPaintFindIslandBorderData bdata = { - .vert_to_looptri_map = vert_to_looptri_map, - .w = w, - .h = h, - .px = px, - .py = py, - .best_index = NOT_FOUND, - .best_weight = 1.0f, - }; + DynamicPaintFindIslandBorderData bdata{}; + bdata.vert_to_looptri_map = vert_to_looptri_map; + bdata.w = w; + bdata.h = h; + bdata.px = px; + bdata.py = py; + bdata.best_index = NOT_FOUND; + bdata.best_weight = 1.0f; float pixel[2]; - pixel[0] = ((float)(px + neighX[n_index]) + 0.5f) / (float)w; - pixel[1] = ((float)(py + neighY[n_index]) + 0.5f) / (float)h; + pixel[0] = (float(px + neighX[n_index]) + 0.5f) / float(w); + pixel[1] = (float(py + neighY[n_index]) + 0.5f) / float(h); /* Do a small recursive search for the best island edge. */ dynamic_paint_find_island_border(data, &bdata, cPoint->tri_index, pixel, -1, 5); @@ -2627,7 +2646,7 @@ static void dynamic_paint_find_island_border(const DynamicPaintCreateUVSurfaceDa int w = bdata->w, h = bdata->h, px = bdata->px, py = bdata->py; - const int final_pixel[2] = {(int)floorf(tgt_pixel[0] * w), (int)floorf(tgt_pixel[1] * h)}; + const int final_pixel[2] = {int(floorf(tgt_pixel[0] * w)), int(floorf(tgt_pixel[1] * h))}; /* If current pixel uv is outside of texture */ if (final_pixel[0] < 0 || final_pixel[0] >= w || final_pixel[1] < 0 || final_pixel[1] >= h) { @@ -2692,8 +2711,10 @@ static bool dynamicPaint_pointHasNeighbor(PaintAdjData *ed, int index, int neigh * I.e. if A is neighbor of B, B is neighbor of A. */ static bool dynamicPaint_symmetrizeAdjData(PaintAdjData *ed, int active_points) { - int *new_n_index = MEM_callocN(sizeof(int) * active_points, "Surface Adj Index"); - int *new_n_num = MEM_callocN(sizeof(int) * active_points, "Surface Adj Counts"); + int *new_n_index = static_cast( + MEM_callocN(sizeof(int) * active_points, "Surface Adj Index")); + int *new_n_num = static_cast( + MEM_callocN(sizeof(int) * active_points, "Surface Adj Counts")); if (new_n_num && new_n_index) { /* Count symmetrized neighbors */ @@ -2722,7 +2743,8 @@ static bool dynamicPaint_symmetrizeAdjData(PaintAdjData *ed, int active_points) } /* Allocate a new target map */ - int *new_n_target = MEM_callocN(sizeof(int) * total_targets, "Surface Adj Targets"); + int *new_n_target = static_cast( + MEM_callocN(sizeof(int) * total_targets, "Surface Adj Targets")); if (new_n_target) { /* Copy existing neighbors to the new map */ @@ -2787,7 +2809,7 @@ int dynamicPaint_createUVSurface(Scene *scene, float *progress, bool *do_update) { - /* Antialias jitter point relative coords */ + /* Anti-alias jitter point relative coords. */ const int aa_samples = (surface->flags & MOD_DPAINT_ANTIALIAS) ? 
5 : 1; char uvname[MAX_CUSTOMDATA_LAYER_NAME]; uint32_t active_points = 0; @@ -2797,13 +2819,13 @@ int dynamicPaint_createUVSurface(Scene *scene, DynamicPaintCanvasSettings *canvas = surface->canvas; Mesh *mesh = dynamicPaint_canvas_mesh_get(canvas); - PaintUVPoint *tempPoints = NULL; - Vec3f *tempWeights = NULL; - const MLoopTri *mlooptri = NULL; - const float(*mloopuv)[2] = NULL; - const MLoop *mloop = NULL; + PaintUVPoint *tempPoints = nullptr; + Vec3f *tempWeights = nullptr; + const MLoopTri *mlooptri = nullptr; + const float(*mloopuv)[2] = nullptr; + const MLoop *mloop = nullptr; - Bounds2D *faceBB = NULL; + Bounds2D *faceBB = nullptr; int *final_index; *progress = 0.0f; @@ -2823,7 +2845,8 @@ int dynamicPaint_createUVSurface(Scene *scene, /* get uv map */ if (CustomData_has_layer(&mesh->ldata, CD_PROP_FLOAT2)) { CustomData_validate_layer_name(&mesh->ldata, CD_PROP_FLOAT2, surface->uvlayer_name, uvname); - mloopuv = CustomData_get_layer_named(&mesh->ldata, CD_PROP_FLOAT2, uvname); + mloopuv = static_cast( + CustomData_get_layer_named(&mesh->ldata, CD_PROP_FLOAT2, uvname)); } /* Check for validity */ @@ -2846,22 +2869,25 @@ int dynamicPaint_createUVSurface(Scene *scene, if (surface->data) { dynamicPaint_freeSurfaceData(surface); } - sData = surface->data = MEM_callocN(sizeof(PaintSurfaceData), "PaintSurfaceData"); + sData = surface->data = MEM_cnew(__func__); if (!surface->data) { return setError(canvas, N_("Not enough free memory")); } - tempPoints = MEM_callocN(w * h * sizeof(*tempPoints), "Temp PaintUVPoint"); + tempPoints = static_cast( + MEM_callocN(w * h * sizeof(*tempPoints), "Temp PaintUVPoint")); if (!tempPoints) { error = true; } - final_index = MEM_callocN(w * h * sizeof(*final_index), "Temp UV Final Indexes"); + final_index = static_cast( + MEM_callocN(w * h * sizeof(*final_index), "Temp UV Final Indexes")); if (!final_index) { error = true; } - tempWeights = MEM_mallocN(w * h * aa_samples * sizeof(*tempWeights), "Temp bWeights"); + tempWeights = static_cast( + MEM_mallocN(w * h * aa_samples * sizeof(*tempWeights), "Temp bWeights")); if (!tempWeights) { error = true; } @@ -2871,7 +2897,7 @@ int dynamicPaint_createUVSurface(Scene *scene, * the pixel-inside-a-face search. */ if (!error) { - faceBB = MEM_mallocN(tottri * sizeof(*faceBB), "MPCanvasFaceBB"); + faceBB = static_cast(MEM_mallocN(tottri * sizeof(*faceBB), "MPCanvasFaceBB")); if (!faceBB) { error = true; } @@ -2894,16 +2920,16 @@ int dynamicPaint_createUVSurface(Scene *scene, *do_update = true; /* Loop through every pixel and check if pixel is uv-mapped on a canvas face. 
*/ - DynamicPaintCreateUVSurfaceData data = { - .surface = surface, - .tempPoints = tempPoints, - .tempWeights = tempWeights, - .mlooptri = mlooptri, - .mloopuv = mloopuv, - .mloop = mloop, - .tottri = tottri, - .faceBB = faceBB, - }; + DynamicPaintCreateUVSurfaceData data{}; + data.surface = surface; + data.tempPoints = tempPoints; + data.tempWeights = tempWeights; + data.mlooptri = mlooptri; + data.mloopuv = mloopuv; + data.mloop = mloop; + data.tottri = tottri; + data.faceBB = faceBB; + { TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); @@ -3008,7 +3034,8 @@ int dynamicPaint_createUVSurface(Scene *scene, } /* Create a list of border pixels */ - ed->border = MEM_callocN(sizeof(int) * total_border, "Border Pixel Index"); + ed->border = static_cast( + MEM_callocN(sizeof(int) * total_border, "Border Pixel Index")); if (ed->border) { ed->total_border = total_border; @@ -3068,11 +3095,12 @@ int dynamicPaint_createUVSurface(Scene *scene, *do_update = true; /* Create final surface data without inactive points */ - ImgSeqFormatData *f_data = MEM_callocN(sizeof(*f_data), "ImgSeqFormatData"); + ImgSeqFormatData *f_data = MEM_cnew(__func__); if (f_data) { - f_data->uv_p = MEM_callocN(active_points * sizeof(*f_data->uv_p), "PaintUVPoint"); - f_data->barycentricWeights = MEM_callocN( - active_points * aa_samples * sizeof(*f_data->barycentricWeights), "PaintUVPoint"); + f_data->uv_p = static_cast( + MEM_callocN(active_points * sizeof(*f_data->uv_p), "PaintUVPoint")); + f_data->barycentricWeights = static_cast(MEM_callocN( + active_points * aa_samples * sizeof(*f_data->barycentricWeights), "PaintUVPoint")); if (!f_data->uv_p || !f_data->barycentricWeights) { error = 1; @@ -3096,7 +3124,7 @@ int dynamicPaint_createUVSurface(Scene *scene, sData->total_points = 0; } else { - sData->total_points = (int)active_points; + sData->total_points = int(active_points); sData->format_data = f_data; for (int index = 0, cursor = 0; index < (w * h); index++) { @@ -3166,15 +3194,17 @@ int dynamicPaint_createUVSurface(Scene *scene, /* * Outputs an image file from uv surface data. 
*/ -typedef struct DynamicPaintOutputSurfaceImageData { +struct DynamicPaintOutputSurfaceImageData { const DynamicPaintSurface *surface; ImBuf *ibuf; -} DynamicPaintOutputSurfaceImageData; +}; -static void dynamic_paint_output_surface_image_paint_cb( - void *__restrict userdata, const int index, const TaskParallelTLS *__restrict UNUSED(tls)) +static void dynamic_paint_output_surface_image_paint_cb(void *__restrict userdata, + const int index, + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintOutputSurfaceImageData *data = userdata; + const DynamicPaintOutputSurfaceImageData *data = + static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintPoint *point = &((PaintPoint *)surface->data->type_data)[index]; @@ -3194,9 +3224,10 @@ static void dynamic_paint_output_surface_image_paint_cb( } static void dynamic_paint_output_surface_image_displace_cb( - void *__restrict userdata, const int index, const TaskParallelTLS *__restrict UNUSED(tls)) + void *__restrict userdata, const int index, const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintOutputSurfaceImageData *data = userdata; + const DynamicPaintOutputSurfaceImageData *data = + static_cast(userdata); const DynamicPaintSurface *surface = data->surface; float depth = ((float *)surface->data->type_data)[index]; @@ -3219,10 +3250,12 @@ static void dynamic_paint_output_surface_image_displace_cb( ibuf->rect_float[pos + 3] = 1.0f; } -static void dynamic_paint_output_surface_image_wave_cb( - void *__restrict userdata, const int index, const TaskParallelTLS *__restrict UNUSED(tls)) +static void dynamic_paint_output_surface_image_wave_cb(void *__restrict userdata, + const int index, + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintOutputSurfaceImageData *data = userdata; + const DynamicPaintOutputSurfaceImageData *data = + static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintWavePoint *wPoint = &((PaintWavePoint *)surface->data->type_data)[index]; @@ -3243,10 +3276,12 @@ static void dynamic_paint_output_surface_image_wave_cb( ibuf->rect_float[pos + 3] = 1.0f; } -static void dynamic_paint_output_surface_image_wetmap_cb( - void *__restrict userdata, const int index, const TaskParallelTLS *__restrict UNUSED(tls)) +static void dynamic_paint_output_surface_image_wetmap_cb(void *__restrict userdata, + const int index, + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintOutputSurfaceImageData *data = userdata; + const DynamicPaintOutputSurfaceImageData *data = + static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintPoint *point = &((PaintPoint *)surface->data->type_data)[index]; @@ -3263,7 +3298,7 @@ void dynamicPaint_outputSurfaceImage(DynamicPaintSurface *surface, const char *filepath, short output_layer) { - ImBuf *ibuf = NULL; + ImBuf *ibuf = nullptr; PaintSurfaceData *sData = surface->data; /* OpenEXR or PNG */ int format = (surface->image_fileformat & MOD_DPAINT_IMGFORMAT_OPENEXR) ? 
R_IMF_IMTYPE_OPENEXR : @@ -3289,15 +3324,15 @@ void dynamicPaint_outputSurfaceImage(DynamicPaintSurface *surface, /* Init image buffer */ ibuf = IMB_allocImBuf(surface->image_resolution, surface->image_resolution, 32, IB_rectfloat); - if (ibuf == NULL) { + if (ibuf == nullptr) { setError(surface->canvas, N_("Image save failed: not enough free memory")); return; } - DynamicPaintOutputSurfaceImageData data = { - .surface = surface, - .ibuf = ibuf, - }; + DynamicPaintOutputSurfaceImageData data{}; + data.surface = surface; + data.ibuf = ibuf; + switch (surface->type) { case MOD_DPAINT_SURFACE_T_PAINT: switch (output_layer) { @@ -3700,7 +3735,7 @@ static bool meshBrush_boundsIntersect(Bounds3D *b1, } /* calculate velocity for mesh vertices */ -typedef struct DynamicPaintBrushVelocityData { +struct DynamicPaintBrushVelocityData { Vec3f *brush_vel; const float (*positions_p)[3]; @@ -3709,14 +3744,15 @@ typedef struct DynamicPaintBrushVelocityData { float (*obmat)[4]; float (*prev_obmat)[4]; - const float timescale; -} DynamicPaintBrushVelocityData; + float timescale; +}; static void dynamic_paint_brush_velocity_compute_cb(void *__restrict userdata, const int i, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintBrushVelocityData *data = userdata; + const DynamicPaintBrushVelocityData *data = static_cast( + userdata); Vec3f *brush_vel = data->brush_vel; @@ -3793,8 +3829,7 @@ static void dynamicPaint_brushMeshCalculateVelocity(Depsgraph *depsgraph, numOfVerts_c = mesh_c->totvert; float(*positions_c)[3] = BKE_mesh_vert_positions_for_write(mesh_c); - (*brushVel) = (struct Vec3f *)MEM_mallocN(numOfVerts_c * sizeof(Vec3f), - "Dynamic Paint brush velocity"); + (*brushVel) = (Vec3f *)MEM_mallocN(numOfVerts_c * sizeof(Vec3f), "Dynamic Paint brush velocity"); if (!(*brushVel)) { return; } @@ -3805,21 +3840,21 @@ static void dynamicPaint_brushMeshCalculateVelocity(Depsgraph *depsgraph, } /* calculate speed */ - DynamicPaintBrushVelocityData data = { - .brush_vel = *brushVel, - .positions_p = positions_p, - .positions_c = positions_c, - .obmat = ob->object_to_world, - .prev_obmat = prev_obmat, - .timescale = timescale, - }; + DynamicPaintBrushVelocityData data{}; + data.brush_vel = *brushVel; + data.positions_p = positions_p; + data.positions_c = positions_c; + data.obmat = ob->object_to_world; + data.prev_obmat = prev_obmat; + data.timescale = timescale; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (numOfVerts_c > 10000); BLI_task_parallel_range( 0, numOfVerts_c, &data, dynamic_paint_brush_velocity_compute_cb, &settings); - BKE_id_free(NULL, mesh_p); + BKE_id_free(nullptr, mesh_p); } /* calculate velocity for object center point */ @@ -3870,37 +3905,38 @@ static void dynamicPaint_brushObjectCalculateVelocity( mul_v3_fl(brushVel->v, 1.0f / timescale); } -typedef struct DynamicPaintPaintData { +struct DynamicPaintPaintData { const DynamicPaintSurface *surface; const DynamicPaintBrushSettings *brush; Object *brushOb; const Scene *scene; - const float timescale; - const int c_index; + float timescale; + int c_index; Mesh *mesh; const float (*positions)[3]; const MLoop *mloop; const MLoopTri *mlooptri; - const float brush_radius; + float brush_radius; const float *avg_brushNor; const Vec3f *brushVelocity; const ParticleSystem *psys; - const float solidradius; + float solidradius; void *treeData; float *pointCoord; -} DynamicPaintPaintData; +}; /* * Paint a brush object mesh to the 
surface */ -static void dynamic_paint_paint_mesh_cell_point_cb_ex( - void *__restrict userdata, const int id, const TaskParallelTLS *__restrict UNUSED(tls)) +static void dynamic_paint_paint_mesh_cell_point_cb_ex(void *__restrict userdata, + const int id, + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintPaintData *data = userdata; + const DynamicPaintPaintData *data = static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -3919,12 +3955,12 @@ static void dynamic_paint_paint_mesh_cell_point_cb_ex( const float *avg_brushNor = data->avg_brushNor; const Vec3f *brushVelocity = data->brushVelocity; - BVHTreeFromMesh *treeData = data->treeData; + BVHTreeFromMesh *treeData = static_cast(data->treeData); const int index = grid->t_index[grid->s_pos[c_index] + id]; const int samples = bData->s_num[index]; int ss; - float total_sample = (float)samples; + float total_sample = float(samples); float brushStrength = 0.0f; /* brush influence factor */ float depth = 0.0f; /* brush intersection depth */ float velocity_val = 0.0f; @@ -3987,9 +4023,9 @@ static void dynamic_paint_paint_mesh_cell_point_cb_ex( /* For optimization sake, hit point normal isn't calculated in ray cast loop */ const int vtri[3] = { - mloop[mlooptri[hit.index].tri[0]].v, - mloop[mlooptri[hit.index].tri[1]].v, - mloop[mlooptri[hit.index].tri[2]].v, + int(mloop[mlooptri[hit.index].tri[0]].v), + int(mloop[mlooptri[hit.index].tri[1]].v), + int(mloop[mlooptri[hit.index].tri[2]].v), }; float dot; @@ -4236,10 +4272,10 @@ static bool dynamicPaint_paintMesh(Depsgraph *depsgraph, { PaintSurfaceData *sData = surface->data; PaintBakeData *bData = sData->bData; - Mesh *mesh = NULL; - Vec3f *brushVelocity = NULL; - const MLoopTri *mlooptri = NULL; - const MLoop *mloop = NULL; + Mesh *mesh = nullptr; + Vec3f *brushVelocity = nullptr; + const MLoopTri *mlooptri = nullptr; + const MLoop *mloop = nullptr; if (brush->flags & MOD_DPAINT_USES_VELOCITY) { dynamicPaint_brushMeshCalculateVelocity( @@ -4247,12 +4283,12 @@ static bool dynamicPaint_paintMesh(Depsgraph *depsgraph, } Mesh *brush_mesh = dynamicPaint_brush_mesh_get(brush); - if (brush_mesh == NULL) { + if (brush_mesh == nullptr) { return false; } { - BVHTreeFromMesh treeData = {NULL}; + BVHTreeFromMesh treeData = {nullptr}; float avg_brushNor[3] = {0.0f}; const float brush_radius = brush->paint_distance * surface->radius_scale; int numOfVerts; @@ -4286,7 +4322,7 @@ static bool dynamicPaint_paintMesh(Depsgraph *depsgraph, } if (brush->flags & MOD_DPAINT_PROX_PROJECT && brush->collision != MOD_DPAINT_COL_VOLUME) { - mul_v3_fl(avg_brushNor, 1.0f / (float)numOfVerts); + mul_v3_fl(avg_brushNor, 1.0f / float(numOfVerts)); /* instead of null vector use positive z */ if (UNLIKELY(normalize_v3(avg_brushNor) == 0.0f)) { avg_brushNor[2] = 1.0f; @@ -4309,22 +4345,22 @@ static bool dynamicPaint_paintMesh(Depsgraph *depsgraph, } /* loop through cell points and process brush */ - DynamicPaintPaintData data = { - .surface = surface, - .brush = brush, - .brushOb = brushOb, - .scene = scene, - .timescale = timescale, - .c_index = c_index, - .mesh = mesh, - .positions = positions, - .mloop = mloop, - .mlooptri = mlooptri, - .brush_radius = brush_radius, - .avg_brushNor = avg_brushNor, - .brushVelocity = brushVelocity, - .treeData = &treeData, - }; + DynamicPaintPaintData data{}; + data.surface = surface; + data.brush = brush; + data.brushOb = brushOb; + data.scene = scene; + data.timescale = timescale; + data.c_index = c_index; + 
data.mesh = mesh; + data.positions = positions; + data.mloop = mloop; + data.mlooptri = mlooptri; + data.brush_radius = brush_radius; + data.avg_brushNor = avg_brushNor; + data.brushVelocity = brushVelocity; + data.treeData = &treeData; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (grid->s_num[c_index] > 250); @@ -4338,7 +4374,7 @@ static bool dynamicPaint_paintMesh(Depsgraph *depsgraph, } /* free bvh tree */ free_bvhtree_from_mesh(&treeData); - BKE_id_free(NULL, mesh); + BKE_id_free(nullptr, mesh); } /* free brush velocity data */ @@ -4353,9 +4389,9 @@ static bool dynamicPaint_paintMesh(Depsgraph *depsgraph, * Paint a particle system to the surface */ static void dynamic_paint_paint_particle_cell_point_cb_ex( - void *__restrict userdata, const int id, const TaskParallelTLS *__restrict UNUSED(tls)) + void *__restrict userdata, const int id, const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintPaintData *data = userdata; + const DynamicPaintPaintData *data = static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -4369,7 +4405,7 @@ static void dynamic_paint_paint_particle_cell_point_cb_ex( const float timescale = data->timescale; const int c_index = data->c_index; - KDTree_3d *tree = data->treeData; + KDTree_3d *tree = static_cast(data->treeData); const float solidradius = data->solidradius; const float smooth = brush->particle_smooth * surface->radius_scale; @@ -4614,15 +4650,15 @@ static bool dynamicPaint_paintParticles(DynamicPaintSurface *surface, } /* loop through cell points */ - DynamicPaintPaintData data = { - .surface = surface, - .brush = brush, - .psys = psys, - .solidradius = solidradius, - .timescale = timescale, - .c_index = c_index, - .treeData = tree, - }; + DynamicPaintPaintData data{}; + data.surface = surface; + data.brush = brush; + data.psys = psys; + data.solidradius = solidradius; + data.timescale = timescale; + data.c_index = c_index; + data.treeData = tree; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (grid->s_num[c_index] > 250); @@ -4641,9 +4677,9 @@ static bool dynamicPaint_paintParticles(DynamicPaintSurface *surface, /* paint a single point of defined proximity radius to the surface */ static void dynamic_paint_paint_single_point_cb_ex(void *__restrict userdata, const int index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintPaintData *data = userdata; + const DynamicPaintPaintData *data = static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -4755,17 +4791,17 @@ static bool dynamicPaint_paintSinglePoint( /* * Loop through every surface point */ - DynamicPaintPaintData data = { - .surface = surface, - .brush = brush, - .brushOb = brushOb, - .scene = scene, - .timescale = timescale, - .positions = positions, - .brush_radius = brush_radius, - .brushVelocity = &brushVel, - .pointCoord = pointCoord, - }; + DynamicPaintPaintData data{}; + data.surface = surface; + data.brush = brush; + data.brushOb = brushOb; + data.scene = scene; + data.timescale = timescale; + data.positions = positions; + data.brush_radius = brush_radius; + data.brushVelocity = &brushVel; + data.pointCoord = pointCoord; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ 
-4783,9 +4819,9 @@ static bool dynamicPaint_paintSinglePoint( static void dynamic_paint_prepare_adjacency_cb(void *__restrict userdata, const int index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - PaintSurfaceData *sData = userdata; + PaintSurfaceData *sData = static_cast(userdata); PaintBakeData *bData = sData->bData; BakeAdjPoint *bNeighs = bData->bNeighs; PaintAdjData *adj_data = sData->adj_data; @@ -4822,8 +4858,8 @@ static void dynamicPaint_prepareAdjacencyData(DynamicPaintSurface *surface, cons if (bData->bNeighs) { MEM_freeN(bData->bNeighs); } - bNeighs = bData->bNeighs = MEM_mallocN(sData->adj_data->total_targets * sizeof(*bNeighs), - "PaintEffectBake"); + bNeighs = bData->bNeighs = static_cast( + MEM_mallocN(sData->adj_data->total_targets * sizeof(*bNeighs), "PaintEffectBake")); if (!bNeighs) { return; } @@ -4842,7 +4878,7 @@ static void dynamicPaint_prepareAdjacencyData(DynamicPaintSurface *surface, cons int numOfNeighs = adj_data->n_num[index]; for (int i = 0; i < numOfNeighs; i++) { - bData->average_dist += (double)bNeighs[adj_data->n_index[index] + i].dist; + bData->average_dist += double(bNeighs[adj_data->n_index[index] + i].dist); } } bData->average_dist /= adj_data->total_targets; @@ -4923,11 +4959,11 @@ static void surface_determineForceTargetPoints(const PaintSurfaceData *sData, /* and multiply depending on how deeply force intersects surface */ temp = fabsf(force_intersect); CLAMP(temp, 0.0f, 1.0f); - mul_v2_fl(closest_d, acosf(temp) / (float)M_PI_2); + mul_v2_fl(closest_d, acosf(temp) / float(M_PI_2)); } else { /* if only single neighbor, still linearize force intersection effect */ - closest_d[0] = 1.0f - acosf(closest_d[0]) / (float)M_PI_2; + closest_d[0] = 1.0f - acosf(closest_d[0]) / float(M_PI_2); } } @@ -4950,9 +4986,9 @@ static void dynamicPaint_doSmudge(DynamicPaintSurface *surface, CLAMP_MIN(max_velocity, vel); } - int steps = (int)ceil((double)max_velocity / bData->average_dist * (double)timescale); + int steps = int(ceil(double(max_velocity) / bData->average_dist * double(timescale))); CLAMP(steps, 0, 12); - float eff_scale = brush->smudge_strength / (float)steps * timescale; + float eff_scale = brush->smudge_strength / float(steps) * timescale; for (int step = 0; step < steps; step++) { for (int index = 0; index < sData->total_points; index++) { @@ -5013,26 +5049,26 @@ static void dynamicPaint_doSmudge(DynamicPaintSurface *surface, } } -typedef struct DynamicPaintEffectData { +struct DynamicPaintEffectData { const DynamicPaintSurface *surface; Scene *scene; float *force; ListBase *effectors; const void *prevPoint; - const float eff_scale; + float eff_scale; uint8_t *point_locks; - const float wave_speed; - const float wave_scale; - const float wave_max_slope; + float wave_speed; + float wave_scale; + float wave_max_slope; - const float dt; - const float min_dist; - const float damp_factor; - const bool reset_wave; -} DynamicPaintEffectData; + float dt; + float min_dist; + float damp_factor; + bool reset_wave; +}; /* * Prepare data required by effects for current frame. 
@@ -5040,9 +5076,9 @@ typedef struct DynamicPaintEffectData { */ static void dynamic_paint_prepare_effect_cb(void *__restrict userdata, const int index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintEffectData *data = userdata; + const DynamicPaintEffectData *data = static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -5062,12 +5098,13 @@ static void dynamic_paint_prepare_effect_cb(void *__restrict userdata, EffectedPoint epoint; pd_point_from_loc(scene, realCoord[bData->s_pos[index]].v, vel, index, &epoint); epoint.vel_to_sec = 1.0f; - BKE_effectors_apply(effectors, NULL, surface->effector_weights, &epoint, forc, NULL, NULL); + BKE_effectors_apply( + effectors, nullptr, surface->effector_weights, &epoint, forc, nullptr, nullptr); } /* if global gravity is enabled, add it too */ if (scene->physics_settings.flag & PHYS_GLOBAL_GRAVITY) { - /* also divide by 10 to about match default grav + /* also divide by 10 to about match default gravity * with default force strength (1.0). */ madd_v3_v3fl(forc, scene->physics_settings.gravity, @@ -5094,7 +5131,7 @@ static void dynamic_paint_prepare_effect_cb(void *__restrict userdata, force[index * 4 + 3] = normalize_v3_v3(&force[index * 4], forc); } -static int dynamicPaint_prepareEffectStep(struct Depsgraph *depsgraph, +static int dynamicPaint_prepareEffectStep(Depsgraph *depsgraph, DynamicPaintSurface *surface, Scene *scene, Object *ob, @@ -5111,18 +5148,19 @@ static int dynamicPaint_prepareEffectStep(struct Depsgraph *depsgraph, /* Init force data if required */ if (surface->effect & MOD_DPAINT_EFFECT_DO_DRIP) { ListBase *effectors = BKE_effectors_create( - depsgraph, ob, NULL, surface->effector_weights, false); + depsgraph, ob, nullptr, surface->effector_weights, false); /* allocate memory for force data (dir vector + strength) */ - *force = MEM_mallocN(sizeof(float[4]) * sData->total_points, "PaintEffectForces"); + *force = static_cast( + MEM_mallocN(sizeof(float[4]) * sData->total_points, "PaintEffectForces")); if (*force) { - DynamicPaintEffectData data = { - .surface = surface, - .scene = scene, - .force = *force, - .effectors = effectors, - }; + DynamicPaintEffectData data{}; + data.surface = surface; + data.scene = scene; + data.force = *force; + data.effectors = effectors; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -5139,9 +5177,9 @@ static int dynamicPaint_prepareEffectStep(struct Depsgraph *depsgraph, } /* Get number of required steps using average point distance - * so that just a few ultra close pixels won't increase substeps to max. */ + * so that just a few ultra close pixels won't increase sub-steps to max. */ - /* adjust number of required substep by fastest active effect */ + /* Adjust number of required sub-step by fastest active effect. 
*/ if (surface->effect & MOD_DPAINT_EFFECT_DO_SPREAD) { spread_speed = surface->spread_speed; } @@ -5150,9 +5188,9 @@ static int dynamicPaint_prepareEffectStep(struct Depsgraph *depsgraph, } fastest_effect = max_fff(spread_speed, shrink_speed, average_force); - avg_dist = bData->average_dist * (double)CANVAS_REL_SIZE / (double)getSurfaceDimension(sData); + avg_dist = bData->average_dist * double(CANVAS_REL_SIZE) / double(getSurfaceDimension(sData)); - steps = (int)ceilf(1.5f * EFF_MOVEMENT_PER_FRAME * fastest_effect / avg_dist * timescale); + steps = int(ceilf(1.5f * EFF_MOVEMENT_PER_FRAME * fastest_effect / avg_dist * timescale)); CLAMP(steps, 1, 20); return steps; @@ -5163,9 +5201,9 @@ static int dynamicPaint_prepareEffectStep(struct Depsgraph *depsgraph, */ static void dynamic_paint_effect_spread_cb(void *__restrict userdata, const int index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintEffectData *data = userdata; + const DynamicPaintEffectData *data = static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -5177,7 +5215,7 @@ static void dynamic_paint_effect_spread_cb(void *__restrict userdata, const int numOfNeighs = sData->adj_data->n_num[index]; BakeAdjPoint *bNeighs = sData->bData->bNeighs; PaintPoint *pPoint = &((PaintPoint *)sData->type_data)[index]; - const PaintPoint *prevPoint = data->prevPoint; + const PaintPoint *prevPoint = static_cast(data->prevPoint); const float eff_scale = data->eff_scale; const int *n_index = sData->adj_data->n_index; @@ -5222,9 +5260,9 @@ static void dynamic_paint_effect_spread_cb(void *__restrict userdata, static void dynamic_paint_effect_shrink_cb(void *__restrict userdata, const int index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintEffectData *data = userdata; + const DynamicPaintEffectData *data = static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -5236,7 +5274,7 @@ static void dynamic_paint_effect_shrink_cb(void *__restrict userdata, const int numOfNeighs = sData->adj_data->n_num[index]; BakeAdjPoint *bNeighs = sData->bData->bNeighs; PaintPoint *pPoint = &((PaintPoint *)sData->type_data)[index]; - const PaintPoint *prevPoint = data->prevPoint; + const PaintPoint *prevPoint = static_cast(data->prevPoint); const float eff_scale = data->eff_scale; const int *n_index = sData->adj_data->n_index; @@ -5280,9 +5318,9 @@ static void dynamic_paint_effect_shrink_cb(void *__restrict userdata, static void dynamic_paint_effect_drip_cb(void *__restrict userdata, const int index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintEffectData *data = userdata; + const DynamicPaintEffectData *data = static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -5293,7 +5331,7 @@ static void dynamic_paint_effect_drip_cb(void *__restrict userdata, BakeAdjPoint *bNeighs = sData->bData->bNeighs; PaintPoint *pPoint = &((PaintPoint *)sData->type_data)[index]; - const PaintPoint *prevPoint = data->prevPoint; + const PaintPoint *prevPoint = static_cast(data->prevPoint); const PaintPoint *pPoint_prev = &prevPoint[index]; const float *force = data->force; const float eff_scale = data->eff_scale; @@ -5331,9 +5369,9 @@ static void dynamic_paint_effect_drip_cb(void 
*__restrict userdata, float dir_factor, a_factor; const float speed_scale = eff_scale * force[index * 4 + 3] / bNeighs[n_idx].dist; - const uint n_trgt = (uint)n_target[n_idx]; + const uint n_trgt = uint(n_target[n_idx]); - /* Sort of spinlock, but only for given ePoint. + /* Sort of spin-lock, but only for given ePoint. * Since the odds a same ePoint is modified at the same time by several threads is very low, * this is much more efficient than a global spin lock. */ const uint epointlock_idx = n_trgt / 8; @@ -5430,13 +5468,13 @@ static void dynamicPaint_doEffectStep( timescale; /* Copy current surface to the previous points array to read unmodified values */ - memcpy(prevPoint, sData->type_data, sData->total_points * sizeof(struct PaintPoint)); + memcpy(prevPoint, sData->type_data, sData->total_points * sizeof(PaintPoint)); + + DynamicPaintEffectData data{}; + data.surface = surface; + data.prevPoint = prevPoint; + data.eff_scale = eff_scale; - DynamicPaintEffectData data = { - .surface = surface, - .prevPoint = prevPoint, - .eff_scale = eff_scale, - }; TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -5452,13 +5490,13 @@ static void dynamicPaint_doEffectStep( timescale; /* Copy current surface to the previous points array to read unmodified values */ - memcpy(prevPoint, sData->type_data, sData->total_points * sizeof(struct PaintPoint)); + memcpy(prevPoint, sData->type_data, sData->total_points * sizeof(PaintPoint)); + + DynamicPaintEffectData data{}; + data.surface = surface; + data.prevPoint = prevPoint; + data.eff_scale = eff_scale; - DynamicPaintEffectData data = { - .surface = surface, - .prevPoint = prevPoint, - .eff_scale = eff_scale, - }; TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -5474,18 +5512,19 @@ static void dynamicPaint_doEffectStep( /* Same as #BLI_bitmask, but handled atomically as 'ePoint' locks. 
*/ const size_t point_locks_size = (sData->total_points / 8) + 1; - uint8_t *point_locks = MEM_callocN(sizeof(*point_locks) * point_locks_size, __func__); + uint8_t *point_locks = static_cast( + MEM_callocN(sizeof(*point_locks) * point_locks_size, __func__)); /* Copy current surface to the previous points array to read unmodified values */ - memcpy(prevPoint, sData->type_data, sData->total_points * sizeof(struct PaintPoint)); + memcpy(prevPoint, sData->type_data, sData->total_points * sizeof(PaintPoint)); + + DynamicPaintEffectData data{}; + data.surface = surface; + data.prevPoint = prevPoint; + data.eff_scale = eff_scale; + data.force = force; + data.point_locks = point_locks; - DynamicPaintEffectData data = { - .surface = surface, - .prevPoint = prevPoint, - .eff_scale = eff_scale, - .force = force, - .point_locks = point_locks, - }; TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -5498,9 +5537,9 @@ static void dynamicPaint_doEffectStep( static void dynamic_paint_border_cb(void *__restrict userdata, const int b_index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintEffectData *data = userdata; + const DynamicPaintEffectData *data = static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -5565,9 +5604,8 @@ static void dynamicPaint_doBorderStep(DynamicPaintSurface *surface) } /* Don't use prevPoint, relying on the condition that neighbors are never border pixels. */ - DynamicPaintEffectData data = { - .surface = surface, - }; + DynamicPaintEffectData data{}; + data.surface = surface; TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); @@ -5578,14 +5616,14 @@ static void dynamicPaint_doBorderStep(DynamicPaintSurface *surface) static void dynamic_paint_wave_step_cb(void *__restrict userdata, const int index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintEffectData *data = userdata; + const DynamicPaintEffectData *data = static_cast(userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; BakeAdjPoint *bNeighs = sData->bData->bNeighs; - const PaintWavePoint *prevPoint = data->prevPoint; + const PaintWavePoint *prevPoint = static_cast(data->prevPoint); const float wave_speed = data->wave_speed; const float wave_scale = data->wave_scale; @@ -5691,7 +5729,8 @@ static void dynamicPaint_doWaveStep(DynamicPaintSurface *surface, float timescal const float wave_scale = CANVAS_REL_SIZE / canvas_size; /* allocate memory */ - PaintWavePoint *prevPoint = MEM_mallocN(sData->total_points * sizeof(PaintWavePoint), __func__); + PaintWavePoint *prevPoint = static_cast( + MEM_mallocN(sData->total_points * sizeof(PaintWavePoint), __func__)); if (!prevPoint) { return; } @@ -5701,14 +5740,14 @@ static void dynamicPaint_doWaveStep(DynamicPaintSurface *surface, float timescal int numOfNeighs = sData->adj_data->n_num[index]; for (int i = 0; i < numOfNeighs; i++) { - average_dist += (double)bNeighs[sData->adj_data->n_index[index] + i].dist; + average_dist += double(bNeighs[sData->adj_data->n_index[index] + i].dist); } } - average_dist *= (double)wave_scale / sData->adj_data->total_targets; + average_dist *= double(wave_scale) / sData->adj_data->total_targets; /* determine number of required steps */ - steps = (int)ceil((double)(WAVE_TIME_FAC * 
timescale * surface->wave_timescale) / - (average_dist / (double)wave_speed / 3)); + steps = (int)ceil(double(WAVE_TIME_FAC * timescale * surface->wave_timescale) / + (average_dist / double(wave_speed) / 3)); CLAMP(steps, 1, 20); timescale /= steps; @@ -5721,17 +5760,17 @@ static void dynamicPaint_doWaveStep(DynamicPaintSurface *surface, float timescal /* copy previous frame data */ memcpy(prevPoint, sData->type_data, sData->total_points * sizeof(PaintWavePoint)); - DynamicPaintEffectData data = { - .surface = surface, - .prevPoint = prevPoint, - .wave_speed = wave_speed, - .wave_scale = wave_scale, - .wave_max_slope = wave_max_slope, - .dt = dt, - .min_dist = min_dist, - .damp_factor = damp_factor, - .reset_wave = (ss == steps - 1), - }; + DynamicPaintEffectData data{}; + data.surface = surface; + data.prevPoint = prevPoint; + data.wave_speed = wave_speed; + data.wave_scale = wave_scale; + data.wave_max_slope = wave_max_slope; + data.dt = dt; + data.min_dist = min_dist; + data.damp_factor = damp_factor; + data.reset_wave = (ss == steps - 1); + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -5750,16 +5789,17 @@ static bool dynamic_paint_surface_needs_dry_dissolve(DynamicPaintSurface *surfac (surface->flags & MOD_DPAINT_DISSOLVE))); } -typedef struct DynamicPaintDissolveDryData { +struct DynamicPaintDissolveDryData { const DynamicPaintSurface *surface; - const float timescale; -} DynamicPaintDissolveDryData; + float timescale; +}; static void dynamic_paint_surface_pre_step_cb(void *__restrict userdata, const int index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintDissolveDryData *data = userdata; + const DynamicPaintDissolveDryData *data = static_cast( + userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -5811,7 +5851,7 @@ static void dynamic_paint_surface_pre_step_cb(void *__restrict userdata, pPoint->state = DPAINT_PAINT_WET; } - /* in case of just dryed paint, just mix it to the dry layer and mark it empty */ + /* In case of just dried paint, just mix it to the dry layer and mark it empty. 
*/ else if (pPoint->state > 0) { float f_color[4]; blendColors(pPoint->color, pPoint->color[3], pPoint->e_color, pPoint->e_color[3], f_color); @@ -5877,7 +5917,7 @@ static bool dynamicPaint_surfaceHasMoved(DynamicPaintSurface *surface, Object *o } /* Prepare for surface step by creating PaintBakeNormal data */ -typedef struct DynamicPaintGenerateBakeData { +struct DynamicPaintGenerateBakeData { const DynamicPaintSurface *surface; Object *ob; @@ -5885,15 +5925,16 @@ typedef struct DynamicPaintGenerateBakeData { const float (*vert_normals)[3]; const Vec3f *canvas_verts; - const bool do_velocity_data; - const bool new_bdata; -} DynamicPaintGenerateBakeData; + bool do_velocity_data; + bool new_bdata; +}; static void dynamic_paint_generate_bake_data_cb(void *__restrict userdata, const int index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - const DynamicPaintGenerateBakeData *data = userdata; + const DynamicPaintGenerateBakeData *data = static_cast( + userdata); const DynamicPaintSurface *surface = data->surface; const PaintSurfaceData *sData = surface->data; @@ -6035,16 +6076,16 @@ static bool dynamicPaint_generateBakeData(DynamicPaintSurface *surface, } } - canvas_verts = (struct Vec3f *)MEM_mallocN(canvasNumOfVerts * sizeof(struct Vec3f), - "Dynamic Paint transformed canvas verts"); + canvas_verts = (Vec3f *)MEM_mallocN(canvasNumOfVerts * sizeof(Vec3f), + "Dynamic Paint transformed canvas verts"); if (!canvas_verts) { return false; } /* allocate memory if required */ if (!bData) { - sData->bData = bData = (struct PaintBakeData *)MEM_callocN(sizeof(struct PaintBakeData), - "Dynamic Paint bake data"); + sData->bData = bData = (PaintBakeData *)MEM_callocN(sizeof(PaintBakeData), + "Dynamic Paint bake data"); if (!bData) { if (canvas_verts) { MEM_freeN(canvas_verts); @@ -6053,14 +6094,16 @@ static bool dynamicPaint_generateBakeData(DynamicPaintSurface *surface, } /* Init bdata */ - bData->bNormal = (struct PaintBakeNormal *)MEM_mallocN( - sData->total_points * sizeof(struct PaintBakeNormal), "Dynamic Paint step data"); - bData->s_pos = MEM_mallocN(sData->total_points * sizeof(uint), "Dynamic Paint bData s_pos"); - bData->s_num = MEM_mallocN(sData->total_points * sizeof(uint), "Dynamic Paint bData s_num"); - bData->realCoord = (struct Vec3f *)MEM_mallocN(surface_totalSamples(surface) * sizeof(Vec3f), - "Dynamic Paint point coords"); - bData->prev_positions = MEM_mallocN(canvasNumOfVerts * sizeof(float[3]), - "Dynamic Paint bData prev_positions"); + bData->bNormal = (PaintBakeNormal *)MEM_mallocN(sData->total_points * sizeof(PaintBakeNormal), + "Dynamic Paint step data"); + bData->s_pos = static_cast( + MEM_mallocN(sData->total_points * sizeof(uint), "Dynamic Paint bData s_pos")); + bData->s_num = static_cast( + MEM_mallocN(sData->total_points * sizeof(uint), "Dynamic Paint bData s_num")); + bData->realCoord = (Vec3f *)MEM_mallocN(surface_totalSamples(surface) * sizeof(Vec3f), + "Dynamic Paint point coords"); + bData->prev_positions = static_cast( + MEM_mallocN(canvasNumOfVerts * sizeof(float[3]), "Dynamic Paint bData prev_positions")); /* if any allocation failed, free everything */ if (!bData->bNormal || !bData->s_pos || !bData->s_num || !bData->realCoord || !canvas_verts) { @@ -6087,12 +6130,12 @@ static bool dynamicPaint_generateBakeData(DynamicPaintSurface *surface, } if (do_velocity_data && !bData->velocity) { - bData->velocity = (struct Vec3f *)MEM_callocN(sData->total_points * sizeof(Vec3f), - "Dynamic Paint velocity"); + bData->velocity = 
(Vec3f *)MEM_callocN(sData->total_points * sizeof(Vec3f), + "Dynamic Paint velocity"); } if (do_accel_data && !bData->prev_velocity) { - bData->prev_velocity = (struct Vec3f *)MEM_mallocN(sData->total_points * sizeof(Vec3f), - "Dynamic Paint prev velocity"); + bData->prev_velocity = (Vec3f *)MEM_mallocN(sData->total_points * sizeof(Vec3f), + "Dynamic Paint prev velocity"); /* copy previous vel */ if (bData->prev_velocity && bData->velocity) { memcpy(bData->prev_velocity, bData->velocity, sData->total_points * sizeof(Vec3f)); @@ -6112,15 +6155,15 @@ static bool dynamicPaint_generateBakeData(DynamicPaintSurface *surface, /* * Prepare each surface point for a new step */ - DynamicPaintGenerateBakeData data = { - .surface = surface, - .ob = ob, - .positions = positions, - .vert_normals = BKE_mesh_vertex_normals_ensure(mesh), - .canvas_verts = canvas_verts, - .do_velocity_data = do_velocity_data, - .new_bdata = new_bdata, - }; + DynamicPaintGenerateBakeData data{}; + data.surface = surface; + data.ob = ob; + data.positions = positions; + data.vert_normals = BKE_mesh_vertex_normals_ensure(mesh); + data.canvas_verts = canvas_verts; + data.do_velocity_data = do_velocity_data; + data.new_bdata = new_bdata; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -6164,10 +6207,10 @@ static int dynamicPaint_doStep(Depsgraph *depsgraph, } if (dynamic_paint_surface_needs_dry_dissolve(surface)) { - DynamicPaintDissolveDryData data = { - .surface = surface, - .timescale = timescale, - }; + DynamicPaintDissolveDryData data{}; + data.surface = surface; + data.timescale = timescale; + TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); settings.use_threading = (sData->total_points > 1000); @@ -6181,7 +6224,7 @@ static int dynamicPaint_doStep(Depsgraph *depsgraph, { uint numobjects; Object **objects = BKE_collision_objects_create( - depsgraph, NULL, surface->brush_group, &numobjects, eModifierType_DynamicPaint); + depsgraph, nullptr, surface->brush_group, &numobjects, eModifierType_DynamicPaint); /* backup current scene frame */ int scene_frame = scene->r.cfra; @@ -6200,8 +6243,8 @@ static int dynamicPaint_doStep(Depsgraph *depsgraph, /* calculate brush speed vectors if required */ if (surface->type == MOD_DPAINT_SURFACE_T_PAINT && brush->flags & MOD_DPAINT_DO_SMUDGE) { - bData->brush_velocity = MEM_callocN(sizeof(float[4]) * sData->total_points, - "Dynamic Paint brush velocity"); + bData->brush_velocity = static_cast(MEM_callocN( + sizeof(float[4]) * sData->total_points, "Dynamic Paint brush velocity")); /* init adjacency data if not already */ if (!sData->adj_data) { dynamicPaint_initAdjacencyData(surface, true); @@ -6271,7 +6314,7 @@ static int dynamicPaint_doStep(Depsgraph *depsgraph, dynamicPaint_doSmudge(surface, brush, timescale); } MEM_freeN(bData->brush_velocity); - bData->brush_velocity = NULL; + bData->brush_velocity = nullptr; } } } @@ -6291,11 +6334,11 @@ static int dynamicPaint_doStep(Depsgraph *depsgraph, if (surface->effect && surface->type == MOD_DPAINT_SURFACE_T_PAINT) { int steps = 1, s; PaintPoint *prevPoint; - float *force = NULL; + float *force = nullptr; /* Allocate memory for surface previous points to read unchanged values from */ - prevPoint = MEM_mallocN(sData->total_points * sizeof(struct PaintPoint), - "PaintSurfaceDataCopy"); + prevPoint = static_cast( + MEM_mallocN(sData->total_points * sizeof(PaintPoint), "PaintSurfaceDataCopy")); if (!prevPoint) { return 
setError(canvas, N_("Not enough free memory")); } @@ -6303,7 +6346,7 @@ static int dynamicPaint_doStep(Depsgraph *depsgraph, /* Prepare effects and get number of required steps */ steps = dynamicPaint_prepareEffectStep(depsgraph, surface, scene, ob, &force, timescale); for (s = 0; s < steps; s++) { - dynamicPaint_doEffectStep(surface, force, prevPoint, timescale, (float)steps); + dynamicPaint_doEffectStep(surface, force, prevPoint, timescale, float(steps)); } /* Free temporary effect data */ @@ -6324,11 +6367,8 @@ static int dynamicPaint_doStep(Depsgraph *depsgraph, return ret; } -int dynamicPaint_calculateFrame(DynamicPaintSurface *surface, - struct Depsgraph *depsgraph, - Scene *scene, - Object *cObject, - int frame) +int dynamicPaint_calculateFrame( + DynamicPaintSurface *surface, Depsgraph *depsgraph, Scene *scene, Object *cObject, int frame) { float timescale = 1.0f; @@ -6346,7 +6386,7 @@ int dynamicPaint_calculateFrame(DynamicPaintSurface *surface, timescale = 1.0f / (surface->substeps + 1); for (st = 1; st <= surface->substeps; st++) { - float subframe = ((float)st) / (surface->substeps + 1); + float subframe = (float(st)) / (surface->substeps + 1); if (!dynamicPaint_doStep(depsgraph, scene, cObject, surface, timescale, subframe)) { return 0; } diff --git a/source/blender/blenkernel/intern/dyntopo.cc b/source/blender/blenkernel/intern/dyntopo.cc index 6b84f27ec73..672698cf221 100644 --- a/source/blender/blenkernel/intern/dyntopo.cc +++ b/source/blender/blenkernel/intern/dyntopo.cc @@ -42,7 +42,7 @@ #include "bmesh_log.h" #include "dyntopo_intern.hh" -#include "pbvh_intern.h" +#include "pbvh_intern.hh" #include diff --git a/source/blender/blenkernel/intern/dyntopo_collapse.cc b/source/blender/blenkernel/intern/dyntopo_collapse.cc index 19ffd7da1c1..146c782d2ff 100644 --- a/source/blender/blenkernel/intern/dyntopo_collapse.cc +++ b/source/blender/blenkernel/intern/dyntopo_collapse.cc @@ -32,7 +32,7 @@ #include "bmesh_log.h" #include "dyntopo_intern.hh" -#include "pbvh_intern.h" +#include "pbvh_intern.hh" #include #include diff --git a/source/blender/blenkernel/intern/dyntopo_intern.hh b/source/blender/blenkernel/intern/dyntopo_intern.hh index 277b6f42784..879a93bbc6a 100644 --- a/source/blender/blenkernel/intern/dyntopo_intern.hh +++ b/source/blender/blenkernel/intern/dyntopo_intern.hh @@ -3,7 +3,7 @@ #include "BKE_paint.h" #include "BKE_pbvh.h" #include "bmesh.h" -#include "pbvh_intern.h" +#include "pbvh_intern.hh" struct MinMaxHeap; struct GHash; diff --git a/source/blender/blenkernel/intern/fluid.c b/source/blender/blenkernel/intern/fluid.cc similarity index 94% rename from source/blender/blenkernel/intern/fluid.c rename to source/blender/blenkernel/intern/fluid.cc index b95d82e83d8..47099b1714e 100644 --- a/source/blender/blenkernel/intern/fluid.c +++ b/source/blender/blenkernel/intern/fluid.cc @@ -34,10 +34,10 @@ #ifdef WITH_FLUID -# include -# include -# include -# include /* memset */ +# include +# include +# include +# include /* memset */ # include "DNA_customdata_types.h" # include "DNA_light_types.h" @@ -80,7 +80,7 @@ /** Max value for phi initialization */ #define PHI_MAX 9999.0f -static void fluid_modifier_reset_ex(struct FluidModifierData *fmd, bool need_lock); +static void fluid_modifier_reset_ex(FluidModifierData *fmd, bool need_lock); #ifdef WITH_FLUID // #define DEBUG_PRINT @@ -93,11 +93,6 @@ static CLG_LogRef LOG = {"bke.fluid"}; static ThreadMutex object_update_lock = BLI_MUTEX_INITIALIZER; -struct FluidModifierData; -struct Mesh; -struct Object; -struct Scene; - # 
define ADD_IF_LOWER_POS(a, b) (min_ff((a) + (b), max_ff((a), (b)))) # define ADD_IF_LOWER_NEG(a, b) (max_ff((a) + (b), min_ff((a), (b)))) # define ADD_IF_LOWER(a, b) (((b) > 0) ? ADD_IF_LOWER_POS((a), (b)) : ADD_IF_LOWER_NEG((a), (b))) @@ -108,7 +103,7 @@ bool BKE_fluid_reallocate_fluid(FluidDomainSettings *fds, int res[3], int free_o manta_free(fds->fluid); } if (!min_iii(res[0], res[1], res[2])) { - fds->fluid = NULL; + fds->fluid = nullptr; } else { fds->fluid = manta_init(res, fds->fmd); @@ -118,7 +113,7 @@ bool BKE_fluid_reallocate_fluid(FluidDomainSettings *fds, int res[3], int free_o fds->res_noise[2] = res[2] * fds->noise_scale; } - return (fds->fluid != NULL); + return (fds->fluid != nullptr); } void BKE_fluid_reallocate_copy_fluid(FluidDomainSettings *fds, @@ -130,7 +125,7 @@ void BKE_fluid_reallocate_copy_fluid(FluidDomainSettings *fds, int o_shift[3], int n_shift[3]) { - struct MANTA *fluid_old = fds->fluid; + MANTA *fluid_old = fds->fluid; const int block_size = fds->noise_scale; int new_shift[3] = {0}; sub_v3_v3v3_int(new_shift, n_shift, o_shift); @@ -446,28 +441,28 @@ static void manta_set_domain_from_mesh(FluidDomainSettings *fds, scale = res / size[0]; fds->scale = size[0] / fabsf(ob->scale[0]); fds->base_res[0] = res; - fds->base_res[1] = max_ii((int)(size[1] * scale + 0.5f), 4); - fds->base_res[2] = max_ii((int)(size[2] * scale + 0.5f), 4); + fds->base_res[1] = max_ii(int(size[1] * scale + 0.5f), 4); + fds->base_res[2] = max_ii(int(size[2] * scale + 0.5f), 4); } else if (size[1] >= MAX2(size[0], size[2])) { scale = res / size[1]; fds->scale = size[1] / fabsf(ob->scale[1]); - fds->base_res[0] = max_ii((int)(size[0] * scale + 0.5f), 4); + fds->base_res[0] = max_ii(int(size[0] * scale + 0.5f), 4); fds->base_res[1] = res; - fds->base_res[2] = max_ii((int)(size[2] * scale + 0.5f), 4); + fds->base_res[2] = max_ii(int(size[2] * scale + 0.5f), 4); } else { scale = res / size[2]; fds->scale = size[2] / fabsf(ob->scale[2]); - fds->base_res[0] = max_ii((int)(size[0] * scale + 0.5f), 4); - fds->base_res[1] = max_ii((int)(size[1] * scale + 0.5f), 4); + fds->base_res[0] = max_ii(int(size[0] * scale + 0.5f), 4); + fds->base_res[1] = max_ii(int(size[1] * scale + 0.5f), 4); fds->base_res[2] = res; } /* Set cell size. 
*/ - fds->cell_size[0] /= (float)fds->base_res[0]; - fds->cell_size[1] /= (float)fds->base_res[1]; - fds->cell_size[2] /= (float)fds->base_res[2]; + fds->cell_size[0] /= float(fds->base_res[0]); + fds->cell_size[1] /= float(fds->base_res[1]); + fds->cell_size[2] /= float(fds->base_res[2]); } static void update_final_gravity(FluidDomainSettings *fds, Scene *scene) @@ -484,7 +479,7 @@ static void update_final_gravity(FluidDomainSettings *fds, Scene *scene) static bool fluid_modifier_init( FluidModifierData *fmd, Depsgraph *depsgraph, Object *ob, Scene *scene, Mesh *me) { - int scene_framenr = (int)DEG_get_ctime(depsgraph); + int scene_framenr = int(DEG_get_ctime(depsgraph)); if ((fmd->type & MOD_FLUID_TYPE_DOMAIN) && fmd->domain && !fmd->domain->fluid) { FluidDomainSettings *fds = fmd->domain; @@ -563,7 +558,7 @@ static int get_light(Scene *scene, ViewLayer *view_layer, float *light) BKE_view_layer_synced_ensure(scene, view_layer); LISTBASE_FOREACH (Base *, base_tmp, BKE_view_layer_object_bases_get(view_layer)) { if (base_tmp->object->type == OB_LAMP) { - Light *la = base_tmp->object->data; + Light *la = static_cast(base_tmp->object->data); if (la->type == LA_LOCAL) { copy_v3_v3(light, base_tmp->object->object_to_world[3]); @@ -595,10 +590,10 @@ static void clamp_bounds_in_domain(FluidDomainSettings *fds, /* Adapt to velocity. */ if (min_vel && min_vel[i] < 0.0f) { - min[i] += (int)floor(min_vel[i] * dt); + min[i] += int(floor(min_vel[i] * dt)); } if (max_vel && max_vel[i] > 0.0f) { - max[i] += (int)ceil(max_vel[i] * dt); + max[i] += int(ceil(max_vel[i] * dt)); } /* Clamp within domain max size. */ @@ -640,7 +635,7 @@ static bool is_static_object(Object *ob) /** \name Bounding Box * \{ */ -typedef struct FluidObjectBB { +struct FluidObjectBB { float *influence; float *velocity; float *distances; @@ -648,25 +643,25 @@ typedef struct FluidObjectBB { int min[3], max[3], res[3]; int hmin[3], hmax[3], hres[3]; int total_cells, valid; -} FluidObjectBB; +}; static void bb_boundInsert(FluidObjectBB *bb, const float point[3]) { int i = 0; if (!bb->valid) { for (; i < 3; i++) { - bb->min[i] = (int)floor(point[i]); - bb->max[i] = (int)ceil(point[i]); + bb->min[i] = int(floor(point[i])); + bb->max[i] = int(ceil(point[i])); } bb->valid = 1; } else { for (; i < 3; i++) { if (point[i] < bb->min[i]) { - bb->min[i] = (int)floor(point[i]); + bb->min[i] = int(floor(point[i])); } if (point[i] > bb->max[i]) { - bb->max[i] = (int)ceil(point[i]); + bb->max[i] = int(ceil(point[i])); } } } @@ -685,15 +680,19 @@ static void bb_allocateData(FluidObjectBB *bb, bool use_velocity, bool use_influ bb->total_cells = res[0] * res[1] * res[2]; copy_v3_v3_int(bb->res, res); - bb->numobjs = MEM_calloc_arrayN(bb->total_cells, sizeof(float), "fluid_bb_numobjs"); + bb->numobjs = static_cast( + MEM_calloc_arrayN(bb->total_cells, sizeof(float), "fluid_bb_numobjs")); if (use_influence) { - bb->influence = MEM_calloc_arrayN(bb->total_cells, sizeof(float), "fluid_bb_influence"); + bb->influence = static_cast( + MEM_calloc_arrayN(bb->total_cells, sizeof(float), "fluid_bb_influence")); } if (use_velocity) { - bb->velocity = MEM_calloc_arrayN(bb->total_cells, sizeof(float[3]), "fluid_bb_velocity"); + bb->velocity = static_cast( + MEM_calloc_arrayN(bb->total_cells, sizeof(float[3]), "fluid_bb_velocity")); } - bb->distances = MEM_malloc_arrayN(bb->total_cells, sizeof(float), "fluid_bb_distances"); + bb->distances = static_cast( + MEM_malloc_arrayN(bb->total_cells, sizeof(float), "fluid_bb_distances")); copy_vn_fl(bb->distances, 
bb->total_cells, FLT_MAX); bb->valid = true; @@ -722,7 +721,7 @@ static void bb_combineMaps(FluidObjectBB *output, { int i, x, y, z; - /* Copyfill input 1 struct and clear output for new allocation. */ + /* Copy-fill input 1 struct and clear output for new allocation. */ FluidObjectBB bb1; memcpy(&bb1, output, sizeof(FluidObjectBB)); memset(output, 0, sizeof(FluidObjectBB)); @@ -810,7 +809,7 @@ static void bb_combineMaps(FluidObjectBB *output, /** \name Effectors * \{ */ -BLI_INLINE void apply_effector_fields(FluidEffectorSettings *UNUSED(fes), +BLI_INLINE void apply_effector_fields(FluidEffectorSettings * /*fes*/, int index, float src_distance_value, float *dest_phi_in, @@ -935,7 +934,7 @@ static void update_velocities(FluidEffectorSettings *fes, } } -typedef struct ObstaclesFromDMData { +struct ObstaclesFromDMData { FluidEffectorSettings *fes; const float (*vert_positions)[3]; @@ -948,20 +947,20 @@ typedef struct ObstaclesFromDMData { bool has_velocity; float *vert_vel; int *min, *max, *res; -} ObstaclesFromDMData; +}; static void obstacles_from_mesh_task_cb(void *__restrict userdata, const int z, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - ObstaclesFromDMData *data = userdata; + ObstaclesFromDMData *data = static_cast(userdata); FluidObjectBB *bb = data->bb; for (int x = data->min[0]; x < data->max[0]; x++) { for (int y = data->min[1]; y < data->max[1]; y++) { const int index = manta_get_index( x - bb->min[0], bb->res[0], y - bb->min[1], bb->res[1], z - bb->min[2]); - const float ray_start[3] = {(float)x + 0.5f, (float)y + 0.5f, (float)z + 0.5f}; + const float ray_start[3] = {float(x) + 0.5f, float(y) + 0.5f, float(z) + 0.5f}; /* Calculate levelset values from meshes. Result in bb->distances. */ update_distances(index, @@ -999,10 +998,10 @@ static void obstacles_from_mesh(Object *coll_ob, { if (fes->mesh) { const MLoopTri *looptri; - BVHTreeFromMesh tree_data = {NULL}; + BVHTreeFromMesh tree_data = {nullptr}; int numverts, i; - float *vert_vel = NULL; + float *vert_vel = nullptr; bool has_velocity = false; Mesh *me = BKE_mesh_copy_for_eval(fes->mesh, false); @@ -1016,14 +1015,16 @@ static void obstacles_from_mesh(Object *coll_ob, /* TODO(sebbas): Make initialization of vertex velocities optional? */ { - vert_vel = MEM_callocN(sizeof(float[3]) * numverts, "manta_obs_velocity"); + vert_vel = static_cast( + MEM_callocN(sizeof(float[3]) * numverts, "manta_obs_velocity")); if (fes->numverts != numverts || !fes->verts_old) { if (fes->verts_old) { MEM_freeN(fes->verts_old); } - fes->verts_old = MEM_callocN(sizeof(float[3]) * numverts, "manta_obs_verts_old"); + fes->verts_old = static_cast( + MEM_callocN(sizeof(float[3]) * numverts, "manta_obs_verts_old")); fes->numverts = numverts; } else { @@ -1054,8 +1055,8 @@ static void obstacles_from_mesh(Object *coll_ob, /* Set emission map. * Use 3 cell diagonals as margin (3 * 1.732 = 5.196). */ - int bounds_margin = (int)ceil(5.196); - clamp_bounds_in_domain(fds, bb->min, bb->max, NULL, NULL, bounds_margin, dt); + int bounds_margin = int(ceil(5.196)); + clamp_bounds_in_domain(fds, bb->min, bb->max, nullptr, nullptr, bounds_margin, dt); bb_allocateData(bb, true, false); /* Setup loop bounds. 
*/ @@ -1069,19 +1070,18 @@ static void obstacles_from_mesh(Object *coll_ob, bool use_effector = fes->flags & FLUID_EFFECTOR_USE_EFFEC; if (use_effector && BKE_bvhtree_from_mesh_get(&tree_data, me, BVHTREE_FROM_LOOPTRI, 4)) { - ObstaclesFromDMData data = { - .fes = fes, - .vert_positions = positions, - .mloop = mloop, - .mlooptri = looptri, - .tree = &tree_data, - .bb = bb, - .has_velocity = has_velocity, - .vert_vel = vert_vel, - .min = min, - .max = max, - .res = res, - }; + ObstaclesFromDMData data{}; + data.fes = fes; + data.vert_positions = positions; + data.mloop = mloop; + data.mlooptri = looptri; + data.tree = &tree_data; + data.bb = bb; + data.has_velocity = has_velocity; + data.vert_vel = vert_vel; + data.min = min; + data.max = max; + data.res = res; TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); @@ -1094,7 +1094,7 @@ static void obstacles_from_mesh(Object *coll_ob, if (vert_vel) { MEM_freeN(vert_vel); } - BKE_id_free(NULL, me); + BKE_id_free(nullptr, me); } } @@ -1153,7 +1153,7 @@ static void update_obstacleflags(FluidDomainSettings *fds, static bool escape_effectorobject(Object *flowobj, FluidDomainSettings *fds, - FluidEffectorSettings *UNUSED(fes), + FluidEffectorSettings * /*fes*/, int frame) { bool is_static = is_static_object(flowobj); @@ -1176,7 +1176,7 @@ static bool escape_effectorobject(Object *flowobj, static void compute_obstaclesemission(Scene *scene, FluidObjectBB *bb_maps, - struct Depsgraph *depsgraph, + Depsgraph *depsgraph, float dt, Object **effecobjs, int frame, @@ -1216,14 +1216,14 @@ static void compute_obstaclesemission(Scene *scene, } /* More splitting because of emission subframe: If no subframes present, sample_size is 1. */ - float sample_size = 1.0f / (float)(subframes + 1); + float sample_size = 1.0f / float(subframes + 1); float subframe_dt = dt * sample_size; /* Emission loop. When not using subframes this will loop only once. */ for (int subframe = 0; subframe <= subframes; subframe++) { /* Temporary emission map used when subframes are enabled, i.e. at least one subframe. */ - FluidObjectBB bb_temp = {NULL}; + FluidObjectBB bb_temp = {nullptr}; /* Set scene time */ /* Handle emission subframe */ @@ -1284,8 +1284,8 @@ static void update_obstacles(Depsgraph *depsgraph, int frame, float dt) { - FluidObjectBB *bb_maps = NULL; - Object **effecobjs = NULL; + FluidObjectBB *bb_maps = nullptr; + Object **effecobjs = nullptr; uint numeffecobjs = 0; bool is_resume = (fds->cache_frame_pause_data == frame); bool is_first_frame = (frame == fds->cache_frame_start); @@ -1298,7 +1298,8 @@ static void update_obstacles(Depsgraph *depsgraph, ensure_obstaclefields(fds); /* Allocate effector map for each effector object. */ - bb_maps = MEM_callocN(sizeof(struct FluidObjectBB) * numeffecobjs, "fluid_effector_bb_maps"); + bb_maps = static_cast( + MEM_callocN(sizeof(FluidObjectBB) * numeffecobjs, "fluid_effector_bb_maps")); /* Initialize effector map for each effector object. 
*/ compute_obstaclesemission(scene, @@ -1459,7 +1460,7 @@ static void update_obstacles(Depsgraph *depsgraph, /** \name Flow * \{ */ -typedef struct EmitFromParticlesData { +struct EmitFromParticlesData { FluidFlowSettings *ffs; KDTree_3d *tree; @@ -1469,13 +1470,13 @@ typedef struct EmitFromParticlesData { float solid; float smooth; -} EmitFromParticlesData; +}; static void emit_from_particles_task_cb(void *__restrict userdata, const int z, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - EmitFromParticlesData *data = userdata; + EmitFromParticlesData *data = static_cast(userdata); FluidFlowSettings *ffs = data->ffs; FluidObjectBB *bb = data->bb; @@ -1483,7 +1484,7 @@ static void emit_from_particles_task_cb(void *__restrict userdata, for (int y = data->min[1]; y < data->max[1]; y++) { const int index = manta_get_index( x - bb->min[0], bb->res[0], y - bb->min[1], bb->res[1], z - bb->min[2]); - const float ray_start[3] = {((float)x) + 0.5f, ((float)y) + 0.5f, ((float)z) + 0.5f}; + const float ray_start[3] = {(float(x)) + 0.5f, (float(y)) + 0.5f, (float(z)) + 0.5f}; /* Find particle distance from the kdtree. */ KDTreeNearest_3d nearest; @@ -1527,7 +1528,7 @@ static void emit_from_particles(Object *flow_ob, /* radius based flow */ const float solid = ffs->particle_size * 0.5f; const float smooth = 0.5f; /* add 0.5 cells of linear falloff to reduce aliasing */ - KDTree_3d *tree = NULL; + KDTree_3d *tree = nullptr; sim.depsgraph = depsgraph; sim.scene = scene; @@ -1545,15 +1546,15 @@ static void emit_from_particles(Object *flow_ob, totchild = psys->totchild * psys->part->disp / 100; } - particle_pos = MEM_callocN(sizeof(float[3]) * (totpart + totchild), - "manta_flow_particles_pos"); - particle_vel = MEM_callocN(sizeof(float[3]) * (totpart + totchild), - "manta_flow_particles_vel"); + particle_pos = static_cast( + MEM_callocN(sizeof(float[3]) * (totpart + totchild), "manta_flow_particles_pos")); + particle_vel = static_cast( + MEM_callocN(sizeof(float[3]) * (totpart + totchild), "manta_flow_particles_vel")); /* setup particle radius emission if enabled */ if (ffs->flags & FLUID_FLOW_USE_PART_SIZE) { tree = BLI_kdtree_3d_new(psys->totpart + psys->totchild); - bounds_margin = (int)ceil(solid + smooth); + bounds_margin = int(ceil(solid + smooth)); } /* calculate local position for each particle */ @@ -1600,7 +1601,7 @@ static void emit_from_particles(Object *flow_ob, } /* set emission map */ - clamp_bounds_in_domain(fds, bb->min, bb->max, NULL, NULL, bounds_margin, dt); + clamp_bounds_in_domain(fds, bb->min, bb->max, nullptr, nullptr, bounds_margin, dt); bb_allocateData(bb, ffs->flags & FLUID_FLOW_INITVELOCITY, true); if (!(ffs->flags & FLUID_FLOW_USE_PART_SIZE)) { @@ -1646,17 +1647,16 @@ static void emit_from_particles(Object *flow_ob, BLI_kdtree_3d_balance(tree); - EmitFromParticlesData data = { - .ffs = ffs, - .tree = tree, - .bb = bb, - .particle_vel = particle_vel, - .min = min, - .max = max, - .res = res, - .solid = solid, - .smooth = smooth, - }; + EmitFromParticlesData data{}; + data.ffs = ffs; + data.tree = tree; + data.bb = bb; + data.particle_vel = particle_vel; + data.min = min; + data.max = max; + data.res = res; + data.solid = solid; + data.smooth = smooth; TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); @@ -1920,7 +1920,7 @@ static void sample_mesh(FluidFlowSettings *ffs, tex_co[1] = tex_co[1] * 2.0f - 1.0f; tex_co[2] = ffs->texture_offset; } - BKE_texture_get_value(NULL, ffs->noise_texture, tex_co, 
&texres, false); + BKE_texture_get_value(nullptr, ffs->noise_texture, tex_co, &texres, false); emission_strength *= texres.tin; } } @@ -1976,7 +1976,7 @@ static void sample_mesh(FluidFlowSettings *ffs, influence_map[index] = MAX2(volume_factor, emission_strength); } -typedef struct EmitFromDMData { +struct EmitFromDMData { FluidDomainSettings *fds; FluidFlowSettings *ffs; @@ -1995,20 +1995,20 @@ typedef struct EmitFromDMData { float *vert_vel; float *flow_center; int *min, *max, *res; -} EmitFromDMData; +}; static void emit_from_mesh_task_cb(void *__restrict userdata, const int z, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - EmitFromDMData *data = userdata; + EmitFromDMData *data = static_cast(userdata); FluidObjectBB *bb = data->bb; for (int x = data->min[0]; x < data->max[0]; x++) { for (int y = data->min[1]; y < data->max[1]; y++) { const int index = manta_get_index( x - bb->min[0], bb->res[0], y - bb->min[1], bb->res[1], z - bb->min[2]); - const float ray_start[3] = {((float)x) + 0.5f, ((float)y) + 0.5f, ((float)z) + 0.5f}; + const float ray_start[3] = {(float(x)) + 0.5f, (float(y)) + 0.5f, (float(z)) + 0.5f}; /* Compute emission only for flow objects that produce fluid (i.e. skip outflow objects). * Result in bb->influence. Also computes initial velocities. Result in bb->velocity. */ @@ -2031,9 +2031,9 @@ static void emit_from_mesh_task_cb(void *__restrict userdata, data->has_velocity, data->defgrp_index, data->dvert, - (float)x, - (float)y, - (float)z); + float(x), + float(y), + float(z)); } /* Calculate levelset values from meshes. Result in bb->distances. */ @@ -2051,10 +2051,10 @@ static void emit_from_mesh( Object *flow_ob, FluidDomainSettings *fds, FluidFlowSettings *ffs, FluidObjectBB *bb, float dt) { if (ffs->mesh) { - BVHTreeFromMesh tree_data = {NULL}; + BVHTreeFromMesh tree_data = {nullptr}; int i; - float *vert_vel = NULL; + float *vert_vel = nullptr; bool has_velocity = false; int defgrp_index = ffs->vgroup_density - 1; @@ -2070,17 +2070,19 @@ static void emit_from_mesh( const MLoopTri *mlooptri = BKE_mesh_runtime_looptri_ensure(me); const int numverts = me->totvert; const MDeformVert *dvert = BKE_mesh_deform_verts(me); - const float(*mloopuv)[2] = CustomData_get_layer_named( - &me->ldata, CD_PROP_FLOAT2, ffs->uvlayer_name); + const float(*mloopuv)[2] = static_cast( + CustomData_get_layer_named(&me->ldata, CD_PROP_FLOAT2, ffs->uvlayer_name)); if (ffs->flags & FLUID_FLOW_INITVELOCITY) { - vert_vel = MEM_callocN(sizeof(float[3]) * numverts, "manta_flow_velocity"); + vert_vel = static_cast( + MEM_callocN(sizeof(float[3]) * numverts, "manta_flow_velocity")); if (ffs->numverts != numverts || !ffs->verts_old) { if (ffs->verts_old) { MEM_freeN(ffs->verts_old); } - ffs->verts_old = MEM_callocN(sizeof(float[3]) * numverts, "manta_flow_verts_old"); + ffs->verts_old = static_cast( + MEM_callocN(sizeof(float[3]) * numverts, "manta_flow_verts_old")); ffs->numverts = numverts; } else { @@ -2121,8 +2123,8 @@ static void emit_from_mesh( /* Set emission map. * Use 3 cell diagonals as margin (3 * 1.732 = 5.196). */ - int bounds_margin = (int)ceil(5.196); - clamp_bounds_in_domain(fds, bb->min, bb->max, NULL, NULL, bounds_margin, dt); + int bounds_margin = int(ceil(5.196)); + clamp_bounds_in_domain(fds, bb->min, bb->max, nullptr, nullptr, bounds_margin, dt); bb_allocateData(bb, ffs->flags & FLUID_FLOW_INITVELOCITY, true); /* Setup loop bounds. 
*/ @@ -2136,25 +2138,24 @@ static void emit_from_mesh( bool use_flow = ffs->flags & FLUID_FLOW_USE_INFLOW; if (use_flow && BKE_bvhtree_from_mesh_get(&tree_data, me, BVHTREE_FROM_LOOPTRI, 4)) { - EmitFromDMData data = { - .fds = fds, - .ffs = ffs, - .vert_positions = positions, - .vert_normals = vert_normals, - .mloop = mloop, - .mlooptri = mlooptri, - .mloopuv = mloopuv, - .dvert = dvert, - .defgrp_index = defgrp_index, - .tree = &tree_data, - .bb = bb, - .has_velocity = has_velocity, - .vert_vel = vert_vel, - .flow_center = flow_center, - .min = min, - .max = max, - .res = res, - }; + EmitFromDMData data{}; + data.fds = fds; + data.ffs = ffs; + data.vert_positions = positions; + data.vert_normals = vert_normals; + data.mloop = mloop; + data.mlooptri = mlooptri; + data.mloopuv = mloopuv; + data.dvert = dvert; + data.defgrp_index = defgrp_index; + data.tree = &tree_data; + data.bb = bb; + data.has_velocity = has_velocity; + data.vert_vel = vert_vel; + data.flow_center = flow_center; + data.min = min; + data.max = max; + data.res = res; TaskParallelSettings settings; BLI_parallel_range_settings_defaults(&settings); @@ -2167,7 +2168,7 @@ static void emit_from_mesh( if (vert_vel) { MEM_freeN(vert_vel); } - BKE_id_free(NULL, me); + BKE_id_free(nullptr, me); } } @@ -2198,9 +2199,9 @@ static void adaptive_domain_adjust( /* add to total shift */ add_v3_v3(fds->shift_f, frame_shift_f); /* convert to integer */ - total_shift[0] = (int)floorf(fds->shift_f[0]); - total_shift[1] = (int)floorf(fds->shift_f[1]); - total_shift[2] = (int)floorf(fds->shift_f[2]); + total_shift[0] = int(floorf(fds->shift_f[0])); + total_shift[1] = int(floorf(fds->shift_f[1])); + total_shift[2] = int(floorf(fds->shift_f[2])); int temp_shift[3]; copy_v3_v3_int(temp_shift, fds->shift); sub_v3_v3v3_int(new_shift, total_shift, fds->shift); @@ -2692,7 +2693,7 @@ static bool escape_flowsobject(Object *flowobj, static void compute_flowsemission(Scene *scene, FluidObjectBB *bb_maps, - struct Depsgraph *depsgraph, + Depsgraph *depsgraph, float dt, Object **flowobjs, int frame, @@ -2732,13 +2733,13 @@ static void compute_flowsemission(Scene *scene, } /* More splitting because of emission subframe: If no subframes present, sample_size is 1. */ - float sample_size = 1.0f / (float)(subframes + 1); + float sample_size = 1.0f / float(subframes + 1); float subframe_dt = dt * sample_size; /* Emission loop. When not using subframes this will loop only once. */ for (int subframe = 0; subframe <= subframes; subframe++) { /* Temporary emission map used when subframes are enabled, i.e. at least one subframe. */ - FluidObjectBB bb_temp = {NULL}; + FluidObjectBB bb_temp = {nullptr}; /* Set scene time */ if ((subframe < subframes || time_per_frame + dt + FLT_EPSILON < frame_length) && @@ -2812,7 +2813,7 @@ static void compute_flowsemission(Scene *scene, # endif } -static void update_flowsfluids(struct Depsgraph *depsgraph, +static void update_flowsfluids(Depsgraph *depsgraph, Scene *scene, Object *ob, FluidDomainSettings *fds, @@ -2821,8 +2822,8 @@ static void update_flowsfluids(struct Depsgraph *depsgraph, int frame, float dt) { - FluidObjectBB *bb_maps = NULL; - Object **flowobjs = NULL; + FluidObjectBB *bb_maps = nullptr; + Object **flowobjs = nullptr; uint numflowobjs = 0; bool is_resume = (fds->cache_frame_pause_data == frame); bool is_first_frame = (fds->cache_frame_start == frame); @@ -2835,7 +2836,8 @@ static void update_flowsfluids(struct Depsgraph *depsgraph, ensure_flowsfields(fds); /* Allocate emission map for each flow object. 
*/ - bb_maps = MEM_callocN(sizeof(struct FluidObjectBB) * numflowobjs, "fluid_flow_bb_maps"); + bb_maps = static_cast( + MEM_callocN(sizeof(FluidObjectBB) * numflowobjs, "fluid_flow_bb_maps")); /* Initialize emission map for each flow object. */ compute_flowsemission(scene, @@ -3082,7 +3084,7 @@ static void update_flowsfluids(struct Depsgraph *depsgraph, } } -typedef struct UpdateEffectorsData { +struct UpdateEffectorsData { Scene *scene; FluidDomainSettings *fds; ListBase *effectors; @@ -3097,13 +3099,13 @@ typedef struct UpdateEffectorsData { float *velocity_z; int *flags; float *phi_obs_in; -} UpdateEffectorsData; +}; static void update_effectors_task_cb(void *__restrict userdata, const int x, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - UpdateEffectorsData *data = userdata; + UpdateEffectorsData *data = static_cast(userdata); FluidDomainSettings *fds = data->fds; for (int y = 0; y < fds->res[1]; y++) { @@ -3133,15 +3135,15 @@ static void update_effectors_task_cb(void *__restrict userdata, normalize_v3(vel); mul_v3_fl(vel, mag); - voxel_center[0] = fds->p0[0] + fds->cell_size[0] * ((float)(x + fds->res_min[0]) + 0.5f); - voxel_center[1] = fds->p0[1] + fds->cell_size[1] * ((float)(y + fds->res_min[1]) + 0.5f); - voxel_center[2] = fds->p0[2] + fds->cell_size[2] * ((float)(z + fds->res_min[2]) + 0.5f); + voxel_center[0] = fds->p0[0] + fds->cell_size[0] * (float(x + fds->res_min[0]) + 0.5f); + voxel_center[1] = fds->p0[1] + fds->cell_size[1] * (float(y + fds->res_min[1]) + 0.5f); + voxel_center[2] = fds->p0[2] + fds->cell_size[2] * (float(z + fds->res_min[2]) + 0.5f); mul_m4_v3(fds->obmat, voxel_center); /* Do effectors. */ pd_point_from_loc(data->scene, voxel_center, vel, index, &epoint); BKE_effectors_apply( - data->effectors, NULL, fds->effector_weights, &epoint, retvel, NULL, NULL); + data->effectors, nullptr, fds->effector_weights, &epoint, retvel, nullptr, nullptr); /* Convert retvel to local space. */ mag = len_v3(retvel); @@ -3168,12 +3170,12 @@ static void update_effectors_task_cb(void *__restrict userdata, } static void update_effectors( - Depsgraph *depsgraph, Scene *scene, Object *ob, FluidDomainSettings *fds, float UNUSED(dt)) + Depsgraph *depsgraph, Scene *scene, Object *ob, FluidDomainSettings *fds, float /*dt*/) { ListBase *effectors; /* make sure smoke flow influence is 0.0f */ fds->effector_weights->weight[PFIELD_FLUIDFLOW] = 0.0f; - effectors = BKE_effectors_create(depsgraph, ob, NULL, fds->effector_weights, false); + effectors = BKE_effectors_create(depsgraph, ob, nullptr, fds->effector_weights, false); if (effectors) { /* Precalculate wind forces. */ @@ -3231,7 +3233,7 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds, int num_verts, num_faces; if (!fds->fluid) { - return NULL; + return nullptr; } num_verts = manta_liquid_get_num_verts(fds->fluid); @@ -3243,12 +3245,12 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds, # endif if (!num_verts || !num_faces) { - return NULL; + return nullptr; } me = BKE_mesh_new_nomain(num_verts, 0, 0, num_faces * 3, num_faces); if (!me) { - return NULL; + return nullptr; } float(*positions)[3] = BKE_mesh_vert_positions_for_write(me); mpolys = BKE_mesh_polys_for_write(me); @@ -3277,13 +3279,13 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds, /* Velocities. */ /* If needed, vertex velocities will be read too. 
*/ bool use_speedvectors = fds->flags & FLUID_DOMAIN_USE_SPEED_VECTORS; - float(*velarray)[3] = NULL; + float(*velarray)[3] = nullptr; float time_mult = fds->dx / (DT_DEFAULT * (25.0f / FPS)); if (use_speedvectors) { CustomDataLayer *velocity_layer = BKE_id_attribute_new( - &me->id, "velocity", CD_PROP_FLOAT3, ATTR_DOMAIN_POINT, NULL); - velarray = velocity_layer->data; + &me->id, "velocity", CD_PROP_FLOAT3, ATTR_DOMAIN_POINT, nullptr); + velarray = static_cast(velocity_layer->data); } /* Loop for vertices and normals. */ @@ -3295,7 +3297,7 @@ static Mesh *create_liquid_geometry(FluidDomainSettings *fds, positions[i][2] = manta_liquid_get_vertex_z_at(fds->fluid, i); /* Adjust coordinates from Mantaflow to match viewport scaling. */ - float tmp[3] = {(float)fds->res[0], (float)fds->res[1], (float)fds->res[2]}; + float tmp[3] = {float(fds->res[0]), float(fds->res[1]), float(fds->res[2])}; /* Scale to unit cube around 0. */ mul_v3_fl(tmp, fds->mesh_scale * 0.5f); sub_v3_v3(positions[i], tmp); @@ -3537,7 +3539,7 @@ static int manta_step( BLI_mutex_lock(&object_update_lock); - /* Loop as long as time_per_frame (sum of sub dt's) does not exceed actual framelength. */ + /* Loop as long as time_per_frame (sum of sub dt's) does not exceed actual frame-length. */ while (time_per_frame + FLT_EPSILON < frame_length) { manta_adapt_timestep(fds->fluid); dt = manta_get_timestep(fds->fluid); @@ -3580,7 +3582,7 @@ static int manta_step( fds->time_total = time_total; } - /* Total time must not exceed framecount times framelength. Correct tiny errors here. */ + /* Total time must not exceed frame-count times frame-length. Correct tiny errors here. */ CLAMP_MAX(fds->time_total, time_total_old + fds->frame_length); /* Compute shadow grid for gas simulations. Make sure to skip if bake job was canceled early. 
*/ @@ -3620,7 +3622,7 @@ static void fluid_modifier_processFlow(FluidModifierData *fmd, if (fmd->flow) { if (fmd->flow->mesh) { - BKE_id_free(NULL, fmd->flow->mesh); + BKE_id_free(nullptr, fmd->flow->mesh); } fmd->flow->mesh = BKE_mesh_copy_for_eval(me, false); } @@ -3647,7 +3649,7 @@ static void fluid_modifier_processEffector(FluidModifierData *fmd, if (fmd->effector) { if (fmd->effector->mesh) { - BKE_id_free(NULL, fmd->effector->mesh); + BKE_id_free(nullptr, fmd->effector->mesh); } fmd->effector->mesh = BKE_mesh_copy_for_eval(me, false); } @@ -3669,10 +3671,10 @@ static void fluid_modifier_processDomain(FluidModifierData *fmd, const int scene_framenr) { FluidDomainSettings *fds = fmd->domain; - Object *guide_parent = NULL; - Object **objs = NULL; + Object *guide_parent = nullptr; + Object **objs = nullptr; uint numobj = 0; - FluidModifierData *fmd_parent = NULL; + FluidModifierData *fmd_parent = nullptr; bool is_startframe, has_advanced; is_startframe = (scene_framenr == fds->cache_frame_start); @@ -4076,7 +4078,7 @@ static void fluid_modifier_processDomain(FluidModifierData *fmd, static void fluid_modifier_process( FluidModifierData *fmd, Depsgraph *depsgraph, Scene *scene, Object *ob, Mesh *me) { - const int scene_framenr = (int)DEG_get_ctime(depsgraph); + const int scene_framenr = int(DEG_get_ctime(depsgraph)); if (fmd->type & MOD_FLUID_TYPE_FLOW) { fluid_modifier_processFlow(fmd, depsgraph, scene, ob, me, scene_framenr); @@ -4089,7 +4091,7 @@ static void fluid_modifier_process( } } -struct Mesh *BKE_fluid_modifier_do( +Mesh *BKE_fluid_modifier_do( FluidModifierData *fmd, Depsgraph *depsgraph, Scene *scene, Object *ob, Mesh *me) { /* Optimization: Do not update viewport during bakes (except in replay mode) @@ -4100,13 +4102,13 @@ struct Mesh *BKE_fluid_modifier_do( if (!G.moving) { /* Lock so preview render does not read smoke data while it gets modified. */ if ((fmd->type & MOD_FLUID_TYPE_DOMAIN) && fmd->domain) { - BLI_rw_mutex_lock(fmd->domain->fluid_mutex, THREAD_LOCK_WRITE); + BLI_rw_mutex_lock(static_cast(fmd->domain->fluid_mutex), THREAD_LOCK_WRITE); } fluid_modifier_process(fmd, depsgraph, scene, ob, me); if ((fmd->type & MOD_FLUID_TYPE_DOMAIN) && fmd->domain) { - BLI_rw_mutex_unlock(fmd->domain->fluid_mutex); + BLI_rw_mutex_unlock(static_cast(fmd->domain->fluid_mutex)); } if (fmd->domain) { @@ -4136,7 +4138,7 @@ struct Mesh *BKE_fluid_modifier_do( } } - Mesh *result = NULL; + Mesh *result = nullptr; if (fmd->type & MOD_FLUID_TYPE_DOMAIN && fmd->domain) { if (needs_viewport_update) { /* Return generated geometry depending on domain type. */ @@ -4302,15 +4304,15 @@ static void manta_smoke_calc_transparency(FluidDomainSettings *fds, /* Convert light pos to sim cell space. */ mul_m4_v3(fds->imat, light); - light[0] = (light[0] - fds->p0[0]) / fds->cell_size[0] - 0.5f - (float)fds->res_min[0]; - light[1] = (light[1] - fds->p0[1]) / fds->cell_size[1] - 0.5f - (float)fds->res_min[1]; - light[2] = (light[2] - fds->p0[2]) / fds->cell_size[2] - 0.5f - (float)fds->res_min[2]; + light[0] = (light[0] - fds->p0[0]) / fds->cell_size[0] - 0.5f - float(fds->res_min[0]); + light[1] = (light[1] - fds->p0[1]) / fds->cell_size[1] - 0.5f - float(fds->res_min[1]); + light[2] = (light[2] - fds->p0[2]) / fds->cell_size[2] - 0.5f - float(fds->res_min[2]); /* Calculate domain bounds in sim cell space. 
*/ /* 0,2,4 = 0.0f */ - bv[1] = (float)fds->res[0]; /* X */ - bv[3] = (float)fds->res[1]; /* Y */ - bv[5] = (float)fds->res[2]; /* Z */ + bv[1] = float(fds->res[0]); /* X */ + bv[3] = float(fds->res[1]); /* Y */ + bv[5] = float(fds->res[2]); /* Z */ for (int z = 0; z < fds->res[2]; z++) { size_t index = z * slabsize; @@ -4325,22 +4327,22 @@ static void manta_smoke_calc_transparency(FluidDomainSettings *fds, /* Reset shadow value. */ shadow[index] = -1.0f; - voxel_center[0] = (float)x; - voxel_center[1] = (float)y; - voxel_center[2] = (float)z; + voxel_center[0] = float(x); + voxel_center[1] = float(y); + voxel_center[2] = float(z); /* Get starting cell (light pos). */ if (BLI_bvhtree_bb_raycast(bv, light, voxel_center, pos) > FLT_EPSILON) { /* We're outside -> use point on side of domain. */ - cell[0] = (int)floor(pos[0]); - cell[1] = (int)floor(pos[1]); - cell[2] = (int)floor(pos[2]); + cell[0] = int(floor(pos[0])); + cell[1] = int(floor(pos[1])); + cell[2] = int(floor(pos[2])); } else { /* We're inside -> use light itself. */ - cell[0] = (int)floor(light[0]); - cell[1] = (int)floor(light[1]); - cell[2] = (int)floor(light[2]); + cell[0] = int(floor(light[0])); + cell[1] = int(floor(light[1])); + cell[2] = int(floor(light[2])); } /* Clamp within grid bounds */ CLAMP(cell[0], 0, fds->res[0] - 1); @@ -4367,7 +4369,7 @@ static void manta_smoke_calc_transparency(FluidDomainSettings *fds, } } -float BKE_fluid_get_velocity_at(struct Object *ob, float position[3], float velocity[3]) +float BKE_fluid_get_velocity_at(Object *ob, float position[3], float velocity[3]) { FluidModifierData *fmd = (FluidModifierData *)BKE_modifiers_findby_type(ob, eModifierType_Fluid); zero_v3(velocity); @@ -4392,9 +4394,9 @@ float BKE_fluid_get_velocity_at(struct Object *ob, float position[3], float velo } /* map pos between 0.0 - 1.0 */ - pos[0] = (pos[0] - fds->res_min[0]) / ((float)fds->res[0]); - pos[1] = (pos[1] - fds->res_min[1]) / ((float)fds->res[1]); - pos[2] = (pos[2] - fds->res_min[2]) / ((float)fds->res[2]); + pos[0] = (pos[0] - fds->res_min[0]) / (float(fds->res[0])); + pos[1] = (pos[1] - fds->res_min[1]) / (float(fds->res[1])); + pos[2] = (pos[2] - fds->res_min[2]) / (float(fds->res[2])); /* Check if position is outside active area. 
*/ if (fds->type == FLUID_DOMAIN_TYPE_GAS && fds->flags & FLUID_DOMAIN_USE_ADAPTIVE_DOMAIN) { @@ -4450,8 +4452,8 @@ int BKE_fluid_get_data_flags(FluidDomainSettings *fds) return flags; } -void BKE_fluid_particle_system_create(struct Main *bmain, - struct Object *ob, +void BKE_fluid_particle_system_create(Main *bmain, + Object *ob, const char *pset_name, const char *parts_name, const char *psys_name, @@ -4463,7 +4465,7 @@ void BKE_fluid_particle_system_create(struct Main *bmain, /* add particle system */ part = BKE_particlesettings_add(bmain, pset_name); - psys = MEM_callocN(sizeof(ParticleSystem), "particle_system"); + psys = MEM_cnew(__func__); part->type = psys_type; part->totpart = 0; @@ -4483,12 +4485,12 @@ void BKE_fluid_particle_system_create(struct Main *bmain, BKE_modifier_unique_name(&ob->modifiers, (ModifierData *)pfmd); } -void BKE_fluid_particle_system_destroy(struct Object *ob, const int particle_type) +void BKE_fluid_particle_system_destroy(Object *ob, const int particle_type) { ParticleSystemModifierData *pfmd; ParticleSystem *psys, *next_psys; - for (psys = ob->particlesystem.first; psys; psys = next_psys) { + for (psys = static_cast(ob->particlesystem.first); psys; psys = next_psys) { next_psys = psys->next; if (psys->part->type == particle_type) { /* clear modifier */ @@ -4608,7 +4610,7 @@ void BKE_fluid_domain_type_set(Object *object, FluidDomainSettings *settings, in settings->type = type; } -void BKE_fluid_flow_behavior_set(Object *UNUSED(object), FluidFlowSettings *settings, int behavior) +void BKE_fluid_flow_behavior_set(Object * /*object*/, FluidFlowSettings *settings, int behavior) { settings->behavior = behavior; } @@ -4628,7 +4630,7 @@ void BKE_fluid_flow_type_set(Object *object, FluidFlowSettings *settings, int ty settings->type = type; } -void BKE_fluid_effector_type_set(Object *UNUSED(object), FluidEffectorSettings *settings, int type) +void BKE_fluid_effector_type_set(Object * /*object*/, FluidEffectorSettings *settings, int type) { settings->type = type; } @@ -4650,7 +4652,7 @@ void BKE_fluid_fields_sanitize(FluidDomainSettings *settings) settings->coba_field = FLUID_DOMAIN_FIELD_DENSITY; } - /* Gas domains do not support vdb mini precision. */ + /* Gas domains do not support VDB mini precision. 
*/ if (data_depth == VDB_PRECISION_MINI_FLOAT) { settings->openvdb_data_depth = VDB_PRECISION_HALF_FLOAT; } @@ -4688,14 +4690,14 @@ static void fluid_modifier_freeDomain(FluidModifierData *fmd) } if (fmd->domain->fluid_mutex) { - BLI_rw_mutex_free(fmd->domain->fluid_mutex); + BLI_rw_mutex_free(static_cast(fmd->domain->fluid_mutex)); } MEM_SAFE_FREE(fmd->domain->effector_weights); if (!(fmd->modifier.flag & eModifierFlag_SharedCaches)) { BKE_ptcache_free_list(&(fmd->domain->ptcaches[0])); - fmd->domain->point_cache[0] = NULL; + fmd->domain->point_cache[0] = nullptr; } if (fmd->domain->coba) { @@ -4703,7 +4705,7 @@ static void fluid_modifier_freeDomain(FluidModifierData *fmd) } MEM_freeN(fmd->domain); - fmd->domain = NULL; + fmd->domain = nullptr; } } @@ -4711,16 +4713,16 @@ static void fluid_modifier_freeFlow(FluidModifierData *fmd) { if (fmd->flow) { if (fmd->flow->mesh) { - BKE_id_free(NULL, fmd->flow->mesh); + BKE_id_free(nullptr, fmd->flow->mesh); } - fmd->flow->mesh = NULL; + fmd->flow->mesh = nullptr; MEM_SAFE_FREE(fmd->flow->verts_old); fmd->flow->numverts = 0; fmd->flow->flags &= ~FLUID_FLOW_NEEDS_UPDATE; MEM_freeN(fmd->flow); - fmd->flow = NULL; + fmd->flow = nullptr; } } @@ -4728,20 +4730,20 @@ static void fluid_modifier_freeEffector(FluidModifierData *fmd) { if (fmd->effector) { if (fmd->effector->mesh) { - BKE_id_free(NULL, fmd->effector->mesh); + BKE_id_free(nullptr, fmd->effector->mesh); } - fmd->effector->mesh = NULL; + fmd->effector->mesh = nullptr; MEM_SAFE_FREE(fmd->effector->verts_old); fmd->effector->numverts = 0; fmd->effector->flags &= ~FLUID_EFFECTOR_NEEDS_UPDATE; MEM_freeN(fmd->effector); - fmd->effector = NULL; + fmd->effector = nullptr; } } -static void fluid_modifier_reset_ex(struct FluidModifierData *fmd, bool need_lock) +static void fluid_modifier_reset_ex(FluidModifierData *fmd, bool need_lock) { if (!fmd) { return; @@ -4750,16 +4752,17 @@ static void fluid_modifier_reset_ex(struct FluidModifierData *fmd, bool need_loc if (fmd->domain) { if (fmd->domain->fluid) { if (need_lock) { - BLI_rw_mutex_lock(fmd->domain->fluid_mutex, THREAD_LOCK_WRITE); + BLI_rw_mutex_lock(static_cast(fmd->domain->fluid_mutex), + THREAD_LOCK_WRITE); } #ifdef WITH_FLUID manta_free(fmd->domain->fluid); #endif - fmd->domain->fluid = NULL; + fmd->domain->fluid = nullptr; if (need_lock) { - BLI_rw_mutex_unlock(fmd->domain->fluid_mutex); + BLI_rw_mutex_unlock(static_cast(fmd->domain->fluid_mutex)); } } @@ -4779,7 +4782,7 @@ static void fluid_modifier_reset_ex(struct FluidModifierData *fmd, bool need_loc } } -void BKE_fluid_modifier_reset(struct FluidModifierData *fmd) +void BKE_fluid_modifier_reset(FluidModifierData *fmd) { fluid_modifier_reset_ex(fmd, true); } @@ -4795,7 +4798,7 @@ void BKE_fluid_modifier_free(FluidModifierData *fmd) fluid_modifier_freeEffector(fmd); } -void BKE_fluid_modifier_create_type_data(struct FluidModifierData *fmd) +void BKE_fluid_modifier_create_type_data(FluidModifierData *fmd) { if (!fmd) { return; @@ -4819,7 +4822,7 @@ void BKE_fluid_modifier_create_type_data(struct FluidModifierData *fmd) fmd->domain->openvdb_compression = VDB_COMPRESSION_ZIP; #endif - fmd->domain->effector_weights = BKE_effector_add_weights(NULL); + fmd->domain->effector_weights = BKE_effector_add_weights(nullptr); fmd->domain->fluid_mutex = BLI_rw_mutex_alloc(); char cache_name[64]; @@ -4831,7 +4834,7 @@ void BKE_fluid_modifier_create_type_data(struct FluidModifierData *fmd) fmd->domain->point_cache[0] = BKE_ptcache_add(&(fmd->domain->ptcaches[0])); fmd->domain->point_cache[0]->flag |= 
PTCACHE_DISK_CACHE; fmd->domain->point_cache[0]->step = 1; - fmd->domain->point_cache[1] = NULL; /* Deprecated */ + fmd->domain->point_cache[1] = nullptr; /* Deprecated */ } else if (fmd->type & MOD_FLUID_TYPE_FLOW) { if (fmd->flow) { @@ -4851,9 +4854,7 @@ void BKE_fluid_modifier_create_type_data(struct FluidModifierData *fmd) } } -void BKE_fluid_modifier_copy(const struct FluidModifierData *fmd, - struct FluidModifierData *tfmd, - const int flag) +void BKE_fluid_modifier_copy(const FluidModifierData *fmd, FluidModifierData *tfmd, const int flag) { tfmd->type = fmd->type; tfmd->time = fmd->time; @@ -4871,7 +4872,7 @@ void BKE_fluid_modifier_copy(const struct FluidModifierData *fmd, if (tfds->effector_weights) { MEM_freeN(tfds->effector_weights); } - tfds->effector_weights = MEM_dupallocN(fds->effector_weights); + tfds->effector_weights = static_cast(MEM_dupallocN(fds->effector_weights)); /* adaptive domain options */ tfds->adapt_margin = fds->adapt_margin; @@ -5002,7 +5003,7 @@ void BKE_fluid_modifier_copy(const struct FluidModifierData *fmd, tfds->display_thickness = fds->display_thickness; tfds->show_gridlines = fds->show_gridlines; if (fds->coba) { - tfds->coba = MEM_dupallocN(fds->coba); + tfds->coba = static_cast(MEM_dupallocN(fds->coba)); } tfds->vector_scale = fds->vector_scale; tfds->vector_draw_type = fds->vector_draw_type; diff --git a/source/blender/blenkernel/intern/fmodifier.c b/source/blender/blenkernel/intern/fmodifier.c index 46dc01edbff..46828a43818 100644 --- a/source/blender/blenkernel/intern/fmodifier.c +++ b/source/blender/blenkernel/intern/fmodifier.c @@ -61,20 +61,19 @@ static CLG_LogRef LOG = {"bke.fmodifier"}; */ #if 0 static FModifierTypeInfo FMI_MODNAME = { - FMODIFIER_TYPE_MODNAME, /* type */ - sizeof(FMod_ModName), /* size */ - FMI_TYPE_SOME_ACTION, /* action type */ - FMI_REQUIRES_SOME_REQUIREMENT, /* requirements */ - "Modifier Name", /* name */ - "FMod_ModName", /* struct name */ - 0, /* storage size */ - fcm_modname_free, /* free data */ - fcm_modname_relink, /* relink data */ - fcm_modname_copy, /* copy data */ - fcm_modname_new_data, /* new data */ - fcm_modname_verify, /* verify */ - fcm_modname_time, /* evaluate time */ - fcm_modname_evaluate, /* evaluate */ + /*type*/ FMODIFIER_TYPE_MODNAME, + /*size*/ sizeof(FMod_ModName), + /*acttype*/ FMI_TYPE_SOME_ACTION, + /*requires*/ FMI_REQUIRES_SOME_REQUIREMENT, + /*name*/ "Modifier Name", + /*structName*/ "FMod_ModName", + /*storage_size*/ 0, + /*free_data*/ fcm_modname_free, + /*copy_data*/ fcm_modname_copy, + /*new_data*/ fcm_modname_new_data, + /*verify_data*/ fcm_modname_verify, + /*evaluate_modifier_time*/ fcm_modname_time, + /*evaluate_modifier*/ fcm_modname_evaluate, }; #endif @@ -226,19 +225,19 @@ static void fcm_generator_evaluate( } static FModifierTypeInfo FMI_GENERATOR = { - FMODIFIER_TYPE_GENERATOR, /* type */ - sizeof(FMod_Generator), /* size */ - FMI_TYPE_GENERATE_CURVE, /* action type */ - FMI_REQUIRES_NOTHING, /* requirements */ - N_("Generator"), /* name */ - "FMod_Generator", /* struct name */ - 0, /* storage size */ - fcm_generator_free, /* free data */ - fcm_generator_copy, /* copy data */ - fcm_generator_new_data, /* new data */ - fcm_generator_verify, /* verify */ - NULL, /* evaluate time */ - fcm_generator_evaluate, /* evaluate */ + /*type*/ FMODIFIER_TYPE_GENERATOR, + /*size*/ sizeof(FMod_Generator), + /*acttype*/ FMI_TYPE_GENERATE_CURVE, + /*requires*/ FMI_REQUIRES_NOTHING, + /*name*/ N_("Generator"), + /*structName*/ "FMod_Generator", + /*storage_size*/ 0, + /*free_data*/ 
fcm_generator_free, + /*copy_data*/ fcm_generator_copy, + /*new_data*/ fcm_generator_new_data, + /*verify_data*/ fcm_generator_verify, + /*evaluate_modifier_time*/ NULL, + /*evaluate_modifier*/ fcm_generator_evaluate, }; /* Built-In Function Generator F-Curve Modifier --------------------------- */ @@ -356,19 +355,19 @@ static void fcm_fn_generator_evaluate( } static FModifierTypeInfo FMI_FN_GENERATOR = { - FMODIFIER_TYPE_FN_GENERATOR, /* type */ - sizeof(FMod_FunctionGenerator), /* size */ - FMI_TYPE_GENERATE_CURVE, /* action type */ - FMI_REQUIRES_NOTHING, /* requirements */ - N_("Built-In Function"), /* name */ - "FMod_FunctionGenerator", /* struct name */ - 0, /* storage size */ - NULL, /* free data */ - NULL, /* copy data */ - fcm_fn_generator_new_data, /* new data */ - NULL, /* verify */ - NULL, /* evaluate time */ - fcm_fn_generator_evaluate, /* evaluate */ + /*type*/ FMODIFIER_TYPE_FN_GENERATOR, + /*size*/ sizeof(FMod_FunctionGenerator), + /*acttype*/ FMI_TYPE_GENERATE_CURVE, + /*requires*/ FMI_REQUIRES_NOTHING, + /*name*/ N_("Built-In Function"), + /*structName*/ "FMod_FunctionGenerator", + /*storage_size*/ 0, + /*free_data*/ NULL, + /*copy_data*/ NULL, + /*new_data*/ fcm_fn_generator_new_data, + /*verify_data*/ NULL, + /*evaluate_modifier_time*/ NULL, + /*evaluate_modifier*/ fcm_fn_generator_evaluate, }; /* Envelope F-Curve Modifier --------------------------- */ @@ -469,19 +468,19 @@ static void fcm_envelope_evaluate( } static FModifierTypeInfo FMI_ENVELOPE = { - FMODIFIER_TYPE_ENVELOPE, /* type */ - sizeof(FMod_Envelope), /* size */ - FMI_TYPE_REPLACE_VALUES, /* action type */ - 0, /* requirements */ - N_("Envelope"), /* name */ - "FMod_Envelope", /* struct name */ - 0, /* storage size */ - fcm_envelope_free, /* free data */ - fcm_envelope_copy, /* copy data */ - fcm_envelope_new_data, /* new data */ - fcm_envelope_verify, /* verify */ - NULL, /* evaluate time */ - fcm_envelope_evaluate, /* evaluate */ + /*type*/ FMODIFIER_TYPE_ENVELOPE, + /*size*/ sizeof(FMod_Envelope), + /*acttype*/ FMI_TYPE_REPLACE_VALUES, + /*requires*/ 0, + /*name*/ N_("Envelope"), + /*structName*/ "FMod_Envelope", + /*storage_size*/ 0, + /*free_data*/ fcm_envelope_free, + /*copy_data*/ fcm_envelope_copy, + /*new_data*/ fcm_envelope_new_data, + /*verify_data*/ fcm_envelope_verify, + /*evaluate_modifier_time*/ NULL, + /*evaluate_modifier*/ fcm_envelope_evaluate, }; /* exported function for finding points */ @@ -768,19 +767,19 @@ static void fcm_cycles_evaluate(FCurve *UNUSED(fcu), } static FModifierTypeInfo FMI_CYCLES = { - FMODIFIER_TYPE_CYCLES, /* type */ - sizeof(FMod_Cycles), /* size */ - FMI_TYPE_EXTRAPOLATION, /* action type */ - FMI_REQUIRES_ORIGINAL_DATA, /* requirements */ - CTX_N_(BLT_I18NCONTEXT_ID_ACTION, "Cycles"), /* name */ - "FMod_Cycles", /* struct name */ - sizeof(tFCMED_Cycles), /* storage size */ - NULL, /* free data */ - NULL, /* copy data */ - fcm_cycles_new_data, /* new data */ - NULL /*fcm_cycles_verify*/, /* verify */ - fcm_cycles_time, /* evaluate time */ - fcm_cycles_evaluate, /* evaluate */ + /*type*/ FMODIFIER_TYPE_CYCLES, + /*size*/ sizeof(FMod_Cycles), + /*acttype*/ FMI_TYPE_EXTRAPOLATION, + /*requires*/ FMI_REQUIRES_ORIGINAL_DATA, + /*name*/ CTX_N_(BLT_I18NCONTEXT_ID_ACTION, "Cycles"), + /*structName*/ "FMod_Cycles", + /*storage_size*/ sizeof(tFCMED_Cycles), + /*free_data*/ NULL, + /*copy_data*/ NULL, + /*new_data*/ fcm_cycles_new_data, + /*verify_data*/ NULL /*fcm_cycles_verify*/, + /*evaluate_modifier_time*/ fcm_cycles_time, + /*evaluate_modifier*/ fcm_cycles_evaluate, }; 
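
The FModifierTypeInfo hunks above swap trailing field comments for leading /*field*/ comments on each positional initializer, the convention used throughout these patches for structs that keep positional (non-designated) initialization. A minimal sketch of the two styles, using a hypothetical FooTypeInfo struct that is not part of Blender:

#include <cstddef>

/* Hypothetical struct, for illustration only. */
struct FooTypeInfo {
  int type;
  size_t size;
  const char *name;
};

/* Old convention: trailing comments after each value; easy to misalign when
 * members are added, removed or reordered. */
static FooTypeInfo FOO_INFO_OLD = {
    1,           /* type */
    sizeof(int), /* size */
    "Foo",       /* name */
};

/* New convention: a leading comment names the member each value initializes,
 * keeping value and field name adjacent through reformatting. */
static FooTypeInfo FOO_INFO_NEW = {
    /*type*/ 1,
    /*size*/ sizeof(int),
    /*name*/ "Foo",
};
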
/* Noise F-Curve Modifier --------------------------- */ @@ -830,19 +829,19 @@ static void fcm_noise_evaluate( } static FModifierTypeInfo FMI_NOISE = { - FMODIFIER_TYPE_NOISE, /* type */ - sizeof(FMod_Noise), /* size */ - FMI_TYPE_REPLACE_VALUES, /* action type */ - 0, /* requirements */ - N_("Noise"), /* name */ - "FMod_Noise", /* struct name */ - 0, /* storage size */ - NULL, /* free data */ - NULL, /* copy data */ - fcm_noise_new_data, /* new data */ - NULL /*fcm_noise_verify*/, /* verify */ - NULL, /* evaluate time */ - fcm_noise_evaluate, /* evaluate */ + /*type*/ FMODIFIER_TYPE_NOISE, + /*size*/ sizeof(FMod_Noise), + /*acttype*/ FMI_TYPE_REPLACE_VALUES, + /*requires*/ 0, + /*name*/ N_("Noise"), + /*structName*/ "FMod_Noise", + /*storage_size*/ 0, + /*free_data*/ NULL, + /*copy_data*/ NULL, + /*new_data*/ fcm_noise_new_data, + /*verify_data*/ NULL /*fcm_noise_verify*/, + /*evaluate_modifier_time*/ NULL, + /*evaluate_modifier*/ fcm_noise_evaluate, }; /* Python F-Curve Modifier --------------------------- */ @@ -888,19 +887,19 @@ static void fcm_python_evaluate(FCurve *UNUSED(fcu), } static FModifierTypeInfo FMI_PYTHON = { - FMODIFIER_TYPE_PYTHON, /* type */ - sizeof(FMod_Python), /* size */ - FMI_TYPE_GENERATE_CURVE, /* action type */ - FMI_REQUIRES_RUNTIME_CHECK, /* requirements */ - N_("Python"), /* name */ - "FMod_Python", /* struct name */ - 0, /* storage size */ - fcm_python_free, /* free data */ - fcm_python_copy, /* copy data */ - fcm_python_new_data, /* new data */ - NULL /*fcm_python_verify*/, /* verify */ - NULL /*fcm_python_time*/, /* evaluate time */ - fcm_python_evaluate, /* evaluate */ + /*type*/ FMODIFIER_TYPE_PYTHON, + /*size*/ sizeof(FMod_Python), + /*acttype*/ FMI_TYPE_GENERATE_CURVE, + /*requires*/ FMI_REQUIRES_RUNTIME_CHECK, + /*name*/ N_("Python"), + /*structName*/ "FMod_Python", + /*storage_size*/ 0, + /*free_data*/ fcm_python_free, + /*copy_data*/ fcm_python_copy, + /*new_data*/ fcm_python_new_data, + /*verify_data*/ NULL /*fcm_python_verify*/, + /*evaluate_modifier_time*/ NULL /*fcm_python_time*/, + /*evaluate_modifier*/ fcm_python_evaluate, }; /* Limits F-Curve Modifier --------------------------- */ @@ -943,20 +942,19 @@ static void fcm_limits_evaluate(FCurve *UNUSED(fcu), } static FModifierTypeInfo FMI_LIMITS = { - FMODIFIER_TYPE_LIMITS, /* type */ - sizeof(FMod_Limits), /* size */ - FMI_TYPE_GENERATE_CURVE, - /* action type */ /* XXX... err... */ - FMI_REQUIRES_RUNTIME_CHECK, /* requirements */ - N_("Limits"), /* name */ - "FMod_Limits", /* struct name */ - 0, /* storage size */ - NULL, /* free data */ - NULL, /* copy data */ - NULL, /* new data */ - NULL, /* verify */ - fcm_limits_time, /* evaluate time */ - fcm_limits_evaluate, /* evaluate */ + /*type*/ FMODIFIER_TYPE_LIMITS, + /*size*/ sizeof(FMod_Limits), + /*acttype*/ FMI_TYPE_GENERATE_CURVE, + /*requires*/ FMI_REQUIRES_RUNTIME_CHECK, /* XXX... err... */ + /*name*/ N_("Limits"), + /*structName*/ "FMod_Limits", + /*storage_size*/ 0, + /*free_data*/ NULL, + /*copy_data*/ NULL, + /*new_data*/ NULL, + /*verify_data*/ NULL, + /*evaluate_modifier_time*/ fcm_limits_time, + /*evaluate_modifier*/ fcm_limits_evaluate, }; /* Stepped F-Curve Modifier --------------------------- */ @@ -1004,20 +1002,19 @@ static float fcm_stepped_time(FCurve *UNUSED(fcu), } static FModifierTypeInfo FMI_STEPPED = { - FMODIFIER_TYPE_STEPPED, /* type */ - sizeof(FMod_Limits), /* size */ - FMI_TYPE_GENERATE_CURVE, - /* action type */ /* XXX... err... 
*/ - FMI_REQUIRES_RUNTIME_CHECK, /* requirements */ - N_("Stepped"), /* name */ - "FMod_Stepped", /* struct name */ - 0, /* storage size */ - NULL, /* free data */ - NULL, /* copy data */ - fcm_stepped_new_data, /* new data */ - NULL, /* verify */ - fcm_stepped_time, /* evaluate time */ - NULL, /* evaluate */ + /*type*/ FMODIFIER_TYPE_STEPPED, + /*size*/ sizeof(FMod_Limits), + /*acttype*/ FMI_TYPE_GENERATE_CURVE, + /*requires*/ FMI_REQUIRES_RUNTIME_CHECK, /* XXX... err... */ + /*name*/ N_("Stepped"), + /*structName*/ "FMod_Stepped", + /*storage_size*/ 0, + /*free_data*/ NULL, + /*copy_data*/ NULL, + /*new_data*/ fcm_stepped_new_data, + /*verify_data*/ NULL, + /*evaluate_modifier_time*/ fcm_stepped_time, + /*evaluate_modifier*/ NULL, }; /** \} */ diff --git a/source/blender/blenkernel/intern/geometry_component_curves.cc b/source/blender/blenkernel/intern/geometry_component_curves.cc index 075a6838704..e775a2a00a9 100644 --- a/source/blender/blenkernel/intern/geometry_component_curves.cc +++ b/source/blender/blenkernel/intern/geometry_component_curves.cc @@ -143,6 +143,8 @@ namespace blender::bke { static Array curve_normal_point_domain(const bke::CurvesGeometry &curves) { + const OffsetIndices points_by_curve = curves.points_by_curve(); + const OffsetIndices evaluated_points_by_curve = curves.evaluated_points_by_curve(); const VArray types = curves.curve_types(); const VArray resolutions = curves.resolution(); const VArray curves_cyclic = curves.cyclic(); @@ -158,8 +160,8 @@ static Array curve_normal_point_domain(const bke::CurvesGeometry &curves Vector nurbs_tangents; for (const int i_curve : range) { - const IndexRange points = curves.points_for_curve(i_curve); - const IndexRange evaluated_points = curves.evaluated_points_for_curve(i_curve); + const IndexRange points = points_by_curve[i_curve]; + const IndexRange evaluated_points = evaluated_points_by_curve[i_curve]; MutableSpan curve_normals = results.as_mutable_span().slice(points); @@ -180,7 +182,7 @@ static Array curve_normal_point_domain(const bke::CurvesGeometry &curves curve_normals.first() = normals.first(); const Span offsets = curves.bezier_evaluated_offsets_for_curve(i_curve); for (const int i : IndexRange(points.size()).drop_front(1)) { - curve_normals[i] = normals[offsets[i - 1]]; + curve_normals[i] = normals[offsets[i]]; } break; } @@ -241,7 +243,7 @@ static VArray construct_curve_length_gvarray(const CurvesGeometry &curves { curves.ensure_evaluated_lengths(); - VArray cyclic = curves.cyclic(); + const VArray cyclic = curves.cyclic(); VArray lengths = VArray::ForFunc( curves.curves_num(), [&curves, cyclic = std::move(cyclic)](int64_t index) { return curves.evaluated_length_total_for_curve(index, cyclic[index]); diff --git a/source/blender/blenkernel/intern/geometry_component_edit_data.cc b/source/blender/blenkernel/intern/geometry_component_edit_data.cc index 2c00de3254f..9a5cd1aa904 100644 --- a/source/blender/blenkernel/intern/geometry_component_edit_data.cc +++ b/source/blender/blenkernel/intern/geometry_component_edit_data.cc @@ -48,7 +48,7 @@ void GeometryComponentEditData::remember_deformed_curve_positions_if_necessary( if (curves_id == nullptr) { return; } - const bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id->geometry); + const bke::CurvesGeometry &curves = curves_id->geometry.wrap(); const int points_num = curves.points_num(); if (points_num != edit_component.curves_edit_hints_->curves_id_orig.geometry.point_num) { return; diff --git a/source/blender/blenkernel/intern/geometry_component_mesh.cc 
b/source/blender/blenkernel/intern/geometry_component_mesh.cc index 7b694be324a..bdb1b0edf8b 100644 --- a/source/blender/blenkernel/intern/geometry_component_mesh.cc +++ b/source/blender/blenkernel/intern/geometry_component_mesh.cc @@ -984,26 +984,26 @@ class VArrayImpl_For_VertexWeights final : public VMutableArrayImpl { }); } - void materialize(IndexMask mask, MutableSpan r_span) const override + void materialize(IndexMask mask, float *dst) const override { if (dverts_ == nullptr) { - return r_span.fill_indices(mask, 0.0f); + mask.foreach_index([&](const int i) { dst[i] = 0.0f; }); } threading::parallel_for(mask.index_range(), 4096, [&](const IndexRange range) { for (const int64_t i : mask.slice(range)) { if (const MDeformWeight *weight = this->find_weight_at_index(i)) { - r_span[i] = weight->weight; + dst[i] = weight->weight; } else { - r_span[i] = 0.0f; + dst[i] = 0.0f; } } }); } - void materialize_to_uninitialized(IndexMask mask, MutableSpan r_span) const override + void materialize_to_uninitialized(IndexMask mask, float *dst) const override { - this->materialize(mask, r_span); + this->materialize(mask, dst); } private: diff --git a/source/blender/blenkernel/intern/geometry_fields.cc b/source/blender/blenkernel/intern/geometry_fields.cc index f4f4d0d8719..fd07f7eceab 100644 --- a/source/blender/blenkernel/intern/geometry_fields.cc +++ b/source/blender/blenkernel/intern/geometry_fields.cc @@ -55,7 +55,7 @@ GeometryFieldContext::GeometryFieldContext(const GeometryComponent &component, case GEO_COMPONENT_TYPE_CURVE: { const CurveComponent &curve_component = static_cast(component); const Curves *curves = curve_component.get_for_read(); - geometry_ = curves ? &CurvesGeometry::wrap(curves->geometry) : nullptr; + geometry_ = curves ? &curves->geometry.wrap() : nullptr; break; } case GEO_COMPONENT_TYPE_POINT_CLOUD: { @@ -560,8 +560,7 @@ std::optional try_detect_field_domain(const GeometryComponent &comp } else if (const auto *curves_field_input = dynamic_cast( &field_input)) { - if (!handle_domain( - curves_field_input->preferred_domain(CurvesGeometry::wrap(curves->geometry)))) { + if (!handle_domain(curves_field_input->preferred_domain(curves->geometry.wrap()))) { return std::nullopt; } } diff --git a/source/blender/blenkernel/intern/geometry_set.cc b/source/blender/blenkernel/intern/geometry_set.cc index 8fff80e709f..2417f8e2f73 100644 --- a/source/blender/blenkernel/intern/geometry_set.cc +++ b/source/blender/blenkernel/intern/geometry_set.cc @@ -227,7 +227,7 @@ bool GeometrySet::compute_boundbox_without_instances(float3 *r_min, float3 *r_ma have_minmax |= BKE_volume_min_max(volume, *r_min, *r_max); } if (const Curves *curves_id = this->get_curves_for_read()) { - const bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id->geometry); + const bke::CurvesGeometry &curves = curves_id->geometry.wrap(); have_minmax |= curves.bounds_min_max(*r_min, *r_max); } return have_minmax; diff --git a/source/blender/blenkernel/intern/gpencil.c b/source/blender/blenkernel/intern/gpencil.c index 5409bf61274..6405ce06a5b 100644 --- a/source/blender/blenkernel/intern/gpencil.c +++ b/source/blender/blenkernel/intern/gpencil.c @@ -1273,6 +1273,10 @@ bGPDframe *BKE_gpencil_layer_frame_get(bGPDlayer *gpl, int cframe, eGP_GetFrame_ gpl->actframe = gpf; } else if (addnew == GP_GETFRAME_ADD_COPY) { + /* The frame_addcopy function copies the active frame of gpl, + so we need to set the active frame before copying. 
+ */ + gpl->actframe = gpf; gpl->actframe = BKE_gpencil_frame_addcopy(gpl, cframe); } else { @@ -1300,6 +1304,10 @@ bGPDframe *BKE_gpencil_layer_frame_get(bGPDlayer *gpl, int cframe, eGP_GetFrame_ gpl->actframe = gpf; } else if (addnew == GP_GETFRAME_ADD_COPY) { + /* The frame_addcopy function copies the active frame of gpl; + so we need to set the active frame before copying. + */ + gpl->actframe = gpf; gpl->actframe = BKE_gpencil_frame_addcopy(gpl, cframe); } else { @@ -1566,7 +1574,6 @@ bGPDlayer *BKE_gpencil_layer_active_get(bGPdata *gpd) bGPDlayer *BKE_gpencil_layer_get_by_name(bGPdata *gpd, char *name, int first_if_not_found) { bGPDlayer *gpl; - int i = 0; /* error checking */ if (ELEM(NULL, gpd, gpd->layers.first)) { @@ -1578,7 +1585,6 @@ bGPDlayer *BKE_gpencil_layer_get_by_name(bGPdata *gpd, char *name, int first_if_ if (STREQ(name, gpl->info)) { return gpl; } - i++; } /* no such layer */ @@ -1863,6 +1869,18 @@ void BKE_gpencil_vgroup_remove(Object *ob, bDeformGroup *defgroup) /* Remove the group */ BLI_freelinkN(&gpd->vertex_group_names, defgroup); + + /* Update the active deform index if necessary. */ + const int active_index = BKE_object_defgroup_active_index_get(ob); + if (active_index > def_nr) { + BKE_object_defgroup_active_index_set(ob, active_index - 1); + } + /* Keep a valid active index if we still have some vertex groups. */ + if (!BLI_listbase_is_empty(&gpd->vertex_group_names) && + BKE_object_defgroup_active_index_get(ob) < 1) { + BKE_object_defgroup_active_index_set(ob, 1); + } + DEG_id_tag_update(&gpd->id, ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY); } diff --git a/source/blender/blenkernel/intern/gpencil_curve.c b/source/blender/blenkernel/intern/gpencil_curve.c index a0a579e6d65..bf73b9d6ed6 100644 --- a/source/blender/blenkernel/intern/gpencil_curve.c +++ b/source/blender/blenkernel/intern/gpencil_curve.c @@ -971,7 +971,7 @@ static float *gpencil_stroke_points_from_editcurve_adaptive_resolu( MEM_freeN(segment_point_lengths); *r_points_len = points_len; - return (float(*))r_points; + return (float *)r_points; } /** @@ -1012,7 +1012,7 @@ static float *gpencil_stroke_points_from_editcurve_fixed_resolu(bGPDcurve_point } *r_points_len = points_len; - return (float(*))r_points; + return (float *)r_points; } void BKE_gpencil_stroke_update_geometry_from_editcurve(bGPDstroke *gps, diff --git a/source/blender/blenkernel/intern/gpencil_geom.cc b/source/blender/blenkernel/intern/gpencil_geom.cc index e209c772fa0..8115920f938 100644 --- a/source/blender/blenkernel/intern/gpencil_geom.cc +++ b/source/blender/blenkernel/intern/gpencil_geom.cc @@ -3216,7 +3216,8 @@ bGPDstroke *BKE_gpencil_stroke_delete_tagged_points(bGPdata *gpd, pts = new_stroke->points; for (j = 0; j < new_stroke->totpoints; j++, pts++) { - pts->time -= delta; + /* Some points have time = 0, so check to not get negative time values.*/ + pts->time = max_ff(pts->time - delta, 0.0f); /* set flag for select again later */ if (select == true) { pts->flag &= ~GP_SPOINT_SELECT; diff --git a/source/blender/blenkernel/intern/idprop.c b/source/blender/blenkernel/intern/idprop.c index b8f0db0699d..156ad97c923 100644 --- a/source/blender/blenkernel/intern/idprop.c +++ b/source/blender/blenkernel/intern/idprop.c @@ -39,19 +39,19 @@ static CLG_LogRef LOG = {"bke.idprop"}; -/* Local size table. */ +/** Local size table, aligned with #eIDPropertyType. */ static size_t idp_size_table[] = { - 1, /*strings*/ - sizeof(int), - sizeof(float), - sizeof(float[3]), /* Vector type, deprecated. 
*/ - sizeof(float[16]), /* Matrix type, deprecated. */ - 0, /* Arrays don't have a fixed size. */ - sizeof(ListBase), /* Group type. */ - sizeof(void *), - sizeof(double), - 0, - sizeof(int8_t), /* Boolean type. */ + 1, /* #IDP_STRING */ + sizeof(int), /* #IDP_INT */ + sizeof(float), /* #IDP_FLOAT */ + sizeof(float[3]), /* DEPRECATED (was vector). */ + sizeof(float[16]), /* DEPRECATED (was matrix). */ + 0, /* #IDP_ARRAY (no fixed size). */ + sizeof(ListBase), /* #IDP_GROUP */ + sizeof(void *), /* #IDP_ID */ + sizeof(double), /* #IDP_DOUBLE */ + 0, /* #IDP_IDPARRAY (no fixed size). */ + sizeof(int8_t), /* #IDP_BOOLEAN */ }; /* -------------------------------------------------------------------- */ diff --git a/source/blender/blenkernel/intern/idprop_create.cc b/source/blender/blenkernel/intern/idprop_create.cc index 8a6e5cdcc50..cac4f736c69 100644 --- a/source/blender/blenkernel/intern/idprop_create.cc +++ b/source/blender/blenkernel/intern/idprop_create.cc @@ -21,6 +21,15 @@ std::unique_ptr create(const StringRefNull prop_n return std::unique_ptr(property); } +std::unique_ptr create_bool(const StringRefNull prop_name, + bool value) +{ + IDPropertyTemplate prop_template{0}; + prop_template.i = value; + IDProperty *property = IDP_New(IDP_BOOLEAN, &prop_template, prop_name.c_str()); + return std::unique_ptr(property); +} + std::unique_ptr create(const StringRefNull prop_name, float value) { IDPropertyTemplate prop_template{0}; diff --git a/source/blender/blenkernel/intern/image_gpu.cc b/source/blender/blenkernel/intern/image_gpu.cc index 12f3287ef97..c9d34800227 100644 --- a/source/blender/blenkernel/intern/image_gpu.cc +++ b/source/blender/blenkernel/intern/image_gpu.cc @@ -264,7 +264,7 @@ static GPUTexture **get_image_gpu_texture_ptr(Image *ima, eGPUTextureTarget textarget, const int multiview_eye) { - const bool in_range = (textarget >= 0) && (textarget < TEXTARGET_COUNT); + const bool in_range = (int(textarget) >= 0) && (textarget < TEXTARGET_COUNT); BLI_assert(in_range); BLI_assert(ELEM(multiview_eye, 0, 1)); diff --git a/source/blender/blenkernel/intern/image_partial_update.cc b/source/blender/blenkernel/intern/image_partial_update.cc index ecf55d6b694..4de807c0706 100644 --- a/source/blender/blenkernel/intern/image_partial_update.cc +++ b/source/blender/blenkernel/intern/image_partial_update.cc @@ -276,7 +276,7 @@ struct TileChangeset { const int chunk_len = chunk_x_len * chunk_y_len; for (int chunk_index = 0; chunk_index < chunk_len; chunk_index++) { - chunk_dirty_flags_[chunk_index] = chunk_dirty_flags_[chunk_index] | + chunk_dirty_flags_[chunk_index] = chunk_dirty_flags_[chunk_index] || other.chunk_dirty_flags_[chunk_index]; } has_dirty_chunks_ |= other.has_dirty_chunks_; diff --git a/source/blender/blenkernel/intern/key.cc b/source/blender/blenkernel/intern/key.cc index 4b3a42e4c8a..7d835c2464d 100644 --- a/source/blender/blenkernel/intern/key.cc +++ b/source/blender/blenkernel/intern/key.cc @@ -195,35 +195,35 @@ static void shapekey_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_KE = { - /* id_code */ ID_KE, - /* id_filter */ FILTER_ID_KE, - /* main_listbase_index */ INDEX_ID_KE, - /* struct_size */ sizeof(Key), - /* name */ "Key", - /* name_plural */ "shape_keys", - /* translation_context */ BLT_I18NCONTEXT_ID_SHAPEKEY, - /* flags */ IDTYPE_FLAGS_NO_LIBLINKING, - /* asset_type_info */ nullptr, + /*id_code*/ ID_KE, + /*id_filter*/ FILTER_ID_KE, + /*main_listbase_index*/ INDEX_ID_KE, + /*struct_size*/ sizeof(Key), + /*name*/ "Key", + /*name_plural*/ 
"shape_keys", + /*translation_context*/ BLT_I18NCONTEXT_ID_SHAPEKEY, + /*flags*/ IDTYPE_FLAGS_NO_LIBLINKING, + /*asset_type_info*/ nullptr, - /* init_data */ nullptr, - /* copy_data */ shapekey_copy_data, - /* free_data */ shapekey_free_data, - /* make_local */ nullptr, - /* foreach_id */ shapekey_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, + /*init_data*/ nullptr, + /*copy_data*/ shapekey_copy_data, + /*free_data*/ shapekey_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ shapekey_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, /* A bit weird, due to shape-keys not being strictly speaking embedded data... But they also * share a lot with those (non linkable, only ever used by one owner ID, etc.). */ - /* owner_pointer_get */ shapekey_owner_pointer_get, + /*owner_pointer_get*/ shapekey_owner_pointer_get, - /* blend_write */ shapekey_blend_write, - /* blend_read_data */ shapekey_blend_read_data, - /* blend_read_lib */ shapekey_blend_read_lib, - /* blend_read_expand */ shapekey_blend_read_expand, + /*blend_write*/ shapekey_blend_write, + /*blend_read_data*/ shapekey_blend_read_data, + /*blend_read_lib*/ shapekey_blend_read_lib, + /*blend_read_expand*/ shapekey_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; #define KEY_MODE_DUMMY 0 /* use where mode isn't checked for */ @@ -2218,7 +2218,7 @@ void BKE_keyblock_convert_to_mesh(const KeyBlock *kb, const int totvert) { const int tot = min_ii(kb->totelem, totvert); - memcpy(kb->data, vert_positions, sizeof(float[3]) * tot); + memcpy(vert_positions, kb->data, sizeof(float[3]) * tot); } void BKE_keyblock_mesh_calc_normals(const KeyBlock *kb, diff --git a/source/blender/blenkernel/intern/layer.cc b/source/blender/blenkernel/intern/layer.cc index 95c341212d1..9e452662055 100644 --- a/source/blender/blenkernel/intern/layer.cc +++ b/source/blender/blenkernel/intern/layer.cc @@ -626,7 +626,8 @@ static bool layer_collection_hidden(ViewLayer *view_layer, LayerCollection *lc) } /* Restriction flags stay set, so we need to check parents */ - CollectionParent *parent = static_cast(lc->collection->parents.first); + CollectionParent *parent = static_cast( + lc->collection->runtime.parents.first); if (parent) { lc = BKE_layer_collection_first_from_scene_collection(view_layer, parent->collection); @@ -635,8 +636,6 @@ static bool layer_collection_hidden(ViewLayer *view_layer, LayerCollection *lc) } return false; - - return false; } LayerCollection *BKE_layer_collection_from_index(ViewLayer *view_layer, const int index) @@ -662,7 +661,8 @@ bool BKE_layer_collection_activate(ViewLayer *view_layer, LayerCollection *lc) LayerCollection *BKE_layer_collection_activate_parent(ViewLayer *view_layer, LayerCollection *lc) { - CollectionParent *parent = static_cast(lc->collection->parents.first); + CollectionParent *parent = static_cast( + lc->collection->runtime.parents.first); if (parent) { lc = BKE_layer_collection_first_from_scene_collection(view_layer, parent->collection); @@ -1208,6 +1208,7 @@ static void layer_collection_sync(ViewLayer *view_layer, layer_resync->layer->layer_collections = new_lb_layer; BLI_assert(BLI_listbase_count(&layer_resync->collection->children) - skipped_children == BLI_listbase_count(&new_lb_layer)); + UNUSED_VARS_NDEBUG(skipped_children); /* Update bases etc. for objects. 
*/ layer_collection_objects_sync(view_layer, diff --git a/source/blender/blenkernel/intern/library.c b/source/blender/blenkernel/intern/library.c index 516fb9b75b6..ee50c4ae753 100644 --- a/source/blender/blenkernel/intern/library.c +++ b/source/blender/blenkernel/intern/library.c @@ -32,7 +32,6 @@ /* Unused currently. */ // static CLG_LogRef LOG = {.identifier = "bke.library"}; -struct BlendWriter; struct BlendDataReader; static void library_runtime_reset(Library *lib) diff --git a/source/blender/blenkernel/intern/linestyle.cc b/source/blender/blenkernel/intern/linestyle.cc index 3976331d599..a08e7f34e1d 100644 --- a/source/blender/blenkernel/intern/linestyle.cc +++ b/source/blender/blenkernel/intern/linestyle.cc @@ -732,33 +732,33 @@ static void linestyle_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_LS = { - /* id_code */ ID_LS, - /* id_filter */ FILTER_ID_LS, - /* main_listbase_index */ INDEX_ID_LS, - /* struct_size */ sizeof(FreestyleLineStyle), - /* name */ "FreestyleLineStyle", - /* name_plural */ "linestyles", - /* translation_context */ BLT_I18NCONTEXT_ID_FREESTYLELINESTYLE, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_LS, + /*id_filter*/ FILTER_ID_LS, + /*main_listbase_index*/ INDEX_ID_LS, + /*struct_size*/ sizeof(FreestyleLineStyle), + /*name*/ "FreestyleLineStyle", + /*name_plural*/ "linestyles", + /*translation_context*/ BLT_I18NCONTEXT_ID_FREESTYLELINESTYLE, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ linestyle_init_data, - /* copy_data */ linestyle_copy_data, - /* free_data */ linestyle_free_data, - /* make_local */ nullptr, - /* foreach_id */ linestyle_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ linestyle_init_data, + /*copy_data*/ linestyle_copy_data, + /*free_data*/ linestyle_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ linestyle_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ linestyle_blend_write, - /* blend_read_data */ linestyle_blend_read_data, - /* blend_read_lib */ linestyle_blend_read_lib, - /* blend_read_expand */ linestyle_blend_read_expand, + /*blend_write*/ linestyle_blend_write, + /*blend_read_data*/ linestyle_blend_read_data, + /*blend_read_lib*/ linestyle_blend_read_lib, + /*blend_read_expand*/ linestyle_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; static const char *modifier_name[LS_MODIFIER_NUM] = { diff --git a/source/blender/blenkernel/intern/main_idmap.c b/source/blender/blenkernel/intern/main_idmap.c index 24c1da782fe..b280c962a39 100644 --- a/source/blender/blenkernel/intern/main_idmap.c +++ b/source/blender/blenkernel/intern/main_idmap.c @@ -57,7 +57,7 @@ struct IDNameLib_Map { struct GSet *valid_id_pointers; int idmap_types; - /* For storage of keys for the TypeMap ghash, avoids many single allocs. */ + /* For storage of keys for the #TypeMap #GHash, avoids many single allocations. 
*/ BLI_mempool *type_maps_keys_pool; }; diff --git a/source/blender/blenkernel/intern/material.cc b/source/blender/blenkernel/intern/material.cc index 58478dce847..0fd3ec4f8cd 100644 --- a/source/blender/blenkernel/intern/material.cc +++ b/source/blender/blenkernel/intern/material.cc @@ -243,33 +243,33 @@ static void material_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_MA = { - /* id_code */ ID_MA, - /* id_filter */ FILTER_ID_MA, - /* main_listbase_index */ INDEX_ID_MA, - /* struct_size */ sizeof(Material), - /* name */ "Material", - /* name_plural */ "materials", - /* translation_context */ BLT_I18NCONTEXT_ID_MATERIAL, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_MA, + /*id_filter*/ FILTER_ID_MA, + /*main_listbase_index*/ INDEX_ID_MA, + /*struct_size*/ sizeof(Material), + /*name*/ "Material", + /*name_plural*/ "materials", + /*translation_context*/ BLT_I18NCONTEXT_ID_MATERIAL, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ material_init_data, - /* copy_data */ material_copy_data, - /* free_data */ material_free_data, - /* make_local */ nullptr, - /* foreach_id */ material_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ material_init_data, + /*copy_data*/ material_copy_data, + /*free_data*/ material_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ material_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ material_blend_write, - /* blend_read_data */ material_blend_read_data, - /* blend_read_lib */ material_blend_read_lib, - /* blend_read_expand */ material_blend_read_expand, + /*blend_write*/ material_blend_write, + /*blend_read_data*/ material_blend_read_data, + /*blend_read_lib*/ material_blend_read_lib, + /*blend_read_expand*/ material_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; void BKE_gpencil_material_attr_init(Material *ma) diff --git a/source/blender/blenkernel/intern/mball.cc b/source/blender/blenkernel/intern/mball.cc index 6b1394f65ab..c5025e51eb8 100644 --- a/source/blender/blenkernel/intern/mball.cc +++ b/source/blender/blenkernel/intern/mball.cc @@ -85,9 +85,6 @@ static void metaball_free_data(ID *id) MEM_SAFE_FREE(metaball->mat); BLI_freelistN(&metaball->elems); - if (metaball->disp.first) { - BKE_displist_free(&metaball->disp); - } } static void metaball_foreach_id(ID *id, LibraryForeachIDData *data) @@ -103,7 +100,6 @@ static void metaball_blend_write(BlendWriter *writer, ID *id, const void *id_add MetaBall *mb = (MetaBall *)id; /* Clean up, important in undo case to reduce false detection of changed datablocks. */ - BLI_listbase_clear(&mb->disp); mb->editelems = nullptr; /* Must always be cleared (meta's don't have their own edit-data). */ mb->needs_flush_to_id = 0; @@ -134,7 +130,6 @@ static void metaball_blend_read_data(BlendDataReader *reader, ID *id) BLO_read_list(reader, &(mb->elems)); - BLI_listbase_clear(&mb->disp); mb->editelems = nullptr; /* Must always be cleared (meta's don't have their own edit-data). 
*/ mb->needs_flush_to_id = 0; @@ -161,33 +156,33 @@ static void metaball_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_MB = { - /* id_code */ ID_MB, - /* id_filter */ FILTER_ID_MB, - /* main_listbase_index */ INDEX_ID_MB, - /* struct_size */ sizeof(MetaBall), - /* name */ "Metaball", - /* name_plural */ "metaballs", - /* translation_context */ BLT_I18NCONTEXT_ID_METABALL, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_MB, + /*id_filter*/ FILTER_ID_MB, + /*main_listbase_index*/ INDEX_ID_MB, + /*struct_size*/ sizeof(MetaBall), + /*name*/ "Metaball", + /*name_plural*/ "metaballs", + /*translation_context*/ BLT_I18NCONTEXT_ID_METABALL, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ metaball_init_data, - /* copy_data */ metaball_copy_data, - /* free_data */ metaball_free_data, - /* make_local */ nullptr, - /* foreach_id */ metaball_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ metaball_init_data, + /*copy_data*/ metaball_copy_data, + /*free_data*/ metaball_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ metaball_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ metaball_blend_write, - /* blend_read_data */ metaball_blend_read_data, - /* blend_read_lib */ metaball_blend_read_lib, - /* blend_read_expand */ metaball_blend_read_expand, + /*blend_write*/ metaball_blend_write, + /*blend_read_data*/ metaball_blend_read_data, + /*blend_read_lib*/ metaball_blend_read_lib, + /*blend_read_expand*/ metaball_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; /* Functions */ diff --git a/source/blender/blenkernel/intern/mesh.cc b/source/blender/blenkernel/intern/mesh.cc index df34b51ed76..4e5c4f77913 100644 --- a/source/blender/blenkernel/intern/mesh.cc +++ b/source/blender/blenkernel/intern/mesh.cc @@ -271,7 +271,6 @@ static void mesh_blend_write(BlendWriter *writer, ID *id, const void *id_address BKE_mesh_legacy_convert_selection_layers_to_flags(mesh); BKE_mesh_legacy_convert_material_indices_to_mpoly(mesh); BKE_mesh_legacy_bevel_weight_from_layers(mesh); - BKE_mesh_legacy_face_set_from_generic(mesh, poly_layers); BKE_mesh_legacy_edge_crease_from_layers(mesh); BKE_mesh_legacy_sharp_edges_to_flags(mesh); BKE_mesh_legacy_attribute_strings_to_flags(mesh); @@ -293,6 +292,7 @@ static void mesh_blend_write(BlendWriter *writer, ID *id, const void *id_address if (!BLO_write_is_undo(writer)) { BKE_mesh_legacy_convert_uvs_to_struct(mesh, temp_arrays_for_legacy_format, loop_layers); + BKE_mesh_legacy_face_set_from_generic(poly_layers); } } @@ -361,7 +361,7 @@ static void mesh_blend_read_data(BlendDataReader *reader, ID *id) BLO_read_data_address(reader, &mesh->active_color_attribute); BLO_read_data_address(reader, &mesh->default_color_attribute); - mesh->texflag &= ~ME_AUTOSPACE_EVALUATED; + mesh->texspace_flag &= ~ME_TEXSPACE_FLAG_AUTO_EVALUATED; mesh->edit_mesh = nullptr; mesh->runtime = new blender::bke::MeshRuntime(); @@ -409,33 +409,33 @@ static void mesh_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_ME = { - /* id_code */ ID_ME, - /* id_filter */ FILTER_ID_ME, - /* main_listbase_index */ INDEX_ID_ME, - /* struct_size */ sizeof(Mesh), - /* name */ "Mesh", - /* name_plural */ "meshes", 
- /* translation_context */ BLT_I18NCONTEXT_ID_MESH, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_ME, + /*id_filter*/ FILTER_ID_ME, + /*main_listbase_index*/ INDEX_ID_ME, + /*struct_size*/ sizeof(Mesh), + /*name*/ "Mesh", + /*name_plural*/ "meshes", + /*translation_context*/ BLT_I18NCONTEXT_ID_MESH, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ mesh_init_data, - /* copy_data */ mesh_copy_data, - /* free_data */ mesh_free_data, - /* make_local */ nullptr, - /* foreach_id */ mesh_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ mesh_foreach_path, - /* owner_pointer_get */ nullptr, + /*init_data*/ mesh_init_data, + /*copy_data*/ mesh_copy_data, + /*free_data*/ mesh_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ mesh_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ mesh_foreach_path, + /*owner_pointer_get*/ nullptr, - /* blend_write */ mesh_blend_write, - /* blend_read_data */ mesh_blend_read_data, - /* blend_read_lib */ mesh_blend_read_lib, - /* blend_read_expand */ mesh_read_expand, + /*blend_write*/ mesh_blend_write, + /*blend_read_data*/ mesh_blend_read_data, + /*blend_read_lib*/ mesh_blend_read_lib, + /*blend_read_expand*/ mesh_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; enum { @@ -1030,9 +1030,9 @@ void BKE_mesh_copy_parameters(Mesh *me_dst, const Mesh *me_src) me_dst->face_sets_color_default = me_src->face_sets_color_default; /* Copy texture space. */ - me_dst->texflag = me_src->texflag; - copy_v3_v3(me_dst->loc, me_src->loc); - copy_v3_v3(me_dst->size, me_src->size); + me_dst->texspace_flag = me_src->texspace_flag; + copy_v3_v3(me_dst->texspace_location, me_src->texspace_location); + copy_v3_v3(me_dst->texspace_size, me_src->texspace_size); me_dst->vertex_group_active_index = me_src->vertex_group_active_index; me_dst->attributes_active_index = me_src->attributes_active_index; @@ -1239,7 +1239,7 @@ BoundBox *BKE_mesh_boundbox_get(Object *ob) void BKE_mesh_texspace_calc(Mesh *me) { - if (me->texflag & ME_AUTOSPACE) { + if (me->texspace_flag & ME_TEXSPACE_FLAG_AUTO) { float min[3], max[3]; INIT_MINMAX(min, max); @@ -1248,75 +1248,79 @@ void BKE_mesh_texspace_calc(Mesh *me) max[0] = max[1] = max[2] = 1.0f; } - float loc[3], size[3]; - mid_v3_v3v3(loc, min, max); + float texspace_location[3], texspace_size[3]; + mid_v3_v3v3(texspace_location, min, max); - size[0] = (max[0] - min[0]) / 2.0f; - size[1] = (max[1] - min[1]) / 2.0f; - size[2] = (max[2] - min[2]) / 2.0f; + texspace_size[0] = (max[0] - min[0]) / 2.0f; + texspace_size[1] = (max[1] - min[1]) / 2.0f; + texspace_size[2] = (max[2] - min[2]) / 2.0f; for (int a = 0; a < 3; a++) { - if (size[a] == 0.0f) { - size[a] = 1.0f; + if (texspace_size[a] == 0.0f) { + texspace_size[a] = 1.0f; } - else if (size[a] > 0.0f && size[a] < 0.00001f) { - size[a] = 0.00001f; + else if (texspace_size[a] > 0.0f && texspace_size[a] < 0.00001f) { + texspace_size[a] = 0.00001f; } - else if (size[a] < 0.0f && size[a] > -0.00001f) { - size[a] = -0.00001f; + else if (texspace_size[a] < 0.0f && texspace_size[a] > -0.00001f) { + texspace_size[a] = -0.00001f; } } - copy_v3_v3(me->loc, loc); - copy_v3_v3(me->size, size); + copy_v3_v3(me->texspace_location, texspace_location); + copy_v3_v3(me->texspace_size, texspace_size); - me->texflag |= ME_AUTOSPACE_EVALUATED; + me->texspace_flag |= 
ME_TEXSPACE_FLAG_AUTO_EVALUATED; } } void BKE_mesh_texspace_ensure(Mesh *me) { - if ((me->texflag & ME_AUTOSPACE) && !(me->texflag & ME_AUTOSPACE_EVALUATED)) { + if ((me->texspace_flag & ME_TEXSPACE_FLAG_AUTO) && + !(me->texspace_flag & ME_TEXSPACE_FLAG_AUTO_EVALUATED)) { BKE_mesh_texspace_calc(me); } } -void BKE_mesh_texspace_get(Mesh *me, float r_loc[3], float r_size[3]) +void BKE_mesh_texspace_get(Mesh *me, float r_texspace_location[3], float r_texspace_size[3]) { BKE_mesh_texspace_ensure(me); - if (r_loc) { - copy_v3_v3(r_loc, me->loc); + if (r_texspace_location) { + copy_v3_v3(r_texspace_location, me->texspace_location); } - if (r_size) { - copy_v3_v3(r_size, me->size); + if (r_texspace_size) { + copy_v3_v3(r_texspace_size, me->texspace_size); } } -void BKE_mesh_texspace_get_reference(Mesh *me, char **r_texflag, float **r_loc, float **r_size) +void BKE_mesh_texspace_get_reference(Mesh *me, + char **r_texspace_flag, + float **r_texspace_location, + float **r_texspace_size) { BKE_mesh_texspace_ensure(me); - if (r_texflag != nullptr) { - *r_texflag = &me->texflag; + if (r_texspace_flag != nullptr) { + *r_texspace_flag = &me->texspace_flag; } - if (r_loc != nullptr) { - *r_loc = me->loc; + if (r_texspace_location != nullptr) { + *r_texspace_location = me->texspace_location; } - if (r_size != nullptr) { - *r_size = me->size; + if (r_texspace_size != nullptr) { + *r_texspace_size = me->texspace_size; } } void BKE_mesh_texspace_copy_from_object(Mesh *me, Object *ob) { - float *texloc, *texsize; - char *texflag; + float *texspace_location, *texspace_size; + char *texspace_flag; - if (BKE_object_obdata_texspace_get(ob, &texflag, &texloc, &texsize)) { - me->texflag = *texflag; - copy_v3_v3(me->loc, texloc); - copy_v3_v3(me->size, texsize); + if (BKE_object_obdata_texspace_get(ob, &texspace_flag, &texspace_location, &texspace_size)) { + me->texspace_flag = *texspace_flag; + copy_v3_v3(me->texspace_location, texspace_location); + copy_v3_v3(me->texspace_size, texspace_size); } } @@ -1340,22 +1344,22 @@ float (*BKE_mesh_orco_verts_get(Object *ob))[3] void BKE_mesh_orco_verts_transform(Mesh *me, float (*orco)[3], int totvert, int invert) { - float loc[3], size[3]; + float texspace_location[3], texspace_size[3]; - BKE_mesh_texspace_get(me->texcomesh ? me->texcomesh : me, loc, size); + BKE_mesh_texspace_get(me->texcomesh ? 
me->texcomesh : me, texspace_location, texspace_size); if (invert) { for (int a = 0; a < totvert; a++) { float *co = orco[a]; - madd_v3_v3v3v3(co, loc, co, size); + madd_v3_v3v3v3(co, texspace_location, co, texspace_size); } } else { for (int a = 0; a < totvert; a++) { float *co = orco[a]; - co[0] = (co[0] - loc[0]) / size[0]; - co[1] = (co[1] - loc[1]) / size[1]; - co[2] = (co[2] - loc[2]) / size[2]; + co[0] = (co[0] - texspace_location[0]) / texspace_size[0]; + co[1] = (co[1] - texspace_location[1]) / texspace_size[1]; + co[2] = (co[2] - texspace_location[2]) / texspace_size[2]; } } } @@ -1900,10 +1904,10 @@ void BKE_mesh_eval_geometry(Depsgraph *depsgraph, Mesh *mesh) } if (DEG_is_active(depsgraph)) { Mesh *mesh_orig = (Mesh *)DEG_get_original_id(&mesh->id); - if (mesh->texflag & ME_AUTOSPACE_EVALUATED) { - mesh_orig->texflag |= ME_AUTOSPACE_EVALUATED; - copy_v3_v3(mesh_orig->loc, mesh->loc); - copy_v3_v3(mesh_orig->size, mesh->size); + if (mesh->texspace_flag & ME_TEXSPACE_FLAG_AUTO_EVALUATED) { + mesh_orig->texspace_flag |= ME_TEXSPACE_FLAG_AUTO_EVALUATED; + copy_v3_v3(mesh_orig->texspace_location, mesh->texspace_location); + copy_v3_v3(mesh_orig->texspace_size, mesh->texspace_size); } } } diff --git a/source/blender/blenkernel/intern/mesh_convert.cc b/source/blender/blenkernel/intern/mesh_convert.cc index 4f74f070e81..3d868d243ae 100644 --- a/source/blender/blenkernel/intern/mesh_convert.cc +++ b/source/blender/blenkernel/intern/mesh_convert.cc @@ -401,9 +401,9 @@ static Mesh *mesh_nurbs_displist_to_mesh(const Curve *cu, const ListBase *dispba */ static void mesh_copy_texture_space_from_curve_type(const Curve *cu, Mesh *me) { - me->texflag = cu->texflag & ~CU_AUTOSPACE; - copy_v3_v3(me->loc, cu->loc); - copy_v3_v3(me->size, cu->size); + me->texspace_flag = cu->texspace_flag & ~CU_TEXSPACE_FLAG_AUTO; + copy_v3_v3(me->texspace_location, cu->texspace_location); + copy_v3_v3(me->texspace_size, cu->texspace_size); BKE_mesh_texspace_calc(me); } @@ -627,29 +627,11 @@ void BKE_mesh_to_curve(Main *bmain, Depsgraph *depsgraph, Scene * /*scene*/, Obj } } -void BKE_pointcloud_from_mesh(Mesh *me, PointCloud *pointcloud) +void BKE_pointcloud_from_mesh(const Mesh *me, PointCloud *pointcloud) { - using namespace blender; - - BLI_assert(me != nullptr); - /* The pointcloud should only contain the position attribute, otherwise more attributes would - * need to be initialized below. */ - BLI_assert(pointcloud->attributes().all_ids().size() == 1); - CustomData_realloc(&pointcloud->pdata, pointcloud->totpoint, me->totvert); + CustomData_free(&pointcloud->pdata, pointcloud->totpoint); pointcloud->totpoint = me->totvert; - - /* Copy over all attributes. 
*/ CustomData_merge(&me->vdata, &pointcloud->pdata, CD_MASK_PROP_ALL, CD_DUPLICATE, me->totvert); - - bke::AttributeAccessor mesh_attributes = me->attributes(); - bke::MutableAttributeAccessor point_attributes = pointcloud->attributes_for_write(); - - const VArray vert_positions = mesh_attributes.lookup_or_default( - "position", ATTR_DOMAIN_POINT, float3(0)); - bke::SpanAttributeWriter point_positions = - point_attributes.lookup_or_add_for_write_only_span("position", ATTR_DOMAIN_POINT); - vert_positions.materialize(point_positions.span); - point_positions.finish(); } void BKE_mesh_to_pointcloud(Main *bmain, Depsgraph *depsgraph, Scene * /*scene*/, Object *ob) @@ -675,10 +657,7 @@ void BKE_mesh_to_pointcloud(Main *bmain, Depsgraph *depsgraph, Scene * /*scene*/ void BKE_mesh_from_pointcloud(const PointCloud *pointcloud, Mesh *me) { - BLI_assert(pointcloud != nullptr); - me->totvert = pointcloud->totpoint; - CustomData_merge( &pointcloud->pdata, &me->vdata, CD_MASK_PROP_ALL, CD_DUPLICATE, pointcloud->totpoint); } @@ -823,8 +802,7 @@ static Mesh *mesh_new_from_evaluated_curve_type_object(const Object *evaluated_o } if (const Curves *curves = get_evaluated_curves_from_object(evaluated_object)) { const blender::bke::AnonymousAttributePropagationInfo propagation_info; - return blender::bke::curve_to_wire_mesh(blender::bke::CurvesGeometry::wrap(curves->geometry), - propagation_info); + return blender::bke::curve_to_wire_mesh(curves->geometry.wrap(), propagation_info); } return nullptr; } diff --git a/source/blender/blenkernel/intern/mesh_legacy_convert.cc b/source/blender/blenkernel/intern/mesh_legacy_convert.cc index 7673fd8d373..0447da090e8 100644 --- a/source/blender/blenkernel/intern/mesh_legacy_convert.cc +++ b/source/blender/blenkernel/intern/mesh_legacy_convert.cc @@ -1163,17 +1163,17 @@ static int mesh_tessface_calc(Mesh &mesh, CustomData_add_layer(fdata, CD_ORIGINDEX, CD_ASSIGN, mface_to_poly_map, totface); add_mface_layers(mesh, fdata, ldata, totface); - /* NOTE: quad detection issue - fourth vertidx vs fourth loopidx: + /* NOTE: quad detection issue - fourth vertex-index vs fourth loop-index: * Polygons take care of their loops ordering, hence not of their vertices ordering. * Currently, our tfaces' fourth vertex index might be 0 even for a quad. * However, we know our fourth loop index is never 0 for quads * (because they are sorted for polygons, and our quads are still mere copies of their polygons). - * So we pass nullptr as MFace pointer, and #mesh_loops_to_tessdata + * So we pass nullptr as #MFace pointer, and #mesh_loops_to_tessdata * will use the fourth loop index as quad test. */ mesh_loops_to_tessdata(fdata, ldata, nullptr, mface_to_poly_map, lindices, totface); - /* NOTE: quad detection issue - fourth vertidx vs fourth loopidx: - * ...However, most TFace code uses 'MFace->v4 == 0' test to check whether it is a tri or quad. + /* NOTE: quad detection issue - fourth vert-index vs fourth loop-index: + * ...However, most #TFace code uses `MFace->v4 == 0` test to check whether it is a tri or quad. * BKE_mesh_mface_index_validate() will check this and rotate the tessellated face if needed. 
*/ #ifdef USE_TESSFACE_QUADS @@ -1224,28 +1224,47 @@ void BKE_mesh_tessface_ensure(struct Mesh *mesh) /** \name Face Set Conversion * \{ */ -void BKE_mesh_legacy_face_set_from_generic(Mesh *mesh, - blender::MutableSpan poly_layers) +void BKE_mesh_legacy_face_set_from_generic(blender::MutableSpan poly_layers) { using namespace blender; + bool changed = false; for (CustomDataLayer &layer : poly_layers) { if (StringRef(layer.name) == ".sculpt_face_set") { layer.type = CD_SCULPT_FACE_SETS; + layer.name[0] = '\0'; + changed = true; + break; } } - CustomData_update_typemap(&mesh->pdata); + if (!changed) { + return; + } + /* #CustomData expects the layers to be sorted in increasing order based on type. */ + std::stable_sort( + poly_layers.begin(), + poly_layers.end(), + [](const CustomDataLayer &a, const CustomDataLayer &b) { return a.type < b.type; }); } void BKE_mesh_legacy_face_set_to_generic(Mesh *mesh) { using namespace blender; - for (CustomDataLayer &layer : MutableSpan(mesh->pdata.layers, mesh->pdata.totlayer)) { - if (layer.type == CD_SCULPT_FACE_SETS) { - BLI_strncpy(layer.name, ".sculpt_face_set", sizeof(layer.name)); - layer.type = CD_PROP_INT32; + if (mesh->attributes().contains(".sculpt_face_set")) { + return; + } + void *faceset_data = nullptr; + for (const int i : IndexRange(mesh->pdata.totlayer)) { + if (mesh->pdata.layers[i].type == CD_SCULPT_FACE_SETS) { + faceset_data = mesh->pdata.layers[i].data; + mesh->pdata.layers[i].data = nullptr; + CustomData_free_layer(&mesh->pdata, CD_SCULPT_FACE_SETS, mesh->totpoly, i); + break; } } - CustomData_update_typemap(&mesh->pdata); + if (faceset_data != nullptr) { + CustomData_add_layer_named( + &mesh->pdata, CD_PROP_INT32, CD_ASSIGN, faceset_data, mesh->totpoly, ".sculpt_face_set"); + } } /** \} */ @@ -1290,21 +1309,25 @@ void BKE_mesh_legacy_bevel_weight_from_layers(Mesh *mesh) void BKE_mesh_legacy_bevel_weight_to_layers(Mesh *mesh) { using namespace blender; - const Span verts(mesh->mvert, mesh->totvert); - if (mesh->cd_flag & ME_CDFLAG_VERT_BWEIGHT) { - float *weights = static_cast( - CustomData_add_layer(&mesh->vdata, CD_BWEIGHT, CD_CONSTRUCT, nullptr, verts.size())); - for (const int i : verts.index_range()) { - weights[i] = verts[i].bweight_legacy / 255.0f; + if (mesh->mvert && !CustomData_has_layer(&mesh->vdata, CD_BWEIGHT)) { + const Span verts(mesh->mvert, mesh->totvert); + if (mesh->cd_flag & ME_CDFLAG_VERT_BWEIGHT) { + float *weights = static_cast( + CustomData_add_layer(&mesh->vdata, CD_BWEIGHT, CD_CONSTRUCT, nullptr, verts.size())); + for (const int i : verts.index_range()) { + weights[i] = verts[i].bweight_legacy / 255.0f; + } } } const Span edges = mesh->edges(); - if (mesh->cd_flag & ME_CDFLAG_EDGE_BWEIGHT) { - float *weights = static_cast( - CustomData_add_layer(&mesh->edata, CD_BWEIGHT, CD_CONSTRUCT, nullptr, edges.size())); - for (const int i : edges.index_range()) { - weights[i] = edges[i].bweight_legacy / 255.0f; + if (!CustomData_has_layer(&mesh->edata, CD_BWEIGHT)) { + if (mesh->cd_flag & ME_CDFLAG_EDGE_BWEIGHT) { + float *weights = static_cast( + CustomData_add_layer(&mesh->edata, CD_BWEIGHT, CD_CONSTRUCT, nullptr, edges.size())); + for (const int i : edges.index_range()) { + weights[i] = edges[i].bweight_legacy / 255.0f; + } } } } @@ -1337,6 +1360,9 @@ void BKE_mesh_legacy_edge_crease_from_layers(Mesh *mesh) void BKE_mesh_legacy_edge_crease_to_layers(Mesh *mesh) { using namespace blender; + if (CustomData_has_layer(&mesh->edata, CD_CREASE)) { + return; + } const Span edges = mesh->edges(); if (mesh->cd_flag & 
ME_CDFLAG_EDGE_CREASE) { float *creases = static_cast( @@ -1376,6 +1402,9 @@ void BKE_mesh_legacy_sharp_edges_from_flags(Mesh *mesh) using namespace blender::bke; const Span edges = mesh->edges(); MutableAttributeAccessor attributes = mesh->attributes_for_write(); + if (attributes.contains("sharp_edge")) { + return; + } if (std::any_of( edges.begin(), edges.end(), [](const MEdge &edge) { return edge.flag & ME_SHARP; })) { SpanAttributeWriter sharp_edges = attributes.lookup_or_add_for_write_only_span( @@ -1434,7 +1463,10 @@ void BKE_mesh_legacy_convert_flags_to_hide_layers(Mesh *mesh) using namespace blender; using namespace blender::bke; MutableAttributeAccessor attributes = mesh->attributes_for_write(); - + if (!mesh->mvert || attributes.contains(".hide_vert") || attributes.contains(".hide_edge") || + attributes.contains(".hide_poly")) { + return; + } const Span verts(mesh->mvert, mesh->totvert); if (std::any_of(verts.begin(), verts.end(), [](const MVert &vert) { return vert.flag_legacy & ME_HIDE; @@ -1502,6 +1534,9 @@ void BKE_mesh_legacy_convert_mpoly_to_material_indices(Mesh *mesh) using namespace blender; using namespace blender::bke; MutableAttributeAccessor attributes = mesh->attributes_for_write(); + if (attributes.contains("material_index")) { + return; + } const Span polys = mesh->polys(); if (std::any_of( polys.begin(), polys.end(), [](const MPoly &poly) { return poly.mat_nr_legacy != 0; })) { @@ -1589,6 +1624,9 @@ void BKE_mesh_legacy_convert_uvs_to_generic(Mesh *mesh) { using namespace blender; using namespace blender::bke; + if (!CustomData_has_layer(&mesh->ldata, CD_MLOOPUV)) { + return; + } /* Store layer names since they will be removed, used to set the active status of new layers. * Use intermediate #StringRef because the names can be null. 
*/ @@ -1597,9 +1635,9 @@ void BKE_mesh_legacy_convert_uvs_to_generic(Mesh *mesh) const std::string default_uv = StringRef( CustomData_get_render_layer_name(&mesh->ldata, CD_MLOOPUV)); - Set uv_layers_to_convert; + Vector uv_layers_to_convert; for (const int uv_layer_i : IndexRange(CustomData_number_of_layers(&mesh->ldata, CD_MLOOPUV))) { - uv_layers_to_convert.add_as(CustomData_get_layer_name(&mesh->ldata, CD_MLOOPUV, uv_layer_i)); + uv_layers_to_convert.append(CustomData_get_layer_name(&mesh->ldata, CD_MLOOPUV, uv_layer_i)); } for (const StringRefNull name : uv_layers_to_convert) { @@ -1737,6 +1775,10 @@ void BKE_mesh_legacy_convert_flags_to_selection_layers(Mesh *mesh) using namespace blender; using namespace blender::bke; MutableAttributeAccessor attributes = mesh->attributes_for_write(); + if (!mesh->mvert || attributes.contains(".select_vert") || attributes.contains(".select_edge") || + attributes.contains(".select_poly")) { + return; + } const Span verts(mesh->mvert, mesh->totvert); if (std::any_of(verts.begin(), verts.end(), [](const MVert &vert) { @@ -1840,6 +1882,9 @@ void BKE_mesh_legacy_convert_verts_to_positions(Mesh *mesh) { using namespace blender; using namespace blender::bke; + if (!mesh->mvert || CustomData_get_layer_named(&mesh->vdata, CD_PROP_FLOAT3, "position")) { + return; + } const Span verts(static_cast(CustomData_get_layer(&mesh->vdata, CD_MVERT)), mesh->totvert); diff --git a/source/blender/blenkernel/intern/mesh_mapping.cc b/source/blender/blenkernel/intern/mesh_mapping.cc index 938382d9204..b469f54ea8b 100644 --- a/source/blender/blenkernel/intern/mesh_mapping.cc +++ b/source/blender/blenkernel/intern/mesh_mapping.cc @@ -33,7 +33,6 @@ /** \name Mesh Connectivity Mapping * \{ */ -/* ngon version wip, based on BM_uv_vert_map_create */ UvVertMap *BKE_mesh_uv_vert_map_create(const MPoly *mpoly, const bool *hide_poly, const bool *select_poly, @@ -45,13 +44,14 @@ UvVertMap *BKE_mesh_uv_vert_map_create(const MPoly *mpoly, const bool selected, const bool use_winding) { + /* NOTE: N-gon version WIP, based on #BM_uv_vert_map_create. */ + UvVertMap *vmap; UvMapVert *buf; const MPoly *mp; uint a; int i, totuv, nverts; - bool *winding = nullptr; BLI_buffer_declare_static(vec2f, tf_uv_buf, BLI_BUFFER_NOP, 32); totuv = 0; @@ -71,15 +71,17 @@ UvVertMap *BKE_mesh_uv_vert_map_create(const MPoly *mpoly, vmap = (UvVertMap *)MEM_callocN(sizeof(*vmap), "UvVertMap"); buf = vmap->buf = (UvMapVert *)MEM_callocN(sizeof(*vmap->buf) * size_t(totuv), "UvMapVert"); vmap->vert = (UvMapVert **)MEM_callocN(sizeof(*vmap->vert) * totvert, "UvMapVert*"); - if (use_winding) { - winding = static_cast(MEM_callocN(sizeof(*winding) * totpoly, "winding")); - } if (!vmap->vert || !vmap->buf) { BKE_mesh_uv_vert_map_free(vmap); return nullptr; } + bool *winding = nullptr; + if (use_winding) { + winding = static_cast(MEM_callocN(sizeof(*winding) * totpoly, "winding")); + } + mp = mpoly; for (a = 0; a < totpoly; a++, mp++) { if (!selected || (!(hide_poly && hide_poly[a]) && (select_poly && select_poly[a]))) { diff --git a/source/blender/blenkernel/intern/mesh_mirror.cc b/source/blender/blenkernel/intern/mesh_mirror.cc index 45433b2dc1a..8e99d814801 100644 --- a/source/blender/blenkernel/intern/mesh_mirror.cc +++ b/source/blender/blenkernel/intern/mesh_mirror.cc @@ -206,7 +206,7 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd, /* Subdivision-surface for eg won't have mesh data in the custom-data arrays. * Now add position/#MEdge/#MPoly layers. 
*/ - if (BKE_mesh_vert_positions(mesh) != NULL) { + if (BKE_mesh_vert_positions(mesh) != nullptr) { memcpy(BKE_mesh_vert_positions_for_write(result), BKE_mesh_vert_positions(mesh), sizeof(float[3]) * mesh->totvert); diff --git a/source/blender/blenkernel/intern/mesh_remap.cc b/source/blender/blenkernel/intern/mesh_remap.cc index 2ecf06ab108..b6f9cb534aa 100644 --- a/source/blender/blenkernel/intern/mesh_remap.cc +++ b/source/blender/blenkernel/intern/mesh_remap.cc @@ -448,9 +448,9 @@ struct IslandResult { * This only concerns loops, currently (because of islands), and 'sampled' edges/polys norproj. */ -/* At most n raycasts per 'real' ray. */ +/** At most N ray-casts per 'real' ray. */ #define MREMAP_RAYCAST_APPROXIMATE_NR 3 -/* Each approximated raycasts will have n times bigger radius than previous one. */ +/** Each approximated ray-casts will have n times bigger radius than previous one. */ #define MREMAP_RAYCAST_APPROXIMATE_FAC 5.0f /* min 16 rays/face, max 400. */ diff --git a/source/blender/blenkernel/intern/mesh_runtime.cc b/source/blender/blenkernel/intern/mesh_runtime.cc index b3e416c2b7b..9509eea39e0 100644 --- a/source/blender/blenkernel/intern/mesh_runtime.cc +++ b/source/blender/blenkernel/intern/mesh_runtime.cc @@ -341,22 +341,22 @@ bool BKE_mesh_runtime_is_valid(Mesh *me_eval) do_fixes, &changed); - is_valid &= BKE_mesh_validate_arrays( - me_eval, - reinterpret_cast(positions.data()), - positions.size(), - edges.data(), - edges.size(), - static_cast(CustomData_get_layer(&me_eval->fdata, CD_MFACE)), - me_eval->totface, - loops.data(), - loops.size(), - polys.data(), - polys.size(), - me_eval->deform_verts_for_write().data(), - do_verbose, - do_fixes, - &changed); + is_valid &= BKE_mesh_validate_arrays(me_eval, + reinterpret_cast(positions.data()), + positions.size(), + edges.data(), + edges.size(), + static_cast(CustomData_get_layer_for_write( + &me_eval->fdata, CD_MFACE, me_eval->totface)), + me_eval->totface, + loops.data(), + loops.size(), + polys.data(), + polys.size(), + me_eval->deform_verts_for_write().data(), + do_verbose, + do_fixes, + &changed); BLI_assert(changed == false); diff --git a/source/blender/blenkernel/intern/multires.cc b/source/blender/blenkernel/intern/multires.cc index 1f08764a679..565d9d2fe5a 100644 --- a/source/blender/blenkernel/intern/multires.cc +++ b/source/blender/blenkernel/intern/multires.cc @@ -52,7 +52,7 @@ #include "bmesh.h" #include "multires_inline.h" -#include "multires_reshape.h" +#include "multires_reshape.hh" #include #include @@ -482,7 +482,7 @@ void multires_force_sculpt_rebuild(Object *object) } BKE_pbvh_pmap_release(ss->pmap); - ss->pmap = NULL; + ss->pmap = nullptr; } void multires_force_external_reload(Object *object) @@ -761,7 +761,7 @@ static DerivedMesh *multires_dm_create_local(Scene *scene, bool alloc_paint_mask, MultiresFlags flags) { - MultiresModifierData mmd = {{nullptr}}; + MultiresModifierData mmd{}; mmd.lvl = lvl; mmd.sculptlvl = lvl; @@ -884,7 +884,7 @@ extern "C" Object *multires_dump_grids_bmesh(Object *bmob, BMesh *bm) { if (!CustomData_has_layer(&bm->ldata, CD_MDISPS)) { printf("multires_dump_grids_bmesh: error: no multires grids\n"); - return NULL; + return nullptr; } bool spaceset = false; @@ -917,11 +917,11 @@ extern "C" Object *multires_dump_grids_bmesh(Object *bmob, BMesh *bm) DEG_id_type_tag(bmain, ID_OB); DEG_relations_tag_update(bmain); - if (ob->data != NULL) { + if (ob->data != nullptr) { DEG_id_tag_update_ex(bmain, (ID *)ob->data, ID_RECALC_EDITORS); } - ob->modifiers.first = ob->modifiers.last = 
NULL; + ob->modifiers.first = ob->modifiers.last = nullptr; zero_v3(ob->loc); printf("users: %d\n", ob->id.us); @@ -943,7 +943,7 @@ extern "C" Object *multires_dump_grids_bmesh(Object *bmob, BMesh *bm) if (!dimen) { printf("multires_dump_grids_bmesh: error: corrupted multires data\n"); - return NULL; + return nullptr; } int totvert = bm->totloop * dimen * dimen; @@ -1113,7 +1113,7 @@ static void multires_bmesh_space_set_cb(void *__restrict userdata, S = 0; do { MDisps *mdisp = (MDisps *)BM_ELEM_CD_GET_VOID_P(l, cd_mdisps_off); - float(*dispgrid)[3] = NULL; + float(*dispgrid)[3] = nullptr; dispgrid = mdisp->disps; @@ -1198,10 +1198,10 @@ void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode) } // get multires settings - MultiresModifierData *mmd = bm->haveMultiResSettings ? &bm->multires : NULL; + MultiresModifierData *mmd = bm->haveMultiResSettings ? &bm->multires : nullptr; if (!mmd && ob) { - mmd = get_multires_modifier(NULL, ob, true); + mmd = get_multires_modifier(nullptr, ob, true); } if (!mmd || !CustomData_has_layer(&bm->ldata, CD_MDISPS)) { @@ -1227,19 +1227,19 @@ void BKE_multires_bmesh_space_set(Object *ob, BMesh *bm, int mode) SubdivSettings settings2; // copy the settings and then set subdivision level to max - MultiresModifierData mmdcpy = *mmd; + MultiresModifierData mmdcpy = blender::dna::shallow_copy(*mmd); mmdcpy.lvl = mmdcpy.sculptlvl = mmdcpy.renderlvl = mmdcpy.totlvl; // set up subdivision surface BKE_multires_subdiv_settings_init(&settings2, &mmdcpy); Subdiv *sd = BKE_subdiv_new_from_mesh(&settings2, me); - BKE_subdiv_eval_begin_from_mesh(sd, me, NULL, SUBDIV_EVALUATOR_TYPE_CPU, NULL); + BKE_subdiv_eval_begin_from_mesh(sd, me, nullptr, SUBDIV_EVALUATOR_TYPE_CPU, nullptr); - // create a fake object with .sculpt set to NULL + // create a fake object with .sculpt set to nullptr Object fakeob; if (ob) { memcpy(static_cast(&fakeob), static_cast(ob), sizeof(*ob)); - fakeob.sculpt = NULL; + fakeob.sculpt = nullptr; } else { memset(&fakeob, 0, sizeof(fakeob)); diff --git a/source/blender/blenkernel/intern/multires_reshape.c b/source/blender/blenkernel/intern/multires_reshape.cc similarity index 83% rename from source/blender/blenkernel/intern/multires_reshape.c rename to source/blender/blenkernel/intern/multires_reshape.cc index 73e7c4bb360..ba7f20f38dd 100644 --- a/source/blender/blenkernel/intern/multires_reshape.c +++ b/source/blender/blenkernel/intern/multires_reshape.cc @@ -24,15 +24,15 @@ #include "DEG_depsgraph_query.h" -#include "multires_reshape.h" +#include "multires_reshape.hh" /* -------------------------------------------------------------------- */ /** \name Reshape from object * \{ */ -bool multiresModifier_reshapeFromVertcos(struct Depsgraph *depsgraph, - struct Object *object, - struct MultiresModifierData *mmd, +bool multiresModifier_reshapeFromVertcos(Depsgraph *depsgraph, + Object *object, + MultiresModifierData *mmd, const float (*vert_coords)[3], const int num_vert_coords) { @@ -41,7 +41,7 @@ bool multiresModifier_reshapeFromVertcos(struct Depsgraph *depsgraph, return false; } multires_reshape_store_original_grids(&reshape_context); - multires_reshape_ensure_grids(object->data, reshape_context.top.level); + multires_reshape_ensure_grids(static_cast(object->data), reshape_context.top.level); if (!multires_reshape_assign_final_coords_from_vertcos( &reshape_context, vert_coords, num_vert_coords)) { multires_reshape_context_free(&reshape_context); @@ -53,13 +53,13 @@ bool multiresModifier_reshapeFromVertcos(struct Depsgraph *depsgraph, return 
true; } -bool multiresModifier_reshapeFromObject(struct Depsgraph *depsgraph, - struct MultiresModifierData *mmd, - struct Object *dst, - struct Object *src) +bool multiresModifier_reshapeFromObject(Depsgraph *depsgraph, + MultiresModifierData *mmd, + Object *dst, + Object *src) { - struct Scene *scene_eval = DEG_get_evaluated_scene(depsgraph); - struct Object *src_eval = DEG_get_evaluated_object(depsgraph, src); + Scene *scene_eval = DEG_get_evaluated_scene(depsgraph); + Object *src_eval = DEG_get_evaluated_object(depsgraph, src); Mesh *src_mesh_eval = mesh_get_eval_final(depsgraph, scene_eval, src_eval, &CD_MASK_BAREMESH); int num_deformed_verts; @@ -79,12 +79,12 @@ bool multiresModifier_reshapeFromObject(struct Depsgraph *depsgraph, /** \name Reshape from modifier * \{ */ -bool multiresModifier_reshapeFromDeformModifier(struct Depsgraph *depsgraph, - struct Object *object, - struct MultiresModifierData *mmd, - struct ModifierData *deform_md) +bool multiresModifier_reshapeFromDeformModifier(Depsgraph *depsgraph, + Object *object, + MultiresModifierData *mmd, + ModifierData *deform_md) { - MultiresModifierData highest_mmd = *mmd; + MultiresModifierData highest_mmd = blender::dna::shallow_copy(*mmd); highest_mmd.sculptlvl = highest_mmd.totlvl; highest_mmd.lvl = highest_mmd.totlvl; highest_mmd.renderlvl = highest_mmd.totlvl; @@ -96,14 +96,14 @@ bool multiresModifier_reshapeFromDeformModifier(struct Depsgraph *depsgraph, float(*deformed_verts)[3] = BKE_mesh_vert_coords_alloc(multires_mesh, &num_deformed_verts); /* Apply deformation modifier on the multires, */ - const ModifierEvalContext modifier_ctx = { - .depsgraph = depsgraph, - .object = object, - .flag = MOD_APPLY_USECACHE | MOD_APPLY_IGNORE_SIMPLIFY, - }; + ModifierEvalContext modifier_ctx{}; + modifier_ctx.depsgraph = depsgraph; + modifier_ctx.object = object; + modifier_ctx.flag = MOD_APPLY_USECACHE | MOD_APPLY_IGNORE_SIMPLIFY; + BKE_modifier_deform_verts( deform_md, &modifier_ctx, multires_mesh, deformed_verts, multires_mesh->totvert); - BKE_id_free(NULL, multires_mesh); + BKE_id_free(nullptr, multires_mesh); /* Reshaping */ bool result = multiresModifier_reshapeFromVertcos( @@ -121,9 +121,7 @@ bool multiresModifier_reshapeFromDeformModifier(struct Depsgraph *depsgraph, /** \name Reshape from grids * \{ */ -bool multiresModifier_reshapeFromCCG(const int tot_level, - Mesh *coarse_mesh, - struct SubdivCCG *subdiv_ccg) +bool multiresModifier_reshapeFromCCG(const int tot_level, Mesh *coarse_mesh, SubdivCCG *subdiv_ccg) { MultiresReshapeContext reshape_context; if (!multires_reshape_context_create_from_ccg( @@ -159,8 +157,8 @@ void multiresModifier_subdivide(Object *object, multiresModifier_subdivide_to_level(object, mmd, top_level, mode); } -void multiresModifier_subdivide_to_level(struct Object *object, - struct MultiresModifierData *mmd, +void multiresModifier_subdivide_to_level(Object *object, + MultiresModifierData *mmd, const int top_level, const eMultiresSubdivideModeType mode) { @@ -168,7 +166,7 @@ void multiresModifier_subdivide_to_level(struct Object *object, return; } - Mesh *coarse_mesh = object->data; + Mesh *coarse_mesh = static_cast(object->data); if (coarse_mesh->totloop == 0) { /* If there are no loops in the mesh implies there is no CD_MDISPS as well. So can early output * from here as there is nothing to subdivide. 
*/ @@ -182,7 +180,7 @@ void multiresModifier_subdivide_to_level(struct Object *object, const bool has_mdisps = CustomData_has_layer(&coarse_mesh->ldata, CD_MDISPS); if (!has_mdisps) { CustomData_add_layer( - &coarse_mesh->ldata, CD_MDISPS, CD_SET_DEFAULT, NULL, coarse_mesh->totloop); + &coarse_mesh->ldata, CD_MDISPS, CD_SET_DEFAULT, nullptr, coarse_mesh->totloop); } /* NOTE: Subdivision happens from the top level of the existing multires modifier. If it is set @@ -238,9 +236,7 @@ void multiresModifier_subdivide_to_level(struct Object *object, /** \name Apply base * \{ */ -void multiresModifier_base_apply(struct Depsgraph *depsgraph, - Object *object, - MultiresModifierData *mmd) +void multiresModifier_base_apply(Depsgraph *depsgraph, Object *object, MultiresModifierData *mmd) { multires_force_sculpt_rebuild(object); diff --git a/source/blender/blenkernel/intern/multires_reshape.h b/source/blender/blenkernel/intern/multires_reshape.hh similarity index 85% rename from source/blender/blenkernel/intern/multires_reshape.h rename to source/blender/blenkernel/intern/multires_reshape.hh index 1aa20cb7f48..8e112f18d7b 100644 --- a/source/blender/blenkernel/intern/multires_reshape.h +++ b/source/blender/blenkernel/intern/multires_reshape.hh @@ -23,26 +23,26 @@ struct Object; struct Subdiv; struct SubdivCCG; -typedef struct MultiresReshapeContext { +struct MultiresReshapeContext { /* NOTE: Only available when context is initialized from object. */ - struct Depsgraph *depsgraph; - struct Object *object; + Depsgraph *depsgraph; + Object *object; - struct MultiresModifierData *mmd; + MultiresModifierData *mmd; /* Base mesh from original object. * NOTE: Does NOT include any leading modifiers in it. */ - struct Mesh *base_mesh; + Mesh *base_mesh; const float (*base_positions)[3]; - const struct MEdge *base_edges; - const struct MPoly *base_polys; - const struct MLoop *base_loops; + const MEdge *base_edges; + const MPoly *base_polys; + const MLoop *base_loops; /* Subdivision surface created for multires modifier. * * The coarse mesh of this subdivision surface is a base mesh with all deformation modifiers * leading multires applied on it. */ - struct Subdiv *subdiv; + Subdiv *subdiv; bool need_free_subdiv; struct { @@ -65,8 +65,8 @@ typedef struct MultiresReshapeContext { struct { /* Copy of original displacement and painting masks. */ - struct MDisps *mdisps; - struct GridPaintMask *grid_paint_masks; + MDisps *mdisps; + GridPaintMask *grid_paint_masks; } orig; /* Number of grids which are required for base_mesh. */ @@ -74,8 +74,8 @@ typedef struct MultiresReshapeContext { /* Destination displacement and mask. * Points to a custom data on a destination mesh. */ - struct MDisps *mdisps; - struct GridPaintMask *grid_paint_masks; + MDisps *mdisps; + GridPaintMask *grid_paint_masks; /* Indexed by face index, gives first grid index of the face. */ int *face_start_grid_index; @@ -102,38 +102,38 @@ typedef struct MultiresReshapeContext { const float *cd_vertex_crease; /* Edge crease custom data layer, null if none is present. */ const float *cd_edge_crease; -} MultiresReshapeContext; +}; /** * Coordinate which identifies element of a grid. * This is directly related on how #CD_MDISPS stores displacement. */ -typedef struct GridCoord { +struct GridCoord { int grid_index; float u, v; -} GridCoord; +}; /** * Coordinate within ptex, which is what OpenSubdiv API operates on. 
*/ -typedef struct PTexCoord { +struct PTexCoord { int ptex_face_index; float u, v; -} PTexCoord; +}; /** * Element of a grid data stored in the destination mesh. * This is where reshaped coordinates and mask values will be written to. */ -typedef struct ReshapeGridElement { +struct ReshapeGridElement { float *displacement; float *mask; -} ReshapeGridElement; +}; -typedef struct ReshapeConstGridElement { +struct ReshapeConstGridElement { float displacement[3]; float mask; -} ReshapeConstGridElement; +}; /* -------------------------------------------------------------------- * Construct/destruct reshape context. @@ -143,9 +143,9 @@ typedef struct ReshapeConstGridElement { * Create subdivision surface descriptor which is configured for surface evaluation at a given * multi-res modifier. */ -struct Subdiv *multires_reshape_create_subdiv(struct Depsgraph *depsgraph, - struct Object *object, - const struct MultiresModifierData *mmd); +Subdiv *multires_reshape_create_subdiv(Depsgraph *depsgraph, + Object *object, + const MultiresModifierData *mmd); /** * \note Initialized base mesh to object's mesh, the Subdivision is created from the deformed @@ -153,29 +153,29 @@ struct Subdiv *multires_reshape_create_subdiv(struct Depsgraph *depsgraph, * then Subdivision is created from base mesh (without any deformation applied). */ bool multires_reshape_context_create_from_object(MultiresReshapeContext *reshape_context, - struct Depsgraph *depsgraph, - struct Object *object, - struct MultiresModifierData *mmd); + Depsgraph *depsgraph, + Object *object, + MultiresModifierData *mmd); bool multires_reshape_context_create_from_base_mesh(MultiresReshapeContext *reshape_context, - struct Depsgraph *depsgraph, - struct Object *object, - struct MultiresModifierData *mmd); + Depsgraph *depsgraph, + Object *object, + MultiresModifierData *mmd); bool multires_reshape_context_create_from_ccg(MultiresReshapeContext *reshape_context, - struct SubdivCCG *subdiv_ccg, - struct Mesh *base_mesh, + SubdivCCG *subdiv_ccg, + Mesh *base_mesh, int top_level); bool multires_reshape_context_create_from_modifier(MultiresReshapeContext *reshape_context, - struct Object *object, - struct MultiresModifierData *mmd, + Object *object, + MultiresModifierData *mmd, int top_level); bool multires_reshape_context_create_from_subdiv(MultiresReshapeContext *reshape_context, - struct Object *object, - struct MultiresModifierData *mmd, - struct Subdiv *subdiv, + Object *object, + MultiresModifierData *mmd, + Subdiv *subdiv, int top_level); void multires_reshape_free_original_grids(MultiresReshapeContext *reshape_context); @@ -262,7 +262,7 @@ void multires_reshape_evaluate_limit_at_grid(const MultiresReshapeContext *resha /** * Make sure custom data is allocated for the given level. */ -void multires_reshape_ensure_grids(struct Mesh *mesh, int level); +void multires_reshape_ensure_grids(Mesh *mesh, int level); /* -------------------------------------------------------------------- * Functions specific to reshaping from a set of vertices in a object position. @@ -295,7 +295,7 @@ bool multires_reshape_assign_final_coords_from_vertcos( * \return truth if all coordinates have been updated. */ bool multires_reshape_assign_final_coords_from_ccg(const MultiresReshapeContext *reshape_context, - struct SubdivCCG *subdiv_ccg); + SubdivCCG *subdiv_ccg); /* -------------------------------------------------------------------- * Functions specific to reshaping from MDISPS. 
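The header diff above applies one mechanical conversion: C-style "typedef struct X { ... } X;" declarations become plain C++ "struct X { ... };", and the now-redundant "struct" keywords disappear from function prototypes. The following is a minimal, self-contained sketch of that pattern; it is an illustration only, not part of the patch, reusing the GridCoord fields from this header together with a hypothetical accessor function.

namespace c_style {
/* C-compatible form: the typedef lets C callers name the type without `struct`. */
typedef struct GridCoord {
  int grid_index;
  float u, v;
} GridCoord;

/* Prototypes spell out `struct` (or rely on the typedef name). */
float grid_coord_u(const struct GridCoord *coord)
{
  return coord->u;
}
}  // namespace c_style

namespace cpp_style {
/* C++-only form used after the rename to .hh: the struct name is already a
 * type name, so both the typedef and the `struct` keyword are dropped. */
struct GridCoord {
  int grid_index;
  float u, v;
};

float grid_coord_u(const GridCoord *coord)
{
  return coord->u;
}
}  // namespace cpp_style
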
diff --git a/source/blender/blenkernel/intern/multires_reshape_apply_base.c b/source/blender/blenkernel/intern/multires_reshape_apply_base.cc similarity index 90% rename from source/blender/blenkernel/intern/multires_reshape_apply_base.c rename to source/blender/blenkernel/intern/multires_reshape_apply_base.cc index fd5faf1847d..f32de373529 100644 --- a/source/blender/blenkernel/intern/multires_reshape_apply_base.c +++ b/source/blender/blenkernel/intern/multires_reshape_apply_base.cc @@ -5,7 +5,7 @@ * \ingroup bke */ -#include "multires_reshape.h" +#include "multires_reshape.hh" #include "MEM_guardedalloc.h" @@ -84,8 +84,8 @@ void multires_reshape_apply_base_refit_base_mesh(MultiresReshapeContext *reshape base_mesh->totloop, false); - float(*origco)[3] = MEM_calloc_arrayN( - base_mesh->totvert, sizeof(float[3]), "multires apply base origco"); + float(*origco)[3] = static_cast( + MEM_calloc_arrayN(base_mesh->totvert, sizeof(float[3]), __func__)); for (int i = 0; i < base_mesh->totvert; i++) { copy_v3_v3(origco[i], base_positions[i]); } @@ -118,15 +118,15 @@ void multires_reshape_apply_base_refit_base_mesh(MultiresReshapeContext *reshape for (int j = 0; j < pmap[i].count; j++) { const MPoly *p = &reshape_context->base_polys[pmap[i].indices[j]]; MPoly fake_poly; - MLoop *fake_loops; - float(*fake_co)[3]; float no[3]; /* Set up poly, loops, and coords in order to call BKE_mesh_calc_poly_normal(). */ fake_poly.totloop = p->totloop; fake_poly.loopstart = 0; - fake_loops = MEM_malloc_arrayN(p->totloop, sizeof(MLoop), "fake_loops"); - fake_co = MEM_malloc_arrayN(p->totloop, sizeof(float[3]), "fake_co"); + MLoop *fake_loops = static_cast( + MEM_malloc_arrayN(p->totloop, sizeof(MLoop), __func__)); + float(*fake_co)[3] = static_cast( + MEM_malloc_arrayN(p->totloop, sizeof(float[3]), __func__)); for (int k = 0; k < p->totloop; k++) { const int vndx = reshape_context->base_loops[p->loopstart + k].v; @@ -168,20 +168,20 @@ void multires_reshape_apply_base_refit_base_mesh(MultiresReshapeContext *reshape void multires_reshape_apply_base_refine_from_base(MultiresReshapeContext *reshape_context) { - BKE_subdiv_eval_refine_from_mesh(reshape_context->subdiv, reshape_context->base_mesh, NULL); + BKE_subdiv_eval_refine_from_mesh(reshape_context->subdiv, reshape_context->base_mesh, nullptr); } void multires_reshape_apply_base_refine_from_deform(MultiresReshapeContext *reshape_context) { - struct Depsgraph *depsgraph = reshape_context->depsgraph; + Depsgraph *depsgraph = reshape_context->depsgraph; Object *object = reshape_context->object; MultiresModifierData *mmd = reshape_context->mmd; - BLI_assert(depsgraph != NULL); - BLI_assert(object != NULL); - BLI_assert(mmd != NULL); + BLI_assert(depsgraph != nullptr); + BLI_assert(object != nullptr); + BLI_assert(mmd != nullptr); float(*deformed_verts)[3] = BKE_multires_create_deformed_base_mesh_vert_coords( - depsgraph, object, mmd, NULL); + depsgraph, object, mmd, nullptr); BKE_subdiv_eval_refine_from_mesh( reshape_context->subdiv, reshape_context->base_mesh, deformed_verts); diff --git a/source/blender/blenkernel/intern/multires_reshape_ccg.c b/source/blender/blenkernel/intern/multires_reshape_ccg.cc similarity index 85% rename from source/blender/blenkernel/intern/multires_reshape_ccg.c rename to source/blender/blenkernel/intern/multires_reshape_ccg.cc index 6001aa715d5..6f63c336abb 100644 --- a/source/blender/blenkernel/intern/multires_reshape_ccg.c +++ b/source/blender/blenkernel/intern/multires_reshape_ccg.cc @@ -5,9 +5,9 @@ * \ingroup bke */ -#include 
"multires_reshape.h" +#include "multires_reshape.hh" -#include +#include #include "BLI_utildefines.h" @@ -15,21 +15,21 @@ #include "BKE_subdiv_ccg.h" bool multires_reshape_assign_final_coords_from_ccg(const MultiresReshapeContext *reshape_context, - struct SubdivCCG *subdiv_ccg) + SubdivCCG *subdiv_ccg) { CCGKey reshape_level_key; BKE_subdiv_ccg_key(&reshape_level_key, subdiv_ccg, reshape_context->reshape.level); const int reshape_grid_size = reshape_context->reshape.grid_size; - const float reshape_grid_size_1_inv = 1.0f / (((float)reshape_grid_size) - 1.0f); + const float reshape_grid_size_1_inv = 1.0f / ((float(reshape_grid_size)) - 1.0f); int num_grids = subdiv_ccg->num_grids; for (int grid_index = 0; grid_index < num_grids; ++grid_index) { CCGElem *ccg_grid = subdiv_ccg->grids[grid_index]; for (int y = 0; y < reshape_grid_size; ++y) { - const float v = (float)y * reshape_grid_size_1_inv; + const float v = float(y) * reshape_grid_size_1_inv; for (int x = 0; x < reshape_grid_size; ++x) { - const float u = (float)x * reshape_grid_size_1_inv; + const float u = float(x) * reshape_grid_size_1_inv; GridCoord grid_coord; grid_coord.grid_index = grid_index; @@ -39,7 +39,7 @@ bool multires_reshape_assign_final_coords_from_ccg(const MultiresReshapeContext ReshapeGridElement grid_element = multires_reshape_grid_element_for_grid_coord( reshape_context, &grid_coord); - BLI_assert(grid_element.displacement != NULL); + BLI_assert(grid_element.displacement != nullptr); memcpy(grid_element.displacement, CCG_grid_elem_co(&reshape_level_key, ccg_grid, x, y), sizeof(float[3])); @@ -66,7 +66,7 @@ bool multires_reshape_assign_final_coords_from_ccg(const MultiresReshapeContext /* NOTE: There is a known bug in Undo code that results in first Sculpt step * after a Memfile one to never be undone (see T83806). This might be the root cause of * this inconsistency. */ - if (reshape_level_key.has_mask && grid_element.mask != NULL) { + if (reshape_level_key.has_mask && grid_element.mask != nullptr) { *grid_element.mask = *CCG_grid_elem_mask(&reshape_level_key, ccg_grid, x, y); } } diff --git a/source/blender/blenkernel/intern/multires_reshape_smooth.c b/source/blender/blenkernel/intern/multires_reshape_smooth.cc similarity index 83% rename from source/blender/blenkernel/intern/multires_reshape_smooth.c rename to source/blender/blenkernel/intern/multires_reshape_smooth.cc index b14dac6ad02..081b2b0a107 100644 --- a/source/blender/blenkernel/intern/multires_reshape_smooth.c +++ b/source/blender/blenkernel/intern/multires_reshape_smooth.cc @@ -5,7 +5,7 @@ * \ingroup bke */ -#include "multires_reshape.h" +#include "multires_reshape.hh" #include "MEM_guardedalloc.h" @@ -73,19 +73,19 @@ bool debug_invert_m3_m3(float m1[3][3], const float m2[3][3], const char *func, * Used to store pre-calculated information which is expensive or impossible to evaluate when * traversing the final limit surface. */ -typedef struct SurfacePoint { +struct SurfacePoint { float P[3]; float tangent_matrix[3][3]; -} SurfacePoint; +}; -typedef struct SurfaceGrid { +struct SurfaceGrid { SurfacePoint *points; -} SurfaceGrid; +}; /* Geometry elements which are used to simplify creation of topology refiner at the sculpt level. * Contains a limited subset of information needed to construct topology refiner. */ -typedef struct Vertex { +struct Vertex { /* All grid coordinates which the vertex corresponding to. * For a vertices which are created from inner points of grids there is always one coordinate. 
*/ int num_grid_coords; @@ -93,36 +93,36 @@ typedef struct Vertex { float sharpness; bool is_infinite_sharp; -} Vertex; +}; -typedef struct Corner { +struct Corner { const Vertex *vertex; int grid_index; -} Corner; +}; -typedef struct Face { +struct Face { int start_corner_index; int num_corners; -} Face; +}; -typedef struct Edge { +struct Edge { int v1; int v2; float sharpness; -} Edge; +}; /* Storage of data which is linearly interpolated from the reshape level to the top level. */ -typedef struct LinearGridElement { +struct LinearGridElement { float mask; -} LinearGridElement; +}; -typedef struct LinearGrid { +struct LinearGrid { LinearGridElement *elements; -} LinearGrid; +}; -typedef struct LinearGrids { +struct LinearGrids { int num_grids; int level; @@ -134,11 +134,11 @@ typedef struct LinearGrids { /* Elements for all grids are allocated in a single array, for the allocation performance. */ LinearGridElement *elements_storage; -} LinearGrids; +}; -/* Context which holds all information eeded during propagation and smoothing. */ +/* Context which holds all information needed during propagation and smoothing. */ -typedef struct MultiresReshapeSmoothContext { +struct MultiresReshapeSmoothContext { const MultiresReshapeContext *reshape_context; /* Geometry at a reshape multires level. */ @@ -190,7 +190,7 @@ typedef struct MultiresReshapeSmoothContext { * NOTE: Uses same enumerator type as Subdivide operator, since the values are the same and * decoupling type just adds extra headache to convert one enumerator to another. */ eMultiresSubdivideModeType smoothing_type; -} MultiresReshapeSmoothContext; +}; /** \} */ @@ -203,8 +203,8 @@ static void linear_grids_init(LinearGrids *linear_grids) linear_grids->num_grids = 0; linear_grids->level = 0; - linear_grids->grids = NULL; - linear_grids->elements_storage = NULL; + linear_grids->grids = nullptr; + linear_grids->elements_storage = nullptr; } static void linear_grids_allocate(LinearGrids *linear_grids, int num_grids, int level) @@ -217,9 +217,10 @@ static void linear_grids_allocate(LinearGrids *linear_grids, int num_grids, int linear_grids->level = level; linear_grids->grid_size = grid_size; - linear_grids->grids = MEM_malloc_arrayN(num_grids, sizeof(LinearGrid), "linear grids"); - linear_grids->elements_storage = MEM_calloc_arrayN( - num_grid_elements, sizeof(LinearGridElement), "linear elements storage"); + linear_grids->grids = static_cast( + MEM_malloc_arrayN(num_grids, sizeof(LinearGrid), __func__)); + linear_grids->elements_storage = static_cast( + MEM_calloc_arrayN(num_grid_elements, sizeof(LinearGridElement), __func__)); for (int i = 0; i < num_grids; ++i) { const size_t element_offset = grid_area * i; @@ -284,11 +285,12 @@ static void base_surface_grids_allocate(MultiresReshapeSmoothContext *reshape_sm const int grid_size = reshape_context->top.grid_size; const int grid_area = grid_size * grid_size; - SurfaceGrid *surface_grid = MEM_malloc_arrayN(num_grids, sizeof(SurfaceGrid), "delta grids"); + SurfaceGrid *surface_grid = static_cast( + MEM_malloc_arrayN(num_grids, sizeof(SurfaceGrid), __func__)); for (int grid_index = 0; grid_index < num_grids; ++grid_index) { - surface_grid[grid_index].points = MEM_calloc_arrayN( - grid_area, sizeof(SurfacePoint), "delta grid displacement"); + surface_grid[grid_index].points = static_cast( + MEM_calloc_arrayN(grid_area, sizeof(SurfacePoint), __func__)); } reshape_smooth_context->base_surface_grids = surface_grid; @@ -296,7 +298,7 @@ static void 
base_surface_grids_allocate(MultiresReshapeSmoothContext *reshape_sm static void base_surface_grids_free(MultiresReshapeSmoothContext *reshape_smooth_context) { - if (reshape_smooth_context->base_surface_grids == NULL) { + if (reshape_smooth_context->base_surface_grids == nullptr) { return; } @@ -340,13 +342,13 @@ static void base_surface_grids_write(const MultiresReshapeSmoothContext *reshape /** \name Evaluation of subdivision surface at a reshape level * \{ */ -typedef void (*ForeachTopLevelGridCoordCallback)( - const MultiresReshapeSmoothContext *reshape_smooth_context, - const PTexCoord *ptex_coord, - const GridCoord *grid_coord, - void *userdata_v); +using ForeachTopLevelGridCoordCallback = + void (*)(const MultiresReshapeSmoothContext *reshape_smooth_context, + const PTexCoord *ptex_coord, + const GridCoord *grid_coord, + void *userdata_v); -typedef struct ForeachTopLevelGridCoordTaskData { +struct ForeachHighLevelCoordTaskData { const MultiresReshapeSmoothContext *reshape_smooth_context; int inner_grid_size; @@ -354,7 +356,7 @@ typedef struct ForeachTopLevelGridCoordTaskData { ForeachTopLevelGridCoordCallback callback; void *callback_userdata_v; -} ForeachHighLevelCoordTaskData; +}; /* Find grid index which given face was created for. */ static int get_face_grid_index(const MultiresReshapeSmoothContext *reshape_smooth_context, @@ -381,7 +383,7 @@ static GridCoord *vertex_grid_coord_with_grid_index(const Vertex *vertex, const return &vertex->grid_coords[i]; } } - return NULL; + return nullptr; } /* Get grid coordinates which correspond to corners of the given face. @@ -399,7 +401,7 @@ static void grid_coords_from_face_verts(const MultiresReshapeSmoothContext *resh const int corner_index = face->start_corner_index + i; const Corner *corner = &reshape_smooth_context->geometry.corners[corner_index]; grid_coords[i] = vertex_grid_coord_with_grid_index(corner->vertex, grid_index); - BLI_assert(grid_coords[i] != NULL); + BLI_assert(grid_coords[i] != nullptr); } } @@ -438,9 +440,9 @@ static void interpolate_grid_coord(GridCoord *result, static void foreach_toplevel_grid_coord_task(void *__restrict userdata_v, const int face_index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - ForeachHighLevelCoordTaskData *data = userdata_v; + ForeachHighLevelCoordTaskData *data = static_cast(userdata_v); const MultiresReshapeSmoothContext *reshape_smooth_context = data->reshape_smooth_context; const int inner_grid_size = data->inner_grid_size; @@ -451,9 +453,9 @@ static void foreach_toplevel_grid_coord_task(void *__restrict userdata_v, grid_coords_from_face_verts(reshape_smooth_context, face, face_grid_coords); for (int y = 0; y < inner_grid_size; ++y) { - const float ptex_v = (float)y * inner_grid_size_1_inv; + const float ptex_v = float(y) * inner_grid_size_1_inv; for (int x = 0; x < inner_grid_size; ++x) { - const float ptex_u = (float)x * inner_grid_size_1_inv; + const float ptex_u = float(x) * inner_grid_size_1_inv; PTexCoord ptex_coord; ptex_coord.ptex_face_index = face_index; @@ -478,7 +480,7 @@ static void foreach_toplevel_grid_coord(const MultiresReshapeSmoothContext *resh ForeachHighLevelCoordTaskData data; data.reshape_smooth_context = reshape_smooth_context; data.inner_grid_size = (1 << level_difference) + 1; - data.inner_grid_size_1_inv = 1.0f / (float)(data.inner_grid_size - 1); + data.inner_grid_size_1_inv = 1.0f / float(data.inner_grid_size - 1); data.callback = callback; data.callback_userdata_v = callback_userdata_v; @@ -540,30 +542,30 
@@ static void context_init(MultiresReshapeSmoothContext *reshape_smooth_context, reshape_smooth_context->reshape_context = reshape_context; reshape_smooth_context->geometry.num_vertices = 0; - reshape_smooth_context->geometry.vertices = NULL; + reshape_smooth_context->geometry.vertices = nullptr; reshape_smooth_context->geometry.max_edges = 0; reshape_smooth_context->geometry.num_edges = 0; - reshape_smooth_context->geometry.edges = NULL; + reshape_smooth_context->geometry.edges = nullptr; reshape_smooth_context->geometry.num_corners = 0; - reshape_smooth_context->geometry.corners = NULL; + reshape_smooth_context->geometry.corners = nullptr; reshape_smooth_context->geometry.num_faces = 0; - reshape_smooth_context->geometry.faces = NULL; + reshape_smooth_context->geometry.faces = nullptr; linear_grids_init(&reshape_smooth_context->linear_delta_grids); - reshape_smooth_context->non_loose_base_edge_map = NULL; - reshape_smooth_context->reshape_subdiv = NULL; - reshape_smooth_context->base_surface_grids = NULL; + reshape_smooth_context->non_loose_base_edge_map = nullptr; + reshape_smooth_context->reshape_subdiv = nullptr; + reshape_smooth_context->base_surface_grids = nullptr; reshape_smooth_context->smoothing_type = mode; } static void context_free_geometry(MultiresReshapeSmoothContext *reshape_smooth_context) { - if (reshape_smooth_context->geometry.vertices != NULL) { + if (reshape_smooth_context->geometry.vertices != nullptr) { for (int i = 0; i < reshape_smooth_context->geometry.num_vertices; ++i) { MEM_SAFE_FREE(reshape_smooth_context->geometry.vertices[i].grid_coords); } @@ -578,7 +580,7 @@ static void context_free_geometry(MultiresReshapeSmoothContext *reshape_smooth_c static void context_free_subdiv(MultiresReshapeSmoothContext *reshape_smooth_context) { - if (reshape_smooth_context->reshape_subdiv == NULL) { + if (reshape_smooth_context->reshape_subdiv == nullptr) { return; } BKE_subdiv_free(reshape_smooth_context->reshape_subdiv); @@ -598,29 +600,30 @@ static bool foreach_topology_info(const SubdivForeachContext *foreach_context, const int num_edges, const int num_loops, const int num_polygons, - const int *UNUSED(subdiv_polygon_offset)) + const int * /*subdiv_polygon_offset*/) { - MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data; + MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(foreach_context->user_data); const int max_edges = reshape_smooth_context->smoothing_type == MULTIRES_SUBDIVIDE_LINEAR ? num_edges : reshape_smooth_context->geometry.max_edges; /* NOTE: Calloc so the counters are re-set to 0 "for free". 
*/ reshape_smooth_context->geometry.num_vertices = num_vertices; - reshape_smooth_context->geometry.vertices = MEM_calloc_arrayN( - num_vertices, sizeof(Vertex), "smooth vertices"); + reshape_smooth_context->geometry.vertices = static_cast( + MEM_calloc_arrayN(num_vertices, sizeof(Vertex), "smooth vertices")); reshape_smooth_context->geometry.max_edges = max_edges; - reshape_smooth_context->geometry.edges = MEM_malloc_arrayN( - max_edges, sizeof(Edge), "smooth edges"); + reshape_smooth_context->geometry.edges = static_cast( + MEM_malloc_arrayN(max_edges, sizeof(Edge), "smooth edges")); reshape_smooth_context->geometry.num_corners = num_loops; - reshape_smooth_context->geometry.corners = MEM_malloc_arrayN( - num_loops, sizeof(Corner), "smooth corners"); + reshape_smooth_context->geometry.corners = static_cast( + MEM_malloc_arrayN(num_loops, sizeof(Corner), "smooth corners")); reshape_smooth_context->geometry.num_faces = num_polygons; - reshape_smooth_context->geometry.faces = MEM_malloc_arrayN( - num_polygons, sizeof(Face), "smooth faces"); + reshape_smooth_context->geometry.faces = static_cast( + MEM_malloc_arrayN(num_polygons, sizeof(Face), "smooth faces")); return true; } @@ -630,14 +633,15 @@ static void foreach_single_vertex(const SubdivForeachContext *foreach_context, const int coarse_vertex_index, const int subdiv_vertex_index) { - const MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(foreach_context->user_data); BLI_assert(subdiv_vertex_index < reshape_smooth_context->geometry.num_vertices); Vertex *vertex = &reshape_smooth_context->geometry.vertices[subdiv_vertex_index]; - vertex->grid_coords = MEM_reallocN(vertex->grid_coords, - sizeof(Vertex) * (vertex->num_grid_coords + 1)); + vertex->grid_coords = static_cast( + MEM_reallocN(vertex->grid_coords, sizeof(Vertex) * (vertex->num_grid_coords + 1))); vertex->grid_coords[vertex->num_grid_coords] = *grid_coord; ++vertex->num_grid_coords; @@ -648,7 +652,7 @@ static void foreach_single_vertex(const SubdivForeachContext *foreach_context, const MultiresReshapeContext *reshape_context = reshape_smooth_context->reshape_context; const float *cd_vertex_crease = reshape_context->cd_vertex_crease; - if (cd_vertex_crease == NULL) { + if (cd_vertex_crease == nullptr) { return; } @@ -662,13 +666,14 @@ static void foreach_single_vertex(const SubdivForeachContext *foreach_context, vertex->sharpness = BKE_subdiv_crease_to_sharpness_f(crease); } -/* TODO(sergey): De-duplicate with similar function in multires_reshape_vertcos.c */ +/* TODO(sergey): De-duplicate with similar function in multires_reshape_vertcos.cc */ static void foreach_vertex(const SubdivForeachContext *foreach_context, const PTexCoord *ptex_coord, const int coarse_vertex_index, const int subdiv_vertex_index) { - const MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(foreach_context->user_data); const MultiresReshapeContext *reshape_context = reshape_smooth_context->reshape_context; const GridCoord grid_coord = multires_reshape_ptex_coord_to_grid(reshape_context, ptex_coord); @@ -713,72 +718,70 @@ static void foreach_vertex(const SubdivForeachContext *foreach_context, } } -static void foreach_vertex_inner(const struct SubdivForeachContext *foreach_context, - void *UNUSED(tls), +static void foreach_vertex_inner(const SubdivForeachContext *foreach_context, + void * 
/*tls*/, const int ptex_face_index, const float ptex_face_u, const float ptex_face_v, - const int UNUSED(coarse_poly_index), - const int UNUSED(coarse_corner), + const int /*coarse_poly_index*/, + const int /*coarse_corner*/, const int subdiv_vertex_index) { - const PTexCoord ptex_coord = { - .ptex_face_index = ptex_face_index, - .u = ptex_face_u, - .v = ptex_face_v, - }; + PTexCoord ptex_coord{}; + ptex_coord.ptex_face_index = ptex_face_index; + ptex_coord.u = ptex_face_u; + ptex_coord.v = ptex_face_v; foreach_vertex(foreach_context, &ptex_coord, -1, subdiv_vertex_index); } -static void foreach_vertex_every_corner(const struct SubdivForeachContext *foreach_context, - void *UNUSED(tls_v), +static void foreach_vertex_every_corner(const SubdivForeachContext *foreach_context, + void * /*tls_v*/, const int ptex_face_index, const float ptex_face_u, const float ptex_face_v, const int coarse_vertex_index, - const int UNUSED(coarse_face_index), - const int UNUSED(coarse_face_corner), + const int /*coarse_face_index*/, + const int /*coarse_face_corner*/, const int subdiv_vertex_index) { - const PTexCoord ptex_coord = { - .ptex_face_index = ptex_face_index, - .u = ptex_face_u, - .v = ptex_face_v, - }; + PTexCoord ptex_coord{}; + ptex_coord.ptex_face_index = ptex_face_index; + ptex_coord.u = ptex_face_u; + ptex_coord.v = ptex_face_v; foreach_vertex(foreach_context, &ptex_coord, coarse_vertex_index, subdiv_vertex_index); } -static void foreach_vertex_every_edge(const struct SubdivForeachContext *foreach_context, - void *UNUSED(tls_v), +static void foreach_vertex_every_edge(const SubdivForeachContext *foreach_context, + void * /*tls_v*/, const int ptex_face_index, const float ptex_face_u, const float ptex_face_v, - const int UNUSED(coarse_edge_index), - const int UNUSED(coarse_face_index), - const int UNUSED(coarse_face_corner), + const int /*coarse_edge_index*/, + const int /*coarse_face_index*/, + const int /*coarse_face_corner*/, const int subdiv_vertex_index) { - const PTexCoord ptex_coord = { - .ptex_face_index = ptex_face_index, - .u = ptex_face_u, - .v = ptex_face_v, - }; + PTexCoord ptex_coord{}; + ptex_coord.ptex_face_index = ptex_face_index; + ptex_coord.u = ptex_face_u; + ptex_coord.v = ptex_face_v; foreach_vertex(foreach_context, &ptex_coord, -1, subdiv_vertex_index); } -static void foreach_loop(const struct SubdivForeachContext *foreach_context, - void *UNUSED(tls), - const int UNUSED(ptex_face_index), - const float UNUSED(ptex_face_u), - const float UNUSED(ptex_face_v), - const int UNUSED(coarse_loop_index), +static void foreach_loop(const SubdivForeachContext *foreach_context, + void * /*tls*/, + const int /*ptex_face_index*/, + const float /*ptex_face_u*/, + const float /*ptex_face_v*/, + const int /*coarse_loop_index*/, const int coarse_poly_index, const int coarse_corner, const int subdiv_loop_index, const int subdiv_vertex_index, - const int UNUSED(subdiv_edge_index)) + const int /*subdiv_edge_index*/) { - MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(foreach_context->user_data); const MultiresReshapeContext *reshape_context = reshape_smooth_context->reshape_context; BLI_assert(subdiv_loop_index < reshape_smooth_context->geometry.num_corners); @@ -791,13 +794,14 @@ static void foreach_loop(const struct SubdivForeachContext *foreach_context, } static void foreach_poly(const SubdivForeachContext *foreach_context, - void *UNUSED(tls), - const int UNUSED(coarse_poly_index), + 
void * /*tls*/, + const int /*coarse_poly_index*/, const int subdiv_poly_index, const int start_loop_index, const int num_loops) { - MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(foreach_context->user_data); BLI_assert(subdiv_poly_index < reshape_smooth_context->geometry.num_faces); @@ -806,13 +810,14 @@ static void foreach_poly(const SubdivForeachContext *foreach_context, face->num_corners = num_loops; } -static void foreach_vertex_of_loose_edge(const struct SubdivForeachContext *foreach_context, - void *UNUSED(tls), - const int UNUSED(coarse_edge_index), - const float UNUSED(u), +static void foreach_vertex_of_loose_edge(const SubdivForeachContext *foreach_context, + void * /*tls*/, + const int /*coarse_edge_index*/, + const float /*u*/, const int vertex_index) { - const MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(foreach_context->user_data); Vertex *vertex = &reshape_smooth_context->geometry.vertices[vertex_index]; if (vertex->num_grid_coords != 0) { @@ -836,19 +841,20 @@ static void store_edge(MultiresReshapeSmoothContext *reshape_smooth_context, edge->sharpness = BKE_subdiv_crease_to_sharpness_char(crease); } -static void foreach_edge(const struct SubdivForeachContext *foreach_context, - void *UNUSED(tls), +static void foreach_edge(const SubdivForeachContext *foreach_context, + void * /*tls*/, const int coarse_edge_index, - const int UNUSED(subdiv_edge_index), + const int /*subdiv_edge_index*/, const bool is_loose, const int subdiv_v1, const int subdiv_v2) { - MultiresReshapeSmoothContext *reshape_smooth_context = foreach_context->user_data; + MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(foreach_context->user_data); if (reshape_smooth_context->smoothing_type == MULTIRES_SUBDIVIDE_LINEAR) { if (!is_loose) { - store_edge(reshape_smooth_context, subdiv_v1, subdiv_v2, (char)255); + store_edge(reshape_smooth_context, subdiv_v1, subdiv_v2, char(255)); } return; } @@ -907,17 +913,16 @@ static void geometry_create(MultiresReshapeSmoothContext *reshape_smooth_context { const MultiresReshapeContext *reshape_context = reshape_smooth_context->reshape_context; - SubdivForeachContext foreach_context = { - .topology_info = foreach_topology_info, - .vertex_inner = foreach_vertex_inner, - .vertex_every_corner = foreach_vertex_every_corner, - .vertex_every_edge = foreach_vertex_every_edge, - .loop = foreach_loop, - .poly = foreach_poly, - .vertex_of_loose_edge = foreach_vertex_of_loose_edge, - .edge = foreach_edge, - .user_data = reshape_smooth_context, - }; + SubdivForeachContext foreach_context{}; + foreach_context.topology_info = foreach_topology_info; + foreach_context.vertex_inner = foreach_vertex_inner; + foreach_context.vertex_every_corner = foreach_vertex_every_corner; + foreach_context.vertex_every_edge = foreach_vertex_every_edge; + foreach_context.loop = foreach_loop; + foreach_context.poly = foreach_poly; + foreach_context.vertex_of_loose_edge = foreach_vertex_of_loose_edge; + foreach_context.edge = foreach_edge; + foreach_context.user_data = reshape_smooth_context; geometry_init_loose_information(reshape_smooth_context); @@ -936,53 +941,60 @@ static void geometry_create(MultiresReshapeSmoothContext *reshape_smooth_context /** \name Generation of OpenSubdiv evaluator for topology created form reshape level * \{ */ -static 
OpenSubdiv_SchemeType get_scheme_type(const OpenSubdiv_Converter *UNUSED(converter)) +static OpenSubdiv_SchemeType get_scheme_type(const OpenSubdiv_Converter * /*converter*/) { return OSD_SCHEME_CATMARK; } static OpenSubdiv_VtxBoundaryInterpolation get_vtx_boundary_interpolation( - const struct OpenSubdiv_Converter *converter) + const OpenSubdiv_Converter *converter) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); const MultiresReshapeContext *reshape_context = reshape_smooth_context->reshape_context; const SubdivSettings *settings = &reshape_context->subdiv->settings; - return BKE_subdiv_converter_vtx_boundary_interpolation_from_settings(settings); + return OpenSubdiv_VtxBoundaryInterpolation( + BKE_subdiv_converter_vtx_boundary_interpolation_from_settings(settings)); } static OpenSubdiv_FVarLinearInterpolation get_fvar_linear_interpolation( const OpenSubdiv_Converter *converter) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); const MultiresReshapeContext *reshape_context = reshape_smooth_context->reshape_context; const SubdivSettings *settings = &reshape_context->subdiv->settings; - return BKE_subdiv_converter_fvar_linear_from_settings(settings); + return OpenSubdiv_FVarLinearInterpolation( + BKE_subdiv_converter_fvar_linear_from_settings(settings)); } -static bool specifies_full_topology(const OpenSubdiv_Converter *UNUSED(converter)) +static bool specifies_full_topology(const OpenSubdiv_Converter * /*converter*/) { return false; } static int get_num_faces(const OpenSubdiv_Converter *converter) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); return reshape_smooth_context->geometry.num_faces; } static int get_num_vertices(const OpenSubdiv_Converter *converter) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); return reshape_smooth_context->geometry.num_vertices; } static int get_num_face_vertices(const OpenSubdiv_Converter *converter, int face_index) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); BLI_assert(face_index < reshape_smooth_context->geometry.num_faces); const Face *face = &reshape_smooth_context->geometry.faces[face_index]; @@ -993,7 +1005,8 @@ static void get_face_vertices(const OpenSubdiv_Converter *converter, int face_index, int *face_vertices) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); BLI_assert(face_index < reshape_smooth_context->geometry.num_faces); const Face *face = &reshape_smooth_context->geometry.faces[face_index]; @@ -1005,9 +1018,10 @@ static void get_face_vertices(const OpenSubdiv_Converter *converter, } } -static int get_num_edges(const struct OpenSubdiv_Converter *converter) +static int get_num_edges(const OpenSubdiv_Converter *converter) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const 
MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); return reshape_smooth_context->geometry.num_edges; } @@ -1015,7 +1029,8 @@ static void get_edge_vertices(const OpenSubdiv_Converter *converter, const int edge_index, int edge_vertices[2]) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); BLI_assert(edge_index < reshape_smooth_context->geometry.num_edges); const Edge *edge = &reshape_smooth_context->geometry.edges[edge_index]; @@ -1025,7 +1040,8 @@ static void get_edge_vertices(const OpenSubdiv_Converter *converter, static float get_edge_sharpness(const OpenSubdiv_Converter *converter, const int edge_index) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); BLI_assert(edge_index < reshape_smooth_context->geometry.num_edges); const Edge *edge = &reshape_smooth_context->geometry.edges[edge_index]; @@ -1034,7 +1050,8 @@ static float get_edge_sharpness(const OpenSubdiv_Converter *converter, const int static float get_vertex_sharpness(const OpenSubdiv_Converter *converter, const int vertex_index) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); BLI_assert(vertex_index < reshape_smooth_context->geometry.num_vertices); const Vertex *vertex = &reshape_smooth_context->geometry.vertices[vertex_index]; @@ -1043,7 +1060,8 @@ static float get_vertex_sharpness(const OpenSubdiv_Converter *converter, const i static bool is_infinite_sharp_vertex(const OpenSubdiv_Converter *converter, int vertex_index) { - const MultiresReshapeSmoothContext *reshape_smooth_context = converter->user_data; + const MultiresReshapeSmoothContext *reshape_smooth_context = + static_cast(converter->user_data); BLI_assert(vertex_index < reshape_smooth_context->geometry.num_vertices); @@ -1065,27 +1083,27 @@ static void converter_init(const MultiresReshapeSmoothContext *reshape_smooth_co converter->getNumFaceVertices = get_num_face_vertices; converter->getFaceVertices = get_face_vertices; - converter->getFaceEdges = NULL; + converter->getFaceEdges = nullptr; converter->getEdgeVertices = get_edge_vertices; - converter->getNumEdgeFaces = NULL; - converter->getEdgeFaces = NULL; + converter->getNumEdgeFaces = nullptr; + converter->getEdgeFaces = nullptr; converter->getEdgeSharpness = get_edge_sharpness; - converter->getNumVertexEdges = NULL; - converter->getVertexEdges = NULL; - converter->getNumVertexFaces = NULL; - converter->getVertexFaces = NULL; + converter->getNumVertexEdges = nullptr; + converter->getVertexEdges = nullptr; + converter->getNumVertexFaces = nullptr; + converter->getVertexFaces = nullptr; converter->isInfiniteSharpVertex = is_infinite_sharp_vertex; converter->getVertexSharpness = get_vertex_sharpness; - converter->getNumUVLayers = NULL; - converter->precalcUVLayer = NULL; - converter->finishUVLayer = NULL; - converter->getNumUVCoordinates = NULL; - converter->getFaceCornerUVIndex = NULL; + converter->getNumUVLayers = nullptr; + converter->precalcUVLayer = nullptr; + converter->finishUVLayer = nullptr; + converter->getNumUVCoordinates = nullptr; + converter->getFaceCornerUVIndex = nullptr; - converter->freeUserData = NULL; + converter->freeUserData = nullptr; 
converter->user_data = (void *)reshape_smooth_context; } @@ -1102,7 +1120,7 @@ static void reshape_subdiv_create(MultiresReshapeSmoothContext *reshape_smooth_c Subdiv *reshape_subdiv = BKE_subdiv_new_from_converter(settings, &converter); OpenSubdiv_EvaluatorSettings evaluator_settings = {0}; - BKE_subdiv_eval_begin(reshape_subdiv, SUBDIV_EVALUATOR_TYPE_CPU, NULL, &evaluator_settings); + BKE_subdiv_eval_begin(reshape_subdiv, SUBDIV_EVALUATOR_TYPE_CPU, nullptr, &evaluator_settings); reshape_smooth_context->reshape_subdiv = reshape_subdiv; @@ -1139,7 +1157,7 @@ BLI_INLINE const GridCoord *reshape_subdiv_refine_vertex_grid_coord(const Vertex /* This is a loose vertex, the coordinate is not important. */ /* TODO(sergey): Once the subdiv_foreach() supports properly ignoring loose elements this * should become an assert instead. */ - return NULL; + return nullptr; } /* NOTE: All grid coordinates will point to the same object position, so can be simple and use * first grid coordinate. */ @@ -1154,7 +1172,7 @@ static void reshape_subdiv_refine_orig_P( const GridCoord *grid_coord = reshape_subdiv_refine_vertex_grid_coord(vertex); /* Check whether this is a loose vertex. */ - if (grid_coord == NULL) { + if (grid_coord == nullptr) { zero_v3(r_P); return; } @@ -1184,7 +1202,7 @@ static void reshape_subdiv_refine_final_P( const GridCoord *grid_coord = reshape_subdiv_refine_vertex_grid_coord(vertex); /* Check whether this is a loose vertex. */ - if (grid_coord == NULL) { + if (grid_coord == nullptr) { zero_v3(r_P); return; } @@ -1257,7 +1275,7 @@ static LinearGridElement linear_grid_element_final_get( LinearGridElement linear_grid_element; linear_grid_element_init(&linear_grid_element); - if (final_grid_element.mask != NULL) { + if (final_grid_element.mask != nullptr) { linear_grid_element.mask = *final_grid_element.mask; } @@ -1279,7 +1297,7 @@ static void linear_grid_element_delta_interpolate( const int reshape_level = reshape_context->reshape.level; const int reshape_level_grid_size = BKE_subdiv_grid_size_from_level(reshape_level); const int reshape_level_grid_size_1 = reshape_level_grid_size - 1; - const float reshape_level_grid_size_1_inv = 1.0f / (float)(reshape_level_grid_size_1); + const float reshape_level_grid_size_1_inv = 1.0f / float(reshape_level_grid_size_1); const float x_f = grid_coord->u * reshape_level_grid_size_1; const float y_f = grid_coord->v * reshape_level_grid_size_1; @@ -1314,9 +1332,9 @@ static void linear_grid_element_delta_interpolate( static void evaluate_linear_delta_grids_callback( const MultiresReshapeSmoothContext *reshape_smooth_context, - const PTexCoord *UNUSED(ptex_coord), + const PTexCoord * /*ptex_coord*/, const GridCoord *grid_coord, - void *UNUSED(userdata_v)) + void * /*userdata_v*/) { LinearGridElement *linear_delta_element = linear_grid_element_get( &reshape_smooth_context->linear_delta_grids, grid_coord); @@ -1332,7 +1350,8 @@ static void evaluate_linear_delta_grids(MultiresReshapeSmoothContext *reshape_sm linear_grids_allocate(&reshape_smooth_context->linear_delta_grids, num_grids, top_level); - foreach_toplevel_grid_coord(reshape_smooth_context, evaluate_linear_delta_grids_callback, NULL); + foreach_toplevel_grid_coord( + reshape_smooth_context, evaluate_linear_delta_grids_callback, nullptr); } static void propagate_linear_data_delta(const MultiresReshapeSmoothContext *reshape_smooth_context, @@ -1347,7 +1366,7 @@ static void propagate_linear_data_delta(const MultiresReshapeSmoothContext *resh const ReshapeConstGridElement orig_grid_element = 
multires_reshape_orig_grid_element_for_grid_coord(reshape_context, grid_coord); - if (final_grid_element->mask != NULL) { + if (final_grid_element->mask != nullptr) { *final_grid_element->mask = clamp_f( orig_grid_element.mask + linear_delta_element->mask, 0.0f, 1.0f); } @@ -1363,7 +1382,7 @@ static void evaluate_base_surface_grids_callback( const MultiresReshapeSmoothContext *reshape_smooth_context, const PTexCoord *ptex_coord, const GridCoord *grid_coord, - void *UNUSED(userdata_v)) + void * /*userdata_v*/) { float limit_P[3]; float tangent_matrix[3][3]; @@ -1375,7 +1394,8 @@ static void evaluate_base_surface_grids_callback( static void evaluate_base_surface_grids(const MultiresReshapeSmoothContext *reshape_smooth_context) { - foreach_toplevel_grid_coord(reshape_smooth_context, evaluate_base_surface_grids_callback, NULL); + foreach_toplevel_grid_coord( + reshape_smooth_context, evaluate_base_surface_grids_callback, nullptr); } /** \} */ @@ -1415,7 +1435,7 @@ static void evaluate_higher_grid_positions_with_details_callback( const MultiresReshapeSmoothContext *reshape_smooth_context, const PTexCoord *ptex_coord, const GridCoord *grid_coord, - void *UNUSED(userdata_v)) + void * /*userdata_v*/) { const MultiresReshapeContext *reshape_context = reshape_smooth_context->reshape_context; @@ -1465,14 +1485,14 @@ static void evaluate_higher_grid_positions_with_details( const MultiresReshapeSmoothContext *reshape_smooth_context) { foreach_toplevel_grid_coord( - reshape_smooth_context, evaluate_higher_grid_positions_with_details_callback, NULL); + reshape_smooth_context, evaluate_higher_grid_positions_with_details_callback, nullptr); } static void evaluate_higher_grid_positions_callback( const MultiresReshapeSmoothContext *reshape_smooth_context, const PTexCoord *ptex_coord, const GridCoord *grid_coord, - void *UNUSED(userdata_v)) + void * /*userdata_v*/) { const MultiresReshapeContext *reshape_context = reshape_smooth_context->reshape_context; Subdiv *reshape_subdiv = reshape_smooth_context->reshape_subdiv; @@ -1495,7 +1515,7 @@ static void evaluate_higher_grid_positions( const MultiresReshapeSmoothContext *reshape_smooth_context) { foreach_toplevel_grid_coord( - reshape_smooth_context, evaluate_higher_grid_positions_callback, NULL); + reshape_smooth_context, evaluate_higher_grid_positions_callback, nullptr); } /** \} */ diff --git a/source/blender/blenkernel/intern/multires_reshape_subdivide.c b/source/blender/blenkernel/intern/multires_reshape_subdivide.cc similarity index 90% rename from source/blender/blenkernel/intern/multires_reshape_subdivide.c rename to source/blender/blenkernel/intern/multires_reshape_subdivide.cc index 4e74835fd9a..724e07a7d44 100644 --- a/source/blender/blenkernel/intern/multires_reshape_subdivide.c +++ b/source/blender/blenkernel/intern/multires_reshape_subdivide.cc @@ -24,7 +24,7 @@ #include "DEG_depsgraph_query.h" -#include "multires_reshape.h" +#include "multires_reshape.hh" static void multires_subdivide_create_object_space_linear_grids(Mesh *mesh) { @@ -32,7 +32,8 @@ static void multires_subdivide_create_object_space_linear_grids(Mesh *mesh) const MPoly *polys = BKE_mesh_polys(mesh); const MLoop *loops = BKE_mesh_loops(mesh); - MDisps *mdisps = CustomData_get_layer_for_write(&mesh->ldata, CD_MDISPS, mesh->totloop); + MDisps *mdisps = static_cast( + CustomData_get_layer_for_write(&mesh->ldata, CD_MDISPS, mesh->totloop)); const int totpoly = mesh->totpoly; for (int p = 0; p < totpoly; p++) { const MPoly *poly = &polys[p]; @@ -63,7 +64,7 @@ static void 
multires_subdivide_create_object_space_linear_grids(Mesh *mesh) void multires_subdivide_create_tangent_displacement_linear_grids(Object *object, MultiresModifierData *mmd) { - Mesh *coarse_mesh = object->data; + Mesh *coarse_mesh = static_cast(object->data); multires_force_sculpt_rebuild(object); MultiresReshapeContext reshape_context; @@ -73,7 +74,7 @@ void multires_subdivide_create_tangent_displacement_linear_grids(Object *object, const bool has_mdisps = CustomData_has_layer(&coarse_mesh->ldata, CD_MDISPS); if (!has_mdisps) { CustomData_add_layer( - &coarse_mesh->ldata, CD_MDISPS, CD_SET_DEFAULT, NULL, coarse_mesh->totloop); + &coarse_mesh->ldata, CD_MDISPS, CD_SET_DEFAULT, nullptr, coarse_mesh->totloop); } if (new_top_level == 1) { diff --git a/source/blender/blenkernel/intern/multires_reshape_util.c b/source/blender/blenkernel/intern/multires_reshape_util.cc similarity index 84% rename from source/blender/blenkernel/intern/multires_reshape_util.c rename to source/blender/blenkernel/intern/multires_reshape_util.cc index 426f16ccc2e..6f15a8583ce 100644 --- a/source/blender/blenkernel/intern/multires_reshape_util.c +++ b/source/blender/blenkernel/intern/multires_reshape_util.cc @@ -5,7 +5,7 @@ * \ingroup bke */ -#include "multires_reshape.h" +#include "multires_reshape.hh" #include "MEM_guardedalloc.h" @@ -39,7 +39,7 @@ Subdiv *multires_reshape_create_subdiv(Depsgraph *depsgraph, { Mesh *base_mesh; - if (depsgraph != NULL) { + if (depsgraph != nullptr) { Scene *scene_eval = DEG_get_evaluated_scene(depsgraph); Object *object_eval = DEG_get_evaluated_object(depsgraph, object); base_mesh = mesh_get_eval_deform(depsgraph, scene_eval, object_eval, &CD_MASK_BAREMESH); @@ -51,9 +51,10 @@ Subdiv *multires_reshape_create_subdiv(Depsgraph *depsgraph, SubdivSettings subdiv_settings; BKE_multires_subdiv_settings_init(&subdiv_settings, mmd); Subdiv *subdiv = BKE_subdiv_new_from_mesh(&subdiv_settings, base_mesh); - if (!BKE_subdiv_eval_begin_from_mesh(subdiv, base_mesh, NULL, SUBDIV_EVALUATOR_TYPE_CPU, NULL)) { + if (!BKE_subdiv_eval_begin_from_mesh( + subdiv, base_mesh, nullptr, SUBDIV_EVALUATOR_TYPE_CPU, nullptr)) { BKE_subdiv_free(subdiv); - return NULL; + return nullptr; } return subdiv; } @@ -69,8 +70,8 @@ static void context_init_lookup(MultiresReshapeContext *reshape_context) const MPoly *mpoly = reshape_context->base_polys; const int num_faces = base_mesh->totpoly; - reshape_context->face_start_grid_index = MEM_malloc_arrayN( - num_faces, sizeof(int), "face_start_grid_index"); + reshape_context->face_start_grid_index = static_cast( + MEM_malloc_arrayN(num_faces, sizeof(int), "face_start_grid_index")); int num_grids = 0; int num_ptex_faces = 0; for (int face_index = 0; face_index < num_faces; ++face_index) { @@ -80,10 +81,10 @@ static void context_init_lookup(MultiresReshapeContext *reshape_context) num_ptex_faces += (num_corners == 4) ? 
1 : num_corners; } - reshape_context->grid_to_face_index = MEM_malloc_arrayN( - num_grids, sizeof(int), "grid_to_face_index"); - reshape_context->ptex_start_grid_index = MEM_malloc_arrayN( - num_ptex_faces, sizeof(int), "ptex_start_grid_index"); + reshape_context->grid_to_face_index = static_cast( + MEM_malloc_arrayN(num_grids, sizeof(int), "grid_to_face_index")); + reshape_context->ptex_start_grid_index = static_cast( + MEM_malloc_arrayN(num_ptex_faces, sizeof(int), "ptex_start_grid_index")); for (int face_index = 0, grid_index = 0, ptex_index = 0; face_index < num_faces; ++face_index) { const int num_corners = mpoly[face_index].totloop; const int num_face_ptex_faces = (num_corners == 4) ? 1 : num_corners; @@ -103,16 +104,16 @@ static void context_init_lookup(MultiresReshapeContext *reshape_context) static void context_init_grid_pointers(MultiresReshapeContext *reshape_context) { Mesh *base_mesh = reshape_context->base_mesh; - reshape_context->mdisps = CustomData_get_layer_for_write( - &base_mesh->ldata, CD_MDISPS, base_mesh->totloop); - reshape_context->grid_paint_masks = CustomData_get_layer_for_write( - &base_mesh->ldata, CD_GRID_PAINT_MASK, base_mesh->totloop); + reshape_context->mdisps = static_cast( + CustomData_get_layer_for_write(&base_mesh->ldata, CD_MDISPS, base_mesh->totloop)); + reshape_context->grid_paint_masks = static_cast( + CustomData_get_layer_for_write(&base_mesh->ldata, CD_GRID_PAINT_MASK, base_mesh->totloop)); } static void context_init_commoon(MultiresReshapeContext *reshape_context) { - BLI_assert(reshape_context->subdiv != NULL); - BLI_assert(reshape_context->base_mesh != NULL); + BLI_assert(reshape_context->subdiv != nullptr); + BLI_assert(reshape_context->base_mesh != nullptr); reshape_context->face_ptex_offset = BKE_subdiv_face_ptex_offset_get(reshape_context->subdiv); @@ -122,7 +123,7 @@ static void context_init_commoon(MultiresReshapeContext *reshape_context) static bool context_is_valid(MultiresReshapeContext *reshape_context) { - if (reshape_context->mdisps == NULL) { + if (reshape_context->mdisps == nullptr) { /* Multi-resolution displacement has been removed before current changes were applies. 
*/ return false; } @@ -159,7 +160,7 @@ bool multires_reshape_context_create_from_base_mesh(MultiresReshapeContext *resh reshape_context->base_polys = BKE_mesh_polys(base_mesh); reshape_context->base_loops = BKE_mesh_loops(base_mesh); - reshape_context->subdiv = multires_reshape_create_subdiv(NULL, object, mmd); + reshape_context->subdiv = multires_reshape_create_subdiv(nullptr, object, mmd); reshape_context->need_free_subdiv = true; reshape_context->reshape.level = multires_get_level( @@ -207,8 +208,10 @@ bool multires_reshape_context_create_from_object(MultiresReshapeContext *reshape reshape_context->top.level = mmd->totlvl; reshape_context->top.grid_size = BKE_subdiv_grid_size_from_level(reshape_context->top.level); - reshape_context->cd_vertex_crease = CustomData_get_layer(&base_mesh->vdata, CD_CREASE); - reshape_context->cd_edge_crease = CustomData_get_layer(&base_mesh->edata, CD_CREASE); + reshape_context->cd_vertex_crease = static_cast( + CustomData_get_layer(&base_mesh->vdata, CD_CREASE)); + reshape_context->cd_edge_crease = static_cast( + CustomData_get_layer(&base_mesh->edata, CD_CREASE)); context_init_commoon(reshape_context); @@ -244,11 +247,11 @@ bool multires_reshape_context_create_from_ccg(MultiresReshapeContext *reshape_co } bool multires_reshape_context_create_from_modifier(MultiresReshapeContext *reshape_context, - struct Object *object, - struct MultiresModifierData *mmd, + Object *object, + MultiresModifierData *mmd, int top_level) { - Subdiv *subdiv = multires_reshape_create_subdiv(NULL, object, mmd); + Subdiv *subdiv = multires_reshape_create_subdiv(nullptr, object, mmd); const bool result = multires_reshape_context_create_from_subdiv( reshape_context, object, mmd, subdiv, top_level); @@ -259,9 +262,9 @@ bool multires_reshape_context_create_from_modifier(MultiresReshapeContext *resha } bool multires_reshape_context_create_from_subdiv(MultiresReshapeContext *reshape_context, - struct Object *object, - struct MultiresModifierData *mmd, - struct Subdiv *subdiv, + Object *object, + MultiresModifierData *mmd, + Subdiv *subdiv, int top_level) { context_zero(reshape_context); @@ -297,17 +300,17 @@ void multires_reshape_free_original_grids(MultiresReshapeContext *reshape_contex MDisps *orig_mdisps = reshape_context->orig.mdisps; GridPaintMask *orig_grid_paint_masks = reshape_context->orig.grid_paint_masks; - if (orig_mdisps == NULL && orig_grid_paint_masks == NULL) { + if (orig_mdisps == nullptr && orig_grid_paint_masks == nullptr) { return; } const int num_grids = reshape_context->num_grids; for (int grid_index = 0; grid_index < num_grids; grid_index++) { - if (orig_mdisps != NULL) { + if (orig_mdisps != nullptr) { MDisps *orig_grid = &orig_mdisps[grid_index]; MEM_SAFE_FREE(orig_grid->disps); } - if (orig_grid_paint_masks != NULL) { + if (orig_grid_paint_masks != nullptr) { GridPaintMask *orig_paint_mask_grid = &orig_grid_paint_masks[grid_index]; MEM_SAFE_FREE(orig_paint_mask_grid->data); } @@ -316,8 +319,8 @@ void multires_reshape_free_original_grids(MultiresReshapeContext *reshape_contex MEM_SAFE_FREE(orig_mdisps); MEM_SAFE_FREE(orig_grid_paint_masks); - reshape_context->orig.mdisps = NULL; - reshape_context->orig.grid_paint_masks = NULL; + reshape_context->orig.mdisps = nullptr; + reshape_context->orig.grid_paint_masks = nullptr; } void multires_reshape_context_free(MultiresReshapeContext *reshape_context) @@ -449,19 +452,19 @@ void multires_reshape_tangent_matrix_for_corner(const MultiresReshapeContext *re ReshapeGridElement multires_reshape_grid_element_for_grid_coord( 
const MultiresReshapeContext *reshape_context, const GridCoord *grid_coord) { - ReshapeGridElement grid_element = {NULL, NULL}; + ReshapeGridElement grid_element = {nullptr, nullptr}; const int grid_size = reshape_context->top.grid_size; const int grid_x = lround(grid_coord->u * (grid_size - 1)); const int grid_y = lround(grid_coord->v * (grid_size - 1)); const int grid_element_index = grid_y * grid_size + grid_x; - if (reshape_context->mdisps != NULL) { + if (reshape_context->mdisps != nullptr) { MDisps *displacement_grid = &reshape_context->mdisps[grid_coord->grid_index]; grid_element.displacement = displacement_grid->disps[grid_element_index]; } - if (reshape_context->grid_paint_masks != NULL) { + if (reshape_context->grid_paint_masks != nullptr) { GridPaintMask *grid_paint_mask = &reshape_context->grid_paint_masks[grid_coord->grid_index]; grid_element.mask = &grid_paint_mask->data[grid_element_index]; } @@ -482,9 +485,9 @@ ReshapeConstGridElement multires_reshape_orig_grid_element_for_grid_coord( ReshapeConstGridElement grid_element = {{0.0f, 0.0f, 0.0f}, 0.0f}; const MDisps *mdisps = reshape_context->orig.mdisps; - if (mdisps != NULL) { + if (mdisps != nullptr) { const MDisps *displacement_grid = &mdisps[grid_coord->grid_index]; - if (displacement_grid->disps != NULL) { + if (displacement_grid->disps != nullptr) { const int grid_size = BKE_subdiv_grid_size_from_level(displacement_grid->level); const int grid_x = lround(grid_coord->u * (grid_size - 1)); const int grid_y = lround(grid_coord->v * (grid_size - 1)); @@ -494,9 +497,9 @@ ReshapeConstGridElement multires_reshape_orig_grid_element_for_grid_coord( } const GridPaintMask *grid_paint_masks = reshape_context->orig.grid_paint_masks; - if (grid_paint_masks != NULL) { + if (grid_paint_masks != nullptr) { const GridPaintMask *paint_mask_grid = &grid_paint_masks[grid_coord->grid_index]; - if (paint_mask_grid->data != NULL) { + if (paint_mask_grid->data != nullptr) { const int grid_size = BKE_subdiv_grid_size_from_level(paint_mask_grid->level); const int grid_x = lround(grid_coord->u * (grid_size - 1)); const int grid_y = lround(grid_coord->v * (grid_size - 1)); @@ -542,8 +545,9 @@ static void allocate_displacement_grid(MDisps *displacement_grid, const int leve { const int grid_size = BKE_subdiv_grid_size_from_level(level); const int grid_area = grid_size * grid_size; - float(*disps)[3] = MEM_calloc_arrayN(grid_area, sizeof(float[3]), "multires disps"); - if (displacement_grid->disps != NULL) { + float(*disps)[3] = static_cast( + MEM_calloc_arrayN(grid_area, sizeof(float[3]), "multires disps")); + if (displacement_grid->disps != nullptr) { MEM_freeN(displacement_grid->disps); } /* TODO(sergey): Preserve data on the old level. 
*/ @@ -554,7 +558,7 @@ static void allocate_displacement_grid(MDisps *displacement_grid, const int leve static void ensure_displacement_grid(MDisps *displacement_grid, const int level) { - if (displacement_grid->disps != NULL && displacement_grid->level >= level) { + if (displacement_grid->disps != nullptr && displacement_grid->level >= level) { return; } allocate_displacement_grid(displacement_grid, level); @@ -563,7 +567,8 @@ static void ensure_displacement_grid(MDisps *displacement_grid, const int level) static void ensure_displacement_grids(Mesh *mesh, const int grid_level) { const int num_grids = mesh->totloop; - MDisps *mdisps = CustomData_get_layer_for_write(&mesh->ldata, CD_MDISPS, mesh->totloop); + MDisps *mdisps = static_cast( + CustomData_get_layer_for_write(&mesh->ldata, CD_MDISPS, mesh->totloop)); for (int grid_index = 0; grid_index < num_grids; grid_index++) { ensure_displacement_grid(&mdisps[grid_index], grid_level); } @@ -571,9 +576,9 @@ static void ensure_displacement_grids(Mesh *mesh, const int grid_level) static void ensure_mask_grids(Mesh *mesh, const int level) { - GridPaintMask *grid_paint_masks = CustomData_get_layer_for_write( - &mesh->ldata, CD_GRID_PAINT_MASK, mesh->totloop); - if (grid_paint_masks == NULL) { + GridPaintMask *grid_paint_masks = static_cast( + CustomData_get_layer_for_write(&mesh->ldata, CD_GRID_PAINT_MASK, mesh->totloop)); + if (grid_paint_masks == nullptr) { return; } const int num_grids = mesh->totloop; @@ -589,7 +594,8 @@ static void ensure_mask_grids(Mesh *mesh, const int level) MEM_freeN(grid_paint_mask->data); } /* TODO(sergey): Preserve data on the old level. */ - grid_paint_mask->data = MEM_calloc_arrayN(grid_area, sizeof(float), "gpm.data"); + grid_paint_mask->data = static_cast( + MEM_calloc_arrayN(grid_area, sizeof(float), "gpm.data")); } } @@ -610,10 +616,10 @@ void multires_reshape_store_original_grids(MultiresReshapeContext *reshape_conte const MDisps *mdisps = reshape_context->mdisps; const GridPaintMask *grid_paint_masks = reshape_context->grid_paint_masks; - MDisps *orig_mdisps = MEM_dupallocN(mdisps); - GridPaintMask *orig_grid_paint_masks = NULL; - if (grid_paint_masks != NULL) { - orig_grid_paint_masks = MEM_dupallocN(grid_paint_masks); + MDisps *orig_mdisps = static_cast(MEM_dupallocN(mdisps)); + GridPaintMask *orig_grid_paint_masks = nullptr; + if (grid_paint_masks != nullptr) { + orig_grid_paint_masks = static_cast(MEM_dupallocN(grid_paint_masks)); } const int num_grids = reshape_context->num_grids; @@ -623,13 +629,14 @@ void multires_reshape_store_original_grids(MultiresReshapeContext *reshape_conte * data when accessed during reshape process. * Reshape process will ensure all grids are on top level, but that happens on separate set of * grids which eventually replaces original one. 
*/ - if (orig_grid->disps != NULL) { - orig_grid->disps = MEM_dupallocN(orig_grid->disps); + if (orig_grid->disps != nullptr) { + orig_grid->disps = static_cast(MEM_dupallocN(orig_grid->disps)); } - if (orig_grid_paint_masks != NULL) { + if (orig_grid_paint_masks != nullptr) { GridPaintMask *orig_paint_mask_grid = &orig_grid_paint_masks[grid_index]; - if (orig_paint_mask_grid->data != NULL) { - orig_paint_mask_grid->data = MEM_dupallocN(orig_paint_mask_grid->data); + if (orig_paint_mask_grid->data != nullptr) { + orig_paint_mask_grid->data = static_cast( + MEM_dupallocN(orig_paint_mask_grid->data)); } } } @@ -638,11 +645,11 @@ void multires_reshape_store_original_grids(MultiresReshapeContext *reshape_conte reshape_context->orig.grid_paint_masks = orig_grid_paint_masks; } -typedef void (*ForeachGridCoordinateCallback)(const MultiresReshapeContext *reshape_context, - const GridCoord *grid_coord, - void *userdata_v); +using ForeachGridCoordinateCallback = void (*)(const MultiresReshapeContext *reshape_context, + const GridCoord *grid_coord, + void *userdata_v); -typedef struct ForeachGridCoordinateTaskData { +struct ForeachGridCoordinateTaskData { const MultiresReshapeContext *reshape_context; int grid_size; @@ -650,27 +657,27 @@ typedef struct ForeachGridCoordinateTaskData { ForeachGridCoordinateCallback callback; void *callback_userdata_v; -} ForeachGridCoordinateTaskData; +}; static void foreach_grid_face_coordinate_task(void *__restrict userdata_v, const int face_index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - ForeachGridCoordinateTaskData *data = userdata_v; + ForeachGridCoordinateTaskData *data = static_cast(userdata_v); const MultiresReshapeContext *reshape_context = data->reshape_context; const MPoly *mpoly = reshape_context->base_polys; const int grid_size = data->grid_size; - const float grid_size_1_inv = 1.0f / (((float)grid_size) - 1.0f); + const float grid_size_1_inv = 1.0f / ((float(grid_size)) - 1.0f); const int num_corners = mpoly[face_index].totloop; int grid_index = reshape_context->face_start_grid_index[face_index]; for (int corner = 0; corner < num_corners; ++corner, ++grid_index) { for (int y = 0; y < grid_size; ++y) { - const float v = (float)y * grid_size_1_inv; + const float v = float(y) * grid_size_1_inv; for (int x = 0; x < grid_size; ++x) { - const float u = (float)x * grid_size_1_inv; + const float u = float(x) * grid_size_1_inv; GridCoord grid_coord; grid_coord.grid_index = grid_index; @@ -692,7 +699,7 @@ static void foreach_grid_coordinate(const MultiresReshapeContext *reshape_contex ForeachGridCoordinateTaskData data; data.reshape_context = reshape_context; data.grid_size = BKE_subdiv_grid_size_from_level(level); - data.grid_size_1_inv = 1.0f / (((float)data.grid_size) - 1.0f); + data.grid_size_1_inv = 1.0f / ((float(data.grid_size)) - 1.0f); data.callback = callback; data.callback_userdata_v = userdata_v; @@ -709,7 +716,7 @@ static void foreach_grid_coordinate(const MultiresReshapeContext *reshape_contex static void object_grid_element_to_tangent_displacement( const MultiresReshapeContext *reshape_context, const GridCoord *grid_coord, - void *UNUSED(userdata_v)) + void * /*userdata_v*/) { float P[3]; float tangent_matrix[3][3]; @@ -736,7 +743,7 @@ void multires_reshape_object_grids_to_tangent_displacement( foreach_grid_coordinate(reshape_context, reshape_context->top.level, object_grid_element_to_tangent_displacement, - NULL); + nullptr); } /** \} */ @@ -750,7 +757,7 @@ void 
multires_reshape_object_grids_to_tangent_displacement( static void assign_final_coords_from_mdisps(const MultiresReshapeContext *reshape_context, const GridCoord *grid_coord, - void *UNUSED(userdata_v)) + void * /*userdata_v*/) { float P[3]; float tangent_matrix[3][3]; @@ -768,12 +775,12 @@ void multires_reshape_assign_final_coords_from_mdisps( const MultiresReshapeContext *reshape_context) { foreach_grid_coordinate( - reshape_context, reshape_context->top.level, assign_final_coords_from_mdisps, NULL); + reshape_context, reshape_context->top.level, assign_final_coords_from_mdisps, nullptr); } static void assign_final_elements_from_orig_mdisps(const MultiresReshapeContext *reshape_context, const GridCoord *grid_coord, - void *UNUSED(userdata_v)) + void * /*userdata_v*/) { float P[3]; float tangent_matrix[3][3]; @@ -789,7 +796,7 @@ static void assign_final_elements_from_orig_mdisps(const MultiresReshapeContext grid_coord); add_v3_v3v3(grid_element.displacement, P, D); - if (grid_element.mask != NULL) { + if (grid_element.mask != nullptr) { *grid_element.mask = orig_grid_element.mask; } } @@ -797,8 +804,10 @@ static void assign_final_elements_from_orig_mdisps(const MultiresReshapeContext void multires_reshape_assign_final_elements_from_orig_mdisps( const MultiresReshapeContext *reshape_context) { - foreach_grid_coordinate( - reshape_context, reshape_context->top.level, assign_final_elements_from_orig_mdisps, NULL); + foreach_grid_coordinate(reshape_context, + reshape_context->top.level, + assign_final_elements_from_orig_mdisps, + nullptr); } /** \} */ diff --git a/source/blender/blenkernel/intern/multires_reshape_vertcos.c b/source/blender/blenkernel/intern/multires_reshape_vertcos.cc similarity index 70% rename from source/blender/blenkernel/intern/multires_reshape_vertcos.c rename to source/blender/blenkernel/intern/multires_reshape_vertcos.cc index 7cf853e0374..12c67ad104e 100644 --- a/source/blender/blenkernel/intern/multires_reshape_vertcos.c +++ b/source/blender/blenkernel/intern/multires_reshape_vertcos.cc @@ -5,7 +5,7 @@ * \ingroup bke */ -#include "multires_reshape.h" +#include "multires_reshape.hh" #include "DNA_mesh_types.h" #include "DNA_meshdata_types.h" @@ -15,12 +15,12 @@ #include "BKE_subdiv_foreach.h" #include "BKE_subdiv_mesh.h" -typedef struct MultiresReshapeAssignVertcosContext { +struct MultiresReshapeAssignVertcosContext { const MultiresReshapeContext *reshape_context; const float (*vert_coords)[3]; - const int num_vert_coords; -} MultiresReshapeAssignVertcosContext; + int num_vert_coords; +}; /** * Set single displacement grid value at a reshape level to a corresponding vertex coordinate. 
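/* Illustrative sketch, not part of the patch above: the grid-element lookups in
 * multires_reshape_grid_element_for_grid_coord() convert a normalized (u, v) grid
 * coordinate into a flat array index. `grid_index_from_uv` below is a hypothetical
 * stand-alone helper mirroring that math, not a Blender API. */

#include <cmath>

static int grid_index_from_uv(const float u, const float v, const int grid_size)
{
  const int grid_x = int(std::lround(u * (grid_size - 1)));
  const int grid_y = int(std::lround(v * (grid_size - 1)));
  return grid_y * grid_size + grid_x;
}

/* Example: on a 5x5 grid, (u, v) = (1.0f, 0.5f) gives x = 4, y = 2, index = 14. */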
@@ -31,21 +31,23 @@ static void multires_reshape_vertcos_foreach_single_vertex( const GridCoord *grid_coord, const int subdiv_vertex_index) { - MultiresReshapeAssignVertcosContext *reshape_vertcos_context = foreach_context->user_data; + MultiresReshapeAssignVertcosContext *reshape_vertcos_context = + static_cast(foreach_context->user_data); const float *coordinate = reshape_vertcos_context->vert_coords[subdiv_vertex_index]; ReshapeGridElement grid_element = multires_reshape_grid_element_for_grid_coord( reshape_vertcos_context->reshape_context, grid_coord); - BLI_assert(grid_element.displacement != NULL); + BLI_assert(grid_element.displacement != nullptr); copy_v3_v3(grid_element.displacement, coordinate); } -/* TODO(sergey): De-duplicate with similar function in multires_reshape_smooth.c */ +/* TODO(sergey): De-duplicate with similar function in multires_reshape_smooth.cc */ static void multires_reshape_vertcos_foreach_vertex(const SubdivForeachContext *foreach_context, const PTexCoord *ptex_coord, const int subdiv_vertex_index) { - const MultiresReshapeAssignVertcosContext *reshape_vertcos_context = foreach_context->user_data; + const MultiresReshapeAssignVertcosContext *reshape_vertcos_context = + static_cast(foreach_context->user_data); const MultiresReshapeContext *reshape_context = reshape_vertcos_context->reshape_context; const GridCoord grid_coord = multires_reshape_ptex_coord_to_grid(reshape_context, ptex_coord); @@ -95,12 +97,13 @@ static void multires_reshape_vertcos_foreach_vertex(const SubdivForeachContext * static bool multires_reshape_vertcos_foreach_topology_info( const SubdivForeachContext *foreach_context, const int num_vertices, - const int UNUSED(num_edges), - const int UNUSED(num_loops), - const int UNUSED(num_polygons), - const int *UNUSED(subdiv_polygon_offset)) + const int /*num_edges*/, + const int /*num_loops*/, + const int /*num_polygons*/, + const int * /*subdiv_polygon_offset*/) { - MultiresReshapeAssignVertcosContext *reshape_vertcos_context = foreach_context->user_data; + MultiresReshapeAssignVertcosContext *reshape_vertcos_context = + static_cast(foreach_context->user_data); if (num_vertices != reshape_vertcos_context->num_vert_coords) { return false; } @@ -110,59 +113,56 @@ static bool multires_reshape_vertcos_foreach_topology_info( /* SubdivForeachContext::vertex_inner() */ static void multires_reshape_vertcos_foreach_vertex_inner( const SubdivForeachContext *foreach_context, - void *UNUSED(tls_v), + void * /*tls_v*/, const int ptex_face_index, const float ptex_face_u, const float ptex_face_v, - const int UNUSED(coarse_face_index), - const int UNUSED(coarse_face_corner), + const int /*coarse_face_index*/, + const int /*coarse_face_corner*/, const int subdiv_vertex_index) { - const PTexCoord ptex_coord = { - .ptex_face_index = ptex_face_index, - .u = ptex_face_u, - .v = ptex_face_v, - }; + PTexCoord ptex_coord{}; + ptex_coord.ptex_face_index = ptex_face_index; + ptex_coord.u = ptex_face_u; + ptex_coord.v = ptex_face_v; multires_reshape_vertcos_foreach_vertex(foreach_context, &ptex_coord, subdiv_vertex_index); } /* SubdivForeachContext::vertex_every_corner() */ static void multires_reshape_vertcos_foreach_vertex_every_corner( - const struct SubdivForeachContext *foreach_context, - void *UNUSED(tls_v), + const SubdivForeachContext *foreach_context, + void * /*tls_v*/, const int ptex_face_index, const float ptex_face_u, const float ptex_face_v, - const int UNUSED(coarse_vertex_index), - const int UNUSED(coarse_face_index), - const int 
UNUSED(coarse_face_corner), + const int /*coarse_vertex_index*/, + const int /*coarse_face_index*/, + const int /*coarse_face_corner*/, const int subdiv_vertex_index) { - const PTexCoord ptex_coord = { - .ptex_face_index = ptex_face_index, - .u = ptex_face_u, - .v = ptex_face_v, - }; + PTexCoord ptex_coord{}; + ptex_coord.ptex_face_index = ptex_face_index; + ptex_coord.u = ptex_face_u; + ptex_coord.v = ptex_face_v; multires_reshape_vertcos_foreach_vertex(foreach_context, &ptex_coord, subdiv_vertex_index); } /* SubdivForeachContext::vertex_every_edge() */ static void multires_reshape_vertcos_foreach_vertex_every_edge( - const struct SubdivForeachContext *foreach_context, - void *UNUSED(tls_v), + const SubdivForeachContext *foreach_context, + void * /*tls_v*/, const int ptex_face_index, const float ptex_face_u, const float ptex_face_v, - const int UNUSED(coarse_edge_index), - const int UNUSED(coarse_face_index), - const int UNUSED(coarse_face_corner), + const int /*coarse_edge_index*/, + const int /*coarse_face_index*/, + const int /*coarse_face_corner*/, const int subdiv_vertex_index) { - const PTexCoord ptex_coord = { - .ptex_face_index = ptex_face_index, - .u = ptex_face_u, - .v = ptex_face_v, - }; + PTexCoord ptex_coord{}; + ptex_coord.ptex_face_index = ptex_face_index; + ptex_coord.u = ptex_face_u; + ptex_coord.v = ptex_face_v; multires_reshape_vertcos_foreach_vertex(foreach_context, &ptex_coord, subdiv_vertex_index); } @@ -171,19 +171,17 @@ bool multires_reshape_assign_final_coords_from_vertcos( const float (*vert_coords)[3], const int num_vert_coords) { - MultiresReshapeAssignVertcosContext reshape_vertcos_context = { - .reshape_context = reshape_context, - .vert_coords = vert_coords, - .num_vert_coords = num_vert_coords, - }; + MultiresReshapeAssignVertcosContext reshape_vertcos_context{}; + reshape_vertcos_context.reshape_context = reshape_context; + reshape_vertcos_context.vert_coords = vert_coords; + reshape_vertcos_context.num_vert_coords = num_vert_coords; - SubdivForeachContext foreach_context = { - .topology_info = multires_reshape_vertcos_foreach_topology_info, - .vertex_inner = multires_reshape_vertcos_foreach_vertex_inner, - .vertex_every_edge = multires_reshape_vertcos_foreach_vertex_every_edge, - .vertex_every_corner = multires_reshape_vertcos_foreach_vertex_every_corner, - .user_data = &reshape_vertcos_context, - }; + SubdivForeachContext foreach_context{}; + foreach_context.topology_info = multires_reshape_vertcos_foreach_topology_info; + foreach_context.vertex_inner = multires_reshape_vertcos_foreach_vertex_inner; + foreach_context.vertex_every_edge = multires_reshape_vertcos_foreach_vertex_every_edge; + foreach_context.vertex_every_corner = multires_reshape_vertcos_foreach_vertex_every_corner; + foreach_context.user_data = &reshape_vertcos_context; SubdivToMeshSettings mesh_settings; mesh_settings.resolution = (1 << reshape_context->reshape.level) + 1; diff --git a/source/blender/blenkernel/intern/multires_subdiv.c b/source/blender/blenkernel/intern/multires_subdiv.cc similarity index 100% rename from source/blender/blenkernel/intern/multires_subdiv.c rename to source/blender/blenkernel/intern/multires_subdiv.cc diff --git a/source/blender/blenkernel/intern/multires_unsubdivide.c b/source/blender/blenkernel/intern/multires_unsubdivide.cc similarity index 92% rename from source/blender/blenkernel/intern/multires_unsubdivide.c rename to source/blender/blenkernel/intern/multires_unsubdivide.cc index df95c52ee94..e7ca4992c32 100644 --- 
a/source/blender/blenkernel/intern/multires_unsubdivide.c +++ b/source/blender/blenkernel/intern/multires_unsubdivide.cc @@ -31,7 +31,7 @@ #include "DEG_depsgraph_query.h" -#include "multires_reshape.h" +#include "multires_reshape.hh" #include "multires_unsubdivide.h" /* This is done in the following steps: @@ -90,7 +90,7 @@ static BMVert *unsubdivide_find_any_pole(BMesh *bm, int *elem_id, int elem) { BMIter iter; BMVert *v; - BMVert *pole = NULL; + BMVert *pole = nullptr; BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) { if (is_vertex_in_id(v, elem_id, elem) && is_vertex_pole_three(v)) { return v; @@ -161,7 +161,8 @@ static bool is_vertex_diagonal(BMVert *from_v, BMVert *to_v) */ static void unsubdivide_face_center_vertex_tag(BMesh *bm, BMVert *initial_vertex) { - bool *visited_verts = MEM_calloc_arrayN(bm->totvert, sizeof(bool), "visited vertices"); + bool *visited_verts = static_cast( + MEM_calloc_arrayN(bm->totvert, sizeof(bool), "visited vertices")); GSQueue *queue; queue = BLI_gsqueue_new(sizeof(BMVert *)); @@ -290,14 +291,14 @@ static bool unsubdivide_tag_disconnected_mesh_element(BMesh *bm, int *elem_id, i * part of the base mesh. If it isn't, then there is no solution. */ GSQueue *initial_vertex = BLI_gsqueue_new(sizeof(BMVert *)); BMVert *initial_vertex_pole = unsubdivide_find_any_pole(bm, elem_id, elem); - if (initial_vertex_pole != NULL) { + if (initial_vertex_pole != nullptr) { BLI_gsqueue_push(initial_vertex, &initial_vertex_pole); } /* Also try from the different 4 vertices of a quad in the current * disconnected element ID. If a solution exists the search should return a valid solution from * one of these vertices. */ - BMFace *f, *init_face = NULL; + BMFace *f, *init_face = nullptr; BMVert *v; BMIter iter_a, iter_b; BM_ITER_MESH (f, &iter_a, bm, BM_FACES_OF_MESH) { @@ -307,7 +308,7 @@ static bool unsubdivide_tag_disconnected_mesh_element(BMesh *bm, int *elem_id, i break; } } - if (init_face != NULL) { + if (init_face != nullptr) { break; } } @@ -352,7 +353,8 @@ static bool unsubdivide_tag_disconnected_mesh_element(BMesh *bm, int *elem_id, i */ static int unsubdivide_init_elem_ids(BMesh *bm, int *elem_id) { - bool *visited_verts = MEM_calloc_arrayN(bm->totvert, sizeof(bool), "visited vertices"); + bool *visited_verts = static_cast( + MEM_calloc_arrayN(bm->totvert, sizeof(bool), "visited vertices")); int current_id = 0; for (int i = 0; i < bm->totvert; i++) { if (!visited_verts[i]) { @@ -459,7 +461,7 @@ static bool multires_unsubdivide_single_level(BMesh *bm) BM_mesh_elem_table_ensure(bm, BM_VERT); /* Build disconnected elements IDs. Each disconnected mesh element is evaluated separately. 
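/* Illustrative sketch, assuming a plain adjacency list instead of BMesh: the
 * element-ID pass described above amounts to a breadth-first flood fill that gives
 * every disconnected component its own ID, in the spirit of
 * unsubdivide_init_elem_ids(). All names below are hypothetical. */

#include <queue>
#include <vector>

static int assign_component_ids(const std::vector<std::vector<int>> &adjacency,
                                std::vector<int> &r_elem_id)
{
  const int totvert = int(adjacency.size());
  std::vector<bool> visited(totvert, false);
  r_elem_id.assign(totvert, -1);
  int current_id = 0;
  for (int i = 0; i < totvert; i++) {
    if (visited[i]) {
      continue;
    }
    /* Start a new component from the first unvisited vertex. */
    std::queue<int> queue;
    queue.push(i);
    visited[i] = true;
    while (!queue.empty()) {
      const int v = queue.front();
      queue.pop();
      r_elem_id[v] = current_id;
      for (const int other : adjacency[v]) {
        if (!visited[other]) {
          visited[other] = true;
          queue.push(other);
        }
      }
    }
    current_id++;
  }
  /* Returns the number of disconnected components found. */
  return current_id;
}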
*/ - int *elem_id = MEM_calloc_arrayN(bm->totvert, sizeof(int), " ELEM ID"); + int *elem_id = static_cast(MEM_calloc_arrayN(bm->totvert, sizeof(int), " ELEM ID")); const int tot_ids = unsubdivide_init_elem_ids(bm, elem_id); bool valid_tag_found = true; @@ -495,7 +497,7 @@ static BMEdge *edge_step(BMVert *v, BMEdge *edge, BMVert **r_next_vertex) { BMIter iter; BMEdge *test_edge; - if (edge == NULL) { + if (edge == nullptr) { (*r_next_vertex) = v; return edge; } @@ -505,7 +507,7 @@ static BMEdge *edge_step(BMVert *v, BMEdge *edge, BMVert **r_next_vertex) return test_edge; } } - return NULL; + return nullptr; } static BMFace *face_step(BMEdge *edge, BMFace *f) @@ -539,7 +541,7 @@ static BMEdge *get_initial_edge_y(BMFace *f, BMEdge *edge_x, BMVert *initial_ver } } } - return NULL; + return nullptr; } /** @@ -662,7 +664,8 @@ static void store_grid_data(MultiresUnsubdivideContext *context, const int grid_size = BKE_ccg_gridsize(context->num_original_levels); const int face_grid_size = BKE_ccg_gridsize(context->num_original_levels + 1); const int face_grid_area = face_grid_size * face_grid_size; - float(*face_grid)[3] = MEM_calloc_arrayN(face_grid_area, sizeof(float[3]), "face_grid"); + float(*face_grid)[3] = static_cast( + MEM_calloc_arrayN(face_grid_area, sizeof(float[3]), "face_grid")); for (int i = 0; i < poly->totloop; i++) { const int loop_index = poly->loopstart + i; @@ -715,8 +718,8 @@ static void multires_unsubdivide_extract_single_grid_from_face_edge( const int grid_size = BKE_ccg_gridsize(context->num_new_levels); const int unsubdiv_grid_size = grid->grid_size = BKE_ccg_gridsize(context->num_total_levels); grid->grid_size = unsubdiv_grid_size; - grid->grid_co = MEM_calloc_arrayN( - unsubdiv_grid_size * unsubdiv_grid_size, sizeof(float[3]), "grids coordinates"); + grid->grid_co = static_cast(MEM_calloc_arrayN( + unsubdiv_grid_size * unsubdiv_grid_size, sizeof(float[3]), "grids coordinates")); /* Get the vertex on the corner of the grid. This vertex was tagged previously as it also exist * on the base mesh. 
*/ @@ -861,18 +864,16 @@ static void multires_unsubdivide_get_grid_corners_on_base_mesh(BMFace *f1, static BMesh *get_bmesh_from_mesh(Mesh *mesh) { const BMAllocTemplate allocsize = BMALLOC_TEMPLATE_FROM_ME(mesh); - BMesh *bm = BM_mesh_create(&allocsize, - &((struct BMeshCreateParams){ - .use_toolflags = true, - })); - BM_mesh_bm_from_me(NULL, - bm, - mesh, - (&(struct BMeshFromMeshParams){ - .calc_face_normal = true, - .calc_vert_normal = true, - })); + + BMeshCreateParams bm_create_params{}; + bm_create_params.use_toolflags = true; + BMesh *bm = BM_mesh_create(&allocsize, &bm_create_params); + + BMeshFromMeshParams bm_from_me_params{}; + bm_from_me_params.calc_face_normal = true; + bm_from_me_params.calc_vert_normal = true; + BM_mesh_bm_from_me(nullptr, bm, mesh, &bm_from_me_params); return bm; } @@ -902,11 +903,11 @@ static void multires_unsubdivide_add_original_index_datalayers(Mesh *mesh) { multires_unsubdivide_free_original_datalayers(mesh); - int *l_index = CustomData_add_layer_named( - &mesh->ldata, CD_PROP_INT32, CD_SET_DEFAULT, NULL, mesh->totloop, lname); + int *l_index = static_cast(CustomData_add_layer_named( + &mesh->ldata, CD_PROP_INT32, CD_SET_DEFAULT, nullptr, mesh->totloop, lname)); - int *v_index = CustomData_add_layer_named( - &mesh->vdata, CD_PROP_INT32, CD_SET_DEFAULT, NULL, mesh->totvert, vname); + int *v_index = static_cast(CustomData_add_layer_named( + &mesh->vdata, CD_PROP_INT32, CD_SET_DEFAULT, nullptr, mesh->totvert, vname)); /* Initialize these data-layer with the indices in the current mesh. */ for (int i = 0; i < mesh->totloop; i++) { @@ -939,8 +940,8 @@ static void multires_unsubdivide_prepare_original_bmesh_for_extract( bm_original_mesh, BM_VERT | BM_EDGE | BM_FACE, BM_ELEM_SELECT, false); /* Get the mapping data-layer. */ - context->base_to_orig_vmap = CustomData_get_layer_named_for_write( - &base_mesh->vdata, CD_PROP_INT32, vname, base_mesh->totvert); + context->base_to_orig_vmap = static_cast(CustomData_get_layer_named_for_write( + &base_mesh->vdata, CD_PROP_INT32, vname, base_mesh->totvert)); /* Tag the base mesh vertices in the original mesh. */ for (int i = 0; i < base_mesh->totvert; i++) { @@ -950,7 +951,8 @@ static void multires_unsubdivide_prepare_original_bmesh_for_extract( } /* Create a map from loop index to poly index for the original mesh. */ - context->loop_to_face_map = MEM_calloc_arrayN(original_mesh->totloop, sizeof(int), "loop map"); + context->loop_to_face_map = static_cast( + MEM_calloc_arrayN(original_mesh->totloop, sizeof(int), "loop map")); for (int i = 0; i < original_mesh->totpoly; i++) { const MPoly *poly = &original_polys[i]; @@ -994,17 +996,19 @@ static void multires_unsubdivide_extract_grids(MultiresUnsubdivideContext *conte BMesh *bm_original_mesh = context->bm_original_mesh; context->num_grids = base_mesh->totloop; - context->base_mesh_grids = MEM_calloc_arrayN( - base_mesh->totloop, sizeof(MultiresUnsubdivideGrid), "grids"); + context->base_mesh_grids = static_cast( + MEM_calloc_arrayN(base_mesh->totloop, sizeof(MultiresUnsubdivideGrid), "grids")); /* Based on the existing indices in the data-layers, generate two vertex indices maps. */ /* From vertex index in original to vertex index in base and from vertex index in base to vertex * index in original. 
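/* Illustrative sketch, not part of the patch: the two vertex maps mentioned in the
 * comment above form a forward/inverse index pair. Given the stored base-to-original
 * map, the original-to-base map can be rebuilt as below; `build_orig_to_base_vmap`
 * is a hypothetical helper using std::vector rather than the MEM_* allocators. */

#include <vector>

static std::vector<int> build_orig_to_base_vmap(const std::vector<int> &base_to_orig_vmap,
                                                const int totvert_orig)
{
  /* Vertices that exist only in the original (subdivided) mesh keep -1. */
  std::vector<int> orig_to_base_vmap(totvert_orig, -1);
  for (int base_vert = 0; base_vert < int(base_to_orig_vmap.size()); base_vert++) {
    orig_to_base_vmap[base_to_orig_vmap[base_vert]] = base_vert;
  }
  return orig_to_base_vmap;
}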
*/ - int *orig_to_base_vmap = MEM_calloc_arrayN(bm_original_mesh->totvert, sizeof(int), "orig vmap"); - int *base_to_orig_vmap = MEM_calloc_arrayN(base_mesh->totvert, sizeof(int), "base vmap"); + int *orig_to_base_vmap = static_cast( + MEM_calloc_arrayN(bm_original_mesh->totvert, sizeof(int), "orig vmap")); + int *base_to_orig_vmap = static_cast( + MEM_calloc_arrayN(base_mesh->totvert, sizeof(int), "base vmap")); - context->base_to_orig_vmap = CustomData_get_layer_named_for_write( - &base_mesh->vdata, CD_PROP_INT32, vname, base_mesh->totvert); + context->base_to_orig_vmap = static_cast(CustomData_get_layer_named_for_write( + &base_mesh->vdata, CD_PROP_INT32, vname, base_mesh->totvert)); for (int i = 0; i < base_mesh->totvert; i++) { base_to_orig_vmap[i] = context->base_to_orig_vmap[i]; } @@ -1100,7 +1104,7 @@ static void multires_unsubdivide_extract_grids(MultiresUnsubdivideContext *conte static void multires_unsubdivide_private_extract_data_free(MultiresUnsubdivideContext *context) { - if (context->bm_original_mesh != NULL) { + if (context->bm_original_mesh != nullptr) { BM_mesh_free(context->bm_original_mesh); } MEM_SAFE_FREE(context->loop_to_face_map); @@ -1108,7 +1112,7 @@ static void multires_unsubdivide_private_extract_data_free(MultiresUnsubdivideCo void multires_unsubdivide_context_init(MultiresUnsubdivideContext *context, Mesh *original_mesh, - struct MultiresModifierData *mmd) + MultiresModifierData *mmd) { context->original_mesh = original_mesh; context->num_new_levels = 0; @@ -1144,13 +1148,10 @@ bool multires_unsubdivide_to_basemesh(MultiresUnsubdivideContext *context) /* Store the new base-mesh as a mesh in context, free bmesh. */ context->base_mesh = BKE_mesh_new_nomain(0, 0, 0, 0, 0); - BM_mesh_bm_to_me(NULL, - NULL, - bm_base_mesh, - context->base_mesh, - (&(struct BMeshToMeshParams){ - .calc_object_remap = true, - })); + + BMeshToMeshParams bm_to_me_params{}; + bm_to_me_params.calc_object_remap = true; + BM_mesh_bm_to_me(nullptr, nullptr, bm_base_mesh, context->base_mesh, &bm_to_me_params); BM_mesh_free(bm_base_mesh); /* Initialize bmesh and maps for the original mesh and extract the grids. */ @@ -1183,8 +1184,8 @@ static void multires_create_grids_in_unsubdivided_base_mesh(MultiresUnsubdivideC if (CustomData_has_layer(&base_mesh->ldata, CD_MDISPS)) { CustomData_free_layers(&base_mesh->ldata, CD_MDISPS, base_mesh->totloop); } - MDisps *mdisps = CustomData_add_layer( - &base_mesh->ldata, CD_MDISPS, CD_SET_DEFAULT, NULL, base_mesh->totloop); + MDisps *mdisps = static_cast(CustomData_add_layer( + &base_mesh->ldata, CD_MDISPS, CD_SET_DEFAULT, nullptr, base_mesh->totloop)); const int totdisp = pow_i(BKE_ccg_gridsize(context->num_total_levels), 2); const int totloop = base_mesh->totloop; @@ -1193,7 +1194,8 @@ static void multires_create_grids_in_unsubdivided_base_mesh(MultiresUnsubdivideC /* Allocate the MDISPS grids and copy the extracted data from context. 
*/ for (int i = 0; i < totloop; i++) { - float(*disps)[3] = MEM_calloc_arrayN(totdisp, sizeof(float[3]), "multires disps"); + float(*disps)[3] = static_cast( + MEM_calloc_arrayN(totdisp, sizeof(float[3]), __func__)); if (mdisps[i].disps) { MEM_freeN(mdisps[i].disps); @@ -1211,13 +1213,13 @@ static void multires_create_grids_in_unsubdivided_base_mesh(MultiresUnsubdivideC } } -int multiresModifier_rebuild_subdiv(struct Depsgraph *depsgraph, - struct Object *object, - struct MultiresModifierData *mmd, +int multiresModifier_rebuild_subdiv(Depsgraph *depsgraph, + Object *object, + MultiresModifierData *mmd, int rebuild_limit, bool switch_view_to_lower_level) { - Mesh *mesh = object->data; + Mesh *mesh = static_cast(object->data); multires_force_sculpt_rebuild(object); @@ -1257,24 +1259,24 @@ int multiresModifier_rebuild_subdiv(struct Depsgraph *depsgraph, } /* Copy the new base mesh to the original mesh. */ - BKE_mesh_nomain_to_mesh(unsubdiv_context.base_mesh, object->data, object); - Mesh *base_mesh = object->data; + Mesh *base_mesh = static_cast(object->data); + BKE_mesh_nomain_to_mesh(unsubdiv_context.base_mesh, base_mesh, object); multires_create_grids_in_unsubdivided_base_mesh(&unsubdiv_context, base_mesh); /* Update the levels in the modifier. Force always to display at level 0 as it contains the new * created level. */ - mmd->totlvl = (char)unsubdiv_context.num_total_levels; + mmd->totlvl = char(unsubdiv_context.num_total_levels); if (switch_view_to_lower_level) { mmd->sculptlvl = 0; mmd->lvl = 0; } else { - mmd->sculptlvl = (char)(mmd->sculptlvl + unsubdiv_context.num_new_levels); - mmd->lvl = (char)(mmd->lvl + unsubdiv_context.num_new_levels); + mmd->sculptlvl = char(mmd->sculptlvl + unsubdiv_context.num_new_levels); + mmd->lvl = char(mmd->lvl + unsubdiv_context.num_new_levels); } - mmd->renderlvl = (char)(mmd->renderlvl + unsubdiv_context.num_new_levels); + mmd->renderlvl = char(mmd->renderlvl + unsubdiv_context.num_new_levels); /* Create a reshape context to convert the MDISPS data to tangent displacement. It can be the * same as the previous one as a new Subdivision needs to be created for the new base mesh. 
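/* Illustrative sketch, not part of the patch: the allocation above sizes each MDISPS
 * grid as gridsize * gridsize float[3] entries, where the grid size is assumed to
 * follow the usual CCG relation gridsize(level) = (1 << (level - 1)) + 1 (see
 * BKE_ccg_gridsize()). `displacement_grid_bytes` is a hypothetical helper. */

#include <cstddef>

static size_t displacement_grid_bytes(const int level)
{
  const int grid_size = (1 << (level - 1)) + 1;
  const int totdisp = grid_size * grid_size;
  return size_t(totdisp) * sizeof(float[3]);
}

/* Example: level 3 gives a 5x5 grid, i.e. 25 displacement vectors per loop. */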
*/ diff --git a/source/blender/blenkernel/intern/multires_versioning.c b/source/blender/blenkernel/intern/multires_versioning.cc similarity index 80% rename from source/blender/blenkernel/intern/multires_versioning.c rename to source/blender/blenkernel/intern/multires_versioning.cc index b7888b9e33a..1800bbc68bc 100644 --- a/source/blender/blenkernel/intern/multires_versioning.c +++ b/source/blender/blenkernel/intern/multires_versioning.cc @@ -14,18 +14,18 @@ #include "BKE_subdiv.h" #include "BKE_subdiv_eval.h" -#include "multires_reshape.h" +#include "multires_reshape.hh" #include "opensubdiv_converter_capi.h" #include "subdiv_converter.h" -static float simple_to_catmull_clark_get_edge_sharpness( - const OpenSubdiv_Converter *UNUSED(converter), int UNUSED(manifold_edge_index)) +static float simple_to_catmull_clark_get_edge_sharpness(const OpenSubdiv_Converter * /*converter*/, + int /*manifold_edge_index*/) { return 10.0f; } static bool simple_to_catmull_clark_is_infinite_sharp_vertex( - const OpenSubdiv_Converter *UNUSED(converter), int UNUSED(manifold_vertex_index)) + const OpenSubdiv_Converter * /*converter*/, int /*manifold_vertex_index*/) { return true; } @@ -35,7 +35,7 @@ static Subdiv *subdiv_for_simple_to_catmull_clark(Object *object, MultiresModifi SubdivSettings subdiv_settings; BKE_multires_subdiv_settings_init(&subdiv_settings, mmd); - Mesh *base_mesh = object->data; + const Mesh *base_mesh = static_cast(object->data); OpenSubdiv_Converter converter; BKE_subdiv_converter_init_for_mesh(&converter, &subdiv_settings, base_mesh); @@ -45,9 +45,10 @@ static Subdiv *subdiv_for_simple_to_catmull_clark(Object *object, MultiresModifi Subdiv *subdiv = BKE_subdiv_new_from_converter(&subdiv_settings, &converter); BKE_subdiv_converter_free(&converter); - if (!BKE_subdiv_eval_begin_from_mesh(subdiv, base_mesh, NULL, SUBDIV_EVALUATOR_TYPE_CPU, NULL)) { + if (!BKE_subdiv_eval_begin_from_mesh( + subdiv, base_mesh, nullptr, SUBDIV_EVALUATOR_TYPE_CPU, nullptr)) { BKE_subdiv_free(subdiv); - return NULL; + return nullptr; } return subdiv; @@ -55,7 +56,7 @@ static Subdiv *subdiv_for_simple_to_catmull_clark(Object *object, MultiresModifi void multires_do_versions_simple_to_catmull_clark(Object *object, MultiresModifierData *mmd) { - const Mesh *base_mesh = object->data; + const Mesh *base_mesh = static_cast(object->data); if (base_mesh->totloop == 0) { return; } diff --git a/source/blender/blenkernel/intern/nla.c b/source/blender/blenkernel/intern/nla.c index 5cdfe98f2e0..6b631462ebd 100644 --- a/source/blender/blenkernel/intern/nla.c +++ b/source/blender/blenkernel/intern/nla.c @@ -764,20 +764,13 @@ void BKE_nlastrips_sort_strips(ListBase *strips) strips->last = tmp.last; } -bool BKE_nlastrips_add_strip(ListBase *strips, NlaStrip *strip) +void BKE_nlastrips_add_strip_unsafe(ListBase *strips, NlaStrip *strip) { NlaStrip *ns; bool not_added = true; /* sanity checks */ - if (ELEM(NULL, strips, strip)) { - return false; - } - - /* check if any space to add */ - if (BKE_nlastrips_has_space(strips, strip->start, strip->end) == 0) { - return false; - } + BLI_assert(!ELEM(NULL, strips, strip)); /* find the right place to add the strip to the nominated track */ for (ns = strips->first; ns; ns = ns->next) { @@ -792,8 +785,19 @@ bool BKE_nlastrips_add_strip(ListBase *strips, NlaStrip *strip) /* just add to the end of the list of the strips then... */ BLI_addtail(strips, strip); } +} - /* added... 
*/ +bool BKE_nlastrips_add_strip(ListBase *strips, NlaStrip *strip) +{ + if (ELEM(NULL, strips, strip)) { + return false; + } + + if (!BKE_nlastrips_has_space(strips, strip->start, strip->end)) { + return false; + } + + BKE_nlastrips_add_strip_unsafe(strips, strip); return true; } @@ -1521,6 +1525,30 @@ void BKE_nlastrip_recalculate_bounds(NlaStrip *strip) nlastrip_fix_resize_overlaps(strip); } +void BKE_nlastrip_recalculate_blend(NlaStrip *strip) +{ + + /* check if values need to be re-calculated. */ + if (strip->blendin == 0 && strip->blendout == 0) { + return; + } + + const double strip_len = strip->end - strip->start; + double blend_in = strip->blendin; + double blend_out = strip->blendout; + + double blend_in_max = strip_len - blend_out; + + CLAMP_MIN(blend_in_max, 0); + + /* blend-out is limited to the length of the strip. */ + CLAMP(blend_in, 0, blend_in_max); + CLAMP(blend_out, 0, strip_len - blend_in); + + strip->blendin = blend_in; + strip->blendout = blend_out; +} + /* Animated Strips ------------------------------------------- */ bool BKE_nlatrack_has_animated_strips(NlaTrack *nlt) @@ -1854,6 +1882,7 @@ void BKE_nla_validate_state(AnimData *adt) for (strip = nlt->strips.first; strip; strip = strip->next) { /* auto-blending first */ BKE_nlastrip_validate_autoblends(nlt, strip); + BKE_nlastrip_recalculate_blend(strip); } } } diff --git a/source/blender/blenkernel/intern/nla_test.cc b/source/blender/blenkernel/intern/nla_test.cc new file mode 100644 index 00000000000..ef9ca83e25f --- /dev/null +++ b/source/blender/blenkernel/intern/nla_test.cc @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Copyright 2023 Blender Foundation. All rights reserved. */ + +#include "BKE_nla.h" + +#include "DNA_anim_types.h" +#include "DNA_nla_types.h" + +#include "MEM_guardedalloc.h" + +#include "testing/testing.h" + +namespace blender::bke::tests { + +TEST(nla_strip, BKE_nlastrip_recalculate_blend) +{ + NlaStrip strip{}; + strip.blendin = 4.0; + strip.blendout = 5.0; + strip.start = 1; + strip.end = 10; + + /* Scaling a strip up doesn't affect the blend in/out value */ + strip.end = 20; + BKE_nlastrip_recalculate_blend(&strip); + EXPECT_FLOAT_EQ(strip.blendin, 4.0); + EXPECT_FLOAT_EQ(strip.blendout, 5.0); + + /* Scaling a strip down affects the blend-in value before the blend-out value */ + strip.end = 7; + BKE_nlastrip_recalculate_blend(&strip); + EXPECT_FLOAT_EQ(strip.blendin, 1.0); + EXPECT_FLOAT_EQ(strip.blendout, 5.0); + + /* Scaling a strip down to nothing updates the blend in/out values accordingly */ + strip.end = 1.1; + BKE_nlastrip_recalculate_blend(&strip); + EXPECT_FLOAT_EQ(strip.blendin, 0.0); + EXPECT_FLOAT_EQ(strip.blendout, 0.1); +} + +TEST(nla_strip, BKE_nlastrips_add_strip) +{ + ListBase strips{}; + NlaStrip strip1{}; + strip1.start = 0; + strip1.end = 10; + strips.first = &strip1; + + NlaStrip strip2{}; + strip2.start = 5; + strip2.end = 10; + + /* can't add a null NLA strip to an NLA Track. */ + EXPECT_FALSE(BKE_nlastrips_add_strip(&strips, NULL)); + + /* can't add an NLA strip to an NLA Track that overlaps another NLA strip. */ + EXPECT_FALSE(BKE_nlastrips_add_strip(&strips, &strip2)); + + strip2.start = 15; + strip2.end = 20; + /* can add an NLA strip to an NLA Track that doesn't overlaps another NLA strip. 
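/* Worked example, not part of the patch: BKE_nlastrip_recalculate_blend() above
 * clamps blend-in before blend-out. For the values used in the test
 * (start = 1, end = 7, blendin = 4, blendout = 5):
 *   strip_len    = 7 - 1              = 6
 *   blend_in_max = max(6 - 5, 0)      = 1
 *   blendin      = clamp(4, 0, 1)     = 1
 *   blendout     = clamp(5, 0, 6 - 1) = 5
 * which matches the EXPECT_FLOAT_EQ(strip.blendin, 1.0) and
 * EXPECT_FLOAT_EQ(strip.blendout, 5.0) assertions in the first test. */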
*/ + EXPECT_TRUE(BKE_nlastrips_add_strip(&strips, &strip2)); +} + +} // namespace blender::bke::tests diff --git a/source/blender/blenkernel/intern/node.cc b/source/blender/blenkernel/intern/node.cc index e42a974eba2..74351022fd3 100644 --- a/source/blender/blenkernel/intern/node.cc +++ b/source/blender/blenkernel/intern/node.cc @@ -922,7 +922,11 @@ void ntreeBlendReadLib(BlendLibReader *reader, bNodeTree *ntree) * to match the static layout. */ if (!BLO_read_lib_is_undo(reader)) { LISTBASE_FOREACH (bNode *, node, &ntree->nodes) { - node_verify_sockets(ntree, node, false); + /* Don't update node groups here because they may depend on other node groups which are not + * fully versioned yet and don't have `typeinfo` pointers set. */ + if (node->type != NODE_GROUP) { + node_verify_sockets(ntree, node, false); + } } } } @@ -1040,42 +1044,42 @@ static void node_tree_asset_pre_save(void *asset_ptr, AssetMetaData *asset_data) } // namespace blender::bke static AssetTypeInfo AssetType_NT = { - /* pre_save_fn */ blender::bke::node_tree_asset_pre_save, + /*pre_save_fn*/ blender::bke::node_tree_asset_pre_save, }; IDTypeInfo IDType_ID_NT = { - /* id_code */ ID_NT, - /* id_filter */ FILTER_ID_NT, - /* main_listbase_index */ INDEX_ID_NT, - /* struct_size */ sizeof(bNodeTree), - /* name */ "NodeTree", - /* name_plural */ "node_groups", - /* translation_context */ BLT_I18NCONTEXT_ID_NODETREE, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ &AssetType_NT, + /*id_code*/ ID_NT, + /*id_filter*/ FILTER_ID_NT, + /*main_listbase_index*/ INDEX_ID_NT, + /*struct_size*/ sizeof(bNodeTree), + /*name*/ "NodeTree", + /*name_plural*/ "node_groups", + /*translation_context*/ BLT_I18NCONTEXT_ID_NODETREE, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ &AssetType_NT, - /* init_data */ ntree_init_data, - /* copy_data */ ntree_copy_data, - /* free_data */ ntree_free_data, - /* make_local */ nullptr, - /* foreach_id */ node_foreach_id, - /* foreach_cache */ node_foreach_cache, - /* foreach_path */ node_foreach_path, - /* owner_pointer_get */ node_owner_pointer_get, + /*init_data*/ ntree_init_data, + /*copy_data*/ ntree_copy_data, + /*free_data*/ ntree_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ node_foreach_id, + /*foreach_cache*/ node_foreach_cache, + /*foreach_path*/ node_foreach_path, + /*owner_pointer_get*/ node_owner_pointer_get, - /* blend_write */ ntree_blend_write, - /* blend_read_data */ ntree_blend_read_data, - /* blend_read_lib */ ntree_blend_read_lib, - /* blend_read_expand */ ntree_blend_read_expand, + /*blend_write*/ ntree_blend_write, + /*blend_read_data*/ ntree_blend_read_data, + /*blend_read_lib*/ ntree_blend_read_lib, + /*blend_read_expand*/ ntree_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; static void node_add_sockets_from_type(bNodeTree *ntree, bNode *node, bNodeType *ntype) { - if (ntype->declare != nullptr) { + if (ntype->declare || ntype->declare_dynamic) { node_verify_sockets(ntree, node, true); return; } @@ -1378,7 +1382,7 @@ void nodeRegisterType(bNodeType *nt) BLI_assert(nt->idname[0] != '\0'); BLI_assert(nt->poll != nullptr); - if (nt->declare && !nt->declaration_is_dynamic) { + if (nt->declare && !nt->declare_dynamic) { if (nt->fixed_declaration == nullptr) { nt->fixed_declaration = new blender::nodes::NodeDeclaration(); blender::nodes::build_node_declaration(*nt, *nt->fixed_declaration); @@ -2990,7 +2994,7 
@@ void node_free_node(bNodeTree *ntree, bNode *node) MEM_freeN(node->prop); } - if (node->typeinfo->declaration_is_dynamic) { + if (node->typeinfo->declare_dynamic) { delete node->runtime->declaration; } @@ -3587,31 +3591,43 @@ static void update_socket_declarations(ListBase *sockets, } } +static void reset_socket_declarations(ListBase *sockets) +{ + LISTBASE_FOREACH (bNodeSocket *, socket, sockets) { + socket->runtime->declaration = nullptr; + } +} + void nodeSocketDeclarationsUpdate(bNode *node) { BLI_assert(node->runtime->declaration != nullptr); - update_socket_declarations(&node->inputs, node->runtime->declaration->inputs); - update_socket_declarations(&node->outputs, node->runtime->declaration->outputs); + if (node->runtime->declaration->skip_updating_sockets) { + reset_socket_declarations(&node->inputs); + reset_socket_declarations(&node->outputs); + } + else { + update_socket_declarations(&node->inputs, node->runtime->declaration->inputs); + update_socket_declarations(&node->outputs, node->runtime->declaration->outputs); + } } -bool nodeDeclarationEnsureOnOutdatedNode(bNodeTree * /*ntree*/, bNode *node) +bool nodeDeclarationEnsureOnOutdatedNode(bNodeTree *ntree, bNode *node) { if (node->runtime->declaration != nullptr) { return false; } - if (node->typeinfo->declare == nullptr) { - return false; - } - if (node->typeinfo->declaration_is_dynamic) { + if (node->typeinfo->declare_dynamic) { node->runtime->declaration = new blender::nodes::NodeDeclaration(); - blender::nodes::build_node_declaration(*node->typeinfo, *node->runtime->declaration); + blender::nodes::build_node_declaration_dynamic(*ntree, *node, *node->runtime->declaration); + return true; } - else { + if (node->typeinfo->declare) { /* Declaration should have been created in #nodeRegisterType. */ BLI_assert(node->typeinfo->fixed_declaration != nullptr); node->runtime->declaration = node->typeinfo->fixed_declaration; + return true; } - return true; + return false; } bool nodeDeclarationEnsure(bNodeTree *ntree, bNode *node) @@ -3803,8 +3819,6 @@ void BKE_node_instance_hash_remove_untagged(bNodeInstanceHash *hash, void ntreeUpdateAllNew(Main *main) { - Vector new_ntrees; - /* Update all new node trees on file read or append, to add/remove sockets * in groups nodes if the group changed, and handle any update flags that * might have been set in file reading or versioning. */ diff --git a/source/blender/blenkernel/intern/node_tree_update.cc b/source/blender/blenkernel/intern/node_tree_update.cc index 397556e23fc..374d67bbfa8 100644 --- a/source/blender/blenkernel/intern/node_tree_update.cc +++ b/source/blender/blenkernel/intern/node_tree_update.cc @@ -22,6 +22,7 @@ #include "MOD_nodes.h" #include "NOD_node_declaration.hh" +#include "NOD_socket.h" #include "NOD_texture.h" #include "DEG_depsgraph_query.h" @@ -538,7 +539,6 @@ class NodeTreeMainUpdater { void update_individual_nodes(bNodeTree &ntree) { - Vector group_inout_nodes; for (bNode *node : ntree.all_nodes()) { nodeDeclarationEnsure(&ntree, node); if (this->should_update_individual_node(ntree, *node)) { @@ -549,18 +549,9 @@ class NodeTreeMainUpdater { if (ntype.updatefunc) { ntype.updatefunc(&ntree, node); } - } - if (ELEM(node->type, NODE_GROUP_INPUT, NODE_GROUP_OUTPUT)) { - group_inout_nodes.append(node); - } - } - /* The update function of group input/output nodes may add new interface sockets. When that - * happens, all the input/output nodes have to be updated again. 
In the future it would be - * better to move this functionality out of the node update function into the operator that's - * supposed to create the new interface socket. */ - if (ntree.runtime->changed_flag & NTREE_CHANGED_INTERFACE) { - for (bNode *node : group_inout_nodes) { - node->typeinfo->updatefunc(&ntree, node); + if (ntype.declare_dynamic) { + nodes::update_node_declaration_and_sockets(ntree, *node); + } } } } @@ -574,23 +565,8 @@ class NodeTreeMainUpdater { return true; } if (ntree.runtime->changed_flag & NTREE_CHANGED_LINK) { - ntree.ensure_topology_cache(); - /* Node groups currently always rebuilt their sockets when they are updated. - * So avoid calling the update method when no new link was added to it. */ - if (node.type == NODE_GROUP_INPUT) { - if (node.output_sockets().last()->is_directly_linked()) { - return true; - } - } - else if (node.type == NODE_GROUP_OUTPUT) { - if (node.input_sockets().last()->is_directly_linked()) { - return true; - } - } - else { - /* Currently we have no way to tell if a node needs to be updated when a link changed. */ - return true; - } + /* Currently we have no way to tell if a node needs to be updated when a link changed. */ + return true; } if (ntree.runtime->changed_flag & NTREE_CHANGED_INTERFACE) { if (ELEM(node.type, NODE_GROUP_INPUT, NODE_GROUP_OUTPUT)) { @@ -1155,7 +1131,11 @@ void BKE_ntree_update_tag_node_removed(bNodeTree *ntree) void BKE_ntree_update_tag_node_reordered(bNodeTree *ntree) { - add_tree_tag(ntree, NTREE_CHANGED_ANY); + /* Don't add a tree update tag to avoid reevaluations for trivial operations like selection or + * parenting that typically influence the node order. This means the node order can be different + * for original and evaluated trees. A different solution might avoid sorting nodes based on UI + * states like selection, which would require not tying the node order to the drawing order. 
*/ + ntree->runtime->topology_cache_mutex.tag_dirty(); } void BKE_ntree_update_tag_node_mute(bNodeTree *ntree, bNode *node) diff --git a/source/blender/blenkernel/intern/object.cc b/source/blender/blenkernel/intern/object.cc index e90e3080f92..3581fd3b37f 100644 --- a/source/blender/blenkernel/intern/object.cc +++ b/source/blender/blenkernel/intern/object.cc @@ -1230,37 +1230,37 @@ static void object_asset_pre_save(void *asset_ptr, struct AssetMetaData *asset_d } static AssetTypeInfo AssetType_OB = { - /* pre_save_fn */ object_asset_pre_save, + /*pre_save_fn*/ object_asset_pre_save, }; IDTypeInfo IDType_ID_OB = { - /* id_code */ ID_OB, - /* id_filter */ FILTER_ID_OB, - /* main_listbase_index */ INDEX_ID_OB, - /* struct_size */ sizeof(Object), - /* name */ "Object", - /* name_plural */ "objects", - /* translation_context */ BLT_I18NCONTEXT_ID_OBJECT, - /* flags */ 0, - /* asset_type_info */ &AssetType_OB, + /*id_code*/ ID_OB, + /*id_filter*/ FILTER_ID_OB, + /*main_listbase_index*/ INDEX_ID_OB, + /*struct_size*/ sizeof(Object), + /*name*/ "Object", + /*name_plural*/ "objects", + /*translation_context*/ BLT_I18NCONTEXT_ID_OBJECT, + /*flags*/ 0, + /*asset_type_info*/ &AssetType_OB, - /* init_data */ object_init_data, - /* copy_data */ object_copy_data, - /* free_data */ object_free_data, - /* make_local */ nullptr, - /* foreach_id */ object_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ object_foreach_path, - /* owner_pointer_get */ nullptr, + /*init_data*/ object_init_data, + /*copy_data*/ object_copy_data, + /*free_data*/ object_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ object_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ object_foreach_path, + /*owner_pointer_get*/ nullptr, - /* blend_write */ object_blend_write, - /* blend_read_data */ object_blend_read_data, - /* blend_read_lib */ object_blend_read_lib, - /* blend_read_expand */ object_blend_read_expand, + /*blend_write*/ object_blend_write, + /*blend_read_data*/ object_blend_read_data, + /*blend_read_lib*/ object_blend_read_lib, + /*blend_read_expand*/ object_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ object_lib_override_apply_post, + /*lib_override_apply_post*/ object_lib_override_apply_post, }; void BKE_object_workob_clear(Object *workob) @@ -4369,42 +4369,45 @@ void BKE_object_sculpt_data_create(Object *ob) ob->sculpt->mode_type = (eObjectMode)ob->mode; } -bool BKE_object_obdata_texspace_get(Object *ob, char **r_texflag, float **r_loc, float **r_size) +bool BKE_object_obdata_texspace_get(Object *ob, + char **r_texspace_flag, + float **r_texspace_location, + float **r_texspace_size) { - if (ob->data == nullptr) { return false; } switch (GS(((ID *)ob->data)->name)) { case ID_ME: { - BKE_mesh_texspace_get_reference((Mesh *)ob->data, r_texflag, r_loc, r_size); + BKE_mesh_texspace_get_reference( + (Mesh *)ob->data, r_texspace_flag, r_texspace_location, r_texspace_size); break; } case ID_CU_LEGACY: { Curve *cu = (Curve *)ob->data; BKE_curve_texspace_ensure(cu); - if (r_texflag) { - *r_texflag = &cu->texflag; + if (r_texspace_flag) { + *r_texspace_flag = &cu->texspace_flag; } - if (r_loc) { - *r_loc = cu->loc; + if (r_texspace_location) { + *r_texspace_location = cu->texspace_location; } - if (r_size) { - *r_size = cu->size; + if (r_texspace_size) { + *r_texspace_size = cu->texspace_size; } break; } case ID_MB: { MetaBall *mb = (MetaBall *)ob->data; - if (r_texflag) { - *r_texflag = &mb->texflag; + if 
(r_texspace_flag) { + *r_texspace_flag = &mb->texspace_flag; } - if (r_loc) { - *r_loc = mb->loc; + if (r_texspace_location) { + *r_texspace_location = mb->texspace_location; } - if (r_size) { - *r_size = mb->size; + if (r_texspace_size) { + *r_texspace_size = mb->texspace_size; } break; } diff --git a/source/blender/blenkernel/intern/object_dupli.cc b/source/blender/blenkernel/intern/object_dupli.cc index e5d15918a2b..9fdef8525d4 100644 --- a/source/blender/blenkernel/intern/object_dupli.cc +++ b/source/blender/blenkernel/intern/object_dupli.cc @@ -529,9 +529,8 @@ static void make_duplis_collection(const DupliContext *ctx) } static const DupliGenerator gen_dupli_collection = { - OB_DUPLICOLLECTION, /* type */ - make_duplis_collection /* make_duplis */ -}; + /*type*/ OB_DUPLICOLLECTION, + /*make_duplis*/ make_duplis_collection}; /** \} */ @@ -744,9 +743,8 @@ static void make_duplis_verts(const DupliContext *ctx) } static const DupliGenerator gen_dupli_verts = { - OB_DUPLIVERTS, /* type */ - make_duplis_verts /* make_duplis */ -}; + /*type*/ OB_DUPLIVERTS, + /*make_duplis*/ make_duplis_verts}; /** \} */ @@ -873,9 +871,8 @@ static void make_duplis_font(const DupliContext *ctx) } static const DupliGenerator gen_dupli_verts_font = { - OB_DUPLIVERTS, /* type */ - make_duplis_font /* make_duplis */ -}; + /*type*/ OB_DUPLIVERTS, + /*make_duplis*/ make_duplis_font}; /** \} */ @@ -1034,8 +1031,8 @@ static void make_duplis_geometry_set(const DupliContext *ctx) } static const DupliGenerator gen_dupli_geometry_set = { - GEOMETRY_SET_DUPLI_GENERATOR_TYPE, - make_duplis_geometry_set, + /*type*/ GEOMETRY_SET_DUPLI_GENERATOR_TYPE, + /*make_duplis*/ make_duplis_geometry_set, }; /** \} */ @@ -1335,9 +1332,8 @@ static void make_duplis_faces(const DupliContext *ctx) } static const DupliGenerator gen_dupli_faces = { - OB_DUPLIFACES, /* type */ - make_duplis_faces /* make_duplis */ -}; + /*type*/ OB_DUPLIFACES, + /*make_duplis*/ make_duplis_faces}; /** \} */ @@ -1685,9 +1681,8 @@ static void make_duplis_particles(const DupliContext *ctx) } static const DupliGenerator gen_dupli_particles = { - OB_DUPLIPARTS, /* type */ - make_duplis_particles /* make_duplis */ -}; + /*type*/ OB_DUPLIPARTS, + /*make_duplis*/ make_duplis_particles}; /** \} */ diff --git a/source/blender/blenkernel/intern/paint.cc b/source/blender/blenkernel/intern/paint.cc index f800692c759..8071c1e5d80 100644 --- a/source/blender/blenkernel/intern/paint.cc +++ b/source/blender/blenkernel/intern/paint.cc @@ -76,7 +76,6 @@ #include "bmesh_log.h" // TODO: figure out bad cross module refs -extern "C" void SCULPT_on_sculptsession_bmesh_free(SculptSession *ss); extern "C" void SCULPT_undo_ensure_bmlog(Object *ob); static void init_sculptvert_layer(SculptSession *ss, PBVH *pbvh, int totvert); @@ -150,33 +149,33 @@ static void palette_undo_preserve(BlendLibReader * /*reader*/, ID *id_new, ID *i } IDTypeInfo IDType_ID_PAL = { - /* id_code */ ID_PAL, - /* id_filter */ FILTER_ID_PAL, - /* main_listbase_index */ INDEX_ID_PAL, - /* struct_size */ sizeof(Palette), - /* name */ "Palette", - /* name_plural */ "palettes", - /* translation_context */ BLT_I18NCONTEXT_ID_PALETTE, - /* flags */ IDTYPE_FLAGS_NO_ANIMDATA, - /* asset_type_info */ nullptr, + /*id_code*/ ID_PAL, + /*id_filter*/ FILTER_ID_PAL, + /*main_listbase_index*/ INDEX_ID_PAL, + /*struct_size*/ sizeof(Palette), + /*name*/ "Palette", + /*name_plural*/ "palettes", + /*translation_context*/ BLT_I18NCONTEXT_ID_PALETTE, + /*flags*/ IDTYPE_FLAGS_NO_ANIMDATA, + /*asset_type_info*/ nullptr, - /* init_data */ 
palette_init_data, - /* copy_data */ palette_copy_data, - /* free_data */ palette_free_data, - /* make_local */ nullptr, - /* foreach_id */ nullptr, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ palette_init_data, + /*copy_data*/ palette_copy_data, + /*free_data*/ palette_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ nullptr, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ palette_blend_write, - /* blend_read_data */ palette_blend_read_data, - /* blend_read_lib */ nullptr, - /* blend_read_expand */ nullptr, + /*blend_write*/ palette_blend_write, + /*blend_read_data*/ palette_blend_read_data, + /*blend_read_lib*/ nullptr, + /*blend_read_expand*/ nullptr, - /* blend_read_undo_preserve */ palette_undo_preserve, + /*blend_read_undo_preserve*/ palette_undo_preserve, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; static void paint_curve_copy_data(Main * /*bmain*/, @@ -218,33 +217,33 @@ static void paint_curve_blend_read_data(BlendDataReader *reader, ID *id) } IDTypeInfo IDType_ID_PC = { - /* id_code */ ID_PC, - /* id_filter */ FILTER_ID_PC, - /* main_listbase_index */ INDEX_ID_PC, - /* struct_size */ sizeof(PaintCurve), - /* name */ "PaintCurve", - /* name_plural */ "paint_curves", - /* translation_context */ BLT_I18NCONTEXT_ID_PAINTCURVE, - /* flags */ IDTYPE_FLAGS_NO_ANIMDATA, - /* asset_type_info */ nullptr, + /*id_code*/ ID_PC, + /*id_filter*/ FILTER_ID_PC, + /*main_listbase_index*/ INDEX_ID_PC, + /*struct_size*/ sizeof(PaintCurve), + /*name*/ "PaintCurve", + /*name_plural*/ "paint_curves", + /*translation_context*/ BLT_I18NCONTEXT_ID_PAINTCURVE, + /*flags*/ IDTYPE_FLAGS_NO_ANIMDATA, + /*asset_type_info*/ nullptr, - /* init_data */ nullptr, - /* copy_data */ paint_curve_copy_data, - /* free_data */ paint_curve_free_data, - /* make_local */ nullptr, - /* foreach_id */ nullptr, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ nullptr, + /*copy_data*/ paint_curve_copy_data, + /*free_data*/ paint_curve_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ nullptr, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ paint_curve_blend_write, - /* blend_read_data */ paint_curve_blend_read_data, - /* blend_read_lib */ nullptr, - /* blend_read_expand */ nullptr, + /*blend_write*/ paint_curve_blend_write, + /*blend_read_data*/ paint_curve_blend_read_data, + /*blend_read_lib*/ nullptr, + /*blend_read_expand*/ nullptr, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; const uchar PAINT_CURSOR_SCULPT[3] = {255, 100, 100}; @@ -1546,7 +1545,6 @@ static void sculptsession_free_pbvh(Object *object) MEM_SAFE_FREE(ss->preview_vert_list); ss->preview_vert_count = 0; - MEM_SAFE_FREE(ss->vertex_info.connected_component); MEM_SAFE_FREE(ss->vertex_info.boundary); MEM_SAFE_FREE(ss->vertex_info.symmetrize_map); @@ -1597,8 +1595,6 @@ void BKE_sculptsession_free(Object *ob) BKE_sculpt_attribute_destroy_temporary_all(ob); if (ss->bm) { - SCULPT_on_sculptsession_bmesh_free(ss); - BKE_sculptsession_bm_to_me(ob, true); ss->bm = nullptr; // BM_mesh_free(ss->bm); @@ -3517,8 +3513,6 @@ bool BKE_sculpt_attribute_exists(Object *ob, CustomData *cdata = sculpt_get_cdata(ob, domain); return CustomData_get_named_layer_index(cdata, 
proptype, name) != -1; - - return false; } static SculptAttribute *sculpt_alloc_attr(SculptSession *ss) diff --git a/source/blender/blenkernel/intern/particle.cc b/source/blender/blenkernel/intern/particle.cc index 83b665239fc..514accf098b 100644 --- a/source/blender/blenkernel/intern/particle.cc +++ b/source/blender/blenkernel/intern/particle.cc @@ -3513,7 +3513,8 @@ void psys_cache_paths(ParticleSimulationData *sim, float cfra, const bool use_re } } - /* lattices have to be calculated separately to avoid mixups between effector calculations */ + /* Lattices have to be calculated separately to avoid mix-ups between effector calculations. + */ if (psys->lattice_deform_data) { for (k = 0, ca = cache[p]; k <= segments; k++, ca++) { BKE_lattice_deform_data_eval_co( @@ -4215,7 +4216,9 @@ static int get_particle_uv(Mesh *mesh, int i; tf = static_cast(CustomData_get_layer_named(&mesh->fdata, CD_MTFACE, name)); - + if (tf == nullptr) { + tf = static_cast(CustomData_get_layer(&mesh->fdata, CD_MTFACE)); + } if (tf == nullptr) { return 0; } @@ -4451,15 +4454,15 @@ void psys_get_texture( texvec); BKE_mesh_texspace_ensure(me); - sub_v3_v3(texvec, me->loc); - if (me->size[0] != 0.0f) { - texvec[0] /= me->size[0]; + sub_v3_v3(texvec, me->texspace_location); + if (me->texspace_size[0] != 0.0f) { + texvec[0] /= me->texspace_size[0]; } - if (me->size[1] != 0.0f) { - texvec[1] /= me->size[1]; + if (me->texspace_size[1] != 0.0f) { + texvec[1] /= me->texspace_size[1]; } - if (me->size[2] != 0.0f) { - texvec[2] /= me->size[2]; + if (me->texspace_size[2] != 0.0f) { + texvec[2] /= me->texspace_size[2]; } break; case TEXCO_PARTICLE: diff --git a/source/blender/blenkernel/intern/particle_distribute.c b/source/blender/blenkernel/intern/particle_distribute.c index 7cec485e6dd..085f2cc8ddb 100644 --- a/source/blender/blenkernel/intern/particle_distribute.c +++ b/source/blender/blenkernel/intern/particle_distribute.c @@ -316,9 +316,10 @@ static void distribute_grid(Mesh *mesh, ParticleSystem *psys) } } -/* modified copy from rayshade.c */ static void hammersley_create(float *out, int n, int seed, float amount) { + /* This code is originally from a modified copy from `rayshade.c` + * (a file that's no longer included). */ RNG *rng; double ofs[2], t; @@ -917,7 +918,7 @@ static int psys_thread_context_init_distribute(ParticleThreadContext *ctx, } /* XXX This distribution code is totally broken in case from == PART_FROM_CHILD, - * it's always using finaldm even if use_modifier_stack is unset... + * it's always using `final_mesh` even if use_modifier_stack is unset... * But making things consistent here break all existing edited * hair systems, so better wait for complete rewrite. */ diff --git a/source/blender/blenkernel/intern/pbvh.c b/source/blender/blenkernel/intern/pbvh.c index a572377197f..45ac9290dc9 100644 --- a/source/blender/blenkernel/intern/pbvh.c +++ b/source/blender/blenkernel/intern/pbvh.c @@ -44,7 +44,7 @@ #include "atomic_ops.h" -#include "pbvh_intern.h" +#include "pbvh_intern.hh" #include @@ -1250,9 +1250,9 @@ static void pbvh_stack_push(PBVHIter *iter, PBVHNode *node, bool revisiting) iter->stacksize++; } -static PBVHNode *pbvh_iter_next(PBVHIter *iter) +static PBVHNode *pbvh_iter_next(PBVHIter *iter, PBVHNodeFlags leaf_flag = PBVH_Leaf) { - /* purpose here is to traverse tree, visiting child nodes before their + /* purpose here is to traverse tree, visiting child nodes beforse their * parents, this order is necessary for e.g. 
computing bounding boxes */ while (iter->stacksize) { @@ -1277,7 +1277,7 @@ static PBVHNode *pbvh_iter_next(PBVHIter *iter) continue; /* don't traverse, outside of search zone */ } - if (node->flag & PBVH_Leaf) { + if (node->flag & leaf_flag) { /* immediately hit leaf node */ return node; } @@ -1327,8 +1327,12 @@ static PBVHNode *pbvh_iter_next_occluded(PBVHIter *iter) return NULL; } -void BKE_pbvh_search_gather( - PBVH *pbvh, BKE_pbvh_SearchCallback scb, void *search_data, PBVHNode ***r_array, int *r_tot) +void BKE_pbvh_search_gather_ex(PBVH *pbvh, + BKE_pbvh_SearchCallback scb, + void *search_data, + PBVHNode ***r_array, + int *r_tot, + PBVHNodeFlags leaf_flag) { PBVHIter iter; PBVHNode **array = NULL, *node; @@ -1336,8 +1340,8 @@ void BKE_pbvh_search_gather( pbvh_iter_begin(&iter, pbvh, scb, search_data); - while ((node = pbvh_iter_next(&iter))) { - if (node->flag & PBVH_Leaf) { + while ((node = pbvh_iter_next(&iter, leaf_flag))) { + if (node->flag & leaf_flag) { if (UNLIKELY(tot == space)) { /* resize array if needed */ space = (tot == 0) ? 32 : space * 2; @@ -1360,6 +1364,12 @@ void BKE_pbvh_search_gather( *r_tot = tot; } +void BKE_pbvh_search_gather( + PBVH *pbvh, BKE_pbvh_SearchCallback scb, void *search_data, PBVHNode ***r_array, int *r_tot) +{ + BKE_pbvh_search_gather_ex(pbvh, scb, search_data, r_array, r_tot, PBVH_Leaf); +} + void BKE_pbvh_search_callback(PBVH *pbvh, BKE_pbvh_SearchCallback scb, void *search_data, diff --git a/source/blender/blenkernel/intern/pbvh.cc b/source/blender/blenkernel/intern/pbvh.cc index f91694ece14..fb032e88e57 100644 --- a/source/blender/blenkernel/intern/pbvh.cc +++ b/source/blender/blenkernel/intern/pbvh.cc @@ -8,290 +8,5829 @@ #include "BLI_utildefines.h" +#include "BLI_alloca.h" #include "BLI_bitmap.h" #include "BLI_ghash.h" -#include "BLI_index_range.hh" +#include "BLI_listbase.h" #include "BLI_math.h" #include "BLI_rand.h" -#include "BLI_span.hh" +#include "BLI_string.h" #include "BLI_task.h" +#include "BLI_index_range.hh" +#include "BLI_map.hh" +#include "BLI_math_vector_types.hh" +#include "BLI_set.hh" +#include "BLI_vector.hh" + #include "DNA_mesh_types.h" #include "DNA_meshdata_types.h" +#include "DNA_object_types.h" +#include "DNA_scene_types.h" #include "BKE_attribute.h" #include "BKE_ccg.h" -#include "BKE_mesh.h" +#include "BKE_main.h" +#include "BKE_mesh.h" /* for BKE_mesh_calc_normals */ #include "BKE_mesh_mapping.h" +#include "BKE_object.h" #include "BKE_paint.h" #include "BKE_pbvh.h" #include "BKE_subdiv_ccg.h" +#include "DEG_depsgraph_query.h" + +#include "DRW_pbvh.h" +#include "PIL_time.h" + #include "PIL_time.h" #include "bmesh.h" #include "atomic_ops.h" -#include "pbvh_intern.h" +#include "pbvh_intern.hh" -#include -#include +#include +#define LEAF_LIMIT 4000 + +using blender::float3; using blender::IndexRange; +using blender::Map; +using blender::Set; +using blender::Vector; -namespace blender::bke { +/* Uncomment to test if triangles of the same face are + * properly clustered into single nodes. 
+ */ +//#define TEST_PBVH_FACE_SPLIT -template -inline void to_static_color_type(const eCustomDataType type, const Func &func) +/* Uncomment to test that faces are only assigned to one PBVHNode */ +//#define VALIDATE_UNIQUE_NODE_FACES + +//#define PERFCNTRS + +typedef struct PBVHStack { + PBVHNode *node; + bool revisiting; +} PBVHStack; + +typedef struct PBVHIter { + PBVH *pbvh; + BKE_pbvh_SearchCallback scb; + void *search_data; + + PBVHStack *stack; + int stacksize; + + PBVHStack stackfixed[PBVH_STACK_FIXED_DEPTH]; + int stackspace; +} PBVHIter; + +void BB_reset(BB *bb) { - switch (type) { - case CD_PROP_COLOR: - func(MPropCol()); + bb->bmin[0] = bb->bmin[1] = bb->bmin[2] = FLT_MAX; + bb->bmax[0] = bb->bmax[1] = bb->bmax[2] = -FLT_MAX; +} + +void BB_intersect(BB *r_out, BB *a, BB *b) +{ + for (int i = 0; i < 3; i++) { + r_out->bmin[i] = max_ff(a->bmin[i], b->bmin[i]); + r_out->bmax[i] = min_ff(a->bmax[i], b->bmax[i]); + + if (r_out->bmax[i] < r_out->bmin[i]) { + r_out->bmax[i] = r_out->bmin[i] = 0.0f; + } + } +} + +float BB_volume(const BB *bb) +{ + float dx = bb->bmax[0] - bb->bmin[0]; + float dy = bb->bmax[1] - bb->bmin[1]; + float dz = bb->bmax[2] - bb->bmin[2]; + + return dx * dy * dz; +} + +/* Expand the bounding box to include a new coordinate */ +void BB_expand(BB *bb, const float co[3]) +{ + for (int i = 0; i < 3; i++) { + bb->bmin[i] = min_ff(bb->bmin[i], co[i]); + bb->bmax[i] = max_ff(bb->bmax[i], co[i]); + } +} + +void BB_expand_with_bb(BB *bb, BB *bb2) +{ + for (int i = 0; i < 3; i++) { + bb->bmin[i] = min_ff(bb->bmin[i], bb2->bmin[i]); + bb->bmax[i] = max_ff(bb->bmax[i], bb2->bmax[i]); + } +} + +int BB_widest_axis(const BB *bb) +{ + float dim[3]; + + for (int i = 0; i < 3; i++) { + dim[i] = bb->bmax[i] - bb->bmin[i]; + } + + if (dim[0] > dim[1]) { + if (dim[0] > dim[2]) { + return 0; + } + + return 2; + } + + if (dim[1] > dim[2]) { + return 1; + } + + return 2; +} + +void BBC_update_centroid(BBC *bbc) +{ + for (int i = 0; i < 3; i++) { + bbc->bcentroid[i] = (bbc->bmin[i] + bbc->bmax[i]) * 0.5f; + } +} + +/* Not recursive */ +static void update_node_vb(PBVH *pbvh, PBVHNode *node, int updateflag) +{ + if (!(updateflag & (PBVH_UpdateBB | PBVH_UpdateOriginalBB))) { + return; + } + + /* cannot clear flag here, causes leaky pbvh */ + // node->flag &= ~(updateflag & (PBVH_UpdateBB | PBVH_UpdateOriginalBB)); + + BB vb; + BB orig_vb; + + BB_reset(&vb); + BB_reset(&orig_vb); + + bool do_orig = true; // XXX updateflag & PBVH_UpdateOriginalBB; + bool do_normal = true; // XXX updateflag & PBVH_UpdateBB; + + if (node->flag & PBVH_Leaf) { + PBVHVertexIter vd; + + BKE_pbvh_vertex_iter_begin (pbvh, node, vd, PBVH_ITER_ALL) { + if (do_normal) { + BB_expand(&vb, vd.co); + } + + if (do_orig) { + MSculptVert *mv = pbvh->header.type == PBVH_BMESH ? 
(MSculptVert *)BM_ELEM_CD_GET_VOID_P( + vd.bm_vert, pbvh->cd_sculpt_vert) : + pbvh->msculptverts + vd.index; + + if (mv->stroke_id != pbvh->stroke_id) { + BB_expand(&orig_vb, vd.co); + } + else { + BB_expand(&orig_vb, mv->origco); + } + } + } + BKE_pbvh_vertex_iter_end; + } + else { + if (do_normal) { + BB_expand_with_bb(&vb, &pbvh->nodes[node->children_offset].vb); + BB_expand_with_bb(&vb, &pbvh->nodes[node->children_offset + 1].vb); + } + + if (do_orig) { + BB_expand_with_bb(&orig_vb, &pbvh->nodes[node->children_offset].orig_vb); + BB_expand_with_bb(&orig_vb, &pbvh->nodes[node->children_offset + 1].orig_vb); + } + } + + if (do_normal) { + node->vb = vb; + } + + if (do_orig) { +#if 0 + float size[3]; + + sub_v3_v3v3(size, orig_vb.bmax, orig_vb.bmin); + mul_v3_fl(size, 0.05); + + sub_v3_v3(orig_vb.bmin, size); + add_v3_v3(orig_vb.bmax, size); +#endif + node->orig_vb = orig_vb; + } +} + +// void BKE_pbvh_node_BB_reset(PBVHNode *node) +//{ +// BB_reset(&node->vb); +//} +// +// void BKE_pbvh_node_BB_expand(PBVHNode *node, float co[3]) +//{ +// BB_expand(&node->vb, co); +//} + +static bool face_materials_match(const PBVH *pbvh, const int a, const int b) +{ + if (pbvh->material_indices) { + if (pbvh->material_indices[a] != pbvh->material_indices[b]) { + return false; + } + } + return (pbvh->mpoly[a].flag & ME_SMOOTH) == (pbvh->mpoly[b].flag & ME_SMOOTH); +} + +static bool grid_materials_match(const DMFlagMat *f1, const DMFlagMat *f2) +{ + return ((f1->flag & ME_SMOOTH) == (f2->flag & ME_SMOOTH) && (f1->mat_nr == f2->mat_nr)); +} + +/* Adapted from BLI_kdopbvh.c */ +/* Returns the index of the first element on the right of the partition */ +static int partition_indices_faces(int *prim_indices, + int *prim_scratch, + int lo, + int hi, + int axis, + float mid, + BBC *prim_bbc, + const MLoopTri *looptri) +{ + for (int i = lo; i < hi; i++) { + prim_scratch[i - lo] = prim_indices[i]; + } + + int lo2 = lo, hi2 = hi - 1; + int i1 = lo, i2 = 0; + + while (i1 < hi) { + int poly = looptri[prim_scratch[i2]].poly; + bool side = prim_bbc[prim_scratch[i2]].bcentroid[axis] >= mid; + + while (i1 < hi && looptri[prim_scratch[i2]].poly == poly) { + prim_indices[side ? hi2-- : lo2++] = prim_scratch[i2]; + i1++; + i2++; + } + } + + return lo2; +} + +static int partition_indices_grids(int *prim_indices, + int *prim_scratch, + int lo, + int hi, + int axis, + float mid, + BBC *prim_bbc, + SubdivCCG *subdiv_ccg) +{ + for (int i = lo; i < hi; i++) { + prim_scratch[i - lo] = prim_indices[i]; + } + + int lo2 = lo, hi2 = hi - 1; + int i1 = lo, i2 = 0; + + while (i1 < hi) { + int poly = BKE_subdiv_ccg_grid_to_face_index(subdiv_ccg, prim_scratch[i2]); + bool side = prim_bbc[prim_scratch[i2]].bcentroid[axis] >= mid; + + while (i1 < hi && BKE_subdiv_ccg_grid_to_face_index(subdiv_ccg, prim_scratch[i2]) == poly) { + prim_indices[side ? 
hi2-- : lo2++] = prim_scratch[i2]; + i1++; + i2++; + } + } + + return lo2; +} + +/* Returns the index of the first element on the right of the partition */ +static int partition_indices_material(PBVH *pbvh, int lo, int hi) +{ + const MLoopTri *looptri = pbvh->looptri; + const DMFlagMat *flagmats = pbvh->grid_flag_mats; + const int *indices = pbvh->prim_indices; + int i = lo, j = hi; + + for (;;) { + if (pbvh->looptri) { + const int first = looptri[pbvh->prim_indices[lo]].poly; + for (; face_materials_match(pbvh, first, looptri[indices[i]].poly); i++) { + /* pass */ + } + for (; !face_materials_match(pbvh, first, looptri[indices[j]].poly); j--) { + /* pass */ + } + } + else { + const DMFlagMat *first = &flagmats[pbvh->prim_indices[lo]]; + for (; grid_materials_match(first, &flagmats[indices[i]]); i++) { + /* pass */ + } + for (; !grid_materials_match(first, &flagmats[indices[j]]); j--) { + /* pass */ + } + } + + if (!(i < j)) { + return i; + } + + SWAP(int, pbvh->prim_indices[i], pbvh->prim_indices[j]); + i++; + } +} + +void pbvh_grow_nodes(PBVH *pbvh, int totnode) +{ + if (UNLIKELY(totnode > pbvh->node_mem_count)) { + pbvh->node_mem_count = pbvh->node_mem_count + (pbvh->node_mem_count / 3); + if (pbvh->node_mem_count < totnode) { + pbvh->node_mem_count = totnode; + } + pbvh->nodes = (PBVHNode *)MEM_recallocN(pbvh->nodes, sizeof(PBVHNode) * pbvh->node_mem_count); + } + + pbvh->totnode = totnode; + + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = pbvh->nodes + i; + + if (!node->id) { + node->id = ++pbvh->idgen; + } + } +} + +/* Add a vertex to the map, with a positive value for unique vertices and + * a negative value for additional vertices */ +static int map_insert_vert(PBVH *pbvh, GHash *map, uint *face_verts, uint *uniq_verts, int vertex) +{ + void *key, **value_p; + + key = POINTER_FROM_INT(vertex); + if (!BLI_ghash_ensure_p(map, key, &value_p)) { + int value_i; + if (!pbvh->vert_bitmap[vertex]) { + pbvh->vert_bitmap[vertex] = true; + value_i = *uniq_verts; + (*uniq_verts)++; + } + else { + value_i = ~(*face_verts); + (*face_verts)++; + } + *value_p = POINTER_FROM_INT(value_i); + return value_i; + } + + return POINTER_AS_INT(*value_p); +} + +/* Find vertices used by the faces in this node and update the draw buffers */ +static void build_mesh_leaf_node(PBVH *pbvh, PBVHNode *node) +{ + bool has_visible = false; + + node->uniq_verts = node->face_verts = 0; + const int totface = node->totprim; + + /* reserve size is rough guess */ + GHash *map = BLI_ghash_int_new_ex("build_mesh_leaf_node gh", 2 * totface); + + int(*face_vert_indices)[3] = (int(*)[3])MEM_mallocN(sizeof(int[3]) * totface, + "bvh node face vert indices"); + + node->face_vert_indices = (const int(*)[3])face_vert_indices; + + if (pbvh->respect_hide == false) { + has_visible = true; + } + + for (int i = 0; i < totface; i++) { + const MLoopTri *lt = &pbvh->looptri[node->prim_indices[i]]; + for (int j = 0; j < 3; j++) { + face_vert_indices[i][j] = map_insert_vert( + pbvh, map, &node->face_verts, &node->uniq_verts, pbvh->mloop[lt->tri[j]].v); + } + + if (has_visible == false) { + if (!paint_is_face_hidden(lt, pbvh->hide_poly)) { + has_visible = true; + } + } + } + + int *vert_indices = (int *)MEM_callocN(sizeof(int) * (node->uniq_verts + node->face_verts), + "bvh node vert indices"); + node->vert_indices = vert_indices; + + /* Build the vertex list, unique verts first */ + GHashIterator gh_iter; + GHASH_ITER (gh_iter, map) { + void *value = BLI_ghashIterator_getValue(&gh_iter); + int ndx = POINTER_AS_INT(value); + + 
if (ndx < 0) { + ndx = -ndx + node->uniq_verts - 1; + } + + vert_indices[ndx] = POINTER_AS_INT(BLI_ghashIterator_getKey(&gh_iter)); + } + + for (int i = 0; i < totface; i++) { + const int sides = 3; + + for (int j = 0; j < sides; j++) { + if (face_vert_indices[i][j] < 0) { + face_vert_indices[i][j] = -face_vert_indices[i][j] + node->uniq_verts - 1; + } + } + } + + BKE_pbvh_node_mark_rebuild_draw(node); + + BKE_pbvh_node_fully_hidden_set(node, !has_visible); + BKE_pbvh_vert_tag_update_normal_tri_area(node); + + BLI_ghash_free(map, nullptr, nullptr); +} + +static void update_vb(PBVH *pbvh, PBVHNode *node, BBC *prim_bbc, int offset, int count) +{ + BB_reset(&node->vb); + for (int i = offset + count - 1; i >= offset; i--) { + BB_expand_with_bb(&node->vb, (BB *)(&prim_bbc[pbvh->prim_indices[i]])); + } + node->orig_vb = node->vb; +} + +int BKE_pbvh_count_grid_quads(BLI_bitmap **grid_hidden, + const int *grid_indices, + int totgrid, + int gridsize, + int display_gridsize) +{ + const int gridarea = (gridsize - 1) * (gridsize - 1); + int totquad = 0; + + /* grid hidden layer is present, so have to check each grid for + * visibility */ + + int depth1 = (int)(log2((double)gridsize - 1.0) + DBL_EPSILON); + int depth2 = (int)(log2((double)display_gridsize - 1.0) + DBL_EPSILON); + + int skip = depth2 < depth1 ? 1 << (depth1 - depth2 - 1) : 1; + + for (int i = 0; i < totgrid; i++) { + const BLI_bitmap *gh = grid_hidden[grid_indices[i]]; + + if (gh) { + /* grid hidden are present, have to check each element */ + for (int y = 0; y < gridsize - skip; y += skip) { + for (int x = 0; x < gridsize - skip; x += skip) { + if (!paint_is_grid_face_hidden(gh, gridsize, x, y)) { + totquad++; + } + } + } + } + else { + totquad += gridarea; + } + } + + return totquad; +} + +static void build_grid_leaf_node(PBVH *pbvh, PBVHNode *node) +{ + int totquads = BKE_pbvh_count_grid_quads(pbvh->grid_hidden, + node->prim_indices, + node->totprim, + pbvh->gridkey.grid_size, + pbvh->gridkey.grid_size); + BKE_pbvh_node_fully_hidden_set(node, (totquads == 0)); + BKE_pbvh_node_mark_rebuild_draw(node); + BKE_pbvh_vert_tag_update_normal_tri_area(node); +} + +static void build_leaf(PBVH *pbvh, int node_index, BBC *prim_bbc, int offset, int count) +{ + pbvh->nodes[node_index].flag |= PBVH_Leaf; + + pbvh->nodes[node_index].prim_indices = pbvh->prim_indices + offset; + pbvh->nodes[node_index].totprim = count; + + /* Still need vb for searches */ + update_vb(pbvh, &pbvh->nodes[node_index], prim_bbc, offset, count); + + if (pbvh->looptri) { + build_mesh_leaf_node(pbvh, pbvh->nodes + node_index); + } + else { + build_grid_leaf_node(pbvh, pbvh->nodes + node_index); + } +} + +/* Return zero if all primitives in the node can be drawn with the + * same material (including flat/smooth shading), non-zero otherwise */ +static bool leaf_needs_material_split(PBVH *pbvh, int offset, int count) +{ + if (count <= 1) { + return false; + } + + if (pbvh->looptri) { + const MLoopTri *first = &pbvh->looptri[pbvh->prim_indices[offset]]; + for (int i = offset + count - 1; i > offset; i--) { + int prim = pbvh->prim_indices[i]; + if (!face_materials_match(pbvh, first->poly, pbvh->looptri[prim].poly)) { + return true; + } + } + } + else { + const DMFlagMat *first = &pbvh->grid_flag_mats[pbvh->prim_indices[offset]]; + + for (int i = offset + count - 1; i > offset; i--) { + int prim = pbvh->prim_indices[i]; + if (!grid_materials_match(first, &pbvh->grid_flag_mats[prim])) { + return true; + } + } + } + + return false; +} + +#ifdef TEST_PBVH_FACE_SPLIT +static void 
test_face_boundaries(PBVH *pbvh) +{ + int faces_num = BKE_pbvh_num_faces(pbvh); + int *node_map = MEM_calloc_arrayN(faces_num, sizeof(int), __func__); + for (int i = 0; i < faces_num; i++) { + node_map[i] = -1; + } + + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = pbvh->nodes + i; + + if (!(node->flag & PBVH_Leaf)) { + continue; + } + + switch (BKE_pbvh_type(pbvh)) { + case PBVH_FACES: { + for (int j = 0; j < node->totprim; j++) { + int poly = pbvh->looptri[node->prim_indices[j]].poly; + + if (node_map[poly] >= 0 && node_map[poly] != i) { + int old_i = node_map[poly]; + int prim_i = node->prim_indices - pbvh->prim_indices + j; + + printf("PBVH split error; poly: %d, prim_i: %d, node1: %d, node2: %d, totprim: %d\n", + poly, + prim_i, + old_i, + i, + node->totprim); + } + + node_map[poly] = i; + } + break; + } + case PBVH_GRIDS: + break; + case PBVH_BMESH: + break; + } + } + + MEM_SAFE_FREE(node_map); +} +#endif + +/* Recursively build a node in the tree + * + * vb is the voxel box around all of the primitives contained in + * this node. + * + * cb is the bounding box around all the centroids of the primitives + * contained in this node + * + * offset and start indicate a range in the array of primitive indices + */ + +static void build_sub(PBVH *pbvh, + int node_index, + BB *cb, + BBC *prim_bbc, + int offset, + int count, + int *prim_scratch, + int depth) +{ + int end; + BB cb_backing; + + if (!prim_scratch) { + prim_scratch = (int *)MEM_malloc_arrayN(pbvh->totprim, sizeof(int), __func__); + } + + /* Decide whether this is a leaf or not */ + const bool below_leaf_limit = count <= pbvh->leaf_limit || depth == PBVH_STACK_FIXED_DEPTH - 1; + if (below_leaf_limit) { + if (!leaf_needs_material_split(pbvh, offset, count)) { + build_leaf(pbvh, node_index, prim_bbc, offset, count); + + if (node_index == 0) { + MEM_SAFE_FREE(prim_scratch); + } + + return; + } + } + + /* Add two child nodes */ + pbvh->nodes[node_index].children_offset = pbvh->totnode; + pbvh_grow_nodes(pbvh, pbvh->totnode + 2); + + /* Update parent node bounding box */ + update_vb(pbvh, &pbvh->nodes[node_index], prim_bbc, offset, count); + + if (!below_leaf_limit) { + /* Find axis with widest range of primitive centroids */ + if (!cb) { + cb = &cb_backing; + BB_reset(cb); + for (int i = offset + count - 1; i >= offset; i--) { + BB_expand(cb, prim_bbc[pbvh->prim_indices[i]].bcentroid); + } + } + const int axis = BB_widest_axis(cb); + + /* Partition primitives along that axis */ + if (pbvh->header.type == PBVH_FACES) { + end = partition_indices_faces(pbvh->prim_indices, + prim_scratch, + offset, + offset + count, + axis, + (cb->bmax[axis] + cb->bmin[axis]) * 0.5f, + prim_bbc, + pbvh->looptri); + } + else { + end = partition_indices_grids(pbvh->prim_indices, + prim_scratch, + offset, + offset + count, + axis, + (cb->bmax[axis] + cb->bmin[axis]) * 0.5f, + prim_bbc, + pbvh->subdiv_ccg); + } + } + else { + /* Partition primitives by material */ + end = partition_indices_material(pbvh, offset, offset + count - 1); + } + + /* Build children */ + build_sub(pbvh, + pbvh->nodes[node_index].children_offset, + nullptr, + prim_bbc, + offset, + end - offset, + prim_scratch, + depth + 1); + build_sub(pbvh, + pbvh->nodes[node_index].children_offset + 1, + nullptr, + prim_bbc, + end, + offset + count - end, + prim_scratch, + depth + 1); + + if (node_index == 0) { + MEM_SAFE_FREE(prim_scratch); + } +} + +static void pbvh_build(PBVH *pbvh, BB *cb, BBC *prim_bbc, int totprim) +{ + if (totprim != pbvh->totprim) { + pbvh->totprim = totprim; + 
if (pbvh->nodes) { + MEM_freeN(pbvh->nodes); + } + if (pbvh->prim_indices) { + MEM_freeN(pbvh->prim_indices); + } + pbvh->prim_indices = (int *)MEM_mallocN(sizeof(int) * totprim, "bvh prim indices"); + for (int i = 0; i < totprim; i++) { + pbvh->prim_indices[i] = i; + } + pbvh->totnode = 0; + if (pbvh->node_mem_count < 100) { + pbvh->node_mem_count = 100; + pbvh->nodes = (PBVHNode *)MEM_callocN(sizeof(PBVHNode) * pbvh->node_mem_count, + "bvh initial nodes"); + } + } + + pbvh->totnode = 1; + build_sub(pbvh, 0, cb, prim_bbc, 0, totprim, nullptr, 0); +} + +void BKE_pbvh_set_face_areas(PBVH *pbvh, float *face_areas) +{ + pbvh->face_areas = face_areas; +} + +/* XXX investigate this global. */ +extern "C" bool pbvh_show_orig_co = false; + +static void pbvh_draw_args_init(PBVH *pbvh, PBVH_GPU_Args *args, PBVHNode *node) +{ + memset((void *)args, 0, sizeof(*args)); + + args->pbvh_type = pbvh->header.type; + args->mesh_verts_num = pbvh->totvert; + args->mesh_grids_num = pbvh->totgrid; + args->node = node; + + BKE_pbvh_node_num_verts(pbvh, node, nullptr, &args->node_verts_num); + + args->grid_hidden = pbvh->grid_hidden; + args->face_sets_color_default = pbvh->face_sets_color_default; + args->face_sets_color_seed = pbvh->face_sets_color_seed; + args->vert_positions = pbvh->vert_positions; + args->mloop = pbvh->mloop; + args->mpoly = pbvh->mpoly; + args->mlooptri = pbvh->looptri; + args->flat_vcol_shading = pbvh->flat_vcol_shading; + args->show_orig = pbvh_show_orig_co; + args->updategen = node->updategen; + args->msculptverts = pbvh->msculptverts; + + if (ELEM(pbvh->header.type, PBVH_FACES, PBVH_GRIDS)) { + args->hide_poly = (const bool *)(pbvh->pdata ? CustomData_get_layer_named( + pbvh->pdata, CD_PROP_BOOL, ".hide_poly") : + nullptr); + } + + switch (pbvh->header.type) { + case PBVH_FACES: + args->mesh_faces_num = pbvh->mesh->totpoly; + args->vdata = pbvh->vdata; + args->ldata = pbvh->ldata; + args->pdata = pbvh->pdata; + args->totprim = node->totprim; + args->me = pbvh->mesh; + args->mpoly = pbvh->mpoly; + args->vert_normals = pbvh->vert_normals; + + args->active_color = pbvh->mesh->active_color_attribute; + args->render_color = pbvh->mesh->default_color_attribute; + + args->prim_indices = node->prim_indices; + args->face_sets = pbvh->face_sets; break; - case CD_PROP_BYTE_COLOR: - func(MLoopCol()); + case PBVH_GRIDS: + args->vdata = pbvh->vdata; + args->ldata = pbvh->ldata; + args->pdata = pbvh->pdata; + args->ccg_key = pbvh->gridkey; + args->me = pbvh->mesh; + args->totprim = node->totprim; + args->grid_indices = node->prim_indices; + args->subdiv_ccg = pbvh->subdiv_ccg; + args->face_sets = pbvh->face_sets; + args->mpoly = pbvh->mpoly; + + args->active_color = pbvh->mesh->active_color_attribute; + args->render_color = pbvh->mesh->default_color_attribute; + + args->mesh_grids_num = pbvh->totgrid; + args->grids = pbvh->grids; + args->gridfaces = pbvh->gridfaces; + args->grid_flag_mats = pbvh->grid_flag_mats; + args->vert_normals = pbvh->vert_normals; + + args->face_sets = pbvh->face_sets; break; + case PBVH_BMESH: + args->bm = pbvh->header.bm; + + args->active_color = pbvh->mesh->active_color_attribute; + args->render_color = pbvh->mesh->default_color_attribute; + + args->me = pbvh->mesh; + args->vdata = &args->bm->vdata; + args->ldata = &args->bm->ldata; + args->pdata = &args->bm->pdata; + args->bm_faces = node->bm_faces; + args->bm_other_verts = node->bm_other_verts; + args->bm_unique_vert = node->bm_unique_verts; + args->totprim = BLI_table_gset_len(node->bm_faces); + args->cd_mask_layer = 
CustomData_get_offset(&pbvh->header.bm->vdata, CD_PAINT_MASK); + + args->tribuf = node->tribuf; + args->tri_buffers = node->tri_buffers; + args->tot_tri_buffers = node->tot_tri_buffers; + + break; + } +} + +#ifdef VALIDATE_UNIQUE_NODE_FACES +static void pbvh_validate_node_prims(PBVH *pbvh) +{ + int totface = 0; + + if (pbvh->header.type == PBVH_BMESH) { + return; + } + + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = pbvh->nodes + i; + + if (!(node->flag & PBVH_Leaf)) { + continue; + } + + for (int j = 0; j < node->totprim; j++) { + int poly; + + if (pbvh->header.type == PBVH_FACES) { + poly = pbvh->looptri[node->prim_indices[j]].poly; + } + else { + poly = BKE_subdiv_ccg_grid_to_face_index(pbvh->subdiv_ccg, node->prim_indices[j]); + } + + totface = max_ii(totface, poly + 1); + } + } + + int *facemap = (int *)MEM_malloc_arrayN(totface, sizeof(*facemap), __func__); + + for (int i = 0; i < totface; i++) { + facemap[i] = -1; + } + + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = pbvh->nodes + i; + + if (!(node->flag & PBVH_Leaf)) { + continue; + } + + for (int j = 0; j < node->totprim; j++) { + int poly; + + if (pbvh->header.type == PBVH_FACES) { + poly = pbvh->looptri[node->prim_indices[j]].poly; + } + else { + poly = BKE_subdiv_ccg_grid_to_face_index(pbvh->subdiv_ccg, node->prim_indices[j]); + } + + if (facemap[poly] != -1 && facemap[poly] != i) { + printf("%s: error: face spanned multiple nodes (old: %d new: %d)\n", + __func__, + facemap[poly], + i); + } + + facemap[poly] = i; + } + } + MEM_SAFE_FREE(facemap); +} +#endif + +void BKE_pbvh_build_mesh(PBVH *pbvh, + Mesh *mesh, + const MPoly *mpoly, + const MLoop *mloop, + float (*vert_positions)[3], + MSculptVert *msculptverts, + int totvert, + struct CustomData *vdata, + struct CustomData *ldata, + struct CustomData *pdata, + const MLoopTri *looptri, + int looptri_num, + bool fast_draw, + float *face_areas, + SculptPMap *pmap) +{ + BBC *prim_bbc = nullptr; + BB cb; + + if (pbvh->pmap != pmap) { + BKE_pbvh_pmap_aquire(pmap); + } + + pbvh->pmap = pmap; + pbvh->face_areas = face_areas; + pbvh->mesh = mesh; + pbvh->header.type = PBVH_FACES; + pbvh->mpoly = mpoly; + pbvh->hide_poly = (bool *)CustomData_get_layer_named_for_write( + &mesh->pdata, CD_PROP_BOOL, ".hide_poly", mesh->totpoly); + pbvh->material_indices = (const int *)CustomData_get_layer_named( + &mesh->pdata, CD_PROP_INT32, "material_index"); + pbvh->mloop = mloop; + pbvh->looptri = looptri; + pbvh->msculptverts = msculptverts; + pbvh->vert_positions = vert_positions; + BKE_mesh_vertex_normals_ensure(mesh); + pbvh->vert_normals = BKE_mesh_vertex_normals_for_write(mesh); + pbvh->hide_vert = (bool *)CustomData_get_layer_named_for_write( + &mesh->vdata, CD_PROP_BOOL, ".hide_vert", mesh->totvert); + pbvh->vert_bitmap = (bool *)MEM_calloc_arrayN(totvert, sizeof(bool), "bvh->vert_bitmap"); + pbvh->totvert = totvert; + +#ifdef TEST_PBVH_FACE_SPLIT + /* Use lower limit to increase probability of + * edge cases. 
+ */ + pbvh->leaf_limit = 100; +#else + pbvh->leaf_limit = LEAF_LIMIT; +#endif + + pbvh->vdata = vdata; + pbvh->ldata = ldata; + pbvh->pdata = pdata; + pbvh->faces_num = mesh->totpoly; + + pbvh->face_sets_color_seed = mesh->face_sets_color_seed; + pbvh->face_sets_color_default = mesh->face_sets_color_default; + + BB_reset(&cb); + + /* For each face, store the AABB and the AABB centroid */ + prim_bbc = (BBC *)MEM_mallocN(sizeof(BBC) * looptri_num, "prim_bbc"); + + for (int i = 0; i < mesh->totvert; i++) { + msculptverts[i].flag &= ~SCULPTVERT_NEED_VALENCE; + msculptverts[i].valence = pmap->pmap[i].count; + } + + for (int i = 0; i < looptri_num; i++) { + const MLoopTri *lt = &looptri[i]; + const int sides = 3; + BBC *bbc = prim_bbc + i; + + BB_reset((BB *)bbc); + + for (int j = 0; j < sides; j++) { + BB_expand((BB *)bbc, vert_positions[pbvh->mloop[lt->tri[j]].v]); + } + + BBC_update_centroid(bbc); + + BB_expand(&cb, bbc->bcentroid); + } + + if (looptri_num) { + pbvh_build(pbvh, &cb, prim_bbc, looptri_num); + +#ifdef TEST_PBVH_FACE_SPLIT + test_face_boundaries(pbvh); +#endif + } + + if (fast_draw) { + pbvh->flags |= PBVH_FAST_DRAW; + } + + MEM_freeN(prim_bbc); + + /* Clear the bitmap so it can be used as an update tag later on. */ + memset(pbvh->vert_bitmap, 0, sizeof(bool) * totvert); + + BKE_pbvh_update_active_vcol(pbvh, mesh); + +#ifdef VALIDATE_UNIQUE_NODE_FACES + pbvh_validate_node_prims(pbvh); +#endif +} + +void BKE_pbvh_build_grids(PBVH *pbvh, + CCGElem **grids, + int totgrid, + CCGKey *key, + void **gridfaces, + DMFlagMat *flagmats, + BLI_bitmap **grid_hidden, + bool fast_draw, + float *face_areas, + Mesh *me, + SubdivCCG *subdiv_ccg) +{ + const int gridsize = key->grid_size; + + pbvh->header.type = PBVH_GRIDS; + pbvh->face_areas = face_areas; + pbvh->grids = grids; + pbvh->gridfaces = gridfaces; + pbvh->grid_flag_mats = flagmats; + pbvh->totgrid = totgrid; + pbvh->gridkey = *key; + pbvh->grid_hidden = grid_hidden; + pbvh->subdiv_ccg = subdiv_ccg; + pbvh->faces_num = me->totpoly; + + /* Find maximum number of grids per face. */ + int max_grids = 1; + const MPoly *mpoly = BKE_mesh_polys(me); + + for (int i = 0; i < me->totpoly; i++) { + max_grids = max_ii(max_grids, mpoly[i].totloop); + } + + /* Ensure leaf limit is at least 4 so there's room + * to split at original face boundaries. + * Fixes T102209. + */ + pbvh->leaf_limit = max_ii(LEAF_LIMIT / (gridsize * gridsize), max_grids); + + /* We need the base mesh attribute layout for PBVH draw. */ + pbvh->vdata = &me->vdata; + pbvh->ldata = &me->ldata; + pbvh->pdata = &me->pdata; + + pbvh->mpoly = BKE_mesh_polys(me); + pbvh->mloop = BKE_mesh_loops(me); + + /* We also need the base mesh for PBVH draw. 
*/ + pbvh->mesh = me; + + BB cb; + BB_reset(&cb); + + /* For each grid, store the AABB and the AABB centroid */ + BBC *prim_bbc = (BBC *)MEM_mallocN(sizeof(BBC) * totgrid, "prim_bbc"); + + for (int i = 0; i < totgrid; i++) { + CCGElem *grid = grids[i]; + BBC *bbc = prim_bbc + i; + + BB_reset((BB *)bbc); + + for (int j = 0; j < gridsize * gridsize; j++) { + BB_expand((BB *)bbc, CCG_elem_offset_co(key, grid, j)); + } + + BBC_update_centroid(bbc); + + BB_expand(&cb, bbc->bcentroid); + } + + if (totgrid) { + pbvh_build(pbvh, &cb, prim_bbc, totgrid); + +#ifdef TEST_PBVH_FACE_SPLIT + test_face_boundaries(pbvh); +#endif + } + + if (fast_draw) { + pbvh->flags |= PBVH_FAST_DRAW; + } + + MEM_freeN(prim_bbc); +#ifdef VALIDATE_UNIQUE_NODE_FACES + pbvh_validate_node_prims(pbvh); +#endif +} + +PBVH *BKE_pbvh_new(PBVHType type) +{ + PBVH *pbvh = (PBVH *)MEM_callocN(sizeof(PBVH), "pbvh"); + pbvh->respect_hide = true; + pbvh->draw_cache_invalid = true; + pbvh->header.type = type; + + /* Initialize this to true, instead of waiting for a draw engine + * to set it. Prevents a crash in draw manager instancing code. + */ + pbvh->is_drawing = true; + return pbvh; +} + +void BKE_pbvh_free(PBVH *pbvh) +{ +#ifdef WITH_PBVH_CACHE + BKE_pbvh_cache_remove(pbvh); +#endif + + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = &pbvh->nodes[i]; + + if (node->flag & PBVH_Leaf) { + if (node->draw_batches) { + DRW_pbvh_node_free(node->draw_batches); + } + if (node->vert_indices) { + MEM_freeN((void *)node->vert_indices); + } + if (node->loop_indices) { + MEM_freeN(node->loop_indices); + } + if (node->face_vert_indices) { + MEM_freeN((void *)node->face_vert_indices); + } + if (node->bm_faces) { + BLI_table_gset_free(node->bm_faces, nullptr); + } + if (node->bm_unique_verts) { + BLI_table_gset_free(node->bm_unique_verts, nullptr); + } + if (node->bm_other_verts) { + BLI_table_gset_free(node->bm_other_verts, nullptr); + } + + if (node->tribuf || node->tri_buffers) { + BKE_pbvh_bmesh_free_tris(pbvh, node); + } + +#ifdef PROXY_ADVANCED + BKE_pbvh_free_proxyarray(pbvh, node); +#endif + pbvh_node_pixels_free(node); + } + } + + if (pbvh->deformed) { + if (pbvh->vert_positions) { + /* if pbvh was deformed, new memory was allocated for verts/faces -- free it */ + + MEM_freeN((void *)pbvh->vert_positions); + } + + pbvh->vert_positions = nullptr; + } + + if (pbvh->looptri) { + MEM_freeN((void *)pbvh->looptri); + } + + if (pbvh->nodes) { + MEM_freeN(pbvh->nodes); + } + + if (pbvh->prim_indices) { + MEM_freeN(pbvh->prim_indices); + } + + MEM_SAFE_FREE(pbvh->vert_bitmap); + + BKE_pbvh_pmap_release(pbvh->pmap); + pbvh->pmap = nullptr; + + pbvh->invalid = true; + pbvh_pixels_free(pbvh); + + MEM_freeN(pbvh); +} + +static void pbvh_iter_begin(PBVHIter *iter, + PBVH *pbvh, + BKE_pbvh_SearchCallback scb, + void *search_data) +{ + iter->pbvh = pbvh; + iter->scb = scb; + iter->search_data = search_data; + + iter->stack = iter->stackfixed; + iter->stackspace = PBVH_STACK_FIXED_DEPTH; + + iter->stack[0].node = pbvh->nodes; + iter->stack[0].revisiting = false; + iter->stacksize = 1; +} + +static void pbvh_iter_end(PBVHIter *iter) +{ + if (iter->stackspace > PBVH_STACK_FIXED_DEPTH) { + MEM_freeN(iter->stack); + } +} + +static void pbvh_stack_push(PBVHIter *iter, PBVHNode *node, bool revisiting) +{ + if (UNLIKELY(iter->stacksize == iter->stackspace)) { + iter->stackspace *= 2; + if (iter->stackspace != (PBVH_STACK_FIXED_DEPTH * 2)) { + iter->stack = (PBVHStack *)MEM_reallocN(iter->stack, sizeof(PBVHStack) * iter->stackspace); + } + else { + 
iter->stack = (PBVHStack *)MEM_mallocN(sizeof(PBVHStack) * iter->stackspace, "PBVHStack"); + memcpy((void *)iter->stack, (void *)iter->stackfixed, sizeof(PBVHStack) * iter->stacksize); + } + } + + iter->stack[iter->stacksize].node = node; + iter->stack[iter->stacksize].revisiting = revisiting; + iter->stacksize++; +} + +static PBVHNode *pbvh_iter_next(PBVHIter *iter, PBVHNodeFlags leaf_flag = PBVH_Leaf) +{ + /* purpose here is to traverse tree, visiting child nodes before their + * parents, this order is necessary for e.g. computing bounding boxes */ + + while (iter->stacksize) { + /* pop node */ + iter->stacksize--; + PBVHNode *node = iter->stack[iter->stacksize].node; + + /* on a mesh with no faces this can happen + * can remove this check if we know meshes have at least 1 face */ + if (node == nullptr) { + return nullptr; + } + + bool revisiting = iter->stack[iter->stacksize].revisiting; + + /* revisiting node already checked */ + if (revisiting) { + return node; + } + + if (iter->scb && !iter->scb(node, iter->search_data)) { + continue; /* don't traverse, outside of search zone */ + } + + if (node->flag & leaf_flag) { + /* immediately hit leaf node */ + return node; + } + + /* come back later when children are done */ + pbvh_stack_push(iter, node, true); + + /* push two child nodes on the stack */ + pbvh_stack_push(iter, iter->pbvh->nodes + node->children_offset + 1, false); + pbvh_stack_push(iter, iter->pbvh->nodes + node->children_offset, false); + } + + return nullptr; +} + +static PBVHNode *pbvh_iter_next_occluded(PBVHIter *iter) +{ + while (iter->stacksize) { + /* pop node */ + iter->stacksize--; + PBVHNode *node = iter->stack[iter->stacksize].node; + + /* on a mesh with no faces this can happen + * can remove this check if we know meshes have at least 1 face */ + if (node == nullptr) { + return nullptr; + } + + float ff = dot_v3v3(node->vb.bmin, node->vb.bmax); + if (isnan(ff) || !isfinite(ff)) { + printf("%s: nan!\n", __func__); + } + + if (iter->scb && !iter->scb(node, iter->search_data)) { + continue; /* don't traverse, outside of search zone */ + } + + if (node->flag & PBVH_Leaf) { + /* immediately hit leaf node */ + return node; + } + + pbvh_stack_push(iter, iter->pbvh->nodes + node->children_offset + 1, false); + pbvh_stack_push(iter, iter->pbvh->nodes + node->children_offset, false); + } + + return nullptr; +} + +void BKE_pbvh_search_gather_ex(PBVH *pbvh, + BKE_pbvh_SearchCallback scb, + void *search_data, + PBVHNode ***r_array, + int *r_tot, + PBVHNodeFlags leaf_flag) +{ + PBVHIter iter; + PBVHNode **array = nullptr, *node; + int tot = 0, space = 0; + + pbvh_iter_begin(&iter, pbvh, scb, search_data); + + while ((node = pbvh_iter_next(&iter, leaf_flag))) { + if (node->flag & leaf_flag) { + if (UNLIKELY(tot == space)) { + /* resize array if needed */ + space = (tot == 0) ? 
32 : space * 2; + array = (PBVHNode **)MEM_recallocN_id(array, sizeof(PBVHNode *) * space, __func__); + } + + array[tot] = node; + tot++; + } + } + + pbvh_iter_end(&iter); + + if (tot == 0 && array) { + MEM_freeN(array); + array = nullptr; + } + + *r_array = array; + *r_tot = tot; +} +void BKE_pbvh_search_gather( + PBVH *pbvh, BKE_pbvh_SearchCallback scb, void *search_data, PBVHNode ***r_array, int *r_tot) +{ + BKE_pbvh_search_gather_ex(pbvh, scb, search_data, r_array, r_tot, PBVH_Leaf); +} + +void BKE_pbvh_search_callback(PBVH *pbvh, + BKE_pbvh_SearchCallback scb, + void *search_data, + BKE_pbvh_HitCallback hcb, + void *hit_data) +{ + PBVHIter iter; + PBVHNode *node; + + pbvh_iter_begin(&iter, pbvh, scb, search_data); + + while ((node = pbvh_iter_next(&iter))) { + if (node->flag & PBVH_Leaf) { + hcb(node, hit_data); + } + } + + pbvh_iter_end(&iter); +} + +typedef struct node_tree { + PBVHNode *data; + + struct node_tree *left; + struct node_tree *right; +} node_tree; + +static void node_tree_insert(node_tree *tree, node_tree *new_node) +{ + if (new_node->data->tmin < tree->data->tmin) { + if (tree->left) { + node_tree_insert(tree->left, new_node); + } + else { + tree->left = new_node; + } + } + else { + if (tree->right) { + node_tree_insert(tree->right, new_node); + } + else { + tree->right = new_node; + } + } +} + +static void traverse_tree(node_tree *tree, + BKE_pbvh_HitOccludedCallback hcb, + void *hit_data, + float *tmin) +{ + if (tree->left) { + traverse_tree(tree->left, hcb, hit_data, tmin); + } + + hcb(tree->data, hit_data, tmin); + + if (tree->right) { + traverse_tree(tree->right, hcb, hit_data, tmin); + } +} + +static void free_tree(node_tree *tree) +{ + if (tree->left) { + free_tree(tree->left); + tree->left = nullptr; + } + + if (tree->right) { + free_tree(tree->right); + tree->right = nullptr; + } + + free(tree); +} + +float BKE_pbvh_node_get_tmin(PBVHNode *node) +{ + return node->tmin; +} + +static void BKE_pbvh_search_callback_occluded(PBVH *pbvh, + BKE_pbvh_SearchCallback scb, + void *search_data, + BKE_pbvh_HitOccludedCallback hcb, + void *hit_data) +{ + PBVHIter iter; + PBVHNode *node; + node_tree *tree = nullptr; + + pbvh_iter_begin(&iter, pbvh, scb, search_data); + + while ((node = pbvh_iter_next_occluded(&iter))) { + if (node->flag & PBVH_Leaf) { + node_tree *new_node = (node_tree *)malloc(sizeof(node_tree)); + + new_node->data = node; + + new_node->left = nullptr; + new_node->right = nullptr; + + if (tree) { + node_tree_insert(tree, new_node); + } + else { + tree = new_node; + } + } + } + + pbvh_iter_end(&iter); + + if (tree) { + float tmin = FLT_MAX; + traverse_tree(tree, hcb, hit_data, &tmin); + free_tree(tree); + } +} + +static bool update_search_cb(PBVHNode *node, void *data_v) +{ + int flag = POINTER_AS_INT(data_v); + + if (node->flag & PBVH_Leaf) { + return (node->flag & flag) != 0; + } + + return true; +} + +typedef struct PBVHUpdateData { + PBVH *pbvh; + PBVHNode **nodes; + int totnode; + + float (*vert_normals)[3]; + int flag; + bool show_sculpt_face_sets; + bool flat_vcol_shading; + Mesh *mesh; + PBVHAttrReq *attrs; + int attrs_num; +} PBVHUpdateData; + +static void pbvh_update_normals_clear_task_cb(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict) +{ + PBVHUpdateData *data = (PBVHUpdateData *)userdata; + PBVH *pbvh = data->pbvh; + PBVHNode *node = data->nodes[n]; + float(*vert_normals)[3] = data->vert_normals; + + if (node->flag & PBVH_UpdateNormals) { + const int *verts = node->vert_indices; + const int totvert = 
node->uniq_verts; + for (int i = 0; i < totvert; i++) { + const int v = verts[i]; + if (pbvh->vert_bitmap[v]) { + zero_v3(vert_normals[v]); + } + } + } +} + +static void pbvh_update_normals_accum_task_cb(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict) +{ + PBVHUpdateData *data = (PBVHUpdateData *)userdata; + + PBVH *pbvh = data->pbvh; + PBVHNode *node = data->nodes[n]; + float(*vert_normals)[3] = data->vert_normals; + + if (node->flag & PBVH_UpdateNormals) { + uint mpoly_prev = UINT_MAX; + float fn[3]; + + const int *faces = node->prim_indices; + const int totface = node->totprim; + + for (int i = 0; i < totface; i++) { + const MLoopTri *lt = &pbvh->looptri[faces[i]]; + const uint vtri[3] = { + pbvh->mloop[lt->tri[0]].v, + pbvh->mloop[lt->tri[1]].v, + pbvh->mloop[lt->tri[2]].v, + }; + const int sides = 3; + + /* Face normal and mask */ + if (lt->poly != mpoly_prev) { + const MPoly *mp = &pbvh->mpoly[lt->poly]; + BKE_mesh_calc_poly_normal(mp, &pbvh->mloop[mp->loopstart], pbvh->vert_positions, fn); + mpoly_prev = lt->poly; + } + + for (int j = sides; j--;) { + const int v = vtri[j]; + + if (pbvh->vert_bitmap[v]) { + /* NOTE: This avoids `lock, add_v3_v3, unlock` + * and is five to ten times quicker than a spin-lock. + * Not exact equivalent though, since atomicity is only ensured for one component + * of the vector at a time, but here it shall not make any sensible difference. */ + for (int k = 3; k--;) { + atomic_add_and_fetch_fl(&vert_normals[v][k], fn[k]); + } + } + } + } + } +} + +static void pbvh_update_normals_store_task_cb(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict) +{ + PBVHUpdateData *data = (PBVHUpdateData *)userdata; + PBVH *pbvh = data->pbvh; + PBVHNode *node = data->nodes[n]; + float(*vert_normals)[3] = data->vert_normals; + + if (node->flag & PBVH_UpdateNormals) { + const int *verts = node->vert_indices; + const int totvert = node->uniq_verts; + + for (int i = 0; i < totvert; i++) { + const int v = verts[i]; + + /* No atomics necessary because we are iterating over uniq_verts only, + * so we know only this thread will handle this vertex. */ + if (pbvh->vert_bitmap[v]) { + normalize_v3(vert_normals[v]); + pbvh->vert_bitmap[v] = false; + } + } + + node->flag &= ~PBVH_UpdateNormals; + } +} + +static void pbvh_faces_update_normals(PBVH *pbvh, PBVHNode **nodes, int totnode) +{ + /* subtle assumptions: + * - We know that for all edited vertices, the nodes with faces + * adjacent to these vertices have been marked with PBVH_UpdateNormals. + * This is true because if the vertex is inside the brush radius, the + * bounding box of its adjacent faces will be as well. + * - However this is only true for the vertices that have actually been + * edited, not for all vertices in the nodes marked for update, so we + * can only update vertices marked in the `vert_bitmap`. + */ + + PBVHUpdateData data = { + .pbvh = pbvh, + .nodes = nodes, + .vert_normals = pbvh->vert_normals, + }; + + TaskParallelSettings settings; + BKE_pbvh_parallel_range_settings(&settings, true, totnode); + + /* Zero normals before accumulation. 
*/ + BLI_task_parallel_range(0, totnode, &data, pbvh_update_normals_clear_task_cb, &settings); + BLI_task_parallel_range(0, totnode, &data, pbvh_update_normals_accum_task_cb, &settings); + BLI_task_parallel_range(0, totnode, &data, pbvh_update_normals_store_task_cb, &settings); +} + +static void pbvh_update_mask_redraw_task_cb(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict) +{ + + PBVHUpdateData *data = (PBVHUpdateData *)userdata; + PBVH *pbvh = data->pbvh; + PBVHNode *node = data->nodes[n]; + if (node->flag & PBVH_UpdateMask) { + + bool has_unmasked = false; + bool has_masked = true; + if (node->flag & PBVH_Leaf) { + PBVHVertexIter vd; + + BKE_pbvh_vertex_iter_begin (pbvh, node, vd, PBVH_ITER_ALL) { + if (vd.mask && *vd.mask < 1.0f) { + has_unmasked = true; + } + if (vd.mask && *vd.mask > 0.0f) { + has_masked = false; + } + } + BKE_pbvh_vertex_iter_end; + } + else { + has_unmasked = true; + has_masked = true; + } + BKE_pbvh_node_fully_masked_set(node, !has_unmasked); + BKE_pbvh_node_fully_unmasked_set(node, has_masked); + + node->flag &= ~PBVH_UpdateMask; + } +} + +static void pbvh_update_mask_redraw(PBVH *pbvh, PBVHNode **nodes, int totnode, int flag) +{ + PBVHUpdateData data = { + .pbvh = pbvh, + .nodes = nodes, + .flag = flag, + }; + + TaskParallelSettings settings; + BKE_pbvh_parallel_range_settings(&settings, true, totnode); + BLI_task_parallel_range(0, totnode, &data, pbvh_update_mask_redraw_task_cb, &settings); +} + +static void pbvh_update_visibility_redraw_task_cb(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict) +{ + + PBVHUpdateData *data = (PBVHUpdateData *)userdata; + PBVH *pbvh = data->pbvh; + PBVHNode *node = data->nodes[n]; + if (node->flag & PBVH_UpdateVisibility) { + node->flag &= ~PBVH_UpdateVisibility; + BKE_pbvh_node_fully_hidden_set(node, true); + if (node->flag & PBVH_Leaf) { + PBVHVertexIter vd; + BKE_pbvh_vertex_iter_begin (pbvh, node, vd, PBVH_ITER_ALL) { + if (vd.visible) { + BKE_pbvh_node_fully_hidden_set(node, false); + return; + } + } + BKE_pbvh_vertex_iter_end; + } + } +} + +static void pbvh_update_visibility_redraw(PBVH *pbvh, PBVHNode **nodes, int totnode, int flag) +{ + PBVHUpdateData data = { + .pbvh = pbvh, + .nodes = nodes, + .flag = flag, + }; + + TaskParallelSettings settings; + BKE_pbvh_parallel_range_settings(&settings, true, totnode); + BLI_task_parallel_range(0, totnode, &data, pbvh_update_visibility_redraw_task_cb, &settings); +} + +static void pbvh_update_BB_redraw_task_cb(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict) +{ + PBVHUpdateData *data = (PBVHUpdateData *)userdata; + PBVH *pbvh = data->pbvh; + PBVHNode *node = data->nodes[n]; + const int flag = data->flag; + + update_node_vb(pbvh, node, flag); + + if ((flag & PBVH_UpdateRedraw) && (node->flag & PBVH_UpdateRedraw)) { + node->flag &= ~PBVH_UpdateRedraw; + } +} + +void pbvh_update_BB_redraw(PBVH *pbvh, PBVHNode **nodes, int totnode, int flag) +{ + /* update BB, redraw flag */ + PBVHUpdateData data = { + .pbvh = pbvh, + .nodes = nodes, + .flag = flag, + }; + + TaskParallelSettings settings; + BKE_pbvh_parallel_range_settings(&settings, true, totnode); + BLI_task_parallel_range(0, totnode, &data, pbvh_update_BB_redraw_task_cb, &settings); +} + +bool BKE_pbvh_get_color_layer(const Mesh *me, CustomDataLayer **r_layer, eAttrDomain *r_attr) +{ + CustomDataLayer *layer = BKE_id_attributes_color_find(&me->id, me->active_color_attribute); + + if (!layer || !ELEM(layer->type, CD_PROP_COLOR, 
CD_PROP_BYTE_COLOR)) { + *r_layer = nullptr; + *r_attr = ATTR_DOMAIN_POINT; + return false; + } + + eAttrDomain domain = BKE_id_attribute_domain(&me->id, layer); + + if (!ELEM(domain, ATTR_DOMAIN_POINT, ATTR_DOMAIN_CORNER)) { + *r_layer = nullptr; + *r_attr = ATTR_DOMAIN_POINT; + return false; + } + + *r_layer = layer; + *r_attr = domain; + + return true; +} + +static void pbvh_update_draw_buffer_cb(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict) +{ + /* Create and update draw buffers. The functions called here must not + * do any OpenGL calls. Flags are not cleared immediately, that happens + * after GPU_pbvh_buffer_flush() which does the final OpenGL calls. */ + PBVHUpdateData *data = (PBVHUpdateData *)userdata; + PBVH *pbvh = data->pbvh; + PBVHNode *node = data->nodes[n]; + Mesh *me = data->mesh; + + CustomDataLayer *vcol_layer = nullptr; + eAttrDomain vcol_domain; + + BKE_pbvh_get_color_layer(me, &vcol_layer, &vcol_domain); + + CustomData *vdata, *ldata; + + if (!pbvh->header.bm) { + vdata = pbvh->vdata ? pbvh->vdata : &me->vdata; + ldata = pbvh->ldata ? pbvh->ldata : &me->ldata; + } + else { + vdata = &pbvh->header.bm->vdata; + ldata = &pbvh->header.bm->ldata; + } + + Mesh me_query; + BKE_id_attribute_copy_domains_temp(ID_ME, vdata, nullptr, ldata, nullptr, nullptr, &me_query.id); + me_query.active_color_attribute = me->active_color_attribute; + + if (!pbvh->header.bm) { + vdata = pbvh->vdata; + ldata = pbvh->ldata; + } + else { + vdata = &pbvh->header.bm->vdata; + ldata = &pbvh->header.bm->ldata; + } + + if (node->flag & PBVH_RebuildDrawBuffers) { + PBVH_GPU_Args args; + pbvh_draw_args_init(pbvh, &args, node); + + node->draw_batches = DRW_pbvh_node_create(&args); + } + + if (node->flag & PBVH_UpdateDrawBuffers) { + node->updategen++; + node->debug_draw_gen++; + + if (node->draw_batches) { + PBVH_GPU_Args args; + + pbvh_draw_args_init(pbvh, &args, node); + DRW_pbvh_node_update(node->draw_batches, &args); + } + } +} + +void BKE_pbvh_set_flat_vcol_shading(PBVH *pbvh, bool value) +{ + if (value != pbvh->flat_vcol_shading) { + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = pbvh->nodes + i; + + if (!(node->flag & PBVH_Leaf)) { + continue; + } + + BKE_pbvh_node_mark_rebuild_draw(node); + } + } + + pbvh->flat_vcol_shading = value; +} + +void pbvh_free_draw_buffers(PBVH * /* pbvh */, PBVHNode *node) +{ + if (node->draw_batches) { + DRW_pbvh_node_free(node->draw_batches); + node->draw_batches = nullptr; + } +} + +static void pbvh_update_draw_buffers( + PBVH *pbvh, Mesh *me, PBVHNode **nodes, int totnode, int update_flag) +{ + const CustomData *vdata; + + switch (pbvh->header.type) { + case PBVH_BMESH: + if (!pbvh->header.bm) { + /* BMesh hasn't been created yet */ + return; + } + + vdata = &pbvh->header.bm->vdata; + break; + case PBVH_FACES: + vdata = pbvh->vdata; + break; + case PBVH_GRIDS: + vdata = nullptr; + break; + } + UNUSED_VARS(vdata); + + if ((update_flag & PBVH_RebuildDrawBuffers) || ELEM(pbvh->header.type, PBVH_GRIDS, PBVH_BMESH)) { + /* Free buffers uses OpenGL, so not in parallel. */ + for (int n = 0; n < totnode; n++) { + PBVHNode *node = nodes[n]; + if (node->flag & PBVH_RebuildDrawBuffers) { + pbvh_free_draw_buffers(pbvh, node); + } + else if ((node->flag & PBVH_UpdateDrawBuffers) && node->draw_batches) { + PBVH_GPU_Args args; + + pbvh_draw_args_init(pbvh, &args, node); + DRW_pbvh_update_pre(node->draw_batches, &args); + } + } + } + + /* Parallel creation and update of draw buffers. 
*/ + PBVHUpdateData data = { + .pbvh = pbvh, .nodes = nodes, .flat_vcol_shading = pbvh->flat_vcol_shading, .mesh = me}; + + TaskParallelSettings settings; + BKE_pbvh_parallel_range_settings(&settings, true, totnode); + BLI_task_parallel_range(0, totnode, &data, pbvh_update_draw_buffer_cb, &settings); + + for (int i = 0; i < totnode; i++) { + PBVHNode *node = nodes[i]; + + if (node->flag & PBVH_UpdateDrawBuffers) { + /* Flush buffers uses OpenGL, so not in parallel. */ + if (node->draw_batches) { + DRW_pbvh_node_gpu_flush(node->draw_batches); + } + } + + node->flag &= ~(PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers); + } +} + +static int pbvh_flush_bb(PBVH *pbvh, PBVHNode *node, int flag) +{ + int update = 0; + + /* Difficult to multi-thread well, we just do single threaded recursive. */ + if (node->flag & PBVH_Leaf) { + if (flag & PBVH_UpdateBB) { + update |= (node->flag & PBVH_UpdateBB); + node->flag &= ~PBVH_UpdateBB; + } + + if (flag & PBVH_UpdateOriginalBB) { + update |= (node->flag & PBVH_UpdateOriginalBB); + node->flag &= ~PBVH_UpdateOriginalBB; + } + + return update; + } + + update |= pbvh_flush_bb(pbvh, pbvh->nodes + node->children_offset, flag); + update |= pbvh_flush_bb(pbvh, pbvh->nodes + node->children_offset + 1, flag); + + update_node_vb(pbvh, node, update); + + return update; +} + +void BKE_pbvh_update_bounds(PBVH *pbvh, int flag) +{ + if (!pbvh->nodes) { + return; + } + + PBVHNode **nodes; + int totnode; + + BKE_pbvh_search_gather(pbvh, update_search_cb, POINTER_FROM_INT(flag), &nodes, &totnode); + + if (flag & (PBVH_UpdateBB | PBVH_UpdateOriginalBB | PBVH_UpdateRedraw)) { + pbvh_update_BB_redraw(pbvh, nodes, totnode, flag); + } + + if (flag & (PBVH_UpdateBB | PBVH_UpdateOriginalBB)) { + pbvh_flush_bb(pbvh, pbvh->nodes, flag); + } + + MEM_SAFE_FREE(nodes); +} + +void BKE_pbvh_update_vertex_data(PBVH *pbvh, int flag) +{ + if (!pbvh->nodes) { + return; + } + + PBVHNode **nodes; + int totnode; + + BKE_pbvh_search_gather(pbvh, update_search_cb, POINTER_FROM_INT(flag), &nodes, &totnode); + + if (flag & (PBVH_UpdateMask)) { + pbvh_update_mask_redraw(pbvh, nodes, totnode, flag); + } + + if (flag & (PBVH_UpdateColor)) { + for (int i = 0; i < totnode; i++) { + nodes[i]->flag |= PBVH_UpdateRedraw | PBVH_UpdateDrawBuffers | PBVH_UpdateColor; + } + } + + if (flag & (PBVH_UpdateVisibility)) { + pbvh_update_visibility_redraw(pbvh, nodes, totnode, flag); + } + + if (nodes) { + MEM_freeN(nodes); + } +} + +static void pbvh_faces_node_visibility_update(PBVH *pbvh, PBVHNode *node) +{ + int totvert, i; + BKE_pbvh_node_num_verts(pbvh, node, nullptr, &totvert); + const int *vert_indices = BKE_pbvh_node_get_vert_indices(node); + + if (pbvh->hide_vert == nullptr) { + BKE_pbvh_node_fully_hidden_set(node, false); + return; + } + for (i = 0; i < totvert; i++) { + if (!(pbvh->hide_vert[vert_indices[i]])) { + BKE_pbvh_node_fully_hidden_set(node, false); + return; + } + } + + BKE_pbvh_node_fully_hidden_set(node, true); +} + +static void pbvh_grids_node_visibility_update(PBVH *pbvh, PBVHNode *node) +{ + CCGElem **grids; + BLI_bitmap **grid_hidden; + int *grid_indices, totgrid, i; + + BKE_pbvh_node_get_grids(pbvh, node, &grid_indices, &totgrid, nullptr, nullptr, &grids); + grid_hidden = BKE_pbvh_grid_hidden(pbvh); + CCGKey key = *BKE_pbvh_get_grid_key(pbvh); + + for (i = 0; i < totgrid; i++) { + int g = grid_indices[i], x, y; + BLI_bitmap *gh = grid_hidden[g]; + + if (!gh) { + BKE_pbvh_node_fully_hidden_set(node, false); + return; + } + + for (y = 0; y < key.grid_size; y++) { + for (x = 0; x < 
key.grid_size; x++) { + if (!BLI_BITMAP_TEST(gh, y * key.grid_size + x)) { + BKE_pbvh_node_fully_hidden_set(node, false); + return; + } + } + } + } + BKE_pbvh_node_fully_hidden_set(node, true); +} + +static void pbvh_bmesh_node_visibility_update(PBVHNode *node) +{ + TableGSet *unique, *other; + + unique = BKE_pbvh_bmesh_node_unique_verts(node); + other = BKE_pbvh_bmesh_node_other_verts(node); + + BMVert *v; + + TGSET_ITER (v, unique) { + if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) { + BKE_pbvh_node_fully_hidden_set(node, false); + return; + } + } + TGSET_ITER_END + + TGSET_ITER (v, other) { + if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) { + BKE_pbvh_node_fully_hidden_set(node, false); + return; + } + } + TGSET_ITER_END + + BKE_pbvh_node_fully_hidden_set(node, true); +} + +static void pbvh_update_visibility_task_cb(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict) +{ + + PBVHUpdateData *data = (PBVHUpdateData *)userdata; + PBVH *pbvh = data->pbvh; + PBVHNode *node = data->nodes[n]; + if (node->flag & PBVH_UpdateVisibility) { + switch (BKE_pbvh_type(pbvh)) { + case PBVH_FACES: + pbvh_faces_node_visibility_update(pbvh, node); + break; + case PBVH_GRIDS: + pbvh_grids_node_visibility_update(pbvh, node); + break; + case PBVH_BMESH: + pbvh_bmesh_node_visibility_update(node); + break; + } + node->flag &= ~PBVH_UpdateVisibility; + } +} + +static void pbvh_update_visibility(PBVH *pbvh, PBVHNode **nodes, int totnode) +{ + PBVHUpdateData data = { + .pbvh = pbvh, + .nodes = nodes, + }; + + TaskParallelSettings settings; + BKE_pbvh_parallel_range_settings(&settings, true, totnode); + BLI_task_parallel_range(0, totnode, &data, pbvh_update_visibility_task_cb, &settings); +} + +void BKE_pbvh_update_visibility(PBVH *pbvh) +{ + if (!pbvh->nodes) { + return; + } + + PBVHNode **nodes; + int totnode; + + BKE_pbvh_search_gather( + pbvh, update_search_cb, POINTER_FROM_INT(PBVH_UpdateVisibility), &nodes, &totnode); + pbvh_update_visibility(pbvh, nodes, totnode); + + if (nodes) { + MEM_freeN(nodes); + } +} + +void BKE_pbvh_redraw_BB(PBVH *pbvh, float bb_min[3], float bb_max[3]) +{ + PBVHIter iter; + PBVHNode *node; + BB bb; + + BB_reset(&bb); + + pbvh_iter_begin(&iter, pbvh, nullptr, nullptr); + + while ((node = pbvh_iter_next(&iter))) { + if (node->flag & PBVH_UpdateRedraw) { + BB_expand_with_bb(&bb, &node->vb); + } + } + + pbvh_iter_end(&iter); + + copy_v3_v3(bb_min, bb.bmin); + copy_v3_v3(bb_max, bb.bmax); +} + +void BKE_pbvh_get_grid_updates(PBVH *pbvh, bool clear, void ***r_gridfaces, int *r_totface) +{ + GSet *face_set = BLI_gset_ptr_new(__func__); + PBVHNode *node; + PBVHIter iter; + + pbvh_iter_begin(&iter, pbvh, nullptr, nullptr); + + while ((node = pbvh_iter_next(&iter))) { + if (node->flag & PBVH_UpdateNormals) { + for (uint i = 0; i < node->totprim; i++) { + void *face = pbvh->gridfaces[node->prim_indices[i]]; + BLI_gset_add(face_set, face); + } + + if (clear) { + node->flag &= ~PBVH_UpdateNormals; + } + } + } + + pbvh_iter_end(&iter); + + const int tot = BLI_gset_len(face_set); + if (tot == 0) { + *r_totface = 0; + *r_gridfaces = nullptr; + BLI_gset_free(face_set, nullptr); + return; + } + + void **faces = (void **)MEM_mallocN(sizeof(*faces) * tot, "PBVH Grid Faces"); + + GSetIterator gs_iter; + int i; + GSET_ITER_INDEX (gs_iter, face_set, i) { + faces[i] = BLI_gsetIterator_getKey(&gs_iter); + } + + BLI_gset_free(face_set, nullptr); + + *r_totface = tot; + *r_gridfaces = faces; +} + +/***************************** PBVH Access ***********************************/ + +bool 
BKE_pbvh_has_faces(const PBVH *pbvh) +{ + if (pbvh->header.type == PBVH_BMESH) { + return (pbvh->header.bm->totface != 0); + } + + return (pbvh->totprim != 0); +} + +void BKE_pbvh_bounding_box(const PBVH *pbvh, float min[3], float max[3]) +{ + if (pbvh->totnode) { + const BB *bb = &pbvh->nodes[0].vb; + copy_v3_v3(min, bb->bmin); + copy_v3_v3(max, bb->bmax); + } + else { + zero_v3(min); + zero_v3(max); + } +} + +BLI_bitmap **BKE_pbvh_grid_hidden(const PBVH *pbvh) +{ + BLI_assert(pbvh->header.type == PBVH_GRIDS); + return pbvh->grid_hidden; +} + +const CCGKey *BKE_pbvh_get_grid_key(const PBVH *pbvh) +{ + BLI_assert(pbvh->header.type == PBVH_GRIDS); + return &pbvh->gridkey; +} + +struct CCGElem **BKE_pbvh_get_grids(const PBVH *pbvh) +{ + BLI_assert(pbvh->header.type == PBVH_GRIDS); + return pbvh->grids; +} + +BLI_bitmap **BKE_pbvh_get_grid_visibility(const PBVH *pbvh) +{ + BLI_assert(pbvh->header.type == PBVH_GRIDS); + return pbvh->grid_hidden; +} + +int BKE_pbvh_get_grid_num_verts(const PBVH *pbvh) +{ + BLI_assert(pbvh->header.type == PBVH_GRIDS); + return pbvh->totgrid * pbvh->gridkey.grid_area; +} + +int BKE_pbvh_get_grid_num_faces(const PBVH *pbvh) +{ + BLI_assert(pbvh->header.type == PBVH_GRIDS); + return pbvh->totgrid * (pbvh->gridkey.grid_size - 1) * (pbvh->gridkey.grid_size - 1); +} + +/***************************** Node Access ***********************************/ + +void BKE_pbvh_node_mark_original_update(PBVHNode *node) +{ + node->flag |= PBVH_UpdateOriginalBB; +} + +void BKE_pbvh_node_mark_update(PBVHNode *node) +{ + node->flag |= PBVH_UpdateNormals | PBVH_UpdateBB | PBVH_UpdateOriginalBB | + PBVH_UpdateDrawBuffers | PBVH_UpdateRedraw | PBVH_UpdateCurvatureDir | + PBVH_RebuildPixels | PBVH_UpdateTriAreas; +} + +void BKE_pbvh_node_mark_update_mask(PBVHNode *node) +{ + node->flag |= PBVH_UpdateMask | PBVH_UpdateDrawBuffers | PBVH_UpdateRedraw; +} + +void BKE_pbvh_node_mark_update_color(PBVHNode *node) +{ + node->flag |= PBVH_UpdateColor | PBVH_UpdateDrawBuffers | PBVH_UpdateRedraw; +} + +void BKE_pbvh_node_mark_update_face_sets(PBVHNode *node) +{ + node->flag |= PBVH_UpdateDrawBuffers | PBVH_UpdateRedraw; +} + +void BKE_pbvh_mark_rebuild_pixels(PBVH *pbvh) +{ + for (int n = 0; n < pbvh->totnode; n++) { + PBVHNode *node = &pbvh->nodes[n]; + if (node->flag & PBVH_Leaf) { + node->flag |= PBVH_RebuildPixels; + } + } +} + +void BKE_pbvh_node_mark_update_visibility(PBVHNode *node) +{ + node->flag |= PBVH_UpdateVisibility | PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers | + PBVH_UpdateRedraw | PBVH_UpdateTris; +} + +void BKE_pbvh_vert_tag_update_normal_visibility(PBVHNode *node) +{ + node->flag |= PBVH_UpdateVisibility | PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers | + PBVH_UpdateRedraw | PBVH_UpdateCurvatureDir | PBVH_UpdateTris; +} + +void BKE_pbvh_node_mark_rebuild_draw(PBVHNode *node) +{ + node->flag |= PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers | PBVH_UpdateRedraw | + PBVH_UpdateCurvatureDir; +} + +void BKE_pbvh_node_mark_redraw(PBVHNode *node) +{ + node->flag |= PBVH_UpdateDrawBuffers | PBVH_UpdateRedraw; +} + +void BKE_pbvh_node_mark_normals_update(PBVHNode *node) +{ + node->flag |= PBVH_UpdateNormals | PBVH_UpdateCurvatureDir; +} + +void BKE_pbvh_node_mark_curvature_update(PBVHNode *node) +{ + node->flag |= PBVH_UpdateCurvatureDir; +} + +void BKE_pbvh_curvature_update_set(PBVHNode *node, bool state) +{ + if (state) { + node->flag |= PBVH_UpdateCurvatureDir; + } + else { + node->flag &= ~PBVH_UpdateCurvatureDir; + } +} + +bool BKE_pbvh_curvature_update_get(PBVHNode *node) 
+{ + return node->flag & PBVH_UpdateCurvatureDir; +} + +void BKE_pbvh_node_fully_hidden_set(PBVHNode *node, int fully_hidden) +{ + BLI_assert(node->flag & PBVH_Leaf); + + if (fully_hidden) { + node->flag |= PBVH_FullyHidden; + } + else { + node->flag &= ~PBVH_FullyHidden; + } +} + +bool BKE_pbvh_node_fully_hidden_get(PBVHNode *node) +{ + return (node->flag & PBVH_Leaf) && (node->flag & PBVH_FullyHidden); +} + +void BKE_pbvh_node_fully_masked_set(PBVHNode *node, int fully_masked) +{ + BLI_assert(node->flag & PBVH_Leaf); + + if (fully_masked) { + node->flag |= PBVH_FullyMasked; + } + else { + node->flag &= ~PBVH_FullyMasked; + } +} + +bool BKE_pbvh_node_fully_masked_get(PBVHNode *node) +{ + return (node->flag & PBVH_Leaf) && (node->flag & PBVH_FullyMasked); +} + +void BKE_pbvh_node_fully_unmasked_set(PBVHNode *node, int fully_masked) +{ + BLI_assert(node->flag & PBVH_Leaf); + + if (fully_masked) { + node->flag |= PBVH_FullyUnmasked; + } + else { + node->flag &= ~PBVH_FullyUnmasked; + } +} + +bool BKE_pbvh_node_fully_unmasked_get(PBVHNode *node) +{ + return (node->flag & PBVH_Leaf) && (node->flag & PBVH_FullyUnmasked); +} + +void BKE_pbvh_vert_tag_update_normal(PBVH *pbvh, PBVHVertRef vertex) +{ + BLI_assert(pbvh->header.type == PBVH_FACES); + pbvh->vert_bitmap[vertex.i] = true; +} + +void BKE_pbvh_node_get_loops(PBVH *pbvh, + PBVHNode *node, + const int **r_loop_indices, + const MLoop **r_loops) +{ + BLI_assert(BKE_pbvh_type(pbvh) == PBVH_FACES); + + if (r_loop_indices) { + *r_loop_indices = node->loop_indices; + } + + if (r_loops) { + *r_loops = pbvh->mloop; + } +} + +int BKE_pbvh_num_faces(const PBVH *pbvh) +{ + switch (pbvh->header.type) { + case PBVH_GRIDS: + case PBVH_FACES: + return pbvh->faces_num; + case PBVH_BMESH: + return pbvh->header.bm->totface; + } + + BLI_assert_unreachable(); + return 0; +} + +const int *BKE_pbvh_node_get_vert_indices(PBVHNode *node) + +{ + return node->vert_indices; +} + +void BKE_pbvh_node_num_verts(PBVH *pbvh, PBVHNode *node, int *r_uniquevert, int *r_totvert) +{ + int tot; + + switch (pbvh->header.type) { + case PBVH_GRIDS: + tot = node->totprim * pbvh->gridkey.grid_area; + if (r_totvert) { + *r_totvert = tot; + } + if (r_uniquevert) { + *r_uniquevert = tot; + } + break; + case PBVH_FACES: + if (r_totvert) { + *r_totvert = node->uniq_verts + node->face_verts; + } + if (r_uniquevert) { + *r_uniquevert = node->uniq_verts; + } + break; + case PBVH_BMESH: + // not a leaf? 
return zero + if (!(node->flag & PBVH_Leaf)) { + if (r_totvert) { + *r_totvert = 0; + } + + if (r_uniquevert) { + *r_uniquevert = 0; + } + + return; + } + + pbvh_bmesh_check_other_verts(node); + + tot = BLI_table_gset_len(node->bm_unique_verts); + if (r_totvert) { + *r_totvert = tot + BLI_table_gset_len(node->bm_other_verts); + } + if (r_uniquevert) { + *r_uniquevert = tot; + } + break; + } +} + +void BKE_pbvh_node_get_grids(PBVH *pbvh, + PBVHNode *node, + int **r_grid_indices, + int *r_totgrid, + int *r_maxgrid, + int *r_gridsize, + CCGElem ***r_griddata) +{ + switch (pbvh->header.type) { + case PBVH_GRIDS: + if (r_grid_indices) { + *r_grid_indices = node->prim_indices; + } + if (r_totgrid) { + *r_totgrid = node->totprim; + } + if (r_maxgrid) { + *r_maxgrid = pbvh->totgrid; + } + if (r_gridsize) { + *r_gridsize = pbvh->gridkey.grid_size; + } + if (r_griddata) { + *r_griddata = pbvh->grids; + } + break; + case PBVH_FACES: + case PBVH_BMESH: + if (r_grid_indices) { + *r_grid_indices = nullptr; + } + if (r_totgrid) { + *r_totgrid = 0; + } + if (r_maxgrid) { + *r_maxgrid = 0; + } + if (r_gridsize) { + *r_gridsize = 0; + } + if (r_griddata) { + *r_griddata = nullptr; + } + break; + } +} + +void BKE_pbvh_node_get_BB(PBVHNode *node, float bb_min[3], float bb_max[3]) +{ + copy_v3_v3(bb_min, node->vb.bmin); + copy_v3_v3(bb_max, node->vb.bmax); +} + +void BKE_pbvh_node_get_original_BB(PBVHNode *node, float bb_min[3], float bb_max[3]) +{ + copy_v3_v3(bb_min, node->orig_vb.bmin); + copy_v3_v3(bb_max, node->orig_vb.bmax); +} + +void BKE_pbvh_node_get_proxies(PBVHNode *node, PBVHProxyNode **proxies, int *proxy_count) +{ + if (node->proxy_count > 0) { + if (proxies) { + *proxies = node->proxies; + } + if (proxy_count) { + *proxy_count = node->proxy_count; + } + } + else { + if (proxies) { + *proxies = nullptr; + } + if (proxy_count) { + *proxy_count = 0; + } + } +} + +bool BKE_pbvh_node_has_vert_with_normal_update_tag(PBVH *pbvh, PBVHNode *node) +{ + BLI_assert(pbvh->header.type == PBVH_FACES); + const int *verts = node->vert_indices; + const int totvert = node->uniq_verts + node->face_verts; + + for (int i = 0; i < totvert; i++) { + const int v = verts[i]; + + if (pbvh->vert_bitmap[v]) { + return true; + } + } + + return false; +} + +/********************************* Ray-cast ***********************************/ + +typedef struct { + struct IsectRayAABB_Precalc ray; + bool original; + int stroke_id; +} RaycastData; + +static bool ray_aabb_intersect(PBVHNode *node, void *data_v) +{ + RaycastData *rcd = (RaycastData *)data_v; + const float *bb_min, *bb_max; + + if (rcd->original) { + /* BKE_pbvh_node_get_original_BB */ + bb_min = node->orig_vb.bmin; + bb_max = node->orig_vb.bmax; + } + else { + /* BKE_pbvh_node_get_BB */ + bb_min = node->vb.bmin; + bb_max = node->vb.bmax; + } + + return isect_ray_aabb_v3(&rcd->ray, bb_min, bb_max, &node->tmin); +} + +void BKE_pbvh_raycast(PBVH *pbvh, + BKE_pbvh_HitOccludedCallback cb, + void *data, + const float ray_start[3], + const float ray_normal[3], + bool original, + int stroke_id) +{ + RaycastData rcd; + + isect_ray_aabb_v3_precalc(&rcd.ray, ray_start, ray_normal); + + rcd.original = original; + rcd.stroke_id = stroke_id; + pbvh->stroke_id = stroke_id; + + BKE_pbvh_search_callback_occluded(pbvh, ray_aabb_intersect, &rcd, cb, data); +} + +bool ray_face_intersection_quad(const float ray_start[3], + struct IsectRayPrecalc *isect_precalc, + const float t0[3], + const float t1[3], + const float t2[3], + const float t3[3], + float *depth) +{ + float depth_test; + + if 
((isect_ray_tri_watertight_v3(ray_start, isect_precalc, t0, t1, t2, &depth_test, nullptr) && + (depth_test < *depth)) || + (isect_ray_tri_watertight_v3(ray_start, isect_precalc, t0, t2, t3, &depth_test, nullptr) && + (depth_test < *depth))) { + *depth = depth_test; + return true; + } + + return false; +} + +bool ray_face_intersection_tri(const float ray_start[3], + struct IsectRayPrecalc *isect_precalc, + const float t0[3], + const float t1[3], + const float t2[3], + float *depth) +{ + float depth_test; + if (isect_ray_tri_watertight_v3(ray_start, isect_precalc, t0, t1, t2, &depth_test, nullptr) && + (depth_test < *depth)) { + *depth = depth_test; + return true; + } + + return false; +} + +bool ray_update_depth_and_hit_count(const float depth_test, + float *r_depth, + float *r_back_depth, + int *hit_count) +{ + (*hit_count)++; + if (depth_test < *r_depth) { + *r_back_depth = *r_depth; + *r_depth = depth_test; + return true; + } + else if (depth_test > *r_depth && depth_test <= *r_back_depth) { + *r_back_depth = depth_test; + return false; + } + + return false; +} + +bool ray_face_intersection_depth_quad(const float ray_start[3], + struct IsectRayPrecalc *isect_precalc, + const float t0[3], + const float t1[3], + const float t2[3], + const float t3[3], + float *r_depth, + float *r_back_depth, + int *hit_count) +{ + float depth_test; + if (!(isect_ray_tri_watertight_v3(ray_start, isect_precalc, t0, t1, t2, &depth_test, nullptr) || + isect_ray_tri_watertight_v3(ray_start, isect_precalc, t0, t2, t3, &depth_test, nullptr))) { + return false; + } + return ray_update_depth_and_hit_count(depth_test, r_depth, r_back_depth, hit_count); +} + +bool ray_face_intersection_depth_tri(const float ray_start[3], + struct IsectRayPrecalc *isect_precalc, + const float t0[3], + const float t1[3], + const float t2[3], + float *r_depth, + float *r_back_depth, + int *hit_count) +{ + float depth_test; + + if (!isect_ray_tri_watertight_v3(ray_start, isect_precalc, t0, t1, t2, &depth_test, nullptr)) { + return false; + } + return ray_update_depth_and_hit_count(depth_test, r_depth, r_back_depth, hit_count); +} + +/* Take advantage of the fact we know this won't be an intersection. + * Just handle ray-tri edges. 
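+ * The helper below walks the triangle's three edges with dist_squared_ray_to_seg_v3 and keeps
+ * the closest edge's point and depth as the result.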
*/ +static float dist_squared_ray_to_tri_v3_fast(const float ray_origin[3], + const float ray_direction[3], + const float v0[3], + const float v1[3], + const float v2[3], + float r_point[3], + float *r_depth) +{ + const float *tri[3] = {v0, v1, v2}; + float dist_sq_best = FLT_MAX; + for (int i = 0, j = 2; i < 3; j = i++) { + float point_test[3], depth_test = FLT_MAX; + const float dist_sq_test = dist_squared_ray_to_seg_v3( + ray_origin, ray_direction, tri[i], tri[j], point_test, &depth_test); + if (dist_sq_test < dist_sq_best || i == 0) { + copy_v3_v3(r_point, point_test); + *r_depth = depth_test; + dist_sq_best = dist_sq_test; + } + } + return dist_sq_best; +} + +bool ray_face_nearest_quad(const float ray_start[3], + const float ray_normal[3], + const float t0[3], + const float t1[3], + const float t2[3], + const float t3[3], + float *depth, + float *dist_sq) +{ + float dist_sq_test; + float co[3], depth_test; + + if ((dist_sq_test = dist_squared_ray_to_tri_v3_fast( + ray_start, ray_normal, t0, t1, t2, co, &depth_test)) < *dist_sq) { + *dist_sq = dist_sq_test; + *depth = depth_test; + if ((dist_sq_test = dist_squared_ray_to_tri_v3_fast( + ray_start, ray_normal, t0, t2, t3, co, &depth_test)) < *dist_sq) { + *dist_sq = dist_sq_test; + *depth = depth_test; + } + return true; + } + + return false; +} + +bool ray_face_nearest_tri(const float ray_start[3], + const float ray_normal[3], + const float t0[3], + const float t1[3], + const float t2[3], + float *depth, + float *dist_sq) +{ + float dist_sq_test; + float co[3], depth_test; + + if ((dist_sq_test = dist_squared_ray_to_tri_v3_fast( + ray_start, ray_normal, t0, t1, t2, co, &depth_test)) < *dist_sq) { + *dist_sq = dist_sq_test; + *depth = depth_test; + return true; + } + + return false; +} + +static bool pbvh_faces_node_raycast(PBVH *pbvh, + const PBVHNode *node, + float (*origco)[3], + const float ray_start[3], + const float ray_normal[3], + struct IsectRayPrecalc *isect_precalc, + int *hit_count, + float *depth, + float *depth_back, + PBVHVertRef *r_active_vertex_index, + PBVHFaceRef *r_active_face_index, + float *r_face_normal, + int stroke_id) +{ + const float(*positions)[3] = pbvh->vert_positions; + const MLoop *mloop = pbvh->mloop; + const int *faces = node->prim_indices; + int totface = node->totprim; + bool hit = false; + float nearest_vertex_co[3] = {0.0f}; + + for (int i = 0; i < totface; i++) { + const MLoopTri *lt = &pbvh->looptri[faces[i]]; + const int *face_verts = node->face_vert_indices[i]; + + if (pbvh->respect_hide && paint_is_face_hidden(lt, pbvh->hide_poly)) { + continue; + } + + const float *co[3]; + if (origco) { + /* Intersect with backed up original coordinates. */ + co[0] = origco[face_verts[0]]; + co[1] = origco[face_verts[1]]; + co[2] = origco[face_verts[2]]; + } + else { + /* intersect with current coordinates */ + co[0] = positions[mloop[lt->tri[0]].v]; + co[1] = positions[mloop[lt->tri[1]].v]; + co[2] = positions[mloop[lt->tri[2]].v]; + } + + if (!ray_face_intersection_depth_tri( + ray_start, isect_precalc, co[0], co[1], co[2], depth, depth_back, hit_count)) { + continue; + } + + hit = true; + + if (r_face_normal) { + normal_tri_v3(r_face_normal, co[0], co[1], co[2]); + } + + if (r_active_vertex_index) { + float location[3] = {0.0f}; + madd_v3_v3v3fl(location, ray_start, ray_normal, *depth); + for (int j = 0; j < 3; j++) { + /* Always assign nearest_vertex_co in the first iteration to avoid comparison against + * uninitialized values. This stores the closest vertex in the current intersecting + * triangle. 
*/ + if (j == 0 || + len_squared_v3v3(location, co[j]) < len_squared_v3v3(location, nearest_vertex_co)) { + copy_v3_v3(nearest_vertex_co, co[j]); + *r_active_vertex_index = (PBVHVertRef){.i = mloop[lt->tri[j]].v}; + *r_active_face_index = (PBVHFaceRef){.i = lt->poly}; + } + } + } + } + + return hit; +} + +static bool pbvh_grids_node_raycast(PBVH *pbvh, + PBVHNode *node, + float (*origco)[3], + const float ray_start[3], + const float ray_normal[3], + struct IsectRayPrecalc *isect_precalc, + int *hit_count, + float *depth, + float *back_depth, + + PBVHVertRef *r_active_vertex, + PBVHFaceRef *r_active_grid, + float *r_face_normal) +{ + const int totgrid = node->totprim; + const int gridsize = pbvh->gridkey.grid_size; + bool hit = false; + float nearest_vertex_co[3] = {0.0}; + const CCGKey *gridkey = &pbvh->gridkey; + + for (int i = 0; i < totgrid; i++) { + const int grid_index = node->prim_indices[i]; + CCGElem *grid = pbvh->grids[grid_index]; + BLI_bitmap *gh; + + if (!grid) { + continue; + } + + gh = pbvh->grid_hidden[grid_index]; + + for (int y = 0; y < gridsize - 1; y++) { + for (int x = 0; x < gridsize - 1; x++) { + /* check if grid face is hidden */ + if (gh) { + if (paint_is_grid_face_hidden(gh, gridsize, x, y)) { + continue; + } + } + + const float *co[4]; + if (origco) { + co[0] = origco[(y + 1) * gridsize + x]; + co[1] = origco[(y + 1) * gridsize + x + 1]; + co[2] = origco[y * gridsize + x + 1]; + co[3] = origco[y * gridsize + x]; + } + else { + co[0] = CCG_grid_elem_co(gridkey, grid, x, y + 1); + co[1] = CCG_grid_elem_co(gridkey, grid, x + 1, y + 1); + co[2] = CCG_grid_elem_co(gridkey, grid, x + 1, y); + co[3] = CCG_grid_elem_co(gridkey, grid, x, y); + } + + if (!ray_face_intersection_depth_quad(ray_start, + isect_precalc, + co[0], + co[1], + co[2], + co[3], + depth, + back_depth, + hit_count)) { + continue; + } + hit = true; + + if (r_face_normal) { + normal_quad_v3(r_face_normal, co[0], co[1], co[2], co[3]); + } + + if (r_active_vertex) { + float location[3] = {0.0}; + madd_v3_v3v3fl(location, ray_start, ray_normal, *depth); + + const int x_it[4] = {0, 1, 1, 0}; + const int y_it[4] = {1, 1, 0, 0}; + + for (int j = 0; j < 4; j++) { + /* Always assign nearest_vertex_co in the first iteration to avoid comparison against + * uninitialized values. This stores the closest vertex in the current intersecting + * quad. 
*/ + if (j == 0 || len_squared_v3v3(location, co[j]) < + len_squared_v3v3(location, nearest_vertex_co)) { + copy_v3_v3(nearest_vertex_co, co[j]); + + r_active_vertex->i = gridkey->grid_area * grid_index + + (y + y_it[j]) * gridkey->grid_size + (x + x_it[j]); + } + } + } + + if (r_active_grid) { + r_active_grid->i = grid_index; + } + } + } + + if (origco) { + origco += gridsize * gridsize; + } + } + + return hit; +} + +bool BKE_pbvh_node_raycast(PBVH *pbvh, + PBVHNode *node, + float (*origco)[3], + bool use_origco, + const float ray_start[3], + const float ray_normal[3], + struct IsectRayPrecalc *isect_precalc, + int *hit_count, + float *depth, + float *back_depth, + PBVHVertRef *active_vertex, + PBVHFaceRef *active_face_grid, + float *face_normal, + int stroke_id) +{ + bool hit = false; + + if (node->flag & PBVH_FullyHidden) { + return false; + } + + switch (pbvh->header.type) { + case PBVH_FACES: + hit |= pbvh_faces_node_raycast(pbvh, + node, + origco, + ray_start, + ray_normal, + isect_precalc, + hit_count, + depth, + back_depth, + active_vertex, + active_face_grid, + face_normal, + stroke_id); + + break; + case PBVH_GRIDS: + hit |= pbvh_grids_node_raycast(pbvh, + node, + origco, + ray_start, + ray_normal, + isect_precalc, + hit_count, + depth, + back_depth, + active_vertex, + active_face_grid, + face_normal); + break; + case PBVH_BMESH: + hit = pbvh_bmesh_node_raycast(pbvh, + node, + ray_start, + ray_normal, + isect_precalc, + hit_count, + depth, + back_depth, + use_origco, + active_vertex, + active_face_grid, + face_normal, + stroke_id); + break; + } + + return hit; +} + +void BKE_pbvh_raycast_project_ray_root( + PBVH *pbvh, bool original, float ray_start[3], float ray_end[3], float ray_normal[3]) +{ + if (pbvh->nodes) { + float rootmin_start, rootmin_end; + float bb_min_root[3], bb_max_root[3], bb_center[3], bb_diff[3]; + struct IsectRayAABB_Precalc ray; + float ray_normal_inv[3]; + float offset = 1.0f + 1e-3f; + const float offset_vec[3] = {1e-3f, 1e-3f, 1e-3f}; + + if (original) { + BKE_pbvh_node_get_original_BB(pbvh->nodes, bb_min_root, bb_max_root); + } + else { + BKE_pbvh_node_get_BB(pbvh->nodes, bb_min_root, bb_max_root); + } + + /* Slightly offset min and max in case we have a zero width node + * (due to a plane mesh for instance), or faces very close to the bounding box boundary. 
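+ * The half-extent is first padded by an absolute 1e-3 epsilon and then scaled by (1 + 1e-3),
+ * so even a zero-width box yields a usable volume for the ray clipping below.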
*/ + mid_v3_v3v3(bb_center, bb_max_root, bb_min_root); + /* diff should be same for both min/max since it's calculated from center */ + sub_v3_v3v3(bb_diff, bb_max_root, bb_center); + /* handles case of zero width bb */ + add_v3_v3(bb_diff, offset_vec); + madd_v3_v3v3fl(bb_max_root, bb_center, bb_diff, offset); + madd_v3_v3v3fl(bb_min_root, bb_center, bb_diff, -offset); + + /* first project start ray */ + isect_ray_aabb_v3_precalc(&ray, ray_start, ray_normal); + if (!isect_ray_aabb_v3(&ray, bb_min_root, bb_max_root, &rootmin_start)) { + return; + } + + /* then the end ray */ + mul_v3_v3fl(ray_normal_inv, ray_normal, -1.0); + isect_ray_aabb_v3_precalc(&ray, ray_end, ray_normal_inv); + /* unlikely to fail exiting if entering succeeded, still keep this here */ + if (!isect_ray_aabb_v3(&ray, bb_min_root, bb_max_root, &rootmin_end)) { + return; + } + + madd_v3_v3v3fl(ray_start, ray_start, ray_normal, rootmin_start); + madd_v3_v3v3fl(ray_end, ray_end, ray_normal_inv, rootmin_end); + } +} + +/* -------------------------------------------------------------------- */ + +typedef struct { + struct DistRayAABB_Precalc dist_ray_to_aabb_precalc; + bool original; +} FindNearestRayData; + +static bool nearest_to_ray_aabb_dist_sq(PBVHNode *node, void *data_v) +{ + FindNearestRayData *rcd = (FindNearestRayData *)data_v; + const float *bb_min, *bb_max; + + if (rcd->original) { + /* BKE_pbvh_node_get_original_BB */ + bb_min = node->orig_vb.bmin; + bb_max = node->orig_vb.bmax; + } + else { + /* BKE_pbvh_node_get_BB */ + bb_min = node->vb.bmin; + bb_max = node->vb.bmax; + } + + float co_dummy[3], depth; + node->tmin = dist_squared_ray_to_aabb_v3( + &rcd->dist_ray_to_aabb_precalc, bb_min, bb_max, co_dummy, &depth); + /* Ideally we would skip distances outside the range. */ + return depth > 0.0f; +} + +void BKE_pbvh_find_nearest_to_ray(PBVH *pbvh, + BKE_pbvh_SearchNearestCallback cb, + void *data, + const float ray_start[3], + const float ray_normal[3], + bool original) +{ + FindNearestRayData ncd; + + dist_squared_ray_to_aabb_v3_precalc(&ncd.dist_ray_to_aabb_precalc, ray_start, ray_normal); + ncd.original = original; + + BKE_pbvh_search_callback_occluded(pbvh, nearest_to_ray_aabb_dist_sq, &ncd, cb, data); +} + +static bool pbvh_faces_node_nearest_to_ray(PBVH *pbvh, + const PBVHNode *node, + float (*origco)[3], + const float ray_start[3], + const float ray_normal[3], + float *depth, + float *dist_sq) +{ + const float(*positions)[3] = pbvh->vert_positions; + const MLoop *mloop = pbvh->mloop; + const int *faces = node->prim_indices; + int i, totface = node->totprim; + bool hit = false; + + for (i = 0; i < totface; i++) { + const MLoopTri *lt = &pbvh->looptri[faces[i]]; + const int *face_verts = node->face_vert_indices[i]; + + if (pbvh->respect_hide && paint_is_face_hidden(lt, pbvh->hide_poly)) { + continue; + } + + if (origco) { + /* Intersect with backed-up original coordinates. 
*/ + hit |= ray_face_nearest_tri(ray_start, + ray_normal, + origco[face_verts[0]], + origco[face_verts[1]], + origco[face_verts[2]], + depth, + dist_sq); + } + else { + /* intersect with current coordinates */ + hit |= ray_face_nearest_tri(ray_start, + ray_normal, + positions[mloop[lt->tri[0]].v], + positions[mloop[lt->tri[1]].v], + positions[mloop[lt->tri[2]].v], + depth, + dist_sq); + } + } + + return hit; +} + +static bool pbvh_grids_node_nearest_to_ray(PBVH *pbvh, + PBVHNode *node, + float (*origco)[3], + const float ray_start[3], + const float ray_normal[3], + float *depth, + float *dist_sq) +{ + const int totgrid = node->totprim; + const int gridsize = pbvh->gridkey.grid_size; + bool hit = false; + + for (int i = 0; i < totgrid; i++) { + CCGElem *grid = pbvh->grids[node->prim_indices[i]]; + BLI_bitmap *gh; + + if (!grid) { + continue; + } + + gh = pbvh->grid_hidden[node->prim_indices[i]]; + + for (int y = 0; y < gridsize - 1; y++) { + for (int x = 0; x < gridsize - 1; x++) { + /* check if grid face is hidden */ + if (gh) { + if (paint_is_grid_face_hidden(gh, gridsize, x, y)) { + continue; + } + } + + if (origco) { + hit |= ray_face_nearest_quad(ray_start, + ray_normal, + origco[y * gridsize + x], + origco[y * gridsize + x + 1], + origco[(y + 1) * gridsize + x + 1], + origco[(y + 1) * gridsize + x], + depth, + dist_sq); + } + else { + hit |= ray_face_nearest_quad(ray_start, + ray_normal, + CCG_grid_elem_co(&pbvh->gridkey, grid, x, y), + CCG_grid_elem_co(&pbvh->gridkey, grid, x + 1, y), + CCG_grid_elem_co(&pbvh->gridkey, grid, x + 1, y + 1), + CCG_grid_elem_co(&pbvh->gridkey, grid, x, y + 1), + depth, + dist_sq); + } + } + } + + if (origco) { + origco += gridsize * gridsize; + } + } + + return hit; +} + +bool BKE_pbvh_node_find_nearest_to_ray(PBVH *pbvh, + PBVHNode *node, + float (*origco)[3], + bool use_origco, + const float ray_start[3], + const float ray_normal[3], + float *depth, + float *dist_sq, + int stroke_id) +{ + bool hit = false; + + if (node->flag & PBVH_FullyHidden) { + return false; + } + + switch (pbvh->header.type) { + case PBVH_FACES: + hit |= pbvh_faces_node_nearest_to_ray( + pbvh, node, origco, ray_start, ray_normal, depth, dist_sq); + break; + case PBVH_GRIDS: + hit |= pbvh_grids_node_nearest_to_ray( + pbvh, node, origco, ray_start, ray_normal, depth, dist_sq); + break; + case PBVH_BMESH: + hit = pbvh_bmesh_node_nearest_to_ray( + pbvh, node, ray_start, ray_normal, depth, dist_sq, use_origco, stroke_id); + break; + } + + return hit; +} + +typedef enum { + ISECT_INSIDE, + ISECT_OUTSIDE, + ISECT_INTERSECT, +} PlaneAABBIsect; + +/* Adapted from: + * http://www.gamedev.net/community/forums/topic.asp?topic_id=512123 + * Returns true if the AABB is at least partially within the frustum + * (ok, not a real frustum), false otherwise. 
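+ * (In this adapted version the helper below returns a PlaneAABBIsect value (ISECT_OUTSIDE,
+ * ISECT_INTERSECT or ISECT_INSIDE); the bool wrappers further down compare against it.)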
+ */ +static PlaneAABBIsect test_frustum_aabb(const float bb_min[3], + const float bb_max[3], + PBVHFrustumPlanes *frustum) +{ + PlaneAABBIsect ret = ISECT_INSIDE; + float(*planes)[4] = frustum->planes; + + for (int i = 0; i < frustum->num_planes; i++) { + float vmin[3], vmax[3]; + + for (int axis = 0; axis < 3; axis++) { + if (planes[i][axis] < 0) { + vmin[axis] = bb_min[axis]; + vmax[axis] = bb_max[axis]; + } + else { + vmin[axis] = bb_max[axis]; + vmax[axis] = bb_min[axis]; + } + } + + if (dot_v3v3(planes[i], vmin) + planes[i][3] < 0) { + return ISECT_OUTSIDE; + } + if (dot_v3v3(planes[i], vmax) + planes[i][3] <= 0) { + ret = ISECT_INTERSECT; + } + } + + return ret; +} + +bool BKE_pbvh_node_frustum_contain_AABB(PBVHNode *node, void *data) +{ + const float *bb_min, *bb_max; + /* BKE_pbvh_node_get_BB */ + bb_min = node->vb.bmin; + bb_max = node->vb.bmax; + + return test_frustum_aabb(bb_min, bb_max, (PBVHFrustumPlanes *)data) != ISECT_OUTSIDE; +} + +bool BKE_pbvh_node_frustum_exclude_AABB(PBVHNode *node, void *data) +{ + const float *bb_min, *bb_max; + /* BKE_pbvh_node_get_BB */ + bb_min = node->vb.bmin; + bb_max = node->vb.bmax; + + return test_frustum_aabb(bb_min, bb_max, (PBVHFrustumPlanes *)data) != ISECT_INSIDE; +} + +void BKE_pbvh_update_normals(PBVH *pbvh, struct SubdivCCG *subdiv_ccg) +{ + /* Update normals */ + PBVHNode **nodes; + int totnode; + + if (pbvh->header.type == PBVH_BMESH) { + for (int i = 0; i < pbvh->totnode; i++) { + if (pbvh->nodes[i].flag & PBVH_Leaf) { + BKE_pbvh_bmesh_check_tris(pbvh, pbvh->nodes + i); + } + } + } + + BKE_pbvh_search_gather( + pbvh, update_search_cb, POINTER_FROM_INT(PBVH_UpdateNormals), &nodes, &totnode); + + if (totnode > 0) { + if (pbvh->header.type == PBVH_BMESH) { + pbvh_bmesh_normals_update(pbvh, nodes, totnode); + } + else if (pbvh->header.type == PBVH_FACES) { + pbvh_faces_update_normals(pbvh, nodes, totnode); + } + else if (pbvh->header.type == PBVH_GRIDS) { + struct CCGFace **faces; + int num_faces; + BKE_pbvh_get_grid_updates(pbvh, true, (void ***)&faces, &num_faces); + if (num_faces > 0) { + BKE_subdiv_ccg_update_normals(subdiv_ccg, faces, num_faces); + MEM_freeN(faces); + } + } + } + + MEM_SAFE_FREE(nodes); +} + +void BKE_pbvh_face_sets_color_set(PBVH *pbvh, int seed, int color_default) +{ + pbvh->face_sets_color_seed = seed; + pbvh->face_sets_color_default = color_default; +} + +/** + * PBVH drawing, updating draw buffers as needed and culling any nodes outside + * the specified frustum. + */ +typedef struct PBVHDrawSearchData { + PBVHFrustumPlanes *frustum; + int accum_update_flag; + PBVHAttrReq *attrs; + int attrs_num; +} PBVHDrawSearchData; + +static bool pbvh_draw_search_cb(PBVHNode *node, void *data_v) +{ + PBVHDrawSearchData *data = (PBVHDrawSearchData *)data_v; + if (data->frustum && !BKE_pbvh_node_frustum_contain_AABB(node, data->frustum)) { + return false; + } + + data->accum_update_flag |= node->flag; + return true; +} + +void BKE_pbvh_draw_cb(PBVH *pbvh, + Mesh *me, + bool update_only_visible, + PBVHFrustumPlanes *update_frustum, + PBVHFrustumPlanes *draw_frustum, + void (*draw_fn)(void *user_data, PBVHBatches *batches, PBVH_GPU_Args *args), + void *user_data, + bool /* full_render */, + PBVHAttrReq *attrs, + int attrs_num) +{ + PBVHNode **nodes; + int totnode; + int update_flag = 0; + + pbvh->draw_cache_invalid = false; + + /* Search for nodes that need updates. */ + if (update_only_visible) { + /* Get visible nodes with draw updates. 
*/ + PBVHDrawSearchData data = { + .frustum = update_frustum, .accum_update_flag = 0, attrs, attrs_num}; + BKE_pbvh_search_gather(pbvh, pbvh_draw_search_cb, &data, &nodes, &totnode); + update_flag = data.accum_update_flag; + } + else { + /* Get all nodes with draw updates, also those outside the view. */ + const int search_flag = PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers; + BKE_pbvh_search_gather( + pbvh, update_search_cb, POINTER_FROM_INT(search_flag), &nodes, &totnode); + update_flag = PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers; + } + + /* Update draw buffers. */ + if (totnode != 0 && (update_flag & (PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers))) { + // check that need_full_render is set to GPU_pbvh_need_full_render_get(), + // but only if nodes need updating} + pbvh_update_draw_buffers(pbvh, me, nodes, totnode, update_flag); + } + MEM_SAFE_FREE(nodes); + + /* Draw visible nodes. */ + PBVHDrawSearchData draw_data = {.frustum = draw_frustum, .accum_update_flag = 0}; + BKE_pbvh_search_gather(pbvh, pbvh_draw_search_cb, &draw_data, &nodes, &totnode); + + PBVH_GPU_Args args; + + for (int i = 0; i < totnode; i++) { + PBVHNode *node = nodes[i]; + if (!(node->flag & PBVH_FullyHidden)) { + pbvh_draw_args_init(pbvh, &args, node); + + draw_fn(user_data, node->draw_batches, &args); + } + } + + MEM_SAFE_FREE(nodes); +} + +void BKE_pbvh_draw_debug_cb(PBVH *pbvh, + void (*draw_fn)(PBVHNode *node, + void *user_data, + const float bmin[3], + const float bmax[3], + PBVHNodeFlags flag), + void *user_data) +{ + for (int a = 0; a < pbvh->totnode; a++) { + PBVHNode *node = &pbvh->nodes[a]; + + if (pbvh_show_orig_co) { + draw_fn(node, user_data, node->orig_vb.bmin, node->orig_vb.bmax, node->flag); + } + else { + draw_fn(node, user_data, node->vb.bmin, node->vb.bmax, node->flag); + } + } +} + +void BKE_pbvh_grids_update(PBVH *pbvh, + CCGElem **grids, + void **gridfaces, + DMFlagMat *flagmats, + BLI_bitmap **grid_hidden, + CCGKey *key) +{ + pbvh->gridkey = *key; + pbvh->grids = grids; + pbvh->gridfaces = gridfaces; + + if (flagmats != pbvh->grid_flag_mats || pbvh->grid_hidden != grid_hidden) { + pbvh->grid_flag_mats = flagmats; + pbvh->grid_hidden = grid_hidden; + + for (int a = 0; a < pbvh->totnode; a++) { + BKE_pbvh_node_mark_rebuild_draw(&pbvh->nodes[a]); + } + } +} + +float (*BKE_pbvh_vert_coords_alloc(PBVH *pbvh))[3] +{ + float(*vertCos)[3] = nullptr; + + if (pbvh->vert_positions) { + vertCos = (float(*)[3])MEM_malloc_arrayN(pbvh->totvert, sizeof(float[3]), __func__); + memcpy(vertCos, pbvh->vert_positions, sizeof(float[3]) * pbvh->totvert); + } + + return vertCos; +} + +void BKE_pbvh_vert_coords_apply(PBVH *pbvh, const float (*vertCos)[3], const int totvert) +{ + if (totvert != pbvh->totvert) { + BLI_assert_msg(0, "PBVH: Given deforming vcos number does not match PBVH vertex number!"); + return; + } + + if (!pbvh->deformed) { + if (pbvh->vert_positions) { + /* if pbvh is not already deformed, verts/faces points to the */ + /* original data and applying new coords to this arrays would lead to */ + /* unneeded deformation -- duplicate verts/faces to avoid this */ + + pbvh->vert_positions = (float(*)[3])MEM_dupallocN(pbvh->vert_positions); + /* No need to dupalloc pbvh->looptri, this one is 'totally owned' by pbvh, + * it's never some mesh data. 
*/ + + pbvh->deformed = true; + } + } + + if (pbvh->vert_positions) { + float(*positions)[3] = pbvh->vert_positions; + /* copy new verts coords */ + for (int a = 0; a < pbvh->totvert; a++) { + /* no need for float comparison here (memory is exactly equal or not) */ + if (memcmp(positions[a], vertCos[a], sizeof(float[3])) != 0) { + copy_v3_v3(positions[a], vertCos[a]); + BKE_pbvh_vert_tag_update_normal(pbvh, BKE_pbvh_make_vref(a)); + } + } + + for (int a = 0; a < pbvh->totnode; a++) { + BKE_pbvh_node_mark_update(&pbvh->nodes[a]); + } + + BKE_pbvh_update_bounds(pbvh, PBVH_UpdateBB | PBVH_UpdateOriginalBB); + } +} + +bool BKE_pbvh_is_deformed(PBVH *pbvh) +{ + return pbvh->deformed; +} +/* Proxies */ + +PBVHProxyNode *BKE_pbvh_node_add_proxy(PBVH *pbvh, PBVHNode *node) +{ + int index, totverts; + + index = node->proxy_count; + + node->proxy_count++; + + if (node->proxies) { + node->proxies = (PBVHProxyNode *)MEM_reallocN(node->proxies, + node->proxy_count * sizeof(PBVHProxyNode)); + } + else { + node->proxies = (PBVHProxyNode *)MEM_mallocN(sizeof(PBVHProxyNode), "PBVHNodeProxy"); + } + + BKE_pbvh_node_num_verts(pbvh, node, &totverts, nullptr); + node->proxies[index].co = (float(*)[3])MEM_callocN(sizeof(float[3]) * totverts, + "PBVHNodeProxy.co"); + + return node->proxies + index; +} + +void BKE_pbvh_node_free_proxies(PBVHNode *node) +{ + for (int p = 0; p < node->proxy_count; p++) { + MEM_freeN(node->proxies[p].co); + node->proxies[p].co = nullptr; + } + + MEM_SAFE_FREE(node->proxies); + node->proxies = nullptr; + + node->proxy_count = 0; +} + +void BKE_pbvh_gather_proxies(PBVH *pbvh, PBVHNode ***r_array, int *r_tot) +{ + PBVHNode **array = nullptr; + int tot = 0, space = 0; + + for (int n = 0; n < pbvh->totnode; n++) { + PBVHNode *node = pbvh->nodes + n; + + if (node->proxy_count > 0) { + if (tot == space) { + /* resize array if needed */ + space = (tot == 0) ? 32 : space * 2; + array = (PBVHNode **)MEM_recallocN_id(array, sizeof(PBVHNode *) * space, __func__); + } + + array[tot] = node; + tot++; + } + } + + if (tot == 0 && array) { + MEM_freeN(array); + array = nullptr; + } + + *r_array = array; + *r_tot = tot; +} + +void pbvh_vertex_iter_init(PBVH *pbvh, PBVHNode *node, PBVHVertexIter *vi, int mode) +{ + struct CCGElem **grids; + int *grid_indices; + int totgrid, gridsize, uniq_verts, totvert; + + vi->grid = nullptr; + vi->no = nullptr; + vi->fno = nullptr; + vi->vert_positions = nullptr; + vi->vertex.i = 0LL; + vi->index = 0; + + vi->respect_hide = pbvh->respect_hide; + if (pbvh->respect_hide == false) { + /* The same value for all vertices. */ + vi->visible = true; + } + + BKE_pbvh_node_get_grids(pbvh, node, &grid_indices, &totgrid, nullptr, &gridsize, &grids); + BKE_pbvh_node_num_verts(pbvh, node, &uniq_verts, &totvert); + const int *vert_indices = BKE_pbvh_node_get_vert_indices(node); + vi->key = pbvh->gridkey; + + vi->grids = grids; + vi->grid_indices = grid_indices; + vi->totgrid = (grids) ? 
totgrid : 1; + vi->gridsize = gridsize; + + if (mode == PBVH_ITER_ALL) { + vi->totvert = totvert; + } + else { + vi->totvert = uniq_verts; + } + vi->vert_indices = vert_indices; + vi->vert_positions = pbvh->vert_positions; + vi->is_mesh = pbvh->vert_positions != nullptr; + + if (pbvh->header.type == PBVH_BMESH) { + if (mode == PBVH_ITER_ALL) { + pbvh_bmesh_check_other_verts(node); + } + + vi->vert_positions = nullptr; + + vi->bi = 0; + vi->bm_cur_set = node->bm_unique_verts; + vi->bm_unique_verts = node->bm_unique_verts; + vi->bm_other_verts = node->bm_other_verts; + vi->bm_vdata = &pbvh->header.bm->vdata; + vi->bm_vert = nullptr; + + vi->cd_sculpt_vert = CustomData_get_offset(vi->bm_vdata, CD_DYNTOPO_VERT); + vi->cd_vert_mask_offset = CustomData_get_offset(vi->bm_vdata, CD_PAINT_MASK); + } + + vi->gh = nullptr; + if (vi->grids && mode == PBVH_ITER_UNIQUE) { + vi->grid_hidden = pbvh->grid_hidden; + } + + vi->mask = nullptr; + if (pbvh->header.type == PBVH_FACES) { + vi->vert_normals = pbvh->vert_normals; + vi->hide_vert = pbvh->hide_vert; + + vi->vmask = (float *)CustomData_get_layer_for_write( + pbvh->vdata, CD_PAINT_MASK, pbvh->mesh->totvert); + } +} + +bool BKE_pbvh_draw_mask(const PBVH *pbvh) +{ + return BKE_pbvh_has_mask(pbvh) && !(pbvh->flags & PBVH_FAST_DRAW); +} + +bool BKE_pbvh_has_mask(const PBVH *pbvh) +{ + switch (pbvh->header.type) { + case PBVH_GRIDS: + return (pbvh->gridkey.has_mask != 0); + case PBVH_FACES: + return (pbvh->vdata && CustomData_get_layer(pbvh->vdata, CD_PAINT_MASK)); + case PBVH_BMESH: + return (pbvh->header.bm && + (CustomData_get_offset(&pbvh->header.bm->vdata, CD_PAINT_MASK) != -1)); + } + + return false; +} + +bool BKE_pbvh_draw_face_sets(PBVH *pbvh) +{ + if (pbvh->flags & PBVH_FAST_DRAW) { + return false; + } + + switch (pbvh->header.type) { + case PBVH_GRIDS: + case PBVH_FACES: + return pbvh->pdata && + CustomData_get_layer_named(pbvh->pdata, CD_PROP_INT32, ".sculpt_face_set") != nullptr; + case PBVH_BMESH: + return (pbvh->header.bm && CustomData_get_named_layer_index(&pbvh->header.bm->pdata, + CD_PROP_INT32, + ".sculpt_face_set") != -1); + } + + return false; +} + +void BKE_pbvh_set_frustum_planes(PBVH *pbvh, PBVHFrustumPlanes *planes) +{ + pbvh->num_planes = planes->num_planes; + for (int i = 0; i < pbvh->num_planes; i++) { + copy_v4_v4(pbvh->planes[i], planes->planes[i]); + } +} + +void BKE_pbvh_get_frustum_planes(PBVH *pbvh, PBVHFrustumPlanes *planes) +{ + planes->num_planes = pbvh->num_planes; + for (int i = 0; i < planes->num_planes; i++) { + copy_v4_v4(planes->planes[i], pbvh->planes[i]); + } +} + +#include "BKE_global.h" +void BKE_pbvh_parallel_range_settings(TaskParallelSettings *settings, + bool use_threading, + int totnode) +{ + memset(settings, 0, sizeof(*settings)); + settings->use_threading = use_threading && totnode > 1 && G.debug_value != 890; +} + +float (*BKE_pbvh_get_vert_positions(const PBVH *pbvh))[3] +{ + BLI_assert(pbvh->header.type == PBVH_FACES); + return pbvh->vert_positions; +} + +const float (*BKE_pbvh_get_vert_normals(const PBVH *pbvh))[3] +{ + BLI_assert(pbvh->header.type == PBVH_FACES); + return pbvh->vert_normals; +} + +const bool *BKE_pbvh_get_vert_hide(const PBVH *pbvh) +{ + BLI_assert(pbvh->header.type == PBVH_FACES); + return pbvh->hide_vert; +} + +const bool *BKE_pbvh_get_poly_hide(const PBVH *pbvh) +{ + BLI_assert(ELEM(pbvh->header.type, PBVH_FACES, PBVH_GRIDS)); + return pbvh->hide_poly; +} + +bool *BKE_pbvh_get_vert_hide_for_write(PBVH *pbvh) +{ + BLI_assert(pbvh->header.type == PBVH_FACES); + if (pbvh->hide_vert) 
{ + return pbvh->hide_vert; + } + pbvh->hide_vert = (bool *)CustomData_get_layer_named_for_write( + &pbvh->mesh->vdata, CD_PROP_BOOL, ".hide_vert", pbvh->mesh->totvert); + if (pbvh->hide_vert) { + return pbvh->hide_vert; + } + pbvh->hide_vert = (bool *)CustomData_add_layer_named( + &pbvh->mesh->vdata, CD_PROP_BOOL, CD_SET_DEFAULT, nullptr, pbvh->mesh->totvert, ".hide_vert"); + return pbvh->hide_vert; +} + +void BKE_pbvh_subdiv_ccg_set(PBVH *pbvh, SubdivCCG *subdiv_ccg) +{ + pbvh->subdiv_ccg = subdiv_ccg; + pbvh->gridfaces = (void **)subdiv_ccg->grid_faces; + pbvh->grid_hidden = subdiv_ccg->grid_hidden; + pbvh->grid_flag_mats = subdiv_ccg->grid_flag_mats; + pbvh->grids = subdiv_ccg->grids; +} + +void BKE_pbvh_face_sets_set(PBVH *pbvh, int *face_sets) +{ + pbvh->face_sets = face_sets; +} + +void BKE_pbvh_update_hide_attributes_from_mesh(PBVH *pbvh) +{ + if (pbvh->header.type == PBVH_FACES) { + pbvh->hide_vert = (bool *)CustomData_get_layer_named_for_write( + &pbvh->mesh->vdata, CD_PROP_BOOL, ".hide_vert", pbvh->mesh->totvert); + pbvh->hide_poly = (bool *)CustomData_get_layer_named_for_write( + &pbvh->mesh->pdata, CD_PROP_BOOL, ".hide_poly", pbvh->mesh->totpoly); + } +} + +void BKE_pbvh_respect_hide_set(PBVH *pbvh, bool respect_hide) +{ + pbvh->respect_hide = respect_hide; +} + +int BKE_pbvh_get_node_index(PBVH *pbvh, PBVHNode *node) +{ + return (int)(node - pbvh->nodes); +} + +int BKE_pbvh_get_totnodes(PBVH *pbvh) +{ + return pbvh->totnode; +} + +int BKE_pbvh_get_node_id(PBVH *pbvh, PBVHNode *node) +{ + return node->id; +} + +void BKE_pbvh_get_nodes(PBVH *pbvh, int flag, PBVHNode ***r_array, int *r_totnode) +{ + BKE_pbvh_search_gather(pbvh, update_search_cb, POINTER_FROM_INT(flag), r_array, r_totnode); +} + +PBVHNode *BKE_pbvh_node_from_index(PBVH *pbvh, int node_i) +{ + return pbvh->nodes + node_i; +} + +#ifdef PROXY_ADVANCED +// TODO: if this really works, make sure to pull the neighbor iterator out of sculpt.c and put it +// here +/* clang-format off */ +# include "BKE_context.h" +# include "DNA_object_types.h" +# include "DNA_scene_types.h" +# include "../../editors/sculpt_paint/sculpt_intern.h" +/* clang-format on */ + +int checkalloc(void **data, int esize, int oldsize, int newsize, int emask, int umask) +{ + // update channel if it already was allocated once, or is requested by umask + if (newsize != oldsize && (*data || (emask & umask))) { + if (*data) { + *data = MEM_reallocN(*data, newsize * esize); + } + else { + *data = MEM_mallocN(newsize * esize, "pbvh proxy vert arrays"); + } + return emask; + } + + return 0; +} + +void BKE_pbvh_ensure_proxyarray_indexmap(PBVH *pbvh, PBVHNode *node, GHash *vert_node_map) +{ + ProxyVertArray *p = &node->proxyverts; + + int totvert = 0; + BKE_pbvh_node_num_verts(pbvh, node, &totvert, nullptr); + + bool update = !p->indexmap || p->size != totvert; + update = update || (p->indexmap && BLI_ghash_len(p->indexmap) != totvert); + + if (!update) { + return; + } + + if (p->indexmap) { + BLI_ghash_free(p->indexmap, nullptr, nullptr); + } + + GHash *gs = p->indexmap = BLI_ghash_ptr_new("BKE_pbvh_ensure_proxyarray_indexmap"); + + PBVHVertexIter vd; + + int i = 0; + BKE_pbvh_vertex_iter_begin (pbvh, node, vd, PBVH_ITER_UNIQUE) { + BLI_ghash_insert(gs, (void *)vd.vertex.i, (void *)i); + i++; + } + BKE_pbvh_vertex_iter_end; +} + +bool pbvh_proxyarray_needs_update(PBVH *pbvh, PBVHNode *node, int mask) +{ + ProxyVertArray *p = &node->proxyverts; + int totvert = 0; + + if (!(node->flag & PBVH_Leaf) || !node->bm_unique_verts) { + return false; + } + + 
BKE_pbvh_node_num_verts(pbvh, node, &totvert, nullptr); + + bool bad = p->size != totvert; + bad = bad || ((mask & PV_NEIGHBORS) && p->neighbors_dirty); + bad = bad || (p->datamask & mask) != mask; + + bad = bad && totvert > 0; + + return bad; +} + +GHash *pbvh_build_vert_node_map(PBVH *pbvh, PBVHNode **nodes, int totnode, int mask) +{ + GHash *vert_node_map = BLI_ghash_ptr_new("BKE_pbvh_ensure_proxyarrays"); + + for (int i = 0; i < totnode; i++) { + PBVHVertexIter vd; + PBVHNode *node = nodes[i]; + + if (!(node->flag & PBVH_Leaf)) { + continue; + } + + BKE_pbvh_vertex_iter_begin (pbvh, node, vd, PBVH_ITER_UNIQUE) { + BLI_ghash_insert(vert_node_map, (void *)vd.vertex.i, (void *)(node - pbvh->nodes)); + } + BKE_pbvh_vertex_iter_end; + } + + return vert_node_map; +} + +void BKE_pbvh_ensure_proxyarrays( + SculptSession *ss, PBVH *pbvh, PBVHNode **nodes, int totnode, int mask) +{ + + bool update = false; + + for (int i = 0; i < totnode; i++) { + if (pbvh_proxyarray_needs_update(pbvh, nodes[i], mask)) { + update = true; + break; + } + } + + if (!update) { + return; + } + + GHash *vert_node_map = pbvh_build_vert_node_map(pbvh, nodes, totnode, mask); + + for (int i = 0; i < totnode; i++) { + if (nodes[i]->flag & PBVH_Leaf) { + BKE_pbvh_ensure_proxyarray_indexmap(pbvh, nodes[i], vert_node_map); + } + } + + for (int i = 0; i < totnode; i++) { + if (nodes[i]->flag & PBVH_Leaf) { + BKE_pbvh_ensure_proxyarray(ss, pbvh, nodes[i], mask, vert_node_map, false, false); + } + } + + if (vert_node_map) { + BLI_ghash_free(vert_node_map, nullptr, nullptr); + } +} + +void BKE_pbvh_ensure_proxyarray(SculptSession *ss, + PBVH *pbvh, + PBVHNode *node, + int mask, + GHash *vert_node_map, + bool check_indexmap, + bool force_update) +{ + ProxyVertArray *p = &node->proxyverts; + + if (check_indexmap) { + BKE_pbvh_ensure_proxyarray_indexmap(pbvh, node, vert_node_map); + } + + GHash *gs = p->indexmap; + + int totvert = 0; + BKE_pbvh_node_num_verts(pbvh, node, &totvert, nullptr); + + if (!totvert) { + return; + } + + int updatemask = 0; + +# define UPDATETEST(name, emask, esize) \ + if (mask & emask) { \ + updatemask |= checkalloc((void **)&p->name, esize, p->size, totvert, emask, mask); \ + } + + UPDATETEST(ownerco, PV_OWNERCO, sizeof(void *)) + UPDATETEST(ownerno, PV_OWNERNO, sizeof(void *)) + UPDATETEST(ownermask, PV_OWNERMASK, sizeof(void *)) + UPDATETEST(ownercolor, PV_OWNERCOLOR, sizeof(void *)) + UPDATETEST(co, PV_CO, sizeof(float) * 3) + UPDATETEST(no, PV_NO, sizeof(short) * 3) + UPDATETEST(fno, PV_NO, sizeof(float) * 3) + UPDATETEST(mask, PV_MASK, sizeof(float)) + UPDATETEST(color, PV_COLOR, sizeof(float) * 4) + UPDATETEST(index, PV_INDEX, sizeof(PBVHVertRef)) + UPDATETEST(neighbors, PV_NEIGHBORS, sizeof(ProxyKey) * MAX_PROXY_NEIGHBORS) + + p->size = totvert; + + if (force_update) { + updatemask |= mask; + } + + if ((mask & PV_NEIGHBORS) && p->neighbors_dirty) { + updatemask |= PV_NEIGHBORS; + } + + if (!updatemask) { + return; + } + + p->datamask |= mask; + + PBVHVertexIter vd; + + int i = 0; + +# if 1 + BKE_pbvh_vertex_iter_begin (pbvh, node, vd, PBVH_ITER_UNIQUE) { + void **val; + + if (!BLI_ghash_ensure_p(gs, (void *)vd.vertex.i, &val)) { + *val = (void *)i; + }; + i++; + } + BKE_pbvh_vertex_iter_end; +# endif + + if (updatemask & PV_NEIGHBORS) { + p->neighbors_dirty = false; + } + + i = 0; + BKE_pbvh_vertex_iter_begin (pbvh, node, vd, PBVH_ITER_UNIQUE) { + if (i >= p->size) { + printf("error!! 
%s\n", __func__); + break; + } + + if (updatemask & PV_OWNERCO) { + p->ownerco[i] = vd.co; + } + if (updatemask & PV_INDEX) { + p->index[i] = vd.vertex; + } + if (updatemask & PV_OWNERNO) { + p->ownerno[i] = vd.no; + } + if (updatemask & PV_NO) { + if (vd.fno) { + if (p->fno) { + copy_v3_v3(p->fno[i], vd.fno); + } + normal_float_to_short_v3(p->no[i], vd.fno); + } + else if (vd.no) { + copy_v3_v3_short(p->no[i], vd.no); + if (p->fno) { + normal_short_to_float_v3(p->fno[i], vd.no); + } + } + else { + p->no[i][0] = p->no[i][1] = p->no[i][2] = 0; + if (p->fno) { + zero_v3(p->fno[i]); + } + } + } + if (updatemask & PV_CO) { + copy_v3_v3(p->co[i], vd.co); + } + if (updatemask & PV_OWNERMASK) { + p->ownermask[i] = vd.mask; + } + if (updatemask & PV_MASK) { + p->mask[i] = vd.mask ? *vd.mask : 0.0f; + } + if (updatemask & PV_COLOR) { + if (vd.vcol) { + copy_v4_v4(p->color[i], vd.vcol->color); + } + } + + if (updatemask & PV_NEIGHBORS) { + int j = 0; + SculptVertexNeighborIter ni; + + SCULPT_VERTEX_NEIGHBORS_ITER_BEGIN (ss, vd.vertex, ni) { + if (j >= MAX_PROXY_NEIGHBORS - 1) { + break; + } + + ProxyKey key; + + int *pindex = (int *)BLI_ghash_lookup_p(gs, (void *)ni.vertex.i); + + if (!pindex) { + if (vert_node_map) { + int *nindex = (int *)BLI_ghash_lookup_p(vert_node_map, (void *)ni.vertex.i); + + if (!nindex) { + p->neighbors_dirty = true; + continue; + } + + PBVHNode *node2 = pbvh->nodes + *nindex; + if (node2->proxyverts.indexmap) { + pindex = (int *)BLI_ghash_lookup_p(node2->proxyverts.indexmap, (void *)ni.vertex.i); + } + else { + pindex = nullptr; + } + + if (!pindex) { + p->neighbors_dirty = true; + continue; + } + + key.node = (int)(node2 - pbvh->nodes); + key.pindex = *pindex; + //* + if (node2->proxyverts.size != 0 && + (key.pindex < 0 || key.pindex >= node2->proxyverts.size)) { + printf("error! %s\n", __func__); + fflush(stdout); + p->neighbors_dirty = true; + continue; + } + //*/ + } + else { + p->neighbors_dirty = true; + continue; + } + } + else { + key.node = (int)(node - pbvh->nodes); + key.pindex = *pindex; + } + + p->neighbors[i][j++] = key; + } + SCULPT_VERTEX_NEIGHBORS_ITER_END(ni); + + p->neighbors[i][j].node = -1; + } + + i++; + } + BKE_pbvh_vertex_iter_end; +} + +typedef struct GatherProxyThread { + PBVHNode **nodes; + PBVH *pbvh; + int mask; +} GatherProxyThread; + +static void pbvh_load_proxyarray_exec(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict tls) +{ + GatherProxyThread *data = (GatherProxyThread *)userdata; + PBVHNode *node = data->nodes[n]; + PBVHVertexIter vd; + ProxyVertArray *p = &node->proxyverts; + int i = 0; + + int mask = p->datamask; + + BKE_pbvh_ensure_proxyarray(nullptr, data->pbvh, node, data->mask, nullptr, false, true); +} + +void BKE_pbvh_load_proxyarrays(PBVH *pbvh, PBVHNode **nodes, int totnode, int mask) +{ + GatherProxyThread data = {.nodes = nodes, .pbvh = pbvh, .mask = mask}; + + mask = mask & ~PV_NEIGHBORS; // don't update neighbors in threaded code? 
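+  // (Presumably to keep worker threads from touching other nodes' index maps while they are rebuilt.)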
+ + TaskParallelSettings settings; + BKE_pbvh_parallel_range_settings(&settings, true, totnode); + BLI_task_parallel_range(0, totnode, &data, pbvh_load_proxyarray_exec, &settings); +} + +static void pbvh_gather_proxyarray_exec(void *__restrict userdata, + const int n, + const TaskParallelTLS *__restrict tls) +{ + GatherProxyThread *data = (GatherProxyThread *)userdata; + PBVHNode *node = data->nodes[n]; + PBVHVertexIter vd; + ProxyVertArray *p = &node->proxyverts; + int i = 0; + + int mask = p->datamask; + + BKE_pbvh_vertex_iter_begin (data->pbvh, node, vd, PBVH_ITER_UNIQUE) { + if (mask & PV_CO) { + copy_v3_v3(vd.co, p->co[i]); + } + + if (mask & PV_COLOR && vd.col) { + copy_v4_v4(vd.col, p->color[i]); + } + + if (vd.mask && (mask & PV_MASK)) { + *vd.mask = p->mask[i]; + } + + i++; + } + BKE_pbvh_vertex_iter_end; +} + +void BKE_pbvh_gather_proxyarray(PBVH *pbvh, PBVHNode **nodes, int totnode) +{ + GatherProxyThread data = {.nodes = nodes, .pbvh = pbvh}; + + TaskParallelSettings settings; + BKE_pbvh_parallel_range_settings(&settings, true, totnode); + BLI_task_parallel_range(0, totnode, &data, pbvh_gather_proxyarray_exec, &settings); +} + +void BKE_pbvh_free_proxyarray(PBVH *pbvh, PBVHNode *node) +{ + ProxyVertArray *p = &node->proxyverts; + + if (p->indexmap) { + BLI_ghash_free(p->indexmap, nullptr, nullptr); + } + if (p->co) + MEM_freeN(p->co); + if (p->no) + MEM_freeN(p->no); + if (p->index) + MEM_freeN(p->index); + if (p->mask) + MEM_freeN(p->mask); + if (p->ownerco) + MEM_freeN(p->ownerco); + if (p->ownerno) + MEM_freeN(p->ownerno); + if (p->ownermask) + MEM_freeN(p->ownermask); + if (p->ownercolor) + MEM_freeN(p->ownercolor); + if (p->color) + MEM_freeN(p->color); + if (p->neighbors) + MEM_freeN(p->neighbors); + + memset(p, 0, sizeof(*p)); +} + +void BKE_pbvh_update_proxyvert(PBVH *pbvh, PBVHNode *node, ProxyVertUpdateRec *rec) +{ +} + +ProxyVertArray *BKE_pbvh_get_proxyarrays(PBVH *pbvh, PBVHNode *node) +{ + return &node->proxyverts; +} + +#endif + +/* checks if pbvh needs to sync its flat vcol shading flag with scene tool settings + scene and ob are allowd to be nullptr (in which case nothing is done). 
+*/ +void SCULPT_update_flat_vcol_shading(Object *ob, Scene *scene) +{ + if (!scene || !ob || !ob->sculpt || !ob->sculpt->pbvh) { + return; + } + + if (ob->sculpt->pbvh) { + bool flat_vcol_shading = ((scene->toolsettings->sculpt->flags & + SCULPT_DYNTOPO_FLAT_VCOL_SHADING) != 0); + + BKE_pbvh_set_flat_vcol_shading(ob->sculpt->pbvh, flat_vcol_shading); + } +} + +PBVHNode *BKE_pbvh_get_node(PBVH *pbvh, int node) +{ + return pbvh->nodes + node; +} + +bool BKE_pbvh_node_mark_update_index_buffer(PBVH *pbvh, PBVHNode *node) +{ + bool split_indexed = pbvh->header.bm && + (pbvh->flags & (PBVH_DYNTOPO_SMOOTH_SHADING | PBVH_FAST_DRAW)); + + if (split_indexed) { + BKE_pbvh_vert_tag_update_normal_triangulation(node); + } + + return split_indexed; +} + +void BKE_pbvh_vert_tag_update_normal_triangulation(PBVHNode *node) +{ + node->flag |= PBVH_UpdateTris; +} + +void BKE_pbvh_vert_tag_update_normal_tri_area(PBVHNode *node) +{ + node->flag |= PBVH_UpdateTriAreas; +} + +/* must be called outside of threads */ +void BKE_pbvh_face_areas_begin(PBVH *pbvh) +{ + pbvh->face_area_i ^= 1; +} + +void BKE_pbvh_update_all_tri_areas(PBVH *pbvh) +{ + /* swap read/write face area buffers */ + pbvh->face_area_i ^= 1; + + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = pbvh->nodes + i; + if (node->flag & PBVH_Leaf) { + node->flag |= PBVH_UpdateTriAreas; +#if 0 + // ensure node triangulations are valid + // so we don't end up doing it inside brush threads + BKE_pbvh_bmesh_check_tris(pbvh, node); +#endif + } + } +} + +void BKE_pbvh_check_tri_areas(PBVH *pbvh, PBVHNode *node) +{ + if (!(node->flag & PBVH_UpdateTriAreas)) { + return; + } + + if (pbvh->header.type == PBVH_BMESH) { + if (node->flag & PBVH_UpdateTris) { + BKE_pbvh_bmesh_check_tris(pbvh, node); + } + + if (!node->tribuf || !node->tribuf->tottri) { + return; + } + } + + node->flag &= ~PBVH_UpdateTriAreas; + + const int cur_i = pbvh->face_area_i ^ 1; + + switch (BKE_pbvh_type(pbvh)) { + case PBVH_FACES: { + for (int i = 0; i < (int)node->totprim; i++) { + const MLoopTri *lt = &pbvh->looptri[node->prim_indices[i]]; + + if (pbvh->hide_poly && pbvh->hide_poly[lt->poly]) { + /* Skip hidden faces. */ + continue; + } + + pbvh->face_areas[lt->poly * 2 + cur_i] = 0.0f; + } + + for (int i = 0; i < (int)node->totprim; i++) { + const MLoopTri *lt = &pbvh->looptri[node->prim_indices[i]]; + + if (pbvh->hide_poly && pbvh->hide_poly[lt->poly]) { + /* Skip hidden faces. 
*/ + continue; + } + + float area = area_tri_v3(pbvh->vert_positions[pbvh->mloop[lt->tri[0]].v], + pbvh->vert_positions[pbvh->mloop[lt->tri[1]].v], + pbvh->vert_positions[pbvh->mloop[lt->tri[2]].v]); + + pbvh->face_areas[lt->poly * 2 + cur_i] += area; + + /* sanity check on read side of read write buffer */ + if (pbvh->face_areas[lt->poly * 2 + (cur_i ^ 1)] == 0.0f) { + pbvh->face_areas[lt->poly * 2 + (cur_i ^ 1)] = pbvh->face_areas[lt->poly * 2 + cur_i]; + } + } + break; + } + case PBVH_GRIDS: + break; + case PBVH_BMESH: { + BMFace *f; + const int cd_face_area = pbvh->cd_face_area; + + TGSET_ITER (f, node->bm_faces) { + float *areabuf = (float *)BM_ELEM_CD_GET_VOID_P(f, cd_face_area); + areabuf[cur_i] = 0.0f; + } + TGSET_ITER_END; + + for (int i = 0; i < node->tribuf->tottri; i++) { + PBVHTri *tri = node->tribuf->tris + i; + + BMVert *v1 = (BMVert *)(node->tribuf->verts[tri->v[0]].i); + BMVert *v2 = (BMVert *)(node->tribuf->verts[tri->v[1]].i); + BMVert *v3 = (BMVert *)(node->tribuf->verts[tri->v[2]].i); + BMFace *f = (BMFace *)tri->f.i; + + float *areabuf = (float *)BM_ELEM_CD_GET_VOID_P(f, cd_face_area); + areabuf[cur_i] += area_tri_v3(v1->co, v2->co, v3->co); + } + + TGSET_ITER (f, node->bm_faces) { + float *areabuf = (float *)BM_ELEM_CD_GET_VOID_P(f, cd_face_area); + + /* sanity check on read side of read write buffer */ + if (areabuf[cur_i ^ 1] == 0.0f) { + areabuf[cur_i ^ 1] = areabuf[cur_i]; + } + } + TGSET_ITER_END; + + break; + } + default: + break; + } +} + +static void pbvh_pmap_to_edges_add(PBVH *pbvh, + PBVHVertRef vertex, + int **r_edges, + int *r_edges_size, + bool *heap_alloc, + int e, + int p, + int *len, + int **r_polys) +{ + for (int i = 0; i < *len; i++) { + if ((*r_edges)[i] == e) { + if ((*r_polys)[i * 2 + 1] == -1) { + (*r_polys)[i * 2 + 1] = p; + } + return; + } + } + + if (*len >= *r_edges_size) { + int newsize = *len + ((*len) >> 1) + 1; + + int *r_edges_new = (int *)MEM_malloc_arrayN(newsize, sizeof(*r_edges_new), "r_edges_new"); + int *r_polys_new = (int *)MEM_malloc_arrayN(newsize * 2, sizeof(*r_polys_new), "r_polys_new"); + + memcpy((void *)r_edges_new, (void *)*r_edges, sizeof(int) * (*r_edges_size)); + memcpy((void *)r_polys_new, (void *)(*r_polys), sizeof(int) * 2 * (*r_edges_size)); + + *r_edges_size = newsize; + + if (*heap_alloc) { + MEM_freeN(*r_polys); + MEM_freeN(*r_edges); + } + + *r_edges = r_edges_new; + *r_polys = r_polys_new; + + *heap_alloc = true; + } + + (*r_polys)[*len * 2] = p; + (*r_polys)[*len * 2 + 1] = -1; + + (*r_edges)[*len] = e; + (*len)++; +} + +void BKE_pbvh_pmap_to_edges(PBVH *pbvh, + PBVHVertRef vertex, + int **r_edges, + int *r_edges_size, + bool *r_heap_alloc, + int **r_polys) +{ + MeshElemMap *map = pbvh->pmap->pmap + vertex.i; + int len = 0; + + for (int i = 0; i < map->count; i++) { + const MPoly *mp = pbvh->mpoly + map->indices[i]; + const MLoop *ml = pbvh->mloop + mp->loopstart; + + if (pbvh->hide_poly && pbvh->hide_poly[map->indices[i]]) { + /* Skip connectivity from hidden faces. 
*/ + continue; + } + + for (int j = 0; j < mp->totloop; j++, ml++) { + if (ml->v == vertex.i) { + pbvh_pmap_to_edges_add(pbvh, + vertex, + r_edges, + r_edges_size, + r_heap_alloc, + ME_POLY_LOOP_PREV(pbvh->mloop, mp, j)->e, + map->indices[i], + &len, + r_polys); + pbvh_pmap_to_edges_add(pbvh, + vertex, + r_edges, + r_edges_size, + r_heap_alloc, + ml->e, + map->indices[i], + &len, + r_polys); + } + } + } + + *r_edges_size = len; +} + +void BKE_pbvh_set_vemap(PBVH *pbvh, MeshElemMap *vemap) +{ + pbvh->vemap = vemap; +} + +void BKE_pbvh_get_vert_face_areas(PBVH *pbvh, PBVHVertRef vertex, float *r_areas, int valence) +{ + const int cur_i = pbvh->face_area_i; + + switch (BKE_pbvh_type(pbvh)) { + case PBVH_FACES: { + int *edges = BLI_array_alloca(edges, 16); + int *polys = BLI_array_alloca(polys, 32); + bool heap_alloc = false; + int len = 16; + + BKE_pbvh_pmap_to_edges(pbvh, vertex, &edges, &len, &heap_alloc, &polys); + len = MIN2(len, valence); + + if (pbvh->vemap) { + /* sort poly references by vemap edge ordering */ + MeshElemMap *emap = pbvh->vemap + vertex.i; + + int *polys_old = BLI_array_alloca(polys, len * 2); + memcpy((void *)polys_old, (void *)polys, sizeof(int) * len * 2); + + /* note that wire edges will break this, but + should only result in incorrect weights + and isn't worth fixing */ + + for (int i = 0; i < len; i++) { + for (int j = 0; j < len; j++) { + if (emap->indices[i] == edges[j]) { + polys[i * 2] = polys_old[j * 2]; + polys[i * 2 + 1] = polys_old[j * 2 + 1]; + } + } + } + } + for (int i = 0; i < len; i++) { + r_areas[i] = pbvh->face_areas[polys[i * 2] * 2 + cur_i]; + + if (polys[i * 2 + 1] != -1) { + r_areas[i] += pbvh->face_areas[polys[i * 2 + 1] * 2 + cur_i]; + r_areas[i] *= 0.5f; + } + } + + if (heap_alloc) { + MEM_freeN(edges); + MEM_freeN(polys); + } + + break; + } + case PBVH_BMESH: { + BMVert *v = (BMVert *)vertex.i; + BMEdge *e = v->e; + + if (!e) { + for (int i = 0; i < valence; i++) { + r_areas[i] = 1.0f; + } + + return; + } + + const int cd_face_area = pbvh->cd_face_area; + int j = 0; + + do { + float w = 0.0f; + + if (!e->l) { + w = 0.0f; + } + else { + float *a1 = (float *)BM_ELEM_CD_GET_VOID_P(e->l->f, cd_face_area); + float *a2 = (float *)BM_ELEM_CD_GET_VOID_P(e->l->radial_next->f, cd_face_area); + + w += a1[cur_i] * 0.5f; + w += a2[cur_i] * 0.5f; + } + + if (j >= valence) { + printf("%s: error, corrupt edge cycle\n", __func__); + break; + } + + r_areas[j++] = w; + + e = v == e->v1 ? 
e->v1_disk_link.next : e->v2_disk_link.next; + } while (e != v->e); + + for (; j < valence; j++) { + r_areas[j] = 1.0f; + } + + break; + } + + case PBVH_GRIDS: { /* estimate from edge lengths */ + int index = (int)vertex.i; + + const CCGKey *key = BKE_pbvh_get_grid_key(pbvh); + const int grid_index = index / key->grid_area; + const int vertex_index = index - grid_index * key->grid_area; + + SubdivCCGCoord coord = {.grid_index = grid_index, + .x = vertex_index % key->grid_size, + .y = vertex_index / key->grid_size}; + + SubdivCCGNeighbors neighbors; + BKE_subdiv_ccg_neighbor_coords_get(pbvh->subdiv_ccg, &coord, false, &neighbors); + + float *co1 = CCG_elem_co(key, CCG_elem_offset(key, pbvh->grids[grid_index], vertex_index)); + float totw = 0.0f; + int i = 0; + + for (i = 0; i < neighbors.size; i++) { + SubdivCCGCoord *coord2 = neighbors.coords + i; + + int vertex_index2 = coord2->y * key->grid_size + coord2->x; + + float *co2 = CCG_elem_co( + key, CCG_elem_offset(key, pbvh->grids[coord2->grid_index], vertex_index2)); + float w = len_v3v3(co1, co2); + + r_areas[i] = w; + totw += w; + } + + if (neighbors.size != valence) { + printf("%s: error!\n", __func__); + } + if (totw < 0.000001f) { + for (int i = 0; i < neighbors.size; i++) { + r_areas[i] = 1.0f; + } + } + + for (; i < valence; i++) { + r_areas[i] = 1.0f; + } + + break; + } + } +} + +void BKE_pbvh_set_stroke_id(PBVH *pbvh, int stroke_id) +{ + pbvh->stroke_id = stroke_id; +} + +static void pbvh_boundaries_flag_update(PBVH *pbvh) +{ + + if (pbvh->header.bm) { + BMVert *v; + BMIter iter; + + BM_ITER_MESH (v, &iter, pbvh->header.bm, BM_VERTS_OF_MESH) { + pbvh_boundary_update_bmesh(pbvh, v); + } + } + else { + int totvert = pbvh->totvert; + + if (BKE_pbvh_type(pbvh) == PBVH_GRIDS) { + totvert = BKE_pbvh_get_grid_num_verts(pbvh); + } + + for (int i = 0; i < totvert; i++) { + pbvh->boundary_flags[i] |= SCULPT_BOUNDARY_NEEDS_UPDATE; + } + } +} + +void BKE_pbvh_set_symmetry(PBVH *pbvh, int symmetry, int boundary_symmetry) +{ + if (symmetry == pbvh->symmetry && boundary_symmetry == pbvh->boundary_symmetry) { + return; + } + + pbvh->symmetry = symmetry; + pbvh->boundary_symmetry = boundary_symmetry; + + pbvh_boundaries_flag_update(pbvh); +} + +void BKE_pbvh_set_sculpt_verts(PBVH *pbvh, struct MSculptVert *msculptverts) +{ + pbvh->msculptverts = msculptverts; +} + +void BKE_pbvh_update_vert_boundary_grids(PBVH *pbvh, + struct SubdivCCG *subdiv_ccg, + PBVHVertRef vertex) +{ + MSculptVert *mv = pbvh->msculptverts + vertex.i; + + int *flags = pbvh->boundary_flags + vertex.i; + *flags = 0; + + /* TODO: finish this function. */ + + int index = (int)vertex.i; + + /* TODO: optimize this. We could fill #SculptVertexNeighborIter directly, + * maybe provide coordinate and mask pointers directly rather than converting + * back and forth between #CCGElem and global index. 
*/ + const CCGKey *key = BKE_pbvh_get_grid_key(pbvh); + const int grid_index = index / key->grid_area; + const int vertex_index = index - grid_index * key->grid_area; + + SubdivCCGCoord coord = {.grid_index = grid_index, + .x = vertex_index % key->grid_size, + .y = vertex_index / key->grid_size}; + + SubdivCCGNeighbors neighbors; + BKE_subdiv_ccg_neighbor_coords_get(subdiv_ccg, &coord, false, &neighbors); + + mv->valence = neighbors.size; + mv->flag &= ~SCULPTVERT_NEED_VALENCE; +} + +void BKE_pbvh_update_vert_boundary_faces(int *boundary_flags, + const int *face_sets, + const bool *hide_poly, + const float (*vert_positions)[3], + const MEdge *medge, + const MLoop *mloop, + const MPoly *mpoly, + MSculptVert *msculptverts, + const MeshElemMap *pmap, + PBVHVertRef vertex, + const bool *sharp_edges) +{ + MSculptVert *mv = msculptverts + vertex.i; + const MeshElemMap *vert_map = &pmap[vertex.i]; + + mv->flag &= ~SCULPTVERT_VERT_FSET_HIDDEN; + + int last_fset = -1; + int last_fset2 = -1; + + int *flags = boundary_flags + vertex.i; + *flags = 0; + + int totsharp = 0, totseam = 0; + int visible = false; + + for (int i = 0; i < vert_map->count; i++) { + int f_i = vert_map->indices[i]; + + const MPoly *mp = mpoly + f_i; + const MLoop *ml = mloop + mp->loopstart; + int j = 0; + + for (j = 0; j < mp->totloop; j++, ml++) { + if (ml->v == (int)vertex.i) { + break; + } + } + + if (j < mp->totloop) { + const MEdge *me = medge + ml->e; + if (sharp_edges && sharp_edges[ml->e]) { + *flags |= SCULPT_BOUNDARY_SHARP; + totsharp++; + } + + if (me->flag & ME_SEAM) { + *flags |= SCULPT_BOUNDARY_SEAM; + totseam++; + } + } + + int fset = face_sets ? abs(face_sets[f_i]) : 1; + + if (!hide_poly || !hide_poly[f_i]) { + visible = true; + } + + if (i > 0 && fset != last_fset) { + *flags |= SCULPT_BOUNDARY_FACE_SET; + + if (i > 1 && last_fset2 != last_fset && last_fset != -1 && last_fset2 != -1 && fset != -1 && + last_fset2 != fset) { + *flags |= SCULPT_CORNER_FACE_SET; + } + } + + if (i > 0 && last_fset != fset) { + last_fset2 = last_fset; + } + + last_fset = fset; + } + + if (!visible) { + mv->flag |= SCULPTVERT_VERT_FSET_HIDDEN; + } + + if (totsharp > 2) { + *flags |= SCULPT_CORNER_SHARP; + } + + if (totseam > 2) { + *flags |= SCULPT_CORNER_SEAM; + } +} + +void BKE_pbvh_ignore_uvs_set(PBVH *pbvh, bool value) +{ + if (!!(pbvh->flags & PBVH_IGNORE_UVS) == value) { + return; // no change + } + + if (value) { + pbvh->flags |= PBVH_IGNORE_UVS; + } + else { + pbvh->flags &= ~PBVH_IGNORE_UVS; + } + + pbvh_boundaries_flag_update(pbvh); +} + +bool BKE_pbvh_cache(const struct Mesh *me, PBVH *pbvh) +{ + memset(&pbvh->cached_data, 0, sizeof(pbvh->cached_data)); + + if (pbvh->invalid) { + printf("invalid pbvh!\n"); + return false; + } + + switch (pbvh->header.type) { + case PBVH_BMESH: + if (!pbvh->header.bm) { + return false; + } + + pbvh->cached_data.bm = pbvh->header.bm; + + pbvh->cached_data.vdata = pbvh->header.bm->vdata; + pbvh->cached_data.edata = pbvh->header.bm->edata; + pbvh->cached_data.ldata = pbvh->header.bm->ldata; + pbvh->cached_data.pdata = pbvh->header.bm->pdata; + + pbvh->cached_data.totvert = pbvh->header.bm->totvert; + pbvh->cached_data.totedge = pbvh->header.bm->totedge; + pbvh->cached_data.totloop = pbvh->header.bm->totloop; + pbvh->cached_data.totpoly = pbvh->header.bm->totface; + break; + case PBVH_GRIDS: { + pbvh->cached_data.vdata = me->vdata; + pbvh->cached_data.edata = me->edata; + pbvh->cached_data.ldata = me->ldata; + pbvh->cached_data.pdata = me->pdata; + + int grid_side = pbvh->gridkey.grid_size; + + 
pbvh->cached_data.totvert = pbvh->totgrid * grid_side * grid_side; + pbvh->cached_data.totedge = me->totedge; + pbvh->cached_data.totloop = me->totloop; + pbvh->cached_data.totpoly = pbvh->totgrid * (grid_side - 1) * (grid_side - 1); + break; + } + case PBVH_FACES: + pbvh->cached_data.vdata = me->vdata; + pbvh->cached_data.edata = me->edata; + pbvh->cached_data.ldata = me->ldata; + pbvh->cached_data.pdata = me->pdata; + + pbvh->cached_data.totvert = me->totvert; + pbvh->cached_data.totedge = me->totedge; + pbvh->cached_data.totloop = me->totloop; + pbvh->cached_data.totpoly = me->totpoly; + break; + } + + return true; +} + +static bool customdata_is_same(const CustomData *a, const CustomData *b) +{ + return memcmp(a, b, sizeof(CustomData)) == 0; +} + +bool BKE_pbvh_cache_is_valid(const struct Object *ob, + const struct Mesh *me, + const PBVH *pbvh, + int pbvh_type) +{ + if (pbvh->invalid) { + printf("pbvh invalid!\n"); + return false; + } + + if (pbvh->header.type != pbvh_type) { + return false; + } + + bool ok = true; + int totvert = 0, totedge = 0, totloop = 0, totpoly = 0; + const CustomData *vdata, *edata, *ldata, *pdata; + + MultiresModifierData *mmd = nullptr; + + LISTBASE_FOREACH (ModifierData *, md, &ob->modifiers) { + if (md->type == eModifierType_Multires) { + mmd = (MultiresModifierData *)md; + break; + } + } + + if (mmd && (mmd->flags & eModifierMode_Realtime)) { + // return false; + } + + switch (pbvh_type) { + case PBVH_BMESH: + if (!pbvh->header.bm || pbvh->header.bm != pbvh->cached_data.bm) { + return false; + } + + totvert = pbvh->header.bm->totvert; + totedge = pbvh->header.bm->totedge; + totloop = pbvh->header.bm->totloop; + totpoly = pbvh->header.bm->totface; + + vdata = &pbvh->header.bm->vdata; + edata = &pbvh->header.bm->edata; + ldata = &pbvh->header.bm->ldata; + pdata = &pbvh->header.bm->pdata; + break; + case PBVH_FACES: + totvert = me->totvert; + totedge = me->totedge; + totloop = me->totloop; + totpoly = me->totpoly; + + vdata = &me->vdata; + edata = &me->edata; + ldata = &me->ldata; + pdata = &me->pdata; + break; + case PBVH_GRIDS: { + if (!mmd) { + return false; + } + + int grid_side = 1 + (1 << (mmd->sculptlvl - 1)); + + totvert = me->totloop * grid_side * grid_side; + totedge = me->totedge; + totloop = me->totloop; + totpoly = me->totloop * (grid_side - 1) * (grid_side - 1); + + vdata = &me->vdata; + edata = &me->edata; + ldata = &me->ldata; + pdata = &me->pdata; + break; + } + } + + ok = ok && totvert == pbvh->cached_data.totvert; + ok = ok && totedge == pbvh->cached_data.totedge; + ok = ok && totloop == pbvh->cached_data.totloop; + ok = ok && totpoly == pbvh->cached_data.totpoly; + + ok = ok && customdata_is_same(vdata, &pbvh->cached_data.vdata); + ok = ok && customdata_is_same(edata, &pbvh->cached_data.edata); + ok = ok && customdata_is_same(ldata, &pbvh->cached_data.ldata); + ok = ok && customdata_is_same(pdata, &pbvh->cached_data.pdata); + + return ok; +} + +GHash *cached_pbvhs = nullptr; +static void pbvh_clear_cached_pbvhs(PBVH *exclude) +{ + Vector pbvhs; + + GHashIterator iter; + GHASH_ITER (iter, cached_pbvhs) { + PBVH *pbvh = (PBVH *)BLI_ghashIterator_getValue(&iter); + + if (pbvh != exclude) { + pbvhs.append(pbvh); + } + } + + for (int i = 0; i < pbvhs.size(); i++) { + PBVH *pbvh = pbvhs[i]; + + if (pbvh->header.bm) { + BM_mesh_free(pbvh->header.bm); + } + + BKE_pbvh_free(pbvh); + } + + BLI_ghash_clear(cached_pbvhs, MEM_freeN, nullptr); +} + +void BKE_pbvh_clear_cache(PBVH *preserve) +{ + pbvh_clear_cached_pbvhs(nullptr); +} + +#define 
PBVH_CACHE_KEY_SIZE 1024 + +static void pbvh_make_cached_key(Object *ob, char out[PBVH_CACHE_KEY_SIZE]) +{ + sprintf(out, "%s:%p", ob->id.name, G.main); +} + +void BKE_pbvh_invalidate_cache(Object *ob) +{ + Object *ob_orig = DEG_get_original_object(ob); + + char key[PBVH_CACHE_KEY_SIZE]; + pbvh_make_cached_key(ob_orig, key); + +#ifdef WITH_PBVH_CACHE + PBVH *pbvh = BLI_ghash_lookup(cached_pbvhs, key); + + if (pbvh) { + BKE_pbvh_cache_remove(pbvh); + } +#endif +} + +PBVH *BKE_pbvh_get_or_free_cached(Object *ob, Mesh *me, PBVHType pbvh_type) +{ + Object *ob_orig = DEG_get_original_object(ob); + + char key[PBVH_CACHE_KEY_SIZE]; + pbvh_make_cached_key(ob_orig, key); + + PBVH *pbvh = (PBVH *)BLI_ghash_lookup(cached_pbvhs, key); + + if (!pbvh) { + return nullptr; + } + + if (BKE_pbvh_cache_is_valid(ob, me, pbvh, pbvh_type)) { + switch (pbvh_type) { + case PBVH_BMESH: + break; + case PBVH_FACES: + pbvh->vert_normals = BKE_mesh_vertex_normals_for_write(me); + case PBVH_GRIDS: + if (!pbvh->deformed) { + pbvh->vert_positions = BKE_mesh_vert_positions_for_write(me); + } + + pbvh->mloop = me->mloop; + pbvh->mpoly = me->mpoly; + pbvh->vdata = &me->vdata; + pbvh->ldata = &me->ldata; + pbvh->pdata = &me->pdata; + + pbvh->face_sets = (int *)CustomData_get_layer_named( + &me->pdata, CD_PROP_INT32, ".sculpt_face_set"); + + break; + } + + BKE_pbvh_update_active_vcol(pbvh, me); + + return pbvh; + } + + pbvh_clear_cached_pbvhs(nullptr); + return nullptr; +} + +void BKE_pbvh_set_cached(Object *ob, PBVH *pbvh) +{ + if (!pbvh) { + return; + } + + Object *ob_orig = DEG_get_original_object(ob); + + char key[PBVH_CACHE_KEY_SIZE]; + pbvh_make_cached_key(ob_orig, key); + + PBVH *exist = (PBVH *)BLI_ghash_lookup(cached_pbvhs, key); + + if (pbvh->invalid) { + printf("pbvh invalid!"); + } + + if (exist && exist->invalid) { + printf("pbvh invalid!"); + } + + if (!exist || exist != pbvh) { + pbvh_clear_cached_pbvhs(pbvh); + + char key[PBVH_CACHE_KEY_SIZE]; + pbvh_make_cached_key(ob_orig, key); + + BLI_ghash_insert(cached_pbvhs, BLI_strdup(key), pbvh); + } + +#ifdef WITH_PBVH_CACHE + BKE_pbvh_cache(BKE_object_get_original_mesh(ob_orig), pbvh); +#endif +} + +struct SculptPMap *BKE_pbvh_get_pmap(PBVH *pbvh) +{ + return pbvh->pmap; +} + +void BKE_pbvh_set_pmap(PBVH *pbvh, SculptPMap *pmap) +{ + if (pbvh->pmap != pmap) { + BKE_pbvh_pmap_aquire(pmap); + } + + pbvh->pmap = pmap; +} + +/** Does not free pbvh itself. 
*/ +void BKE_pbvh_cache_remove(PBVH *pbvh) +{ + Vector keys; + + GHashIterator iter; + GHASH_ITER (iter, cached_pbvhs) { + PBVH *pbvh2 = (PBVH *)BLI_ghashIterator_getValue(&iter); + + if (pbvh2 == pbvh) { + keys.append((char *)BLI_ghashIterator_getKey(&iter)); + break; + } + } + + for (int i = 0; i < keys.size(); i++) { + BLI_ghash_remove(cached_pbvhs, keys[i], MEM_freeN, nullptr); + } +} + +void BKE_pbvh_set_bmesh(PBVH *pbvh, BMesh *bm) +{ + pbvh->header.bm = bm; +} + +void BKE_pbvh_free_bmesh(PBVH *pbvh, BMesh *bm) +{ + if (pbvh) { + pbvh->header.bm = nullptr; + } + + BM_mesh_free(bm); + + GHashIterator iter; + Vector keys; + Vector pbvhs; + + GHASH_ITER (iter, cached_pbvhs) { + PBVH *pbvh2 = (PBVH *)BLI_ghashIterator_getValue(&iter); + + if (pbvh2->header.bm == bm) { + pbvh2->header.bm = nullptr; + + if (pbvh2 != pbvh) { + bool ok = true; + + for (int i = 0; i < pbvhs.size(); i++) { + if (pbvhs[i] == pbvh2) { + ok = false; + } + } + + if (ok) { + pbvhs.append(pbvh2); + } + } + + keys.append((char *)BLI_ghashIterator_getKey(&iter)); + } + } + + for (int i = 0; i < keys.size(); i++) { + BLI_ghash_remove(cached_pbvhs, keys[i], MEM_freeN, nullptr); + } + + for (int i = 0; i < pbvhs.size(); i++) { + BKE_pbvh_free(pbvhs[i]); + } +} + +BMLog *BKE_pbvh_get_bm_log(PBVH *pbvh) +{ + return pbvh->bm_log; +} + +void BKE_pbvh_system_init() +{ + cached_pbvhs = BLI_ghash_str_new("pbvh cache ghash"); +} + +void BKE_pbvh_system_exit() +{ + pbvh_clear_cached_pbvhs(nullptr); + BLI_ghash_free(cached_pbvhs, nullptr, nullptr); +} + +SculptPMap *BKE_pbvh_make_pmap(const struct Mesh *me) +{ + SculptPMap *pmap = (SculptPMap *)MEM_callocN(sizeof(*pmap), "SculptPMap"); + + BKE_mesh_vert_poly_map_create(&pmap->pmap, + &pmap->pmap_mem, + BKE_mesh_vert_positions(me), + BKE_mesh_edges(me), + BKE_mesh_polys(me), + BKE_mesh_loops(me), + me->totvert, + me->totpoly, + me->totloop, + false); + + pmap->refcount = 1; + + return pmap; +} + +void BKE_pbvh_pmap_aquire(SculptPMap *pmap) +{ + pmap->refcount++; +} + +bool BKE_pbvh_pmap_release(SculptPMap *pmap) +{ + if (!pmap) { + return false; + } + + pmap->refcount--; + + // if (pmap->refcount < 0) { + // printf("%s: error!\n", __func__); + //} + + if (1 && pmap->refcount == 0) { + MEM_SAFE_FREE(pmap->pmap); + MEM_SAFE_FREE(pmap->pmap_mem); + MEM_SAFE_FREE(pmap); + + return true; + } + + return false; +} + +bool BKE_pbvh_is_drawing(const PBVH *pbvh) +{ + return pbvh->is_drawing; +} + +bool BKE_pbvh_draw_cache_invalid(const PBVH *pbvh) +{ + return pbvh->draw_cache_invalid; +} + +void BKE_pbvh_is_drawing_set(PBVH *pbvh, bool val) +{ + pbvh->is_drawing = val; +} + +void BKE_pbvh_node_num_loops(PBVH *pbvh, PBVHNode *node, int *r_totloop) +{ + UNUSED_VARS(pbvh); + BLI_assert(BKE_pbvh_type(pbvh) == PBVH_FACES); + + if (r_totloop) { + *r_totloop = node->loop_indices_num; + } +} + +void BKE_pbvh_update_active_vcol(PBVH *pbvh, const Mesh *mesh) +{ + CustomDataLayer *last_layer = pbvh->color_layer; + + Mesh me_query; + const CustomData *vdata, *ldata; + + if (pbvh->header.type == PBVH_BMESH && pbvh->header.bm) { + vdata = &pbvh->header.bm->vdata; + ldata = &pbvh->header.bm->ldata; + } + else { + vdata = &mesh->vdata; + ldata = &mesh->ldata; + } + + BKE_id_attribute_copy_domains_temp(ID_ME, vdata, nullptr, ldata, nullptr, nullptr, &me_query.id); + me_query.active_color_attribute = mesh->active_color_attribute; + + BKE_pbvh_get_color_layer(&me_query, &pbvh->color_layer, &pbvh->color_domain); + + if (pbvh->color_layer && pbvh->header.bm) { + pbvh->cd_vcol_offset = pbvh->color_layer->offset; 
+ } + else { + pbvh->cd_vcol_offset = -1; + } + + if (pbvh->color_layer != last_layer) { + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = pbvh->nodes + i; + + if (node->flag & PBVH_Leaf) { + BKE_pbvh_node_mark_update_color(node); + } + } + } +} + +void BKE_pbvh_ensure_node_loops(PBVH *pbvh) +{ + BLI_assert(BKE_pbvh_type(pbvh) == PBVH_FACES); + + int totloop = 0; + + /* Check if nodes already have loop indices. */ + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = pbvh->nodes + i; + + if (!(node->flag & PBVH_Leaf)) { + continue; + } + + if (node->loop_indices) { + return; + } + + totloop += node->totprim * 3; + } + + BLI_bitmap *visit = BLI_BITMAP_NEW(totloop, __func__); + + /* Create loop indices from node loop triangles. */ + for (int i = 0; i < pbvh->totnode; i++) { + PBVHNode *node = pbvh->nodes + i; + + if (!(node->flag & PBVH_Leaf)) { + continue; + } + + node->loop_indices = (int *)MEM_malloc_arrayN(node->totprim * 3, sizeof(int), __func__); + node->loop_indices_num = 0; + + for (int j = 0; j < (int)node->totprim; j++) { + const MLoopTri *mlt = pbvh->looptri + node->prim_indices[j]; + + for (int k = 0; k < 3; k++) { + if (!BLI_BITMAP_TEST(visit, mlt->tri[k])) { + node->loop_indices[node->loop_indices_num++] = mlt->tri[k]; + BLI_BITMAP_ENABLE(visit, mlt->tri[k]); + } + } + } + } + + MEM_SAFE_FREE(visit); +} + +bool BKE_pbvh_get_origvert( + PBVH *pbvh, PBVHVertRef vertex, const float **r_co, float **r_no, float **r_color) +{ + MSculptVert *mv; + + switch (pbvh->header.type) { + case PBVH_FACES: + case PBVH_GRIDS: + mv = pbvh->msculptverts + vertex.i; + + if (mv->stroke_id != pbvh->stroke_id) { + mv->stroke_id = pbvh->stroke_id; + float *mask = nullptr; + + if (pbvh->header.type == PBVH_FACES) { + copy_v3_v3(mv->origco, pbvh->vert_positions[vertex.i]); + copy_v3_v3(mv->origno, pbvh->vert_normals[vertex.i]); + mask = (float *)CustomData_get_layer(pbvh->vdata, CD_PAINT_MASK); + + if (mask) { + mask += vertex.i; + } + } + else { + const CCGKey *key = BKE_pbvh_get_grid_key(pbvh); + const int grid_index = vertex.i / key->grid_area; + const int vertex_index = vertex.i - grid_index * key->grid_area; + CCGElem *elem = BKE_pbvh_get_grids(pbvh)[grid_index]; + + copy_v3_v3(mv->origco, CCG_elem_co(key, CCG_elem_offset(key, elem, vertex_index))); + copy_v3_v3(mv->origno, CCG_elem_no(key, CCG_elem_offset(key, elem, vertex_index))); + mask = key->has_mask ? 
CCG_elem_mask(key, CCG_elem_offset(key, elem, vertex_index)) : + nullptr; + } + + if (mask) { + mv->origmask = (ushort)(*mask * 65535.0f); + } + + if (pbvh->color_layer) { + BKE_pbvh_vertex_color_get(pbvh, vertex, mv->origcolor); + } + } + break; + case PBVH_BMESH: { + BMVert *v = (BMVert *)vertex.i; + mv = BKE_PBVH_SCULPTVERT(pbvh->cd_sculpt_vert, v); + + if (mv->stroke_id != pbvh->stroke_id) { + mv->stroke_id = pbvh->stroke_id; + + copy_v3_v3(mv->origco, v->co); + copy_v3_v3(mv->origno, v->no); + + if (pbvh->cd_vert_mask_offset != -1) { + mv->origmask = (short)(BM_ELEM_CD_GET_FLOAT(v, pbvh->cd_vert_mask_offset) * 65535.0f); + } + + if (pbvh->cd_vcol_offset != -1) { + BKE_pbvh_vertex_color_get(pbvh, vertex, mv->origcolor); + } + } + break; + } + } + + if (r_co) { + *r_co = mv->origco; + } + + if (r_no) { + *r_no = mv->origno; + } + + if (r_color) { + *r_color = mv->origcolor; + } + + return true; +} + +int BKE_pbvh_debug_draw_gen_get(PBVHNode *node) +{ + return node->debug_draw_gen; +} + +void BKE_pbvh_set_boundary_flags(PBVH *pbvh, int *boundary_flags) +{ + pbvh->boundary_flags = boundary_flags; +} + +static void pbvh_face_iter_verts_reserve(PBVHFaceIter *fd, int verts_num) +{ + if (verts_num >= fd->verts_size_) { + fd->verts_size_ = (verts_num + 1) << 2; + + if (fd->verts != fd->verts_reserved_) { + MEM_SAFE_FREE(fd->verts); + } + + fd->verts = (PBVHVertRef *)MEM_malloc_arrayN(fd->verts_size_, sizeof(void *), __func__); + } + + fd->verts_num = verts_num; +} + +BLI_INLINE int face_iter_prim_to_face(PBVHFaceIter *fd, int prim_index) +{ + if (fd->subdiv_ccg_) { + return BKE_subdiv_ccg_grid_to_face_index(fd->subdiv_ccg_, prim_index); + } + + return fd->looptri_[prim_index].poly; +} + +static void pbvh_face_iter_step(PBVHFaceIter *fd, bool do_step) +{ + if (do_step) { + fd->i++; + } + + switch (fd->pbvh_type_) { + case PBVH_BMESH: { + if (do_step) { + fd->bm_faces_iter_++; + + while (fd->bm_faces_iter_ < fd->bm_faces_->cur && + !fd->bm_faces_->elems[fd->bm_faces_iter_]) { + fd->bm_faces_iter_++; + } + + if (fd->bm_faces_iter_ >= fd->bm_faces_->cur) { + return; + } + } + + BMFace *f = (BMFace *)fd->bm_faces_->elems[fd->bm_faces_iter_]; + fd->face.i = (intptr_t)f; + fd->index = f->head.index; + + if (fd->cd_face_set_ != -1) { + fd->face_set = (int *)BM_ELEM_CD_GET_VOID_P(f, fd->cd_face_set_); + } + + if (fd->cd_hide_poly_ != -1) { + fd->hide = (bool *)BM_ELEM_CD_GET_VOID_P(f, fd->cd_hide_poly_); + } + + pbvh_face_iter_verts_reserve(fd, f->len); + int vertex_i = 0; + + BMLoop *l = f->l_first; + do { + fd->verts[vertex_i++].i = (intptr_t)l->v; + } while ((l = l->next) != f->l_first); + + break; + } + case PBVH_GRIDS: + case PBVH_FACES: { + int face_index = 0; + + if (do_step) { + fd->prim_index_++; + + while (fd->prim_index_ < fd->node_->totprim) { + face_index = face_iter_prim_to_face(fd, fd->node_->prim_indices[fd->prim_index_]); + + if (face_index != fd->last_face_index_) { + break; + } + + fd->prim_index_++; + } + } + else if (fd->prim_index_ < fd->node_->totprim) { + face_index = face_iter_prim_to_face(fd, fd->node_->prim_indices[fd->prim_index_]); + } + + if (fd->prim_index_ >= fd->node_->totprim) { + return; + } + + fd->last_face_index_ = face_index; + const MPoly *mp = fd->mpoly_ + face_index; + + fd->face.i = fd->index = face_index; + + if (fd->face_sets_) { + fd->face_set = fd->face_sets_ + face_index; + } + if (fd->hide_poly_) { + fd->hide = fd->hide_poly_ + face_index; + } + + pbvh_face_iter_verts_reserve(fd, mp->totloop); + + const MLoop *ml = fd->mloop_ + mp->loopstart; + const int 
grid_area = fd->subdiv_key_.grid_area; + + for (int i = 0; i < mp->totloop; i++, ml++) { + if (fd->pbvh_type_ == PBVH_GRIDS) { + /* Grid corners. */ + fd->verts[i].i = (mp->loopstart + i) * grid_area + grid_area - 1; + } + else { + fd->verts[i].i = ml->v; + } + } + break; + } + } +} + +void BKE_pbvh_face_iter_step(PBVHFaceIter *fd) +{ + pbvh_face_iter_step(fd, true); +} + +void BKE_pbvh_face_iter_init(PBVH *pbvh, PBVHNode *node, PBVHFaceIter *fd) +{ + memset(fd, 0, sizeof(*fd)); + + fd->node_ = node; + fd->pbvh_type_ = BKE_pbvh_type(pbvh); + fd->verts = fd->verts_reserved_; + fd->verts_size_ = PBVH_FACE_ITER_VERTS_RESERVED; + + switch (BKE_pbvh_type(pbvh)) { + case PBVH_GRIDS: + fd->subdiv_ccg_ = pbvh->subdiv_ccg; + fd->subdiv_key_ = pbvh->gridkey; + ATTR_FALLTHROUGH; + case PBVH_FACES: + fd->mpoly_ = pbvh->mpoly; + fd->mloop_ = pbvh->mloop; + fd->looptri_ = pbvh->looptri; + fd->hide_poly_ = pbvh->hide_poly; + fd->face_sets_ = pbvh->face_sets; + fd->last_face_index_ = -1; + + break; + case PBVH_BMESH: + fd->bm = pbvh->header.bm; + fd->cd_face_set_ = CustomData_get_offset_named( + &pbvh->header.bm->pdata, CD_PROP_INT32, ".sculpt_face_set"); + fd->cd_hide_poly_ = CustomData_get_offset_named( + &pbvh->header.bm->pdata, CD_PROP_INT32, ".hide_poly"); + + fd->bm_faces_iter_ = 0; + fd->bm_faces_ = node->bm_faces; + break; + } + + if (!BKE_pbvh_face_iter_done(fd)) { + pbvh_face_iter_step(fd, false); + } +} + +void BKE_pbvh_face_iter_finish(PBVHFaceIter *fd) +{ + if (fd->verts != fd->verts_reserved_) { + MEM_SAFE_FREE(fd->verts); + } +} + +bool BKE_pbvh_face_iter_done(PBVHFaceIter *fd) +{ + switch (fd->pbvh_type_) { + case PBVH_FACES: + case PBVH_GRIDS: + return fd->prim_index_ >= fd->node_->totprim; + case PBVH_BMESH: + return fd->bm_faces_iter_ >= fd->bm_faces_->cur; default: BLI_assert_unreachable(); - break; + return true; } } -template void to_float(const T &src, float dst[4]); - -template<> void to_float(const MLoopCol &src, float dst[4]) +void BKE_pbvh_sync_visibility_from_verts(PBVH *pbvh, Mesh *mesh) { - rgba_uchar_to_float(dst, reinterpret_cast(&src)); - srgb_to_linearrgb_v3_v3(dst, dst); -} -template<> void to_float(const MPropCol &src, float dst[4]) -{ - copy_v4_v4(dst, src.color); -} + switch (pbvh->header.type) { + case PBVH_FACES: { + BKE_mesh_flush_hidden_from_verts(mesh); + BKE_pbvh_update_hide_attributes_from_mesh(pbvh); + break; + } + case PBVH_BMESH: { + BMIter iter; + BMVert *v; + BMEdge *e; + BMFace *f; -template void from_float(const float src[4], T &dst); + BM_ITER_MESH (f, &iter, pbvh->header.bm, BM_FACES_OF_MESH) { + BM_elem_flag_disable(f, BM_ELEM_HIDDEN); + } -template<> void from_float(const float src[4], MLoopCol &dst) -{ - float temp[4]; - linearrgb_to_srgb_v3_v3(temp, src); - temp[3] = src[3]; - rgba_float_to_uchar(reinterpret_cast(&dst), temp); -} -template<> void from_float(const float src[4], MPropCol &dst) -{ - copy_v4_v4(dst.color, src); -} + BM_ITER_MESH (e, &iter, pbvh->header.bm, BM_EDGES_OF_MESH) { + BM_elem_flag_disable(e, BM_ELEM_HIDDEN); + } -template -static void pbvh_vertex_color_get_faces(const PBVH &pbvh, PBVHVertRef vertex, float r_color[4]) -{ - int index = vertex.i; + BM_ITER_MESH (v, &iter, pbvh->header.bm, BM_VERTS_OF_MESH) { + if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) { + continue; + } + BMIter iter_l; + BMLoop *l; - if (pbvh.color_domain == ATTR_DOMAIN_CORNER) { - const MeshElemMap &melem = pbvh.pmap->pmap[vertex.i]; - - int count = 0; - zero_v4(r_color); - for (const int i_poly : Span(melem.indices, melem.count)) { - const MPoly &mp = 
pbvh.mpoly[i_poly]; - Span colors{static_cast(pbvh.color_layer->data) + mp.loopstart, mp.totloop}; - Span loops{pbvh.mloop + mp.loopstart, mp.totloop}; - - for (const int i_loop : IndexRange(mp.totloop)) { - if (loops[i_loop].v == vertex.i) { - float temp[4]; - to_float(colors[i_loop], temp); - - add_v4_v4(r_color, temp); - count++; + BM_ITER_ELEM (l, &iter_l, v, BM_LOOPS_OF_VERT) { + BM_elem_flag_enable(l->e, BM_ELEM_HIDDEN); + BM_elem_flag_enable(l->f, BM_ELEM_HIDDEN); } } - } - - if (count) { - mul_v4_fl(r_color, 1.0f / float(count)); - } - } - else { - to_float(static_cast(pbvh.color_layer->data)[vertex.i], r_color); - } -} - -template -static void pbvh_vertex_color_get_bmesh(const PBVH &pbvh, PBVHVertRef vertex, float r_color[4]) -{ - BMVert *v = reinterpret_cast(vertex.i); - - if (pbvh.color_domain == ATTR_DOMAIN_CORNER) { - BMIter iter; - BMLoop *l; - - int count = 0; - zero_v4(r_color); - - BM_ITER_ELEM (l, &iter, v, BM_LOOPS_OF_VERT) { - float temp[4]; - - T *ptr = static_cast(BM_ELEM_CD_GET_VOID_P(l, pbvh.cd_vcol_offset)); - to_float(*ptr, temp); - - add_v4_v4(r_color, temp); - count++; - } - - if (count) { - mul_v4_fl(r_color, 1.0f / (float)count); - } - } - else { - T *ptr = static_cast(BM_ELEM_CD_GET_VOID_P(v, pbvh.cd_vcol_offset)); - to_float(*ptr, r_color); - } -} - -template -static void pbvh_vertex_color_get(const PBVH &pbvh, PBVHVertRef vertex, float r_color[4]) -{ - switch (pbvh.header.type) { - case PBVH_FACES: - pbvh_vertex_color_get_faces(pbvh, vertex, r_color); break; - case PBVH_BMESH: - pbvh_vertex_color_get_bmesh(pbvh, vertex, r_color); - break; - case PBVH_GRIDS: - break; - } -} + } + case PBVH_GRIDS: { + const MPoly *mp = BKE_mesh_polys(mesh); + CCGKey key = pbvh->gridkey; -template -static void pbvh_vertex_color_set_faces(PBVH &pbvh, PBVHVertRef vertex, const float color[4]) -{ - int index = vertex.i; + bool *hide_poly = (bool *)CustomData_get_layer_named_for_write( + &mesh->pdata, CD_PROP_BOOL, ".hide_poly", mesh->totpoly); - if (pbvh.color_domain == ATTR_DOMAIN_CORNER) { - const MeshElemMap &melem = pbvh.pmap->pmap[vertex.i]; + bool delete_hide_poly = true; + for (int face_index = 0; face_index < mesh->totpoly; face_index++, mp++) { + bool hidden = false; - for (const int i_poly : Span(melem.indices, melem.count)) { - const MPoly &mp = pbvh.mpoly[i_poly]; - MutableSpan colors{static_cast(pbvh.color_layer->data) + mp.loopstart, mp.totloop}; - Span loops{pbvh.mloop + mp.loopstart, mp.totloop}; + for (int loop_index = 0; !hidden && loop_index < mp->totloop; loop_index++) { + int grid_index = mp->loopstart + loop_index; - for (const int i_loop : IndexRange(mp.totloop)) { - if (loops[i_loop].v == vertex.i) { - from_float(color, colors[i_loop]); + if (pbvh->grid_hidden[grid_index] && + BLI_BITMAP_TEST(pbvh->grid_hidden[grid_index], key.grid_area - 1)) { + hidden = true; + + break; + } + } + + if (hidden && !hide_poly) { + hide_poly = (bool *)CustomData_get_layer_named_for_write( + &mesh->pdata, CD_PROP_BOOL, ".hide_poly", mesh->totpoly); + + if (!hide_poly) { + CustomData_add_layer_named( + &mesh->pdata, CD_PROP_BOOL, CD_CONSTRUCT, nullptr, mesh->totpoly, ".hide_poly"); + + hide_poly = (bool *)CustomData_get_layer_named_for_write( + &mesh->pdata, CD_PROP_BOOL, ".hide_poly", mesh->totpoly); + } + } + + if (hide_poly) { + delete_hide_poly = delete_hide_poly && !hidden; + hide_poly[face_index] = hidden; } } - } - } - else { - from_float(color, static_cast(pbvh.color_layer->data)[vertex.i]); - } -} -template -static void pbvh_vertex_color_set_bmesh(PBVH &pbvh, 
PBVHVertRef vertex, const float color[4]) -{ - BMVert *v = reinterpret_cast(vertex.i); - - if (pbvh.color_domain == ATTR_DOMAIN_CORNER) { - BMIter iter; - BMLoop *l; - - BM_ITER_ELEM (l, &iter, v, BM_LOOPS_OF_VERT) { - T *ptr = static_cast(BM_ELEM_CD_GET_VOID_P(l, pbvh.cd_vcol_offset)); - from_float(color, *ptr); - } - } - else { - T *ptr = static_cast(BM_ELEM_CD_GET_VOID_P(v, pbvh.cd_vcol_offset)); - from_float(color, *ptr); - } -} - -template -static void pbvh_vertex_color_set(PBVH &pbvh, PBVHVertRef vertex, const float color[4]) -{ - switch (pbvh.header.type) { - case PBVH_FACES: - pbvh_vertex_color_set_faces(pbvh, vertex, color); - break; - case PBVH_BMESH: - pbvh_vertex_color_set_bmesh(pbvh, vertex, color); - break; - case PBVH_GRIDS: - break; - } -} - -} // namespace blender::bke - -extern "C" { -void BKE_pbvh_vertex_color_get(const PBVH *pbvh, PBVHVertRef vertex, float r_color[4]) -{ - blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { - using T = decltype(dummy); - blender::bke::pbvh_vertex_color_get(*pbvh, vertex, r_color); - }); -} - -void BKE_pbvh_vertex_color_set(PBVH *pbvh, PBVHVertRef vertex, const float color[4]) -{ - blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { - using T = decltype(dummy); - blender::bke::pbvh_vertex_color_set(*pbvh, vertex, color); - }); -} - -void BKE_pbvh_swap_colors(PBVH *pbvh, - const int *indices, - const int indices_num, - float (*r_colors)[4]) -{ - blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { - using T = decltype(dummy); - T *pbvh_colors = static_cast(pbvh->color_layer->data); - for (const int i : IndexRange(indices_num)) { - T temp = pbvh_colors[indices[i]]; - blender::bke::from_float(r_colors[i], pbvh_colors[indices[i]]); - blender::bke::to_float(temp, r_colors[i]); - } - }); -} - -void BKE_pbvh_store_colors(PBVH *pbvh, - const int *indices, - const int indices_num, - float (*r_colors)[4]) -{ - blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { - using T = decltype(dummy); - T *pbvh_colors = static_cast(pbvh->color_layer->data); - for (const int i : IndexRange(indices_num)) { - blender::bke::to_float(pbvh_colors[indices[i]], r_colors[i]); - } - }); -} - -void BKE_pbvh_store_colors_vertex(PBVH *pbvh, - const int *indices, - const int indices_num, - float (*r_colors)[4]) -{ - if (pbvh->color_domain == ATTR_DOMAIN_POINT) { - BKE_pbvh_store_colors(pbvh, indices, indices_num, r_colors); - } - else { - blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { - using T = decltype(dummy); - for (const int i : IndexRange(indices_num)) { - PBVHVertRef vertex = {(intptr_t)indices[i]}; - - blender::bke::pbvh_vertex_color_get(*pbvh, vertex, r_colors[i]); + if (delete_hide_poly) { + CustomData_free_layer_named(&mesh->pdata, ".hide_poly", mesh->totpoly); } - }); + + BKE_mesh_flush_hidden_from_polys(mesh); + BKE_pbvh_update_hide_attributes_from_mesh(pbvh); + break; + } } } -} diff --git a/source/blender/blenkernel/intern/pbvh_bmesh.c b/source/blender/blenkernel/intern/pbvh_bmesh.cc similarity index 88% rename from source/blender/blenkernel/intern/pbvh_bmesh.c rename to source/blender/blenkernel/intern/pbvh_bmesh.cc index dc93e8ca5da..bf4707864ee 100644 --- a/source/blender/blenkernel/intern/pbvh_bmesh.c +++ b/source/blender/blenkernel/intern/pbvh_bmesh.cc @@ -33,12 +33,17 @@ Topology rake: #include "BLI_ghash.h" #include "BLI_hash.h" 
#include "BLI_heap_simple.h" +#include "BLI_index_range.hh" +#include "BLI_map.hh" #include "BLI_math.h" +#include "BLI_math_vector_types.hh" #include "BLI_memarena.h" #include "BLI_rand.h" +#include "BLI_set.hh" #include "BLI_sort_utils.h" #include "BLI_task.h" #include "BLI_utildefines.h" +#include "BLI_vector.hh" #include "PIL_time.h" #include "atomic_ops.h" @@ -58,7 +63,7 @@ Topology rake: #include "atomic_ops.h" #include "bmesh.h" #include "bmesh_log.h" -#include "pbvh_intern.h" +#include "pbvh_intern.hh" #include #include @@ -66,6 +71,13 @@ Topology rake: #include +using blender::float2; +using blender::float3; +using blender::IndexRange; +using blender::Map; +using blender::Set; +using blender::Vector; + static void _debugprint(const char *fmt, ...) { va_list args; @@ -240,7 +252,7 @@ static void pbvh_bmesh_node_finalize(PBVH *pbvh, PBVHNode *n = &pbvh->nodes[node_index]; bool has_visible = false; - n->draw_batches = NULL; + n->draw_batches = nullptr; /* Create vert hash sets */ if (!n->bm_unique_verts) { @@ -366,6 +378,18 @@ static void pbvh_print_mem_size(PBVH *pbvh) #endif } +template static T *copy_vector_to_c_array(const char *tag, Vector &vector) +{ + if (vector.size() == 0) { + return nullptr; + } + + void *ret = MEM_malloc_arrayN(sizeof(T), vector.size(), tag); + memcpy(ret, static_cast(vector.data()), sizeof(T) * vector.size()); + + return static_cast(ret); +} + /* Recursively split the node if it exceeds the leaf_limit */ static void pbvh_bmesh_node_split( PBVH *pbvh, const BBC *bbc_array, int node_index, bool add_orco, int depth) @@ -415,7 +439,7 @@ static void pbvh_bmesh_node_split( /* Initialize children */ PBVHNode *c1 = &pbvh->nodes[children], *c2 = &pbvh->nodes[children + 1]; - c1->draw_batches = c2->draw_batches = NULL; + c1->draw_batches = c2->draw_batches = nullptr; c1->depth = c2->depth = n->depth + 1; c1->flag |= PBVH_Leaf; @@ -427,7 +451,7 @@ static void pbvh_bmesh_node_split( c1->bm_unique_verts = BLI_table_gset_new("bm_unique_verts"); c2->bm_unique_verts = BLI_table_gset_new("bm_unique_verts"); - c1->bm_other_verts = c2->bm_other_verts = NULL; + c1->bm_other_verts = c2->bm_other_verts = nullptr; /* Partition the parent node's faces between the two children */ TGSET_ITER (f, n->bm_faces) { @@ -443,7 +467,7 @@ static void pbvh_bmesh_node_split( TGSET_ITER_END #if 0 /* Enforce at least one primitive in each node */ - TableGSet *empty = NULL,*other; + TableGSet *empty = nullptr,*other; if (BLI_table_gset_len(c1->bm_faces) == 0) { empty = c1->bm_faces; other = c2->bm_faces; @@ -456,7 +480,7 @@ static void pbvh_bmesh_node_split( void *key; TGSET_ITER (key,other) { BLI_table_gset_insert(empty,key); - BLI_table_gset_remove(other,key,NULL); + BLI_table_gset_remove(other,key,nullptr); break; } TGSET_ITER_END @@ -484,7 +508,7 @@ static void pbvh_bmesh_node_split( } TGSET_ITER_END - BLI_table_gset_free(n->bm_unique_verts, NULL); + BLI_table_gset_free(n->bm_unique_verts, nullptr); } if (n->bm_faces) { @@ -494,11 +518,11 @@ static void pbvh_bmesh_node_split( } TGSET_ITER_END - BLI_table_gset_free(n->bm_faces, NULL); + BLI_table_gset_free(n->bm_faces, nullptr); } if (n->bm_other_verts) { - BLI_table_gset_free(n->bm_other_verts, NULL); + BLI_table_gset_free(n->bm_other_verts, nullptr); } if (n->layer_disp) { @@ -509,14 +533,14 @@ static void pbvh_bmesh_node_split( BKE_pbvh_bmesh_free_tris(pbvh, n); } - n->bm_faces = NULL; - n->bm_unique_verts = NULL; - n->bm_other_verts = NULL; - n->layer_disp = NULL; + n->bm_faces = nullptr; + n->bm_unique_verts = nullptr; + 
n->bm_other_verts = nullptr; + n->layer_disp = nullptr; if (n->draw_batches) { DRW_pbvh_node_free(n->draw_batches); - n->draw_batches = NULL; + n->draw_batches = nullptr; } n->flag &= ~PBVH_Leaf; @@ -552,7 +576,7 @@ bool pbvh_bmesh_node_limit_ensure(PBVH *pbvh, int node_index) pbvh->draw_cache_invalid = true; /* For each BMFace, store the AABB and AABB centroid */ - BBC *bbc_array = MEM_mallocN(sizeof(BBC) * bm_faces_size, "BBC"); + BBC *bbc_array = static_cast(MEM_mallocN(sizeof(BBC) * bm_faces_size, "BBC")); BMFace *f; @@ -620,7 +644,7 @@ void bke_pbvh_insert_face_finalize(PBVH *pbvh, BMFace *f, const int ni) updateflag |= PBVH_UpdateColor | PBVH_UpdateMask | PBVH_UpdateNormals | PBVH_UpdateOriginalBB; updateflag |= PBVH_UpdateVisibility | PBVH_UpdateRedraw | PBVH_RebuildDrawBuffers; - node->flag |= updateflag; + node->flag |= (PBVHNodeFlags)updateflag; // ensure verts are in pbvh BMLoop *l = f->l_first; @@ -642,7 +666,7 @@ void bke_pbvh_insert_face_finalize(PBVH *pbvh, BMFace *f, const int ni) BLI_table_gset_add(node->bm_other_verts, l->v); } - node2->flag |= updateflag; + node2->flag |= (PBVHNodeFlags)updateflag; BB_expand(&node2->vb, l->v->co); BB_expand(&node2->orig_vb, mv->origco); @@ -754,7 +778,7 @@ static void pbvh_bmesh_regen_node_verts(PBVH *pbvh, PBVHNode *node) TableGSet *old_unique_verts = node->bm_unique_verts; - BLI_table_gset_free(node->bm_other_verts, NULL); + BLI_table_gset_free(node->bm_other_verts, nullptr); BMVert *v; TGSET_ITER (v, old_unique_verts) { @@ -807,7 +831,7 @@ static void pbvh_bmesh_regen_node_verts(PBVH *pbvh, PBVHNode *node) PBVHNode *node = pbvh->nodes + ni2; BLI_table_gset_add(node->bm_unique_verts, v); - BLI_table_gset_remove(node->bm_other_verts, v, NULL); + BLI_table_gset_remove(node->bm_other_verts, v, nullptr); ok = true; break; @@ -846,7 +870,7 @@ static void pbvh_bmesh_regen_node_verts(PBVH *pbvh, PBVHNode *node) PBVH_UpdateVisibility; } - BLI_table_gset_free(old_unique_verts, NULL); + BLI_table_gset_free(old_unique_verts, nullptr); } void BKE_pbvh_bmesh_mark_node_regen(PBVH *pbvh, PBVHNode *node) @@ -863,7 +887,7 @@ PBVHNode *BKE_pbvh_get_node_leaf_safe(PBVH *pbvh, int i) } } - return NULL; + return nullptr; } void BKE_pbvh_bmesh_regen_node_verts(PBVH *pbvh) @@ -879,7 +903,7 @@ void BKE_pbvh_bmesh_regen_node_verts(PBVH *pbvh) } } -/************************* Called from pbvh.c *************************/ +/************************* Called from pbvh.cc *************************/ static bool pbvh_poly_hidden(PBVH *pbvh, BMFace *f) { @@ -890,7 +914,7 @@ bool BKE_pbvh_bmesh_check_origdata(PBVH *pbvh, BMVert *v, int stroke_id) { PBVHVertRef vertex = {(intptr_t)v}; - return BKE_pbvh_get_origvert(pbvh, vertex, NULL, NULL, NULL); + return BKE_pbvh_get_origvert(pbvh, vertex, nullptr, nullptr, nullptr); } bool pbvh_bmesh_node_raycast(PBVH *pbvh, @@ -981,7 +1005,7 @@ bool pbvh_bmesh_node_raycast(PBVH *pbvh, bool BKE_pbvh_bmesh_node_raycast_detail(PBVH *pbvh, PBVHNode *node, const float ray_start[3], - struct IsectRayPrecalc *isect_precalc, + IsectRayPrecalc *isect_precalc, float *depth, float *r_edge_length) { @@ -1081,7 +1105,7 @@ typedef struct UpdateNormalsTaskData { static void pbvh_update_normals_task_cb(void *__restrict userdata, const int n, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /* tls */) { BMVert *v; BMFace *f; @@ -1089,8 +1113,7 @@ static void pbvh_update_normals_task_cb(void *__restrict userdata, PBVHNode *node = data->node; const int node_nr = data->node_nr; - BMVert **bordervs = NULL; - 
BLI_array_declare(bordervs); + Vector bordervs; const int cd_face_node_offset = data->cd_face_node_offset; const int cd_vert_node_offset = data->cd_vert_node_offset; @@ -1111,7 +1134,7 @@ static void pbvh_update_normals_task_cb(void *__restrict userdata, if (NORMAL_VERT_BAD(v)) { v->head.hflag |= tag; - BLI_array_append(bordervs, v); + bordervs.append(v); continue; } @@ -1135,7 +1158,7 @@ static void pbvh_update_normals_task_cb(void *__restrict userdata, loop_exit: if (v->head.hflag & tag) { - BLI_array_append(bordervs, v); + bordervs.append(v); continue; } @@ -1163,7 +1186,7 @@ static void pbvh_update_normals_task_cb(void *__restrict userdata, PBVH_CHECK_NAN(v->no); if (dot_v3v3(v->no, v->no) == 0.0f) { - BLI_array_append(bordervs, v); + bordervs.append(v); continue; } @@ -1174,16 +1197,19 @@ static void pbvh_update_normals_task_cb(void *__restrict userdata, } TGSET_ITER_END - data->border_verts = bordervs; - data->tot_border_verts = BLI_array_len(bordervs); + data->border_verts = (BMVert **)MEM_malloc_arrayN( + bordervs.size(), sizeof(void *), "data->border_verts"); + memcpy((void *)data->border_verts, (void *)bordervs.data(), sizeof(void *) * bordervs.size()); + data->tot_border_verts = bordervs.size(); node->flag &= ~PBVH_UpdateNormals; } void pbvh_bmesh_normals_update(PBVH *pbvh, PBVHNode **nodes, int totnode) { TaskParallelSettings settings; - UpdateNormalsTaskData *datas = MEM_calloc_arrayN(totnode, sizeof(*datas), "bmesh normal update"); + UpdateNormalsTaskData *datas = (UpdateNormalsTaskData *)MEM_calloc_arrayN( + totnode, sizeof(*datas), "bmesh normal update"); for (int i = 0; i < totnode; i++) { datas[i].node = nodes[i]; @@ -1310,12 +1336,12 @@ typedef struct FastNodeBuildInfo { * This function is multi-thread-able since each invocation applies * to a sub part of the arrays. 
*/ + static void pbvh_bmesh_node_limit_ensure_fast(PBVH *pbvh, BMFace **nodeinfo, BBC *bbc_array, FastNodeBuildInfo *node, - FastNodeBuildInfo ***r_leaves, - int *r_totleaf, + Vector &leaves, MemArena *arena) { FastNodeBuildInfo *child1, *child2; @@ -1397,8 +1423,10 @@ static void pbvh_bmesh_node_limit_ensure_fast(PBVH *pbvh, * each sequential part belonging to one node only */ BLI_assert((num_child1 + num_child2) == node->totface); - node->child1 = child1 = BLI_memarena_alloc(arena, sizeof(FastNodeBuildInfo)); - node->child2 = child2 = BLI_memarena_alloc(arena, sizeof(FastNodeBuildInfo)); + node->child1 = child1 = (FastNodeBuildInfo *)BLI_memarena_alloc(arena, + sizeof(FastNodeBuildInfo)); + node->child2 = child2 = (FastNodeBuildInfo *)BLI_memarena_alloc(arena, + sizeof(FastNodeBuildInfo)); child1->totface = num_child1; child1->start = node->start; @@ -1408,37 +1436,30 @@ static void pbvh_bmesh_node_limit_ensure_fast(PBVH *pbvh, child2->start = node->start + num_child1; child2->depth = node->depth + 2; - child1->child1 = child1->child2 = child2->child1 = child2->child2 = NULL; + child1->child1 = child1->child2 = child2->child1 = child2->child2 = nullptr; - pbvh_bmesh_node_limit_ensure_fast(pbvh, nodeinfo, bbc_array, child1, r_leaves, r_totleaf, arena); - pbvh_bmesh_node_limit_ensure_fast(pbvh, nodeinfo, bbc_array, child2, r_leaves, r_totleaf, arena); - - FastNodeBuildInfo **leaves = *r_leaves; - BLI_array_declare(leaves); - BLI_array_len_set(leaves, *r_totleaf); + pbvh_bmesh_node_limit_ensure_fast(pbvh, nodeinfo, bbc_array, child1, leaves, arena); + pbvh_bmesh_node_limit_ensure_fast(pbvh, nodeinfo, bbc_array, child2, leaves, arena); if (!child1->child1 && !child1->child2) { - BLI_array_append(leaves, child1); + leaves.append(child1); } if (!child2->child1 && !child2->child2) { - BLI_array_append(leaves, child2); + leaves.append(child2); } - - *r_leaves = leaves; - *r_totleaf = BLI_array_len(leaves); } -typedef struct LeafBuilderThreadData { +struct LeafBuilderThreadData { PBVH *pbvh; BMFace **nodeinfo; BBC *bbc_array; - FastNodeBuildInfo **leaves; -} LeafBuilderThreadData; + Vector &leaves; +}; static void pbvh_bmesh_create_leaf_fast_task_cb(void *__restrict userdata, const int i, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /* tls */) { LeafBuilderThreadData *data = (LeafBuilderThreadData *)userdata; PBVH *pbvh = data->pbvh; @@ -1700,10 +1721,9 @@ void bke_pbvh_update_vert_boundary(int cd_sculpt_vert, int fset; BMVert *v2; BMEdge *e; - } *fsets = NULL; + } *fsets = nullptr; #endif - int *fsets = NULL; - BLI_array_staticdeclare(fsets, 16); + Vector fsets; float(*lastuv)[2] = BLI_array_alloca(lastuv, totuv); float(*lastuv2)[2] = BLI_array_alloca(lastuv2, totuv); @@ -1781,7 +1801,7 @@ void bke_pbvh_update_vert_boundary(int cd_sculpt_vert, BMLoop *l = l_iter->v != v ? 
l_iter->next : l_iter; for (int i = 0; i < totuv; i++) { - float *luv = BM_ELEM_CD_GET_VOID_P(l, cd_uvs[i]); + float *luv = (float *)BM_ELEM_CD_GET_VOID_P(l, cd_uvs[i]); if (uv_first) { copy_v2_v2(lastuv[i], luv); @@ -1844,14 +1864,14 @@ void bke_pbvh_update_vert_boundary(int cd_sculpt_vert, BM_ELEM_CD_GET_INT(e->l->f, cd_faceset_offset), bound_symmetry, v2->co); bool ok = true; - for (int i = 0; i < BLI_array_len(fsets); i++) { + for (int i = 0; i < fsets.size(); i++) { if (fsets[i] == fset) { ok = false; } } if (ok) { - BLI_array_append(fsets, fset); + fsets.append(fset); } } @@ -1871,14 +1891,14 @@ void bke_pbvh_update_vert_boundary(int cd_sculpt_vert, BM_ELEM_CD_GET_INT(e->l->radial_next->f, cd_faceset_offset), bound_symmetry, v2->co); bool ok2 = true; - for (int i = 0; i < BLI_array_len(fsets); i++) { + for (int i = 0; i < fsets.size(); i++) { if (fsets[i] == fset2) { ok2 = false; } } if (ok2) { - BLI_array_append(fsets, fset2); + fsets.append(fset2); } } @@ -1895,11 +1915,11 @@ void bke_pbvh_update_vert_boundary(int cd_sculpt_vert, val++; } while ((e = BM_DISK_EDGE_NEXT(e, v)) != v->e); - if (BLI_array_len(fsets) > 1) { + if (fsets.size() > 1) { boundflag |= SCULPT_BOUNDARY_FACE_SET; } - if (BLI_array_len(fsets) > 2) { + if (fsets.size() > 2) { boundflag |= SCULPT_CORNER_FACE_SET; } @@ -1947,8 +1967,6 @@ void bke_pbvh_update_vert_boundary(int cd_sculpt_vert, ushort origmask = mv->origmask; atomic_cas_short2(&mv->origmask, origmask, oldcurv, origmask, newcurv); - - BLI_array_free(fsets); } bool BKE_pbvh_check_vert_boundary(PBVH *pbvh, BMVert *v) @@ -2023,14 +2041,13 @@ static void coalese_pbvh(PBVH *pbvh, BMesh *bm) printf("leafsize: %d\n", leafsize); - int *fmap = MEM_calloc_arrayN(bm->totface, sizeof(int), "pbvh face map"); + int *fmap = (int *)MEM_calloc_arrayN(bm->totface, sizeof(int), "pbvh face map"); int totleaf = 0; - FastNodeBuildInfo *nodes = NULL; - BLI_array_declare(nodes); + Vector nodes; const int qsize = leafsize; - BMFace **queue = MEM_calloc_arrayN(qsize, sizeof(*queue), "pbvh queue"); + BMFace **queue = (BMFace **)MEM_calloc_arrayN(qsize, sizeof(*queue), "pbvh queue"); BM_mesh_elem_table_ensure(bm, BM_FACE | BM_VERT); @@ -2043,9 +2060,9 @@ static void coalese_pbvh(PBVH *pbvh, BMesh *bm) } int ni = totleaf++; - BLI_array_grow_one(nodes); + nodes.resize(nodes.size() + 1); - FastNodeBuildInfo *node = nodes + BLI_array_len(nodes) - 1; + FastNodeBuildInfo *node = &nodes[nodes.size() - 1]; copy_v3_v3(node->cent, f->l_first->v->co); copy_v3_v3(node->no, f->no); @@ -2075,7 +2092,7 @@ static void coalese_pbvh(PBVH *pbvh, BMesh *bm) } printf("totleafs: %d\n", totleaf); - BLI_array_grow_items(nodes, BLI_array_len(nodes) << 1); + nodes.resize(nodes.size() << 1); const int tag2 = 1; int pi = totleaf; @@ -2088,7 +2105,7 @@ static void coalese_pbvh(PBVH *pbvh, BMesh *bm) } for (int i = starti; i < endi; i++) { - FastNodeBuildInfo *n1 = nodes + i, *n2 = NULL; + FastNodeBuildInfo *n1 = &nodes[i], *n2 = nullptr; if (n1->tag & tag2) { continue; @@ -2097,7 +2114,7 @@ static void coalese_pbvh(PBVH *pbvh, BMesh *bm) float mindis = 1e17; for (int j = starti; j < endi; j++) { - FastNodeBuildInfo *n3 = nodes + j; + FastNodeBuildInfo *n3 = &nodes[j]; if (j == i || n3->tag) { continue; @@ -2114,7 +2131,7 @@ static void coalese_pbvh(PBVH *pbvh, BMesh *bm) break; } - FastNodeBuildInfo *parent = nodes + pi; + FastNodeBuildInfo *parent = &nodes[pi]; pi++; n1->tag |= tag2; @@ -2130,7 +2147,6 @@ static void coalese_pbvh(PBVH *pbvh, BMesh *bm) MEM_SAFE_FREE(queue); MEM_SAFE_FREE(fmap); - 
BLI_array_free(nodes); } void BKE_pbvh_update_sculpt_verts(PBVH *pbvh) @@ -2140,7 +2156,7 @@ void BKE_pbvh_update_sculpt_verts(PBVH *pbvh) for (int i = 0; i < totvert; i++) { PBVHVertRef vertex = BKE_pbvh_index_to_vertex(pbvh, i); - BKE_pbvh_get_origvert(pbvh, vertex, NULL, NULL, NULL); + BKE_pbvh_get_origvert(pbvh, vertex, nullptr, nullptr, nullptr); } } @@ -2212,8 +2228,9 @@ void BKE_pbvh_build_bmesh(PBVH *pbvh, } /* bounding box array of all faces, no need to recalculate every time */ - BBC *bbc_array = MEM_mallocN(sizeof(BBC) * bm->totface, "BBC"); - BMFace **nodeinfo = MEM_mallocN(sizeof(*nodeinfo) * bm->totface, "nodeinfo"); + BBC *bbc_array = static_cast(MEM_mallocN(sizeof(BBC) * bm->totface, "BBC")); + BMFace **nodeinfo = static_cast( + MEM_mallocN(sizeof(*nodeinfo) * bm->totface, "nodeinfo")); MemArena *arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, "fast PBVH node storage"); BMFace *f; @@ -2254,14 +2271,15 @@ void BKE_pbvh_build_bmesh(PBVH *pbvh, } /* setup root node */ - struct FastNodeBuildInfo rootnode = {0}, **leaves = NULL; + struct FastNodeBuildInfo rootnode = {0}; + Vector leaves; rootnode.totface = bm->totface; - int totleaf = 0; /* start recursion, assign faces to nodes accordingly */ - pbvh_bmesh_node_limit_ensure_fast( - pbvh, nodeinfo, bbc_array, &rootnode, &leaves, &totleaf, arena); + pbvh_bmesh_node_limit_ensure_fast(pbvh, nodeinfo, bbc_array, &rootnode, leaves, arena); + + int totleaf = leaves.size(); pbvh_grow_nodes(pbvh, 1); rootnode.node_index = 0; @@ -2269,10 +2287,8 @@ void BKE_pbvh_build_bmesh(PBVH *pbvh, pbvh_bmesh_create_nodes_fast_recursive_create(pbvh, nodeinfo, bbc_array, &rootnode); if (!totleaf) { - leaves = MEM_mallocN(sizeof(void *), "leaves"); totleaf = 1; - - leaves[0] = &rootnode; + leaves.append(&rootnode); } /* build leaf nodes */ @@ -2285,8 +2301,6 @@ void BKE_pbvh_build_bmesh(PBVH *pbvh, // pbvh_bmesh_create_leaf_fast(pbvh, nodeinfo, bbc_array, leaves[i]); - MEM_SAFE_FREE(leaves); - /* take root node and visit and populate children recursively */ pbvh_bmesh_create_nodes_fast_recursive_final(pbvh, nodeinfo, bbc_array, &rootnode); @@ -2345,7 +2359,7 @@ void BKE_pbvh_build_bmesh(PBVH *pbvh, /* make sure read side of double buffer is set too */ TGSET_ITER (f, node->bm_faces) { - float *areabuf = BM_ELEM_CD_GET_VOID_P(f, cd_face_area); + float *areabuf = (float *)BM_ELEM_CD_GET_VOID_P(f, cd_face_area); areabuf[area_dst_i] = areabuf[area_src_i]; } TGSET_ITER_END; @@ -2376,8 +2390,7 @@ bool BKE_pbvh_bmesh_update_topology_nodes(PBVH *pbvh, bool is_snake_hook) { bool modified = false; - PBVHNode **nodes = NULL; - BLI_array_declare(nodes); + Vector nodes; for (int i = 0; i < pbvh->totnode; i++) { PBVHNode *node = pbvh->nodes + i; @@ -2389,11 +2402,11 @@ bool BKE_pbvh_bmesh_update_topology_nodes(PBVH *pbvh, if (node->flag & PBVH_Leaf) { undopush(node, searchdata); - BLI_array_append(nodes, node); + nodes.append(node); } } - for (int j = 0; j < BLI_array_len(nodes); j++) { + for (int j : IndexRange(nodes.size())) { nodes[j]->flag |= PBVH_UpdateCurvatureDir; BKE_pbvh_node_mark_topology_update(nodes[j]); } @@ -2413,8 +2426,6 @@ bool BKE_pbvh_bmesh_update_topology_nodes(PBVH *pbvh, disable_surface_relax, is_snake_hook); - BLI_array_free(nodes); - return modified; } @@ -2427,10 +2438,10 @@ static void pbvh_free_tribuf(PBVHTriBuf *tribuf) BLI_smallhash_release(&tribuf->vertmap); - tribuf->verts = NULL; - tribuf->tris = NULL; - tribuf->loops = NULL; - tribuf->edges = NULL; + tribuf->verts = nullptr; + tribuf->tris = nullptr; + tribuf->loops = nullptr; + 
tribuf->edges = nullptr; tribuf->totloop = tribuf->tottri = tribuf->totedge = tribuf->totvert = 0; @@ -2451,7 +2462,7 @@ void BKE_pbvh_bmesh_free_tris(PBVH *pbvh, PBVHNode *node) if (node->tribuf) { pbvh_free_tribuf(node->tribuf); MEM_freeN(node->tribuf); - node->tribuf = NULL; + node->tribuf = nullptr; } if (node->tri_buffers) { @@ -2461,7 +2472,7 @@ void BKE_pbvh_bmesh_free_tris(PBVH *pbvh, PBVHNode *node) MEM_SAFE_FREE(node->tri_buffers); - node->tri_buffers = NULL; + node->tri_buffers = nullptr; node->tot_tri_buffers = 0; } } @@ -2493,13 +2504,9 @@ static bool pbvh_bmesh_split_tris(PBVH *pbvh, PBVHNode *node) const int cd_uv = pbvh->header.bm->ldata.layers[layeri].offset; const int cd_size = CustomData_sizeof(CD_PROP_FLOAT2); - PBVHVertRef *verts = NULL; - PBVHTri *tris = NULL; - intptr_t *loops = NULL; - - BLI_array_declare(verts); - BLI_array_declare(tris); - BLI_array_declare(loops); + Vector verts; + Vector tris; + Vector loops; TGSET_ITER (f, node->bm_faces) { BMLoop *l = f->l_first; @@ -2522,10 +2529,10 @@ static bool pbvh_bmesh_split_tris(PBVH *pbvh, PBVHNode *node) } l->head.index = vi++; - BLI_array_append(loops, (intptr_t)l); + loops.append((intptr_t)l); PBVHVertRef sv = {(intptr_t)l->v}; - BLI_array_append(verts, sv); + verts.append(sv); BMIter iter; BMLoop *l2; @@ -2534,8 +2541,8 @@ static bool pbvh_bmesh_split_tris(PBVH *pbvh, PBVHNode *node) bool ok = true; for (int i = 0; i < totlayer; i++) { - float *uv1 = BM_ELEM_CD_GET_VOID_P(l, cd_uv + cd_size * i); - float *uv2 = BM_ELEM_CD_GET_VOID_P(l2, cd_uv + cd_size * i); + float *uv1 = (float *)BM_ELEM_CD_GET_VOID_P(l, cd_uv + cd_size * i); + float *uv2 = (float *)BM_ELEM_CD_GET_VOID_P(l2, cd_uv + cd_size * i); if (len_v3v3(uv1, uv2) > 0.001) { ok = false; @@ -2562,7 +2569,7 @@ static bool pbvh_bmesh_split_tris(PBVH *pbvh, PBVHNode *node) tri.v[2] = l3->head.index; copy_v3_v3(tri.no, f->no); - BLI_array_append(tris, tri); + tris.append(tri); } TGSET_ITER_END @@ -2570,16 +2577,16 @@ static bool pbvh_bmesh_split_tris(PBVH *pbvh, PBVHNode *node) pbvh_free_tribuf(node->tribuf); } else { - node->tribuf = MEM_callocN(sizeof(*node->tribuf), "node->tribuf"); + node->tribuf = (PBVHTriBuf *)MEM_callocN(sizeof(*node->tribuf), "node->tribuf"); } - node->tribuf->verts = verts; - node->tribuf->loops = loops; - node->tribuf->tris = tris; + node->tribuf->verts = copy_vector_to_c_array("node->tribuf->verts", verts); + node->tribuf->loops = copy_vector_to_c_array("node->tribuf->loops", loops); + node->tribuf->tris = copy_vector_to_c_array("node->tribuf->tris", tris); - node->tribuf->tottri = BLI_array_len(tris); - node->tribuf->totvert = BLI_array_len(verts); - node->tribuf->totloop = BLI_array_len(loops); + node->tribuf->tottri = tris.size(); + node->tribuf->totvert = verts.size(); + node->tribuf->totloop = loops.size(); return true; } @@ -2592,10 +2599,11 @@ BLI_INLINE PBVHTri *pbvh_tribuf_add_tri(PBVHTriBuf *tribuf) size_t newsize = (size_t)32 + (size_t)tribuf->tris_size + (size_t)(tribuf->tris_size >> 1); if (!tribuf->tris) { - tribuf->tris = MEM_mallocN(sizeof(*tribuf->tris) * newsize, "tribuf tris"); + tribuf->tris = (PBVHTri *)MEM_mallocN(sizeof(*tribuf->tris) * newsize, "tribuf tris"); } else { - tribuf->tris = MEM_reallocN_id(tribuf->tris, sizeof(*tribuf->tris) * newsize, "tribuf tris"); + tribuf->tris = (PBVHTri *)MEM_reallocN_id( + tribuf->tris, sizeof(*tribuf->tris) * newsize, "tribuf tris"); } tribuf->tris_size = newsize; @@ -2613,13 +2621,13 @@ BLI_INLINE void pbvh_tribuf_add_vert(PBVHTriBuf *tribuf, PBVHVertRef vertex, BML size_t 
newsize = (size_t)32 + (size_t)(tribuf->verts_size << 1); if (!tribuf->verts) { - tribuf->verts = MEM_mallocN(sizeof(*tribuf->verts) * newsize, "tribuf verts"); - tribuf->loops = MEM_mallocN(sizeof(*tribuf->loops) * newsize, "tribuf loops"); + tribuf->verts = (PBVHVertRef *)MEM_mallocN(sizeof(*tribuf->verts) * newsize, "tribuf verts"); + tribuf->loops = (intptr_t *)MEM_mallocN(sizeof(*tribuf->loops) * newsize, "tribuf loops"); } else { - tribuf->verts = MEM_reallocN_id( + tribuf->verts = (PBVHVertRef *)MEM_reallocN_id( tribuf->verts, sizeof(*tribuf->verts) * newsize, "tribuf verts"); - tribuf->loops = MEM_reallocN_id( + tribuf->loops = (intptr_t *)MEM_reallocN_id( tribuf->loops, sizeof(*tribuf->loops) * newsize, "tribuf loops"); } @@ -2638,10 +2646,10 @@ BLI_INLINE void pbvh_tribuf_add_edge(PBVHTriBuf *tribuf, int v1, int v2) size_t newsize = (size_t)32 + (size_t)(tribuf->edges_size << 1); if (!tribuf->edges) { - tribuf->edges = MEM_mallocN(sizeof(*tribuf->edges) * 2ULL * newsize, "tribuf edges"); + tribuf->edges = (int *)MEM_mallocN(sizeof(*tribuf->edges) * 2ULL * newsize, "tribuf edges"); } else { - tribuf->edges = MEM_reallocN_id( + tribuf->edges = (int *)MEM_reallocN_id( tribuf->edges, sizeof(*tribuf->edges) * 2ULL * newsize, "tribuf edges"); } @@ -2663,7 +2671,7 @@ void pbvh_bmesh_check_other_verts(PBVHNode *node) node->flag &= ~PBVH_UpdateOtherVerts; if (node->bm_other_verts) { - BLI_table_gset_free(node->bm_other_verts, NULL); + BLI_table_gset_free(node->bm_other_verts, nullptr); } node->bm_other_verts = BLI_table_gset_new("bm_other_verts"); @@ -2692,10 +2700,10 @@ static void pbvh_init_tribuf(PBVHNode *node, PBVHTriBuf *tribuf) tribuf->totloop = 0; tribuf->totedge = 0; - tribuf->edges = NULL; - tribuf->verts = NULL; - tribuf->tris = NULL; - tribuf->loops = NULL; + tribuf->edges = nullptr; + tribuf->verts = nullptr; + tribuf->tris = nullptr; + tribuf->loops = nullptr; BLI_smallhash_init_ex(&tribuf->vertmap, node->bm_unique_verts->length); } @@ -2712,7 +2720,7 @@ static uintptr_t tri_loopkey(BMLoop *l, int mat_nr, int cd_fset, int cd_uvs[], i } for (int i = 0; i < totuv; i++) { - float *luv = BM_ELEM_CD_GET_VOID_P(l, cd_uvs[i]); + float *luv = (float *)BM_ELEM_CD_GET_VOID_P(l, cd_uvs[i]); float snap = 4196.0f; uintptr_t x = (uintptr_t)(luv[0] * snap); @@ -2756,17 +2764,12 @@ bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node) BKE_pbvh_bmesh_free_tris(pbvh, node); } - node->tribuf = MEM_callocN(sizeof(*node->tribuf), "node->tribuf"); + node->tribuf = (PBVHTriBuf *)MEM_callocN(sizeof(*node->tribuf), "node->tribuf"); pbvh_init_tribuf(node, node->tribuf); - BMLoop **loops = NULL; - uint(*loops_idx)[3] = NULL; - - BLI_array_staticdeclare(loops, 128); - BLI_array_staticdeclare(loops_idx, 128); - - PBVHTriBuf *tribufs = NULL; // material-specific tri buffers - BLI_array_declare(tribufs); + Vector loops; + Vector loops_idx; + Vector tribufs; node->flag &= ~PBVH_UpdateTris; @@ -2806,26 +2809,26 @@ bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node) if (mat_map[mat_nr] == -1) { PBVHTriBuf _tribuf = {0}; - mat_map[mat_nr] = BLI_array_len(tribufs); + mat_map[mat_nr] = tribufs.size(); pbvh_init_tribuf(node, &_tribuf); _tribuf.mat_nr = mat_nr; - BLI_array_append(tribufs, _tribuf); + + tribufs.append(_tribuf); } #ifdef DYNTOPO_DYNAMIC_TESS const int tottri = (f->len - 2); - BLI_array_clear(loops); - BLI_array_clear(loops_idx); - BLI_array_grow_items(loops, f->len); - BLI_array_grow_items(loops_idx, tottri); + loops.resize(f->len); + loops_idx.resize(tottri); - 
BM_face_calc_tessellation(f, true, loops, loops_idx); + BM_face_calc_tessellation( + f, true, loops.data(), reinterpret_cast(loops_idx.data())); for (int i = 0; i < tottri; i++) { PBVHTri *tri = pbvh_tribuf_add_tri(node->tribuf); - PBVHTriBuf *mat_tribuf = tribufs + mat_map[mat_nr]; + PBVHTriBuf *mat_tribuf = &tribufs[mat_map[mat_nr]]; PBVHTri *mat_tri = pbvh_tribuf_add_tri(mat_tribuf); tri->eflag = mat_tri->eflag = 0; @@ -2835,7 +2838,7 @@ bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node) BMLoop *l = loops[loops_idx[i][j]]; BMLoop *l2 = loops[loops_idx[i][(j + 1) % 3]]; - void **val = NULL; + void **val = nullptr; BMEdge *e = BM_edge_exists(l->v, l2->v); # ifdef SCULPT_DIAGONAL_EDGE_MARKS @@ -2861,7 +2864,7 @@ bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node) tri->v[j] = (intptr_t)val[0]; tri->l[j] = (intptr_t)l; - val = NULL; + val = nullptr; if (!BLI_smallhash_ensure_p(&mat_tribuf->vertmap, loopkey, &val)) { PBVHVertRef sv = {(intptr_t)l->v}; @@ -2889,7 +2892,7 @@ bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node) int j = 0; do { - void **val = NULL; + void **val = nullptr; if (!BLI_ghash_ensure_p(vmap, l->v, &val)) { PBVHVertRef sv = {(intptr_t)l->v}; @@ -2903,7 +2906,7 @@ bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node) tri->v[j] = (intptr_t)val[0]; tri->l[j] = (intptr_t)l; - val = NULL; + val = nullptr; if (!BLI_ghash_ensure_p(mat_vmaps[mat_nr], l->v, &val)) { PBVHVertRef sv = {(intptr_t)l->v}; @@ -2937,7 +2940,7 @@ bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node) } int mat_nr = f->mat_nr; - PBVHTriBuf *mat_tribuf = tribufs + mat_map[mat_nr]; + PBVHTriBuf *mat_tribuf = &tribufs[mat_map[mat_nr]]; BMLoop *l = f->l_first; do { @@ -2960,13 +2963,10 @@ bool BKE_pbvh_bmesh_check_tris(PBVH *pbvh, PBVHNode *node) } TGSET_ITER_END - BLI_array_free(loops); - BLI_array_free(loops_idx); - bm->elem_index_dirty |= BM_VERT; - node->tri_buffers = tribufs; - node->tot_tri_buffers = BLI_array_len(tribufs); + node->tri_buffers = copy_vector_to_c_array("node->tri_buffers", tribufs); + node->tot_tri_buffers = tribufs.size(); if (node->tribuf->totvert) { copy_v3_v3(node->tribuf->min, min); @@ -3047,7 +3047,7 @@ void BKE_pbvh_bmesh_on_mesh_change(PBVH *pbvh) bool BKE_pbvh_bmesh_mark_update_valence(PBVH *pbvh, PBVHVertRef vertex) { BMVert *v = (BMVert *)vertex.i; - MSculptVert *mv = BM_ELEM_CD_GET_VOID_P(v, pbvh->cd_sculpt_vert); + MSculptVert *mv = (MSculptVert *)BM_ELEM_CD_GET_VOID_P(v, pbvh->cd_sculpt_vert); bool ret = mv->flag & SCULPTVERT_NEED_VALENCE; @@ -3059,7 +3059,7 @@ bool BKE_pbvh_bmesh_mark_update_valence(PBVH *pbvh, PBVHVertRef vertex) bool BKE_pbvh_bmesh_check_valence(PBVH *pbvh, PBVHVertRef vertex) { BMVert *v = (BMVert *)vertex.i; - MSculptVert *mv = BM_ELEM_CD_GET_VOID_P(v, pbvh->cd_sculpt_vert); + MSculptVert *mv = (MSculptVert *)BM_ELEM_CD_GET_VOID_P(v, pbvh->cd_sculpt_vert); if (mv->flag & SCULPTVERT_NEED_VALENCE) { BKE_pbvh_bmesh_update_valence(pbvh->cd_sculpt_vert, vertex); @@ -3074,7 +3074,7 @@ void BKE_pbvh_bmesh_update_valence(int cd_sculpt_vert, PBVHVertRef vertex) BMVert *v = (BMVert *)vertex.i; BMEdge *e; - MSculptVert *mv = BM_ELEM_CD_GET_VOID_P(v, cd_sculpt_vert); + MSculptVert *mv = (MSculptVert *)BM_ELEM_CD_GET_VOID_P(v, cd_sculpt_vert); mv->flag &= ~SCULPTVERT_NEED_VALENCE; @@ -3175,7 +3175,7 @@ static void BKE_pbvh_bmesh_correct_tree(PBVH *pbvh, PBVHNode *node, PBVHNode *pa BMVert *v; node->children_offset = 0; - node->draw_batches = NULL; + node->draw_batches = nullptr; // rebuild bm_other_verts BMFace *f; @@ -3193,7 +3193,7 
@@ static void BKE_pbvh_bmesh_correct_tree(PBVH *pbvh, PBVHNode *node, PBVHNode *pa } TGSET_ITER_END - BLI_table_gset_free(node->bm_other_verts, NULL); + BLI_table_gset_free(node->bm_other_verts, nullptr); node->bm_other_verts = other; BB_reset(&node->vb); @@ -3245,8 +3245,8 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh) n3->bm_unique_verts = BLI_table_gset_new("bm_unique_verts"); n3->bm_other_verts = BLI_table_gset_new("bm_other_verts"); n3->bm_faces = BLI_table_gset_new("bm_faces"); - n3->tribuf = NULL; - n3->draw_batches = NULL; + n3->tribuf = nullptr; + n3->draw_batches = nullptr; } else if ((n1->flag & PBVH_Delete) && (n2->flag & PBVH_Delete)) { n->children_offset = 0; @@ -3257,8 +3257,8 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh) n->bm_unique_verts = BLI_table_gset_new("bm_unique_verts"); n->bm_other_verts = BLI_table_gset_new("bm_other_verts"); n->bm_faces = BLI_table_gset_new("bm_faces"); - n->tribuf = NULL; - n->draw_batches = NULL; + n->tribuf = nullptr; + n->draw_batches = nullptr; } } } @@ -3267,7 +3267,7 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh) } } - int *map = MEM_callocN(sizeof(int) * bvh->totnode, "bmesh map temp"); + int *map = (int *)MEM_callocN(sizeof(int) * bvh->totnode, "bmesh map temp"); // build idx map for child offsets int j = 0; @@ -3280,18 +3280,18 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh) else if (1) { if (n->layer_disp) { MEM_freeN(n->layer_disp); - n->layer_disp = NULL; + n->layer_disp = nullptr; } pbvh_free_draw_buffers(bvh, n); if (n->vert_indices) { MEM_freeN((void *)n->vert_indices); - n->vert_indices = NULL; + n->vert_indices = nullptr; } if (n->face_vert_indices) { MEM_freeN((void *)n->face_vert_indices); - n->face_vert_indices = NULL; + n->face_vert_indices = nullptr; } if (n->tribuf || n->tri_buffers) { @@ -3299,18 +3299,18 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh) } if (n->bm_unique_verts) { - BLI_table_gset_free(n->bm_unique_verts, NULL); - n->bm_unique_verts = NULL; + BLI_table_gset_free(n->bm_unique_verts, nullptr); + n->bm_unique_verts = nullptr; } if (n->bm_other_verts) { - BLI_table_gset_free(n->bm_other_verts, NULL); - n->bm_other_verts = NULL; + BLI_table_gset_free(n->bm_other_verts, nullptr); + n->bm_other_verts = nullptr; } if (n->bm_faces) { - BLI_table_gset_free(n->bm_faces, NULL); - n->bm_faces = NULL; + BLI_table_gset_free(n->bm_faces, nullptr); + n->bm_faces = nullptr; } #ifdef PROXY_ADVANCED @@ -3391,8 +3391,7 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh) TGSET_ITER_END } - BMVert **scratch = NULL; - BLI_array_declare(scratch); + Vector scratch; for (int i = 0; i < bvh->totnode; i++) { PBVHNode *n = bvh->nodes + i; @@ -3401,29 +3400,28 @@ static void pbvh_bmesh_compact_tree(PBVH *bvh) continue; } - BLI_array_clear(scratch); + scratch.clear(); BMVert *v; TGSET_ITER (v, n->bm_other_verts) { int ni = BM_ELEM_CD_GET_INT(v, bvh->cd_vert_node_offset); if (ni == DYNTOPO_NODE_NONE) { - BLI_array_append(scratch, v); + scratch.append(v); } // BM_ELEM_CD_SET_INT(v, bvh->cd_vert_node_offset, i); } TGSET_ITER_END - int slen = BLI_array_len(scratch); + int slen = scratch.size(); for (int j = 0; j < slen; j++) { BMVert *v = scratch[j]; - BLI_table_gset_remove(n->bm_other_verts, v, NULL); + BLI_table_gset_remove(n->bm_other_verts, v, nullptr); BLI_table_gset_add(n->bm_unique_verts, v); BM_ELEM_CD_SET_INT(v, bvh->cd_vert_node_offset, i); } } - BLI_array_free(scratch); MEM_freeN(map); } @@ -3449,17 +3447,14 @@ static void recursive_delete_nodes(PBVH *pbvh, int ni) and then re-inserting them*/ static void 
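/* The balancing pass below (like pbvh_bmesh_compact_tree() above) walks the node tree
 * iteratively with blender::Vector used as a stack: append() pushes, pop_last() removes and
 * returns the top element, and the storage is freed automatically when the Vector goes out
 * of scope, replacing the old BLI_array_declare()/BLI_array_pop()/BLI_array_free() triple.
 * The generic shape of that traversal (a sketch, not a copy of the function below):
 *
 *   Vector<PBVHNode *> stack;
 *   stack.append(&pbvh->nodes[0]);                           // start at the root
 *   while (!stack.is_empty()) {
 *     PBVHNode *node = stack.pop_last();
 *     if (!(node->flag & PBVH_Leaf) && node->children_offset > 0) {
 *       stack.append(&pbvh->nodes[node->children_offset]);   // push both children
 *       stack.append(&pbvh->nodes[node->children_offset + 1]);
 *     }
 *   }
 */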
pbvh_bmesh_balance_tree(PBVH *pbvh) { - PBVHNode **stack = NULL; - float *overlaps = MEM_calloc_arrayN(pbvh->totnode, sizeof(float), "overlaps"); - PBVHNode **parentmap = MEM_calloc_arrayN(pbvh->totnode, sizeof(*parentmap), "parentmap"); - int *depthmap = MEM_calloc_arrayN(pbvh->totnode, sizeof(*depthmap), "depthmap"); - BLI_array_declare(stack); + Vector stack; + float *overlaps = (float *)MEM_calloc_arrayN(pbvh->totnode, sizeof(float), "overlaps"); + PBVHNode **parentmap = (PBVHNode **)MEM_calloc_arrayN( + pbvh->totnode, sizeof(*parentmap), "parentmap"); + int *depthmap = (int *)MEM_calloc_arrayN(pbvh->totnode, sizeof(*depthmap), "depthmap"); - BMFace **faces = NULL; - BLI_array_declare(faces); - - PBVHNode **substack = NULL; - BLI_array_declare(substack); + Vector faces; + Vector substack; for (int i = 0; i < pbvh->totnode; i++) { PBVHNode *node = pbvh->nodes + i; @@ -3497,9 +3492,9 @@ static void pbvh_bmesh_balance_tree(PBVH *pbvh) bool modified = false; - BLI_array_append(stack, pbvh->nodes); - while (BLI_array_len(stack) > 0) { - PBVHNode *node = BLI_array_pop(stack); + stack.append(pbvh->nodes); + while (stack.size() > 0) { + PBVHNode *node = stack.pop_last(); BB clip; if (!(node->flag & PBVH_Leaf) && node->children_offset > 0) { @@ -3517,7 +3512,7 @@ static void pbvh_bmesh_balance_tree(PBVH *pbvh) float factor; /* use higher threshold for the root node and its immediate children */ - switch (BLI_array_len(stack)) { + switch (stack.size()) { case 0: factor = 0.5; break; @@ -3531,7 +3526,7 @@ static void pbvh_bmesh_balance_tree(PBVH *pbvh) } #if 0 - for (int k = 0; k < BLI_array_len(stack); k++) { + for (int k = 0; k < stack.size(); k++) { printf(" "); } @@ -3547,13 +3542,12 @@ static void pbvh_bmesh_balance_tree(PBVH *pbvh) modified = true; // printf(" DELETE! 
%.4f %.4f %d\n", overlap, volume, BLI_array_len(stack)); - BLI_array_clear(substack); + substack.clear(); + substack.append(child1); + substack.append(child2); - BLI_array_append(substack, child1); - BLI_array_append(substack, child2); - - while (BLI_array_len(substack) > 0) { - PBVHNode *node2 = BLI_array_pop(substack); + while (substack.size() > 0) { + PBVHNode *node2 = substack.pop_last(); node2->flag |= PBVH_Delete; @@ -3568,7 +3562,7 @@ static void pbvh_bmesh_balance_tree(PBVH *pbvh) } BM_ELEM_CD_SET_INT(f, cd_face_node, DYNTOPO_NODE_NONE); - BLI_array_append(faces, f); + faces.append(f); } TGSET_ITER_END; @@ -3581,21 +3575,21 @@ static void pbvh_bmesh_balance_tree(PBVH *pbvh) TGSET_ITER_END; } else if (node2->children_offset > 0 && node2->children_offset < pbvh->totnode) { - BLI_array_append(substack, pbvh->nodes + node2->children_offset); + substack.append(pbvh->nodes + node2->children_offset); if (node2->children_offset + 1 < pbvh->totnode) { - BLI_array_append(substack, pbvh->nodes + node2->children_offset + 1); + substack.append(pbvh->nodes + node2->children_offset + 1); } } } } if (node->children_offset < pbvh->totnode) { - BLI_array_append(stack, child1); + stack.append(child1); } if (node->children_offset + 1 < pbvh->totnode) { - BLI_array_append(stack, child2); + stack.append(child2); } } } @@ -3603,9 +3597,9 @@ static void pbvh_bmesh_balance_tree(PBVH *pbvh) if (modified) { pbvh_bmesh_compact_tree(pbvh); - printf("joined nodes; %d faces\n", BLI_array_len(faces)); + printf("joined nodes; %d faces\n", faces.size()); - for (int i = 0; i < BLI_array_len(faces); i++) { + for (int i = 0; i < faces.size(); i++) { if (BM_elem_is_free((BMElem *)faces[i], BM_FACE)) { printf("corrupted face in pbvh tree; faces[i]: %p\n", faces[i]); continue; @@ -3620,12 +3614,8 @@ static void pbvh_bmesh_balance_tree(PBVH *pbvh) } } - BLI_array_free(faces); - MEM_SAFE_FREE(parentmap); MEM_SAFE_FREE(overlaps); - BLI_array_free(stack); - BLI_array_free(substack); MEM_SAFE_FREE(depthmap); } @@ -3636,7 +3626,7 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh) } pbvh_count_subtree_verts(bvh, bvh->nodes); - BKE_pbvh_bmesh_correct_tree(bvh, bvh->nodes, NULL); + BKE_pbvh_bmesh_correct_tree(bvh, bvh->nodes, nullptr); // compact nodes int totnode = 0; @@ -3656,8 +3646,8 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh) n3->bm_unique_verts = BLI_table_gset_new("bm_unique_verts"); n3->bm_other_verts = BLI_table_gset_new("bm_other_verts"); n3->bm_faces = BLI_table_gset_new("bm_faces"); - n3->tribuf = NULL; - n3->draw_batches = NULL; + n3->tribuf = nullptr; + n3->draw_batches = nullptr; } else if ((n1->flag & PBVH_Delete) && (n2->flag & PBVH_Delete)) { n->children_offset = 0; @@ -3670,8 +3660,8 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh) n->bm_faces = BLI_table_gset_new("bm_faces"); } - n->tribuf = NULL; - n->draw_batches = NULL; + n->tribuf = nullptr; + n->draw_batches = nullptr; } } @@ -3679,7 +3669,7 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh) } } - int *map = MEM_callocN(sizeof(int) * bvh->totnode, "bmesh map temp"); + int *map = (int *)MEM_callocN(sizeof(int) * bvh->totnode, "bmesh map temp"); for (int i = 0; i < bvh->totnode; i++) { for (int j = 0; j < bvh->totnode; j++) { @@ -3690,7 +3680,7 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh) if (bvh->nodes[i].draw_batches == bvh->nodes[j].draw_batches) { printf("%s: error %d %d\n", __func__, i, j); - bvh->nodes[j].draw_batches = NULL; + bvh->nodes[j].draw_batches = nullptr; } } } @@ -3706,18 +3696,18 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh) else { if 
(n->layer_disp) { MEM_freeN(n->layer_disp); - n->layer_disp = NULL; + n->layer_disp = nullptr; } pbvh_free_draw_buffers(bvh, n); if (n->vert_indices) { MEM_freeN((void *)n->vert_indices); - n->vert_indices = NULL; + n->vert_indices = nullptr; } if (n->face_vert_indices) { MEM_freeN((void *)n->face_vert_indices); - n->face_vert_indices = NULL; + n->face_vert_indices = nullptr; } if (n->tribuf || n->tri_buffers) { @@ -3725,18 +3715,18 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh) } if (n->bm_unique_verts) { - BLI_table_gset_free(n->bm_unique_verts, NULL); - n->bm_unique_verts = NULL; + BLI_table_gset_free(n->bm_unique_verts, nullptr); + n->bm_unique_verts = nullptr; } if (n->bm_other_verts) { - BLI_table_gset_free(n->bm_other_verts, NULL); - n->bm_other_verts = NULL; + BLI_table_gset_free(n->bm_other_verts, nullptr); + n->bm_other_verts = nullptr; } if (n->bm_faces) { - BLI_table_gset_free(n->bm_faces, NULL); - n->bm_faces = NULL; + BLI_table_gset_free(n->bm_faces, nullptr); + n->bm_faces = nullptr; } #ifdef PROXY_ADVANCED @@ -3818,8 +3808,7 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh) TGSET_ITER_END } - BMVert **scratch = NULL; - BLI_array_declare(scratch); + Vector scratch; for (int i = 0; i < bvh->totnode; i++) { PBVHNode *n = bvh->nodes + i; @@ -3828,29 +3817,28 @@ static void pbvh_bmesh_join_nodes(PBVH *bvh) continue; } - BLI_array_clear(scratch); + scratch.clear(); BMVert *v; TGSET_ITER (v, n->bm_other_verts) { int ni = BM_ELEM_CD_GET_INT(v, bvh->cd_vert_node_offset); if (ni == DYNTOPO_NODE_NONE) { - BLI_array_append(scratch, v); + scratch.append(v); } // BM_ELEM_CD_SET_INT(v, bvh->cd_vert_node_offset, i); } TGSET_ITER_END - int slen = BLI_array_len(scratch); + int slen = scratch.size(); for (int j = 0; j < slen; j++) { BMVert *v = scratch[j]; - BLI_table_gset_remove(n->bm_other_verts, v, NULL); + BLI_table_gset_remove(n->bm_other_verts, v, nullptr); BLI_table_gset_add(n->bm_unique_verts, v); BM_ELEM_CD_SET_INT(v, bvh->cd_vert_node_offset, i); } } - BLI_array_free(scratch); MEM_freeN(map); } @@ -3950,17 +3938,11 @@ void BKE_pbvh_update_offsets(PBVH *pbvh, static void scan_edge_split(BMesh *bm, BMEdge **edges, int totedge) { - BMFace **faces = NULL; - BMEdge **newedges = NULL; - BMVert **newverts = NULL; - BMVert **fmap = NULL; // newverts that maps to faces - int *emap = NULL; - - BLI_array_declare(faces); - BLI_array_declare(newedges); - BLI_array_declare(newverts); - BLI_array_declare(fmap); - BLI_array_declare(emap); + Vector faces; + Vector newedges; + Vector newverts; + Vector fmap; + Vector emap; // remove e from radial list of e->v2 for (int i = 0; i < totedge; i++) { @@ -3990,14 +3972,14 @@ static void scan_edge_split(BMesh *bm, BMEdge **edges, int totedge) for (int i = 0; i < totedge; i++) { BMEdge *e = edges[i]; - BMVert *v2 = BLI_mempool_alloc(bm->vpool); + BMVert *v2 = (BMVert *)BLI_mempool_alloc(bm->vpool); memset(v2, 0, sizeof(*v2)); v2->head.data = BLI_mempool_alloc(bm->vdata.pool); - BLI_array_append(newverts, v2); + newverts.append(v2); - BMEdge *e2 = BLI_mempool_alloc(bm->epool); - BLI_array_append(newedges, e2); + BMEdge *e2 = (BMEdge *)BLI_mempool_alloc(bm->epool); + newedges.append(e2); memset(e2, 0, sizeof(*e2)); if (bm->edata.pool) { @@ -4011,25 +3993,25 @@ static void scan_edge_split(BMesh *bm, BMEdge **edges, int totedge) } do { - BLI_array_append(faces, l->f); - BMFace *f2 = BLI_mempool_alloc(bm->fpool); + faces.append(l->f); + BMFace *f2 = (BMFace *)BLI_mempool_alloc(bm->fpool); - BLI_array_append(faces, l->f); - BLI_array_append(fmap, v2); - 
BLI_array_append(emap, i); + faces.append(l->f); + fmap.append(v2); + emap.append(i); - BLI_array_append(faces, f2); - BLI_array_append(fmap, v2); - BLI_array_append(emap, i); + faces.append(f2); + fmap.append(v2); + emap.append(i); memset(f2, 0, sizeof(*f2)); f2->head.data = BLI_mempool_alloc(bm->ldata.pool); - BMLoop *prev = NULL; - BMLoop *l2 = NULL; + BMLoop *prev = nullptr; + BMLoop *l2 = nullptr; for (int j = 0; j < 3; j++) { - l2 = BLI_mempool_alloc(bm->lpool); + l2 = (BMLoop *)BLI_mempool_alloc(bm->lpool); memset(l2, 0, sizeof(*l2)); l2->head.data = BLI_mempool_alloc(bm->ldata.pool); @@ -4046,12 +4028,12 @@ static void scan_edge_split(BMesh *bm, BMEdge **edges, int totedge) f2->l_first->prev = l2; l2->next = f2->l_first; - BLI_array_append(faces, f2); + faces.append(f2); l = l->radial_next; } while (l != e->l); } - for (int i = 0; i < BLI_array_len(newedges); i++) { + for (int i = 0; i < newedges.size(); i++) { BMEdge *e1 = edges[i]; BMEdge *e2 = newedges[i]; BMVert *v = newverts[i]; @@ -4069,7 +4051,7 @@ static void scan_edge_split(BMesh *bm, BMEdge **edges, int totedge) e2->v1_disk_link.next = e2->v1_disk_link.prev = e1; } - for (int i = 0; i < BLI_array_len(faces); i += 2) { + for (int i = 0; i < faces.size(); i += 2) { BMFace *f1 = faces[i], *f2 = faces[i + 1]; BMEdge *e1 = edges[emap[i]]; BMEdge *e2 = newedges[emap[i]]; @@ -4099,11 +4081,6 @@ static void scan_edge_split(BMesh *bm, BMEdge **edges, int totedge) l->next->v = nv; l->next->e = e2; } - - BLI_array_free(newedges); - BLI_array_free(newverts); - BLI_array_free(faces); - BLI_array_free(fmap); } #define MAX_RE_CHILD 3 @@ -4125,7 +4102,8 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) int leaf_limit = MAX2(limit / vsize, 4); BLI_mempool *pool = BLI_mempool_create(sizeof(ReVertNode) + sizeof(void *) * vsize, 0, 8192, 0); - ReVertNode **vnodemap = MEM_calloc_arrayN(pbvh->header.bm->totvert, sizeof(void *), "vnodemap"); + ReVertNode **vnodemap = (ReVertNode **)MEM_calloc_arrayN( + pbvh->header.bm->totvert, sizeof(void *), "vnodemap"); printf("leaf_limit: %d\n", leaf_limit); @@ -4139,26 +4117,25 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) v->head.index = i++; } - BMVert **stack = NULL; - BLI_array_declare(stack); + Vector stack; BM_ITER_MESH (v, &iter, pbvh->header.bm, BM_VERTS_OF_MESH) { if (v->head.hflag & flag) { continue; } - ReVertNode *node = BLI_mempool_calloc(pool); + ReVertNode *node = (ReVertNode *)BLI_mempool_calloc(pool); - BLI_array_clear(stack); - BLI_array_append(stack, v); + stack.clear(); + stack.append(v); v->head.hflag |= flag; vnodemap[v->head.index] = node; node->verts[node->totvert++] = v; - while (BLI_array_len(stack) > 0) { - BMVert *v2 = BLI_array_pop(stack); + while (stack.size() > 0) { + BMVert *v2 = stack.pop_last(); BMEdge *e; if (node->totvert >= leaf_limit) { @@ -4183,7 +4160,7 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) len++; - BLI_array_append(stack, v3); + stack.append(v3); } e = e->v1 == v2 ? 
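/* The edge-walk expression here advances to the next edge in v2's disk cycle, picking the
 * v1 or v2 disk link depending on which endpoint v2 is; it is the hand-written form of the
 * BM_DISK_EDGE_NEXT() macro used later in pbvh_colors.cc. Visiting every edge around a
 * vertex with the macro looks like:
 *
 *   BMEdge *e = v->e;
 *   do {
 *     // ... visit e ...
 *   } while ((e = BM_DISK_EDGE_NEXT(e, v)) != v->e);
 */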
e->v1_disk_link.next : e->v2_disk_link.next; @@ -4192,8 +4169,7 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) } const int steps = 4; - ReVertNode **roots = NULL; - BLI_array_declare(roots); + Vector roots; for (int step = 0; step < steps; step++) { const bool last_step = step == steps - 1; @@ -4210,7 +4186,7 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) continue; } - ReVertNode *parent = BLI_mempool_calloc(pool); + ReVertNode *parent = (ReVertNode *)BLI_mempool_calloc(pool); parent->children[0] = node; parent->totchild = 1; @@ -4239,7 +4215,7 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) } while (e != v->e); if (last_step) { - BLI_array_append(roots, parent); + roots.append(parent); } for (int j = 0; j < parent->totchild; j++) { @@ -4256,11 +4232,11 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) BLI_mempool_iter loopiter; BLI_mempool_iternew(pbvh->header.bm->lpool, &loopiter); - BMLoop *l = BLI_mempool_iterstep(&loopiter); + BMLoop *l = (BMLoop *)BLI_mempool_iterstep(&loopiter); BMEdge *e; BMFace *f; - for (i = 0; l; l = BLI_mempool_iterstep(&loopiter), i++) { + for (i = 0; l; l = (BMLoop *)BLI_mempool_iterstep(&loopiter), i++) { l->head.hflag &= ~flag; } BM_ITER_MESH (e, &iter, pbvh->header.bm, BM_EDGES_OF_MESH) { @@ -4271,19 +4247,18 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) f->head.hflag &= ~flag; } - int totroot = BLI_array_len(roots); - ReVertNode **nstack = NULL; - BLI_array_declare(nstack); + int totroot = roots.size(); + Vector nstack; int vorder = 0, eorder = 0, lorder = 0, forder = 0; for (i = 0; i < totroot; i++) { - BLI_array_clear(nstack); + nstack.clear(); ReVertNode *node = roots[i]; - BLI_array_append(nstack, node); + nstack.append(node); - while (BLI_array_len(nstack) > 0) { - ReVertNode *node2 = BLI_array_pop(nstack); + while (nstack.size() > 0) { + ReVertNode *node2 = nstack.pop_last(); if (node2->totchild == 0) { for (int j = 0; j < node2->totvert; j++) { @@ -4358,7 +4333,7 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) } else { for (int j = 0; j < node2->totchild; j++) { - BLI_array_append(nstack, node2->children[j]); + nstack.append(node2->children[j]); } } } @@ -4366,10 +4341,10 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) uint *vidx, *eidx, *lidx, *fidx; - vidx = MEM_malloc_arrayN(pbvh->header.bm->totvert, sizeof(*vidx), "vorder"); - eidx = MEM_malloc_arrayN(pbvh->header.bm->totedge, sizeof(*eidx), "eorder"); - lidx = MEM_malloc_arrayN(pbvh->header.bm->totloop, sizeof(*lidx), "lorder"); - fidx = MEM_malloc_arrayN(pbvh->header.bm->totface, sizeof(*fidx), "forder"); + vidx = (uint *)MEM_malloc_arrayN(pbvh->header.bm->totvert, sizeof(*vidx), "vorder"); + eidx = (uint *)MEM_malloc_arrayN(pbvh->header.bm->totedge, sizeof(*eidx), "eorder"); + lidx = (uint *)MEM_malloc_arrayN(pbvh->header.bm->totloop, sizeof(*lidx), "lorder"); + fidx = (uint *)MEM_malloc_arrayN(pbvh->header.bm->totface, sizeof(*fidx), "forder"); printf("v %d %d\n", vorder, pbvh->header.bm->totvert); printf("e %d %d\n", eorder, pbvh->header.bm->totedge); @@ -4388,9 +4363,9 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) } BLI_mempool_iternew(pbvh->header.bm->lpool, &loopiter); - l = BLI_mempool_iterstep(&loopiter); + l = (BMLoop *)BLI_mempool_iterstep(&loopiter); - for (i = 0; l; l = BLI_mempool_iterstep(&loopiter), i++) { + for (i = 0; l; l = (BMLoop *)BLI_mempool_iterstep(&loopiter), i++) { // handle orphaned loops if (!(l->head.hflag & flag)) { printf("warning in %s: orphaned loop!\n", __func__); @@ -4400,7 +4375,7 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) lidx[i] = (uint)l->head.index; } - printf("roots: 
%d\n", BLI_array_len(roots)); + printf("roots: %d\n", roots.size()); BM_mesh_remap(pbvh->header.bm, vidx, eidx, fidx, lidx); @@ -4418,6 +4393,7 @@ BMesh *BKE_pbvh_reorder_bmesh(PBVH *pbvh) return pbvh->header.bm; } +#if 0 BMesh *BKE_pbvh_reorder_bmesh2(PBVH *pbvh) { if (BKE_pbvh_type(pbvh) != PBVH_BMESH || pbvh->totnode == 0) { @@ -4437,7 +4413,7 @@ BMesh *BKE_pbvh_reorder_bmesh2(PBVH *pbvh) BMIter iter; int types[3] = {BM_VERTS_OF_MESH, BM_EDGES_OF_MESH, BM_FACES_OF_MESH}; -#define VISIT_TAG BM_ELEM_TAG +# define VISIT_TAG BM_ELEM_TAG BM_mesh_elem_index_ensure(pbvh->header.bm, BM_VERT | BM_EDGE | BM_FACE); BM_mesh_elem_table_ensure(pbvh->header.bm, BM_VERT | BM_EDGE | BM_FACE); @@ -4524,9 +4500,9 @@ BMesh *BKE_pbvh_reorder_bmesh2(PBVH *pbvh) CustomData_bmesh_init_pool(&bm2->ldata, pbvh->header.bm->totloop, BM_LOOP); CustomData_bmesh_init_pool(&bm2->pdata, pbvh->header.bm->totface, BM_FACE); - BMVert **verts = NULL; - BMEdge **edges = NULL; - BMFace **faces = NULL; + BMVert **verts = nullptr; + BMEdge **edges = nullptr; + BMFace **faces = nullptr; BLI_array_declare(verts); BLI_array_declare(edges); BLI_array_declare(faces); @@ -4534,7 +4510,7 @@ BMesh *BKE_pbvh_reorder_bmesh2(PBVH *pbvh) for (int i = 0; i < pbvh->totnode; i++) { for (int j = 0; j < nodedata[i].totvert; j++) { BMVert *v1 = nodedata[i].verts[j]; - BMVert *v2 = BM_vert_create(bm2, v1->co, NULL, BM_CREATE_NOP); + BMVert *v2 = BM_vert_create(bm2, v1->co, nullptr, BM_CREATE_NOP); BM_elem_attrs_copy_ex(pbvh->header.bm, bm2, v1, v2, 0, 0L); v2->head.index = v1->head.index = BLI_array_len(verts); @@ -4546,7 +4522,7 @@ BMesh *BKE_pbvh_reorder_bmesh2(PBVH *pbvh) for (int j = 0; j < nodedata[i].totedge; j++) { BMEdge *e1 = nodedata[i].edges[j]; BMEdge *e2 = BM_edge_create( - bm2, verts[e1->v1->head.index], verts[e1->v2->head.index], NULL, BM_CREATE_NOP); + bm2, verts[e1->v1->head.index], verts[e1->v2->head.index], nullptr, BM_CREATE_NOP); BM_elem_attrs_copy_ex(pbvh->header.bm, bm2, e1, e2, 0, 0L); e2->head.index = e1->head.index = BLI_array_len(edges); @@ -4554,8 +4530,8 @@ BMesh *BKE_pbvh_reorder_bmesh2(PBVH *pbvh) } } - BMVert **fvs = NULL; - BMEdge **fes = NULL; + BMVert **fvs = nullptr; + BMEdge **fes = nullptr; BLI_array_declare(fvs); BLI_array_declare(fes); @@ -4575,7 +4551,7 @@ BMesh *BKE_pbvh_reorder_bmesh2(PBVH *pbvh) totloop++; } while (l1 != f1->l_first); - BMFace *f2 = BM_face_create(bm2, fvs, fes, totloop, NULL, BM_CREATE_NOP); + BMFace *f2 = BM_face_create(bm2, fvs, fes, totloop, nullptr, BM_CREATE_NOP); f1->head.index = f2->head.index = BLI_array_len(faces); BLI_array_append(faces, f2); @@ -4624,9 +4600,9 @@ BMesh *BKE_pbvh_reorder_bmesh2(PBVH *pbvh) } TGSET_ITER_END; - BLI_table_gset_free(node->bm_faces, NULL); - BLI_table_gset_free(node->bm_other_verts, NULL); - BLI_table_gset_free(node->bm_unique_verts, NULL); + BLI_table_gset_free(node->bm_faces, nullptr); + BLI_table_gset_free(node->bm_other_verts, nullptr); + BLI_table_gset_free(node->bm_unique_verts, nullptr); node->bm_faces = bm_faces; node->bm_other_verts = bm_other_verts; @@ -4655,6 +4631,7 @@ BMesh *BKE_pbvh_reorder_bmesh2(PBVH *pbvh) return bm2; } +#endif typedef struct SortElem { BMElem *elem; @@ -4692,13 +4669,13 @@ BMesh *BKE_pbvh_reorder_bmesh1(PBVH *pbvh) { BMesh *bm = pbvh->header.bm; - int **save_other_vs = MEM_calloc_arrayN(pbvh->totnode, sizeof(int *), __func__); - int **save_unique_vs = MEM_calloc_arrayN(pbvh->totnode, sizeof(int *), __func__); - int **save_fs = MEM_calloc_arrayN(pbvh->totnode, sizeof(int *), __func__); + int **save_other_vs = (int 
**)MEM_calloc_arrayN(pbvh->totnode, sizeof(int *), __func__); + int **save_unique_vs = (int **)MEM_calloc_arrayN(pbvh->totnode, sizeof(int *), __func__); + int **save_fs = (int **)MEM_calloc_arrayN(pbvh->totnode, sizeof(int *), __func__); - SortElem *verts = MEM_malloc_arrayN(bm->totvert, sizeof(SortElem), __func__); - SortElem *edges = MEM_malloc_arrayN(bm->totedge, sizeof(SortElem), __func__); - SortElem *faces = MEM_malloc_arrayN(bm->totface, sizeof(SortElem), __func__); + SortElem *verts = (SortElem *)MEM_malloc_arrayN(bm->totvert, sizeof(SortElem), __func__); + SortElem *edges = (SortElem *)MEM_malloc_arrayN(bm->totedge, sizeof(SortElem), __func__); + SortElem *faces = (SortElem *)MEM_malloc_arrayN(bm->totface, sizeof(SortElem), __func__); BMIter iter; BMVert *v; @@ -4727,13 +4704,7 @@ BMesh *BKE_pbvh_reorder_bmesh1(PBVH *pbvh) } for (i = 0; i < pbvh->totnode; i++) { - int *other_vs = NULL; - int *unique_vs = NULL; - int *fs = NULL; - - BLI_array_declare(other_vs); - BLI_array_declare(unique_vs); - BLI_array_declare(fs); + Vector other_vs, unique_vs, fs; PBVHNode *node = pbvh->nodes + i; if (!(node->flag & PBVH_Leaf)) { @@ -4744,30 +4715,30 @@ BMesh *BKE_pbvh_reorder_bmesh1(PBVH *pbvh) BMFace *f; TGSET_ITER (v, node->bm_unique_verts) { - BLI_array_append(unique_vs, v->head.index); + unique_vs.append(v->head.index); } TGSET_ITER_END; TGSET_ITER (v, node->bm_other_verts) { - BLI_array_append(other_vs, v->head.index); + other_vs.append(v->head.index); } TGSET_ITER_END; TGSET_ITER (f, node->bm_faces) { - BLI_array_append(fs, f->head.index); + fs.append(f->head.index); } TGSET_ITER_END; - save_unique_vs[i] = unique_vs; - save_other_vs[i] = other_vs; - save_fs[i] = fs; + save_unique_vs[i] = copy_vector_to_c_array(__func__, unique_vs); + save_other_vs[i] = copy_vector_to_c_array(__func__, other_vs); + save_fs[i] = copy_vector_to_c_array(__func__, fs); } qsort(verts, bm->totvert, sizeof(SortElem), sort_verts_faces); qsort(edges, bm->totedge, sizeof(SortElem), sort_edges); qsort(faces, bm->totface, sizeof(SortElem), sort_verts_faces); - uint *vs = MEM_malloc_arrayN(bm->totvert, sizeof(int), __func__); - uint *es = MEM_malloc_arrayN(bm->totedge, sizeof(int), __func__); - uint *fs = MEM_malloc_arrayN(bm->totface, sizeof(int), __func__); + uint *vs = (uint *)MEM_malloc_arrayN(bm->totvert, sizeof(int), __func__); + uint *es = (uint *)MEM_malloc_arrayN(bm->totedge, sizeof(int), __func__); + uint *fs = (uint *)MEM_malloc_arrayN(bm->totface, sizeof(int), __func__); for (i = 0; i < bm->totvert; i++) { vs[i] = (uint)verts[i].index; @@ -4782,12 +4753,12 @@ BMesh *BKE_pbvh_reorder_bmesh1(PBVH *pbvh) faces[i].elem->head.index = faces[i].index; } - BM_mesh_remap(bm, vs, es, fs, NULL); + BM_mesh_remap(bm, vs, es, fs, nullptr); // create new mappings - BMVert **mapvs = MEM_malloc_arrayN(bm->totvert, sizeof(BMVert *), __func__); - BMEdge **mapes = MEM_malloc_arrayN(bm->totedge, sizeof(BMEdge *), __func__); - BMFace **mapfs = MEM_malloc_arrayN(bm->totface, sizeof(BMFace *), __func__); + BMVert **mapvs = (BMVert **)MEM_malloc_arrayN(bm->totvert, sizeof(BMVert *), __func__); + BMEdge **mapes = (BMEdge **)MEM_malloc_arrayN(bm->totedge, sizeof(BMEdge *), __func__); + BMFace **mapfs = (BMFace **)MEM_malloc_arrayN(bm->totface, sizeof(BMFace *), __func__); BM_ITER_MESH (v, &iter, bm, BM_VERTS_OF_MESH) { mapvs[v->head.index] = v; @@ -4811,9 +4782,9 @@ BMesh *BKE_pbvh_reorder_bmesh1(PBVH *pbvh) int tot_other_vs = BLI_table_gset_len(node->bm_other_verts); int tot_fs = BLI_table_gset_len(node->bm_faces); - 
BLI_table_gset_free(node->bm_unique_verts, NULL); - BLI_table_gset_free(node->bm_other_verts, NULL); - BLI_table_gset_free(node->bm_faces, NULL); + BLI_table_gset_free(node->bm_unique_verts, nullptr); + BLI_table_gset_free(node->bm_other_verts, nullptr); + BLI_table_gset_free(node->bm_faces, nullptr); node->bm_unique_verts = BLI_table_gset_new("bm_unique_verts"); node->bm_other_verts = BLI_table_gset_new("bm_other_verts"); @@ -4981,7 +4952,7 @@ typedef struct MeshTest2 { static MeshTest2 *meshtest2_from_bm(BMesh *bm) { - MeshTest2 *m2 = MEM_callocN(sizeof(MeshTest2), "MeshTest2"); + MeshTest2 *m2 = (MeshTest2 *)MEM_callocN(sizeof(MeshTest2), "MeshTest2"); m2->arena = BLI_memarena_new(1024 * 32, "MeshTest2 arena"); m2->totvert = bm->totvert; @@ -5005,10 +4976,10 @@ static MeshTest2 *meshtest2_from_bm(BMesh *bm) m2->totloop = lindex; - m2->verts = MEM_calloc_arrayN(bm->totvert, sizeof(MeshVert2), "MeshVert2s"); - m2->edges = MEM_calloc_arrayN(bm->totedge, sizeof(MeshEdge2), "MeshEdge2s"); - m2->loops = MEM_calloc_arrayN(m2->totloop, sizeof(MeshLoop2), "MeshLoop2s"); - m2->faces = MEM_calloc_arrayN(bm->totface, sizeof(MeshFace2), "MeshFace2s"); + m2->verts = (MeshVert2 *)MEM_calloc_arrayN(bm->totvert, sizeof(MeshVert2), "MeshVert2s"); + m2->edges = (MeshEdge2 *)MEM_calloc_arrayN(bm->totedge, sizeof(MeshEdge2), "MeshEdge2s"); + m2->loops = (MeshLoop2 *)MEM_calloc_arrayN(m2->totloop, sizeof(MeshLoop2), "MeshLoop2s"); + m2->faces = (MeshFace2 *)MEM_calloc_arrayN(bm->totface, sizeof(MeshFace2), "MeshFace2s"); bm->elem_index_dirty |= BM_VERT | BM_EDGE | BM_FACE; BM_mesh_elem_index_ensure(bm, BM_VERT | BM_EDGE | BM_FACE); @@ -5066,32 +5037,32 @@ static void free_meshtest2(MeshTest2 *m2) static MeshTest *meshtest_from_bm(BMesh *bm) { - MeshTest *m = MEM_callocN(sizeof(MeshTest), "MeshTest"); + MeshTest *m = (MeshTest *)MEM_callocN(sizeof(MeshTest), "MeshTest"); m->arena = BLI_memarena_new(1024 * 32, "m->arena"); - m->v_co = BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_co)); - m->v_no = BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_no)); - m->v_e = BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_e)); - m->v_flag = BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_flag)); - m->v_index = BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_index)); + m->v_co = (float(*)[3])BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_co)); + m->v_no = (float(*)[3])BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_no)); + m->v_e = (int *)BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_e)); + m->v_flag = (int *)BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_flag)); + m->v_index = (int *)BLI_memarena_alloc(m->arena, bm->totvert * sizeof(*m->v_index)); - m->e_v1 = BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); - m->e_v1_next = BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); - m->e_v2 = BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); - m->e_v2_next = BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); - m->e_l = BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); - m->e_index = BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); - m->e_flag = BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); + m->e_v1 = (int *)BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); + m->e_v1_next = (int *)BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); + m->e_v2 = (int *)BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); + m->e_v2_next = (int 
*)BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); + m->e_l = (int *)BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); + m->e_index = (int *)BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); + m->e_flag = (int *)BLI_memarena_alloc(m->arena, bm->totedge * sizeof(*m->e_v1)); - m->l_v = BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); - m->l_e = BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); - m->l_f = BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); - m->l_next = BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); - m->l_prev = BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); - m->l_radial_next = BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); - m->l_radial_prev = BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); + m->l_v = (int *)BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); + m->l_e = (int *)BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); + m->l_f = (int *)BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); + m->l_next = (int *)BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); + m->l_prev = (int *)BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); + m->l_radial_next = (int *)BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); + m->l_radial_prev = (int *)BLI_memarena_alloc(m->arena, bm->totloop * sizeof(*m->l_e)); - m->f_l = BLI_memarena_alloc(m->arena, bm->totface * sizeof(*m->f_l)); + m->f_l = (int *)BLI_memarena_alloc(m->arena, bm->totface * sizeof(*m->f_l)); m->totvert = bm->totvert; m->totedge = bm->totedge; @@ -5239,7 +5210,7 @@ double pbvh_meshtest2_smooth_test(MeshTest2 *m2, PBVH *pbvh) for (int i = 0; i < m2->totvert; i++) { int vi = BLI_rng_get_int(rng) % m2->totvert; MeshVert2 *v = m2->verts + vi; - MeshEdge2 *e = v->e != -1 ? m2->edges + v->e : NULL; + MeshEdge2 *e = v->e != -1 ? m2->edges + v->e : nullptr; float co[3]; zero_v3(co); @@ -5402,14 +5373,15 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out) printf("building test mesh. . 
.\n"); BMAllocTemplate templ = {0, 0, 0, 0}; + BMeshCreateParams params2 = {0}; - BMesh *bm = BM_mesh_create( - &templ, - &((struct BMeshCreateParams){.id_elem_mask = BM_VERT | BM_EDGE | BM_FACE, - .id_map = true, - .create_unique_ids = true, - .temporary_ids = false, - .no_reuse_ids = false})); + params2.id_elem_mask = BM_VERT | BM_EDGE | BM_FACE; + params2.id_map = true; + params2.create_unique_ids = true; + params2.temporary_ids = false; + params2.no_reuse_ids = false; + + BMesh *bm = BM_mesh_create(&templ, ¶ms2); // reinit pools BLI_mempool_destroy(bm->vpool); @@ -5428,7 +5400,7 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out) int hashdimen = steps * 8; - BMVert **grid = MEM_malloc_arrayN(steps * steps, sizeof(*grid), "bmvert grid"); + BMVert **grid = (BMVert **)MEM_malloc_arrayN(steps * steps, sizeof(*grid), "bmvert grid"); BM_data_layer_add_named(bm, &bm->vdata, CD_PROP_INT32, "__dyntopo_vert_node"); BM_data_layer_add_named(bm, &bm->pdata, CD_PROP_INT32, "__dyntopo_face_node"); @@ -5477,10 +5449,10 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out) v); #endif - void **val = NULL; + void **val = nullptr; if (!BLI_ghash_ensure_p(vhash, key, &val)) { - BMVert *v2 = BM_vert_create(bm, co, NULL, BM_CREATE_NOP); + BMVert *v2 = BM_vert_create(bm, co, nullptr, BM_CREATE_NOP); *val = (void *)v2; } @@ -5511,11 +5483,11 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out) if (sign < 0) { BMVert *vs[4] = {v4, v3, v2, v1}; - BM_face_create_verts(bm, vs, 4, NULL, BM_CREATE_NOP, true); + BM_face_create_verts(bm, vs, 4, nullptr, BM_CREATE_NOP, true); } else { BMVert *vs[4] = {v1, v2, v3, v4}; - BM_face_create_verts(bm, vs, 4, NULL, BM_CREATE_NOP, true); + BM_face_create_verts(bm, vs, 4, nullptr, BM_CREATE_NOP, true); } } } @@ -5528,7 +5500,7 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out) RNG *rng = BLI_rng_new(0); for (uint i = 0; i < 4; i++) { - rands[i] = MEM_malloc_arrayN(tots[i], sizeof(uint), "rands[i]"); + rands[i] = (uint *)MEM_malloc_arrayN(tots[i], sizeof(uint), "rands[i]"); for (uint j = 0; j < tots[i]; j++) { rands[i][j] = j; @@ -5547,7 +5519,7 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out) } BLI_rng_free(rng); - BLI_ghash_free(vhash, NULL, NULL); + BLI_ghash_free(vhash, nullptr, nullptr); MEM_SAFE_FREE(grid); printf("totvert: %d, totface: %d, tottri: %d\n", bm->totvert, bm->totface, bm->totface * 2); @@ -5577,7 +5549,7 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out) BM_mesh_elem_index_ensure(bm, BM_VERT | BM_EDGE | BM_FACE); BKE_pbvh_build_bmesh(pbvh, - NULL, + nullptr, bm, false, bmlog, @@ -5601,7 +5573,7 @@ void pbvh_bmesh_cache_test(CacheParams *params, BMesh **r_bm, PBVH **r_pbvh_out) sizeof(BMLoop) * (size_t)bm->totloop + sizeof(BMFace) * (size_t)bm->totface; double times[4]; - char *names[4]; + const char *names[4]; int cd_overhead = 0; CustomData *cdatas[4] = {&bm->vdata, &bm->edata, &bm->ldata, &bm->pdata}; @@ -5689,7 +5661,7 @@ static void hash_test() { const int count = 1024 * 1024 * 4; - int *data = MEM_callocN(sizeof(*data) * count, "test data"); + int *data = (int *)MEM_callocN(sizeof(*data) * count, "test data"); TableGSet *gs = BLI_table_gset_new("test"); GHash *gh = BLI_ghash_ptr_new("test"); @@ -5707,7 +5679,7 @@ static void hash_test() for (int i = 0; i < count; i++) { int ri = BLI_rng_get_int(rng) % count; - int *ptr = POINTER_FROM_INT(data[ri]); + int *ptr = (int 
*)POINTER_FROM_INT(data[ri]); BLI_table_gset_add(gs, ptr); } @@ -5718,7 +5690,7 @@ static void hash_test() for (int i = 0; i < count; i++) { int ri = BLI_rng_get_int(rng) % count; - int *ptr = POINTER_FROM_INT(data[ri]); + int *ptr = (int *)POINTER_FROM_INT(data[ri]); BLI_ghash_insert(gh, ptr, POINTER_FROM_INT(i)); } @@ -5729,7 +5701,7 @@ static void hash_test() for (int i = 0; i < count; i++) { int ri = BLI_rng_get_int(rng) % count; - int *ptr = POINTER_FROM_INT(data[ri]); + int *ptr = (int *)POINTER_FROM_INT(data[ri]); BLI_smallhash_insert(&sh, (uintptr_t)ptr, POINTER_FROM_INT(i)); } @@ -5740,7 +5712,7 @@ static void hash_test() for (int i = 0; i < count; i++) { int ri = BLI_rng_get_int(rng) % count; - int *ptr = POINTER_FROM_INT(data[ri]); + int *ptr = (int *)POINTER_FROM_INT(data[ri]); BLI_ghash_lookup(gh, ptr); } @@ -5751,21 +5723,21 @@ static void hash_test() for (int i = 0; i < count; i++) { int ri = BLI_rng_get_int(rng) % count; - int *ptr = POINTER_FROM_INT(data[ri]); + int *ptr = (int *)POINTER_FROM_INT(data[ri]); BLI_smallhash_lookup(&sh, (uintptr_t)ptr); } printf(" %.3f\n", PIL_check_seconds_timer() - t); BLI_rng_free(rng); - BLI_ghash_free(gh, NULL, NULL); + BLI_ghash_free(gh, nullptr, nullptr); BLI_smallhash_release(&sh); - BLI_table_gset_free(gs, NULL); + BLI_table_gset_free(gs, nullptr); MEM_freeN(data); } -void pbvh_bmesh_do_cache_test() +extern "C" void pbvh_bmesh_do_cache_test() { for (int i = 0; i < 15; i++) { printf("\n\n====== %d of %d =====\n", i + 1, 15); @@ -5801,10 +5773,10 @@ void BKE_pbvh_bmesh_save_indices(PBVH *pbvh) continue; } - node->prim_indices = MEM_calloc_arrayN(1 + node->bm_faces->length + - node->bm_unique_verts->length, - sizeof(int), - "saved bmesh indices"); + node->prim_indices = (int *)MEM_calloc_arrayN(1 + node->bm_faces->length + + node->bm_unique_verts->length, + sizeof(int), + "saved bmesh indices"); int j = 0; @@ -5868,8 +5840,7 @@ void BKE_pbvh_bmesh_from_saved_indices(PBVH *pbvh) BM_mesh_elem_table_ensure(pbvh->header.bm, BM_VERT | BM_EDGE | BM_FACE); BM_mesh_elem_index_ensure(pbvh->header.bm, BM_VERT | BM_EDGE | BM_FACE); - BMLoop **ltable = NULL; - BLI_array_declare(ltable); + Vector ltable; BMFace *f; BMIter iter; @@ -5880,7 +5851,7 @@ void BKE_pbvh_bmesh_from_saved_indices(PBVH *pbvh) do { l->head.index = i++; - BLI_array_append(ltable, l); + ltable.append(l); } while ((l = l->next) != f->l_first); } @@ -5891,11 +5862,11 @@ void BKE_pbvh_bmesh_from_saved_indices(PBVH *pbvh) continue; } - BLI_table_gset_free(node->bm_unique_verts, NULL); - BLI_table_gset_free(node->bm_faces, NULL); + BLI_table_gset_free(node->bm_unique_verts, nullptr); + BLI_table_gset_free(node->bm_faces, nullptr); if (node->bm_other_verts) { - BLI_table_gset_free(node->bm_other_verts, NULL); + BLI_table_gset_free(node->bm_other_verts, nullptr); } node->bm_other_verts = BLI_table_gset_new("bm_other_verts"); @@ -5962,11 +5933,9 @@ void BKE_pbvh_bmesh_from_saved_indices(PBVH *pbvh) } } - node->prim_indices = NULL; + node->prim_indices = nullptr; node->totprim = 0; } - - BLI_array_free(ltable); } static void pbvh_bmesh_fetch_cdrefs(PBVH *pbvh) diff --git a/source/blender/blenkernel/intern/pbvh_colors.cc b/source/blender/blenkernel/intern/pbvh_colors.cc new file mode 100644 index 00000000000..917ca6c55d0 --- /dev/null +++ b/source/blender/blenkernel/intern/pbvh_colors.cc @@ -0,0 +1,300 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/** \file + * \ingroup bke + */ + +#include "MEM_guardedalloc.h" + +#include "BLI_utildefines.h" + +#include "BLI_bitmap.h" +#include 
"BLI_ghash.h" +#include "BLI_index_range.hh" +#include "BLI_math.h" +#include "BLI_rand.h" +#include "BLI_span.hh" +#include "BLI_task.h" + +#include "DNA_mesh_types.h" +#include "DNA_meshdata_types.h" + +#include "BKE_attribute.h" +#include "BKE_ccg.h" +#include "BKE_mesh.h" +#include "BKE_mesh_mapping.h" +#include "BKE_paint.h" +#include "BKE_pbvh.h" +#include "BKE_subdiv_ccg.h" + +#include "PIL_time.h" + +#include "bmesh.h" + +#include "atomic_ops.h" + +#include "pbvh_intern.hh" + +#include + +using blender::IndexRange; + +namespace blender::bke { + +template +inline void to_static_color_type(const eCustomDataType type, const Func &func) +{ + switch (type) { + case CD_PROP_COLOR: + func(MPropCol()); + break; + case CD_PROP_BYTE_COLOR: + func(MLoopCol()); + break; + default: + BLI_assert_unreachable(); + break; + } +} + +template void to_float(const T &src, float dst[4]); + +template<> void to_float(const MLoopCol &src, float dst[4]) +{ + rgba_uchar_to_float(dst, reinterpret_cast(&src)); + srgb_to_linearrgb_v3_v3(dst, dst); +} +template<> void to_float(const MPropCol &src, float dst[4]) +{ + copy_v4_v4(dst, src.color); +} + +template void from_float(const float src[4], T &dst); + +template<> void from_float(const float src[4], MLoopCol &dst) +{ + float temp[4]; + linearrgb_to_srgb_v3_v3(temp, src); + temp[3] = src[3]; + rgba_float_to_uchar(reinterpret_cast(&dst), temp); +} +template<> void from_float(const float src[4], MPropCol &dst) +{ + copy_v4_v4(dst.color, src); +} + +template +static void pbvh_vertex_color_get(const PBVH &pbvh, PBVHVertRef vertex, float r_color[4]) +{ + int index = vertex.i; + + if (pbvh.color_domain == ATTR_DOMAIN_CORNER) { + const MeshElemMap &melem = pbvh.pmap->pmap[index]; + + int count = 0; + zero_v4(r_color); + for (const int i_poly : Span(melem.indices, melem.count)) { + const MPoly &mp = pbvh.mpoly[i_poly]; + Span colors{static_cast(pbvh.color_layer->data) + mp.loopstart, mp.totloop}; + Span loops{pbvh.mloop + mp.loopstart, mp.totloop}; + + for (const int i_loop : IndexRange(mp.totloop)) { + if (loops[i_loop].v == index) { + float temp[4]; + to_float(colors[i_loop], temp); + + add_v4_v4(r_color, temp); + count++; + } + } + } + + if (count) { + mul_v4_fl(r_color, 1.0f / float(count)); + } + } + else { + to_float(static_cast(pbvh.color_layer->data)[index], r_color); + } +} + +template +static void pbvh_vertex_color_set(PBVH &pbvh, PBVHVertRef vertex, const float color[4]) +{ + int index = vertex.i; + + if (pbvh.color_domain == ATTR_DOMAIN_CORNER) { + const MeshElemMap &melem = pbvh.pmap->pmap[index]; + + for (const int i_poly : Span(melem.indices, melem.count)) { + const MPoly &mp = pbvh.mpoly[i_poly]; + MutableSpan colors{static_cast(pbvh.color_layer->data) + mp.loopstart, mp.totloop}; + Span loops{pbvh.mloop + mp.loopstart, mp.totloop}; + + for (const int i_loop : IndexRange(mp.totloop)) { + if (loops[i_loop].v == index) { + from_float(color, colors[i_loop]); + } + } + } + } + else { + from_float(color, static_cast(pbvh.color_layer->data)[index]); + } +} + +template +static void pbvh_vertex_color_get_bmesh(const PBVH &pbvh, PBVHVertRef vertex, float r_color[4]) +{ + BMVert *v = reinterpret_cast(vertex.i); + + if (pbvh.color_domain == ATTR_DOMAIN_CORNER) { + float4 color = {}; + int count = 0; + + int cd_color = pbvh.cd_vcol_offset; + + BMEdge *e = v->e; + do { + BMLoop *l = e->l; + + if (!l) { + continue; + } + + if (l->v != v) { + l = l->next; + } + + float4 color2; + T vcol = *reinterpret_cast BM_ELEM_CD_GET_VOID_P(l, cd_color); + + to_float(vcol, 
color2); + + color += color2; + count++; + } while ((e = BM_DISK_EDGE_NEXT(e, v)) != v->e); + + if (count > 0) { + color *= 1.0f / (float)count; + } + + copy_v4_v4(r_color, color); + } + else { + to_float(*static_cast(BM_ELEM_CD_GET_VOID_P(v, pbvh.cd_vcol_offset)), r_color); + } +} + +template +static void pbvh_vertex_color_set_bmesh(PBVH &pbvh, PBVHVertRef vertex, const float color[4]) +{ + int index = vertex.i; + + BMVert *v = reinterpret_cast(vertex.i); + + if (pbvh.color_domain == ATTR_DOMAIN_CORNER) { + BMEdge *e = v->e; + int cd_color = pbvh.cd_vcol_offset; + + do { + BMLoop *l = e->l; + do { + T *l_color; + + if (l->v != v) { + l_color = reinterpret_cast(BM_ELEM_CD_GET_VOID_P(l->next, cd_color)); + } + else { + l_color = reinterpret_cast(BM_ELEM_CD_GET_VOID_P(l, cd_color)); + } + + from_float(color, *l_color); + } while ((l = l->radial_next) != e->l); + + } while ((e = BM_DISK_EDGE_NEXT(e, v)) != v->e); + } + else { + from_float(color, *reinterpret_cast(BM_ELEM_CD_GET_VOID_P(v, pbvh.cd_vcol_offset))); + } +} + +} // namespace blender::bke + +extern "C" { +void BKE_pbvh_vertex_color_get(const PBVH *pbvh, PBVHVertRef vertex, float r_color[4]) +{ + blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { + using T = decltype(dummy); + + if (BKE_pbvh_type(pbvh) == PBVH_BMESH) { + blender::bke::pbvh_vertex_color_get_bmesh(*pbvh, vertex, r_color); + } + else { + blender::bke::pbvh_vertex_color_get(*pbvh, vertex, r_color); + } + }); +} + +void BKE_pbvh_vertex_color_set(PBVH *pbvh, PBVHVertRef vertex, const float color[4]) +{ + blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { + using T = decltype(dummy); + + if (BKE_pbvh_type(pbvh) == PBVH_BMESH) { + blender::bke::pbvh_vertex_color_set_bmesh(*pbvh, vertex, color); + } + else { + blender::bke::pbvh_vertex_color_set(*pbvh, vertex, color); + } + }); +} + +void BKE_pbvh_swap_colors(PBVH *pbvh, + const int *indices, + const int indices_num, + float (*r_colors)[4]) +{ + blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { + using T = decltype(dummy); + T *pbvh_colors = static_cast(pbvh->color_layer->data); + for (const int i : IndexRange(indices_num)) { + T temp = pbvh_colors[indices[i]]; + blender::bke::from_float(r_colors[i], pbvh_colors[indices[i]]); + blender::bke::to_float(temp, r_colors[i]); + } + }); +} + +void BKE_pbvh_store_colors(PBVH *pbvh, + const int *indices, + const int indices_num, + float (*r_colors)[4]) +{ + blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { + using T = decltype(dummy); + T *pbvh_colors = static_cast(pbvh->color_layer->data); + for (const int i : IndexRange(indices_num)) { + blender::bke::to_float(pbvh_colors[indices[i]], r_colors[i]); + } + }); +} + +void BKE_pbvh_store_colors_vertex(PBVH *pbvh, + const int *indices, + const int indices_num, + float (*r_colors)[4]) +{ + if (pbvh->color_domain == ATTR_DOMAIN_POINT) { + BKE_pbvh_store_colors(pbvh, indices, indices_num, r_colors); + } + else { + blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) { + using T = decltype(dummy); + for (const int i : IndexRange(indices_num)) { + blender::bke::pbvh_vertex_color_get(*pbvh, BKE_pbvh_make_vref(indices[i]), r_colors[i]); + } + }); + } +} +} diff --git a/source/blender/blenkernel/intern/pbvh_displacement.c b/source/blender/blenkernel/intern/pbvh_displacement.c index d42457f4100..61550c2095a 100644 --- 
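/* Usage sketch for the to_static_color_type() dispatcher defined in pbvh_colors.cc above:
 * it maps the runtime eCustomDataType of the active color layer to a compile-time type by
 * invoking the functor with a default-constructed dummy value, so the per-type
 * to_float()/from_float() specializations are resolved statically. This is the same pattern
 * the extern "C" wrappers use (variable names here are illustrative only):
 *
 *   blender::bke::to_static_color_type(eCustomDataType(pbvh->color_layer->type), [&](auto dummy) {
 *     using T = decltype(dummy);                        // MPropCol or MLoopCol
 *     T *colors = static_cast<T *>(pbvh->color_layer->data);
 *     float linear[4];
 *     blender::bke::to_float(colors[0], linear);        // byte colors are converted sRGB -> linear
 *   });
 */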
a/source/blender/blenkernel/intern/pbvh_displacement.c +++ b/source/blender/blenkernel/intern/pbvh_displacement.c @@ -33,7 +33,7 @@ # include "DNA_object_types.h" # include "DNA_scene_types.h" -# include "pbvh_intern.h" +# include "pbvh_intern.hh" # include "bmesh.h" diff --git a/source/blender/blenkernel/intern/pbvh_intern.h b/source/blender/blenkernel/intern/pbvh_intern.hh similarity index 96% rename from source/blender/blenkernel/intern/pbvh_intern.h rename to source/blender/blenkernel/intern/pbvh_intern.hh index 2af63e0c3b2..3b80a3e3c6c 100644 --- a/source/blender/blenkernel/intern/pbvh_intern.h +++ b/source/blender/blenkernel/intern/pbvh_intern.hh @@ -26,32 +26,28 @@ struct MSculptVert; struct CustomData; struct PBVHTriBuf; -#ifdef __cplusplus -extern "C" { -#endif - +struct PBVHGPUFormat; struct MLoop; struct MLoopTri; struct BMIdMap; struct MPoly; +struct MeshElemMap; /* Axis-aligned bounding box */ -typedef struct { +struct BB { float bmin[3], bmax[3]; -} BB; +}; /* Axis-aligned bounding box with centroid */ -typedef struct { +struct BBC { float bmin[3], bmax[3], bcentroid[3]; -} BBC; - -struct MeshElemMap; +}; /* NOTE: this structure is getting large, might want to split it into * union'd structs */ struct PBVHNode { /* Opaque handle for drawing code */ - struct PBVHBatches *draw_batches; + PBVHBatches *draw_batches; /* Voxel bounds */ BB vb; @@ -119,7 +115,7 @@ struct PBVHNode { * marking various updates that need to be applied. */ PBVHNodeFlags flag; - /* Used for raycasting: how close bb is to the ray point. */ + /* Used for ray-casting: how close the bounding-box is to the ray point. */ float tmin; /* Scalar displacements for sculpt mode's layer brush. */ @@ -158,12 +154,13 @@ typedef enum { PBVH_FAST_DRAW = 2, // hides facesets/masks and forces smooth to save GPU bandwidth PBVH_IGNORE_UVS = 4 } PBVHFlags; +ENUM_OPERATORS(PBVHFlags, PBVH_IGNORE_UVS); typedef struct PBVHBMeshLog PBVHBMeshLog; struct DMFlagMat; struct PBVH { - struct PBVHPublic header; + PBVHPublic header; PBVHFlags flags; int idgen; @@ -180,25 +177,26 @@ struct PBVH { int faces_num; /* Do not use directly, use BKE_pbvh_num_faces. */ int leaf_limit; + int pixel_leaf_limit; + int depth_limit; /* Mesh data */ struct MeshElemMap *vemap; SculptPMap *pmap; - struct Mesh *mesh; + Mesh *mesh; /* NOTE: Normals are not `const` because they can be updated for drawing by sculpt code. */ float (*vert_normals)[3]; bool *hide_vert; float (*vert_positions)[3]; - const struct MPoly *mpoly; + const MPoly *mpoly; bool *hide_poly; /** Material indices. Only valid for polygon meshes. */ const int *material_indices; - const struct MLoop *mloop; - const struct MLoopTri *looptri; + const MLoop *mloop; + const MLoopTri *looptri; struct MSculptVert *msculptverts; - CustomData *vdata; CustomData *ldata; CustomData *pdata; @@ -281,14 +279,14 @@ struct PBVH { /* Used by DynTopo to invalidate the draw cache. 
*/ bool draw_cache_invalid; - struct PBVHGPUFormat *vbo_id; int *boundary_flags; int cd_boundary_flag; + PBVHGPUFormat *vbo_id; PBVHPixels pixels; }; -/* pbvh.c */ +/* pbvh.cc */ void BB_reset(BB *bb); /** @@ -309,14 +307,14 @@ float BB_volume(const BB *bb); void pbvh_grow_nodes(PBVH *bvh, int totnode); bool ray_face_intersection_quad(const float ray_start[3], - struct IsectRayPrecalc *isect_precalc, + IsectRayPrecalc *isect_precalc, const float t0[3], const float t1[3], const float t2[3], const float t3[3], float *depth); bool ray_face_intersection_tri(const float ray_start[3], - struct IsectRayPrecalc *isect_precalc, + IsectRayPrecalc *isect_precalc, const float t0[3], const float t1[3], const float t2[3], @@ -348,6 +346,7 @@ bool ray_face_intersection_depth_tri(const float ray_start[3], float *r_depth, float *r_back_depth, int *hit_count); +/* pbvh_bmesh.cc */ /* pbvh_bmesh.c */ bool pbvh_bmesh_node_raycast(PBVH *pbvh, @@ -483,6 +482,3 @@ BLI_INLINE void pbvh_boundary_update_bmesh(PBVH *pbvh, BMVert *v) *flags |= SCULPT_BOUNDARY_NEEDS_UPDATE; } -#ifdef __cplusplus -} -#endif diff --git a/source/blender/blenkernel/intern/pbvh_pixels.cc b/source/blender/blenkernel/intern/pbvh_pixels.cc index 39651349ae9..f18345b9eab 100644 --- a/source/blender/blenkernel/intern/pbvh_pixels.cc +++ b/source/blender/blenkernel/intern/pbvh_pixels.cc @@ -15,22 +15,18 @@ #include "BLI_math.h" #include "BLI_task.h" +#include "PIL_time.h" +#include "BKE_global.h" #include "BKE_image_wrappers.hh" #include "bmesh.h" -#include "pbvh_intern.h" +#include "pbvh_intern.hh" #include "pbvh_uv_islands.hh" namespace blender::bke::pbvh::pixels { -/** - * During debugging this check could be enabled. - * It will write to each image pixel that is covered by the PBVH. - */ -constexpr bool USE_WATERTIGHT_CHECK = false; - /** * Calculate the delta of two neighbor UV coordinates in the given image buffer. 
*/ @@ -57,6 +53,311 @@ static float2 calc_barycentric_delta_x(const ImBuf *image_buffer, return calc_barycentric_delta(uvs, start_uv, end_uv); } +static int count_node_pixels(PBVHNode &node) +{ + if (!node.pixels.node_data) { + return 0; + } + + NodeData &data = BKE_pbvh_pixels_node_data_get(node); + + int totpixel = 0; + + for (UDIMTilePixels &tile : data.tiles) { + for (PackedPixelRow &row : tile.pixel_rows) { + totpixel += row.num_pixels; + } + } + + return totpixel; +} + +struct SplitQueueData { + ThreadQueue *new_nodes; + TaskPool *pool; + + PBVH *pbvh; + Mesh *mesh; + Image *image; + ImageUser *image_user; +}; + +struct SplitNodePair { + SplitNodePair *parent; + PBVHNode node; + int children_offset = 0; + int depth = 0; + int source_index = -1; + bool is_old = false; + SplitQueueData *tdata; + + SplitNodePair(SplitNodePair *node_parent = nullptr) : parent(node_parent) + { + memset(static_cast<void *>(&node), 0, sizeof(PBVHNode)); + } +}; + +static void split_thread_job(TaskPool *__restrict pool, void *taskdata); + +static void split_pixel_node( + PBVH *pbvh, SplitNodePair *split, Image *image, ImageUser *image_user, SplitQueueData *tdata) +{ + BB cb; + PBVHNode *node = &split->node; + + cb = node->vb; + + if (count_node_pixels(*node) <= pbvh->pixel_leaf_limit || split->depth >= pbvh->depth_limit) { + BKE_pbvh_pixels_node_data_get(split->node).rebuild_undo_regions(); + return; + } + + /* Find widest axis and its midpoint */ + const int axis = BB_widest_axis(&cb); + const float mid = (cb.bmax[axis] + cb.bmin[axis]) * 0.5f; + + node->flag = (PBVHNodeFlags)(int(node->flag) & int(~PBVH_TexLeaf)); + + SplitNodePair *split1 = MEM_new<SplitNodePair>("split_pixel_node split1", split); + SplitNodePair *split2 = MEM_new<SplitNodePair>("split_pixel_node split1", split); + + split1->depth = split->depth + 1; + split2->depth = split->depth + 1; + + PBVHNode *child1 = &split1->node; + PBVHNode *child2 = &split2->node; + + child1->flag = PBVH_TexLeaf; + child2->flag = PBVH_TexLeaf; + + child1->vb = cb; + child1->vb.bmax[axis] = mid; + + child2->vb = cb; + child2->vb.bmin[axis] = mid; + + NodeData &data = BKE_pbvh_pixels_node_data_get(split->node); + + NodeData *data1 = MEM_new<NodeData>(__func__); + NodeData *data2 = MEM_new<NodeData>(__func__); + child1->pixels.node_data = static_cast<void *>(data1); + child2->pixels.node_data = static_cast<void *>(data2); + + data1->uv_primitives = data.uv_primitives; + data2->uv_primitives = data.uv_primitives; + + data1->tiles.resize(data.tiles.size()); + data2->tiles.resize(data.tiles.size()); + + for (int i : IndexRange(data.tiles.size())) { + UDIMTilePixels &tile = data.tiles[i]; + UDIMTilePixels &tile1 = data1->tiles[i]; + UDIMTilePixels &tile2 = data2->tiles[i]; + + tile1.tile_number = tile2.tile_number = tile.tile_number; + tile1.flags.dirty = tile2.flags.dirty = 0; + } + + ImageUser image_user2 = *image_user; + + for (int i : IndexRange(data.tiles.size())) { + const UDIMTilePixels &tile = data.tiles[i]; + + image_user2.tile = tile.tile_number; + + ImBuf *image_buffer = BKE_image_acquire_ibuf(image, &image_user2, nullptr); + if (image_buffer == nullptr) { + continue; + } + + const float(*vert_cos)[3] = BKE_pbvh_get_vert_positions(pbvh); + PBVHData &pbvh_data = BKE_pbvh_pixels_data_get(*pbvh); + + for (const PackedPixelRow &row : tile.pixel_rows) { + UDIMTilePixels *tile1 = &data1->tiles[i]; + UDIMTilePixels *tile2 = &data2->tiles[i]; + + UVPrimitivePaintInput &uv_prim = data.uv_primitives.paint_input[row.uv_primitive_index]; + int3 tri = pbvh_data.geom_primitives.vert_indices[uv_prim.geometry_primitive_index]; + + float
verts[3][3]; + + copy_v3_v3(verts[0], vert_cos[tri[0]]); + copy_v3_v3(verts[1], vert_cos[tri[1]]); + copy_v3_v3(verts[2], vert_cos[tri[2]]); + + float2 delta = uv_prim.delta_barycentric_coord_u; + float2 uv1 = row.start_barycentric_coord; + float2 uv2 = row.start_barycentric_coord + delta * float(row.num_pixels); + + float co1[3]; + float co2[3]; + + interp_barycentric_tri_v3(verts, uv1[0], uv1[1], co1); + interp_barycentric_tri_v3(verts, uv2[0], uv2[1], co2); + + /* Are we spanning the midpoint? */ + if ((co1[axis] <= mid) != (co2[axis] <= mid)) { + PackedPixelRow row1 = row; + float t; + + if (mid < co1[axis]) { + t = 1.0f - (mid - co2[axis]) / (co1[axis] - co2[axis]); + + SWAP(UDIMTilePixels *, tile1, tile2); + } + else { + t = (mid - co1[axis]) / (co2[axis] - co1[axis]); + } + + int num_pixels = int(floorf(float(row.num_pixels) * t)); + + if (num_pixels) { + row1.num_pixels = num_pixels; + tile1->pixel_rows.append(row1); + } + + if (num_pixels != row.num_pixels) { + PackedPixelRow row2 = row; + + row2.num_pixels = row.num_pixels - num_pixels; + + row2.start_barycentric_coord = row.start_barycentric_coord + + uv_prim.delta_barycentric_coord_u * float(num_pixels); + row2.start_image_coordinate = row.start_image_coordinate; + row2.start_image_coordinate[0] += num_pixels; + + tile2->pixel_rows.append(row2); + } + } + else if (co1[axis] <= mid && co2[axis] <= mid) { + tile1->pixel_rows.append(row); + } + else { + tile2->pixel_rows.append(row); + } + } + + BKE_image_release_ibuf(image, image_buffer, nullptr); + } + + data.undo_regions.clear(); + + if (node->flag & PBVH_Leaf) { + data.clear_data(); + } + else { + pbvh_node_pixels_free(node); + } + + BLI_thread_queue_push(tdata->new_nodes, static_cast<void *>(split1)); + BLI_thread_queue_push(tdata->new_nodes, static_cast<void *>(split2)); + + BLI_task_pool_push(tdata->pool, split_thread_job, static_cast<void *>(split1), false, nullptr); + BLI_task_pool_push(tdata->pool, split_thread_job, static_cast<void *>(split2), false, nullptr); +} + +static void split_flush_final_nodes(SplitQueueData *tdata) +{ + PBVH *pbvh = tdata->pbvh; + Vector<SplitNodePair *> splits; + + while (!BLI_thread_queue_is_empty(tdata->new_nodes)) { + SplitNodePair *newsplit = static_cast<SplitNodePair *>(BLI_thread_queue_pop(tdata->new_nodes)); + + splits.append(newsplit); + + if (newsplit->is_old) { + continue; + } + + if (!newsplit->parent->children_offset) { + newsplit->parent->children_offset = pbvh->totnode; + + pbvh_grow_nodes(pbvh, pbvh->totnode + 2); + newsplit->source_index = newsplit->parent->children_offset; + } + else { + newsplit->source_index = newsplit->parent->children_offset + 1; + } + } + + for (SplitNodePair *split : splits) { + BLI_assert(split->source_index != -1); + + split->node.children_offset = split->children_offset; + pbvh->nodes[split->source_index] = split->node; + } + + for (SplitNodePair *split : splits) { + MEM_delete(split); + } +} + +static void split_thread_job(TaskPool *__restrict pool, void *taskdata) +{ + + SplitQueueData *tdata = static_cast<SplitQueueData *>(BLI_task_pool_user_data(pool)); + SplitNodePair *split = static_cast<SplitNodePair *>(taskdata); + + split_pixel_node(tdata->pbvh, split, tdata->image, tdata->image_user, tdata); +} + +static void split_pixel_nodes(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image_user) +{ + if (G.debug_value == 891) { + return; + } + + if (!pbvh->depth_limit) { + pbvh->depth_limit = 40; /* TODO: move into a constant */ + } + + if (!pbvh->pixel_leaf_limit) { + pbvh->pixel_leaf_limit = 256 * 256; /* TODO: move into a constant */ + } + + SplitQueueData tdata; + TaskPool *pool =
BLI_task_pool_create_suspended(&tdata, TASK_PRIORITY_HIGH); + + tdata.pool = pool; + tdata.pbvh = pbvh; + tdata.mesh = mesh; + tdata.image = image; + tdata.image_user = image_user; + + tdata.new_nodes = BLI_thread_queue_init(); + + /* Set up initial jobs before initializing threads. */ + for (int i : IndexRange(pbvh->totnode)) { + if (pbvh->nodes[i].flag & PBVH_TexLeaf) { + SplitNodePair *split = MEM_new<SplitNodePair>("split_pixel_nodes split"); + + split->source_index = i; + split->is_old = true; + split->node = pbvh->nodes[i]; + split->tdata = &tdata; + + BLI_task_pool_push(pool, split_thread_job, static_cast<void *>(split), false, nullptr); + + BLI_thread_queue_push(tdata.new_nodes, static_cast<void *>(split)); + } + } + + BLI_task_pool_work_and_wait(pool); + BLI_task_pool_free(pool); + + split_flush_final_nodes(&tdata); + + BLI_thread_queue_free(tdata.new_nodes); +} + +/** + * During debugging this check could be enabled. + * It will write to each image pixel that is covered by the PBVH. + */ +constexpr bool USE_WATERTIGHT_CHECK = false; + static void extract_barycentric_pixels(UDIMTilePixels &tile_data, const ImBuf *image_buffer, const uv_islands::UVIslandsMask &uv_mask, @@ -233,7 +534,10 @@ static void do_encode_pixels(void *__restrict userdata, static bool should_pixels_be_updated(PBVHNode *node) { - if ((node->flag & PBVH_Leaf) == 0) { + if ((node->flag & (PBVH_Leaf | PBVH_TexLeaf)) == 0) { + return false; + } + if (node->children_offset != 0) { return false; } if ((node->flag & PBVH_RebuildPixels) != 0) { @@ -349,24 +653,28 @@ static void apply_watertight_check(PBVH *pbvh, Image *image, ImageUser *image_us BKE_image_partial_update_mark_full_update(image); } -static void update_pixels(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image_user) +static bool update_pixels(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image_user) { Vector<PBVHNode *> nodes_to_update; if (!find_nodes_to_update(pbvh, nodes_to_update)) { - return; + return false; } const StringRef active_uv_name = CustomData_get_active_layer_name(&mesh->ldata, CD_PROP_FLOAT2); if (active_uv_name.is_empty()) { - return; + return false; } const AttributeAccessor attributes = mesh->attributes(); const VArraySpan<float2> uv_map = attributes.lookup<float2>(active_uv_name, ATTR_DOMAIN_CORNER); uv_islands::MeshData mesh_data( - {pbvh->looptri, pbvh->totprim}, {pbvh->mloop, mesh->totloop}, pbvh->totvert, uv_map); + {pbvh->looptri, pbvh->totprim}, + {pbvh->mloop, mesh->totloop}, + pbvh->totvert, + uv_map, + {static_cast<const float3 *>(static_cast<const void *>(pbvh->vert_positions)), pbvh->totvert}); uv_islands::UVIslands islands(mesh_data); uv_islands::UVIslandsMask uv_masks; @@ -418,6 +726,15 @@ static void update_pixels(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image node->flag = static_cast<PBVHNodeFlags>(node->flag & ~PBVH_RebuildPixels); } + /* Add PBVH_TexLeaf flag */ + for (int i : IndexRange(pbvh->totnode)) { + PBVHNode &node = pbvh->nodes[i]; + + if (node.flag & PBVH_Leaf) { + node.flag = (PBVHNodeFlags)(int(node.flag) | int(PBVH_TexLeaf)); + } + } + + //#define DO_PRINT_STATISTICS #ifdef DO_PRINT_STATISTICS /* Print some statistics about compression ratio.
*/ @@ -430,7 +747,6 @@ static void update_pixels(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image continue; } NodeData *node_data = static_cast<NodeData *>(node->pixels.node_data); - compressed_data_len += node_data->triangles.mem_size(); for (const UDIMTilePixels &tile_data : node_data->tiles) { compressed_data_len += tile_data.encoded_pixels.size() * sizeof(PackedPixelRow); for (const PackedPixelRow &encoded_pixels : tile_data.encoded_pixels) { @@ -444,6 +760,8 @@ static void update_pixels(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image float(compressed_data_len) / num_pixels); } #endif + + return true; } NodeData &BKE_pbvh_pixels_node_data_get(PBVHNode &node) @@ -480,20 +798,25 @@ void BKE_pbvh_pixels_mark_image_dirty(PBVHNode &node, Image &image, ImageUser &i node_data->flags.dirty = false; } } - } // namespace blender::bke::pbvh::pixels -extern "C" { using namespace blender::bke::pbvh::pixels; void BKE_pbvh_build_pixels(PBVH *pbvh, Mesh *mesh, Image *image, ImageUser *image_user) { - update_pixels(pbvh, mesh, image, image_user); + if (update_pixels(pbvh, mesh, image, image_user)) { + split_pixel_nodes(pbvh, mesh, image, image_user); + } } void pbvh_node_pixels_free(PBVHNode *node) { NodeData *node_data = static_cast<NodeData *>(node->pixels.node_data); + + if (!node_data) { + return; + } + MEM_delete(node_data); node->pixels.node_data = nullptr; } @@ -504,4 +827,3 @@ void pbvh_pixels_free(PBVH *pbvh) { MEM_delete(pbvh_data); pbvh->pixels.data = nullptr; } -} diff --git a/source/blender/blenkernel/intern/pbvh_uv_islands.cc b/source/blender/blenkernel/intern/pbvh_uv_islands.cc index 8554964fae9..522b11b58db 100644 --- a/source/blender/blenkernel/intern/pbvh_uv_islands.cc +++ b/source/blender/blenkernel/intern/pbvh_uv_islands.cc @@ -80,17 +80,6 @@ static int get_uv_loop(const MeshData &mesh_data, const MLoopTri &looptri, const return looptri.tri[0]; } -static bool has_vertex(const MeshData &mesh_data, const MLoopTri &looptri, const int vert) -{ - for (int i = 0; i < 3; i++) { - const int vert_i = mesh_data.loops[looptri.tri[i]].v; - if (vert_i == vert) { - return true; - } - } - return false; -} - static rctf primitive_uv_bounds(const MLoopTri &looptri, const Span<float2> uv_map) { rctf result; @@ -210,11 +199,13 @@ static void mesh_data_init(MeshData &mesh_data) MeshData::MeshData(const Span<MLoopTri> looptris, const Span<MLoop> loops, const int verts_num, - const Span<float2> uv_map) + const Span<float2> uv_map, + const Span<float3> vertex_positions) : looptris(looptris), verts_num(verts_num), loops(loops), uv_map(uv_map), + vertex_positions(vertex_positions), vert_to_edge_map(verts_num), edge_to_primitive_map(0), primitive_to_edge_map(looptris.size()) @@ -245,6 +236,21 @@ UVVertex::UVVertex(const MeshData &mesh_data, const int loop) uv_vertex_init_flags(*this); } +/** + * Get a list containing the indices of mesh primitives (primitive of the input mesh), that + * surround the given uv_vertex in uv-space. + */ +static Vector<int> connecting_mesh_primitive_indices(const UVVertex &uv_vertex) +{ + Vector<int> primitives_around_uv_vertex; + for (const UVEdge *uv_edge : uv_vertex.uv_edges) { + for (const UVPrimitive *uv_primitive : uv_edge->uv_primitives) { + primitives_around_uv_vertex.append_non_duplicates(uv_primitive->primitive_i); + } + } + return primitives_around_uv_vertex; +} + /** \} */ /* -------------------------------------------------------------------- */ @@ -494,7 +500,8 @@ static std::optional<UVBorderCorner> sharpest_border_corner(UVIsland &island) } /** The inner edge of a fan.
*/ -struct InnerEdge { +struct FanSegment { + const int primitive_index; const MLoopTri *primitive; /* UVs order are already applied. So `uvs[0]` matches `primitive->vertices[vert_order[0]]`. */ float2 uvs[3]; @@ -504,8 +511,11 @@ struct InnerEdge { bool found : 1; } flags; - InnerEdge(const MeshData &mesh_data, const MLoopTri *primitive, int vertex) - : primitive(primitive) + FanSegment(const MeshData &mesh_data, + const int primitive_index, + const MLoopTri *primitive, + int vertex) + : primitive_index(primitive_index), primitive(primitive) { flags.found = false; @@ -527,24 +537,41 @@ struct InnerEdge { vert_order[2] = 2; } } + + void print_debug(const MeshData &mesh_data) const + { + std::stringstream ss; + ss << "# p:" << primitive->poly; + ss << " v1:" << mesh_data.loops[primitive->tri[vert_order[0]]].v; + ss << " v2:" << mesh_data.loops[primitive->tri[vert_order[1]]].v; + ss << " v3:" << mesh_data.loops[primitive->tri[vert_order[2]]].v; + ss << " uv1:" << uvs[0]; + ss << " uv2:" << uvs[1]; + ss << " uv3:" << uvs[2]; + if (flags.found) { + ss << " *found"; + } + ss << "\n"; + std::cout << ss.str(); + } }; struct Fan { /* Blades of the fan. */ - Vector inner_edges; + Vector segments; struct { /** * Do all segments of the fan make a full fan, or are there parts missing. Non manifold meshes * can have missing parts. */ - bool full : 1; + bool is_manifold : 1; } flags; Fan(const MeshData &mesh_data, const int vertex) { - flags.full = true; + flags.is_manifold = true; int current_edge = mesh_data.vert_to_edge_map[vertex].first(); const int stop_primitive = mesh_data.edge_to_primitive_map[current_edge].first(); int previous_primitive = stop_primitive; @@ -565,7 +592,7 @@ struct Fan { if (edge_i == current_edge || (edge.vert1 != vertex && edge.vert2 != vertex)) { continue; } - inner_edges.append(InnerEdge(mesh_data, &other_looptri, vertex)); + segments.append(FanSegment(mesh_data, other_primitive_i, &other_looptri, vertex)); current_edge = edge_i; previous_primitive = other_primitive_i; stop = true; @@ -573,7 +600,7 @@ struct Fan { } } if (stop == false) { - flags.full = false; + flags.is_manifold = false; break; } if (stop_primitive == previous_primitive) { @@ -582,10 +609,10 @@ struct Fan { } } - int count_num_to_add() const + int count_edges_not_added() const { int result = 0; - for (const InnerEdge &fan_edge : inner_edges) { + for (const FanSegment &fan_edge : segments) { if (!fan_edge.flags.found) { result++; } @@ -593,26 +620,19 @@ struct Fan { return result; } - void mark_already_added_segments(const MeshData &mesh_data, const UVVertex &uv_vertex) + void mark_already_added_segments(const UVVertex &uv_vertex) { - for (InnerEdge &fan_edge : inner_edges) { - fan_edge.flags.found = false; - const int v0 = mesh_data.loops[fan_edge.primitive->tri[fan_edge.vert_order[0]]].v; - const int v1 = mesh_data.loops[fan_edge.primitive->tri[fan_edge.vert_order[1]]].v; - for (const UVEdge *edge : uv_vertex.uv_edges) { - const int e0 = edge->vertices[0]->vertex; - const int e1 = edge->vertices[1]->vertex; - if ((e0 == v0 && e1 == v1) || (e0 == v1 && e1 == v0)) { - fan_edge.flags.found = true; - break; - } - } + Vector mesh_primitive_indices = connecting_mesh_primitive_indices(uv_vertex); + + /* Go over all fan edges to find if they can be found as primitive around the uv vertex. 
*/ + for (FanSegment &fan_edge : segments) { + fan_edge.flags.found = mesh_primitive_indices.contains(fan_edge.primitive_index); } } void init_uv_coordinates(const MeshData &mesh_data, UVVertex &uv_vertex) { - for (InnerEdge &fan_edge : inner_edges) { + for (FanSegment &fan_edge : segments) { int other_v = mesh_data.loops[fan_edge.primitive->tri[fan_edge.vert_order[0]]].v; if (other_v == uv_vertex.vertex) { other_v = mesh_data.loops[fan_edge.primitive->tri[fan_edge.vert_order[1]]].v; @@ -629,11 +649,153 @@ struct Fan { } } - inner_edges.last().uvs[2] = inner_edges.first().uvs[1]; - for (int i = 0; i < inner_edges.size() - 1; i++) { - inner_edges[i].uvs[2] = inner_edges[i + 1].uvs[1]; + segments.last().uvs[2] = segments.first().uvs[1]; + for (int i = 0; i < segments.size() - 1; i++) { + segments[i].uvs[2] = segments[i + 1].uvs[1]; } } + +#ifndef NDEBUG + /** + * Check if the given vertex is part of the outside of the fan. + * Return true if the given vertex is found on the outside of the fan, otherwise returns false. + */ + bool contains_vertex_on_outside(const MeshData &mesh_data, const int vertex_index) const + { + for (const FanSegment &segment : segments) { + int v2 = mesh_data.loops[segment.primitive->tri[segment.vert_order[1]]].v; + if (vertex_index == v2) { + return true; + } + } + return false; + } + +#endif + + static bool is_path_valid(const Span &path, + const MeshData &mesh_data, + const int from_vertex, + const int to_vertex) + { + int current_vert = from_vertex; + for (FanSegment *segment : path) { + int v1 = mesh_data.loops[segment->primitive->tri[segment->vert_order[1]]].v; + int v2 = mesh_data.loops[segment->primitive->tri[segment->vert_order[2]]].v; + if (!ELEM(current_vert, v1, v2)) { + return false; + } + current_vert = v1 == current_vert ? v2 : v1; + } + return current_vert == to_vertex; + } + + /** + * Find the closest path over the fan between `from_vertex` and `to_vertex`. The result contains + * exclude the starting and final edge. + * + * Algorithm only uses the winding order of the given fan segments. + */ + static Vector path_between(const Span edge_order, + const MeshData &mesh_data, + const int from_vertex, + const int to_vertex, + const bool reversed) + { + const int from_vert_order = 1; + const int to_vert_order = 2; + const int index_increment = reversed ? -1 : 1; + + Vector result; + result.reserve(edge_order.size()); + int index = 0; + while (true) { + FanSegment *segment = edge_order[index]; + int v2 = mesh_data.loops[segment->primitive->tri[segment->vert_order[from_vert_order]]].v; + if (v2 == from_vertex) { + break; + } + index = (index + index_increment + edge_order.size()) % edge_order.size(); + } + + while (true) { + FanSegment *segment = edge_order[index]; + result.append(segment); + + int v3 = mesh_data.loops[segment->primitive->tri[segment->vert_order[to_vert_order]]].v; + if (v3 == to_vertex) { + break; + } + + index = (index + index_increment + edge_order.size()) % edge_order.size(); + } + + return result; + } + + /** + * Score the given solution to be the best. Best solution would have the lowest score. + * + * Score is determined by counting the number of steps and subtracting that with steps that have + * not yet been visited. 
+ */ + static int64_t score(const Span solution) + { + int64_t not_visited_steps = 0; + for (FanSegment *segment : solution) { + if (!segment->flags.found) { + not_visited_steps++; + } + } + return solution.size() - not_visited_steps; + } + + Vector best_path_between(const MeshData &mesh_data, + const int from_vertex, + const int to_vertex) + { + BLI_assert_msg(contains_vertex_on_outside(mesh_data, from_vertex), + "Inconsistency detected, `from_vertex` isn't part of the outside of the fan."); + BLI_assert_msg(contains_vertex_on_outside(mesh_data, to_vertex), + "Inconsistency detected, `to_vertex` isn't part of the outside of the fan."); + if (to_vertex == from_vertex) { + return Vector(); + } + + Array edges(segments.size()); + for (int64_t index : segments.index_range()) { + edges[index] = &segments[index]; + } + + Vector winding_1 = path_between(edges, mesh_data, from_vertex, to_vertex, false); + Vector winding_2 = path_between(edges, mesh_data, from_vertex, to_vertex, true); + + bool winding_1_valid = is_path_valid(winding_1, mesh_data, from_vertex, to_vertex); + bool winding_2_valid = is_path_valid(winding_2, mesh_data, from_vertex, to_vertex); + + if (winding_1_valid && !winding_2_valid) { + return winding_1; + } + if (!winding_1_valid && winding_2_valid) { + return winding_2; + } + if (!winding_1_valid && !winding_2_valid) { + BLI_assert_msg(false, "Both solutions aren't valid."); + return Vector(); + } + if (score(winding_1) < score(winding_2)) { + return winding_1; + } + return winding_2; + } + + void print_debug(const MeshData &mesh_data) const + { + for (const FanSegment &segment : segments) { + segment.print_debug(mesh_data); + } + std::cout << "\n"; + } }; static void add_uv_primitive_shared_uv_edge(const MeshData &mesh_data, @@ -677,25 +839,11 @@ static void add_uv_primitive_shared_uv_edge(const MeshData &mesh_data, uv_primitive_append_to_uv_vertices(prim1); island.uv_primitives.append(prim1); } - -static int find_fill_border(const MeshData &mesh_data, const int v1, const int v2, const int v3) -{ - for (const int edge_i : mesh_data.vert_to_edge_map[v1]) { - for (const int primitive_i : mesh_data.edge_to_primitive_map[edge_i]) { - const MLoopTri &looptri = mesh_data.looptris[primitive_i]; - if (has_vertex(mesh_data, looptri, v1) && has_vertex(mesh_data, looptri, v2) && - has_vertex(mesh_data, looptri, v3)) { - return primitive_i; - } - } - } - return -1; -} /** * Find a primitive that can be used to fill give corner. * Will return -1 when no primitive can be found. */ -static int find_fill_border(const MeshData &mesh_data, UVBorderCorner &corner) +static int find_fill_primitive(const MeshData &mesh_data, UVBorderCorner &corner) { if (corner.first->get_uv_vertex(1) != corner.second->get_uv_vertex(0)) { return -1; @@ -747,29 +895,44 @@ static void extend_at_vert(const MeshData &mesh_data, UVBorderCorner &corner, float min_uv_distance) { + int border_index = corner.first->border_index; UVBorder &border = island.borders[border_index]; + if (!corner.connected_in_mesh()) { + return; + } UVVertex *uv_vertex = corner.second->get_uv_vertex(0); Fan fan(mesh_data, uv_vertex->vertex); - if (!fan.flags.full) { + if (!fan.flags.is_manifold) { return; } fan.init_uv_coordinates(mesh_data, *uv_vertex); - fan.mark_already_added_segments(mesh_data, *uv_vertex); - int num_to_add = fan.count_num_to_add(); + fan.mark_already_added_segments(*uv_vertex); + int num_to_add = fan.count_edges_not_added(); - if (num_to_add == 0) { + /* In 3d space everything can connected, but in uv space it may not. 
+ * in this case in the space between we should extract the primitives to be added + * from the fan. */ + Vector winding_solution = fan.best_path_between( + mesh_data, corner.first->get_uv_vertex(0)->vertex, corner.second->get_uv_vertex(1)->vertex); + + /* + * When all edges are already added and its winding solution contains one segment to be added, + * the segment should be split into two segments in order one for both sides. + * + * Although the fill_primitive can fill the missing segment it could lead to a squashed + * triangle when the corner angle is near 180 degrees. In order to fix this we will + * always add two segments both using the same fill primitive. + */ + if (winding_solution.size() < 2 && (num_to_add == 0 || corner.angle > 2.0f)) { int fill_primitive_1_i = corner.second->uv_primitive->primitive_i; int fill_primitive_2_i = corner.first->uv_primitive->primitive_i; - const int fill_primitive_i = find_fill_border(mesh_data, corner); + const int fill_primitive_i = winding_solution.size() == 1 ? + winding_solution[0]->primitive_index : + find_fill_primitive(mesh_data, corner); - /* - * Although the fill_primitive can fill the missing segment it could lead to a squashed - * triangle when the corner angle is near 180 degrees. In order to fix this we will - * always add two segments both using the same fill primitive. - */ if (fill_primitive_i != -1) { fill_primitive_1_i = fill_primitive_i; fill_primitive_2_i = fill_primitive_i; @@ -809,91 +972,42 @@ static void extend_at_vert(const MeshData &mesh_data, } else { UVEdge *current_edge = corner.first->edge; - Vector new_border_edges; - for (int i = 0; i < num_to_add; i++) { + num_to_add = winding_solution.size(); + for (int64_t segment_index : winding_solution.index_range()) { + float2 old_uv = current_edge->get_other_uv_vertex(uv_vertex->vertex)->uv; int shared_edge_vertex = current_edge->get_other_uv_vertex(uv_vertex->vertex)->vertex; - float factor = (i + 1.0f) / (num_to_add + 1.0f); + float factor = (segment_index + 1.0f) / num_to_add; float2 new_uv = corner.uv(factor, min_uv_distance); - /* Find an segment that contains the 'current edge'. */ - for (InnerEdge &segment : fan.inner_edges) { - if (segment.flags.found) { - continue; - } + FanSegment &segment = *winding_solution[segment_index]; - /* Find primitive that shares the current edge and the segment edge. 
*/ - const int fill_primitive_i = find_fill_border( - mesh_data, - uv_vertex->vertex, - shared_edge_vertex, - mesh_data.loops[segment.primitive->tri[segment.vert_order[1]]].v); - if (fill_primitive_i == -1) { - continue; - } - const MLoopTri &fill_primitive = mesh_data.looptris[fill_primitive_i]; - const int other_prim_vertex = primitive_get_other_uv_vertex( - mesh_data, fill_primitive, uv_vertex->vertex, shared_edge_vertex); + const int fill_primitive_i = segment.primitive_index; + const MLoopTri &fill_primitive = mesh_data.looptris[fill_primitive_i]; + const int other_prim_vertex = primitive_get_other_uv_vertex( + mesh_data, fill_primitive, uv_vertex->vertex, shared_edge_vertex); - UVVertex uv_vertex_template; - uv_vertex_template.vertex = uv_vertex->vertex; - uv_vertex_template.uv = uv_vertex->uv; - UVVertex *vertex_1_ptr = island.lookup_or_create(uv_vertex_template); - uv_vertex_template.vertex = shared_edge_vertex; - uv_vertex_template.uv = old_uv; - UVVertex *vertex_2_ptr = island.lookup_or_create(uv_vertex_template); - uv_vertex_template.vertex = other_prim_vertex; - uv_vertex_template.uv = new_uv; - UVVertex *vertex_3_ptr = island.lookup_or_create(uv_vertex_template); + UVVertex uv_vertex_template; + uv_vertex_template.vertex = uv_vertex->vertex; + uv_vertex_template.uv = uv_vertex->uv; + UVVertex *vertex_1_ptr = island.lookup_or_create(uv_vertex_template); + uv_vertex_template.vertex = shared_edge_vertex; + uv_vertex_template.uv = old_uv; + UVVertex *vertex_2_ptr = island.lookup_or_create(uv_vertex_template); + uv_vertex_template.vertex = other_prim_vertex; + uv_vertex_template.uv = new_uv; + UVVertex *vertex_3_ptr = island.lookup_or_create(uv_vertex_template); - add_uv_primitive_fill( - island, *vertex_1_ptr, *vertex_2_ptr, *vertex_3_ptr, fill_primitive_i); + add_uv_primitive_fill(island, *vertex_1_ptr, *vertex_2_ptr, *vertex_3_ptr, fill_primitive_i); - segment.flags.found = true; - - UVPrimitive &new_prim = island.uv_primitives.last(); - current_edge = new_prim.get_uv_edge(uv_vertex->vertex, other_prim_vertex); - UVBorderEdge new_border(new_prim.get_uv_edge(shared_edge_vertex, other_prim_vertex), - &new_prim); - new_border_edges.append(new_border); - break; - } - } - - { - /* Add final segment. 
*/ - float2 old_uv = current_edge->get_other_uv_vertex(uv_vertex->vertex)->uv; - int shared_edge_vertex = current_edge->get_other_uv_vertex(uv_vertex->vertex)->vertex; - const int fill_primitive_i = find_fill_border(mesh_data, - uv_vertex->vertex, - shared_edge_vertex, - corner.second->get_uv_vertex(1)->vertex); - if (fill_primitive_i != -1) { - const MLoopTri &fill_primitive = mesh_data.looptris[fill_primitive_i]; - const int other_prim_vertex = primitive_get_other_uv_vertex( - mesh_data, fill_primitive, uv_vertex->vertex, shared_edge_vertex); - - UVVertex uv_vertex_template; - uv_vertex_template.vertex = uv_vertex->vertex; - uv_vertex_template.uv = uv_vertex->uv; - UVVertex *vertex_1_ptr = island.lookup_or_create(uv_vertex_template); - uv_vertex_template.vertex = shared_edge_vertex; - uv_vertex_template.uv = old_uv; - UVVertex *vertex_2_ptr = island.lookup_or_create(uv_vertex_template); - uv_vertex_template.vertex = other_prim_vertex; - uv_vertex_template.uv = corner.second->get_uv_vertex(1)->uv; - UVVertex *vertex_3_ptr = island.lookup_or_create(uv_vertex_template); - add_uv_primitive_fill( - island, *vertex_1_ptr, *vertex_2_ptr, *vertex_3_ptr, fill_primitive_i); - - UVPrimitive &new_prim = island.uv_primitives.last(); - UVBorderEdge new_border(new_prim.get_uv_edge(shared_edge_vertex, other_prim_vertex), - &new_prim); - new_border_edges.append(new_border); - } + UVPrimitive &new_prim = island.uv_primitives.last(); + current_edge = new_prim.get_uv_edge(uv_vertex->vertex, other_prim_vertex); + UVBorderEdge new_border(new_prim.get_uv_edge(shared_edge_vertex, other_prim_vertex), + &new_prim); + new_border_edges.append(new_border); } int border_insert = corner.first->index; @@ -941,7 +1055,6 @@ void UVIsland::extend_border(const MeshData &mesh_data, for (UVBorder &border : borders) { border.update_indexes(border_index++); } - while (true) { std::optional extension_corner = sharpest_border_corner(*this); if (!extension_corner.has_value()) { @@ -961,6 +1074,63 @@ void UVIsland::extend_border(const MeshData &mesh_data, } } +void UVIsland::print_debug(const MeshData &mesh_data) const +{ + std::stringstream ss; + ss << "#### Start UVIsland ####\n"; + ss << "import bpy\n"; + ss << "import bpy_extras.object_utils\n"; + ss << "import mathutils\n"; + + ss << "uvisland_vertices = [\n"; + for (const float3 &vertex_position : mesh_data.vertex_positions) { + ss << " mathutils.Vector((" << vertex_position.x << ", " << vertex_position.y << ", " + << vertex_position.z << ")),\n"; + } + ss << "]\n"; + + ss << "uvisland_edges = []\n"; + + ss << "uvisland_faces = [\n"; + for (const VectorList::UsedVector &uvprimitives : uv_primitives) { + for (const UVPrimitive &uvprimitive : uvprimitives) { + ss << " [" << uvprimitive.edges[0]->vertices[0]->vertex << ", " + << uvprimitive.edges[0]->vertices[1]->vertex << ", " + << uvprimitive + .get_other_uv_vertex(uvprimitive.edges[0]->vertices[0], + uvprimitive.edges[0]->vertices[1]) + ->vertex + << "],\n"; + } + } + ss << "]\n"; + + ss << "uvisland_uvs = [\n"; + for (const VectorList::UsedVector &uvprimitives : uv_primitives) { + for (const UVPrimitive &uvprimitive : uvprimitives) { + float2 uv = uvprimitive.edges[0]->vertices[0]->uv; + ss << " " << uv.x << ", " << uv.y << ",\n"; + uv = uvprimitive.edges[0]->vertices[1]->uv; + ss << " " << uv.x << ", " << uv.y << ",\n"; + uv = uvprimitive + .get_other_uv_vertex(uvprimitive.edges[0]->vertices[0], + uvprimitive.edges[0]->vertices[1]) + ->uv; + ss << " " << uv.x << ", " << uv.y << ",\n"; + } + } + ss << "]\n"; + + ss << 
"uvisland_mesh = bpy.data.meshes.new(name='UVIsland')\n"; + ss << "uvisland_mesh.from_pydata(uvisland_vertices, uvisland_edges, uvisland_faces)\n"; + ss << "uv_map = uvisland_mesh.attributes.new('UVMap', 'FLOAT2', 'CORNER')\n"; + ss << "uv_map.data.foreach_set('vector', uvisland_uvs)\n"; + ss << "bpy_extras.object_utils.object_data_add(bpy.context, uvisland_mesh)\n"; + ss << "#### End UVIsland ####\n\n\n"; + + std::cout << ss.str(); +} + /** \} */ /* -------------------------------------------------------------------- */ @@ -1086,6 +1256,29 @@ float2 UVBorderCorner::uv(float factor, float min_uv_distance) return result; } +bool UVBorderCorner::connected_in_mesh() const +{ + return first->get_uv_vertex(1) == second->get_uv_vertex(0); +} + +void UVBorderCorner::print_debug() const +{ + std::stringstream ss; + ss << "# "; + if (connected_in_mesh()) { + ss << first->get_uv_vertex(0)->vertex << "-"; + ss << first->get_uv_vertex(1)->vertex << "-"; + ss << second->get_uv_vertex(1)->vertex << "\n"; + } + else { + ss << first->get_uv_vertex(0)->vertex << "-"; + ss << first->get_uv_vertex(1)->vertex << ", "; + ss << second->get_uv_vertex(0)->vertex << "-"; + ss << second->get_uv_vertex(1)->vertex << "\n"; + } + std::cout << ss.str(); +} + /** \} */ /* -------------------------------------------------------------------- */ @@ -1266,9 +1459,10 @@ UVIslands::UVIslands(const MeshData &mesh_data) { islands.reserve(mesh_data.uv_island_len); - for (int64_t uv_island_id = 0; uv_island_id < mesh_data.uv_island_len; uv_island_id++) { + for (const int64_t uv_island_id : IndexRange(mesh_data.uv_island_len)) { islands.append_as(UVIsland()); UVIsland *uv_island = &islands.last(); + uv_island->id = uv_island_id; for (const int primitive_i : mesh_data.looptris.index_range()) { if (mesh_data.uv_island_ids[primitive_i] == uv_island_id) { add_primitive(mesh_data, *uv_island, primitive_i); @@ -1292,6 +1486,13 @@ void UVIslands::extend_borders(const MeshData &mesh_data, const UVIslandsMask &i } } +void UVIslands::print_debug(const MeshData &mesh_data) const +{ + for (const UVIsland &island : islands) { + island.print_debug(mesh_data); + } +} + /** \} */ /* -------------------------------------------------------------------- */ diff --git a/source/blender/blenkernel/intern/pbvh_uv_islands.hh b/source/blender/blenkernel/intern/pbvh_uv_islands.hh index b97d07a2146..6324b742fc4 100644 --- a/source/blender/blenkernel/intern/pbvh_uv_islands.hh +++ b/source/blender/blenkernel/intern/pbvh_uv_islands.hh @@ -43,7 +43,6 @@ struct UVEdge; struct UVIslands; struct UVIslandsMask; struct UVPrimitive; -struct UVPrimitiveEdge; struct MeshData; struct UVVertex; @@ -123,6 +122,7 @@ struct MeshData { const int64_t verts_num; const Span loops; const Span uv_map; + const Span vertex_positions; VertToEdgeMap vert_to_edge_map; @@ -143,7 +143,8 @@ struct MeshData { explicit MeshData(Span looptris, Span loops, const int verts_num, - const Span uv_map); + const Span uv_map, + const Span vertex_positions); }; struct UVVertex { @@ -249,6 +250,15 @@ struct UVBorderCorner { * resulting uv coordinate. The distance is in uv space. */ float2 uv(float factor, float min_uv_distance); + + /** + * Does this corner exist as 2 connected edges of the mesh. + * + * During the extraction phase a connection can be made in uv-space that + * doesn't reflect to two connected edges inside the mesh. 
+ */ + bool connected_in_mesh() const; + void print_debug() const; }; struct UVBorder { @@ -279,6 +289,12 @@ struct UVBorder { }; struct UVIsland { + /** + * Id (Index) of the UVIsland. Contains the index of this island in UVIslands. + * + * Useful during debugging to set a breaking condition on a specific island/vert. + */ + int id; VectorList uv_vertices; VectorList uv_edges; VectorList uv_primitives; @@ -313,6 +329,9 @@ struct UVIsland { bool has_shared_edge(const UVPrimitive &primitive) const; bool has_shared_edge(const MeshData &mesh_data, const int primitive_i) const; void extend_border(const UVPrimitive &primitive); + + /** Print a python script to the console that generates a mesh representing this UVIsland. */ + void print_debug(const MeshData &mesh_data) const; }; struct UVIslands { @@ -322,6 +341,7 @@ struct UVIslands { void extract_borders(); void extend_borders(const MeshData &mesh_data, const UVIslandsMask &islands_mask); + void print_debug(const MeshData &mesh_data) const; }; /** Mask to find the index of the UVIsland for a given UV coordinate. */ diff --git a/source/blender/blenkernel/intern/pointcloud.cc b/source/blender/blenkernel/intern/pointcloud.cc index 8207bb2cd05..aa3c2593f34 100644 --- a/source/blender/blenkernel/intern/pointcloud.cc +++ b/source/blender/blenkernel/intern/pointcloud.cc @@ -166,33 +166,33 @@ static void pointcloud_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_PT = { - /* id_code */ ID_PT, - /* id_filter */ FILTER_ID_PT, - /* main_listbase_index */ INDEX_ID_PT, - /* struct_size */ sizeof(PointCloud), - /* name */ "PointCloud", - /* name_plural */ "pointclouds", - /* translation_context */ BLT_I18NCONTEXT_ID_POINTCLOUD, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_PT, + /*id_filter*/ FILTER_ID_PT, + /*main_listbase_index*/ INDEX_ID_PT, + /*struct_size*/ sizeof(PointCloud), + /*name*/ "PointCloud", + /*name_plural*/ "pointclouds", + /*translation_context*/ BLT_I18NCONTEXT_ID_POINTCLOUD, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ pointcloud_init_data, - /* copy_data */ pointcloud_copy_data, - /* free_data */ pointcloud_free_data, - /* make_local */ nullptr, - /* foreach_id */ pointcloud_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ pointcloud_init_data, + /*copy_data*/ pointcloud_copy_data, + /*free_data*/ pointcloud_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ pointcloud_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ pointcloud_blend_write, - /* blend_read_data */ pointcloud_blend_read_data, - /* blend_read_lib */ pointcloud_blend_read_lib, - /* blend_read_expand */ pointcloud_blend_read_expand, + /*blend_write*/ pointcloud_blend_write, + /*blend_read_data*/ pointcloud_blend_read_data, + /*blend_read_lib*/ pointcloud_blend_read_lib, + /*blend_read_expand*/ pointcloud_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; static void pointcloud_random(PointCloud *pointcloud) diff --git a/source/blender/blenkernel/intern/scene.cc b/source/blender/blenkernel/intern/scene.cc index a4d666fa590..3d6c0a95921 100644 --- a/source/blender/blenkernel/intern/scene.cc +++ b/source/blender/blenkernel/intern/scene.cc @@ -278,7 +278,7 @@ static void 
scene_copy_data(Main *bmain, ID *id_dst, const ID *id_src, const int (ID *)scene_src->master_collection, (ID **)&scene_dst->master_collection, flag_private_id_data); - scene_dst->master_collection->owner_id = &scene_dst->id; + scene_dst->master_collection->runtime.owner_id = &scene_dst->id; } /* View Layers */ @@ -1774,6 +1774,7 @@ IDTypeInfo IDType_ID_SCE = get_type_info(); const char *RE_engine_id_BLENDER_EEVEE = "BLENDER_EEVEE"; const char *RE_engine_id_BLENDER_WORKBENCH = "BLENDER_WORKBENCH"; +const char *RE_engine_id_BLENDER_WORKBENCH_NEXT = "BLENDER_WORKBENCH_NEXT"; const char *RE_engine_id_CYCLES = "CYCLES"; void free_avicodecdata(AviCodecData *acd) @@ -2978,7 +2979,8 @@ bool BKE_scene_uses_blender_eevee(const Scene *scene) bool BKE_scene_uses_blender_workbench(const Scene *scene) { - return STREQ(scene->r.engine, RE_engine_id_BLENDER_WORKBENCH); + return STREQ(scene->r.engine, RE_engine_id_BLENDER_WORKBENCH) || + STREQ(scene->r.engine, RE_engine_id_BLENDER_WORKBENCH_NEXT); } bool BKE_scene_uses_cycles(const Scene *scene) diff --git a/source/blender/blenkernel/intern/screen.c b/source/blender/blenkernel/intern/screen.c index 2c896788b20..95ef6b7b925 100644 --- a/source/blender/blenkernel/intern/screen.c +++ b/source/blender/blenkernel/intern/screen.c @@ -279,7 +279,8 @@ IDTypeInfo IDType_ID_SCR = { .name = "Screen", .name_plural = "screens", .translation_context = BLT_I18NCONTEXT_ID_SCREEN, - .flags = IDTYPE_FLAGS_NO_COPY | IDTYPE_FLAGS_ONLY_APPEND | IDTYPE_FLAGS_NO_ANIMDATA, + .flags = IDTYPE_FLAGS_NO_COPY | IDTYPE_FLAGS_ONLY_APPEND | IDTYPE_FLAGS_NO_ANIMDATA | + IDTYPE_FLAGS_NO_MEMFILE_UNDO, .asset_type_info = NULL, .init_data = NULL, diff --git a/source/blender/blenkernel/intern/simulation.cc b/source/blender/blenkernel/intern/simulation.cc index 14719c83ee9..c0e67b910f0 100644 --- a/source/blender/blenkernel/intern/simulation.cc +++ b/source/blender/blenkernel/intern/simulation.cc @@ -132,33 +132,33 @@ static void simulation_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_SIM = { - /* id_code */ ID_SIM, - /* id_filter */ FILTER_ID_SIM, - /* main_listbase_index */ INDEX_ID_SIM, - /* struct_size */ sizeof(Simulation), - /* name */ "Simulation", - /* name_plural */ "simulations", - /* translation_context */ BLT_I18NCONTEXT_ID_SIMULATION, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_SIM, + /*id_filter*/ FILTER_ID_SIM, + /*main_listbase_index*/ INDEX_ID_SIM, + /*struct_size*/ sizeof(Simulation), + /*name*/ "Simulation", + /*name_plural*/ "simulations", + /*translation_context*/ BLT_I18NCONTEXT_ID_SIMULATION, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ simulation_init_data, - /* copy_data */ simulation_copy_data, - /* free_data */ simulation_free_data, - /* make_local */ nullptr, - /* foreach_id */ simulation_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ simulation_init_data, + /*copy_data*/ simulation_copy_data, + /*free_data*/ simulation_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ simulation_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ simulation_blend_write, - /* blend_read_data */ simulation_blend_read_data, - /* blend_read_lib */ simulation_blend_read_lib, - /* blend_read_expand */ simulation_blend_read_expand, + /*blend_write*/ simulation_blend_write, + /*blend_read_data*/ 
simulation_blend_read_data, + /*blend_read_lib*/ simulation_blend_read_lib, + /*blend_read_expand*/ simulation_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; void *BKE_simulation_add(Main *bmain, const char *name) diff --git a/source/blender/blenkernel/intern/softbody.c b/source/blender/blenkernel/intern/softbody.c index 457b8de4592..1a19ccade7a 100644 --- a/source/blender/blenkernel/intern/softbody.c +++ b/source/blender/blenkernel/intern/softbody.c @@ -377,7 +377,7 @@ static void ccd_mesh_update(Object *ob, ccd_Mesh *pccd_M) pccd_M->bbmin[0] = pccd_M->bbmin[1] = pccd_M->bbmin[2] = 1e30f; pccd_M->bbmax[0] = pccd_M->bbmax[1] = pccd_M->bbmax[2] = -1e30f; - /* blow it up with forcefield ranges */ + /* Blow it up with force-field ranges. */ hull = max_ff(ob->pd->pdef_sbift, ob->pd->pdef_sboft); /* rotate current to previous */ @@ -848,7 +848,7 @@ static void renew_softbody(Object *ob, int totpoint, int totspring) } else { bp->goal = 0.0f; - /* so this will definily be below SOFTGOALSNAP */ + /* So this will definitely be below #SOFTGOALSNAP. */ } bp->nofsprings = 0; @@ -1529,7 +1529,6 @@ static void sb_sfesf_threads_run(struct Depsgraph *depsgraph, } sb_threads = MEM_callocN(sizeof(SB_thread_context) * totthread, "SBSpringsThread"); - memset(sb_threads, 0, sizeof(SB_thread_context) * totthread); left = totsprings; dec = totsprings / totthread + 1; for (i = 0; i < totthread; i++) { @@ -1794,7 +1793,7 @@ static int sb_detect_vertex_collisionCached(float opco[3], return deflected; } -/* sandbox to plug in various deflection algos */ +/* Sandbox to plug in various deflection algorithms. */ static int sb_deflect_face(Object *ob, float *actpos, float *facenormal, @@ -2208,7 +2207,6 @@ static void sb_cf_threads_run(Scene *scene, // printf("sb_cf_threads_run spawning %d threads\n", totthread); sb_threads = MEM_callocN(sizeof(SB_thread_context) * totthread, "SBThread"); - memset(sb_threads, 0, sizeof(SB_thread_context) * totthread); left = totpoint; dec = totpoint / totthread + 1; for (i = 0; i < totthread; i++) { @@ -2552,11 +2550,12 @@ static void softbody_swap_state(Object *ob, float *ppos, float *pvel) } #endif -/* care for bodypoints taken out of the 'ordinary' solver step +/** + * Care for body-points taken out of the 'ordinary' solver step * because they are screwed to goal by bolts * they just need to move along with the goal in time * we need to adjust them on sub frame timing in solver - * so now when frame is done .. put 'em to the position at the end of frame + * so now when frame is done .. put them to the position at the end of frame. */ static void softbody_apply_goalsnap(Object *ob) { @@ -2801,9 +2800,9 @@ static void reference_to_scratch(Object *ob) // printf("reference_to_scratch\n"); } -/* +/** * helper function to get proper spring length - * when object is rescaled + * when object is re-scaled */ static float globallen(float *v1, float *v2, Object *ob) { diff --git a/source/blender/blenkernel/intern/subdiv.c b/source/blender/blenkernel/intern/subdiv.cc similarity index 90% rename from source/blender/blenkernel/intern/subdiv.c rename to source/blender/blenkernel/intern/subdiv.cc index 9098c010747..50d6248f0b5 100644 --- a/source/blender/blenkernel/intern/subdiv.c +++ b/source/blender/blenkernel/intern/subdiv.cc @@ -96,7 +96,7 @@ bool BKE_subdiv_settings_equal(const SubdivSettings *settings_a, const SubdivSet /* Creation from scratch. 
*/ Subdiv *BKE_subdiv_new_from_converter(const SubdivSettings *settings, - struct OpenSubdiv_Converter *converter) + OpenSubdiv_Converter *converter) { SubdivStats stats; BKE_subdiv_stats_init(&stats); @@ -104,7 +104,7 @@ Subdiv *BKE_subdiv_new_from_converter(const SubdivSettings *settings, OpenSubdiv_TopologyRefinerSettings topology_refiner_settings; topology_refiner_settings.level = settings->level; topology_refiner_settings.is_adaptive = settings->is_adaptive; - struct OpenSubdiv_TopologyRefiner *osd_topology_refiner = NULL; + OpenSubdiv_TopologyRefiner *osd_topology_refiner = nullptr; if (converter->getNumVertices(converter) != 0) { osd_topology_refiner = openSubdiv_createTopologyRefinerFromConverter( converter, &topology_refiner_settings); @@ -114,11 +114,11 @@ Subdiv *BKE_subdiv_new_from_converter(const SubdivSettings *settings, * The thing here is: OpenSubdiv can only deal with faces, but our * side of subdiv also deals with loose vertices and edges. */ } - Subdiv *subdiv = MEM_callocN(sizeof(Subdiv), "subdiv from converter"); + Subdiv *subdiv = MEM_cnew(__func__); subdiv->settings = *settings; subdiv->topology_refiner = osd_topology_refiner; - subdiv->evaluator = NULL; - subdiv->displacement_evaluator = NULL; + subdiv->evaluator = nullptr; + subdiv->displacement_evaluator = nullptr; BKE_subdiv_stats_end(&stats, SUBDIV_STATS_TOPOLOGY_REFINER_CREATION_TIME); subdiv->stats = stats; return subdiv; @@ -127,7 +127,7 @@ Subdiv *BKE_subdiv_new_from_converter(const SubdivSettings *settings, Subdiv *BKE_subdiv_new_from_mesh(const SubdivSettings *settings, const Mesh *mesh) { if (mesh->totvert == 0) { - return NULL; + return nullptr; } OpenSubdiv_Converter converter; BKE_subdiv_converter_init_for_mesh(&converter, settings, mesh); @@ -144,7 +144,7 @@ Subdiv *BKE_subdiv_update_from_converter(Subdiv *subdiv, { /* Check if the existing descriptor can be re-used. */ bool can_reuse_subdiv = true; - if (subdiv != NULL && subdiv->topology_refiner != NULL) { + if (subdiv != nullptr && subdiv->topology_refiner != nullptr) { if (!BKE_subdiv_settings_equal(&subdiv->settings, settings)) { can_reuse_subdiv = false; } @@ -162,7 +162,7 @@ Subdiv *BKE_subdiv_update_from_converter(Subdiv *subdiv, return subdiv; } /* Create new subdiv. */ - if (subdiv != NULL) { + if (subdiv != nullptr) { BKE_subdiv_free(subdiv); } return BKE_subdiv_new_from_converter(settings, converter); @@ -183,7 +183,7 @@ Subdiv *BKE_subdiv_update_from_mesh(Subdiv *subdiv, void BKE_subdiv_free(Subdiv *subdiv) { - if (subdiv->evaluator != NULL) { + if (subdiv->evaluator != nullptr) { const eOpenSubdivEvaluator evaluator_type = subdiv->evaluator->type; if (evaluator_type != OPENSUBDIV_EVALUATOR_CPU) { /* Let the draw code do the freeing, to ensure that the OpenGL context is valid. 
*/ @@ -192,11 +192,11 @@ void BKE_subdiv_free(Subdiv *subdiv) } openSubdiv_deleteEvaluator(subdiv->evaluator); } - if (subdiv->topology_refiner != NULL) { + if (subdiv->topology_refiner != nullptr) { openSubdiv_deleteTopologyRefiner(subdiv->topology_refiner); } BKE_subdiv_displacement_detach(subdiv); - if (subdiv->cache_.face_ptex_offset != NULL) { + if (subdiv->cache_.face_ptex_offset != nullptr) { MEM_freeN(subdiv->cache_.face_ptex_offset); } MEM_freeN(subdiv); @@ -208,16 +208,16 @@ void BKE_subdiv_free(Subdiv *subdiv) int *BKE_subdiv_face_ptex_offset_get(Subdiv *subdiv) { - if (subdiv->cache_.face_ptex_offset != NULL) { + if (subdiv->cache_.face_ptex_offset != nullptr) { return subdiv->cache_.face_ptex_offset; } OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner; - if (topology_refiner == NULL) { - return NULL; + if (topology_refiner == nullptr) { + return nullptr; } const int num_coarse_faces = topology_refiner->getNumFaces(topology_refiner); - subdiv->cache_.face_ptex_offset = MEM_malloc_arrayN( - num_coarse_faces + 1, sizeof(int), "subdiv face_ptex_offset"); + subdiv->cache_.face_ptex_offset = static_cast( + MEM_malloc_arrayN(num_coarse_faces + 1, sizeof(int), __func__)); int ptex_offset = 0; for (int face_index = 0; face_index < num_coarse_faces; face_index++) { const int num_ptex_faces = topology_refiner->getNumFacePtexFaces(topology_refiner, face_index); diff --git a/source/blender/blenkernel/intern/subdiv_ccg_mask.c b/source/blender/blenkernel/intern/subdiv_ccg_mask.cc similarity index 86% rename from source/blender/blenkernel/intern/subdiv_ccg_mask.c rename to source/blender/blenkernel/intern/subdiv_ccg_mask.cc index 86891f0fa6e..538c4c5eaf4 100644 --- a/source/blender/blenkernel/intern/subdiv_ccg_mask.c +++ b/source/blender/blenkernel/intern/subdiv_ccg_mask.cc @@ -5,7 +5,7 @@ * \ingroup bke */ -#include +#include #include "BKE_subdiv_ccg.h" @@ -22,12 +22,12 @@ #include "MEM_guardedalloc.h" -typedef struct PolyCornerIndex { +struct PolyCornerIndex { int poly_index; int corner; -} PolyCornerIndex; +}; -typedef struct GridPaintMaskData { +struct GridPaintMaskData { // int grid_size; const MPoly *mpoly; const GridPaintMask *grid_paint_mask; @@ -38,7 +38,7 @@ typedef struct GridPaintMaskData { * there we only have one ptex. 
*/ PolyCornerIndex *ptex_poly_corner; -} GridPaintMaskData; +}; static int mask_get_grid_and_coord(SubdivCCGMaskEvaluator *mask_evaluator, const int ptex_face_index, @@ -48,7 +48,7 @@ static int mask_get_grid_and_coord(SubdivCCGMaskEvaluator *mask_evaluator, float *grid_u, float *grid_v) { - GridPaintMaskData *data = mask_evaluator->user_data; + GridPaintMaskData *data = static_cast(mask_evaluator->user_data); const PolyCornerIndex *poly_corner = &data->ptex_poly_corner[ptex_face_index]; const MPoly *poly = &data->mpoly[poly_corner->poly_index]; const int start_grid_index = poly->loopstart + poly_corner->corner; @@ -70,7 +70,7 @@ BLI_INLINE float read_mask_grid(const GridPaintMask *mask_grid, const float grid_u, const float grid_v) { - if (mask_grid->data == NULL) { + if (mask_grid->data == nullptr) { return 0; } const int grid_size = BKE_subdiv_grid_size_from_level(mask_grid->level); @@ -92,7 +92,7 @@ static float eval_mask(SubdivCCGMaskEvaluator *mask_evaluator, static void free_mask_data(SubdivCCGMaskEvaluator *mask_evaluator) { - GridPaintMaskData *data = mask_evaluator->user_data; + GridPaintMaskData *data = static_cast(mask_evaluator->user_data); MEM_freeN(data->ptex_poly_corner); MEM_freeN(data); } @@ -113,12 +113,12 @@ static int count_num_ptex_faces(const Mesh *mesh) static void mask_data_init_mapping(SubdivCCGMaskEvaluator *mask_evaluator, const Mesh *mesh) { - GridPaintMaskData *data = mask_evaluator->user_data; + GridPaintMaskData *data = static_cast(mask_evaluator->user_data); const MPoly *mpoly = BKE_mesh_polys(mesh); const int num_ptex_faces = count_num_ptex_faces(mesh); /* Allocate memory. */ - data->ptex_poly_corner = MEM_malloc_arrayN( - num_ptex_faces, sizeof(*data->ptex_poly_corner), "ptex poly corner"); + data->ptex_poly_corner = static_cast( + MEM_malloc_arrayN(num_ptex_faces, sizeof(*data->ptex_poly_corner), __func__)); /* Fill in offsets. 
*/ int ptex_face_index = 0; PolyCornerIndex *ptex_poly_corner = data->ptex_poly_corner; @@ -141,9 +141,10 @@ static void mask_data_init_mapping(SubdivCCGMaskEvaluator *mask_evaluator, const static void mask_init_data(SubdivCCGMaskEvaluator *mask_evaluator, const Mesh *mesh) { - GridPaintMaskData *data = mask_evaluator->user_data; + GridPaintMaskData *data = static_cast(mask_evaluator->user_data); data->mpoly = BKE_mesh_polys(mesh); - data->grid_paint_mask = CustomData_get_layer(&mesh->ldata, CD_GRID_PAINT_MASK); + data->grid_paint_mask = static_cast( + CustomData_get_layer(&mesh->ldata, CD_GRID_PAINT_MASK)); mask_data_init_mapping(mask_evaluator, mesh); } @@ -153,8 +154,7 @@ static void mask_init_functions(SubdivCCGMaskEvaluator *mask_evaluator) mask_evaluator->free = free_mask_data; } -bool BKE_subdiv_ccg_mask_init_from_paint(SubdivCCGMaskEvaluator *mask_evaluator, - const struct Mesh *mesh) +bool BKE_subdiv_ccg_mask_init_from_paint(SubdivCCGMaskEvaluator *mask_evaluator, const Mesh *mesh) { if (!CustomData_get_layer(&mesh->ldata, CD_GRID_PAINT_MASK)) { return false; diff --git a/source/blender/blenkernel/intern/subdiv_ccg_material.c b/source/blender/blenkernel/intern/subdiv_ccg_material.cc similarity index 87% rename from source/blender/blenkernel/intern/subdiv_ccg_material.c rename to source/blender/blenkernel/intern/subdiv_ccg_material.cc index 891e1d1b630..23f5371b504 100644 --- a/source/blender/blenkernel/intern/subdiv_ccg_material.c +++ b/source/blender/blenkernel/intern/subdiv_ccg_material.cc @@ -13,11 +13,11 @@ #include "DNA_mesh_types.h" #include "DNA_meshdata_types.h" -typedef struct CCGMaterialFromMeshData { +struct CCGMaterialFromMeshData { const Mesh *mesh; const MPoly *polys; const int *material_indices; -} CCGMaterialFromMeshData; +}; static DMFlagMat subdiv_ccg_material_flags_eval( SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator, const int coarse_face_index) @@ -40,8 +40,8 @@ static void subdiv_ccg_material_flags_free( void BKE_subdiv_ccg_material_flags_init_from_mesh( SubdivCCGMaterialFlagsEvaluator *material_flags_evaluator, const Mesh *mesh) { - CCGMaterialFromMeshData *data = MEM_mallocN(sizeof(CCGMaterialFromMeshData), - "ccg material eval"); + CCGMaterialFromMeshData *data = static_cast( + MEM_mallocN(sizeof(CCGMaterialFromMeshData), __func__)); data->mesh = mesh; data->material_indices = (const int *)CustomData_get_layer_named( &mesh->pdata, CD_PROP_INT32, "material_index"); diff --git a/source/blender/blenkernel/intern/subdiv_converter.c b/source/blender/blenkernel/intern/subdiv_converter.cc similarity index 96% rename from source/blender/blenkernel/intern/subdiv_converter.c rename to source/blender/blenkernel/intern/subdiv_converter.cc index 1d92d845b36..33c14fc50a9 100644 --- a/source/blender/blenkernel/intern/subdiv_converter.c +++ b/source/blender/blenkernel/intern/subdiv_converter.cc @@ -11,7 +11,7 @@ #include "opensubdiv_converter_capi.h" -void BKE_subdiv_converter_free(struct OpenSubdiv_Converter *converter) +void BKE_subdiv_converter_free(OpenSubdiv_Converter *converter) { if (converter->freeUserData) { converter->freeUserData(converter); diff --git a/source/blender/blenkernel/intern/subdiv_converter_mesh.c b/source/blender/blenkernel/intern/subdiv_converter_mesh.cc similarity index 77% rename from source/blender/blenkernel/intern/subdiv_converter_mesh.c rename to source/blender/blenkernel/intern/subdiv_converter_mesh.cc index 64d31ed7f58..8be3f128e10 100644 --- a/source/blender/blenkernel/intern/subdiv_converter_mesh.c +++ 
b/source/blender/blenkernel/intern/subdiv_converter_mesh.cc @@ -7,7 +7,7 @@ #include "subdiv_converter.h" -#include +#include #include "DNA_mesh_types.h" #include "DNA_meshdata_types.h" @@ -31,7 +31,7 @@ * This forces Catmark scheme with all edges marked as infinitely sharp. */ #define BUGGY_SIMPLE_SCHEME_WORKAROUND 1 -typedef struct ConverterStorage { +struct ConverterStorage { SubdivSettings settings; const Mesh *mesh; const float (*vert_positions)[3]; @@ -66,7 +66,7 @@ typedef struct ConverterStorage { /* Number of non-loose elements. */ int num_manifold_vertices; int num_manifold_edges; -} ConverterStorage; +}; static OpenSubdiv_SchemeType get_scheme_type(const OpenSubdiv_Converter *converter) { @@ -74,7 +74,7 @@ static OpenSubdiv_SchemeType get_scheme_type(const OpenSubdiv_Converter *convert (void)converter; return OSD_SCHEME_CATMARK; #else - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); if (storage->settings.is_simple) { return OSD_SCHEME_BILINEAR; } @@ -85,45 +85,47 @@ static OpenSubdiv_SchemeType get_scheme_type(const OpenSubdiv_Converter *convert } static OpenSubdiv_VtxBoundaryInterpolation get_vtx_boundary_interpolation( - const struct OpenSubdiv_Converter *converter) + const OpenSubdiv_Converter *converter) { - ConverterStorage *storage = converter->user_data; - return BKE_subdiv_converter_vtx_boundary_interpolation_from_settings(&storage->settings); + ConverterStorage *storage = static_cast(converter->user_data); + return OpenSubdiv_VtxBoundaryInterpolation( + BKE_subdiv_converter_vtx_boundary_interpolation_from_settings(&storage->settings)); } static OpenSubdiv_FVarLinearInterpolation get_fvar_linear_interpolation( const OpenSubdiv_Converter *converter) { - ConverterStorage *storage = converter->user_data; - return BKE_subdiv_converter_fvar_linear_from_settings(&storage->settings); + ConverterStorage *storage = static_cast(converter->user_data); + return OpenSubdiv_FVarLinearInterpolation( + BKE_subdiv_converter_fvar_linear_from_settings(&storage->settings)); } -static bool specifies_full_topology(const OpenSubdiv_Converter *UNUSED(converter)) +static bool specifies_full_topology(const OpenSubdiv_Converter * /*converter*/) { return false; } static int get_num_faces(const OpenSubdiv_Converter *converter) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); return storage->mesh->totpoly; } static int get_num_edges(const OpenSubdiv_Converter *converter) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); return storage->num_manifold_edges; } static int get_num_vertices(const OpenSubdiv_Converter *converter) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); return storage->num_manifold_vertices; } static int get_num_face_vertices(const OpenSubdiv_Converter *converter, int manifold_face_index) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); return storage->polys[manifold_face_index].totloop; } @@ -131,7 +133,7 @@ static void get_face_vertices(const OpenSubdiv_Converter *converter, int manifold_face_index, int *manifold_face_vertices) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); const MPoly *poly = &storage->polys[manifold_face_index]; const MLoop *mloop = storage->loops; 
for (int corner = 0; corner < poly->totloop; corner++) { @@ -144,7 +146,7 @@ static void get_edge_vertices(const OpenSubdiv_Converter *converter, int manifold_edge_index, int *manifold_edge_vertices) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); const int edge_index = storage->manifold_edge_index_reverse[manifold_edge_index]; const MEdge *edge = &storage->edges[edge_index]; manifold_edge_vertices[0] = storage->manifold_vertex_index[edge->v1]; @@ -153,13 +155,13 @@ static void get_edge_vertices(const OpenSubdiv_Converter *converter, static float get_edge_sharpness(const OpenSubdiv_Converter *converter, int manifold_edge_index) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); #if BUGGY_SIMPLE_SCHEME_WORKAROUND if (storage->settings.is_simple) { return 10.0f; } #endif - if (!storage->settings.use_creases || storage->cd_edge_crease == NULL) { + if (!storage->settings.use_creases || storage->cd_edge_crease == nullptr) { return 0.0f; } const int edge_index = storage->manifold_edge_index_reverse[manifold_edge_index]; @@ -169,7 +171,7 @@ static float get_edge_sharpness(const OpenSubdiv_Converter *converter, int manif static bool is_infinite_sharp_vertex(const OpenSubdiv_Converter *converter, int manifold_vertex_index) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); #if BUGGY_SIMPLE_SCHEME_WORKAROUND if (storage->settings.is_simple) { return true; @@ -181,8 +183,8 @@ static bool is_infinite_sharp_vertex(const OpenSubdiv_Converter *converter, static float get_vertex_sharpness(const OpenSubdiv_Converter *converter, int manifold_vertex_index) { - ConverterStorage *storage = converter->user_data; - if (!storage->settings.use_creases || storage->cd_vertex_crease == NULL) { + ConverterStorage *storage = static_cast(converter->user_data); + if (!storage->settings.use_creases || storage->cd_vertex_crease == nullptr) { return 0.0f; } const int vertex_index = storage->manifold_vertex_index_reverse[manifold_vertex_index]; @@ -191,23 +193,24 @@ static float get_vertex_sharpness(const OpenSubdiv_Converter *converter, int man static int get_num_uv_layers(const OpenSubdiv_Converter *converter) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); const Mesh *mesh = storage->mesh; return CustomData_number_of_layers(&mesh->ldata, CD_PROP_FLOAT2); } static void precalc_uv_layer(const OpenSubdiv_Converter *converter, const int layer_index) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); const Mesh *mesh = storage->mesh; - const float(*mloopuv)[2] = CustomData_get_layer_n(&mesh->ldata, CD_PROP_FLOAT2, layer_index); + const float(*mloopuv)[2] = static_cast( + CustomData_get_layer_n(&mesh->ldata, CD_PROP_FLOAT2, layer_index)); const int num_poly = mesh->totpoly; const int num_vert = mesh->totvert; const float limit[2] = {STD_UV_CONNECT_LIMIT, STD_UV_CONNECT_LIMIT}; /* Initialize memory required for the operations. 
*/ - if (storage->loop_uv_indices == NULL) { - storage->loop_uv_indices = MEM_malloc_arrayN( - mesh->totloop, sizeof(int), "loop uv vertex index"); + if (storage->loop_uv_indices == nullptr) { + storage->loop_uv_indices = static_cast( + MEM_malloc_arrayN(mesh->totloop, sizeof(int), "loop uv vertex index")); } UvVertMap *uv_vert_map = BKE_mesh_uv_vert_map_create( storage->polys, @@ -224,7 +227,7 @@ static void precalc_uv_layer(const OpenSubdiv_Converter *converter, const int la storage->num_uv_coordinates = -1; for (int vertex_index = 0; vertex_index < num_vert; vertex_index++) { const UvMapVert *uv_vert = BKE_mesh_uv_vert_map_get_vert(uv_vert_map, vertex_index); - while (uv_vert != NULL) { + while (uv_vert != nullptr) { if (uv_vert->separate) { storage->num_uv_coordinates++; } @@ -241,13 +244,13 @@ static void precalc_uv_layer(const OpenSubdiv_Converter *converter, const int la BKE_mesh_uv_vert_map_free(uv_vert_map); } -static void finish_uv_layer(const OpenSubdiv_Converter *UNUSED(converter)) +static void finish_uv_layer(const OpenSubdiv_Converter * /*converter*/) { } static int get_num_uvs(const OpenSubdiv_Converter *converter) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); return storage->num_uv_coordinates; } @@ -255,14 +258,14 @@ static int get_face_corner_uv_index(const OpenSubdiv_Converter *converter, const int face_index, const int corner) { - ConverterStorage *storage = converter->user_data; + ConverterStorage *storage = static_cast(converter->user_data); const MPoly *mp = &storage->polys[face_index]; return storage->loop_uv_indices[mp->loopstart + corner]; } static void free_user_data(const OpenSubdiv_Converter *converter) { - ConverterStorage *user_data = converter->user_data; + ConverterStorage *user_data = static_cast(converter->user_data); MEM_SAFE_FREE(user_data->loop_uv_indices); MEM_freeN(user_data->manifold_vertex_index); MEM_freeN(user_data->infinite_sharp_vertices_map); @@ -284,17 +287,17 @@ static void init_functions(OpenSubdiv_Converter *converter) converter->getNumFaceVertices = get_num_face_vertices; converter->getFaceVertices = get_face_vertices; - converter->getFaceEdges = NULL; + converter->getFaceEdges = nullptr; converter->getEdgeVertices = get_edge_vertices; - converter->getNumEdgeFaces = NULL; - converter->getEdgeFaces = NULL; + converter->getNumEdgeFaces = nullptr; + converter->getEdgeFaces = nullptr; converter->getEdgeSharpness = get_edge_sharpness; - converter->getNumVertexEdges = NULL; - converter->getVertexEdges = NULL; - converter->getNumVertexFaces = NULL; - converter->getVertexFaces = NULL; + converter->getNumVertexEdges = nullptr; + converter->getVertexEdges = nullptr; + converter->getNumVertexFaces = nullptr; + converter->getVertexFaces = nullptr; converter->isInfiniteSharpVertex = is_infinite_sharp_vertex; converter->getVertexSharpness = get_vertex_sharpness; @@ -313,35 +316,36 @@ static void initialize_manifold_index_array(const BLI_bitmap *used_map, int **r_indices_reverse, int *r_num_manifold_elements) { - int *indices = NULL; - if (r_indices != NULL) { - indices = MEM_malloc_arrayN(num_elements, sizeof(int), "manifold indices"); + int *indices = nullptr; + if (r_indices != nullptr) { + indices = static_cast(MEM_malloc_arrayN(num_elements, sizeof(int), "manifold indices")); } - int *indices_reverse = NULL; - if (r_indices_reverse != NULL) { - indices_reverse = MEM_malloc_arrayN(num_elements, sizeof(int), "manifold indices reverse"); + int *indices_reverse = nullptr; + if 
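`MEM_malloc_arrayN()` and friends return `void *`, so every allocation in the ported files now names its target type at the call site, as in the `loop_uv_indices` hunk above. A sketch of the same pattern with `std::calloc` standing in for the guarded allocator:

#include <cstdio>
#include <cstdlib>

int main()
{
  const int totloop = 8;
  /* C: `int *loop_uv_indices = allocator(...);` converted implicitly.
   * C++: the target type must be written out. */
  int *loop_uv_indices = static_cast<int *>(std::calloc(totloop, sizeof(int)));
  if (loop_uv_indices == nullptr) {
    return 1;
  }
  loop_uv_indices[0] = 42;
  printf("%d\n", loop_uv_indices[0]);
  std::free(loop_uv_indices);
  return 0;
}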
(r_indices_reverse != nullptr) { + indices_reverse = static_cast( + MEM_malloc_arrayN(num_elements, sizeof(int), "manifold indices reverse")); } int offset = 0; for (int i = 0; i < num_elements; i++) { if (BLI_BITMAP_TEST_BOOL(used_map, i)) { - if (indices != NULL) { + if (indices != nullptr) { indices[i] = i - offset; } - if (indices_reverse != NULL) { + if (indices_reverse != nullptr) { indices_reverse[i - offset] = i; } } else { - if (indices != NULL) { + if (indices != nullptr) { indices[i] = -1; } offset++; } } - if (r_indices != NULL) { + if (r_indices != nullptr) { *r_indices = indices; } - if (r_indices_reverse != NULL) { + if (r_indices_reverse != nullptr) { *r_indices_reverse = indices_reverse; } *r_num_manifold_elements = num_elements - offset; @@ -371,7 +375,7 @@ static void initialize_manifold_indices(ConverterStorage *storage) &storage->num_manifold_vertices); initialize_manifold_index_array(edge_used_map, mesh->totedge, - NULL, + nullptr, &storage->manifold_edge_index_reverse, &storage->num_manifold_edges); /* Initialize infinite sharp mapping. */ @@ -392,21 +396,24 @@ static void init_user_data(OpenSubdiv_Converter *converter, const SubdivSettings *settings, const Mesh *mesh) { - ConverterStorage *user_data = MEM_mallocN(sizeof(ConverterStorage), __func__); + ConverterStorage *user_data = static_cast( + MEM_mallocN(sizeof(ConverterStorage), __func__)); user_data->settings = *settings; user_data->mesh = mesh; user_data->vert_positions = BKE_mesh_vert_positions(mesh); user_data->edges = BKE_mesh_edges(mesh); user_data->polys = BKE_mesh_polys(mesh); user_data->loops = BKE_mesh_loops(mesh); - user_data->cd_vertex_crease = CustomData_get_layer(&mesh->vdata, CD_CREASE); - user_data->cd_edge_crease = CustomData_get_layer(&mesh->edata, CD_CREASE); - user_data->loop_uv_indices = NULL; + user_data->cd_vertex_crease = static_cast( + CustomData_get_layer(&mesh->vdata, CD_CREASE)); + user_data->cd_edge_crease = static_cast( + CustomData_get_layer(&mesh->edata, CD_CREASE)); + user_data->loop_uv_indices = nullptr; initialize_manifold_indices(user_data); converter->user_data = user_data; } -void BKE_subdiv_converter_init_for_mesh(struct OpenSubdiv_Converter *converter, +void BKE_subdiv_converter_init_for_mesh(OpenSubdiv_Converter *converter, const SubdivSettings *settings, const Mesh *mesh) { diff --git a/source/blender/blenkernel/intern/subdiv_deform.c b/source/blender/blenkernel/intern/subdiv_deform.cc similarity index 82% rename from source/blender/blenkernel/intern/subdiv_deform.c rename to source/blender/blenkernel/intern/subdiv_deform.cc index 4924125bc4d..b5a0d6cb040 100644 --- a/source/blender/blenkernel/intern/subdiv_deform.c +++ b/source/blender/blenkernel/intern/subdiv_deform.cc @@ -27,7 +27,7 @@ /** \name Subdivision context * \{ */ -typedef struct SubdivDeformContext { +struct SubdivDeformContext { const Mesh *coarse_mesh; Subdiv *subdiv; @@ -45,15 +45,15 @@ typedef struct SubdivDeformContext { int *accumulated_counters; bool have_displacement; -} SubdivDeformContext; +}; static void subdiv_mesh_prepare_accumulator(SubdivDeformContext *ctx, int num_vertices) { if (!ctx->have_displacement) { return; } - ctx->accumulated_counters = MEM_calloc_arrayN( - num_vertices, sizeof(*ctx->accumulated_counters), "subdiv accumulated counters"); + ctx->accumulated_counters = static_cast( + MEM_calloc_arrayN(num_vertices, sizeof(*ctx->accumulated_counters), __func__)); } static void subdiv_mesh_context_free(SubdivDeformContext *ctx) @@ -98,46 +98,47 @@ static void 
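The `initialize_manifold_index_array()` hunks above keep the same algorithm: loose elements are skipped, used elements get compacted indices, and a reverse table maps compacted indices back to the originals. A standalone sketch of that mapping, with illustrative data and `std::vector` in place of the bitmap and MEM arrays:

#include <cassert>
#include <vector>

int main()
{
  const std::vector<bool> used_map = {true, false, true, true, false, true};
  const int num_elements = int(used_map.size());

  std::vector<int> indices(num_elements, -1);         /* original -> compact */
  std::vector<int> indices_reverse(num_elements, -1); /* compact -> original */

  int offset = 0; /* Number of loose elements seen so far. */
  for (int i = 0; i < num_elements; i++) {
    if (used_map[i]) {
      indices[i] = i - offset;
      indices_reverse[i - offset] = i;
    }
    else {
      offset++;
    }
  }
  const int num_manifold_elements = num_elements - offset;

  assert(num_manifold_elements == 4);
  assert(indices[2] == 1);         /* Second used element. */
  assert(indices[1] == -1);        /* Loose element has no compact index. */
  assert(indices_reverse[3] == 5); /* Fourth used element is original #5. */
  return 0;
}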
subdiv_accumulate_vertex_displacement(SubdivDeformContext *ctx, * \{ */ static bool subdiv_mesh_topology_info(const SubdivForeachContext *foreach_context, - const int UNUSED(num_vertices), - const int UNUSED(num_edges), - const int UNUSED(num_loops), - const int UNUSED(num_polygons), - const int *UNUSED(subdiv_polygon_offset)) + const int /*num_vertices*/, + const int /*num_edges*/, + const int /*num_loops*/, + const int /*num_polygons*/, + const int * /*subdiv_polygon_offset*/) { - SubdivDeformContext *subdiv_context = foreach_context->user_data; + SubdivDeformContext *subdiv_context = static_cast( + foreach_context->user_data); subdiv_mesh_prepare_accumulator(subdiv_context, subdiv_context->coarse_mesh->totvert); return true; } static void subdiv_mesh_vertex_every_corner(const SubdivForeachContext *foreach_context, - void *UNUSED(tls), + void * /*tls*/, const int ptex_face_index, const float u, const float v, const int coarse_vertex_index, - const int UNUSED(coarse_poly_index), - const int UNUSED(coarse_corner), - const int UNUSED(subdiv_vertex_index)) + const int /*coarse_poly_index*/, + const int /*coarse_corner*/, + const int /*subdiv_vertex_index*/) { - SubdivDeformContext *ctx = foreach_context->user_data; + SubdivDeformContext *ctx = static_cast(foreach_context->user_data); subdiv_accumulate_vertex_displacement(ctx, ptex_face_index, u, v, coarse_vertex_index); } static void subdiv_mesh_vertex_corner(const SubdivForeachContext *foreach_context, - void *UNUSED(tls), + void * /*tls*/, const int ptex_face_index, const float u, const float v, const int coarse_vertex_index, - const int UNUSED(coarse_poly_index), - const int UNUSED(coarse_corner), - const int UNUSED(subdiv_vertex_index)) + const int /*coarse_poly_index*/, + const int /*coarse_corner*/, + const int /*subdiv_vertex_index*/) { - SubdivDeformContext *ctx = foreach_context->user_data; + SubdivDeformContext *ctx = static_cast(foreach_context->user_data); BLI_assert(coarse_vertex_index != ORIGINDEX_NONE); BLI_assert(coarse_vertex_index < ctx->num_verts); float inv_num_accumulated = 1.0f; - if (ctx->accumulated_counters != NULL) { + if (ctx->accumulated_counters != nullptr) { inv_num_accumulated = 1.0f / ctx->accumulated_counters[coarse_vertex_index]; } /* Displacement is accumulated in subdiv vertex position. @@ -179,8 +180,8 @@ static void setup_foreach_callbacks(const SubdivDeformContext *subdiv_context, /** \name Public entry point * \{ */ -void BKE_subdiv_deform_coarse_vertices(struct Subdiv *subdiv, - const struct Mesh *coarse_mesh, +void BKE_subdiv_deform_coarse_vertices(Subdiv *subdiv, + const Mesh *coarse_mesh, float (*vertex_cos)[3], int num_verts) { @@ -188,7 +189,7 @@ void BKE_subdiv_deform_coarse_vertices(struct Subdiv *subdiv, /* Make sure evaluator is up to date with possible new topology, and that * is refined for the new positions of coarse vertices. */ if (!BKE_subdiv_eval_begin_from_mesh( - subdiv, coarse_mesh, vertex_cos, SUBDIV_EVALUATOR_TYPE_CPU, NULL)) { + subdiv, coarse_mesh, vertex_cos, SUBDIV_EVALUATOR_TYPE_CPU, nullptr)) { /* This could happen in two situations: * - OpenSubdiv is disabled. 
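The callbacks above drop the `UNUSED(name)` macro in favour of unnamed parameters with the old name kept in a comment, which is how C++ silences unused-parameter warnings without a macro. A minimal illustration, not Blender code:

#include <cstdio>

/* C style, needs a macro to silence -Wunused-parameter:
 *   static bool specifies_full_topology(const Converter *UNUSED(converter)) */

/* C++ style: the parameter is unnamed, the comment documents what it was. */
static bool specifies_full_topology(const void * /*converter*/)
{
  return false;
}

int main()
{
  printf("%d\n", specifies_full_topology(nullptr));
  return 0;
}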
* - Something totally bad happened, and OpenSubdiv rejected our @@ -206,7 +207,7 @@ void BKE_subdiv_deform_coarse_vertices(struct Subdiv *subdiv, subdiv_context.subdiv = subdiv; subdiv_context.vertex_cos = vertex_cos; subdiv_context.num_verts = num_verts; - subdiv_context.have_displacement = (subdiv->displacement_evaluator != NULL); + subdiv_context.have_displacement = (subdiv->displacement_evaluator != nullptr); SubdivForeachContext foreach_context; setup_foreach_callbacks(&subdiv_context, &foreach_context); diff --git a/source/blender/blenkernel/intern/subdiv_displacement.c b/source/blender/blenkernel/intern/subdiv_displacement.cc similarity index 73% rename from source/blender/blenkernel/intern/subdiv_displacement.c rename to source/blender/blenkernel/intern/subdiv_displacement.cc index 693c64a18a4..cab5ed6c89e 100644 --- a/source/blender/blenkernel/intern/subdiv_displacement.c +++ b/source/blender/blenkernel/intern/subdiv_displacement.cc @@ -13,12 +13,12 @@ void BKE_subdiv_displacement_detach(Subdiv *subdiv) { - if (subdiv->displacement_evaluator == NULL) { + if (subdiv->displacement_evaluator == nullptr) { return; } - if (subdiv->displacement_evaluator->free != NULL) { + if (subdiv->displacement_evaluator->free != nullptr) { subdiv->displacement_evaluator->free(subdiv->displacement_evaluator); } MEM_freeN(subdiv->displacement_evaluator); - subdiv->displacement_evaluator = NULL; + subdiv->displacement_evaluator = nullptr; } diff --git a/source/blender/blenkernel/intern/subdiv_displacement_multires.c b/source/blender/blenkernel/intern/subdiv_displacement_multires.cc similarity index 91% rename from source/blender/blenkernel/intern/subdiv_displacement_multires.c rename to source/blender/blenkernel/intern/subdiv_displacement_multires.cc index e45500c36f4..9ca3498e27b 100644 --- a/source/blender/blenkernel/intern/subdiv_displacement_multires.c +++ b/source/blender/blenkernel/intern/subdiv_displacement_multires.cc @@ -5,7 +5,7 @@ * \ingroup bke */ -#include +#include #include "BKE_subdiv.h" @@ -24,12 +24,12 @@ #include "MEM_guardedalloc.h" -typedef struct PolyCornerIndex { +struct PolyCornerIndex { int poly_index; int corner; -} PolyCornerIndex; +}; -typedef struct MultiresDisplacementData { +struct MultiresDisplacementData { Subdiv *subdiv; int grid_size; /* Mesh is used to read external displacement. */ @@ -49,7 +49,7 @@ typedef struct MultiresDisplacementData { /* Sanity check, is used in debug builds. * Controls that initialize() was called prior to eval_displacement(). */ bool is_initialized; -} MultiresDisplacementData; +}; /* Denotes which grid to use to average value of the displacement read from the * grid which corresponds to the ptex face. 
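`PolyCornerIndex` and the other converted structs lose their `typedef struct ... ;` wrappers because C++ already treats a struct tag as a type name. A side-by-side sketch:

#include <cstdio>

/* C spelling, where the typedef is needed to use the name without `struct`:
 *   typedef struct PolyCornerIndex {
 *     int poly_index;
 *     int corner;
 *   } PolyCornerIndex;
 */

/* C++ spelling: */
struct PolyCornerIndex {
  int poly_index;
  int corner;
};

int main()
{
  PolyCornerIndex pci = {3, 1}; /* Usable directly, no `struct` keyword. */
  printf("%d %d\n", pci.poly_index, pci.corner);
  return 0;
}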
*/ @@ -68,7 +68,8 @@ static int displacement_get_grid_and_coord(SubdivDisplacement *displacement, float *grid_u, float *grid_v) { - MultiresDisplacementData *data = displacement->user_data; + MultiresDisplacementData *data = static_cast( + displacement->user_data); const PolyCornerIndex *poly_corner = &data->ptex_poly_corner[ptex_face_index]; const MPoly *poly = &data->mpoly[poly_corner->poly_index]; const int start_grid_index = poly->loopstart + poly_corner->corner; @@ -91,7 +92,8 @@ static const MDisps *displacement_get_other_grid(SubdivDisplacement *displacemen const int corner, const int corner_delta) { - MultiresDisplacementData *data = displacement->user_data; + MultiresDisplacementData *data = static_cast( + displacement->user_data); const PolyCornerIndex *poly_corner = &data->ptex_poly_corner[ptex_face_index]; const MPoly *poly = &data->mpoly[poly_corner->poly_index]; const int effective_corner = (poly->totloop == 4) ? corner : poly_corner->corner; @@ -105,7 +107,7 @@ BLI_INLINE eAverageWith read_displacement_grid(const MDisps *displacement_grid, const float grid_v, float r_tangent_D[3]) { - if (displacement_grid->disps == NULL) { + if (displacement_grid->disps == nullptr) { zero_v3(r_tangent_D); return AVERAGE_WITH_NONE; } @@ -216,7 +218,8 @@ static void average_with_other(SubdivDisplacement *displacement, const int corner_delta, float r_D[3]) { - MultiresDisplacementData *data = displacement->user_data; + MultiresDisplacementData *data = static_cast( + displacement->user_data); const MDisps *other_displacement_grid = displacement_get_other_grid( displacement, ptex_face_index, corner, corner_delta); int other_ptex_face_index, other_corner_index; @@ -239,11 +242,12 @@ static void average_with_other(SubdivDisplacement *displacement, static void average_with_all(SubdivDisplacement *displacement, const int ptex_face_index, const int corner, - const float UNUSED(grid_u), - const float UNUSED(grid_v), + const float /*grid_u*/, + const float /*grid_v*/, float r_D[3]) { - MultiresDisplacementData *data = displacement->user_data; + MultiresDisplacementData *data = static_cast( + displacement->user_data); const PolyCornerIndex *poly_corner = &data->ptex_poly_corner[ptex_face_index]; const MPoly *poly = &data->mpoly[poly_corner->poly_index]; const int num_corners = poly->totloop; @@ -256,7 +260,7 @@ static void average_with_next(SubdivDisplacement *displacement, const int ptex_face_index, const int corner, const float grid_u, - const float UNUSED(grid_v), + const float /*grid_v*/, float r_D[3]) { average_with_other(displacement, ptex_face_index, corner, 0.0f, grid_u, 1, r_D); @@ -265,7 +269,7 @@ static void average_with_next(SubdivDisplacement *displacement, static void average_with_prev(SubdivDisplacement *displacement, const int ptex_face_index, const int corner, - const float UNUSED(grid_u), + const float /*grid_u*/, const float grid_v, float r_D[3]) { @@ -314,7 +318,8 @@ static int displacement_get_face_corner(MultiresDisplacementData *data, static void initialize(SubdivDisplacement *displacement) { - MultiresDisplacementData *data = displacement->user_data; + MultiresDisplacementData *data = static_cast( + displacement->user_data); multiresModifier_ensure_external_read(data->mesh, data->mmd); data->is_initialized = true; } @@ -327,7 +332,8 @@ static void eval_displacement(SubdivDisplacement *displacement, const float dPdv[3], float r_D[3]) { - MultiresDisplacementData *data = displacement->user_data; + MultiresDisplacementData *data = static_cast( + displacement->user_data); 
BLI_assert(data->is_initialized); const int grid_size = data->grid_size; /* Get displacement in tangent space. */ @@ -352,7 +358,8 @@ static void eval_displacement(SubdivDisplacement *displacement, static void free_displacement(SubdivDisplacement *displacement) { - MultiresDisplacementData *data = displacement->user_data; + MultiresDisplacementData *data = static_cast( + displacement->user_data); MEM_freeN(data->ptex_poly_corner); MEM_freeN(data); } @@ -372,12 +379,13 @@ static int count_num_ptex_faces(const Mesh *mesh) static void displacement_data_init_mapping(SubdivDisplacement *displacement, const Mesh *mesh) { - MultiresDisplacementData *data = displacement->user_data; + MultiresDisplacementData *data = static_cast( + displacement->user_data); const MPoly *mpoly = BKE_mesh_polys(mesh); const int num_ptex_faces = count_num_ptex_faces(mesh); /* Allocate memory. */ - data->ptex_poly_corner = MEM_malloc_arrayN( - num_ptex_faces, sizeof(*data->ptex_poly_corner), "ptex poly corner"); + data->ptex_poly_corner = static_cast( + MEM_malloc_arrayN(num_ptex_faces, sizeof(*data->ptex_poly_corner), "ptex poly corner")); /* Fill in offsets. */ int ptex_face_index = 0; PolyCornerIndex *ptex_poly_corner = data->ptex_poly_corner; @@ -403,13 +411,14 @@ static void displacement_init_data(SubdivDisplacement *displacement, Mesh *mesh, const MultiresModifierData *mmd) { - MultiresDisplacementData *data = displacement->user_data; + MultiresDisplacementData *data = static_cast( + displacement->user_data); data->subdiv = subdiv; data->grid_size = BKE_subdiv_grid_size_from_level(mmd->totlvl); data->mesh = mesh; data->mmd = mmd; data->mpoly = BKE_mesh_polys(mesh); - data->mdisps = CustomData_get_layer(&mesh->ldata, CD_MDISPS); + data->mdisps = static_cast(CustomData_get_layer(&mesh->ldata, CD_MDISPS)); data->face_ptex_offset = BKE_subdiv_face_ptex_offset_get(subdiv); data->is_initialized = false; displacement_data_init_mapping(displacement, mesh); @@ -434,8 +443,7 @@ void BKE_subdiv_displacement_attach_from_multires(Subdiv *subdiv, return; } /* Allocate all required memory. 
*/ - SubdivDisplacement *displacement = MEM_callocN(sizeof(SubdivDisplacement), - "multires displacement"); + SubdivDisplacement *displacement = MEM_cnew("multires displacement"); displacement->user_data = MEM_callocN(sizeof(MultiresDisplacementData), "multires displacement data"); displacement_init_data(displacement, subdiv, mesh, mmd); diff --git a/source/blender/blenkernel/intern/subdiv_eval.c b/source/blender/blenkernel/intern/subdiv_eval.cc similarity index 91% rename from source/blender/blenkernel/intern/subdiv_eval.c rename to source/blender/blenkernel/intern/subdiv_eval.cc index 302eeacbf2e..b8cda67f642 100644 --- a/source/blender/blenkernel/intern/subdiv_eval.c +++ b/source/blender/blenkernel/intern/subdiv_eval.cc @@ -53,19 +53,19 @@ bool BKE_subdiv_eval_begin(Subdiv *subdiv, const OpenSubdiv_EvaluatorSettings *settings) { BKE_subdiv_stats_reset(&subdiv->stats, SUBDIV_STATS_EVALUATOR_CREATE); - if (subdiv->topology_refiner == NULL) { + if (subdiv->topology_refiner == nullptr) { /* Happens on input mesh with just loose geometry, * or when OpenSubdiv is disabled */ return false; } - if (subdiv->evaluator == NULL) { + if (subdiv->evaluator == nullptr) { eOpenSubdivEvaluator opensubdiv_evaluator_type = opensubdiv_evalutor_from_subdiv_evaluator_type(evaluator_type); BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_EVALUATOR_CREATE); subdiv->evaluator = openSubdiv_createEvaluatorFromTopologyRefiner( subdiv->topology_refiner, opensubdiv_evaluator_type, evaluator_cache); BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_EVALUATOR_CREATE); - if (subdiv->evaluator == NULL) { + if (subdiv->evaluator == nullptr) { return false; } } @@ -97,7 +97,8 @@ static void set_coarse_positions(Subdiv *subdiv, } } /* Use a temporary buffer so we do not upload vertices one at a time to the GPU. */ - float(*buffer)[3] = MEM_mallocN(sizeof(float[3]) * mesh->totvert, "subdiv tmp coarse positions"); + float(*buffer)[3] = static_cast( + MEM_mallocN(sizeof(float[3]) * mesh->totvert, __func__)); int manifold_vertex_count = 0; for (int vertex_index = 0, manifold_vertex_index = 0; vertex_index < mesh->totvert; vertex_index++) { @@ -105,7 +106,7 @@ static void set_coarse_positions(Subdiv *subdiv, continue; } const float *vertex_co; - if (coarse_vertex_cos != NULL) { + if (coarse_vertex_cos != nullptr) { vertex_co = coarse_vertex_cos[vertex_index]; } else { @@ -122,20 +123,20 @@ static void set_coarse_positions(Subdiv *subdiv, } /* Context which is used to fill face varying data in parallel. */ -typedef struct FaceVaryingDataFromUVContext { +struct FaceVaryingDataFromUVContext { OpenSubdiv_TopologyRefiner *topology_refiner; const Mesh *mesh; const MPoly *polys; const float (*mloopuv)[2]; float (*buffer)[2]; int layer_index; -} FaceVaryingDataFromUVContext; +}; static void set_face_varying_data_from_uv_task(void *__restrict userdata, const int face_index, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - FaceVaryingDataFromUVContext *ctx = userdata; + FaceVaryingDataFromUVContext *ctx = static_cast(userdata); OpenSubdiv_TopologyRefiner *topology_refiner = ctx->topology_refiner; const int layer_index = ctx->layer_index; const MPoly *mpoly = &ctx->polys[face_index]; @@ -164,7 +165,8 @@ static void set_face_varying_data_from_uv(Subdiv *subdiv, const int num_fvar_values = topology_refiner->getNumFVarValues(topology_refiner, layer_index); /* Use a temporary buffer so we do not upload UVs one at a time to the GPU. 
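The multires attach hunk above replaces a `MEM_callocN` plus cast with Blender's typed `MEM_cnew` helper. The stand-in template below only sketches what such a helper does, assuming a trivially-constructible type; it is not the real implementation, and the struct fields are made up:

#include <cstdio>
#include <cstdlib>

template<typename T> static T *cnew(const char * /*allocation_name*/)
{
  /* calloc gives zero-initialized storage; the template parameter saves the
   * caller from spelling out both the sizeof() and the cast. */
  return static_cast<T *>(std::calloc(1, sizeof(T)));
}

struct Displacement {
  void *user_data;
  int flags;
};

int main()
{
  Displacement *displacement = cnew<Displacement>("multires displacement");
  printf("%p %d\n", displacement->user_data, displacement->flags); /* null, 0 */
  std::free(displacement);
  return 0;
}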
*/ - float(*buffer)[2] = MEM_mallocN(sizeof(float[2]) * num_fvar_values, "temp UV storage"); + float(*buffer)[2] = static_cast( + MEM_mallocN(sizeof(float[2]) * num_fvar_values, __func__)); FaceVaryingDataFromUVContext ctx; ctx.topology_refiner = topology_refiner; @@ -188,8 +190,10 @@ static void set_face_varying_data_from_uv(Subdiv *subdiv, static void set_vertex_data_from_orco(Subdiv *subdiv, const Mesh *mesh) { - const float(*orco)[3] = CustomData_get_layer(&mesh->vdata, CD_ORCO); - const float(*cloth_orco)[3] = CustomData_get_layer(&mesh->vdata, CD_CLOTH_ORCO); + const float(*orco)[3] = static_cast( + CustomData_get_layer(&mesh->vdata, CD_ORCO)); + const float(*cloth_orco)[3] = static_cast( + CustomData_get_layer(&mesh->vdata, CD_CLOTH_ORCO)); if (orco || cloth_orco) { OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner; @@ -241,7 +245,7 @@ bool BKE_subdiv_eval_refine_from_mesh(Subdiv *subdiv, const Mesh *mesh, const float (*coarse_vertex_cos)[3]) { - if (subdiv->evaluator == NULL) { + if (subdiv->evaluator == nullptr) { /* NOTE: This situation is supposed to be handled by begin(). */ BLI_assert_msg(0, "Is not supposed to happen"); return false; @@ -251,7 +255,8 @@ bool BKE_subdiv_eval_refine_from_mesh(Subdiv *subdiv, /* Set face-varying data to UV maps. */ const int num_uv_layers = CustomData_number_of_layers(&mesh->ldata, CD_PROP_FLOAT2); for (int layer_index = 0; layer_index < num_uv_layers; layer_index++) { - const float(*mloopuv)[2] = CustomData_get_layer_n(&mesh->ldata, CD_PROP_FLOAT2, layer_index); + const float(*mloopuv)[2] = static_cast( + CustomData_get_layer_n(&mesh->ldata, CD_PROP_FLOAT2, layer_index)); set_face_varying_data_from_uv(subdiv, mesh, mloopuv, layer_index); } /* Set vertex data to orco. */ @@ -265,10 +270,10 @@ bool BKE_subdiv_eval_refine_from_mesh(Subdiv *subdiv, void BKE_subdiv_eval_init_displacement(Subdiv *subdiv) { - if (subdiv->displacement_evaluator == NULL) { + if (subdiv->displacement_evaluator == nullptr) { return; } - if (subdiv->displacement_evaluator->initialize == NULL) { + if (subdiv->displacement_evaluator->initialize == nullptr) { return; } subdiv->displacement_evaluator->initialize(subdiv->displacement_evaluator); @@ -281,7 +286,8 @@ void BKE_subdiv_eval_init_displacement(Subdiv *subdiv) void BKE_subdiv_eval_limit_point( Subdiv *subdiv, const int ptex_face_index, const float u, const float v, float r_P[3]) { - BKE_subdiv_eval_limit_point_and_derivatives(subdiv, ptex_face_index, u, v, r_P, NULL, NULL); + BKE_subdiv_eval_limit_point_and_derivatives( + subdiv, ptex_face_index, u, v, r_P, nullptr, nullptr); } void BKE_subdiv_eval_limit_point_and_derivatives(Subdiv *subdiv, @@ -305,7 +311,7 @@ void BKE_subdiv_eval_limit_point_and_derivatives(Subdiv *subdiv, * which there must be proper derivatives. This might break continuity of normals, but is better * that giving totally unusable derivatives. 
*/ - if (r_dPdu != NULL && r_dPdv != NULL) { + if (r_dPdu != nullptr && r_dPdv != nullptr) { if ((is_zero_v3(r_dPdu) || is_zero_v3(r_dPdv)) || equals_v3v3(r_dPdu, r_dPdv)) { subdiv->evaluator->evaluateLimit(subdiv->evaluator, ptex_face_index, @@ -356,7 +362,7 @@ void BKE_subdiv_eval_displacement(Subdiv *subdiv, const float dPdv[3], float r_D[3]) { - if (subdiv->displacement_evaluator == NULL) { + if (subdiv->displacement_evaluator == nullptr) { zero_v3(r_D); return; } diff --git a/source/blender/blenkernel/intern/subdiv_foreach.c b/source/blender/blenkernel/intern/subdiv_foreach.cc similarity index 97% rename from source/blender/blenkernel/intern/subdiv_foreach.c rename to source/blender/blenkernel/intern/subdiv_foreach.cc index e851c969de8..86a1c18c416 100644 --- a/source/blender/blenkernel/intern/subdiv_foreach.c +++ b/source/blender/blenkernel/intern/subdiv_foreach.cc @@ -65,7 +65,7 @@ BLI_INLINE int ptex_face_resolution_get(const MPoly *poly, int resolution) /** \name Context which is passed to all threaded tasks * \{ */ -typedef struct SubdivForeachTaskContext { +struct SubdivForeachTaskContext { const Mesh *coarse_mesh; const MEdge *coarse_edges; const MPoly *coarse_polys; @@ -109,7 +109,7 @@ typedef struct SubdivForeachTaskContext { * were already evaluated. */ BLI_bitmap *coarse_edges_used_map; -} SubdivForeachTaskContext; +}; /** \} */ @@ -120,7 +120,7 @@ typedef struct SubdivForeachTaskContext { static void *subdiv_foreach_tls_alloc(SubdivForeachTaskContext *ctx) { const SubdivForeachContext *foreach_context = ctx->foreach_context; - void *tls = NULL; + void *tls = nullptr; if (foreach_context->user_data_tls_size != 0) { tls = MEM_mallocN(foreach_context->user_data_tls_size, "tls"); memcpy(tls, foreach_context->user_data_tls, foreach_context->user_data_tls_size); @@ -130,10 +130,10 @@ static void *subdiv_foreach_tls_alloc(SubdivForeachTaskContext *ctx) static void subdiv_foreach_tls_free(SubdivForeachTaskContext *ctx, void *tls) { - if (tls == NULL) { + if (tls == nullptr) { return; } - if (ctx->foreach_context != NULL) { + if (ctx->foreach_context != nullptr) { ctx->foreach_context->user_data_tls_free(tls); } MEM_freeN(tls); @@ -260,12 +260,12 @@ static void subdiv_foreach_ctx_init(Subdiv *subdiv, SubdivForeachTaskContext *ct /* Allocate maps and offsets. */ ctx->coarse_vertices_used_map = BLI_BITMAP_NEW(coarse_mesh->totvert, "vertices used map"); ctx->coarse_edges_used_map = BLI_BITMAP_NEW(coarse_mesh->totedge, "edges used map"); - ctx->subdiv_vertex_offset = MEM_malloc_arrayN( - coarse_mesh->totpoly, sizeof(*ctx->subdiv_vertex_offset), "vertex_offset"); - ctx->subdiv_edge_offset = MEM_malloc_arrayN( - coarse_mesh->totpoly, sizeof(*ctx->subdiv_edge_offset), "subdiv_edge_offset"); - ctx->subdiv_polygon_offset = MEM_malloc_arrayN( - coarse_mesh->totpoly, sizeof(*ctx->subdiv_polygon_offset), "subdiv_edge_offset"); + ctx->subdiv_vertex_offset = static_cast(MEM_malloc_arrayN( + coarse_mesh->totpoly, sizeof(*ctx->subdiv_vertex_offset), "vertex_offset")); + ctx->subdiv_edge_offset = static_cast(MEM_malloc_arrayN( + coarse_mesh->totpoly, sizeof(*ctx->subdiv_edge_offset), "subdiv_edge_offset")); + ctx->subdiv_polygon_offset = static_cast(MEM_malloc_arrayN( + coarse_mesh->totpoly, sizeof(*ctx->subdiv_polygon_offset), "subdiv_edge_offset")); /* Initialize all offsets. */ subdiv_foreach_ctx_init_offsets(ctx); /* Calculate number of geometry in the result subdivision mesh. 
*/ @@ -399,7 +399,7 @@ static void subdiv_foreach_every_corner_vertices_special(SubdivForeachTaskContex static void subdiv_foreach_every_corner_vertices(SubdivForeachTaskContext *ctx, void *tls) { - if (ctx->foreach_context->vertex_every_corner == NULL) { + if (ctx->foreach_context->vertex_every_corner == nullptr) { return; } const Mesh *coarse_mesh = ctx->coarse_mesh; @@ -424,7 +424,7 @@ static void subdiv_foreach_edge_vertices_regular_do(SubdivForeachTaskContext *ct { const int resolution = ctx->settings->resolution; const int resolution_1 = resolution - 1; - const float inv_resolution_1 = 1.0f / (float)resolution_1; + const float inv_resolution_1 = 1.0f / float(resolution_1); const int num_subdiv_vertices_per_coarse_edge = resolution - 2; const int coarse_poly_index = coarse_poly - ctx->coarse_polys; const int ptex_face_index = ctx->face_ptex_offset[coarse_poly_index]; @@ -487,7 +487,7 @@ static void subdiv_foreach_edge_vertices_special_do(SubdivForeachTaskContext *ct const int resolution = ctx->settings->resolution; const int num_subdiv_vertices_per_coarse_edge = resolution - 2; const int num_vertices_per_ptex_edge = ((resolution >> 1) + 1); - const float inv_ptex_resolution_1 = 1.0f / (float)(num_vertices_per_ptex_edge - 1); + const float inv_ptex_resolution_1 = 1.0f / float(num_vertices_per_ptex_edge - 1); const int coarse_poly_index = coarse_poly - ctx->coarse_polys; const int ptex_face_start_index = ctx->face_ptex_offset[coarse_poly_index]; int ptex_face_index = ptex_face_start_index; @@ -576,7 +576,7 @@ static void subdiv_foreach_every_edge_vertices_special(SubdivForeachTaskContext static void subdiv_foreach_every_edge_vertices(SubdivForeachTaskContext *ctx, void *tls) { - if (ctx->foreach_context->vertex_every_edge == NULL) { + if (ctx->foreach_context->vertex_every_edge == nullptr) { return; } const Mesh *coarse_mesh = ctx->coarse_mesh; @@ -598,7 +598,7 @@ static void subdiv_foreach_inner_vertices_regular(SubdivForeachTaskContext *ctx, const MPoly *coarse_poly) { const int resolution = ctx->settings->resolution; - const float inv_resolution_1 = 1.0f / (float)(resolution - 1); + const float inv_resolution_1 = 1.0f / float(resolution - 1); const int coarse_poly_index = coarse_poly - ctx->coarse_polys; const int ptex_face_index = ctx->face_ptex_offset[coarse_poly_index]; const int start_vertex_index = ctx->subdiv_vertex_offset[coarse_poly_index]; @@ -625,7 +625,7 @@ static void subdiv_foreach_inner_vertices_special(SubdivForeachTaskContext *ctx, { const int resolution = ctx->settings->resolution; const int ptex_face_resolution = ptex_face_resolution_get(coarse_poly, resolution); - const float inv_ptex_face_resolution_1 = 1.0f / (float)(ptex_face_resolution - 1); + const float inv_ptex_face_resolution_1 = 1.0f / float(ptex_face_resolution - 1); const int coarse_poly_index = coarse_poly - ctx->coarse_polys; int ptex_face_index = ctx->face_ptex_offset[coarse_poly_index]; const int start_vertex_index = ctx->subdiv_vertex_offset[coarse_poly_index]; @@ -672,7 +672,7 @@ static void subdiv_foreach_inner_vertices(SubdivForeachTaskContext *ctx, /* Traverse all vertices which are emitted from given coarse polygon. 
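Throughout `subdiv_foreach.cc` the C-style `(float)` casts become functional casts such as `float(resolution - 1)`, matching the style used elsewhere in the converted files; the two forms are equivalent for arithmetic types. A tiny check:

#include <cassert>

int main()
{
  const int resolution = 5;
  const float inv_resolution_1 = 1.0f / float(resolution - 1);
  assert(inv_resolution_1 == 0.25f);
  return 0;
}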
*/ static void subdiv_foreach_vertices(SubdivForeachTaskContext *ctx, void *tls, const int poly_index) { - if (ctx->foreach_context->vertex_inner != NULL) { + if (ctx->foreach_context->vertex_inner != nullptr) { subdiv_foreach_inner_vertices(ctx, tls, &ctx->coarse_polys[poly_index]); } } @@ -1096,7 +1096,7 @@ static void subdiv_foreach_loops_regular(SubdivForeachTaskContext *ctx, const int ptex_inner_resolution = ptex_resolution - 2; const int num_subdiv_edges_per_coarse_edge = resolution - 1; const int num_subdiv_vertices_per_coarse_edge = resolution - 2; - const float inv_ptex_resolution_1 = 1.0f / (float)(ptex_resolution - 1); + const float inv_ptex_resolution_1 = 1.0f / float(ptex_resolution - 1); const int ptex_face_index = ctx->face_ptex_offset[coarse_poly_index]; const int start_vertex_index = ctx->vertices_inner_offset + ctx->subdiv_vertex_offset[coarse_poly_index]; @@ -1285,7 +1285,7 @@ static void subdiv_foreach_loops_special(SubdivForeachTaskContext *ctx, const int coarse_poly_index = coarse_poly - ctx->coarse_polys; const int ptex_face_resolution = ptex_face_resolution_get(coarse_poly, resolution); const int ptex_face_inner_resolution = ptex_face_resolution - 2; - const float inv_ptex_resolution_1 = 1.0f / (float)(ptex_face_resolution - 1); + const float inv_ptex_resolution_1 = 1.0f / float(ptex_face_resolution - 1); const int num_inner_vertices_per_ptex = (ptex_face_resolution - 1) * (ptex_face_resolution - 2); const int num_inner_edges_per_ptex_face = num_inner_edges_per_ptex_face_get( ptex_face_inner_resolution + 1); @@ -1664,7 +1664,7 @@ static void subdiv_foreach_loose_vertices_task(void *__restrict userdata, const int coarse_vertex_index, const TaskParallelTLS *__restrict tls) { - SubdivForeachTaskContext *ctx = userdata; + SubdivForeachTaskContext *ctx = static_cast(userdata); if (BLI_BITMAP_TEST_BOOL(ctx->coarse_vertices_used_map, coarse_vertex_index)) { /* Vertex is not loose, was handled when handling polygons. */ return; @@ -1678,14 +1678,14 @@ static void subdiv_foreach_vertices_of_loose_edges_task(void *__restrict userdat const int coarse_edge_index, const TaskParallelTLS *__restrict tls) { - SubdivForeachTaskContext *ctx = userdata; + SubdivForeachTaskContext *ctx = static_cast(userdata); if (BLI_BITMAP_TEST_BOOL(ctx->coarse_edges_used_map, coarse_edge_index)) { /* Vertex is not loose, was handled when handling polygons. */ return; } const int resolution = ctx->settings->resolution; const int resolution_1 = resolution - 1; - const float inv_resolution_1 = 1.0f / (float)resolution_1; + const float inv_resolution_1 = 1.0f / float(resolution_1); const int num_subdiv_vertices_per_coarse_edge = resolution - 2; const MEdge *coarse_edge = &ctx->coarse_edges[coarse_edge_index]; /* Subdivision vertices which corresponds to edge's v1 and v2. 
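The traversal only invokes the foreach callbacks a caller actually provided, and the port switches those guards from `NULL` to `nullptr`. A standalone sketch of that optional-callback pattern, with hypothetical names:

#include <cstdio>

struct ForeachContext {
  void (*vertex_inner)(int index);
  void (*edge)(int index);
};

static void handle_vertex(int index)
{
  printf("vertex %d\n", index);
}

int main()
{
  ForeachContext context = {};
  context.vertex_inner = handle_vertex; /* `edge` is intentionally left null. */

  for (int i = 0; i < 2; i++) {
    if (context.vertex_inner != nullptr) {
      context.vertex_inner(i);
    }
    if (context.edge != nullptr) {
      context.edge(i); /* Skipped: callback not provided. */
    }
  }
  return 0;
}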
*/ @@ -1720,7 +1720,7 @@ static void subdiv_foreach_vertices_of_loose_edges_task(void *__restrict userdat static void subdiv_foreach_single_geometry_vertices(SubdivForeachTaskContext *ctx, void *tls) { - if (ctx->foreach_context->vertex_corner == NULL) { + if (ctx->foreach_context->vertex_corner == nullptr) { return; } const Mesh *coarse_mesh = ctx->coarse_mesh; @@ -1759,10 +1759,10 @@ static void subdiv_foreach_single_thread_tasks(SubdivForeachTaskContext *ctx) subdiv_foreach_tls_free(ctx, tls); const SubdivForeachContext *foreach_context = ctx->foreach_context; - const bool is_loose_geometry_tagged = (foreach_context->vertex_every_edge != NULL && - foreach_context->vertex_every_corner != NULL); - const bool is_loose_geometry_tags_needed = (foreach_context->vertex_loose != NULL || - foreach_context->vertex_of_loose_edge != NULL); + const bool is_loose_geometry_tagged = (foreach_context->vertex_every_edge != nullptr && + foreach_context->vertex_every_corner != nullptr); + const bool is_loose_geometry_tags_needed = (foreach_context->vertex_loose != nullptr || + foreach_context->vertex_of_loose_edge != nullptr); if (is_loose_geometry_tagged && is_loose_geometry_tags_needed) { subdiv_foreach_mark_non_loose_geometry(ctx); } @@ -1772,17 +1772,17 @@ static void subdiv_foreach_task(void *__restrict userdata, const int poly_index, const TaskParallelTLS *__restrict tls) { - SubdivForeachTaskContext *ctx = userdata; + SubdivForeachTaskContext *ctx = static_cast(userdata); /* Traverse hi-poly vertex coordinates and normals. */ subdiv_foreach_vertices(ctx, tls->userdata_chunk, poly_index); /* Traverse mesh geometry for the given base poly index. */ - if (ctx->foreach_context->edge != NULL) { + if (ctx->foreach_context->edge != nullptr) { subdiv_foreach_edges(ctx, tls->userdata_chunk, poly_index); } - if (ctx->foreach_context->loop != NULL) { + if (ctx->foreach_context->loop != nullptr) { subdiv_foreach_loops(ctx, tls->userdata_chunk, poly_index); } - if (ctx->foreach_context->poly != NULL) { + if (ctx->foreach_context->poly != nullptr) { subdiv_foreach_polys(ctx, tls->userdata_chunk, poly_index); } } @@ -1791,13 +1791,13 @@ static void subdiv_foreach_boundary_edges_task(void *__restrict userdata, const int edge_index, const TaskParallelTLS *__restrict tls) { - SubdivForeachTaskContext *ctx = userdata; + SubdivForeachTaskContext *ctx = static_cast(userdata); subdiv_foreach_boundary_edges(ctx, tls->userdata_chunk, edge_index); } static void subdiv_foreach_free(const void *__restrict userdata, void *__restrict userdata_chunk) { - const SubdivForeachTaskContext *ctx = userdata; + const SubdivForeachTaskContext *ctx = static_cast(userdata); ctx->foreach_context->user_data_tls_free(userdata_chunk); } @@ -1814,7 +1814,7 @@ bool BKE_subdiv_foreach_subdiv_geometry(Subdiv *subdiv, ctx.settings = mesh_settings; ctx.foreach_context = context; subdiv_foreach_ctx_init(subdiv, &ctx); - if (context->topology_info != NULL) { + if (context->topology_info != nullptr) { if (!context->topology_info(context, ctx.num_subdiv_vertices, ctx.num_subdiv_edges, @@ -1833,7 +1833,7 @@ bool BKE_subdiv_foreach_subdiv_geometry(Subdiv *subdiv, parallel_range_settings.userdata_chunk = context->user_data_tls; parallel_range_settings.userdata_chunk_size = context->user_data_tls_size; parallel_range_settings.min_iter_per_thread = 1; - if (context->user_data_tls_free != NULL) { + if (context->user_data_tls_free != nullptr) { parallel_range_settings.func_free = subdiv_foreach_free; } @@ -1845,21 +1845,21 @@ bool 
BKE_subdiv_foreach_subdiv_geometry(Subdiv *subdiv, BLI_task_parallel_range( 0, coarse_mesh->totpoly, &ctx, subdiv_foreach_task, ¶llel_range_settings); - if (context->vertex_loose != NULL) { + if (context->vertex_loose != nullptr) { BLI_task_parallel_range(0, coarse_mesh->totvert, &ctx, subdiv_foreach_loose_vertices_task, ¶llel_range_settings); } - if (context->vertex_of_loose_edge != NULL) { + if (context->vertex_of_loose_edge != nullptr) { BLI_task_parallel_range(0, coarse_mesh->totedge, &ctx, subdiv_foreach_vertices_of_loose_edges_task, ¶llel_range_settings); } - if (context->edge != NULL) { + if (context->edge != nullptr) { BLI_task_parallel_range(0, coarse_mesh->totedge, &ctx, diff --git a/source/blender/blenkernel/intern/subdiv_mesh.cc b/source/blender/blenkernel/intern/subdiv_mesh.cc index 601aa1fa725..a343a0e02bb 100644 --- a/source/blender/blenkernel/intern/subdiv_mesh.cc +++ b/source/blender/blenkernel/intern/subdiv_mesh.cc @@ -471,11 +471,11 @@ static void subdiv_vertex_orco_evaluate(const SubdivMeshContext *ctx, if (ctx->orco) { copy_v3_v3(ctx->orco[subdiv_vertex_index], vertex_data); if (ctx->cloth_orco) { - copy_v3_v3(ctx->orco[subdiv_vertex_index], vertex_data + 3); + copy_v3_v3(ctx->cloth_orco[subdiv_vertex_index], vertex_data + 3); } } else if (ctx->cloth_orco) { - copy_v3_v3(ctx->orco[subdiv_vertex_index], vertex_data); + copy_v3_v3(ctx->cloth_orco[subdiv_vertex_index], vertex_data); } } } diff --git a/source/blender/blenkernel/intern/subdiv_stats.c b/source/blender/blenkernel/intern/subdiv_stats.cc similarity index 100% rename from source/blender/blenkernel/intern/subdiv_stats.c rename to source/blender/blenkernel/intern/subdiv_stats.cc diff --git a/source/blender/blenkernel/intern/subdiv_topology.c b/source/blender/blenkernel/intern/subdiv_topology.cc similarity index 84% rename from source/blender/blenkernel/intern/subdiv_topology.c rename to source/blender/blenkernel/intern/subdiv_topology.cc index 8dcd4b3517c..fc2cc1b7026 100644 --- a/source/blender/blenkernel/intern/subdiv_topology.c +++ b/source/blender/blenkernel/intern/subdiv_topology.cc @@ -11,7 +11,7 @@ #include "opensubdiv_topology_refiner_capi.h" -int BKE_subdiv_topology_num_fvar_layers_get(const struct Subdiv *subdiv) +int BKE_subdiv_topology_num_fvar_layers_get(const Subdiv *subdiv) { OpenSubdiv_TopologyRefiner *topology_refiner = subdiv->topology_refiner; return topology_refiner->getNumFVarChannels(topology_refiner); diff --git a/source/blender/blenkernel/intern/subsurf_ccg.c b/source/blender/blenkernel/intern/subsurf_ccg.cc similarity index 82% rename from source/blender/blenkernel/intern/subsurf_ccg.c rename to source/blender/blenkernel/intern/subsurf_ccg.cc index f5835b7fbb8..abeb0f33a18 100644 --- a/source/blender/blenkernel/intern/subsurf_ccg.c +++ b/source/blender/blenkernel/intern/subsurf_ccg.cc @@ -5,18 +5,11 @@ * \ingroup bke */ -#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L -# ifdef __GNUC__ -# pragma GCC diagnostic ignored "-Wvla" -# endif -# define USE_DYNSIZE -#endif - -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include "atomic_ops.h" @@ -36,6 +29,7 @@ #include "BLI_task.h" #include "BLI_threads.h" #include "BLI_utildefines.h" +#include "BLI_vector.hh" #include "BKE_ccg.h" #include "BKE_cdderivedmesh.h" @@ -49,10 +43,6 @@ #include "BKE_scene.h" #include "BKE_subsurf.h" -#ifndef USE_DYNSIZE -# include "BLI_array.h" -#endif - #include "CCGSubSurf.h" /* assumes MLoop's are laid out 4 for each poly, in order */ @@ -66,26 +56,26 
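The renamed `.cc` files also switch from the C standard headers to the C++ `<c...>` wrappers, which expose the same functions in namespace `std` as well as globally. The headers below are representative only, not necessarily the exact set used in `subsurf_ccg.cc`:

#include <cmath>
#include <cstdio>
#include <cstring>

int main()
{
  char buffer[32];
  std::strcpy(buffer, "subsurf");
  std::printf("%s %f\n", buffer, std::sqrt(2.0));
  return 0;
}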
@@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss, static void *arena_alloc(CCGAllocatorHDL a, int numBytes) { - return BLI_memarena_alloc(a, numBytes); + return BLI_memarena_alloc(reinterpret_cast(a), numBytes); } static void *arena_realloc(CCGAllocatorHDL a, void *ptr, int newSize, int oldSize) { - void *p2 = BLI_memarena_alloc(a, newSize); + void *p2 = BLI_memarena_alloc(reinterpret_cast(a), newSize); if (ptr) { memcpy(p2, ptr, oldSize); } return p2; } -static void arena_free(CCGAllocatorHDL UNUSED(a), void *UNUSED(ptr)) +static void arena_free(CCGAllocatorHDL /*a*/, void * /*ptr*/) { /* do nothing */ } static void arena_release(CCGAllocatorHDL a) { - BLI_memarena_free(a); + BLI_memarena_free(reinterpret_cast(a)); } typedef enum { @@ -96,6 +86,7 @@ typedef enum { CCG_ALLOC_MASK = 8, CCG_SIMPLE_SUBDIV = 16, } CCGFlags; +ENUM_OPERATORS(CCGFlags, CCG_SIMPLE_SUBDIV); static CCGSubSurf *_getSubSurf(CCGSubSurf *prevSS, int subdivLevels, int numLayers, CCGFlags flags) { @@ -111,7 +102,7 @@ static CCGSubSurf *_getSubSurf(CCGSubSurf *prevSS, int subdivLevels, int numLaye if (prevSS) { int oldUseAging; - ccgSubSurf_getUseAgeCounts(prevSS, &oldUseAging, NULL, NULL, NULL); + ccgSubSurf_getUseAgeCounts(prevSS, &oldUseAging, nullptr, nullptr, nullptr); if ((oldUseAging != useAging) || (ccgSubSurf_getSimpleSubdiv(prevSS) != !!(flags & CCG_SIMPLE_SUBDIV))) { @@ -153,7 +144,7 @@ static CCGSubSurf *_getSubSurf(CCGSubSurf *prevSS, int subdivLevels, int numLaye ccgSS = ccgSubSurf_new(&ifc, subdivLevels, &allocatorIFC, allocator); } else { - ccgSS = ccgSubSurf_new(&ifc, subdivLevels, NULL, NULL); + ccgSS = ccgSubSurf_new(&ifc, subdivLevels, nullptr, nullptr); } if (useAging) { @@ -240,7 +231,7 @@ static int getFaceIndex( } static void get_face_uv_map_vert( - UvVertMap *vmap, struct MPoly *mpoly, struct MLoop *ml, int fi, CCGVertHDL *fverts) + UvVertMap *vmap, MPoly *mpoly, MLoop *ml, int fi, CCGVertHDL *fverts) { UvMapVert *v, *nv; int j, nverts = mpoly[fi].totloop; @@ -271,11 +262,8 @@ static int ss_sync_from_uv(CCGSubSurf *ss, int i, seam; UvMapVert *v; UvVertMap *vmap; + blender::Vector fverts; float limit[2]; -#ifndef USE_DYNSIZE - CCGVertHDL *fverts = NULL; - BLI_array_declare(fverts); -#endif EdgeSet *eset; float uv[3] = {0.0f, 0.0f, 0.0f}; /* only first 2 values are written into */ @@ -285,7 +273,7 @@ static int ss_sync_from_uv(CCGSubSurf *ss, * Also, initially intention is to treat merged vertices from mirror modifier as seams. 
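In C the `CCGFlags` values combine freely with `|` because they are plain ints; in C++ the result of `|` is `int` and does not convert back to the enum, which is why the port adds `ENUM_OPERATORS(CCGFlags, CCG_SIMPLE_SUBDIV)`. A hand-written sketch of roughly what such operators look like:

#include <cstdio>

enum CCGFlags {
  CCG_USE_ARENA = 1,
  CCG_CALC_NORMALS = 2,
  CCG_ALLOC_MASK = 8,
};

inline CCGFlags operator|(CCGFlags a, CCGFlags b)
{
  return CCGFlags(int(a) | int(b));
}

inline CCGFlags &operator|=(CCGFlags &a, CCGFlags b)
{
  return a = a | b;
}

int main()
{
  CCGFlags flags = CCG_USE_ARENA | CCG_CALC_NORMALS; /* OK with the operators. */
  flags |= CCG_ALLOC_MASK;
  printf("%d\n", int(flags)); /* 11 */
  return 0;
}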
* This fixes a very old regression (2.49 was correct here) */ vmap = BKE_mesh_uv_vert_map_create( - mpoly, NULL, NULL, mloop, mloopuv, totface, totvert, limit, false, true); + mpoly, nullptr, nullptr, mloop, mloopuv, totface, totvert, limit, false, true); if (!vmap) { return 0; } @@ -304,7 +292,7 @@ static int ss_sync_from_uv(CCGSubSurf *ss, } } - seam = (v != NULL); + seam = (v != nullptr); for (v = BKE_mesh_uv_vert_map_get_vert(vmap, i); v; v = v->next) { if (v->separate) { @@ -327,17 +315,12 @@ static int ss_sync_from_uv(CCGSubSurf *ss, int nverts = mp->totloop; int j, j_next; CCGFace *origf = ccgSubSurf_getFace(origss, POINTER_FROM_INT(i)); - /* uint *fv = &mp->v1; */ + // uint *fv = &mp->v1; MLoop *ml = mloop + mp->loopstart; -#ifdef USE_DYNSIZE - CCGVertHDL fverts[nverts]; -#else - BLI_array_clear(fverts); - BLI_array_grow_items(fverts, nverts); -#endif + fverts.reinitialize(nverts); - get_face_uv_map_vert(vmap, mpoly, ml, i, fverts); + get_face_uv_map_vert(vmap, mpoly, ml, i, fverts.data()); for (j = 0, j_next = nverts - 1; j < nverts; j_next = j++) { uint v0 = POINTER_AS_UINT(fverts[j_next]); @@ -362,21 +345,12 @@ static int ss_sync_from_uv(CCGSubSurf *ss, int nverts = mp->totloop; CCGFace *f; -#ifdef USE_DYNSIZE - CCGVertHDL fverts[nverts]; -#else - BLI_array_clear(fverts); - BLI_array_grow_items(fverts, nverts); -#endif + fverts.reinitialize(nverts); - get_face_uv_map_vert(vmap, mpoly, ml, i, fverts); - ccgSubSurf_syncFace(ss, POINTER_FROM_INT(i), nverts, fverts, &f); + get_face_uv_map_vert(vmap, mpoly, ml, i, fverts.data()); + ccgSubSurf_syncFace(ss, POINTER_FROM_INT(i), nverts, fverts.data(), &f); } -#ifndef USE_DYNSIZE - BLI_array_free(fverts); -#endif - BKE_mesh_uv_vert_map_free(vmap); ccgSubSurf_processSync(ss); @@ -387,20 +361,21 @@ static void set_subsurf_legacy_uv(CCGSubSurf *ss, DerivedMesh *dm, DerivedMesh * { CCGFaceIterator fi; int index, gridSize, gridFaces, /*edgeSize,*/ totface, x, y, S; - const float(*dmloopuv)[2] = CustomData_get_layer_n(&dm->loopData, CD_PROP_FLOAT2, n); + const float(*dmloopuv)[2] = static_cast( + CustomData_get_layer_n(&dm->loopData, CD_PROP_FLOAT2, n)); /* need to update both CD_MTFACE & CD_PROP_FLOAT2, hrmf, we could get away with * just tface except applying the modifier then looses subsurf UV */ - MTFace *tface = CustomData_get_layer_n_for_write( - &result->faceData, CD_MTFACE, n, result->numTessFaceData); - float(*mloopuv)[2] = CustomData_get_layer_n_for_write( - &result->loopData, CD_PROP_FLOAT2, n, result->getNumLoops(dm)); + MTFace *tface = static_cast( + CustomData_get_layer_n_for_write(&result->faceData, CD_MTFACE, n, result->numTessFaceData)); + float(*mloopuv)[2] = static_cast(CustomData_get_layer_n_for_write( + &result->loopData, CD_PROP_FLOAT2, n, result->getNumLoops(dm))); if (!dmloopuv || (!tface && !mloopuv)) { return; } /* create a CCGSubSurf from uv's */ - CCGSubSurf *uvss = _getSubSurf(NULL, ccgSubSurf_getSubdivisionLevels(ss), 2, CCG_USE_ARENA); + CCGSubSurf *uvss = _getSubSurf(nullptr, ccgSubSurf_getSubdivisionLevels(ss), 2, CCG_USE_ARENA); if (!ss_sync_from_uv(uvss, ss, dm, dmloopuv)) { ccgSubSurf_free(uvss); @@ -414,7 +389,8 @@ static void set_subsurf_legacy_uv(CCGSubSurf *ss, DerivedMesh *dm, DerivedMesh * gridFaces = gridSize - 1; /* make a map from original faces to CCGFaces */ - CCGFace **faceMap = MEM_mallocN(totface * sizeof(*faceMap), "facemapuv"); + CCGFace **faceMap = static_cast( + MEM_mallocN(totface * sizeof(*faceMap), "facemapuv")); for (ccgSubSurf_initFaceIterator(uvss, &fi); 
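The GCC variable-length arrays (`CCGVertHDL fverts[nverts]`) and the `BLI_array` fallback are replaced by one heap-backed vector that is resized per polygon; `fverts.reinitialize(nverts)` plays the role of the resize. A sketch using `std::vector` as a stand-in for `blender::Vector`:

#include <cstdio>
#include <vector>

int main()
{
  const int poly_sizes[] = {4, 3, 5};
  std::vector<const void *> fverts; /* One allocation, reused per polygon. */

  for (int poly = 0; poly < 3; poly++) {
    const int nverts = poly_sizes[poly];
    fverts.assign(nverts, nullptr); /* Same role as fverts.reinitialize(nverts). */
    printf("polygon %d uses %zu handles\n", poly, fverts.size());
    /* fverts.data() is then passed on, as in ccgSubSurf_syncFace(..., fverts.data(), ...). */
  }
  return 0;
}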
!ccgFaceIterator_isStopped(&fi); ccgFaceIterator_next(&fi)) { CCGFace *f = ccgFaceIterator_getCurrent(&fi); @@ -430,7 +406,8 @@ static void set_subsurf_legacy_uv(CCGSubSurf *ss, DerivedMesh *dm, DerivedMesh * int numVerts = ccgSubSurf_getFaceNumVerts(f); for (S = 0; S < numVerts; S++) { - float(*faceGridData)[2] = ccgSubSurf_getFaceGridDataArray(uvss, f, S); + float(*faceGridData)[2] = static_cast( + ccgSubSurf_getFaceGridDataArray(uvss, f, S)); for (y = 0; y < gridFaces; y++) { for (x = 0; x < gridFaces; x++) { @@ -472,16 +449,16 @@ static void set_subsurf_uv(CCGSubSurf *ss, DerivedMesh *dm, DerivedMesh *result, #define SUB_ELEMS_FACE 50 typedef float FaceVertWeight[SUB_ELEMS_FACE][SUB_ELEMS_FACE]; -typedef struct FaceVertWeightEntry { +struct FaceVertWeightEntry { FaceVertWeight *weight; float *w; int valid; -} FaceVertWeightEntry; +}; -typedef struct WeightTable { +struct WeightTable { FaceVertWeightEntry *weight_table; int len; -} WeightTable; +}; static float *get_ss_weights(WeightTable *wtable, int gridCuts, int faceLen) { @@ -496,21 +473,22 @@ static float *get_ss_weights(WeightTable *wtable, int gridCuts, int faceLen) MEM_freeN(wtable->weight_table); } - wtable->weight_table = tmp; + wtable->weight_table = static_cast(tmp); wtable->len = faceLen + 1; } if (!wtable->weight_table[faceLen].valid) { wtable->weight_table[faceLen].valid = 1; - wtable->weight_table[faceLen].w = w = MEM_callocN( - sizeof(float) * faceLen * faceLen * (gridCuts + 2) * (gridCuts + 2), "weight table alloc"); - fac = 1.0f / (float)faceLen; + wtable->weight_table[faceLen].w = w = static_cast( + MEM_callocN(sizeof(float) * faceLen * faceLen * (gridCuts + 2) * (gridCuts + 2), + "weight table alloc")); + fac = 1.0f / float(faceLen); for (i = 0; i < faceLen; i++) { for (x = 0; x < gridCuts + 2; x++) { for (y = 0; y < gridCuts + 2; y++) { - fx = 0.5f - (float)x / (float)(gridCuts + 1) / 2.0f; - fy = 0.5f - (float)y / (float)(gridCuts + 1) / 2.0f; + fx = 0.5f - float(x) / float(gridCuts + 1) / 2.0f; + fy = 0.5f - float(y) / float(gridCuts + 1) / 2.0f; fac2 = faceLen - 4; w1 = (1.0f - fx) * (1.0f - fy) + (-fac2 * fx * fy * fac); @@ -520,7 +498,7 @@ static float *get_ss_weights(WeightTable *wtable, int gridCuts, int faceLen) /* these values aren't used for tri's and cause divide by zero */ if (faceLen > 3) { fac2 = 1.0f - (w1 + w2 + w4); - fac2 = fac2 / (float)(faceLen - 3); + fac2 = fac2 / float(faceLen - 3); for (j = 0; j < faceLen; j++) { w[j] = fac2; } @@ -559,11 +537,8 @@ static void ss_sync_ccg_from_derivedmesh(CCGSubSurf *ss, float (*vertexCos)[3], int useFlatSubdiv) { - float creaseFactor = (float)ccgSubSurf_getSubdivisionLevels(ss); -#ifndef USE_DYNSIZE - CCGVertHDL *fVerts = NULL; - BLI_array_declare(fVerts); -#endif + float creaseFactor = float(ccgSubSurf_getSubdivisionLevels(ss)); + blender::Vector fverts; float(*positions)[3] = (float(*)[3])dm->getVertArray(dm); MEdge *medge = dm->getEdgeArray(dm); MEdge *me; @@ -610,23 +585,18 @@ static void ss_sync_ccg_from_derivedmesh(CCGSubSurf *ss, for (i = 0; i < dm->numPolyData; i++, mp++) { CCGFace *f; -#ifdef USE_DYNSIZE - CCGVertHDL fVerts[mp->totloop]; -#else - BLI_array_clear(fVerts); - BLI_array_grow_items(fVerts, mp->totloop); -#endif + fverts.reinitialize(mp->totloop); ml = mloop + mp->loopstart; for (j = 0; j < mp->totloop; j++, ml++) { - fVerts[j] = POINTER_FROM_UINT(ml->v); + fverts[j] = POINTER_FROM_UINT(ml->v); } /* This is very bad, means mesh is internally inconsistent. 
* it is not really possible to continue without modifying * other parts of code significantly to handle missing faces. * since this really shouldn't even be possible we just bail. */ - if (ccgSubSurf_syncFace(ss, POINTER_FROM_INT(i), mp->totloop, fVerts, &f) == + if (ccgSubSurf_syncFace(ss, POINTER_FROM_INT(i), mp->totloop, fverts.data(), &f) == eCCGError_InvalidValue) { static int hasGivenError = 0; @@ -644,17 +614,13 @@ static void ss_sync_ccg_from_derivedmesh(CCGSubSurf *ss, } ccgSubSurf_processSync(ss); - -#ifndef USE_DYNSIZE - BLI_array_free(fVerts); -#endif } static void ss_sync_from_derivedmesh(CCGSubSurf *ss, DerivedMesh *dm, float (*vertexCos)[3], int use_flat_subdiv, - bool UNUSED(use_subdiv_uvs)) + bool /*use_subdiv_uvs*/) { ss_sync_ccg_from_derivedmesh(ss, dm, vertexCos, use_flat_subdiv); } @@ -718,7 +684,7 @@ static void UNUSED_FUNCTION(ccgDM_getMinMax)(DerivedMesh *dm, float r_min[3], fl for (ccgSubSurf_initVertIterator(ss, &vi); !ccgVertIterator_isStopped(&vi); ccgVertIterator_next(&vi)) { CCGVert *v = ccgVertIterator_getCurrent(&vi); - float *co = ccgSubSurf_getVertData(ss, v); + float *co = static_cast(ccgSubSurf_getVertData(ss, v)); minmax_v3_v3v3(co, r_min, r_max); } @@ -726,7 +692,7 @@ static void UNUSED_FUNCTION(ccgDM_getMinMax)(DerivedMesh *dm, float r_min[3], fl for (ccgSubSurf_initEdgeIterator(ss, &ei); !ccgEdgeIterator_isStopped(&ei); ccgEdgeIterator_next(&ei)) { CCGEdge *e = ccgEdgeIterator_getCurrent(&ei); - CCGElem *edgeData = ccgSubSurf_getEdgeDataArray(ss, e); + CCGElem *edgeData = static_cast(ccgSubSurf_getEdgeDataArray(ss, e)); for (i = 0; i < edgeSize; i++) { minmax_v3_v3v3(CCG_elem_offset_co(&key, edgeData, i), r_min, r_max); @@ -739,7 +705,7 @@ static void UNUSED_FUNCTION(ccgDM_getMinMax)(DerivedMesh *dm, float r_min[3], fl int S, x, y, numVerts = ccgSubSurf_getFaceNumVerts(f); for (S = 0; S < numVerts; S++) { - CCGElem *faceGridData = ccgSubSurf_getFaceGridDataArray(ss, f, S); + CCGElem *faceGridData = static_cast(ccgSubSurf_getFaceGridDataArray(ss, f, S)); for (y = 0; y < gridSize; y++) { for (x = 0; x < gridSize; x++) { @@ -812,13 +778,13 @@ static CCGElem *get_vertex_elem(CCGDerivedMesh *ccgdm, int vertNum) offset = vertNum - ccgdm->faceMap[i].startVert; if (offset < 1) { - return ccgSubSurf_getFaceCenterData(f); + return static_cast(ccgSubSurf_getFaceCenterData(f)); } if (offset < gridSideEnd) { offset -= 1; grid = offset / gridSideVerts; x = offset % gridSideVerts + 1; - return ccgSubSurf_getFaceGridEdgeData(ss, f, grid, x); + return static_cast(ccgSubSurf_getFaceGridEdgeData(ss, f, grid, x)); } if (offset < gridInternalEnd) { offset -= gridSideEnd; @@ -826,7 +792,7 @@ static CCGElem *get_vertex_elem(CCGDerivedMesh *ccgdm, int vertNum) offset %= gridInternalVerts; y = offset / gridSideVerts + 1; x = offset % gridSideVerts + 1; - return ccgSubSurf_getFaceGridData(ss, f, grid, x, y); + return static_cast(ccgSubSurf_getFaceGridData(ss, f, grid, x, y)); } } if ((vertNum < ccgdm->vertMap[0].startVert) && (ccgSubSurf_getNumEdges(ss) > 0)) { @@ -843,7 +809,7 @@ static CCGElem *get_vertex_elem(CCGDerivedMesh *ccgdm, int vertNum) e = ccgdm->edgeMap[i].edge; x = vertNum - ccgdm->edgeMap[i].startVert + 1; - return ccgSubSurf_getEdgeData(ss, e, x); + return static_cast(ccgSubSurf_getEdgeData(ss, e, x)); } /* this vert comes from vert data */ @@ -851,7 +817,7 @@ static CCGElem *get_vertex_elem(CCGDerivedMesh *ccgdm, int vertNum) i = vertNum - ccgdm->vertMap[0].startVert; v = ccgdm->vertMap[i].vert; - return ccgSubSurf_getVertData(ss, v); + return 
static_cast(ccgSubSurf_getVertData(ss, v)); } static void ccgDM_getFinalVertCo(DerivedMesh *dm, int vertNum, float r_co[3]) @@ -901,12 +867,12 @@ static void ccgDM_copyFinalVertArray(DerivedMesh *dm, float (*r_positions)[3]) CCGFace *f = ccgdm->faceMap[index].face; int x, y, S, numVerts = ccgSubSurf_getFaceNumVerts(f); - vd = ccgSubSurf_getFaceCenterData(f); + vd = static_cast(ccgSubSurf_getFaceCenterData(f)); ccgDM_to_MVert(r_positions[i++], &key, vd); for (S = 0; S < numVerts; S++) { for (x = 1; x < gridSize - 1; x++) { - vd = ccgSubSurf_getFaceGridEdgeData(ss, f, S, x); + vd = static_cast(ccgSubSurf_getFaceGridEdgeData(ss, f, S, x)); ccgDM_to_MVert(r_positions[i++], &key, vd); } } @@ -914,7 +880,7 @@ static void ccgDM_copyFinalVertArray(DerivedMesh *dm, float (*r_positions)[3]) for (S = 0; S < numVerts; S++) { for (y = 1; y < gridSize - 1; y++) { for (x = 1; x < gridSize - 1; x++) { - vd = ccgSubSurf_getFaceGridData(ss, f, S, x, y); + vd = static_cast(ccgSubSurf_getFaceGridData(ss, f, S, x, y)); ccgDM_to_MVert(r_positions[i++], &key, vd); } } @@ -930,7 +896,7 @@ static void ccgDM_copyFinalVertArray(DerivedMesh *dm, float (*r_positions)[3]) /* NOTE(@campbellbarton): This gives errors with `--debug-fpe` the normals don't seem to be * unit length. This is most likely caused by edges with no faces which are now zeroed out, * see comment in: `ccgSubSurf__calcVertNormals()`. */ - vd = ccgSubSurf_getEdgeData(ss, e, x); + vd = static_cast(ccgSubSurf_getEdgeData(ss, e, x)); ccgDM_to_MVert(r_positions[i++], &key, vd); } } @@ -939,7 +905,7 @@ static void ccgDM_copyFinalVertArray(DerivedMesh *dm, float (*r_positions)[3]) for (index = 0; index < totvert; index++) { CCGVert *v = ccgdm->vertMap[index].vert; - vd = ccgSubSurf_getVertData(ss, v); + vd = static_cast(ccgSubSurf_getVertData(ss, v)); ccgDM_to_MVert(r_positions[i++], &key, vd); } } @@ -1017,20 +983,20 @@ static void ccgDM_copyFinalEdgeArray(DerivedMesh *dm, MEdge *medge) } } -typedef struct CopyFinalLoopArrayData { +struct CopyFinalLoopArrayData { CCGDerivedMesh *ccgdm; MLoop *mloop; int grid_size; int *grid_offset; int edge_size; size_t mloop_index; -} CopyFinalLoopArrayData; +}; static void copyFinalLoopArray_task_cb(void *__restrict userdata, const int iter, - const TaskParallelTLS *__restrict UNUSED(tls)) + const TaskParallelTLS *__restrict /*tls*/) { - CopyFinalLoopArrayData *data = userdata; + CopyFinalLoopArrayData *data = static_cast(userdata); CCGDerivedMesh *ccgdm = data->ccgdm; CCGSubSurf *ss = ccgdm->ss; const int grid_size = data->grid_size; @@ -1038,7 +1004,7 @@ static void copyFinalLoopArray_task_cb(void *__restrict userdata, CCGFace *f = ccgdm->faceMap[iter].face; const int num_verts = ccgSubSurf_getFaceNumVerts(f); const int grid_index = data->grid_offset[iter]; - const size_t loop_index = 4 * (size_t)grid_index * (grid_size - 1) * (grid_size - 1); + const size_t loop_index = 4 * size_t(grid_index) * (grid_size - 1) * (grid_size - 1); MLoop *ml = &data->mloop[loop_index]; for (int S = 0; S < num_verts; S++) { for (int y = 0; y < grid_size - 1; y++) { @@ -1128,7 +1094,7 @@ static void ccgDM_copyFinalPolyArray(DerivedMesh *dm, MPoly *mpoly) for (index = 0; index < totface; index++) { CCGFace *f = ccgdm->faceMap[index].face; int x, y, S, numVerts = ccgSubSurf_getFaceNumVerts(f); - int flag = (faceFlags) ? faceFlags[index].flag : ME_SMOOTH; + char flag = (faceFlags) ? 
faceFlags[index].flag : char(ME_SMOOTH); for (S = 0; S < numVerts; S++) { for (y = 0; y < gridSize - 1; y++) { @@ -1157,12 +1123,12 @@ static void ccgDM_release(DerivedMesh *dm) /* Check that mmd still exists */ if (!ccgdm->multires.local_mmd && BLI_findindex(&ccgdm->multires.ob->modifiers, ccgdm->multires.mmd) < 0) { - ccgdm->multires.mmd = NULL; + ccgdm->multires.mmd = nullptr; } if (ccgdm->multires.mmd) { if (ccgdm->multires.modified_flags & MULTIRES_COORDS_MODIFIED) { - multires_modifier_update_mdisps(dm, NULL); + multires_modifier_update_mdisps(dm, nullptr); } if (ccgdm->multires.modified_flags & MULTIRES_HIDDEN_MODIFIED) { multires_modifier_update_hidden(dm); @@ -1171,7 +1137,7 @@ static void ccgDM_release(DerivedMesh *dm) } if (ccgdm->ehash) { - BLI_edgehash_free(ccgdm->ehash, NULL); + BLI_edgehash_free(ccgdm->ehash, nullptr); } if (ccgdm->reverseFaceMap) { @@ -1232,7 +1198,7 @@ static void *ccgDM_get_vert_data_layer(DerivedMesh *dm, int type) /* Avoid re-creation if the layer exists already */ BLI_rw_mutex_lock(&ccgdm->origindex_cache_rwlock, THREAD_LOCK_READ); - origindex = DM_get_vert_data_layer(dm, CD_ORIGINDEX); + origindex = static_cast(DM_get_vert_data_layer(dm, CD_ORIGINDEX)); BLI_rw_mutex_unlock(&ccgdm->origindex_cache_rwlock); if (origindex) { return origindex; @@ -1240,8 +1206,8 @@ static void *ccgDM_get_vert_data_layer(DerivedMesh *dm, int type) BLI_rw_mutex_lock(&ccgdm->origindex_cache_rwlock, THREAD_LOCK_WRITE); - origindex = CustomData_add_layer( - &dm->vertData, CD_ORIGINDEX, CD_SET_DEFAULT, NULL, dm->numVertData); + origindex = static_cast(CustomData_add_layer( + &dm->vertData, CD_ORIGINDEX, CD_SET_DEFAULT, nullptr, dm->numVertData)); totorig = ccgSubSurf_getNumVerts(ss); totnone = dm->numVertData - totorig; @@ -1274,13 +1240,13 @@ static void *ccgDM_get_edge_data_layer(DerivedMesh *dm, int type) int edgeSize = ccgSubSurf_getEdgeSize(ss); /* Avoid re-creation if the layer exists already */ - origindex = DM_get_edge_data_layer(dm, CD_ORIGINDEX); + origindex = static_cast(DM_get_edge_data_layer(dm, CD_ORIGINDEX)); if (origindex) { return origindex; } - origindex = CustomData_add_layer( - &dm->edgeData, CD_ORIGINDEX, CD_SET_DEFAULT, NULL, dm->numEdgeData); + origindex = static_cast(CustomData_add_layer( + &dm->edgeData, CD_ORIGINDEX, CD_SET_DEFAULT, nullptr, dm->numEdgeData)); totedge = ccgSubSurf_getNumEdges(ss); totorig = totedge * (edgeSize - 1); @@ -1317,13 +1283,13 @@ static void *ccgDM_get_poly_data_layer(DerivedMesh *dm, int type) int gridFaces = ccgSubSurf_getGridSize(ss) - 1; /* Avoid re-creation if the layer exists already */ - origindex = DM_get_poly_data_layer(dm, CD_ORIGINDEX); + origindex = static_cast(DM_get_poly_data_layer(dm, CD_ORIGINDEX)); if (origindex) { return origindex; } - origindex = CustomData_add_layer( - &dm->polyData, CD_ORIGINDEX, CD_SET_DEFAULT, NULL, dm->numPolyData); + origindex = static_cast(CustomData_add_layer( + &dm->polyData, CD_ORIGINDEX, CD_SET_DEFAULT, nullptr, dm->numPolyData)); totface = ccgSubSurf_getNumFaces(ss); @@ -1384,7 +1350,7 @@ static void ccgdm_create_grids(DerivedMesh *dm) // gridSize = ccgDM_getGridSize(dm); /* UNUSED */ /* compute offset into grid array for each face */ - gridOffset = MEM_mallocN(sizeof(int) * numFaces, "ccgdm.gridOffset"); + gridOffset = static_cast(MEM_mallocN(sizeof(int) * numFaces, "ccgdm.gridOffset")); for (gIndex = 0, index = 0; index < numFaces; index++) { CCGFace *f = ccgdm->faceMap[index].face; @@ -1395,18 +1361,21 @@ static void ccgdm_create_grids(DerivedMesh *dm) } /* compute grid data 
*/ - gridData = MEM_mallocN(sizeof(CCGElem *) * numGrids, "ccgdm.gridData"); - gridFaces = MEM_mallocN(sizeof(CCGFace *) * numGrids, "ccgdm.gridFaces"); - gridFlagMats = MEM_mallocN(sizeof(DMFlagMat) * numGrids, "ccgdm.gridFlagMats"); + gridData = static_cast(MEM_mallocN(sizeof(CCGElem *) * numGrids, "ccgdm.gridData")); + gridFaces = static_cast( + MEM_mallocN(sizeof(CCGFace *) * numGrids, "ccgdm.gridFaces")); + gridFlagMats = static_cast( + MEM_mallocN(sizeof(DMFlagMat) * numGrids, "ccgdm.gridFlagMats")); - ccgdm->gridHidden = MEM_callocN(sizeof(*ccgdm->gridHidden) * numGrids, "ccgdm.gridHidden"); + ccgdm->gridHidden = static_cast( + MEM_callocN(sizeof(*ccgdm->gridHidden) * numGrids, "ccgdm.gridHidden")); for (gIndex = 0, index = 0; index < numFaces; index++) { CCGFace *f = ccgdm->faceMap[index].face; int numVerts = ccgSubSurf_getFaceNumVerts(f); for (S = 0; S < numVerts; S++, gIndex++) { - gridData[gIndex] = ccgSubSurf_getFaceGridDataArray(ss, f, S); + gridData[gIndex] = static_cast(ccgSubSurf_getFaceGridDataArray(ss, f, S)); gridFaces[gIndex] = f; gridFlagMats[gIndex] = ccgdm->faceFlags[index]; } @@ -1466,7 +1435,7 @@ static void ccgDM_recalcLoopTri(DerivedMesh *dm) DM_ensure_looptri_data(dm); MLoopTri *mlooptri = dm->looptris.array_wip; - BLI_assert(tottri == 0 || mlooptri != NULL); + BLI_assert(tottri == 0 || mlooptri != nullptr); BLI_assert(poly_to_tri_count(dm->numPolyData, dm->numLoopData) == dm->looptris.num); BLI_assert(tottri == dm->looptris.num); @@ -1486,9 +1455,9 @@ static void ccgDM_recalcLoopTri(DerivedMesh *dm) lt->poly = poly_index; } - BLI_assert(dm->looptris.array == NULL); + BLI_assert(dm->looptris.array == nullptr); atomic_cas_ptr((void **)&dm->looptris.array, dm->looptris.array, dm->looptris.array_wip); - dm->looptris.array_wip = NULL; + dm->looptris.array_wip = nullptr; } static void set_default_ccgdm_callbacks(CCGDerivedMesh *ccgdm) @@ -1530,7 +1499,8 @@ static void create_ccgdm_maps(CCGDerivedMesh *ccgdm, CCGSubSurf *ss) int totvert, totedge, totface; totvert = ccgSubSurf_getNumVerts(ss); - ccgdm->vertMap = MEM_mallocN(totvert * sizeof(*ccgdm->vertMap), "vertMap"); + ccgdm->vertMap = static_cast( + MEM_mallocN(totvert * sizeof(*ccgdm->vertMap), "vertMap")); for (ccgSubSurf_initVertIterator(ss, &vi); !ccgVertIterator_isStopped(&vi); ccgVertIterator_next(&vi)) { CCGVert *v = ccgVertIterator_getCurrent(&vi); @@ -1539,7 +1509,8 @@ static void create_ccgdm_maps(CCGDerivedMesh *ccgdm, CCGSubSurf *ss) } totedge = ccgSubSurf_getNumEdges(ss); - ccgdm->edgeMap = MEM_mallocN(totedge * sizeof(*ccgdm->edgeMap), "edgeMap"); + ccgdm->edgeMap = static_cast( + MEM_mallocN(totedge * sizeof(*ccgdm->edgeMap), "edgeMap")); for (ccgSubSurf_initEdgeIterator(ss, &ei); !ccgEdgeIterator_isStopped(&ei); ccgEdgeIterator_next(&ei)) { CCGEdge *e = ccgEdgeIterator_getCurrent(&ei); @@ -1548,7 +1519,8 @@ static void create_ccgdm_maps(CCGDerivedMesh *ccgdm, CCGSubSurf *ss) } totface = ccgSubSurf_getNumFaces(ss); - ccgdm->faceMap = MEM_mallocN(totface * sizeof(*ccgdm->faceMap), "faceMap"); + ccgdm->faceMap = static_cast( + MEM_mallocN(totface * sizeof(*ccgdm->faceMap), "faceMap")); for (ccgSubSurf_initFaceIterator(ss, &fi); !ccgFaceIterator_isStopped(&fi); ccgFaceIterator_next(&fi)) { CCGFace *f = ccgFaceIterator_getCurrent(&fi); @@ -1573,20 +1545,17 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, int vertNum = 0, edgeNum = 0, faceNum = 0; short *edgeFlags = ccgdm->edgeFlags; DMFlagMat *faceFlags = ccgdm->faceFlags; - int *polyidx = NULL; -#ifndef USE_DYNSIZE - int *loopidx = NULL, 
*vertidx = NULL; - BLI_array_declare(loopidx); - BLI_array_declare(vertidx); -#endif + int *polyidx = nullptr; + blender::Vector loopidx; + blender::Vector vertidx; int loopindex, loopindex2; int edgeSize; int gridSize; int gridFaces, gridCuts; int gridSideEdges; int gridInternalEdges; - WeightTable wtable = {NULL}; - MEdge *medge = NULL; + WeightTable wtable = {nullptr}; + MEdge *medge = nullptr; bool has_edge_cd; edgeSize = ccgSubSurf_getEdgeSize(ss); @@ -1599,15 +1568,15 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, medge = dm->getEdgeArray(dm); - const MPoly *mpoly = CustomData_get_layer(&dm->polyData, CD_MPOLY); - const int *material_indices = CustomData_get_layer_named( - &dm->polyData, CD_MPOLY, "material_index"); - const int *base_polyOrigIndex = CustomData_get_layer(&dm->polyData, CD_ORIGINDEX); + const MPoly *mpoly = static_cast(CustomData_get_layer(&dm->polyData, CD_MPOLY)); + const int *material_indices = static_cast( + CustomData_get_layer_named(&dm->polyData, CD_MPOLY, "material_index")); + const int *base_polyOrigIndex = static_cast( + CustomData_get_layer(&dm->polyData, CD_ORIGINDEX)); - int *vertOrigIndex = DM_get_vert_data_layer(&ccgdm->dm, CD_ORIGINDEX); - int *edgeOrigIndex = DM_get_edge_data_layer(&ccgdm->dm, CD_ORIGINDEX); - - int *polyOrigIndex = DM_get_poly_data_layer(&ccgdm->dm, CD_ORIGINDEX); + int *vertOrigIndex = static_cast(DM_get_vert_data_layer(&ccgdm->dm, CD_ORIGINDEX)); + int *edgeOrigIndex = static_cast(DM_get_edge_data_layer(&ccgdm->dm, CD_ORIGINDEX)); + int *polyOrigIndex = static_cast(DM_get_poly_data_layer(&ccgdm->dm, CD_ORIGINDEX)); has_edge_cd = ((ccgdm->dm.edgeData.totlayer - (edgeOrigIndex ? 1 : 0)) != 0); @@ -1620,9 +1589,7 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, int g2_wid = gridCuts + 2; float *w, *w2; int s, x, y; -#ifdef USE_DYNSIZE - int loopidx[numVerts], vertidx[numVerts]; -#endif + w = get_ss_weights(&wtable, gridCuts, numVerts); ccgdm->faceMap[index].startVert = vertNum; @@ -1636,18 +1603,12 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, /* set the face base vert */ *((int *)ccgSubSurf_getFaceUserData(ss, f)) = vertNum; -#ifndef USE_DYNSIZE - BLI_array_clear(loopidx); - BLI_array_grow_items(loopidx, numVerts); -#endif + loopidx.reinitialize(numVerts); for (s = 0; s < numVerts; s++) { loopidx[s] = loopindex++; } -#ifndef USE_DYNSIZE - BLI_array_clear(vertidx); - BLI_array_grow_items(vertidx, numVerts); -#endif + vertidx.reinitialize(numVerts); for (s = 0; s < numVerts; s++) { CCGVert *v = ccgSubSurf_getFaceVert(f, s); vertidx[s] = POINTER_AS_INT(ccgSubSurf_getVertVertHandle(v)); @@ -1655,7 +1616,7 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, /* I think this is for interpolating the center vert? 
*/ w2 = w; // + numVerts*(g2_wid-1) * (g2_wid-1); //numVerts*((g2_wid-1) * g2_wid+g2_wid-1); - DM_interp_vert_data(dm, &ccgdm->dm, vertidx, w2, numVerts, vertNum); + DM_interp_vert_data(dm, &ccgdm->dm, vertidx.data(), w2, numVerts, vertNum); if (vertOrigIndex) { *vertOrigIndex = ORIGINDEX_NONE; vertOrigIndex++; @@ -1667,7 +1628,7 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, for (s = 0; s < numVerts; s++) { for (x = 1; x < gridFaces; x++) { w2 = w + s * numVerts * g2_wid * g2_wid + x * numVerts; - DM_interp_vert_data(dm, &ccgdm->dm, vertidx, w2, numVerts, vertNum); + DM_interp_vert_data(dm, &ccgdm->dm, vertidx.data(), w2, numVerts, vertNum); if (vertOrigIndex) { *vertOrigIndex = ORIGINDEX_NONE; @@ -1683,7 +1644,7 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, for (y = 1; y < gridFaces; y++) { for (x = 1; x < gridFaces; x++) { w2 = w + s * numVerts * g2_wid * g2_wid + (y * g2_wid + x) * numVerts; - DM_interp_vert_data(dm, &ccgdm->dm, vertidx, w2, numVerts, vertNum); + DM_interp_vert_data(dm, &ccgdm->dm, vertidx.data(), w2, numVerts, vertNum); if (vertOrigIndex) { *vertOrigIndex = ORIGINDEX_NONE; @@ -1706,23 +1667,43 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, for (y = 0; y < gridFaces; y++) { for (x = 0; x < gridFaces; x++) { w2 = w + s * numVerts * g2_wid * g2_wid + (y * g2_wid + x) * numVerts; - CustomData_interp( - &dm->loopData, &ccgdm->dm.loopData, loopidx, w2, NULL, numVerts, loopindex2); + CustomData_interp(&dm->loopData, + &ccgdm->dm.loopData, + loopidx.data(), + w2, + nullptr, + numVerts, + loopindex2); loopindex2++; w2 = w + s * numVerts * g2_wid * g2_wid + ((y + 1) * g2_wid + (x)) * numVerts; - CustomData_interp( - &dm->loopData, &ccgdm->dm.loopData, loopidx, w2, NULL, numVerts, loopindex2); + CustomData_interp(&dm->loopData, + &ccgdm->dm.loopData, + loopidx.data(), + w2, + nullptr, + numVerts, + loopindex2); loopindex2++; w2 = w + s * numVerts * g2_wid * g2_wid + ((y + 1) * g2_wid + (x + 1)) * numVerts; - CustomData_interp( - &dm->loopData, &ccgdm->dm.loopData, loopidx, w2, NULL, numVerts, loopindex2); + CustomData_interp(&dm->loopData, + &ccgdm->dm.loopData, + loopidx.data(), + w2, + nullptr, + numVerts, + loopindex2); loopindex2++; w2 = w + s * numVerts * g2_wid * g2_wid + ((y)*g2_wid + (x + 1)) * numVerts; - CustomData_interp( - &dm->loopData, &ccgdm->dm.loopData, loopidx, w2, NULL, numVerts, loopindex2); + CustomData_interp(&dm->loopData, + &ccgdm->dm.loopData, + loopidx.data(), + w2, + nullptr, + numVerts, + loopindex2); loopindex2++; /* Copy over poly data, e.g. #CD_FACEMAP. 
*/ @@ -1774,7 +1755,7 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, for (x = 1; x < edgeSize - 1; x++) { float w[2]; - w[1] = (float)x / (edgeSize - 1); + w[1] = float(x) / (edgeSize - 1); w[0] = 1 - w[1]; DM_interp_vert_data(dm, &ccgdm->dm, vertIdx, w, 2, vertNum); if (vertOrigIndex) { @@ -1832,10 +1813,6 @@ static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm, vertNum++; } -#ifndef USE_DYNSIZE - BLI_array_free(vertidx); - BLI_array_free(loopidx); -#endif free_ss_weights(&wtable); BLI_assert(vertNum == ccgSubSurf_getNumFinalVerts(ss)); @@ -1851,7 +1828,7 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss, { const int totedge = ccgSubSurf_getNumEdges(ss); const int totface = ccgSubSurf_getNumFaces(ss); - CCGDerivedMesh *ccgdm = MEM_callocN(sizeof(*ccgdm), "ccgdm"); + CCGDerivedMesh *ccgdm = MEM_cnew(__func__); BLI_assert(totedge == ccgSubSurf_getNumEdges(ss)); BLI_assert(totface == ccgSubSurf_getNumFaces(ss)); @@ -1864,8 +1841,8 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss, ccgSubSurf_getNumFinalFaces(ss) * 4, ccgSubSurf_getNumFinalFaces(ss)); - ccgdm->reverseFaceMap = MEM_callocN(sizeof(int) * ccgSubSurf_getNumFinalFaces(ss), - "reverseFaceMap"); + ccgdm->reverseFaceMap = static_cast( + MEM_callocN(sizeof(int) * ccgSubSurf_getNumFinalFaces(ss), "reverseFaceMap")); create_ccgdm_maps(ccgdm, ss); @@ -1876,8 +1853,9 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss, ccgdm->useSubsurfUv = useSubsurfUv; /* CDDM hack. */ - ccgdm->edgeFlags = MEM_callocN(sizeof(short) * totedge, "edgeFlags"); - ccgdm->faceFlags = MEM_callocN(sizeof(DMFlagMat) * totface, "faceFlags"); + ccgdm->edgeFlags = static_cast(MEM_callocN(sizeof(short) * totedge, "edgeFlags")); + ccgdm->faceFlags = static_cast( + MEM_callocN(sizeof(DMFlagMat) * totface, "faceFlags")); set_ccgdm_all_geometry(ccgdm, ss, dm, useSubsurfUv != 0); @@ -1895,14 +1873,16 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss, /***/ -struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm, - struct SubsurfModifierData *smd, - const struct Scene *scene, - float (*vertCos)[3], - SubsurfFlags flags) +DerivedMesh *subsurf_make_derived_from_derived(DerivedMesh *dm, + SubsurfModifierData *smd, + const Scene *scene, + float (*vertCos)[3], + SubsurfFlags flags) { - const int useSimple = (smd->subdivType == ME_SIMPLE_SUBSURF) ? CCG_SIMPLE_SUBDIV : 0; - const CCGFlags useAging = (smd->flags & eSubsurfModifierFlag_DebugIncr) ? CCG_USE_AGING : 0; + const CCGFlags useSimple = (smd->subdivType == ME_SIMPLE_SUBSURF) ? CCG_SIMPLE_SUBDIV : + CCGFlags(0); + const CCGFlags useAging = (smd->flags & eSubsurfModifierFlag_DebugIncr) ? CCG_USE_AGING : + CCGFlags(0); const int useSubsurfUv = (smd->uv_smooth != SUBSURF_UV_SMOOTH_NONE); const int drawInteriorEdges = !(smd->flags & eSubsurfModifierFlag_ControlEdges); const bool ignore_simplify = (flags & SUBSURF_IGNORE_SIMPLIFY); @@ -1911,25 +1891,30 @@ struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm, /* NOTE: editmode calculation can only run once per * modifier stack evaluation (uses freed cache) T36299. */ if (flags & SUBSURF_FOR_EDIT_MODE) { - int levels = (scene != NULL && !ignore_simplify) ? + int levels = (scene != nullptr && !ignore_simplify) ? get_render_subsurf_level(&scene->r, smd->levels, false) : smd->levels; /* TODO(sergey): Same as emCache below. 
*/ if ((flags & SUBSURF_IN_EDIT_MODE) && smd->mCache) { - ccgSubSurf_free(smd->mCache); - smd->mCache = NULL; + ccgSubSurf_free(static_cast(smd->mCache)); + smd->mCache = nullptr; } - smd->emCache = _getSubSurf(smd->emCache, levels, 3, useSimple | useAging | CCG_CALC_NORMALS); + smd->emCache = _getSubSurf(static_cast(smd->emCache), + levels, + 3, + useSimple | useAging | CCG_CALC_NORMALS); - ss_sync_from_derivedmesh(smd->emCache, dm, vertCos, useSimple, useSubsurfUv); - result = getCCGDerivedMesh(smd->emCache, drawInteriorEdges, useSubsurfUv, dm); + ss_sync_from_derivedmesh( + static_cast(smd->emCache), dm, vertCos, useSimple, useSubsurfUv); + result = getCCGDerivedMesh( + static_cast(smd->emCache), drawInteriorEdges, useSubsurfUv, dm); } else if (flags & SUBSURF_USE_RENDER_PARAMS) { /* Do not use cache in render mode. */ CCGSubSurf *ss; - int levels = (scene != NULL && !ignore_simplify) ? + int levels = (scene != nullptr && !ignore_simplify) ? get_render_subsurf_level(&scene->r, smd->renderLevels, true) : smd->renderLevels; @@ -1937,7 +1922,7 @@ struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm, return dm; } - ss = _getSubSurf(NULL, levels, 3, useSimple | CCG_USE_ARENA | CCG_CALC_NORMALS); + ss = _getSubSurf(nullptr, levels, 3, useSimple | CCG_USE_ARENA | CCG_CALC_NORMALS); ss_sync_from_derivedmesh(ss, dm, vertCos, useSimple, useSubsurfUv); @@ -1947,7 +1932,7 @@ struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm, } else { int useIncremental = (smd->flags & eSubsurfModifierFlag_Incremental); - int levels = (scene != NULL && !ignore_simplify) ? + int levels = (scene != nullptr && !ignore_simplify) ? get_render_subsurf_level(&scene->r, smd->levels, false) : smd->levels; CCGSubSurf *ss; @@ -1964,25 +1949,28 @@ struct DerivedMesh *subsurf_make_derived_from_derived(struct DerivedMesh *dm, * mode, so now we have a parameter to verify it. - brecht */ if (!(flags & SUBSURF_IN_EDIT_MODE) && smd->emCache) { - ccgSubSurf_free(smd->emCache); - smd->emCache = NULL; + ccgSubSurf_free(static_cast(smd->emCache)); + smd->emCache = nullptr; } if (useIncremental && (flags & SUBSURF_IS_FINAL_CALC)) { - smd->mCache = ss = _getSubSurf( - smd->mCache, levels, 3, useSimple | useAging | CCG_CALC_NORMALS); + smd->mCache = ss = _getSubSurf(static_cast(smd->mCache), + levels, + 3, + useSimple | useAging | CCG_CALC_NORMALS); ss_sync_from_derivedmesh(ss, dm, vertCos, useSimple, useSubsurfUv); - result = getCCGDerivedMesh(smd->mCache, drawInteriorEdges, useSubsurfUv, dm); + result = getCCGDerivedMesh( + static_cast(smd->mCache), drawInteriorEdges, useSubsurfUv, dm); } else { CCGFlags ccg_flags = useSimple | CCG_USE_ARENA | CCG_CALC_NORMALS; - CCGSubSurf *prevSS = NULL; + CCGSubSurf *prevSS = nullptr; - if (smd->mCache && (flags & SUBSURF_IS_FINAL_CALC)) { - ccgSubSurf_free(smd->mCache); - smd->mCache = NULL; + if ((smd->mCache) && (flags & SUBSURF_IS_FINAL_CALC)) { + ccgSubSurf_free(static_cast(smd->mCache)); + smd->mCache = nullptr; } if (flags & SUBSURF_ALLOC_PAINT_MASK) { @@ -2017,12 +2005,12 @@ void subsurf_calculate_limit_positions(Mesh *me, float (*r_positions)[3]) * calculated vert positions is incorrect for the verts * on the boundary of the mesh. 
*/ - CCGSubSurf *ss = _getSubSurf(NULL, 1, 3, CCG_USE_ARENA); + CCGSubSurf *ss = _getSubSurf(nullptr, 1, 3, CCG_USE_ARENA); float edge_sum[3], face_sum[3]; CCGVertIterator vi; DerivedMesh *dm = CDDM_from_mesh(me); - ss_sync_from_derivedmesh(ss, dm, NULL, 0, 0); + ss_sync_from_derivedmesh(ss, dm, nullptr, 0, 0); for (ccgSubSurf_initVertIterator(ss, &vi); !ccgVertIterator_isStopped(&vi); ccgVertIterator_next(&vi)) { @@ -2030,7 +2018,6 @@ void subsurf_calculate_limit_positions(Mesh *me, float (*r_positions)[3]) int idx = POINTER_AS_INT(ccgSubSurf_getVertVertHandle(v)); int N = ccgSubSurf_getVertNumEdges(v); int numFaces = ccgSubSurf_getVertNumFaces(v); - float *co; int i; zero_v3(edge_sum); @@ -2038,20 +2025,21 @@ void subsurf_calculate_limit_positions(Mesh *me, float (*r_positions)[3]) for (i = 0; i < N; i++) { CCGEdge *e = ccgSubSurf_getVertEdge(v, i); - add_v3_v3v3(edge_sum, edge_sum, ccgSubSurf_getEdgeData(ss, e, 1)); + add_v3_v3v3( + edge_sum, edge_sum, static_cast(ccgSubSurf_getEdgeData(ss, e, 1))); } for (i = 0; i < numFaces; i++) { CCGFace *f = ccgSubSurf_getVertFace(v, i); - add_v3_v3(face_sum, ccgSubSurf_getFaceCenterData(f)); + add_v3_v3(face_sum, static_cast(ccgSubSurf_getFaceCenterData(f))); } /* ad-hoc correction for boundary vertices, to at least avoid them * moving completely out of place (brecht) */ if (numFaces && numFaces != N) { - mul_v3_fl(face_sum, (float)N / (float)numFaces); + mul_v3_fl(face_sum, float(N) / float(numFaces)); } - co = ccgSubSurf_getVertData(ss, v); + const float *co = static_cast(ccgSubSurf_getVertData(ss, v)); r_positions[idx][0] = (co[0] * N * N + edge_sum[0] * 4 + face_sum[0]) / (N * (N + 5)); r_positions[idx][1] = (co[1] * N * N + edge_sum[1] * 4 + face_sum[1]) / (N * (N + 5)); r_positions[idx][2] = (co[2] * N * N + edge_sum[2] * 4 + face_sum[2]) / (N * (N + 5)); diff --git a/source/blender/blenkernel/intern/texture.cc b/source/blender/blenkernel/intern/texture.cc index d2cecb372a0..ed385fae454 100644 --- a/source/blender/blenkernel/intern/texture.cc +++ b/source/blender/blenkernel/intern/texture.cc @@ -191,33 +191,33 @@ static void texture_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_TE = { - /* id_code */ ID_TE, - /* id_filter */ FILTER_ID_TE, - /* main_listbase_index */ INDEX_ID_TE, - /* struct_size */ sizeof(Tex), - /* name */ "Texture", - /* name_plural */ "textures", - /* translation_context */ BLT_I18NCONTEXT_ID_TEXTURE, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_TE, + /*id_filter*/ FILTER_ID_TE, + /*main_listbase_index*/ INDEX_ID_TE, + /*struct_size*/ sizeof(Tex), + /*name*/ "Texture", + /*name_plural*/ "textures", + /*translation_context*/ BLT_I18NCONTEXT_ID_TEXTURE, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ texture_init_data, - /* copy_data */ texture_copy_data, - /* free_data */ texture_free_data, - /* make_local */ nullptr, - /* foreach_id */ texture_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ texture_init_data, + /*copy_data*/ texture_copy_data, + /*free_data*/ texture_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ texture_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ texture_blend_write, - /* blend_read_data */ texture_blend_read_data, - /* blend_read_lib */ texture_blend_read_lib, - /* blend_read_expand */ texture_blend_read_expand, + 
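Editorial note (not part of the patch): the weights in the `subsurf_calculate_limit_positions()` loop above follow the standard Catmull-Clark limit stencil, `(N^2 * P + 4 * E + F) / (N * (N + 5))`, where `E` sums the first subdivided point on each of the `N` incident edges and `F` sums the adjacent face centers. A minimal C++ sketch of the weights for a regular interior vertex of valence 4; the variable names are illustrative only:

const int N = 4;
const float denom = float(N * (N + 5));     /* 36 */
const float w_vert = float(N * N) / denom;  /* 16/36 for the original vertex */
const float w_edge = 4.0f / denom;          /* 4/36 per incident edge point */
const float w_face = 1.0f / denom;          /* 1/36 per adjacent face center */
/* w_vert + N * w_edge + N * w_face == 1.0f, so the limit position is an affine
 * combination of the original vertex, its edge points and its face centers. */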
/*blend_write*/ texture_blend_write, + /*blend_read_data*/ texture_blend_read_data, + /*blend_read_lib*/ texture_blend_read_lib, + /*blend_read_expand*/ texture_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; void BKE_texture_mtex_foreach_id(LibraryForeachIDData *data, MTex *mtex) diff --git a/source/blender/blenkernel/intern/undo_system.c b/source/blender/blenkernel/intern/undo_system.cc similarity index 86% rename from source/blender/blenkernel/intern/undo_system.c rename to source/blender/blenkernel/intern/undo_system.cc index 0c0de957773..946d94f1503 100644 --- a/source/blender/blenkernel/intern/undo_system.c +++ b/source/blender/blenkernel/intern/undo_system.cc @@ -52,14 +52,14 @@ static CLG_LogRef LOG = {"bke.undosys"}; /** \name Undo Types * \{ */ -const UndoType *BKE_UNDOSYS_TYPE_IMAGE = NULL; -const UndoType *BKE_UNDOSYS_TYPE_MEMFILE = NULL; -const UndoType *BKE_UNDOSYS_TYPE_PAINTCURVE = NULL; -const UndoType *BKE_UNDOSYS_TYPE_PARTICLE = NULL; -const UndoType *BKE_UNDOSYS_TYPE_SCULPT = NULL; -const UndoType *BKE_UNDOSYS_TYPE_TEXT = NULL; +const UndoType *BKE_UNDOSYS_TYPE_IMAGE = nullptr; +const UndoType *BKE_UNDOSYS_TYPE_MEMFILE = nullptr; +const UndoType *BKE_UNDOSYS_TYPE_PAINTCURVE = nullptr; +const UndoType *BKE_UNDOSYS_TYPE_PARTICLE = nullptr; +const UndoType *BKE_UNDOSYS_TYPE_SCULPT = nullptr; +const UndoType *BKE_UNDOSYS_TYPE_TEXT = nullptr; -static ListBase g_undo_types = {NULL, NULL}; +static ListBase g_undo_types = {nullptr, nullptr}; static const UndoType *BKE_undosys_type_from_context(bContext *C) { @@ -69,7 +69,7 @@ static const UndoType *BKE_undosys_type_from_context(bContext *C) return ut; } } - return NULL; + return nullptr; } /** \} */ @@ -115,13 +115,13 @@ static bool g_undo_callback_running = false; * * \{ */ -static void undosys_id_ref_store(void *UNUSED(user_data), UndoRefID *id_ref) +static void undosys_id_ref_store(void * /*user_data*/, UndoRefID *id_ref) { BLI_assert(id_ref->name[0] == '\0'); if (id_ref->ptr) { BLI_strncpy(id_ref->name, id_ref->ptr->name, sizeof(id_ref->name)); /* Not needed, just prevents stale data access. */ - id_ref->ptr = NULL; + id_ref->ptr = nullptr; } } @@ -129,7 +129,7 @@ static void undosys_id_ref_resolve(void *user_data, UndoRefID *id_ref) { /* NOTE: we could optimize this, * for now it's not too bad since it only runs when we access undo! */ - Main *bmain = user_data; + Main *bmain = static_cast
(user_data); ListBase *lb = which_libbase(bmain, GS(id_ref->name)); LISTBASE_FOREACH (ID *, id, lb) { if (STREQ(id_ref->name, id->name) && !ID_IS_LINKED(id)) { @@ -146,7 +146,7 @@ static bool undosys_step_encode(bContext *C, Main *bmain, UndoStack *ustack, Und bool ok = us->type->step_encode(C, bmain, us); UNDO_NESTED_CHECK_END; if (ok) { - if (us->type->step_foreach_ID_ref != NULL) { + if (us->type->step_foreach_ID_ref != nullptr) { /* Don't use from context yet because sometimes context is fake and * not all members are filled in. */ us->type->step_foreach_ID_ref(us, undosys_id_ref_store, bmain); @@ -221,7 +221,7 @@ static void undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us) #ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER if (ustack->step_active_memfile == us) { - ustack->step_active_memfile = NULL; + ustack->step_active_memfile = nullptr; } #endif } @@ -235,7 +235,7 @@ static void undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us) #ifndef NDEBUG static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty) { - if (ustack->step_active != NULL) { + if (ustack->step_active != nullptr) { BLI_assert(!BLI_listbase_is_empty(&ustack->steps)); BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1); } @@ -244,14 +244,14 @@ static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty) } } #else -static void undosys_stack_validate(UndoStack *UNUSED(ustack), bool UNUSED(expect_non_empty)) +static void undosys_stack_validate(UndoStack * /*ustack*/, bool /*expect_non_empty*/) { } #endif UndoStack *BKE_undosys_stack_create(void) { - UndoStack *ustack = MEM_callocN(sizeof(UndoStack), __func__); + UndoStack *ustack = MEM_cnew(__func__); return ustack; } @@ -265,12 +265,12 @@ void BKE_undosys_stack_clear(UndoStack *ustack) { UNDO_NESTED_ASSERT(false); CLOG_INFO(&LOG, 1, "steps=%d", BLI_listbase_count(&ustack->steps)); - for (UndoStep *us = ustack->steps.last, *us_prev; us; us = us_prev) { + for (UndoStep *us = static_cast(ustack->steps.last), *us_prev; us; us = us_prev) { us_prev = us->prev; undosys_step_free_and_unlink(ustack, us); } BLI_listbase_clear(&ustack->steps); - ustack->step_active = NULL; + ustack->step_active = nullptr; } void BKE_undosys_stack_clear_active(UndoStack *ustack) @@ -280,10 +280,10 @@ void BKE_undosys_stack_clear_active(UndoStack *ustack) if (us) { ustack->step_active = us->prev; - bool is_not_empty = ustack->step_active != NULL; + bool is_not_empty = ustack->step_active != nullptr; while (ustack->steps.last != ustack->step_active) { - UndoStep *us_iter = ustack->steps.last; + UndoStep *us_iter = static_cast(ustack->steps.last); undosys_step_free_and_unlink(ustack, us_iter); undosys_stack_validate(ustack, is_not_empty); } @@ -297,7 +297,7 @@ static void undosys_stack_clear_all_last(UndoStack *ustack, UndoStep *us) bool is_not_empty = true; UndoStep *us_iter; do { - us_iter = ustack->steps.last; + us_iter = static_cast(ustack->steps.last); BLI_assert(us_iter != ustack->step_active); undosys_step_free_and_unlink(ustack, us_iter); undosys_stack_validate(ustack, is_not_empty); @@ -315,7 +315,7 @@ static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us, UndoS bool is_not_empty = true; UndoStep *us_iter; do { - us_iter = ustack->steps.first; + us_iter = static_cast(ustack->steps.first); if (us_iter == us_exclude) { us_iter = us_iter->next; } @@ -329,7 +329,7 @@ static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us, UndoS static bool undosys_stack_push_main(UndoStack *ustack, const char 
*name, struct Main *bmain) { UNDO_NESTED_ASSERT(false); - BLI_assert(ustack->step_init == NULL); + BLI_assert(ustack->step_init == nullptr); CLOG_INFO(&LOG, 1, "'%s'", name); bContext *C_temp = CTX_create(); CTX_data_main_set(C_temp, bmain); @@ -348,7 +348,7 @@ void BKE_undosys_stack_init_from_main(UndoStack *ustack, struct Main *bmain) void BKE_undosys_stack_init_from_context(UndoStack *ustack, bContext *C) { const UndoType *ut = BKE_undosys_type_from_context(C); - if (!ELEM(ut, NULL, BKE_UNDOSYS_TYPE_MEMFILE)) { + if (!ELEM(ut, nullptr, BKE_UNDOSYS_TYPE_MEMFILE)) { BKE_undosys_step_push_with_type(ustack, C, IFACE_("Original Mode"), ut); } } @@ -356,7 +356,8 @@ void BKE_undosys_stack_init_from_context(UndoStack *ustack, bContext *C) bool BKE_undosys_stack_has_undo(const UndoStack *ustack, const char *name) { if (name) { - const UndoStep *us = BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name)); + const UndoStep *us = static_cast( + BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name))); return us && us->prev; } @@ -391,11 +392,11 @@ void BKE_undosys_stack_limit_steps_and_memory(UndoStack *ustack, int steps, size CLOG_INFO(&LOG, 1, "steps=%d, memory_limit=%zu", steps, memory_limit); UndoStep *us; - UndoStep *us_exclude = NULL; + UndoStep *us_exclude = nullptr; /* keep at least two (original + other) */ size_t data_size_all = 0; size_t us_count = 0; - for (us = ustack->steps.last; us && us->prev; us = us->prev) { + for (us = static_cast(ustack->steps.last); us && us->prev; us = us->prev) { if (memory_limit) { data_size_all += us->data_size; if (data_size_all > memory_limit) { @@ -447,7 +448,7 @@ UndoStep *BKE_undosys_step_push_init_with_type(UndoStack *ustack, { UNDO_NESTED_ASSERT(false); /* We could detect and clean this up (but it should never happen!). */ - BLI_assert(ustack->step_init == NULL); + BLI_assert(ustack->step_init == nullptr); if (ut->step_encode_init) { undosys_stack_validate(ustack, false); @@ -455,8 +456,8 @@ UndoStep *BKE_undosys_step_push_init_with_type(UndoStack *ustack, undosys_stack_clear_all_last(ustack, ustack->step_active->next); } - UndoStep *us = MEM_callocN(ut->step_size, __func__); - if (name != NULL) { + UndoStep *us = static_cast(MEM_callocN(ut->step_size, __func__)); + if (name != nullptr) { BLI_strncpy(us->name, name, sizeof(us->name)); } us->type = ut; @@ -467,17 +468,17 @@ UndoStep *BKE_undosys_step_push_init_with_type(UndoStack *ustack, return us; } - return NULL; + return nullptr; } UndoStep *BKE_undosys_step_push_init(UndoStack *ustack, bContext *C, const char *name) { UNDO_NESTED_ASSERT(false); /* We could detect and clean this up (but it should never happen!). 
*/ - BLI_assert(ustack->step_init == NULL); + BLI_assert(ustack->step_init == nullptr); const UndoType *ut = BKE_undosys_type_from_context(C); - if (ut == NULL) { - return NULL; + if (ut == nullptr) { + return nullptr; } return BKE_undosys_step_push_init_with_type(ustack, C, name, ut); } @@ -487,11 +488,11 @@ eUndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack, const char *name, const UndoType *ut) { - BLI_assert((ut->flags & UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE) == 0 || C != NULL); + BLI_assert((ut->flags & UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE) == 0 || C != nullptr); UNDO_NESTED_ASSERT(false); undosys_stack_validate(ustack, false); - bool is_not_empty = ustack->step_active != NULL; + bool is_not_empty = ustack->step_active != nullptr; eUndoPushReturn retval = UNDO_PUSH_RET_FAILURE; /* Might not be final place for this to be called - probably only want to call it from some @@ -502,9 +503,9 @@ eUndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack, retval |= UNDO_PUSH_RET_OVERRIDE_CHANGED; } - /* Remove all undo-steps after (also when 'ustack->step_active == NULL'). */ + /* Remove all undo-steps after (also when 'ustack->step_active == nullptr'). */ while (ustack->steps.last != ustack->step_active) { - UndoStep *us_iter = ustack->steps.last; + UndoStep *us_iter = static_cast(ustack->steps.last); undosys_step_free_and_unlink(ustack, us_iter); undosys_stack_validate(ustack, is_not_empty); } @@ -514,17 +515,17 @@ eUndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack, } #ifdef WITH_GLOBAL_UNDO_ENSURE_UPDATED - if (ut->step_foreach_ID_ref != NULL) { + if (ut->step_foreach_ID_ref != nullptr) { if (G_MAIN->is_memfile_undo_written == false) { const char *name_internal = "MemFile Internal (pre)"; /* Don't let 'step_init' cause issues when adding memfile undo step. */ void *step_init = ustack->step_init; - ustack->step_init = NULL; + ustack->step_init = nullptr; const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN); /* Restore 'step_init'. */ - ustack->step_init = step_init; + ustack->step_init = static_cast(step_init); if (ok) { - UndoStep *us = ustack->steps.last; + UndoStep *us = static_cast(ustack->steps.last); BLI_assert(STREQ(us->name, name_internal)); us->skip = true; # ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER @@ -537,8 +538,10 @@ eUndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack, bool use_memfile_step = false; { - UndoStep *us = ustack->step_init ? ustack->step_init : MEM_callocN(ut->step_size, __func__); - ustack->step_init = NULL; + UndoStep *us = ustack->step_init ? + ustack->step_init : + static_cast(MEM_callocN(ut->step_size, __func__)); + ustack->step_init = nullptr; if (us->name[0] == '\0') { BLI_strncpy(us->name, name, sizeof(us->name)); } @@ -566,7 +569,7 @@ eUndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack, const char *name_internal = us_prev->name; const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN); if (ok) { - UndoStep *us = ustack->steps.last; + UndoStep *us = static_cast(ustack->steps.last); BLI_assert(STREQ(us->name, name_internal)); us_prev->skip = true; #ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER @@ -591,8 +594,8 @@ eUndoPushReturn BKE_undosys_step_push(UndoStack *ustack, bContext *C, const char UNDO_NESTED_ASSERT(false); const UndoType *ut = ustack->step_init ? 
ustack->step_init->type : BKE_undosys_type_from_context(C); - if (ut == NULL) { - return false; + if (ut == nullptr) { + return UNDO_PUSH_RET_FAILURE; } return BKE_undosys_step_push_with_type(ustack, C, name, ut); } @@ -627,40 +630,40 @@ UndoStep *BKE_undosys_step_find_by_name_with_type(UndoStack *ustack, const char *name, const UndoType *ut) { - for (UndoStep *us = ustack->steps.last; us; us = us->prev) { + for (UndoStep *us = static_cast(ustack->steps.last); us; us = us->prev) { if (us->type == ut) { if (STREQ(name, us->name)) { return us; } } } - return NULL; + return nullptr; } UndoStep *BKE_undosys_step_find_by_name(UndoStack *ustack, const char *name) { - return BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name)); + return static_cast(BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name))); } UndoStep *BKE_undosys_step_find_by_type(UndoStack *ustack, const UndoType *ut) { - for (UndoStep *us = ustack->steps.last; us; us = us->prev) { + for (UndoStep *us = static_cast(ustack->steps.last); us; us = us->prev) { if (us->type == ut) { return us; } } - return NULL; + return nullptr; } eUndoStepDir BKE_undosys_step_calc_direction(const UndoStack *ustack, const UndoStep *us_target, const UndoStep *us_reference) { - if (us_reference == NULL) { + if (us_reference == nullptr) { us_reference = ustack->step_active; } - BLI_assert(us_reference != NULL); + BLI_assert(us_reference != nullptr); /* Note that we use heuristics to make this lookup as fast as possible in most common cases, * assuming that: @@ -676,12 +679,12 @@ eUndoStepDir BKE_undosys_step_calc_direction(const UndoStack *ustack, } /* Search forward, and then backward. */ - for (UndoStep *us_iter = us_reference->next; us_iter != NULL; us_iter = us_iter->next) { + for (UndoStep *us_iter = us_reference->next; us_iter != nullptr; us_iter = us_iter->next) { if (us_iter == us_target) { return STEP_REDO; } } - for (UndoStep *us_iter = us_reference->prev; us_iter != NULL; us_iter = us_iter->prev) { + for (UndoStep *us_iter = us_reference->prev; us_iter != nullptr; us_iter = us_iter->prev) { if (us_iter == us_target) { return STEP_UNDO; } @@ -718,16 +721,16 @@ bool BKE_undosys_step_load_data_ex(UndoStack *ustack, const bool use_skip) { UNDO_NESTED_ASSERT(false); - if (us_target == NULL) { - CLOG_ERROR(&LOG, "called with a NULL target step"); + if (us_target == nullptr) { + CLOG_ERROR(&LOG, "called with a nullptr target step"); return false; } undosys_stack_validate(ustack, true); - if (us_reference == NULL) { + if (us_reference == nullptr) { us_reference = ustack->step_active; } - if (us_reference == NULL) { + if (us_reference == nullptr) { CLOG_ERROR(&LOG, "could not find a valid initial active target step as reference"); return false; } @@ -742,10 +745,10 @@ bool BKE_undosys_step_load_data_ex(UndoStack *ustack, * the one passed as parameter. */ UndoStep *us_target_active = us_target; if (use_skip) { - while (us_target_active != NULL && us_target_active->skip) { + while (us_target_active != nullptr && us_target_active->skip) { us_target_active = (undo_dir == -1) ? us_target_active->prev : us_target_active->next; } - if (us_target_active == NULL) { + if (us_target_active == nullptr) { CLOG_INFO(&LOG, 2, "undo/redo did not find a step after stepping over skip-steps " @@ -765,9 +768,9 @@ bool BKE_undosys_step_load_data_ex(UndoStack *ustack, /* Undo/Redo steps until we reach given target step (or beyond if it has to be skipped), * from given reference step. 
*/ bool is_processing_extra_skipped_steps = false; - for (UndoStep *us_iter = undosys_step_iter_first(us_reference, undo_dir); us_iter != NULL; + for (UndoStep *us_iter = undosys_step_iter_first(us_reference, undo_dir); us_iter != nullptr; us_iter = (undo_dir == -1) ? us_iter->prev : us_iter->next) { - BLI_assert(us_iter != NULL); + BLI_assert(us_iter != nullptr); const bool is_final = (us_iter == us_target_active); @@ -802,12 +805,12 @@ bool BKE_undosys_step_load_data_ex(UndoStack *ustack, bool BKE_undosys_step_load_data(UndoStack *ustack, bContext *C, UndoStep *us_target) { /* Note that here we do not skip 'skipped' steps by default. */ - return BKE_undosys_step_load_data_ex(ustack, C, us_target, NULL, false); + return BKE_undosys_step_load_data_ex(ustack, C, us_target, nullptr, false); } void BKE_undosys_step_load_from_index(UndoStack *ustack, bContext *C, const int index) { - UndoStep *us_target = BLI_findlink(&ustack->steps, index); + UndoStep *us_target = static_cast(BLI_findlink(&ustack->steps, index)); BLI_assert(us_target->skip == false); if (us_target == ustack->step_active) { return; @@ -823,7 +826,7 @@ bool BKE_undosys_step_undo_with_data_ex(UndoStack *ustack, /* In case there is no active step, we consider we just load given step, so reference must be * itself (due to weird 'load current active step in undo case' thing, see comments in * #BKE_undosys_step_load_data_ex). */ - UndoStep *us_reference = ustack->step_active != NULL ? ustack->step_active : us_target; + UndoStep *us_reference = ustack->step_active != nullptr ? ustack->step_active : us_target; BLI_assert(BKE_undosys_step_calc_direction(ustack, us_target, us_reference) == -1); @@ -837,7 +840,7 @@ bool BKE_undosys_step_undo_with_data(UndoStack *ustack, bContext *C, UndoStep *u bool BKE_undosys_step_undo(UndoStack *ustack, bContext *C) { - if (ustack->step_active != NULL) { + if (ustack->step_active != nullptr) { return BKE_undosys_step_undo_with_data(ustack, C, ustack->step_active->prev); } return false; @@ -850,7 +853,7 @@ bool BKE_undosys_step_redo_with_data_ex(UndoStack *ustack, { /* In case there is no active step, we consider we just load given step, so reference must be * the previous one. */ - UndoStep *us_reference = ustack->step_active != NULL ? ustack->step_active : us_target->prev; + UndoStep *us_reference = ustack->step_active != nullptr ? 
ustack->step_active : us_target->prev; BLI_assert(BKE_undosys_step_calc_direction(ustack, us_target, us_reference) == 1); @@ -864,7 +867,7 @@ bool BKE_undosys_step_redo_with_data(UndoStack *ustack, bContext *C, UndoStep *u bool BKE_undosys_step_redo(UndoStack *ustack, bContext *C) { - if (ustack->step_active != NULL) { + if (ustack->step_active != nullptr) { return BKE_undosys_step_redo_with_data(ustack, C, ustack->step_active->next); } return false; @@ -872,9 +875,7 @@ bool BKE_undosys_step_redo(UndoStack *ustack, bContext *C) UndoType *BKE_undosys_type_append(void (*undosys_fn)(UndoType *)) { - UndoType *ut; - - ut = MEM_callocN(sizeof(UndoType), __func__); + UndoType *ut = MEM_cnew(__func__); undosys_fn(ut); @@ -886,7 +887,7 @@ UndoType *BKE_undosys_type_append(void (*undosys_fn)(UndoType *)) void BKE_undosys_type_free_all(void) { UndoType *ut; - while ((ut = BLI_pophead(&g_undo_types))) { + while ((ut = static_cast(BLI_pophead(&g_undo_types)))) { MEM_freeN(ut); } } @@ -924,7 +925,7 @@ void BKE_undosys_stack_group_end(UndoStack *ustack) BLI_assert(ustack->group_level >= 0); if (ustack->group_level == 0) { - if (LIKELY(ustack->step_active != NULL)) { + if (LIKELY(ustack->step_active != nullptr)) { ustack->step_active->skip = false; } } @@ -944,7 +945,7 @@ static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(UndoStack *ustack, { LISTBASE_FOREACH (UndoStep *, us, &ustack->steps) { const UndoType *ut = us->type; - if (ut->step_foreach_ID_ref != NULL) { + if (ut->step_foreach_ID_ref != nullptr) { ut->step_foreach_ID_ref(us, foreach_ID_ref_fn, user_data); } } diff --git a/source/blender/blenkernel/intern/volume.cc b/source/blender/blenkernel/intern/volume.cc index 04501e3fe4a..bad2e844cc0 100644 --- a/source/blender/blenkernel/intern/volume.cc +++ b/source/blender/blenkernel/intern/volume.cc @@ -564,8 +564,8 @@ static void volume_foreach_cache(ID *id, { Volume *volume = (Volume *)id; IDCacheKey key = { - /* id_session_uuid */ id->session_uuid, - /* offset_in_ID */ offsetof(Volume, runtime.grids), + /*id_session_uuid*/ id->session_uuid, + /*offset_in_ID*/ offsetof(Volume, runtime.grids), }; function_callback(id, &key, (void **)&volume->runtime.grids, 0, user_data); @@ -644,33 +644,33 @@ static void volume_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_VO = { - /* id_code */ ID_VO, - /* id_filter */ FILTER_ID_VO, - /* main_listbase_index */ INDEX_ID_VO, - /* struct_size */ sizeof(Volume), - /* name */ "Volume", - /* name_plural */ "volumes", - /* translation_context */ BLT_I18NCONTEXT_ID_VOLUME, - /* flags */ IDTYPE_FLAGS_APPEND_IS_REUSABLE, - /* asset_type_info */ nullptr, + /*id_code*/ ID_VO, + /*id_filter*/ FILTER_ID_VO, + /*main_listbase_index*/ INDEX_ID_VO, + /*struct_size*/ sizeof(Volume), + /*name*/ "Volume", + /*name_plural*/ "volumes", + /*translation_context*/ BLT_I18NCONTEXT_ID_VOLUME, + /*flags*/ IDTYPE_FLAGS_APPEND_IS_REUSABLE, + /*asset_type_info*/ nullptr, - /* init_data */ volume_init_data, - /* copy_data */ volume_copy_data, - /* free_data */ volume_free_data, - /* make_local */ nullptr, - /* foreach_id */ volume_foreach_id, - /* foreach_cache */ volume_foreach_cache, - /* foreach_path */ volume_foreach_path, - /* owner_pointer_get */ nullptr, + /*init_data*/ volume_init_data, + /*copy_data*/ volume_copy_data, + /*free_data*/ volume_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ volume_foreach_id, + /*foreach_cache*/ volume_foreach_cache, + /*foreach_path*/ volume_foreach_path, + /*owner_pointer_get*/ nullptr, - /* blend_write */ 
volume_blend_write, - /* blend_read_data */ volume_blend_read_data, - /* blend_read_lib */ volume_blend_read_lib, - /* blend_read_expand */ volume_blend_read_expand, + /*blend_write*/ volume_blend_write, + /*blend_read_data*/ volume_blend_read_data, + /*blend_read_lib*/ volume_blend_read_lib, + /*blend_read_expand*/ volume_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; void BKE_volume_init_grids(Volume *volume) @@ -1474,7 +1474,9 @@ void BKE_volume_grid_transform_matrix(const VolumeGrid *volume_grid, float mat[4 #endif } -void BKE_volume_grid_transform_matrix_set(struct VolumeGrid *volume_grid, const float mat[4][4]) +void BKE_volume_grid_transform_matrix_set(const Volume *volume, + VolumeGrid *volume_grid, + const float mat[4][4]) { #ifdef WITH_OPENVDB openvdb::math::Mat4f mat_openvdb; @@ -1483,11 +1485,11 @@ void BKE_volume_grid_transform_matrix_set(struct VolumeGrid *volume_grid, const mat_openvdb(col, row) = mat[col][row]; } } - openvdb::GridBase::Ptr grid = volume_grid->grid(); + openvdb::GridBase::Ptr grid = BKE_volume_grid_openvdb_for_write(volume, volume_grid, false); grid->setTransform(std::make_shared( std::make_shared(mat_openvdb))); #else - UNUSED_VARS(volume_grid, mat); + UNUSED_VARS(volume, volume_grid, mat); #endif } @@ -1504,7 +1506,6 @@ Volume *BKE_volume_new_for_eval(const Volume *volume_src) volume_dst->totcol = volume_src->totcol; volume_dst->render = volume_src->render; volume_dst->display = volume_src->display; - BKE_volume_init_grids(volume_dst); return volume_dst; } diff --git a/source/blender/blenkernel/intern/workspace.cc b/source/blender/blenkernel/intern/workspace.cc index 6ffef5555fb..fb5d4f09a2f 100644 --- a/source/blender/blenkernel/intern/workspace.cc +++ b/source/blender/blenkernel/intern/workspace.cc @@ -186,33 +186,34 @@ static void workspace_blend_read_expand(BlendExpander *expander, ID *id) } IDTypeInfo IDType_ID_WS = { - /* id_code */ ID_WS, - /* id_filter */ FILTER_ID_WS, - /* main_listbase_index */ INDEX_ID_WS, - /* struct_size */ sizeof(WorkSpace), - /* name */ "WorkSpace", - /* name_plural */ "workspaces", - /* translation_context */ BLT_I18NCONTEXT_ID_WORKSPACE, - /* flags */ IDTYPE_FLAGS_NO_COPY | IDTYPE_FLAGS_ONLY_APPEND | IDTYPE_FLAGS_NO_ANIMDATA, - /* asset_type_info */ nullptr, + /*id_code*/ ID_WS, + /*id_filter*/ FILTER_ID_WS, + /*main_listbase_index*/ INDEX_ID_WS, + /*struct_size*/ sizeof(WorkSpace), + /*name*/ "WorkSpace", + /*name_plural*/ "workspaces", + /*translation_context*/ BLT_I18NCONTEXT_ID_WORKSPACE, + /*flags*/ IDTYPE_FLAGS_NO_COPY | IDTYPE_FLAGS_ONLY_APPEND | IDTYPE_FLAGS_NO_ANIMDATA | + IDTYPE_FLAGS_NO_MEMFILE_UNDO, + /*asset_type_info*/ nullptr, - /* init_data */ workspace_init_data, - /* copy_data */ nullptr, - /* free_data */ workspace_free_data, - /* make_local */ nullptr, - /* foreach_id */ workspace_foreach_id, - /* foreach_cache */ nullptr, - /* foreach_path */ nullptr, - /* owner_pointer_get */ nullptr, + /*init_data*/ workspace_init_data, + /*copy_data*/ nullptr, + /*free_data*/ workspace_free_data, + /*make_local*/ nullptr, + /*foreach_id*/ workspace_foreach_id, + /*foreach_cache*/ nullptr, + /*foreach_path*/ nullptr, + /*owner_pointer_get*/ nullptr, - /* blend_write */ workspace_blend_write, - /* blend_read_data */ workspace_blend_read_data, - /* blend_read_lib */ workspace_blend_read_lib, - /* blend_read_expand */ workspace_blend_read_expand, + /*blend_write*/ 
workspace_blend_write, + /*blend_read_data*/ workspace_blend_read_data, + /*blend_read_lib*/ workspace_blend_read_lib, + /*blend_read_expand*/ workspace_blend_read_expand, - /* blend_read_undo_preserve */ nullptr, + /*blend_read_undo_preserve*/ nullptr, - /* lib_override_apply_post */ nullptr, + /*lib_override_apply_post*/ nullptr, }; /* -------------------------------------------------------------------- */ diff --git a/source/blender/blenkernel/intern/writeffmpeg.c b/source/blender/blenkernel/intern/writeffmpeg.c index 8d6dba440fd..be8f985ec56 100644 --- a/source/blender/blenkernel/intern/writeffmpeg.c +++ b/source/blender/blenkernel/intern/writeffmpeg.c @@ -469,18 +469,19 @@ static AVRational calc_time_base(uint den, double num, int codec_id) static const AVCodec *get_av1_encoder( FFMpegContext *context, RenderData *rd, AVDictionary **opts, int rectx, int recty) { - /* There are three possible encoders for AV1: libaom-av1, librav1e, and libsvtav1. librav1e tends - * to give the best compression quality while libsvtav1 tends to be the fastest encoder. One of - * each will be picked based on the preset setting, and if a particular encoder is not available, - * then use the default returned by FFMpeg. */ + /* There are three possible encoders for AV1: `libaom-av1`, librav1e, and `libsvtav1`. librav1e + * tends to give the best compression quality while `libsvtav1` tends to be the fastest encoder. + * One of each will be picked based on the preset setting, and if a particular encoder is not + * available, then use the default returned by FFMpeg. */ const AVCodec *codec = NULL; switch (context->ffmpeg_preset) { case FFM_PRESET_BEST: - /* Default to libaom-av1 for BEST preset due to it performing better than rav1e in terms of - * video quality (VMAF scores). Fallback to rav1e if libaom-av1 isn't available. */ - codec = avcodec_find_encoder_by_name("libaom-av1"); + /* `libaom-av1` may produce better VMAF-scoring videos in several cases, but there are cases + * where using a different encoder is desirable, such as in T103849. */ + codec = avcodec_find_encoder_by_name("librav1e"); if (!codec) { - codec = avcodec_find_encoder_by_name("librav1e"); + /* Fallback to `libaom-av1` if librav1e is not found. */ + codec = avcodec_find_encoder_by_name("libaom-av1"); } break; case FFM_PRESET_REALTIME: @@ -524,8 +525,8 @@ static const AVCodec *get_av1_encoder( break; } if (context->ffmpeg_crf >= 0) { - /* librav1e does not use -crf, but uses -qp in the range of 0-255. Calculates the roughly - * equivalent float, and truncates it to an integer. */ + /* librav1e does not use `-crf`, but uses `-qp` in the range of 0-255. + * Calculates the roughly equivalent float, and truncates it to an integer. */ unsigned int qp_value = ((float)context->ffmpeg_crf) * 255.0F / 51.0F; if (qp_value > 255) { qp_value = 255; @@ -539,7 +540,7 @@ static const AVCodec *get_av1_encoder( } else if (STREQ(codec->name, "libsvtav1")) { /* Set preset value based on ffmpeg_preset. - * Must check context->ffmpeg_preset again in case this encoder was selected due to the + * Must check `context->ffmpeg_preset` again in case this encoder was selected due to the * absence of another. */ switch (context->ffmpeg_preset) { case FFM_PRESET_REALTIME: @@ -554,13 +555,13 @@ static const AVCodec *get_av1_encoder( break; } if (context->ffmpeg_crf >= 0) { - /* libsvtav1 does not support crf until FFmpeg builds since 2022-02-24, use qp as fallback. 
- */ + /* `libsvtav1` does not support `crf` until FFmpeg builds since 2022-02-24, + * use `qp` as fallback. */ ffmpeg_dict_set_int(opts, "qp", context->ffmpeg_crf); } } else if (STREQ(codec->name, "libaom-av1")) { - /* Speed up libaom-av1 encoding by enabling multithreading and setting tiles. */ + /* Speed up libaom-av1 encoding by enabling multi-threading and setting tiles. */ ffmpeg_dict_set_int(opts, "row-mt", 1); const char *tiles_string = NULL; bool tiles_string_is_dynamic = false; diff --git a/source/blender/blenlib/BLI_color.hh b/source/blender/blenlib/BLI_color.hh index 0256cec667c..f334ade6dda 100644 --- a/source/blender/blenlib/BLI_color.hh +++ b/source/blender/blenlib/BLI_color.hh @@ -248,7 +248,7 @@ class ColorSceneLinearByteEncoded4b final template class ColorTheme4 final : public ColorRGBA { public: - constexpr ColorTheme4() : ColorRGBA(){}; + constexpr ColorTheme4() = default; constexpr ColorTheme4(const ChannelStorageType *rgba) : ColorRGBA(rgba) diff --git a/source/blender/blenlib/BLI_generic_virtual_array.hh b/source/blender/blenlib/BLI_generic_virtual_array.hh index cba767341c1..cb45da5e495 100644 --- a/source/blender/blenlib/BLI_generic_virtual_array.hh +++ b/source/blender/blenlib/BLI_generic_virtual_array.hh @@ -388,25 +388,24 @@ template class VArrayImpl_For_GVArray : public VArrayImpl { return true; } - void materialize(IndexMask mask, MutableSpan r_span) const override + void materialize(IndexMask mask, T *dst) const override { - varray_.materialize(mask, r_span.data()); + varray_.materialize(mask, dst); } - void materialize_to_uninitialized(IndexMask mask, MutableSpan r_span) const override + void materialize_to_uninitialized(IndexMask mask, T *dst) const override { - varray_.materialize_to_uninitialized(mask, r_span.data()); + varray_.materialize_to_uninitialized(mask, dst); } - void materialize_compressed(IndexMask mask, MutableSpan r_span) const override + void materialize_compressed(IndexMask mask, T *dst) const override { - varray_.materialize_compressed(mask, r_span.data()); + varray_.materialize_compressed(mask, dst); } - void materialize_compressed_to_uninitialized(IndexMask mask, - MutableSpan r_span) const override + void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override { - varray_.materialize_compressed_to_uninitialized(mask, r_span.data()); + varray_.materialize_compressed_to_uninitialized(mask, dst); } }; @@ -539,25 +538,24 @@ template class VMutableArrayImpl_For_GVMutableArray : public VMutabl return true; } - void materialize(IndexMask mask, MutableSpan r_span) const override + void materialize(IndexMask mask, T *dst) const override { - varray_.materialize(mask, r_span.data()); + varray_.materialize(mask, dst); } - void materialize_to_uninitialized(IndexMask mask, MutableSpan r_span) const override + void materialize_to_uninitialized(IndexMask mask, T *dst) const override { - varray_.materialize_to_uninitialized(mask, r_span.data()); + varray_.materialize_to_uninitialized(mask, dst); } - void materialize_compressed(IndexMask mask, MutableSpan r_span) const override + void materialize_compressed(IndexMask mask, T *dst) const override { - varray_.materialize_compressed(mask, r_span.data()); + varray_.materialize_compressed(mask, dst); } - void materialize_compressed_to_uninitialized(IndexMask mask, - MutableSpan r_span) const override + void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override { - varray_.materialize_compressed_to_uninitialized(mask, r_span.data()); + 
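Editorial note (not part of the patch): in `get_av1_encoder()` above, Blender's CRF value (0-51 scale) is rescaled onto rav1e's QP range (0-255) and truncated. A minimal sketch of that mapping as a standalone helper; the function name is hypothetical:

/* Hypothetical helper mirroring the CRF -> QP conversion used for librav1e above. */
static unsigned int crf_to_rav1e_qp(const int crf)
{
  unsigned int qp = (unsigned int)((float)crf * 255.0f / 51.0f);
  return qp > 255 ? 255 : qp; /* 255/51 == 5, so e.g. CRF 23 maps to QP 115. */
}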
varray_.materialize_compressed_to_uninitialized(mask, dst); } }; diff --git a/source/blender/blenlib/BLI_ghash.h b/source/blender/blenlib/BLI_ghash.h index ac35d47bdb0..ac2b8a70205 100644 --- a/source/blender/blenlib/BLI_ghash.h +++ b/source/blender/blenlib/BLI_ghash.h @@ -404,7 +404,7 @@ int BLI_table_gset_len(TableGSet *ts); for (_i1 = 0; _i1 < (ts)->cur; _i1++) { \ if (!(ts)->elems[_i1]) \ continue; \ - v = (ts)->elems[_i1]; \ + v = static_cast((ts)->elems[_i1]); \ index++; #define TGSET_ITER_INDEX_END \ diff --git a/source/blender/blenlib/BLI_index_mask.hh b/source/blender/blenlib/BLI_index_mask.hh index 2144955f4f5..c37820d648b 100644 --- a/source/blender/blenlib/BLI_index_mask.hh +++ b/source/blender/blenlib/BLI_index_mask.hh @@ -234,8 +234,15 @@ class IndexMask { return indices_.first() >= range.first() && indices_.last() <= range.last(); } - IndexMask slice(int64_t start, int64_t size) const; - IndexMask slice(IndexRange slice) const; + IndexMask slice(const int64_t start, const int64_t size) const + { + return IndexMask(indices_.slice(start, size)); + } + + IndexMask slice(const IndexRange slice) const + { + return IndexMask(indices_.slice(slice)); + } IndexMask slice_safe(int64_t start, int64_t size) const; IndexMask slice_safe(IndexRange slice) const; diff --git a/source/blender/blenlib/BLI_length_parameterize.hh b/source/blender/blenlib/BLI_length_parameterize.hh index d81bcbe1e7a..df00e004060 100644 --- a/source/blender/blenlib/BLI_length_parameterize.hh +++ b/source/blender/blenlib/BLI_length_parameterize.hh @@ -105,7 +105,7 @@ inline void sample_at_length(const Span accumulated_segment_lengths, BLI_assert(lengths.size() > 0); BLI_assert(sample_length >= 0.0f); - BLI_assert(sample_length <= lengths.last()); + BLI_assert(sample_length <= lengths.last() + 0.00001f); if (hint != nullptr && hint->segment_index >= 0) { const float length_in_segment = sample_length - hint->segment_start; diff --git a/source/blender/blenlib/BLI_math_base.hh b/source/blender/blenlib/BLI_math_base.hh index 098c53e43d2..fdee18840ce 100644 --- a/source/blender/blenlib/BLI_math_base.hh +++ b/source/blender/blenlib/BLI_math_base.hh @@ -34,6 +34,11 @@ template inline T abs(const T &a) return std::abs(a); } +template inline T sign(const T &a) +{ + return (T(0) < a) - (a < T(0)); +} + template inline T min(const T &a, const T &b) { return std::min(a, b); diff --git a/source/blender/blenlib/BLI_math_matrix.h b/source/blender/blenlib/BLI_math_matrix.h index 538474f58b6..1278bc90e44 100644 --- a/source/blender/blenlib/BLI_math_matrix.h +++ b/source/blender/blenlib/BLI_math_matrix.h @@ -83,16 +83,12 @@ void mul_m3_m4m4(float R[3][3], const float A[4][4], const float B[4][4]); /** * Special matrix multiplies - * - uniq: `R <-- AB`, R is neither A nor B * - pre: `R <-- AR` * - post: `R <-- RB`. 
*/ -void mul_m3_m3m3_uniq(float R[3][3], const float A[3][3], const float B[3][3]); void mul_m3_m3_pre(float R[3][3], const float A[3][3]); void mul_m3_m3_post(float R[3][3], const float B[3][3]); -void mul_m4_m4m4_uniq(float R[4][4], const float A[4][4], const float B[4][4]); -void mul_m4_m4m4_db_uniq(double R[4][4], const double A[4][4], const double B[4][4]); -void mul_m4db_m4db_m4fl_uniq(double R[4][4], const double A[4][4], const float B[4][4]); +void mul_m4db_m4db_m4fl(double R[4][4], const double A[4][4], const float B[4][4]); void mul_m4_m4_pre(float R[4][4], const float A[4][4]); void mul_m4_m4_post(float R[4][4], const float B[4][4]); diff --git a/source/blender/blenlib/BLI_math_matrix.hh b/source/blender/blenlib/BLI_math_matrix.hh index 79699e082f6..d6a5e907111 100644 --- a/source/blender/blenlib/BLI_math_matrix.hh +++ b/source/blender/blenlib/BLI_math_matrix.hh @@ -1136,6 +1136,7 @@ MatBase perspective(T left, T right, T bottom, T top, T near_clip, T fa mat[2][2] = -(far_clip + near_clip) / z_delta; mat[2][3] = -1.0f; mat[3][2] = (-2.0f * near_clip * far_clip) / z_delta; + mat[3][3] = 0.0f; } return mat; } diff --git a/source/blender/blenlib/BLI_math_vector.hh b/source/blender/blenlib/BLI_math_vector.hh index 1e8f5a3f42a..cab5d16dd9b 100644 --- a/source/blender/blenlib/BLI_math_vector.hh +++ b/source/blender/blenlib/BLI_math_vector.hh @@ -70,6 +70,19 @@ template [[nodiscard]] inline VecBase abs(const V return result; } +/** + * Returns -1 if \a a is less than 0, 0 if \a a is equal to 0, and +1 if \a a is greater than 0. + */ +template +[[nodiscard]] inline VecBase sign(const VecBase &a) +{ + VecBase result; + for (int i = 0; i < Size; i++) { + result[i] = math::sign(a[i]); + } + return result; +} + template [[nodiscard]] inline VecBase min(const VecBase &a, const VecBase &b) { diff --git a/source/blender/blenlib/BLI_math_vector_types.hh b/source/blender/blenlib/BLI_math_vector_types.hh index 1a408ad4b8d..89f866a5172 100644 --- a/source/blender/blenlib/BLI_math_vector_types.hh +++ b/source/blender/blenlib/BLI_math_vector_types.hh @@ -174,6 +174,18 @@ template struct VecBase : public vec_struct_base } } + /** Swizzling. */ + + template= 3)> VecBase xy() const + { + return *reinterpret_cast *>(this); + } + + template= 4)> VecBase xyz() const + { + return *reinterpret_cast *>(this); + } + #undef BLI_ENABLE_IF_VEC /** Conversion from pointers (from C-style vectors). */ diff --git a/source/blender/blenlib/BLI_offset_indices.hh b/source/blender/blenlib/BLI_offset_indices.hh new file mode 100644 index 00000000000..4ae2bde3705 --- /dev/null +++ b/source/blender/blenlib/BLI_offset_indices.hh @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_index_range.hh" +#include "BLI_span.hh" + +namespace blender::offset_indices { + +/** + * References an array of ascending indices. A pair of consecutive indices encode an index range. + * Another common way to store the same kind of data is to store the start and size of every range + * separately. Using offsets instead halves the memory consumption. The downside is that the + * array has to be one element longer than the total number of ranges. The extra element is + * necessary to be able to get the last index range without requiring an extra branch for the case. + * + * This class is a thin wrapper around such an array that makes it easy to retrieve the index range + * at a specific index. 
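+ *
+ * A small illustrative sketch (the values and the `int` instantiation are only an
+ * example): an offsets array `{0, 3, 3, 7}` describes three ranges, the middle one
+ * being empty.
+ *
+ * \code
+ * const int offsets[4] = {0, 3, 3, 7};
+ * const OffsetIndices<int> ranges{Span<int>(offsets, 4)};
+ * ranges.ranges_num();   // 3
+ * ranges[0];             // IndexRange(0, 3)
+ * ranges[1];             // IndexRange(3, 0), an empty range
+ * ranges[2];             // IndexRange(3, 4)
+ * ranges.total_size();   // 7
+ * \endcode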
+ */ +template class OffsetIndices { + private: + static_assert(std::is_integral_v); + + Span offsets_; + + public: + OffsetIndices(const Span offsets) : offsets_(offsets) + { + BLI_assert(std::is_sorted(offsets_.begin(), offsets_.end())); + } + + T size(const int64_t index) const + { + BLI_assert(index >= 0); + BLI_assert(index < offsets_.size() - 1); + const int64_t begin = offsets_[index]; + const int64_t end = offsets_[index + 1]; + const int64_t size = end - begin; + return size; + } + + /** Return the total number of elements in the the referenced arrays. */ + T total_size() const + { + return offsets_.last(); + } + + /** Return the number of ranges encoded by the offsets. */ + T ranges_num() const + { + return offsets_.size() - 1; + } + + IndexRange operator[](const int64_t index) const + { + BLI_assert(index >= 0); + BLI_assert(index < offsets_.size() - 1); + const int64_t begin = offsets_[index]; + const int64_t end = offsets_[index + 1]; + const int64_t size = end - begin; + return IndexRange(begin, size); + } + + IndexRange operator[](const IndexRange indices) const + { + const int64_t begin = offsets_[indices.start()]; + const int64_t end = offsets_[indices.one_after_last()]; + const int64_t size = end - begin; + return IndexRange(begin, size); + } + + /** + * Return a subset of the offsets describing the specified range of source elements. + * This is a slice into the source ranges rather than the indexed elements described by the + * offset values. + */ + OffsetIndices slice(const IndexRange range) const + { + BLI_assert(offsets_.index_range().drop_back(1).contains(range.last())); + return OffsetIndices(offsets_.slice(range.start(), range.one_after_last())); + } +}; + +/** + * Turn an array of sizes into the offset at each index including all previous sizes. + */ +void accumulate_counts_to_offsets(MutableSpan counts_to_offsets, int start_offset = 0); + +} // namespace blender::offset_indices + +namespace blender { +using offset_indices::OffsetIndices; +} diff --git a/source/blender/blenlib/BLI_path_util.h b/source/blender/blenlib/BLI_path_util.h index 4ea059391b6..78f8e529740 100644 --- a/source/blender/blenlib/BLI_path_util.h +++ b/source/blender/blenlib/BLI_path_util.h @@ -516,6 +516,24 @@ int BLI_path_cmp_normalized(const char *p1, const char *p2) # define ALTSEP_STR "\\" #endif +/** + * Return true if the slash can be used as a separator on this platform. + */ +BLI_INLINE bool BLI_path_slash_is_native_compat(const char ch) +{ + /* On UNIX it only makes sense to treat `/` as a path separator. + * On WIN32 either may be used. */ + if (ch == SEP) { + return true; + } +#ifdef WIN32 + if (ch == ALTSEP) { + return true; + } +#endif + return false; +} + /* Parent and current dir helpers. */ #define FILENAME_PARENT ".." #define FILENAME_CURRENT "." diff --git a/source/blender/blenlib/BLI_task.hh b/source/blender/blenlib/BLI_task.hh index e7d9a21439a..60585e35099 100644 --- a/source/blender/blenlib/BLI_task.hh +++ b/source/blender/blenlib/BLI_task.hh @@ -37,7 +37,7 @@ namespace blender::threading { template -void parallel_for_each(Range &range, const Function &function) +void parallel_for_each(Range &&range, const Function &function) { #ifdef WITH_TBB tbb::parallel_for_each(range, function); @@ -71,6 +71,36 @@ void parallel_for(IndexRange range, int64_t grain_size, const Function &function function(range); } +/** + * Same as #parallel_for but tries to make the sub-range sizes multiples of the given alignment. 
+ * This can improve performance when the range is processed using vectorized and/or unrolled loops, + * because the fallback loop that processes remaining values is used less often. A disadvantage of + * using this instead of #parallel_for is that the size differences between sub-ranges can be + * larger, which means that work is distributed less evenly. + */ +template +void parallel_for_aligned(const IndexRange range, + const int64_t grain_size, + const int64_t alignment, + const Function &function) +{ + const int64_t global_begin = range.start(); + const int64_t global_end = range.one_after_last(); + const int64_t alignment_mask = ~(alignment - 1); + parallel_for(range, grain_size, [&](const IndexRange unaligned_range) { + /* Move the sub-range boundaries down to the next aligned index. The "global" begin and end + * remain fixed though. */ + const int64_t unaligned_begin = unaligned_range.start(); + const int64_t unaligned_end = unaligned_range.one_after_last(); + const int64_t aligned_begin = std::max(global_begin, unaligned_begin & alignment_mask); + const int64_t aligned_end = unaligned_end == global_end ? + unaligned_end : + std::max(global_begin, unaligned_end & alignment_mask); + const IndexRange aligned_range{aligned_begin, aligned_end - aligned_begin}; + function(aligned_range); + }); +} + template Value parallel_reduce(IndexRange range, int64_t grain_size, diff --git a/source/blender/blenlib/BLI_vector_list.hh b/source/blender/blenlib/BLI_vector_list.hh index dee333db39a..533d9efc421 100644 --- a/source/blender/blenlib/BLI_vector_list.hh +++ b/source/blender/blenlib/BLI_vector_list.hh @@ -117,4 +117,4 @@ class VectorList { } }; -} // namespace blender \ No newline at end of file +} // namespace blender diff --git a/source/blender/blenlib/BLI_virtual_array.hh b/source/blender/blenlib/BLI_virtual_array.hh index 189cb85d468..c57c1dae961 100644 --- a/source/blender/blenlib/BLI_virtual_array.hh +++ b/source/blender/blenlib/BLI_virtual_array.hh @@ -107,17 +107,16 @@ template class VArrayImpl { * Copy values from the virtual array into the provided span. The index of the value in the * virtual array is the same as the index in the span. */ - virtual void materialize(IndexMask mask, MutableSpan r_span) const + virtual void materialize(IndexMask mask, T *dst) const { - mask.foreach_index([&](const int64_t i) { r_span[i] = this->get(i); }); + mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); }); } /** * Same as #materialize but #r_span is expected to be uninitialized. */ - virtual void materialize_to_uninitialized(IndexMask mask, MutableSpan r_span) const + virtual void materialize_to_uninitialized(IndexMask mask, T *dst) const { - T *dst = r_span.data(); mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); }); } @@ -126,12 +125,11 @@ template class VArrayImpl { * in virtual array is not the same as the index in the output span. Instead, the span is filled * without gaps. */ - virtual void materialize_compressed(IndexMask mask, MutableSpan r_span) const + virtual void materialize_compressed(IndexMask mask, T *dst) const { - BLI_assert(mask.size() == r_span.size()); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { - r_span[i] = this->get(best_mask[i]); + dst[i] = this->get(best_mask[i]); } }); } @@ -139,10 +137,8 @@ template class VArrayImpl { /** * Same as #materialize_compressed but #r_span is expected to be uninitialized. 
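+   *
+   * For illustration: with a mask containing `{2, 5, 7}`, the compressed variants
+   * write the values of indices 2, 5 and 7 into `dst[0]`, `dst[1]` and `dst[2]`,
+   * whereas the plain #materialize variants write them to `dst[2]`, `dst[5]` and
+   * `dst[7]`.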
*/ - virtual void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan r_span) const + virtual void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const { - BLI_assert(mask.size() == r_span.size()); - T *dst = r_span.data(); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { new (dst + i) T(this->get(best_mask[i])); @@ -160,15 +156,6 @@ template class VArrayImpl { { return false; } - - /** - * Return true when the other virtual array should be considered to be the same, e.g. because it - * shares the same underlying memory. - */ - virtual bool is_same(const VArrayImpl & /*other*/) const - { - return false; - } }; /** Similar to #VArrayImpl, but adds methods that allow modifying the referenced elements. */ @@ -242,44 +229,27 @@ template class VArrayImpl_For_Span : public VMutableArrayImpl { return CommonVArrayInfo(CommonVArrayInfo::Type::Span, true, data_); } - bool is_same(const VArrayImpl &other) const final + void materialize(IndexMask mask, T *dst) const override { - if (other.size() != this->size_) { - return false; - } - const CommonVArrayInfo other_info = other.common_info(); - if (other_info.type != CommonVArrayInfo::Type::Span) { - return false; - } - return data_ == static_cast(other_info.data); + mask.foreach_index([&](const int64_t i) { dst[i] = data_[i]; }); } - void materialize(IndexMask mask, MutableSpan r_span) const override + void materialize_to_uninitialized(IndexMask mask, T *dst) const override { - mask.foreach_index([&](const int64_t i) { r_span[i] = data_[i]; }); - } - - void materialize_to_uninitialized(IndexMask mask, MutableSpan r_span) const override - { - T *dst = r_span.data(); mask.foreach_index([&](const int64_t i) { new (dst + i) T(data_[i]); }); } - void materialize_compressed(IndexMask mask, MutableSpan r_span) const override + void materialize_compressed(IndexMask mask, T *dst) const override { - BLI_assert(mask.size() == r_span.size()); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { - r_span[i] = data_[best_mask[i]]; + dst[i] = data_[best_mask[i]]; } }); } - void materialize_compressed_to_uninitialized(IndexMask mask, - MutableSpan r_span) const override + void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override { - BLI_assert(mask.size() == r_span.size()); - T *dst = r_span.data(); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { new (dst + i) T(data_[best_mask[i]]); @@ -357,29 +327,24 @@ template class VArrayImpl_For_Single final : public VArrayImpl { return CommonVArrayInfo(CommonVArrayInfo::Type::Single, true, &value_); } - void materialize(IndexMask mask, MutableSpan r_span) const override + void materialize(IndexMask mask, T *dst) const override { - r_span.fill_indices(mask, value_); + mask.foreach_index([&](const int64_t i) { dst[i] = value_; }); } - void materialize_to_uninitialized(IndexMask mask, MutableSpan r_span) const override + void materialize_to_uninitialized(IndexMask mask, T *dst) const override { - T *dst = r_span.data(); mask.foreach_index([&](const int64_t i) { new (dst + i) T(value_); }); } - void materialize_compressed(IndexMask mask, MutableSpan r_span) const override + void materialize_compressed(IndexMask mask, T *dst) const override { - BLI_assert(mask.size() == r_span.size()); - UNUSED_VARS_NDEBUG(mask); - r_span.fill(value_); + initialized_fill_n(dst, mask.size(), value_); } - void 
materialize_compressed_to_uninitialized(IndexMask mask, - MutableSpan r_span) const override + void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override { - BLI_assert(mask.size() == r_span.size()); - uninitialized_fill_n(r_span.data(), mask.size(), value_); + uninitialized_fill_n(dst, mask.size(), value_); } }; @@ -406,22 +371,18 @@ template class VArrayImpl_For_Func final : public return get_func_(index); } - void materialize(IndexMask mask, MutableSpan r_span) const override + void materialize(IndexMask mask, T *dst) const override { - T *dst = r_span.data(); mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); }); } - void materialize_to_uninitialized(IndexMask mask, MutableSpan r_span) const override + void materialize_to_uninitialized(IndexMask mask, T *dst) const override { - T *dst = r_span.data(); mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); }); } - void materialize_compressed(IndexMask mask, MutableSpan r_span) const override + void materialize_compressed(IndexMask mask, T *dst) const override { - BLI_assert(mask.size() == r_span.size()); - T *dst = r_span.data(); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { dst[i] = get_func_(best_mask[i]); @@ -429,11 +390,8 @@ template class VArrayImpl_For_Func final : public }); } - void materialize_compressed_to_uninitialized(IndexMask mask, - MutableSpan r_span) const override + void materialize_compressed_to_uninitialized(IndexMask mask, T *dst) const override { - BLI_assert(mask.size() == r_span.size()); - T *dst = r_span.data(); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { new (dst + i) T(get_func_(best_mask[i])); @@ -476,22 +434,18 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl { SetFunc(data_[index], std::move(value)); } - void materialize(IndexMask mask, MutableSpan r_span) const override + void materialize(IndexMask mask, ElemT *dst) const override { - ElemT *dst = r_span.data(); mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); }); } - void materialize_to_uninitialized(IndexMask mask, MutableSpan r_span) const override + void materialize_to_uninitialized(IndexMask mask, ElemT *dst) const override { - ElemT *dst = r_span.data(); mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); }); } - void materialize_compressed(IndexMask mask, MutableSpan r_span) const override + void materialize_compressed(IndexMask mask, ElemT *dst) const override { - BLI_assert(mask.size() == r_span.size()); - ElemT *dst = r_span.data(); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { dst[i] = GetFunc(data_[best_mask[i]]); @@ -499,34 +453,14 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl { }); } - void materialize_compressed_to_uninitialized(IndexMask mask, - MutableSpan r_span) const override + void materialize_compressed_to_uninitialized(IndexMask mask, ElemT *dst) const override { - BLI_assert(mask.size() == r_span.size()); - ElemT *dst = r_span.data(); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { new (dst + i) ElemT(GetFunc(data_[best_mask[i]])); } }); } - - bool is_same(const VArrayImpl &other) const override - { - if (other.size() != this->size_) { - return false; - } - if (const VArrayImpl_For_DerivedSpan *other_typed = - dynamic_cast *>(&other)) { - return other_typed->data_ == 
data_; - } - if (const VArrayImpl_For_DerivedSpan *other_typed = - dynamic_cast *>( - &other)) { - return other_typed->data_ == data_; - } - return false; - } }; template class VArrayCommon { return *static_cast(info.data); } - /** - * Return true when the other virtual references the same underlying memory. - */ - bool is_same(const VArrayCommon &other) const - { - if (!*this || !other) { - return false; - } - /* Check in both directions in case one does not know how to compare to the other - * implementation. */ - if (impl_->is_same(*other.impl_)) { - return true; - } - if (other.impl_->is_same(*impl_)) { - return true; - } - return false; - } - /** Copy the entire virtual array into a span. */ void materialize(MutableSpan r_span) const { @@ -835,7 +750,7 @@ template class VArrayCommon { void materialize(IndexMask mask, MutableSpan r_span) const { BLI_assert(mask.min_array_size() <= this->size()); - impl_->materialize(mask, r_span); + impl_->materialize(mask, r_span.data()); } void materialize_to_uninitialized(MutableSpan r_span) const @@ -846,18 +761,18 @@ template class VArrayCommon { void materialize_to_uninitialized(IndexMask mask, MutableSpan r_span) const { BLI_assert(mask.min_array_size() <= this->size()); - impl_->materialize_to_uninitialized(mask, r_span); + impl_->materialize_to_uninitialized(mask, r_span.data()); } /** Copy some elements of the virtual array into a span. */ void materialize_compressed(IndexMask mask, MutableSpan r_span) const { - impl_->materialize_compressed(mask, r_span); + impl_->materialize_compressed(mask, r_span.data()); } void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan r_span) const { - impl_->materialize_compressed_to_uninitialized(mask, r_span); + impl_->materialize_compressed_to_uninitialized(mask, r_span.data()); } /** See #GVArrayImpl::try_assign_GVArray. */ @@ -865,6 +780,11 @@ template class VArrayCommon { { return impl_->try_assign_GVArray(varray); } + + const VArrayImpl *get_implementation() const + { + return impl_; + } }; template class VMutableArray; diff --git a/source/blender/blenlib/CMakeLists.txt b/source/blender/blenlib/CMakeLists.txt index 7f74a26b4f1..7e5c69431b7 100644 --- a/source/blender/blenlib/CMakeLists.txt +++ b/source/blender/blenlib/CMakeLists.txt @@ -119,6 +119,7 @@ set(SRC intern/mesh_intersect.cc intern/noise.c intern/noise.cc + intern/offset_indices.cc intern/path_util.c intern/polyfill_2d.c intern/polyfill_2d_beautify.c @@ -277,9 +278,13 @@ set(SRC BLI_math_inline.h BLI_math_interp.h BLI_math_matrix.h + BLI_math_matrix.hh + BLI_math_matrix_types.hh BLI_math_mpq.hh BLI_math_rotation.h + BLI_math_rotation.hh BLI_math_rotation_legacy.hh + BLI_math_rotation_types.hh BLI_math_solvers.h BLI_math_statistics.h BLI_math_time.h @@ -300,6 +305,7 @@ set(SRC BLI_multi_value_map.hh BLI_noise.h BLI_noise.hh + BLI_offset_indices.hh BLI_parameter_pack_utils.hh BLI_path_util.h BLI_polyfill_2d.h diff --git a/source/blender/blenlib/intern/BLI_filelist.c b/source/blender/blenlib/intern/BLI_filelist.c index 4bcb023691a..7dca60128d3 100644 --- a/source/blender/blenlib/intern/BLI_filelist.c +++ b/source/blender/blenlib/intern/BLI_filelist.c @@ -4,6 +4,7 @@ * \ingroup bli */ +#include #include #include #include @@ -37,6 +38,7 @@ #include "BLI_listbase.h" #include "BLI_path_util.h" #include "BLI_string.h" +#include "BLI_string_utils.h" #include "../imbuf/IMB_imbuf.h" @@ -44,7 +46,7 @@ * Ordering function for sorting lists of files/directories. 
Returns -1 if * entry1 belongs before entry2, 0 if they are equal, 1 if they should be swapped. */ -static int bli_compare(struct direntry *entry1, struct direntry *entry2) +static int direntry_cmp(struct direntry *entry1, struct direntry *entry2) { /* type is equal to stat.st_mode */ @@ -106,113 +108,119 @@ struct BuildDirCtx { */ static void bli_builddir(struct BuildDirCtx *dir_ctx, const char *dirname) { + DIR *dir = opendir(dirname); + if (UNLIKELY(dir == NULL)) { + fprintf(stderr, + "Failed to open dir (%s): %s\n", + errno ? strerror(errno) : "unknown error", + dirname); + return; + } + struct ListBase dirbase = {NULL, NULL}; int newnum = 0; - DIR *dir; + const struct dirent *fname; + bool has_current = false, has_parent = false; - if ((dir = opendir(dirname)) != NULL) { - const struct dirent *fname; - bool has_current = false, has_parent = false; + char dirname_with_slash[FILE_MAXDIR + 1]; + size_t dirname_with_slash_len = BLI_strncpy_rlen( + dirname_with_slash, dirname, sizeof(dirname_with_slash) - 1); - while ((fname = readdir(dir)) != NULL) { + if ((dirname_with_slash_len > 0) && + (BLI_path_slash_is_native_compat(dirname[dirname_with_slash_len - 1]) == false)) { + dirname_with_slash[dirname_with_slash_len++] = SEP; + dirname_with_slash[dirname_with_slash_len] = '\0'; + } + + while ((fname = readdir(dir)) != NULL) { + struct dirlink *const dlink = (struct dirlink *)malloc(sizeof(struct dirlink)); + if (dlink != NULL) { + dlink->name = BLI_strdup(fname->d_name); + if (FILENAME_IS_PARENT(dlink->name)) { + has_parent = true; + } + else if (FILENAME_IS_CURRENT(dlink->name)) { + has_current = true; + } + BLI_addhead(&dirbase, dlink); + newnum++; + } + } + + if (!has_parent) { + char pardir[FILE_MAXDIR]; + + BLI_strncpy(pardir, dirname, sizeof(pardir)); + if (BLI_path_parent_dir(pardir) && (BLI_access(pardir, R_OK) == 0)) { struct dirlink *const dlink = (struct dirlink *)malloc(sizeof(struct dirlink)); if (dlink != NULL) { - dlink->name = BLI_strdup(fname->d_name); - if (FILENAME_IS_PARENT(dlink->name)) { - has_parent = true; - } - else if (FILENAME_IS_CURRENT(dlink->name)) { - has_current = true; - } + dlink->name = BLI_strdup(FILENAME_PARENT); BLI_addhead(&dirbase, dlink); newnum++; } } - - if (!has_parent) { - char pardir[FILE_MAXDIR]; - - BLI_strncpy(pardir, dirname, sizeof(pardir)); - if (BLI_path_parent_dir(pardir) && (BLI_access(pardir, R_OK) == 0)) { - struct dirlink *const dlink = (struct dirlink *)malloc(sizeof(struct dirlink)); - if (dlink != NULL) { - dlink->name = BLI_strdup(FILENAME_PARENT); - BLI_addhead(&dirbase, dlink); - newnum++; - } - } + } + if (!has_current) { + struct dirlink *const dlink = (struct dirlink *)malloc(sizeof(struct dirlink)); + if (dlink != NULL) { + dlink->name = BLI_strdup(FILENAME_CURRENT); + BLI_addhead(&dirbase, dlink); + newnum++; } - if (!has_current) { - struct dirlink *const dlink = (struct dirlink *)malloc(sizeof(struct dirlink)); - if (dlink != NULL) { - dlink->name = BLI_strdup(FILENAME_CURRENT); - BLI_addhead(&dirbase, dlink); - newnum++; + } + + if (newnum) { + if (dir_ctx->files) { + void *const tmp = MEM_reallocN(dir_ctx->files, + (dir_ctx->files_num + newnum) * sizeof(struct direntry)); + if (tmp) { + dir_ctx->files = (struct direntry *)tmp; + } + else { /* Reallocation may fail. 
*/ + MEM_freeN(dir_ctx->files); + dir_ctx->files = NULL; } } - if (newnum) { - if (dir_ctx->files) { - void *const tmp = MEM_reallocN(dir_ctx->files, - (dir_ctx->files_num + newnum) * sizeof(struct direntry)); - if (tmp) { - dir_ctx->files = (struct direntry *)tmp; - } - else { /* realloc fail */ - MEM_freeN(dir_ctx->files); - dir_ctx->files = NULL; - } - } + if (dir_ctx->files == NULL) { + dir_ctx->files = (struct direntry *)MEM_mallocN(newnum * sizeof(struct direntry), __func__); + } - if (dir_ctx->files == NULL) { - dir_ctx->files = (struct direntry *)MEM_mallocN(newnum * sizeof(struct direntry), - __func__); - } - - if (dir_ctx->files) { - struct dirlink *dlink = (struct dirlink *)dirbase.first; - struct direntry *file = &dir_ctx->files[dir_ctx->files_num]; - while (dlink) { - char fullname[PATH_MAX]; - BLI_path_join(fullname, sizeof(fullname), dirname, dlink->name); - memset(file, 0, sizeof(struct direntry)); - file->relname = dlink->name; - file->path = BLI_strdup(fullname); - if (BLI_stat(fullname, &file->s) != -1) { - file->type = file->s.st_mode; - } - else if (FILENAME_IS_CURRPAR(file->relname)) { - /* Hack around for UNC paths on windows: - * does not support stat on '\\SERVER\foo\..', sigh... */ - file->type |= S_IFDIR; - } - dir_ctx->files_num++; - file++; - dlink = dlink->next; - } - } - else { - printf("Couldn't get memory for dir\n"); - exit(1); - } - - BLI_freelist(&dirbase); - if (dir_ctx->files) { - qsort(dir_ctx->files, - dir_ctx->files_num, - sizeof(struct direntry), - (int (*)(const void *, const void *))bli_compare); - } + if (UNLIKELY(dir_ctx->files == NULL)) { + fprintf(stderr, "Couldn't get memory for dir: %s\n", dirname); + dir_ctx->files_num = 0; } else { - printf("%s empty directory\n", dirname); + struct dirlink *dlink = (struct dirlink *)dirbase.first; + struct direntry *file = &dir_ctx->files[dir_ctx->files_num]; + + while (dlink) { + memset(file, 0, sizeof(struct direntry)); + file->relname = dlink->name; + file->path = BLI_string_joinN(dirname_with_slash, dlink->name); + if (BLI_stat(file->path, &file->s) != -1) { + file->type = file->s.st_mode; + } + else if (FILENAME_IS_CURRPAR(file->relname)) { + /* Unfortunately a hack around UNC paths on WIN32, + * which does not support `stat` on `\\SERVER\foo\..`. */ + file->type |= S_IFDIR; + } + dir_ctx->files_num++; + file++; + dlink = dlink->next; + } + + qsort(dir_ctx->files, + dir_ctx->files_num, + sizeof(struct direntry), + (int (*)(const void *, const void *))direntry_cmp); } - closedir(dir); - } - else { - printf("%s non-existent directory\n", dirname); + BLI_freelist(&dirbase); } + + closedir(dir); } uint BLI_filelist_dir_contents(const char *dirname, struct direntry **r_filelist) diff --git a/source/blender/blenlib/intern/BLI_heap.c b/source/blender/blenlib/intern/BLI_heap.c index 0bc50f62232..6ca881e5ab2 100644 --- a/source/blender/blenlib/intern/BLI_heap.c +++ b/source/blender/blenlib/intern/BLI_heap.c @@ -34,7 +34,7 @@ struct HeapNode_Chunk { * Number of nodes to include per #HeapNode_Chunk when no reserved size is passed, * or we allocate past the reserved number. * - * \note Optimize number for 64kb allocs. + * \note Optimize number for 64kb allocations. * \note keep type in sync with nodes_num in heap_node_alloc_chunk. 
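+ * \note As a rough illustration (the exact expression lives in the definition that
+ *       follows): the default count is chosen so that a #HeapNode_Chunk header plus
+ *       its node payload fits within a single 64 KiB allocation.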
*/ #define HEAP_CHUNK_DEFAULT_NUM \ diff --git a/source/blender/blenlib/intern/array_utils.cc b/source/blender/blenlib/intern/array_utils.cc index 1b5b071f0cd..1e1ef354461 100644 --- a/source/blender/blenlib/intern/array_utils.cc +++ b/source/blender/blenlib/intern/array_utils.cc @@ -10,7 +10,8 @@ void copy(const GVArray &src, const int64_t grain_size) { BLI_assert(src.type() == dst.type()); - BLI_assert(src.size() == dst.size()); + BLI_assert(src.size() >= selection.min_array_size()); + BLI_assert(dst.size() >= selection.min_array_size()); threading::parallel_for(selection.index_range(), grain_size, [&](const IndexRange range) { src.materialize_to_uninitialized(selection.slice(range), dst.data()); }); diff --git a/source/blender/blenlib/intern/index_mask.cc b/source/blender/blenlib/intern/index_mask.cc index 72282bc69f3..adcc2de8bdb 100644 --- a/source/blender/blenlib/intern/index_mask.cc +++ b/source/blender/blenlib/intern/index_mask.cc @@ -5,16 +5,6 @@ namespace blender { -IndexMask IndexMask::slice(int64_t start, int64_t size) const -{ - return this->slice(IndexRange(start, size)); -} - -IndexMask IndexMask::slice(IndexRange slice) const -{ - return IndexMask(indices_.slice(slice)); -} - IndexMask IndexMask::slice_safe(int64_t start, int64_t size) const { return this->slice_safe(IndexRange(start, size)); diff --git a/source/blender/blenlib/intern/math_boolean.cc b/source/blender/blenlib/intern/math_boolean.cc index 689c23ce092..7c0cf165174 100644 --- a/source/blender/blenlib/intern/math_boolean.cc +++ b/source/blender/blenlib/intern/math_boolean.cc @@ -501,11 +501,15 @@ static int fast_expansion_sum_zeroelim( while ((eindex < elen) && (findex < flen)) { if ((fnow > enow) == (fnow > -enow)) { Two_Sum(Q, enow, Qnew, hh); - enow = e[++eindex]; + if (++eindex < elen) { + enow = e[eindex]; + } } else { Two_Sum(Q, fnow, Qnew, hh); - fnow = f[++findex]; + if (++findex < flen) { + fnow = f[findex]; + } } Q = Qnew; if (hh != 0.0) { @@ -515,7 +519,9 @@ static int fast_expansion_sum_zeroelim( } while (eindex < elen) { Two_Sum(Q, enow, Qnew, hh); - enow = e[++eindex]; + if (++eindex < elen) { + enow = e[eindex]; + } Q = Qnew; if (hh != 0.0) { h[hindex++] = hh; @@ -523,7 +529,9 @@ static int fast_expansion_sum_zeroelim( } while (findex < flen) { Two_Sum(Q, fnow, Qnew, hh); - fnow = f[++findex]; + if (++findex < flen) { + fnow = f[findex]; + } Q = Qnew; if (hh != 0.0) { h[hindex++] = hh; diff --git a/source/blender/blenlib/intern/math_matrix.c b/source/blender/blenlib/intern/math_matrix.c index d997eae26fb..b8eaeb5c654 100644 --- a/source/blender/blenlib/intern/math_matrix.c +++ b/source/blender/blenlib/intern/math_matrix.c @@ -257,22 +257,14 @@ void shuffle_m4(float R[4][4], const int index[4]) void mul_m4_m4m4(float R[4][4], const float A[4][4], const float B[4][4]) { - if (A == R) { - mul_m4_m4_post(R, B); + if (R == A || R == B) { + float T[4][4]; + mul_m4_m4m4(T, A, B); + copy_m4_m4(R, T); + return; } - else if (B == R) { - mul_m4_m4_pre(R, A); - } - else { - mul_m4_m4m4_uniq(R, A, B); - } -} -void mul_m4_m4m4_uniq(float R[4][4], const float A[4][4], const float B[4][4]) -{ - BLI_assert(!ELEM(R, A, B)); - - /* Matrix product: `R[j][k] = A[j][i] . B[i][k]`. */ + /* Matrix product: `R[j][k] = B[j][i] . A[i][k]`. 
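+   * With the aliasing check above, a call such as `mul_m4_m4m4(m, m, b)` should now
+   * yield the same result as using a distinct output matrix, at the cost of one
+   * temporary copy.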
*/ #ifdef BLI_HAVE_SSE2 __m128 A0 = _mm_loadu_ps(A[0]); __m128 A1 = _mm_loadu_ps(A[1]); @@ -313,39 +305,16 @@ void mul_m4_m4m4_uniq(float R[4][4], const float A[4][4], const float B[4][4]) #endif } -void mul_m4_m4m4_db_uniq(double R[4][4], const double A[4][4], const double B[4][4]) +void mul_m4db_m4db_m4fl(double R[4][4], const double A[4][4], const float B[4][4]) { - BLI_assert(!ELEM(R, A, B)); + if (R == A) { + double T[4][4]; + mul_m4db_m4db_m4fl(T, A, B); + copy_m4_m4_db(R, T); + return; + } - /* Matrix product: `R[j][k] = A[j][i] . B[i][k]`. */ - - R[0][0] = B[0][0] * A[0][0] + B[0][1] * A[1][0] + B[0][2] * A[2][0] + B[0][3] * A[3][0]; - R[0][1] = B[0][0] * A[0][1] + B[0][1] * A[1][1] + B[0][2] * A[2][1] + B[0][3] * A[3][1]; - R[0][2] = B[0][0] * A[0][2] + B[0][1] * A[1][2] + B[0][2] * A[2][2] + B[0][3] * A[3][2]; - R[0][3] = B[0][0] * A[0][3] + B[0][1] * A[1][3] + B[0][2] * A[2][3] + B[0][3] * A[3][3]; - - R[1][0] = B[1][0] * A[0][0] + B[1][1] * A[1][0] + B[1][2] * A[2][0] + B[1][3] * A[3][0]; - R[1][1] = B[1][0] * A[0][1] + B[1][1] * A[1][1] + B[1][2] * A[2][1] + B[1][3] * A[3][1]; - R[1][2] = B[1][0] * A[0][2] + B[1][1] * A[1][2] + B[1][2] * A[2][2] + B[1][3] * A[3][2]; - R[1][3] = B[1][0] * A[0][3] + B[1][1] * A[1][3] + B[1][2] * A[2][3] + B[1][3] * A[3][3]; - - R[2][0] = B[2][0] * A[0][0] + B[2][1] * A[1][0] + B[2][2] * A[2][0] + B[2][3] * A[3][0]; - R[2][1] = B[2][0] * A[0][1] + B[2][1] * A[1][1] + B[2][2] * A[2][1] + B[2][3] * A[3][1]; - R[2][2] = B[2][0] * A[0][2] + B[2][1] * A[1][2] + B[2][2] * A[2][2] + B[2][3] * A[3][2]; - R[2][3] = B[2][0] * A[0][3] + B[2][1] * A[1][3] + B[2][2] * A[2][3] + B[2][3] * A[3][3]; - - R[3][0] = B[3][0] * A[0][0] + B[3][1] * A[1][0] + B[3][2] * A[2][0] + B[3][3] * A[3][0]; - R[3][1] = B[3][0] * A[0][1] + B[3][1] * A[1][1] + B[3][2] * A[2][1] + B[3][3] * A[3][1]; - R[3][2] = B[3][0] * A[0][2] + B[3][1] * A[1][2] + B[3][2] * A[2][2] + B[3][3] * A[3][2]; - R[3][3] = B[3][0] * A[0][3] + B[3][1] * A[1][3] + B[3][2] * A[2][3] + B[3][3] * A[3][3]; -} - -void mul_m4db_m4db_m4fl_uniq(double R[4][4], const double A[4][4], const float B[4][4]) -{ - /* Remove second check since types don't match. */ - BLI_assert(!ELEM(R, A /*, B */)); - - /* Matrix product: `R[j][k] = A[j][i] . B[i][k]`. */ + /* Matrix product: `R[j][k] = B[j][i] . A[i][k]`. 
*/ R[0][0] = B[0][0] * A[0][0] + B[0][1] * A[1][0] + B[0][2] * A[2][0] + B[0][3] * A[3][0]; R[0][1] = B[0][0] * A[0][1] + B[0][1] * A[1][1] + B[0][2] * A[2][1] + B[0][3] * A[3][1]; @@ -370,53 +339,32 @@ void mul_m4db_m4db_m4fl_uniq(double R[4][4], const double A[4][4], const float B void mul_m4_m4_pre(float R[4][4], const float A[4][4]) { - BLI_assert(A != R); - float B[4][4]; - copy_m4_m4(B, R); - mul_m4_m4m4_uniq(R, A, B); + mul_m4_m4m4(R, A, R); } void mul_m4_m4_post(float R[4][4], const float B[4][4]) { - BLI_assert(B != R); - float A[4][4]; - copy_m4_m4(A, R); - mul_m4_m4m4_uniq(R, A, B); -} - -void mul_m3_m3m3(float R[3][3], const float A[3][3], const float B[3][3]) -{ - if (A == R) { - mul_m3_m3_post(R, B); - } - else if (B == R) { - mul_m3_m3_pre(R, A); - } - else { - mul_m3_m3m3_uniq(R, A, B); - } + mul_m4_m4m4(R, R, B); } void mul_m3_m3_pre(float R[3][3], const float A[3][3]) { - BLI_assert(A != R); - float B[3][3]; - copy_m3_m3(B, R); - mul_m3_m3m3_uniq(R, A, B); + mul_m3_m3m3(R, A, R); } void mul_m3_m3_post(float R[3][3], const float B[3][3]) { - BLI_assert(B != R); - float A[3][3]; - copy_m3_m3(A, R); - mul_m3_m3m3_uniq(R, A, B); + mul_m3_m3m3(R, R, B); } -void mul_m3_m3m3_uniq(float R[3][3], const float A[3][3], const float B[3][3]) +void mul_m3_m3m3(float R[3][3], const float A[3][3], const float B[3][3]) { - BLI_assert(!ELEM(R, A, B)); - + if (R == A || R == B) { + float T[3][3]; + mul_m3_m3m3(T, A, B); + copy_m3_m3(R, T); + return; + } R[0][0] = B[0][0] * A[0][0] + B[0][1] * A[1][0] + B[0][2] * A[2][0]; R[0][1] = B[0][0] * A[0][1] + B[0][1] * A[1][1] + B[0][2] * A[2][1]; R[0][2] = B[0][0] * A[0][2] + B[0][1] * A[1][2] + B[0][2] * A[2][2]; @@ -432,88 +380,102 @@ void mul_m3_m3m3_uniq(float R[3][3], const float A[3][3], const float B[3][3]) void mul_m4_m4m3(float R[4][4], const float A[4][4], const float B[3][3]) { - float B_[3][3], A_[4][4]; + if (R == A) { + float T[4][4]; + /* The mul_m4_m4m3 only writes to the upper-left 3x3 block, so make it so the rest of the + * matrix is copied from the input to the output. + * + * TODO(sergey): It does sound a bit redundant from the number of copy operations, so there is + * a potential for optimization. 
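+   * A possible refinement (untested sketch): when `R == A` only the upper-left 3x3
+   * block is actually rewritten, so computing into a local `float tmp[3][3]` and
+   * copying just that block back would avoid the two full 4x4 copies.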
*/ + copy_m4_m4(T, A); + mul_m4_m4m3(T, A, B); + copy_m4_m4(R, T); + return; + } - /* copy so it works when R is the same pointer as A or B */ - /* TODO: avoid copying when matrices are different */ - copy_m4_m4(A_, A); - copy_m3_m3(B_, B); - - R[0][0] = B_[0][0] * A_[0][0] + B_[0][1] * A_[1][0] + B_[0][2] * A_[2][0]; - R[0][1] = B_[0][0] * A_[0][1] + B_[0][1] * A_[1][1] + B_[0][2] * A_[2][1]; - R[0][2] = B_[0][0] * A_[0][2] + B_[0][1] * A_[1][2] + B_[0][2] * A_[2][2]; - R[1][0] = B_[1][0] * A_[0][0] + B_[1][1] * A_[1][0] + B_[1][2] * A_[2][0]; - R[1][1] = B_[1][0] * A_[0][1] + B_[1][1] * A_[1][1] + B_[1][2] * A_[2][1]; - R[1][2] = B_[1][0] * A_[0][2] + B_[1][1] * A_[1][2] + B_[1][2] * A_[2][2]; - R[2][0] = B_[2][0] * A_[0][0] + B_[2][1] * A_[1][0] + B_[2][2] * A_[2][0]; - R[2][1] = B_[2][0] * A_[0][1] + B_[2][1] * A_[1][1] + B_[2][2] * A_[2][1]; - R[2][2] = B_[2][0] * A_[0][2] + B_[2][1] * A_[1][2] + B_[2][2] * A_[2][2]; + R[0][0] = B[0][0] * A[0][0] + B[0][1] * A[1][0] + B[0][2] * A[2][0]; + R[0][1] = B[0][0] * A[0][1] + B[0][1] * A[1][1] + B[0][2] * A[2][1]; + R[0][2] = B[0][0] * A[0][2] + B[0][1] * A[1][2] + B[0][2] * A[2][2]; + R[1][0] = B[1][0] * A[0][0] + B[1][1] * A[1][0] + B[1][2] * A[2][0]; + R[1][1] = B[1][0] * A[0][1] + B[1][1] * A[1][1] + B[1][2] * A[2][1]; + R[1][2] = B[1][0] * A[0][2] + B[1][1] * A[1][2] + B[1][2] * A[2][2]; + R[2][0] = B[2][0] * A[0][0] + B[2][1] * A[1][0] + B[2][2] * A[2][0]; + R[2][1] = B[2][0] * A[0][1] + B[2][1] * A[1][1] + B[2][2] * A[2][1]; + R[2][2] = B[2][0] * A[0][2] + B[2][1] * A[1][2] + B[2][2] * A[2][2]; } void mul_m3_m3m4(float R[3][3], const float A[3][3], const float B[4][4]) { - float B_[4][4], A_[3][3]; + if (R == A) { + float T[3][3]; + mul_m3_m3m4(T, A, B); + copy_m3_m3(R, T); + return; + } - /* copy so it works when R is the same pointer as A or B */ - /* TODO: avoid copying when matrices are different */ - copy_m3_m3(A_, A); - copy_m4_m4(B_, B); + /* Matrix product: `R[j][k] = B[j][i] . A[i][k]`. 
*/ - /* R[i][j] = B_[i][k] * A_[k][j] */ - R[0][0] = B_[0][0] * A_[0][0] + B_[0][1] * A_[1][0] + B_[0][2] * A_[2][0]; - R[0][1] = B_[0][0] * A_[0][1] + B_[0][1] * A_[1][1] + B_[0][2] * A_[2][1]; - R[0][2] = B_[0][0] * A_[0][2] + B_[0][1] * A_[1][2] + B_[0][2] * A_[2][2]; + R[0][0] = B[0][0] * A[0][0] + B[0][1] * A[1][0] + B[0][2] * A[2][0]; + R[0][1] = B[0][0] * A[0][1] + B[0][1] * A[1][1] + B[0][2] * A[2][1]; + R[0][2] = B[0][0] * A[0][2] + B[0][1] * A[1][2] + B[0][2] * A[2][2]; - R[1][0] = B_[1][0] * A_[0][0] + B_[1][1] * A_[1][0] + B_[1][2] * A_[2][0]; - R[1][1] = B_[1][0] * A_[0][1] + B_[1][1] * A_[1][1] + B_[1][2] * A_[2][1]; - R[1][2] = B_[1][0] * A_[0][2] + B_[1][1] * A_[1][2] + B_[1][2] * A_[2][2]; + R[1][0] = B[1][0] * A[0][0] + B[1][1] * A[1][0] + B[1][2] * A[2][0]; + R[1][1] = B[1][0] * A[0][1] + B[1][1] * A[1][1] + B[1][2] * A[2][1]; + R[1][2] = B[1][0] * A[0][2] + B[1][1] * A[1][2] + B[1][2] * A[2][2]; - R[2][0] = B_[2][0] * A_[0][0] + B_[2][1] * A_[1][0] + B_[2][2] * A_[2][0]; - R[2][1] = B_[2][0] * A_[0][1] + B_[2][1] * A_[1][1] + B_[2][2] * A_[2][1]; - R[2][2] = B_[2][0] * A_[0][2] + B_[2][1] * A_[1][2] + B_[2][2] * A_[2][2]; + R[2][0] = B[2][0] * A[0][0] + B[2][1] * A[1][0] + B[2][2] * A[2][0]; + R[2][1] = B[2][0] * A[0][1] + B[2][1] * A[1][1] + B[2][2] * A[2][1]; + R[2][2] = B[2][0] * A[0][2] + B[2][1] * A[1][2] + B[2][2] * A[2][2]; } void mul_m3_m4m3(float R[3][3], const float A[4][4], const float B[3][3]) { - float B_[3][3], A_[4][4]; + if (R == B) { + float T[3][3]; + mul_m3_m4m3(T, A, B); + copy_m3_m3(R, T); + return; + } - /* copy so it works when R is the same pointer as A or B */ - /* TODO: avoid copying when matrices are different */ - copy_m4_m4(A_, A); - copy_m3_m3(B_, B); + /* Matrix product: `R[j][k] = B[j][i] . A[i][k]`. */ - /* R[i][j] = B[i][k] * A[k][j] */ - R[0][0] = B_[0][0] * A_[0][0] + B_[0][1] * A_[1][0] + B_[0][2] * A_[2][0]; - R[0][1] = B_[0][0] * A_[0][1] + B_[0][1] * A_[1][1] + B_[0][2] * A_[2][1]; - R[0][2] = B_[0][0] * A_[0][2] + B_[0][1] * A_[1][2] + B_[0][2] * A_[2][2]; + R[0][0] = B[0][0] * A[0][0] + B[0][1] * A[1][0] + B[0][2] * A[2][0]; + R[0][1] = B[0][0] * A[0][1] + B[0][1] * A[1][1] + B[0][2] * A[2][1]; + R[0][2] = B[0][0] * A[0][2] + B[0][1] * A[1][2] + B[0][2] * A[2][2]; - R[1][0] = B_[1][0] * A_[0][0] + B_[1][1] * A_[1][0] + B_[1][2] * A_[2][0]; - R[1][1] = B_[1][0] * A_[0][1] + B_[1][1] * A_[1][1] + B_[1][2] * A_[2][1]; - R[1][2] = B_[1][0] * A_[0][2] + B_[1][1] * A_[1][2] + B_[1][2] * A_[2][2]; + R[1][0] = B[1][0] * A[0][0] + B[1][1] * A[1][0] + B[1][2] * A[2][0]; + R[1][1] = B[1][0] * A[0][1] + B[1][1] * A[1][1] + B[1][2] * A[2][1]; + R[1][2] = B[1][0] * A[0][2] + B[1][1] * A[1][2] + B[1][2] * A[2][2]; - R[2][0] = B_[2][0] * A_[0][0] + B_[2][1] * A_[1][0] + B_[2][2] * A_[2][0]; - R[2][1] = B_[2][0] * A_[0][1] + B_[2][1] * A_[1][1] + B_[2][2] * A_[2][1]; - R[2][2] = B_[2][0] * A_[0][2] + B_[2][1] * A_[1][2] + B_[2][2] * A_[2][2]; + R[2][0] = B[2][0] * A[0][0] + B[2][1] * A[1][0] + B[2][2] * A[2][0]; + R[2][1] = B[2][0] * A[0][1] + B[2][1] * A[1][1] + B[2][2] * A[2][1]; + R[2][2] = B[2][0] * A[0][2] + B[2][1] * A[1][2] + B[2][2] * A[2][2]; } void mul_m4_m3m4(float R[4][4], const float A[3][3], const float B[4][4]) { - float B_[4][4], A_[3][3]; + if (R == B) { + float T[4][4]; + /* The mul_m4_m4m3 only writes to the upper-left 3x3 block, so make it so the rest of the + * matrix is copied from the input to the output. 
+ * + * TODO(sergey): It does sound a bit redundant from the number of copy operations, so there is + * a potential for optimization. */ + copy_m4_m4(T, B); + mul_m4_m3m4(T, A, B); + copy_m4_m4(R, T); + return; + } - /* copy so it works when R is the same pointer as A or B */ - /* TODO: avoid copying when matrices are different */ - copy_m3_m3(A_, A); - copy_m4_m4(B_, B); - - R[0][0] = B_[0][0] * A_[0][0] + B_[0][1] * A_[1][0] + B_[0][2] * A_[2][0]; - R[0][1] = B_[0][0] * A_[0][1] + B_[0][1] * A_[1][1] + B_[0][2] * A_[2][1]; - R[0][2] = B_[0][0] * A_[0][2] + B_[0][1] * A_[1][2] + B_[0][2] * A_[2][2]; - R[1][0] = B_[1][0] * A_[0][0] + B_[1][1] * A_[1][0] + B_[1][2] * A_[2][0]; - R[1][1] = B_[1][0] * A_[0][1] + B_[1][1] * A_[1][1] + B_[1][2] * A_[2][1]; - R[1][2] = B_[1][0] * A_[0][2] + B_[1][1] * A_[1][2] + B_[1][2] * A_[2][2]; - R[2][0] = B_[2][0] * A_[0][0] + B_[2][1] * A_[1][0] + B_[2][2] * A_[2][0]; - R[2][1] = B_[2][0] * A_[0][1] + B_[2][1] * A_[1][1] + B_[2][2] * A_[2][1]; - R[2][2] = B_[2][0] * A_[0][2] + B_[2][1] * A_[1][2] + B_[2][2] * A_[2][2]; + R[0][0] = B[0][0] * A[0][0] + B[0][1] * A[1][0] + B[0][2] * A[2][0]; + R[0][1] = B[0][0] * A[0][1] + B[0][1] * A[1][1] + B[0][2] * A[2][1]; + R[0][2] = B[0][0] * A[0][2] + B[0][1] * A[1][2] + B[0][2] * A[2][2]; + R[1][0] = B[1][0] * A[0][0] + B[1][1] * A[1][0] + B[1][2] * A[2][0]; + R[1][1] = B[1][0] * A[0][1] + B[1][1] * A[1][1] + B[1][2] * A[2][1]; + R[1][2] = B[1][0] * A[0][2] + B[1][1] * A[1][2] + B[1][2] * A[2][2]; + R[2][0] = B[2][0] * A[0][0] + B[2][1] * A[1][0] + B[2][2] * A[2][0]; + R[2][1] = B[2][0] * A[0][1] + B[2][1] * A[1][1] + B[2][2] * A[2][1]; + R[2][2] = B[2][0] * A[0][2] + B[2][1] * A[1][2] + B[2][2] * A[2][2]; } void mul_m3_m4m4(float R[3][3], const float A[4][4], const float B[4][4]) @@ -1304,7 +1266,7 @@ void mul_m4_m4m4_aligned_scale(float R[4][4], const float A[4][4], const float B mat4_to_loc_rot_size(loc_b, rot_b, size_b, B); mul_v3_m4v3(loc_r, A, loc_b); - mul_m3_m3m3_uniq(rot_r, rot_a, rot_b); + mul_m3_m3m3(rot_r, rot_a, rot_b); mul_v3_v3v3(size_r, size_a, size_b); loc_rot_size_to_mat4(R, loc_r, rot_r, size_r); @@ -1320,7 +1282,7 @@ void mul_m4_m4m4_split_channels(float R[4][4], const float A[4][4], const float mat4_to_loc_rot_size(loc_b, rot_b, size_b, B); add_v3_v3v3(loc_r, loc_a, loc_b); - mul_m3_m3m3_uniq(rot_r, rot_a, rot_b); + mul_m3_m3m3(rot_r, rot_a, rot_b); mul_v3_v3v3(size_r, size_a, size_b); loc_rot_size_to_mat4(R, loc_r, rot_r, size_r); diff --git a/source/blender/blenlib/intern/math_matrix.cc b/source/blender/blenlib/intern/math_matrix.cc index 9ab484d0d8a..e89b58b2a76 100644 --- a/source/blender/blenlib/intern/math_matrix.cc +++ b/source/blender/blenlib/intern/math_matrix.cc @@ -230,9 +230,9 @@ MatBase pseudo_invert(const MatBase &mat, T epsilo JacobiSVD svd( Eigen::Map(mat.base_ptr(), Size, Size), ComputeThinU | ComputeThinV); - (Eigen::Map(U.base_ptr())) = svd.matrixU(); + Eigen::Map(U.base_ptr()) = svd.matrixU(); (Eigen::Map(S_val)) = svd.singularValues(); - (Eigen::Map(V.base_ptr())) = svd.matrixV(); + Eigen::Map(V.base_ptr()) = svd.matrixV(); } /* Invert or nullify component based on epsilon comparison. 
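+   * Conceptually this builds the Moore-Penrose pseudo-inverse `V * diag(s+) * transpose(U)`,
+   * where `s+[i] = 1 / s[i]` for singular values above the epsilon and `0` otherwise.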
*/ @@ -290,9 +290,9 @@ static void polar_decompose(const MatBase &mat3, JacobiSVD svd( Eigen::Map(mat3.base_ptr(), 3, 3), ComputeThinU | ComputeThinV); - (Eigen::Map(W.base_ptr())) = svd.matrixU(); + Eigen::Map(W.base_ptr()) = svd.matrixU(); (Eigen::Map(S_val)) = svd.singularValues(); - (Map(V.base_ptr())) = svd.matrixV(); + Map(V.base_ptr()) = svd.matrixV(); } MatBase S = from_scale>(S_val); diff --git a/source/blender/blenlib/intern/mesh_boolean.cc b/source/blender/blenlib/intern/mesh_boolean.cc index 354f241a292..9de75de5ffa 100644 --- a/source/blender/blenlib/intern/mesh_boolean.cc +++ b/source/blender/blenlib/intern/mesh_boolean.cc @@ -1746,7 +1746,7 @@ static int find_containing_cell(const Vert *v, * (Adapted from #closest_on_tri_to_point_v3()). * The arguments ab, ac, ..., r are used as temporaries * in this routine. Passing them in from the caller can - * avoid many allocs and frees of temporary mpq3 values + * avoid many allocations and frees of temporary mpq3 values * and the mpq_class values within them. */ static mpq_class closest_on_tri_to_point(const mpq3 &p, diff --git a/source/blender/blenlib/intern/mesh_intersect.cc b/source/blender/blenlib/intern/mesh_intersect.cc index 8dcb513cb9f..7b82ee4e6cd 100644 --- a/source/blender/blenlib/intern/mesh_intersect.cc +++ b/source/blender/blenlib/intern/mesh_intersect.cc @@ -1147,7 +1147,7 @@ static int filter_plane_side(const double3 &p, * This works because the ratio of the projections of ab and ac onto n is the same as * the ratio along the line ab of the intersection point to the whole of ab. * The ab, ac, and dotbuf arguments are used as a temporaries; declaring them - * in the caller can avoid many allocs and frees of mpq3 and mpq_class structures. + * in the caller can avoid many allocations and frees of mpq3 and mpq_class structures. */ static inline mpq3 tti_interp( const mpq3 &a, const mpq3 &b, const mpq3 &c, const mpq3 &n, mpq3 &ab, mpq3 &ac, mpq3 &dotbuf) @@ -1167,7 +1167,7 @@ static inline mpq3 tti_interp( * order. This is the same as -oriented(a, b, c, a + ad), but uses fewer arithmetic operations. * TODO: change arguments to `const Vert *` and use floating filters. * The ba, ca, n, and dotbuf arguments are used as temporaries; declaring them - * in the caller can avoid many allocs and frees of mpq3 and mpq_class structures. + * in the caller can avoid many allocations and frees of mpq3 and mpq_class structures. */ static inline int tti_above(const mpq3 &a, const mpq3 &b, @@ -2030,7 +2030,8 @@ static Array polyfill_triangulate_poly(Face *f, IMeshArena *arena) } return Array{f0, f1}; } - /* Project along negative face normal so (x,y) can be used in 2d. */ float axis_mat[3][3]; + /* Project along negative face normal so (x,y) can be used in 2d. 
*/ + float axis_mat[3][3]; float(*projverts)[2]; uint(*tris)[3]; const int totfilltri = flen - 2; diff --git a/source/blender/blenlib/intern/offset_indices.cc b/source/blender/blenlib/intern/offset_indices.cc new file mode 100644 index 00000000000..2ac11fe631e --- /dev/null +++ b/source/blender/blenlib/intern/offset_indices.cc @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_offset_indices.hh" + +namespace blender::offset_indices { + +void accumulate_counts_to_offsets(MutableSpan counts_to_offsets, const int start_offset) +{ + int offset = start_offset; + for (const int i : counts_to_offsets.index_range().drop_back(1)) { + const int count = counts_to_offsets[i]; + BLI_assert(count >= 0); + counts_to_offsets[i] = offset; + offset += count; + } + counts_to_offsets.last() = offset; +} + +} // namespace blender::offset_indices diff --git a/source/blender/blenlib/intern/path_util.c b/source/blender/blenlib/intern/path_util.c index cba2377161a..a985e18cff5 100644 --- a/source/blender/blenlib/intern/path_util.c +++ b/source/blender/blenlib/intern/path_util.c @@ -51,23 +51,6 @@ static bool BLI_path_is_abs(const char *name); // #define DEBUG_STRSIZE -/** - * On UNIX it only makes sense to treat `/` as a path separator. - * On WIN32 either may be used. - */ -static bool is_sep_native_compat(const char ch) -{ - if (ch == SEP) { - return true; - } -#ifdef WIN32 - if (ch == ALTSEP) { - return true; - } -#endif - return false; -} - /* implementation */ int BLI_path_sequence_decode(const char *string, char *head, char *tail, ushort *r_digits_len) @@ -1467,7 +1450,7 @@ size_t BLI_path_append(char *__restrict dst, const size_t maxlen, const char *__ size_t dirlen = BLI_strnlen(dst, maxlen); /* Inline #BLI_path_slash_ensure. */ - if ((dirlen > 0) && !is_sep_native_compat(dst[dirlen - 1])) { + if ((dirlen > 0) && !BLI_path_slash_is_native_compat(dst[dirlen - 1])) { dst[dirlen++] = SEP; dst[dirlen] = '\0'; } @@ -1484,7 +1467,7 @@ size_t BLI_path_append_dir(char *__restrict dst, const size_t maxlen, const char size_t dirlen = BLI_path_append(dst, maxlen, dir); if (dirlen + 1 < maxlen) { /* Inline #BLI_path_slash_ensure. 
*/ - if ((dirlen > 0) && !is_sep_native_compat(dst[dirlen - 1])) { + if ((dirlen > 0) && !BLI_path_slash_is_native_compat(dst[dirlen - 1])) { dst[dirlen++] = SEP; dst[dirlen] = '\0'; } @@ -1539,7 +1522,7 @@ size_t BLI_path_join_array(char *__restrict dst, bool has_trailing_slash = false; if (ofs != 0) { size_t len = ofs; - while ((len != 0) && is_sep_native_compat(path[len - 1])) { + while ((len != 0) && BLI_path_slash_is_native_compat(path[len - 1])) { len -= 1; } @@ -1553,18 +1536,18 @@ size_t BLI_path_join_array(char *__restrict dst, path = path_array[path_index]; has_trailing_slash = false; const char *path_init = path; - while (is_sep_native_compat(path[0])) { + while (BLI_path_slash_is_native_compat(path[0])) { path++; } size_t len = strlen(path); if (len != 0) { - while ((len != 0) && is_sep_native_compat(path[len - 1])) { + while ((len != 0) && BLI_path_slash_is_native_compat(path[len - 1])) { len -= 1; } if (len != 0) { /* the very first path may have a slash at the end */ - if (ofs && !is_sep_native_compat(dst[ofs - 1])) { + if (ofs && !BLI_path_slash_is_native_compat(dst[ofs - 1])) { dst[ofs++] = SEP; if (ofs == dst_last) { break; @@ -1587,7 +1570,7 @@ size_t BLI_path_join_array(char *__restrict dst, } if (has_trailing_slash) { - if ((ofs != dst_last) && (ofs != 0) && !is_sep_native_compat(dst[ofs - 1])) { + if ((ofs != dst_last) && (ofs != 0) && !BLI_path_slash_is_native_compat(dst[ofs - 1])) { dst[ofs++] = SEP; } } @@ -1615,7 +1598,7 @@ static bool path_name_at_index_forward(const char *__restrict path, int i = 0; while (true) { const char c = path[i]; - if ((c == '\0') || is_sep_native_compat(c)) { + if ((c == '\0') || BLI_path_slash_is_native_compat(c)) { if (prev + 1 != i) { prev += 1; /* Skip '/./' (behave as if they don't exist). */ @@ -1650,7 +1633,7 @@ static bool path_name_at_index_backward(const char *__restrict path, int i = prev - 1; while (true) { const char c = i >= 0 ? path[i] : '\0'; - if ((c == '\0') || is_sep_native_compat(c)) { + if ((c == '\0') || BLI_path_slash_is_native_compat(c)) { if (prev - 1 != i) { i += 1; /* Skip '/./' (behave as if they don't exist). */ @@ -1749,7 +1732,7 @@ int BLI_path_slash_ensure(char *string, size_t string_maxlen) { int len = strlen(string); BLI_assert(len < string_maxlen); - if (len == 0 || !is_sep_native_compat(string[len - 1])) { + if (len == 0 || !BLI_path_slash_is_native_compat(string[len - 1])) { /* Avoid unlikely buffer overflow. 
*/ if (len + 1 < string_maxlen) { string[len] = SEP; @@ -1764,7 +1747,7 @@ void BLI_path_slash_rstrip(char *string) { int len = strlen(string); while (len) { - if (is_sep_native_compat(string[len - 1])) { + if (BLI_path_slash_is_native_compat(string[len - 1])) { string[len - 1] = '\0'; len--; } diff --git a/source/blender/blenlib/intern/scanfill_utils.c b/source/blender/blenlib/intern/scanfill_utils.c index 6bf3c4719f6..28b89a79fb0 100644 --- a/source/blender/blenlib/intern/scanfill_utils.c +++ b/source/blender/blenlib/intern/scanfill_utils.c @@ -361,7 +361,6 @@ bool BLI_scanfill_calc_self_isect(ScanFillContext *sf_ctx, { const uint poly_num = (uint)sf_ctx->poly_nr + 1; uint eed_index = 0; - int totvert_new = 0; bool changed = false; PolyInfo *poly_info; @@ -453,7 +452,6 @@ bool BLI_scanfill_calc_self_isect(ScanFillContext *sf_ctx, if (eve->user_flag != 1) { BLI_remlink(&sf_ctx->fillvertbase, eve); BLI_addtail(remvertbase, eve); - totvert_new--; } else { eve->user_flag = 0; diff --git a/source/blender/blenlib/intern/storage.c b/source/blender/blenlib/intern/storage.c index c04fc41ab4d..64f4a0b1a26 100644 --- a/source/blender/blenlib/intern/storage.c +++ b/source/blender/blenlib/intern/storage.c @@ -209,7 +209,14 @@ eFileAttributes BLI_file_attributes(const char *path) if (conv_utf_8_to_16(path, wline, ARRAY_SIZE(wline)) != 0) { return ret; } + DWORD attr = GetFileAttributesW(wline); + if (attr == INVALID_FILE_ATTRIBUTES) { + BLI_assert_msg(GetLastError() != ERROR_FILE_NOT_FOUND, + "BLI_file_attributes should only be called on existing files."); + return ret; + } + if (attr & FILE_ATTRIBUTE_READONLY) { ret |= FILE_ATTR_READONLY; } diff --git a/source/blender/blenlib/tests/BLI_linear_allocator_test.cc b/source/blender/blenlib/tests/BLI_linear_allocator_test.cc index 2ed1786f9e0..cc6bae0ea0e 100644 --- a/source/blender/blenlib/tests/BLI_linear_allocator_test.cc +++ b/source/blender/blenlib/tests/BLI_linear_allocator_test.cc @@ -58,9 +58,16 @@ TEST(linear_allocator, CopyString) blender::AlignedBuffer<256, 1> buffer; allocator.provide_buffer(buffer); + /* False positive warning with GCC 12.2, + * considers assignment outside of array bounds (`char [0]`). 
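+   * The push/ignored/pop pragmas below scope the suppression of `-Warray-bounds` to
+   * the two `copy_string()` calls only, so the warning stays enabled elsewhere.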
*/ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Warray-bounds" + StringRefNull ref1 = allocator.copy_string("Hello"); StringRefNull ref2 = allocator.copy_string("World"); +#pragma GCC diagnostic pop + EXPECT_EQ(ref1, "Hello"); EXPECT_EQ(ref2, "World"); EXPECT_EQ(ref2.data() - ref1.data(), 6); diff --git a/source/blender/blenlib/tests/BLI_math_matrix_test.cc b/source/blender/blenlib/tests/BLI_math_matrix_test.cc index 71c99a00807..4a9f7936444 100644 --- a/source/blender/blenlib/tests/BLI_math_matrix_test.cc +++ b/source/blender/blenlib/tests/BLI_math_matrix_test.cc @@ -430,7 +430,7 @@ TEST(math_matrix, MatrixTransform) result = transform_direction(m3, p); EXPECT_V3_NEAR(result, expect, 1e-5); - expect = {-0.5, -1, -1.7222222}; + expect = {-0.333333, -0.666666, -1.14814}; result = project_point(pers4, p); EXPECT_V3_NEAR(result, expect, 1e-5); @@ -457,13 +457,13 @@ TEST(math_matrix, MatrixProjection) expect = transpose(float4x4({-0.8f, 0.0f, 0.2f, 0.0f}, {0.0f, -0.666667f, 0.333333f, 0.0f}, {0.0f, 0.0f, -2.33333f, 0.666667f}, - {0.0f, 0.0f, -1.0f, 1.0f})); + {0.0f, 0.0f, -1.0f, 0.0f})); EXPECT_M4_NEAR(pers1, expect, 1e-5); expect = transpose(float4x4({4.0f, 0.0f, 0.2f, 0.0f}, {0.0f, 3.33333f, 0.333333f, 0.0f}, {0.0f, 0.0f, -2.33333f, 0.666667f}, - {0.0f, 0.0f, -1.0f, 1.0f})); + {0.0f, 0.0f, -1.0f, 0.0f})); EXPECT_M4_NEAR(pers2, expect, 1e-5); } diff --git a/source/blender/blenlib/tests/BLI_math_vector_test.cc b/source/blender/blenlib/tests/BLI_math_vector_test.cc index 5686be975b5..4b05c7a6cd5 100644 --- a/source/blender/blenlib/tests/BLI_math_vector_test.cc +++ b/source/blender/blenlib/tests/BLI_math_vector_test.cc @@ -125,4 +125,13 @@ TEST(math_vector, DivideCeil) EXPECT_FLOAT_EQ(result.z, 0); } +TEST(math_vector, Sign) +{ + const int3 a(-21, 16, 0); + const int3 result = math::sign(a); + EXPECT_FLOAT_EQ(result.x, -1); + EXPECT_FLOAT_EQ(result.y, 1); + EXPECT_FLOAT_EQ(result.z, 0); +} + } // namespace blender::tests diff --git a/source/blender/blenlib/tests/BLI_task_test.cc b/source/blender/blenlib/tests/BLI_task_test.cc index 63bb767466f..b7ee6962aec 100644 --- a/source/blender/blenlib/tests/BLI_task_test.cc +++ b/source/blender/blenlib/tests/BLI_task_test.cc @@ -196,11 +196,9 @@ TEST(task, MempoolIterTLS) int i; /* Add numbers negative `1..ITEMS_NUM` inclusive. */ - int items_num = 0; for (i = 0; i < ITEMS_NUM; i++) { data[i] = (int *)BLI_mempool_alloc(mempool); *data[i] = -(i + 1); - items_num++; } TaskParallelSettings settings; diff --git a/source/blender/blenloader/BLO_read_write.h b/source/blender/blenloader/BLO_read_write.h index 56b0cc81598..35768cb7e7a 100644 --- a/source/blender/blenloader/BLO_read_write.h +++ b/source/blender/blenloader/BLO_read_write.h @@ -42,7 +42,6 @@ typedef struct BlendWriter BlendWriter; struct BlendFileReadReport; struct Main; -struct ReportList; /* -------------------------------------------------------------------- */ /** \name Blend Write API diff --git a/source/blender/blenloader/BLO_readfile.h b/source/blender/blenloader/BLO_readfile.h index 4c34b628a6d..8b5f9d10044 100644 --- a/source/blender/blenloader/BLO_readfile.h +++ b/source/blender/blenloader/BLO_readfile.h @@ -16,13 +16,11 @@ extern "C" { struct BHead; struct BlendThumbnail; -struct Collection; struct FileData; struct LinkNode; struct ListBase; struct Main; struct MemFile; -struct Object; struct ReportList; struct Scene; struct UserDef; @@ -124,6 +122,7 @@ typedef enum eBLOReadSkip { /** Do not attempt to re-use IDs from old bmain for unchanged ones in case of undo. 
   */
   BLO_READ_SKIP_UNDO_OLD_MAIN = (1 << 2),
 } eBLOReadSkip;
+ENUM_OPERATORS(eBLOReadSkip, BLO_READ_SKIP_UNDO_OLD_MAIN)
 #define BLO_READ_SKIP_ALL (BLO_READ_SKIP_USERDEF | BLO_READ_SKIP_DATA)
 
 /**
diff --git a/source/blender/blenloader/CMakeLists.txt b/source/blender/blenloader/CMakeLists.txt
index 7ce16bb8751..f0722808da7 100644
--- a/source/blender/blenloader/CMakeLists.txt
+++ b/source/blender/blenloader/CMakeLists.txt
@@ -45,7 +45,7 @@ set(SRC
   intern/versioning_260.c
   intern/versioning_270.c
   intern/versioning_280.c
-  intern/versioning_290.c
+  intern/versioning_290.cc
   intern/versioning_300.cc
   intern/versioning_400.cc
   intern/versioning_common.cc
diff --git a/source/blender/blenloader/intern/readfile.cc b/source/blender/blenloader/intern/readfile.cc
index f4a5b6dd2fc..c440dbf8c7a 100644
--- a/source/blender/blenloader/intern/readfile.cc
+++ b/source/blender/blenloader/intern/readfile.cc
@@ -2299,11 +2299,11 @@ static bool lib_link_seq_clipboard_cb(Sequence *seq, void *arg_pt)
 {
   IDNameLib_Map *id_map = static_cast<IDNameLib_Map *>(arg_pt);
 
-  lib_link_seq_clipboard_pt_restore((ID *)seq->scene, id_map);
-  lib_link_seq_clipboard_pt_restore((ID *)seq->scene_camera, id_map);
-  lib_link_seq_clipboard_pt_restore((ID *)seq->clip, id_map);
-  lib_link_seq_clipboard_pt_restore((ID *)seq->mask, id_map);
-  lib_link_seq_clipboard_pt_restore((ID *)seq->sound, id_map);
+  lib_link_seq_clipboard_pt_restore(reinterpret_cast<ID *>(seq->scene), id_map);
+  lib_link_seq_clipboard_pt_restore(reinterpret_cast<ID *>(seq->scene_camera), id_map);
+  lib_link_seq_clipboard_pt_restore(reinterpret_cast<ID *>(seq->clip), id_map);
+  lib_link_seq_clipboard_pt_restore(reinterpret_cast<ID *>(seq->mask), id_map);
+  lib_link_seq_clipboard_pt_restore(reinterpret_cast<ID *>(seq->sound), id_map);
   return true;
 }
 
@@ -2325,7 +2325,7 @@ static int lib_link_main_data_restore_cb(LibraryIDLinkCallbackData *cb_data)
   /* We probably need to add more cases here (hint: nodetrees),
    * but will wait for changes from D5559 to get in first. */
   if (GS((*id_pointer)->name) == ID_GR) {
-    Collection *collection = (Collection *)*id_pointer;
+    Collection *collection = reinterpret_cast<Collection *>(*id_pointer);
     if (collection->flag & COLLECTION_IS_MASTER) {
       /* We should never reach that point anymore, since master collection private ID should be
        * properly tagged with IDWALK_CB_EMBEDDED. */
@@ -2357,7 +2357,7 @@ static void lib_link_main_data_restore(IDNameLib_Map *id_map, Main *newmain)
 static void lib_link_wm_xr_data_restore(IDNameLib_Map *id_map, wmXrData *xr_data)
 {
   xr_data->session_settings.base_pose_object = static_cast<Object *>(restore_pointer_by_name(
-      id_map, (ID *)xr_data->session_settings.base_pose_object, USER_REAL));
+      id_map, reinterpret_cast<ID *>(xr_data->session_settings.base_pose_object), USER_REAL));
 }
 
 static void lib_link_window_scene_data_restore(wmWindow *win, Scene *scene, ViewLayer *view_layer)
@@ -2367,7 +2367,7 @@ static void lib_link_window_scene_data_restore(wmWindow *win, Scene *scene, View
   LISTBASE_FOREACH (ScrArea *, area, &screen->areabase) {
     LISTBASE_FOREACH (SpaceLink *, sl, &area->spacedata) {
       if (sl->spacetype == SPACE_VIEW3D) {
-        View3D *v3d = (View3D *)sl;
+        View3D *v3d = reinterpret_cast<View3D *>(sl);
 
         if (v3d->camera == nullptr || v3d->scenelock) {
           v3d->camera = scene->camera;
@@ -2416,7 +2416,7 @@ static void lib_link_restore_viewer_path(IDNameLib_Map *id_map, ViewerPath *view
     if (elem->type == VIEWER_PATH_ELEM_TYPE_ID) {
       IDViewerPathElem *typed_elem = reinterpret_cast<IDViewerPathElem *>(elem);
       typed_elem->id = static_cast<ID *>(
-          restore_pointer_by_name(id_map, (ID *)typed_elem->id, USER_IGNORE));
+          restore_pointer_by_name(id_map, typed_elem->id, USER_IGNORE));
     }
   }
 }
@@ -2431,225 +2431,253 @@ static void lib_link_workspace_layout_restore(IDNameLib_Map *id_map,
 {
   LISTBASE_FOREACH (ScrArea *, area, &screen->areabase) {
     LISTBASE_FOREACH (SpaceLink *, sl, &area->spacedata) {
-      if (sl->spacetype == SPACE_VIEW3D) {
-        View3D *v3d = (View3D *)sl;
+      switch (static_cast<eSpace_Type>(sl->spacetype)) {
+        case SPACE_VIEW3D: {
+          View3D *v3d = reinterpret_cast<View3D *>(sl);
 
-        v3d->camera = static_cast<Object *>(
-            restore_pointer_by_name(id_map, (ID *)v3d->camera, USER_REAL));
-        v3d->ob_center = static_cast<Object *>(
-            restore_pointer_by_name(id_map, (ID *)v3d->ob_center, USER_REAL));
+          v3d->camera = static_cast<Object *>(
+              restore_pointer_by_name(id_map, reinterpret_cast<ID *>(v3d->camera), USER_REAL));
+          v3d->ob_center = static_cast<Object *>(restore_pointer_by_name(
+              id_map, reinterpret_cast<ID *>(v3d->ob_center), USER_REAL));
 
-        lib_link_restore_viewer_path(id_map, &v3d->viewer_path);
-      }
-      else if (sl->spacetype == SPACE_GRAPH) {
-        SpaceGraph *sipo = (SpaceGraph *)sl;
-        bDopeSheet *ads = sipo->ads;
+          lib_link_restore_viewer_path(id_map, &v3d->viewer_path);
+          break;
+        }
+        case SPACE_GRAPH: {
+          SpaceGraph *sipo = reinterpret_cast<SpaceGraph *>(sl);
+          bDopeSheet *ads = sipo->ads;
 
-        if (ads) {
-          ads->source = static_cast<ID *>(
-              restore_pointer_by_name(id_map, (ID *)ads->source, USER_REAL));
+          if (ads) {
+            ads->source = static_cast<ID *>(
+                restore_pointer_by_name(id_map, reinterpret_cast<ID *>(ads->source), USER_REAL));
 
-          if (ads->filter_grp) {
-            ads->filter_grp = static_cast<Collection *>(
-                restore_pointer_by_name(id_map, (ID *)ads->filter_grp, USER_IGNORE));
+            if (ads->filter_grp) {
+              ads->filter_grp = static_cast<Collection *>(restore_pointer_by_name(
+                  id_map, reinterpret_cast<ID *>(ads->filter_grp), USER_IGNORE));
+            }
           }
+
+          /* force recalc of list of channels (i.e. includes calculating F-Curve colors)
+           * thus preventing the "black curves" problem post-undo
+           */
+          sipo->runtime.flag |= SIPO_RUNTIME_FLAG_NEED_CHAN_SYNC_COLOR;
+          break;
         }
+        case SPACE_PROPERTIES: {
+          SpaceProperties *sbuts = reinterpret_cast<SpaceProperties *>(sl);
+          sbuts->pinid = static_cast<ID *>(
+              restore_pointer_by_name(id_map, sbuts->pinid, USER_IGNORE));
+          if (sbuts->pinid == nullptr) {
+            sbuts->flag &= ~SB_PIN_CONTEXT;
+          }
 
-        /* force recalc of list of channels (i.e. includes calculating F-Curve colors)
-         * thus preventing the "black curves" problem post-undo
-         */
-        sipo->runtime.flag |= SIPO_RUNTIME_FLAG_NEED_CHAN_SYNC_COLOR;
-      }
-      else if (sl->spacetype == SPACE_PROPERTIES) {
-        SpaceProperties *sbuts = (SpaceProperties *)sl;
-        sbuts->pinid = static_cast<ID *>(
-            restore_pointer_by_name(id_map, sbuts->pinid, USER_IGNORE));
-        if (sbuts->pinid == nullptr) {
-          sbuts->flag &= ~SB_PIN_CONTEXT;
+          /* TODO: restore path pointers: T40046
+           * (complicated because this contains data pointers too, not just ID). */
+          MEM_SAFE_FREE(sbuts->path);
+          break;
         }
-
-        /* TODO: restore path pointers: T40046
-         * (complicated because this contains data pointers too, not just ID). */
-        MEM_SAFE_FREE(sbuts->path);
-      }
-      else if (sl->spacetype == SPACE_FILE) {
-        SpaceFile *sfile = (SpaceFile *)sl;
-        sfile->op = nullptr;
-        sfile->tags = FILE_TAG_REBUILD_MAIN_FILES;
-      }
-      else if (sl->spacetype == SPACE_ACTION) {
-        SpaceAction *saction = (SpaceAction *)sl;
-
-        saction->action = static_cast<bAction *>(
-            restore_pointer_by_name(id_map, (ID *)saction->action, USER_REAL));
-        saction->ads.source = static_cast<ID *>(
-            restore_pointer_by_name(id_map, (ID *)saction->ads.source, USER_REAL));
-
-        if (saction->ads.filter_grp) {
-          saction->ads.filter_grp = static_cast<Collection *>(
-              restore_pointer_by_name(id_map, (ID *)saction->ads.filter_grp, USER_IGNORE));
+        case SPACE_FILE: {
+          SpaceFile *sfile = reinterpret_cast<SpaceFile *>(sl);
+          sfile->op = nullptr;
+          sfile->tags = FILE_TAG_REBUILD_MAIN_FILES;
+          break;
         }
+        case SPACE_ACTION: {
+          SpaceAction *saction = reinterpret_cast<SpaceAction *>(sl);
 
-        /* force recalc of list of channels, potentially updating the active action
-         * while we're at it (as it can only be updated that way) T28962.
-         */
-        saction->runtime.flag |= SACTION_RUNTIME_FLAG_NEED_CHAN_SYNC;
-      }
-      else if (sl->spacetype == SPACE_IMAGE) {
-        SpaceImage *sima = (SpaceImage *)sl;
+          saction->action = static_cast<bAction *>(restore_pointer_by_name(
+              id_map, reinterpret_cast<ID *>(saction->action), USER_REAL));
+          saction->ads.source = static_cast<ID *>(restore_pointer_by_name(
+              id_map, reinterpret_cast<ID *>(saction->ads.source), USER_REAL));
 
-        sima->image = static_cast<Image *>(
-            restore_pointer_by_name(id_map, (ID *)sima->image, USER_REAL));
+          if (saction->ads.filter_grp) {
+            saction->ads.filter_grp = static_cast<Collection *>(restore_pointer_by_name(
+                id_map, reinterpret_cast<ID *>(saction->ads.filter_grp), USER_IGNORE));
+          }
 
-        /* this will be freed, not worth attempting to find same scene,
-         * since it gets initialized later */
-        sima->iuser.scene = nullptr;
+          /* force recalc of list of channels, potentially updating the active action
+           * while we're at it (as it can only be updated that way) T28962.
+           */
+          saction->runtime.flag |= SACTION_RUNTIME_FLAG_NEED_CHAN_SYNC;
+          break;
+        }
+        case SPACE_IMAGE: {
+          SpaceImage *sima = reinterpret_cast<SpaceImage *>(sl);
+
+          sima->image = static_cast<Image *>(
+              restore_pointer_by_name(id_map, reinterpret_cast<ID *>(sima->image), USER_REAL));
+
+          /* this will be freed, not worth attempting to find same scene,
+           * since it gets initialized later */
+          sima->iuser.scene = nullptr;
 #if 0
-        /* Those are allocated and freed by space code, no need to handle them here. */
-        MEM_SAFE_FREE(sima->scopes.waveform_1);
-        MEM_SAFE_FREE(sima->scopes.waveform_2);
-        MEM_SAFE_FREE(sima->scopes.waveform_3);
-        MEM_SAFE_FREE(sima->scopes.vecscope);
+          /* Those are allocated and freed by space code, no need to handle them here. */
+          MEM_SAFE_FREE(sima->scopes.waveform_1);
+          MEM_SAFE_FREE(sima->scopes.waveform_2);
+          MEM_SAFE_FREE(sima->scopes.waveform_3);
+          MEM_SAFE_FREE(sima->scopes.vecscope);
 #endif
-        sima->scopes.ok = 0;
+          sima->scopes.ok = 0;
 
-        /* NOTE: pre-2.5, this was local data not lib data, but now we need this as lib data
-         * so assume that here we're doing for undo only...
-         */
-        sima->gpd = static_cast<bGPdata *>(
-            restore_pointer_by_name(id_map, (ID *)sima->gpd, USER_REAL));
-        sima->mask_info.mask = static_cast<Mask *>(
-            restore_pointer_by_name(id_map, (ID *)sima->mask_info.mask, USER_REAL));
-      }
-      else if (sl->spacetype == SPACE_SEQ) {
-        SpaceSeq *sseq = (SpaceSeq *)sl;
+          /* NOTE: pre-2.5, this was local data not lib data, but now we need this as lib data
+           * so assume that here we're doing for undo only...
+           */
+          sima->gpd = static_cast<bGPdata *>(
+              restore_pointer_by_name(id_map, reinterpret_cast<ID *>(sima->gpd), USER_REAL));
+          sima->mask_info.mask = static_cast<Mask *>(restore_pointer_by_name(
+              id_map, reinterpret_cast<ID *>(sima->mask_info.mask), USER_REAL));
+          break;
+        }
+        case SPACE_SEQ: {
+          SpaceSeq *sseq = reinterpret_cast<SpaceSeq *>(sl);
 
-        /* NOTE: pre-2.5, this was local data not lib data, but now we need this as lib data
-         * so assume that here we're doing for undo only...
-         */
-        sseq->gpd = static_cast<bGPdata *>(
-            restore_pointer_by_name(id_map, (ID *)sseq->gpd, USER_REAL));
-      }
-      else if (sl->spacetype == SPACE_NLA) {
-        SpaceNla *snla = (SpaceNla *)sl;
-        bDopeSheet *ads = snla->ads;
+          /* NOTE: pre-2.5, this was local data not lib data, but now we need this as lib data
+           * so assume that here we're doing for undo only...
+           */
+          sseq->gpd = static_cast<bGPdata *>(
+              restore_pointer_by_name(id_map, reinterpret_cast<ID *>(sseq->gpd), USER_REAL));
+          break;
+        }
+        case SPACE_NLA: {
+          SpaceNla *snla = reinterpret_cast<SpaceNla *>(sl);
+          bDopeSheet *ads = snla->ads;
 
-        if (ads) {
-          ads->source = static_cast<ID *>(
-              restore_pointer_by_name(id_map, (ID *)ads->source, USER_REAL));
+          if (ads) {
+            ads->source = static_cast<ID *>(
+                restore_pointer_by_name(id_map, reinterpret_cast<ID *>(ads->source), USER_REAL));
 
-          if (ads->filter_grp) {
-            ads->filter_grp = static_cast<Collection *>(
-                restore_pointer_by_name(id_map, (ID *)ads->filter_grp, USER_IGNORE));
+            if (ads->filter_grp) {
+              ads->filter_grp = static_cast<Collection *>(restore_pointer_by_name(
+                  id_map, reinterpret_cast<ID *>(ads->filter_grp), USER_IGNORE));
+            }
           }
+          break;
         }
-      }
-      else if (sl->spacetype == SPACE_TEXT) {
-        SpaceText *st = (SpaceText *)sl;
+        case SPACE_TEXT: {
+          SpaceText *st = reinterpret_cast<SpaceText *>(sl);
 
-        st->text = static_cast<Text *>(
-            restore_pointer_by_name(id_map, (ID *)st->text, USER_IGNORE));
-        if (st->text == nullptr) {
-          st->text = static_cast<Text *>(newmain->texts.first);
+          st->text = static_cast<Text *>(
+              restore_pointer_by_name(id_map, reinterpret_cast<ID *>(st->text), USER_IGNORE));
+          if (st->text == nullptr) {
+            st->text = static_cast<Text *>(newmain->texts.first);
+          }
+        } break;
+        case SPACE_SCRIPT: {
+          SpaceScript *scpt = reinterpret_cast<SpaceScript *>(sl);
+
+          scpt->script = static_cast